maintenance love:)

This commit is contained in:
lovebird 2025-06-05 20:58:26 +02:00
parent c8bd9fb66c
commit d9dc88b972
9 changed files with 160 additions and 155 deletions

View File

@ -1,5 +1,5 @@
{
"timestamp": 1749038292949,
"timestamp": 1749149893739,
"models": [
{
"id": "gpt-4-0613",

View File

@ -1,6 +1,106 @@
{
"timestamp": 1749038293088,
"timestamp": 1749149893852,
"models": [
{
"id": "google/gemini-2.5-pro-preview",
"hugging_face_id": "",
"name": "Google: Gemini 2.5 Pro Preview 06-05",
"created": 1749137257,
"description": "Gemini 2.5 Pro is Googles state-of-the-art AI model designed for advanced reasoning, coding, mathematics, and scientific tasks. It employs “thinking” capabilities, enabling it to reason through responses with enhanced accuracy and nuanced context handling. Gemini 2.5 Pro achieves top-tier performance on multiple benchmarks, including first-place positioning on the LMArena leaderboard, reflecting superior human-preference alignment and complex problem-solving abilities.\n",
"context_length": 1048576,
"architecture": {
"modality": "text+image->text",
"input_modalities": [
"file",
"image",
"text"
],
"output_modalities": [
"text"
],
"tokenizer": "Gemini",
"instruct_type": null
},
"pricing": {
"prompt": "0.00000125",
"completion": "0.00001",
"request": "0",
"image": "0.00516",
"web_search": "0",
"internal_reasoning": "0",
"input_cache_read": "0.00000031",
"input_cache_write": "0.000001625"
},
"top_provider": {
"context_length": 1048576,
"max_completion_tokens": 65536,
"is_moderated": false
},
"per_request_limits": null,
"supported_parameters": [
"tools",
"tool_choice",
"max_tokens",
"temperature",
"top_p",
"reasoning",
"include_reasoning",
"structured_outputs",
"response_format",
"stop",
"frequency_penalty",
"presence_penalty",
"seed"
]
},
{
"id": "sentientagi/dobby-mini-unhinged-plus-llama-3.1-8b",
"hugging_face_id": "SentientAGI/Dobby-Mini-Unhinged-Plus-Llama-3.1-8B",
"name": "SentientAGI: Dobby Mini Plus Llama 3.1 8B",
"created": 1748885619,
"description": "Dobby-Mini-Leashed-Llama-3.1-8B and Dobby-Mini-Unhinged-Llama-3.1-8B are language models fine-tuned from Llama-3.1-8B-Instruct. Dobby models have a strong conviction towards personal freedom, decentralization, and all things crypto — even when coerced to speak otherwise. \n\nDobby-Mini-Leashed-Llama-3.1-8B and Dobby-Mini-Unhinged-Llama-3.1-8B have their own unique, uhh, personalities. The two versions are being released to be improved using the communitys feedback, which will steer the development of a 70B model.\n\n",
"context_length": 131072,
"architecture": {
"modality": "text->text",
"input_modalities": [
"text"
],
"output_modalities": [
"text"
],
"tokenizer": "Other",
"instruct_type": null
},
"pricing": {
"prompt": "0.0000002",
"completion": "0.0000002",
"request": "0",
"image": "0",
"web_search": "0",
"internal_reasoning": "0"
},
"top_provider": {
"context_length": 131072,
"max_completion_tokens": null,
"is_moderated": false
},
"per_request_limits": null,
"supported_parameters": [
"max_tokens",
"temperature",
"top_p",
"stop",
"frequency_penalty",
"presence_penalty",
"top_k",
"repetition_penalty",
"response_format",
"structured_outputs",
"logit_bias",
"logprobs",
"top_logprobs"
]
},
{
"id": "deepseek/deepseek-r1-distill-qwen-7b",
"hugging_face_id": "deepseek-ai/DeepSeek-R1-Distill-Qwen-7B",
@ -143,9 +243,9 @@
{
"id": "google/gemma-2b-it",
"hugging_face_id": "google/gemma-2b-it",
"name": "Google: Gemma 2 2B",
"name": "Google: Gemma 1 2B",
"created": 1748460815,
"description": "Gemma 2 2B by Google is an open model built from the same research and technology used to create the [Gemini models](/models?q=gemini).\n\nGemma models are well-suited for a variety of text generation tasks, including question answering, summarization, and reasoning.\n\nSee the [launch announcement](https://blog.google/technology/developers/google-gemma-2/) for more details. Usage of Gemma is subject to Google's [Gemma Terms of Use](https://ai.google.dev/gemma/terms).",
"description": "Gemma 1 2B by Google is an open model built from the same research and technology used to create the [Gemini models](/models?q=gemini).\n\nGemma models are well-suited for a variety of text generation tasks, including question answering, summarization, and reasoning.\n\nUsage of Gemma is subject to Google's [Gemma Terms of Use](https://ai.google.dev/gemma/terms).",
"context_length": 8192,
"architecture": {
"modality": "text->text",
@ -947,9 +1047,9 @@
]
},
{
"id": "google/gemini-2.5-pro-preview",
"id": "google/gemini-2.5-pro-preview-05-06",
"hugging_face_id": "",
"name": "Google: Gemini 2.5 Pro Preview",
"name": "Google: Gemini 2.5 Pro Preview 05-06",
"created": 1746578513,
"description": "Gemini 2.5 Pro is Googles state-of-the-art AI model designed for advanced reasoning, coding, mathematics, and scientific tasks. It employs “thinking” capabilities, enabling it to reason through responses with enhanced accuracy and nuanced context handling. Gemini 2.5 Pro achieves top-tier performance on multiple benchmarks, including first-place positioning on the LMArena leaderboard, reflecting superior human-preference alignment and complex problem-solving abilities.",
"context_length": 1048576,
@ -2226,20 +2326,20 @@
"top_p",
"reasoning",
"include_reasoning",
"stop",
"frequency_penalty",
"presence_penalty",
"top_k",
"repetition_penalty",
"logit_bias",
"min_p",
"response_format",
"seed",
"presence_penalty",
"frequency_penalty",
"repetition_penalty",
"top_k",
"tools",
"tool_choice",
"stop",
"response_format",
"structured_outputs",
"logit_bias",
"logprobs",
"top_logprobs"
"top_logprobs",
"min_p"
]
},
{
@ -3628,12 +3728,12 @@
"max_tokens",
"temperature",
"top_p",
"presence_penalty",
"frequency_penalty",
"repetition_penalty",
"top_k",
"stop",
"frequency_penalty",
"presence_penalty",
"repetition_penalty",
"response_format",
"top_k",
"seed",
"min_p",
"logit_bias",
@ -6055,11 +6155,12 @@
"presence_penalty",
"top_k",
"repetition_penalty",
"logit_bias",
"min_p",
"response_format",
"top_logprobs",
"structured_outputs",
"logit_bias",
"logprobs",
"top_logprobs",
"min_p",
"seed"
]
},
@ -6794,8 +6895,8 @@
"instruct_type": null
},
"pricing": {
"prompt": "0.00000006",
"completion": "0.00000012",
"prompt": "0.00000005",
"completion": "0.00000011",
"request": "0",
"image": "0",
"web_search": "0",
@ -6803,7 +6904,7 @@
},
"top_provider": {
"context_length": 32768,
"max_completion_tokens": 16384,
"max_completion_tokens": 32768,
"is_moderated": false
},
"per_request_limits": null,
@ -6814,16 +6915,17 @@
"stop",
"frequency_penalty",
"presence_penalty",
"repetition_penalty",
"response_format",
"top_k",
"seed",
"repetition_penalty",
"logit_bias",
"logprobs",
"top_logprobs",
"min_p",
"seed",
"response_format",
"tools",
"tool_choice",
"structured_outputs",
"logit_bias",
"logprobs"
"structured_outputs"
]
},
{
@ -8038,14 +8140,14 @@
"stop",
"frequency_penalty",
"presence_penalty",
"seed",
"top_k",
"min_p",
"repetition_penalty",
"response_format",
"top_k",
"seed",
"min_p",
"logit_bias",
"logprobs",
"top_logprobs",
"response_format",
"structured_outputs"
]
},
@ -11347,15 +11449,15 @@
"max_tokens",
"temperature",
"top_p",
"response_format",
"stop",
"frequency_penalty",
"presence_penalty",
"top_k",
"repetition_penalty",
"logit_bias",
"min_p",
"response_format",
"top_k",
"seed",
"min_p",
"logit_bias",
"logprobs",
"top_logprobs",
"structured_outputs"
@ -11429,7 +11531,7 @@
},
"pricing": {
"prompt": "0.00000001",
"completion": "0.00000006",
"completion": "0.00000005",
"request": "0",
"image": "0",
"web_search": "0",
@ -14574,103 +14676,6 @@
"response_format"
]
},
{
"id": "openai/gpt-4-32k",
"hugging_face_id": null,
"name": "OpenAI: GPT-4 32k",
"created": 1693180800,
"description": "GPT-4-32k is an extended version of GPT-4, with the same capabilities but quadrupled context length, allowing for processing up to 40 pages of text in a single pass. This is particularly beneficial for handling longer content like interacting with PDFs without an external vector database. Training data: up to Sep 2021.",
"context_length": 32767,
"architecture": {
"modality": "text->text",
"input_modalities": [
"text"
],
"output_modalities": [
"text"
],
"tokenizer": "GPT",
"instruct_type": null
},
"pricing": {
"prompt": "0.00006",
"completion": "0.00012",
"request": "0",
"image": "0",
"web_search": "0",
"internal_reasoning": "0"
},
"top_provider": {
"context_length": 32767,
"max_completion_tokens": 4096,
"is_moderated": true
},
"per_request_limits": null,
"supported_parameters": [
"tools",
"tool_choice",
"max_tokens",
"temperature",
"top_p",
"stop",
"frequency_penalty",
"presence_penalty",
"seed",
"logit_bias",
"logprobs",
"top_logprobs",
"response_format"
]
},
{
"id": "openai/gpt-4-32k-0314",
"hugging_face_id": null,
"name": "OpenAI: GPT-4 32k (older v0314)",
"created": 1693180800,
"description": "GPT-4-32k is an extended version of GPT-4, with the same capabilities but quadrupled context length, allowing for processing up to 40 pages of text in a single pass. This is particularly beneficial for handling longer content like interacting with PDFs without an external vector database. Training data: up to Sep 2021.",
"context_length": 32767,
"architecture": {
"modality": "text->text",
"input_modalities": [
"text"
],
"output_modalities": [
"text"
],
"tokenizer": "GPT",
"instruct_type": null
},
"pricing": {
"prompt": "0.00006",
"completion": "0.00012",
"request": "0",
"image": "0",
"web_search": "0",
"internal_reasoning": "0"
},
"top_provider": {
"context_length": 32767,
"max_completion_tokens": 4096,
"is_moderated": true
},
"per_request_limits": null,
"supported_parameters": [
"tools",
"tool_choice",
"max_tokens",
"temperature",
"top_p",
"stop",
"frequency_penalty",
"presence_penalty",
"seed",
"logit_bias",
"logprobs",
"top_logprobs",
"response_format",
"structured_outputs"
]
},
{
"id": "mancer/weaver",
"hugging_face_id": null,

View File

@ -1,4 +1,6 @@
export declare enum E_OPENROUTER_MODEL {
MODEL_GOOGLE_GEMINI_2_5_PRO_PREVIEW = "google/gemini-2.5-pro-preview",
MODEL_SENTIENTAGI_DOBBY_MINI_UNHINGED_PLUS_LLAMA_3_1_8B = "sentientagi/dobby-mini-unhinged-plus-llama-3.1-8b",
MODEL_DEEPSEEK_DEEPSEEK_R1_DISTILL_QWEN_7B = "deepseek/deepseek-r1-distill-qwen-7b",
MODEL_DEEPSEEK_DEEPSEEK_R1_0528_QWEN3_8B_FREE = "deepseek/deepseek-r1-0528-qwen3-8b:free",
MODEL_DEEPSEEK_DEEPSEEK_R1_0528_QWEN3_8B = "deepseek/deepseek-r1-0528-qwen3-8b",
@ -19,7 +21,7 @@ export declare enum E_OPENROUTER_MODEL {
MODEL_META_LLAMA_LLAMA_3_3_8B_INSTRUCT_FREE = "meta-llama/llama-3.3-8b-instruct:free",
MODEL_NOUSRESEARCH_DEEPHERMES_3_MISTRAL_24B_PREVIEW_FREE = "nousresearch/deephermes-3-mistral-24b-preview:free",
MODEL_MISTRALAI_MISTRAL_MEDIUM_3 = "mistralai/mistral-medium-3",
MODEL_GOOGLE_GEMINI_2_5_PRO_PREVIEW = "google/gemini-2.5-pro-preview",
MODEL_GOOGLE_GEMINI_2_5_PRO_PREVIEW_05_06 = "google/gemini-2.5-pro-preview-05-06",
MODEL_ARCEE_AI_CALLER_LARGE = "arcee-ai/caller-large",
MODEL_ARCEE_AI_SPOTLIGHT = "arcee-ai/spotlight",
MODEL_ARCEE_AI_MAESTRO_REASONING = "arcee-ai/maestro-reasoning",
@ -310,8 +312,6 @@ export declare enum E_OPENROUTER_MODEL {
MODEL_MISTRALAI_MISTRAL_7B_INSTRUCT_V0_1 = "mistralai/mistral-7b-instruct-v0.1",
MODEL_PYGMALIONAI_MYTHALION_13B = "pygmalionai/mythalion-13b",
MODEL_OPENAI_GPT_3_5_TURBO_16K = "openai/gpt-3.5-turbo-16k",
MODEL_OPENAI_GPT_4_32K = "openai/gpt-4-32k",
MODEL_OPENAI_GPT_4_32K_0314 = "openai/gpt-4-32k-0314",
MODEL_MANCER_WEAVER = "mancer/weaver",
MODEL_ANTHROPIC_CLAUDE_2_0_BETA = "anthropic/claude-2.0:beta",
MODEL_ANTHROPIC_CLAUDE_2_0 = "anthropic/claude-2.0",

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@ -1,12 +1,12 @@
{
"name": "@plastichub/kbot",
"version": "1.1.39",
"version": "1.1.40",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "@plastichub/kbot",
"version": "1.1.39",
"version": "1.1.40",
"license": "ISC",
"dependencies": {
"node-emoji": "^2.2.0"

View File

@ -1,6 +1,6 @@
{
"name": "@plastichub/kbot",
"version": "1.1.39",
"version": "1.1.40",
"main": "main_node.js",
"author": "",
"license": "ISC",

View File

@ -1,4 +1,6 @@
export enum E_OPENROUTER_MODEL {
MODEL_GOOGLE_GEMINI_2_5_PRO_PREVIEW = "google/gemini-2.5-pro-preview",
MODEL_SENTIENTAGI_DOBBY_MINI_UNHINGED_PLUS_LLAMA_3_1_8B = "sentientagi/dobby-mini-unhinged-plus-llama-3.1-8b",
MODEL_DEEPSEEK_DEEPSEEK_R1_DISTILL_QWEN_7B = "deepseek/deepseek-r1-distill-qwen-7b",
MODEL_DEEPSEEK_DEEPSEEK_R1_0528_QWEN3_8B_FREE = "deepseek/deepseek-r1-0528-qwen3-8b:free",
MODEL_DEEPSEEK_DEEPSEEK_R1_0528_QWEN3_8B = "deepseek/deepseek-r1-0528-qwen3-8b",
@ -19,7 +21,7 @@ export enum E_OPENROUTER_MODEL {
MODEL_META_LLAMA_LLAMA_3_3_8B_INSTRUCT_FREE = "meta-llama/llama-3.3-8b-instruct:free",
MODEL_NOUSRESEARCH_DEEPHERMES_3_MISTRAL_24B_PREVIEW_FREE = "nousresearch/deephermes-3-mistral-24b-preview:free",
MODEL_MISTRALAI_MISTRAL_MEDIUM_3 = "mistralai/mistral-medium-3",
MODEL_GOOGLE_GEMINI_2_5_PRO_PREVIEW = "google/gemini-2.5-pro-preview",
MODEL_GOOGLE_GEMINI_2_5_PRO_PREVIEW_05_06 = "google/gemini-2.5-pro-preview-05-06",
MODEL_ARCEE_AI_CALLER_LARGE = "arcee-ai/caller-large",
MODEL_ARCEE_AI_SPOTLIGHT = "arcee-ai/spotlight",
MODEL_ARCEE_AI_MAESTRO_REASONING = "arcee-ai/maestro-reasoning",
@ -310,8 +312,6 @@ export enum E_OPENROUTER_MODEL {
MODEL_MISTRALAI_MISTRAL_7B_INSTRUCT_V0_1 = "mistralai/mistral-7b-instruct-v0.1",
MODEL_PYGMALIONAI_MYTHALION_13B = "pygmalionai/mythalion-13b",
MODEL_OPENAI_GPT_3_5_TURBO_16K = "openai/gpt-3.5-turbo-16k",
MODEL_OPENAI_GPT_4_32K = "openai/gpt-4-32k",
MODEL_OPENAI_GPT_4_32K_0314 = "openai/gpt-4-32k-0314",
MODEL_MANCER_WEAVER = "mancer/weaver",
MODEL_ANTHROPIC_CLAUDE_2_0_BETA = "anthropic/claude-2.0:beta",
MODEL_ANTHROPIC_CLAUDE_2_0 = "anthropic/claude-2.0",