maintenance love :)
This commit is contained in:
parent afe247e109
commit 0a92003955
@@ -1,5 +1,5 @@
{
"timestamp": 1752753846841,
"timestamp": 1753551692806,
"models": [
{
"id": "gpt-4-0613",
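For reference, the catalog entry touched above only moves the capture timestamp forward; the `models` array keeps its per-model shape. A minimal TypeScript sketch of that shape, limited to the fields visible in this hunk (`timestamp`, `models[].id`); the interface and helper names are illustrative, not part of the package:

```ts
// Shape of the model-catalog snapshot as visible in this hunk.
// Only `timestamp` and `id` appear here; other per-model fields are omitted.
interface ModelCatalogSnapshot {
  timestamp: number;             // epoch milliseconds, e.g. 1753551692806
  models: Array<{ id: string }>; // e.g. { id: "gpt-4-0613" }
}

// Illustrative helper: keep whichever snapshot was captured more recently.
function newerSnapshot(a: ModelCatalogSnapshot, b: ModelCatalogSnapshot): ModelCatalogSnapshot {
  return a.timestamp >= b.timestamp ? a : b;
}
```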
File diff suppressed because it is too large
File diff suppressed because one or more lines are too long
4  packages/kbot/dist/package-lock.json (generated, vendored)
@@ -1,12 +1,12 @@
{
"name": "@plastichub/kbot",
"version": "1.1.45",
"version": "1.1.46",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "@plastichub/kbot",
"version": "1.1.45",
"version": "1.1.46",
"license": "ISC",
"dependencies": {
"node-emoji": "^2.2.0"
2  packages/kbot/dist/package.json (vendored)
@@ -1,6 +1,6 @@
{
"name": "@plastichub/kbot",
"version": "1.1.45",
"version": "1.1.46",
"main": "main_node.js",
"author": "",
"license": "ISC",
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
@@ -1,4 +1,6 @@
export enum E_OPENROUTER_MODEL_FREE {
MODEL_FREE_QWEN_QWEN3_CODER_FREE = "qwen/qwen3-coder:free",
MODEL_FREE_QWEN_QWEN3_235B_A22B_2507_FREE = "qwen/qwen3-235b-a22b-2507:free",
MODEL_FREE_MOONSHOTAI_KIMI_K2_FREE = "moonshotai/kimi-k2:free",
MODEL_FREE_COGNITIVECOMPUTATIONS_DOLPHIN_MISTRAL_24B_VENICE_EDITION_FREE = "cognitivecomputations/dolphin-mistral-24b-venice-edition:free",
MODEL_FREE_GOOGLE_GEMMA_3N_E2B_IT_FREE = "google/gemma-3n-e2b-it:free",
@@ -15,7 +17,6 @@ export enum E_OPENROUTER_MODEL_FREE {
MODEL_FREE_QWEN_QWEN3_30B_A3B_FREE = "qwen/qwen3-30b-a3b:free",
MODEL_FREE_QWEN_QWEN3_8B_FREE = "qwen/qwen3-8b:free",
MODEL_FREE_QWEN_QWEN3_14B_FREE = "qwen/qwen3-14b:free",
MODEL_FREE_QWEN_QWEN3_32B_FREE = "qwen/qwen3-32b:free",
MODEL_FREE_QWEN_QWEN3_235B_A22B_FREE = "qwen/qwen3-235b-a22b:free",
MODEL_FREE_TNGTECH_DEEPSEEK_R1T_CHIMERA_FREE = "tngtech/deepseek-r1t-chimera:free",
MODEL_FREE_MICROSOFT_MAI_DS_R1_FREE = "microsoft/mai-ds-r1:free",
@@ -26,7 +27,6 @@ export enum E_OPENROUTER_MODEL_FREE {
MODEL_FREE_AGENTICA_ORG_DEEPCODER_14B_PREVIEW_FREE = "agentica-org/deepcoder-14b-preview:free",
MODEL_FREE_MOONSHOTAI_KIMI_VL_A3B_THINKING_FREE = "moonshotai/kimi-vl-a3b-thinking:free",
MODEL_FREE_NVIDIA_LLAMA_3_1_NEMOTRON_ULTRA_253B_V1_FREE = "nvidia/llama-3.1-nemotron-ultra-253b-v1:free",
MODEL_FREE_DEEPSEEK_DEEPSEEK_V3_BASE_FREE = "deepseek/deepseek-v3-base:free",
MODEL_FREE_GOOGLE_GEMINI_2_5_PRO_EXP_03_25 = "google/gemini-2.5-pro-exp-03-25",
MODEL_FREE_QWEN_QWEN2_5_VL_32B_INSTRUCT_FREE = "qwen/qwen2.5-vl-32b-instruct:free",
MODEL_FREE_DEEPSEEK_DEEPSEEK_CHAT_V3_0324_FREE = "deepseek/deepseek-chat-v3-0324:free",
@@ -45,7 +45,6 @@ export enum E_OPENROUTER_MODEL_FREE {
MODEL_FREE_DEEPSEEK_DEEPSEEK_R1_DISTILL_QWEN_14B_FREE = "deepseek/deepseek-r1-distill-qwen-14b:free",
MODEL_FREE_DEEPSEEK_DEEPSEEK_R1_DISTILL_LLAMA_70B_FREE = "deepseek/deepseek-r1-distill-llama-70b:free",
MODEL_FREE_DEEPSEEK_DEEPSEEK_R1_FREE = "deepseek/deepseek-r1:free",
MODEL_FREE_DEEPSEEK_DEEPSEEK_CHAT_FREE = "deepseek/deepseek-chat:free",
MODEL_FREE_GOOGLE_GEMINI_2_0_FLASH_EXP_FREE = "google/gemini-2.0-flash-exp:free",
MODEL_FREE_META_LLAMA_LLAMA_3_3_70B_INSTRUCT_FREE = "meta-llama/llama-3.3-70b-instruct:free",
MODEL_FREE_QWEN_QWEN_2_5_CODER_32B_INSTRUCT_FREE = "qwen/qwen-2.5-coder-32b-instruct:free",
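The free-tier enum above evaluates to plain OpenRouter model-id strings, so a member can be passed straight through as the `model` field of a chat-completion request. A minimal usage sketch against OpenRouter's public chat-completions endpoint; the import path and `OPENROUTER_API_KEY` variable are assumptions, not something this diff defines:

```ts
// Sketch: call OpenRouter with one of the free-tier enum members.
// Assumes Node 18+ (global fetch) and an OPENROUTER_API_KEY env variable.
import { E_OPENROUTER_MODEL_FREE } from "./openrouter-models-free"; // hypothetical path

async function askFreeModel(prompt: string): Promise<string> {
  const res = await fetch("https://openrouter.ai/api/v1/chat/completions", {
    method: "POST",
    headers: {
      Authorization: `Bearer ${process.env.OPENROUTER_API_KEY}`,
      "Content-Type": "application/json",
    },
    body: JSON.stringify({
      model: E_OPENROUTER_MODEL_FREE.MODEL_FREE_QWEN_QWEN3_CODER_FREE, // "qwen/qwen3-coder:free"
      messages: [{ role: "user", content: prompt }],
    }),
  });
  const data = await res.json();
  return data.choices[0].message.content;
}
```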
@@ -1,4 +1,12 @@
export enum E_OPENROUTER_MODEL {
MODEL_QWEN_QWEN3_235B_A22B_THINKING_2507 = "qwen/qwen3-235b-a22b-thinking-2507",
MODEL_Z_AI_GLM_4_32B = "z-ai/glm-4-32b",
MODEL_QWEN_QWEN3_CODER_FREE = "qwen/qwen3-coder:free",
MODEL_QWEN_QWEN3_CODER = "qwen/qwen3-coder",
MODEL_BYTEDANCE_UI_TARS_1_5_7B = "bytedance/ui-tars-1.5-7b",
MODEL_GOOGLE_GEMINI_2_5_FLASH_LITE = "google/gemini-2.5-flash-lite",
MODEL_QWEN_QWEN3_235B_A22B_2507_FREE = "qwen/qwen3-235b-a22b-2507:free",
MODEL_QWEN_QWEN3_235B_A22B_2507 = "qwen/qwen3-235b-a22b-2507",
MODEL_SWITCHPOINT_ROUTER = "switchpoint/router",
MODEL_MOONSHOTAI_KIMI_K2_FREE = "moonshotai/kimi-k2:free",
MODEL_MOONSHOTAI_KIMI_K2 = "moonshotai/kimi-k2",
@@ -11,6 +19,7 @@ export enum E_OPENROUTER_MODEL {
MODEL_TENCENT_HUNYUAN_A13B_INSTRUCT_FREE = "tencent/hunyuan-a13b-instruct:free",
MODEL_TENCENT_HUNYUAN_A13B_INSTRUCT = "tencent/hunyuan-a13b-instruct",
MODEL_TNGTECH_DEEPSEEK_R1T2_CHIMERA_FREE = "tngtech/deepseek-r1t2-chimera:free",
MODEL_TNGTECH_DEEPSEEK_R1T2_CHIMERA = "tngtech/deepseek-r1t2-chimera",
MODEL_MORPH_MORPH_V3_LARGE = "morph/morph-v3-large",
MODEL_MORPH_MORPH_V3_FAST = "morph/morph-v3-fast",
MODEL_BAIDU_ERNIE_4_5_300B_A47B = "baidu/ernie-4.5-300b-a47b",
@@ -46,15 +55,13 @@ export enum E_OPENROUTER_MODEL {
MODEL_GOOGLE_GEMMA_3N_E4B_IT_FREE = "google/gemma-3n-e4b-it:free",
MODEL_GOOGLE_GEMMA_3N_E4B_IT = "google/gemma-3n-e4b-it",
MODEL_OPENAI_CODEX_MINI = "openai/codex-mini",
MODEL_NOUSRESEARCH_DEEPHERMES_3_MISTRAL_24B_PREVIEW = "nousresearch/deephermes-3-mistral-24b-preview",
MODEL_MISTRALAI_MISTRAL_MEDIUM_3 = "mistralai/mistral-medium-3",
MODEL_GOOGLE_GEMINI_2_5_PRO_PREVIEW_05_06 = "google/gemini-2.5-pro-preview-05-06",
MODEL_ARCEE_AI_CALLER_LARGE = "arcee-ai/caller-large",
MODEL_ARCEE_AI_SPOTLIGHT = "arcee-ai/spotlight",
MODEL_ARCEE_AI_MAESTRO_REASONING = "arcee-ai/maestro-reasoning",
MODEL_ARCEE_AI_VIRTUOSO_LARGE = "arcee-ai/virtuoso-large",
MODEL_ARCEE_AI_CODER_LARGE = "arcee-ai/coder-large",
MODEL_ARCEE_AI_VIRTUOSO_MEDIUM_V2 = "arcee-ai/virtuoso-medium-v2",
MODEL_ARCEE_AI_ARCEE_BLITZ = "arcee-ai/arcee-blitz",
MODEL_MICROSOFT_PHI_4_REASONING_PLUS = "microsoft/phi-4-reasoning-plus",
MODEL_INCEPTION_MERCURY_CODER = "inception/mercury-coder",
MODEL_QWEN_QWEN3_4B_FREE = "qwen/qwen3-4b:free",
@@ -67,28 +74,32 @@ export enum E_OPENROUTER_MODEL {
MODEL_QWEN_QWEN3_8B = "qwen/qwen3-8b",
MODEL_QWEN_QWEN3_14B_FREE = "qwen/qwen3-14b:free",
MODEL_QWEN_QWEN3_14B = "qwen/qwen3-14b",
MODEL_QWEN_QWEN3_32B_FREE = "qwen/qwen3-32b:free",
MODEL_QWEN_QWEN3_32B = "qwen/qwen3-32b",
MODEL_QWEN_QWEN3_235B_A22B_FREE = "qwen/qwen3-235b-a22b:free",
MODEL_QWEN_QWEN3_235B_A22B = "qwen/qwen3-235b-a22b",
MODEL_TNGTECH_DEEPSEEK_R1T_CHIMERA_FREE = "tngtech/deepseek-r1t-chimera:free",
MODEL_MICROSOFT_MAI_DS_R1_FREE = "microsoft/mai-ds-r1:free",
MODEL_MICROSOFT_MAI_DS_R1 = "microsoft/mai-ds-r1",
MODEL_THUDM_GLM_Z1_32B_FREE = "thudm/glm-z1-32b:free",
MODEL_THUDM_GLM_Z1_32B = "thudm/glm-z1-32b",
MODEL_THUDM_GLM_4_32B_FREE = "thudm/glm-4-32b:free",
MODEL_THUDM_GLM_4_32B = "thudm/glm-4-32b",
MODEL_OPENAI_O4_MINI_HIGH = "openai/o4-mini-high",
MODEL_OPENAI_O3 = "openai/o3",
MODEL_OPENAI_O4_MINI = "openai/o4-mini",
MODEL_SHISA_AI_SHISA_V2_LLAMA3_3_70B_FREE = "shisa-ai/shisa-v2-llama3.3-70b:free",
MODEL_SHISA_AI_SHISA_V2_LLAMA3_3_70B = "shisa-ai/shisa-v2-llama3.3-70b",
MODEL_OPENAI_GPT_4_1 = "openai/gpt-4.1",
MODEL_OPENAI_GPT_4_1_MINI = "openai/gpt-4.1-mini",
MODEL_OPENAI_GPT_4_1_NANO = "openai/gpt-4.1-nano",
MODEL_ELEUTHERAI_LLEMMA_7B = "eleutherai/llemma_7b",
MODEL_ALFREDPROS_CODELLAMA_7B_INSTRUCT_SOLIDITY = "alfredpros/codellama-7b-instruct-solidity",
MODEL_ARLIAI_QWQ_32B_ARLIAI_RPR_V1_FREE = "arliai/qwq-32b-arliai-rpr-v1:free",
MODEL_ARLIAI_QWQ_32B_ARLIAI_RPR_V1 = "arliai/qwq-32b-arliai-rpr-v1",
MODEL_AGENTICA_ORG_DEEPCODER_14B_PREVIEW_FREE = "agentica-org/deepcoder-14b-preview:free",
MODEL_AGENTICA_ORG_DEEPCODER_14B_PREVIEW = "agentica-org/deepcoder-14b-preview",
MODEL_MOONSHOTAI_KIMI_VL_A3B_THINKING_FREE = "moonshotai/kimi-vl-a3b-thinking:free",
MODEL_MOONSHOTAI_KIMI_VL_A3B_THINKING = "moonshotai/kimi-vl-a3b-thinking",
MODEL_X_AI_GROK_3_MINI_BETA = "x-ai/grok-3-mini-beta",
MODEL_X_AI_GROK_3_BETA = "x-ai/grok-3-beta",
MODEL_NVIDIA_LLAMA_3_3_NEMOTRON_SUPER_49B_V1 = "nvidia/llama-3.3-nemotron-super-49b-v1",
@@ -96,7 +107,7 @@ export enum E_OPENROUTER_MODEL {
MODEL_NVIDIA_LLAMA_3_1_NEMOTRON_ULTRA_253B_V1 = "nvidia/llama-3.1-nemotron-ultra-253b-v1",
MODEL_META_LLAMA_LLAMA_4_MAVERICK = "meta-llama/llama-4-maverick",
MODEL_META_LLAMA_LLAMA_4_SCOUT = "meta-llama/llama-4-scout",
MODEL_DEEPSEEK_DEEPSEEK_V3_BASE_FREE = "deepseek/deepseek-v3-base:free",
MODEL_DEEPSEEK_DEEPSEEK_V3_BASE = "deepseek/deepseek-v3-base",
MODEL_SCB10X_LLAMA3_1_TYPHOON2_70B_INSTRUCT = "scb10x/llama3.1-typhoon2-70b-instruct",
MODEL_GOOGLE_GEMINI_2_5_PRO_EXP_03_25 = "google/gemini-2.5-pro-exp-03-25",
MODEL_QWEN_QWEN2_5_VL_32B_INSTRUCT_FREE = "qwen/qwen2.5-vl-32b-instruct:free",
@@ -170,11 +181,9 @@ export enum E_OPENROUTER_MODEL {
MODEL_MINIMAX_MINIMAX_01 = "minimax/minimax-01",
MODEL_MISTRALAI_CODESTRAL_2501 = "mistralai/codestral-2501",
MODEL_MICROSOFT_PHI_4 = "microsoft/phi-4",
MODEL_DEEPSEEK_DEEPSEEK_CHAT_FREE = "deepseek/deepseek-chat:free",
MODEL_DEEPSEEK_DEEPSEEK_CHAT = "deepseek/deepseek-chat",
MODEL_SAO10K_L3_3_EURYALE_70B = "sao10k/l3.3-euryale-70b",
MODEL_OPENAI_O1 = "openai/o1",
MODEL_EVA_UNIT_01_EVA_LLAMA_3_33_70B = "eva-unit-01/eva-llama-3.33-70b",
MODEL_X_AI_GROK_2_VISION_1212 = "x-ai/grok-2-vision-1212",
MODEL_X_AI_GROK_2_1212 = "x-ai/grok-2-1212",
MODEL_COHERE_COMMAND_R7B_12_2024 = "cohere/command-r7b-12-2024",
@@ -203,16 +212,16 @@ export enum E_OPENROUTER_MODEL {
MODEL_ANTHRACITE_ORG_MAGNUM_V4_72B = "anthracite-org/magnum-v4-72b",
MODEL_ANTHROPIC_CLAUDE_3_5_SONNET_BETA = "anthropic/claude-3.5-sonnet:beta",
MODEL_ANTHROPIC_CLAUDE_3_5_SONNET = "anthropic/claude-3.5-sonnet",
MODEL_MISTRALAI_MINISTRAL_3B = "mistralai/ministral-3b",
MODEL_MISTRALAI_MINISTRAL_8B = "mistralai/ministral-8b",
MODEL_MISTRALAI_MINISTRAL_3B = "mistralai/ministral-3b",
MODEL_QWEN_QWEN_2_5_7B_INSTRUCT = "qwen/qwen-2.5-7b-instruct",
MODEL_NVIDIA_LLAMA_3_1_NEMOTRON_70B_INSTRUCT = "nvidia/llama-3.1-nemotron-70b-instruct",
MODEL_INFLECTION_INFLECTION_3_PRODUCTIVITY = "inflection/inflection-3-productivity",
MODEL_INFLECTION_INFLECTION_3_PI = "inflection/inflection-3-pi",
MODEL_INFLECTION_INFLECTION_3_PRODUCTIVITY = "inflection/inflection-3-productivity",
MODEL_GOOGLE_GEMINI_FLASH_1_5_8B = "google/gemini-flash-1.5-8b",
MODEL_ANTHRACITE_ORG_MAGNUM_V2_72B = "anthracite-org/magnum-v2-72b",
MODEL_THEDRUMMER_ROCINANTE_12B = "thedrummer/rocinante-12b",
MODEL_LIQUID_LFM_40B = "liquid/lfm-40b",
MODEL_ANTHRACITE_ORG_MAGNUM_V2_72B = "anthracite-org/magnum-v2-72b",
MODEL_META_LLAMA_LLAMA_3_2_3B_INSTRUCT_FREE = "meta-llama/llama-3.2-3b-instruct:free",
MODEL_META_LLAMA_LLAMA_3_2_3B_INSTRUCT = "meta-llama/llama-3.2-3b-instruct",
MODEL_META_LLAMA_LLAMA_3_2_1B_INSTRUCT = "meta-llama/llama-3.2-1b-instruct",
@@ -222,34 +231,32 @@ export enum E_OPENROUTER_MODEL {
MODEL_QWEN_QWEN_2_5_72B_INSTRUCT_FREE = "qwen/qwen-2.5-72b-instruct:free",
MODEL_QWEN_QWEN_2_5_72B_INSTRUCT = "qwen/qwen-2.5-72b-instruct",
MODEL_NEVERSLEEP_LLAMA_3_1_LUMIMAID_8B = "neversleep/llama-3.1-lumimaid-8b",
MODEL_OPENAI_O1_PREVIEW = "openai/o1-preview",
MODEL_OPENAI_O1_MINI_2024_09_12 = "openai/o1-mini-2024-09-12",
MODEL_OPENAI_O1_PREVIEW_2024_09_12 = "openai/o1-preview-2024-09-12",
MODEL_OPENAI_O1_MINI_2024_09_12 = "openai/o1-mini-2024-09-12",
MODEL_OPENAI_O1_PREVIEW = "openai/o1-preview",
MODEL_OPENAI_O1_MINI = "openai/o1-mini",
MODEL_MISTRALAI_PIXTRAL_12B = "mistralai/pixtral-12b",
MODEL_COHERE_COMMAND_R_PLUS_08_2024 = "cohere/command-r-plus-08-2024",
MODEL_COHERE_COMMAND_R_08_2024 = "cohere/command-r-08-2024",
MODEL_COHERE_COMMAND_R_PLUS_08_2024 = "cohere/command-r-plus-08-2024",
MODEL_QWEN_QWEN_2_5_VL_7B_INSTRUCT = "qwen/qwen-2.5-vl-7b-instruct",
MODEL_SAO10K_L3_1_EURYALE_70B = "sao10k/l3.1-euryale-70b",
MODEL_MICROSOFT_PHI_3_5_MINI_128K_INSTRUCT = "microsoft/phi-3.5-mini-128k-instruct",
MODEL_NOUSRESEARCH_HERMES_3_LLAMA_3_1_70B = "nousresearch/hermes-3-llama-3.1-70b",
MODEL_NOUSRESEARCH_HERMES_3_LLAMA_3_1_405B = "nousresearch/hermes-3-llama-3.1-405b",
MODEL_OPENAI_CHATGPT_4O_LATEST = "openai/chatgpt-4o-latest",
MODEL_AETHERWIING_MN_STARCANNON_12B = "aetherwiing/mn-starcannon-12b",
MODEL_SAO10K_L3_LUNARIS_8B = "sao10k/l3-lunaris-8b",
MODEL_OPENAI_GPT_4O_2024_08_06 = "openai/gpt-4o-2024-08-06",
MODEL_META_LLAMA_LLAMA_3_1_405B = "meta-llama/llama-3.1-405b",
MODEL_NOTHINGIISREAL_MN_CELESTE_12B = "nothingiisreal/mn-celeste-12b",
MODEL_META_LLAMA_LLAMA_3_1_8B_INSTRUCT = "meta-llama/llama-3.1-8b-instruct",
MODEL_META_LLAMA_LLAMA_3_1_70B_INSTRUCT = "meta-llama/llama-3.1-70b-instruct",
MODEL_META_LLAMA_LLAMA_3_1_8B_INSTRUCT = "meta-llama/llama-3.1-8b-instruct",
MODEL_META_LLAMA_LLAMA_3_1_405B_INSTRUCT_FREE = "meta-llama/llama-3.1-405b-instruct:free",
MODEL_META_LLAMA_LLAMA_3_1_405B_INSTRUCT = "meta-llama/llama-3.1-405b-instruct",
MODEL_MISTRALAI_MISTRAL_NEMO_FREE = "mistralai/mistral-nemo:free",
MODEL_MISTRALAI_MISTRAL_NEMO = "mistralai/mistral-nemo",
MODEL_OPENAI_GPT_4O_MINI_2024_07_18 = "openai/gpt-4o-mini-2024-07-18",
MODEL_OPENAI_GPT_4O_MINI = "openai/gpt-4o-mini",
MODEL_OPENAI_GPT_4O_MINI_2024_07_18 = "openai/gpt-4o-mini-2024-07-18",
MODEL_GOOGLE_GEMMA_2_27B_IT = "google/gemma-2-27b-it",
MODEL_ALPINDALE_MAGNUM_72B = "alpindale/magnum-72b",
MODEL_GOOGLE_GEMMA_2_9B_IT_FREE = "google/gemma-2-9b-it:free",
MODEL_GOOGLE_GEMMA_2_9B_IT = "google/gemma-2-9b-it",
MODEL_01_AI_YI_LARGE = "01-ai/yi-large",
@@ -259,63 +266,56 @@ export enum E_OPENROUTER_MODEL {
MODEL_COGNITIVECOMPUTATIONS_DOLPHIN_MIXTRAL_8X22B = "cognitivecomputations/dolphin-mixtral-8x22b",
MODEL_QWEN_QWEN_2_72B_INSTRUCT = "qwen/qwen-2-72b-instruct",
MODEL_MISTRALAI_MISTRAL_7B_INSTRUCT_V0_3 = "mistralai/mistral-7b-instruct-v0.3",
MODEL_NOUSRESEARCH_HERMES_2_PRO_LLAMA_3_8B = "nousresearch/hermes-2-pro-llama-3-8b",
MODEL_MISTRALAI_MISTRAL_7B_INSTRUCT_FREE = "mistralai/mistral-7b-instruct:free",
MODEL_MISTRALAI_MISTRAL_7B_INSTRUCT = "mistralai/mistral-7b-instruct",
MODEL_NOUSRESEARCH_HERMES_2_PRO_LLAMA_3_8B = "nousresearch/hermes-2-pro-llama-3-8b",
MODEL_MICROSOFT_PHI_3_MINI_128K_INSTRUCT = "microsoft/phi-3-mini-128k-instruct",
MODEL_MICROSOFT_PHI_3_MEDIUM_128K_INSTRUCT = "microsoft/phi-3-medium-128k-instruct",
MODEL_NEVERSLEEP_LLAMA_3_LUMIMAID_70B = "neversleep/llama-3-lumimaid-70b",
MODEL_GOOGLE_GEMINI_FLASH_1_5 = "google/gemini-flash-1.5",
MODEL_OPENAI_GPT_4O_2024_05_13 = "openai/gpt-4o-2024-05-13",
MODEL_META_LLAMA_LLAMA_GUARD_2_8B = "meta-llama/llama-guard-2-8b",
MODEL_OPENAI_GPT_4O = "openai/gpt-4o",
MODEL_OPENAI_GPT_4O_EXTENDED = "openai/gpt-4o:extended",
MODEL_OPENAI_GPT_4O_2024_05_13 = "openai/gpt-4o-2024-05-13",
MODEL_SAO10K_FIMBULVETR_11B_V2 = "sao10k/fimbulvetr-11b-v2",
MODEL_META_LLAMA_LLAMA_3_8B_INSTRUCT = "meta-llama/llama-3-8b-instruct",
MODEL_META_LLAMA_LLAMA_3_70B_INSTRUCT = "meta-llama/llama-3-70b-instruct",
MODEL_META_LLAMA_LLAMA_3_8B_INSTRUCT = "meta-llama/llama-3-8b-instruct",
MODEL_MISTRALAI_MIXTRAL_8X22B_INSTRUCT = "mistralai/mixtral-8x22b-instruct",
MODEL_MICROSOFT_WIZARDLM_2_8X22B = "microsoft/wizardlm-2-8x22b",
MODEL_GOOGLE_GEMINI_PRO_1_5 = "google/gemini-pro-1.5",
MODEL_OPENAI_GPT_4_TURBO = "openai/gpt-4-turbo",
MODEL_GOOGLE_GEMINI_PRO_1_5 = "google/gemini-pro-1.5",
MODEL_COHERE_COMMAND_R_PLUS = "cohere/command-r-plus",
MODEL_COHERE_COMMAND_R_PLUS_04_2024 = "cohere/command-r-plus-04-2024",
MODEL_SOPHOSYMPATHEIA_MIDNIGHT_ROSE_70B = "sophosympatheia/midnight-rose-70b",
MODEL_COHERE_COMMAND_R = "cohere/command-r",
MODEL_COHERE_COMMAND = "cohere/command",
MODEL_COHERE_COMMAND_R = "cohere/command-r",
MODEL_ANTHROPIC_CLAUDE_3_HAIKU_BETA = "anthropic/claude-3-haiku:beta",
MODEL_ANTHROPIC_CLAUDE_3_HAIKU = "anthropic/claude-3-haiku",
MODEL_ANTHROPIC_CLAUDE_3_SONNET = "anthropic/claude-3-sonnet",
MODEL_ANTHROPIC_CLAUDE_3_OPUS_BETA = "anthropic/claude-3-opus:beta",
MODEL_ANTHROPIC_CLAUDE_3_OPUS = "anthropic/claude-3-opus",
MODEL_ANTHROPIC_CLAUDE_3_SONNET_BETA = "anthropic/claude-3-sonnet:beta",
MODEL_ANTHROPIC_CLAUDE_3_SONNET = "anthropic/claude-3-sonnet",
MODEL_COHERE_COMMAND_R_03_2024 = "cohere/command-r-03-2024",
MODEL_MISTRALAI_MISTRAL_LARGE = "mistralai/mistral-large",
MODEL_OPENAI_GPT_4_TURBO_PREVIEW = "openai/gpt-4-turbo-preview",
MODEL_OPENAI_GPT_3_5_TURBO_0613 = "openai/gpt-3.5-turbo-0613",
MODEL_NOUSRESEARCH_NOUS_HERMES_2_MIXTRAL_8X7B_DPO = "nousresearch/nous-hermes-2-mixtral-8x7b-dpo",
MODEL_MISTRALAI_MISTRAL_TINY = "mistralai/mistral-tiny",
MODEL_MISTRALAI_MISTRAL_SMALL = "mistralai/mistral-small",
MODEL_MISTRALAI_MISTRAL_TINY = "mistralai/mistral-tiny",
MODEL_MISTRALAI_MISTRAL_7B_INSTRUCT_V0_2 = "mistralai/mistral-7b-instruct-v0.2",
MODEL_MISTRALAI_MIXTRAL_8X7B_INSTRUCT = "mistralai/mixtral-8x7b-instruct",
MODEL_NEVERSLEEP_NOROMAID_20B = "neversleep/noromaid-20b",
MODEL_ANTHROPIC_CLAUDE_2_1_BETA = "anthropic/claude-2.1:beta",
MODEL_ANTHROPIC_CLAUDE_2_1 = "anthropic/claude-2.1",
MODEL_ANTHROPIC_CLAUDE_2_BETA = "anthropic/claude-2:beta",
MODEL_ANTHROPIC_CLAUDE_2 = "anthropic/claude-2",
MODEL_ALPINDALE_GOLIATH_120B = "alpindale/goliath-120b",
MODEL_UNDI95_TOPPY_M_7B = "undi95/toppy-m-7b",
MODEL_ALPINDALE_GOLIATH_120B = "alpindale/goliath-120b",
MODEL_OPENROUTER_AUTO = "openrouter/auto",
MODEL_OPENAI_GPT_4_1106_PREVIEW = "openai/gpt-4-1106-preview",
MODEL_MISTRALAI_MISTRAL_7B_INSTRUCT_V0_1 = "mistralai/mistral-7b-instruct-v0.1",
MODEL_OPENAI_GPT_3_5_TURBO_INSTRUCT = "openai/gpt-3.5-turbo-instruct",
MODEL_MISTRALAI_MISTRAL_7B_INSTRUCT_V0_1 = "mistralai/mistral-7b-instruct-v0.1",
MODEL_PYGMALIONAI_MYTHALION_13B = "pygmalionai/mythalion-13b",
MODEL_OPENAI_GPT_3_5_TURBO_16K = "openai/gpt-3.5-turbo-16k",
MODEL_MANCER_WEAVER = "mancer/weaver",
MODEL_ANTHROPIC_CLAUDE_2_0_BETA = "anthropic/claude-2.0:beta",
MODEL_ANTHROPIC_CLAUDE_2_0 = "anthropic/claude-2.0",
MODEL_UNDI95_REMM_SLERP_L2_13B = "undi95/remm-slerp-l2-13b",
MODEL_GRYPHE_MYTHOMAX_L2_13B = "gryphe/mythomax-l2-13b",
MODEL_OPENAI_GPT_4 = "openai/gpt-4",
MODEL_OPENAI_GPT_4_0314 = "openai/gpt-4-0314",
MODEL_OPENAI_GPT_3_5_TURBO = "openai/gpt-3.5-turbo",
MODEL_OPENAI_GPT_4_0314 = "openai/gpt-4-0314"
MODEL_OPENAI_GPT_4 = "openai/gpt-4"
}
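The member names in both enums follow a mechanical pattern: the OpenRouter model id is upper-cased, every run of non-alphanumeric characters becomes an underscore, and the result gets a `MODEL_` (or `MODEL_FREE_`) prefix. A small sketch of that derivation, matching the names visible above; the function is illustrative, not the package's actual generator:

```ts
// Derive an enum member name from an OpenRouter model id, e.g.
// "qwen/qwen3-coder:free"       -> "MODEL_QWEN_QWEN3_CODER_FREE"
// "meta-llama/llama-4-maverick" -> "MODEL_META_LLAMA_LLAMA_4_MAVERICK"
function toEnumMemberName(modelId: string, prefix: string = "MODEL"): string {
  return `${prefix}_${modelId.toUpperCase().replace(/[^A-Z0-9]+/g, "_")}`;
}
```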
@@ -33,6 +33,7 @@ export interface IKBotOptions {
01-ai/yi-large | paid
aetherwiing/mn-starcannon-12b | paid
agentica-org/deepcoder-14b-preview | paid
agentica-org/deepcoder-14b-preview:free | free
ai21/jamba-1.6-large | paid
ai21/jamba-1.6-mini | paid
@@ -87,7 +88,6 @@ export interface IKBotOptions {
cohere/command-r-plus-04-2024 | paid
cohere/command-r-plus-08-2024 | paid
cohere/command-r7b-12-2024 | paid
openrouter/cypher-alpha:free | free
deepseek/deepseek-prover-v2 | paid
deepseek/deepseek-r1-0528-qwen3-8b | paid
deepseek/deepseek-r1-0528-qwen3-8b:free | free
@@ -110,10 +110,10 @@ export interface IKBotOptions {
deepseek/deepseek-r1-distill-qwen-7b | paid
cognitivecomputations/dolphin-mixtral-8x22b | paid
cognitivecomputations/dolphin3.0-mistral-24b:free | free
cognitivecomputations/dolphin3.0-r1-mistral-24b | paid
cognitivecomputations/dolphin3.0-r1-mistral-24b:free | free
eleutherai/llemma_7b | paid
eva-unit-01/eva-llama-3.33-70b | paid
eva-unit-01/eva-qwen-2.5-32b | paid
eva-unit-01/eva-qwen-2.5-72b | paid
sao10k/fimbulvetr-11b-v2 | paid
alpindale/goliath-120b | paid
@@ -125,10 +125,6 @@ export interface IKBotOptions {
google/gemini-2.0-flash-lite-001 | paid
google/gemini-2.5-flash | paid
google/gemini-2.5-flash-lite-preview-06-17 | paid
google/gemini-2.5-flash-preview | paid
google/gemini-2.5-flash-preview:thinking | paid
google/gemini-2.5-flash-preview-05-20 | paid
google/gemini-2.5-flash-preview-05-20:thinking | paid
google/gemini-2.5-pro | paid
google/gemini-2.5-pro-exp-03-25 | paid
google/gemini-2.5-pro-preview-05-06 | paid
@@ -142,6 +138,7 @@ export interface IKBotOptions {
google/gemma-3-27b-it:free | free
google/gemma-3-4b-it | paid
google/gemma-3-4b-it:free | free
google/gemma-3n-e2b-it:free | free
google/gemma-3n-e4b-it | paid
google/gemma-3n-e4b-it:free | free
inception/mercury | paid
@@ -162,19 +159,19 @@ export interface IKBotOptions {
meta-llama/llama-3-8b-instruct | paid
meta-llama/llama-3.1-405b | paid
meta-llama/llama-3.1-405b-instruct | paid
meta-llama/llama-3.1-405b-instruct:free | free
meta-llama/llama-3.1-70b-instruct | paid
meta-llama/llama-3.1-8b-instruct | paid
meta-llama/llama-3.2-11b-vision-instruct | paid
meta-llama/llama-3.2-11b-vision-instruct:free | free
meta-llama/llama-3.2-1b-instruct | paid
meta-llama/llama-3.2-3b-instruct | paid
meta-llama/llama-3.2-3b-instruct:free | free
meta-llama/llama-3.2-90b-vision-instruct | paid
meta-llama/llama-3.3-70b-instruct | paid
meta-llama/llama-3.3-70b-instruct:free | free
meta-llama/llama-4-maverick | paid
meta-llama/llama-4-maverick:free | free
meta-llama/llama-4-scout | paid
meta-llama/llama-4-scout:free | free
meta-llama/llama-guard-4-12b | paid
meta-llama/llama-guard-2-8b | paid
microsoft/mai-ds-r1:free | free
@@ -194,8 +191,10 @@ export interface IKBotOptions {
mistralai/mistral-small | paid
mistralai/mistral-tiny | paid
mistralai/codestral-2501 | paid
mistralai/devstral-medium | paid
mistralai/devstral-small | paid
mistralai/devstral-small:free | free
mistralai/devstral-small-2505 | paid
mistralai/devstral-small-2505:free | free
mistralai/magistral-medium-2506 | paid
mistralai/magistral-medium-2506:thinking | paid
mistralai/magistral-small-2506 | paid
@@ -221,11 +220,13 @@ export interface IKBotOptions {
mistralai/pixtral-large-2411 | paid
mistralai/mistral-saba | paid
moonshotai/kimi-vl-a3b-thinking:free | free
moonshotai/kimi-k2 | paid
moonshotai/kimi-k2:free | free
morph/morph-v2 | paid
morph/morph-v3-fast | paid
morph/morph-v3-large | paid
gryphe/mythomax-l2-13b | paid
neversleep/llama-3-lumimaid-70b | paid
neversleep/llama-3-lumimaid-8b | paid
neversleep/llama-3.1-lumimaid-70b | paid
neversleep/llama-3.1-lumimaid-8b | paid
neversleep/noromaid-20b | paid
nousresearch/deephermes-3-llama-3-8b-preview:free | free
@@ -237,9 +238,9 @@ export interface IKBotOptions {
nvidia/llama-3.1-nemotron-ultra-253b-v1 | paid
nvidia/llama-3.1-nemotron-ultra-253b-v1:free | free
nvidia/llama-3.3-nemotron-super-49b-v1 | paid
nvidia/llama-3.3-nemotron-super-49b-v1:free | free
openai/chatgpt-4o-latest | paid
openai/codex-mini | paid
openai/gpt-3.5-turbo | paid
openai/gpt-3.5-turbo-0613 | paid
openai/gpt-3.5-turbo-16k | paid
openai/gpt-3.5-turbo-instruct | paid
@@ -251,7 +252,6 @@ export interface IKBotOptions {
openai/gpt-4.1 | paid
openai/gpt-4.1-mini | paid
openai/gpt-4.1-nano | paid
openai/gpt-4.5-preview | paid
openai/gpt-4o | paid
openai/gpt-4o-2024-05-13 | paid
openai/gpt-4o-2024-08-06 | paid
@@ -274,9 +274,6 @@ export interface IKBotOptions {
openai/o4-mini | paid
openai/o4-mini-high | paid
opengvlab/internvl3-14b | paid
all-hands/openhands-lm-32b-v0.1 | paid
perplexity/llama-3.1-sonar-large-128k-online | paid
perplexity/llama-3.1-sonar-small-128k-online | paid
perplexity/r1-1776 | paid
perplexity/sonar | paid
perplexity/sonar-deep-research | paid
@@ -303,6 +300,7 @@ export interface IKBotOptions {
qwen/qwen3-30b-a3b:free | free
qwen/qwen3-32b | paid
qwen/qwen3-32b:free | free
qwen/qwen3-4b:free | free
qwen/qwen3-8b | paid
qwen/qwen3-8b:free | free
qwen/qwq-32b | paid
@@ -314,15 +312,20 @@ export interface IKBotOptions {
qwen/qwen-2.5-coder-32b-instruct | paid
qwen/qwen-2.5-coder-32b-instruct:free | free
featherless/qwerky-72b:free | free
rekaai/reka-flash-3 | paid
rekaai/reka-flash-3:free | free
undi95/remm-slerp-l2-13b | paid
sao10k/l3-lunaris-8b | paid
sao10k/l3-euryale-70b | paid
sao10k/l3.1-euryale-70b | paid
sao10k/l3.3-euryale-70b | paid
sarvamai/sarvam-m | paid
sarvamai/sarvam-m:free | free
shisa-ai/shisa-v2-llama3.3-70b:free | free
raifle/sorcererlm-8x22b | paid
switchpoint/router | paid
tencent/hunyuan-a13b-instruct | paid
tencent/hunyuan-a13b-instruct:free | free
thedrummer/anubis-70b-v1.1 | paid
thedrummer/anubis-pro-105b-v1 | paid
thedrummer/rocinante-12b | paid
@@ -331,10 +334,13 @@ export interface IKBotOptions {
thedrummer/valkyrie-49b-v1 | paid
thudm/glm-4-32b | paid
thudm/glm-4-32b:free | free
thudm/glm-4.1v-9b-thinking | paid
thudm/glm-z1-32b:free | free
tngtech/deepseek-r1t-chimera:free | free
tngtech/deepseek-r1t2-chimera:free | free
undi95/toppy-m-7b | paid
scb10x/llama3.1-typhoon2-70b-instruct | paid
cognitivecomputations/dolphin-mistral-24b-venice-edition:free | free
microsoft/wizardlm-2-8x22b | paid
x-ai/grok-2-1212 | paid
x-ai/grok-2-vision-1212 | paid
@@ -342,6 +348,7 @@ export interface IKBotOptions {
x-ai/grok-3-beta | paid
x-ai/grok-3-mini | paid
x-ai/grok-3-mini-beta | paid
x-ai/grok-4 | paid
x-ai/grok-vision-beta | paid
OpenAI models:
@@ -371,8 +378,6 @@ export interface IKBotOptions {
gpt-4.1-mini-2025-04-14
gpt-4.1-nano
gpt-4.1-nano-2025-04-14
gpt-4.5-preview
gpt-4.5-preview-2025-02-27
gpt-4o
gpt-4o-2024-05-13
gpt-4o-2024-08-06
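The option help above lists every model as `id | paid` or `id | free`. A short sketch of splitting such a listing into free and paid ids, assuming the lines really are pipe-separated as shown; headings and blank lines are skipped:

```ts
// Parse "model-id | free" / "model-id | paid" lines into two id lists.
function splitByTier(listing: string): { free: string[]; paid: string[] } {
  const free: string[] = [];
  const paid: string[] = [];
  for (const line of listing.split("\n")) {
    const m = line.trim().match(/^(\S+)\s*\|\s*(free|paid)$/);
    if (!m) continue; // skip headings, blanks, and anything not in the id | tier form
    (m[2] === "free" ? free : paid).push(m[1]);
  }
  return { free, paid };
}
```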