maintenance love :)
commit bbac3d6874
parent 7975a782f4
@@ -1,5 +1,5 @@
 {
-  "timestamp": 1757760643663,
+  "timestamp": 1758221595993,
   "models": [
     {
       "id": "gpt-4-0613",
File diff suppressed because it is too large
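For orientation, the cache file above stores a fetch timestamp plus a flat model list. The sketch below illustrates that shape as visible in the hunk; the interface and helper names are illustrative assumptions, not taken from the repository.

// Sketch of the cached catalog shape shown above; field names
// ("timestamp", "models", "id") come from the diff, everything else is assumed.
import { readFileSync } from "node:fs";

interface ModelCatalog {
    timestamp: number;             // epoch milliseconds of the last refresh
    models: Array<{ id: string }>; // e.g. "gpt-4-0613"
}

// True when the cache is older than maxAgeMs and should be re-fetched.
function isCatalogStale(path: string, maxAgeMs: number): boolean {
    const catalog = JSON.parse(readFileSync(path, "utf8")) as ModelCatalog;
    return Date.now() - catalog.timestamp > maxAgeMs;
}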
packages/kbot/dist/main_node.js (vendored): 107528
File diff suppressed because one or more lines are too long

packages/kbot/dist/package-lock.json (generated, vendored): 4
@@ -1,12 +1,12 @@
 {
   "name": "@plastichub/kbot",
-  "version": "1.1.52",
+  "version": "1.1.53",
   "lockfileVersion": 3,
   "requires": true,
   "packages": {
     "": {
       "name": "@plastichub/kbot",
-      "version": "1.1.52",
+      "version": "1.1.53",
       "license": "ISC",
       "dependencies": {
         "node-emoji": "^2.2.0"
packages/kbot/dist/package.json (vendored): 2
@@ -1,6 +1,6 @@
 {
   "name": "@plastichub/kbot",
-  "version": "1.1.52",
+  "version": "1.1.53",
   "main": "main_node.js",
   "author": "",
   "license": "ISC",
@@ -30,7 +30,6 @@ export enum E_OPENROUTER_MODEL_FREE {
     MODEL_FREE_ARLIAI_QWQ_32B_ARLIAI_RPR_V1_FREE = "arliai/qwq-32b-arliai-rpr-v1:free",
     MODEL_FREE_AGENTICA_ORG_DEEPCODER_14B_PREVIEW_FREE = "agentica-org/deepcoder-14b-preview:free",
     MODEL_FREE_MOONSHOTAI_KIMI_VL_A3B_THINKING_FREE = "moonshotai/kimi-vl-a3b-thinking:free",
-    MODEL_FREE_NVIDIA_LLAMA_3_1_NEMOTRON_ULTRA_253B_V1_FREE = "nvidia/llama-3.1-nemotron-ultra-253b-v1:free",
     MODEL_FREE_META_LLAMA_LLAMA_4_MAVERICK_FREE = "meta-llama/llama-4-maverick:free",
     MODEL_FREE_META_LLAMA_LLAMA_4_SCOUT_FREE = "meta-llama/llama-4-scout:free",
     MODEL_FREE_QWEN_QWEN2_5_VL_32B_INSTRUCT_FREE = "qwen/qwen2.5-vl-32b-instruct:free",
@@ -38,7 +37,6 @@ export enum E_OPENROUTER_MODEL_FREE {
     MODEL_FREE_MISTRALAI_MISTRAL_SMALL_3_1_24B_INSTRUCT_FREE = "mistralai/mistral-small-3.1-24b-instruct:free",
     MODEL_FREE_GOOGLE_GEMMA_3_4B_IT_FREE = "google/gemma-3-4b-it:free",
     MODEL_FREE_GOOGLE_GEMMA_3_12B_IT_FREE = "google/gemma-3-12b-it:free",
-    MODEL_FREE_REKAAI_REKA_FLASH_3_FREE = "rekaai/reka-flash-3:free",
     MODEL_FREE_GOOGLE_GEMMA_3_27B_IT_FREE = "google/gemma-3-27b-it:free",
     MODEL_FREE_QWEN_QWQ_32B_FREE = "qwen/qwq-32b:free",
     MODEL_FREE_NOUSRESEARCH_DEEPHERMES_3_LLAMA_3_8B_PREVIEW_FREE = "nousresearch/deephermes-3-llama-3-8b-preview:free",
@@ -46,7 +44,6 @@ export enum E_OPENROUTER_MODEL_FREE {
     MODEL_FREE_COGNITIVECOMPUTATIONS_DOLPHIN3_0_MISTRAL_24B_FREE = "cognitivecomputations/dolphin3.0-mistral-24b:free",
     MODEL_FREE_QWEN_QWEN2_5_VL_72B_INSTRUCT_FREE = "qwen/qwen2.5-vl-72b-instruct:free",
     MODEL_FREE_MISTRALAI_MISTRAL_SMALL_24B_INSTRUCT_2501_FREE = "mistralai/mistral-small-24b-instruct-2501:free",
-    MODEL_FREE_DEEPSEEK_DEEPSEEK_R1_DISTILL_QWEN_14B_FREE = "deepseek/deepseek-r1-distill-qwen-14b:free",
     MODEL_FREE_DEEPSEEK_DEEPSEEK_R1_DISTILL_LLAMA_70B_FREE = "deepseek/deepseek-r1-distill-llama-70b:free",
     MODEL_FREE_DEEPSEEK_DEEPSEEK_R1_FREE = "deepseek/deepseek-r1:free",
     MODEL_FREE_GOOGLE_GEMINI_2_0_FLASH_EXP_FREE = "google/gemini-2.0-flash-exp:free",
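The hunks above drop free-tier enum members whose upstream ":free" variants appear to have been retired. A minimal usage sketch of the generated enum follows; the import path is an illustrative assumption, only the enum members come from the file shown here.

// Usage sketch only: the import path is assumed for illustration.
import { E_OPENROUTER_MODEL_FREE } from "./openrouter_models_free";

// Pick a free-tier default that is still present after this change.
const DEFAULT_FREE_MODEL: string =
    E_OPENROUTER_MODEL_FREE.MODEL_FREE_META_LLAMA_LLAMA_4_SCOUT_FREE;

console.log(`Requesting completions from ${DEFAULT_FREE_MODEL}`);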
@@ -1,4 +1,9 @@
 export enum E_OPENROUTER_MODEL {
+    MODEL_ALIBABA_TONGYI_DEEPRESEARCH_30B_A3B = "alibaba/tongyi-deepresearch-30b-a3b",
+    MODEL_QWEN_QWEN3_CODER_FLASH = "qwen/qwen3-coder-flash",
+    MODEL_QWEN_QWEN3_CODER_PLUS = "qwen/qwen3-coder-plus",
+    MODEL_ARCEE_AI_AFM_4_5B = "arcee-ai/afm-4.5b",
+    MODEL_OPENGVLAB_INTERNVL3_78B = "opengvlab/internvl3-78b",
     MODEL_QWEN_QWEN3_NEXT_80B_A3B_THINKING = "qwen/qwen3-next-80b-a3b-thinking",
     MODEL_QWEN_QWEN3_NEXT_80B_A3B_INSTRUCT = "qwen/qwen3-next-80b-a3b-instruct",
     MODEL_MEITUAN_LONGCAT_FLASH_CHAT = "meituan/longcat-flash-chat",
@@ -122,7 +127,6 @@ export enum E_OPENROUTER_MODEL {
     MODEL_MICROSOFT_MAI_DS_R1_FREE = "microsoft/mai-ds-r1:free",
     MODEL_MICROSOFT_MAI_DS_R1 = "microsoft/mai-ds-r1",
     MODEL_THUDM_GLM_Z1_32B = "thudm/glm-z1-32b",
-    MODEL_THUDM_GLM_4_32B = "thudm/glm-4-32b",
     MODEL_OPENAI_O4_MINI_HIGH = "openai/o4-mini-high",
     MODEL_OPENAI_O3 = "openai/o3",
     MODEL_OPENAI_O4_MINI = "openai/o4-mini",
@@ -141,7 +145,6 @@ export enum E_OPENROUTER_MODEL {
     MODEL_MOONSHOTAI_KIMI_VL_A3B_THINKING = "moonshotai/kimi-vl-a3b-thinking",
     MODEL_X_AI_GROK_3_MINI_BETA = "x-ai/grok-3-mini-beta",
     MODEL_X_AI_GROK_3_BETA = "x-ai/grok-3-beta",
-    MODEL_NVIDIA_LLAMA_3_1_NEMOTRON_ULTRA_253B_V1_FREE = "nvidia/llama-3.1-nemotron-ultra-253b-v1:free",
     MODEL_NVIDIA_LLAMA_3_1_NEMOTRON_ULTRA_253B_V1 = "nvidia/llama-3.1-nemotron-ultra-253b-v1",
     MODEL_META_LLAMA_LLAMA_4_MAVERICK_FREE = "meta-llama/llama-4-maverick:free",
     MODEL_META_LLAMA_LLAMA_4_MAVERICK = "meta-llama/llama-4-maverick",
@@ -163,7 +166,6 @@ export enum E_OPENROUTER_MODEL {
     MODEL_COHERE_COMMAND_A = "cohere/command-a",
     MODEL_OPENAI_GPT_4O_MINI_SEARCH_PREVIEW = "openai/gpt-4o-mini-search-preview",
     MODEL_OPENAI_GPT_4O_SEARCH_PREVIEW = "openai/gpt-4o-search-preview",
-    MODEL_REKAAI_REKA_FLASH_3_FREE = "rekaai/reka-flash-3:free",
     MODEL_GOOGLE_GEMMA_3_27B_IT_FREE = "google/gemma-3-27b-it:free",
     MODEL_GOOGLE_GEMMA_3_27B_IT = "google/gemma-3-27b-it",
     MODEL_THEDRUMMER_ANUBIS_PRO_105B_V1 = "thedrummer/anubis-pro-105b-v1",
@@ -202,7 +204,6 @@ export enum E_OPENROUTER_MODEL {
     MODEL_MISTRALAI_MISTRAL_SMALL_24B_INSTRUCT_2501_FREE = "mistralai/mistral-small-24b-instruct-2501:free",
     MODEL_MISTRALAI_MISTRAL_SMALL_24B_INSTRUCT_2501 = "mistralai/mistral-small-24b-instruct-2501",
     MODEL_DEEPSEEK_DEEPSEEK_R1_DISTILL_QWEN_32B = "deepseek/deepseek-r1-distill-qwen-32b",
-    MODEL_DEEPSEEK_DEEPSEEK_R1_DISTILL_QWEN_14B_FREE = "deepseek/deepseek-r1-distill-qwen-14b:free",
     MODEL_DEEPSEEK_DEEPSEEK_R1_DISTILL_QWEN_14B = "deepseek/deepseek-r1-distill-qwen-14b",
     MODEL_PERPLEXITY_SONAR_REASONING = "perplexity/sonar-reasoning",
     MODEL_PERPLEXITY_SONAR = "perplexity/sonar",
@@ -218,8 +219,6 @@ export enum E_OPENROUTER_MODEL {
     MODEL_DEEPSEEK_DEEPSEEK_CHAT = "deepseek/deepseek-chat",
     MODEL_SAO10K_L3_3_EURYALE_70B = "sao10k/l3.3-euryale-70b",
     MODEL_OPENAI_O1 = "openai/o1",
-    MODEL_X_AI_GROK_2_VISION_1212 = "x-ai/grok-2-vision-1212",
-    MODEL_X_AI_GROK_2_1212 = "x-ai/grok-2-1212",
     MODEL_COHERE_COMMAND_R7B_12_2024 = "cohere/command-r7b-12-2024",
     MODEL_GOOGLE_GEMINI_2_0_FLASH_EXP_FREE = "google/gemini-2.0-flash-exp:free",
     MODEL_META_LLAMA_LLAMA_3_3_70B_INSTRUCT_FREE = "meta-llama/llama-3.3-70b-instruct:free",
@@ -240,19 +239,19 @@ export enum E_OPENROUTER_MODEL {
     MODEL_ANTHROPIC_CLAUDE_3_5_HAIKU_20241022 = "anthropic/claude-3.5-haiku-20241022",
     MODEL_ANTHRACITE_ORG_MAGNUM_V4_72B = "anthracite-org/magnum-v4-72b",
     MODEL_ANTHROPIC_CLAUDE_3_5_SONNET = "anthropic/claude-3.5-sonnet",
-    MODEL_MISTRALAI_MINISTRAL_3B = "mistralai/ministral-3b",
     MODEL_MISTRALAI_MINISTRAL_8B = "mistralai/ministral-8b",
+    MODEL_MISTRALAI_MINISTRAL_3B = "mistralai/ministral-3b",
     MODEL_QWEN_QWEN_2_5_7B_INSTRUCT = "qwen/qwen-2.5-7b-instruct",
     MODEL_NVIDIA_LLAMA_3_1_NEMOTRON_70B_INSTRUCT = "nvidia/llama-3.1-nemotron-70b-instruct",
-    MODEL_INFLECTION_INFLECTION_3_PI = "inflection/inflection-3-pi",
     MODEL_INFLECTION_INFLECTION_3_PRODUCTIVITY = "inflection/inflection-3-productivity",
+    MODEL_INFLECTION_INFLECTION_3_PI = "inflection/inflection-3-pi",
     MODEL_GOOGLE_GEMINI_FLASH_1_5_8B = "google/gemini-flash-1.5-8b",
     MODEL_THEDRUMMER_ROCINANTE_12B = "thedrummer/rocinante-12b",
     MODEL_ANTHRACITE_ORG_MAGNUM_V2_72B = "anthracite-org/magnum-v2-72b",
-    MODEL_META_LLAMA_LLAMA_3_2_90B_VISION_INSTRUCT = "meta-llama/llama-3.2-90b-vision-instruct",
-    MODEL_META_LLAMA_LLAMA_3_2_1B_INSTRUCT = "meta-llama/llama-3.2-1b-instruct",
     MODEL_META_LLAMA_LLAMA_3_2_3B_INSTRUCT_FREE = "meta-llama/llama-3.2-3b-instruct:free",
     MODEL_META_LLAMA_LLAMA_3_2_3B_INSTRUCT = "meta-llama/llama-3.2-3b-instruct",
+    MODEL_META_LLAMA_LLAMA_3_2_1B_INSTRUCT = "meta-llama/llama-3.2-1b-instruct",
+    MODEL_META_LLAMA_LLAMA_3_2_90B_VISION_INSTRUCT = "meta-llama/llama-3.2-90b-vision-instruct",
     MODEL_META_LLAMA_LLAMA_3_2_11B_VISION_INSTRUCT = "meta-llama/llama-3.2-11b-vision-instruct",
     MODEL_QWEN_QWEN_2_5_72B_INSTRUCT_FREE = "qwen/qwen-2.5-72b-instruct:free",
     MODEL_QWEN_QWEN_2_5_72B_INSTRUCT = "qwen/qwen-2.5-72b-instruct",
@@ -262,8 +261,8 @@ export enum E_OPENROUTER_MODEL {
     MODEL_MISTRALAI_PIXTRAL_12B = "mistralai/pixtral-12b",
     MODEL_COHERE_COMMAND_R_PLUS_08_2024 = "cohere/command-r-plus-08-2024",
     MODEL_COHERE_COMMAND_R_08_2024 = "cohere/command-r-08-2024",
-    MODEL_SAO10K_L3_1_EURYALE_70B = "sao10k/l3.1-euryale-70b",
     MODEL_QWEN_QWEN_2_5_VL_7B_INSTRUCT = "qwen/qwen-2.5-vl-7b-instruct",
+    MODEL_SAO10K_L3_1_EURYALE_70B = "sao10k/l3.1-euryale-70b",
     MODEL_MICROSOFT_PHI_3_5_MINI_128K_INSTRUCT = "microsoft/phi-3.5-mini-128k-instruct",
     MODEL_NOUSRESEARCH_HERMES_3_LLAMA_3_1_70B = "nousresearch/hermes-3-llama-3.1-70b",
     MODEL_NOUSRESEARCH_HERMES_3_LLAMA_3_1_405B = "nousresearch/hermes-3-llama-3.1-405b",
@@ -277,45 +276,43 @@ export enum E_OPENROUTER_MODEL {
     MODEL_META_LLAMA_LLAMA_3_1_70B_INSTRUCT = "meta-llama/llama-3.1-70b-instruct",
     MODEL_MISTRALAI_MISTRAL_NEMO_FREE = "mistralai/mistral-nemo:free",
     MODEL_MISTRALAI_MISTRAL_NEMO = "mistralai/mistral-nemo",
-    MODEL_OPENAI_GPT_4O_MINI_2024_07_18 = "openai/gpt-4o-mini-2024-07-18",
     MODEL_OPENAI_GPT_4O_MINI = "openai/gpt-4o-mini",
+    MODEL_OPENAI_GPT_4O_MINI_2024_07_18 = "openai/gpt-4o-mini-2024-07-18",
     MODEL_GOOGLE_GEMMA_2_27B_IT = "google/gemma-2-27b-it",
     MODEL_GOOGLE_GEMMA_2_9B_IT_FREE = "google/gemma-2-9b-it:free",
     MODEL_GOOGLE_GEMMA_2_9B_IT = "google/gemma-2-9b-it",
     MODEL_ANTHROPIC_CLAUDE_3_5_SONNET_20240620 = "anthropic/claude-3.5-sonnet-20240620",
     MODEL_SAO10K_L3_EURYALE_70B = "sao10k/l3-euryale-70b",
     MODEL_COGNITIVECOMPUTATIONS_DOLPHIN_MIXTRAL_8X22B = "cognitivecomputations/dolphin-mixtral-8x22b",
     MODEL_NOUSRESEARCH_HERMES_2_PRO_LLAMA_3_8B = "nousresearch/hermes-2-pro-llama-3-8b",
-    MODEL_MISTRALAI_MISTRAL_7B_INSTRUCT_V0_3 = "mistralai/mistral-7b-instruct-v0.3",
     MODEL_MISTRALAI_MISTRAL_7B_INSTRUCT_FREE = "mistralai/mistral-7b-instruct:free",
     MODEL_MISTRALAI_MISTRAL_7B_INSTRUCT = "mistralai/mistral-7b-instruct",
+    MODEL_MISTRALAI_MISTRAL_7B_INSTRUCT_V0_3 = "mistralai/mistral-7b-instruct-v0.3",
     MODEL_MICROSOFT_PHI_3_MINI_128K_INSTRUCT = "microsoft/phi-3-mini-128k-instruct",
     MODEL_MICROSOFT_PHI_3_MEDIUM_128K_INSTRUCT = "microsoft/phi-3-medium-128k-instruct",
     MODEL_NEVERSLEEP_LLAMA_3_LUMIMAID_70B = "neversleep/llama-3-lumimaid-70b",
     MODEL_GOOGLE_GEMINI_FLASH_1_5 = "google/gemini-flash-1.5",
-    MODEL_OPENAI_GPT_4O_2024_05_13 = "openai/gpt-4o-2024-05-13",
     MODEL_OPENAI_GPT_4O = "openai/gpt-4o",
     MODEL_OPENAI_GPT_4O_EXTENDED = "openai/gpt-4o:extended",
     MODEL_META_LLAMA_LLAMA_GUARD_2_8B = "meta-llama/llama-guard-2-8b",
-    MODEL_META_LLAMA_LLAMA_3_70B_INSTRUCT = "meta-llama/llama-3-70b-instruct",
+    MODEL_OPENAI_GPT_4O_2024_05_13 = "openai/gpt-4o-2024-05-13",
     MODEL_META_LLAMA_LLAMA_3_8B_INSTRUCT = "meta-llama/llama-3-8b-instruct",
+    MODEL_META_LLAMA_LLAMA_3_70B_INSTRUCT = "meta-llama/llama-3-70b-instruct",
     MODEL_MISTRALAI_MIXTRAL_8X22B_INSTRUCT = "mistralai/mixtral-8x22b-instruct",
     MODEL_MICROSOFT_WIZARDLM_2_8X22B = "microsoft/wizardlm-2-8x22b",
     MODEL_GOOGLE_GEMINI_PRO_1_5 = "google/gemini-pro-1.5",
     MODEL_OPENAI_GPT_4_TURBO = "openai/gpt-4-turbo",
     MODEL_COHERE_COMMAND_R_PLUS = "cohere/command-r-plus",
     MODEL_COHERE_COMMAND_R_PLUS_04_2024 = "cohere/command-r-plus-04-2024",
     MODEL_SOPHOSYMPATHEIA_MIDNIGHT_ROSE_70B = "sophosympatheia/midnight-rose-70b",
     MODEL_COHERE_COMMAND = "cohere/command",
     MODEL_COHERE_COMMAND_R = "cohere/command-r",
     MODEL_ANTHROPIC_CLAUDE_3_HAIKU = "anthropic/claude-3-haiku",
     MODEL_ANTHROPIC_CLAUDE_3_OPUS = "anthropic/claude-3-opus",
     MODEL_COHERE_COMMAND_R_03_2024 = "cohere/command-r-03-2024",
     MODEL_MISTRALAI_MISTRAL_LARGE = "mistralai/mistral-large",
-    MODEL_OPENAI_GPT_4_TURBO_PREVIEW = "openai/gpt-4-turbo-preview",
     MODEL_OPENAI_GPT_3_5_TURBO_0613 = "openai/gpt-3.5-turbo-0613",
-    MODEL_MISTRALAI_MISTRAL_TINY = "mistralai/mistral-tiny",
+    MODEL_OPENAI_GPT_4_TURBO_PREVIEW = "openai/gpt-4-turbo-preview",
     MODEL_MISTRALAI_MISTRAL_SMALL = "mistralai/mistral-small",
+    MODEL_MISTRALAI_MISTRAL_TINY = "mistralai/mistral-tiny",
     MODEL_MISTRALAI_MIXTRAL_8X7B_INSTRUCT = "mistralai/mixtral-8x7b-instruct",
     MODEL_NEVERSLEEP_NOROMAID_20B = "neversleep/noromaid-20b",
     MODEL_ALPINDALE_GOLIATH_120B = "alpindale/goliath-120b",
@@ -328,6 +325,6 @@ export enum E_OPENROUTER_MODEL {
     MODEL_UNDI95_REMM_SLERP_L2_13B = "undi95/remm-slerp-l2-13b",
     MODEL_GRYPHE_MYTHOMAX_L2_13B = "gryphe/mythomax-l2-13b",
     MODEL_OPENAI_GPT_3_5_TURBO = "openai/gpt-3.5-turbo",
-    MODEL_OPENAI_GPT_4_0314 = "openai/gpt-4-0314",
-    MODEL_OPENAI_GPT_4 = "openai/gpt-4"
+    MODEL_OPENAI_GPT_4 = "openai/gpt-4",
+    MODEL_OPENAI_GPT_4_0314 = "openai/gpt-4-0314"
 }
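Because the generated enum tracks the upstream catalog, members can disappear between releases (the grok-2 entries above appear to be such a case). A runtime guard like the sketch below can keep configured model IDs honest; the import path and helper name are illustrative assumptions.

// Runtime guard sketch; import path and helper name are assumed.
import { E_OPENROUTER_MODEL } from "./openrouter_models";

// Narrow an arbitrary string to a model ID that still exists in the enum.
function isKnownModel(id: string): id is E_OPENROUTER_MODEL {
    return (Object.values(E_OPENROUTER_MODEL) as string[]).includes(id);
}

if (!isKnownModel("x-ai/grok-2-1212")) {
    console.warn("Configured model is no longer in the generated catalog");
}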