maintenance love :)
This commit is contained in:
parent 4c7cbc336d
commit 25df919ec2
@@ -1,5 +1,5 @@
{
"timestamp": 1745523700281,
"timestamp": 1745946122126,
"models": [
{
"id": "gpt-4o-audio-preview-2024-12-17",
@@ -85,6 +85,18 @@
"created": 1671217299,
"owned_by": "openai-internal"
},
{
"id": "chatgpt-4o-latest",
"object": "model",
"created": 1723515131,
"owned_by": "system"
},
{
"id": "gpt-4o-realtime-preview-2024-12-17",
"object": "model",
"created": 1733945430,
"owned_by": "system"
},
{
"id": "text-embedding-3-large",
"object": "model",
@@ -103,6 +115,12 @@
"created": 1727460443,
"owned_by": "system"
},
{
"id": "o1-2024-12-17",
"object": "model",
"created": 1734326976,
"owned_by": "system"
},
{
"id": "o1-preview-2024-09-12",
"object": "model",
@@ -139,6 +157,12 @@
"created": 1741391161,
"owned_by": "system"
},
{
"id": "o1",
"object": "model",
"created": 1734375816,
"owned_by": "system"
},
{
"id": "gpt-4.1-mini-2025-04-14",
"object": "model",
@@ -146,9 +170,15 @@
"owned_by": "system"
},
{
"id": "chatgpt-4o-latest",
"id": "o1-pro",
"object": "model",
"created": 1723515131,
"created": 1742251791,
"owned_by": "system"
},
{
"id": "o1-pro-2025-03-19",
"object": "model",
"created": 1742251504,
"owned_by": "system"
},
{
@@ -175,12 +205,6 @@
"created": 1712361441,
"owned_by": "system"
},
{
"id": "gpt-4o-realtime-preview-2024-12-17",
"object": "model",
"created": 1733945430,
"owned_by": "system"
},
{
"id": "gpt-3.5-turbo-instruct",
"object": "model",
@@ -241,6 +265,12 @@
"created": 1683758102,
"owned_by": "openai-internal"
},
{
"id": "gpt-image-1",
"object": "model",
"created": 1745517030,
"owned_by": "system"
},
{
"id": "o1-preview",
"object": "model",
@@ -277,6 +307,18 @@
"created": 1732734466,
"owned_by": "system"
},
{
"id": "o3-mini",
"object": "model",
"created": 1737146383,
"owned_by": "system"
},
{
"id": "o3-mini-2025-01-31",
"object": "model",
"created": 1738010200,
"owned_by": "system"
},
{
"id": "tts-1-hd",
"object": "model",
@@ -313,24 +355,12 @@
"created": 1744316542,
"owned_by": "system"
},
{
"id": "gpt-4o-transcribe",
"object": "model",
"created": 1742068463,
"owned_by": "system"
},
{
"id": "gpt-4.1-2025-04-14",
"object": "model",
"created": 1744315746,
"owned_by": "system"
},
{
"id": "o1-2024-12-17",
"object": "model",
"created": 1734326976,
"owned_by": "system"
},
{
"id": "gpt-4o-mini-2024-07-18",
"object": "model",
@@ -368,15 +398,9 @@
"owned_by": "system"
},
{
"id": "o3-mini",
"id": "gpt-4o-transcribe",
"object": "model",
"created": 1737146383,
"owned_by": "system"
},
{
"id": "o3-mini-2025-01-31",
"object": "model",
"created": 1738010200,
"created": 1742068463,
"owned_by": "system"
},
{
@@ -403,24 +427,6 @@
"created": 1699053241,
"owned_by": "system"
},
{
"id": "o1",
"object": "model",
"created": 1734375816,
"owned_by": "system"
},
{
"id": "o1-pro",
"object": "model",
"created": 1742251791,
"owned_by": "system"
},
{
"id": "o1-pro-2025-03-19",
"object": "model",
"created": 1742251504,
"owned_by": "system"
},
{
"id": "omni-moderation-latest",
"object": "model",
File diff suppressed because it is too large
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
19 packages/kbot/src/models/cache/openai-models.ts vendored
@@ -13,22 +13,26 @@ export enum E_OPENAI_MODEL {
MODEL_BABBAGE_002 = "babbage-002",
MODEL_GPT_4 = "gpt-4",
MODEL_TEXT_EMBEDDING_ADA_002 = "text-embedding-ada-002",
MODEL_CHATGPT_4O_LATEST = "chatgpt-4o-latest",
MODEL_GPT_4O_REALTIME_PREVIEW_2024_12_17 = "gpt-4o-realtime-preview-2024-12-17",
MODEL_TEXT_EMBEDDING_3_LARGE = "text-embedding-3-large",
MODEL_GPT_4O_MINI_AUDIO_PREVIEW = "gpt-4o-mini-audio-preview",
MODEL_GPT_4O_AUDIO_PREVIEW = "gpt-4o-audio-preview",
MODEL_O1_2024_12_17 = "o1-2024-12-17",
MODEL_O1_PREVIEW_2024_09_12 = "o1-preview-2024-09-12",
MODEL_GPT_4O_MINI_REALTIME_PREVIEW = "gpt-4o-mini-realtime-preview",
MODEL_GPT_4_1_MINI = "gpt-4.1-mini",
MODEL_GPT_4O_MINI_REALTIME_PREVIEW_2024_12_17 = "gpt-4o-mini-realtime-preview-2024-12-17",
MODEL_GPT_3_5_TURBO_INSTRUCT_0914 = "gpt-3.5-turbo-instruct-0914",
MODEL_GPT_4O_MINI_SEARCH_PREVIEW = "gpt-4o-mini-search-preview",
MODEL_O1 = "o1",
MODEL_GPT_4_1_MINI_2025_04_14 = "gpt-4.1-mini-2025-04-14",
MODEL_CHATGPT_4O_LATEST = "chatgpt-4o-latest",
MODEL_O1_PRO = "o1-pro",
MODEL_O1_PRO_2025_03_19 = "o1-pro-2025-03-19",
MODEL_DAVINCI_002 = "davinci-002",
MODEL_GPT_3_5_TURBO_1106 = "gpt-3.5-turbo-1106",
MODEL_GPT_4O_SEARCH_PREVIEW = "gpt-4o-search-preview",
MODEL_GPT_4_TURBO = "gpt-4-turbo",
MODEL_GPT_4O_REALTIME_PREVIEW_2024_12_17 = "gpt-4o-realtime-preview-2024-12-17",
MODEL_GPT_3_5_TURBO_INSTRUCT = "gpt-3.5-turbo-instruct",
MODEL_GPT_3_5_TURBO = "gpt-3.5-turbo",
MODEL_GPT_4_TURBO_PREVIEW = "gpt-4-turbo-preview",
@@ -39,35 +43,32 @@ export enum E_OPENAI_MODEL {
MODEL_GPT_4O_2024_05_13 = "gpt-4o-2024-05-13",
MODEL_GPT_4_TURBO_2024_04_09 = "gpt-4-turbo-2024-04-09",
MODEL_GPT_3_5_TURBO_16K = "gpt-3.5-turbo-16k",
MODEL_GPT_IMAGE_1 = "gpt-image-1",
MODEL_O1_PREVIEW = "o1-preview",
MODEL_GPT_4_0613 = "gpt-4-0613",
MODEL_GPT_4_5_PREVIEW = "gpt-4.5-preview",
MODEL_GPT_4_5_PREVIEW_2025_02_27 = "gpt-4.5-preview-2025-02-27",
MODEL_GPT_4O_SEARCH_PREVIEW_2025_03_11 = "gpt-4o-search-preview-2025-03-11",
MODEL_OMNI_MODERATION_2024_09_26 = "omni-moderation-2024-09-26",
MODEL_O3_MINI = "o3-mini",
MODEL_O3_MINI_2025_01_31 = "o3-mini-2025-01-31",
MODEL_TTS_1_HD = "tts-1-hd",
MODEL_GPT_4O = "gpt-4o",
MODEL_TTS_1_HD_1106 = "tts-1-hd-1106",
MODEL_GPT_4O_MINI = "gpt-4o-mini",
MODEL_GPT_4O_2024_08_06 = "gpt-4o-2024-08-06",
MODEL_GPT_4_1 = "gpt-4.1",
MODEL_GPT_4O_TRANSCRIBE = "gpt-4o-transcribe",
MODEL_GPT_4_1_2025_04_14 = "gpt-4.1-2025-04-14",
MODEL_O1_2024_12_17 = "o1-2024-12-17",
MODEL_GPT_4O_MINI_2024_07_18 = "gpt-4o-mini-2024-07-18",
MODEL_GPT_4O_MINI_TRANSCRIBE = "gpt-4o-mini-transcribe",
MODEL_O1_MINI = "o1-mini",
MODEL_GPT_4O_MINI_AUDIO_PREVIEW_2024_12_17 = "gpt-4o-mini-audio-preview-2024-12-17",
MODEL_GPT_3_5_TURBO_0125 = "gpt-3.5-turbo-0125",
MODEL_O1_MINI_2024_09_12 = "o1-mini-2024-09-12",
MODEL_O3_MINI = "o3-mini",
MODEL_O3_MINI_2025_01_31 = "o3-mini-2025-01-31",
MODEL_GPT_4O_TRANSCRIBE = "gpt-4o-transcribe",
MODEL_TTS_1 = "tts-1",
MODEL_GPT_4_1106_PREVIEW = "gpt-4-1106-preview",
MODEL_GPT_4O_MINI_TTS = "gpt-4o-mini-tts",
MODEL_TTS_1_1106 = "tts-1-1106",
MODEL_O1 = "o1",
MODEL_O1_PRO = "o1-pro",
MODEL_O1_PRO_2025_03_19 = "o1-pro-2025-03-19",
MODEL_OMNI_MODERATION_LATEST = "omni-moderation-latest"
}
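The members reordered above form a plain string enum, so each one can stand in wherever a model id string is expected. A short usage sketch, assuming the enum is imported from the vendored cache module; the import path and request shape are illustrative.

// Path is illustrative; the enum lives in the vendored cache module edited above.
import { E_OPENAI_MODEL } from "./openai-models";

// Illustrative request options; only the model field comes from the enum.
interface ChatRequestOptions {
  model: E_OPENAI_MODEL;
  temperature?: number;
}

const options: ChatRequestOptions = {
  model: E_OPENAI_MODEL.MODEL_GPT_4_1_MINI_2025_04_14,
  temperature: 0.2,
};

// String enums compile to their literal values, so this logs "gpt-4.1-mini-2025-04-14".
console.log(options.model);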
@@ -1,4 +1,12 @@
export enum E_OPENROUTER_MODEL_FREE {
MODEL_FREE_QWEN_QWEN3_30B_A3B_FREE = "qwen/qwen3-30b-a3b:free",
MODEL_FREE_QWEN_QWEN3_8B_FREE = "qwen/qwen3-8b:free",
MODEL_FREE_QWEN_QWEN3_14B_FREE = "qwen/qwen3-14b:free",
MODEL_FREE_QWEN_QWEN3_32B_FREE = "qwen/qwen3-32b:free",
MODEL_FREE_QWEN_QWEN3_235B_A22B_FREE = "qwen/qwen3-235b-a22b:free",
MODEL_FREE_TNGTECH_DEEPSEEK_R1T_CHIMERA_FREE = "tngtech/deepseek-r1t-chimera:free",
MODEL_FREE_THUDM_GLM_Z1_9B_FREE = "thudm/glm-z1-9b:free",
MODEL_FREE_THUDM_GLM_4_9B_FREE = "thudm/glm-4-9b:free",
MODEL_FREE_MICROSOFT_MAI_DS_R1_FREE = "microsoft/mai-ds-r1:free",
MODEL_FREE_THUDM_GLM_Z1_32B_FREE = "thudm/glm-z1-32b:free",
MODEL_FREE_THUDM_GLM_4_32B_FREE = "thudm/glm-4-32b:free",
@@ -6,7 +14,6 @@ export enum E_OPENROUTER_MODEL_FREE {
MODEL_FREE_ARLIAI_QWQ_32B_ARLIAI_RPR_V1_FREE = "arliai/qwq-32b-arliai-rpr-v1:free",
MODEL_FREE_AGENTICA_ORG_DEEPCODER_14B_PREVIEW_FREE = "agentica-org/deepcoder-14b-preview:free",
MODEL_FREE_MOONSHOTAI_KIMI_VL_A3B_THINKING_FREE = "moonshotai/kimi-vl-a3b-thinking:free",
MODEL_FREE_NVIDIA_LLAMA_3_1_NEMOTRON_NANO_8B_V1_FREE = "nvidia/llama-3.1-nemotron-nano-8b-v1:free",
MODEL_FREE_NVIDIA_LLAMA_3_3_NEMOTRON_SUPER_49B_V1_FREE = "nvidia/llama-3.3-nemotron-super-49b-v1:free",
MODEL_FREE_NVIDIA_LLAMA_3_1_NEMOTRON_ULTRA_253B_V1_FREE = "nvidia/llama-3.1-nemotron-ultra-253b-v1:free",
MODEL_FREE_META_LLAMA_LLAMA_4_MAVERICK_FREE = "meta-llama/llama-4-maverick:free",
@@ -15,7 +22,7 @@ export enum E_OPENROUTER_MODEL_FREE {
MODEL_FREE_ALLENAI_MOLMO_7B_D_FREE = "allenai/molmo-7b-d:free",
MODEL_FREE_BYTEDANCE_RESEARCH_UI_TARS_72B_FREE = "bytedance-research/ui-tars-72b:free",
MODEL_FREE_QWEN_QWEN2_5_VL_3B_INSTRUCT_FREE = "qwen/qwen2.5-vl-3b-instruct:free",
MODEL_FREE_GOOGLE_GEMINI_2_5_PRO_EXP_03_25_FREE = "google/gemini-2.5-pro-exp-03-25:free",
MODEL_FREE_GOOGLE_GEMINI_2_5_PRO_EXP_03_25 = "google/gemini-2.5-pro-exp-03-25",
MODEL_FREE_QWEN_QWEN2_5_VL_32B_INSTRUCT_FREE = "qwen/qwen2.5-vl-32b-instruct:free",
MODEL_FREE_DEEPSEEK_DEEPSEEK_CHAT_V3_0324_FREE = "deepseek/deepseek-chat-v3-0324:free",
MODEL_FREE_FEATHERLESS_QWERKY_72B_FREE = "featherless/qwerky-72b:free",
@@ -46,13 +53,12 @@ export enum E_OPENROUTER_MODEL_FREE {
MODEL_FREE_GOOGLE_LEARNLM_1_5_PRO_EXPERIMENTAL_FREE = "google/learnlm-1.5-pro-experimental:free",
MODEL_FREE_QWEN_QWEN_2_5_CODER_32B_INSTRUCT_FREE = "qwen/qwen-2.5-coder-32b-instruct:free",
MODEL_FREE_QWEN_QWEN_2_5_7B_INSTRUCT_FREE = "qwen/qwen-2.5-7b-instruct:free",
MODEL_FREE_NVIDIA_LLAMA_3_1_NEMOTRON_70B_INSTRUCT_FREE = "nvidia/llama-3.1-nemotron-70b-instruct:free",
MODEL_FREE_META_LLAMA_LLAMA_3_2_3B_INSTRUCT_FREE = "meta-llama/llama-3.2-3b-instruct:free",
MODEL_FREE_META_LLAMA_LLAMA_3_2_1B_INSTRUCT_FREE = "meta-llama/llama-3.2-1b-instruct:free",
MODEL_FREE_META_LLAMA_LLAMA_3_2_11B_VISION_INSTRUCT_FREE = "meta-llama/llama-3.2-11b-vision-instruct:free",
MODEL_FREE_META_LLAMA_LLAMA_3_2_3B_INSTRUCT_FREE = "meta-llama/llama-3.2-3b-instruct:free",
MODEL_FREE_QWEN_QWEN_2_5_72B_INSTRUCT_FREE = "qwen/qwen-2.5-72b-instruct:free",
MODEL_FREE_GOOGLE_GEMINI_FLASH_1_5_8B_EXP = "google/gemini-flash-1.5-8b-exp",
MODEL_FREE_QWEN_QWEN_2_5_VL_7B_INSTRUCT_FREE = "qwen/qwen-2.5-vl-7b-instruct:free",
MODEL_FREE_GOOGLE_GEMINI_FLASH_1_5_8B_EXP = "google/gemini-flash-1.5-8b-exp",
MODEL_FREE_META_LLAMA_LLAMA_3_1_405B_FREE = "meta-llama/llama-3.1-405b:free",
MODEL_FREE_META_LLAMA_LLAMA_3_1_8B_INSTRUCT_FREE = "meta-llama/llama-3.1-8b-instruct:free",
MODEL_FREE_MISTRALAI_MISTRAL_NEMO_FREE = "mistralai/mistral-nemo:free",
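Most of the free-tier identifiers above follow the OpenRouter convention of a ":free" suffix on the model slug. A small sketch built on that convention; the import path and helper name are assumptions, not code from the repository.

// Path is illustrative; the enum is the one shown in the diff above.
import { E_OPENROUTER_MODEL_FREE } from "./openrouter-models-free";

// Most free-tier slugs carry a ":free" suffix; this helper simply checks for it.
function isFreeVariant(modelId: string): boolean {
  return modelId.endsWith(":free");
}

console.log(isFreeVariant(E_OPENROUTER_MODEL_FREE.MODEL_FREE_QWEN_QWEN3_32B_FREE)); // true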
@@ -1,15 +1,29 @@
export enum E_OPENROUTER_MODEL {
MODEL_QWEN_QWEN3_30B_A3B_FREE = "qwen/qwen3-30b-a3b:free",
MODEL_QWEN_QWEN3_30B_A3B = "qwen/qwen3-30b-a3b",
MODEL_QWEN_QWEN3_8B_FREE = "qwen/qwen3-8b:free",
MODEL_QWEN_QWEN3_14B_FREE = "qwen/qwen3-14b:free",
MODEL_QWEN_QWEN3_14B = "qwen/qwen3-14b",
MODEL_QWEN_QWEN3_32B_FREE = "qwen/qwen3-32b:free",
MODEL_QWEN_QWEN3_32B = "qwen/qwen3-32b",
MODEL_QWEN_QWEN3_235B_A22B_FREE = "qwen/qwen3-235b-a22b:free",
MODEL_QWEN_QWEN3_235B_A22B = "qwen/qwen3-235b-a22b",
MODEL_TNGTECH_DEEPSEEK_R1T_CHIMERA_FREE = "tngtech/deepseek-r1t-chimera:free",
MODEL_THUDM_GLM_Z1_RUMINATION_32B = "thudm/glm-z1-rumination-32b",
MODEL_THUDM_GLM_Z1_9B_FREE = "thudm/glm-z1-9b:free",
MODEL_THUDM_GLM_4_9B_FREE = "thudm/glm-4-9b:free",
MODEL_MICROSOFT_MAI_DS_R1_FREE = "microsoft/mai-ds-r1:free",
MODEL_GOOGLE_GEMINI_2_5_PRO_PREVIEW_03_25 = "google/gemini-2.5-pro-preview-03-25",
MODEL_THUDM_GLM_Z1_32B_FREE = "thudm/glm-z1-32b:free",
MODEL_THUDM_GLM_Z1_32B = "thudm/glm-z1-32b",
MODEL_THUDM_GLM_4_32B_FREE = "thudm/glm-4-32b:free",
MODEL_THUDM_GLM_4_32B = "thudm/glm-4-32b",
MODEL_GOOGLE_GEMINI_2_5_FLASH_PREVIEW = "google/gemini-2.5-flash-preview",
MODEL_GOOGLE_GEMINI_2_5_FLASH_PREVIEW_THINKING = "google/gemini-2.5-flash-preview:thinking",
MODEL_OPENAI_O4_MINI_HIGH = "openai/o4-mini-high",
MODEL_OPENAI_O3 = "openai/o3",
MODEL_OPENAI_O4_MINI = "openai/o4-mini",
MODEL_SHISA_AI_SHISA_V2_LLAMA3_3_70B_FREE = "shisa-ai/shisa-v2-llama3.3-70b:free",
MODEL_QWEN_QWEN2_5_CODER_7B_INSTRUCT = "qwen/qwen2.5-coder-7b-instruct",
MODEL_OPENAI_GPT_4_1 = "openai/gpt-4.1",
MODEL_OPENAI_GPT_4_1_MINI = "openai/gpt-4.1-mini",
MODEL_OPENAI_GPT_4_1_NANO = "openai/gpt-4.1-nano",
@@ -20,7 +34,6 @@ export enum E_OPENROUTER_MODEL {
MODEL_MOONSHOTAI_KIMI_VL_A3B_THINKING_FREE = "moonshotai/kimi-vl-a3b-thinking:free",
MODEL_X_AI_GROK_3_MINI_BETA = "x-ai/grok-3-mini-beta",
MODEL_X_AI_GROK_3_BETA = "x-ai/grok-3-beta",
MODEL_NVIDIA_LLAMA_3_1_NEMOTRON_NANO_8B_V1_FREE = "nvidia/llama-3.1-nemotron-nano-8b-v1:free",
MODEL_NVIDIA_LLAMA_3_3_NEMOTRON_SUPER_49B_V1_FREE = "nvidia/llama-3.3-nemotron-super-49b-v1:free",
MODEL_NVIDIA_LLAMA_3_1_NEMOTRON_ULTRA_253B_V1_FREE = "nvidia/llama-3.1-nemotron-ultra-253b-v1:free",
MODEL_META_LLAMA_LLAMA_4_MAVERICK_FREE = "meta-llama/llama-4-maverick:free",
@@ -35,7 +48,7 @@ export enum E_OPENROUTER_MODEL {
MODEL_ALLENAI_MOLMO_7B_D_FREE = "allenai/molmo-7b-d:free",
MODEL_BYTEDANCE_RESEARCH_UI_TARS_72B_FREE = "bytedance-research/ui-tars-72b:free",
MODEL_QWEN_QWEN2_5_VL_3B_INSTRUCT_FREE = "qwen/qwen2.5-vl-3b-instruct:free",
MODEL_GOOGLE_GEMINI_2_5_PRO_EXP_03_25_FREE = "google/gemini-2.5-pro-exp-03-25:free",
MODEL_GOOGLE_GEMINI_2_5_PRO_EXP_03_25 = "google/gemini-2.5-pro-exp-03-25",
MODEL_QWEN_QWEN2_5_VL_32B_INSTRUCT_FREE = "qwen/qwen2.5-vl-32b-instruct:free",
MODEL_QWEN_QWEN2_5_VL_32B_INSTRUCT = "qwen/qwen2.5-vl-32b-instruct",
MODEL_DEEPSEEK_DEEPSEEK_CHAT_V3_0324_FREE = "deepseek/deepseek-chat-v3-0324:free",
@@ -147,53 +160,52 @@ export enum E_OPENROUTER_MODEL {
MODEL_ANTHROPIC_CLAUDE_3_5_HAIKU = "anthropic/claude-3.5-haiku",
MODEL_ANTHROPIC_CLAUDE_3_5_HAIKU_20241022_BETA = "anthropic/claude-3.5-haiku-20241022:beta",
MODEL_ANTHROPIC_CLAUDE_3_5_HAIKU_20241022 = "anthropic/claude-3.5-haiku-20241022",
MODEL_ANTHRACITE_ORG_MAGNUM_V4_72B = "anthracite-org/magnum-v4-72b",
MODEL_NEVERSLEEP_LLAMA_3_1_LUMIMAID_70B = "neversleep/llama-3.1-lumimaid-70b",
MODEL_ANTHRACITE_ORG_MAGNUM_V4_72B = "anthracite-org/magnum-v4-72b",
MODEL_ANTHROPIC_CLAUDE_3_5_SONNET_BETA = "anthropic/claude-3.5-sonnet:beta",
MODEL_ANTHROPIC_CLAUDE_3_5_SONNET = "anthropic/claude-3.5-sonnet",
MODEL_X_AI_GROK_BETA = "x-ai/grok-beta",
MODEL_MISTRALAI_MINISTRAL_3B = "mistralai/ministral-3b",
MODEL_MISTRALAI_MINISTRAL_8B = "mistralai/ministral-8b",
MODEL_MISTRALAI_MINISTRAL_3B = "mistralai/ministral-3b",
MODEL_QWEN_QWEN_2_5_7B_INSTRUCT_FREE = "qwen/qwen-2.5-7b-instruct:free",
MODEL_QWEN_QWEN_2_5_7B_INSTRUCT = "qwen/qwen-2.5-7b-instruct",
MODEL_NVIDIA_LLAMA_3_1_NEMOTRON_70B_INSTRUCT_FREE = "nvidia/llama-3.1-nemotron-70b-instruct:free",
MODEL_NVIDIA_LLAMA_3_1_NEMOTRON_70B_INSTRUCT = "nvidia/llama-3.1-nemotron-70b-instruct",
MODEL_INFLECTION_INFLECTION_3_PI = "inflection/inflection-3-pi",
MODEL_INFLECTION_INFLECTION_3_PRODUCTIVITY = "inflection/inflection-3-productivity",
MODEL_INFLECTION_INFLECTION_3_PI = "inflection/inflection-3-pi",
MODEL_GOOGLE_GEMINI_FLASH_1_5_8B = "google/gemini-flash-1.5-8b",
MODEL_THEDRUMMER_ROCINANTE_12B = "thedrummer/rocinante-12b",
MODEL_ANTHRACITE_ORG_MAGNUM_V2_72B = "anthracite-org/magnum-v2-72b",
MODEL_LIQUID_LFM_40B = "liquid/lfm-40b",
MODEL_META_LLAMA_LLAMA_3_2_1B_INSTRUCT_FREE = "meta-llama/llama-3.2-1b-instruct:free",
MODEL_META_LLAMA_LLAMA_3_2_1B_INSTRUCT = "meta-llama/llama-3.2-1b-instruct",
MODEL_META_LLAMA_LLAMA_3_2_11B_VISION_INSTRUCT_FREE = "meta-llama/llama-3.2-11b-vision-instruct:free",
MODEL_META_LLAMA_LLAMA_3_2_11B_VISION_INSTRUCT = "meta-llama/llama-3.2-11b-vision-instruct",
MODEL_META_LLAMA_LLAMA_3_2_90B_VISION_INSTRUCT = "meta-llama/llama-3.2-90b-vision-instruct",
MODEL_META_LLAMA_LLAMA_3_2_3B_INSTRUCT_FREE = "meta-llama/llama-3.2-3b-instruct:free",
MODEL_META_LLAMA_LLAMA_3_2_3B_INSTRUCT = "meta-llama/llama-3.2-3b-instruct",
MODEL_META_LLAMA_LLAMA_3_2_1B_INSTRUCT_FREE = "meta-llama/llama-3.2-1b-instruct:free",
MODEL_META_LLAMA_LLAMA_3_2_1B_INSTRUCT = "meta-llama/llama-3.2-1b-instruct",
MODEL_META_LLAMA_LLAMA_3_2_90B_VISION_INSTRUCT = "meta-llama/llama-3.2-90b-vision-instruct",
MODEL_META_LLAMA_LLAMA_3_2_11B_VISION_INSTRUCT_FREE = "meta-llama/llama-3.2-11b-vision-instruct:free",
MODEL_META_LLAMA_LLAMA_3_2_11B_VISION_INSTRUCT = "meta-llama/llama-3.2-11b-vision-instruct",
MODEL_QWEN_QWEN_2_5_72B_INSTRUCT_FREE = "qwen/qwen-2.5-72b-instruct:free",
MODEL_QWEN_QWEN_2_5_72B_INSTRUCT = "qwen/qwen-2.5-72b-instruct",
MODEL_QWEN_QWEN_2_5_VL_72B_INSTRUCT = "qwen/qwen-2.5-vl-72b-instruct",
MODEL_NEVERSLEEP_LLAMA_3_1_LUMIMAID_8B = "neversleep/llama-3.1-lumimaid-8b",
MODEL_OPENAI_O1_MINI = "openai/o1-mini",
MODEL_OPENAI_O1_PREVIEW = "openai/o1-preview",
MODEL_OPENAI_O1_PREVIEW_2024_09_12 = "openai/o1-preview-2024-09-12",
MODEL_OPENAI_O1_MINI = "openai/o1-mini",
MODEL_OPENAI_O1_MINI_2024_09_12 = "openai/o1-mini-2024-09-12",
MODEL_MISTRALAI_PIXTRAL_12B = "mistralai/pixtral-12b",
MODEL_COHERE_COMMAND_R_PLUS_08_2024 = "cohere/command-r-plus-08-2024",
MODEL_COHERE_COMMAND_R_08_2024 = "cohere/command-r-08-2024",
MODEL_SAO10K_L3_1_EURYALE_70B = "sao10k/l3.1-euryale-70b",
MODEL_GOOGLE_GEMINI_FLASH_1_5_8B_EXP = "google/gemini-flash-1.5-8b-exp",
MODEL_QWEN_QWEN_2_5_VL_7B_INSTRUCT_FREE = "qwen/qwen-2.5-vl-7b-instruct:free",
MODEL_QWEN_QWEN_2_5_VL_7B_INSTRUCT = "qwen/qwen-2.5-vl-7b-instruct",
MODEL_SAO10K_L3_1_EURYALE_70B = "sao10k/l3.1-euryale-70b",
MODEL_GOOGLE_GEMINI_FLASH_1_5_8B_EXP = "google/gemini-flash-1.5-8b-exp",
MODEL_AI21_JAMBA_1_5_MINI = "ai21/jamba-1-5-mini",
MODEL_AI21_JAMBA_1_5_LARGE = "ai21/jamba-1-5-large",
MODEL_MICROSOFT_PHI_3_5_MINI_128K_INSTRUCT = "microsoft/phi-3.5-mini-128k-instruct",
MODEL_NOUSRESEARCH_HERMES_3_LLAMA_3_1_70B = "nousresearch/hermes-3-llama-3.1-70b",
MODEL_NOUSRESEARCH_HERMES_3_LLAMA_3_1_405B = "nousresearch/hermes-3-llama-3.1-405b",
MODEL_OPENAI_CHATGPT_4O_LATEST = "openai/chatgpt-4o-latest",
MODEL_AETHERWIING_MN_STARCANNON_12B = "aetherwiing/mn-starcannon-12b",
MODEL_SAO10K_L3_LUNARIS_8B = "sao10k/l3-lunaris-8b",
MODEL_AETHERWIING_MN_STARCANNON_12B = "aetherwiing/mn-starcannon-12b",
MODEL_OPENAI_GPT_4O_2024_08_06 = "openai/gpt-4o-2024-08-06",
MODEL_META_LLAMA_LLAMA_3_1_405B_FREE = "meta-llama/llama-3.1-405b:free",
MODEL_META_LLAMA_LLAMA_3_1_405B = "meta-llama/llama-3.1-405b",
@@ -202,8 +214,8 @@ export enum E_OPENROUTER_MODEL {
MODEL_PERPLEXITY_LLAMA_3_1_SONAR_LARGE_128K_ONLINE = "perplexity/llama-3.1-sonar-large-128k-online",
MODEL_META_LLAMA_LLAMA_3_1_8B_INSTRUCT_FREE = "meta-llama/llama-3.1-8b-instruct:free",
MODEL_META_LLAMA_LLAMA_3_1_8B_INSTRUCT = "meta-llama/llama-3.1-8b-instruct",
MODEL_META_LLAMA_LLAMA_3_1_70B_INSTRUCT = "meta-llama/llama-3.1-70b-instruct",
MODEL_META_LLAMA_LLAMA_3_1_405B_INSTRUCT = "meta-llama/llama-3.1-405b-instruct",
MODEL_META_LLAMA_LLAMA_3_1_70B_INSTRUCT = "meta-llama/llama-3.1-70b-instruct",
MODEL_MISTRALAI_CODESTRAL_MAMBA = "mistralai/codestral-mamba",
MODEL_MISTRALAI_MISTRAL_NEMO_FREE = "mistralai/mistral-nemo:free",
MODEL_MISTRALAI_MISTRAL_NEMO = "mistralai/mistral-nemo",
@@ -213,17 +225,17 @@ export enum E_OPENROUTER_MODEL {
MODEL_ALPINDALE_MAGNUM_72B = "alpindale/magnum-72b",
MODEL_GOOGLE_GEMMA_2_9B_IT_FREE = "google/gemma-2-9b-it:free",
MODEL_GOOGLE_GEMMA_2_9B_IT = "google/gemma-2-9b-it",
MODEL_AI21_JAMBA_INSTRUCT = "ai21/jamba-instruct",
MODEL_01_AI_YI_LARGE = "01-ai/yi-large",
MODEL_AI21_JAMBA_INSTRUCT = "ai21/jamba-instruct",
MODEL_ANTHROPIC_CLAUDE_3_5_SONNET_20240620_BETA = "anthropic/claude-3.5-sonnet-20240620:beta",
MODEL_ANTHROPIC_CLAUDE_3_5_SONNET_20240620 = "anthropic/claude-3.5-sonnet-20240620",
MODEL_SAO10K_L3_EURYALE_70B = "sao10k/l3-euryale-70b",
MODEL_COGNITIVECOMPUTATIONS_DOLPHIN_MIXTRAL_8X22B = "cognitivecomputations/dolphin-mixtral-8x22b",
MODEL_QWEN_QWEN_2_72B_INSTRUCT = "qwen/qwen-2-72b-instruct",
MODEL_MISTRALAI_MISTRAL_7B_INSTRUCT_V0_3 = "mistralai/mistral-7b-instruct-v0.3",
MODEL_NOUSRESEARCH_HERMES_2_PRO_LLAMA_3_8B = "nousresearch/hermes-2-pro-llama-3-8b",
MODEL_MISTRALAI_MISTRAL_7B_INSTRUCT_FREE = "mistralai/mistral-7b-instruct:free",
MODEL_MISTRALAI_MISTRAL_7B_INSTRUCT = "mistralai/mistral-7b-instruct",
MODEL_NOUSRESEARCH_HERMES_2_PRO_LLAMA_3_8B = "nousresearch/hermes-2-pro-llama-3-8b",
MODEL_MISTRALAI_MISTRAL_7B_INSTRUCT_V0_3 = "mistralai/mistral-7b-instruct-v0.3",
MODEL_MICROSOFT_PHI_3_MINI_128K_INSTRUCT = "microsoft/phi-3-mini-128k-instruct",
MODEL_MICROSOFT_PHI_3_MEDIUM_128K_INSTRUCT = "microsoft/phi-3-medium-128k-instruct",
MODEL_NEVERSLEEP_LLAMA_3_LUMIMAID_70B = "neversleep/llama-3-lumimaid-70b",
@@ -238,15 +250,14 @@ export enum E_OPENROUTER_MODEL {
MODEL_META_LLAMA_LLAMA_3_8B_INSTRUCT = "meta-llama/llama-3-8b-instruct",
MODEL_META_LLAMA_LLAMA_3_70B_INSTRUCT = "meta-llama/llama-3-70b-instruct",
MODEL_MISTRALAI_MIXTRAL_8X22B_INSTRUCT = "mistralai/mixtral-8x22b-instruct",
MODEL_MICROSOFT_WIZARDLM_2_7B = "microsoft/wizardlm-2-7b",
MODEL_MICROSOFT_WIZARDLM_2_8X22B = "microsoft/wizardlm-2-8x22b",
MODEL_OPENAI_GPT_4_TURBO = "openai/gpt-4-turbo",
MODEL_GOOGLE_GEMINI_PRO_1_5 = "google/gemini-pro-1.5",
MODEL_OPENAI_GPT_4_TURBO = "openai/gpt-4-turbo",
MODEL_COHERE_COMMAND_R_PLUS = "cohere/command-r-plus",
MODEL_COHERE_COMMAND_R_PLUS_04_2024 = "cohere/command-r-plus-04-2024",
MODEL_SOPHOSYMPATHEIA_MIDNIGHT_ROSE_70B = "sophosympatheia/midnight-rose-70b",
MODEL_COHERE_COMMAND_R = "cohere/command-r",
MODEL_COHERE_COMMAND = "cohere/command",
MODEL_COHERE_COMMAND_R = "cohere/command-r",
MODEL_ANTHROPIC_CLAUDE_3_HAIKU_BETA = "anthropic/claude-3-haiku:beta",
MODEL_ANTHROPIC_CLAUDE_3_HAIKU = "anthropic/claude-3-haiku",
MODEL_ANTHROPIC_CLAUDE_3_OPUS_BETA = "anthropic/claude-3-opus:beta",
@@ -258,26 +269,23 @@ export enum E_OPENROUTER_MODEL {
MODEL_OPENAI_GPT_3_5_TURBO_0613 = "openai/gpt-3.5-turbo-0613",
MODEL_OPENAI_GPT_4_TURBO_PREVIEW = "openai/gpt-4-turbo-preview",
MODEL_NOUSRESEARCH_NOUS_HERMES_2_MIXTRAL_8X7B_DPO = "nousresearch/nous-hermes-2-mixtral-8x7b-dpo",
MODEL_MISTRALAI_MISTRAL_SMALL = "mistralai/mistral-small",
MODEL_MISTRALAI_MISTRAL_MEDIUM = "mistralai/mistral-medium",
MODEL_MISTRALAI_MISTRAL_SMALL = "mistralai/mistral-small",
MODEL_MISTRALAI_MISTRAL_TINY = "mistralai/mistral-tiny",
MODEL_MISTRALAI_MISTRAL_7B_INSTRUCT_V0_2 = "mistralai/mistral-7b-instruct-v0.2",
MODEL_COGNITIVECOMPUTATIONS_DOLPHIN_MIXTRAL_8X7B = "cognitivecomputations/dolphin-mixtral-8x7b",
MODEL_GOOGLE_GEMINI_PRO_VISION = "google/gemini-pro-vision",
MODEL_MISTRALAI_MIXTRAL_8X7B_INSTRUCT = "mistralai/mixtral-8x7b-instruct",
MODEL_OPENCHAT_OPENCHAT_7B = "openchat/openchat-7b",
MODEL_NEVERSLEEP_NOROMAID_20B = "neversleep/noromaid-20b",
MODEL_ANTHROPIC_CLAUDE_2_1_BETA = "anthropic/claude-2.1:beta",
MODEL_ANTHROPIC_CLAUDE_2_1 = "anthropic/claude-2.1",
MODEL_ANTHROPIC_CLAUDE_2_BETA = "anthropic/claude-2:beta",
MODEL_ANTHROPIC_CLAUDE_2 = "anthropic/claude-2",
MODEL_ALPINDALE_GOLIATH_120B = "alpindale/goliath-120b",
MODEL_UNDI95_TOPPY_M_7B = "undi95/toppy-m-7b",
MODEL_OPENROUTER_AUTO = "openrouter/auto",
MODEL_OPENAI_GPT_4_1106_PREVIEW = "openai/gpt-4-1106-preview",
MODEL_OPENAI_GPT_3_5_TURBO_1106 = "openai/gpt-3.5-turbo-1106",
MODEL_GOOGLE_PALM_2_CODECHAT_BISON_32K = "google/palm-2-codechat-bison-32k",
MODEL_OPENAI_GPT_4_1106_PREVIEW = "openai/gpt-4-1106-preview",
MODEL_GOOGLE_PALM_2_CHAT_BISON_32K = "google/palm-2-chat-bison-32k",
MODEL_GOOGLE_PALM_2_CODECHAT_BISON_32K = "google/palm-2-codechat-bison-32k",
MODEL_JONDURBIN_AIROBOROS_L2_70B = "jondurbin/airoboros-l2-70b",
MODEL_OPENAI_GPT_3_5_TURBO_INSTRUCT = "openai/gpt-3.5-turbo-instruct",
MODEL_MISTRALAI_MISTRAL_7B_INSTRUCT_V0_1 = "mistralai/mistral-7b-instruct-v0.1",
@@ -285,17 +293,15 @@ export enum E_OPENROUTER_MODEL {
MODEL_OPENAI_GPT_3_5_TURBO_16K = "openai/gpt-3.5-turbo-16k",
MODEL_OPENAI_GPT_4_32K = "openai/gpt-4-32k",
MODEL_OPENAI_GPT_4_32K_0314 = "openai/gpt-4-32k-0314",
MODEL_NOUSRESEARCH_NOUS_HERMES_LLAMA2_13B = "nousresearch/nous-hermes-llama2-13b",
MODEL_HUGGINGFACEH4_ZEPHYR_7B_BETA_FREE = "huggingfaceh4/zephyr-7b-beta:free",
MODEL_MANCER_WEAVER = "mancer/weaver",
MODEL_HUGGINGFACEH4_ZEPHYR_7B_BETA_FREE = "huggingfaceh4/zephyr-7b-beta:free",
MODEL_ANTHROPIC_CLAUDE_2_0_BETA = "anthropic/claude-2.0:beta",
MODEL_ANTHROPIC_CLAUDE_2_0 = "anthropic/claude-2.0",
MODEL_UNDI95_REMM_SLERP_L2_13B = "undi95/remm-slerp-l2-13b",
MODEL_GOOGLE_PALM_2_CODECHAT_BISON = "google/palm-2-codechat-bison",
MODEL_GOOGLE_PALM_2_CHAT_BISON = "google/palm-2-chat-bison",
MODEL_GOOGLE_PALM_2_CODECHAT_BISON = "google/palm-2-codechat-bison",
MODEL_GRYPHE_MYTHOMAX_L2_13B = "gryphe/mythomax-l2-13b",
MODEL_META_LLAMA_LLAMA_2_70B_CHAT = "meta-llama/llama-2-70b-chat",
MODEL_META_LLAMA_LLAMA_2_13B_CHAT = "meta-llama/llama-2-13b-chat",
MODEL_OPENAI_GPT_3_5_TURBO = "openai/gpt-3.5-turbo",
MODEL_OPENAI_GPT_3_5_TURBO_0125 = "openai/gpt-3.5-turbo-0125",
MODEL_OPENAI_GPT_4 = "openai/gpt-4",
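E_OPENROUTER_MODEL lists the ":free" slugs alongside their paid counterparts, so a caller can accept either enum when building a request. A brief sketch under that assumption; the module paths and payload shape are illustrative, not code from the repository.

// Module paths are assumptions; both enums are shown in the diffs above.
import { E_OPENROUTER_MODEL } from "./openrouter-models";
import { E_OPENROUTER_MODEL_FREE } from "./openrouter-models-free";

// Either enum resolves to an OpenRouter model slug at runtime.
type OpenRouterModelId = E_OPENROUTER_MODEL | E_OPENROUTER_MODEL_FREE;

// Illustrative helper: builds a chat-completion style payload around the slug.
function toRequestBody(model: OpenRouterModelId, prompt: string) {
  return {
    model,
    messages: [{ role: "user", content: prompt }],
  };
}

console.log(toRequestBody(E_OPENROUTER_MODEL.MODEL_OPENAI_GPT_4_1, "hello").model); // "openai/gpt-4.1"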