maintenance love :)

babayaga 2025-10-14 10:54:12 +02:00
parent 4463f26bb2
commit fce3c835b8
16 changed files with 3228 additions and 2551 deletions

Binary file not shown (1.2 MiB before).

File diff suppressed because one or more lines are too long

@ -1,5 +1,5 @@
{
"timestamp": 1758470050446,
"timestamp": 1760432036753,
"models": [
{
"id": "gpt-4-0613",
@ -20,33 +20,33 @@
"owned_by": "openai"
},
{
"id": "gpt-audio",
"id": "sora-2-pro",
"object": "model",
"created": 1756339249,
"created": 1759708663,
"owned_by": "system"
},
{
"id": "gpt-5-nano",
"id": "gpt-audio-mini-2025-10-06",
"object": "model",
"created": 1754426384,
"created": 1759512137,
"owned_by": "system"
},
{
"id": "gpt-audio-2025-08-28",
"id": "gpt-realtime-mini",
"object": "model",
"created": 1756256146,
"created": 1759517133,
"owned_by": "system"
},
{
"id": "gpt-realtime",
"id": "gpt-realtime-mini-2025-10-06",
"object": "model",
"created": 1756271701,
"created": 1759517175,
"owned_by": "system"
},
{
"id": "gpt-realtime-2025-08-28",
"id": "sora-2",
"object": "model",
"created": 1756271773,
"created": 1759708615,
"owned_by": "system"
},
{
@ -493,6 +493,66 @@
"created": 1754426303,
"owned_by": "system"
},
{
"id": "gpt-5-nano",
"object": "model",
"created": 1754426384,
"owned_by": "system"
},
{
"id": "gpt-audio-2025-08-28",
"object": "model",
"created": 1756256146,
"owned_by": "system"
},
{
"id": "gpt-realtime",
"object": "model",
"created": 1756271701,
"owned_by": "system"
},
{
"id": "gpt-realtime-2025-08-28",
"object": "model",
"created": 1756271773,
"owned_by": "system"
},
{
"id": "gpt-audio",
"object": "model",
"created": 1756339249,
"owned_by": "system"
},
{
"id": "gpt-5-codex",
"object": "model",
"created": 1757527818,
"owned_by": "system"
},
{
"id": "gpt-image-1-mini",
"object": "model",
"created": 1758845821,
"owned_by": "system"
},
{
"id": "gpt-5-pro-2025-10-06",
"object": "model",
"created": 1759469707,
"owned_by": "system"
},
{
"id": "gpt-5-pro",
"object": "model",
"created": 1759469822,
"owned_by": "system"
},
{
"id": "gpt-audio-mini",
"object": "model",
"created": 1759512027,
"owned_by": "system"
},
{
"id": "gpt-3.5-turbo-16k",
"object": "model",

File diff suppressed because it is too large

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

@ -1,12 +1,12 @@
{
"name": "@plastichub/kbot",
"version": "1.1.54",
"version": "1.1.55",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "@plastichub/kbot",
"version": "1.1.54",
"version": "1.1.55",
"license": "ISC",
"dependencies": {
"node-emoji": "^2.2.0"

@ -1,6 +1,6 @@
{
"name": "@plastichub/kbot",
"version": "1.1.54",
"version": "1.1.55",
"main": "main_node.js",
"author": "",
"license": "ISC",

@ -133,3 +133,4 @@ export class WebImageClient {
This structure will be decomposed into a detailed TODO roadmap in the following slice.

@ -448,7 +448,6 @@ async function launchGuiAndGetPrompt(argv: any): Promise<string | null> {
});
}
export const imageCommand = async (argv: any) => {
const logger = new Logger<ILogObj>({ minLevel: argv.logLevel || 2 });

@ -2,11 +2,11 @@ export enum E_OPENAI_MODEL {
MODEL_GPT_4_0613 = "gpt-4-0613",
MODEL_GPT_4 = "gpt-4",
MODEL_GPT_3_5_TURBO = "gpt-3.5-turbo",
MODEL_GPT_AUDIO = "gpt-audio",
MODEL_GPT_5_NANO = "gpt-5-nano",
MODEL_GPT_AUDIO_2025_08_28 = "gpt-audio-2025-08-28",
MODEL_GPT_REALTIME = "gpt-realtime",
MODEL_GPT_REALTIME_2025_08_28 = "gpt-realtime-2025-08-28",
MODEL_SORA_2_PRO = "sora-2-pro",
MODEL_GPT_AUDIO_MINI_2025_10_06 = "gpt-audio-mini-2025-10-06",
MODEL_GPT_REALTIME_MINI = "gpt-realtime-mini",
MODEL_GPT_REALTIME_MINI_2025_10_06 = "gpt-realtime-mini-2025-10-06",
MODEL_SORA_2 = "sora-2",
MODEL_DAVINCI_002 = "davinci-002",
MODEL_BABBAGE_002 = "babbage-002",
MODEL_GPT_3_5_TURBO_INSTRUCT = "gpt-3.5-turbo-instruct",
@ -81,6 +81,16 @@ export enum E_OPENAI_MODEL {
MODEL_GPT_5_MINI_2025_08_07 = "gpt-5-mini-2025-08-07",
MODEL_GPT_5_MINI = "gpt-5-mini",
MODEL_GPT_5_NANO_2025_08_07 = "gpt-5-nano-2025-08-07",
MODEL_GPT_5_NANO = "gpt-5-nano",
MODEL_GPT_AUDIO_2025_08_28 = "gpt-audio-2025-08-28",
MODEL_GPT_REALTIME = "gpt-realtime",
MODEL_GPT_REALTIME_2025_08_28 = "gpt-realtime-2025-08-28",
MODEL_GPT_AUDIO = "gpt-audio",
MODEL_GPT_5_CODEX = "gpt-5-codex",
MODEL_GPT_IMAGE_1_MINI = "gpt-image-1-mini",
MODEL_GPT_5_PRO_2025_10_06 = "gpt-5-pro-2025-10-06",
MODEL_GPT_5_PRO = "gpt-5-pro",
MODEL_GPT_AUDIO_MINI = "gpt-audio-mini",
MODEL_GPT_3_5_TURBO_16K = "gpt-3.5-turbo-16k",
MODEL_TTS_1 = "tts-1",
MODEL_WHISPER_1 = "whisper-1",

@ -1,8 +1,8 @@
export enum E_OPENROUTER_MODEL_FREE {
MODEL_FREE_X_AI_GROK_4_FAST_FREE = "x-ai/grok-4-fast:free",
MODEL_FREE_ALIBABA_TONGYI_DEEPRESEARCH_30B_A3B_FREE = "alibaba/tongyi-deepresearch-30b-a3b:free",
MODEL_FREE_MEITUAN_LONGCAT_FLASH_CHAT_FREE = "meituan/longcat-flash-chat:free",
MODEL_FREE_NVIDIA_NEMOTRON_NANO_9B_V2_FREE = "nvidia/nemotron-nano-9b-v2:free",
MODEL_FREE_DEEPSEEK_DEEPSEEK_CHAT_V3_1_FREE = "deepseek/deepseek-chat-v3.1:free",
MODEL_FREE_OPENAI_GPT_OSS_120B_FREE = "openai/gpt-oss-120b:free",
MODEL_FREE_OPENAI_GPT_OSS_20B_FREE = "openai/gpt-oss-20b:free",
MODEL_FREE_Z_AI_GLM_4_5_AIR_FREE = "z-ai/glm-4.5-air:free",
MODEL_FREE_QWEN_QWEN3_CODER_FREE = "qwen/qwen3-coder:free",
@ -28,7 +28,6 @@ export enum E_OPENROUTER_MODEL_FREE {
MODEL_FREE_SHISA_AI_SHISA_V2_LLAMA3_3_70B_FREE = "shisa-ai/shisa-v2-llama3.3-70b:free",
MODEL_FREE_ARLIAI_QWQ_32B_ARLIAI_RPR_V1_FREE = "arliai/qwq-32b-arliai-rpr-v1:free",
MODEL_FREE_AGENTICA_ORG_DEEPCODER_14B_PREVIEW_FREE = "agentica-org/deepcoder-14b-preview:free",
MODEL_FREE_MOONSHOTAI_KIMI_VL_A3B_THINKING_FREE = "moonshotai/kimi-vl-a3b-thinking:free",
MODEL_FREE_META_LLAMA_LLAMA_4_MAVERICK_FREE = "meta-llama/llama-4-maverick:free",
MODEL_FREE_META_LLAMA_LLAMA_4_SCOUT_FREE = "meta-llama/llama-4-scout:free",
MODEL_FREE_QWEN_QWEN2_5_VL_32B_INSTRUCT_FREE = "qwen/qwen2.5-vl-32b-instruct:free",
@ -37,9 +36,7 @@ export enum E_OPENROUTER_MODEL_FREE {
MODEL_FREE_GOOGLE_GEMMA_3_4B_IT_FREE = "google/gemma-3-4b-it:free",
MODEL_FREE_GOOGLE_GEMMA_3_12B_IT_FREE = "google/gemma-3-12b-it:free",
MODEL_FREE_GOOGLE_GEMMA_3_27B_IT_FREE = "google/gemma-3-27b-it:free",
MODEL_FREE_QWEN_QWQ_32B_FREE = "qwen/qwq-32b:free",
MODEL_FREE_NOUSRESEARCH_DEEPHERMES_3_LLAMA_3_8B_PREVIEW_FREE = "nousresearch/deephermes-3-llama-3-8b-preview:free",
MODEL_FREE_COGNITIVECOMPUTATIONS_DOLPHIN3_0_R1_MISTRAL_24B_FREE = "cognitivecomputations/dolphin3.0-r1-mistral-24b:free",
MODEL_FREE_COGNITIVECOMPUTATIONS_DOLPHIN3_0_MISTRAL_24B_FREE = "cognitivecomputations/dolphin3.0-mistral-24b:free",
MODEL_FREE_QWEN_QWEN2_5_VL_72B_INSTRUCT_FREE = "qwen/qwen2.5-vl-72b-instruct:free",
MODEL_FREE_MISTRALAI_MISTRAL_SMALL_24B_INSTRUCT_2501_FREE = "mistralai/mistral-small-24b-instruct-2501:free",
@ -50,7 +47,6 @@ export enum E_OPENROUTER_MODEL_FREE {
MODEL_FREE_QWEN_QWEN_2_5_CODER_32B_INSTRUCT_FREE = "qwen/qwen-2.5-coder-32b-instruct:free",
MODEL_FREE_META_LLAMA_LLAMA_3_2_3B_INSTRUCT_FREE = "meta-llama/llama-3.2-3b-instruct:free",
MODEL_FREE_QWEN_QWEN_2_5_72B_INSTRUCT_FREE = "qwen/qwen-2.5-72b-instruct:free",
MODEL_FREE_META_LLAMA_LLAMA_3_1_405B_INSTRUCT_FREE = "meta-llama/llama-3.1-405b-instruct:free",
MODEL_FREE_MISTRALAI_MISTRAL_NEMO_FREE = "mistralai/mistral-nemo:free",
MODEL_FREE_GOOGLE_GEMMA_2_9B_IT_FREE = "google/gemma-2-9b-it:free",
MODEL_FREE_MISTRALAI_MISTRAL_7B_INSTRUCT_FREE = "mistralai/mistral-7b-instruct:free"

@ -1,20 +1,39 @@
export enum E_OPENROUTER_MODEL {
MODEL_X_AI_GROK_4_FAST_FREE = "x-ai/grok-4-fast:free",
MODEL_INCLUSIONAI_LING_1T = "inclusionai/ling-1t",
MODEL_NVIDIA_LLAMA_3_3_NEMOTRON_SUPER_49B_V1_5 = "nvidia/llama-3.3-nemotron-super-49b-v1.5",
MODEL_BAIDU_ERNIE_4_5_21B_A3B_THINKING = "baidu/ernie-4.5-21b-a3b-thinking",
MODEL_GOOGLE_GEMINI_2_5_FLASH_IMAGE = "google/gemini-2.5-flash-image",
MODEL_QWEN_QWEN3_VL_30B_A3B_THINKING = "qwen/qwen3-vl-30b-a3b-thinking",
MODEL_QWEN_QWEN3_VL_30B_A3B_INSTRUCT = "qwen/qwen3-vl-30b-a3b-instruct",
MODEL_OPENAI_GPT_5_PRO = "openai/gpt-5-pro",
MODEL_Z_AI_GLM_4_6 = "z-ai/glm-4.6",
MODEL_ANTHROPIC_CLAUDE_SONNET_4_5 = "anthropic/claude-sonnet-4.5",
MODEL_DEEPSEEK_DEEPSEEK_V3_2_EXP = "deepseek/deepseek-v3.2-exp",
MODEL_THEDRUMMER_CYDONIA_24B_V4_1 = "thedrummer/cydonia-24b-v4.1",
MODEL_RELACE_RELACE_APPLY_3 = "relace/relace-apply-3",
MODEL_GOOGLE_GEMINI_2_5_FLASH_PREVIEW_09_2025 = "google/gemini-2.5-flash-preview-09-2025",
MODEL_GOOGLE_GEMINI_2_5_FLASH_LITE_PREVIEW_09_2025 = "google/gemini-2.5-flash-lite-preview-09-2025",
MODEL_QWEN_QWEN3_VL_235B_A22B_THINKING = "qwen/qwen3-vl-235b-a22b-thinking",
MODEL_QWEN_QWEN3_VL_235B_A22B_INSTRUCT = "qwen/qwen3-vl-235b-a22b-instruct",
MODEL_QWEN_QWEN3_MAX = "qwen/qwen3-max",
MODEL_QWEN_QWEN3_CODER_PLUS = "qwen/qwen3-coder-plus",
MODEL_OPENAI_GPT_5_CODEX = "openai/gpt-5-codex",
MODEL_DEEPSEEK_DEEPSEEK_V3_1_TERMINUS = "deepseek/deepseek-v3.1-terminus",
MODEL_X_AI_GROK_4_FAST = "x-ai/grok-4-fast",
MODEL_ALIBABA_TONGYI_DEEPRESEARCH_30B_A3B_FREE = "alibaba/tongyi-deepresearch-30b-a3b:free",
MODEL_ALIBABA_TONGYI_DEEPRESEARCH_30B_A3B = "alibaba/tongyi-deepresearch-30b-a3b",
MODEL_QWEN_QWEN3_CODER_FLASH = "qwen/qwen3-coder-flash",
MODEL_QWEN_QWEN3_CODER_PLUS = "qwen/qwen3-coder-plus",
MODEL_ARCEE_AI_AFM_4_5B = "arcee-ai/afm-4.5b",
MODEL_OPENGVLAB_INTERNVL3_78B = "opengvlab/internvl3-78b",
MODEL_QWEN_QWEN3_NEXT_80B_A3B_THINKING = "qwen/qwen3-next-80b-a3b-thinking",
MODEL_QWEN_QWEN3_NEXT_80B_A3B_INSTRUCT = "qwen/qwen3-next-80b-a3b-instruct",
MODEL_MEITUAN_LONGCAT_FLASH_CHAT_FREE = "meituan/longcat-flash-chat:free",
MODEL_MEITUAN_LONGCAT_FLASH_CHAT = "meituan/longcat-flash-chat",
MODEL_QWEN_QWEN_PLUS_2025_07_28 = "qwen/qwen-plus-2025-07-28",
MODEL_QWEN_QWEN_PLUS_2025_07_28_THINKING = "qwen/qwen-plus-2025-07-28:thinking",
MODEL_NVIDIA_NEMOTRON_NANO_9B_V2_FREE = "nvidia/nemotron-nano-9b-v2:free",
MODEL_NVIDIA_NEMOTRON_NANO_9B_V2 = "nvidia/nemotron-nano-9b-v2",
MODEL_QWEN_QWEN3_MAX = "qwen/qwen3-max",
MODEL_MOONSHOTAI_KIMI_K2_0905 = "moonshotai/kimi-k2-0905",
MODEL_BYTEDANCE_SEED_OSS_36B_INSTRUCT = "bytedance/seed-oss-36b-instruct",
MODEL_DEEPCOGITO_COGITO_V2_PREVIEW_LLAMA_109B_MOE = "deepcogito/cogito-v2-preview-llama-109b-moe",
MODEL_DEEPCOGITO_COGITO_V2_PREVIEW_DEEPSEEK_671B = "deepcogito/cogito-v2-preview-deepseek-671b",
MODEL_STEPFUN_AI_STEP3 = "stepfun-ai/step3",
@ -25,7 +44,6 @@ export enum E_OPENROUTER_MODEL {
MODEL_GOOGLE_GEMINI_2_5_FLASH_IMAGE_PREVIEW = "google/gemini-2.5-flash-image-preview",
MODEL_DEEPSEEK_DEEPSEEK_CHAT_V3_1_FREE = "deepseek/deepseek-chat-v3.1:free",
MODEL_DEEPSEEK_DEEPSEEK_CHAT_V3_1 = "deepseek/deepseek-chat-v3.1",
MODEL_DEEPSEEK_DEEPSEEK_V3_1_BASE = "deepseek/deepseek-v3.1-base",
MODEL_OPENAI_GPT_4O_AUDIO_PREVIEW = "openai/gpt-4o-audio-preview",
MODEL_MISTRALAI_MISTRAL_MEDIUM_3_1 = "mistralai/mistral-medium-3.1",
MODEL_BAIDU_ERNIE_4_5_21B_A3B = "baidu/ernie-4.5-21b-a3b",
@ -37,7 +55,6 @@ export enum E_OPENROUTER_MODEL {
MODEL_OPENAI_GPT_5 = "openai/gpt-5",
MODEL_OPENAI_GPT_5_MINI = "openai/gpt-5-mini",
MODEL_OPENAI_GPT_5_NANO = "openai/gpt-5-nano",
MODEL_OPENAI_GPT_OSS_120B_FREE = "openai/gpt-oss-120b:free",
MODEL_OPENAI_GPT_OSS_120B = "openai/gpt-oss-120b",
MODEL_OPENAI_GPT_OSS_20B_FREE = "openai/gpt-oss-20b:free",
MODEL_OPENAI_GPT_OSS_20B = "openai/gpt-oss-20b",
@ -67,6 +84,7 @@ export enum E_OPENROUTER_MODEL {
MODEL_TENCENT_HUNYUAN_A13B_INSTRUCT_FREE = "tencent/hunyuan-a13b-instruct:free",
MODEL_TENCENT_HUNYUAN_A13B_INSTRUCT = "tencent/hunyuan-a13b-instruct",
MODEL_TNGTECH_DEEPSEEK_R1T2_CHIMERA_FREE = "tngtech/deepseek-r1t2-chimera:free",
MODEL_TNGTECH_DEEPSEEK_R1T2_CHIMERA = "tngtech/deepseek-r1t2-chimera",
MODEL_MORPH_MORPH_V3_LARGE = "morph/morph-v3-large",
MODEL_MORPH_MORPH_V3_FAST = "morph/morph-v3-fast",
MODEL_BAIDU_ERNIE_4_5_VL_424B_A47B = "baidu/ernie-4.5-vl-424b-a47b",
@ -131,6 +149,7 @@ export enum E_OPENROUTER_MODEL {
MODEL_OPENAI_O4_MINI = "openai/o4-mini",
MODEL_SHISA_AI_SHISA_V2_LLAMA3_3_70B_FREE = "shisa-ai/shisa-v2-llama3.3-70b:free",
MODEL_SHISA_AI_SHISA_V2_LLAMA3_3_70B = "shisa-ai/shisa-v2-llama3.3-70b",
MODEL_QWEN_QWEN2_5_CODER_7B_INSTRUCT = "qwen/qwen2.5-coder-7b-instruct",
MODEL_OPENAI_GPT_4_1 = "openai/gpt-4.1",
MODEL_OPENAI_GPT_4_1_MINI = "openai/gpt-4.1-mini",
MODEL_OPENAI_GPT_4_1_NANO = "openai/gpt-4.1-nano",
@ -140,8 +159,6 @@ export enum E_OPENROUTER_MODEL {
MODEL_ARLIAI_QWQ_32B_ARLIAI_RPR_V1 = "arliai/qwq-32b-arliai-rpr-v1",
MODEL_AGENTICA_ORG_DEEPCODER_14B_PREVIEW_FREE = "agentica-org/deepcoder-14b-preview:free",
MODEL_AGENTICA_ORG_DEEPCODER_14B_PREVIEW = "agentica-org/deepcoder-14b-preview",
MODEL_MOONSHOTAI_KIMI_VL_A3B_THINKING_FREE = "moonshotai/kimi-vl-a3b-thinking:free",
MODEL_MOONSHOTAI_KIMI_VL_A3B_THINKING = "moonshotai/kimi-vl-a3b-thinking",
MODEL_X_AI_GROK_3_MINI_BETA = "x-ai/grok-3-mini-beta",
MODEL_X_AI_GROK_3_BETA = "x-ai/grok-3-beta",
MODEL_NVIDIA_LLAMA_3_1_NEMOTRON_ULTRA_253B_V1 = "nvidia/llama-3.1-nemotron-ultra-253b-v1",
@ -167,27 +184,23 @@ export enum E_OPENROUTER_MODEL {
MODEL_OPENAI_GPT_4O_SEARCH_PREVIEW = "openai/gpt-4o-search-preview",
MODEL_GOOGLE_GEMMA_3_27B_IT_FREE = "google/gemma-3-27b-it:free",
MODEL_GOOGLE_GEMMA_3_27B_IT = "google/gemma-3-27b-it",
MODEL_THEDRUMMER_ANUBIS_PRO_105B_V1 = "thedrummer/anubis-pro-105b-v1",
MODEL_THEDRUMMER_SKYFALL_36B_V2 = "thedrummer/skyfall-36b-v2",
MODEL_MICROSOFT_PHI_4_MULTIMODAL_INSTRUCT = "microsoft/phi-4-multimodal-instruct",
MODEL_PERPLEXITY_SONAR_REASONING_PRO = "perplexity/sonar-reasoning-pro",
MODEL_PERPLEXITY_SONAR_PRO = "perplexity/sonar-pro",
MODEL_PERPLEXITY_SONAR_DEEP_RESEARCH = "perplexity/sonar-deep-research",
MODEL_QWEN_QWQ_32B_FREE = "qwen/qwq-32b:free",
MODEL_QWEN_QWQ_32B = "qwen/qwq-32b",
MODEL_NOUSRESEARCH_DEEPHERMES_3_LLAMA_3_8B_PREVIEW_FREE = "nousresearch/deephermes-3-llama-3-8b-preview:free",
MODEL_NOUSRESEARCH_DEEPHERMES_3_LLAMA_3_8B_PREVIEW = "nousresearch/deephermes-3-llama-3-8b-preview",
MODEL_GOOGLE_GEMINI_2_0_FLASH_LITE_001 = "google/gemini-2.0-flash-lite-001",
MODEL_ANTHROPIC_CLAUDE_3_7_SONNET = "anthropic/claude-3.7-sonnet",
MODEL_ANTHROPIC_CLAUDE_3_7_SONNET_THINKING = "anthropic/claude-3.7-sonnet:thinking",
MODEL_PERPLEXITY_R1_1776 = "perplexity/r1-1776",
MODEL_MISTRALAI_MISTRAL_SABA = "mistralai/mistral-saba",
MODEL_COGNITIVECOMPUTATIONS_DOLPHIN3_0_R1_MISTRAL_24B_FREE = "cognitivecomputations/dolphin3.0-r1-mistral-24b:free",
MODEL_COGNITIVECOMPUTATIONS_DOLPHIN3_0_R1_MISTRAL_24B = "cognitivecomputations/dolphin3.0-r1-mistral-24b",
MODEL_COGNITIVECOMPUTATIONS_DOLPHIN3_0_MISTRAL_24B_FREE = "cognitivecomputations/dolphin3.0-mistral-24b:free",
MODEL_COGNITIVECOMPUTATIONS_DOLPHIN3_0_MISTRAL_24B = "cognitivecomputations/dolphin3.0-mistral-24b",
MODEL_META_LLAMA_LLAMA_GUARD_3_8B = "meta-llama/llama-guard-3-8b",
MODEL_OPENAI_O3_MINI_HIGH = "openai/o3-mini-high",
MODEL_DEEPSEEK_DEEPSEEK_R1_DISTILL_LLAMA_8B = "deepseek/deepseek-r1-distill-llama-8b",
MODEL_GOOGLE_GEMINI_2_0_FLASH_001 = "google/gemini-2.0-flash-001",
MODEL_QWEN_QWEN_VL_PLUS = "qwen/qwen-vl-plus",
MODEL_AION_LABS_AION_1_0 = "aion-labs/aion-1.0",
@ -215,6 +228,7 @@ export enum E_OPENROUTER_MODEL {
MODEL_MINIMAX_MINIMAX_01 = "minimax/minimax-01",
MODEL_MISTRALAI_CODESTRAL_2501 = "mistralai/codestral-2501",
MODEL_MICROSOFT_PHI_4 = "microsoft/phi-4",
MODEL_SAO10K_L3_1_70B_HANAMI_X1 = "sao10k/l3.1-70b-hanami-x1",
MODEL_DEEPSEEK_DEEPSEEK_CHAT = "deepseek/deepseek-chat",
MODEL_SAO10K_L3_3_EURYALE_70B = "sao10k/l3.3-euryale-70b",
MODEL_OPENAI_O1 = "openai/o1",
@ -225,7 +239,6 @@ export enum E_OPENROUTER_MODEL {
MODEL_AMAZON_NOVA_LITE_V1 = "amazon/nova-lite-v1",
MODEL_AMAZON_NOVA_MICRO_V1 = "amazon/nova-micro-v1",
MODEL_AMAZON_NOVA_PRO_V1 = "amazon/nova-pro-v1",
MODEL_QWEN_QWQ_32B_PREVIEW = "qwen/qwq-32b-preview",
MODEL_OPENAI_GPT_4O_2024_11_20 = "openai/gpt-4o-2024-11-20",
MODEL_MISTRALAI_MISTRAL_LARGE_2411 = "mistralai/mistral-large-2411",
MODEL_MISTRALAI_MISTRAL_LARGE_2407 = "mistralai/mistral-large-2407",
@ -236,30 +249,29 @@ export enum E_OPENROUTER_MODEL {
MODEL_THEDRUMMER_UNSLOPNEMO_12B = "thedrummer/unslopnemo-12b",
MODEL_ANTHROPIC_CLAUDE_3_5_HAIKU = "anthropic/claude-3.5-haiku",
MODEL_ANTHROPIC_CLAUDE_3_5_HAIKU_20241022 = "anthropic/claude-3.5-haiku-20241022",
MODEL_ANTHRACITE_ORG_MAGNUM_V4_72B = "anthracite-org/magnum-v4-72b",
MODEL_ANTHROPIC_CLAUDE_3_5_SONNET = "anthropic/claude-3.5-sonnet",
MODEL_ANTHRACITE_ORG_MAGNUM_V4_72B = "anthracite-org/magnum-v4-72b",
MODEL_MISTRALAI_MINISTRAL_8B = "mistralai/ministral-8b",
MODEL_MISTRALAI_MINISTRAL_3B = "mistralai/ministral-3b",
MODEL_QWEN_QWEN_2_5_7B_INSTRUCT = "qwen/qwen-2.5-7b-instruct",
MODEL_NVIDIA_LLAMA_3_1_NEMOTRON_70B_INSTRUCT = "nvidia/llama-3.1-nemotron-70b-instruct",
MODEL_INFLECTION_INFLECTION_3_PRODUCTIVITY = "inflection/inflection-3-productivity",
MODEL_INFLECTION_INFLECTION_3_PI = "inflection/inflection-3-pi",
MODEL_GOOGLE_GEMINI_FLASH_1_5_8B = "google/gemini-flash-1.5-8b",
MODEL_THEDRUMMER_ROCINANTE_12B = "thedrummer/rocinante-12b",
MODEL_ANTHRACITE_ORG_MAGNUM_V2_72B = "anthracite-org/magnum-v2-72b",
MODEL_META_LLAMA_LLAMA_3_2_3B_INSTRUCT_FREE = "meta-llama/llama-3.2-3b-instruct:free",
MODEL_META_LLAMA_LLAMA_3_2_3B_INSTRUCT = "meta-llama/llama-3.2-3b-instruct",
MODEL_META_LLAMA_LLAMA_3_2_1B_INSTRUCT = "meta-llama/llama-3.2-1b-instruct",
MODEL_META_LLAMA_LLAMA_3_2_90B_VISION_INSTRUCT = "meta-llama/llama-3.2-90b-vision-instruct",
MODEL_META_LLAMA_LLAMA_3_2_11B_VISION_INSTRUCT = "meta-llama/llama-3.2-11b-vision-instruct",
MODEL_META_LLAMA_LLAMA_3_2_90B_VISION_INSTRUCT = "meta-llama/llama-3.2-90b-vision-instruct",
MODEL_QWEN_QWEN_2_5_72B_INSTRUCT_FREE = "qwen/qwen-2.5-72b-instruct:free",
MODEL_QWEN_QWEN_2_5_72B_INSTRUCT = "qwen/qwen-2.5-72b-instruct",
MODEL_NEVERSLEEP_LLAMA_3_1_LUMIMAID_8B = "neversleep/llama-3.1-lumimaid-8b",
MODEL_OPENAI_O1_MINI = "openai/o1-mini",
MODEL_OPENAI_O1_MINI_2024_09_12 = "openai/o1-mini-2024-09-12",
MODEL_MISTRALAI_PIXTRAL_12B = "mistralai/pixtral-12b",
MODEL_COHERE_COMMAND_R_PLUS_08_2024 = "cohere/command-r-plus-08-2024",
MODEL_COHERE_COMMAND_R_08_2024 = "cohere/command-r-08-2024",
MODEL_COHERE_COMMAND_R_PLUS_08_2024 = "cohere/command-r-plus-08-2024",
MODEL_QWEN_QWEN_2_5_VL_7B_INSTRUCT = "qwen/qwen-2.5-vl-7b-instruct",
MODEL_SAO10K_L3_1_EURYALE_70B = "sao10k/l3.1-euryale-70b",
MODEL_MICROSOFT_PHI_3_5_MINI_128K_INSTRUCT = "microsoft/phi-3.5-mini-128k-instruct",
@ -269,61 +281,53 @@ export enum E_OPENROUTER_MODEL {
MODEL_SAO10K_L3_LUNARIS_8B = "sao10k/l3-lunaris-8b",
MODEL_OPENAI_GPT_4O_2024_08_06 = "openai/gpt-4o-2024-08-06",
MODEL_META_LLAMA_LLAMA_3_1_405B = "meta-llama/llama-3.1-405b",
MODEL_META_LLAMA_LLAMA_3_1_8B_INSTRUCT = "meta-llama/llama-3.1-8b-instruct",
MODEL_META_LLAMA_LLAMA_3_1_405B_INSTRUCT_FREE = "meta-llama/llama-3.1-405b-instruct:free",
MODEL_META_LLAMA_LLAMA_3_1_405B_INSTRUCT = "meta-llama/llama-3.1-405b-instruct",
MODEL_META_LLAMA_LLAMA_3_1_8B_INSTRUCT = "meta-llama/llama-3.1-8b-instruct",
MODEL_META_LLAMA_LLAMA_3_1_70B_INSTRUCT = "meta-llama/llama-3.1-70b-instruct",
MODEL_MISTRALAI_MISTRAL_NEMO_FREE = "mistralai/mistral-nemo:free",
MODEL_MISTRALAI_MISTRAL_NEMO = "mistralai/mistral-nemo",
MODEL_OPENAI_GPT_4O_MINI = "openai/gpt-4o-mini",
MODEL_OPENAI_GPT_4O_MINI_2024_07_18 = "openai/gpt-4o-mini-2024-07-18",
MODEL_OPENAI_GPT_4O_MINI = "openai/gpt-4o-mini",
MODEL_GOOGLE_GEMMA_2_27B_IT = "google/gemma-2-27b-it",
MODEL_GOOGLE_GEMMA_2_9B_IT_FREE = "google/gemma-2-9b-it:free",
MODEL_GOOGLE_GEMMA_2_9B_IT = "google/gemma-2-9b-it",
MODEL_ANTHROPIC_CLAUDE_3_5_SONNET_20240620 = "anthropic/claude-3.5-sonnet-20240620",
MODEL_SAO10K_L3_EURYALE_70B = "sao10k/l3-euryale-70b",
MODEL_MISTRALAI_MISTRAL_7B_INSTRUCT_V0_3 = "mistralai/mistral-7b-instruct-v0.3",
MODEL_NOUSRESEARCH_HERMES_2_PRO_LLAMA_3_8B = "nousresearch/hermes-2-pro-llama-3-8b",
MODEL_MISTRALAI_MISTRAL_7B_INSTRUCT_FREE = "mistralai/mistral-7b-instruct:free",
MODEL_MISTRALAI_MISTRAL_7B_INSTRUCT = "mistralai/mistral-7b-instruct",
MODEL_MISTRALAI_MISTRAL_7B_INSTRUCT_V0_3 = "mistralai/mistral-7b-instruct-v0.3",
MODEL_MICROSOFT_PHI_3_MINI_128K_INSTRUCT = "microsoft/phi-3-mini-128k-instruct",
MODEL_MICROSOFT_PHI_3_MEDIUM_128K_INSTRUCT = "microsoft/phi-3-medium-128k-instruct",
MODEL_NEVERSLEEP_LLAMA_3_LUMIMAID_70B = "neversleep/llama-3-lumimaid-70b",
MODEL_GOOGLE_GEMINI_FLASH_1_5 = "google/gemini-flash-1.5",
MODEL_OPENAI_GPT_4O = "openai/gpt-4o",
MODEL_OPENAI_GPT_4O_EXTENDED = "openai/gpt-4o:extended",
MODEL_META_LLAMA_LLAMA_GUARD_2_8B = "meta-llama/llama-guard-2-8b",
MODEL_OPENAI_GPT_4O_2024_05_13 = "openai/gpt-4o-2024-05-13",
MODEL_META_LLAMA_LLAMA_GUARD_2_8B = "meta-llama/llama-guard-2-8b",
MODEL_META_LLAMA_LLAMA_3_8B_INSTRUCT = "meta-llama/llama-3-8b-instruct",
MODEL_META_LLAMA_LLAMA_3_70B_INSTRUCT = "meta-llama/llama-3-70b-instruct",
MODEL_MISTRALAI_MIXTRAL_8X22B_INSTRUCT = "mistralai/mixtral-8x22b-instruct",
MODEL_MICROSOFT_WIZARDLM_2_8X22B = "microsoft/wizardlm-2-8x22b",
MODEL_GOOGLE_GEMINI_PRO_1_5 = "google/gemini-pro-1.5",
MODEL_OPENAI_GPT_4_TURBO = "openai/gpt-4-turbo",
MODEL_COHERE_COMMAND_R_PLUS = "cohere/command-r-plus",
MODEL_COHERE_COMMAND_R_PLUS_04_2024 = "cohere/command-r-plus-04-2024",
MODEL_COHERE_COMMAND = "cohere/command",
MODEL_COHERE_COMMAND_R = "cohere/command-r",
MODEL_ANTHROPIC_CLAUDE_3_HAIKU = "anthropic/claude-3-haiku",
MODEL_ANTHROPIC_CLAUDE_3_OPUS = "anthropic/claude-3-opus",
MODEL_COHERE_COMMAND_R_03_2024 = "cohere/command-r-03-2024",
MODEL_MISTRALAI_MISTRAL_LARGE = "mistralai/mistral-large",
MODEL_OPENAI_GPT_3_5_TURBO_0613 = "openai/gpt-3.5-turbo-0613",
MODEL_OPENAI_GPT_4_TURBO_PREVIEW = "openai/gpt-4-turbo-preview",
MODEL_MISTRALAI_MISTRAL_SMALL = "mistralai/mistral-small",
MODEL_MISTRALAI_MISTRAL_TINY = "mistralai/mistral-tiny",
MODEL_MISTRALAI_MISTRAL_SMALL = "mistralai/mistral-small",
MODEL_MISTRALAI_MISTRAL_7B_INSTRUCT_V0_2 = "mistralai/mistral-7b-instruct-v0.2",
MODEL_MISTRALAI_MIXTRAL_8X7B_INSTRUCT = "mistralai/mixtral-8x7b-instruct",
MODEL_NEVERSLEEP_NOROMAID_20B = "neversleep/noromaid-20b",
MODEL_ALPINDALE_GOLIATH_120B = "alpindale/goliath-120b",
MODEL_OPENROUTER_AUTO = "openrouter/auto",
MODEL_OPENAI_GPT_4_1106_PREVIEW = "openai/gpt-4-1106-preview",
MODEL_OPENAI_GPT_3_5_TURBO_INSTRUCT = "openai/gpt-3.5-turbo-instruct",
MODEL_MISTRALAI_MISTRAL_7B_INSTRUCT_V0_1 = "mistralai/mistral-7b-instruct-v0.1",
MODEL_OPENAI_GPT_3_5_TURBO_INSTRUCT = "openai/gpt-3.5-turbo-instruct",
MODEL_OPENAI_GPT_3_5_TURBO_16K = "openai/gpt-3.5-turbo-16k",
MODEL_MANCER_WEAVER = "mancer/weaver",
MODEL_UNDI95_REMM_SLERP_L2_13B = "undi95/remm-slerp-l2-13b",
MODEL_GRYPHE_MYTHOMAX_L2_13B = "gryphe/mythomax-l2-13b",
MODEL_OPENAI_GPT_3_5_TURBO = "openai/gpt-3.5-turbo",
MODEL_OPENAI_GPT_4 = "openai/gpt-4",
MODEL_OPENAI_GPT_4_0314 = "openai/gpt-4-0314"
MODEL_OPENAI_GPT_4_0314 = "openai/gpt-4-0314",
MODEL_OPENAI_GPT_4 = "openai/gpt-4"
}

@ -1,6 +0,0 @@
kbot-d "Create a comprehensive Readme.md, in ./tests/scripted/readme.md (installation, usage, examples, etc), with Mermaid diagrams (no braces in node names)" \
--model=openai/o3-mini \
--disable=npm,terminal,interact,git,search,web,user,email \
--include=./src/commands/*.ts \
--preferences=none \
--logLevel=2

@ -1,147 +0,0 @@
KBot - Command Line AI Assistant
================================
KBot is a powerful command line tool that leverages AI models to assist with code generation, file summarization, multi-file processing, and more.
Table of Contents
-----------------
- [Installation](#installation)
- [Usage](#usage)
- [Examples](#examples)
- [Architecture](#architecture)
- [Configuration](#configuration)
- [Development](#development)
- [License](#license)
Installation
------------
1. Ensure you have [Node.js](https://nodejs.org/) installed (version 14+ recommended).
2. Clone the repository:
```bash
git clone https://github.com/yourusername/kbot.git
cd kbot
```
3. Install dependencies:
```bash
npm install
```
4. Build the project (if applicable):
```bash
npm run build
```
Usage
-----
KBot provides a rich set of commands for interacting with AI tools. Here are some common commands:
- **Run a task:**
```bash
kbot run --prompt "Summarize the project" --path ./src
```
- **Fetch available models:**
```bash
kbot fetch
```
- **Modify configurations:**
```bash
kbot init
```
Examples
--------
Below are some usage examples:
1. **Summarize Project Files**:
```bash
kbot run --prompt "Give me a summary of the project files" --path ./my_project --include "*.js"
```
2. **Generate Documentation**:
```bash
kbot run --prompt "Generate documentation for the codebase" --dst ./docs/README.md
```
3. **Personalized Assistant**:
Use your own preferences and profiles stored in `./.kbot/preferences.md` for a personalized experience.
Architecture
------------
The following Mermaid diagram illustrates the high-level architecture of KBot:
```mermaid
flowchart TD
Start[Start] --> Config[Load Config]
Config --> Init[Initialize Client]
Init --> Process[Process Request]
Process --> Options[Set Options]
Options --> Execute[Execute Task]
Execute --> End[Return Result]
```
The flow begins with startup, loads the configuration (preferences and settings), initializes the API client, gathers and processes user requests, sets task-specific options, executes the task (running the completion, tools, or assistant mode), and finally returns the result.
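To make that flow concrete, here is a minimal TypeScript sketch of the sequence described above; the names (`KbotConfig`, `initClient`, `runTask`) and fields are hypothetical and do not reflect kbot's actual internals.
```typescript
// Hypothetical sketch of the flow described above; names are illustrative, not kbot's real API.
interface KbotConfig {
  apiKey: string;           // assumed: API key for the configured service
  preferencesPath?: string; // assumed: e.g. ./.kbot/preferences.md
}

// "Initialize Client": returns a stub client bound to the loaded configuration.
function initClient(config: KbotConfig) {
  return {
    async execute(req: { prompt: string; options: Record<string, unknown> }): Promise<string> {
      // A real client would call the configured model API here.
      return `result for: ${req.prompt}`;
    },
  };
}

// Load Config -> Initialize Client -> Process Request -> Set Options -> Execute Task -> Return Result
async function runTask(prompt: string, config: KbotConfig): Promise<string> {
  const client = initClient(config);                    // Initialize Client
  const request = { prompt, options: { logLevel: 2 } }; // Process Request / Set Options
  return client.execute(request);                       // Execute Task, Return Result
}
```
The real implementation also branches into tool and assistant modes, which this sketch omits.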
Configuration
-------------
KBot uses a configuration file located at `./.kbot/config.json` and a preferences file at `./.kbot/preferences.md` to customize behavior:
- **config.json**: Contains API keys and service configurations for OpenAI, OpenRouter, and more.
- **preferences.md**: Stores personal information used to tailor the assistant's responses.
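As a rough illustration, the configuration might be modeled along the lines below; the field names are assumptions, since the actual schema of `config.json` is not shown in this diff.
```typescript
// Hypothetical shape for ./.kbot/config.json; field names are assumptions, not the file's real schema.
interface KbotServiceConfig {
  apiKey: string;        // e.g. an OpenAI or OpenRouter API key
  baseUrl?: string;      // optional endpoint override
  defaultModel?: string; // e.g. "openai/gpt-4o-mini"
}

interface KbotConfigJson {
  openai?: KbotServiceConfig;
  openrouter?: KbotServiceConfig;
  preferencesPath?: string; // typically ./.kbot/preferences.md
}
```
The `kbot init` command mentioned under Usage is presumably what scaffolds this file.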
Development
-----------
- **Run tests:**
```bash
npm test
```
- **Build the project:**
```bash
npm run build
```
- **Lint the code:**
```bash
npm run lint
```
Mermaid Diagrams
----------------
Mermaid diagrams are used to visually represent the various components and flows within KBot. Here is another example diagram illustrating the internal processing:
```mermaid
flowchart LR
User[User Input] --> CLI[Command Line Parser]
CLI --> Processor[Task Processor]
Processor --> API[API Client]
API --> Collector[Response Collector]
Collector --> Output[Display Output]
```
License
-------
This project is licensed under the MIT License.