maintenance love :)

lovebird 2025-02-14 16:08:33 +01:00
parent 0d32a1d07c
commit 320801cd5b
6 changed files with 75 additions and 242 deletions

View File

@@ -1,12 +1,12 @@
{
"name": "@plastichub/kbot",
"version": "1.1.13",
"version": "1.1.14",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "@plastichub/kbot",
"version": "1.1.13",
"version": "1.1.14",
"license": "ISC",
"dependencies": {
"node-emoji": "^2.2.0"

View File

@@ -1,6 +1,6 @@
{
"name": "@plastichub/kbot",
"version": "1.1.13",
"version": "1.1.14",
"main": "main_node.js",
"author": "",
"license": "ISC",

File diff suppressed because one or more lines are too long

View File

@@ -1,16 +1,10 @@
{
"timestamp": 1739477568146,
"timestamp": 1739545527029,
"models": [
{
"id": "gpt-4o",
"id": "gpt-4o-mini-audio-preview-2024-12-17",
"object": "model",
"created": 1715367049,
"owned_by": "system"
},
{
"id": "gpt-4o-2024-08-06",
"object": "model",
"created": 1722814719,
"created": 1734115920,
"owned_by": "system"
},
{
@@ -25,6 +19,18 @@
"created": 1698798177,
"owned_by": "system"
},
{
"id": "gpt-4o-audio-preview-2024-10-01",
"object": "model",
"created": 1727389042,
"owned_by": "system"
},
{
"id": "gpt-4o-audio-preview",
"object": "model",
"created": 1727460443,
"owned_by": "system"
},
{
"id": "o1-mini-2024-09-12",
"object": "model",
@@ -61,12 +67,6 @@
"created": 1734387380,
"owned_by": "system"
},
{
"id": "gpt-4o-mini-audio-preview-2024-12-17",
"object": "model",
"created": 1734115920,
"owned_by": "system"
},
{
"id": "whisper-1",
"object": "model",
@@ -79,6 +79,12 @@
"created": 1712361441,
"owned_by": "system"
},
{
"id": "gpt-4o",
"object": "model",
"created": 1715367049,
"owned_by": "system"
},
{
"id": "gpt-4o-mini-audio-preview",
"object": "model",
@@ -86,9 +92,9 @@
"owned_by": "system"
},
{
"id": "gpt-4o-audio-preview-2024-10-01",
"id": "gpt-4o-mini-2024-07-18",
"object": "model",
"created": 1727389042,
"created": 1721172717,
"owned_by": "system"
},
{
@@ -127,6 +133,24 @@
"created": 1734034239,
"owned_by": "system"
},
{
"id": "gpt-4o-mini",
"object": "model",
"created": 1721172741,
"owned_by": "system"
},
{
"id": "gpt-4o-2024-08-06",
"object": "model",
"created": 1722814719,
"owned_by": "system"
},
{
"id": "tts-1-hd",
"object": "model",
"created": 1699046015,
"owned_by": "system"
},
{
"id": "gpt-4-0125-preview",
"object": "model",
@@ -134,9 +158,15 @@
"owned_by": "system"
},
{
"id": "gpt-4o-audio-preview",
"id": "gpt-4o-2024-11-20",
"object": "model",
"created": 1727460443,
"created": 1739331543,
"owned_by": "system"
},
{
"id": "gpt-4-turbo-preview",
"object": "model",
"created": 1706037777,
"owned_by": "system"
},
{
@@ -146,21 +176,9 @@
"owned_by": "system"
},
{
"id": "gpt-4o-mini-2024-07-18",
"id": "text-embedding-3-large",
"object": "model",
"created": 1721172717,
"owned_by": "system"
},
{
"id": "gpt-4o-mini",
"object": "model",
"created": 1721172741,
"owned_by": "system"
},
{
"id": "tts-1-hd",
"object": "model",
"created": 1699046015,
"created": 1705953180,
"owned_by": "system"
},
{
@@ -193,12 +211,6 @@
"created": 1698959748,
"owned_by": "system"
},
{
"id": "gpt-4o-2024-05-13",
"object": "model",
"created": 1715368132,
"owned_by": "system"
},
{
"id": "gpt-3.5-turbo-instruct",
"object": "model",
@@ -229,12 +241,6 @@
"created": 1677610602,
"owned_by": "openai"
},
{
"id": "gpt-4o-2024-11-20",
"object": "model",
"created": 1739331543,
"owned_by": "system"
},
{
"id": "gpt-4o-realtime-preview",
"object": "model",
@@ -271,23 +277,17 @@
"created": 1671217299,
"owned_by": "openai-internal"
},
{
"id": "gpt-4o-2024-05-13",
"object": "model",
"created": 1715368132,
"owned_by": "system"
},
{
"id": "gpt-4-0613",
"object": "model",
"created": 1686588896,
"owned_by": "openai"
},
{
"id": "text-embedding-3-large",
"object": "model",
"created": 1705953180,
"owned_by": "system"
},
{
"id": "gpt-4-turbo-preview",
"object": "model",
"created": 1706037777,
"owned_by": "system"
}
]
}
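
This file appears to be a snapshot of OpenAI's /v1/models listing keyed by a millisecond timestamp, so the churn above is mostly the provider reordering and delisting entries between refreshes. A minimal TypeScript sketch of how a cache in this {timestamp, models} shape could be regenerated — the script name, output path, and OPENAI_API_KEY env var are assumptions, not part of this commit:

// refresh-models-cache.ts — hypothetical helper, not part of this commit.
// Regenerates a cache file in the {timestamp, models} shape shown above.
import { writeFileSync } from "node:fs";

async function refresh(): Promise<void> {
  const res = await fetch("https://api.openai.com/v1/models", {
    headers: { Authorization: `Bearer ${process.env.OPENAI_API_KEY}` },
  });
  if (!res.ok) throw new Error(`models request failed: HTTP ${res.status}`);
  // The API returns { object: "list", data: [{ id, object, created, owned_by }] };
  // the cache stores that array under "models" plus a millisecond timestamp.
  const { data } = (await res.json()) as { data: unknown[] };
  writeFileSync(
    "models-openai.json", // assumed output path
    JSON.stringify({ timestamp: Date.now(), models: data }, null, 1),
  );
}

refresh().catch((err) => {
  console.error(err);
  process.exit(1);
});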

View File

@@ -1,5 +1,5 @@
{
"timestamp": 1739477568753,
"timestamp": 1739545527255,
"models": [
{
"id": "cognitivecomputations/dolphin3.0-r1-mistral-24b:free",
@@ -121,30 +121,6 @@
},
"per_request_limits": null
},
{
"id": "deepseek/deepseek-r1-distill-llama-8b",
"name": "DeepSeek: R1 Distill Llama 8B",
"created": 1738937718,
"description": "DeepSeek R1 Distill Llama 8B is a distilled large language model based on [Llama-3.1-8B-Instruct](/meta-llama/llama-3.1-8b-instruct), using outputs from [DeepSeek R1](/deepseek/deepseek-r1). The model combines advanced distillation techniques to achieve high performance across multiple benchmarks, including:\n\n- AIME 2024 pass@1: 50.4\n- MATH-500 pass@1: 89.1\n- CodeForces Rating: 1205\n\nThe model leverages fine-tuning from DeepSeek R1's outputs, enabling competitive performance comparable to larger frontier models.\n\nHugging Face: \n- [Llama-3.1-8B](https://huggingface.co/meta-llama/Llama-3.1-8B) \n- [DeepSeek-R1-Distill-Llama-8B](https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Llama-8B) |",
"context_length": 32000,
"architecture": {
"modality": "text->text",
"tokenizer": "Llama3",
"instruct_type": null
},
"pricing": {
"prompt": "0.00000004",
"completion": "0.00000004",
"image": "0",
"request": "0"
},
"top_provider": {
"context_length": 32000,
"max_completion_tokens": 32000,
"is_moderated": false
},
"per_request_limits": null
},
{
"id": "google/gemini-2.0-flash-001",
"name": "Google: Gemini Flash 2.0",
@@ -277,8 +253,8 @@
"instruct_type": null
},
"pricing": {
"prompt": "0.0000008",
"completion": "0.0000016",
"prompt": "0.0000007",
"completion": "0.0000014",
"image": "0",
"request": "0"
},
@@ -534,21 +510,21 @@
"name": "DeepSeek: R1 Distill Qwen 14B",
"created": 1738193940,
"description": "DeepSeek R1 Distill Qwen 14B is a distilled large language model based on [Qwen 2.5 14B](https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-14B), using outputs from [DeepSeek R1](/deepseek/deepseek-r1). It outperforms OpenAI's o1-mini across various benchmarks, achieving new state-of-the-art results for dense models.\n\nOther benchmark results include:\n\n- AIME 2024 pass@1: 69.7\n- MATH-500 pass@1: 93.9\n- CodeForces Rating: 1481\n\nThe model leverages fine-tuning from DeepSeek R1's outputs, enabling competitive performance comparable to larger frontier models.",
"context_length": 64000,
"context_length": 131072,
"architecture": {
"modality": "text->text",
"tokenizer": "Qwen",
"instruct_type": null
},
"pricing": {
"prompt": "0.00000015",
"completion": "0.00000015",
"prompt": "0.0000016",
"completion": "0.0000016",
"image": "0",
"request": "0"
},
"top_provider": {
"context_length": 64000,
"max_completion_tokens": 64000,
"context_length": 131072,
"max_completion_tokens": 32768,
"is_moderated": false
},
"per_request_limits": null
@@ -3097,54 +3073,6 @@
},
"per_request_limits": null
},
{
"id": "qwen/qwen-2-7b-instruct:free",
"name": "Qwen 2 7B Instruct (free)",
"created": 1721088000,
"description": "Qwen2 7B is a transformer-based model that excels in language understanding, multilingual capabilities, coding, mathematics, and reasoning.\n\nIt features SwiGLU activation, attention QKV bias, and group query attention. It is pretrained on extensive data with supervised finetuning and direct preference optimization.\n\nFor more details, see this [blog post](https://qwenlm.github.io/blog/qwen2/) and [GitHub repo](https://github.com/QwenLM/Qwen2).\n\nUsage of this model is subject to [Tongyi Qianwen LICENSE AGREEMENT](https://huggingface.co/Qwen/Qwen1.5-110B-Chat/blob/main/LICENSE).",
"context_length": 8192,
"architecture": {
"modality": "text->text",
"tokenizer": "Qwen",
"instruct_type": "chatml"
},
"pricing": {
"prompt": "0",
"completion": "0",
"image": "0",
"request": "0"
},
"top_provider": {
"context_length": 8192,
"max_completion_tokens": 4096,
"is_moderated": false
},
"per_request_limits": null
},
{
"id": "qwen/qwen-2-7b-instruct",
"name": "Qwen 2 7B Instruct",
"created": 1721088000,
"description": "Qwen2 7B is a transformer-based model that excels in language understanding, multilingual capabilities, coding, mathematics, and reasoning.\n\nIt features SwiGLU activation, attention QKV bias, and group query attention. It is pretrained on extensive data with supervised finetuning and direct preference optimization.\n\nFor more details, see this [blog post](https://qwenlm.github.io/blog/qwen2/) and [GitHub repo](https://github.com/QwenLM/Qwen2).\n\nUsage of this model is subject to [Tongyi Qianwen LICENSE AGREEMENT](https://huggingface.co/Qwen/Qwen1.5-110B-Chat/blob/main/LICENSE).",
"context_length": 32768,
"architecture": {
"modality": "text->text",
"tokenizer": "Qwen",
"instruct_type": "chatml"
},
"pricing": {
"prompt": "0.000000054",
"completion": "0.000000054",
"image": "0",
"request": "0"
},
"top_provider": {
"context_length": 32768,
"max_completion_tokens": null,
"is_moderated": false
},
"per_request_limits": null
},
{
"id": "google/gemma-2-27b-it",
"name": "Google: Gemma 2 27B",
@@ -3361,30 +3289,6 @@
},
"per_request_limits": null
},
{
"id": "cognitivecomputations/dolphin-mixtral-8x22b",
"name": "Dolphin 2.9.2 Mixtral 8x22B 🐬",
"created": 1717804800,
"description": "Dolphin 2.9 is designed for instruction following, conversational, and coding. This model is a finetune of [Mixtral 8x22B Instruct](/models/mistralai/mixtral-8x22b-instruct). It features a 64k context length and was fine-tuned with a 16k sequence length using ChatML templates.\n\nThis model is a successor to [Dolphin Mixtral 8x7B](/models/cognitivecomputations/dolphin-mixtral-8x7b).\n\nThe model is uncensored and is stripped of alignment and bias. It requires an external alignment layer for ethical use. Users are cautioned to use this highly compliant model responsibly, as detailed in a blog post about uncensored models at [erichartford.com/uncensored-models](https://erichartford.com/uncensored-models).\n\n#moe #uncensored",
"context_length": 16000,
"architecture": {
"modality": "text->text",
"tokenizer": "Mistral",
"instruct_type": "chatml"
},
"pricing": {
"prompt": "0.0000009",
"completion": "0.0000009",
"image": "0",
"request": "0"
},
"top_provider": {
"context_length": 16000,
"max_completion_tokens": null,
"is_moderated": false
},
"per_request_limits": null
},
{
"id": "qwen/qwen-2-72b-instruct",
"name": "Qwen 2 72B Instruct",
@@ -4105,30 +4009,6 @@
},
"per_request_limits": null
},
{
"id": "sophosympatheia/midnight-rose-70b",
"name": "Midnight Rose 70B",
"created": 1711065600,
"description": "A merge with a complex family tree, this model was crafted for roleplaying and storytelling. Midnight Rose is a successor to Rogue Rose and Aurora Nights and improves upon them both. It wants to produce lengthy output by default and is the best creative writing merge produced so far by sophosympatheia.\n\nDescending from earlier versions of Midnight Rose and [Wizard Tulu Dolphin 70B](https://huggingface.co/sophosympatheia/Wizard-Tulu-Dolphin-70B-v1.0), it inherits the best qualities of each.",
"context_length": 4096,
"architecture": {
"modality": "text->text",
"tokenizer": "Llama2",
"instruct_type": "airoboros"
},
"pricing": {
"prompt": "0.0000008",
"completion": "0.0000008",
"image": "0",
"request": "0"
},
"top_provider": {
"context_length": 4096,
"max_completion_tokens": null,
"is_moderated": false
},
"per_request_limits": null
},
{
"id": "cohere/command",
"name": "Cohere: Command",
@@ -4825,30 +4705,6 @@
},
"per_request_limits": null
},
{
"id": "teknium/openhermes-2.5-mistral-7b",
"name": "OpenHermes 2.5 Mistral 7B",
"created": 1700438400,
"description": "A continuation of [OpenHermes 2 model](/models/teknium/openhermes-2-mistral-7b), trained on additional code datasets.\nPotentially the most interesting finding from training on a good ratio (est. of around 7-14% of the total dataset) of code instruction was that it has boosted several non-code benchmarks, including TruthfulQA, AGIEval, and GPT4All suite. It did however reduce BigBench benchmark score, but the net gain overall is significant.",
"context_length": 4096,
"architecture": {
"modality": "text->text",
"tokenizer": "Mistral",
"instruct_type": "chatml"
},
"pricing": {
"prompt": "0.00000017",
"completion": "0.00000017",
"image": "0",
"request": "0"
},
"top_provider": {
"context_length": 4096,
"max_completion_tokens": 4096,
"is_moderated": false
},
"per_request_limits": null
},
{
"id": "undi95/toppy-m-7b:free",
"name": "Toppy M 7B (free)",
@@ -5041,30 +4897,6 @@
},
"per_request_limits": null
},
{
"id": "jondurbin/airoboros-l2-70b",
"name": "Airoboros 70B",
"created": 1698537600,
"description": "A Llama 2 70B fine-tune using synthetic data (the Airoboros dataset).\n\nCurrently based on [jondurbin/airoboros-l2-70b](https://huggingface.co/jondurbin/airoboros-l2-70b-2.2.1), but might get updated in the future.",
"context_length": 4000,
"architecture": {
"modality": "text->text",
"tokenizer": "Llama2",
"instruct_type": "airoboros"
},
"pricing": {
"prompt": "0.0000005",
"completion": "0.0000005",
"image": "0",
"request": "0"
},
"top_provider": {
"context_length": 4000,
"max_completion_tokens": null,
"is_moderated": false
},
"per_request_limits": null
},
{
"id": "xwin-lm/xwin-lm-70b",
"name": "Xwin 70B",
@@ -5245,8 +5077,8 @@
"instruct_type": "alpaca"
},
"pricing": {
"prompt": "0.00000017",
"completion": "0.00000017",
"prompt": "0.00000018",
"completion": "0.00000018",
"image": "0",
"request": "0"
},
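
Heavily reordered hunks like the OpenRouter diff above (delisted models such as qwen/qwen-2-7b-instruct and cognitivecomputations/dolphin-mixtral-8x22b, plus repriced entries) make the net effect of a refresh hard to read. Comparing the model ids of two snapshots gives a cleaner summary; a sketch under the same assumptions, with hypothetical file names and the {timestamp, models} schema shown above:

// diff-model-caches.ts — hypothetical helper, not part of this commit:
// print which model ids were added or removed between two snapshots.
import { readFileSync } from "node:fs";

interface ModelCache {
  timestamp: number;
  models: { id: string }[];
}

function ids(path: string): Set<string> {
  const cache = JSON.parse(readFileSync(path, "utf8")) as ModelCache;
  return new Set(cache.models.map((m) => m.id));
}

const before = ids("models-openrouter.old.json"); // hypothetical paths
const after = ids("models-openrouter.new.json");

for (const id of after) if (!before.has(id)) console.log(`added:   ${id}`);
for (const id of before) if (!after.has(id)) console.log(`removed: ${id}`);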

View File

@@ -3,8 +3,9 @@ kbotd modify \
--prompt="./.kbot/todos.md" \
--mode=completion \
--router2=openai \
--model=openai/gpt-4-32k \
--include2="src/commands/run.ts" \
--include2="src/commands/run-tools.ts" \
--model2=openai/gpt-4-32k \
--include="src/commands/run.ts" \
--include="src/zod_schema.ts" \
--include="src/client.ts" \
--disable="npm,terminal,git,user,search,email" \
--dst="./.kbot/todos-log.md"