bump kbot :)

Code 2025-02-01 20:32:07 +01:00
parent 82922e5faf
commit 371dc97aa8
12 changed files with 116 additions and 34 deletions

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

@@ -1,6 +1,6 @@
{
"name": "@plastichub/kbot",
"version": "1.1.7",
"version": "1.1.8",
"main": "main_node.js",
"author": "",
"license": "ISC",

File diff suppressed because one or more lines are too long

@@ -1,5 +1,5 @@
{
"timestamp": 1738409312097,
"timestamp": 1738438200064,
"models": [
{
"id": "gpt-4o-audio-preview-2024-10-01",
@@ -31,6 +31,18 @@
"created": 1698798177,
"owned_by": "system"
},
{
"id": "gpt-4o-mini",
"object": "model",
"created": 1721172741,
"owned_by": "system"
},
{
"id": "gpt-4o-mini-2024-07-18",
"object": "model",
"created": 1721172717,
"owned_by": "system"
},
{
"id": "gpt-3.5-turbo",
"object": "model",
@@ -115,24 +127,12 @@
"created": 1699053533,
"owned_by": "system"
},
{
"id": "gpt-4o-mini-2024-07-18",
"object": "model",
"created": 1721172717,
"owned_by": "system"
},
{
"id": "gpt-4o-2024-05-13",
"object": "model",
"created": 1715368132,
"owned_by": "system"
},
{
"id": "gpt-4o-mini",
"object": "model",
"created": 1721172741,
"owned_by": "system"
},
{
"id": "gpt-4o-2024-08-06",
"object": "model",

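A note on units in this cached list: the top-level timestamp is in milliseconds (it matches the commit time above), while each model's created field is a Unix time in seconds, so date conversion differs by a factor of 1000. A minimal sketch using only the standard Date API:

// Cache timestamp is milliseconds; a model's "created" is seconds.
new Date(1738438200064).toISOString();     // "2025-02-01T19:30:00.064Z" (cache refresh)
new Date(1721172741 * 1000).toISOString(); // "2024-07-16T23:32:21.000Z" (gpt-4o-mini)
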
@@ -1,6 +1,78 @@
{
"timestamp": 1738409312720,
"timestamp": 1738438200259,
"models": [
{
"id": "qwen/qwen-turbo-2024-11-01",
"name": "Qwen: Qwen-Turbo",
"created": 1738410974,
"description": "Qwen-Turbo is a 1M context model that provides fast speed and low cost, suitable for simple tasks.",
"context_length": 1000000,
"architecture": {
"modality": "text->text",
"tokenizer": "Qwen",
"instruct_type": null
},
"pricing": {
"prompt": "0.00000005",
"completion": "0.0000002",
"image": "0",
"request": "0"
},
"top_provider": {
"context_length": 1000000,
"max_completion_tokens": 8192,
"is_moderated": false
},
"per_request_limits": null
},
{
"id": "qwen/qwen-plus",
"name": "Qwen: Qwen-Plus",
"created": 1738409840,
"description": "Qwen-Plus is a 131K context model with a balanced performance, speed, and cost combination.",
"context_length": 131072,
"architecture": {
"modality": "text->text",
"tokenizer": "Qwen",
"instruct_type": null
},
"pricing": {
"prompt": "0.0000004",
"completion": "0.0000012",
"image": "0",
"request": "0"
},
"top_provider": {
"context_length": 131072,
"max_completion_tokens": 8192,
"is_moderated": false
},
"per_request_limits": null
},
{
"id": "qwen/qwen-max",
"name": "Qwen: Qwen-Max ",
"created": 1738402289,
"description": "Qwen-Max, with 32K context, provides the best inference performance among [Qwen models](/qwen), especially for complex multi-step tasks.",
"context_length": 32768,
"architecture": {
"modality": "text->text",
"tokenizer": "Qwen",
"instruct_type": null
},
"pricing": {
"prompt": "0.0000016",
"completion": "0.0000064",
"image": "0",
"request": "0"
},
"top_provider": {
"context_length": 32768,
"max_completion_tokens": 8192,
"is_moderated": false
},
"per_request_limits": null
},
{
"id": "openai/o3-mini",
"name": "OpenAI: o3 Mini",
@@ -332,7 +404,7 @@
},
"top_provider": {
"context_length": 163840,
"max_completion_tokens": 16384,
"max_completion_tokens": 32768,
"is_moderated": false
},
"per_request_limits": null
@@ -1782,7 +1854,7 @@
"name": "Meta: Llama 3.2 11B Vision Instruct (free)",
"created": 1727222400,
"description": "Llama 3.2 11B Vision is a multimodal model with 11 billion parameters, designed to handle tasks combining visual and textual data. It excels in tasks such as image captioning and visual question answering, bridging the gap between language generation and visual reasoning. Pre-trained on a massive dataset of image-text pairs, it performs well in complex, high-accuracy image analysis.\n\nIts ability to integrate visual understanding with language processing makes it an ideal solution for industries requiring comprehensive visual-linguistic AI applications, such as content creation, AI-driven customer service, and research.\n\nClick here for the [original model card](https://github.com/meta-llama/llama-models/blob/main/models/llama3_2/MODEL_CARD_VISION.md).\n\nUsage of this model is subject to [Meta's Acceptable Use Policy](https://www.llama.com/llama3/use-policy/).",
"context_length": 8192,
"context_length": 131072,
"architecture": {
"modality": "text+image->text",
"tokenizer": "Llama3",
@@ -1795,8 +1867,8 @@
"request": "0"
},
"top_provider": {
"context_length": 8192,
"max_completion_tokens": 4096,
"context_length": 131072,
"max_completion_tokens": 2048,
"is_moderated": false
},
"per_request_limits": null

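The pricing strings in these entries are OpenRouter's per-token rates in USD, so a request's cost is roughly prompt_tokens * prompt_rate + completion_tokens * completion_rate. A minimal sketch using the qwen/qwen-plus figures above (the estimateCost helper is illustrative, not part of kbot):

// Estimate request cost from an OpenRouter-style pricing object (rates are USD per token).
interface ModelPricing { prompt: string; completion: string }

const estimateCost = (p: ModelPricing, promptTokens: number, completionTokens: number): number =>
  Number(p.prompt) * promptTokens + Number(p.completion) * completionTokens;

// qwen/qwen-plus with 10,000 prompt tokens and 2,000 completion tokens:
// 0.0000004 * 10000 + 0.0000012 * 2000 = 0.004 + 0.0024 = 0.0064 USD
estimateCost({ prompt: "0.0000004", completion: "0.0000012" }, 10_000, 2_000);
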
File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

@@ -79,7 +79,6 @@ export const processRun = async (opts: IKBotTask) => {
const paramsPath = path.join(logDir, 'params.json')
write(paramsPath, JSON.stringify({ ...params }, null, 2))
logger.debug(`Read ${files.length} files from project ${path.resolve(options.path)} with ${options.include}`, files.map(f => f.path), options.variables, params.tools.map(t => `${t.function.name} : ${t.function.description}`))
let ret = null
try {
switch (options.mode) {
@@ -101,14 +100,13 @@ export const processRun = async (opts: IKBotTask) => {
} catch (e) {
logger.error(`Error running ${options.mode} mode: ${e.message}`)
}
opts.variables['LAST'] = ret
return ret
}
/**
* Extract file paths (Windows or POSIX style) from a single string,
* preserving any spaces within the paths. Needed for Salamander File Manager selections (eg: kbot "summarize, as json" -i $(ListOfSelectedFullNames))
* preserving any spaces within the paths. Needed for Salamander File Manager selections (eg: kbot "summarize, as json" -i "$(ListOfSelectedFullNames)")
* - For Windows, it looks for patterns like "C:\" (any drive letter).
* - For POSIX, it looks for a leading slash "/".
*

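The comment above describes the extraction rule: a new path begins at a drive-letter prefix such as C:\ on Windows or at a leading / on POSIX, and spaces inside a path are preserved. A rough sketch of that rule (illustrative only, not the actual kbot implementation):

// Split a selection string into paths; a path starts at "X:\" or at a "/" at the start or after whitespace.
const extractPaths = (input: string): string[] => {
  const starts = [...input.matchAll(/[A-Za-z]:\\|(?:^|\s)\//g)]
    .map(m => (/\s/.test(m[0][0]) ? m.index! + 1 : m.index!));
  return starts
    .map((s, i) => input.slice(s, starts[i + 1] ?? input.length).trim())
    .filter(Boolean);
};

// extractPaths('C:\\My Files\\a.txt C:\\Docs\\b.txt') => ['C:\\My Files\\a.txt', 'C:\\Docs\\b.txt']
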
@@ -79,7 +79,7 @@ export const OptionsSchema = (opts?: any) => {
'each',
z.string()
.optional()
.describe('Glob pattern to run for each matching file')
.describe('Iterate over items, supported: GLOB | Path to JSON File | array of strings (comma separated). To test different models, use --each="gpt-3.5-turbo,gpt-4o", the actual string will be exposed as variable `ITEM`, eg: --dst="${ITEM}-output.md"')
)
.add(
'disable',

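The new --each description above covers three input forms. A rough sketch of how such a value could be expanded into the list of ITEM values, under the assumptions stated in that description (the resolveEach helper and the use of the glob package are illustrative, not kbot's actual code):

import * as fs from 'fs';
// Assumption: a globbing utility such as the `glob` package is available.
import { globSync } from 'glob';

// Expand an --each value into the items ITEM will take, one run per item.
const resolveEach = (each: string): string[] => {
  if (each.toLowerCase().endsWith('.json') && fs.existsSync(each)) {
    return JSON.parse(fs.readFileSync(each, 'utf8'));          // path to a JSON array of strings
  }
  if (/[*?[\]{}]/.test(each)) {
    return globSync(each);                                     // glob pattern over files
  }
  return each.split(',').map(s => s.trim()).filter(Boolean);   // comma-separated list
};

// resolveEach('gpt-3.5-turbo,gpt-4o') => ['gpt-3.5-turbo', 'gpt-4o'];
// each run then substitutes ITEM, e.g. --dst="${ITEM}-output.md".
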
@@ -7,7 +7,7 @@ export interface IKBotOptions {
output?: string | undefined;
/** Optional destination path for the result, will substitute ${MODEL_NAME} and ${ROUTER} in the path. Optional, used for "completion" mode */
dst?: string | undefined;
/** Glob pattern to run for each matching file */
/** Iterate over items, supported: GLOB | Path to JSON File | array of strings (comma separated). To test different models, use --each="gpt-3.5-turbo,gpt-4o", the actual string will be exposed as variable `ITEM`, eg: --dst=${ITEM}-output.md */
each?: string | undefined;
/** Disable tools categories, eg: --disable=fs,git,interact,terminal,search,web,email,user */
disable?: string[];
@@ -64,8 +64,12 @@ export interface IKBotOptions {
databricks/dbrx-instruct | paid
deepseek/deepseek-chat-v2.5 | paid
deepseek/deepseek-r1 | paid
deepseek/deepseek-r1:free | free
deepseek/deepseek-r1:nitro | paid
deepseek/deepseek-r1-distill-llama-70b | paid
deepseek/deepseek-r1-distill-qwen-1.5b | paid
deepseek/deepseek-r1-distill-qwen-14b | paid
deepseek/deepseek-r1-distill-qwen-32b | paid
deepseek/deepseek-chat | paid
cognitivecomputations/dolphin-mixtral-8x7b | paid
cognitivecomputations/dolphin-mixtral-8x22b | paid
@@ -90,6 +94,7 @@ export interface IKBotOptions {
google/gemma-2-27b-it | paid
google/gemma-2-9b-it | paid
google/gemma-2-9b-it:free | free
google/gemma-7b-it | paid
google/learnlm-1.5-pro-experimental:free | free
google/palm-2-chat-bison | paid
google/palm-2-chat-bison-32k | paid
@@ -158,6 +163,7 @@ export interface IKBotOptions {
mistralai/mistral-7b-instruct-v0.1 | paid
mistralai/mistral-7b-instruct-v0.3 | paid
mistralai/mistral-nemo | paid
mistralai/mistral-small-24b-instruct-2501 | paid
mistralai/mixtral-8x22b-instruct | paid
mistralai/mixtral-8x7b | paid
mistralai/mixtral-8x7b-instruct | paid
@@ -206,6 +212,7 @@ export interface IKBotOptions {
openai/o1-mini-2024-09-12 | paid
openai/o1-preview | paid
openai/o1-preview-2024-09-12 | paid
openai/o3-mini | paid
openchat/openchat-7b | paid
openchat/openchat-7b:free | free
teknium/openhermes-2.5-mistral-7b | paid
@@ -214,11 +221,16 @@ export interface IKBotOptions {
perplexity/llama-3.1-sonar-large-128k-online | paid
perplexity/llama-3.1-sonar-small-128k-chat | paid
perplexity/llama-3.1-sonar-small-128k-online | paid
perplexity/sonar | paid
perplexity/sonar-reasoning | paid
pygmalionai/mythalion-13b | paid
qwen/qwen-2-72b-instruct | paid
qwen/qwen-2-7b-instruct | paid
qwen/qwen-2-7b-instruct:free | free
qwen/qvq-72b-preview | paid
qwen/qwen-max | paid
qwen/qwen-plus | paid
qwen/qwen-turbo-2024-11-01 | paid
qwen/qwq-32b-preview | paid
qwen/qwen-2-vl-72b-instruct | paid
qwen/qwen-2-vl-7b-instruct | paid

File diff suppressed because one or more lines are too long