maintenance love :)

babayaga 2025-12-31 08:11:45 +01:00
parent e1dd99b693
commit ea3a4e151d
5 changed files with 35 additions and 27 deletions

@@ -1,5 +1,5 @@
{
"timestamp": 1766846860695,
"timestamp": 1767165092218,
"models": [
{
"id": "gpt-4-0613",

@@ -1,5 +1,5 @@
{
"timestamp": 1766846861040,
"timestamp": 1767165092339,
"models": [
{
"id": "bytedance-seed/seed-1.6-flash",
@@ -143,13 +143,23 @@
},
"per_request_limits": null,
"supported_parameters": [
"frequency_penalty",
"include_reasoning",
"logit_bias",
"logprobs",
"max_tokens",
"presence_penalty",
"reasoning",
"repetition_penalty",
"response_format",
"seed",
"stop",
"structured_outputs",
"temperature",
"tool_choice",
"tools",
"top_k",
"top_logprobs",
"top_p"
],
"default_parameters": {
@@ -1545,8 +1555,8 @@
"instruct_type": null
},
"pricing": {
"prompt": "0.000000224",
"completion": "0.00000032",
"prompt": "0.00000025",
"completion": "0.00000038",
"request": "0",
"image": "0",
"web_search": "0",
@@ -1554,7 +1564,7 @@
},
"top_provider": {
"context_length": 163840,
"max_completion_tokens": null,
"max_completion_tokens": 65536,
"is_moderated": false
},
"per_request_limits": null,
@@ -1719,8 +1729,8 @@
"instruct_type": null
},
"pricing": {
"prompt": "0.0000003",
"completion": "0.0000012",
"prompt": "0.00000025",
"completion": "0.00000085",
"request": "0",
"image": "0",
"web_search": "0",
@@ -2860,7 +2870,7 @@
},
"top_provider": {
"context_length": 196608,
"max_completion_tokens": 131072,
"max_completion_tokens": 65536,
"is_moderated": false
},
"per_request_limits": null,
@@ -3878,7 +3888,7 @@
"name": "Z.AI: GLM 4.6",
"created": 1759235576,
"description": "Compared with GLM-4.5, this generation brings several key improvements:\n\nLonger context window: The context window has been expanded from 128K to 200K tokens, enabling the model to handle more complex agentic tasks.\nSuperior coding performance: The model achieves higher scores on code benchmarks and demonstrates better real-world performance in applications such as Claude Code、Cline、Roo Code and Kilo Code, including improvements in generating visually polished front-end pages.\nAdvanced reasoning: GLM-4.6 shows a clear improvement in reasoning performance and supports tool use during inference, leading to stronger overall capability.\nMore capable agents: GLM-4.6 exhibits stronger performance in tool using and search-based agents, and integrates more effectively within agent frameworks.\nRefined writing: Better aligns with human preferences in style and readability, and performs more naturally in role-playing scenarios.",
"context_length": 204800,
"context_length": 202752,
"architecture": {
"modality": "text->text",
"input_modalities": [
@@ -3891,16 +3901,16 @@
"instruct_type": null
},
"pricing": {
"prompt": "0.00000039",
"completion": "0.0000019",
"prompt": "0.00000035",
"completion": "0.0000015",
"request": "0",
"image": "0",
"web_search": "0",
"internal_reasoning": "0"
},
"top_provider": {
"context_length": 204800,
"max_completion_tokens": 204800,
"context_length": 202752,
"max_completion_tokens": 65536,
"is_moderated": false
},
"per_request_limits": null,
@@ -6631,7 +6641,7 @@
},
"top_provider": {
"context_length": 131072,
"max_completion_tokens": 128000,
"max_completion_tokens": 131072,
"is_moderated": false
},
"per_request_limits": null,
@@ -8057,8 +8067,8 @@
"instruct_type": null
},
"pricing": {
"prompt": "0.0000003",
"completion": "0.0000012",
"prompt": "0.00000025",
"completion": "0.00000085",
"request": "0",
"image": "0",
"web_search": "0",
@@ -8802,7 +8812,7 @@
"name": "DeepSeek: DeepSeek R1 0528 Qwen3 8B",
"created": 1748538543,
"description": "DeepSeek-R1-0528 is a lightly upgraded release of DeepSeek R1 that taps more compute and smarter post-training tricks, pushing its reasoning and inference to the brink of flagship models like O3 and Gemini 2.5 Pro.\nIt now tops math, programming, and logic leaderboards, showcasing a step-change in depth-of-thought.\nThe distilled variant, DeepSeek-R1-0528-Qwen3-8B, transfers this chain-of-thought into an 8 B-parameter form, beating standard Qwen3 8B by +10 pp and tying the 235 B “thinking” giant on AIME 2024.",
"context_length": 32768,
"context_length": 128000,
"architecture": {
"modality": "text->text",
"input_modalities": [
@@ -8815,16 +8825,16 @@
"instruct_type": "deepseek-r1"
},
"pricing": {
"prompt": "0.00000002",
"completion": "0.0000001",
"prompt": "0.000000048",
"completion": "0.000000072",
"request": "0",
"image": "0",
"web_search": "0",
"internal_reasoning": "0"
},
"top_provider": {
"context_length": 32768,
"max_completion_tokens": 32768,
"context_length": 128000,
"max_completion_tokens": 32000,
"is_moderated": false
},
"per_request_limits": null,
@@ -8835,10 +8845,8 @@
"presence_penalty",
"reasoning",
"repetition_penalty",
"response_format",
"seed",
"stop",
"structured_outputs",
"temperature",
"top_k",
"top_p"

File diff suppressed because one or more lines are too long

@@ -1,12 +1,12 @@
{
"name": "@plastichub/kbot",
"version": "1.1.58",
"version": "1.1.59",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "@plastichub/kbot",
"version": "1.1.58",
"version": "1.1.59",
"license": "ISC",
"dependencies": {
"node-emoji": "^2.2.0"

@@ -1,6 +1,6 @@
{
"name": "@plastichub/kbot",
"version": "1.1.58",
"version": "1.1.59",
"main": "main_node.js",
"author": "",
"license": "ISC",