diff --git a/packages/osr-code-bot/dist/package-lock.json b/packages/osr-code-bot/dist/package-lock.json
index f14a848..0f6d297 100644
--- a/packages/osr-code-bot/dist/package-lock.json
+++ b/packages/osr-code-bot/dist/package-lock.json
@@ -1,12 +1,12 @@
 {
   "name": "@plastichub/kbot",
-  "version": "1.1.23",
+  "version": "1.1.25",
   "lockfileVersion": 3,
   "requires": true,
   "packages": {
     "": {
       "name": "@plastichub/kbot",
-      "version": "1.1.23",
+      "version": "1.1.25",
       "license": "ISC",
       "dependencies": {
         "node-emoji": "^2.2.0"
diff --git a/packages/osr-code-bot/dist/package.json b/packages/osr-code-bot/dist/package.json
index e775780..691df06 100644
--- a/packages/osr-code-bot/dist/package.json
+++ b/packages/osr-code-bot/dist/package.json
@@ -1,6 +1,6 @@
 {
   "name": "@plastichub/kbot",
-  "version": "1.1.23",
+  "version": "1.1.25",
   "main": "main_node.js",
   "author": "",
   "license": "ISC",
diff --git a/packages/osr-code-bot/dist/stats/statistics.html b/packages/osr-code-bot/dist/stats/statistics.html
index bcf1cfc..ce8f8f9 100644
--- a/packages/osr-code-bot/dist/stats/statistics.html
+++ b/packages/osr-code-bot/dist/stats/statistics.html
@@ -188,7 +188,7 @@ footer h2 {
-
+
diff --git a/packages/osr-code-bot/kbot-tests/.gitignore b/packages/osr-code-bot/kbot-tests/.gitignore
deleted file mode 100644
index deed335..0000000
--- a/packages/osr-code-bot/kbot-tests/.gitignore
+++ /dev/null
@@ -1,3 +0,0 @@
-node_modules/
-dist/
-.env
diff --git a/packages/osr-code-bot/kbot-tests/package-lock.json b/packages/osr-code-bot/kbot-tests/package-lock.json
deleted file mode 100644
index efb0b35..0000000
--- a/packages/osr-code-bot/kbot-tests/package-lock.json
+++ /dev/null
@@ -1,239 +0,0 @@
-{
-  "name": "ts-cli-app",
-  "version": "1.0.0",
-  "lockfileVersion": 3,
-  "requires": true,
-  "packages": {
-    "": {
-      "name": "ts-cli-app",
-      "version": "1.0.0",
-      "license": "ISC",
-      "bin": {
-        "ts-cli": "dist/index.js"
-      },
-      "devDependencies": {
-        "@types/node": "^20.17.10",
-        "ts-node": "^10.9.1",
-        "typescript": "^5.7.2"
-      }
-    },
-    "node_modules/@cspotcode/source-map-support": {
-      "version": "0.8.1",
-      "resolved": "https://registry.npmjs.org/@cspotcode/source-map-support/-/source-map-support-0.8.1.tgz",
-      "integrity": "sha512-IchNf6dN4tHoMFIn/7OE8LWZ19Y6q/67Bmf6vnGREv8RSbBVb9LPJxEcnwrcwX6ixSvaiGoomAUvu4YSxXrVgw==",
-      "dev": true,
-      "license": "MIT",
-      "dependencies": {
-        "@jridgewell/trace-mapping": "0.3.9"
-      },
-      "engines": {
-        "node": ">=12"
-      }
-    },
-    "node_modules/@jridgewell/resolve-uri": {
-      "version": "3.1.2",
-      "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz",
-      "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==",
-      "dev": true,
-      "license": "MIT",
-      "engines": {
-        "node": ">=6.0.0"
-      }
-    },
-    "node_modules/@jridgewell/sourcemap-codec": {
-      "version": "1.5.0",
-      "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.0.tgz",
-      "integrity": "sha512-gv3ZRaISU3fjPAgNsriBRqGWQL6quFx04YMPW/zD8XMLsU32mhCCbfbO6KZFLjvYpCZ8zyDEgqsgf+PwPaM7GQ==",
-      "dev": true,
-      "license": "MIT"
-    },
-    "node_modules/@jridgewell/trace-mapping": {
-      "version": "0.3.9",
-      "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.9.tgz",
-      "integrity": "sha512-3Belt6tdc8bPgAtbcmdtNJlirVoTmEb5e2gC94PnkwEW9jI6CAHUeoG85tjWP5WquqfavoMtMwiG4P926ZKKuQ==",
-      "dev": true,
-      "license": "MIT",
-      "dependencies": {
-        "@jridgewell/resolve-uri": "^3.0.3",
-        "@jridgewell/sourcemap-codec": "^1.4.10"
-      }
-    },
-    "node_modules/@tsconfig/node10": {
-      "version": "1.0.11",
-      "resolved": "https://registry.npmjs.org/@tsconfig/node10/-/node10-1.0.11.tgz",
-      "integrity": "sha512-DcRjDCujK/kCk/cUe8Xz8ZSpm8mS3mNNpta+jGCA6USEDfktlNvm1+IuZ9eTcDbNk41BHwpHHeW+N1lKCz4zOw==",
-      "dev": true,
-      "license": "MIT"
-    },
-    "node_modules/@tsconfig/node12": {
-      "version": "1.0.11",
-      "resolved": "https://registry.npmjs.org/@tsconfig/node12/-/node12-1.0.11.tgz",
-      "integrity": "sha512-cqefuRsh12pWyGsIoBKJA9luFu3mRxCA+ORZvA4ktLSzIuCUtWVxGIuXigEwO5/ywWFMZ2QEGKWvkZG1zDMTag==",
-      "dev": true,
-      "license": "MIT"
-    },
-    "node_modules/@tsconfig/node14": {
-      "version": "1.0.3",
-      "resolved": "https://registry.npmjs.org/@tsconfig/node14/-/node14-1.0.3.tgz",
-      "integrity": "sha512-ysT8mhdixWK6Hw3i1V2AeRqZ5WfXg1G43mqoYlM2nc6388Fq5jcXyr5mRsqViLx/GJYdoL0bfXD8nmF+Zn/Iow==",
-      "dev": true,
-      "license": "MIT"
-    },
-    "node_modules/@tsconfig/node16": {
-      "version": "1.0.4",
-      "resolved": "https://registry.npmjs.org/@tsconfig/node16/-/node16-1.0.4.tgz",
-      "integrity": "sha512-vxhUy4J8lyeyinH7Azl1pdd43GJhZH/tP2weN8TntQblOY+A0XbT8DJk1/oCPuOOyg/Ja757rG0CgHcWC8OfMA==",
-      "dev": true,
-      "license": "MIT"
}, - "node_modules/@types/node": { - "version": "20.17.10", - "resolved": "https://registry.npmjs.org/@types/node/-/node-20.17.10.tgz", - "integrity": "sha512-/jrvh5h6NXhEauFFexRin69nA0uHJ5gwk4iDivp/DeoEua3uwCUto6PC86IpRITBOs4+6i2I56K5x5b6WYGXHA==", - "dev": true, - "license": "MIT", - "dependencies": { - "undici-types": "~6.19.2" - } - }, - "node_modules/acorn": { - "version": "8.14.0", - "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.14.0.tgz", - "integrity": "sha512-cl669nCJTZBsL97OF4kUQm5g5hC2uihk0NxY3WENAC0TYdILVkAyHymAntgxGkl7K+t0cXIrH5siy5S4XkFycA==", - "dev": true, - "license": "MIT", - "bin": { - "acorn": "bin/acorn" - }, - "engines": { - "node": ">=0.4.0" - } - }, - "node_modules/acorn-walk": { - "version": "8.3.4", - "resolved": "https://registry.npmjs.org/acorn-walk/-/acorn-walk-8.3.4.tgz", - "integrity": "sha512-ueEepnujpqee2o5aIYnvHU6C0A42MNdsIDeqy5BydrkuC5R1ZuUFnm27EeFJGoEHJQgn3uleRvmTXaJgfXbt4g==", - "dev": true, - "license": "MIT", - "dependencies": { - "acorn": "^8.11.0" - }, - "engines": { - "node": ">=0.4.0" - } - }, - "node_modules/arg": { - "version": "4.1.3", - "resolved": "https://registry.npmjs.org/arg/-/arg-4.1.3.tgz", - "integrity": "sha512-58S9QDqG0Xx27YwPSt9fJxivjYl432YCwfDMfZ+71RAqUrZef7LrKQZ3LHLOwCS4FLNBplP533Zx895SeOCHvA==", - "dev": true, - "license": "MIT" - }, - "node_modules/create-require": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/create-require/-/create-require-1.1.1.tgz", - "integrity": "sha512-dcKFX3jn0MpIaXjisoRvexIJVEKzaq7z2rZKxf+MSr9TkdmHmsU4m2lcLojrj/FHl8mk5VxMmYA+ftRkP/3oKQ==", - "dev": true, - "license": "MIT" - }, - "node_modules/diff": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/diff/-/diff-4.0.2.tgz", - "integrity": "sha512-58lmxKSA4BNyLz+HHMUzlOEpg09FV+ev6ZMe3vJihgdxzgcwZ8VoEEPmALCZG9LmqfVoNMMKpttIYTVG6uDY7A==", - "dev": true, - "license": "BSD-3-Clause", - "engines": { - "node": ">=0.3.1" - } - }, - "node_modules/make-error": { - "version": "1.3.6", - "resolved": "https://registry.npmjs.org/make-error/-/make-error-1.3.6.tgz", - "integrity": "sha512-s8UhlNe7vPKomQhC1qFelMokr/Sc3AgNbso3n74mVPA5LTZwkB9NlXf4XPamLxJE8h0gh73rM94xvwRT2CVInw==", - "dev": true, - "license": "ISC" - }, - "node_modules/ts-node": { - "version": "10.9.2", - "resolved": "https://registry.npmjs.org/ts-node/-/ts-node-10.9.2.tgz", - "integrity": "sha512-f0FFpIdcHgn8zcPSbf1dRevwt047YMnaiJM3u2w2RewrB+fob/zePZcrOyQoLMMO7aBIddLcQIEK5dYjkLnGrQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "@cspotcode/source-map-support": "^0.8.0", - "@tsconfig/node10": "^1.0.7", - "@tsconfig/node12": "^1.0.7", - "@tsconfig/node14": "^1.0.0", - "@tsconfig/node16": "^1.0.2", - "acorn": "^8.4.1", - "acorn-walk": "^8.1.1", - "arg": "^4.1.0", - "create-require": "^1.1.0", - "diff": "^4.0.1", - "make-error": "^1.1.1", - "v8-compile-cache-lib": "^3.0.1", - "yn": "3.1.1" - }, - "bin": { - "ts-node": "dist/bin.js", - "ts-node-cwd": "dist/bin-cwd.js", - "ts-node-esm": "dist/bin-esm.js", - "ts-node-script": "dist/bin-script.js", - "ts-node-transpile-only": "dist/bin-transpile.js", - "ts-script": "dist/bin-script-deprecated.js" - }, - "peerDependencies": { - "@swc/core": ">=1.2.50", - "@swc/wasm": ">=1.2.50", - "@types/node": "*", - "typescript": ">=2.7" - }, - "peerDependenciesMeta": { - "@swc/core": { - "optional": true - }, - "@swc/wasm": { - "optional": true - } - } - }, - "node_modules/typescript": { - "version": "5.7.2", - "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.7.2.tgz", - "integrity": 
"sha512-i5t66RHxDvVN40HfDd1PsEThGNnlMCMT3jMUuoh9/0TaqWevNontacunWyN02LA9/fIbEWlcHZcgTKb9QoaLfg==", - "dev": true, - "license": "Apache-2.0", - "bin": { - "tsc": "bin/tsc", - "tsserver": "bin/tsserver" - }, - "engines": { - "node": ">=14.17" - } - }, - "node_modules/undici-types": { - "version": "6.19.8", - "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-6.19.8.tgz", - "integrity": "sha512-ve2KP6f/JnbPBFyobGHuerC9g1FYGn/F8n1LWTwNxCEzd6IfqTwUQcNXgEtmmQ6DlRrC1hrSrBnCZPokRrDHjw==", - "dev": true, - "license": "MIT" - }, - "node_modules/v8-compile-cache-lib": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/v8-compile-cache-lib/-/v8-compile-cache-lib-3.0.1.tgz", - "integrity": "sha512-wa7YjyUGfNZngI/vtK0UHAN+lgDCxBPCylVXGp0zu59Fz5aiGtNXaq3DhIov063MorB+VfufLh3JlF2KdTK3xg==", - "dev": true, - "license": "MIT" - }, - "node_modules/yn": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/yn/-/yn-3.1.1.tgz", - "integrity": "sha512-Ux4ygGWsu2c7isFWe8Yu1YluJmqVhxqK2cLXNQA5AcC3QfbGNpM7fu0Y8b/z16pXLnFxZYvWhd3fhBY9DLmC6Q==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=6" - } - } - } -} diff --git a/packages/osr-code-bot/kbot-tests/package.json b/packages/osr-code-bot/kbot-tests/package.json deleted file mode 100644 index 44518a4..0000000 --- a/packages/osr-code-bot/kbot-tests/package.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "name": "ts-cli-app", - "version": "1.0.0", - "description": "TypeScript CLI application", - "main": "dist/index.js", - "bin": { - "ts-cli": "./dist/index.js" - }, - "scripts": { - "build": "tsc", - "start": "ts-node src/index.ts" - }, - "keywords": [], - "author": "", - "license": "ISC", - "devDependencies": { - "@types/node": "^20.17.10", - "ts-node": "^10.9.1", - "typescript": "^5.7.2" - } -} diff --git a/packages/osr-code-bot/kbot-tests/src/index.js b/packages/osr-code-bot/kbot-tests/src/index.js deleted file mode 100644 index f314b03..0000000 --- a/packages/osr-code-bot/kbot-tests/src/index.js +++ /dev/null @@ -1,3 +0,0 @@ -#!/usr/bin/env node -"use strict"; -console.log('Hello, World! 
Welcome to your CLI application.'); diff --git a/packages/osr-code-bot/kbot-tests/src/index.ts b/packages/osr-code-bot/kbot-tests/src/index.ts deleted file mode 100644 index 9dc0728..0000000 --- a/packages/osr-code-bot/kbot-tests/src/index.ts +++ /dev/null @@ -1,3 +0,0 @@ -#!/usr/bin/env node - -console.log('Hello from TypeScript CLI!'); diff --git a/packages/osr-code-bot/kbot-tests/tsconfig.json b/packages/osr-code-bot/kbot-tests/tsconfig.json deleted file mode 100644 index 32b44da..0000000 --- a/packages/osr-code-bot/kbot-tests/tsconfig.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "compilerOptions": { - "target": "ES6", - "module": "commonjs", - "rootDir": "src", - "outDir": "dist", - "strict": true - } -} \ No newline at end of file diff --git a/packages/osr-code-bot/models/data/openai_models.json b/packages/osr-code-bot/models/data/openai_models.json index 678f8ac..ff66fae 100644 --- a/packages/osr-code-bot/models/data/openai_models.json +++ b/packages/osr-code-bot/models/data/openai_models.json @@ -1,16 +1,10 @@ { - "timestamp": 1742336018639, + "timestamp": 1744900800332, "models": [ { - "id": "gpt-4o-2024-11-20", + "id": "gpt-4o-audio-preview-2024-12-17", "object": "model", - "created": 1739331543, - "owned_by": "system" - }, - { - "id": "gpt-4o-mini-audio-preview-2024-12-17", - "object": "model", - "created": 1734115920, + "created": 1734034239, "owned_by": "system" }, { @@ -19,12 +13,24 @@ "created": 1698785189, "owned_by": "system" }, + { + "id": "text-embedding-3-large", + "object": "model", + "created": 1705953180, + "owned_by": "system" + }, { "id": "dall-e-2", "object": "model", "created": 1698798177, "owned_by": "system" }, + { + "id": "o4-mini-2025-04-16", + "object": "model", + "created": 1744133506, + "owned_by": "system" + }, { "id": "gpt-4o-audio-preview-2024-10-01", "object": "model", @@ -32,39 +38,21 @@ "owned_by": "system" }, { - "id": "gpt-4o-audio-preview", + "id": "o4-mini", "object": "model", - "created": 1727460443, + "created": 1744225351, "owned_by": "system" }, { - "id": "o1-mini-2024-09-12", + "id": "gpt-4.1-nano", "object": "model", - "created": 1725648979, + "created": 1744321707, "owned_by": "system" }, { - "id": "o1-mini", + "id": "gpt-4.1-nano-2025-04-14", "object": "model", - "created": 1725649008, - "owned_by": "system" - }, - { - "id": "omni-moderation-latest", - "object": "model", - "created": 1731689265, - "owned_by": "system" - }, - { - "id": "gpt-4o-mini-audio-preview", - "object": "model", - "created": 1734387424, - "owned_by": "system" - }, - { - "id": "omni-moderation-2024-09-26", - "object": "model", - "created": 1732734466, + "created": 1744321025, "owned_by": "system" }, { @@ -73,12 +61,24 @@ "created": 1727131766, "owned_by": "system" }, + { + "id": "gpt-4o-realtime-preview", + "object": "model", + "created": 1727659998, + "owned_by": "system" + }, { "id": "babbage-002", "object": "model", "created": 1692634615, "owned_by": "system" }, + { + "id": "gpt-4-turbo-preview", + "object": "model", + "created": 1706037777, + "owned_by": "system" + }, { "id": "tts-1-hd-1106", "object": "model", @@ -86,21 +86,9 @@ "owned_by": "system" }, { - "id": "whisper-1", + "id": "gpt-4-0125-preview", "object": "model", - "created": 1677532384, - "owned_by": "openai-internal" - }, - { - "id": "text-embedding-3-large", - "object": "model", - "created": 1705953180, - "owned_by": "system" - }, - { - "id": "gpt-4o-audio-preview-2024-12-17", - "object": "model", - "created": 1734034239, + "created": 1706037612, "owned_by": "system" }, { @@ -110,10 +98,10 @@ "owned_by": 
"openai" }, { - "id": "gpt-4o-2024-05-13", + "id": "text-embedding-ada-002", "object": "model", - "created": 1715368132, - "owned_by": "system" + "created": 1671217299, + "owned_by": "openai-internal" }, { "id": "tts-1-hd", @@ -122,9 +110,15 @@ "owned_by": "system" }, { - "id": "o1-preview", + "id": "gpt-4o-mini-audio-preview", "object": "model", - "created": 1725648897, + "created": 1734387424, + "owned_by": "system" + }, + { + "id": "gpt-4o-audio-preview", + "object": "model", + "created": 1727460443, "owned_by": "system" }, { @@ -134,15 +128,27 @@ "owned_by": "system" }, { - "id": "gpt-3.5-turbo-instruct-0914", + "id": "gpt-4o-mini-realtime-preview", "object": "model", - "created": 1694122472, + "created": 1734387380, "owned_by": "system" }, { - "id": "gpt-4o-mini", + "id": "gpt-4.1-mini", "object": "model", - "created": 1721172741, + "created": 1744318173, + "owned_by": "system" + }, + { + "id": "gpt-4o-mini-realtime-preview-2024-12-17", + "object": "model", + "created": 1734112601, + "owned_by": "system" + }, + { + "id": "gpt-3.5-turbo-instruct-0914", + "object": "model", + "created": 1694122472, "owned_by": "system" }, { @@ -152,9 +158,9 @@ "owned_by": "system" }, { - "id": "gpt-4o-mini-2024-07-18", + "id": "gpt-4.1-mini-2025-04-14", "object": "model", - "created": 1721172717, + "created": 1744317547, "owned_by": "system" }, { @@ -163,6 +169,12 @@ "created": 1699053241, "owned_by": "system" }, + { + "id": "chatgpt-4o-latest", + "object": "model", + "created": 1723515131, + "owned_by": "system" + }, { "id": "davinci-002", "object": "model", @@ -187,36 +199,18 @@ "created": 1712361441, "owned_by": "system" }, + { + "id": "gpt-4o-realtime-preview-2024-12-17", + "object": "model", + "created": 1733945430, + "owned_by": "system" + }, { "id": "gpt-3.5-turbo-instruct", "object": "model", "created": 1692901427, "owned_by": "system" }, - { - "id": "gpt-4o-mini-search-preview-2025-03-11", - "object": "model", - "created": 1741390858, - "owned_by": "system" - }, - { - "id": "chatgpt-4o-latest", - "object": "model", - "created": 1723515131, - "owned_by": "system" - }, - { - "id": "gpt-3.5-turbo-0125", - "object": "model", - "created": 1706048358, - "owned_by": "system" - }, - { - "id": "gpt-4o-2024-08-06", - "object": "model", - "created": 1722814719, - "owned_by": "system" - }, { "id": "gpt-3.5-turbo", "object": "model", @@ -224,15 +218,39 @@ "owned_by": "openai" }, { - "id": "gpt-4-turbo-2024-04-09", + "id": "gpt-4-1106-preview", "object": "model", - "created": 1712601677, + "created": 1698957206, "owned_by": "system" }, { - "id": "gpt-4o-realtime-preview", + "id": "gpt-4o-mini-search-preview-2025-03-11", "object": "model", - "created": 1727659998, + "created": 1741390858, + "owned_by": "system" + }, + { + "id": "gpt-4o-2024-11-20", + "object": "model", + "created": 1739331543, + "owned_by": "system" + }, + { + "id": "whisper-1", + "object": "model", + "created": 1677532384, + "owned_by": "openai-internal" + }, + { + "id": "gpt-4o-2024-05-13", + "object": "model", + "created": 1715368132, + "owned_by": "system" + }, + { + "id": "gpt-4-turbo-2024-04-09", + "object": "model", + "created": 1712601677, "owned_by": "system" }, { @@ -242,29 +260,11 @@ "owned_by": "openai-internal" }, { - "id": "gpt-4o", + "id": "o1-preview", "object": "model", - "created": 1715367049, + "created": 1725648897, "owned_by": "system" }, - { - "id": "text-embedding-3-small", - "object": "model", - "created": 1705948997, - "owned_by": "system" - }, - { - "id": "gpt-4-1106-preview", - "object": "model", - "created": 
1698957206, - "owned_by": "system" - }, - { - "id": "text-embedding-ada-002", - "object": "model", - "created": 1671217299, - "owned_by": "openai-internal" - }, { "id": "gpt-4-0613", "object": "model", @@ -277,30 +277,36 @@ "created": 1734326976, "owned_by": "system" }, + { + "id": "o1", + "object": "model", + "created": 1734375816, + "owned_by": "system" + }, + { + "id": "o1-pro", + "object": "model", + "created": 1742251791, + "owned_by": "system" + }, + { + "id": "o1-pro-2025-03-19", + "object": "model", + "created": 1742251504, + "owned_by": "system" + }, { "id": "gpt-4.5-preview", "object": "model", "created": 1740623059, "owned_by": "system" }, - { - "id": "gpt-4o-mini-realtime-preview", - "object": "model", - "created": 1734387380, - "owned_by": "system" - }, { "id": "gpt-4.5-preview-2025-02-27", "object": "model", "created": 1740623304, "owned_by": "system" }, - { - "id": "gpt-4o-mini-realtime-preview-2024-12-17", - "object": "model", - "created": 1734112601, - "owned_by": "system" - }, { "id": "gpt-4o-search-preview-2025-03-11", "object": "model", @@ -314,15 +320,27 @@ "owned_by": "openai-internal" }, { - "id": "gpt-4-turbo-preview", + "id": "omni-moderation-2024-09-26", "object": "model", - "created": 1706037777, + "created": 1732734466, "owned_by": "system" }, { - "id": "gpt-4-0125-preview", + "id": "text-embedding-3-small", "object": "model", - "created": 1706037612, + "created": 1705948997, + "owned_by": "system" + }, + { + "id": "gpt-4o-mini-tts", + "object": "model", + "created": 1742403959, + "owned_by": "system" + }, + { + "id": "gpt-4o", + "object": "model", + "created": 1715367049, "owned_by": "system" }, { @@ -331,12 +349,6 @@ "created": 1737146383, "owned_by": "system" }, - { - "id": "o1", - "object": "model", - "created": 1734375816, - "owned_by": "system" - }, { "id": "o3-mini-2025-01-31", "object": "model", @@ -344,9 +356,75 @@ "owned_by": "system" }, { - "id": "gpt-4o-realtime-preview-2024-12-17", + "id": "gpt-4o-mini", "object": "model", - "created": 1733945430, + "created": 1721172741, + "owned_by": "system" + }, + { + "id": "gpt-4o-2024-08-06", + "object": "model", + "created": 1722814719, + "owned_by": "system" + }, + { + "id": "gpt-4.1", + "object": "model", + "created": 1744316542, + "owned_by": "system" + }, + { + "id": "gpt-4o-transcribe", + "object": "model", + "created": 1742068463, + "owned_by": "system" + }, + { + "id": "gpt-4.1-2025-04-14", + "object": "model", + "created": 1744315746, + "owned_by": "system" + }, + { + "id": "gpt-4o-mini-2024-07-18", + "object": "model", + "created": 1721172717, + "owned_by": "system" + }, + { + "id": "gpt-4o-mini-transcribe", + "object": "model", + "created": 1742068596, + "owned_by": "system" + }, + { + "id": "o1-mini", + "object": "model", + "created": 1725649008, + "owned_by": "system" + }, + { + "id": "gpt-4o-mini-audio-preview-2024-12-17", + "object": "model", + "created": 1734115920, + "owned_by": "system" + }, + { + "id": "gpt-3.5-turbo-0125", + "object": "model", + "created": 1706048358, + "owned_by": "system" + }, + { + "id": "o1-mini-2024-09-12", + "object": "model", + "created": 1725648979, + "owned_by": "system" + }, + { + "id": "omni-moderation-latest", + "object": "model", + "created": 1731689265, "owned_by": "system" } ] diff --git a/packages/osr-code-bot/models/data/openrouter_models.json b/packages/osr-code-bot/models/data/openrouter_models.json index d5ac96f..55e8f90 100644 --- a/packages/osr-code-bot/models/data/openrouter_models.json +++ b/packages/osr-code-bot/models/data/openrouter_models.json @@ 
-1,29 +1,1311 @@ { - "timestamp": 1742336018806, + "timestamp": 1744900800521, "models": [ + { + "id": "openai/o4-mini-high", + "name": "OpenAI: o4 Mini High", + "created": 1744824212, + "description": "OpenAI o4-mini-high is the same model as [o4-mini](/openai/o4-mini) with reasoning_effort set to high. \n\nOpenAI o4-mini is a compact reasoning model in the o-series, optimized for fast, cost-efficient performance while retaining strong multimodal and agentic capabilities. It supports tool use and demonstrates competitive reasoning and coding performance across benchmarks like AIME (99.5% with Python) and SWE-bench, outperforming its predecessor o3-mini and even approaching o3 in some domains.\n\nDespite its smaller size, o4-mini exhibits high accuracy in STEM tasks, visual problem solving (e.g., MathVista, MMMU), and code editing. It is especially well-suited for high-throughput scenarios where latency or cost is critical. Thanks to its efficient architecture and refined reinforcement learning training, o4-mini can chain tools, generate structured outputs, and solve multi-step tasks with minimal delay—often in under a minute.", + "context_length": 200000, + "architecture": { + "modality": "text+image->text", + "input_modalities": [ + "image", + "text" + ], + "output_modalities": [ + "text" + ], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000011", + "completion": "0.0000044", + "request": "0", + "image": "0.0008415", + "web_search": "0", + "internal_reasoning": "0", + "input_cache_read": "0.000000275" + }, + "top_provider": { + "context_length": 200000, + "max_completion_tokens": 100000, + "is_moderated": true + }, + "per_request_limits": null + }, + { + "id": "openai/o3", + "name": "OpenAI: o3", + "created": 1744823457, + "description": "o3 is a well-rounded and powerful model across domains. It sets a new standard for math, science, coding, and visual reasoning tasks. It also excels at technical writing and instruction-following. Use it to think through multi-step problems that involve analysis across text, code, and images. Note that BYOK is required for this model. Set up here: https://openrouter.ai/settings/integrations", + "context_length": 200000, + "architecture": { + "modality": "text+image->text", + "input_modalities": [ + "image", + "text" + ], + "output_modalities": [ + "text" + ], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00001", + "completion": "0.00004", + "request": "0", + "image": "0.00765", + "web_search": "0", + "internal_reasoning": "0", + "input_cache_read": "0.0000025" + }, + "top_provider": { + "context_length": 200000, + "max_completion_tokens": 100000, + "is_moderated": true + }, + "per_request_limits": null + }, + { + "id": "openai/o4-mini", + "name": "OpenAI: o4 Mini", + "created": 1744820942, + "description": "OpenAI o4-mini is a compact reasoning model in the o-series, optimized for fast, cost-efficient performance while retaining strong multimodal and agentic capabilities. It supports tool use and demonstrates competitive reasoning and coding performance across benchmarks like AIME (99.5% with Python) and SWE-bench, outperforming its predecessor o3-mini and even approaching o3 in some domains.\n\nDespite its smaller size, o4-mini exhibits high accuracy in STEM tasks, visual problem solving (e.g., MathVista, MMMU), and code editing. It is especially well-suited for high-throughput scenarios where latency or cost is critical. 
Thanks to its efficient architecture and refined reinforcement learning training, o4-mini can chain tools, generate structured outputs, and solve multi-step tasks with minimal delay—often in under a minute.", + "context_length": 200000, + "architecture": { + "modality": "text+image->text", + "input_modalities": [ + "image", + "text" + ], + "output_modalities": [ + "text" + ], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000011", + "completion": "0.0000044", + "request": "0", + "image": "0.0008415", + "web_search": "0", + "internal_reasoning": "0", + "input_cache_read": "0.000000275" + }, + "top_provider": { + "context_length": 200000, + "max_completion_tokens": 100000, + "is_moderated": true + }, + "per_request_limits": null + }, + { + "id": "shisa-ai/shisa-v2-llama3.3-70b:free", + "name": "Shisa AI: Shisa V2 Llama 3.3 70B (free)", + "created": 1744754858, + "description": "Shisa V2 Llama 3.3 70B is a bilingual Japanese-English chat model fine-tuned by Shisa.AI on Meta’s Llama-3.3-70B-Instruct base. It prioritizes Japanese language performance while retaining strong English capabilities. The model was optimized entirely through post-training, using a refined mix of supervised fine-tuning (SFT) and DPO datasets including regenerated ShareGPT-style data, translation tasks, roleplaying conversations, and instruction-following prompts. Unlike earlier Shisa releases, this version avoids tokenizer modifications or extended pretraining.\n\nShisa V2 70B achieves leading Japanese task performance across a wide range of custom and public benchmarks, including JA MT Bench, ELYZA 100, and Rakuda. It supports a 128K token context length and integrates smoothly with inference frameworks like vLLM and SGLang. While it inherits safety characteristics from its base model, no additional alignment was applied. The model is intended for high-performance bilingual chat, instruction following, and translation tasks across JA/EN.", + "context_length": 32768, + "architecture": { + "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], + "tokenizer": "Llama3", + "instruct_type": null + }, + "pricing": { + "prompt": "0", + "completion": "0", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 32768, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null + }, + { + "id": "qwen/qwen2.5-coder-7b-instruct", + "name": "Qwen: Qwen2.5 Coder 7B Instruct", + "created": 1744734887, + "description": "Qwen2.5-Coder-7B-Instruct is a 7B parameter instruction-tuned language model optimized for code-related tasks such as code generation, reasoning, and bug fixing. Based on the Qwen2.5 architecture, it incorporates enhancements like RoPE, SwiGLU, RMSNorm, and GQA attention with support for up to 128K tokens using YaRN-based extrapolation. It is trained on a large corpus of source code, synthetic data, and text-code grounding, providing robust performance across programming languages and agentic coding workflows.\n\nThis model is part of the Qwen2.5-Coder family and offers strong compatibility with tools like vLLM for efficient deployment. 
Released under the Apache 2.0 license.", + "context_length": 32768, + "architecture": { + "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], + "tokenizer": "Qwen", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000002", + "completion": "0.0000002", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 32768, + "max_completion_tokens": 32768, + "is_moderated": false + }, + "per_request_limits": null + }, + { + "id": "openai/gpt-4.1", + "name": "OpenAI: GPT-4.1", + "created": 1744651385, + "description": "GPT-4.1 is a flagship large language model optimized for advanced instruction following, real-world software engineering, and long-context reasoning. It supports a 1 million token context window and outperforms GPT-4o and GPT-4.5 across coding (54.6% SWE-bench Verified), instruction compliance (87.4% IFEval), and multimodal understanding benchmarks. It is tuned for precise code diffs, agent reliability, and high recall in large document contexts, making it ideal for agents, IDE tooling, and enterprise knowledge retrieval.", + "context_length": 1047576, + "architecture": { + "modality": "text+image->text", + "input_modalities": [ + "image", + "text" + ], + "output_modalities": [ + "text" + ], + "tokenizer": "GPT", + "instruct_type": null + }, + "pricing": { + "prompt": "0.000002", + "completion": "0.000008", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0", + "input_cache_read": "0.0000005" + }, + "top_provider": { + "context_length": 1047576, + "max_completion_tokens": 32768, + "is_moderated": true + }, + "per_request_limits": null + }, + { + "id": "openai/gpt-4.1-mini", + "name": "OpenAI: GPT-4.1 Mini", + "created": 1744651381, + "description": "GPT-4.1 Mini is a mid-sized model delivering performance competitive with GPT-4o at substantially lower latency and cost. It retains a 1 million token context window and scores 45.1% on hard instruction evals, 35.8% on MultiChallenge, and 84.1% on IFEval. Mini also shows strong coding ability (e.g., 31.6% on Aider’s polyglot diff benchmark) and vision understanding, making it suitable for interactive applications with tight performance constraints.", + "context_length": 1047576, + "architecture": { + "modality": "text+image->text", + "input_modalities": [ + "image", + "text" + ], + "output_modalities": [ + "text" + ], + "tokenizer": "GPT", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000004", + "completion": "0.0000016", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0", + "input_cache_read": "0.0000001" + }, + "top_provider": { + "context_length": 1047576, + "max_completion_tokens": 32768, + "is_moderated": true + }, + "per_request_limits": null + }, + { + "id": "openai/gpt-4.1-nano", + "name": "OpenAI: GPT-4.1 Nano", + "created": 1744651369, + "description": "For tasks that demand low latency, GPT‑4.1 nano is the fastest and cheapest model in the GPT-4.1 series. It delivers exceptional performance at a small size with its 1 million token context window, and scores 80.1% on MMLU, 50.3% on GPQA, and 9.8% on Aider polyglot coding – even higher than GPT‑4o mini. 
It’s ideal for tasks like classification or autocompletion.", + "context_length": 1047576, + "architecture": { + "modality": "text+image->text", + "input_modalities": [ + "image", + "text" + ], + "output_modalities": [ + "text" + ], + "tokenizer": "GPT", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000001", + "completion": "0.0000004", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0", + "input_cache_read": "0.000000025" + }, + "top_provider": { + "context_length": 1047576, + "max_completion_tokens": 32768, + "is_moderated": true + }, + "per_request_limits": null + }, + { + "id": "eleutherai/llemma_7b", + "name": "EleutherAI: Llemma 7b", + "created": 1744643225, + "description": "Llemma 7B is a language model for mathematics. It was initialized with Code Llama 7B weights, and trained on the Proof-Pile-2 for 200B tokens. Llemma models are particularly strong at chain-of-thought mathematical reasoning and using computational tools for mathematics, such as Python and formal theorem provers.", + "context_length": 4096, + "architecture": { + "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000008", + "completion": "0.0000012", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 4096, + "max_completion_tokens": 4096, + "is_moderated": false + }, + "per_request_limits": null + }, + { + "id": "alfredpros/codellama-7b-instruct-solidity", + "name": "AlfredPros: CodeLLaMa 7B Instruct Solidity", + "created": 1744641874, + "description": "A finetuned 7 billion parameters Code LLaMA - Instruct model to generate Solidity smart contract using 4-bit QLoRA finetuning provided by PEFT library.", + "context_length": 4096, + "architecture": { + "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000008", + "completion": "0.0000012", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 4096, + "max_completion_tokens": 4096, + "is_moderated": false + }, + "per_request_limits": null + }, + { + "id": "arliai/qwq-32b-arliai-rpr-v1:free", + "name": "ArliAI: QwQ 32B RpR v1 (free)", + "created": 1744555982, + "description": "QwQ-32B-ArliAI-RpR-v1 is a 32B parameter model fine-tuned from Qwen/QwQ-32B using a curated creative writing and roleplay dataset originally developed for the RPMax series. It is designed to maintain coherence and reasoning across long multi-turn conversations by introducing explicit reasoning steps per dialogue turn, generated and refined using the base model itself.\n\nThe model was trained using RS-QLORA+ on 8K sequence lengths and supports up to 128K context windows (with practical performance around 32K). 
It is optimized for creative roleplay and dialogue generation, with an emphasis on minimizing cross-context repetition while preserving stylistic diversity.", + "context_length": 32768, + "architecture": { + "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], + "tokenizer": "Other", + "instruct_type": "deepseek-r1" + }, + "pricing": { + "prompt": "0", + "completion": "0", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 32768, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null + }, + { + "id": "agentica-org/deepcoder-14b-preview:free", + "name": "Agentica: Deepcoder 14B Preview (free)", + "created": 1744555395, + "description": "DeepCoder-14B-Preview is a 14B parameter code generation model fine-tuned from DeepSeek-R1-Distill-Qwen-14B using reinforcement learning with GRPO+ and iterative context lengthening. It is optimized for long-context program synthesis and achieves strong performance across coding benchmarks, including 60.6% on LiveCodeBench v5, competitive with models like o3-Mini", + "context_length": 96000, + "architecture": { + "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], + "tokenizer": "Other", + "instruct_type": "deepseek-r1" + }, + "pricing": { + "prompt": "0", + "completion": "0", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 96000, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null + }, + { + "id": "moonshotai/kimi-vl-a3b-thinking:free", + "name": "Moonshot AI: Kimi VL A3B Thinking (free)", + "created": 1744304841, + "description": "Kimi-VL is a lightweight Mixture-of-Experts vision-language model that activates only 2.8B parameters per step while delivering strong performance on multimodal reasoning and long-context tasks. The Kimi-VL-A3B-Thinking variant, fine-tuned with chain-of-thought and reinforcement learning, excels in math and visual reasoning benchmarks like MathVision, MMMU, and MathVista, rivaling much larger models such as Qwen2.5-VL-7B and Gemma-3-12B. It supports 128K context and high-resolution input via its MoonViT encoder.", + "context_length": 131072, + "architecture": { + "modality": "text+image->text", + "input_modalities": [ + "image", + "text" + ], + "output_modalities": [ + "text" + ], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0", + "completion": "0", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 131072, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null + }, + { + "id": "x-ai/grok-3-mini-beta", + "name": "xAI: Grok 3 Mini Beta", + "created": 1744240195, + "description": "Grok 3 Mini is a lightweight, smaller thinking model. Unlike traditional models that generate answers immediately, Grok 3 Mini thinks before responding. It’s ideal for reasoning-heavy tasks that don’t demand extensive domain knowledge, and shines in math-specific and quantitative use cases, such as solving challenging puzzles or math problems.\n\nTransparent \"thinking\" traces accessible. Defaults to low reasoning, can boost with setting `reasoning: { effort: \"high\" }`\n\nNote: That there are two xAI endpoints for this model. 
By default when using this model we will always route you to the base endpoint. If you want the fast endpoint you can add `provider: { sort: throughput}`, to sort by throughput instead. \n", + "context_length": 131072, + "architecture": { + "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], + "tokenizer": "Grok", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000003", + "completion": "0.0000005", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 131072, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null + }, + { + "id": "x-ai/grok-3-beta", + "name": "xAI: Grok 3 Beta", + "created": 1744240068, + "description": "Grok 3 is the latest model from xAI. It's their flagship model that excels at enterprise use cases like data extraction, coding, and text summarization. Possesses deep domain knowledge in finance, healthcare, law, and science.\n\nExcels in structured tasks and benchmarks like GPQA, LCB, and MMLU-Pro where it outperforms Grok 3 Mini even on high thinking. \n\nNote: That there are two xAI endpoints for this model. By default when using this model we will always route you to the base endpoint. If you want the fast endpoint you can add `provider: { sort: throughput}`, to sort by throughput instead. \n", + "context_length": 131072, + "architecture": { + "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], + "tokenizer": "Grok", + "instruct_type": null + }, + "pricing": { + "prompt": "0.000003", + "completion": "0.000015", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 131072, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null + }, + { + "id": "nvidia/llama-3.1-nemotron-nano-8b-v1:free", + "name": "NVIDIA: Llama 3.1 Nemotron Nano 8B v1 (free)", + "created": 1744123873, + "description": "Llama-3.1-Nemotron-Nano-8B-v1 is a compact large language model (LLM) derived from Meta's Llama-3.1-8B-Instruct, specifically optimized for reasoning tasks, conversational interactions, retrieval-augmented generation (RAG), and tool-calling applications. It balances accuracy and efficiency, fitting comfortably onto a single consumer-grade RTX GPU for local deployment. The model supports extended context lengths of up to 128K tokens.\n\nNote: you must include `detailed thinking on` in the system prompt to enable reasoning. 
Please see [Usage Recommendations](https://huggingface.co/nvidia/Llama-3_1-Nemotron-Ultra-253B-v1#quick-start-and-usage-recommendations) for more.", + "context_length": 131072, + "architecture": { + "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0", + "completion": "0", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 131072, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null + }, + { + "id": "nvidia/llama-3.3-nemotron-super-49b-v1:free", + "name": "NVIDIA: Llama 3.3 Nemotron Super 49B v1 (free)", + "created": 1744119494, + "description": "Llama-3.3-Nemotron-Super-49B-v1 is a large language model (LLM) optimized for advanced reasoning, conversational interactions, retrieval-augmented generation (RAG), and tool-calling tasks. Derived from Meta's Llama-3.3-70B-Instruct, it employs a Neural Architecture Search (NAS) approach, significantly enhancing efficiency and reducing memory requirements. This allows the model to support a context length of up to 128K tokens and fit efficiently on single high-performance GPUs, such as NVIDIA H200.\n\nNote: you must include `detailed thinking on` in the system prompt to enable reasoning. Please see [Usage Recommendations](https://huggingface.co/nvidia/Llama-3_1-Nemotron-Ultra-253B-v1#quick-start-and-usage-recommendations) for more.", + "context_length": 131072, + "architecture": { + "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0", + "completion": "0", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 131072, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null + }, + { + "id": "nvidia/llama-3.1-nemotron-ultra-253b-v1:free", + "name": "NVIDIA: Llama 3.1 Nemotron Ultra 253B v1 (free)", + "created": 1744115059, + "description": "Llama-3.1-Nemotron-Ultra-253B-v1 is a large language model (LLM) optimized for advanced reasoning, human-interactive chat, retrieval-augmented generation (RAG), and tool-calling tasks. Derived from Meta’s Llama-3.1-405B-Instruct, it has been significantly customized using Neural Architecture Search (NAS), resulting in enhanced efficiency, reduced memory usage, and improved inference latency. The model supports a context length of up to 128K tokens and can operate efficiently on an 8x NVIDIA H100 node.\n\nNote: you must include `detailed thinking on` in the system prompt to enable reasoning. 
Please see [Usage Recommendations](https://huggingface.co/nvidia/Llama-3_1-Nemotron-Ultra-253B-v1#quick-start-and-usage-recommendations) for more.", + "context_length": 131072, + "architecture": { + "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], + "tokenizer": "Llama3", + "instruct_type": null + }, + "pricing": { + "prompt": "0", + "completion": "0", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 131072, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null + }, + { + "id": "meta-llama/llama-4-maverick:free", + "name": "Meta: Llama 4 Maverick (free)", + "created": 1743881822, + "description": "Llama 4 Maverick 17B Instruct (128E) is a high-capacity multimodal language model from Meta, built on a mixture-of-experts (MoE) architecture with 128 experts and 17 billion active parameters per forward pass (400B total). It supports multilingual text and image input, and produces multilingual text and code output across 12 supported languages. Optimized for vision-language tasks, Maverick is instruction-tuned for assistant-like behavior, image reasoning, and general-purpose multimodal interaction.\n\nMaverick features early fusion for native multimodality and a 1 million token context window. It was trained on a curated mixture of public, licensed, and Meta-platform data, covering ~22 trillion tokens, with a knowledge cutoff in August 2024. Released on April 5, 2025 under the Llama 4 Community License, Maverick is suited for research and commercial applications requiring advanced multimodal understanding and high model throughput.", + "context_length": 256000, + "architecture": { + "modality": "text+image->text", + "input_modalities": [ + "text", + "image" + ], + "output_modalities": [ + "text" + ], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0", + "completion": "0", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 256000, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null + }, + { + "id": "meta-llama/llama-4-maverick", + "name": "Meta: Llama 4 Maverick", + "created": 1743881822, + "description": "Llama 4 Maverick 17B Instruct (128E) is a high-capacity multimodal language model from Meta, built on a mixture-of-experts (MoE) architecture with 128 experts and 17 billion active parameters per forward pass (400B total). It supports multilingual text and image input, and produces multilingual text and code output across 12 supported languages. Optimized for vision-language tasks, Maverick is instruction-tuned for assistant-like behavior, image reasoning, and general-purpose multimodal interaction.\n\nMaverick features early fusion for native multimodality and a 1 million token context window. It was trained on a curated mixture of public, licensed, and Meta-platform data, covering ~22 trillion tokens, with a knowledge cutoff in August 2024. 
Released on April 5, 2025 under the Llama 4 Community License, Maverick is suited for research and commercial applications requiring advanced multimodal understanding and high model throughput.", + "context_length": 1048576, + "architecture": { + "modality": "text+image->text", + "input_modalities": [ + "text", + "image" + ], + "output_modalities": [ + "text" + ], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000018", + "completion": "0.0000006", + "request": "0", + "image": "0.0006684", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 1048576, + "max_completion_tokens": 8192, + "is_moderated": false + }, + "per_request_limits": null + }, + { + "id": "meta-llama/llama-4-scout:free", + "name": "Meta: Llama 4 Scout (free)", + "created": 1743881519, + "description": "Llama 4 Scout 17B Instruct (16E) is a mixture-of-experts (MoE) language model developed by Meta, activating 17 billion parameters out of a total of 109B. It supports native multimodal input (text and image) and multilingual output (text and code) across 12 supported languages. Designed for assistant-style interaction and visual reasoning, Scout uses 16 experts per forward pass and features a context length of 10 million tokens, with a training corpus of ~40 trillion tokens.\n\nBuilt for high efficiency and local or commercial deployment, Llama 4 Scout incorporates early fusion for seamless modality integration. It is instruction-tuned for use in multilingual chat, captioning, and image understanding tasks. Released under the Llama 4 Community License, it was last trained on data up to August 2024 and launched publicly on April 5, 2025.", + "context_length": 512000, + "architecture": { + "modality": "text+image->text", + "input_modalities": [ + "text", + "image" + ], + "output_modalities": [ + "text" + ], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0", + "completion": "0", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 512000, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null + }, + { + "id": "meta-llama/llama-4-scout", + "name": "Meta: Llama 4 Scout", + "created": 1743881519, + "description": "Llama 4 Scout 17B Instruct (16E) is a mixture-of-experts (MoE) language model developed by Meta, activating 17 billion parameters out of a total of 109B. It supports native multimodal input (text and image) and multilingual output (text and code) across 12 supported languages. Designed for assistant-style interaction and visual reasoning, Scout uses 16 experts per forward pass and features a context length of 10 million tokens, with a training corpus of ~40 trillion tokens.\n\nBuilt for high efficiency and local or commercial deployment, Llama 4 Scout incorporates early fusion for seamless modality integration. It is instruction-tuned for use in multilingual chat, captioning, and image understanding tasks. 
Released under the Llama 4 Community License, it was last trained on data up to August 2024 and launched publicly on April 5, 2025.", + "context_length": 327680, + "architecture": { + "modality": "text+image->text", + "input_modalities": [ + "text", + "image" + ], + "output_modalities": [ + "text" + ], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000008", + "completion": "0.0000003", + "request": "0", + "image": "0.0003342", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 327680, + "max_completion_tokens": 8192, + "is_moderated": false + }, + "per_request_limits": null + }, + { + "id": "google/gemini-2.5-pro-preview-03-25", + "name": "Google: Gemini 2.5 Pro Preview", + "created": 1743780493, + "description": "Gemini 2.5 Pro is Google’s state-of-the-art AI model designed for advanced reasoning, coding, mathematics, and scientific tasks. It employs “thinking” capabilities, enabling it to reason through responses with enhanced accuracy and nuanced context handling. Gemini 2.5 Pro achieves top-tier performance on multiple benchmarks, including first-place positioning on the LMArena leaderboard, reflecting superior human-preference alignment and complex problem-solving abilities.", + "context_length": 1000000, + "architecture": { + "modality": "text+image->text", + "input_modalities": [ + "text", + "image" + ], + "output_modalities": [ + "text" + ], + "tokenizer": "Gemini", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000125", + "completion": "0.00001", + "request": "0", + "image": "0.00516", + "web_search": "0", + "internal_reasoning": "0", + "input_cache_read": "0.0000003125", + "input_cache_write": "0" + }, + "top_provider": { + "context_length": 1000000, + "max_completion_tokens": 65535, + "is_moderated": false + }, + "per_request_limits": null + }, + { + "id": "all-hands/openhands-lm-32b-v0.1", + "name": "OpenHands LM 32B V0.1", + "created": 1743613013, + "description": "OpenHands LM v0.1 is a 32B open-source coding model fine-tuned from Qwen2.5-Coder-32B-Instruct using reinforcement learning techniques outlined in SWE-Gym. It is optimized for autonomous software development agents and achieves strong performance on SWE-Bench Verified, with a 37.2% resolve rate. The model supports a 128K token context window, making it well-suited for long-horizon code reasoning and large codebase tasks.\n\nOpenHands LM is designed for local deployment and runs on consumer-grade GPUs such as a single 3090. It enables fully offline agent workflows without dependency on proprietary APIs. This release is intended as a research preview, and future updates aim to improve generalizability, reduce repetition, and offer smaller variants.", + "context_length": 16384, + "architecture": { + "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000026", + "completion": "0.0000034", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 16384, + "max_completion_tokens": 4096, + "is_moderated": false + }, + "per_request_limits": null + }, + { + "id": "mistral/ministral-8b", + "name": "Mistral: Ministral 8B", + "created": 1743430021, + "description": "Ministral 8B is a state-of-the-art language model optimized for on-device and edge computing. 
Designed for efficiency in knowledge-intensive tasks, commonsense reasoning, and function-calling, it features a specialized interleaved sliding-window attention mechanism, enabling faster and more memory-efficient inference. Ministral 8B excels in local, low-latency applications such as offline translation, smart assistants, autonomous robotics, and local analytics.\n\nThe model supports up to 128k context length and can function as a performant intermediary in multi-step agentic workflows, efficiently handling tasks like input parsing, API calls, and task routing. It consistently outperforms comparable models like Mistral 7B across benchmarks, making it particularly suitable for compute-efficient, privacy-focused scenarios.", + "context_length": 131072, + "architecture": { + "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], + "tokenizer": "Mistral", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000001", + "completion": "0.0000001", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 131072, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null + }, + { + "id": "deepseek/deepseek-v3-base:free", + "name": "DeepSeek: DeepSeek V3 Base (free)", + "created": 1743272023, + "description": "Note that this is a base model mostly meant for testing, you need to provide detailed prompts for the model to return useful responses. \n\nDeepSeek-V3 Base is a 671B parameter open Mixture-of-Experts (MoE) language model with 37B active parameters per forward pass and a context length of 128K tokens. Trained on 14.8T tokens using FP8 mixed precision, it achieves high training efficiency and stability, with strong performance across language, reasoning, math, and coding tasks. \n\nDeepSeek-V3 Base is the pre-trained model behind [DeepSeek V3](/deepseek/deepseek-chat-v3)", + "context_length": 163840, + "architecture": { + "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], + "tokenizer": "DeepSeek", + "instruct_type": null + }, + "pricing": { + "prompt": "0", + "completion": "0", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 163840, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null + }, + { + "id": "scb10x/llama3.1-typhoon2-8b-instruct", + "name": "Typhoon2 8B Instruct", + "created": 1743196511, + "description": "Llama3.1-Typhoon2-8B-Instruct is a Thai-English instruction-tuned model with 8 billion parameters, built on Llama 3.1. It significantly improves over its base model in Thai reasoning, instruction-following, and function-calling tasks, while maintaining competitive English performance. The model is optimized for bilingual interaction and performs well on Thai-English code-switching, MT-Bench, IFEval, and tool-use benchmarks.\n\nDespite its smaller size, it demonstrates strong generalization across math, coding, and multilingual benchmarks, outperforming comparable 8B models across most Thai-specific tasks. 
Full benchmark results and methodology are available in the [technical report.](https://arxiv.org/abs/2412.13702)", + "context_length": 8192, + "architecture": { + "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], + "tokenizer": "Llama3", + "instruct_type": "llama3" + }, + "pricing": { + "prompt": "0.00000018", + "completion": "0.00000018", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 8192, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null + }, + { + "id": "scb10x/llama3.1-typhoon2-70b-instruct", + "name": "Typhoon2 70B Instruct", + "created": 1743196170, + "description": "Llama3.1-Typhoon2-70B-Instruct is a Thai-English instruction-tuned language model with 70 billion parameters, built on Llama 3.1. It demonstrates strong performance across general instruction-following, math, coding, and tool-use tasks, with state-of-the-art results in Thai-specific benchmarks such as IFEval, MT-Bench, and Thai-English code-switching.\n\nThe model excels in bilingual reasoning and function-calling scenarios, offering high accuracy across diverse domains. Comparative evaluations show consistent improvements over prior Thai LLMs and other Llama-based baselines. Full results and methodology are available in the [technical report.](https://arxiv.org/abs/2412.13702)", + "context_length": 8192, + "architecture": { + "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], + "tokenizer": "Llama3", + "instruct_type": "llama3" + }, + "pricing": { + "prompt": "0.00000088", + "completion": "0.00000088", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 8192, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null + }, + { + "id": "allenai/molmo-7b-d:free", + "name": "AllenAI: Molmo 7B D (free)", + "created": 1743023247, + "description": "Molmo is a family of open vision-language models developed by the Allen Institute for AI. Molmo models are trained on PixMo, a dataset of 1 million, highly-curated image-text pairs. It has state-of-the-art performance among multimodal models with a similar size while being fully open-source. You can find all models in the Molmo family [here](https://huggingface.co/collections/allenai/molmo-66f379e6fe3b8ef090a8ca19). Learn more about the Molmo family [in the announcement blog post](https://molmo.allenai.org/blog) or the [paper](https://huggingface.co/papers/2409.17146).\n\nMolmo 7B-D is based on [Qwen2-7B](https://huggingface.co/Qwen/Qwen2-7B) and uses [OpenAI CLIP](https://huggingface.co/openai/clip-vit-large-patch14-336) as vision backbone. It performs comfortably between GPT-4V and GPT-4o on both academic benchmarks and human evaluation.\n\nThis checkpoint is a preview of the Molmo release. 
All artifacts used in creating Molmo (PixMo dataset, training code, evaluations, intermediate checkpoints) will be made available at a later date, furthering our commitment to open-source AI development and reproducibility.", + "context_length": 4096, + "architecture": { + "modality": "text+image->text", + "input_modalities": [ + "text", + "image" + ], + "output_modalities": [ + "text" + ], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0", + "completion": "0", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 4096, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null + }, + { + "id": "bytedance-research/ui-tars-72b:free", + "name": "Bytedance: UI-TARS 72B (free)", + "created": 1743020065, + "description": "UI-TARS 72B is an open-source multimodal AI model designed specifically for automating browser and desktop tasks through visual interaction and control. The model is built with a specialized vision architecture enabling accurate interpretation and manipulation of on-screen visual data. It supports automation tasks within web browsers as well as desktop applications, including Microsoft Office and VS Code.\n\nCore capabilities include intelligent screen detection, predictive action modeling, and efficient handling of repetitive interactions. UI-TARS employs supervised fine-tuning (SFT) tailored explicitly for computer control scenarios. It can be deployed locally or accessed via Hugging Face for demonstration purposes. Intended use cases encompass workflow automation, task scripting, and interactive desktop control applications.", + "context_length": 32768, + "architecture": { + "modality": "text+image->text", + "input_modalities": [ + "text", + "image" + ], + "output_modalities": [ + "text" + ], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0", + "completion": "0", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 32768, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null + }, + { + "id": "qwen/qwen2.5-vl-3b-instruct:free", + "name": "Qwen: Qwen2.5 VL 3B Instruct (free)", + "created": 1743014573, + "description": "Qwen2.5 VL 3B is a multimodal LLM from the Qwen Team with the following key enhancements:\n\n- SoTA understanding of images of various resolutions and aspect ratios: Qwen2.5-VL achieves state-of-the-art performance on visual understanding benchmarks, including MathVista, DocVQA, RealWorldQA, MTVQA, etc.\n\n- An agent that can operate your mobile devices, robots, etc.: with its complex reasoning and decision-making abilities, Qwen2.5-VL can be integrated with devices like mobile phones, robots, etc., for automatic operation based on the visual environment and text instructions.\n\n- Multilingual Support: to serve global users, besides English and Chinese, Qwen2.5-VL now supports the understanding of texts in different languages inside images, including most European languages, Japanese, Korean, Arabic, Vietnamese, etc.\n\nFor more details, see this [blog post](https://qwenlm.github.io/blog/qwen2-vl/) and [GitHub repo](https://github.com/QwenLM/Qwen2-VL).\n\nUsage of this model is subject to [Tongyi Qianwen LICENSE AGREEMENT](https://huggingface.co/Qwen/Qwen1.5-110B-Chat/blob/main/LICENSE).", + "context_length": 64000, + "architecture": { + "modality": "text+image->text", + "input_modalities": [ + 
"text", + "image" + ], + "output_modalities": [ + "text" + ], + "tokenizer": "Qwen", + "instruct_type": null + }, + "pricing": { + "prompt": "0", + "completion": "0", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 64000, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null + }, + { + "id": "google/gemini-2.5-pro-exp-03-25:free", + "name": "Google: Gemini 2.5 Pro Experimental (free)", + "created": 1742922099, + "description": "Gemini 2.5 Pro is Google’s state-of-the-art AI model designed for advanced reasoning, coding, mathematics, and scientific tasks. It employs “thinking” capabilities, enabling it to reason through responses with enhanced accuracy and nuanced context handling. Gemini 2.5 Pro achieves top-tier performance on multiple benchmarks, including first-place positioning on the LMArena leaderboard, reflecting superior human-preference alignment and complex problem-solving abilities.", + "context_length": 1000000, + "architecture": { + "modality": "text+image->text", + "input_modalities": [ + "text", + "image" + ], + "output_modalities": [ + "text" + ], + "tokenizer": "Gemini", + "instruct_type": null + }, + "pricing": { + "prompt": "0", + "completion": "0", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 1000000, + "max_completion_tokens": 65535, + "is_moderated": false + }, + "per_request_limits": null + }, + { + "id": "qwen/qwen2.5-vl-32b-instruct:free", + "name": "Qwen: Qwen2.5 VL 32B Instruct (free)", + "created": 1742839838, + "description": "Qwen2.5-VL-32B is a multimodal vision-language model fine-tuned through reinforcement learning for enhanced mathematical reasoning, structured outputs, and visual problem-solving capabilities. It excels at visual analysis tasks, including object recognition, textual interpretation within images, and precise event localization in extended videos. Qwen2.5-VL-32B demonstrates state-of-the-art performance across multimodal benchmarks such as MMMU, MathVista, and VideoMME, while maintaining strong reasoning and clarity in text-based tasks like MMLU, mathematical problem-solving, and code generation.", + "context_length": 8192, + "architecture": { + "modality": "text+image->text", + "input_modalities": [ + "text", + "image" + ], + "output_modalities": [ + "text" + ], + "tokenizer": "Qwen", + "instruct_type": null + }, + "pricing": { + "prompt": "0", + "completion": "0", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 8192, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null + }, + { + "id": "qwen/qwen2.5-vl-32b-instruct", + "name": "Qwen: Qwen2.5 VL 32B Instruct", + "created": 1742839838, + "description": "Qwen2.5-VL-32B is a multimodal vision-language model fine-tuned through reinforcement learning for enhanced mathematical reasoning, structured outputs, and visual problem-solving capabilities. It excels at visual analysis tasks, including object recognition, textual interpretation within images, and precise event localization in extended videos. 
Qwen2.5-VL-32B demonstrates state-of-the-art performance across multimodal benchmarks such as MMMU, MathVista, and VideoMME, while maintaining strong reasoning and clarity in text-based tasks like MMLU, mathematical problem-solving, and code generation.", + "context_length": 128000, + "architecture": { + "modality": "text+image->text", + "input_modalities": [ + "text", + "image" + ], + "output_modalities": [ + "text" + ], + "tokenizer": "Qwen", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000009", + "completion": "0.0000009", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 128000, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null + }, + { + "id": "deepseek/deepseek-chat-v3-0324:free", + "name": "DeepSeek: DeepSeek V3 0324 (free)", + "created": 1742824755, + "description": "DeepSeek V3, a 685B-parameter mixture-of-experts model, is the latest iteration of the flagship chat model family from the DeepSeek team.\n\nIt succeeds the [DeepSeek V3](/deepseek/deepseek-chat-v3) model and performs well across a variety of tasks.", + "context_length": 163840, + "architecture": { + "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], + "tokenizer": "DeepSeek", + "instruct_type": null + }, + "pricing": { + "prompt": "0", + "completion": "0", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 163840, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null + }, + { + "id": "deepseek/deepseek-chat-v3-0324", + "name": "DeepSeek: DeepSeek V3 0324", + "created": 1742824755, + "description": "DeepSeek V3, a 685B-parameter mixture-of-experts model, is the latest iteration of the flagship chat model family from the DeepSeek team.\n\nIt succeeds the [DeepSeek V3](/deepseek/deepseek-chat-v3) model and performs well across a variety of tasks.", + "context_length": 64000, + "architecture": { + "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], + "tokenizer": "DeepSeek", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000027", + "completion": "0.0000011", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0", + "input_cache_read": "0.00000007" + }, + "top_provider": { + "context_length": 64000, + "max_completion_tokens": 8192, + "is_moderated": false + }, + "per_request_limits": null + }, + { + "id": "featherless/qwerky-72b:free", + "name": "Qwerky 72B (free)", + "created": 1742481597, + "description": "Qwerky-72B is a linear-attention RWKV variant of the Qwen 2.5 72B model, optimized to significantly reduce computational cost at scale. Leveraging linear attention, it achieves substantial inference speedups (>1000x) while retaining competitive accuracy on common benchmarks like ARC, HellaSwag, Lambada, and MMLU. 
It inherits knowledge and language support from Qwen 2.5, supporting approximately 30 languages, making it suitable for efficient inference in large-context applications.", + "context_length": 32768, + "architecture": { + "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0", + "completion": "0", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 32768, + "max_completion_tokens": 4096, + "is_moderated": false + }, + "per_request_limits": null + }, + { + "id": "openai/o1-pro", + "name": "OpenAI: o1-pro", + "created": 1742423211, + "description": "The o1 series of models are trained with reinforcement learning to think before they answer and perform complex reasoning. The o1-pro model uses more compute to think harder and provide consistently better answers.", + "context_length": 200000, + "architecture": { + "modality": "text+image->text", + "input_modalities": [ + "text", + "image" + ], + "output_modalities": [ + "text" + ], + "tokenizer": "GPT", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00015", + "completion": "0.0006", + "request": "0", + "image": "0.21675", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 200000, + "max_completion_tokens": 100000, + "is_moderated": true + }, + "per_request_limits": null + }, + { + "id": "mistralai/mistral-small-3.1-24b-instruct:free", + "name": "Mistral: Mistral Small 3.1 24B (free)", + "created": 1742238937, + "description": "Mistral Small 3.1 24B Instruct is an upgraded variant of Mistral Small 3 (2501), featuring 24 billion parameters with advanced multimodal capabilities. It provides state-of-the-art performance in text-based reasoning and vision tasks, including image analysis, programming, mathematical reasoning, and multilingual support across dozens of languages. Equipped with an extensive 128k token context window and optimized for efficient local inference, it supports use cases such as conversational agents, function calling, long-document comprehension, and privacy-sensitive deployments.", + "context_length": 96000, + "architecture": { + "modality": "text+image->text", + "input_modalities": [ + "text", + "image" + ], + "output_modalities": [ + "text" + ], + "tokenizer": "Mistral", + "instruct_type": null + }, + "pricing": { + "prompt": "0", + "completion": "0", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 96000, + "max_completion_tokens": 96000, + "is_moderated": false + }, + "per_request_limits": null + }, { "id": "mistralai/mistral-small-3.1-24b-instruct", "name": "Mistral: Mistral Small 3.1 24B", "created": 1742238937, "description": "Mistral Small 3.1 24B Instruct is an upgraded variant of Mistral Small 3 (2501), featuring 24 billion parameters with advanced multimodal capabilities. It provides state-of-the-art performance in text-based reasoning and vision tasks, including image analysis, programming, mathematical reasoning, and multilingual support across dozens of languages. 
Equipped with an extensive 128k token context window and optimized for efficient local inference, it supports use cases such as conversational agents, function calling, long-document comprehension, and privacy-sensitive deployments.", - "context_length": 128000, + "context_length": 32768, "architecture": { "modality": "text+image->text", + "input_modalities": [ + "text", + "image" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Mistral", "instruct_type": null }, "pricing": { "prompt": "0.0000001", "completion": "0.0000003", - "image": "0.000926", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0.0009264", "web_search": "0", "internal_reasoning": "0" }, "top_provider": { - "context_length": 128000, + "context_length": 32768, "max_completion_tokens": null, "is_moderated": false }, @@ -37,16 +1319,20 @@ "context_length": 32768, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Other", "instruct_type": "deepseek-r1" }, "pricing": { "prompt": "0", "completion": "0", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -65,16 +1351,20 @@ "context_length": 32768, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Other", "instruct_type": "deepseek-r1" }, "pricing": { "prompt": "0", "completion": "0", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -90,53 +1380,29 @@ "name": "SteelSkull: L3.3 Electra R1 70B", "created": 1742067611, "description": "L3.3-Electra-R1-70 is the newest release of the Unnamed series. Built on a DeepSeek R1 Distill base, Electra-R1 integrates various models together to provide an intelligent and coherent model capable of providing deep character insights. Through proper prompting, the model demonstrates advanced reasoning capabilities and unprompted exploration of character inner thoughts and motivations. Read more about the model and [prompting here](https://huggingface.co/Steelskull/L3.3-Electra-R1-70b)", - "context_length": 128000, + "context_length": 131072, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Other", "instruct_type": "deepseek-r1" }, "pricing": { "prompt": "0.0000007", - "completion": "0.0000007", - "image": "0", + "completion": "0.00000095", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, "top_provider": { - "context_length": 128000, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null - }, - { - "id": "allenai/olmo-2-0325-32b-instruct", - "name": "AllenAI: Olmo 2 32B Instruct", - "created": 1741988556, - "description": "OLMo-2 32B Instruct is a supervised instruction-finetuned variant of the OLMo-2 32B March 2025 base model. It excels in complex reasoning and instruction-following tasks across diverse benchmarks such as GSM8K, MATH, IFEval, and general NLP evaluation. 
Developed by AI2, OLMo-2 32B is part of an open, research-oriented initiative, trained primarily on English-language datasets to advance the understanding and development of open-source language models.", - "context_length": 4096, - "architecture": { - "modality": "text->text", - "tokenizer": "Other", - "instruct_type": null - }, - "pricing": { - "prompt": "0.000001", - "completion": "0.0000015", - "image": "0", - "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 4096, - "max_completion_tokens": 4096, + "context_length": 131072, + "max_completion_tokens": 131072, "is_moderated": false }, "per_request_limits": null @@ -146,24 +1412,29 @@ "name": "Google: Gemma 3 1B (free)", "created": 1741963556, "description": "Gemma 3 1B is the smallest of the new Gemma 3 family. It handles context windows up to 32k tokens, understands over 140 languages, and offers improved math, reasoning, and chat capabilities, including structured outputs and function calling. Note: Gemma 3 1B is not multimodal. For the smallest multimodal Gemma 3 model, please see [Gemma 3 4B](google/gemma-3-4b-it)", - "context_length": 32000, + "context_length": 32768, "architecture": { "modality": "text+image->text", + "input_modalities": [ + "text", + "image" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Gemini", "instruct_type": "gemma" }, "pricing": { "prompt": "0", "completion": "0", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, "top_provider": { - "context_length": 32000, + "context_length": 32768, "max_completion_tokens": 8192, "is_moderated": false }, @@ -177,16 +1448,21 @@ "context_length": 131072, "architecture": { "modality": "text+image->text", + "input_modalities": [ + "text", + "image" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Gemini", "instruct_type": "gemma" }, "pricing": { "prompt": "0", "completion": "0", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -197,6 +1473,39 @@ }, "per_request_limits": null }, + { + "id": "google/gemma-3-4b-it", + "name": "Google: Gemma 3 4B", + "created": 1741905510, + "description": "Gemma 3 introduces multimodality, supporting vision-language input and text outputs. 
It handles context windows up to 128k tokens, understands over 140 languages, and offers improved math, reasoning, and chat capabilities, including structured outputs and function calling.", + "context_length": 131072, + "architecture": { + "modality": "text+image->text", + "input_modalities": [ + "text", + "image" + ], + "output_modalities": [ + "text" + ], + "tokenizer": "Gemini", + "instruct_type": "gemma" + }, + "pricing": { + "prompt": "0.00000002", + "completion": "0.00000004", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 131072, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null + }, { "id": "ai21/jamba-1.6-large", "name": "AI21: Jamba 1.6 Large", @@ -205,16 +1514,20 @@ "context_length": 256000, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Other", "instruct_type": null }, "pricing": { "prompt": "0.000002", "completion": "0.000008", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -233,16 +1546,20 @@ "context_length": 256000, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Other", "instruct_type": null }, "pricing": { "prompt": "0.0000002", "completion": "0.0000004", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -261,16 +1578,21 @@ "context_length": 131072, "architecture": { "modality": "text+image->text", + "input_modalities": [ + "text", + "image" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Gemini", "instruct_type": "gemma" }, "pricing": { "prompt": "0", "completion": "0", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -281,6 +1603,39 @@ }, "per_request_limits": null }, + { + "id": "google/gemma-3-12b-it", + "name": "Google: Gemma 3 12B", + "created": 1741902625, + "description": "Gemma 3 introduces multimodality, supporting vision-language input and text outputs. It handles context windows up to 128k tokens, understands over 140 languages, and offers improved math, reasoning, and chat capabilities, including structured outputs and function calling. 
Gemma 3 12B is the second largest in the family of Gemma 3 models after [Gemma 3 27B](google/gemma-3-27b-it)", + "context_length": 131072, + "architecture": { + "modality": "text+image->text", + "input_modalities": [ + "text", + "image" + ], + "output_modalities": [ + "text" + ], + "tokenizer": "Gemini", + "instruct_type": "gemma" + }, + "pricing": { + "prompt": "0.00000005", + "completion": "0.0000001", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 131072, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null + }, { "id": "cohere/command-a", "name": "Cohere: Command A", @@ -289,16 +1644,20 @@ "context_length": 256000, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Other", "instruct_type": null }, "pricing": { "prompt": "0.0000025", "completion": "0.00001", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -317,16 +1676,21 @@ "context_length": 128000, "architecture": { "modality": "text+image->text", + "input_modalities": [ + "text", + "image" + ], + "output_modalities": [ + "text" + ], "tokenizer": "GPT", "instruct_type": null }, "pricing": { "prompt": "0.00000015", "completion": "0.0000006", - "image": "0.000217", "request": "0.0275", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0.000217", "web_search": "0", "internal_reasoning": "0" }, @@ -345,16 +1709,21 @@ "context_length": 128000, "architecture": { "modality": "text+image->text", + "input_modalities": [ + "text", + "image" + ], + "output_modalities": [ + "text" + ], "tokenizer": "GPT", "instruct_type": null }, "pricing": { "prompt": "0.0000025", "completion": "0.00001", - "image": "0.003613", "request": "0.035", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0.003613", "web_search": "0", "internal_reasoning": "0" }, @@ -365,34 +1734,6 @@ }, "per_request_limits": null }, - { - "id": "tokyotech-llm/llama-3.1-swallow-70b-instruct-v0.3", - "name": "Swallow: Llama 3.1 Swallow 70B Instruct V0.3", - "created": 1741813936, - "description": "Llama 3.1 Swallow 70B is a large language model that was built by continual pre-training on the Meta Llama 3.1 70B. Llama 3.1 Swallow enhanced the Japanese language capabilities of the original Llama 3.1 while retaining the English language capabilities. Swallow used approximately 200 billion tokens that were sampled from a large Japanese web corpus (Swallow Corpus Version 2), Japanese and English Wikipedia articles, and mathematical and coding contents, etc (see the Training Datasets section of the base model) for continual pre-training. The instruction-tuned models (Instruct) were built by supervised fine-tuning (SFT) on the synthetic data specially built for Japanese. 
", - "context_length": 16384, - "architecture": { - "modality": "text->text", - "tokenizer": "Llama3", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000006", - "completion": "0.0000012", - "image": "0", - "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 16384, - "max_completion_tokens": 4096, - "is_moderated": false - }, - "per_request_limits": null - }, { "id": "rekaai/reka-flash-3:free", "name": "Reka: Flash 3 (free)", @@ -401,16 +1742,20 @@ "context_length": 32768, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Other", "instruct_type": null }, "pricing": { "prompt": "0", "completion": "0", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -429,16 +1774,21 @@ "context_length": 96000, "architecture": { "modality": "text+image->text", + "input_modalities": [ + "text", + "image" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Gemini", "instruct_type": "gemma" }, "pricing": { "prompt": "0", "completion": "0", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -457,16 +1807,21 @@ "context_length": 131072, "architecture": { "modality": "text+image->text", + "input_modalities": [ + "text", + "image" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Gemini", "instruct_type": "gemma" }, "pricing": { "prompt": "0.0000001", "completion": "0.0000002", - "image": "0.0000256", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0.0000256", "web_search": "0", "internal_reasoning": "0" }, @@ -482,25 +1837,29 @@ "name": "TheDrummer: Anubis Pro 105B V1", "created": 1741642290, "description": "Anubis Pro 105B v1 is an expanded and refined variant of Meta’s Llama 3.3 70B, featuring 50% additional layers and further fine-tuning to leverage its increased capacity. Designed for advanced narrative, roleplay, and instructional tasks, it demonstrates enhanced emotional intelligence, creativity, nuanced character portrayal, and superior prompt adherence compared to smaller models. Its larger parameter count allows for deeper contextual understanding and extended reasoning capabilities, optimized for engaging, intelligent, and coherent interactions.", - "context_length": 64000, + "context_length": 131072, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Other", "instruct_type": null }, "pricing": { "prompt": "0.0000008", - "completion": "0.0000008", - "image": "0", + "completion": "0.000001", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, "top_provider": { - "context_length": 64000, - "max_completion_tokens": 64000, + "context_length": 131072, + "max_completion_tokens": 131072, "is_moderated": false }, "per_request_limits": null @@ -510,25 +1869,29 @@ "name": "LatitudeGames: Wayfarer Large 70B Llama 3.3", "created": 1741636885, "description": "Wayfarer Large 70B is a roleplay and text-adventure model fine-tuned from Meta’s Llama-3.3-70B-Instruct. 
Specifically optimized for narrative-driven, challenging scenarios, it introduces realistic stakes, conflicts, and consequences often avoided by standard RLHF-aligned models. Trained using a curated blend of adventure, roleplay, and instructive fiction datasets, Wayfarer emphasizes tense storytelling, authentic player failure scenarios, and robust narrative immersion, making it uniquely suited for interactive fiction and gaming experiences.", - "context_length": 128000, + "context_length": 131072, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Llama3", "instruct_type": null }, "pricing": { - "prompt": "0.0000007", - "completion": "0.0000007", - "image": "0", + "prompt": "0.0000008", + "completion": "0.0000009", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, "top_provider": { - "context_length": 128000, - "max_completion_tokens": 128000, + "context_length": 131072, + "max_completion_tokens": 131072, "is_moderated": false }, "per_request_limits": null @@ -541,16 +1904,20 @@ "context_length": 32768, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Other", "instruct_type": null }, "pricing": { "prompt": "0.0000005", - "completion": "0.0000005", - "image": "0", + "completion": "0.0000008", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -569,16 +1936,21 @@ "context_length": 131072, "architecture": { "modality": "text+image->text", + "input_modalities": [ + "text", + "image" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Other", "instruct_type": null }, "pricing": { "prompt": "0.00000005", "completion": "0.0000001", - "image": "0.00017685", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0.00017685", "web_search": "0", "internal_reasoning": "0" }, @@ -597,17 +1969,21 @@ "context_length": 128000, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Other", "instruct_type": "deepseek-r1" }, "pricing": { "prompt": "0.000002", "completion": "0.000008", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", - "web_search": "0", + "image": "0", + "web_search": "0.005", "internal_reasoning": "0" }, "top_provider": { @@ -625,17 +2001,21 @@ "context_length": 200000, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Other", "instruct_type": null }, "pricing": { "prompt": "0.000003", "completion": "0.000015", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", - "web_search": "0", + "image": "0", + "web_search": "0.005", "internal_reasoning": "0" }, "top_provider": { @@ -650,24 +2030,28 @@ "name": "Perplexity: Sonar Deep Research", "created": 1741311246, "description": "Sonar Deep Research is a research-focused model designed for multi-step retrieval, synthesis, and reasoning across complex topics. It autonomously searches, reads, and evaluates sources, refining its approach as it gathers information. 
This enables comprehensive report generation across domains like finance, technology, health, and current events.\n\nNotes on Pricing ([Source](https://docs.perplexity.ai/guides/pricing#detailed-pricing-breakdown-for-sonar-deep-research)) \n- Input tokens comprise Prompt tokens (user prompt) + Citation tokens (these are processed tokens from running searches)\n- Deep Research runs multiple searches to conduct exhaustive research. Searches are priced at $5/1000 searches. A request that does 30 searches will cost $0.15 in this step.\n- Reasoning is a distinct step in Deep Research since it does extensive automated reasoning through all the material it gathers during its research phase. Reasoning tokens here are a bit different from the CoTs in the answer - these are tokens that we use to reason through the research material prior to generating the outputs via the CoTs. Reasoning tokens are priced at $3/1M tokens", - "context_length": 200000, + "context_length": 128000, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Other", "instruct_type": "deepseek-r1" }, "pricing": { "prompt": "0.000002", "completion": "0.000008", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", - "web_search": "0", - "internal_reasoning": "0" + "image": "0", + "web_search": "0.005", + "internal_reasoning": "0.000003" }, "top_provider": { - "context_length": 200000, + "context_length": 128000, "max_completion_tokens": null, "is_moderated": false }, @@ -681,16 +2065,20 @@ "context_length": 163840, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Other", "instruct_type": "deepseek-r1" }, "pricing": { "prompt": "0", "completion": "0", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -706,25 +2094,29 @@ "name": "Qwen: QwQ 32B (free)", "created": 1741208814, "description": "QwQ is the reasoning model of the Qwen series. Compared with conventional instruction-tuned models, QwQ, which is capable of thinking and reasoning, can achieve significantly enhanced performance in downstream tasks, especially hard problems. 
QwQ-32B is the medium-sized reasoning model, which is capable of achieving competitive performance against state-of-the-art reasoning models, e.g., DeepSeek-R1, o1-mini.", - "context_length": 131072, + "context_length": 40000, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Qwen", "instruct_type": "qwq" }, "pricing": { "prompt": "0", "completion": "0", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, "top_provider": { - "context_length": 131072, - "max_completion_tokens": 131072, + "context_length": 40000, + "max_completion_tokens": 40000, "is_moderated": false }, "per_request_limits": null @@ -737,44 +2129,20 @@ "context_length": 131072, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Qwen", "instruct_type": "qwq" }, "pricing": { - "prompt": "0.00000012", - "completion": "0.00000018", - "image": "0", + "prompt": "0.00000015", + "completion": "0.0000002", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 131072, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null - }, - { - "id": "qwen/qwen2.5-32b-instruct", - "name": "Qwen: Qwen2.5 32B Instruct", - "created": 1741042744, - "description": "Qwen2.5 32B Instruct is the instruction-tuned variant of the latest Qwen large language model series. It provides enhanced instruction-following capabilities, improved proficiency in coding and mathematical reasoning, and robust handling of structured data and outputs such as JSON. It supports long-context processing up to 128K tokens and multilingual tasks across 29+ languages. 
The model has 32.5 billion parameters, 64 layers, and utilizes an advanced transformer architecture with RoPE, SwiGLU, RMSNorm, and Attention QKV bias.\n\nFor details, please refer to the [Qwen2.5 Blog](https://qwenlm.github.io/blog/qwen2.5/).", - "context_length": 131072, - "architecture": { - "modality": "text->text", - "tokenizer": "Qwen", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00000079", - "completion": "0.00000079", "image": "0", - "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -793,16 +2161,20 @@ "context_length": 8192, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Other", "instruct_type": null }, "pricing": { "prompt": "0", "completion": "0", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -821,16 +2193,20 @@ "context_length": 131072, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Other", "instruct_type": null }, "pricing": { "prompt": "0", "completion": "0", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -849,18 +2225,24 @@ "context_length": 128000, "architecture": { "modality": "text+image->text", + "input_modalities": [ + "text", + "image" + ], + "output_modalities": [ + "text" + ], "tokenizer": "GPT", "instruct_type": null }, "pricing": { "prompt": "0.000075", "completion": "0.00015", - "image": "0.108375", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0.108375", "web_search": "0", - "internal_reasoning": "0" + "internal_reasoning": "0", + "input_cache_read": "0.0000375" }, "top_provider": { "context_length": 128000, @@ -877,16 +2259,21 @@ "context_length": 1048576, "architecture": { "modality": "text+image->text", + "input_modalities": [ + "text", + "image" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Gemini", "instruct_type": null }, "pricing": { "prompt": "0.000000075", "completion": "0.0000003", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -897,34 +2284,6 @@ }, "per_request_limits": null }, - { - "id": "anthropic/claude-3.7-sonnet:beta", - "name": "Anthropic: Claude 3.7 Sonnet (self-moderated)", - "created": 1740422110, - "description": "Claude 3.7 Sonnet is an advanced large language model with improved reasoning, coding, and problem-solving capabilities. It introduces a hybrid reasoning approach, allowing users to choose between rapid responses and extended, step-by-step processing for complex tasks. The model demonstrates notable improvements in coding, particularly in front-end development and full-stack updates, and excels in agentic workflows, where it can autonomously navigate multi-step processes. 
\n\nClaude 3.7 Sonnet maintains performance parity with its predecessor in standard mode while offering an extended reasoning mode for enhanced accuracy in math, coding, and instruction-following tasks.\n\nRead more at the [blog post here](https://www.anthropic.com/news/claude-3-7-sonnet)", - "context_length": 200000, - "architecture": { - "modality": "text+image->text", - "tokenizer": "Claude", - "instruct_type": null - }, - "pricing": { - "prompt": "0.000003", - "completion": "0.000015", - "image": "0.0048", - "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 200000, - "max_completion_tokens": 128000, - "is_moderated": false - }, - "per_request_limits": null - }, { "id": "anthropic/claude-3.7-sonnet", "name": "Anthropic: Claude 3.7 Sonnet", @@ -933,23 +2292,30 @@ "context_length": 200000, "architecture": { "modality": "text+image->text", + "input_modalities": [ + "text", + "image" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Claude", "instruct_type": null }, "pricing": { "prompt": "0.000003", "completion": "0.000015", - "image": "0.0048", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0.0048", "web_search": "0", - "internal_reasoning": "0" + "internal_reasoning": "0", + "input_cache_read": "0.0000003", + "input_cache_write": "0.00000375" }, "top_provider": { "context_length": 200000, - "max_completion_tokens": 128000, - "is_moderated": true + "max_completion_tokens": 64000, + "is_moderated": false }, "per_request_limits": null }, @@ -961,23 +2327,65 @@ "context_length": 200000, "architecture": { "modality": "text+image->text", + "input_modalities": [ + "text", + "image" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Claude", "instruct_type": null }, "pricing": { "prompt": "0.000003", "completion": "0.000015", - "image": "0.0048", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0.0048", "web_search": "0", - "internal_reasoning": "0" + "internal_reasoning": "0", + "input_cache_read": "0.0000003", + "input_cache_write": "0.00000375" + }, + "top_provider": { + "context_length": 200000, + "max_completion_tokens": 64000, + "is_moderated": false + }, + "per_request_limits": null + }, + { + "id": "anthropic/claude-3.7-sonnet:beta", + "name": "Anthropic: Claude 3.7 Sonnet (self-moderated)", + "created": 1740422110, + "description": "Claude 3.7 Sonnet is an advanced large language model with improved reasoning, coding, and problem-solving capabilities. It introduces a hybrid reasoning approach, allowing users to choose between rapid responses and extended, step-by-step processing for complex tasks. The model demonstrates notable improvements in coding, particularly in front-end development and full-stack updates, and excels in agentic workflows, where it can autonomously navigate multi-step processes. 
\n\nClaude 3.7 Sonnet maintains performance parity with its predecessor in standard mode while offering an extended reasoning mode for enhanced accuracy in math, coding, and instruction-following tasks.\n\nRead more at the [blog post here](https://www.anthropic.com/news/claude-3-7-sonnet)", + "context_length": 200000, + "architecture": { + "modality": "text+image->text", + "input_modalities": [ + "text", + "image" + ], + "output_modalities": [ + "text" + ], + "tokenizer": "Claude", + "instruct_type": null + }, + "pricing": { + "prompt": "0.000003", + "completion": "0.000015", + "request": "0", + "image": "0.0048", + "web_search": "0", + "internal_reasoning": "0", + "input_cache_read": "0.0000003", + "input_cache_write": "0.00000375" }, "top_provider": { "context_length": 200000, "max_completion_tokens": 128000, - "is_moderated": true + "is_moderated": false }, "per_request_limits": null }, @@ -989,16 +2397,20 @@ "context_length": 128000, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "DeepSeek", "instruct_type": "deepseek-r1" }, "pricing": { "prompt": "0.000002", "completion": "0.000008", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -1014,24 +2426,28 @@ "name": "Mistral: Saba", "created": 1739803239, "description": "Mistral Saba is a 24B-parameter language model specifically designed for the Middle East and South Asia, delivering accurate and contextually relevant responses while maintaining efficient performance. Trained on curated regional datasets, it supports multiple Indian-origin languages—including Tamil and Malayalam—alongside Arabic. This makes it a versatile option for a range of regional and multilingual applications. 
Read more at the blog post [here](https://mistral.ai/en/news/mistral-saba)", - "context_length": 32000, + "context_length": 32768, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Mistral", "instruct_type": null }, "pricing": { "prompt": "0.0000002", "completion": "0.0000006", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, "top_provider": { - "context_length": 32000, + "context_length": 32768, "max_completion_tokens": null, "is_moderated": false }, @@ -1045,16 +2461,20 @@ "context_length": 32768, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Other", "instruct_type": "deepseek-r1" }, "pricing": { "prompt": "0", "completion": "0", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -1073,16 +2493,20 @@ "context_length": 32768, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Other", "instruct_type": null }, "pricing": { "prompt": "0", "completion": "0", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -1101,16 +2525,20 @@ "context_length": 8192, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Llama3", "instruct_type": "none" }, "pricing": { "prompt": "0.0000002", "completion": "0.0000002", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -1129,18 +2557,23 @@ "context_length": 200000, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Other", "instruct_type": null }, "pricing": { "prompt": "0.0000011", "completion": "0.0000044", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", - "internal_reasoning": "0" + "internal_reasoning": "0", + "input_cache_read": "0.00000055" }, "top_provider": { "context_length": 200000, @@ -1149,34 +2582,6 @@ }, "per_request_limits": null }, - { - "id": "allenai/llama-3.1-tulu-3-405b", - "name": "Llama 3.1 Tulu 3 405B", - "created": 1739053421, - "description": "Tülu 3 405B is the largest model in the Tülu 3 family, applying fully open post-training recipes at a 405B parameter scale. Built on the Llama 3.1 405B base, it leverages Reinforcement Learning with Verifiable Rewards (RLVR) to enhance instruction following, MATH, GSM8K, and IFEval performance. As part of Tülu 3’s fully open-source approach, it offers state-of-the-art capabilities while surpassing prior open-weight models like Llama 3.1 405B Instruct and Nous Hermes 3 405B on multiple benchmarks. 
To read more, [click here.](https://allenai.org/blog/tulu-3-405B)", - "context_length": 16384, - "architecture": { - "modality": "text->text", - "tokenizer": "Other", - "instruct_type": null - }, - "pricing": { - "prompt": "0.000005", - "completion": "0.00001", - "image": "0", - "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 16384, - "max_completion_tokens": 4096, - "is_moderated": false - }, - "per_request_limits": null - }, { "id": "deepseek/deepseek-r1-distill-llama-8b", "name": "DeepSeek: R1 Distill Llama 8B", @@ -1185,16 +2590,20 @@ "context_length": 32000, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Llama3", "instruct_type": "deepseek-r1" }, "pricing": { "prompt": "0.00000004", "completion": "0.00000004", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -1207,22 +2616,27 @@ }, { "id": "google/gemini-2.0-flash-001", - "name": "Google: Gemini Flash 2.0", + "name": "Google: Gemini 2.0 Flash", "created": 1738769413, "description": "Gemini Flash 2.0 offers a significantly faster time to first token (TTFT) compared to [Gemini Flash 1.5](/google/gemini-flash-1.5), while maintaining quality on par with larger models like [Gemini Pro 1.5](/google/gemini-pro-1.5). It introduces notable enhancements in multimodal understanding, coding capabilities, complex instruction following, and function calling. These advancements come together to deliver more seamless and robust agentic experiences.", "context_length": 1000000, "architecture": { "modality": "text+image->text", + "input_modalities": [ + "text", + "image" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Gemini", "instruct_type": null }, "pricing": { "prompt": "0.0000001", "completion": "0.0000004", + "request": "0", "image": "0.0000258", - "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -1233,62 +2647,6 @@ }, "per_request_limits": null }, - { - "id": "google/gemini-2.0-flash-lite-preview-02-05:free", - "name": "Google: Gemini Flash Lite 2.0 Preview (free)", - "created": 1738768262, - "description": "Gemini Flash Lite 2.0 offers a significantly faster time to first token (TTFT) compared to [Gemini Flash 1.5](/google/gemini-flash-1.5), while maintaining quality on par with larger models like [Gemini Pro 1.5](/google/gemini-pro-1.5). Because it's currently in preview, it will be **heavily rate-limited** by Google. 
This model will move from free to paid pending a general rollout on February 24th, at $0.075 / $0.30 per million input / output tokens respectively.", - "context_length": 1000000, - "architecture": { - "modality": "text+image->text", - "tokenizer": "Gemini", - "instruct_type": null - }, - "pricing": { - "prompt": "0", - "completion": "0", - "image": "0", - "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 1000000, - "max_completion_tokens": 8192, - "is_moderated": false - }, - "per_request_limits": null - }, - { - "id": "google/gemini-2.0-pro-exp-02-05:free", - "name": "Google: Gemini Pro 2.0 Experimental (free)", - "created": 1738768044, - "description": "Gemini 2.0 Pro Experimental is a bleeding-edge version of the Gemini 2.0 Pro model. Because it's currently experimental, it will be **heavily rate-limited** by Google.\n\nUsage of Gemini is subject to Google's [Gemini Terms of Use](https://ai.google.dev/terms).\n\n#multimodal", - "context_length": 2000000, - "architecture": { - "modality": "text+image->text", - "tokenizer": "Gemini", - "instruct_type": null - }, - "pricing": { - "prompt": "0", - "completion": "0", - "image": "0", - "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 2000000, - "max_completion_tokens": 8192, - "is_moderated": false - }, - "per_request_limits": null - }, { "id": "qwen/qwen-vl-plus", "name": "Qwen: Qwen VL Plus", @@ -1297,16 +2655,21 @@ "context_length": 7500, "architecture": { "modality": "text+image->text", + "input_modalities": [ + "text", + "image" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Qwen", "instruct_type": null }, "pricing": { "prompt": "0.00000021", "completion": "0.00000063", - "image": "0.0002688", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0.0002688", "web_search": "0", "internal_reasoning": "0" }, @@ -1322,24 +2685,28 @@ "name": "AionLabs: Aion-1.0", "created": 1738697557, "description": "Aion-1.0 is a multi-model system designed for high performance across various tasks, including reasoning and coding. It is built on DeepSeek-R1, augmented with additional models and techniques such as Tree of Thoughts (ToT) and Mixture of Experts (MoE). It is Aion Lab's most powerful reasoning model.", - "context_length": 32768, + "context_length": 131072, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Other", "instruct_type": null }, "pricing": { "prompt": "0.000004", "completion": "0.000008", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, "top_provider": { - "context_length": 32768, + "context_length": 131072, "max_completion_tokens": 32768, "is_moderated": false }, "per_request_limits": null }, { "id": "aion-labs/aion-1.0-mini", "name": "AionLabs: Aion-1.0-Mini", "created": 1738697107, "description": "Aion-1.0-Mini 32B parameter model is a distilled version of the DeepSeek-R1 model, designed for strong performance in reasoning domains such as mathematics, coding, and logic. 
It is a modified variant of a FuseAI model that outperforms R1-Distill-Qwen-32B and R1-Distill-Llama-70B, with benchmark results available on its [Hugging Face page](https://huggingface.co/FuseAI/FuseO1-DeepSeekR1-QwQ-SkyT1-32B-Preview), independently replicated for verification.", - "context_length": 32768, + "context_length": 131072, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Other", "instruct_type": null }, "pricing": { "prompt": "0.0000007", "completion": "0.0000014", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, "top_provider": { - "context_length": 32768, + "context_length": 131072, "max_completion_tokens": 32768, "is_moderated": false }, @@ -1381,16 +2752,20 @@ "context_length": 32768, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Other", "instruct_type": null }, "pricing": { "prompt": "0.0000002", "completion": "0.0000002", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -1409,16 +2784,21 @@ "context_length": 7500, "architecture": { "modality": "text+image->text", + "input_modalities": [ + "text", + "image" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Qwen", "instruct_type": null }, "pricing": { "prompt": "0.0000008", "completion": "0.0000032", - "image": "0.001024", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0.001024", "web_search": "0", "internal_reasoning": "0" }, @@ -1437,16 +2817,20 @@ "context_length": 1000000, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Qwen", "instruct_type": null }, "pricing": { "prompt": "0.00000005", "completion": "0.0000002", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -1465,16 +2849,21 @@ "context_length": 131072, "architecture": { "modality": "text+image->text", + "input_modalities": [ + "text", + "image" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Qwen", "instruct_type": null }, "pricing": { "prompt": "0", "completion": "0", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -1490,25 +2879,30 @@ "name": "Qwen: Qwen2.5 VL 72B Instruct", "created": 1738410311, "description": "Qwen2.5-VL is proficient in recognizing common objects such as flowers, birds, fish, and insects. 
It is also highly capable of analyzing texts, charts, icons, graphics, and layouts within images.", - "context_length": 32000, + "context_length": 128000, "architecture": { "modality": "text+image->text", + "input_modalities": [ + "text", + "image" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Qwen", "instruct_type": null }, "pricing": { "prompt": "0.0000007", "completion": "0.0000007", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, "top_provider": { - "context_length": 32000, - "max_completion_tokens": null, + "context_length": 128000, + "max_completion_tokens": 128000, "is_moderated": false }, "per_request_limits": null @@ -1521,16 +2915,20 @@ "context_length": 131072, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Qwen", "instruct_type": null }, "pricing": { "prompt": "0.0000004", "completion": "0.0000012", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -1549,16 +2947,20 @@ "context_length": 32768, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Qwen", "instruct_type": null }, "pricing": { "prompt": "0.0000016", "completion": "0.0000064", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -1577,18 +2979,23 @@ "context_length": 200000, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Other", "instruct_type": null }, "pricing": { "prompt": "0.0000011", "completion": "0.0000044", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", - "internal_reasoning": "0" + "internal_reasoning": "0", + "input_cache_read": "0.00000055" }, "top_provider": { "context_length": 200000, @@ -1605,16 +3012,20 @@ "context_length": 131072, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Other", "instruct_type": "deepseek-r1" }, "pricing": { "prompt": "0.00000018", "completion": "0.00000018", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -1633,16 +3044,20 @@ "context_length": 32768, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Mistral", "instruct_type": null }, "pricing": { "prompt": "0", "completion": "0", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -1661,16 +3076,20 @@ "context_length": 32768, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Mistral", "instruct_type": null }, "pricing": { "prompt": "0.00000007", "completion": "0.00000014", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -1689,16 +3108,20 @@ "context_length": 16000, "architecture": { "modality": "text->text", + 
"input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Qwen", "instruct_type": "deepseek-r1" }, "pricing": { "prompt": "0", "completion": "0", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -1717,16 +3140,20 @@ "context_length": 131072, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Qwen", "instruct_type": "deepseek-r1" }, "pricing": { "prompt": "0.00000012", "completion": "0.00000018", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -1745,16 +3172,20 @@ "context_length": 64000, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Qwen", "instruct_type": "deepseek-r1" }, "pricing": { "prompt": "0", "completion": "0", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -1773,16 +3204,20 @@ "context_length": 64000, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Qwen", "instruct_type": "deepseek-r1" }, "pricing": { "prompt": "0.00000015", "completion": "0.00000015", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -1801,16 +3236,20 @@ "context_length": 127000, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Other", - "instruct_type": null + "instruct_type": "deepseek-r1" }, "pricing": { "prompt": "0.000001", "completion": "0.000005", - "image": "0", "request": "0.005", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -1829,16 +3268,20 @@ "context_length": 127072, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Other", "instruct_type": null }, "pricing": { "prompt": "0.000001", "completion": "0.000001", - "image": "0", "request": "0.005", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -1857,16 +3300,20 @@ "context_length": 32768, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Other", "instruct_type": "chatml" }, "pricing": { "prompt": "0.00000001", "completion": "0.00000001", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -1885,16 +3332,20 @@ "context_length": 32768, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Other", "instruct_type": "chatml" }, "pricing": { "prompt": "0.00000002", "completion": "0.00000002", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -1913,16 +3364,20 @@ "context_length": 128000, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + 
"output_modalities": [ + "text" + ], "tokenizer": "Llama3", "instruct_type": "deepseek-r1" }, "pricing": { "prompt": "0", "completion": "0", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -1938,25 +3393,29 @@ "name": "DeepSeek: R1 Distill Llama 70B", "created": 1737663169, "description": "DeepSeek R1 Distill Llama 70B is a distilled large language model based on [Llama-3.3-70B-Instruct](/meta-llama/llama-3.3-70b-instruct), using outputs from [DeepSeek R1](/deepseek/deepseek-r1). The model combines advanced distillation techniques to achieve high performance across multiple benchmarks, including:\n\n- AIME 2024 pass@1: 70.0\n- MATH-500 pass@1: 94.5\n- CodeForces Rating: 1633\n\nThe model leverages fine-tuning from DeepSeek R1's outputs, enabling competitive performance comparable to larger frontier models.", - "context_length": 131072, + "context_length": 128000, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Llama3", "instruct_type": "deepseek-r1" }, "pricing": { - "prompt": "0.00000023", - "completion": "0.00000069", - "image": "0", + "prompt": "0.0000001", + "completion": "0.0000004", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, "top_provider": { - "context_length": 131072, - "max_completion_tokens": 8192, + "context_length": 128000, + "max_completion_tokens": 16384, "is_moderated": false }, "per_request_limits": null @@ -1969,16 +3428,21 @@ "context_length": 1048576, "architecture": { "modality": "text+image->text", + "input_modalities": [ + "text", + "image" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Gemini", "instruct_type": null }, "pricing": { "prompt": "0", "completion": "0", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -1997,16 +3461,20 @@ "context_length": 163840, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "DeepSeek", "instruct_type": "deepseek-r1" }, "pricing": { "prompt": "0", "completion": "0", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -2022,25 +3490,29 @@ "name": "DeepSeek: R1", "created": 1737381095, "description": "DeepSeek R1 is here: Performance on par with [OpenAI o1](/openai/o1), but open-sourced and with fully open reasoning tokens. 
It's 671B parameters in size, with 37B active in an inference pass.\n\nFully open-source model & [technical report](https://api-docs.deepseek.com/news/news250120).\n\nMIT licensed: Distill & commercialize freely!", - "context_length": 64000, + "context_length": 128000, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "DeepSeek", "instruct_type": "deepseek-r1" }, "pricing": { - "prompt": "0.0000007", - "completion": "0.0000025", - "image": "0", + "prompt": "0.0000005", + "completion": "0.000003", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, "top_provider": { - "context_length": 64000, - "max_completion_tokens": 16000, + "context_length": 128000, + "max_completion_tokens": 32768, "is_moderated": false }, "per_request_limits": null @@ -2053,16 +3525,20 @@ "context_length": 4096, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Llama2", "instruct_type": "vicuna" }, "pricing": { "prompt": "0", "completion": "0", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -2081,16 +3557,21 @@ "context_length": 1000192, "architecture": { "modality": "text+image->text", + "input_modalities": [ + "text", + "image" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Other", "instruct_type": null }, "pricing": { "prompt": "0.0000002", "completion": "0.0000011", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -2106,24 +3587,28 @@ "name": "Mistral: Codestral 2501", "created": 1736895522, "description": "[Mistral](/mistralai)'s cutting-edge language model for coding. Codestral specializes in low-latency, high-frequency tasks such as fill-in-the-middle (FIM), code correction and test generation. 
\n\nLearn more on their blog post: https://mistral.ai/news/codestral-2501/", - "context_length": 256000, + "context_length": 262144, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Mistral", "instruct_type": null }, "pricing": { "prompt": "0.0000003", "completion": "0.0000009", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, "top_provider": { - "context_length": 256000, + "context_length": 262144, "max_completion_tokens": null, "is_moderated": false }, @@ -2137,16 +3622,20 @@ "context_length": 16384, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Other", "instruct_type": null }, "pricing": { "prompt": "0.00000007", "completion": "0.00000014", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -2165,16 +3654,20 @@ "context_length": 16000, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Llama3", "instruct_type": null }, "pricing": { "prompt": "0.000003", "completion": "0.000003", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -2190,24 +3683,28 @@ "name": "DeepSeek: DeepSeek V3 (free)", "created": 1735241320, "description": "DeepSeek-V3 is the latest model from the DeepSeek team, building upon the instruction following and coding abilities of the previous versions. Pre-trained on nearly 15 trillion tokens, the reported evaluations reveal that the model outperforms other open-source models and rivals leading closed-source models.\n\nFor model details, please visit [the DeepSeek-V3 repo](https://github.com/deepseek-ai/DeepSeek-V3) for more information, or see the [launch announcement](https://api-docs.deepseek.com/news/news1226).", - "context_length": 131072, + "context_length": 163840, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "DeepSeek", "instruct_type": null }, "pricing": { "prompt": "0", "completion": "0", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, "top_provider": { - "context_length": 131072, + "context_length": 163840, "max_completion_tokens": null, "is_moderated": false }, @@ -2218,25 +3715,29 @@ "name": "DeepSeek: DeepSeek V3", "created": 1735241320, "description": "DeepSeek-V3 is the latest model from the DeepSeek team, building upon the instruction following and coding abilities of the previous versions. 
Pre-trained on nearly 15 trillion tokens, the reported evaluations reveal that the model outperforms other open-source models and rivals leading closed-source models.\n\nFor model details, please visit [the DeepSeek-V3 repo](https://github.com/deepseek-ai/DeepSeek-V3) for more information, or see the [launch announcement](https://api-docs.deepseek.com/news/news1226).", - "context_length": 64000, + "context_length": 163840, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "DeepSeek", "instruct_type": null }, "pricing": { - "prompt": "0.0000004", - "completion": "0.0000013", - "image": "0", + "prompt": "0.00000038", + "completion": "0.00000089", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, "top_provider": { - "context_length": 64000, - "max_completion_tokens": 16000, + "context_length": 163840, + "max_completion_tokens": 163840, "is_moderated": false }, "per_request_limits": null @@ -2249,16 +3750,21 @@ "context_length": 40000, "architecture": { "modality": "text+image->text", + "input_modalities": [ + "text", + "image" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Gemini", "instruct_type": null }, "pricing": { "prompt": "0", "completion": "0", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -2277,16 +3783,20 @@ "context_length": 131072, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Llama3", "instruct_type": "llama3" }, "pricing": { "prompt": "0.0000007", "completion": "0.0000008", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -2305,18 +3815,24 @@ "context_length": 200000, "architecture": { "modality": "text+image->text", + "input_modalities": [ + "text", + "image" + ], + "output_modalities": [ + "text" + ], "tokenizer": "GPT", "instruct_type": null }, "pricing": { "prompt": "0.000015", "completion": "0.00006", - "image": "0.021675", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0.021675", "web_search": "0", - "internal_reasoning": "0" + "internal_reasoning": "0", + "input_cache_read": "0.0000075" }, "top_provider": { "context_length": 200000, @@ -2333,16 +3849,20 @@ "context_length": 16384, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Llama3", "instruct_type": "llama3" }, "pricing": { "prompt": "0.000004", "completion": "0.000006", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -2361,16 +3881,21 @@ "context_length": 32768, "architecture": { "modality": "text+image->text", + "input_modalities": [ + "text", + "image" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Grok", "instruct_type": null }, "pricing": { "prompt": "0.000002", "completion": "0.00001", - "image": "0.0036", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0.0036", "web_search": "0", "internal_reasoning": "0" }, @@ -2389,16 +3914,20 @@ "context_length": 131072, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + 
"text" + ], "tokenizer": "Grok", "instruct_type": null }, "pricing": { "prompt": "0.000002", "completion": "0.00001", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -2417,16 +3946,20 @@ "context_length": 128000, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Cohere", "instruct_type": null }, "pricing": { "prompt": "0.0000000375", "completion": "0.00000015", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -2439,22 +3972,27 @@ }, { "id": "google/gemini-2.0-flash-exp:free", - "name": "Google: Gemini Flash 2.0 Experimental (free)", + "name": "Google: Gemini 2.0 Flash Experimental (free)", "created": 1733937523, "description": "Gemini Flash 2.0 offers a significantly faster time to first token (TTFT) compared to [Gemini Flash 1.5](/google/gemini-flash-1.5), while maintaining quality on par with larger models like [Gemini Pro 1.5](/google/gemini-pro-1.5). It introduces notable enhancements in multimodal understanding, coding capabilities, complex instruction following, and function calling. These advancements come together to deliver more seamless and robust agentic experiences.", "context_length": 1048576, "architecture": { "modality": "text+image->text", + "input_modalities": [ + "text", + "image" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Gemini", "instruct_type": null }, "pricing": { "prompt": "0", "completion": "0", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -2465,58 +4003,34 @@ }, "per_request_limits": null }, - { - "id": "google/gemini-exp-1206:free", - "name": "Google: Gemini Experimental 1206 (free)", - "created": 1733507713, - "description": "Experimental release (December 6, 2024) of Gemini.", - "context_length": 2097152, - "architecture": { - "modality": "text+image->text", - "tokenizer": "Gemini", - "instruct_type": null - }, - "pricing": { - "prompt": "0", - "completion": "0", - "image": "0", - "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 2097152, - "max_completion_tokens": 8192, - "is_moderated": false - }, - "per_request_limits": null - }, { "id": "meta-llama/llama-3.3-70b-instruct:free", "name": "Meta: Llama 3.3 70B Instruct (free)", "created": 1733506137, "description": "The Meta Llama 3.3 multilingual large language model (LLM) is a pretrained and instruction tuned generative model in 70B (text in/text out). 
The Llama 3.3 instruction tuned text only model is optimized for multilingual dialogue use cases and outperforms many of the available open source and closed chat models on common industry benchmarks.\n\nSupported languages: English, German, French, Italian, Portuguese, Hindi, Spanish, and Thai.\n\n[Model Card](https://github.com/meta-llama/llama-models/blob/main/models/llama3_3/MODEL_CARD.md)", - "context_length": 131072, + "context_length": 8000, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Llama3", "instruct_type": "llama3" }, "pricing": { "prompt": "0", "completion": "0", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, "top_provider": { - "context_length": 131072, - "max_completion_tokens": null, + "context_length": 8000, + "max_completion_tokens": 8000, "is_moderated": false }, "per_request_limits": null @@ -2526,25 +4040,29 @@ "name": "Meta: Llama 3.3 70B Instruct", "created": 1733506137, "description": "The Meta Llama 3.3 multilingual large language model (LLM) is a pretrained and instruction tuned generative model in 70B (text in/text out). The Llama 3.3 instruction tuned text only model is optimized for multilingual dialogue use cases and outperforms many of the available open source and closed chat models on common industry benchmarks.\n\nSupported languages: English, German, French, Italian, Portuguese, Hindi, Spanish, and Thai.\n\n[Model Card](https://github.com/meta-llama/llama-models/blob/main/models/llama3_3/MODEL_CARD.md)", - "context_length": 131072, + "context_length": 128000, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Llama3", "instruct_type": "llama3" }, "pricing": { - "prompt": "0.00000012", - "completion": "0.0000003", - "image": "0", + "prompt": "0.0000001", + "completion": "0.00000025", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, "top_provider": { - "context_length": 131072, - "max_completion_tokens": null, + "context_length": 128000, + "max_completion_tokens": 16384, "is_moderated": false }, "per_request_limits": null @@ -2557,16 +4075,21 @@ "context_length": 300000, "architecture": { "modality": "text+image->text", + "input_modalities": [ + "text", + "image" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Nova", "instruct_type": null }, "pricing": { "prompt": "0.00000006", "completion": "0.00000024", - "image": "0.00009", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0.00009", "web_search": "0", "internal_reasoning": "0" }, @@ -2585,16 +4108,20 @@ "context_length": 128000, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Nova", "instruct_type": null }, "pricing": { "prompt": "0.000000035", "completion": "0.00000014", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -2613,16 +4140,21 @@ "context_length": 300000, "architecture": { "modality": "text+image->text", + "input_modalities": [ + "text", + "image" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Nova", "instruct_type": null }, "pricing": { "prompt": "0.0000008", "completion": "0.0000032", - 
"image": "0.0012", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0.0012", "web_search": "0", "internal_reasoning": "0" }, @@ -2641,16 +4173,20 @@ "context_length": 16384, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Qwen", - "instruct_type": null + "instruct_type": "deepseek-r1" }, "pricing": { "prompt": "0", "completion": "0", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -2669,16 +4205,20 @@ "context_length": 32768, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Qwen", - "instruct_type": null + "instruct_type": "deepseek-r1" }, "pricing": { "prompt": "0.0000002", "completion": "0.0000002", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -2697,16 +4237,21 @@ "context_length": 40960, "architecture": { "modality": "text+image->text", + "input_modalities": [ + "text", + "image" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Gemini", "instruct_type": null }, "pricing": { "prompt": "0", "completion": "0", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -2722,25 +4267,29 @@ "name": "EVA Qwen2.5 72B", "created": 1732210606, "description": "EVA Qwen2.5 72B is a roleplay and storywriting specialist model. It's a full-parameter finetune of Qwen2.5-72B on mixture of synthetic and natural data.\n\nIt uses Celeste 70B 0.1 data mixture, greatly expanding it to improve versatility, creativity and \"flavor\" of the resulting model.", - "context_length": 32000, + "context_length": 131072, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Qwen", "instruct_type": "chatml" }, "pricing": { - "prompt": "0.0000007", - "completion": "0.0000007", - "image": "0", + "prompt": "0.0000009", + "completion": "0.0000012", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, "top_provider": { - "context_length": 32000, - "max_completion_tokens": null, + "context_length": 131072, + "max_completion_tokens": 131072, "is_moderated": false }, "per_request_limits": null @@ -2753,18 +4302,24 @@ "context_length": 128000, "architecture": { "modality": "text+image->text", + "input_modalities": [ + "text", + "image" + ], + "output_modalities": [ + "text" + ], "tokenizer": "GPT", "instruct_type": null }, "pricing": { "prompt": "0.0000025", "completion": "0.00001", - "image": "0.003613", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0.003613", "web_search": "0", - "internal_reasoning": "0" + "internal_reasoning": "0", + "input_cache_read": "0.00000125" }, "top_provider": { "context_length": 128000, @@ -2778,24 +4333,28 @@ "name": "Mistral Large 2411", "created": 1731978685, "description": "Mistral Large 2 2411 is an update of [Mistral Large 2](/mistralai/mistral-large) released together with [Pixtral Large 2411](/mistralai/pixtral-large-2411)\n\nIt provides a significant upgrade on the previous [Mistral Large 24.07](/mistralai/mistral-large-2407), with notable improvements in long context 
understanding, a new system prompt, and more accurate function calling.", - "context_length": 128000, + "context_length": 131072, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Mistral", "instruct_type": null }, "pricing": { "prompt": "0.000002", "completion": "0.000006", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, "top_provider": { - "context_length": 128000, + "context_length": 131072, "max_completion_tokens": null, "is_moderated": false }, @@ -2806,24 +4365,28 @@ "name": "Mistral Large 2407", "created": 1731978415, "description": "This is Mistral AI's flagship model, Mistral Large 2 (version mistral-large-2407). It's a proprietary weights-available model and excels at reasoning, code, JSON, chat, and more. Read the launch announcement [here](https://mistral.ai/news/mistral-large-2407/).\n\nIt supports dozens of languages including French, German, Spanish, Italian, Portuguese, Arabic, Hindi, Russian, Chinese, Japanese, and Korean, along with 80+ coding languages including Python, Java, C, C++, JavaScript, and Bash. Its long context window allows precise information recall from large documents.\n", - "context_length": 128000, + "context_length": 131072, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Mistral", "instruct_type": null }, "pricing": { "prompt": "0.000002", "completion": "0.000006", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, "top_provider": { - "context_length": 128000, + "context_length": 131072, "max_completion_tokens": null, "is_moderated": false }, @@ -2834,24 +4397,29 @@ "name": "Mistral: Pixtral Large 2411", "created": 1731977388, "description": "Pixtral Large is a 124B parameter, open-weight, multimodal model built on top of [Mistral Large 2](/mistralai/mistral-large-2411). 
The model is able to understand documents, charts and natural images.\n\nThe model is available under the Mistral Research License (MRL) for research and educational use, and the Mistral Commercial License for experimentation, testing, and production for commercial purposes.\n\n", - "context_length": 128000, + "context_length": 131072, "architecture": { "modality": "text+image->text", + "input_modalities": [ + "text", + "image" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Mistral", "instruct_type": null }, "pricing": { "prompt": "0.000002", "completion": "0.000006", - "image": "0.002888", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0.002888", "web_search": "0", "internal_reasoning": "0" }, "top_provider": { - "context_length": 128000, + "context_length": 131072, "max_completion_tokens": null, "is_moderated": false }, @@ -2865,16 +4433,21 @@ "context_length": 8192, "architecture": { "modality": "text+image->text", + "input_modalities": [ + "text", + "image" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Grok", "instruct_type": null }, "pricing": { "prompt": "0.000005", "completion": "0.000015", - "image": "0.009", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0.009", "web_search": "0", "internal_reasoning": "0" }, @@ -2893,16 +4466,20 @@ "context_length": 16384, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Mistral", "instruct_type": "mistral" }, "pricing": { "prompt": "0.0000008", "completion": "0.0000012", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -2921,16 +4498,20 @@ "context_length": 32768, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Qwen", "instruct_type": "chatml" }, "pricing": { "prompt": "0", "completion": "0", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -2946,25 +4527,29 @@ "name": "Qwen2.5 Coder 32B Instruct", "created": 1731368400, "description": "Qwen2.5-Coder is the latest series of Code-Specific Qwen large language models (formerly known as CodeQwen). Qwen2.5-Coder brings the following improvements upon CodeQwen1.5:\n\n- Significant improvements in **code generation**, **code reasoning** and **code fixing**. \n- A more comprehensive foundation for real-world applications such as **Code Agents**. 
Not only enhancing coding capabilities but also maintaining its strengths in mathematics and general competencies.\n\nTo read more about its evaluation results, check out [Qwen 2.5 Coder's blog](https://qwenlm.github.io/blog/qwen2.5-coder-family/).", - "context_length": 33000, + "context_length": 32768, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Qwen", "instruct_type": "chatml" }, "pricing": { "prompt": "0.00000007", - "completion": "0.00000016", - "image": "0", + "completion": "0.00000015", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, "top_provider": { - "context_length": 33000, - "max_completion_tokens": 3000, + "context_length": 32768, + "max_completion_tokens": 8192, "is_moderated": false }, "per_request_limits": null @@ -2977,16 +4562,20 @@ "context_length": 16000, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Mistral", "instruct_type": "vicuna" }, "pricing": { "prompt": "0.0000045", "completion": "0.0000045", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -3005,16 +4594,20 @@ "context_length": 16384, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Qwen", "instruct_type": "chatml" }, "pricing": { "prompt": "0.0000026", "completion": "0.0000034", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -3033,16 +4626,20 @@ "context_length": 32000, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Mistral", "instruct_type": "mistral" }, "pricing": { "prompt": "0.0000005", "completion": "0.0000005", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -3053,62 +4650,6 @@ }, "per_request_limits": null }, - { - "id": "anthropic/claude-3.5-haiku-20241022:beta", - "name": "Anthropic: Claude 3.5 Haiku (2024-10-22) (self-moderated)", - "created": 1730678400, - "description": "Claude 3.5 Haiku features enhancements across all skill sets including coding, tool use, and reasoning. As the fastest model in the Anthropic lineup, it offers rapid response times suitable for applications that require high interactivity and low latency, such as user-facing chatbots and on-the-fly code completions. 
It also excels in specialized tasks like data extraction and real-time content moderation, making it a versatile tool for a broad range of industries.\n\nIt does not support image inputs.\n\nSee the launch announcement and benchmark results [here](https://www.anthropic.com/news/3-5-models-and-computer-use)", - "context_length": 200000, - "architecture": { - "modality": "text->text", - "tokenizer": "Claude", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000008", - "completion": "0.000004", - "image": "0", - "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 200000, - "max_completion_tokens": 8192, - "is_moderated": false - }, - "per_request_limits": null - }, - { - "id": "anthropic/claude-3.5-haiku-20241022", - "name": "Anthropic: Claude 3.5 Haiku (2024-10-22)", - "created": 1730678400, - "description": "Claude 3.5 Haiku features enhancements across all skill sets including coding, tool use, and reasoning. As the fastest model in the Anthropic lineup, it offers rapid response times suitable for applications that require high interactivity and low latency, such as user-facing chatbots and on-the-fly code completions. It also excels in specialized tasks like data extraction and real-time content moderation, making it a versatile tool for a broad range of industries.\n\nIt does not support image inputs.\n\nSee the launch announcement and benchmark results [here](https://www.anthropic.com/news/3-5-models-and-computer-use)", - "context_length": 200000, - "architecture": { - "modality": "text->text", - "tokenizer": "Claude", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000008", - "completion": "0.000004", - "image": "0", - "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 200000, - "max_completion_tokens": 8192, - "is_moderated": true - }, - "per_request_limits": null - }, { "id": "anthropic/claude-3.5-haiku:beta", "name": "Anthropic: Claude 3.5 Haiku (self-moderated)", @@ -3116,19 +4657,26 @@ "description": "Claude 3.5 Haiku offers enhanced capabilities in speed, coding accuracy, and tool use. Engineered to excel in real-time applications, it delivers quick response times that are essential for dynamic tasks such as chat interactions and immediate coding suggestions.\n\nThis makes it highly suitable for environments that demand both speed and precision, such as software development, customer service bots, and data management systems.\n\nThis model is currently pointing to [Claude 3.5 Haiku (2024-10-22)](/anthropic/claude-3-5-haiku-20241022).", "context_length": 200000, "architecture": { - "modality": "text->text", + "modality": "text+image->text", + "input_modalities": [ + "text", + "image" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Claude", "instruct_type": null }, "pricing": { "prompt": "0.0000008", "completion": "0.000004", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", - "internal_reasoning": "0" + "internal_reasoning": "0", + "input_cache_read": "0.00000008", + "input_cache_write": "0.000001" }, "top_provider": { "context_length": 200000, @@ -3144,19 +4692,26 @@ "description": "Claude 3.5 Haiku offers enhanced capabilities in speed, coding accuracy, and tool use. 
Engineered to excel in real-time applications, it delivers quick response times that are essential for dynamic tasks such as chat interactions and immediate coding suggestions.\n\nThis makes it highly suitable for environments that demand both speed and precision, such as software development, customer service bots, and data management systems.\n\nThis model is currently pointing to [Claude 3.5 Haiku (2024-10-22)](/anthropic/claude-3-5-haiku-20241022).", "context_length": 200000, "architecture": { - "modality": "text->text", + "modality": "text+image->text", + "input_modalities": [ + "text", + "image" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Claude", "instruct_type": null }, "pricing": { "prompt": "0.0000008", "completion": "0.000004", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", - "internal_reasoning": "0" + "internal_reasoning": "0", + "input_cache_read": "0.00000008", + "input_cache_write": "0.000001" }, "top_provider": { "context_length": 200000, @@ -3166,58 +4721,72 @@ "per_request_limits": null }, { - "id": "neversleep/llama-3.1-lumimaid-70b", - "name": "NeverSleep: Lumimaid v0.2 70B", - "created": 1729555200, - "description": "Lumimaid v0.2 70B is a finetune of [Llama 3.1 70B](/meta-llama/llama-3.1-70b-instruct) with a \"HUGE step up dataset wise\" compared to Lumimaid v0.1. Sloppy chats output were purged.\n\nUsage of this model is subject to [Meta's Acceptable Use Policy](https://llama.meta.com/llama3/use-policy/).", - "context_length": 16384, + "id": "anthropic/claude-3.5-haiku-20241022:beta", + "name": "Anthropic: Claude 3.5 Haiku (2024-10-22) (self-moderated)", + "created": 1730678400, + "description": "Claude 3.5 Haiku features enhancements across all skill sets including coding, tool use, and reasoning. As the fastest model in the Anthropic lineup, it offers rapid response times suitable for applications that require high interactivity and low latency, such as user-facing chatbots and on-the-fly code completions. 
It also excels in specialized tasks like data extraction and real-time content moderation, making it a versatile tool for a broad range of industries.\n\nIt does not support image inputs.\n\nSee the launch announcement and benchmark results [here](https://www.anthropic.com/news/3-5-models-and-computer-use)", + "context_length": 200000, "architecture": { - "modality": "text->text", - "tokenizer": "Llama3", - "instruct_type": "llama3" + "modality": "text+image->text", + "input_modalities": [ + "text", + "image" + ], + "output_modalities": [ + "text" + ], + "tokenizer": "Claude", + "instruct_type": null }, "pricing": { - "prompt": "0.000003375", - "completion": "0.0000045", - "image": "0", + "prompt": "0.0000008", + "completion": "0.000004", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", - "internal_reasoning": "0" + "internal_reasoning": "0", + "input_cache_read": "0.00000008", + "input_cache_write": "0.000001" }, "top_provider": { - "context_length": 16384, - "max_completion_tokens": 2048, + "context_length": 200000, + "max_completion_tokens": 8192, "is_moderated": false }, "per_request_limits": null }, { - "id": "anthracite-org/magnum-v4-72b", - "name": "Magnum v4 72B", - "created": 1729555200, - "description": "This is a series of models designed to replicate the prose quality of the Claude 3 models, specifically Sonnet(https://openrouter.ai/anthropic/claude-3.5-sonnet) and Opus(https://openrouter.ai/anthropic/claude-3-opus).\n\nThe model is fine-tuned on top of [Qwen2.5 72B](https://openrouter.ai/qwen/qwen-2.5-72b-instruct).", - "context_length": 16384, + "id": "anthropic/claude-3.5-haiku-20241022", + "name": "Anthropic: Claude 3.5 Haiku (2024-10-22)", + "created": 1730678400, + "description": "Claude 3.5 Haiku features enhancements across all skill sets including coding, tool use, and reasoning. As the fastest model in the Anthropic lineup, it offers rapid response times suitable for applications that require high interactivity and low latency, such as user-facing chatbots and on-the-fly code completions. 
It also excels in specialized tasks like data extraction and real-time content moderation, making it a versatile tool for a broad range of industries.\n\nIt does not support image inputs.\n\nSee the launch announcement and benchmark results [here](https://www.anthropic.com/news/3-5-models-and-computer-use)", + "context_length": 200000, "architecture": { - "modality": "text->text", - "tokenizer": "Qwen", - "instruct_type": "chatml" + "modality": "text+image->text", + "input_modalities": [ + "text", + "image" + ], + "output_modalities": [ + "text" + ], + "tokenizer": "Claude", + "instruct_type": null }, "pricing": { - "prompt": "0.000001875", - "completion": "0.00000225", - "image": "0", + "prompt": "0.0000008", + "completion": "0.000004", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", - "internal_reasoning": "0" + "internal_reasoning": "0", + "input_cache_read": "0.00000008", + "input_cache_write": "0.000001" }, "top_provider": { - "context_length": 16384, - "max_completion_tokens": 1024, - "is_moderated": false + "context_length": 200000, + "max_completion_tokens": 8192, + "is_moderated": true }, "per_request_limits": null }, @@ -3229,18 +4798,25 @@ "context_length": 200000, "architecture": { "modality": "text+image->text", + "input_modalities": [ + "text", + "image" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Claude", "instruct_type": null }, "pricing": { "prompt": "0.000003", "completion": "0.000015", - "image": "0.0048", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0.0048", "web_search": "0", - "internal_reasoning": "0" + "internal_reasoning": "0", + "input_cache_read": "0.0000003", + "input_cache_write": "0.00000375" }, "top_provider": { "context_length": 200000, @@ -3257,18 +4833,25 @@ "context_length": 200000, "architecture": { "modality": "text+image->text", + "input_modalities": [ + "text", + "image" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Claude", "instruct_type": null }, "pricing": { "prompt": "0.000003", "completion": "0.000015", - "image": "0.0048", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0.0048", "web_search": "0", - "internal_reasoning": "0" + "internal_reasoning": "0", + "input_cache_read": "0.0000003", + "input_cache_write": "0.00000375" }, "top_provider": { "context_length": 200000, @@ -3277,6 +4860,70 @@ }, "per_request_limits": null }, + { + "id": "anthracite-org/magnum-v4-72b", + "name": "Magnum v4 72B", + "created": 1729555200, + "description": "This is a series of models designed to replicate the prose quality of the Claude 3 models, specifically Sonnet(https://openrouter.ai/anthropic/claude-3.5-sonnet) and Opus(https://openrouter.ai/anthropic/claude-3-opus).\n\nThe model is fine-tuned on top of [Qwen2.5 72B](https://openrouter.ai/qwen/qwen-2.5-72b-instruct).", + "context_length": 16384, + "architecture": { + "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], + "tokenizer": "Qwen", + "instruct_type": "chatml" + }, + "pricing": { + "prompt": "0.0000015", + "completion": "0.00000225", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 16384, + "max_completion_tokens": 1024, + "is_moderated": false + }, + "per_request_limits": null + }, + { + "id": "neversleep/llama-3.1-lumimaid-70b", + "name": "NeverSleep: Lumimaid v0.2 70B", + "created": 1729555200, + "description": 
"Lumimaid v0.2 70B is a finetune of [Llama 3.1 70B](/meta-llama/llama-3.1-70b-instruct) with a \"HUGE step up dataset wise\" compared to Lumimaid v0.1. Sloppy chats output were purged.\n\nUsage of this model is subject to [Meta's Acceptable Use Policy](https://llama.meta.com/llama3/use-policy/).", + "context_length": 16384, + "architecture": { + "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], + "tokenizer": "Llama3", + "instruct_type": "llama3" + }, + "pricing": { + "prompt": "0.0000015", + "completion": "0.00000225", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 16384, + "max_completion_tokens": 2048, + "is_moderated": false + }, + "per_request_limits": null + }, { "id": "x-ai/grok-beta", "name": "xAI: Grok Beta", @@ -3285,16 +4932,20 @@ "context_length": 131072, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Grok", "instruct_type": null }, "pricing": { "prompt": "0.000005", "completion": "0.000015", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -3313,16 +4964,20 @@ "context_length": 128000, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Mistral", "instruct_type": null }, "pricing": { "prompt": "0.0000001", "completion": "0.0000001", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -3338,29 +4993,65 @@ "name": "Mistral: Ministral 3B", "created": 1729123200, "description": "Ministral 3B is a 3B parameter model optimized for on-device and edge computing. It excels in knowledge, commonsense reasoning, and function-calling, outperforming larger models like Mistral 7B on most benchmarks. Supporting up to 128k context length, it’s ideal for orchestrating agentic workflows and specialist tasks with efficient inference.", - "context_length": 128000, + "context_length": 131072, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Mistral", "instruct_type": null }, "pricing": { "prompt": "0.00000004", "completion": "0.00000004", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, "top_provider": { - "context_length": 128000, + "context_length": 131072, "max_completion_tokens": null, "is_moderated": false }, "per_request_limits": null }, + { + "id": "qwen/qwen-2.5-7b-instruct:free", + "name": "Qwen2.5 7B Instruct (free)", + "created": 1729036800, + "description": "Qwen2.5 7B is the latest series of Qwen large language models. Qwen2.5 brings the following improvements upon Qwen2:\n\n- Significantly more knowledge and has greatly improved capabilities in coding and mathematics, thanks to our specialized expert models in these domains.\n\n- Significant improvements in instruction following, generating long texts (over 8K tokens), understanding structured data (e.g, tables), and generating structured outputs especially JSON. 
More resilient to the diversity of system prompts, enhancing role-play implementation and condition-setting for chatbots.\n\n- Long-context Support up to 128K tokens and can generate up to 8K tokens.\n\n- Multilingual support for over 29 languages, including Chinese, English, French, Spanish, Portuguese, German, Italian, Russian, Japanese, Korean, Vietnamese, Thai, Arabic, and more.\n\nUsage of this model is subject to [Tongyi Qianwen LICENSE AGREEMENT](https://huggingface.co/Qwen/Qwen1.5-110B-Chat/blob/main/LICENSE).", + "context_length": 32768, + "architecture": { + "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], + "tokenizer": "Qwen", + "instruct_type": "chatml" + }, + "pricing": { + "prompt": "0", + "completion": "0", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 32768, + "max_completion_tokens": 32768, + "is_moderated": false + }, + "per_request_limits": null + }, { "id": "qwen/qwen-2.5-7b-instruct", "name": "Qwen2.5 7B Instruct", @@ -3369,16 +5060,20 @@ "context_length": 32768, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Qwen", "instruct_type": "chatml" }, "pricing": { - "prompt": "0.000000025", - "completion": "0.00000005", - "image": "0", + "prompt": "0.00000005", + "completion": "0.0000001", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -3397,16 +5092,20 @@ "context_length": 131072, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Llama3", "instruct_type": "llama3" }, "pricing": { "prompt": "0", "completion": "0", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -3422,25 +5121,29 @@ "name": "NVIDIA: Llama 3.1 Nemotron 70B Instruct", "created": 1728950400, "description": "NVIDIA's Llama 3.1 Nemotron 70B is a language model designed for generating precise and useful responses. Leveraging [Llama 3.1 70B](/models/meta-llama/llama-3.1-70b-instruct) architecture and Reinforcement Learning from Human Feedback (RLHF), it excels in automatic alignment benchmarks. 
This model is tailored for applications requiring high accuracy in helpfulness and response generation, suitable for diverse user queries across multiple domains.\n\nUsage of this model is subject to [Meta's Acceptable Use Policy](https://www.llama.com/llama3/use-policy/).", - "context_length": 131000, + "context_length": 131072, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Llama3", "instruct_type": "llama3" }, "pricing": { "prompt": "0.00000012", "completion": "0.0000003", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, "top_provider": { - "context_length": 131000, - "max_completion_tokens": 131000, + "context_length": 131072, + "max_completion_tokens": 131072, "is_moderated": false }, "per_request_limits": null @@ -3453,16 +5156,20 @@ "context_length": 8000, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Other", "instruct_type": null }, "pricing": { "prompt": "0.0000025", "completion": "0.00001", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -3481,16 +5188,20 @@ "context_length": 8000, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Other", "instruct_type": null }, "pricing": { "prompt": "0.0000025", "completion": "0.00001", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -3503,24 +5214,31 @@ }, { "id": "google/gemini-flash-1.5-8b", - "name": "Google: Gemini Flash 1.5 8B", + "name": "Google: Gemini 1.5 Flash 8B", "created": 1727913600, "description": "Gemini Flash 1.5 8B is optimized for speed and efficiency, offering enhanced performance in small prompt tasks like chat, transcription, and translation. With reduced latency, it is highly effective for real-time and large-scale operations. 
This model focuses on cost-effective solutions while maintaining high-quality results.\n\n[Click here to learn more about this model](https://developers.googleblog.com/en/gemini-15-flash-8b-is-now-generally-available-for-use/).\n\nUsage of Gemini is subject to Google's [Gemini Terms of Use](https://ai.google.dev/terms).", "context_length": 1000000, "architecture": { "modality": "text+image->text", + "input_modalities": [ + "text", + "image" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Gemini", "instruct_type": null }, "pricing": { "prompt": "0.0000000375", "completion": "0.00000015", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", - "internal_reasoning": "0" + "internal_reasoning": "0", + "input_cache_read": "0.00000001", + "input_cache_write": "0.0000000583" }, "top_provider": { "context_length": 1000000, @@ -3529,34 +5247,6 @@ }, "per_request_limits": null }, - { - "id": "anthracite-org/magnum-v2-72b", - "name": "Magnum v2 72B", - "created": 1727654400, - "description": "From the maker of [Goliath](https://openrouter.ai/models/alpindale/goliath-120b), Magnum 72B is the seventh in a family of models designed to achieve the prose quality of the Claude 3 models, notably Opus & Sonnet.\n\nThe model is based on [Qwen2 72B](https://openrouter.ai/models/qwen/qwen-2-72b-instruct) and trained with 55 million tokens of highly curated roleplay (RP) data.", - "context_length": 32768, - "architecture": { - "modality": "text->text", - "tokenizer": "Qwen", - "instruct_type": "chatml" - }, - "pricing": { - "prompt": "0.000003", - "completion": "0.000003", - "image": "0", - "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 32768, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null - }, { "id": "liquid/lfm-40b", "name": "Liquid: LFM 40B MoE", @@ -3565,16 +5255,20 @@ "context_length": 32768, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Other", "instruct_type": "chatml" }, "pricing": { "prompt": "0.00000015", "completion": "0.00000015", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -3593,16 +5287,52 @@ "context_length": 32768, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Qwen", "instruct_type": "chatml" }, "pricing": { "prompt": "0.00000025", "completion": "0.0000005", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 32768, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null + }, + { + "id": "anthracite-org/magnum-v2-72b", + "name": "Magnum v2 72B", + "created": 1727654400, + "description": "From the maker of [Goliath](https://openrouter.ai/models/alpindale/goliath-120b), Magnum 72B is the seventh in a family of models designed to achieve the prose quality of the Claude 3 models, notably Opus & Sonnet.\n\nThe model is based on [Qwen2 72B](https://openrouter.ai/models/qwen/qwen-2-72b-instruct) and trained with 55 million tokens of highly curated roleplay (RP) data.", + "context_length": 32768, + 
"architecture": { + "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], + "tokenizer": "Qwen", + "instruct_type": "chatml" + }, + "pricing": { + "prompt": "0.000003", + "completion": "0.000003", + "request": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -3621,16 +5351,20 @@ "context_length": 20000, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Llama3", "instruct_type": "llama3" }, "pricing": { "prompt": "0", "completion": "0", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -3646,81 +5380,29 @@ "name": "Meta: Llama 3.2 3B Instruct", "created": 1727222400, "description": "Llama 3.2 3B is a 3-billion-parameter multilingual large language model, optimized for advanced natural language processing tasks like dialogue generation, reasoning, and summarization. Designed with the latest transformer architecture, it supports eight languages, including English, Spanish, and Hindi, and is adaptable for additional languages.\n\nTrained on 9 trillion tokens, the Llama 3.2 3B model excels in instruction-following, complex reasoning, and tool use. Its balanced performance makes it ideal for applications needing accuracy and efficiency in text generation across multilingual settings.\n\nClick here for the [original model card](https://github.com/meta-llama/llama-models/blob/main/models/llama3_2/MODEL_CARD.md).\n\nUsage of this model is subject to [Meta's Acceptable Use Policy](https://www.llama.com/llama3/use-policy/).", - "context_length": 131000, + "context_length": 131072, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Llama3", "instruct_type": "llama3" }, "pricing": { "prompt": "0.000000015", "completion": "0.000000025", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 131000, - "max_completion_tokens": 131000, - "is_moderated": false - }, - "per_request_limits": null - }, - { - "id": "meta-llama/llama-3.2-1b-instruct:free", - "name": "Meta: Llama 3.2 1B Instruct (free)", - "created": 1727222400, - "description": "Llama 3.2 1B is a 1-billion-parameter language model focused on efficiently performing natural language tasks, such as summarization, dialogue, and multilingual text analysis. 
Its smaller size allows it to operate efficiently in low-resource environments while maintaining strong task performance.\n\nSupporting eight core languages and fine-tunable for more, Llama 1.3B is ideal for businesses or developers seeking lightweight yet powerful AI solutions that can operate in diverse multilingual settings without the high computational demand of larger models.\n\nClick here for the [original model card](https://github.com/meta-llama/llama-models/blob/main/models/llama3_2/MODEL_CARD.md).\n\nUsage of this model is subject to [Meta's Acceptable Use Policy](https://www.llama.com/llama3/use-policy/).", - "context_length": 131072, - "architecture": { - "modality": "text->text", - "tokenizer": "Llama3", - "instruct_type": "llama3" - }, - "pricing": { - "prompt": "0", - "completion": "0", "image": "0", - "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", "web_search": "0", "internal_reasoning": "0" }, "top_provider": { "context_length": 131072, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null - }, - { - "id": "meta-llama/llama-3.2-1b-instruct", - "name": "Meta: Llama 3.2 1B Instruct", - "created": 1727222400, - "description": "Llama 3.2 1B is a 1-billion-parameter language model focused on efficiently performing natural language tasks, such as summarization, dialogue, and multilingual text analysis. Its smaller size allows it to operate efficiently in low-resource environments while maintaining strong task performance.\n\nSupporting eight core languages and fine-tunable for more, Llama 1.3B is ideal for businesses or developers seeking lightweight yet powerful AI solutions that can operate in diverse multilingual settings without the high computational demand of larger models.\n\nClick here for the [original model card](https://github.com/meta-llama/llama-models/blob/main/models/llama3_2/MODEL_CARD.md).\n\nUsage of this model is subject to [Meta's Acceptable Use Policy](https://www.llama.com/llama3/use-policy/).", - "context_length": 131072, - "architecture": { - "modality": "text->text", - "tokenizer": "Llama3", - "instruct_type": "llama3" - }, - "pricing": { - "prompt": "0.00000001", - "completion": "0.00000001", - "image": "0", - "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 131072, - "max_completion_tokens": null, + "max_completion_tokens": 131072, "is_moderated": false }, "per_request_limits": null @@ -3733,16 +5415,21 @@ "context_length": 4096, "architecture": { "modality": "text+image->text", + "input_modalities": [ + "text", + "image" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Llama3", "instruct_type": "llama3" }, "pricing": { "prompt": "0.0000008", "completion": "0.0000016", - "image": "0.0051456", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0.0051456", "web_search": "0", "internal_reasoning": "0" }, @@ -3753,6 +5440,70 @@ }, "per_request_limits": null }, + { + "id": "meta-llama/llama-3.2-1b-instruct:free", + "name": "Meta: Llama 3.2 1B Instruct (free)", + "created": 1727222400, + "description": "Llama 3.2 1B is a 1-billion-parameter language model focused on efficiently performing natural language tasks, such as summarization, dialogue, and multilingual text analysis. 
Its smaller size allows it to operate efficiently in low-resource environments while maintaining strong task performance.\n\nSupporting eight core languages and fine-tunable for more, Llama 1.3B is ideal for businesses or developers seeking lightweight yet powerful AI solutions that can operate in diverse multilingual settings without the high computational demand of larger models.\n\nClick here for the [original model card](https://github.com/meta-llama/llama-models/blob/main/models/llama3_2/MODEL_CARD.md).\n\nUsage of this model is subject to [Meta's Acceptable Use Policy](https://www.llama.com/llama3/use-policy/).", + "context_length": 131072, + "architecture": { + "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], + "tokenizer": "Llama3", + "instruct_type": "llama3" + }, + "pricing": { + "prompt": "0", + "completion": "0", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 131072, + "max_completion_tokens": 131072, + "is_moderated": false + }, + "per_request_limits": null + }, + { + "id": "meta-llama/llama-3.2-1b-instruct", + "name": "Meta: Llama 3.2 1B Instruct", + "created": 1727222400, + "description": "Llama 3.2 1B is a 1-billion-parameter language model focused on efficiently performing natural language tasks, such as summarization, dialogue, and multilingual text analysis. Its smaller size allows it to operate efficiently in low-resource environments while maintaining strong task performance.\n\nSupporting eight core languages and fine-tunable for more, Llama 1.3B is ideal for businesses or developers seeking lightweight yet powerful AI solutions that can operate in diverse multilingual settings without the high computational demand of larger models.\n\nClick here for the [original model card](https://github.com/meta-llama/llama-models/blob/main/models/llama3_2/MODEL_CARD.md).\n\nUsage of this model is subject to [Meta's Acceptable Use Policy](https://www.llama.com/llama3/use-policy/).", + "context_length": 131072, + "architecture": { + "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], + "tokenizer": "Llama3", + "instruct_type": "llama3" + }, + "pricing": { + "prompt": "0.00000001", + "completion": "0.00000001", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 131072, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null + }, { "id": "meta-llama/llama-3.2-11b-vision-instruct:free", "name": "Meta: Llama 3.2 11B Vision Instruct (free)", @@ -3761,16 +5512,21 @@ "context_length": 131072, "architecture": { "modality": "text+image->text", + "input_modalities": [ + "text", + "image" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Llama3", "instruct_type": "llama3" }, "pricing": { "prompt": "0", "completion": "0", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -3786,25 +5542,30 @@ "name": "Meta: Llama 3.2 11B Vision Instruct", "created": 1727222400, "description": "Llama 3.2 11B Vision is a multimodal model with 11 billion parameters, designed to handle tasks combining visual and textual data. It excels in tasks such as image captioning and visual question answering, bridging the gap between language generation and visual reasoning. 
Pre-trained on a massive dataset of image-text pairs, it performs well in complex, high-accuracy image analysis.\n\nIts ability to integrate visual understanding with language processing makes it an ideal solution for industries requiring comprehensive visual-linguistic AI applications, such as content creation, AI-driven customer service, and research.\n\nClick here for the [original model card](https://github.com/meta-llama/llama-models/blob/main/models/llama3_2/MODEL_CARD_VISION.md).\n\nUsage of this model is subject to [Meta's Acceptable Use Policy](https://www.llama.com/llama3/use-policy/).", - "context_length": 16384, + "context_length": 131072, "architecture": { "modality": "text+image->text", + "input_modalities": [ + "text", + "image" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Llama3", "instruct_type": "llama3" }, "pricing": { - "prompt": "0.000000055", - "completion": "0.000000055", - "image": "0", + "prompt": "0.000000049", + "completion": "0.000000049", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0.00007948", "web_search": "0", "internal_reasoning": "0" }, "top_provider": { - "context_length": 16384, - "max_completion_tokens": 16384, + "context_length": 131072, + "max_completion_tokens": 8192, "is_moderated": false }, "per_request_limits": null @@ -3817,16 +5578,20 @@ "context_length": 32768, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Qwen", "instruct_type": "chatml" }, "pricing": { "prompt": "0", "completion": "0", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -3842,25 +5607,29 @@ "name": "Qwen2.5 72B Instruct", "created": 1726704000, "description": "Qwen2.5 72B is the latest series of Qwen large language models. Qwen2.5 brings the following improvements upon Qwen2:\n\n- Significantly more knowledge and has greatly improved capabilities in coding and mathematics, thanks to our specialized expert models in these domains.\n\n- Significant improvements in instruction following, generating long texts (over 8K tokens), understanding structured data (e.g, tables), and generating structured outputs especially JSON. 
More resilient to the diversity of system prompts, enhancing role-play implementation and condition-setting for chatbots.\n\n- Long-context Support up to 128K tokens and can generate up to 8K tokens.\n\n- Multilingual support for over 29 languages, including Chinese, English, French, Spanish, Portuguese, German, Italian, Russian, Japanese, Korean, Vietnamese, Thai, Arabic, and more.\n\nUsage of this model is subject to [Tongyi Qianwen LICENSE AGREEMENT](https://huggingface.co/Qwen/Qwen1.5-110B-Chat/blob/main/LICENSE).", - "context_length": 128000, + "context_length": 32768, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Qwen", "instruct_type": "chatml" }, "pricing": { - "prompt": "0.00000013", - "completion": "0.0000004", - "image": "0", + "prompt": "0.00000012", + "completion": "0.00000039", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, "top_provider": { - "context_length": 128000, - "max_completion_tokens": null, + "context_length": 32768, + "max_completion_tokens": 8192, "is_moderated": false }, "per_request_limits": null @@ -3870,24 +5639,29 @@ "name": "Qwen: Qwen2.5-VL 72B Instruct", "created": 1726617600, "description": "Qwen2.5 VL 72B is a multimodal LLM from the Qwen Team with the following key enhancements:\n\n- SoTA understanding of images of various resolution & ratio: Qwen2.5-VL achieves state-of-the-art performance on visual understanding benchmarks, including MathVista, DocVQA, RealWorldQA, MTVQA, etc.\n\n- Understanding videos of 20min+: Qwen2.5-VL can understand videos over 20 minutes for high-quality video-based question answering, dialog, content creation, etc.\n\n- Agent that can operate your mobiles, robots, etc.: with the abilities of complex reasoning and decision making, Qwen2.5-VL can be integrated with devices like mobile phones, robots, etc., for automatic operation based on visual environment and text instructions.\n\n- Multilingual Support: to serve global users, besides English and Chinese, Qwen2.5-VL now supports the understanding of texts in different languages inside images, including most European languages, Japanese, Korean, Arabic, Vietnamese, etc.\n\nFor more details, see this [blog post](https://qwenlm.github.io/blog/qwen2-vl/) and [GitHub repo](https://github.com/QwenLM/Qwen2-VL).\n\nUsage of this model is subject to [Tongyi Qianwen LICENSE AGREEMENT](https://huggingface.co/Qwen/Qwen1.5-110B-Chat/blob/main/LICENSE).", - "context_length": 4096, + "context_length": 32768, "architecture": { "modality": "text+image->text", + "input_modalities": [ + "text", + "image" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Qwen", "instruct_type": null }, "pricing": { "prompt": "0.0000006", "completion": "0.0000006", - "image": "0.000578", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0.000578", "web_search": "0", "internal_reasoning": "0" }, "top_provider": { - "context_length": 4096, + "context_length": 32768, "max_completion_tokens": null, "is_moderated": false }, @@ -3901,16 +5675,20 @@ "context_length": 32768, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Llama3", "instruct_type": "llama3" }, "pricing": { - "prompt": "0.0000001875", - "completion": "0.000001125", - "image": "0", + "prompt": "0.00000009375", + "completion": "0.00000075", "request": "0", - 
"input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -3922,25 +5700,30 @@ "per_request_limits": null }, { - "id": "openai/o1-mini-2024-09-12", - "name": "OpenAI: o1-mini (2024-09-12)", + "id": "openai/o1-mini", + "name": "OpenAI: o1-mini", "created": 1726099200, "description": "The latest and strongest model family from OpenAI, o1 is designed to spend more time thinking before responding.\n\nThe o1 models are optimized for math, science, programming, and other STEM-related tasks. They consistently exhibit PhD-level accuracy on benchmarks in physics, chemistry, and biology. Learn more in the [launch announcement](https://openai.com/o1).\n\nNote: This model is currently experimental and not suitable for production use-cases, and may be heavily rate-limited.", "context_length": 128000, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "GPT", "instruct_type": null }, "pricing": { "prompt": "0.0000011", "completion": "0.0000044", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", - "internal_reasoning": "0" + "internal_reasoning": "0", + "input_cache_read": "0.00000055" }, "top_provider": { "context_length": 128000, @@ -3957,18 +5740,23 @@ "context_length": 128000, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "GPT", "instruct_type": null }, "pricing": { "prompt": "0.000015", "completion": "0.00006", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", - "internal_reasoning": "0" + "internal_reasoning": "0", + "input_cache_read": "0.0000075" }, "top_provider": { "context_length": 128000, @@ -3985,18 +5773,23 @@ "context_length": 128000, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "GPT", "instruct_type": null }, "pricing": { "prompt": "0.000015", "completion": "0.00006", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", - "internal_reasoning": "0" + "internal_reasoning": "0", + "input_cache_read": "0.0000075" }, "top_provider": { "context_length": 128000, @@ -4006,25 +5799,30 @@ "per_request_limits": null }, { - "id": "openai/o1-mini", - "name": "OpenAI: o1-mini", + "id": "openai/o1-mini-2024-09-12", + "name": "OpenAI: o1-mini (2024-09-12)", "created": 1726099200, "description": "The latest and strongest model family from OpenAI, o1 is designed to spend more time thinking before responding.\n\nThe o1 models are optimized for math, science, programming, and other STEM-related tasks. They consistently exhibit PhD-level accuracy on benchmarks in physics, chemistry, and biology. 
Learn more in the [launch announcement](https://openai.com/o1).\n\nNote: This model is currently experimental and not suitable for production use-cases, and may be heavily rate-limited.", "context_length": 128000, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "GPT", "instruct_type": null }, "pricing": { "prompt": "0.0000011", "completion": "0.0000044", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", - "internal_reasoning": "0" + "internal_reasoning": "0", + "input_cache_read": "0.00000055" }, "top_provider": { "context_length": 128000, @@ -4038,57 +5836,34 @@ "name": "Mistral: Pixtral 12B", "created": 1725926400, "description": "The first multi-modal, text+image-to-text model from Mistral AI. Its weights were launched via torrent: https://x.com/mistralai/status/1833758285167722836.", - "context_length": 4096, + "context_length": 32768, "architecture": { "modality": "text+image->text", + "input_modalities": [ + "text", + "image" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Mistral", "instruct_type": null }, "pricing": { "prompt": "0.0000001", "completion": "0.0000001", + "request": "0", "image": "0.0001445", - "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", "web_search": "0", "internal_reasoning": "0" }, "top_provider": { - "context_length": 4096, + "context_length": 32768, "max_completion_tokens": null, "is_moderated": false }, "per_request_limits": null }, - { - "id": "cohere/command-r-08-2024", - "name": "Cohere: Command R (08-2024)", - "created": 1724976000, - "description": "command-r-08-2024 is an update of the [Command R](/models/cohere/command-r) with improved performance for multilingual retrieval-augmented generation (RAG) and tool use. 
More broadly, it is better at math, code and reasoning and is competitive with the previous version of the larger Command R+ model.\n\nRead the launch post [here](https://docs.cohere.com/changelog/command-gets-refreshed).\n\nUse of this model is subject to Cohere's [Usage Policy](https://docs.cohere.com/docs/usage-policy) and [SaaS Agreement](https://cohere.com/saas-agreement).", - "context_length": 128000, - "architecture": { - "modality": "text->text", - "tokenizer": "Cohere", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000001425", - "completion": "0.00000057", - "image": "0", - "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 128000, - "max_completion_tokens": 4000, - "is_moderated": false - }, - "per_request_limits": null - }, { "id": "cohere/command-r-plus-08-2024", "name": "Cohere: Command R+ (08-2024)", @@ -4097,16 +5872,52 @@ "context_length": 128000, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Cohere", "instruct_type": null }, "pricing": { - "prompt": "0.000002375", - "completion": "0.0000095", - "image": "0", + "prompt": "0.0000025", + "completion": "0.00001", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 128000, + "max_completion_tokens": 4000, + "is_moderated": false + }, + "per_request_limits": null + }, + { + "id": "cohere/command-r-08-2024", + "name": "Cohere: Command R (08-2024)", + "created": 1724976000, + "description": "command-r-08-2024 is an update of the [Command R](/models/cohere/command-r) with improved performance for multilingual retrieval-augmented generation (RAG) and tool use. 
More broadly, it is better at math, code and reasoning and is competitive with the previous version of the larger Command R+ model.\n\nRead the launch post [here](https://docs.cohere.com/changelog/command-gets-refreshed).\n\nUse of this model is subject to Cohere's [Usage Policy](https://docs.cohere.com/docs/usage-policy) and [SaaS Agreement](https://cohere.com/saas-agreement).", + "context_length": 128000, + "architecture": { + "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], + "tokenizer": "Cohere", + "instruct_type": null + }, + "pricing": { + "prompt": "0.00000015", + "completion": "0.0000006", + "request": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -4125,16 +5936,20 @@ "context_length": 131072, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Llama3", "instruct_type": "llama3" }, "pricing": { "prompt": "0.0000007", "completion": "0.0000008", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -4146,29 +5961,34 @@ "per_request_limits": null }, { - "id": "google/gemini-flash-1.5-8b-exp", - "name": "Google: Gemini Flash 1.5 8B Experimental", + "id": "qwen/qwen-2.5-vl-7b-instruct:free", + "name": "Qwen: Qwen2.5-VL 7B Instruct (free)", "created": 1724803200, - "description": "Gemini Flash 1.5 8B Experimental is an experimental, 8B parameter version of the [Gemini Flash 1.5](/models/google/gemini-flash-1.5) model.\n\nUsage of Gemini is subject to Google's [Gemini Terms of Use](https://ai.google.dev/terms).\n\n#multimodal\n\nNote: This model is currently experimental and not suitable for production use-cases, and may be heavily rate-limited.", - "context_length": 1000000, + "description": "Qwen2.5 VL 7B is a multimodal LLM from the Qwen Team with the following key enhancements:\n\n- SoTA understanding of images of various resolution & ratio: Qwen2.5-VL achieves state-of-the-art performance on visual understanding benchmarks, including MathVista, DocVQA, RealWorldQA, MTVQA, etc.\n\n- Understanding videos of 20min+: Qwen2.5-VL can understand videos over 20 minutes for high-quality video-based question answering, dialog, content creation, etc.\n\n- Agent that can operate your mobiles, robots, etc.: with the abilities of complex reasoning and decision making, Qwen2.5-VL can be integrated with devices like mobile phones, robots, etc., for automatic operation based on visual environment and text instructions.\n\n- Multilingual Support: to serve global users, besides English and Chinese, Qwen2.5-VL now supports the understanding of texts in different languages inside images, including most European languages, Japanese, Korean, Arabic, Vietnamese, etc.\n\nFor more details, see this [blog post](https://qwenlm.github.io/blog/qwen2-vl/) and [GitHub repo](https://github.com/QwenLM/Qwen2-VL).\n\nUsage of this model is subject to [Tongyi Qianwen LICENSE AGREEMENT](https://huggingface.co/Qwen/Qwen1.5-110B-Chat/blob/main/LICENSE).", + "context_length": 64000, "architecture": { "modality": "text+image->text", - "tokenizer": "Gemini", + "input_modalities": [ + "text", + "image" + ], + "output_modalities": [ + "text" + ], + "tokenizer": "Qwen", "instruct_type": null }, "pricing": { "prompt": "0", "completion": "0", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", 
"internal_reasoning": "0" }, "top_provider": { - "context_length": 1000000, - "max_completion_tokens": 8192, + "context_length": 64000, + "max_completion_tokens": 64000, "is_moderated": false }, "per_request_limits": null @@ -4178,53 +5998,63 @@ "name": "Qwen: Qwen2.5-VL 7B Instruct", "created": 1724803200, "description": "Qwen2.5 VL 7B is a multimodal LLM from the Qwen Team with the following key enhancements:\n\n- SoTA understanding of images of various resolution & ratio: Qwen2.5-VL achieves state-of-the-art performance on visual understanding benchmarks, including MathVista, DocVQA, RealWorldQA, MTVQA, etc.\n\n- Understanding videos of 20min+: Qwen2.5-VL can understand videos over 20 minutes for high-quality video-based question answering, dialog, content creation, etc.\n\n- Agent that can operate your mobiles, robots, etc.: with the abilities of complex reasoning and decision making, Qwen2.5-VL can be integrated with devices like mobile phones, robots, etc., for automatic operation based on visual environment and text instructions.\n\n- Multilingual Support: to serve global users, besides English and Chinese, Qwen2.5-VL now supports the understanding of texts in different languages inside images, including most European languages, Japanese, Korean, Arabic, Vietnamese, etc.\n\nFor more details, see this [blog post](https://qwenlm.github.io/blog/qwen2-vl/) and [GitHub repo](https://github.com/QwenLM/Qwen2-VL).\n\nUsage of this model is subject to [Tongyi Qianwen LICENSE AGREEMENT](https://huggingface.co/Qwen/Qwen1.5-110B-Chat/blob/main/LICENSE).", - "context_length": 4096, + "context_length": 32768, "architecture": { "modality": "text+image->text", + "input_modalities": [ + "text", + "image" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Qwen", "instruct_type": null }, "pricing": { "prompt": "0.0000002", "completion": "0.0000002", - "image": "0.0001445", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0.0001445", "web_search": "0", "internal_reasoning": "0" }, "top_provider": { - "context_length": 4096, + "context_length": 32768, "max_completion_tokens": null, "is_moderated": false }, "per_request_limits": null }, { - "id": "ai21/jamba-1-5-large", - "name": "AI21: Jamba 1.5 Large", - "created": 1724371200, - "description": "Jamba 1.5 Large is part of AI21's new family of open models, offering superior speed, efficiency, and quality.\n\nIt features a 256K effective context window, the longest among open models, enabling improved performance on tasks like document summarization and analysis.\n\nBuilt on a novel SSM-Transformer architecture, it outperforms larger models like Llama 3.1 70B on benchmarks while maintaining resource efficiency.\n\nRead their [announcement](https://www.ai21.com/blog/announcing-jamba-model-family) to learn more.", - "context_length": 256000, + "id": "google/gemini-flash-1.5-8b-exp", + "name": "Google: Gemini 1.5 Flash 8B Experimental", + "created": 1724803200, + "description": "Gemini Flash 1.5 8B Experimental is an experimental, 8B parameter version of the [Gemini Flash 1.5](/models/google/gemini-flash-1.5) model.\n\nUsage of Gemini is subject to Google's [Gemini Terms of Use](https://ai.google.dev/terms).\n\n#multimodal\n\nNote: This model is currently experimental and not suitable for production use-cases, and may be heavily rate-limited.", + "context_length": 1000000, "architecture": { - "modality": "text->text", - "tokenizer": "Other", + "modality": "text+image->text", + "input_modalities": [ + "text", + "image" 
+ ], + "output_modalities": [ + "text" + ], + "tokenizer": "Gemini", "instruct_type": null }, "pricing": { - "prompt": "0.000002", - "completion": "0.000008", - "image": "0", + "prompt": "0", + "completion": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, "top_provider": { - "context_length": 256000, - "max_completion_tokens": 4096, + "context_length": 1000000, + "max_completion_tokens": 8192, "is_moderated": false }, "per_request_limits": null @@ -4237,16 +6067,52 @@ "context_length": 256000, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Other", "instruct_type": null }, "pricing": { "prompt": "0.0000002", "completion": "0.0000004", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 256000, + "max_completion_tokens": 4096, + "is_moderated": false + }, + "per_request_limits": null + }, + { + "id": "ai21/jamba-1-5-large", + "name": "AI21: Jamba 1.5 Large", + "created": 1724371200, + "description": "Jamba 1.5 Large is part of AI21's new family of open models, offering superior speed, efficiency, and quality.\n\nIt features a 256K effective context window, the longest among open models, enabling improved performance on tasks like document summarization and analysis.\n\nBuilt on a novel SSM-Transformer architecture, it outperforms larger models like Llama 3.1 70B on benchmarks while maintaining resource efficiency.\n\nRead their [announcement](https://www.ai21.com/blog/announcing-jamba-model-family) to learn more.", + "context_length": 256000, + "architecture": { + "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], + "tokenizer": "Other", + "instruct_type": null + }, + "pricing": { + "prompt": "0.000002", + "completion": "0.000008", + "request": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -4265,16 +6131,20 @@ "context_length": 128000, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Other", "instruct_type": "phi3" }, "pricing": { "prompt": "0.0000001", "completion": "0.0000001", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -4290,25 +6160,29 @@ "name": "Nous: Hermes 3 70B Instruct", "created": 1723939200, "description": "Hermes 3 is a generalist language model with many improvements over [Hermes 2](/models/nousresearch/nous-hermes-2-mistral-7b-dpo), including advanced agentic capabilities, much better roleplaying, reasoning, multi-turn conversation, long context coherence, and improvements across the board.\n\nHermes 3 70B is a competitive, if not superior finetune of the [Llama-3.1 70B foundation model](/models/meta-llama/llama-3.1-70b-instruct), focused on aligning LLMs to the user, with powerful steering capabilities and control given to the end user.\n\nThe Hermes 3 series builds and expands on the Hermes 2 set of capabilities, including more powerful and reliable function calling and structured output capabilities, generalist assistant capabilities, and improved code generation skills.", - "context_length": 131000, + "context_length": 131072, "architecture": { "modality": "text->text", + 
"input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Llama3", "instruct_type": "chatml" }, "pricing": { "prompt": "0.00000012", "completion": "0.0000003", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, "top_provider": { - "context_length": 131000, - "max_completion_tokens": 131000, + "context_length": 131072, + "max_completion_tokens": 131072, "is_moderated": false }, "per_request_limits": null @@ -4318,25 +6192,29 @@ "name": "Nous: Hermes 3 405B Instruct", "created": 1723766400, "description": "Hermes 3 is a generalist language model with many improvements over Hermes 2, including advanced agentic capabilities, much better roleplaying, reasoning, multi-turn conversation, long context coherence, and improvements across the board.\n\nHermes 3 405B is a frontier-level, full-parameter finetune of the Llama-3.1 405B foundation model, focused on aligning LLMs to the user, with powerful steering capabilities and control given to the end user.\n\nThe Hermes 3 series builds and expands on the Hermes 2 set of capabilities, including more powerful and reliable function calling and structured output capabilities, generalist assistant capabilities, and improved code generation skills.\n\nHermes 3 is competitive, if not superior, to Llama-3.1 Instruct models at general capabilities, with varying strengths and weaknesses attributable between the two.", - "context_length": 131000, + "context_length": 131072, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Llama3", "instruct_type": "chatml" }, "pricing": { "prompt": "0.0000008", "completion": "0.0000008", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, "top_provider": { - "context_length": 131000, - "max_completion_tokens": 131000, + "context_length": 131072, + "max_completion_tokens": 131072, "is_moderated": false }, "per_request_limits": null @@ -4349,16 +6227,21 @@ "context_length": 128000, "architecture": { "modality": "text+image->text", + "input_modalities": [ + "text", + "image" + ], + "output_modalities": [ + "text" + ], "tokenizer": "GPT", "instruct_type": null }, "pricing": { "prompt": "0.000005", "completion": "0.000015", - "image": "0.007225", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0.007225", "web_search": "0", "internal_reasoning": "0" }, @@ -4369,34 +6252,6 @@ }, "per_request_limits": null }, - { - "id": "sao10k/l3-lunaris-8b", - "name": "Sao10K: Llama 3 8B Lunaris", - "created": 1723507200, - "description": "Lunaris 8B is a versatile generalist and roleplaying model based on Llama 3. 
It's a strategic merge of multiple models, designed to balance creativity with improved logic and general knowledge.\n\nCreated by [Sao10k](https://huggingface.co/Sao10k), this model aims to offer an improved experience over Stheno v3.2, with enhanced creativity and logical reasoning.\n\nFor best results, use with Llama 3 Instruct context template, temperature 1.4, and min_p 0.1.", - "context_length": 8192, - "architecture": { - "modality": "text->text", - "tokenizer": "Llama3", - "instruct_type": "llama3" - }, - "pricing": { - "prompt": "0.00000003", - "completion": "0.00000006", - "image": "0", - "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 8192, - "max_completion_tokens": 8192, - "is_moderated": false - }, - "per_request_limits": null - }, { "id": "aetherwiing/mn-starcannon-12b", "name": "Aetherwiing: Starcannon 12B", @@ -4405,16 +6260,20 @@ "context_length": 16384, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Mistral", "instruct_type": "chatml" }, "pricing": { "prompt": "0.0000008", "completion": "0.0000012", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -4425,6 +6284,38 @@ }, "per_request_limits": null }, + { + "id": "sao10k/l3-lunaris-8b", + "name": "Sao10K: Llama 3 8B Lunaris", + "created": 1723507200, + "description": "Lunaris 8B is a versatile generalist and roleplaying model based on Llama 3. It's a strategic merge of multiple models, designed to balance creativity with improved logic and general knowledge.\n\nCreated by [Sao10k](https://huggingface.co/Sao10k), this model aims to offer an improved experience over Stheno v3.2, with enhanced creativity and logical reasoning.\n\nFor best results, use with Llama 3 Instruct context template, temperature 1.4, and min_p 0.1.", + "context_length": 8192, + "architecture": { + "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], + "tokenizer": "Llama3", + "instruct_type": "llama3" + }, + "pricing": { + "prompt": "0.00000002", + "completion": "0.00000005", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 8192, + "max_completion_tokens": null, + "is_moderated": false + }, + "per_request_limits": null + }, { "id": "openai/gpt-4o-2024-08-06", "name": "OpenAI: GPT-4o (2024-08-06)", @@ -4433,18 +6324,24 @@ "context_length": 128000, "architecture": { "modality": "text+image->text", + "input_modalities": [ + "text", + "image" + ], + "output_modalities": [ + "text" + ], "tokenizer": "GPT", "instruct_type": null }, "pricing": { "prompt": "0.0000025", "completion": "0.00001", - "image": "0.003613", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0.003613", "web_search": "0", - "internal_reasoning": "0" + "internal_reasoning": "0", + "input_cache_read": "0.00000125" }, "top_provider": { "context_length": 128000, @@ -4461,16 +6358,20 @@ "context_length": 32768, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Llama3", "instruct_type": "none" }, "pricing": { "prompt": "0.000002", "completion": "0.000002", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + 
"image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -4489,16 +6390,20 @@ "context_length": 16384, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Mistral", "instruct_type": "chatml" }, "pricing": { "prompt": "0.0000008", "completion": "0.0000012", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -4509,62 +6414,6 @@ }, "per_request_limits": null }, - { - "id": "perplexity/llama-3.1-sonar-small-128k-chat", - "name": "Perplexity: Llama 3.1 Sonar 8B", - "created": 1722470400, - "description": "Llama 3.1 Sonar is Perplexity's latest model family. It surpasses their earlier Sonar models in cost-efficiency, speed, and performance.\n\nThis is a normal offline LLM, but the [online version](/models/perplexity/llama-3.1-sonar-small-128k-online) of this model has Internet access.", - "context_length": 131072, - "architecture": { - "modality": "text->text", - "tokenizer": "Llama3", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000002", - "completion": "0.0000002", - "image": "0", - "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 131072, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null - }, - { - "id": "perplexity/llama-3.1-sonar-large-128k-chat", - "name": "Perplexity: Llama 3.1 Sonar 70B", - "created": 1722470400, - "description": "Llama 3.1 Sonar is Perplexity's latest model family. It surpasses their earlier Sonar models in cost-efficiency, speed, and performance.\n\nThis is a normal offline LLM, but the [online version](/models/perplexity/llama-3.1-sonar-large-128k-online) of this model has Internet access.", - "context_length": 131072, - "architecture": { - "modality": "text->text", - "tokenizer": "Llama3", - "instruct_type": null - }, - "pricing": { - "prompt": "0.000001", - "completion": "0.000001", - "image": "0", - "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 131072, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null - }, { "id": "perplexity/llama-3.1-sonar-large-128k-online", "name": "Perplexity: Llama 3.1 Sonar 70B Online", @@ -4573,16 +6422,20 @@ "context_length": 127072, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Llama3", "instruct_type": null }, "pricing": { "prompt": "0.000001", "completion": "0.000001", - "image": "0", "request": "0.005", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -4601,16 +6454,20 @@ "context_length": 127072, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Llama3", "instruct_type": null }, "pricing": { "prompt": "0.0000002", "completion": "0.0000002", - "image": "0", "request": "0.005", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -4622,28 +6479,32 @@ "per_request_limits": null }, { - "id": "meta-llama/llama-3.1-405b-instruct", - "name": "Meta: Llama 3.1 405B Instruct", + "id": 
"meta-llama/llama-3.1-70b-instruct", + "name": "Meta: Llama 3.1 70B Instruct", "created": 1721692800, - "description": "The highly anticipated 400B class of Llama3 is here! Clocking in at 128k context with impressive eval scores, the Meta AI team continues to push the frontier of open-source LLMs.\n\nMeta's latest class of model (Llama 3.1) launched with a variety of sizes & flavors. This 405B instruct-tuned version is optimized for high quality dialogue usecases.\n\nIt has demonstrated strong performance compared to leading closed-source models including GPT-4o and Claude 3.5 Sonnet in evaluations.\n\nTo read more about the model release, [click here](https://ai.meta.com/blog/meta-llama-3-1/). Usage of this model is subject to [Meta's Acceptable Use Policy](https://llama.meta.com/llama3/use-policy/).", - "context_length": 32768, + "description": "Meta's latest class of model (Llama 3.1) launched with a variety of sizes & flavors. This 70B instruct-tuned version is optimized for high quality dialogue usecases.\n\nIt has demonstrated strong performance compared to leading closed-source models in human evaluations.\n\nTo read more about the model release, [click here](https://ai.meta.com/blog/meta-llama-3-1/). Usage of this model is subject to [Meta's Acceptable Use Policy](https://llama.meta.com/llama3/use-policy/).", + "context_length": 131072, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Llama3", "instruct_type": "llama3" }, "pricing": { - "prompt": "0.0000008", - "completion": "0.0000008", - "image": "0", + "prompt": "0.00000012", + "completion": "0.00000028", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, "top_provider": { - "context_length": 32768, + "context_length": 131072, "max_completion_tokens": 8192, "is_moderated": false }, @@ -4657,22 +6518,26 @@ "context_length": 131072, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Llama3", "instruct_type": "llama3" }, "pricing": { "prompt": "0", "completion": "0", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, "top_provider": { "context_length": 131072, - "max_completion_tokens": null, + "max_completion_tokens": 4096, "is_moderated": false }, "per_request_limits": null @@ -4685,16 +6550,20 @@ "context_length": 131072, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Llama3", "instruct_type": "llama3" }, "pricing": { "prompt": "0.00000002", - "completion": "0.00000005", - "image": "0", + "completion": "0.000000045", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -4706,28 +6575,32 @@ "per_request_limits": null }, { - "id": "meta-llama/llama-3.1-70b-instruct", - "name": "Meta: Llama 3.1 70B Instruct", + "id": "meta-llama/llama-3.1-405b-instruct", + "name": "Meta: Llama 3.1 405B Instruct", "created": 1721692800, - "description": "Meta's latest class of model (Llama 3.1) launched with a variety of sizes & flavors. 
This 70B instruct-tuned version is optimized for high quality dialogue usecases.\n\nIt has demonstrated strong performance compared to leading closed-source models in human evaluations.\n\nTo read more about the model release, [click here](https://ai.meta.com/blog/meta-llama-3-1/). Usage of this model is subject to [Meta's Acceptable Use Policy](https://llama.meta.com/llama3/use-policy/).", - "context_length": 131072, + "description": "The highly anticipated 400B class of Llama3 is here! Clocking in at 128k context with impressive eval scores, the Meta AI team continues to push the frontier of open-source LLMs.\n\nMeta's latest class of model (Llama 3.1) launched with a variety of sizes & flavors. This 405B instruct-tuned version is optimized for high quality dialogue usecases.\n\nIt has demonstrated strong performance compared to leading closed-source models including GPT-4o and Claude 3.5 Sonnet in evaluations.\n\nTo read more about the model release, [click here](https://ai.meta.com/blog/meta-llama-3-1/). Usage of this model is subject to [Meta's Acceptable Use Policy](https://llama.meta.com/llama3/use-policy/).", + "context_length": 32768, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Llama3", "instruct_type": "llama3" }, "pricing": { - "prompt": "0.00000012", - "completion": "0.0000003", - "image": "0", + "prompt": "0.0000008", + "completion": "0.0000008", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, "top_provider": { - "context_length": 131072, + "context_length": 32768, "max_completion_tokens": 8192, "is_moderated": false }, @@ -4741,16 +6614,20 @@ "context_length": 128000, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Mistral", "instruct_type": "mistral" }, "pricing": { "prompt": "0", "completion": "0", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -4769,16 +6646,20 @@ "context_length": 131072, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Mistral", "instruct_type": "mistral" }, "pricing": { "prompt": "0.000000035", "completion": "0.00000008", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -4794,57 +6675,33 @@ "name": "Mistral: Codestral Mamba", "created": 1721347200, "description": "A 7.3B parameter Mamba-based model designed for code and reasoning tasks.\n\n- Linear time inference, allowing for theoretically infinite sequence lengths\n- 256k token context window\n- Optimized for quick responses, especially beneficial for code productivity\n- Performs comparably to state-of-the-art transformer models in code and reasoning tasks\n- Available under the Apache 2.0 license for free use, modification, and distribution", - "context_length": 256000, + "context_length": 262144, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Mistral", "instruct_type": null }, "pricing": { "prompt": "0.00000025", "completion": "0.00000025", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", 
"internal_reasoning": "0" }, "top_provider": { - "context_length": 256000, + "context_length": 262144, "max_completion_tokens": null, "is_moderated": false }, "per_request_limits": null }, - { - "id": "openai/gpt-4o-mini", - "name": "OpenAI: GPT-4o-mini", - "created": 1721260800, - "description": "GPT-4o mini is OpenAI's newest model after [GPT-4 Omni](/models/openai/gpt-4o), supporting both text and image inputs with text outputs.\n\nAs their most advanced small model, it is many multiples more affordable than other recent frontier models, and more than 60% cheaper than [GPT-3.5 Turbo](/models/openai/gpt-3.5-turbo). It maintains SOTA intelligence, while being significantly more cost-effective.\n\nGPT-4o mini achieves an 82% score on MMLU and presently ranks higher than GPT-4 on chat preferences [common leaderboards](https://arena.lmsys.org/).\n\nCheck out the [launch announcement](https://openai.com/index/gpt-4o-mini-advancing-cost-efficient-intelligence/) to learn more.\n\n#multimodal", - "context_length": 128000, - "architecture": { - "modality": "text+image->text", - "tokenizer": "GPT", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00000015", - "completion": "0.0000006", - "image": "0.000217", - "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 128000, - "max_completion_tokens": 16384, - "is_moderated": true - }, - "per_request_limits": null - }, { "id": "openai/gpt-4o-mini-2024-07-18", "name": "OpenAI: GPT-4o-mini (2024-07-18)", @@ -4853,18 +6710,24 @@ "context_length": 128000, "architecture": { "modality": "text+image->text", + "input_modalities": [ + "text", + "image" + ], + "output_modalities": [ + "text" + ], "tokenizer": "GPT", "instruct_type": null }, "pricing": { "prompt": "0.00000015", "completion": "0.0000006", - "image": "0.007225", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0.007225", "web_search": "0", - "internal_reasoning": "0" + "internal_reasoning": "0", + "input_cache_read": "0.000000075" }, "top_provider": { "context_length": 128000, @@ -4874,58 +6737,36 @@ "per_request_limits": null }, { - "id": "qwen/qwen-2-7b-instruct:free", - "name": "Qwen 2 7B Instruct (free)", - "created": 1721088000, - "description": "Qwen2 7B is a transformer-based model that excels in language understanding, multilingual capabilities, coding, mathematics, and reasoning.\n\nIt features SwiGLU activation, attention QKV bias, and group query attention. It is pretrained on extensive data with supervised finetuning and direct preference optimization.\n\nFor more details, see this [blog post](https://qwenlm.github.io/blog/qwen2/) and [GitHub repo](https://github.com/QwenLM/Qwen2).\n\nUsage of this model is subject to [Tongyi Qianwen LICENSE AGREEMENT](https://huggingface.co/Qwen/Qwen1.5-110B-Chat/blob/main/LICENSE).", - "context_length": 8192, + "id": "openai/gpt-4o-mini", + "name": "OpenAI: GPT-4o-mini", + "created": 1721260800, + "description": "GPT-4o mini is OpenAI's newest model after [GPT-4 Omni](/models/openai/gpt-4o), supporting both text and image inputs with text outputs.\n\nAs their most advanced small model, it is many multiples more affordable than other recent frontier models, and more than 60% cheaper than [GPT-3.5 Turbo](/models/openai/gpt-3.5-turbo). 
It maintains SOTA intelligence, while being significantly more cost-effective.\n\nGPT-4o mini achieves an 82% score on MMLU and presently ranks higher than GPT-4 on chat preferences [common leaderboards](https://arena.lmsys.org/).\n\nCheck out the [launch announcement](https://openai.com/index/gpt-4o-mini-advancing-cost-efficient-intelligence/) to learn more.\n\n#multimodal", + "context_length": 128000, "architecture": { - "modality": "text->text", - "tokenizer": "Qwen", - "instruct_type": "chatml" + "modality": "text+image->text", + "input_modalities": [ + "text", + "image" + ], + "output_modalities": [ + "text" + ], + "tokenizer": "GPT", + "instruct_type": null }, "pricing": { - "prompt": "0", - "completion": "0", - "image": "0", + "prompt": "0.00000015", + "completion": "0.0000006", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0.000217", "web_search": "0", - "internal_reasoning": "0" + "internal_reasoning": "0", + "input_cache_read": "0.000000075" }, "top_provider": { - "context_length": 8192, - "max_completion_tokens": 4096, - "is_moderated": false - }, - "per_request_limits": null - }, - { - "id": "qwen/qwen-2-7b-instruct", - "name": "Qwen 2 7B Instruct", - "created": 1721088000, - "description": "Qwen2 7B is a transformer-based model that excels in language understanding, multilingual capabilities, coding, mathematics, and reasoning.\n\nIt features SwiGLU activation, attention QKV bias, and group query attention. It is pretrained on extensive data with supervised finetuning and direct preference optimization.\n\nFor more details, see this [blog post](https://qwenlm.github.io/blog/qwen2/) and [GitHub repo](https://github.com/QwenLM/Qwen2).\n\nUsage of this model is subject to [Tongyi Qianwen LICENSE AGREEMENT](https://huggingface.co/Qwen/Qwen1.5-110B-Chat/blob/main/LICENSE).", - "context_length": 32768, - "architecture": { - "modality": "text->text", - "tokenizer": "Qwen", - "instruct_type": "chatml" - }, - "pricing": { - "prompt": "0.000000054", - "completion": "0.000000054", - "image": "0", - "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 32768, - "max_completion_tokens": null, - "is_moderated": false + "context_length": 128000, + "max_completion_tokens": 16384, + "is_moderated": true }, "per_request_limits": null }, @@ -4937,22 +6778,26 @@ "context_length": 8192, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Gemini", "instruct_type": "gemma" }, "pricing": { - "prompt": "0.00000027", - "completion": "0.00000027", - "image": "0", + "prompt": "0.0000008", + "completion": "0.0000008", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, "top_provider": { "context_length": 8192, - "max_completion_tokens": 8192, + "max_completion_tokens": 2048, "is_moderated": false }, "per_request_limits": null @@ -4965,16 +6810,20 @@ "context_length": 16384, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Qwen", "instruct_type": "chatml" }, "pricing": { - "prompt": "0.000001875", + "prompt": "0.0000015", "completion": "0.00000225", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -4993,22 
+6842,26 @@ "context_length": 8192, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Gemini", "instruct_type": "gemma" }, "pricing": { "prompt": "0", "completion": "0", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, "top_provider": { "context_length": 8192, - "max_completion_tokens": 4096, + "max_completion_tokens": 8192, "is_moderated": false }, "per_request_limits": null @@ -5021,50 +6874,26 @@ "context_length": 8192, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Gemini", "instruct_type": "gemma" }, "pricing": { - "prompt": "0.00000003", - "completion": "0.00000006", - "image": "0", + "prompt": "0.00000007", + "completion": "0.00000007", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, "top_provider": { "context_length": 8192, - "max_completion_tokens": 8192, - "is_moderated": false - }, - "per_request_limits": null - }, - { - "id": "01-ai/yi-large", - "name": "01.AI: Yi Large", - "created": 1719273600, - "description": "The Yi Large model was designed by 01.AI with the following usecases in mind: knowledge search, data classification, human-like chat bots, and customer service.\n\nIt stands out for its multilingual proficiency, particularly in Spanish, Chinese, Japanese, German, and French.\n\nCheck out the [launch announcement](https://01-ai.github.io/blog/01.ai-yi-large-llm-launch) to learn more.", - "context_length": 32768, - "architecture": { - "modality": "text->text", - "tokenizer": "Yi", - "instruct_type": null - }, - "pricing": { - "prompt": "0.000003", - "completion": "0.000003", - "image": "0", - "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 32768, - "max_completion_tokens": 4096, + "max_completion_tokens": null, "is_moderated": false }, "per_request_limits": null @@ -5077,16 +6906,20 @@ "context_length": 256000, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Other", "instruct_type": null }, "pricing": { "prompt": "0.0000005", "completion": "0.0000007", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -5097,6 +6930,38 @@ }, "per_request_limits": null }, + { + "id": "01-ai/yi-large", + "name": "01.AI: Yi Large", + "created": 1719273600, + "description": "The Yi Large model was designed by 01.AI with the following usecases in mind: knowledge search, data classification, human-like chat bots, and customer service.\n\nIt stands out for its multilingual proficiency, particularly in Spanish, Chinese, Japanese, German, and French.\n\nCheck out the [launch announcement](https://01-ai.github.io/blog/01.ai-yi-large-llm-launch) to learn more.", + "context_length": 32768, + "architecture": { + "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], + "tokenizer": "Yi", + "instruct_type": null + }, + "pricing": { + "prompt": "0.000003", + "completion": "0.000003", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + 
"context_length": 32768, + "max_completion_tokens": 4096, + "is_moderated": false + }, + "per_request_limits": null + }, { "id": "anthropic/claude-3.5-sonnet-20240620:beta", "name": "Anthropic: Claude 3.5 Sonnet (2024-06-20) (self-moderated)", @@ -5105,18 +6970,25 @@ "context_length": 200000, "architecture": { "modality": "text+image->text", + "input_modalities": [ + "text", + "image" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Claude", "instruct_type": null }, "pricing": { "prompt": "0.000003", "completion": "0.000015", - "image": "0.0048", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0.0048", "web_search": "0", - "internal_reasoning": "0" + "internal_reasoning": "0", + "input_cache_read": "0.0000003", + "input_cache_write": "0.00000375" }, "top_provider": { "context_length": 200000, @@ -5133,18 +7005,25 @@ "context_length": 200000, "architecture": { "modality": "text+image->text", + "input_modalities": [ + "text", + "image" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Claude", "instruct_type": null }, "pricing": { "prompt": "0.000003", "completion": "0.000015", - "image": "0.0048", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0.0048", "web_search": "0", - "internal_reasoning": "0" + "internal_reasoning": "0", + "input_cache_read": "0.0000003", + "input_cache_write": "0.00000375" }, "top_provider": { "context_length": 200000, @@ -5161,16 +7040,20 @@ "context_length": 8192, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Llama3", "instruct_type": "llama3" }, "pricing": { - "prompt": "0.0000007", - "completion": "0.0000008", - "image": "0", + "prompt": "0.00000148", + "completion": "0.00000148", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -5189,16 +7072,20 @@ "context_length": 16000, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Mistral", "instruct_type": "chatml" }, "pricing": { "prompt": "0.0000009", "completion": "0.0000009", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -5217,16 +7104,20 @@ "context_length": 32768, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Qwen", "instruct_type": "chatml" }, "pricing": { "prompt": "0.0000009", "completion": "0.0000009", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -5237,62 +7128,6 @@ }, "per_request_limits": null }, - { - "id": "mistralai/mistral-7b-instruct:free", - "name": "Mistral: Mistral 7B Instruct (free)", - "created": 1716768000, - "description": "A high-performing, industry-standard 7.3B parameter model, with optimizations for speed and context length.\n\n*Mistral 7B Instruct has multiple version variants, and this is intended to be the latest version.*", - "context_length": 8192, - "architecture": { - "modality": "text->text", - "tokenizer": "Mistral", - "instruct_type": "mistral" - }, - "pricing": { - "prompt": "0", - "completion": "0", - "image": "0", - "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", - "web_search": "0", - "internal_reasoning": "0" 
- }, - "top_provider": { - "context_length": 8192, - "max_completion_tokens": 4096, - "is_moderated": false - }, - "per_request_limits": null - }, - { - "id": "mistralai/mistral-7b-instruct", - "name": "Mistral: Mistral 7B Instruct", - "created": 1716768000, - "description": "A high-performing, industry-standard 7.3B parameter model, with optimizations for speed and context length.\n\n*Mistral 7B Instruct has multiple version variants, and this is intended to be the latest version.*", - "context_length": 32768, - "architecture": { - "modality": "text->text", - "tokenizer": "Mistral", - "instruct_type": "mistral" - }, - "pricing": { - "prompt": "0.00000003", - "completion": "0.000000055", - "image": "0", - "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 32768, - "max_completion_tokens": 8192, - "is_moderated": false - }, - "per_request_limits": null - }, { "id": "mistralai/mistral-7b-instruct-v0.3", "name": "Mistral: Mistral 7B Instruct v0.3", @@ -5301,16 +7136,20 @@ "context_length": 32768, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Mistral", "instruct_type": "mistral" }, "pricing": { - "prompt": "0.00000003", + "prompt": "0.00000004", "completion": "0.000000055", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -5326,53 +7165,93 @@ "name": "NousResearch: Hermes 2 Pro - Llama-3 8B", "created": 1716768000, "description": "Hermes 2 Pro is an upgraded, retrained version of Nous Hermes 2, consisting of an updated and cleaned version of the OpenHermes 2.5 Dataset, as well as a newly introduced Function Calling and JSON Mode dataset developed in-house.", - "context_length": 131000, + "context_length": 131072, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Llama3", "instruct_type": "chatml" }, "pricing": { "prompt": "0.000000025", "completion": "0.00000004", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, "top_provider": { - "context_length": 131000, - "max_completion_tokens": 131000, + "context_length": 131072, + "max_completion_tokens": 131072, "is_moderated": false }, "per_request_limits": null }, { - "id": "microsoft/phi-3-mini-128k-instruct:free", - "name": "Microsoft: Phi-3 Mini 128K Instruct (free)", - "created": 1716681600, - "description": "Phi-3 Mini is a powerful 3.8B parameter model designed for advanced language understanding, reasoning, and instruction following. Optimized through supervised fine-tuning and preference adjustments, it excels in tasks involving common sense, mathematics, logical reasoning, and code processing.\n\nAt time of release, Phi-3 Medium demonstrated state-of-the-art performance among lightweight models. 
This model is static, trained on an offline dataset with an October 2023 cutoff date.", - "context_length": 8192, + "id": "mistralai/mistral-7b-instruct:free", + "name": "Mistral: Mistral 7B Instruct (free)", + "created": 1716768000, + "description": "A high-performing, industry-standard 7.3B parameter model, with optimizations for speed and context length.\n\n*Mistral 7B Instruct has multiple version variants, and this is intended to be the latest version.*", + "context_length": 32768, "architecture": { "modality": "text->text", - "tokenizer": "Other", - "instruct_type": "phi3" + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], + "tokenizer": "Mistral", + "instruct_type": "mistral" }, "pricing": { "prompt": "0", "completion": "0", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, "top_provider": { - "context_length": 8192, - "max_completion_tokens": 4096, + "context_length": 32768, + "max_completion_tokens": 8192, + "is_moderated": false + }, + "per_request_limits": null + }, + { + "id": "mistralai/mistral-7b-instruct", + "name": "Mistral: Mistral 7B Instruct", + "created": 1716768000, + "description": "A high-performing, industry-standard 7.3B parameter model, with optimizations for speed and context length.\n\n*Mistral 7B Instruct has multiple version variants, and this is intended to be the latest version.*", + "context_length": 32768, + "architecture": { + "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], + "tokenizer": "Mistral", + "instruct_type": "mistral" + }, + "pricing": { + "prompt": "0.00000003", + "completion": "0.000000055", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 32768, + "max_completion_tokens": 8192, "is_moderated": false }, "per_request_limits": null @@ -5385,16 +7264,20 @@ "context_length": 128000, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Other", "instruct_type": "phi3" }, "pricing": { "prompt": "0.0000001", "completion": "0.0000001", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -5405,34 +7288,6 @@ }, "per_request_limits": null }, - { - "id": "microsoft/phi-3-medium-128k-instruct:free", - "name": "Microsoft: Phi-3 Medium 128K Instruct (free)", - "created": 1716508800, - "description": "Phi-3 128K Medium is a powerful 14-billion parameter model designed for advanced language understanding, reasoning, and instruction following. Optimized through supervised fine-tuning and preference adjustments, it excels in tasks involving common sense, mathematics, logical reasoning, and code processing.\n\nAt time of release, Phi-3 Medium demonstrated state-of-the-art performance among lightweight models. 
In the MMLU-Pro eval, the model even comes close to a Llama3 70B level of performance.\n\nFor 4k context length, try [Phi-3 Medium 4K](/models/microsoft/phi-3-medium-4k-instruct).", - "context_length": 8192, - "architecture": { - "modality": "text->text", - "tokenizer": "Other", - "instruct_type": "phi3" - }, - "pricing": { - "prompt": "0", - "completion": "0", - "image": "0", - "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 8192, - "max_completion_tokens": 4096, - "is_moderated": false - }, - "per_request_limits": null - }, { "id": "microsoft/phi-3-medium-128k-instruct", "name": "Microsoft: Phi-3 Medium 128K Instruct", @@ -5441,16 +7296,20 @@ "context_length": 128000, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Other", "instruct_type": "phi3" }, "pricing": { "prompt": "0.000001", "completion": "0.000001", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -5469,16 +7328,20 @@ "context_length": 8192, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Llama3", "instruct_type": "llama3" }, "pricing": { "prompt": "0.000003375", "completion": "0.0000045", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -5491,24 +7354,31 @@ }, { "id": "google/gemini-flash-1.5", - "name": "Google: Gemini Flash 1.5", + "name": "Google: Gemini 1.5 Flash ", "created": 1715644800, "description": "Gemini 1.5 Flash is a foundation model that performs well at a variety of multimodal tasks such as visual understanding, classification, summarization, and creating content from image, audio and video. It's adept at processing visual and text inputs such as photographs, documents, infographics, and screenshots.\n\nGemini 1.5 Flash is designed for high-volume, high-frequency tasks where cost and latency matter. On most common tasks, Flash achieves comparable quality to other Gemini Pro models at a significantly reduced cost. 
Flash is well-suited for applications like chat assistants and on-demand content generation where speed and scale matter.\n\nUsage of Gemini is subject to Google's [Gemini Terms of Use](https://ai.google.dev/terms).\n\n#multimodal", "context_length": 1000000, "architecture": { "modality": "text+image->text", + "input_modalities": [ + "text", + "image" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Gemini", "instruct_type": null }, "pricing": { "prompt": "0.000000075", "completion": "0.0000003", - "image": "0.00004", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0.00004", "web_search": "0", - "internal_reasoning": "0" + "internal_reasoning": "0", + "input_cache_read": "0.00000001875", + "input_cache_write": "0.0000001583" }, "top_provider": { "context_length": 1000000, @@ -5525,16 +7395,21 @@ "context_length": 128000, "architecture": { "modality": "text+image->text", + "input_modalities": [ + "text", + "image" + ], + "output_modalities": [ + "text" + ], "tokenizer": "GPT", "instruct_type": null }, "pricing": { "prompt": "0.000005", "completion": "0.000015", - "image": "0.007225", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0.007225", "web_search": "0", "internal_reasoning": "0" }, @@ -5553,16 +7428,20 @@ "context_length": 8192, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Llama3", "instruct_type": "none" }, "pricing": { "prompt": "0.0000002", "completion": "0.0000002", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -5581,18 +7460,24 @@ "context_length": 128000, "architecture": { "modality": "text+image->text", + "input_modalities": [ + "text", + "image" + ], + "output_modalities": [ + "text" + ], "tokenizer": "GPT", "instruct_type": null }, "pricing": { "prompt": "0.0000025", "completion": "0.00001", - "image": "0.003613", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0.003613", "web_search": "0", - "internal_reasoning": "0" + "internal_reasoning": "0", + "input_cache_read": "0.00000125" }, "top_provider": { "context_length": 128000, @@ -5609,16 +7494,21 @@ "context_length": 128000, "architecture": { "modality": "text+image->text", + "input_modalities": [ + "text", + "image" + ], + "output_modalities": [ + "text" + ], "tokenizer": "GPT", "instruct_type": null }, "pricing": { "prompt": "0.000006", "completion": "0.000018", - "image": "0.007225", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0.007225", "web_search": "0", "internal_reasoning": "0" }, @@ -5637,16 +7527,20 @@ "context_length": 24576, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Llama3", "instruct_type": "llama3" }, "pricing": { - "prompt": "0.0000001875", - "completion": "0.000001125", - "image": "0", + "prompt": "0.00000009375", + "completion": "0.00000075", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -5665,16 +7559,20 @@ "context_length": 24576, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Llama3", "instruct_type": "llama3" }, "pricing": { - "prompt": "0.0000001875", - "completion": "0.000001125", - 
"image": "0", + "prompt": "0.00000009375", + "completion": "0.00000075", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -5693,16 +7591,20 @@ "context_length": 4096, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Llama2", "instruct_type": "alpaca" }, "pricing": { "prompt": "0.0000008", "completion": "0.0000012", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -5713,34 +7615,6 @@ }, "per_request_limits": null }, - { - "id": "meta-llama/llama-3-8b-instruct:free", - "name": "Meta: Llama 3 8B Instruct (free)", - "created": 1713398400, - "description": "Meta's latest class of model (Llama 3) launched with a variety of sizes & flavors. This 8B instruct-tuned version was optimized for high quality dialogue usecases.\n\nIt has demonstrated strong performance compared to leading closed-source models in human evaluations.\n\nTo read more about the model release, [click here](https://ai.meta.com/blog/meta-llama-3/). Usage of this model is subject to [Meta's Acceptable Use Policy](https://llama.meta.com/llama3/use-policy/).", - "context_length": 8192, - "architecture": { - "modality": "text->text", - "tokenizer": "Llama3", - "instruct_type": "llama3" - }, - "pricing": { - "prompt": "0", - "completion": "0", - "image": "0", - "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 8192, - "max_completion_tokens": 4096, - "is_moderated": false - }, - "per_request_limits": null - }, { "id": "meta-llama/llama-3-8b-instruct", "name": "Meta: Llama 3 8B Instruct", @@ -5749,16 +7623,20 @@ "context_length": 8192, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Llama3", "instruct_type": "llama3" }, "pricing": { "prompt": "0.00000003", "completion": "0.00000006", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -5777,16 +7655,20 @@ "context_length": 8192, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Llama3", "instruct_type": "llama3" }, "pricing": { - "prompt": "0.00000023", + "prompt": "0.0000003", "completion": "0.0000004", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -5805,16 +7687,20 @@ "context_length": 65536, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Mistral", "instruct_type": "mistral" }, "pricing": { "prompt": "0.0000009", "completion": "0.0000009", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -5833,16 +7719,20 @@ "context_length": 65536, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Mistral", "instruct_type": "vicuna" }, "pricing": { "prompt": "0.0000005", "completion": "0.0000005", - "image": "0", "request": "0", - "input_cache_read": "0", - 
"input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -5861,16 +7751,20 @@ "context_length": 32000, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Mistral", "instruct_type": "vicuna" }, "pricing": { "prompt": "0.00000007", "completion": "0.00000007", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -5883,22 +7777,27 @@ }, { "id": "google/gemini-pro-1.5", - "name": "Google: Gemini Pro 1.5", + "name": "Google: Gemini 1.5 Pro", "created": 1712620800, "description": "Google's latest multimodal model, supports image and video[0] in text or chat prompts.\n\nOptimized for language tasks including:\n\n- Code generation\n- Text generation\n- Text editing\n- Problem solving\n- Recommendations\n- Information extraction\n- Data extraction or generation\n- AI agents\n\nUsage of Gemini is subject to Google's [Gemini Terms of Use](https://ai.google.dev/terms).\n\n* [0]: Video input is not available through OpenRouter at this time.", "context_length": 2000000, "architecture": { "modality": "text+image->text", + "input_modalities": [ + "text", + "image" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Gemini", "instruct_type": null }, "pricing": { "prompt": "0.00000125", "completion": "0.000005", - "image": "0.0006575", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0.0006575", "web_search": "0", "internal_reasoning": "0" }, @@ -5917,16 +7816,21 @@ "context_length": 128000, "architecture": { "modality": "text+image->text", + "input_modalities": [ + "text", + "image" + ], + "output_modalities": [ + "text" + ], "tokenizer": "GPT", "instruct_type": null }, "pricing": { "prompt": "0.00001", "completion": "0.00003", - "image": "0.01445", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0.01445", "web_search": "0", "internal_reasoning": "0" }, @@ -5945,16 +7849,20 @@ "context_length": 128000, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Cohere", "instruct_type": null }, "pricing": { - "prompt": "0.00000285", - "completion": "0.00001425", - "image": "0", + "prompt": "0.000003", + "completion": "0.000015", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -5973,16 +7881,20 @@ "context_length": 128000, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Cohere", "instruct_type": null }, "pricing": { - "prompt": "0.00000285", - "completion": "0.00001425", - "image": "0", + "prompt": "0.000003", + "completion": "0.000015", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -6001,16 +7913,20 @@ "context_length": 4096, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Llama2", "instruct_type": "airoboros" }, "pricing": { "prompt": "0.0000008", "completion": "0.0000008", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -6029,16 +7945,20 @@ "context_length": 4096, "architecture": { 
"modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Cohere", "instruct_type": null }, "pricing": { - "prompt": "0.00000095", - "completion": "0.0000019", - "image": "0", + "prompt": "0.000001", + "completion": "0.000002", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -6057,16 +7977,20 @@ "context_length": 128000, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Cohere", "instruct_type": null }, "pricing": { - "prompt": "0.000000475", - "completion": "0.000001425", - "image": "0", + "prompt": "0.0000005", + "completion": "0.0000015", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -6085,18 +8009,25 @@ "context_length": 200000, "architecture": { "modality": "text+image->text", + "input_modalities": [ + "text", + "image" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Claude", "instruct_type": null }, "pricing": { "prompt": "0.00000025", "completion": "0.00000125", - "image": "0.0004", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0.0004", "web_search": "0", - "internal_reasoning": "0" + "internal_reasoning": "0", + "input_cache_read": "0.00000003", + "input_cache_write": "0.0000003" }, "top_provider": { "context_length": 200000, @@ -6113,18 +8044,25 @@ "context_length": 200000, "architecture": { "modality": "text+image->text", + "input_modalities": [ + "text", + "image" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Claude", "instruct_type": null }, "pricing": { "prompt": "0.00000025", "completion": "0.00000125", - "image": "0.0004", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0.0004", "web_search": "0", - "internal_reasoning": "0" + "internal_reasoning": "0", + "input_cache_read": "0.00000003", + "input_cache_write": "0.0000003" }, "top_provider": { "context_length": 200000, @@ -6141,18 +8079,25 @@ "context_length": 200000, "architecture": { "modality": "text+image->text", + "input_modalities": [ + "text", + "image" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Claude", "instruct_type": null }, "pricing": { "prompt": "0.000015", "completion": "0.000075", - "image": "0.024", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0.024", "web_search": "0", - "internal_reasoning": "0" + "internal_reasoning": "0", + "input_cache_read": "0.0000015", + "input_cache_write": "0.00001875" }, "top_provider": { "context_length": 200000, @@ -6169,18 +8114,25 @@ "context_length": 200000, "architecture": { "modality": "text+image->text", + "input_modalities": [ + "text", + "image" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Claude", "instruct_type": null }, "pricing": { "prompt": "0.000015", "completion": "0.000075", - "image": "0.024", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0.024", "web_search": "0", - "internal_reasoning": "0" + "internal_reasoning": "0", + "input_cache_read": "0.0000015", + "input_cache_write": "0.00001875" }, "top_provider": { "context_length": 200000, @@ -6197,18 +8149,25 @@ "context_length": 200000, "architecture": { "modality": "text+image->text", + "input_modalities": [ + "text", + "image" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Claude", 
"instruct_type": null }, "pricing": { "prompt": "0.000003", "completion": "0.000015", - "image": "0.0048", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0.0048", "web_search": "0", - "internal_reasoning": "0" + "internal_reasoning": "0", + "input_cache_read": "0.0000003", + "input_cache_write": "0.00000375" }, "top_provider": { "context_length": 200000, @@ -6225,18 +8184,25 @@ "context_length": 200000, "architecture": { "modality": "text+image->text", + "input_modalities": [ + "text", + "image" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Claude", "instruct_type": null }, "pricing": { "prompt": "0.000003", "completion": "0.000015", - "image": "0.0048", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0.0048", "web_search": "0", - "internal_reasoning": "0" + "internal_reasoning": "0", + "input_cache_read": "0.0000003", + "input_cache_write": "0.00000375" }, "top_provider": { "context_length": 200000, @@ -6253,16 +8219,20 @@ "context_length": 128000, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Cohere", "instruct_type": null }, "pricing": { - "prompt": "0.000000475", - "completion": "0.000001425", - "image": "0", + "prompt": "0.0000005", + "completion": "0.0000015", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -6281,16 +8251,20 @@ "context_length": 128000, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Mistral", "instruct_type": null }, "pricing": { "prompt": "0.000002", "completion": "0.000006", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -6301,34 +8275,6 @@ }, "per_request_limits": null }, - { - "id": "google/gemma-7b-it", - "name": "Google: Gemma 7B", - "created": 1708560000, - "description": "Gemma by Google is an advanced, open-source language model family, leveraging the latest in decoder-only, text-to-text technology. It offers English language capabilities across text generation tasks like question answering, summarization, and reasoning. 
The Gemma 7B variant is comparable in performance to leading open source models.\n\nUsage of Gemma is subject to Google's [Gemma Terms of Use](https://ai.google.dev/gemma/terms).", - "context_length": 8192, - "architecture": { - "modality": "text->text", - "tokenizer": "Gemini", - "instruct_type": "gemma" - }, - "pricing": { - "prompt": "0.00000015", - "completion": "0.00000015", - "image": "0", - "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 8192, - "max_completion_tokens": null, - "is_moderated": false - }, - "per_request_limits": null - }, { "id": "openai/gpt-3.5-turbo-0613", "name": "OpenAI: GPT-3.5 Turbo (older v0613)", "created": 1706140800, @@ -6337,16 +8283,20 @@ "context_length": 4095, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "GPT", "instruct_type": null }, "pricing": { "prompt": "0.000001", "completion": "0.000002", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -6365,16 +8315,20 @@ "context_length": 128000, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "GPT", "instruct_type": null }, "pricing": { "prompt": "0.00001", "completion": "0.00003", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -6393,16 +8347,20 @@ "context_length": 32768, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Mistral", "instruct_type": "chatml" }, "pricing": { "prompt": "0.0000006", "completion": "0.0000006", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -6414,28 +8372,32 @@ "per_request_limits": null }, { - "id": "mistralai/mistral-small", - "name": "Mistral Small", + "id": "mistralai/mistral-medium", + "name": "Mistral Medium", "created": 1704844800, - "description": "With 22 billion parameters, Mistral Small v24.09 offers a convenient mid-point between (Mistral NeMo 12B)[/mistralai/mistral-nemo] and (Mistral Large 2)[/mistralai/mistral-large], providing a cost-effective solution that can be deployed across various platforms and environments. It has better reasoning, exhibits more capabilities, can produce and reason about code, and is multiligual, supporting English, French, German, Italian, and Spanish.", - "context_length": 32000, + "description": "This is Mistral AI's closed-source, medium-sized model. It's powered by a closed-source prototype and excels at reasoning, code, JSON, chat, and more. 
In benchmarks, it compares with many of the flagship models of other companies.", + "context_length": 32768, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Mistral", "instruct_type": null }, "pricing": { - "prompt": "0.0000002", - "completion": "0.0000006", - "image": "0", + "prompt": "0.00000275", + "completion": "0.0000081", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, "top_provider": { - "context_length": 32000, + "context_length": 32768, "max_completion_tokens": null, "is_moderated": false }, @@ -6445,53 +8407,61 @@ "id": "mistralai/mistral-tiny", "name": "Mistral Tiny", "created": 1704844800, - "description": "This model is currently powered by Mistral-7B-v0.2, and incorporates a \"better\" fine-tuning than [Mistral 7B](/models/mistralai/mistral-7b-instruct-v0.1), inspired by community work. It's best used for large batch processing tasks where cost is a significant factor but reasoning capabilities are not crucial.", - "context_length": 32000, + "description": "Note: This model is being deprecated. Recommended replacement is the newer [Ministral 8B](/mistral/ministral-8b).\n\nThis model is currently powered by Mistral-7B-v0.2, and incorporates a \"better\" fine-tuning than [Mistral 7B](/models/mistralai/mistral-7b-instruct-v0.1), inspired by community work. It's best used for large batch processing tasks where cost is a significant factor but reasoning capabilities are not crucial.", + "context_length": 32768, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Mistral", "instruct_type": null }, "pricing": { "prompt": "0.00000025", "completion": "0.00000025", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, "top_provider": { - "context_length": 32000, + "context_length": 32768, "max_completion_tokens": null, "is_moderated": false }, "per_request_limits": null }, { - "id": "mistralai/mistral-medium", - "name": "Mistral Medium", + "id": "mistralai/mistral-small", + "name": "Mistral Small", "created": 1704844800, - "description": "This is Mistral AI's closed-source, medium-sided model. It's powered by a closed-source prototype and excels at reasoning, code, JSON, chat, and more. In benchmarks, it compares with many of the flagship models of other companies.", - "context_length": 32000, + "description": "With 22 billion parameters, Mistral Small v24.09 offers a convenient mid-point between [Mistral NeMo 12B](/mistralai/mistral-nemo) and [Mistral Large 2](/mistralai/mistral-large), providing a cost-effective solution that can be deployed across various platforms and environments. 
It has better reasoning, exhibits more capabilities, can produce and reason about code, and is multilingual, supporting English, French, German, Italian, and Spanish.", + "context_length": 32768, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Mistral", "instruct_type": null }, "pricing": { - "prompt": "0.00000275", - "completion": "0.0000081", - "image": "0", + "prompt": "0.0000002", + "completion": "0.0000006", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, "top_provider": { - "context_length": 32000, + "context_length": 32768, "max_completion_tokens": null, "is_moderated": false }, @@ -6505,16 +8475,20 @@ "context_length": 32768, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Mistral", "instruct_type": "mistral" }, "pricing": { "prompt": "0.0000002", "completion": "0.0000002", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -6533,16 +8507,20 @@ "context_length": 32768, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Mistral", "instruct_type": "chatml" }, "pricing": { "prompt": "0.0000005", "completion": "0.0000005", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -6553,34 +8531,6 @@ }, "per_request_limits": null }, - { - "id": "google/gemini-pro-vision", - "name": "Google: Gemini Pro Vision 1.0", - "created": 1702425600, - "description": "Google's flagship multimodal model, supporting image and video in text or chat prompts for a text or code response.\n\nSee the benchmarks and prompting guidelines from [Deepmind](https://deepmind.google/technologies/gemini/).\n\nUsage of Gemini is subject to Google's [Gemini Terms of Use](https://ai.google.dev/terms).\n\n#multimodal", - "context_length": 16384, - "architecture": { - "modality": "text+image->text", - "tokenizer": "Gemini", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000005", - "completion": "0.0000015", - "image": "0.0025", - "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 16384, - "max_completion_tokens": 2048, - "is_moderated": false - }, - "per_request_limits": null - }, { "id": "google/gemini-pro", "name": "Google: Gemini Pro 1.0", "created": 1701302400, @@ -6589,16 +8539,20 @@ "context_length": 32760, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Gemini", "instruct_type": null }, "pricing": { "prompt": "0.0000005", "completion": "0.0000015", - "image": "0.0025", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0.0025", "web_search": "0", "internal_reasoning": "0" }, @@ -6610,28 +8564,33 @@ "per_request_limits": null }, { - "id": "mistralai/mixtral-8x7b", - "name": "Mistral: Mixtral 8x7B (base)", - "created": 1702166400, - "description": "Mixtral 8x7B is a pretrained generative Sparse Mixture of Experts, by Mistral AI. Incorporates 8 experts (feed-forward networks) for a total of 47B parameters. 
Base model (not fine-tuned for instructions) - see [Mixtral 8x7B Instruct](/models/mistralai/mixtral-8x7b-instruct) for an instruct-tuned model.\n\n#moe", - "context_length": 32768, + "id": "google/gemini-pro-vision", + "name": "Google: Gemini Pro Vision 1.0", + "created": 1702425600, + "description": "Google's flagship multimodal model, supporting image and video in text or chat prompts for a text or code response.\n\nSee the benchmarks and prompting guidelines from [Deepmind](https://deepmind.google/technologies/gemini/).\n\nUsage of Gemini is subject to Google's [Gemini Terms of Use](https://ai.google.dev/terms).\n\n#multimodal", + "context_length": 16384, "architecture": { - "modality": "text->text", - "tokenizer": "Mistral", - "instruct_type": "none" + "modality": "text+image->text", + "input_modalities": [ + "text", + "image" + ], + "output_modalities": [ + "text" + ], + "tokenizer": "Gemini", + "instruct_type": null }, "pricing": { - "prompt": "0.0000006", - "completion": "0.0000006", - "image": "0", + "prompt": "0.0000005", + "completion": "0.0000015", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0.0025", "web_search": "0", "internal_reasoning": "0" }, "top_provider": { - "context_length": 32768, + "context_length": 16384, "max_completion_tokens": 2048, "is_moderated": false }, @@ -6645,16 +8604,20 @@ "context_length": 32768, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Mistral", "instruct_type": "mistral" }, "pricing": { "prompt": "0.00000024", "completion": "0.00000024", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -6665,34 +8628,6 @@ }, "per_request_limits": null }, - { - "id": "openchat/openchat-7b:free", - "name": "OpenChat 3.5 7B (free)", - "created": 1701129600, - "description": "OpenChat 7B is a library of open-source language models, fine-tuned with \"C-RLFT (Conditioned Reinforcement Learning Fine-Tuning)\" - a strategy inspired by offline reinforcement learning. 
It has been trained on mixed-quality data without preference labels.\n\n- For OpenChat fine-tuned on Mistral 7B, check out [OpenChat 7B](/models/openchat/openchat-7b).\n- For OpenChat fine-tuned on Llama 8B, check out [OpenChat 8B](/models/openchat/openchat-8b).\n\n#open-source", - "context_length": 8192, - "architecture": { - "modality": "text->text", - "tokenizer": "Mistral", - "instruct_type": "openchat" - }, - "pricing": { - "prompt": "0", - "completion": "0", - "image": "0", - "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 8192, - "max_completion_tokens": 4096, - "is_moderated": false - }, - "per_request_limits": null - }, { "id": "openchat/openchat-7b", "name": "OpenChat 3.5 7B", @@ -6701,22 +8636,26 @@ "context_length": 8192, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Mistral", "instruct_type": "openchat" }, "pricing": { - "prompt": "0.000000055", - "completion": "0.000000055", - "image": "0", + "prompt": "0.00000007", + "completion": "0.00000007", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, "top_provider": { "context_length": 8192, - "max_completion_tokens": 8192, + "max_completion_tokens": null, "is_moderated": false }, "per_request_limits": null @@ -6729,16 +8668,20 @@ "context_length": 8192, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Llama2", "instruct_type": "alpaca" }, "pricing": { - "prompt": "0.0000015", - "completion": "0.00000225", - "image": "0", + "prompt": "0.00000075", + "completion": "0.0000015", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -6757,16 +8700,20 @@ "context_length": 200000, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Claude", "instruct_type": null }, "pricing": { "prompt": "0.000008", "completion": "0.000024", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -6785,16 +8732,20 @@ "context_length": 200000, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Claude", "instruct_type": null }, "pricing": { "prompt": "0.000008", "completion": "0.000024", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -6813,16 +8764,20 @@ "context_length": 200000, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Claude", "instruct_type": null }, "pricing": { "prompt": "0.000008", "completion": "0.000024", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -6841,16 +8796,20 @@ "context_length": 200000, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Claude", "instruct_type": null }, "pricing": { "prompt": "0.000008", "completion": "0.000024", - "image": 
"0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -6861,62 +8820,6 @@ }, "per_request_limits": null }, - { - "id": "teknium/openhermes-2.5-mistral-7b", - "name": "OpenHermes 2.5 Mistral 7B", - "created": 1700438400, - "description": "A continuation of [OpenHermes 2 model](/models/teknium/openhermes-2-mistral-7b), trained on additional code datasets.\nPotentially the most interesting finding from training on a good ratio (est. of around 7-14% of the total dataset) of code instruction was that it has boosted several non-code benchmarks, including TruthfulQA, AGIEval, and GPT4All suite. It did however reduce BigBench benchmark score, but the net gain overall is significant.", - "context_length": 4096, - "architecture": { - "modality": "text->text", - "tokenizer": "Mistral", - "instruct_type": "chatml" - }, - "pricing": { - "prompt": "0.00000017", - "completion": "0.00000017", - "image": "0", - "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 4096, - "max_completion_tokens": 4096, - "is_moderated": false - }, - "per_request_limits": null - }, - { - "id": "undi95/toppy-m-7b:free", - "name": "Toppy M 7B (free)", - "created": 1699574400, - "description": "A wild 7B parameter model that merges several models using the new task_arithmetic merge method from mergekit.\nList of merged models:\n- NousResearch/Nous-Capybara-7B-V1.9\n- [HuggingFaceH4/zephyr-7b-beta](/models/huggingfaceh4/zephyr-7b-beta)\n- lemonilia/AshhLimaRP-Mistral-7B\n- Vulkane/120-Days-of-Sodom-LoRA-Mistral-7b\n- Undi95/Mistral-pippa-sharegpt-7b-qlora\n\n#merge #uncensored", - "context_length": 4096, - "architecture": { - "modality": "text->text", - "tokenizer": "Mistral", - "instruct_type": "alpaca" - }, - "pricing": { - "prompt": "0", - "completion": "0", - "image": "0", - "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 4096, - "max_completion_tokens": 2048, - "is_moderated": false - }, - "per_request_limits": null - }, { "id": "undi95/toppy-m-7b", "name": "Toppy M 7B", @@ -6925,16 +8828,20 @@ "context_length": 4096, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Mistral", "instruct_type": "alpaca" }, "pricing": { "prompt": "0.00000007", "completion": "0.00000007", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -6953,16 +8860,20 @@ "context_length": 6144, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Llama2", "instruct_type": "airoboros" }, "pricing": { - "prompt": "0.000009375", + "prompt": "0.0000065625", "completion": "0.000009375", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -6981,18 +8892,18 @@ "context_length": 2000000, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Router", "instruct_type": null }, "pricing": { "prompt": "-1", - "completion": "-1", - "request": "-1", - "image": "-1", - "web_search": "-1", - "input_cache_read": 
"-1", - "input_cache_write": "-1", - "internal_reasoning": "-1" + "completion": "-1" }, "top_provider": { "context_length": null, @@ -7001,34 +8912,6 @@ }, "per_request_limits": null }, - { - "id": "openai/gpt-3.5-turbo-1106", - "name": "OpenAI: GPT-3.5 Turbo 16k (older v1106)", - "created": 1699228800, - "description": "An older GPT-3.5 Turbo model with improved instruction following, JSON mode, reproducible outputs, parallel function calling, and more. Training data: up to Sep 2021.", - "context_length": 16385, - "architecture": { - "modality": "text->text", - "tokenizer": "GPT", - "instruct_type": null - }, - "pricing": { - "prompt": "0.000001", - "completion": "0.000002", - "image": "0", - "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 16385, - "max_completion_tokens": 4096, - "is_moderated": true - }, - "per_request_limits": null - }, { "id": "openai/gpt-4-1106-preview", "name": "OpenAI: GPT-4 Turbo (older v1106)", @@ -7037,16 +8920,20 @@ "context_length": 128000, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "GPT", "instruct_type": null }, "pricing": { "prompt": "0.00001", "completion": "0.00003", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -7058,30 +8945,34 @@ "per_request_limits": null }, { - "id": "google/palm-2-chat-bison-32k", - "name": "Google: PaLM 2 Chat 32k", - "created": 1698969600, - "description": "PaLM 2 is a language model by Google with improved multilingual, reasoning and coding capabilities.", - "context_length": 32768, + "id": "openai/gpt-3.5-turbo-1106", + "name": "OpenAI: GPT-3.5 Turbo 16k (older v1106)", + "created": 1699228800, + "description": "An older GPT-3.5 Turbo model with improved instruction following, JSON mode, reproducible outputs, parallel function calling, and more. 
Training data: up to Sep 2021.", + "context_length": 16385, "architecture": { "modality": "text->text", - "tokenizer": "PaLM", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], + "tokenizer": "GPT", "instruct_type": null }, "pricing": { "prompt": "0.000001", "completion": "0.000002", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, "top_provider": { - "context_length": 32768, - "max_completion_tokens": 8192, - "is_moderated": false + "context_length": 16385, + "max_completion_tokens": 4096, + "is_moderated": true }, "per_request_limits": null }, @@ -7093,16 +8984,52 @@ "context_length": 32768, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "PaLM", "instruct_type": null }, "pricing": { "prompt": "0.000001", "completion": "0.000002", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 32768, + "max_completion_tokens": 8192, + "is_moderated": false + }, + "per_request_limits": null + }, + { + "id": "google/palm-2-chat-bison-32k", + "name": "Google: PaLM 2 Chat 32k", + "created": 1698969600, + "description": "PaLM 2 is a language model by Google with improved multilingual, reasoning and coding capabilities.", + "context_length": 32768, + "architecture": { + "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], + "tokenizer": "PaLM", + "instruct_type": null + }, + "pricing": { + "prompt": "0.000001", + "completion": "0.000002", + "request": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -7121,16 +9048,20 @@ "context_length": 4096, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Llama2", "instruct_type": "airoboros" }, "pricing": { "prompt": "0.0000005", "completion": "0.0000005", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -7149,16 +9080,20 @@ "context_length": 8192, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Llama2", "instruct_type": "airoboros" }, "pricing": { "prompt": "0.00000375", "completion": "0.00000375", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -7169,6 +9104,38 @@ }, "per_request_limits": null }, + { + "id": "mistralai/mistral-7b-instruct-v0.1", + "name": "Mistral: Mistral 7B Instruct v0.1", + "created": 1695859200, + "description": "A 7.3B parameter model that outperforms Llama 2 13B on all benchmarks, with optimizations for speed and context length.", + "context_length": 32768, + "architecture": { + "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], + "tokenizer": "Mistral", + "instruct_type": "mistral" + }, + "pricing": { + "prompt": "0.0000002", + "completion": "0.0000002", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 32768, + "max_completion_tokens": 2048, + "is_moderated": false + }, + "per_request_limits": null + }, { "id": 
"openai/gpt-3.5-turbo-instruct", "name": "OpenAI: GPT-3.5 Turbo Instruct", @@ -7177,16 +9144,20 @@ "context_length": 4095, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "GPT", "instruct_type": "chatml" }, "pricing": { "prompt": "0.0000015", "completion": "0.000002", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -7198,58 +9169,66 @@ "per_request_limits": null }, { - "id": "mistralai/mistral-7b-instruct-v0.1", - "name": "Mistral: Mistral 7B Instruct v0.1", - "created": 1695859200, - "description": "A 7.3B parameter model that outperforms Llama 2 13B on all benchmarks, with optimizations for speed and context length.", - "context_length": 32768, + "id": "pygmalionai/mythalion-13b", + "name": "Pygmalion: Mythalion 13B", + "created": 1693612800, + "description": "A blend of the new Pygmalion-13b and MythoMax. #merge", + "context_length": 8192, "architecture": { "modality": "text->text", - "tokenizer": "Mistral", - "instruct_type": "mistral" + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], + "tokenizer": "Llama2", + "instruct_type": "alpaca" }, "pricing": { - "prompt": "0.0000002", - "completion": "0.0000002", - "image": "0", + "prompt": "0.0000005625", + "completion": "0.000001125", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, "top_provider": { - "context_length": 32768, - "max_completion_tokens": 2048, + "context_length": 8192, + "max_completion_tokens": 1024, "is_moderated": false }, "per_request_limits": null }, { - "id": "pygmalionai/mythalion-13b", - "name": "Pygmalion: Mythalion 13B", - "created": 1693612800, - "description": "A blend of the new Pygmalion-13b and MythoMax. #merge", - "context_length": 4096, + "id": "openai/gpt-4-32k-0314", + "name": "OpenAI: GPT-4 32k (older v0314)", + "created": 1693180800, + "description": "GPT-4-32k is an extended version of GPT-4, with the same capabilities but quadrupled context length, allowing for processing up to 40 pages of text in a single pass. This is particularly beneficial for handling longer content like interacting with PDFs without an external vector database. 
Training data: up to Sep 2021.", + "context_length": 32767, "architecture": { "modality": "text->text", - "tokenizer": "Llama2", - "instruct_type": "alpaca" + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], + "tokenizer": "GPT", + "instruct_type": null }, "pricing": { - "prompt": "0.0000008", - "completion": "0.0000012", - "image": "0", + "prompt": "0.00006", + "completion": "0.00012", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, "top_provider": { - "context_length": 4096, + "context_length": 32767, "max_completion_tokens": 4096, - "is_moderated": false + "is_moderated": true }, "per_request_limits": null }, @@ -7261,16 +9240,20 @@ "context_length": 16385, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "GPT", "instruct_type": null }, "pricing": { "prompt": "0.000003", "completion": "0.000004", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -7289,44 +9272,20 @@ "context_length": 32767, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "GPT", "instruct_type": null }, "pricing": { "prompt": "0.00006", "completion": "0.00012", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 32767, - "max_completion_tokens": 4096, - "is_moderated": true - }, - "per_request_limits": null - }, - { - "id": "openai/gpt-4-32k-0314", - "name": "OpenAI: GPT-4 32k (older v0314)", - "created": 1693180800, - "description": "GPT-4-32k is an extended version of GPT-4, with the same capabilities but quadrupled context length, allowing for processing up to 40 pages of text in a single pass. This is particularly beneficial for handling longer content like interacting with PDFs without an external vector database. Training data: up to Sep 2021.", - "context_length": 32767, - "architecture": { - "modality": "text->text", - "tokenizer": "GPT", - "instruct_type": null - }, - "pricing": { - "prompt": "0.00006", - "completion": "0.00012", "image": "0", - "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -7345,16 +9304,20 @@ "context_length": 4096, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Llama2", "instruct_type": "alpaca" }, "pricing": { - "prompt": "0.00000017", - "completion": "0.00000017", - "image": "0", + "prompt": "0.00000018", + "completion": "0.00000018", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -7365,34 +9328,6 @@ }, "per_request_limits": null }, - { - "id": "mancer/weaver", - "name": "Mancer: Weaver (alpha)", - "created": 1690934400, - "description": "An attempt to recreate Claude-style verbosity, but don't expect the same level of coherence or memory. 
Meant for use in roleplay/narrative situations.", - "context_length": 8000, - "architecture": { - "modality": "text->text", - "tokenizer": "Llama2", - "instruct_type": "alpaca" - }, - "pricing": { - "prompt": "0.0000015", - "completion": "0.00000225", - "image": "0", - "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 8000, - "max_completion_tokens": 1000, - "is_moderated": false - }, - "per_request_limits": null - }, { "id": "huggingfaceh4/zephyr-7b-beta:free", "name": "Hugging Face: Zephyr 7B (free)", @@ -7401,16 +9336,20 @@ "context_length": 4096, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Mistral", "instruct_type": "zephyr" }, "pricing": { "prompt": "0", "completion": "0", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -7421,6 +9360,38 @@ }, "per_request_limits": null }, + { + "id": "mancer/weaver", + "name": "Mancer: Weaver (alpha)", + "created": 1690934400, + "description": "An attempt to recreate Claude-style verbosity, but don't expect the same level of coherence or memory. Meant for use in roleplay/narrative situations.", + "context_length": 8000, + "architecture": { + "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], + "tokenizer": "Llama2", + "instruct_type": "alpaca" + }, + "pricing": { + "prompt": "0.000001125", + "completion": "0.000001125", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 8000, + "max_completion_tokens": 1000, + "is_moderated": false + }, + "per_request_limits": null + }, { "id": "anthropic/claude-2.0:beta", "name": "Anthropic: Claude v2.0 (self-moderated)", @@ -7429,16 +9400,20 @@ "context_length": 100000, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Claude", "instruct_type": null }, "pricing": { "prompt": "0.000008", "completion": "0.000024", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -7457,16 +9432,20 @@ "context_length": 100000, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Claude", "instruct_type": null }, "pricing": { "prompt": "0.000008", "completion": "0.000024", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -7482,52 +9461,28 @@ "name": "ReMM SLERP 13B", "created": 1689984000, "description": "A recreation trial of the original MythoMax-L2-B13 but with updated models. 
#merge", - "context_length": 4096, + "context_length": 6144, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Llama2", "instruct_type": "alpaca" }, "pricing": { - "prompt": "0.0000008", - "completion": "0.0000012", - "image": "0", + "prompt": "0.0000005625", + "completion": "0.000001125", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, "top_provider": { - "context_length": 4096, - "max_completion_tokens": 4096, - "is_moderated": false - }, - "per_request_limits": null - }, - { - "id": "google/palm-2-chat-bison", - "name": "Google: PaLM 2 Chat", - "created": 1689811200, - "description": "PaLM 2 is a language model by Google with improved multilingual, reasoning and coding capabilities.", - "context_length": 9216, - "architecture": { - "modality": "text->text", - "tokenizer": "PaLM", - "instruct_type": null - }, - "pricing": { - "prompt": "0.000001", - "completion": "0.000002", - "image": "0", - "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 9216, + "context_length": 6144, "max_completion_tokens": 1024, "is_moderated": false }, @@ -7541,16 +9496,20 @@ "context_length": 7168, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "PaLM", "instruct_type": null }, "pricing": { "prompt": "0.000001", "completion": "0.000002", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -7562,29 +9521,33 @@ "per_request_limits": null }, { - "id": "gryphe/mythomax-l2-13b:free", - "name": "MythoMax 13B (free)", - "created": 1688256000, - "description": "One of the highest performing and most popular fine-tunes of Llama 2 13B, with rich descriptions and roleplay. 
#merge", - "context_length": 4096, + "id": "google/palm-2-chat-bison", + "name": "Google: PaLM 2 Chat", + "created": 1689811200, + "description": "PaLM 2 is a language model by Google with improved multilingual, reasoning and coding capabilities.", + "context_length": 9216, "architecture": { "modality": "text->text", - "tokenizer": "Llama2", - "instruct_type": "alpaca" + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], + "tokenizer": "PaLM", + "instruct_type": null }, "pricing": { - "prompt": "0", - "completion": "0", - "image": "0", + "prompt": "0.000001", + "completion": "0.000002", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, "top_provider": { - "context_length": 4096, - "max_completion_tokens": 2048, + "context_length": 9216, + "max_completion_tokens": 1024, "is_moderated": false }, "per_request_limits": null @@ -7597,16 +9560,20 @@ "context_length": 4096, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Llama2", "instruct_type": "alpaca" }, "pricing": { "prompt": "0.000000065", "completion": "0.000000065", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -7625,16 +9592,20 @@ "context_length": 4096, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Llama2", "instruct_type": "llama2" }, "pricing": { "prompt": "0.00000022", "completion": "0.00000022", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -7653,16 +9624,20 @@ "context_length": 4096, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "Llama2", "instruct_type": "llama2" }, "pricing": { "prompt": "0.0000009", "completion": "0.0000009", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -7673,34 +9648,6 @@ }, "per_request_limits": null }, - { - "id": "openai/gpt-3.5-turbo", - "name": "OpenAI: GPT-3.5 Turbo", - "created": 1685232000, - "description": "GPT-3.5 Turbo is OpenAI's fastest model. 
It can understand and generate natural language or code, and is optimized for chat and traditional completion tasks.\n\nTraining data up to Sep 2021.", - "context_length": 16385, - "architecture": { - "modality": "text->text", - "tokenizer": "GPT", - "instruct_type": null - }, - "pricing": { - "prompt": "0.0000005", - "completion": "0.0000015", - "image": "0", - "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", - "web_search": "0", - "internal_reasoning": "0" - }, - "top_provider": { - "context_length": 16385, - "max_completion_tokens": 4096, - "is_moderated": true - }, - "per_request_limits": null - }, { "id": "openai/gpt-3.5-turbo-0125", "name": "OpenAI: GPT-3.5 Turbo 16k", @@ -7709,16 +9656,20 @@ "context_length": 16385, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "GPT", "instruct_type": null }, "pricing": { "prompt": "0.0000005", "completion": "0.0000015", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -7737,16 +9688,20 @@ "context_length": 8191, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "GPT", "instruct_type": null }, "pricing": { "prompt": "0.00003", "completion": "0.00006", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, @@ -7757,6 +9712,38 @@ }, "per_request_limits": null }, + { + "id": "openai/gpt-3.5-turbo", + "name": "OpenAI: GPT-3.5 Turbo", + "created": 1685232000, + "description": "GPT-3.5 Turbo is OpenAI's fastest model. It can understand and generate natural language or code, and is optimized for chat and traditional completion tasks.\n\nTraining data up to Sep 2021.", + "context_length": 16385, + "architecture": { + "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], + "tokenizer": "GPT", + "instruct_type": null + }, + "pricing": { + "prompt": "0.0000005", + "completion": "0.0000015", + "request": "0", + "image": "0", + "web_search": "0", + "internal_reasoning": "0" + }, + "top_provider": { + "context_length": 16385, + "max_completion_tokens": 4096, + "is_moderated": true + }, + "per_request_limits": null + }, { "id": "openai/gpt-4-0314", "name": "OpenAI: GPT-4 (older v0314)", @@ -7765,16 +9752,20 @@ "context_length": 8191, "architecture": { "modality": "text->text", + "input_modalities": [ + "text" + ], + "output_modalities": [ + "text" + ], "tokenizer": "GPT", "instruct_type": null }, "pricing": { "prompt": "0.00003", "completion": "0.00006", - "image": "0", "request": "0", - "input_cache_read": "0", - "input_cache_write": "0", + "image": "0", "web_search": "0", "internal_reasoning": "0" }, diff --git a/packages/osr-code-bot/schema.json b/packages/osr-code-bot/schema.json index 94e899a..2f4683c 100644 --- a/packages/osr-code-bot/schema.json +++ b/packages/osr-code-bot/schema.json @@ -81,7 +81,7 @@ }, "model": { "type": "string", - "description": "AI model to use for processing. 
Available models:\n\u001b[35m\u001b[1m\u001b[22m\u001b[39m\n\u001b[35m\u001b[1m OpenRouter models:\u001b[22m\u001b[39m\n\u001b[35m\u001b[1m\u001b[22m\u001b[39m\n01-ai/yi-large | paid\naetherwiing/mn-starcannon-12b | paid\nai21/jamba-1-5-large | paid\nai21/jamba-1-5-mini | paid\nai21/jamba-1.6-large | paid\nai21/jamba-instruct | paid\nai21/jamba-1.6-mini | paid\naion-labs/aion-1.0 | paid\naion-labs/aion-1.0-mini | paid\naion-labs/aion-rp-llama-3.1-8b | paid\njondurbin/airoboros-l2-70b | paid\nallenai/olmo-2-0325-32b-instruct | paid\namazon/nova-lite-v1 | paid\namazon/nova-micro-v1 | paid\namazon/nova-pro-v1 | paid\nanthropic/claude-3-haiku | paid\nanthropic/claude-3-haiku:beta | paid\nanthropic/claude-3-opus | paid\nanthropic/claude-3-opus:beta | paid\nanthropic/claude-3-sonnet | paid\nanthropic/claude-3-sonnet:beta | paid\nanthropic/claude-3.5-haiku | paid\nanthropic/claude-3.5-haiku-20241022 | paid\nanthropic/claude-3.5-haiku-20241022:beta | paid\nanthropic/claude-3.5-haiku:beta | paid\nanthropic/claude-3.5-sonnet | paid\nanthropic/claude-3.5-sonnet-20240620 | paid\nanthropic/claude-3.5-sonnet-20240620:beta | paid\nanthropic/claude-3.5-sonnet:beta | paid\nanthropic/claude-3.7-sonnet | paid\nanthropic/claude-3.7-sonnet:beta | paid\nanthropic/claude-3.7-sonnet:thinking | paid\nanthropic/claude-2 | paid\nanthropic/claude-2:beta | paid\nanthropic/claude-2.0 | paid\nanthropic/claude-2.0:beta | paid\nanthropic/claude-2.1 | paid\nanthropic/claude-2.1:beta | paid\nopenrouter/auto | paid\ncohere/command | paid\ncohere/command-a | paid\ncohere/command-r | paid\ncohere/command-r-03-2024 | paid\ncohere/command-r-08-2024 | paid\ncohere/command-r-plus | paid\ncohere/command-r-plus-04-2024 | paid\ncohere/command-r-plus-08-2024 | paid\ncohere/command-r7b-12-2024 | paid\ndeepseek/deepseek-r1-zero:free | free\ndeepseek/deepseek-chat | paid\ndeepseek/deepseek-chat:free | free\ndeepseek/deepseek-r1 | paid\ndeepseek/deepseek-r1:free | free\ndeepseek/deepseek-r1-distill-llama-70b | paid\ndeepseek/deepseek-r1-distill-llama-70b:free | free\ndeepseek/deepseek-r1-distill-llama-8b | paid\ndeepseek/deepseek-r1-distill-qwen-1.5b | paid\ndeepseek/deepseek-r1-distill-qwen-14b | paid\ndeepseek/deepseek-r1-distill-qwen-14b:free | free\ndeepseek/deepseek-r1-distill-qwen-32b | paid\ndeepseek/deepseek-r1-distill-qwen-32b:free | free\ncognitivecomputations/dolphin-mixtral-8x7b | paid\ncognitivecomputations/dolphin-mixtral-8x22b | paid\ncognitivecomputations/dolphin3.0-mistral-24b:free | free\ncognitivecomputations/dolphin3.0-r1-mistral-24b:free | free\neva-unit-01/eva-llama-3.33-70b | paid\neva-unit-01/eva-qwen-2.5-32b | paid\neva-unit-01/eva-qwen-2.5-72b | paid\nsao10k/fimbulvetr-11b-v2 | paid\nalpindale/goliath-120b | paid\ngoogle/gemini-2.0-flash-lite-001 | paid\ngoogle/gemini-2.0-flash-thinking-exp-1219:free | free\ngoogle/gemini-2.0-flash-thinking-exp:free | free\ngoogle/gemini-exp-1206:free | free\ngoogle/gemini-flash-1.5 | paid\ngoogle/gemini-flash-1.5-8b | paid\ngoogle/gemini-flash-1.5-8b-exp | paid\ngoogle/gemini-2.0-flash-001 | paid\ngoogle/gemini-2.0-flash-exp:free | free\ngoogle/gemini-2.0-flash-lite-preview-02-05:free | free\ngoogle/gemini-pro | paid\ngoogle/gemini-pro-1.5 | paid\ngoogle/gemini-2.0-pro-exp-02-05:free | free\ngoogle/gemini-pro-vision | paid\ngoogle/gemma-2-27b-it | paid\ngoogle/gemma-2-9b-it | paid\ngoogle/gemma-2-9b-it:free | free\ngoogle/gemma-3-12b-it:free | free\ngoogle/gemma-3-1b-it:free | free\ngoogle/gemma-3-27b-it | paid\ngoogle/gemma-3-27b-it:free | free\ngoogle/gemma-3-4b-it:free | 
free\ngoogle/gemma-7b-it | paid\ngoogle/learnlm-1.5-pro-experimental:free | free\ngoogle/palm-2-chat-bison | paid\ngoogle/palm-2-chat-bison-32k | paid\ngoogle/palm-2-codechat-bison | paid\ngoogle/palm-2-codechat-bison-32k | paid\nhuggingfaceh4/zephyr-7b-beta:free | free\ninfermatic/mn-inferor-12b | paid\ninflection/inflection-3-pi | paid\ninflection/inflection-3-productivity | paid\nlatitudegames/wayfarer-large-70b-llama-3.3 | paid\nliquid/lfm-3b | paid\nliquid/lfm-40b | paid\nliquid/lfm-7b | paid\nallenai/llama-3.1-tulu-3-405b | paid\nmeta-llama/llama-guard-3-8b | paid\nalpindale/magnum-72b | paid\nanthracite-org/magnum-v2-72b | paid\nanthracite-org/magnum-v4-72b | paid\nmancer/weaver | paid\nmeta-llama/llama-2-13b-chat | paid\nmeta-llama/llama-2-70b-chat | paid\nmeta-llama/llama-3-70b-instruct | paid\nmeta-llama/llama-3-8b-instruct | paid\nmeta-llama/llama-3-8b-instruct:free | free\nmeta-llama/llama-3.1-405b | paid\nmeta-llama/llama-3.1-405b-instruct | paid\nmeta-llama/llama-3.1-70b-instruct | paid\nmeta-llama/llama-3.1-8b-instruct | paid\nmeta-llama/llama-3.1-8b-instruct:free | free\nmeta-llama/llama-3.2-11b-vision-instruct | paid\nmeta-llama/llama-3.2-11b-vision-instruct:free | free\nmeta-llama/llama-3.2-1b-instruct | paid\nmeta-llama/llama-3.2-1b-instruct:free | free\nmeta-llama/llama-3.2-3b-instruct | paid\nmeta-llama/llama-3.2-3b-instruct:free | free\nmeta-llama/llama-3.2-90b-vision-instruct | paid\nmeta-llama/llama-3.3-70b-instruct | paid\nmeta-llama/llama-3.3-70b-instruct:free | free\nmeta-llama/llama-guard-2-8b | paid\nmicrosoft/phi-4 | paid\nmicrosoft/phi-4-multimodal-instruct | paid\nmicrosoft/phi-3-medium-128k-instruct | paid\nmicrosoft/phi-3-medium-128k-instruct:free | free\nmicrosoft/phi-3-mini-128k-instruct | paid\nmicrosoft/phi-3-mini-128k-instruct:free | free\nmicrosoft/phi-3.5-mini-128k-instruct | paid\nsophosympatheia/midnight-rose-70b | paid\nminimax/minimax-01 | paid\nmistralai/mistral-large | paid\nmistralai/mistral-large-2407 | paid\nmistralai/mistral-large-2411 | paid\nmistralai/mistral-medium | paid\nnothingiisreal/mn-celeste-12b | paid\nmistralai/mistral-small | paid\nmistralai/mistral-tiny | paid\nmistralai/codestral-2501 | paid\nmistralai/codestral-mamba | paid\nmistralai/ministral-3b | paid\nmistralai/ministral-8b | paid\nmistralai/mistral-7b-instruct | paid\nmistralai/mistral-7b-instruct:free | free\nmistralai/mistral-7b-instruct-v0.1 | paid\nmistralai/mistral-7b-instruct-v0.2 | paid\nmistralai/mistral-7b-instruct-v0.3 | paid\nmistralai/mistral-nemo | paid\nmistralai/mistral-nemo:free | free\nmistralai/mistral-small-24b-instruct-2501 | paid\nmistralai/mistral-small-24b-instruct-2501:free | free\nmistralai/mistral-small-3.1-24b-instruct | paid\nmistralai/mixtral-8x22b-instruct | paid\nmistralai/mixtral-8x7b | paid\nmistralai/mixtral-8x7b-instruct | paid\nmistralai/pixtral-12b | paid\nmistralai/pixtral-large-2411 | paid\nmistralai/mistral-saba | paid\nmoonshotai/moonlight-16b-a3b-instruct:free | free\ngryphe/mythomax-l2-13b | paid\ngryphe/mythomax-l2-13b:free | free\nneversleep/llama-3-lumimaid-70b | paid\nneversleep/llama-3-lumimaid-8b | paid\nneversleep/llama-3-lumimaid-8b:extended | paid\nneversleep/llama-3.1-lumimaid-70b | paid\nneversleep/llama-3.1-lumimaid-8b | paid\nneversleep/noromaid-20b | paid\nnousresearch/deephermes-3-llama-3-8b-preview:free | free\nnousresearch/nous-hermes-llama2-13b | paid\nnousresearch/nous-hermes-2-mixtral-8x7b-dpo | paid\nnousresearch/hermes-3-llama-3.1-405b | paid\nnousresearch/hermes-3-llama-3.1-70b | 
paid\nnousresearch/hermes-2-pro-llama-3-8b | paid\nnvidia/llama-3.1-nemotron-70b-instruct | paid\nnvidia/llama-3.1-nemotron-70b-instruct:free | free\nopen-r1/olympiccoder-32b:free | free\nopen-r1/olympiccoder-7b:free | free\nopenai/chatgpt-4o-latest | paid\nopenai/gpt-3.5-turbo | paid\nopenai/gpt-3.5-turbo-0613 | paid\nopenai/gpt-3.5-turbo-16k | paid\nopenai/gpt-3.5-turbo-0125 | paid\nopenai/gpt-3.5-turbo-1106 | paid\nopenai/gpt-3.5-turbo-instruct | paid\nopenai/gpt-4 | paid\nopenai/gpt-4-0314 | paid\nopenai/gpt-4-32k | paid\nopenai/gpt-4-32k-0314 | paid\nopenai/gpt-4-turbo | paid\nopenai/gpt-4-1106-preview | paid\nopenai/gpt-4-turbo-preview | paid\nopenai/gpt-4.5-preview | paid\nopenai/gpt-4o | paid\nopenai/gpt-4o-2024-05-13 | paid\nopenai/gpt-4o-2024-08-06 | paid\nopenai/gpt-4o-2024-11-20 | paid\nopenai/gpt-4o:extended | paid\nopenai/gpt-4o-search-preview | paid\nopenai/gpt-4o-mini | paid\nopenai/gpt-4o-mini-2024-07-18 | paid\nopenai/gpt-4o-mini-search-preview | paid\nopenai/o1 | paid\nopenai/o1-mini | paid\nopenai/o1-mini-2024-09-12 | paid\nopenai/o1-preview | paid\nopenai/o1-preview-2024-09-12 | paid\nopenai/o3-mini | paid\nopenai/o3-mini-high | paid\nopenchat/openchat-7b | paid\nopenchat/openchat-7b:free | free\nteknium/openhermes-2.5-mistral-7b | paid\nperplexity/llama-3.1-sonar-large-128k-chat | paid\nperplexity/llama-3.1-sonar-large-128k-online | paid\nperplexity/llama-3.1-sonar-small-128k-chat | paid\nperplexity/llama-3.1-sonar-small-128k-online | paid\nperplexity/r1-1776 | paid\nperplexity/sonar | paid\nperplexity/sonar-deep-research | paid\nperplexity/sonar-pro | paid\nperplexity/sonar-reasoning | paid\nperplexity/sonar-reasoning-pro | paid\npygmalionai/mythalion-13b | paid\nqwen/qwen-2-72b-instruct | paid\nqwen/qwen-2-7b-instruct | paid\nqwen/qwen-2-7b-instruct:free | free\nqwen/qwen-vl-max | paid\nqwen/qwen-vl-plus | paid\nqwen/qwen-max | paid\nqwen/qwen-plus | paid\nqwen/qwen-turbo | paid\nqwen/qwen2.5-32b-instruct | paid\nqwen/qwen2.5-vl-72b-instruct | paid\nqwen/qwen2.5-vl-72b-instruct:free | free\nqwen/qwen-2.5-vl-72b-instruct | paid\nqwen/qwen-2.5-vl-7b-instruct | paid\nqwen/qwq-32b | paid\nqwen/qwq-32b:free | free\nqwen/qwq-32b-preview | paid\nqwen/qwq-32b-preview:free | free\nqwen/qwen-2.5-72b-instruct | paid\nqwen/qwen-2.5-72b-instruct:free | free\nqwen/qwen-2.5-7b-instruct | paid\nqwen/qwen-2.5-coder-32b-instruct | paid\nqwen/qwen-2.5-coder-32b-instruct:free | free\nrekaai/reka-flash-3:free | free\nundi95/remm-slerp-l2-13b | paid\nthedrummer/rocinante-12b | paid\nsophosympatheia/rogue-rose-103b-v0.2:free | free\nsao10k/l3-lunaris-8b | paid\nsao10k/l3-euryale-70b | paid\nsao10k/l3.1-70b-hanami-x1 | paid\nsao10k/l3.1-euryale-70b | paid\nsao10k/l3.3-euryale-70b | paid\nraifle/sorcererlm-8x22b | paid\nsteelskull/l3.3-electra-r1-70b | paid\ntokyotech-llm/llama-3.1-swallow-70b-instruct-v0.3 | paid\nthedrummer/anubis-pro-105b-v1 | paid\nthedrummer/skyfall-36b-v2 | paid\nundi95/toppy-m-7b | paid\nundi95/toppy-m-7b:free | free\nthedrummer/unslopnemo-12b | paid\nmicrosoft/wizardlm-2-7b | paid\nmicrosoft/wizardlm-2-8x22b | paid\nx-ai/grok-2-1212 | paid\nx-ai/grok-2-vision-1212 | paid\nx-ai/grok-beta | paid\nx-ai/grok-vision-beta | paid\nxwin-lm/xwin-lm-70b | paid\n\u001b[35m\u001b[1m\u001b[22m\u001b[39m\n\u001b[35m\u001b[1m OpenAI 
models:\u001b[22m\u001b[39m\n\u001b[35m\u001b[1m\u001b[22m\u001b[39m\nbabbage-002\nchatgpt-4o-latest\ndall-e-2\ndall-e-3\ndavinci-002\ngpt-3.5-turbo\ngpt-3.5-turbo-0125\ngpt-3.5-turbo-1106\ngpt-3.5-turbo-16k\ngpt-3.5-turbo-instruct\ngpt-3.5-turbo-instruct-0914\ngpt-4\ngpt-4-0125-preview\ngpt-4-0613\ngpt-4-1106-preview\ngpt-4-turbo\ngpt-4-turbo-2024-04-09\ngpt-4-turbo-preview\ngpt-4.5-preview\ngpt-4.5-preview-2025-02-27\ngpt-4o\ngpt-4o-2024-05-13\ngpt-4o-2024-08-06\ngpt-4o-2024-11-20\ngpt-4o-audio-preview\ngpt-4o-audio-preview-2024-10-01\ngpt-4o-audio-preview-2024-12-17\ngpt-4o-mini\ngpt-4o-mini-2024-07-18\ngpt-4o-mini-audio-preview\ngpt-4o-mini-audio-preview-2024-12-17\ngpt-4o-mini-realtime-preview\ngpt-4o-mini-realtime-preview-2024-12-17\ngpt-4o-mini-search-preview\ngpt-4o-mini-search-preview-2025-03-11\ngpt-4o-realtime-preview\ngpt-4o-realtime-preview-2024-10-01\ngpt-4o-realtime-preview-2024-12-17\ngpt-4o-search-preview\ngpt-4o-search-preview-2025-03-11\no1\no1-2024-12-17\no1-mini\no1-mini-2024-09-12\no1-preview\no1-preview-2024-09-12\no3-mini\no3-mini-2025-01-31\nomni-moderation-2024-09-26\nomni-moderation-latest\ntext-embedding-3-large\ntext-embedding-3-small\ntext-embedding-ada-002\ntts-1\ntts-1-1106\ntts-1-hd\ntts-1-hd-1106\nwhisper-1\n-----\n\n\u001b[35m\u001b[1m\u001b[22m\u001b[39m\n\u001b[35m\u001b[1m Deepseek models:\u001b[22m\u001b[39m\n\u001b[35m\u001b[1m\u001b[22m\u001b[39m\ndeepseek-chat\ndeepseek-reasoner\n-----\n" + "description": "AI model to use for processing. Available models:\n\u001b[35m\u001b[1m\u001b[22m\u001b[39m\n\u001b[35m\u001b[1m OpenRouter models:\u001b[22m\u001b[39m\n\u001b[35m\u001b[1m\u001b[22m\u001b[39m\n01-ai/yi-large | paid\naetherwiing/mn-starcannon-12b | paid\nai21/jamba-1-5-large | paid\nai21/jamba-1-5-mini | paid\nai21/jamba-1.6-large | paid\nai21/jamba-instruct | paid\nai21/jamba-1.6-mini | paid\naion-labs/aion-1.0 | paid\naion-labs/aion-1.0-mini | paid\naion-labs/aion-rp-llama-3.1-8b | paid\njondurbin/airoboros-l2-70b | paid\nallenai/molmo-7b-d:free | free\nallenai/olmo-2-0325-32b-instruct | paid\namazon/nova-lite-v1 | paid\namazon/nova-micro-v1 | paid\namazon/nova-pro-v1 | paid\nanthropic/claude-3-haiku | paid\nanthropic/claude-3-haiku:beta | paid\nanthropic/claude-3-opus | paid\nanthropic/claude-3-opus:beta | paid\nanthropic/claude-3-sonnet | paid\nanthropic/claude-3-sonnet:beta | paid\nanthropic/claude-3.5-haiku | paid\nanthropic/claude-3.5-haiku-20241022 | paid\nanthropic/claude-3.5-haiku-20241022:beta | paid\nanthropic/claude-3.5-haiku:beta | paid\nanthropic/claude-3.5-sonnet | paid\nanthropic/claude-3.5-sonnet-20240620 | paid\nanthropic/claude-3.5-sonnet-20240620:beta | paid\nanthropic/claude-3.5-sonnet:beta | paid\nanthropic/claude-3.7-sonnet | paid\nanthropic/claude-3.7-sonnet:beta | paid\nanthropic/claude-3.7-sonnet:thinking | paid\nanthropic/claude-2 | paid\nanthropic/claude-2:beta | paid\nanthropic/claude-2.0 | paid\nanthropic/claude-2.0:beta | paid\nanthropic/claude-2.1 | paid\nanthropic/claude-2.1:beta | paid\nopenrouter/auto | paid\nbytedance-research/ui-tars-72b:free | free\ncohere/command | paid\ncohere/command-a | paid\ncohere/command-r | paid\ncohere/command-r-03-2024 | paid\ncohere/command-r-08-2024 | paid\ncohere/command-r-plus | paid\ncohere/command-r-plus-04-2024 | paid\ncohere/command-r-plus-08-2024 | paid\ncohere/command-r7b-12-2024 | paid\ndeepseek/deepseek-r1-zero:free | free\ndeepseek/deepseek-chat | paid\ndeepseek/deepseek-chat:free | free\ndeepseek/deepseek-chat-v3-0324 | paid\ndeepseek/deepseek-chat-v3-0324:free | 
free\ndeepseek/deepseek-v3-base:free | free\ndeepseek/deepseek-r1 | paid\ndeepseek/deepseek-r1:free | free\ndeepseek/deepseek-r1-distill-llama-70b | paid\ndeepseek/deepseek-r1-distill-llama-70b:free | free\ndeepseek/deepseek-r1-distill-llama-8b | paid\ndeepseek/deepseek-r1-distill-qwen-1.5b | paid\ndeepseek/deepseek-r1-distill-qwen-14b | paid\ndeepseek/deepseek-r1-distill-qwen-14b:free | free\ndeepseek/deepseek-r1-distill-qwen-32b | paid\ndeepseek/deepseek-r1-distill-qwen-32b:free | free\ncognitivecomputations/dolphin-mixtral-8x7b | paid\ncognitivecomputations/dolphin-mixtral-8x22b | paid\ncognitivecomputations/dolphin3.0-mistral-24b:free | free\ncognitivecomputations/dolphin3.0-r1-mistral-24b:free | free\neva-unit-01/eva-llama-3.33-70b | paid\neva-unit-01/eva-qwen-2.5-32b | paid\neva-unit-01/eva-qwen-2.5-72b | paid\nsao10k/fimbulvetr-11b-v2 | paid\nalpindale/goliath-120b | paid\ngoogle/gemini-flash-1.5 | paid\ngoogle/gemini-flash-1.5-8b | paid\ngoogle/gemini-flash-1.5-8b-exp | paid\ngoogle/gemini-pro-1.5 | paid\ngoogle/gemini-2.0-flash-001 | paid\ngoogle/gemini-2.0-flash-exp:free | free\ngoogle/gemini-2.0-flash-lite-001 | paid\ngoogle/gemini-2.0-flash-thinking-exp-1219:free | free\ngoogle/gemini-2.0-flash-thinking-exp:free | free\ngoogle/gemini-2.0-pro-exp-02-05:free | free\ngoogle/gemini-2.5-pro-exp-03-25:free | free\ngoogle/gemini-2.5-pro-preview-03-25 | paid\ngoogle/gemini-pro | paid\ngoogle/gemini-pro-vision | paid\ngoogle/gemma-2-27b-it | paid\ngoogle/gemma-2-9b-it | paid\ngoogle/gemma-2-9b-it:free | free\ngoogle/gemma-3-12b-it | paid\ngoogle/gemma-3-12b-it:free | free\ngoogle/gemma-3-1b-it:free | free\ngoogle/gemma-3-27b-it | paid\ngoogle/gemma-3-27b-it:free | free\ngoogle/gemma-3-4b-it | paid\ngoogle/gemma-3-4b-it:free | free\ngoogle/learnlm-1.5-pro-experimental:free | free\ngoogle/palm-2-chat-bison | paid\ngoogle/palm-2-chat-bison-32k | paid\ngoogle/palm-2-codechat-bison | paid\ngoogle/palm-2-codechat-bison-32k | paid\nhuggingfaceh4/zephyr-7b-beta:free | free\ninfermatic/mn-inferor-12b | paid\ninflection/inflection-3-pi | paid\ninflection/inflection-3-productivity | paid\nlatitudegames/wayfarer-large-70b-llama-3.3 | paid\nliquid/lfm-3b | paid\nliquid/lfm-40b | paid\nliquid/lfm-7b | paid\nmeta-llama/llama-guard-3-8b | paid\nalpindale/magnum-72b | paid\nanthracite-org/magnum-v2-72b | paid\nanthracite-org/magnum-v4-72b | paid\nmancer/weaver | paid\nmeta-llama/llama-2-13b-chat | paid\nmeta-llama/llama-2-70b-chat | paid\nmeta-llama/llama-3-70b-instruct | paid\nmeta-llama/llama-3-8b-instruct | paid\nmeta-llama/llama-3.1-405b | paid\nmeta-llama/llama-3.1-405b-instruct | paid\nmeta-llama/llama-3.1-70b-instruct | paid\nmeta-llama/llama-3.1-8b-instruct | paid\nmeta-llama/llama-3.1-8b-instruct:free | free\nmeta-llama/llama-3.2-11b-vision-instruct | paid\nmeta-llama/llama-3.2-11b-vision-instruct:free | free\nmeta-llama/llama-3.2-1b-instruct | paid\nmeta-llama/llama-3.2-1b-instruct:free | free\nmeta-llama/llama-3.2-3b-instruct | paid\nmeta-llama/llama-3.2-3b-instruct:free | free\nmeta-llama/llama-3.2-90b-vision-instruct | paid\nmeta-llama/llama-3.3-70b-instruct | paid\nmeta-llama/llama-3.3-70b-instruct:free | free\nmeta-llama/llama-4-maverick | paid\nmeta-llama/llama-4-maverick:free | free\nmeta-llama/llama-4-scout | paid\nmeta-llama/llama-4-scout:free | free\nmeta-llama/llama-guard-2-8b | paid\nmicrosoft/phi-4 | paid\nmicrosoft/phi-4-multimodal-instruct | paid\nmicrosoft/phi-3-medium-128k-instruct | paid\nmicrosoft/phi-3-medium-128k-instruct:free | free\nmicrosoft/phi-3-mini-128k-instruct | 
paid\nmicrosoft/phi-3-mini-128k-instruct:free | free\nmicrosoft/phi-3.5-mini-128k-instruct | paid\nsophosympatheia/midnight-rose-70b | paid\nminimax/minimax-01 | paid\nmistralai/mistral-large | paid\nmistralai/mistral-large-2407 | paid\nmistralai/mistral-large-2411 | paid\nmistralai/mistral-medium | paid\nnothingiisreal/mn-celeste-12b | paid\nmistralai/mistral-small | paid\nmistralai/mistral-tiny | paid\nmistralai/codestral-2501 | paid\nmistralai/codestral-mamba | paid\nmistralai/ministral-3b | paid\nmistral/ministral-8b | paid\nmistralai/ministral-8b | paid\nmistralai/mistral-7b-instruct | paid\nmistralai/mistral-7b-instruct:free | free\nmistralai/mistral-7b-instruct-v0.1 | paid\nmistralai/mistral-7b-instruct-v0.2 | paid\nmistralai/mistral-7b-instruct-v0.3 | paid\nmistralai/mistral-nemo | paid\nmistralai/mistral-nemo:free | free\nmistralai/mistral-small-24b-instruct-2501 | paid\nmistralai/mistral-small-24b-instruct-2501:free | free\nmistralai/mistral-small-3.1-24b-instruct | paid\nmistralai/mistral-small-3.1-24b-instruct:free | free\nmistralai/mixtral-8x22b-instruct | paid\nmistralai/mixtral-8x7b | paid\nmistralai/mixtral-8x7b-instruct | paid\nmistralai/pixtral-12b | paid\nmistralai/pixtral-large-2411 | paid\nmistralai/mistral-saba | paid\nmoonshotai/moonlight-16b-a3b-instruct:free | free\ngryphe/mythomax-l2-13b | paid\nneversleep/llama-3-lumimaid-70b | paid\nneversleep/llama-3-lumimaid-8b | paid\nneversleep/llama-3-lumimaid-8b:extended | paid\nneversleep/llama-3.1-lumimaid-70b | paid\nneversleep/llama-3.1-lumimaid-8b | paid\nneversleep/noromaid-20b | paid\nnousresearch/deephermes-3-llama-3-8b-preview:free | free\nnousresearch/nous-hermes-llama2-13b | paid\nnousresearch/nous-hermes-2-mixtral-8x7b-dpo | paid\nnousresearch/hermes-3-llama-3.1-405b | paid\nnousresearch/hermes-3-llama-3.1-70b | paid\nnousresearch/hermes-2-pro-llama-3-8b | paid\nnvidia/llama-3.1-nemotron-70b-instruct | paid\nnvidia/llama-3.1-nemotron-70b-instruct:free | free\nopen-r1/olympiccoder-32b:free | free\nopen-r1/olympiccoder-7b:free | free\nopenai/chatgpt-4o-latest | paid\nopenai/gpt-3.5-turbo | paid\nopenai/gpt-3.5-turbo-0613 | paid\nopenai/gpt-3.5-turbo-16k | paid\nopenai/gpt-3.5-turbo-0125 | paid\nopenai/gpt-3.5-turbo-1106 | paid\nopenai/gpt-3.5-turbo-instruct | paid\nopenai/gpt-4 | paid\nopenai/gpt-4-0314 | paid\nopenai/gpt-4-32k | paid\nopenai/gpt-4-32k-0314 | paid\nopenai/gpt-4-turbo | paid\nopenai/gpt-4-1106-preview | paid\nopenai/gpt-4-turbo-preview | paid\nopenai/gpt-4.5-preview | paid\nopenai/gpt-4o | paid\nopenai/gpt-4o-2024-05-13 | paid\nopenai/gpt-4o-2024-08-06 | paid\nopenai/gpt-4o-2024-11-20 | paid\nopenai/gpt-4o:extended | paid\nopenai/gpt-4o-search-preview | paid\nopenai/gpt-4o-mini | paid\nopenai/gpt-4o-mini-2024-07-18 | paid\nopenai/gpt-4o-mini-search-preview | paid\nopenai/o1 | paid\nopenai/o1-mini | paid\nopenai/o1-mini-2024-09-12 | paid\nopenai/o1-preview | paid\nopenai/o1-preview-2024-09-12 | paid\nopenai/o1-pro | paid\nopenai/o3-mini | paid\nopenai/o3-mini-high | paid\nopenchat/openchat-7b | paid\nopenchat/openchat-7b:free | free\nall-hands/openhands-lm-32b-v0.1 | paid\nperplexity/llama-3.1-sonar-large-128k-online | paid\nperplexity/llama-3.1-sonar-small-128k-online | paid\nperplexity/r1-1776 | paid\nperplexity/sonar | paid\nperplexity/sonar-deep-research | paid\nperplexity/sonar-pro | paid\nperplexity/sonar-reasoning | paid\nperplexity/sonar-reasoning-pro | paid\npygmalionai/mythalion-13b | paid\nopenrouter/quasar-alpha | paid\nqwen/qwen-2-72b-instruct | paid\nqwen/qwen-vl-max | 
paid\nqwen/qwen-vl-plus | paid\nqwen/qwen-max | paid\nqwen/qwen-plus | paid\nqwen/qwen-turbo | paid\nqwen/qwen2.5-32b-instruct | paid\nqwen/qwen2.5-vl-32b-instruct | paid\nqwen/qwen2.5-vl-32b-instruct:free | free\nqwen/qwen2.5-vl-3b-instruct:free | free\nqwen/qwen2.5-vl-72b-instruct | paid\nqwen/qwen2.5-vl-72b-instruct:free | free\nqwen/qwen-2.5-vl-72b-instruct | paid\nqwen/qwen-2.5-vl-7b-instruct | paid\nqwen/qwen-2.5-vl-7b-instruct:free | free\nqwen/qwq-32b | paid\nqwen/qwq-32b:free | free\nqwen/qwq-32b-preview | paid\nqwen/qwq-32b-preview:free | free\nqwen/qwen-2.5-72b-instruct | paid\nqwen/qwen-2.5-72b-instruct:free | free\nqwen/qwen-2.5-7b-instruct | paid\nqwen/qwen-2.5-7b-instruct:free | free\nqwen/qwen-2.5-coder-32b-instruct | paid\nqwen/qwen-2.5-coder-32b-instruct:free | free\nfeatherless/qwerky-72b:free | free\nrekaai/reka-flash-3:free | free\nundi95/remm-slerp-l2-13b | paid\nthedrummer/rocinante-12b | paid\nsophosympatheia/rogue-rose-103b-v0.2:free | free\nsao10k/l3-lunaris-8b | paid\nsao10k/l3-euryale-70b | paid\nsao10k/l3.1-70b-hanami-x1 | paid\nsao10k/l3.1-euryale-70b | paid\nsao10k/l3.3-euryale-70b | paid\nraifle/sorcererlm-8x22b | paid\nsteelskull/l3.3-electra-r1-70b | paid\ntokyotech-llm/llama-3.1-swallow-70b-instruct-v0.3 | paid\nthedrummer/anubis-pro-105b-v1 | paid\nthedrummer/skyfall-36b-v2 | paid\nundi95/toppy-m-7b | paid\nundi95/toppy-m-7b:free | free\nscb10x/llama3.1-typhoon2-70b-instruct | paid\nscb10x/llama3.1-typhoon2-8b-instruct | paid\nthedrummer/unslopnemo-12b | paid\nmicrosoft/wizardlm-2-7b | paid\nmicrosoft/wizardlm-2-8x22b | paid\nx-ai/grok-2-1212 | paid\nx-ai/grok-2-vision-1212 | paid\nx-ai/grok-beta | paid\nx-ai/grok-vision-beta | paid\nxwin-lm/xwin-lm-70b | paid\n\u001b[35m\u001b[1m\u001b[22m\u001b[39m\n\u001b[35m\u001b[1m OpenAI models:\u001b[22m\u001b[39m\n\u001b[35m\u001b[1m\u001b[22m\u001b[39m\nbabbage-002\nchatgpt-4o-latest\ndall-e-2\ndall-e-3\ndavinci-002\ngpt-3.5-turbo\ngpt-3.5-turbo-0125\ngpt-3.5-turbo-1106\ngpt-3.5-turbo-16k\ngpt-3.5-turbo-instruct\ngpt-3.5-turbo-instruct-0914\ngpt-4\ngpt-4-0125-preview\ngpt-4-0613\ngpt-4-1106-preview\ngpt-4-turbo\ngpt-4-turbo-2024-04-09\ngpt-4-turbo-preview\ngpt-4.5-preview\ngpt-4.5-preview-2025-02-27\ngpt-4o\ngpt-4o-2024-05-13\ngpt-4o-2024-08-06\ngpt-4o-2024-11-20\ngpt-4o-audio-preview\ngpt-4o-audio-preview-2024-10-01\ngpt-4o-audio-preview-2024-12-17\ngpt-4o-mini\ngpt-4o-mini-2024-07-18\ngpt-4o-mini-audio-preview\ngpt-4o-mini-audio-preview-2024-12-17\ngpt-4o-mini-realtime-preview\ngpt-4o-mini-realtime-preview-2024-12-17\ngpt-4o-mini-search-preview\ngpt-4o-mini-search-preview-2025-03-11\ngpt-4o-mini-transcribe\ngpt-4o-mini-tts\ngpt-4o-realtime-preview\ngpt-4o-realtime-preview-2024-10-01\ngpt-4o-realtime-preview-2024-12-17\ngpt-4o-search-preview\ngpt-4o-search-preview-2025-03-11\ngpt-4o-transcribe\no1\no1-2024-12-17\no1-mini\no1-mini-2024-09-12\no1-preview\no1-preview-2024-09-12\no1-pro\no1-pro-2025-03-19\no3-mini\no3-mini-2025-01-31\nomni-moderation-2024-09-26\nomni-moderation-latest\ntext-embedding-3-large\ntext-embedding-3-small\ntext-embedding-ada-002\ntts-1\ntts-1-1106\ntts-1-hd\ntts-1-hd-1106\nwhisper-1\n-----\n\n\u001b[35m\u001b[1m\u001b[22m\u001b[39m\n\u001b[35m\u001b[1m Deepseek models:\u001b[22m\u001b[39m\n\u001b[35m\u001b[1m\u001b[22m\u001b[39m\ndeepseek-chat\ndeepseek-reasoner\n-----\n" }, "router": { "type": "string", diff --git a/packages/osr-code-bot/schema_ui.json b/packages/osr-code-bot/schema_ui.json index 4b25261..987b53b 100644 --- a/packages/osr-code-bot/schema_ui.json +++ 
b/packages/osr-code-bot/schema_ui.json @@ -63,7 +63,7 @@ "ui:title": "Api_key" }, "model": { - "ui:description": "AI model to use for processing. Available models:\n\u001b[35m\u001b[1m\u001b[22m\u001b[39m\n\u001b[35m\u001b[1m OpenRouter models:\u001b[22m\u001b[39m\n\u001b[35m\u001b[1m\u001b[22m\u001b[39m\n01-ai/yi-large | paid\naetherwiing/mn-starcannon-12b | paid\nai21/jamba-1-5-large | paid\nai21/jamba-1-5-mini | paid\nai21/jamba-1.6-large | paid\nai21/jamba-instruct | paid\nai21/jamba-1.6-mini | paid\naion-labs/aion-1.0 | paid\naion-labs/aion-1.0-mini | paid\naion-labs/aion-rp-llama-3.1-8b | paid\njondurbin/airoboros-l2-70b | paid\nallenai/olmo-2-0325-32b-instruct | paid\namazon/nova-lite-v1 | paid\namazon/nova-micro-v1 | paid\namazon/nova-pro-v1 | paid\nanthropic/claude-3-haiku | paid\nanthropic/claude-3-haiku:beta | paid\nanthropic/claude-3-opus | paid\nanthropic/claude-3-opus:beta | paid\nanthropic/claude-3-sonnet | paid\nanthropic/claude-3-sonnet:beta | paid\nanthropic/claude-3.5-haiku | paid\nanthropic/claude-3.5-haiku-20241022 | paid\nanthropic/claude-3.5-haiku-20241022:beta | paid\nanthropic/claude-3.5-haiku:beta | paid\nanthropic/claude-3.5-sonnet | paid\nanthropic/claude-3.5-sonnet-20240620 | paid\nanthropic/claude-3.5-sonnet-20240620:beta | paid\nanthropic/claude-3.5-sonnet:beta | paid\nanthropic/claude-3.7-sonnet | paid\nanthropic/claude-3.7-sonnet:beta | paid\nanthropic/claude-3.7-sonnet:thinking | paid\nanthropic/claude-2 | paid\nanthropic/claude-2:beta | paid\nanthropic/claude-2.0 | paid\nanthropic/claude-2.0:beta | paid\nanthropic/claude-2.1 | paid\nanthropic/claude-2.1:beta | paid\nopenrouter/auto | paid\ncohere/command | paid\ncohere/command-a | paid\ncohere/command-r | paid\ncohere/command-r-03-2024 | paid\ncohere/command-r-08-2024 | paid\ncohere/command-r-plus | paid\ncohere/command-r-plus-04-2024 | paid\ncohere/command-r-plus-08-2024 | paid\ncohere/command-r7b-12-2024 | paid\ndeepseek/deepseek-r1-zero:free | free\ndeepseek/deepseek-chat | paid\ndeepseek/deepseek-chat:free | free\ndeepseek/deepseek-r1 | paid\ndeepseek/deepseek-r1:free | free\ndeepseek/deepseek-r1-distill-llama-70b | paid\ndeepseek/deepseek-r1-distill-llama-70b:free | free\ndeepseek/deepseek-r1-distill-llama-8b | paid\ndeepseek/deepseek-r1-distill-qwen-1.5b | paid\ndeepseek/deepseek-r1-distill-qwen-14b | paid\ndeepseek/deepseek-r1-distill-qwen-14b:free | free\ndeepseek/deepseek-r1-distill-qwen-32b | paid\ndeepseek/deepseek-r1-distill-qwen-32b:free | free\ncognitivecomputations/dolphin-mixtral-8x7b | paid\ncognitivecomputations/dolphin-mixtral-8x22b | paid\ncognitivecomputations/dolphin3.0-mistral-24b:free | free\ncognitivecomputations/dolphin3.0-r1-mistral-24b:free | free\neva-unit-01/eva-llama-3.33-70b | paid\neva-unit-01/eva-qwen-2.5-32b | paid\neva-unit-01/eva-qwen-2.5-72b | paid\nsao10k/fimbulvetr-11b-v2 | paid\nalpindale/goliath-120b | paid\ngoogle/gemini-2.0-flash-lite-001 | paid\ngoogle/gemini-2.0-flash-thinking-exp-1219:free | free\ngoogle/gemini-2.0-flash-thinking-exp:free | free\ngoogle/gemini-exp-1206:free | free\ngoogle/gemini-flash-1.5 | paid\ngoogle/gemini-flash-1.5-8b | paid\ngoogle/gemini-flash-1.5-8b-exp | paid\ngoogle/gemini-2.0-flash-001 | paid\ngoogle/gemini-2.0-flash-exp:free | free\ngoogle/gemini-2.0-flash-lite-preview-02-05:free | free\ngoogle/gemini-pro | paid\ngoogle/gemini-pro-1.5 | paid\ngoogle/gemini-2.0-pro-exp-02-05:free | free\ngoogle/gemini-pro-vision | paid\ngoogle/gemma-2-27b-it | paid\ngoogle/gemma-2-9b-it | paid\ngoogle/gemma-2-9b-it:free | 
free\ngoogle/gemma-3-12b-it:free | free\ngoogle/gemma-3-1b-it:free | free\ngoogle/gemma-3-27b-it | paid\ngoogle/gemma-3-27b-it:free | free\ngoogle/gemma-3-4b-it:free | free\ngoogle/gemma-7b-it | paid\ngoogle/learnlm-1.5-pro-experimental:free | free\ngoogle/palm-2-chat-bison | paid\ngoogle/palm-2-chat-bison-32k | paid\ngoogle/palm-2-codechat-bison | paid\ngoogle/palm-2-codechat-bison-32k | paid\nhuggingfaceh4/zephyr-7b-beta:free | free\ninfermatic/mn-inferor-12b | paid\ninflection/inflection-3-pi | paid\ninflection/inflection-3-productivity | paid\nlatitudegames/wayfarer-large-70b-llama-3.3 | paid\nliquid/lfm-3b | paid\nliquid/lfm-40b | paid\nliquid/lfm-7b | paid\nallenai/llama-3.1-tulu-3-405b | paid\nmeta-llama/llama-guard-3-8b | paid\nalpindale/magnum-72b | paid\nanthracite-org/magnum-v2-72b | paid\nanthracite-org/magnum-v4-72b | paid\nmancer/weaver | paid\nmeta-llama/llama-2-13b-chat | paid\nmeta-llama/llama-2-70b-chat | paid\nmeta-llama/llama-3-70b-instruct | paid\nmeta-llama/llama-3-8b-instruct | paid\nmeta-llama/llama-3-8b-instruct:free | free\nmeta-llama/llama-3.1-405b | paid\nmeta-llama/llama-3.1-405b-instruct | paid\nmeta-llama/llama-3.1-70b-instruct | paid\nmeta-llama/llama-3.1-8b-instruct | paid\nmeta-llama/llama-3.1-8b-instruct:free | free\nmeta-llama/llama-3.2-11b-vision-instruct | paid\nmeta-llama/llama-3.2-11b-vision-instruct:free | free\nmeta-llama/llama-3.2-1b-instruct | paid\nmeta-llama/llama-3.2-1b-instruct:free | free\nmeta-llama/llama-3.2-3b-instruct | paid\nmeta-llama/llama-3.2-3b-instruct:free | free\nmeta-llama/llama-3.2-90b-vision-instruct | paid\nmeta-llama/llama-3.3-70b-instruct | paid\nmeta-llama/llama-3.3-70b-instruct:free | free\nmeta-llama/llama-guard-2-8b | paid\nmicrosoft/phi-4 | paid\nmicrosoft/phi-4-multimodal-instruct | paid\nmicrosoft/phi-3-medium-128k-instruct | paid\nmicrosoft/phi-3-medium-128k-instruct:free | free\nmicrosoft/phi-3-mini-128k-instruct | paid\nmicrosoft/phi-3-mini-128k-instruct:free | free\nmicrosoft/phi-3.5-mini-128k-instruct | paid\nsophosympatheia/midnight-rose-70b | paid\nminimax/minimax-01 | paid\nmistralai/mistral-large | paid\nmistralai/mistral-large-2407 | paid\nmistralai/mistral-large-2411 | paid\nmistralai/mistral-medium | paid\nnothingiisreal/mn-celeste-12b | paid\nmistralai/mistral-small | paid\nmistralai/mistral-tiny | paid\nmistralai/codestral-2501 | paid\nmistralai/codestral-mamba | paid\nmistralai/ministral-3b | paid\nmistralai/ministral-8b | paid\nmistralai/mistral-7b-instruct | paid\nmistralai/mistral-7b-instruct:free | free\nmistralai/mistral-7b-instruct-v0.1 | paid\nmistralai/mistral-7b-instruct-v0.2 | paid\nmistralai/mistral-7b-instruct-v0.3 | paid\nmistralai/mistral-nemo | paid\nmistralai/mistral-nemo:free | free\nmistralai/mistral-small-24b-instruct-2501 | paid\nmistralai/mistral-small-24b-instruct-2501:free | free\nmistralai/mistral-small-3.1-24b-instruct | paid\nmistralai/mixtral-8x22b-instruct | paid\nmistralai/mixtral-8x7b | paid\nmistralai/mixtral-8x7b-instruct | paid\nmistralai/pixtral-12b | paid\nmistralai/pixtral-large-2411 | paid\nmistralai/mistral-saba | paid\nmoonshotai/moonlight-16b-a3b-instruct:free | free\ngryphe/mythomax-l2-13b | paid\ngryphe/mythomax-l2-13b:free | free\nneversleep/llama-3-lumimaid-70b | paid\nneversleep/llama-3-lumimaid-8b | paid\nneversleep/llama-3-lumimaid-8b:extended | paid\nneversleep/llama-3.1-lumimaid-70b | paid\nneversleep/llama-3.1-lumimaid-8b | paid\nneversleep/noromaid-20b | paid\nnousresearch/deephermes-3-llama-3-8b-preview:free | free\nnousresearch/nous-hermes-llama2-13b 
| paid\nnousresearch/nous-hermes-2-mixtral-8x7b-dpo | paid\nnousresearch/hermes-3-llama-3.1-405b | paid\nnousresearch/hermes-3-llama-3.1-70b | paid\nnousresearch/hermes-2-pro-llama-3-8b | paid\nnvidia/llama-3.1-nemotron-70b-instruct | paid\nnvidia/llama-3.1-nemotron-70b-instruct:free | free\nopen-r1/olympiccoder-32b:free | free\nopen-r1/olympiccoder-7b:free | free\nopenai/chatgpt-4o-latest | paid\nopenai/gpt-3.5-turbo | paid\nopenai/gpt-3.5-turbo-0613 | paid\nopenai/gpt-3.5-turbo-16k | paid\nopenai/gpt-3.5-turbo-0125 | paid\nopenai/gpt-3.5-turbo-1106 | paid\nopenai/gpt-3.5-turbo-instruct | paid\nopenai/gpt-4 | paid\nopenai/gpt-4-0314 | paid\nopenai/gpt-4-32k | paid\nopenai/gpt-4-32k-0314 | paid\nopenai/gpt-4-turbo | paid\nopenai/gpt-4-1106-preview | paid\nopenai/gpt-4-turbo-preview | paid\nopenai/gpt-4.5-preview | paid\nopenai/gpt-4o | paid\nopenai/gpt-4o-2024-05-13 | paid\nopenai/gpt-4o-2024-08-06 | paid\nopenai/gpt-4o-2024-11-20 | paid\nopenai/gpt-4o:extended | paid\nopenai/gpt-4o-search-preview | paid\nopenai/gpt-4o-mini | paid\nopenai/gpt-4o-mini-2024-07-18 | paid\nopenai/gpt-4o-mini-search-preview | paid\nopenai/o1 | paid\nopenai/o1-mini | paid\nopenai/o1-mini-2024-09-12 | paid\nopenai/o1-preview | paid\nopenai/o1-preview-2024-09-12 | paid\nopenai/o3-mini | paid\nopenai/o3-mini-high | paid\nopenchat/openchat-7b | paid\nopenchat/openchat-7b:free | free\nteknium/openhermes-2.5-mistral-7b | paid\nperplexity/llama-3.1-sonar-large-128k-chat | paid\nperplexity/llama-3.1-sonar-large-128k-online | paid\nperplexity/llama-3.1-sonar-small-128k-chat | paid\nperplexity/llama-3.1-sonar-small-128k-online | paid\nperplexity/r1-1776 | paid\nperplexity/sonar | paid\nperplexity/sonar-deep-research | paid\nperplexity/sonar-pro | paid\nperplexity/sonar-reasoning | paid\nperplexity/sonar-reasoning-pro | paid\npygmalionai/mythalion-13b | paid\nqwen/qwen-2-72b-instruct | paid\nqwen/qwen-2-7b-instruct | paid\nqwen/qwen-2-7b-instruct:free | free\nqwen/qwen-vl-max | paid\nqwen/qwen-vl-plus | paid\nqwen/qwen-max | paid\nqwen/qwen-plus | paid\nqwen/qwen-turbo | paid\nqwen/qwen2.5-32b-instruct | paid\nqwen/qwen2.5-vl-72b-instruct | paid\nqwen/qwen2.5-vl-72b-instruct:free | free\nqwen/qwen-2.5-vl-72b-instruct | paid\nqwen/qwen-2.5-vl-7b-instruct | paid\nqwen/qwq-32b | paid\nqwen/qwq-32b:free | free\nqwen/qwq-32b-preview | paid\nqwen/qwq-32b-preview:free | free\nqwen/qwen-2.5-72b-instruct | paid\nqwen/qwen-2.5-72b-instruct:free | free\nqwen/qwen-2.5-7b-instruct | paid\nqwen/qwen-2.5-coder-32b-instruct | paid\nqwen/qwen-2.5-coder-32b-instruct:free | free\nrekaai/reka-flash-3:free | free\nundi95/remm-slerp-l2-13b | paid\nthedrummer/rocinante-12b | paid\nsophosympatheia/rogue-rose-103b-v0.2:free | free\nsao10k/l3-lunaris-8b | paid\nsao10k/l3-euryale-70b | paid\nsao10k/l3.1-70b-hanami-x1 | paid\nsao10k/l3.1-euryale-70b | paid\nsao10k/l3.3-euryale-70b | paid\nraifle/sorcererlm-8x22b | paid\nsteelskull/l3.3-electra-r1-70b | paid\ntokyotech-llm/llama-3.1-swallow-70b-instruct-v0.3 | paid\nthedrummer/anubis-pro-105b-v1 | paid\nthedrummer/skyfall-36b-v2 | paid\nundi95/toppy-m-7b | paid\nundi95/toppy-m-7b:free | free\nthedrummer/unslopnemo-12b | paid\nmicrosoft/wizardlm-2-7b | paid\nmicrosoft/wizardlm-2-8x22b | paid\nx-ai/grok-2-1212 | paid\nx-ai/grok-2-vision-1212 | paid\nx-ai/grok-beta | paid\nx-ai/grok-vision-beta | paid\nxwin-lm/xwin-lm-70b | paid\n\u001b[35m\u001b[1m\u001b[22m\u001b[39m\n\u001b[35m\u001b[1m OpenAI 
models:\u001b[22m\u001b[39m\n\u001b[35m\u001b[1m\u001b[22m\u001b[39m\nbabbage-002\nchatgpt-4o-latest\ndall-e-2\ndall-e-3\ndavinci-002\ngpt-3.5-turbo\ngpt-3.5-turbo-0125\ngpt-3.5-turbo-1106\ngpt-3.5-turbo-16k\ngpt-3.5-turbo-instruct\ngpt-3.5-turbo-instruct-0914\ngpt-4\ngpt-4-0125-preview\ngpt-4-0613\ngpt-4-1106-preview\ngpt-4-turbo\ngpt-4-turbo-2024-04-09\ngpt-4-turbo-preview\ngpt-4.5-preview\ngpt-4.5-preview-2025-02-27\ngpt-4o\ngpt-4o-2024-05-13\ngpt-4o-2024-08-06\ngpt-4o-2024-11-20\ngpt-4o-audio-preview\ngpt-4o-audio-preview-2024-10-01\ngpt-4o-audio-preview-2024-12-17\ngpt-4o-mini\ngpt-4o-mini-2024-07-18\ngpt-4o-mini-audio-preview\ngpt-4o-mini-audio-preview-2024-12-17\ngpt-4o-mini-realtime-preview\ngpt-4o-mini-realtime-preview-2024-12-17\ngpt-4o-mini-search-preview\ngpt-4o-mini-search-preview-2025-03-11\ngpt-4o-realtime-preview\ngpt-4o-realtime-preview-2024-10-01\ngpt-4o-realtime-preview-2024-12-17\ngpt-4o-search-preview\ngpt-4o-search-preview-2025-03-11\no1\no1-2024-12-17\no1-mini\no1-mini-2024-09-12\no1-preview\no1-preview-2024-09-12\no3-mini\no3-mini-2025-01-31\nomni-moderation-2024-09-26\nomni-moderation-latest\ntext-embedding-3-large\ntext-embedding-3-small\ntext-embedding-ada-002\ntts-1\ntts-1-1106\ntts-1-hd\ntts-1-hd-1106\nwhisper-1\n-----\n\n\u001b[35m\u001b[1m\u001b[22m\u001b[39m\n\u001b[35m\u001b[1m Deepseek models:\u001b[22m\u001b[39m\n\u001b[35m\u001b[1m\u001b[22m\u001b[39m\ndeepseek-chat\ndeepseek-reasoner\n-----\n", + "ui:description": "AI model to use for processing. Available models:\n\u001b[35m\u001b[1m\u001b[22m\u001b[39m\n\u001b[35m\u001b[1m OpenRouter models:\u001b[22m\u001b[39m\n\u001b[35m\u001b[1m\u001b[22m\u001b[39m\n01-ai/yi-large | paid\naetherwiing/mn-starcannon-12b | paid\nai21/jamba-1-5-large | paid\nai21/jamba-1-5-mini | paid\nai21/jamba-1.6-large | paid\nai21/jamba-instruct | paid\nai21/jamba-1.6-mini | paid\naion-labs/aion-1.0 | paid\naion-labs/aion-1.0-mini | paid\naion-labs/aion-rp-llama-3.1-8b | paid\njondurbin/airoboros-l2-70b | paid\nallenai/molmo-7b-d:free | free\nallenai/olmo-2-0325-32b-instruct | paid\namazon/nova-lite-v1 | paid\namazon/nova-micro-v1 | paid\namazon/nova-pro-v1 | paid\nanthropic/claude-3-haiku | paid\nanthropic/claude-3-haiku:beta | paid\nanthropic/claude-3-opus | paid\nanthropic/claude-3-opus:beta | paid\nanthropic/claude-3-sonnet | paid\nanthropic/claude-3-sonnet:beta | paid\nanthropic/claude-3.5-haiku | paid\nanthropic/claude-3.5-haiku-20241022 | paid\nanthropic/claude-3.5-haiku-20241022:beta | paid\nanthropic/claude-3.5-haiku:beta | paid\nanthropic/claude-3.5-sonnet | paid\nanthropic/claude-3.5-sonnet-20240620 | paid\nanthropic/claude-3.5-sonnet-20240620:beta | paid\nanthropic/claude-3.5-sonnet:beta | paid\nanthropic/claude-3.7-sonnet | paid\nanthropic/claude-3.7-sonnet:beta | paid\nanthropic/claude-3.7-sonnet:thinking | paid\nanthropic/claude-2 | paid\nanthropic/claude-2:beta | paid\nanthropic/claude-2.0 | paid\nanthropic/claude-2.0:beta | paid\nanthropic/claude-2.1 | paid\nanthropic/claude-2.1:beta | paid\nopenrouter/auto | paid\nbytedance-research/ui-tars-72b:free | free\ncohere/command | paid\ncohere/command-a | paid\ncohere/command-r | paid\ncohere/command-r-03-2024 | paid\ncohere/command-r-08-2024 | paid\ncohere/command-r-plus | paid\ncohere/command-r-plus-04-2024 | paid\ncohere/command-r-plus-08-2024 | paid\ncohere/command-r7b-12-2024 | paid\ndeepseek/deepseek-r1-zero:free | free\ndeepseek/deepseek-chat | paid\ndeepseek/deepseek-chat:free | free\ndeepseek/deepseek-chat-v3-0324 | paid\ndeepseek/deepseek-chat-v3-0324:free | 
free\ndeepseek/deepseek-v3-base:free | free\ndeepseek/deepseek-r1 | paid\ndeepseek/deepseek-r1:free | free\ndeepseek/deepseek-r1-distill-llama-70b | paid\ndeepseek/deepseek-r1-distill-llama-70b:free | free\ndeepseek/deepseek-r1-distill-llama-8b | paid\ndeepseek/deepseek-r1-distill-qwen-1.5b | paid\ndeepseek/deepseek-r1-distill-qwen-14b | paid\ndeepseek/deepseek-r1-distill-qwen-14b:free | free\ndeepseek/deepseek-r1-distill-qwen-32b | paid\ndeepseek/deepseek-r1-distill-qwen-32b:free | free\ncognitivecomputations/dolphin-mixtral-8x7b | paid\ncognitivecomputations/dolphin-mixtral-8x22b | paid\ncognitivecomputations/dolphin3.0-mistral-24b:free | free\ncognitivecomputations/dolphin3.0-r1-mistral-24b:free | free\neva-unit-01/eva-llama-3.33-70b | paid\neva-unit-01/eva-qwen-2.5-32b | paid\neva-unit-01/eva-qwen-2.5-72b | paid\nsao10k/fimbulvetr-11b-v2 | paid\nalpindale/goliath-120b | paid\ngoogle/gemini-flash-1.5 | paid\ngoogle/gemini-flash-1.5-8b | paid\ngoogle/gemini-flash-1.5-8b-exp | paid\ngoogle/gemini-pro-1.5 | paid\ngoogle/gemini-2.0-flash-001 | paid\ngoogle/gemini-2.0-flash-exp:free | free\ngoogle/gemini-2.0-flash-lite-001 | paid\ngoogle/gemini-2.0-flash-thinking-exp-1219:free | free\ngoogle/gemini-2.0-flash-thinking-exp:free | free\ngoogle/gemini-2.0-pro-exp-02-05:free | free\ngoogle/gemini-2.5-pro-exp-03-25:free | free\ngoogle/gemini-2.5-pro-preview-03-25 | paid\ngoogle/gemini-pro | paid\ngoogle/gemini-pro-vision | paid\ngoogle/gemma-2-27b-it | paid\ngoogle/gemma-2-9b-it | paid\ngoogle/gemma-2-9b-it:free | free\ngoogle/gemma-3-12b-it | paid\ngoogle/gemma-3-12b-it:free | free\ngoogle/gemma-3-1b-it:free | free\ngoogle/gemma-3-27b-it | paid\ngoogle/gemma-3-27b-it:free | free\ngoogle/gemma-3-4b-it | paid\ngoogle/gemma-3-4b-it:free | free\ngoogle/learnlm-1.5-pro-experimental:free | free\ngoogle/palm-2-chat-bison | paid\ngoogle/palm-2-chat-bison-32k | paid\ngoogle/palm-2-codechat-bison | paid\ngoogle/palm-2-codechat-bison-32k | paid\nhuggingfaceh4/zephyr-7b-beta:free | free\ninfermatic/mn-inferor-12b | paid\ninflection/inflection-3-pi | paid\ninflection/inflection-3-productivity | paid\nlatitudegames/wayfarer-large-70b-llama-3.3 | paid\nliquid/lfm-3b | paid\nliquid/lfm-40b | paid\nliquid/lfm-7b | paid\nmeta-llama/llama-guard-3-8b | paid\nalpindale/magnum-72b | paid\nanthracite-org/magnum-v2-72b | paid\nanthracite-org/magnum-v4-72b | paid\nmancer/weaver | paid\nmeta-llama/llama-2-13b-chat | paid\nmeta-llama/llama-2-70b-chat | paid\nmeta-llama/llama-3-70b-instruct | paid\nmeta-llama/llama-3-8b-instruct | paid\nmeta-llama/llama-3.1-405b | paid\nmeta-llama/llama-3.1-405b-instruct | paid\nmeta-llama/llama-3.1-70b-instruct | paid\nmeta-llama/llama-3.1-8b-instruct | paid\nmeta-llama/llama-3.1-8b-instruct:free | free\nmeta-llama/llama-3.2-11b-vision-instruct | paid\nmeta-llama/llama-3.2-11b-vision-instruct:free | free\nmeta-llama/llama-3.2-1b-instruct | paid\nmeta-llama/llama-3.2-1b-instruct:free | free\nmeta-llama/llama-3.2-3b-instruct | paid\nmeta-llama/llama-3.2-3b-instruct:free | free\nmeta-llama/llama-3.2-90b-vision-instruct | paid\nmeta-llama/llama-3.3-70b-instruct | paid\nmeta-llama/llama-3.3-70b-instruct:free | free\nmeta-llama/llama-4-maverick | paid\nmeta-llama/llama-4-maverick:free | free\nmeta-llama/llama-4-scout | paid\nmeta-llama/llama-4-scout:free | free\nmeta-llama/llama-guard-2-8b | paid\nmicrosoft/phi-4 | paid\nmicrosoft/phi-4-multimodal-instruct | paid\nmicrosoft/phi-3-medium-128k-instruct | paid\nmicrosoft/phi-3-medium-128k-instruct:free | free\nmicrosoft/phi-3-mini-128k-instruct | 
paid\nmicrosoft/phi-3-mini-128k-instruct:free | free\nmicrosoft/phi-3.5-mini-128k-instruct | paid\nsophosympatheia/midnight-rose-70b | paid\nminimax/minimax-01 | paid\nmistralai/mistral-large | paid\nmistralai/mistral-large-2407 | paid\nmistralai/mistral-large-2411 | paid\nmistralai/mistral-medium | paid\nnothingiisreal/mn-celeste-12b | paid\nmistralai/mistral-small | paid\nmistralai/mistral-tiny | paid\nmistralai/codestral-2501 | paid\nmistralai/codestral-mamba | paid\nmistralai/ministral-3b | paid\nmistral/ministral-8b | paid\nmistralai/ministral-8b | paid\nmistralai/mistral-7b-instruct | paid\nmistralai/mistral-7b-instruct:free | free\nmistralai/mistral-7b-instruct-v0.1 | paid\nmistralai/mistral-7b-instruct-v0.2 | paid\nmistralai/mistral-7b-instruct-v0.3 | paid\nmistralai/mistral-nemo | paid\nmistralai/mistral-nemo:free | free\nmistralai/mistral-small-24b-instruct-2501 | paid\nmistralai/mistral-small-24b-instruct-2501:free | free\nmistralai/mistral-small-3.1-24b-instruct | paid\nmistralai/mistral-small-3.1-24b-instruct:free | free\nmistralai/mixtral-8x22b-instruct | paid\nmistralai/mixtral-8x7b | paid\nmistralai/mixtral-8x7b-instruct | paid\nmistralai/pixtral-12b | paid\nmistralai/pixtral-large-2411 | paid\nmistralai/mistral-saba | paid\nmoonshotai/moonlight-16b-a3b-instruct:free | free\ngryphe/mythomax-l2-13b | paid\nneversleep/llama-3-lumimaid-70b | paid\nneversleep/llama-3-lumimaid-8b | paid\nneversleep/llama-3-lumimaid-8b:extended | paid\nneversleep/llama-3.1-lumimaid-70b | paid\nneversleep/llama-3.1-lumimaid-8b | paid\nneversleep/noromaid-20b | paid\nnousresearch/deephermes-3-llama-3-8b-preview:free | free\nnousresearch/nous-hermes-llama2-13b | paid\nnousresearch/nous-hermes-2-mixtral-8x7b-dpo | paid\nnousresearch/hermes-3-llama-3.1-405b | paid\nnousresearch/hermes-3-llama-3.1-70b | paid\nnousresearch/hermes-2-pro-llama-3-8b | paid\nnvidia/llama-3.1-nemotron-70b-instruct | paid\nnvidia/llama-3.1-nemotron-70b-instruct:free | free\nopen-r1/olympiccoder-32b:free | free\nopen-r1/olympiccoder-7b:free | free\nopenai/chatgpt-4o-latest | paid\nopenai/gpt-3.5-turbo | paid\nopenai/gpt-3.5-turbo-0613 | paid\nopenai/gpt-3.5-turbo-16k | paid\nopenai/gpt-3.5-turbo-0125 | paid\nopenai/gpt-3.5-turbo-1106 | paid\nopenai/gpt-3.5-turbo-instruct | paid\nopenai/gpt-4 | paid\nopenai/gpt-4-0314 | paid\nopenai/gpt-4-32k | paid\nopenai/gpt-4-32k-0314 | paid\nopenai/gpt-4-turbo | paid\nopenai/gpt-4-1106-preview | paid\nopenai/gpt-4-turbo-preview | paid\nopenai/gpt-4.5-preview | paid\nopenai/gpt-4o | paid\nopenai/gpt-4o-2024-05-13 | paid\nopenai/gpt-4o-2024-08-06 | paid\nopenai/gpt-4o-2024-11-20 | paid\nopenai/gpt-4o:extended | paid\nopenai/gpt-4o-search-preview | paid\nopenai/gpt-4o-mini | paid\nopenai/gpt-4o-mini-2024-07-18 | paid\nopenai/gpt-4o-mini-search-preview | paid\nopenai/o1 | paid\nopenai/o1-mini | paid\nopenai/o1-mini-2024-09-12 | paid\nopenai/o1-preview | paid\nopenai/o1-preview-2024-09-12 | paid\nopenai/o1-pro | paid\nopenai/o3-mini | paid\nopenai/o3-mini-high | paid\nopenchat/openchat-7b | paid\nopenchat/openchat-7b:free | free\nall-hands/openhands-lm-32b-v0.1 | paid\nperplexity/llama-3.1-sonar-large-128k-online | paid\nperplexity/llama-3.1-sonar-small-128k-online | paid\nperplexity/r1-1776 | paid\nperplexity/sonar | paid\nperplexity/sonar-deep-research | paid\nperplexity/sonar-pro | paid\nperplexity/sonar-reasoning | paid\nperplexity/sonar-reasoning-pro | paid\npygmalionai/mythalion-13b | paid\nopenrouter/quasar-alpha | paid\nqwen/qwen-2-72b-instruct | paid\nqwen/qwen-vl-max | 
paid\nqwen/qwen-vl-plus | paid\nqwen/qwen-max | paid\nqwen/qwen-plus | paid\nqwen/qwen-turbo | paid\nqwen/qwen2.5-32b-instruct | paid\nqwen/qwen2.5-vl-32b-instruct | paid\nqwen/qwen2.5-vl-32b-instruct:free | free\nqwen/qwen2.5-vl-3b-instruct:free | free\nqwen/qwen2.5-vl-72b-instruct | paid\nqwen/qwen2.5-vl-72b-instruct:free | free\nqwen/qwen-2.5-vl-72b-instruct | paid\nqwen/qwen-2.5-vl-7b-instruct | paid\nqwen/qwen-2.5-vl-7b-instruct:free | free\nqwen/qwq-32b | paid\nqwen/qwq-32b:free | free\nqwen/qwq-32b-preview | paid\nqwen/qwq-32b-preview:free | free\nqwen/qwen-2.5-72b-instruct | paid\nqwen/qwen-2.5-72b-instruct:free | free\nqwen/qwen-2.5-7b-instruct | paid\nqwen/qwen-2.5-7b-instruct:free | free\nqwen/qwen-2.5-coder-32b-instruct | paid\nqwen/qwen-2.5-coder-32b-instruct:free | free\nfeatherless/qwerky-72b:free | free\nrekaai/reka-flash-3:free | free\nundi95/remm-slerp-l2-13b | paid\nthedrummer/rocinante-12b | paid\nsophosympatheia/rogue-rose-103b-v0.2:free | free\nsao10k/l3-lunaris-8b | paid\nsao10k/l3-euryale-70b | paid\nsao10k/l3.1-70b-hanami-x1 | paid\nsao10k/l3.1-euryale-70b | paid\nsao10k/l3.3-euryale-70b | paid\nraifle/sorcererlm-8x22b | paid\nsteelskull/l3.3-electra-r1-70b | paid\ntokyotech-llm/llama-3.1-swallow-70b-instruct-v0.3 | paid\nthedrummer/anubis-pro-105b-v1 | paid\nthedrummer/skyfall-36b-v2 | paid\nundi95/toppy-m-7b | paid\nundi95/toppy-m-7b:free | free\nscb10x/llama3.1-typhoon2-70b-instruct | paid\nscb10x/llama3.1-typhoon2-8b-instruct | paid\nthedrummer/unslopnemo-12b | paid\nmicrosoft/wizardlm-2-7b | paid\nmicrosoft/wizardlm-2-8x22b | paid\nx-ai/grok-2-1212 | paid\nx-ai/grok-2-vision-1212 | paid\nx-ai/grok-beta | paid\nx-ai/grok-vision-beta | paid\nxwin-lm/xwin-lm-70b | paid\n\u001b[35m\u001b[1m\u001b[22m\u001b[39m\n\u001b[35m\u001b[1m OpenAI models:\u001b[22m\u001b[39m\n\u001b[35m\u001b[1m\u001b[22m\u001b[39m\nbabbage-002\nchatgpt-4o-latest\ndall-e-2\ndall-e-3\ndavinci-002\ngpt-3.5-turbo\ngpt-3.5-turbo-0125\ngpt-3.5-turbo-1106\ngpt-3.5-turbo-16k\ngpt-3.5-turbo-instruct\ngpt-3.5-turbo-instruct-0914\ngpt-4\ngpt-4-0125-preview\ngpt-4-0613\ngpt-4-1106-preview\ngpt-4-turbo\ngpt-4-turbo-2024-04-09\ngpt-4-turbo-preview\ngpt-4.5-preview\ngpt-4.5-preview-2025-02-27\ngpt-4o\ngpt-4o-2024-05-13\ngpt-4o-2024-08-06\ngpt-4o-2024-11-20\ngpt-4o-audio-preview\ngpt-4o-audio-preview-2024-10-01\ngpt-4o-audio-preview-2024-12-17\ngpt-4o-mini\ngpt-4o-mini-2024-07-18\ngpt-4o-mini-audio-preview\ngpt-4o-mini-audio-preview-2024-12-17\ngpt-4o-mini-realtime-preview\ngpt-4o-mini-realtime-preview-2024-12-17\ngpt-4o-mini-search-preview\ngpt-4o-mini-search-preview-2025-03-11\ngpt-4o-mini-transcribe\ngpt-4o-mini-tts\ngpt-4o-realtime-preview\ngpt-4o-realtime-preview-2024-10-01\ngpt-4o-realtime-preview-2024-12-17\ngpt-4o-search-preview\ngpt-4o-search-preview-2025-03-11\ngpt-4o-transcribe\no1\no1-2024-12-17\no1-mini\no1-mini-2024-09-12\no1-preview\no1-preview-2024-09-12\no1-pro\no1-pro-2025-03-19\no3-mini\no3-mini-2025-01-31\nomni-moderation-2024-09-26\nomni-moderation-latest\ntext-embedding-3-large\ntext-embedding-3-small\ntext-embedding-ada-002\ntts-1\ntts-1-1106\ntts-1-hd\ntts-1-hd-1106\nwhisper-1\n-----\n\n\u001b[35m\u001b[1m\u001b[22m\u001b[39m\n\u001b[35m\u001b[1m Deepseek models:\u001b[22m\u001b[39m\n\u001b[35m\u001b[1m\u001b[22m\u001b[39m\ndeepseek-chat\ndeepseek-reasoner\n-----\n", "ui:title": "Model" }, "router": { diff --git a/packages/osr-code-bot/src/zod_schema.ts b/packages/osr-code-bot/src/zod_schema.ts index 537cbbe..1f74d33 100644 --- a/packages/osr-code-bot/src/zod_schema.ts +++ 
@@ -13,14 +13,11 @@ export const get_var = (key: string ='') => env.get(key).asString() || env.get(k
 export const HOME = (sub = '') => path.join(process.env[(process.platform == 'win32' ) ? 'USERPROFILE' : 'HOME'] || '', sub)
 export const PREFERENCES_DEFAULT = (key: string = 'KBOT_PREFERENCES') => get_var(key) || path.join(HOME(`.${API_PREFIX}`), PREFERENCES_FILE_NAME)
-
 import { Filters } from './filters'
 import { models_dist } from './models'
 import { defaultTemplate } from './tools'
-
 export const E_Filters = z.enum(Object.keys(Filters) as any)
-
 export const RouterTypeSchema = z.enum(['openrouter', 'openai', 'deepseek', 'huggingface', 'ollama', 'fireworks', 'gemini', 'xai'])
 export type RouterType = z.infer<typeof RouterTypeSchema>
diff --git a/packages/osr-code-bot/src/zod_types.ts b/packages/osr-code-bot/src/zod_types.ts
index 909dba6..1a6fe98 100644
--- a/packages/osr-code-bot/src/zod_types.ts
+++ b/packages/osr-code-bot/src/zod_types.ts
@@ -34,6 +34,7 @@ export interface IKBotOptions {
 aion-labs/aion-1.0-mini | paid
 aion-labs/aion-rp-llama-3.1-8b | paid
 jondurbin/airoboros-l2-70b | paid
+ allenai/molmo-7b-d:free | free
 allenai/olmo-2-0325-32b-instruct | paid
 amazon/nova-lite-v1 | paid
 amazon/nova-micro-v1 | paid
@@ -62,6 +63,7 @@ export interface IKBotOptions {
 anthropic/claude-2.1 | paid
 anthropic/claude-2.1:beta | paid
 openrouter/auto | paid
+ bytedance-research/ui-tars-72b:free | free
 cohere/command | paid
 cohere/command-a | paid
 cohere/command-r | paid
@@ -74,6 +76,9 @@ export interface IKBotOptions {
 deepseek/deepseek-r1-zero:free | free
 deepseek/deepseek-chat | paid
 deepseek/deepseek-chat:free | free
+ deepseek/deepseek-chat-v3-0324 | paid
+ deepseek/deepseek-chat-v3-0324:free | free
+ deepseek/deepseek-v3-base:free | free
 deepseek/deepseek-r1 | paid
 deepseek/deepseek-r1:free | free
 deepseek/deepseek-r1-distill-llama-70b | paid
@@ -93,29 +98,30 @@ export interface IKBotOptions {
 eva-unit-01/eva-qwen-2.5-72b | paid
 sao10k/fimbulvetr-11b-v2 | paid
 alpindale/goliath-120b | paid
- google/gemini-2.0-flash-lite-001 | paid
- google/gemini-2.0-flash-thinking-exp-1219:free | free
- google/gemini-2.0-flash-thinking-exp:free | free
- google/gemini-exp-1206:free | free
 google/gemini-flash-1.5 | paid
 google/gemini-flash-1.5-8b | paid
 google/gemini-flash-1.5-8b-exp | paid
+ google/gemini-pro-1.5 | paid
 google/gemini-2.0-flash-001 | paid
 google/gemini-2.0-flash-exp:free | free
- google/gemini-2.0-flash-lite-preview-02-05:free | free
- google/gemini-pro | paid
- google/gemini-pro-1.5 | paid
+ google/gemini-2.0-flash-lite-001 | paid
+ google/gemini-2.0-flash-thinking-exp-1219:free | free
+ google/gemini-2.0-flash-thinking-exp:free | free
 google/gemini-2.0-pro-exp-02-05:free | free
+ google/gemini-2.5-pro-exp-03-25:free | free
+ google/gemini-2.5-pro-preview-03-25 | paid
+ google/gemini-pro | paid
 google/gemini-pro-vision | paid
 google/gemma-2-27b-it | paid
 google/gemma-2-9b-it | paid
 google/gemma-2-9b-it:free | free
+ google/gemma-3-12b-it | paid
 google/gemma-3-12b-it:free | free
 google/gemma-3-1b-it:free | free
 google/gemma-3-27b-it | paid
 google/gemma-3-27b-it:free | free
+ google/gemma-3-4b-it | paid
 google/gemma-3-4b-it:free | free
- google/gemma-7b-it | paid
 google/learnlm-1.5-pro-experimental:free | free
 google/palm-2-chat-bison | paid
 google/palm-2-chat-bison-32k | paid
@@ -129,7 +135,6 @@ export interface IKBotOptions {
 liquid/lfm-3b | paid
 liquid/lfm-40b | paid
 liquid/lfm-7b | paid
- allenai/llama-3.1-tulu-3-405b | paid
 meta-llama/llama-guard-3-8b | paid
 alpindale/magnum-72b | paid
 anthracite-org/magnum-v2-72b | paid
@@ -139,7 +144,6 @@ export interface IKBotOptions {
 meta-llama/llama-2-70b-chat | paid
 meta-llama/llama-3-70b-instruct | paid
 meta-llama/llama-3-8b-instruct | paid
- meta-llama/llama-3-8b-instruct:free | free
 meta-llama/llama-3.1-405b | paid
 meta-llama/llama-3.1-405b-instruct | paid
 meta-llama/llama-3.1-70b-instruct | paid
@@ -154,6 +158,10 @@ export interface IKBotOptions {
 meta-llama/llama-3.2-90b-vision-instruct | paid
 meta-llama/llama-3.3-70b-instruct | paid
 meta-llama/llama-3.3-70b-instruct:free | free
+ meta-llama/llama-4-maverick | paid
+ meta-llama/llama-4-maverick:free | free
+ meta-llama/llama-4-scout | paid
+ meta-llama/llama-4-scout:free | free
 meta-llama/llama-guard-2-8b | paid
 microsoft/phi-4 | paid
 microsoft/phi-4-multimodal-instruct | paid
@@ -174,6 +182,7 @@ export interface IKBotOptions {
 mistralai/codestral-2501 | paid
 mistralai/codestral-mamba | paid
 mistralai/ministral-3b | paid
+ mistral/ministral-8b | paid
 mistralai/ministral-8b | paid
 mistralai/mistral-7b-instruct | paid
 mistralai/mistral-7b-instruct:free | free
@@ -185,6 +194,7 @@ export interface IKBotOptions {
 mistralai/mistral-small-24b-instruct-2501 | paid
 mistralai/mistral-small-24b-instruct-2501:free | free
 mistralai/mistral-small-3.1-24b-instruct | paid
+ mistralai/mistral-small-3.1-24b-instruct:free | free
 mistralai/mixtral-8x22b-instruct | paid
 mistralai/mixtral-8x7b | paid
 mistralai/mixtral-8x7b-instruct | paid
@@ -193,7 +203,6 @@ export interface IKBotOptions {
 mistralai/mistral-saba | paid
 moonshotai/moonlight-16b-a3b-instruct:free | free
 gryphe/mythomax-l2-13b | paid
- gryphe/mythomax-l2-13b:free | free
 neversleep/llama-3-lumimaid-70b | paid
 neversleep/llama-3-lumimaid-8b | paid
 neversleep/llama-3-lumimaid-8b:extended | paid
@@ -239,14 +248,13 @@ export interface IKBotOptions {
 openai/o1-mini-2024-09-12 | paid
 openai/o1-preview | paid
 openai/o1-preview-2024-09-12 | paid
+ openai/o1-pro | paid
 openai/o3-mini | paid
 openai/o3-mini-high | paid
 openchat/openchat-7b | paid
 openchat/openchat-7b:free | free
- teknium/openhermes-2.5-mistral-7b | paid
- perplexity/llama-3.1-sonar-large-128k-chat | paid
+ all-hands/openhands-lm-32b-v0.1 | paid
 perplexity/llama-3.1-sonar-large-128k-online | paid
- perplexity/llama-3.1-sonar-small-128k-chat | paid
 perplexity/llama-3.1-sonar-small-128k-online | paid
 perplexity/r1-1776 | paid
 perplexity/sonar | paid
@@ -255,19 +263,22 @@ export interface IKBotOptions {
 perplexity/sonar-reasoning | paid
 perplexity/sonar-reasoning-pro | paid
 pygmalionai/mythalion-13b | paid
+ openrouter/quasar-alpha | paid
 qwen/qwen-2-72b-instruct | paid
- qwen/qwen-2-7b-instruct | paid
- qwen/qwen-2-7b-instruct:free | free
 qwen/qwen-vl-max | paid
 qwen/qwen-vl-plus | paid
 qwen/qwen-max | paid
 qwen/qwen-plus | paid
 qwen/qwen-turbo | paid
 qwen/qwen2.5-32b-instruct | paid
+ qwen/qwen2.5-vl-32b-instruct | paid
+ qwen/qwen2.5-vl-32b-instruct:free | free
+ qwen/qwen2.5-vl-3b-instruct:free | free
 qwen/qwen2.5-vl-72b-instruct | paid
 qwen/qwen2.5-vl-72b-instruct:free | free
 qwen/qwen-2.5-vl-72b-instruct | paid
 qwen/qwen-2.5-vl-7b-instruct | paid
+ qwen/qwen-2.5-vl-7b-instruct:free | free
 qwen/qwq-32b | paid
 qwen/qwq-32b:free | free
 qwen/qwq-32b-preview | paid
@@ -275,8 +286,10 @@ export interface IKBotOptions {
 qwen/qwen-2.5-72b-instruct | paid
 qwen/qwen-2.5-72b-instruct:free | free
 qwen/qwen-2.5-7b-instruct | paid
+ qwen/qwen-2.5-7b-instruct:free | free
 qwen/qwen-2.5-coder-32b-instruct | paid
 qwen/qwen-2.5-coder-32b-instruct:free | free
+ featherless/qwerky-72b:free | free
 rekaai/reka-flash-3:free | free
 undi95/remm-slerp-l2-13b | paid
 thedrummer/rocinante-12b | paid
@@ -293,6 +306,8 @@ export interface IKBotOptions {
 thedrummer/skyfall-36b-v2 | paid
 undi95/toppy-m-7b | paid
 undi95/toppy-m-7b:free | free
+ scb10x/llama3.1-typhoon2-70b-instruct | paid
+ scb10x/llama3.1-typhoon2-8b-instruct | paid
 thedrummer/unslopnemo-12b | paid
 microsoft/wizardlm-2-7b | paid
 microsoft/wizardlm-2-8x22b | paid
@@ -339,17 +354,22 @@ export interface IKBotOptions {
 gpt-4o-mini-realtime-preview-2024-12-17
 gpt-4o-mini-search-preview
 gpt-4o-mini-search-preview-2025-03-11
+ gpt-4o-mini-transcribe
+ gpt-4o-mini-tts
 gpt-4o-realtime-preview
 gpt-4o-realtime-preview-2024-10-01
 gpt-4o-realtime-preview-2024-12-17
 gpt-4o-search-preview
 gpt-4o-search-preview-2025-03-11
+ gpt-4o-transcribe
 o1
 o1-2024-12-17
 o1-mini
 o1-mini-2024-09-12
 o1-preview
 o1-preview-2024-09-12
+ o1-pro
+ o1-pro-2025-03-19
 o3-mini
 o3-mini-2025-01-31
 omni-moderation-2024-09-26