Merge pull request #2373 from gh-xj/feat/max-token-continuation-policy

feat(agent): add provider-agnostic max-token continuation policy
This commit is contained in:
Chum Yin 2026-03-02 13:40:35 +08:00 committed by GitHub
commit af53996604
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
33 changed files with 1591 additions and 145 deletions

View File

@ -8,6 +8,8 @@ on:
- "Cargo.lock"
- "src/**"
- "crates/**"
- "scripts/ci/ensure_c_toolchain.sh"
- "scripts/ci/ensure_cargo_component.sh"
- "scripts/ci/ensure_cc.sh"
- "scripts/ci/reproducible_build_check.sh"
- "scripts/ci/self_heal_rust_toolchain.sh"
@ -19,6 +21,8 @@ on:
- "Cargo.lock"
- "src/**"
- "crates/**"
- "scripts/ci/ensure_c_toolchain.sh"
- "scripts/ci/ensure_cargo_component.sh"
- "scripts/ci/ensure_cc.sh"
- "scripts/ci/reproducible_build_check.sh"
- "scripts/ci/self_heal_rust_toolchain.sh"
@ -56,6 +60,10 @@ jobs:
name: Reproducible Build Probe
runs-on: ubuntu-22.04
timeout-minutes: 75
env:
CARGO_HOME: ${{ github.workspace }}/.ci-rust/${{ github.run_id }}-${{ github.run_attempt }}-${{ github.job }}/cargo
RUSTUP_HOME: ${{ github.workspace }}/.ci-rust/${{ github.run_id }}-${{ github.run_attempt }}-${{ github.job }}/rustup
CARGO_TARGET_DIR: ${{ github.workspace }}/.ci-rust/${{ github.run_id }}-${{ github.run_attempt }}-${{ github.job }}/target
steps:
- name: Checkout
uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
@ -64,6 +72,10 @@ jobs:
shell: bash
run: ./scripts/ci/self_heal_rust_toolchain.sh 1.92.0
- name: Ensure C toolchain
shell: bash
run: bash ./scripts/ci/ensure_c_toolchain.sh
- name: Setup Rust
uses: dtolnay/rust-toolchain@631a55b12751854ce901bb631d5902ceb48146f7 # stable
with:
@ -71,6 +83,11 @@ jobs:
- name: Ensure C toolchain for Rust builds
run: ./scripts/ci/ensure_cc.sh
- name: Ensure cargo component
shell: bash
env:
ENSURE_CARGO_COMPONENT_STRICT: "true"
run: bash ./scripts/ci/ensure_cargo_component.sh 1.92.0
- name: Run reproducible build check
shell: bash

View File

@ -52,6 +52,10 @@ jobs:
if: needs.changes.outputs.rust_changed == 'true'
runs-on: ubuntu-22.04
timeout-minutes: 75
env:
CARGO_HOME: ${{ github.workspace }}/.ci-rust/${{ github.run_id }}-${{ github.run_attempt }}-${{ github.job }}/cargo
RUSTUP_HOME: ${{ github.workspace }}/.ci-rust/${{ github.run_id }}-${{ github.run_attempt }}-${{ github.job }}/rustup
CARGO_TARGET_DIR: ${{ github.workspace }}/.ci-rust/${{ github.run_id }}-${{ github.run_attempt }}-${{ github.job }}/target
steps:
- uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
with:
@ -59,12 +63,18 @@ jobs:
- name: Self-heal Rust toolchain cache
shell: bash
run: ./scripts/ci/self_heal_rust_toolchain.sh 1.92.0
- name: Ensure C toolchain
shell: bash
run: bash ./scripts/ci/ensure_c_toolchain.sh
- uses: dtolnay/rust-toolchain@631a55b12751854ce901bb631d5902ceb48146f7 # stable
with:
toolchain: 1.92.0
components: rustfmt, clippy
- name: Ensure C toolchain for Rust builds
run: ./scripts/ci/ensure_cc.sh
- name: Ensure cargo component
shell: bash
run: bash ./scripts/ci/ensure_cargo_component.sh 1.92.0
- uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v3
with:
prefix-key: ci-run-check
@ -128,8 +138,15 @@ jobs:
if: needs.changes.outputs.rust_changed == 'true'
runs-on: ubuntu-22.04
timeout-minutes: 120
env:
CARGO_HOME: ${{ github.workspace }}/.ci-rust/${{ github.run_id }}-${{ github.run_attempt }}-${{ github.job }}/cargo
RUSTUP_HOME: ${{ github.workspace }}/.ci-rust/${{ github.run_id }}-${{ github.run_attempt }}-${{ github.job }}/rustup
CARGO_TARGET_DIR: ${{ github.workspace }}/.ci-rust/${{ github.run_id }}-${{ github.run_attempt }}-${{ github.job }}/target
steps:
- uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
- name: Ensure C toolchain
shell: bash
run: bash ./scripts/ci/ensure_c_toolchain.sh
- name: Self-heal Rust toolchain cache
shell: bash
run: ./scripts/ci/self_heal_rust_toolchain.sh 1.92.0
@ -138,6 +155,9 @@ jobs:
toolchain: 1.92.0
- name: Ensure C toolchain for Rust builds
run: ./scripts/ci/ensure_cc.sh
- name: Ensure cargo component
shell: bash
run: bash ./scripts/ci/ensure_cargo_component.sh 1.92.0
- uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v3
with:
prefix-key: ci-run-check
@ -197,9 +217,16 @@ jobs:
if: needs.changes.outputs.rust_changed == 'true'
runs-on: ubuntu-22.04
timeout-minutes: 90
env:
CARGO_HOME: ${{ github.workspace }}/.ci-rust/${{ github.run_id }}-${{ github.run_attempt }}-${{ github.job }}/cargo
RUSTUP_HOME: ${{ github.workspace }}/.ci-rust/${{ github.run_id }}-${{ github.run_attempt }}-${{ github.job }}/rustup
CARGO_TARGET_DIR: ${{ github.workspace }}/.ci-rust/${{ github.run_id }}-${{ github.run_attempt }}-${{ github.job }}/target
steps:
- uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
- name: Ensure C toolchain
shell: bash
run: bash ./scripts/ci/ensure_c_toolchain.sh
- name: Self-heal Rust toolchain cache
shell: bash
run: ./scripts/ci/self_heal_rust_toolchain.sh 1.92.0
@ -208,6 +235,9 @@ jobs:
toolchain: 1.92.0
- name: Ensure C toolchain for Rust builds
run: ./scripts/ci/ensure_cc.sh
- name: Ensure cargo component
shell: bash
run: bash ./scripts/ci/ensure_cargo_component.sh 1.92.0
- uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v3
with:
prefix-key: ci-run-build

View File

@ -15,6 +15,8 @@ on:
- ".github/security/unsafe-audit-governance.json"
- "scripts/ci/install_gitleaks.sh"
- "scripts/ci/install_syft.sh"
- "scripts/ci/ensure_c_toolchain.sh"
- "scripts/ci/ensure_cargo_component.sh"
- "scripts/ci/self_heal_rust_toolchain.sh"
- "scripts/ci/deny_policy_guard.py"
- "scripts/ci/secrets_governance_guard.py"
@ -72,6 +74,10 @@ jobs:
name: Security Audit
runs-on: ubuntu-22.04
timeout-minutes: 45
env:
CARGO_HOME: ${{ github.workspace }}/.ci-rust/${{ github.run_id }}-${{ github.run_attempt }}-${{ github.job }}/cargo
RUSTUP_HOME: ${{ github.workspace }}/.ci-rust/${{ github.run_id }}-${{ github.run_attempt }}-${{ github.job }}/rustup
CARGO_TARGET_DIR: ${{ github.workspace }}/.ci-rust/${{ github.run_id }}-${{ github.run_attempt }}-${{ github.job }}/target
steps:
- uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
@ -79,12 +85,22 @@ jobs:
shell: bash
run: ./scripts/ci/self_heal_rust_toolchain.sh 1.92.0
- name: Ensure C toolchain
shell: bash
run: bash ./scripts/ci/ensure_c_toolchain.sh
- uses: dtolnay/rust-toolchain@631a55b12751854ce901bb631d5902ceb48146f7 # stable
with:
toolchain: 1.92.0
- name: Ensure C toolchain for Rust builds
run: ./scripts/ci/ensure_cc.sh
- name: Ensure cargo component
shell: bash
env:
ENSURE_CARGO_COMPONENT_STRICT: "true"
run: bash ./scripts/ci/ensure_cargo_component.sh 1.92.0
- uses: rustsec/audit-check@69366f33c96575abad1ee0dba8212993eecbe998 # v2.0.0
with:
token: ${{ secrets.GITHUB_TOKEN }}
@ -93,9 +109,26 @@ jobs:
name: License & Supply Chain
runs-on: ubuntu-22.04
timeout-minutes: 20
env:
CARGO_HOME: ${{ github.workspace }}/.ci-rust/${{ github.run_id }}-${{ github.run_attempt }}-${{ github.job }}/cargo
RUSTUP_HOME: ${{ github.workspace }}/.ci-rust/${{ github.run_id }}-${{ github.run_attempt }}-${{ github.job }}/rustup
CARGO_TARGET_DIR: ${{ github.workspace }}/.ci-rust/${{ github.run_id }}-${{ github.run_attempt }}-${{ github.job }}/target
steps:
- uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
- name: Ensure C toolchain
shell: bash
run: bash ./scripts/ci/ensure_c_toolchain.sh
- uses: dtolnay/rust-toolchain@631a55b12751854ce901bb631d5902ceb48146f7 # stable
with:
toolchain: 1.92.0
- name: Ensure cargo component
shell: bash
env:
ENSURE_CARGO_COMPONENT_STRICT: "true"
run: bash ./scripts/ci/ensure_cargo_component.sh 1.92.0
- name: Enforce deny policy hygiene
shell: bash
run: |
@ -108,9 +141,46 @@ jobs:
--output-md artifacts/deny-policy-guard.md \
--fail-on-violation
- uses: EmbarkStudios/cargo-deny-action@3fd3802e88374d3fe9159b834c7714ec57d6c979 # v2
with:
command: check advisories licenses sources
- name: Install cargo-deny
shell: bash
run: |
set -euo pipefail
version="0.19.0"
arch="$(uname -m)"
case "${arch}" in
x86_64|amd64)
target="x86_64-unknown-linux-musl"
expected_sha256="0e8c2aa59128612c90d9e09c02204e912f29a5b8d9a64671b94608cbe09e064f"
;;
aarch64|arm64)
target="aarch64-unknown-linux-musl"
expected_sha256="2b3567a60b7491c159d1cef8b7d8479d1ad2a31e29ef49462634ad4552fcc77d"
;;
*)
echo "Unsupported runner architecture for cargo-deny: ${arch}" >&2
exit 1
;;
esac
install_dir="${RUNNER_TEMP}/cargo-deny-${version}"
archive="${RUNNER_TEMP}/cargo-deny-${version}-${target}.tar.gz"
mkdir -p "${install_dir}"
curl --proto '=https' --tlsv1.2 --fail --location --silent --show-error \
--output "${archive}" \
"https://github.com/EmbarkStudios/cargo-deny/releases/download/${version}/cargo-deny-${version}-${target}.tar.gz"
actual_sha256="$(sha256sum "${archive}" | awk '{print $1}')"
if [ "${actual_sha256}" != "${expected_sha256}" ]; then
echo "Checksum mismatch for cargo-deny ${version} (${target})" >&2
echo "Expected: ${expected_sha256}" >&2
echo "Actual: ${actual_sha256}" >&2
exit 1
fi
tar -xzf "${archive}" -C "${install_dir}" --strip-components=1
echo "${install_dir}" >> "${GITHUB_PATH}"
"${install_dir}/cargo-deny" --version
- name: Run cargo-deny checks
shell: bash
run: cargo-deny check advisories licenses sources
- name: Emit deny audit event
if: always()
@ -148,8 +218,16 @@ jobs:
name: Security Regression Tests
runs-on: ubuntu-22.04
timeout-minutes: 30
env:
CARGO_HOME: ${{ github.workspace }}/.ci-rust/${{ github.run_id }}-${{ github.run_attempt }}-${{ github.job }}/cargo
RUSTUP_HOME: ${{ github.workspace }}/.ci-rust/${{ github.run_id }}-${{ github.run_attempt }}-${{ github.job }}/rustup
CARGO_TARGET_DIR: ${{ github.workspace }}/.ci-rust/${{ github.run_id }}-${{ github.run_attempt }}-${{ github.job }}/target
steps:
- uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
- name: Ensure C toolchain
shell: bash
run: bash ./scripts/ci/ensure_c_toolchain.sh
- name: Self-heal Rust toolchain cache
shell: bash
run: ./scripts/ci/self_heal_rust_toolchain.sh 1.92.0
@ -158,6 +236,11 @@ jobs:
toolchain: 1.92.0
- name: Ensure C toolchain for Rust builds
run: ./scripts/ci/ensure_cc.sh
- name: Ensure cargo component
shell: bash
env:
ENSURE_CARGO_COMPONENT_STRICT: "true"
run: bash ./scripts/ci/ensure_cargo_component.sh 1.92.0
- uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v3
with:
prefix-key: sec-audit-security-regressions

View File

@ -8,6 +8,8 @@ on:
- "Cargo.lock"
- "src/**"
- "crates/**"
- "scripts/ci/ensure_c_toolchain.sh"
- "scripts/ci/ensure_cargo_component.sh"
- ".github/codeql/**"
- "scripts/ci/self_heal_rust_toolchain.sh"
- "scripts/ci/ensure_cc.sh"
@ -19,6 +21,8 @@ on:
- "Cargo.lock"
- "src/**"
- "crates/**"
- "scripts/ci/ensure_c_toolchain.sh"
- "scripts/ci/ensure_cargo_component.sh"
- ".github/codeql/**"
- "scripts/ci/self_heal_rust_toolchain.sh"
- "scripts/ci/ensure_cc.sh"
@ -49,12 +53,20 @@ jobs:
name: CodeQL Analysis
runs-on: ubuntu-22.04
timeout-minutes: 120
env:
CARGO_HOME: ${{ github.workspace }}/.ci-rust/${{ github.run_id }}-${{ github.run_attempt }}-${{ github.job }}/cargo
RUSTUP_HOME: ${{ github.workspace }}/.ci-rust/${{ github.run_id }}-${{ github.run_attempt }}-${{ github.job }}/rustup
CARGO_TARGET_DIR: ${{ github.workspace }}/.ci-rust/${{ github.run_id }}-${{ github.run_attempt }}-${{ github.job }}/target
steps:
- name: Checkout repository
uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
with:
fetch-depth: 0
- name: Ensure C toolchain
shell: bash
run: bash ./scripts/ci/ensure_c_toolchain.sh
- name: Initialize CodeQL
uses: github/codeql-action/init@89a39a4e59826350b863aa6b6252a07ad50cf83e # v4
with:
@ -73,6 +85,9 @@ jobs:
- name: Ensure C toolchain for Rust builds
run: ./scripts/ci/ensure_cc.sh
- name: Ensure cargo component
shell: bash
run: bash ./scripts/ci/ensure_cargo_component.sh 1.92.0
- name: Build
run: cargo build --workspace --all-targets --locked

View File

@ -42,6 +42,8 @@ impl BenchProvider {
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: None,
raw_stop_reason: None,
}]),
}
}
@ -59,6 +61,8 @@ impl BenchProvider {
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: None,
raw_stop_reason: None,
},
ChatResponse {
text: Some("done".into()),
@ -66,6 +70,8 @@ impl BenchProvider {
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: None,
raw_stop_reason: None,
},
]),
}
@ -98,6 +104,8 @@ impl Provider for BenchProvider {
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: None,
raw_stop_reason: None,
});
}
Ok(guard.remove(0))
@ -166,6 +174,8 @@ Let me know if you need more."#
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: None,
raw_stop_reason: None,
};
let multi_tool = ChatResponse {
@ -185,6 +195,8 @@ Let me know if you need more."#
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: None,
raw_stop_reason: None,
};
c.bench_function("xml_parse_single_tool_call", |b| {
@ -220,6 +232,8 @@ fn bench_native_parsing(c: &mut Criterion) {
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: None,
raw_stop_reason: None,
};
c.bench_function("native_parse_tool_calls", |b| {

View File

@ -33,6 +33,20 @@ set -euo pipefail
BIN="${1:?Usage: check_binary_size.sh <binary_path> [label]}"
LABEL="${2:-}"
if [ ! -f "$BIN" ] && [ -n "${CARGO_TARGET_DIR:-}" ]; then
if [[ "$BIN" == target/* ]]; then
alt_bin="${CARGO_TARGET_DIR}/${BIN#target/}"
if [ -f "$alt_bin" ]; then
BIN="$alt_bin"
fi
elif [[ "$BIN" != /* ]]; then
alt_bin="${CARGO_TARGET_DIR}/${BIN}"
if [ -f "$alt_bin" ]; then
BIN="$alt_bin"
fi
fi
fi
if [ ! -f "$BIN" ]; then
echo "::error::Binary not found at $BIN"
exit 1

View File

@ -0,0 +1,58 @@
#!/usr/bin/env bash
# Ensure a working C compiler/linker is available for Rust native dependencies.
# Exports CC/CXX and the cargo linker override via GITHUB_ENV so subsequent
# workflow steps inherit them. Installs build-essential as a last resort on
# apt-based runners.
set -euo pipefail

# Append key=value to GITHUB_ENV when running inside GitHub Actions.
# Outside Actions (no GITHUB_ENV) this is a silent no-op.
set_env_var() {
    local key="$1"
    local value="$2"
    if [ -n "${GITHUB_ENV:-}" ]; then
        echo "${key}=${value}" >>"${GITHUB_ENV}"
    fi
}

# Export compiler/linker environment variables for the given linker path.
# Returns 1 when the path is missing or not executable.
configure_linker() {
    local linker="$1"
    if [ ! -x "${linker}" ]; then
        return 1
    fi
    set_env_var "CC" "${linker}"
    # Point cargo at the linker for the host architecture; previously only
    # the x86_64 override was set, which left aarch64 runners unconfigured.
    case "$(uname -m)" in
        aarch64 | arm64)
            set_env_var "CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER" "${linker}"
            ;;
        *)
            set_env_var "CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_LINKER" "${linker}"
            ;;
    esac
    if command -v g++ >/dev/null 2>&1; then
        set_env_var "CXX" "$(command -v g++)"
    elif command -v clang++ >/dev/null 2>&1; then
        set_env_var "CXX" "$(command -v clang++)"
    fi
    echo "Using C linker: ${linker}"
    "${linker}" --version | head -n 1 || true
    return 0
}

echo "Ensuring C toolchain is available for Rust native dependencies"
# Probe the usual compiler names in preference order.
for candidate in cc gcc clang; do
    if command -v "${candidate}" >/dev/null 2>&1; then
        configure_linker "$(command -v "${candidate}")"
        exit 0
    fi
done

if command -v sudo >/dev/null 2>&1 && command -v apt-get >/dev/null 2>&1; then
    echo "C compiler not found. Installing build-essential via apt..."
    sudo apt-get update
    sudo apt-get install -y build-essential
    # Re-probe explicitly: fail with a clear message if the install did not
    # actually provide `cc` (previously this fell through to a cryptic
    # `set -e` exit inside configure_linker).
    if ! command -v cc >/dev/null 2>&1; then
        echo "build-essential installed but 'cc' is still unavailable." >&2
        exit 1
    fi
    configure_linker "$(command -v cc)"
    exit 0
fi

echo "No usable C compiler found (cc/gcc/clang)." >&2
exit 1

View File

@ -0,0 +1,107 @@
#!/usr/bin/env bash
# Ensure a usable `cargo` exists for the requested Rust toolchain, optionally
# falling back to a second toolchain. In strict mode, any fallback or version
# mismatch is a hard failure.
#
# Usage: ensure_cargo_component.sh [requested_toolchain] [fallback_toolchain] [strict]
set -euo pipefail

requested_toolchain="${1:-1.92.0}"
fallback_toolchain="${2:-stable}"
strict_mode_raw="${3:-${ENSURE_CARGO_COMPONENT_STRICT:-false}}"
# Normalize to lowercase so TRUE/True/on all behave the same.
strict_mode="$(printf '%s' "${strict_mode_raw}" | tr '[:upper:]' '[:lower:]')"

# Return 0 when the value is a common truthy token (1/true/yes/on).
is_truthy() {
    local value="${1:-}"
    case "${value}" in
        1 | true | yes | on) return 0 ;;
        *) return 1 ;;
    esac
}

# Return 0 when `cargo` runs under the given toolchain.
probe_cargo() {
    local toolchain="$1"
    rustup run "${toolchain}" cargo --version >/dev/null 2>&1
}

# Return 0 when `rustc` runs under the given toolchain.
probe_rustc() {
    local toolchain="$1"
    rustup run "${toolchain}" rustc --version >/dev/null 2>&1
}

# Publish RUSTUP_TOOLCHAIN (and absolute CARGO/RUSTC paths when resolvable)
# to GITHUB_ENV so later workflow steps use the selected toolchain.
export_toolchain_for_next_steps() {
    local toolchain="$1"
    # Fix: declare helpers local so they do not leak into the global scope.
    local cargo_path rustc_path
    if [ -z "${GITHUB_ENV:-}" ]; then
        return 0
    fi
    {
        echo "RUSTUP_TOOLCHAIN=${toolchain}"
        cargo_path="$(rustup which --toolchain "${toolchain}" cargo 2>/dev/null || true)"
        rustc_path="$(rustup which --toolchain "${toolchain}" rustc 2>/dev/null || true)"
        if [ -n "${cargo_path}" ]; then
            echo "CARGO=${cargo_path}"
        fi
        if [ -n "${rustc_path}" ]; then
            echo "RUSTC=${rustc_path}"
        fi
    } >>"${GITHUB_ENV}"
}

# In strict mode, verify the active rustc version equals the requested one.
# Skipped when the expected version is not an exact x.y.z pin (e.g. "stable").
assert_rustc_version_matches() {
    local toolchain="$1"
    local expected_version="$2"
    local actual_version
    if [[ ! "${expected_version}" =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
        return 0
    fi
    actual_version="$(rustup run "${toolchain}" rustc --version | awk '{print $2}')"
    if [ "${actual_version}" != "${expected_version}" ]; then
        echo "rustc version mismatch for ${toolchain}: expected ${expected_version}, got ${actual_version}" >&2
        exit 1
    fi
}

selected_toolchain="${requested_toolchain}"
echo "Ensuring cargo component is available for toolchain: ${requested_toolchain}"

# Install the requested toolchain if it is missing entirely.
if ! probe_rustc "${requested_toolchain}"; then
    echo "Requested toolchain ${requested_toolchain} is not installed; installing..."
    rustup toolchain install "${requested_toolchain}" --profile default
fi

# Repair a toolchain that has rustc but a broken/missing cargo component.
if ! probe_cargo "${requested_toolchain}"; then
    echo "cargo is unavailable for ${requested_toolchain}; reinstalling toolchain profile..."
    rustup toolchain install "${requested_toolchain}" --profile default
    rustup component add cargo --toolchain "${requested_toolchain}" || true
fi

# Last resort: fall back to the secondary toolchain (forbidden in strict mode).
if ! probe_cargo "${requested_toolchain}"; then
    if is_truthy "${strict_mode}"; then
        echo "::error::Strict mode enabled; cargo is unavailable for requested toolchain ${requested_toolchain}." >&2
        rustup toolchain list || true
        exit 1
    fi
    echo "::warning::Falling back to ${fallback_toolchain} because ${requested_toolchain} cargo remains unavailable."
    rustup toolchain install "${fallback_toolchain}" --profile default
    rustup component add cargo --toolchain "${fallback_toolchain}" || true
    if ! probe_cargo "${fallback_toolchain}"; then
        echo "No usable cargo found for ${requested_toolchain} or ${fallback_toolchain}" >&2
        rustup toolchain list || true
        exit 1
    fi
    selected_toolchain="${fallback_toolchain}"
fi

# Defensive: strict mode must never end up on a fallback toolchain.
if is_truthy "${strict_mode}" && [ "${selected_toolchain}" != "${requested_toolchain}" ]; then
    echo "::error::Strict mode enabled; refusing fallback toolchain ${selected_toolchain} (requested ${requested_toolchain})." >&2
    exit 1
fi
if is_truthy "${strict_mode}"; then
    assert_rustc_version_matches "${selected_toolchain}" "${requested_toolchain}"
fi

export_toolchain_for_next_steps "${selected_toolchain}"
echo "Using Rust toolchain: ${selected_toolchain}"
rustup run "${selected_toolchain}" rustc --version
rustup run "${selected_toolchain}" cargo --version

View File

@ -11,11 +11,12 @@ BINARY_NAME="${BINARY_NAME:-zeroclaw}"
OUTPUT_DIR="${OUTPUT_DIR:-artifacts}"
FAIL_ON_DRIFT="${FAIL_ON_DRIFT:-false}"
ALLOW_BUILD_ID_DRIFT="${ALLOW_BUILD_ID_DRIFT:-true}"
TARGET_ROOT="${CARGO_TARGET_DIR:-target}"
mkdir -p "${OUTPUT_DIR}"
host_target="$(rustc -vV | sed -n 's/^host: //p')"
artifact_path="target/${host_target}/${PROFILE}/${BINARY_NAME}"
artifact_path="${TARGET_ROOT}/${host_target}/${PROFILE}/${BINARY_NAME}"
sha256_file() {
local file="$1"

View File

@ -9,11 +9,15 @@ import json
import re
import subprocess
import sys
import tomllib
from collections import Counter
from dataclasses import dataclass
from pathlib import Path
try:
import tomllib # Python 3.11+
except ModuleNotFoundError:
import tomli as tomllib # type: ignore
@dataclass(frozen=True)
class PatternSpec:

View File

@ -804,6 +804,8 @@ mod tests {
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: None,
raw_stop_reason: None,
});
}
Ok(guard.remove(0))
@ -842,6 +844,8 @@ mod tests {
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: None,
raw_stop_reason: None,
});
}
Ok(guard.remove(0))
@ -882,6 +886,8 @@ mod tests {
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: None,
raw_stop_reason: None,
}]),
});
@ -923,6 +929,8 @@ mod tests {
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: None,
raw_stop_reason: None,
},
crate::providers::ChatResponse {
text: Some("done".into()),
@ -930,6 +938,8 @@ mod tests {
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: None,
raw_stop_reason: None,
},
]),
});
@ -972,6 +982,8 @@ mod tests {
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: None,
raw_stop_reason: None,
}]),
seen_models: seen_models.clone(),
});

View File

@ -264,6 +264,8 @@ mod tests {
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: None,
raw_stop_reason: None,
};
let dispatcher = XmlToolDispatcher;
let (_, calls) = dispatcher.parse_response(&response);
@ -283,6 +285,8 @@ mod tests {
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: None,
raw_stop_reason: None,
};
let dispatcher = NativeToolDispatcher;
let (_, calls) = dispatcher.parse_response(&response);

View File

@ -6,7 +6,8 @@ use crate::memory::{self, Memory, MemoryCategory};
use crate::multimodal;
use crate::observability::{self, runtime_trace, Observer, ObserverEvent};
use crate::providers::{
self, ChatMessage, ChatRequest, Provider, ProviderCapabilityError, ToolCall,
self, ChatMessage, ChatRequest, NormalizedStopReason, Provider, ProviderCapabilityError,
ToolCall,
};
use crate::runtime;
use crate::security::SecurityPolicy;
@ -61,6 +62,16 @@ const STREAM_CHUNK_MIN_CHARS: usize = 80;
/// Used as a safe fallback when `max_tool_iterations` is unset or configured as zero.
const DEFAULT_MAX_TOOL_ITERATIONS: usize = 20;
/// Maximum continuation retries when a provider reports max-token truncation.
const MAX_TOKENS_CONTINUATION_MAX_ATTEMPTS: usize = 3;
/// Absolute safety cap for merged continuation output.
const MAX_TOKENS_CONTINUATION_MAX_OUTPUT_CHARS: usize = 120_000;
/// Deterministic continuation instruction appended as a user message.
const MAX_TOKENS_CONTINUATION_PROMPT: &str = "Previous response was truncated by token limit.\nContinue exactly from where you left off.\nIf you intended a tool call, emit one complete tool call payload only.\nDo not repeat already-sent text.";
/// Notice appended when continuation budget is exhausted before completion.
const MAX_TOKENS_CONTINUATION_NOTICE: &str =
"\n\n[Response may be truncated due to continuation limits. Reply \"continue\" to resume.]";
/// Minimum user-message length (in chars) for auto-save to memory.
/// Matches the channel-side constant in `channels/mod.rs`.
const AUTOSAVE_MIN_MESSAGE_CHARS: usize = 20;
@ -559,6 +570,55 @@ fn looks_like_deferred_action_without_tool_call(text: &str) -> bool {
&& CJK_DEFERRED_ACTION_VERB_REGEX.is_match(trimmed)
}
/// Stitch a continuation chunk onto already-received text, de-duplicating any
/// overlap: the longest prefix of `next` that is also a suffix of `existing`
/// is emitted only once.
fn merge_continuation_text(existing: &str, next: &str) -> String {
    if next.is_empty() {
        return existing.to_string();
    }
    if existing.is_empty() {
        return next.to_string();
    }
    // Whole-string containment short-circuits: duplicate tail or superset.
    if existing.ends_with(next) {
        return existing.to_string();
    }
    if next.starts_with(existing) {
        return next.to_string();
    }
    // Scan candidate overlap lengths (char boundaries of `next`, longest
    // first) for a prefix of `next` matching a suffix of `existing`.
    let overlap = next
        .char_indices()
        .map(|(idx, _)| idx)
        .chain(std::iter::once(next.len()))
        .rev()
        .filter(|&end| end > 0 && end <= existing.len())
        .find(|&end| existing.ends_with(&next[..end]));
    match overlap {
        Some(end) => format!("{existing}{}", &next[end..]),
        None => format!("{existing}{next}"),
    }
}
/// Sum two optional counters: `None` acts as the identity, and a present
/// pair is added with saturation so usage totals never overflow.
fn add_optional_u64(lhs: Option<u64>, rhs: Option<u64>) -> Option<u64> {
    match (lhs, rhs) {
        (None, None) => None,
        (Some(a), Some(b)) => Some(a.saturating_add(b)),
        (present, None) | (None, present) => present,
    }
}
fn stop_reason_name(reason: &NormalizedStopReason) -> &'static str {
match reason {
NormalizedStopReason::EndTurn => "end_turn",
NormalizedStopReason::ToolCall => "tool_call",
NormalizedStopReason::MaxTokens => "max_tokens",
NormalizedStopReason::ContextWindowExceeded => "context_window_exceeded",
NormalizedStopReason::SafetyBlocked => "safety_blocked",
NormalizedStopReason::Cancelled => "cancelled",
NormalizedStopReason::Unknown(_) => "unknown",
}
}
fn maybe_inject_cron_add_delivery(
tool_name: &str,
tool_args: &mut serde_json::Value,
@ -1340,12 +1400,183 @@ pub async fn run_tool_call_loop(
parse_issue_detected,
) = match chat_result {
Ok(resp) => {
let (resp_input_tokens, resp_output_tokens) = resp
let mut response_text = resp.text_or_empty().to_string();
let mut native_calls = resp.tool_calls;
let mut reasoning_content = resp.reasoning_content.clone();
let mut stop_reason = resp.stop_reason.clone();
let mut raw_stop_reason = resp.raw_stop_reason.clone();
let (mut resp_input_tokens, mut resp_output_tokens) = resp
.usage
.as_ref()
.map(|u| (u.input_tokens, u.output_tokens))
.unwrap_or((None, None));
if let Some(reason) = stop_reason.as_ref() {
runtime_trace::record_event(
"stop_reason_observed",
Some(channel_name),
Some(provider_name),
Some(active_model.as_str()),
Some(&turn_id),
Some(true),
None,
serde_json::json!({
"iteration": iteration + 1,
"normalized_reason": stop_reason_name(reason),
"raw_reason": raw_stop_reason.clone(),
}),
);
}
let mut continuation_attempts = 0usize;
let mut continuation_termination_reason: Option<&'static str> = None;
let mut continuation_error: Option<String> = None;
let mut output_chars = response_text.chars().count();
while matches!(stop_reason, Some(NormalizedStopReason::MaxTokens))
&& native_calls.is_empty()
&& continuation_attempts < MAX_TOKENS_CONTINUATION_MAX_ATTEMPTS
&& output_chars < MAX_TOKENS_CONTINUATION_MAX_OUTPUT_CHARS
{
continuation_attempts += 1;
runtime_trace::record_event(
"continuation_attempt",
Some(channel_name),
Some(provider_name),
Some(active_model.as_str()),
Some(&turn_id),
Some(true),
None,
serde_json::json!({
"iteration": iteration + 1,
"attempt": continuation_attempts,
"output_chars": output_chars,
"max_output_chars": MAX_TOKENS_CONTINUATION_MAX_OUTPUT_CHARS,
}),
);
let mut continuation_messages = request_messages.clone();
continuation_messages.push(ChatMessage::assistant(response_text.clone()));
continuation_messages.push(ChatMessage::user(
MAX_TOKENS_CONTINUATION_PROMPT.to_string(),
));
let continuation_future = provider.chat(
ChatRequest {
messages: &continuation_messages,
tools: request_tools,
},
active_model.as_str(),
temperature,
);
let continuation_result = if let Some(token) = cancellation_token.as_ref() {
tokio::select! {
() = token.cancelled() => return Err(ToolLoopCancelled.into()),
result = continuation_future => result,
}
} else {
continuation_future.await
};
let continuation_resp = match continuation_result {
Ok(response) => response,
Err(error) => {
continuation_termination_reason = Some("provider_error");
continuation_error =
Some(crate::providers::sanitize_api_error(&error.to_string()));
break;
}
};
if let Some(usage) = continuation_resp.usage.as_ref() {
resp_input_tokens = add_optional_u64(resp_input_tokens, usage.input_tokens);
resp_output_tokens =
add_optional_u64(resp_output_tokens, usage.output_tokens);
}
let next_text = continuation_resp.text_or_empty().to_string();
let merged_text = merge_continuation_text(&response_text, &next_text);
let merged_chars = merged_text.chars().count();
if merged_chars > MAX_TOKENS_CONTINUATION_MAX_OUTPUT_CHARS {
response_text = merged_text
.chars()
.take(MAX_TOKENS_CONTINUATION_MAX_OUTPUT_CHARS)
.collect();
output_chars = MAX_TOKENS_CONTINUATION_MAX_OUTPUT_CHARS;
stop_reason = Some(NormalizedStopReason::MaxTokens);
continuation_termination_reason = Some("output_cap");
break;
}
response_text = merged_text;
output_chars = merged_chars;
if continuation_resp.reasoning_content.is_some() {
reasoning_content = continuation_resp.reasoning_content.clone();
}
if !continuation_resp.tool_calls.is_empty() {
native_calls = continuation_resp.tool_calls;
}
stop_reason = continuation_resp.stop_reason;
raw_stop_reason = continuation_resp.raw_stop_reason;
if let Some(reason) = stop_reason.as_ref() {
runtime_trace::record_event(
"stop_reason_observed",
Some(channel_name),
Some(provider_name),
Some(active_model.as_str()),
Some(&turn_id),
Some(true),
None,
serde_json::json!({
"iteration": iteration + 1,
"continuation_attempt": continuation_attempts,
"normalized_reason": stop_reason_name(reason),
"raw_reason": raw_stop_reason.clone(),
}),
);
}
}
if continuation_attempts > 0 && continuation_termination_reason.is_none() {
continuation_termination_reason =
if matches!(stop_reason, Some(NormalizedStopReason::MaxTokens)) {
if output_chars >= MAX_TOKENS_CONTINUATION_MAX_OUTPUT_CHARS {
Some("output_cap")
} else {
Some("retry_limit")
}
} else {
Some("completed")
};
}
if let Some(terminal_reason) = continuation_termination_reason {
runtime_trace::record_event(
"continuation_terminated",
Some(channel_name),
Some(provider_name),
Some(active_model.as_str()),
Some(&turn_id),
Some(terminal_reason == "completed"),
continuation_error.as_deref(),
serde_json::json!({
"iteration": iteration + 1,
"attempts": continuation_attempts,
"terminal_reason": terminal_reason,
"output_chars": output_chars,
}),
);
}
if continuation_attempts > 0
&& matches!(stop_reason, Some(NormalizedStopReason::MaxTokens))
&& native_calls.is_empty()
&& !response_text.ends_with(MAX_TOKENS_CONTINUATION_NOTICE)
{
response_text.push_str(MAX_TOKENS_CONTINUATION_NOTICE);
}
observer.record_event(&ObserverEvent::LlmResponse {
provider: provider_name.to_string(),
model: active_model.clone(),
@ -1356,15 +1587,21 @@ pub async fn run_tool_call_loop(
output_tokens: resp_output_tokens,
});
let response_text = resp.text_or_empty().to_string();
// First try native structured tool calls (OpenAI-format).
// Fall back to text-based parsing (XML tags, markdown blocks,
// GLM format) only if the provider returned no native calls —
// this ensures we support both native and prompt-guided models.
let mut calls = parse_structured_tool_calls(&resp.tool_calls);
let structured_parse = parse_structured_tool_calls(&native_calls);
let invalid_native_tool_json_count = structured_parse.invalid_json_arguments;
let mut calls = structured_parse.calls;
if invalid_native_tool_json_count > 0 {
// Safety policy: when native tool-call args are partially truncated
// or malformed, do not execute any parsed subset in this turn.
calls.clear();
}
let mut parsed_text = String::new();
if calls.is_empty() {
if invalid_native_tool_json_count == 0 && calls.is_empty() {
let (fallback_text, fallback_calls) = parse_tool_calls(&response_text);
if !fallback_text.is_empty() {
parsed_text = fallback_text;
@ -1372,7 +1609,12 @@ pub async fn run_tool_call_loop(
calls = fallback_calls;
}
let parse_issue = detect_tool_call_parse_issue(&response_text, &calls);
let mut parse_issue = detect_tool_call_parse_issue(&response_text, &calls);
if parse_issue.is_none() && invalid_native_tool_json_count > 0 {
parse_issue = Some(format!(
"provider returned {invalid_native_tool_json_count} native tool call(s) with invalid JSON arguments"
));
}
if let Some(parse_issue) = parse_issue.as_deref() {
runtime_trace::record_event(
"tool_call_parse_issue",
@ -1384,6 +1626,7 @@ pub async fn run_tool_call_loop(
Some(parse_issue),
serde_json::json!({
"iteration": iteration + 1,
"invalid_native_tool_json_count": invalid_native_tool_json_count,
"response_excerpt": truncate_with_ellipsis(
&scrub_credentials(&response_text),
600
@ -1406,15 +1649,17 @@ pub async fn run_tool_call_loop(
"input_tokens": resp_input_tokens,
"output_tokens": resp_output_tokens,
"raw_response": scrub_credentials(&response_text),
"native_tool_calls": resp.tool_calls.len(),
"native_tool_calls": native_calls.len(),
"parsed_tool_calls": calls.len(),
"continuation_attempts": continuation_attempts,
"stop_reason": stop_reason.as_ref().map(stop_reason_name),
"raw_stop_reason": raw_stop_reason,
}),
);
// Preserve native tool call IDs in assistant history so role=tool
// follow-up messages can reference the exact call id.
let reasoning_content = resp.reasoning_content.clone();
let assistant_history_content = if resp.tool_calls.is_empty() {
let assistant_history_content = if native_calls.is_empty() {
if use_native_tools {
build_native_assistant_history_from_parsed_calls(
&response_text,
@ -1428,12 +1673,11 @@ pub async fn run_tool_call_loop(
} else {
build_native_assistant_history(
&response_text,
&resp.tool_calls,
&native_calls,
reasoning_content.as_deref(),
)
};
let native_calls = resp.tool_calls;
(
response_text,
parsed_text,
@ -3235,6 +3479,8 @@ mod tests {
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: None,
raw_stop_reason: None,
})
}
}
@ -3245,6 +3491,13 @@ mod tests {
}
impl ScriptedProvider {
fn from_scripted_responses(responses: Vec<ChatResponse>) -> Self {
Self {
responses: Arc::new(Mutex::new(VecDeque::from(responses))),
capabilities: ProviderCapabilities::default(),
}
}
fn from_text_responses(responses: Vec<&str>) -> Self {
let scripted = responses
.into_iter()
@ -3254,12 +3507,11 @@ mod tests {
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: None,
raw_stop_reason: None,
})
.collect();
Self {
responses: Arc::new(Mutex::new(scripted)),
capabilities: ProviderCapabilities::default(),
}
Self::from_scripted_responses(scripted)
}
fn with_native_tool_support(mut self) -> Self {
@ -4261,6 +4513,474 @@ mod tests {
);
}
#[tokio::test]
async fn run_tool_call_loop_retries_when_native_tool_args_are_truncated_json() {
let provider = ScriptedProvider::from_scripted_responses(vec![
ChatResponse {
text: Some(String::new()),
tool_calls: vec![ToolCall {
id: "call_bad".to_string(),
name: "count_tool".to_string(),
arguments: "{\"value\":\"truncated\"".to_string(),
}],
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: Some(NormalizedStopReason::MaxTokens),
raw_stop_reason: Some("length".to_string()),
},
ChatResponse {
text: Some(String::new()),
tool_calls: vec![ToolCall {
id: "call_good".to_string(),
name: "count_tool".to_string(),
arguments: "{\"value\":\"fixed\"}".to_string(),
}],
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: Some(NormalizedStopReason::ToolCall),
raw_stop_reason: Some("tool_calls".to_string()),
},
ChatResponse {
text: Some("done after native retry".to_string()),
tool_calls: Vec::new(),
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: Some(NormalizedStopReason::EndTurn),
raw_stop_reason: Some("stop".to_string()),
},
])
.with_native_tool_support();
let invocations = Arc::new(AtomicUsize::new(0));
let tools_registry: Vec<Box<dyn Tool>> = vec![Box::new(CountingTool::new(
"count_tool",
Arc::clone(&invocations),
))];
let mut history = vec![
ChatMessage::system("test-system"),
ChatMessage::user("run native call"),
];
let observer = NoopObserver;
let result = run_tool_call_loop(
&provider,
&mut history,
&tools_registry,
&observer,
"mock-provider",
"mock-model",
0.0,
true,
None,
"cli",
&crate::config::MultimodalConfig::default(),
6,
None,
None,
None,
&[],
)
.await
.expect("truncated native arguments should trigger safe retry");
assert_eq!(result, "done after native retry");
assert_eq!(
invocations.load(Ordering::SeqCst),
1,
"only the repaired native tool call should execute"
);
assert!(
history.iter().any(|msg| {
msg.role == "tool" && msg.content.contains("\"tool_call_id\":\"call_good\"")
}),
"tool history should include only the repaired tool_call_id"
);
assert!(
history.iter().all(|msg| {
!(msg.role == "tool" && msg.content.contains("\"tool_call_id\":\"call_bad\""))
}),
"invalid truncated native call must not execute"
);
}
#[tokio::test]
async fn run_tool_call_loop_ignores_text_fallback_when_native_tool_args_are_truncated_json() {
let provider = ScriptedProvider::from_scripted_responses(vec![
ChatResponse {
text: Some(
r#"<tool_call>
{"name":"count_tool","arguments":{"value":"from_text_fallback"}}
</tool_call>"#
.to_string(),
),
tool_calls: vec![ToolCall {
id: "call_bad".to_string(),
name: "count_tool".to_string(),
arguments: "{\"value\":\"truncated\"".to_string(),
}],
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: Some(NormalizedStopReason::MaxTokens),
raw_stop_reason: Some("length".to_string()),
},
ChatResponse {
text: Some(String::new()),
tool_calls: vec![ToolCall {
id: "call_good".to_string(),
name: "count_tool".to_string(),
arguments: "{\"value\":\"from_native_fixed\"}".to_string(),
}],
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: Some(NormalizedStopReason::ToolCall),
raw_stop_reason: Some("tool_calls".to_string()),
},
ChatResponse {
text: Some("done after safe retry".to_string()),
tool_calls: Vec::new(),
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: Some(NormalizedStopReason::EndTurn),
raw_stop_reason: Some("stop".to_string()),
},
])
.with_native_tool_support();
let invocations = Arc::new(AtomicUsize::new(0));
let tools_registry: Vec<Box<dyn Tool>> = vec![Box::new(CountingTool::new(
"count_tool",
Arc::clone(&invocations),
))];
let mut history = vec![
ChatMessage::system("test-system"),
ChatMessage::user("run native call"),
];
let observer = NoopObserver;
let result = run_tool_call_loop(
&provider,
&mut history,
&tools_registry,
&observer,
"mock-provider",
"mock-model",
0.0,
true,
None,
"cli",
&crate::config::MultimodalConfig::default(),
6,
None,
None,
None,
&[],
)
.await
.expect("invalid native args should force retry without text fallback execution");
assert_eq!(result, "done after safe retry");
assert_eq!(
invocations.load(Ordering::SeqCst),
1,
"only repaired native call should execute after retry"
);
assert!(
history
.iter()
.all(|msg| !msg.content.contains("counted:from_text_fallback")),
"text fallback tool call must not execute when native JSON args are invalid"
);
assert!(
history
.iter()
.any(|msg| msg.content.contains("counted:from_native_fixed")),
"repaired native call should execute after retry"
);
}
#[tokio::test]
async fn run_tool_call_loop_executes_valid_native_tool_call_with_max_tokens_stop_reason() {
let provider = ScriptedProvider::from_scripted_responses(vec![
ChatResponse {
text: Some(String::new()),
tool_calls: vec![ToolCall {
id: "call_valid".to_string(),
name: "count_tool".to_string(),
arguments: "{\"value\":\"from_valid_native\"}".to_string(),
}],
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: Some(NormalizedStopReason::MaxTokens),
raw_stop_reason: Some("length".to_string()),
},
ChatResponse {
text: Some("done after valid native tool".to_string()),
tool_calls: Vec::new(),
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: Some(NormalizedStopReason::EndTurn),
raw_stop_reason: Some("stop".to_string()),
},
])
.with_native_tool_support();
let invocations = Arc::new(AtomicUsize::new(0));
let tools_registry: Vec<Box<dyn Tool>> = vec![Box::new(CountingTool::new(
"count_tool",
Arc::clone(&invocations),
))];
let mut history = vec![
ChatMessage::system("test-system"),
ChatMessage::user("run native call"),
];
let observer = NoopObserver;
let result = run_tool_call_loop(
&provider,
&mut history,
&tools_registry,
&observer,
"mock-provider",
"mock-model",
0.0,
true,
None,
"cli",
&crate::config::MultimodalConfig::default(),
6,
None,
None,
None,
&[],
)
.await
.expect("valid native tool calls must execute even when stop_reason is max_tokens");
assert_eq!(result, "done after valid native tool");
assert_eq!(
invocations.load(Ordering::SeqCst),
1,
"valid native tool call should execute exactly once"
);
assert!(
history.iter().any(|msg| {
msg.role == "tool" && msg.content.contains("\"tool_call_id\":\"call_valid\"")
}),
"tool history should preserve valid native tool_call_id"
);
}
#[tokio::test]
async fn run_tool_call_loop_continues_when_stop_reason_is_max_tokens() {
let provider = ScriptedProvider::from_scripted_responses(vec![
ChatResponse {
text: Some("part 1 ".to_string()),
tool_calls: Vec::new(),
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: Some(NormalizedStopReason::MaxTokens),
raw_stop_reason: Some("length".to_string()),
},
ChatResponse {
text: Some("part 2".to_string()),
tool_calls: Vec::new(),
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: Some(NormalizedStopReason::EndTurn),
raw_stop_reason: Some("stop".to_string()),
},
]);
let tools_registry: Vec<Box<dyn Tool>> = Vec::new();
let mut history = vec![
ChatMessage::system("test-system"),
ChatMessage::user("continue this"),
];
let observer = NoopObserver;
let result = run_tool_call_loop(
&provider,
&mut history,
&tools_registry,
&observer,
"mock-provider",
"mock-model",
0.0,
true,
None,
"cli",
&crate::config::MultimodalConfig::default(),
4,
None,
None,
None,
&[],
)
.await
.expect("max-token continuation should complete");
assert_eq!(result, "part 1 part 2");
assert!(
!result.contains("Response may be truncated"),
"continuation should not emit truncation notice when it ends cleanly"
);
}
#[tokio::test]
async fn run_tool_call_loop_appends_notice_when_continuation_budget_exhausts() {
let provider = ScriptedProvider::from_scripted_responses(vec![
ChatResponse {
text: Some("A".to_string()),
tool_calls: Vec::new(),
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: Some(NormalizedStopReason::MaxTokens),
raw_stop_reason: Some("length".to_string()),
},
ChatResponse {
text: Some("B".to_string()),
tool_calls: Vec::new(),
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: Some(NormalizedStopReason::MaxTokens),
raw_stop_reason: Some("length".to_string()),
},
ChatResponse {
text: Some("C".to_string()),
tool_calls: Vec::new(),
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: Some(NormalizedStopReason::MaxTokens),
raw_stop_reason: Some("length".to_string()),
},
ChatResponse {
text: Some("D".to_string()),
tool_calls: Vec::new(),
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: Some(NormalizedStopReason::MaxTokens),
raw_stop_reason: Some("length".to_string()),
},
]);
let tools_registry: Vec<Box<dyn Tool>> = Vec::new();
let mut history = vec![
ChatMessage::system("test-system"),
ChatMessage::user("long output"),
];
let observer = NoopObserver;
let result = run_tool_call_loop(
&provider,
&mut history,
&tools_registry,
&observer,
"mock-provider",
"mock-model",
0.0,
true,
None,
"cli",
&crate::config::MultimodalConfig::default(),
4,
None,
None,
None,
&[],
)
.await
.expect("continuation should degrade to partial output");
assert!(result.starts_with("ABCD"));
assert!(
result.contains("Response may be truncated due to continuation limits"),
"result should include truncation notice when continuation cap is hit"
);
}
#[tokio::test]
async fn run_tool_call_loop_clamps_continuation_output_to_hard_cap() {
let oversized_chunk = "B".repeat(MAX_TOKENS_CONTINUATION_MAX_OUTPUT_CHARS);
let provider = ScriptedProvider::from_scripted_responses(vec![
ChatResponse {
text: Some("A".to_string()),
tool_calls: Vec::new(),
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: Some(NormalizedStopReason::MaxTokens),
raw_stop_reason: Some("length".to_string()),
},
ChatResponse {
text: Some(oversized_chunk),
tool_calls: Vec::new(),
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: Some(NormalizedStopReason::EndTurn),
raw_stop_reason: Some("stop".to_string()),
},
]);
let tools_registry: Vec<Box<dyn Tool>> = Vec::new();
let mut history = vec![
ChatMessage::system("test-system"),
ChatMessage::user("long output"),
];
let observer = NoopObserver;
let result = run_tool_call_loop(
&provider,
&mut history,
&tools_registry,
&observer,
"mock-provider",
"mock-model",
0.0,
true,
None,
"cli",
&crate::config::MultimodalConfig::default(),
4,
None,
None,
None,
&[],
)
.await
.expect("continuation should clamp oversized merge");
assert!(
result.ends_with(MAX_TOKENS_CONTINUATION_NOTICE),
"hard-cap truncation should append continuation notice"
);
let capped_output = result
.strip_suffix(MAX_TOKENS_CONTINUATION_NOTICE)
.expect("result should end with continuation notice");
assert_eq!(
capped_output.chars().count(),
MAX_TOKENS_CONTINUATION_MAX_OUTPUT_CHARS
);
assert!(
capped_output.starts_with('A'),
"capped output should preserve earlier text before continuation chunk"
);
}
#[tokio::test]
async fn run_tool_call_loop_preserves_failed_tool_error_for_after_hook() {
let provider = ScriptedProvider::from_text_responses(vec![
@ -4311,6 +5031,18 @@ mod tests {
assert_eq!(recorded[0].as_deref(), Some("boom"));
}
#[test]
fn merge_continuation_text_deduplicates_partial_overlap() {
let merged = merge_continuation_text("The result is wor", "world.");
assert_eq!(merged, "The result is world.");
}
#[test]
fn merge_continuation_text_handles_unicode_overlap() {
let merged = merge_continuation_text("你好世界", "世界和平");
assert_eq!(merged, "你好世界和平");
}
#[test]
fn parse_tool_calls_extracts_single_call() {
let response = r#"Let me check that.
@ -5489,14 +6221,30 @@ Done."#;
arguments: "ls -la".to_string(),
}];
let parsed = parse_structured_tool_calls(&calls);
assert_eq!(parsed.len(), 1);
assert_eq!(parsed[0].name, "shell");
assert_eq!(parsed.invalid_json_arguments, 0);
assert_eq!(parsed.calls.len(), 1);
assert_eq!(parsed.calls[0].name, "shell");
assert_eq!(
parsed[0].arguments.get("command").and_then(|v| v.as_str()),
parsed.calls[0]
.arguments
.get("command")
.and_then(|v| v.as_str()),
Some("ls -la")
);
}
#[test]
fn parse_structured_tool_calls_skips_truncated_json_payloads() {
let calls = vec![ToolCall {
id: "call_bad".to_string(),
name: "count_tool".to_string(),
arguments: "{\"value\":\"unterminated\"".to_string(),
}];
let parsed = parse_structured_tool_calls(&calls);
assert_eq!(parsed.calls.len(), 0);
assert_eq!(parsed.invalid_json_arguments, 1);
}
// ═══════════════════════════════════════════════════════════════════════
// GLM-Style Tool Call Parsing
// ═══════════════════════════════════════════════════════════════════════

View File

@ -261,6 +261,8 @@ mod tests {
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: None,
raw_stop_reason: None,
})
}
}
@ -451,6 +453,8 @@ mod tests {
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: None,
raw_stop_reason: None,
})
}
}
@ -589,6 +593,8 @@ mod tests {
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: None,
raw_stop_reason: None,
})
}
}

View File

@ -10,6 +10,12 @@ pub(super) struct ParsedToolCall {
pub(super) tool_call_id: Option<String>,
}
#[derive(Debug, Clone, Default)]
pub(super) struct StructuredToolCallParseResult {
pub(super) calls: Vec<ParsedToolCall>,
pub(super) invalid_json_arguments: usize,
}
pub(super) fn parse_arguments_value(raw: Option<&serde_json::Value>) -> serde_json::Value {
match raw {
Some(serde_json::Value::String(s)) => serde_json::from_str::<serde_json::Value>(s)
@ -1676,18 +1682,41 @@ pub(super) fn detect_tool_call_parse_issue(
}
}
pub(super) fn parse_structured_tool_calls(tool_calls: &[ToolCall]) -> Vec<ParsedToolCall> {
tool_calls
.iter()
.map(|call| {
let name = call.name.clone();
let parsed = serde_json::from_str::<serde_json::Value>(&call.arguments)
.unwrap_or_else(|_| serde_json::Value::Object(serde_json::Map::new()));
ParsedToolCall {
name: name.clone(),
arguments: normalize_tool_arguments(&name, parsed, Some(call.arguments.as_str())),
tool_call_id: Some(call.id.clone()),
}
})
.collect()
pub(super) fn parse_structured_tool_calls(
tool_calls: &[ToolCall],
) -> StructuredToolCallParseResult {
let mut result = StructuredToolCallParseResult::default();
for call in tool_calls {
let name = call.name.clone();
let raw_arguments = call.arguments.trim();
// Fail closed for truncated/invalid JSON payloads that look like native
// structured tool-call arguments. This prevents executing partial args.
if (raw_arguments.starts_with('{') || raw_arguments.starts_with('['))
&& serde_json::from_str::<serde_json::Value>(raw_arguments).is_err()
{
result.invalid_json_arguments += 1;
tracing::warn!(
tool_name = %name,
tool_call_id = %call.id,
"Skipping native tool call with invalid JSON arguments"
);
continue;
}
let raw_value = serde_json::Value::String(call.arguments.clone());
let arguments = normalize_tool_arguments(
&name,
parse_arguments_value(Some(&raw_value)),
raw_string_argument_hint(Some(&raw_value)),
);
result.calls.push(ParsedToolCall {
name,
arguments,
tool_call_id: Some(call.id.clone()),
});
}
result
}

View File

@ -96,6 +96,8 @@ impl Provider for ScriptedProvider {
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: None,
raw_stop_reason: None,
});
}
Ok(guard.remove(0))
@ -334,6 +336,8 @@ fn tool_response(calls: Vec<ToolCall>) -> ChatResponse {
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: None,
raw_stop_reason: None,
}
}
@ -345,6 +349,8 @@ fn text_response(text: &str) -> ChatResponse {
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: None,
raw_stop_reason: None,
}
}
@ -358,6 +364,8 @@ fn xml_tool_response(name: &str, args: &str) -> ChatResponse {
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: None,
raw_stop_reason: None,
}
}
@ -768,6 +776,8 @@ async fn turn_handles_empty_text_response() {
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: None,
raw_stop_reason: None,
}]));
let mut agent = build_agent_with(provider, vec![], Box::new(NativeToolDispatcher));
@ -784,6 +794,8 @@ async fn turn_handles_none_text_response() {
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: None,
raw_stop_reason: None,
}]));
let mut agent = build_agent_with(provider, vec![], Box::new(NativeToolDispatcher));
@ -810,6 +822,8 @@ async fn turn_preserves_text_alongside_tool_calls() {
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: None,
raw_stop_reason: None,
},
text_response("Here are the results"),
]));
@ -1081,6 +1095,8 @@ async fn native_dispatcher_handles_stringified_arguments() {
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: None,
raw_stop_reason: None,
};
let (_, calls) = dispatcher.parse_response(&response);
@ -1109,6 +1125,8 @@ fn xml_dispatcher_handles_nested_json() {
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: None,
raw_stop_reason: None,
};
let dispatcher = XmlToolDispatcher;
@ -1129,6 +1147,8 @@ fn xml_dispatcher_handles_empty_tool_call_tag() {
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: None,
raw_stop_reason: None,
};
let dispatcher = XmlToolDispatcher;
@ -1145,6 +1165,8 @@ fn xml_dispatcher_handles_unclosed_tool_call() {
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: None,
raw_stop_reason: None,
};
let dispatcher = XmlToolDispatcher;

View File

@ -1,6 +1,6 @@
use crate::providers::traits::{
ChatMessage, ChatRequest as ProviderChatRequest, ChatResponse as ProviderChatResponse,
Provider, ProviderCapabilities, TokenUsage, ToolCall as ProviderToolCall,
NormalizedStopReason, Provider, ProviderCapabilities, TokenUsage, ToolCall as ProviderToolCall,
};
use crate::tools::ToolSpec;
use async_trait::async_trait;
@ -139,6 +139,8 @@ struct NativeChatResponse {
#[serde(default)]
content: Vec<NativeContentIn>,
#[serde(default)]
stop_reason: Option<String>,
#[serde(default)]
usage: Option<AnthropicUsage>,
}
@ -417,6 +419,10 @@ impl AnthropicProvider {
fn parse_native_response(response: NativeChatResponse) -> ProviderChatResponse {
let mut text_parts = Vec::new();
let mut tool_calls = Vec::new();
let raw_stop_reason = response.stop_reason.clone();
let stop_reason = raw_stop_reason
.as_deref()
.map(NormalizedStopReason::from_anthropic_stop_reason);
let usage = response.usage.map(|u| TokenUsage {
input_tokens: u.input_tokens,
@ -460,6 +466,8 @@ impl AnthropicProvider {
usage,
reasoning_content: None,
quota_metadata: None,
stop_reason,
raw_stop_reason,
}
}

View File

@ -6,8 +6,8 @@
use crate::providers::traits::{
ChatMessage, ChatRequest as ProviderChatRequest, ChatResponse as ProviderChatResponse,
Provider, ProviderCapabilities, StreamChunk, StreamError, StreamOptions, StreamResult,
TokenUsage, ToolCall as ProviderToolCall, ToolsPayload,
NormalizedStopReason, Provider, ProviderCapabilities, StreamChunk, StreamError, StreamOptions,
StreamResult, TokenUsage, ToolCall as ProviderToolCall, ToolsPayload,
};
use crate::tools::ToolSpec;
use async_trait::async_trait;
@ -512,7 +512,6 @@ struct ConverseResponse {
#[serde(default)]
output: Option<ConverseOutput>,
#[serde(default)]
#[allow(dead_code)]
stop_reason: Option<String>,
#[serde(default)]
usage: Option<BedrockUsage>,
@ -941,6 +940,10 @@ impl BedrockProvider {
fn parse_converse_response(response: ConverseResponse) -> ProviderChatResponse {
let mut text_parts = Vec::new();
let mut tool_calls = Vec::new();
let raw_stop_reason = response.stop_reason.clone();
let stop_reason = raw_stop_reason
.as_deref()
.map(NormalizedStopReason::from_bedrock_stop_reason);
let usage = response.usage.map(|u| TokenUsage {
input_tokens: u.input_tokens,
@ -982,6 +985,8 @@ impl BedrockProvider {
usage,
reasoning_content: None,
quota_metadata: None,
stop_reason,
raw_stop_reason,
}
}

View File

@ -5,8 +5,8 @@
use crate::multimodal;
use crate::providers::traits::{
ChatMessage, ChatRequest as ProviderChatRequest, ChatResponse as ProviderChatResponse,
Provider, StreamChunk, StreamError, StreamOptions, StreamResult, TokenUsage,
ToolCall as ProviderToolCall,
NormalizedStopReason, Provider, StreamChunk, StreamError, StreamOptions, StreamResult,
TokenUsage, ToolCall as ProviderToolCall,
};
use async_trait::async_trait;
use futures_util::{stream, SinkExt, StreamExt};
@ -481,6 +481,8 @@ struct UsageInfo {
#[derive(Debug, Deserialize)]
struct Choice {
message: ResponseMessage,
#[serde(default)]
finish_reason: Option<String>,
}
/// Remove `<think>...</think>` blocks from model output.
@ -970,6 +972,8 @@ fn parse_responses_chat_response(response: ResponsesResponse) -> ProviderChatRes
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: None,
raw_stop_reason: None,
}
}
@ -1815,7 +1819,12 @@ impl OpenAiCompatibleProvider {
modified_messages
}
fn parse_native_response(message: ResponseMessage) -> ProviderChatResponse {
fn parse_native_response(choice: Choice) -> ProviderChatResponse {
let raw_stop_reason = choice.finish_reason;
let stop_reason = raw_stop_reason
.as_deref()
.map(NormalizedStopReason::from_openai_finish_reason);
let message = choice.message;
let text = message.effective_content_optional();
let reasoning_content = message.reasoning_content.clone();
let tool_calls = message
@ -1847,6 +1856,8 @@ impl OpenAiCompatibleProvider {
usage: None,
reasoning_content,
quota_metadata: None,
stop_reason,
raw_stop_reason,
}
}
@ -1871,6 +1882,8 @@ impl OpenAiCompatibleProvider {
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: None,
raw_stop_reason: None,
})
}
}
@ -2220,6 +2233,8 @@ impl Provider for OpenAiCompatibleProvider {
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: None,
raw_stop_reason: None,
});
}
};
@ -2268,6 +2283,11 @@ impl Provider for OpenAiCompatibleProvider {
.next()
.ok_or_else(|| anyhow::anyhow!("No response from {}", self.name))?;
let raw_stop_reason = choice.finish_reason;
let stop_reason = raw_stop_reason
.as_deref()
.map(NormalizedStopReason::from_openai_finish_reason);
let text = choice.message.effective_content_optional();
let reasoning_content = choice.message.reasoning_content;
let tool_calls = choice
@ -2293,6 +2313,8 @@ impl Provider for OpenAiCompatibleProvider {
usage,
reasoning_content,
quota_metadata: None,
stop_reason,
raw_stop_reason,
})
}
@ -2417,14 +2439,13 @@ impl Provider for OpenAiCompatibleProvider {
input_tokens: u.prompt_tokens,
output_tokens: u.completion_tokens,
});
let message = native_response
let choice = native_response
.choices
.into_iter()
.next()
.map(|choice| choice.message)
.ok_or_else(|| anyhow::anyhow!("No response from {}", self.name))?;
let mut result = Self::parse_native_response(message);
let mut result = Self::parse_native_response(choice);
result.usage = usage;
Ok(result)
}
@ -3438,26 +3459,31 @@ mod tests {
#[test]
fn parse_native_response_preserves_tool_call_id() {
let message = ResponseMessage {
content: None,
tool_calls: Some(vec![ToolCall {
id: Some("call_123".to_string()),
kind: Some("function".to_string()),
function: Some(Function {
name: Some("shell".to_string()),
arguments: Some(r#"{"command":"pwd"}"#.to_string()),
}),
name: None,
arguments: None,
parameters: None,
}]),
reasoning_content: None,
let choice = Choice {
message: ResponseMessage {
content: None,
tool_calls: Some(vec![ToolCall {
id: Some("call_123".to_string()),
kind: Some("function".to_string()),
function: Some(Function {
name: Some("shell".to_string()),
arguments: Some(r#"{"command":"pwd"}"#.to_string()),
}),
name: None,
arguments: None,
parameters: None,
}]),
reasoning_content: None,
},
finish_reason: Some("tool_calls".to_string()),
};
let parsed = OpenAiCompatibleProvider::parse_native_response(message);
let parsed = OpenAiCompatibleProvider::parse_native_response(choice);
assert_eq!(parsed.tool_calls.len(), 1);
assert_eq!(parsed.tool_calls[0].id, "call_123");
assert_eq!(parsed.tool_calls[0].name, "shell");
assert_eq!(parsed.stop_reason, Some(NormalizedStopReason::ToolCall));
assert_eq!(parsed.raw_stop_reason.as_deref(), Some("tool_calls"));
}
#[test]
@ -4548,39 +4574,49 @@ mod tests {
#[test]
fn parse_native_response_captures_reasoning_content() {
let message = ResponseMessage {
content: Some("answer".to_string()),
reasoning_content: Some("thinking step".to_string()),
tool_calls: Some(vec![ToolCall {
id: Some("call_1".to_string()),
kind: Some("function".to_string()),
function: Some(Function {
name: Some("shell".to_string()),
arguments: Some(r#"{"cmd":"ls"}"#.to_string()),
}),
name: None,
arguments: None,
parameters: None,
}]),
let choice = Choice {
message: ResponseMessage {
content: Some("answer".to_string()),
reasoning_content: Some("thinking step".to_string()),
tool_calls: Some(vec![ToolCall {
id: Some("call_1".to_string()),
kind: Some("function".to_string()),
function: Some(Function {
name: Some("shell".to_string()),
arguments: Some(r#"{"cmd":"ls"}"#.to_string()),
}),
name: None,
arguments: None,
parameters: None,
}]),
},
finish_reason: Some("length".to_string()),
};
let parsed = OpenAiCompatibleProvider::parse_native_response(message);
let parsed = OpenAiCompatibleProvider::parse_native_response(choice);
assert_eq!(parsed.reasoning_content.as_deref(), Some("thinking step"));
assert_eq!(parsed.text.as_deref(), Some("answer"));
assert_eq!(parsed.tool_calls.len(), 1);
assert_eq!(parsed.stop_reason, Some(NormalizedStopReason::MaxTokens));
assert_eq!(parsed.raw_stop_reason.as_deref(), Some("length"));
}
#[test]
fn parse_native_response_none_reasoning_content_for_normal_model() {
let message = ResponseMessage {
content: Some("hello".to_string()),
reasoning_content: None,
tool_calls: None,
let choice = Choice {
message: ResponseMessage {
content: Some("hello".to_string()),
reasoning_content: None,
tool_calls: None,
},
finish_reason: Some("stop".to_string()),
};
let parsed = OpenAiCompatibleProvider::parse_native_response(message);
let parsed = OpenAiCompatibleProvider::parse_native_response(choice);
assert!(parsed.reasoning_content.is_none());
assert_eq!(parsed.text.as_deref(), Some("hello"));
assert_eq!(parsed.stop_reason, Some(NormalizedStopReason::EndTurn));
assert_eq!(parsed.raw_stop_reason.as_deref(), Some("stop"));
}
#[test]

View File

@ -400,6 +400,8 @@ impl CopilotProvider {
usage,
reasoning_content: None,
quota_metadata: None,
stop_reason: None,
raw_stop_reason: None,
})
}

View File

@ -236,6 +236,8 @@ impl Provider for CursorProvider {
usage: Some(TokenUsage::default()),
reasoning_content: None,
quota_metadata: None,
stop_reason: None,
raw_stop_reason: None,
})
}
}

View File

@ -6,7 +6,9 @@
use crate::auth::AuthService;
use crate::multimodal;
use crate::providers::traits::{ChatMessage, ChatResponse, Provider, TokenUsage};
use crate::providers::traits::{
ChatMessage, ChatResponse, NormalizedStopReason, Provider, TokenUsage,
};
use async_trait::async_trait;
use base64::Engine;
use directories::UserDirs;
@ -190,6 +192,8 @@ struct InternalGenerateContentResponse {
struct Candidate {
#[serde(default)]
content: Option<CandidateContent>,
#[serde(default, rename = "finishReason")]
finish_reason: Option<String>,
}
#[derive(Debug, Deserialize)]
@ -1005,7 +1009,12 @@ impl GeminiProvider {
system_instruction: Option<Content>,
model: &str,
temperature: f64,
) -> anyhow::Result<(String, Option<TokenUsage>)> {
) -> anyhow::Result<(
Option<String>,
Option<TokenUsage>,
Option<NormalizedStopReason>,
Option<String>,
)> {
let auth = self.auth.as_ref().ok_or_else(|| {
anyhow::anyhow!(
"Gemini API key not found. Options:\n\
@ -1198,14 +1207,18 @@ impl GeminiProvider {
output_tokens: u.candidates_token_count,
});
let text = result
let candidate = result
.candidates
.and_then(|c| c.into_iter().next())
.and_then(|c| c.content)
.and_then(|c| c.effective_text())
.ok_or_else(|| anyhow::anyhow!("No response from Gemini"))?;
let raw_stop_reason = candidate.finish_reason.clone();
let stop_reason = raw_stop_reason
.as_deref()
.map(NormalizedStopReason::from_gemini_finish_reason);
Ok((text, usage))
let text = candidate.content.and_then(|c| c.effective_text());
Ok((text, usage, stop_reason, raw_stop_reason))
}
}
@ -1230,9 +1243,10 @@ impl Provider for GeminiProvider {
parts: Self::build_user_parts(message),
}];
let (text, _usage) = self
let (text_opt, _usage, _stop_reason, _raw_stop_reason) = self
.send_generate_content(contents, system_instruction, model, temperature)
.await?;
let text = text_opt.ok_or_else(|| anyhow::anyhow!("No response from Gemini"))?;
Ok(text)
}
@ -1280,9 +1294,10 @@ impl Provider for GeminiProvider {
})
};
let (text, _usage) = self
let (text_opt, _usage, _stop_reason, _raw_stop_reason) = self
.send_generate_content(contents, system_instruction, model, temperature)
.await?;
let text = text_opt.ok_or_else(|| anyhow::anyhow!("No response from Gemini"))?;
Ok(text)
}
@ -1323,16 +1338,18 @@ impl Provider for GeminiProvider {
})
};
let (text, usage) = self
let (text, usage, stop_reason, raw_stop_reason) = self
.send_generate_content(contents, system_instruction, model, temperature)
.await?;
Ok(ChatResponse {
text: Some(text),
text,
tool_calls: Vec::new(),
usage,
reasoning_content: None,
quota_metadata: None,
stop_reason,
raw_stop_reason,
})
}

View File

@ -39,8 +39,8 @@ pub mod traits;
#[allow(unused_imports)]
pub use traits::{
is_user_or_assistant_role, ChatMessage, ChatRequest, ChatResponse, ConversationMessage,
Provider, ProviderCapabilityError, ToolCall, ToolResultMessage, ROLE_ASSISTANT, ROLE_SYSTEM,
ROLE_TOOL, ROLE_USER,
NormalizedStopReason, Provider, ProviderCapabilityError, ToolCall, ToolResultMessage,
ROLE_ASSISTANT, ROLE_SYSTEM, ROLE_TOOL, ROLE_USER,
};
use crate::auth::AuthService;

View File

@ -650,6 +650,8 @@ impl Provider for OllamaProvider {
usage,
reasoning_content: None,
quota_metadata: None,
stop_reason: None,
raw_stop_reason: None,
});
}
@ -669,6 +671,8 @@ impl Provider for OllamaProvider {
usage,
reasoning_content: None,
quota_metadata: None,
stop_reason: None,
raw_stop_reason: None,
})
}
@ -717,6 +721,8 @@ impl Provider for OllamaProvider {
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: None,
raw_stop_reason: None,
})
}
}

View File

@ -1,6 +1,6 @@
use crate::providers::traits::{
ChatMessage, ChatRequest as ProviderChatRequest, ChatResponse as ProviderChatResponse,
Provider, TokenUsage, ToolCall as ProviderToolCall,
NormalizedStopReason, Provider, TokenUsage, ToolCall as ProviderToolCall,
};
use crate::tools::ToolSpec;
use async_trait::async_trait;
@ -36,6 +36,8 @@ struct ChatResponse {
#[derive(Debug, Deserialize)]
struct Choice {
message: ResponseMessage,
#[serde(default)]
finish_reason: Option<String>,
}
#[derive(Debug, Deserialize)]
@ -145,6 +147,8 @@ struct UsageInfo {
#[derive(Debug, Deserialize)]
struct NativeChoice {
message: NativeResponseMessage,
#[serde(default)]
finish_reason: Option<String>,
}
#[derive(Debug, Deserialize)]
@ -282,7 +286,12 @@ impl OpenAiProvider {
.collect()
}
fn parse_native_response(message: NativeResponseMessage) -> ProviderChatResponse {
fn parse_native_response(choice: NativeChoice) -> ProviderChatResponse {
let raw_stop_reason = choice.finish_reason;
let stop_reason = raw_stop_reason
.as_deref()
.map(NormalizedStopReason::from_openai_finish_reason);
let message = choice.message;
let text = message.effective_content();
let reasoning_content = message.reasoning_content.clone();
let tool_calls = message
@ -302,6 +311,8 @@ impl OpenAiProvider {
usage: None,
reasoning_content,
quota_metadata: None,
stop_reason,
raw_stop_reason,
}
}
@ -407,13 +418,12 @@ impl Provider for OpenAiProvider {
input_tokens: u.prompt_tokens,
output_tokens: u.completion_tokens,
});
let message = native_response
let choice = native_response
.choices
.into_iter()
.next()
.map(|c| c.message)
.ok_or_else(|| anyhow::anyhow!("No response from OpenAI"))?;
let mut result = Self::parse_native_response(message);
let mut result = Self::parse_native_response(choice);
result.usage = usage;
result.quota_metadata = quota_metadata;
Ok(result)
@ -476,13 +486,12 @@ impl Provider for OpenAiProvider {
input_tokens: u.prompt_tokens,
output_tokens: u.completion_tokens,
});
let message = native_response
let choice = native_response
.choices
.into_iter()
.next()
.map(|c| c.message)
.ok_or_else(|| anyhow::anyhow!("No response from OpenAI"))?;
let mut result = Self::parse_native_response(message);
let mut result = Self::parse_native_response(choice);
result.usage = usage;
result.quota_metadata = quota_metadata;
Ok(result)
@ -773,21 +782,25 @@ mod tests {
"content":"answer",
"reasoning_content":"thinking step",
"tool_calls":[{"id":"call_1","type":"function","function":{"name":"shell","arguments":"{}"}}]
}}]}"#;
},"finish_reason":"length"}]}"#;
let resp: NativeChatResponse = serde_json::from_str(json).unwrap();
let message = resp.choices.into_iter().next().unwrap().message;
let parsed = OpenAiProvider::parse_native_response(message);
let choice = resp.choices.into_iter().next().unwrap();
let parsed = OpenAiProvider::parse_native_response(choice);
assert_eq!(parsed.reasoning_content.as_deref(), Some("thinking step"));
assert_eq!(parsed.tool_calls.len(), 1);
assert_eq!(parsed.stop_reason, Some(NormalizedStopReason::MaxTokens));
assert_eq!(parsed.raw_stop_reason.as_deref(), Some("length"));
}
#[test]
fn parse_native_response_none_reasoning_content_for_normal_model() {
let json = r#"{"choices":[{"message":{"content":"hello"}}]}"#;
let json = r#"{"choices":[{"message":{"content":"hello"},"finish_reason":"stop"}]}"#;
let resp: NativeChatResponse = serde_json::from_str(json).unwrap();
let message = resp.choices.into_iter().next().unwrap().message;
let parsed = OpenAiProvider::parse_native_response(message);
let choice = resp.choices.into_iter().next().unwrap();
let parsed = OpenAiProvider::parse_native_response(choice);
assert!(parsed.reasoning_content.is_none());
assert_eq!(parsed.stop_reason, Some(NormalizedStopReason::EndTurn));
assert_eq!(parsed.raw_stop_reason.as_deref(), Some("stop"));
}
#[test]

View File

@ -1,7 +1,7 @@
use crate::multimodal;
use crate::providers::traits::{
ChatMessage, ChatRequest as ProviderChatRequest, ChatResponse as ProviderChatResponse,
Provider, ProviderCapabilities, TokenUsage, ToolCall as ProviderToolCall,
NormalizedStopReason, Provider, ProviderCapabilities, TokenUsage, ToolCall as ProviderToolCall,
};
use crate::tools::ToolSpec;
use async_trait::async_trait;
@ -55,6 +55,8 @@ struct ApiChatResponse {
/// Deserialization target for one element of the `choices` array in a
/// chat-completion response.
#[derive(Debug, Deserialize)]
struct Choice {
    message: ResponseMessage,
    // Tolerate payloads that omit `finish_reason`: default to `None`.
    #[serde(default)]
    finish_reason: Option<String>,
}
#[derive(Debug, Deserialize)]
@ -137,6 +139,8 @@ struct UsageInfo {
/// Deserialization target for one element of the `choices` array in a
/// native-API chat response.
#[derive(Debug, Deserialize)]
struct NativeChoice {
    message: NativeResponseMessage,
    // Tolerate payloads that omit `finish_reason`: default to `None`.
    #[serde(default)]
    finish_reason: Option<String>,
}
#[derive(Debug, Deserialize)]
@ -284,7 +288,12 @@ impl OpenRouterProvider {
MessageContent::Parts(parts)
}
fn parse_native_response(message: NativeResponseMessage) -> ProviderChatResponse {
fn parse_native_response(choice: NativeChoice) -> ProviderChatResponse {
let raw_stop_reason = choice.finish_reason;
let stop_reason = raw_stop_reason
.as_deref()
.map(NormalizedStopReason::from_openai_finish_reason);
let message = choice.message;
let reasoning_content = message.reasoning_content.clone();
let tool_calls = message
.tool_calls
@ -303,6 +312,8 @@ impl OpenRouterProvider {
usage: None,
reasoning_content,
quota_metadata: None,
stop_reason,
raw_stop_reason,
}
}
@ -487,13 +498,12 @@ impl Provider for OpenRouterProvider {
input_tokens: u.prompt_tokens,
output_tokens: u.completion_tokens,
});
let message = native_response
let choice = native_response
.choices
.into_iter()
.next()
.map(|c| c.message)
.ok_or_else(|| anyhow::anyhow!("No response from OpenRouter"))?;
let mut result = Self::parse_native_response(message);
let mut result = Self::parse_native_response(choice);
result.usage = usage;
Ok(result)
}
@ -582,13 +592,12 @@ impl Provider for OpenRouterProvider {
input_tokens: u.prompt_tokens,
output_tokens: u.completion_tokens,
});
let message = native_response
let choice = native_response
.choices
.into_iter()
.next()
.map(|c| c.message)
.ok_or_else(|| anyhow::anyhow!("No response from OpenRouter"))?;
let mut result = Self::parse_native_response(message);
let mut result = Self::parse_native_response(choice);
result.usage = usage;
Ok(result)
}
@ -828,25 +837,30 @@ mod tests {
#[test]
fn parse_native_response_converts_to_chat_response() {
let message = NativeResponseMessage {
content: Some("Here you go.".into()),
reasoning_content: None,
tool_calls: Some(vec![NativeToolCall {
id: Some("call_789".into()),
kind: Some("function".into()),
function: NativeFunctionCall {
name: "file_read".into(),
arguments: r#"{"path":"test.txt"}"#.into(),
},
}]),
let choice = NativeChoice {
message: NativeResponseMessage {
content: Some("Here you go.".into()),
reasoning_content: None,
tool_calls: Some(vec![NativeToolCall {
id: Some("call_789".into()),
kind: Some("function".into()),
function: NativeFunctionCall {
name: "file_read".into(),
arguments: r#"{"path":"test.txt"}"#.into(),
},
}]),
},
finish_reason: Some("stop".into()),
};
let response = OpenRouterProvider::parse_native_response(message);
let response = OpenRouterProvider::parse_native_response(choice);
assert_eq!(response.text.as_deref(), Some("Here you go."));
assert_eq!(response.tool_calls.len(), 1);
assert_eq!(response.tool_calls[0].id, "call_789");
assert_eq!(response.tool_calls[0].name, "file_read");
assert_eq!(response.stop_reason, Some(NormalizedStopReason::EndTurn));
assert_eq!(response.raw_stop_reason.as_deref(), Some("stop"));
}
#[test]
@ -942,32 +956,42 @@ mod tests {
#[test]
fn parse_native_response_captures_reasoning_content() {
let message = NativeResponseMessage {
content: Some("answer".into()),
reasoning_content: Some("thinking step".into()),
tool_calls: Some(vec![NativeToolCall {
id: Some("call_1".into()),
kind: Some("function".into()),
function: NativeFunctionCall {
name: "shell".into(),
arguments: "{}".into(),
},
}]),
let choice = NativeChoice {
message: NativeResponseMessage {
content: Some("answer".into()),
reasoning_content: Some("thinking step".into()),
tool_calls: Some(vec![NativeToolCall {
id: Some("call_1".into()),
kind: Some("function".into()),
function: NativeFunctionCall {
name: "shell".into(),
arguments: "{}".into(),
},
}]),
},
finish_reason: Some("length".into()),
};
let parsed = OpenRouterProvider::parse_native_response(message);
let parsed = OpenRouterProvider::parse_native_response(choice);
assert_eq!(parsed.reasoning_content.as_deref(), Some("thinking step"));
assert_eq!(parsed.tool_calls.len(), 1);
assert_eq!(parsed.stop_reason, Some(NormalizedStopReason::MaxTokens));
assert_eq!(parsed.raw_stop_reason.as_deref(), Some("length"));
}
#[test]
fn parse_native_response_none_reasoning_content_for_normal_model() {
let message = NativeResponseMessage {
content: Some("hello".into()),
reasoning_content: None,
tool_calls: None,
let choice = NativeChoice {
message: NativeResponseMessage {
content: Some("hello".into()),
reasoning_content: None,
tool_calls: None,
},
finish_reason: Some("stop".into()),
};
let parsed = OpenRouterProvider::parse_native_response(message);
let parsed = OpenRouterProvider::parse_native_response(choice);
assert!(parsed.reasoning_content.is_none());
assert_eq!(parsed.stop_reason, Some(NormalizedStopReason::EndTurn));
assert_eq!(parsed.raw_stop_reason.as_deref(), Some("stop"));
}
#[test]

View File

@ -1876,6 +1876,8 @@ mod tests {
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: None,
raw_stop_reason: None,
})
}
}
@ -2070,6 +2072,8 @@ mod tests {
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: None,
raw_stop_reason: None,
})
}
}

View File

@ -65,6 +65,69 @@ pub struct TokenUsage {
pub output_tokens: Option<u64>,
}
/// Provider-agnostic stop reasons used by the agent loop.
///
/// Raw provider strings are converted into these variants by the
/// `from_*` constructors on this type; any value no mapper recognizes is
/// preserved verbatim in [`NormalizedStopReason::Unknown`].
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(tag = "kind", content = "value", rename_all = "snake_case")]
pub enum NormalizedStopReason {
    /// The model finished its turn normally.
    EndTurn,
    /// The model stopped to invoke one or more tools.
    ToolCall,
    /// Generation was truncated by the output-token limit.
    MaxTokens,
    /// The request exceeded the model's context window.
    ContextWindowExceeded,
    /// Output was blocked by a safety / content filter.
    SafetyBlocked,
    /// The request was cancelled before completion.
    Cancelled,
    /// Unrecognized provider value, kept verbatim for diagnostics.
    Unknown(String),
}
impl NormalizedStopReason {
    /// Shared fallback for all mappers: keep the trimmed provider string
    /// verbatim so diagnostics can surface unexpected values.
    fn unknown(raw: &str) -> Self {
        Self::Unknown(raw.trim().to_string())
    }

    /// Normalizes an OpenAI-compatible `finish_reason` value
    /// (case-insensitive, surrounding whitespace ignored).
    pub fn from_openai_finish_reason(raw: &str) -> Self {
        let key = raw.trim().to_ascii_lowercase();
        match key.as_str() {
            "stop" => Self::EndTurn,
            "tool_calls" | "function_call" => Self::ToolCall,
            "length" | "max_tokens" => Self::MaxTokens,
            "content_filter" => Self::SafetyBlocked,
            "cancelled" | "canceled" => Self::Cancelled,
            _ => Self::unknown(raw),
        }
    }

    /// Normalizes an Anthropic stop-reason value
    /// (case-insensitive, surrounding whitespace ignored).
    pub fn from_anthropic_stop_reason(raw: &str) -> Self {
        let key = raw.trim().to_ascii_lowercase();
        match key.as_str() {
            "end_turn" | "stop_sequence" => Self::EndTurn,
            "tool_use" => Self::ToolCall,
            "max_tokens" => Self::MaxTokens,
            "model_context_window_exceeded" => Self::ContextWindowExceeded,
            "safety" => Self::SafetyBlocked,
            "cancelled" | "canceled" => Self::Cancelled,
            _ => Self::unknown(raw),
        }
    }

    /// Normalizes a Bedrock stop-reason value
    /// (case-insensitive, surrounding whitespace ignored).
    pub fn from_bedrock_stop_reason(raw: &str) -> Self {
        let key = raw.trim().to_ascii_lowercase();
        match key.as_str() {
            "end_turn" => Self::EndTurn,
            "tool_use" => Self::ToolCall,
            "max_tokens" => Self::MaxTokens,
            "guardrail_intervened" => Self::SafetyBlocked,
            "cancelled" | "canceled" => Self::Cancelled,
            _ => Self::unknown(raw),
        }
    }

    /// Normalizes a Gemini finish-reason value. Gemini reports upper-cased
    /// enum names, so the key is upper-cased before matching.
    pub fn from_gemini_finish_reason(raw: &str) -> Self {
        let key = raw.trim().to_ascii_uppercase();
        match key.as_str() {
            "STOP" => Self::EndTurn,
            "MAX_TOKENS" => Self::MaxTokens,
            "MALFORMED_FUNCTION_CALL" | "UNEXPECTED_TOOL_CALL" | "TOO_MANY_TOOL_CALLS" => {
                Self::ToolCall
            }
            "SAFETY" | "RECITATION" => Self::SafetyBlocked,
            // Observed in some integrations even though not always listed in docs.
            "CANCELLED" => Self::Cancelled,
            _ => Self::unknown(raw),
        }
    }
}
/// An LLM response that may contain text, tool calls, or both.
#[derive(Debug, Clone)]
pub struct ChatResponse {
@ -82,6 +145,10 @@ pub struct ChatResponse {
/// Quota metadata extracted from response headers (if available).
/// Populated by providers that support quota tracking.
pub quota_metadata: Option<super::quota_types::QuotaMetadata>,
/// Normalized provider stop reason (if surfaced by the upstream API).
pub stop_reason: Option<NormalizedStopReason>,
/// Raw provider-native stop reason string for diagnostics.
pub raw_stop_reason: Option<String>,
}
impl ChatResponse {
@ -376,6 +443,8 @@ pub trait Provider: Send + Sync {
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: None,
raw_stop_reason: None,
});
}
}
@ -389,6 +458,8 @@ pub trait Provider: Send + Sync {
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: None,
raw_stop_reason: None,
})
}
@ -425,6 +496,8 @@ pub trait Provider: Send + Sync {
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: None,
raw_stop_reason: None,
})
}
@ -555,6 +628,8 @@ mod tests {
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: None,
raw_stop_reason: None,
};
assert!(!empty.has_tool_calls());
assert_eq!(empty.text_or_empty(), "");
@ -569,6 +644,8 @@ mod tests {
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: None,
raw_stop_reason: None,
};
assert!(with_tools.has_tool_calls());
assert_eq!(with_tools.text_or_empty(), "Let me check");
@ -592,6 +669,8 @@ mod tests {
}),
reasoning_content: None,
quota_metadata: None,
stop_reason: None,
raw_stop_reason: None,
};
assert_eq!(resp.usage.as_ref().unwrap().input_tokens, Some(100));
assert_eq!(resp.usage.as_ref().unwrap().output_tokens, Some(50));
@ -661,6 +740,42 @@ mod tests {
assert!(provider.supports_vision());
}
#[test]
fn normalized_stop_reason_mappings_cover_core_provider_values() {
    // OpenAI-compatible finish reasons.
    let openai_cases = [
        ("length", NormalizedStopReason::MaxTokens),
        ("tool_calls", NormalizedStopReason::ToolCall),
    ];
    for (raw, expected) in openai_cases {
        assert_eq!(NormalizedStopReason::from_openai_finish_reason(raw), expected);
    }

    // Anthropic and Bedrock report their own vocabularies.
    assert_eq!(
        NormalizedStopReason::from_anthropic_stop_reason("model_context_window_exceeded"),
        NormalizedStopReason::ContextWindowExceeded
    );
    assert_eq!(
        NormalizedStopReason::from_bedrock_stop_reason("guardrail_intervened"),
        NormalizedStopReason::SafetyBlocked
    );

    // Gemini finish reasons, including the tool-call failure modes.
    let gemini_cases = [
        ("MAX_TOKENS", NormalizedStopReason::MaxTokens),
        ("MALFORMED_FUNCTION_CALL", NormalizedStopReason::ToolCall),
        ("UNEXPECTED_TOOL_CALL", NormalizedStopReason::ToolCall),
        ("TOO_MANY_TOOL_CALLS", NormalizedStopReason::ToolCall),
    ];
    for (raw, expected) in gemini_cases {
        assert_eq!(NormalizedStopReason::from_gemini_finish_reason(raw), expected);
    }
}
#[test]
fn tools_payload_variants() {
// Test Gemini variant

View File

@ -881,6 +881,8 @@ mod tests {
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: None,
raw_stop_reason: None,
})
} else {
Ok(ChatResponse {
@ -893,6 +895,8 @@ mod tests {
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: None,
raw_stop_reason: None,
})
}
}
@ -928,6 +932,8 @@ mod tests {
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: None,
raw_stop_reason: None,
})
}
}

View File

@ -936,6 +936,8 @@ mod tests {
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: None,
raw_stop_reason: None,
});
}
Ok(guard.remove(0))
@ -997,6 +999,8 @@ mod tests {
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: None,
raw_stop_reason: None,
},
// Turn 1 continued: provider sees tool result and answers
ChatResponse {
@ -1005,6 +1009,8 @@ mod tests {
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: None,
raw_stop_reason: None,
},
]);
@ -1092,6 +1098,8 @@ mod tests {
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: None,
raw_stop_reason: None,
},
ChatResponse {
text: Some("The file appears to be binary data.".into()),
@ -1099,6 +1107,8 @@ mod tests {
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: None,
raw_stop_reason: None,
},
]);

View File

@ -67,6 +67,8 @@ impl Provider for MockProvider {
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: None,
raw_stop_reason: None,
});
}
Ok(guard.remove(0))
@ -194,6 +196,8 @@ impl Provider for RecordingProvider {
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: None,
raw_stop_reason: None,
});
}
Ok(guard.remove(0))
@ -244,6 +248,8 @@ fn text_response(text: &str) -> ChatResponse {
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: None,
raw_stop_reason: None,
}
}
@ -254,6 +260,8 @@ fn tool_response(calls: Vec<ToolCall>) -> ChatResponse {
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: None,
raw_stop_reason: None,
}
}
@ -380,6 +388,8 @@ async fn e2e_xml_dispatcher_tool_call() {
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: None,
raw_stop_reason: None,
},
text_response("XML tool executed"),
]));
@ -1019,6 +1029,8 @@ async fn e2e_agent_research_prompt_guided() {
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: None,
raw_stop_reason: None,
});
}
Ok(guard.remove(0))
@ -1038,6 +1050,8 @@ async fn e2e_agent_research_prompt_guided() {
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: None,
raw_stop_reason: None,
};
// Response 2: Research complete
@ -1047,6 +1061,8 @@ async fn e2e_agent_research_prompt_guided() {
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: None,
raw_stop_reason: None,
};
// Response 3: Main turn response

View File

@ -62,6 +62,8 @@ impl Provider for MockProvider {
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: None,
raw_stop_reason: None,
});
}
Ok(guard.remove(0))
@ -185,6 +187,8 @@ fn text_response(text: &str) -> ChatResponse {
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: None,
raw_stop_reason: None,
}
}
@ -195,6 +199,8 @@ fn tool_response(calls: Vec<ToolCall>) -> ChatResponse {
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: None,
raw_stop_reason: None,
}
}
@ -365,6 +371,8 @@ async fn agent_handles_empty_provider_response() {
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: None,
raw_stop_reason: None,
}]));
let mut agent = build_agent(provider, vec![Box::new(EchoTool)]);
@ -381,6 +389,8 @@ async fn agent_handles_none_text_response() {
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: None,
raw_stop_reason: None,
}]));
let mut agent = build_agent(provider, vec![Box::new(EchoTool)]);

View File

@ -156,6 +156,8 @@ fn chat_response_text_only() {
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: None,
raw_stop_reason: None,
};
assert_eq!(resp.text_or_empty(), "Hello world");
@ -174,6 +176,8 @@ fn chat_response_with_tool_calls() {
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: None,
raw_stop_reason: None,
};
assert!(resp.has_tool_calls());
@ -189,6 +193,8 @@ fn chat_response_text_or_empty_handles_none() {
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: None,
raw_stop_reason: None,
};
assert_eq!(resp.text_or_empty(), "");
@ -213,6 +219,8 @@ fn chat_response_multiple_tool_calls() {
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: None,
raw_stop_reason: None,
};
assert!(resp.has_tool_calls());