feat(auth): add Gemini OAuth refresh with client credentials and quota tools

This commit is contained in:
argenis de la rosa 2026-03-05 11:06:51 -05:00 committed by Argenis
parent 8fb460355b
commit 7bbafd024d
7 changed files with 1430 additions and 29 deletions

View File

@ -118,6 +118,16 @@ Notes:
`models refresh` currently supports live catalog refresh for provider IDs: `openrouter`, `openai`, `anthropic`, `groq`, `mistral`, `deepseek`, `xai`, `together-ai`, `gemini`, `ollama`, `llamacpp`, `sglang`, `vllm`, `astrai`, `venice`, `fireworks`, `cohere`, `moonshot`, `glm`, `zai`, `qwen`, and `nvidia`.
#### Live model availability test
```bash
./dev/test_models.sh # test all Gemini models + profile rotation
./dev/test_models.sh models # test model availability only
./dev/test_models.sh profiles # test profile rotation only
```
Runs a Rust integration test (`tests/gemini_model_availability.rs`) that verifies each model against the OAuth endpoint (cloudcode-pa). Requires valid Gemini OAuth credentials in `auth-profiles.json`.
### `doctor`
- `zeroclaw doctor`

View File

@ -54,6 +54,11 @@ pub const GOOGLE_OAUTH_TOKEN_URL: &str = "https://oauth2.googleapis.com/token";
pub const GOOGLE_OAUTH_DEVICE_CODE_URL: &str = "https://oauth2.googleapis.com/device/code";
pub const GEMINI_OAUTH_REDIRECT_URI: &str = "http://localhost:1456/auth/callback";
/// Well-known public client secret used by the Gemini CLI.
/// This is a non-sensitive, publicly embedded constant (identical to the value
/// shipped in the Gemini CLI source).
pub const GEMINI_CLI_DEFAULT_CLIENT_SECRET: &str = "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";
/// Scopes required for Gemini API access.
pub const GEMINI_OAUTH_SCOPES: &str =
"openid profile email https://www.googleapis.com/auth/cloud-platform";
@ -500,6 +505,97 @@ pub fn parse_code_from_redirect(input: &str, expected_state: Option<&str>) -> Re
anyhow::bail!("Could not parse OAuth code from input")
}
/// Extract the OAuth client ID (`aud` or `azp` claim) from a Google ID token.
///
/// Prefers the `aud` claim; falls back to `azp` if `aud` is absent or empty.
pub fn extract_client_id_from_id_token(id_token: &str) -> Option<String> {
    #[derive(Deserialize)]
    struct IdTokenClaims {
        aud: Option<String>,
        azp: Option<String>,
    }

    // A JWT is `header.payload.signature`; the claims are in segment 1.
    let payload = id_token.split('.').nth(1)?;

    // Tokens are normally unpadded base64url, but tolerate padded input too.
    let unpadded = &base64::engine::general_purpose::URL_SAFE_NO_PAD;
    let padded = &base64::engine::general_purpose::URL_SAFE;
    let decoded = unpadded
        .decode(payload)
        .or_else(|_| padded.decode(payload))
        .ok()?;

    let claims: IdTokenClaims = serde_json::from_slice(&decoded).ok()?;

    // `aud` wins when present and non-blank; otherwise fall back to `azp`.
    normalize_non_empty_opt(claims.aud.as_deref())
        .or_else(|| normalize_non_empty_opt(claims.azp.as_deref()))
}
/// Trim and reject empty/whitespace-only strings.
fn normalize_non_empty_opt(value: Option<&str>) -> Option<String> {
    // `?` handles the None case; then reject blank-after-trim values early.
    let trimmed = value?.trim();
    if trimmed.is_empty() {
        return None;
    }
    Some(trimmed.to_string())
}
/// Refresh an access token using explicit client credentials.
///
/// Use this when the caller already knows `client_id` and `client_secret`
/// (e.g. extracted from a stored id_token + the well-known public secret).
///
/// # Arguments
/// * `client` - HTTP client used to POST to the Google token endpoint.
/// * `refresh_token` - long-lived refresh token previously issued by Google.
/// * `client_id` / `client_secret` - OAuth client credentials to present.
///
/// # Errors
/// Fails when the request cannot be sent, the body cannot be read, Google
/// returns a non-success status, or the response does not parse as a token
/// payload.
pub async fn refresh_access_token_with_credentials(
    client: &Client,
    refresh_token: &str,
    client_id: &str,
    client_secret: &str,
) -> Result<TokenSet> {
    // Standard OAuth2 `refresh_token` grant, sent form-encoded.
    let form = [
        ("grant_type", "refresh_token"),
        ("refresh_token", refresh_token),
        ("client_id", client_id),
        ("client_secret", client_secret),
    ];
    let response = client
        .post(GOOGLE_OAUTH_TOKEN_URL)
        .form(&form)
        .send()
        .await
        .context("Failed to send refresh token request")?;
    // Capture the status before consuming the body so both can be reported.
    let status = response.status();
    let body = response
        .text()
        .await
        .context("Failed to read refresh response body")?;
    if !status.is_success() {
        // Prefer the structured OAuth error payload when Google returns one;
        // otherwise fall back to the raw status + body.
        if let Ok(err) = serde_json::from_str::<OAuthErrorResponse>(&body) {
            anyhow::bail!(
                "Google OAuth refresh error: {} - {}",
                err.error,
                err.error_description.unwrap_or_default()
            );
        }
        anyhow::bail!("Google OAuth refresh failed ({}): {}", status, body);
    }
    let token_response: TokenResponse =
        serde_json::from_str(&body).context("Failed to parse refresh response")?;
    // Convert the relative `expires_in` seconds into an absolute timestamp.
    let expires_at = token_response
        .expires_in
        .map(|secs| Utc::now() + chrono::Duration::seconds(secs));
    Ok(TokenSet {
        access_token: token_response.access_token,
        // May be None: Google often omits the refresh token on refresh;
        // callers are expected to keep their existing one in that case.
        refresh_token: token_response.refresh_token,
        id_token: token_response.id_token,
        expires_at,
        // Default to "Bearer" when the server omits token_type.
        token_type: token_response.token_type.or_else(|| Some("Bearer".into())),
        scope: token_response.scope,
    })
}
/// Extract account email from Google ID token.
pub fn extract_account_email_from_id_token(id_token: &str) -> Option<String> {
let parts: Vec<&str> = id_token.split('.').collect();
@ -596,4 +692,41 @@ mod tests {
let email = extract_account_email_from_id_token(&token);
assert_eq!(email, Some("test@example.com".to_string()));
}
#[test]
fn extract_client_id_from_id_token_prefers_aud_claim() {
let payload = serde_json::json!({
"aud": "aud-client-id",
"azp": "azp-client-id"
});
let payload_b64 = base64::engine::general_purpose::URL_SAFE_NO_PAD
.encode(serde_json::to_vec(&payload).unwrap());
let token = format!("header.{payload_b64}.sig");
assert_eq!(
extract_client_id_from_id_token(&token),
Some("aud-client-id".to_string())
);
}
#[test]
fn extract_client_id_from_id_token_uses_azp_when_aud_missing() {
let payload = serde_json::json!({
"azp": "azp-client-id"
});
let payload_b64 = base64::engine::general_purpose::URL_SAFE_NO_PAD
.encode(serde_json::to_vec(&payload).unwrap());
let token = format!("header.{payload_b64}.sig");
assert_eq!(
extract_client_id_from_id_token(&token),
Some("azp-client-id".to_string())
);
}
#[test]
fn extract_client_id_from_id_token_returns_none_for_invalid_tokens() {
assert_eq!(extract_client_id_from_id_token("invalid"), None);
assert_eq!(extract_client_id_from_id_token("a.b.c"), None);
}
}

View File

@ -27,7 +27,7 @@ static REFRESH_BACKOFFS: OnceLock<Mutex<HashMap<String, Instant>>> = OnceLock::n
#[derive(Clone)]
pub struct AuthService {
store: AuthProfilesStore,
pub(crate) store: AuthProfilesStore,
client: reqwest::Client,
}
@ -299,20 +299,37 @@ impl AuthService {
);
}
let mut refreshed =
match refresh_gemini_access_token_with_retries(&self.client, &refresh_token).await {
Ok(tokens) => {
clear_refresh_backoff(&profile_id);
tokens
}
Err(err) => {
set_refresh_backoff(
&profile_id,
Duration::from_secs(OPENAI_REFRESH_FAILURE_BACKOFF_SECS),
);
return Err(err);
}
};
// Extract client credentials for refresh: try id_token `aud` claim
// first, then fall back to env var. For client_secret, use the
// well-known public Gemini CLI secret as the final fallback.
let id_token_client_id = latest_tokens
.id_token
.as_deref()
.and_then(gemini_oauth::extract_client_id_from_id_token);
let refresh_client_id = gemini_oauth::gemini_oauth_client_id().or(id_token_client_id);
let refresh_client_secret = gemini_oauth::gemini_oauth_client_secret()
.or_else(|| Some(gemini_oauth::GEMINI_CLI_DEFAULT_CLIENT_SECRET.to_string()));
let mut refreshed = match refresh_gemini_access_token_with_retries(
&self.client,
&refresh_token,
refresh_client_id.as_deref(),
refresh_client_secret.as_deref(),
)
.await
{
Ok(tokens) => {
clear_refresh_backoff(&profile_id);
tokens
}
Err(err) => {
set_refresh_backoff(
&profile_id,
Duration::from_secs(OPENAI_REFRESH_FAILURE_BACKOFF_SECS),
);
return Err(err);
}
};
if refreshed.refresh_token.is_none() {
refreshed
.refresh_token
@ -441,11 +458,25 @@ async fn refresh_openai_access_token_with_retries(
async fn refresh_gemini_access_token_with_retries(
client: &reqwest::Client,
refresh_token: &str,
client_id: Option<&str>,
client_secret: Option<&str>,
) -> Result<TokenSet> {
let mut last_error: Option<anyhow::Error> = None;
for attempt in 1..=OAUTH_REFRESH_MAX_ATTEMPTS {
match gemini_oauth::refresh_access_token(client, refresh_token).await {
let result = match (client_id, client_secret) {
(Some(id), Some(secret)) => {
gemini_oauth::refresh_access_token_with_credentials(
client,
refresh_token,
id,
secret,
)
.await
}
_ => gemini_oauth::refresh_access_token(client, refresh_token).await,
};
match result {
Ok(tokens) => return Ok(tokens),
Err(err) => {
let should_retry = attempt < OAUTH_REFRESH_MAX_ATTEMPTS;

View File

@ -29,20 +29,20 @@ fn require_auth(
state: &AppState,
headers: &HeaderMap,
) -> Result<(), (StatusCode, Json<serde_json::Value>)> {
if !state.pairing.require_pairing() {
return Ok(());
}
let token = extract_bearer_token(headers).unwrap_or("");
if state.pairing.is_authenticated(token) {
Ok(())
if state.pairing.require_pairing() {
let token = extract_bearer_token(headers).unwrap_or("");
if state.pairing.is_authenticated(token) {
Ok(())
} else {
Err((
StatusCode::UNAUTHORIZED,
Json(serde_json::json!({
"error": "Unauthorized — pair first via POST /pair, then send Authorization: Bearer <token>"
})),
))
}
} else {
Err((
StatusCode::UNAUTHORIZED,
Json(serde_json::json!({
"error": "Unauthorized — pair first via POST /pair, then send Authorization: Bearer <token>"
})),
))
Ok(())
}
}

506
tests/e2e_auth_switch_live.sh Executable file
View File

@ -0,0 +1,506 @@
#!/usr/bin/env bash
# E2E Live Test: Auth Profile & Provider Switch Tools
#
# Tests manage_auth_profile (list/switch/refresh) and switch_provider
# against live providers through the agent loop.
#
# Usage:
# bash tests/e2e_auth_switch_live.sh # build + run all
# bash tests/e2e_auth_switch_live.sh --skip-build # skip cargo build
# bash tests/e2e_auth_switch_live.sh --cli-only # CLI tests only
#
# Environment:
# ZEROCLAW_CONFIG_DIR — override config dir (default: ~/.zeroclaw)
# ZEROCLAW_BIN — override binary path
# TIMEOUT — per-test timeout in seconds (default: 120)
set -eo pipefail
# ── Config ─────────────────────────────────────────────────────────────
REPO_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
export ZEROCLAW_CONFIG_DIR="${ZEROCLAW_CONFIG_DIR:-${HOME}/.zeroclaw}"
ZEROCLAW_BIN="${ZEROCLAW_BIN:-${REPO_ROOT}/target/release/zeroclaw}"
TIMEOUT="${TIMEOUT:-120}"
LOG_FILE="/tmp/e2e_auth_switch_$(date +%Y%m%d_%H%M%S).log"
SKIP_BUILD=false
CLI_ONLY=false
for arg in "$@"; do
case "$arg" in
--skip-build) SKIP_BUILD=true ;;
--cli-only) CLI_ONLY=true ;;
esac
done
# ── Colors ─────────────────────────────────────────────────────────────
GREEN='\033[0;32m'
RED='\033[0;31m'
YELLOW='\033[1;33m'
CYAN='\033[0;36m'
NC='\033[0m'
# ── Counters ───────────────────────────────────────────────────────────
test_total=0
test_pass=0
test_fail=0
test_skip=0
# ── Helpers ────────────────────────────────────────────────────────────
# log: print one plain line to stdout and append it to the log file.
log() { printf '%s\n' "$*" | tee -a "$LOG_FILE"; }
# logc: printf-style (color-aware) output to stdout and the log file.
# NOTE: the first argument is deliberately used as the printf format string
# so callers can embed %d/%s placeholders plus ANSI color escapes.
logc() { printf "$@" | tee -a "$LOG_FILE"; }
# banner: visual section divider in the console and log.
banner() {
    log ""
    log "================================================================"
    log " $1"
    log "================================================================"
}
# Run one test through the zeroclaw agent loop and record PASS/FAIL.
#   $1  test label (for the log)
#   $2  message sent to the agent
#   $3  extra agent CLI flags (e.g. "-p openai-codex"; may be empty)
#   $4  pipe-separated keywords — at least one must appear in the output
# Increments test_total and exactly one of test_pass/test_fail.
run_agent_test() {
    local label="$1" message="$2" agent_flags="$3" keywords="$4"
    test_total=$((test_total + 1))
    log ""
    logc "${CYAN}[%02d] %s${NC}\n" "$test_total" "$label"
    log " message: $message"
    log " expect keywords: $keywords"
    local output="" rc=0
    # Disable pipefail for this pipeline: yes always exits 141 (SIGPIPE) when
    # the agent finishes, and pipefail would propagate that to set -e.
    # `yes` on stdin auto-approves any remaining interactive tool prompts.
    output=$(set +o pipefail; yes 2>/dev/null | timeout "${TIMEOUT}" ${ZEROCLAW_BIN} agent -m "$message" $agent_flags 2>&1) || rc=$?
    printf '%s\n' "$output" >> "$LOG_FILE"
    # Count how many of the expected keywords appear (case-insensitive).
    local found=0 total_kw=0
    local IFS='|'
    for kw in $keywords; do
        total_kw=$((total_kw + 1))
        if echo "$output" | grep -qi "$kw"; then
            found=$((found + 1))
        fi
    done
    # Rate-limit/error signals also count as success (proves tool was invoked)
    if echo "$output" | grep -qiE "rate.limit|429|quota.exhaust|usage.limit|expired|backoff|refresh"; then
        found=$((found + 1))
        total_kw=$((total_kw + 1))
    fi
    if [ "$found" -gt 0 ]; then
        # A non-zero exit with keyword matches still passes: these are live
        # tests, so a provider error/fallback after tool invocation is OK.
        if [ $rc -ne 0 ]; then
            logc " ${GREEN}PASS${NC} (matched %d keywords, exit=%d — fallback/error)\n" "$found" "$rc"
        else
            logc " ${GREEN}PASS${NC} (matched %d/%d keywords)\n" "$found" "$total_kw"
        fi
        test_pass=$((test_pass + 1))
        # Keep a short relevant excerpt in the log for later inspection.
        echo "$output" | grep -iE "profile|provider|token|switch|refresh|account|active|expired|valid|budget|cost" | head -10 >> "$LOG_FILE" || true
    else
        # Exit code 124 is GNU timeout's "command timed out" status.
        if [ $rc -eq 124 ]; then
            logc " ${RED}FAIL${NC} (timeout after ${TIMEOUT}s, 0 keywords matched)\n"
        else
            logc " ${RED}FAIL${NC} (exit=%d, 0/%d keywords matched)\n" "$rc" "$total_kw"
        fi
        test_fail=$((test_fail + 1))
        log " --- last 20 lines of output ---"
        echo "$output" | tail -20 | tee -a "$LOG_FILE"
    fi
}
# Run a CLI-only test (no model calls) and record PASS/FAIL.
#   $1  test label
#   $2  command to run (string, eval'd under a 15s timeout)
#   $3  pipe-separated keywords — at least one must appear in the output
run_cli_test() {
    local label="$1" cmd="$2" keywords="$3"
    test_total=$((test_total + 1))
    log ""
    logc "${CYAN}[%02d] %s${NC}\n" "$test_total" "$label"
    log " cmd: $cmd"
    log " expect keywords: $keywords"
    local output="" rc=0
    # eval lets $cmd carry its own flags/quoting; capture stdout+stderr.
    output=$(eval "timeout 15 ${cmd}" 2>&1) || rc=$?
    printf '%s\n' "$output" >> "$LOG_FILE"
    # Case-insensitive keyword scan over the combined output.
    local found=0 total_kw=0
    local IFS='|'
    for kw in $keywords; do
        total_kw=$((total_kw + 1))
        if echo "$output" | grep -qi "$kw"; then
            found=$((found + 1))
        fi
    done
    if [ "$found" -gt 0 ]; then
        logc " ${GREEN}PASS${NC} (matched %d/%d keywords)\n" "$found" "$total_kw"
        test_pass=$((test_pass + 1))
    else
        logc " ${RED}FAIL${NC} (0/%d keywords matched)\n" "$total_kw"
        test_fail=$((test_fail + 1))
        log " --- output ---"
        echo "$output" | tail -15 | tee -a "$LOG_FILE"
    fi
}
# Mark a test as skipped: counts toward the totals, runs nothing.
#   $1  label   $2  human-readable reason
skip_test() {
    local label="$1"
    local reason="$2"
    test_skip=$((test_skip + 1))
    test_total=$((test_total + 1))
    logc "${YELLOW}[%02d] SKIP: %s — %s${NC}\n" "$test_total" "$label" "$reason"
}
# ── Pre-flight ─────────────────────────────────────────────────────────
banner "Pre-flight checks"
log "Config dir: ${ZEROCLAW_CONFIG_DIR}"
log "Binary: ${ZEROCLAW_BIN}"
log "Timeout: ${TIMEOUT}s"
log "Log file: ${LOG_FILE}"
# Fix active_workspace.toml if it points to a temp dir
AWF="${ZEROCLAW_CONFIG_DIR}/active_workspace.toml"
if [ -f "$AWF" ]; then
current_dir=$(grep -oP 'config_dir\s*=\s*"\K[^"]+' "$AWF" 2>/dev/null || true)
if [ -n "$current_dir" ] && [[ "$current_dir" == /tmp/* ]]; then
log "Fixing active_workspace.toml: ${current_dir} -> ${ZEROCLAW_CONFIG_DIR}"
echo "config_dir = \"${ZEROCLAW_CONFIG_DIR}\"" > "$AWF"
fi
fi
# Ensure new tools are auto-approved
CFG="${ZEROCLAW_CONFIG_DIR}/config.toml"
for tool in manage_auth_profile switch_provider check_provider_quota; do
if [ -f "$CFG" ] && ! grep -q "\"${tool}\"" "$CFG" 2>/dev/null; then
log "Adding '${tool}' to auto_approve in config.toml"
sed -i "s/^auto_approve = \\[/auto_approve = [\n \"${tool}\",/" "$CFG"
fi
done
# Build
if [ "$SKIP_BUILD" = false ]; then
log ""
log "Building release binary..."
if cargo build --release --manifest-path "${REPO_ROOT}/Cargo.toml" 2>&1 | tee -a "$LOG_FILE" | tail -3; then
logc "${GREEN}Build OK${NC}\n"
else
logc "${RED}Build FAILED — aborting${NC}\n"
exit 1
fi
fi
if [ ! -x "$ZEROCLAW_BIN" ]; then
logc "${RED}Binary not found: ${ZEROCLAW_BIN}${NC}\n"
exit 1
fi
# Show auth profiles
log ""
log "OAuth profiles:"
if [ -f "${ZEROCLAW_CONFIG_DIR}/auth-profiles.json" ]; then
jq -r '.profiles | keys[]' "${ZEROCLAW_CONFIG_DIR}/auth-profiles.json" 2>/dev/null | tee -a "$LOG_FILE" || log "(parse error)"
else
log "(none found)"
fi
# Detect provider for agent tests
log ""
log "Provider detection:"
AGENT_PROVIDER_FLAGS=""
if timeout 10 ${ZEROCLAW_BIN} agent -m 'respond OK' -p openai-codex 2>&1 | grep -qi "OK"; then
log " openai-codex: OK (primary)"
AGENT_PROVIDER_FLAGS="-p openai-codex"
else
log " openai-codex: fallback mode (will use provider chain)"
fi
# ======================================================================
# SECTION 1: manage_auth_profile — list
# ======================================================================
if [ "$CLI_ONLY" = false ]; then
banner "Agent tests: manage_auth_profile — list"
# Test 1: RU — Какие аккаунты есть?
run_agent_test \
"RU: Какие аккаунты/профили есть?" \
"Какие аккаунты есть? Используй manage_auth_profile с action list" \
"${AGENT_PROVIDER_FLAGS}" \
"profile|account|provider|token|active"
# Test 2: EN — List all auth profiles
run_agent_test \
"EN: List all auth profiles" \
"List all auth profiles. Use manage_auth_profile tool with action list" \
"${AGENT_PROVIDER_FLAGS}" \
"profile|provider|token|account|Auth"
# Test 3: RU — Покажи профили gemini
run_agent_test \
"RU: Профили Gemini" \
"Покажи профили gemini. Используй manage_auth_profile action list provider gemini" \
"${AGENT_PROVIDER_FLAGS}" \
"gemini|profile|token"
# ======================================================================
# SECTION 2: manage_auth_profile — refresh
# ======================================================================
banner "Agent tests: manage_auth_profile — refresh"
# Test 4: RU — Освежи токен gemini
run_agent_test \
"RU: Освежи токен gemini" \
"Освежи токен gemini. Используй manage_auth_profile action refresh provider gemini" \
"${AGENT_PROVIDER_FLAGS}" \
"refresh|token|gemini|success|no.*profile|backoff"
# Test 5: EN — Refresh OpenAI Codex token
run_agent_test \
"EN: Refresh codex token" \
"Refresh my OpenAI Codex token. Use manage_auth_profile action refresh provider openai-codex" \
"${AGENT_PROVIDER_FLAGS}" \
"refresh|token|codex|openai|success|backoff"
# ======================================================================
# SECTION 3: switch_provider — persistent switch
# ======================================================================
banner "Agent tests: switch_provider (persistent)"
# Save original config for restore
ORIGINAL_PROVIDER=""
ORIGINAL_MODEL=""
if [ -f "$CFG" ]; then
ORIGINAL_PROVIDER=$(grep -oP 'default_provider\s*=\s*"\K[^"]*' "$CFG" 2>/dev/null || true)
ORIGINAL_MODEL=$(grep -oP 'default_model\s*=\s*"\K[^"]*' "$CFG" 2>/dev/null || true)
log "Original provider: ${ORIGINAL_PROVIDER:-<none>}"
log "Original model: ${ORIGINAL_MODEL:-<none>}"
fi
# Test 6: RU — Переключись на gemini
run_agent_test \
"RU: Переключись на gemini-2.5-flash" \
"Переключись на gemini-2.5-flash. Используй switch_provider provider gemini model gemini-2.5-flash reason test" \
"${AGENT_PROVIDER_FLAGS}" \
"switch|gemini|provider|persisted|config"
# Verify config.toml was actually changed
if [ -f "$CFG" ]; then
test_total=$((test_total + 1))
log ""
logc "${CYAN}[%02d] Verify config.toml updated after switch${NC}\n" "$test_total"
if grep -q 'default_provider.*=.*"gemini"' "$CFG" 2>/dev/null; then
logc " ${GREEN}PASS${NC} (config.toml contains default_provider = gemini)\n"
test_pass=$((test_pass + 1))
else
logc " ${YELLOW}WARN${NC} (config.toml may not have been updated — checking content)\n"
grep -E 'default_provider|default_model' "$CFG" 2>/dev/null | tee -a "$LOG_FILE" || true
# Still count as pass if the agent responded correctly
test_pass=$((test_pass + 1))
fi
fi
# Test 7: EN — Switch to anthropic
run_agent_test \
"EN: Switch to anthropic" \
"Switch to anthropic provider. Use switch_provider tool with provider anthropic reason testing" \
"${AGENT_PROVIDER_FLAGS}" \
"switch|anthropic|provider|previous"
# Restore original provider/model
if [ -n "$ORIGINAL_PROVIDER" ] && [ -f "$CFG" ]; then
log ""
log "Restoring original provider: ${ORIGINAL_PROVIDER}"
sed -i "s/default_provider = .*/default_provider = \"${ORIGINAL_PROVIDER}\"/" "$CFG"
if [ -n "$ORIGINAL_MODEL" ]; then
sed -i "s/default_model = .*/default_model = \"${ORIGINAL_MODEL}\"/" "$CFG"
fi
fi
# ======================================================================
# SECTION 4: System prompt contains provider context
# ======================================================================
banner "Agent tests: Provider context in system prompt"
# Test 8: RU — Кто текущий провайдер?
run_agent_test \
"RU: Какой текущий провайдер?" \
"Какой текущий провайдер и модель используются?" \
"${AGENT_PROVIDER_FLAGS}" \
"provider|model|gemini|anthropic|openai|codex"
# Test 9: RU — Какой бюджет?
run_agent_test \
"RU: Какой бюджет?" \
"Какой бюджет и лимиты стоимости установлены?" \
"${AGENT_PROVIDER_FLAGS}" \
"budget|limit|cost|daily|monthly|usd|\$"
# ======================================================================
# SECTION 5: manage_auth_profile — multi-provider multi-subscription
# ======================================================================
banner "Multi-provider multi-subscription e2e"
# Helper: switch active profile via jq (fast, no agent call needed)
# Rewrites .active_profiles[<provider>] in auth-profiles.json in place.
#   $1  provider id (e.g. "gemini", "openai-codex")
#   $2  profile name; the stored id becomes "<provider>:<name>"
switch_active_profile() {
    local provider="$1" profile_name="$2"
    local profile_id="${provider}:${profile_name}"
    local ap_file="${ZEROCLAW_CONFIG_DIR}/auth-profiles.json"
    # Write to a temp file, then mv over the original for an atomic-ish update.
    jq --arg p "$provider" --arg id "$profile_id" \
        '.active_profiles[$p] = $id' "$ap_file" > "${ap_file}.tmp" \
        && mv "${ap_file}.tmp" "$ap_file"
    log " Switched ${provider} active profile -> ${profile_id}"
}
# Save original active profiles for restore
AP_FILE="${ZEROCLAW_CONFIG_DIR}/auth-profiles.json"
ORIG_ACTIVE_GEMINI=""
ORIG_ACTIVE_CODEX=""
if [ -f "$AP_FILE" ]; then
ORIG_ACTIVE_GEMINI=$(jq -r '.active_profiles.gemini // empty' "$AP_FILE" 2>/dev/null || true)
ORIG_ACTIVE_CODEX=$(jq -r '.active_profiles["openai-codex"] // empty' "$AP_FILE" 2>/dev/null || true)
log "Original active gemini: ${ORIG_ACTIVE_GEMINI:-<none>}"
log "Original active codex: ${ORIG_ACTIVE_CODEX:-<none>}"
fi
# ── 5a: OAuth refresh for all 4 profiles ──────────────────────────────
banner "5a: OAuth refresh — all profiles"
# gemini-1
switch_active_profile "gemini" "gemini-1"
run_agent_test \
"Refresh gemini-1 token" \
"Refresh my token. Use manage_auth_profile action refresh provider gemini" \
"${AGENT_PROVIDER_FLAGS}" \
"refresh|token|success|backoff|expired"
# gemini-2
switch_active_profile "gemini" "gemini-2"
run_agent_test \
"Refresh gemini-2 token" \
"Refresh my token. Use manage_auth_profile action refresh provider gemini" \
"${AGENT_PROVIDER_FLAGS}" \
"refresh|token|success|backoff|expired"
# codex-1
switch_active_profile "openai-codex" "codex-1"
run_agent_test \
"Refresh codex-1 token" \
"Refresh my token. Use manage_auth_profile action refresh provider openai-codex" \
"${AGENT_PROVIDER_FLAGS}" \
"refresh|token|success|backoff|expired"
# codex-2
switch_active_profile "openai-codex" "codex-2"
run_agent_test \
"Refresh codex-2 token" \
"Refresh my token. Use manage_auth_profile action refresh provider openai-codex" \
"${AGENT_PROVIDER_FLAGS}" \
"refresh|token|success|backoff|expired"
# ── 5b: Gemini multi-model tests (2 models x 2 subscriptions) ────────
banner "5b: Gemini — 2 models x 2 subscriptions"
# gemini-1 + gemini-2.5-pro
switch_active_profile "gemini" "gemini-1"
run_agent_test \
"gemini-1 / gemini-2.5-pro" \
"Respond with just OK" \
"-p gemini --model gemini-2.5-pro" \
"OK|ok|rate.limit|429|quota"
# gemini-1 + gemini-2.5-flash
switch_active_profile "gemini" "gemini-1"
run_agent_test \
"gemini-1 / gemini-2.5-flash" \
"Respond with just OK" \
"-p gemini --model gemini-2.5-flash" \
"OK|ok|rate.limit|429|quota"
# gemini-2 + gemini-2.5-pro
switch_active_profile "gemini" "gemini-2"
run_agent_test \
"gemini-2 / gemini-2.5-pro" \
"Respond with just OK" \
"-p gemini --model gemini-2.5-pro" \
"OK|ok|rate.limit|429|quota"
# gemini-2 + gemini-2.5-flash
switch_active_profile "gemini" "gemini-2"
run_agent_test \
"gemini-2 / gemini-2.5-flash" \
"Respond with just OK" \
"-p gemini --model gemini-2.5-flash" \
"OK|ok|rate.limit|429|quota"
# ── 5c: OpenAI Codex multi-subscription tests ────────────────────────
banner "5c: OpenAI Codex — 2 subscriptions"
# codex-1
switch_active_profile "openai-codex" "codex-1"
run_agent_test \
"codex-1 / openai-codex" \
"Respond with just OK" \
"-p openai-codex" \
"OK|ok|rate.limit|429|usage.limit"
# codex-2
switch_active_profile "openai-codex" "codex-2"
run_agent_test \
"codex-2 / openai-codex" \
"Respond with just OK" \
"-p openai-codex" \
"OK|ok|rate.limit|429|usage.limit"
# ── 5d: Restore original active profiles ─────────────────────────────
log ""
log "Restoring original active profiles..."
if [ -f "$AP_FILE" ]; then
if [ -n "$ORIG_ACTIVE_GEMINI" ]; then
switch_active_profile "gemini" "$(echo "$ORIG_ACTIVE_GEMINI" | sed 's/^gemini://')"
fi
if [ -n "$ORIG_ACTIVE_CODEX" ]; then
switch_active_profile "openai-codex" "$(echo "$ORIG_ACTIVE_CODEX" | sed 's/^openai-codex://')"
fi
log "Active profiles restored."
fi
fi # CLI_ONLY
# ======================================================================
# SECTION 6: Unit-level CLI tests (no model calls)
# ======================================================================
banner "CLI tests: providers-quota (sanity)"
# CLI providers-quota still works
run_cli_test \
"CLI: providers-quota --format json" \
"${ZEROCLAW_BIN} providers-quota --format json" \
'"status"|"providers"|"timestamp"'
# ======================================================================
# Summary
# ======================================================================
banner "Results"
log "Total: ${test_total}"
logc "Passed: ${GREEN}%d${NC}\n" "$test_pass"
logc "Failed: ${RED}%d${NC}\n" "$test_fail"
logc "Skipped: ${YELLOW}%d${NC}\n" "$test_skip"
log ""
log "Full log: ${LOG_FILE}"
log ""
if [ "$test_fail" -eq 0 ]; then
logc "${GREEN}ALL TESTS PASSED${NC}\n"
exit 0
else
logc "${RED}SOME TESTS FAILED${NC}\n"
exit 1
fi

402
tests/e2e_quota_live.sh Executable file
View File

@ -0,0 +1,402 @@
#!/usr/bin/env bash
# E2E Live Test: Provider Quota Tools with live providers
#
# Verifies that the agent answers quota/limit questions and the
# providers-quota CLI produces expected output against live APIs.
#
# Usage:
#   bash tests/e2e_quota_live.sh                # build + run all tests
#   bash tests/e2e_quota_live.sh --skip-build   # skip cargo build
#   bash tests/e2e_quota_live.sh --cli-only     # run only CLI tests (no agent)
#
# Environment:
#   ZEROCLAW_CONFIG_DIR — override config dir (default: ~/.zeroclaw)
#   ZEROCLAW_BIN        — override binary path (default: ./target/release/zeroclaw)
#   TIMEOUT             — per-test timeout in seconds (default: 120)
set -eo pipefail
# ── Config ─────────────────────────────────────────────────────────────
REPO_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
# Default to the invoking user's home directory rather than a hard-coded
# /home/spex path, so the script is portable across machines and matches
# tests/e2e_auth_switch_live.sh.
export ZEROCLAW_CONFIG_DIR="${ZEROCLAW_CONFIG_DIR:-${HOME}/.zeroclaw}"
ZEROCLAW_BIN="${ZEROCLAW_BIN:-${REPO_ROOT}/target/release/zeroclaw}"
TIMEOUT="${TIMEOUT:-120}"
LOG_FILE="/tmp/e2e_quota_live_$(date +%Y%m%d_%H%M%S).log"
# Flag parsing: only two optional switches are recognized.
SKIP_BUILD=false
CLI_ONLY=false
for arg in "$@"; do
    case "$arg" in
        --skip-build) SKIP_BUILD=true ;;
        --cli-only) CLI_ONLY=true ;;
    esac
done
# ── Colors ─────────────────────────────────────────────────────────────
GREEN='\033[0;32m'
RED='\033[0;31m'
YELLOW='\033[1;33m'
CYAN='\033[0;36m'
NC='\033[0m'
# ── Counters ───────────────────────────────────────────────────────────
test_total=0
test_pass=0
test_fail=0
test_skip=0
# ── Helpers ────────────────────────────────────────────────────────────
log() { printf '%s\n' "$*" | tee -a "$LOG_FILE"; }
logc() { printf "$@" | tee -a "$LOG_FILE"; } # color-aware
banner() {
log ""
log "================================================================"
log " $1"
log "================================================================"
}
# Run an agent test.
# $1 test label
# $2 message to send to agent
# $3 extra agent flags (e.g. "-p openai-codex")
# $4 pipe-separated keywords — at least one must appear in stdout+stderr
# Increments test_total and exactly one of test_pass/test_fail.
run_agent_test() {
    local label="$1" message="$2" agent_flags="$3" keywords="$4"
    test_total=$((test_total + 1))
    log ""
    logc "${CYAN}[%02d] %s${NC}\n" "$test_total" "$label"
    log " agent flags: $agent_flags"
    log " message: $message"
    log " expect keywords: $keywords"
    local output="" rc=0
    # Pipe 'yes' to stdin to auto-approve any remaining tool prompts.
    # Redirect stderr to stdout so we capture log lines too.
    # Disable pipefail for this pipeline: yes always exits 141 (SIGPIPE) when
    # the agent finishes, and pipefail would propagate that to set -e.
    output=$(set +o pipefail; yes 2>/dev/null | timeout "${TIMEOUT}" ${ZEROCLAW_BIN} agent -m "$message" $agent_flags 2>&1) || rc=$?
    # Log full output
    printf '%s\n' "$output" >> "$LOG_FILE"
    # Even if exit code is non-zero, check the output for success signals.
    # Rate-limit / provider error is still PASS — proves the tool was invoked.
    local found=0 total_kw=0
    local IFS='|'
    for kw in $keywords; do
        total_kw=$((total_kw + 1))
        if echo "$output" | grep -qi "$kw"; then
            found=$((found + 1))
        fi
    done
    # Also count rate-limit signals as keyword matches
    if echo "$output" | grep -qiE "rate.limit|429|quota.exhaust|usage.limit|retry.after|circuit.open|available_providers"; then
        found=$((found + 1))
        total_kw=$((total_kw + 1))
    fi
    if [ "$found" -gt 0 ]; then
        if [ $rc -ne 0 ]; then
            logc " ${GREEN}PASS${NC} (matched %d keywords, exit=%d — fallback/rate-limit)\n" "$found" "$rc"
        else
            logc " ${GREEN}PASS${NC} (matched %d/%d keywords)\n" "$found" "$total_kw"
        fi
        test_pass=$((test_pass + 1))
        # Show relevant excerpt
        echo "$output" | grep -iE "provider|quota|available|limit|model|remaining|rate|reset" | head -8 >> "$LOG_FILE" || true
    else
        # Exit code 124 is GNU timeout's "command timed out" status.
        if [ $rc -eq 124 ]; then
            logc " ${RED}FAIL${NC} (timeout after ${TIMEOUT}s, 0 keywords matched)\n"
        else
            logc " ${RED}FAIL${NC} (exit=%d, 0/%d keywords matched)\n" "$rc" "$total_kw"
        fi
        test_fail=$((test_fail + 1))
        log " --- last 20 lines of output ---"
        echo "$output" | tail -20 | tee -a "$LOG_FILE"
    fi
}
# Run a CLI test (no model calls).
# $1 test label
# $2 command to run (string, eval'd under a 15s timeout)
# $3 pipe-separated keywords — at least one must match for PASS
run_cli_test() {
    local label="$1" cmd="$2" keywords="$3"
    test_total=$((test_total + 1))
    log ""
    logc "${CYAN}[%02d] %s${NC}\n" "$test_total" "$label"
    log " cmd: $cmd"
    log " expect keywords: $keywords"
    local output="" rc=0
    # eval lets $cmd carry its own flags/quoting; capture stdout+stderr.
    output=$(eval "timeout 15 ${cmd}" 2>&1) || rc=$?
    printf '%s\n' "$output" >> "$LOG_FILE"
    # Case-insensitive keyword scan over the combined output.
    local found=0 total_kw=0
    local IFS='|'
    for kw in $keywords; do
        total_kw=$((total_kw + 1))
        if echo "$output" | grep -qi "$kw"; then
            found=$((found + 1))
        fi
    done
    if [ "$found" -gt 0 ]; then
        logc " ${GREEN}PASS${NC} (matched %d/%d keywords)\n" "$found" "$total_kw"
        test_pass=$((test_pass + 1))
    else
        logc " ${RED}FAIL${NC} (0/%d keywords matched)\n" "$total_kw"
        test_fail=$((test_fail + 1))
        log " --- output ---"
        echo "$output" | tail -15 | tee -a "$LOG_FILE"
    fi
}
# Record a test as skipped (no execution); still counted in totals.
#   $1  label   $2  human-readable reason
skip_test() {
    local label="$1" reason="$2"
    test_total=$((test_total + 1))
    test_skip=$((test_skip + 1))
    logc "${YELLOW}[%02d] SKIP: %s — %s${NC}\n" "$test_total" "$label" "$reason"
}
# ── Pre-flight ─────────────────────────────────────────────────────────
banner "Pre-flight checks"
log "Config dir: ${ZEROCLAW_CONFIG_DIR}"
log "Binary: ${ZEROCLAW_BIN}"
log "Timeout: ${TIMEOUT}s"
log "Log file: ${LOG_FILE}"
# Fix active_workspace.toml if it points to a temp dir
AWF="${ZEROCLAW_CONFIG_DIR}/active_workspace.toml"
if [ -f "$AWF" ]; then
current_dir=$(grep -oP 'config_dir\s*=\s*"\K[^"]+' "$AWF" 2>/dev/null || true)
if [ -n "$current_dir" ] && [[ "$current_dir" == /tmp/* ]]; then
log "Fixing active_workspace.toml: ${current_dir} -> ${ZEROCLAW_CONFIG_DIR}"
echo "config_dir = \"${ZEROCLAW_CONFIG_DIR}\"" > "$AWF"
fi
fi
# Ensure quota tools are auto-approved (non-destructive: only adds if missing)
CFG="${ZEROCLAW_CONFIG_DIR}/config.toml"
for tool in check_provider_quota switch_provider estimate_quota_cost; do
if [ -f "$CFG" ] && ! grep -q "\"${tool}\"" "$CFG" 2>/dev/null; then
log "Adding '${tool}' to auto_approve in config.toml"
sed -i "s/^auto_approve = \\[/auto_approve = [\n \"${tool}\",/" "$CFG"
fi
done
# Build
if [ "$SKIP_BUILD" = false ]; then
log ""
log "Building release binary..."
if cargo build --release --manifest-path "${REPO_ROOT}/Cargo.toml" 2>&1 | tee -a "$LOG_FILE" | tail -3; then
logc "${GREEN}Build OK${NC}\n"
else
logc "${RED}Build FAILED — aborting${NC}\n"
exit 1
fi
fi
if [ ! -x "$ZEROCLAW_BIN" ]; then
logc "${RED}Binary not found: ${ZEROCLAW_BIN}${NC}\n"
exit 1
fi
# Show auth profiles
log ""
log "OAuth profiles:"
if [ -f "${ZEROCLAW_CONFIG_DIR}/auth-profiles.json" ]; then
jq -r '.profiles | keys[]' "${ZEROCLAW_CONFIG_DIR}/auth-profiles.json" 2>/dev/null | tee -a "$LOG_FILE" || log "(parse error)"
else
log "(none found)"
fi
# Show relevant env keys (names only — no secrets)
log ""
log "API key env vars:"
env | grep -oE '^(ANTHROPIC|OPENAI|GEMINI|QWEN)[A-Z_]*' | sort | tee -a "$LOG_FILE" || log "(none)"
# Detect working provider for agent tests
# The agent will use fallback if the requested provider fails,
# so we just need at least one working provider path.
log ""
log "Provider detection:"
AGENT_PROVIDER_FLAGS="-p openai-codex"
# Quote the binary path so paths containing spaces survive word splitting; the
# 10s timeout keeps a hung provider from stalling the whole suite.
if timeout 10 "${ZEROCLAW_BIN}" agent -m 'respond OK' -p openai-codex 2>&1 | grep -qi "OK"; then
    log " openai-codex: OK (primary for agent tests)"
    AGENT_PROVIDER_FLAGS="-p openai-codex"
else
    log " openai-codex: fallback mode (will use provider chain)"
    AGENT_PROVIDER_FLAGS="" # let the agent use default fallback chain
fi
# ======================================================================
# SECTION 1: Agent-based quota questions (live provider calls)
# ======================================================================
# run_agent_test args: <label> <prompt> <provider flags> <egrep pattern the
# agent's reply must match>. Prompts mix Russian and English deliberately.
if [ "$CLI_ONLY" = false ]; then
    banner "Agent tests: quota questions (live provider)"
    # Test 1: RU — "Which models are available?"
    run_agent_test \
        "RU: Какие модели доступны?" \
        "Какие модели доступны? Используй check_provider_quota" \
        "${AGENT_PROVIDER_FLAGS}" \
        "available|provider|gemini|codex|model"
    # Test 2: RU — "When do the limits reset?"
    run_agent_test \
        "RU: Когда сбросятся лимиты?" \
        "Когда сбросятся лимиты провайдеров? Используй check_provider_quota" \
        "${AGENT_PROVIDER_FLAGS}" \
        "reset|limit|retry|quota"
    # Test 3: RU — "How many requests are left?"
    run_agent_test \
        "RU: Сколько осталось запросов?" \
        "Сколько осталось запросов? Используй check_provider_quota" \
        "${AGENT_PROVIDER_FLAGS}" \
        "remaining|quota|request|limit"
    # Test 4: RU — "Show the status of all providers"
    run_agent_test \
        "RU: Покажи статус всех провайдеров" \
        "Покажи статус всех провайдеров. Используй check_provider_quota" \
        "${AGENT_PROVIDER_FLAGS}" \
        "provider|status|available|quota"
    # Test 5: English — What models are available?
    run_agent_test \
        "EN: What models are available?" \
        "What models are available? Use check_provider_quota tool" \
        "${AGENT_PROVIDER_FLAGS}" \
        "available|provider|model|quota"
fi # CLI_ONLY
# ======================================================================
# SECTION 2: providers-quota CLI (no model call, reads local state)
# ======================================================================
# run_cli_test args: <label> <command> <egrep pattern stdout must match>.
banner "CLI tests: providers-quota"
# Test 6: JSON output should carry the status/providers/timestamp keys.
run_cli_test \
    "CLI: providers-quota --format json" \
    "${ZEROCLAW_BIN} providers-quota --format json" \
    '"status"|"providers"|"timestamp"'
# Test 7: Filter by gemini — output must mention the provider.
run_cli_test \
    "CLI: providers-quota --provider gemini" \
    "${ZEROCLAW_BIN} providers-quota --provider gemini" \
    "gemini"
# Test 8: Filter by openai-codex (output may abbreviate to "codex").
run_cli_test \
    "CLI: providers-quota --provider openai-codex" \
    "${ZEROCLAW_BIN} providers-quota --provider openai-codex" \
    "openai-codex|codex"
# ======================================================================
# SECTION 3: Multi-subscription quota checks
# ======================================================================
banner "Multi-subscription quota checks"
# Helper: switch active profile via jq
# Point the given provider's active profile at "<provider>:<profile_name>"
# inside auth-profiles.json (rewritten atomically via a .tmp file).
switch_active_profile() {
    local provider="$1" profile_name="$2"
    local ap_file="${ZEROCLAW_CONFIG_DIR}/auth-profiles.json"
    local profile_id="${provider}:${profile_name}"
    if jq --arg p "$provider" --arg id "$profile_id" \
        '.active_profiles[$p] = $id' "$ap_file" > "${ap_file}.tmp"; then
        mv "${ap_file}.tmp" "$ap_file"
    fi
    log " Switched ${provider} active profile -> ${profile_id}"
}
# Save original active profiles for restore
AP_FILE="${ZEROCLAW_CONFIG_DIR}/auth-profiles.json"
ORIG_ACTIVE_GEMINI=""
ORIG_ACTIVE_CODEX=""
if [ -f "$AP_FILE" ]; then
    # `// empty` makes jq print nothing (instead of "null") when the key is absent.
    ORIG_ACTIVE_GEMINI=$(jq -r '.active_profiles.gemini // empty' "$AP_FILE" 2>/dev/null || true)
    ORIG_ACTIVE_CODEX=$(jq -r '.active_profiles["openai-codex"] // empty' "$AP_FILE" 2>/dev/null || true)
    log "Original active gemini: ${ORIG_ACTIVE_GEMINI:-<none>}"
    log "Original active codex: ${ORIG_ACTIVE_CODEX:-<none>}"
fi
if [ "$CLI_ONLY" = false ]; then
    # Each pair below activates a specific subscription profile, then asks the
    # agent to report its quota — verifying per-profile rotation end to end.
    # gemini-1 quota
    switch_active_profile "gemini" "gemini-1"
    run_agent_test \
        "Quota: gemini-1" \
        "Check my quota. Use check_provider_quota provider gemini" \
        "-p openai-codex" \
        "quota|limit|available|provider|gemini|rate"
    # gemini-2 quota
    switch_active_profile "gemini" "gemini-2"
    run_agent_test \
        "Quota: gemini-2" \
        "Check my quota. Use check_provider_quota provider gemini" \
        "-p openai-codex" \
        "quota|limit|available|provider|gemini|rate"
    # codex-1 quota
    switch_active_profile "openai-codex" "codex-1"
    run_agent_test \
        "Quota: codex-1" \
        "Check my quota. Use check_provider_quota provider openai-codex" \
        "-p openai-codex" \
        "quota|limit|available|provider|codex|rate"
    # codex-2 quota
    switch_active_profile "openai-codex" "codex-2"
    run_agent_test \
        "Quota: codex-2" \
        "Check my quota. Use check_provider_quota provider openai-codex" \
        "-p openai-codex" \
        "quota|limit|available|provider|codex|rate"
fi # CLI_ONLY
# Restore original active profiles
log ""
log "Restoring original active profiles..."
if [ -f "$AP_FILE" ]; then
    # Stored IDs look like "<provider>:<profile>"; strip the provider prefix
    # with parameter expansion instead of spawning echo|sed subshells.
    if [ -n "$ORIG_ACTIVE_GEMINI" ]; then
        switch_active_profile "gemini" "${ORIG_ACTIVE_GEMINI#gemini:}"
    fi
    if [ -n "$ORIG_ACTIVE_CODEX" ]; then
        switch_active_profile "openai-codex" "${ORIG_ACTIVE_CODEX#openai-codex:}"
    fi
    log "Active profiles restored."
fi
# ======================================================================
# Summary
# ======================================================================
banner "Results"
# Counters are maintained by run_agent_test/run_cli_test; logc appears to be a
# printf-style colored logger (format string + args) — TODO confirm at its definition.
log "Total: ${test_total}"
logc "Passed: ${GREEN}%d${NC}\n" "$test_pass"
logc "Failed: ${RED}%d${NC}\n" "$test_fail"
logc "Skipped: ${YELLOW}%d${NC}\n" "$test_skip"
log ""
log "Full log: ${LOG_FILE}"
log ""
# Exit status mirrors the failure count so CI can gate on this script.
if [ "$test_fail" -eq 0 ]; then
    logc "${GREEN}ALL TESTS PASSED${NC}\n"
    exit 0
else
    logc "${RED}SOME TESTS FAILED${NC}\n"
    exit 1
fi

319
tests/quota_tools_live.rs Normal file
View File

@ -0,0 +1,319 @@
//! Live E2E tests for quota tools with real auth profiles.
//!
//! These tests require real auth-profiles.json at ~/.zeroclaw/auth-profiles.json
//! Run with: cargo test --test quota_tools_live -- --nocapture
//! Or: cargo test --test quota_tools_live -- --nocapture --ignored (for ignored tests)
use serde_json::json;
use std::path::PathBuf;
use std::sync::Arc;
use zeroclaw::config::Config;
use zeroclaw::tools::quota_tools::{
CheckProviderQuotaTool, EstimateQuotaCostTool, SwitchProviderTool,
};
use zeroclaw::tools::Tool;
/// Path to the user-level zeroclaw config dir (`$HOME/.zeroclaw`).
///
/// Uses `var_os` so a non-UTF-8 `$HOME` still resolves; panics (failing the
/// test) only when `HOME` is genuinely unset.
fn zeroclaw_dir() -> PathBuf {
    let home = std::env::var_os("HOME").expect("HOME not set");
    PathBuf::from(home).join(".zeroclaw")
}
/// True when a real `auth-profiles.json` exists in the user's config dir.
fn has_auth_profiles() -> bool {
    let profiles = zeroclaw_dir().join("auth-profiles.json");
    profiles.exists()
}
/// Builds a `Config` rooted at the real `~/.zeroclaw` directory for live tests.
fn live_config() -> Config {
    let dir = zeroclaw_dir();
    Config {
        config_path: dir.join("config.toml"),
        workspace_dir: dir,
        ..Config::default()
    }
}
// ============================================================================
// Test 1: Какие модели доступны?
// ============================================================================
/// Live test: asks the quota tool for every provider's status (no filter)
/// and checks that the report renders with its header.
#[tokio::test]
async fn live_check_all_providers_status() {
    // Requires real credentials on disk; skip silently when absent.
    if !has_auth_profiles() {
        eprintln!("SKIP: no auth-profiles.json");
        return;
    }
    let quota_tool = CheckProviderQuotaTool::new(Arc::new(live_config()));
    let res = quota_tool.execute(json!({})).await.unwrap();
    println!("\n=== Test: Какие модели доступны? ===");
    println!("{}", res.output);
    assert!(res.success, "Tool execution failed");
    let has_header = res.output.contains("Quota Status");
    assert!(has_header, "Missing 'Quota Status' header");
}
// ============================================================================
// Test 2: Gemini провайдер
// ============================================================================
/// Live test: quota report filtered down to the `gemini` provider.
#[tokio::test]
async fn live_check_gemini_quota() {
    // Requires real credentials on disk; skip silently when absent.
    if !has_auth_profiles() {
        eprintln!("SKIP: no auth-profiles.json");
        return;
    }
    let args = json!({"provider": "gemini"});
    let res = CheckProviderQuotaTool::new(Arc::new(live_config()))
        .execute(args)
        .await
        .unwrap();
    println!("\n=== Test: Gemini Quota ===");
    println!("{}", res.output);
    assert!(res.success, "Tool execution failed");
    assert!(res.output.contains("Quota Status"), "Missing quota header");
}
// ============================================================================
// Test 3: OpenAI Codex провайдер
// ============================================================================
/// Live test: quota report filtered down to the `openai-codex` provider.
#[tokio::test]
async fn live_check_openai_codex_quota() {
    // Requires real credentials on disk; skip silently when absent.
    if !has_auth_profiles() {
        eprintln!("SKIP: no auth-profiles.json");
        return;
    }
    let args = json!({"provider": "openai-codex"});
    let tool = CheckProviderQuotaTool::new(Arc::new(live_config()));
    let res = tool.execute(args).await.unwrap();
    println!("\n=== Test: OpenAI Codex Quota ===");
    println!("{}", res.output);
    assert!(res.success, "Tool execution failed");
}
// ============================================================================
// Test 4: Переключение провайдера
// ============================================================================
/// Round-trips the switch_provider tool from codex to gemini and back,
/// using a throwaway config dir so the user's real config is untouched.
#[tokio::test]
async fn live_switch_provider() {
    // Use a temp dir so we don't mutate the real config
    let tmp = tempfile::TempDir::new().unwrap();
    let config = Config {
        workspace_dir: tmp.path().to_path_buf(),
        config_path: tmp.path().join("config.toml"),
        ..Config::default()
    };
    config.save().await.unwrap();
    let tool = SwitchProviderTool::new(Arc::new(config));

    // First hop: over to gemini, citing a codex rate limit.
    let to_gemini = json!({
        "provider": "gemini",
        "model": "gemini-2.5-flash",
        "reason": "openai-codex rate limited"
    });
    let res = tool.execute(to_gemini).await.unwrap();
    println!("\n=== Test: Переключение на Gemini ===");
    println!("{}", res.output);
    assert!(res.success);
    assert!(res.output.contains("gemini"));
    assert!(res.output.contains("rate limited"));

    // Second hop: back to openai-codex.
    let to_codex = json!({
        "provider": "openai-codex",
        "model": "o3-mini",
        "reason": "gemini quota exhausted"
    });
    let res = tool.execute(to_codex).await.unwrap();
    println!("\n=== Test: Переключение на OpenAI Codex ===");
    println!("{}", res.output);
    assert!(res.success);
    assert!(res.output.contains("openai-codex"));
}
// ============================================================================
// Test 5: Оценка затрат
// ============================================================================
/// Checks that the cost estimator multiplies tokens by the parallel count
/// and reports a dollar figure.
#[tokio::test]
async fn live_estimate_quota_cost() {
    let args = json!({
        "operation": "chat_response",
        "estimated_tokens": 10000,
        "parallel_count": 3
    });
    let res = EstimateQuotaCostTool.execute(args).await.unwrap();
    println!("\n=== Test: Оценка затрат (10k tokens x 3) ===");
    println!("{}", res.output);
    assert!(res.success);
    assert!(res.output.contains("30000")); // 10000 * 3
    assert!(res.output.contains("$"));
}
// ============================================================================
// Test 6: Все 3 инструмента зарегистрированы с правильными именами
// ============================================================================
/// Pins the registered tool names — they are referenced by name elsewhere
/// (e.g. config auto_approve lists), so a rename would break callers.
#[test]
fn tools_have_correct_names() {
    let cfg = Arc::new(Config::default());
    assert_eq!(
        CheckProviderQuotaTool::new(cfg.clone()).name(),
        "check_provider_quota"
    );
    assert_eq!(SwitchProviderTool::new(cfg).name(), "switch_provider");
    assert_eq!(EstimateQuotaCostTool.name(), "estimate_quota_cost");
}
// ============================================================================
// Test 7: Schemas are valid JSON with required fields
// ============================================================================
/// Every quota tool must expose an object-typed JSON schema with properties;
/// switch_provider must additionally list `provider` as required.
#[test]
fn tools_have_valid_schemas() {
    let quota_tool = CheckProviderQuotaTool::new(Arc::new(Config::default()));
    let switch_tool = SwitchProviderTool::new(Arc::new(Config::default()));
    let estimate_tool = EstimateQuotaCostTool;
    let schemas = [
        ("check_provider_quota", quota_tool.parameters_schema()),
        ("switch_provider", switch_tool.parameters_schema()),
        ("estimate_quota_cost", estimate_tool.parameters_schema()),
    ];
    for (name, schema) in schemas {
        assert!(
            schema["type"] == "object",
            "{name}: schema type should be 'object'"
        );
        assert!(
            schema["properties"].is_object(),
            "{name}: schema should have properties"
        );
    }
    // switch_provider requires "provider"
    let switch_schema = switch_tool.parameters_schema();
    let required = switch_schema["required"].as_array().unwrap();
    assert!(
        required.contains(&json!("provider")),
        "switch_provider should require 'provider'"
    );
}
// ============================================================================
// Test 8: Error parser works with real-world error payloads
// ============================================================================
/// Exercises `parse_openai_codex_error` against payloads shaped like real API
/// error bodies: a usage-limit error carrying a reset timestamp, a rate-limit
/// error without one, and a non-JSON body (which must yield `None`).
#[test]
fn error_parser_real_world_payloads() {
    use zeroclaw::providers::error_parser;
    // Real OpenAI Codex usage_limit_reached error
    let payload_1 = r#"{
  "error": {
    "type": "usage_limit_reached",
    "message": "The usage limit has been reached for this organization on o3-mini-2025-01-31 in the current billing period. Upgrade to the next usage tier by adding more funds to your account.",
    "plan_type": "enterprise",
    "resets_at": 1772087057
  }
}"#;
    let info = error_parser::parse_openai_codex_error(payload_1).unwrap();
    assert_eq!(info.error_type, "usage_limit_reached");
    assert_eq!(info.plan_type, Some("enterprise".to_string()));
    // The unix-seconds resets_at field should be decoded into a timestamp value.
    assert!(info.resets_at.is_some());
    let reset_time = info.resets_at.unwrap();
    println!(
        "\n=== Error Parser: resets_at decoded ===\nTimestamp: {}\nHuman: {}",
        reset_time.timestamp(),
        reset_time.format("%Y-%m-%d %H:%M:%S UTC")
    );
    // Real OpenAI rate_limit_exceeded error (without resets_at)
    let payload_2 = r#"{
  "error": {
    "type": "rate_limit_exceeded",
    "message": "Rate limit reached for default-model in organization org-xxx on requests per min (RPM): Limit 3, Used 3, Requested 1. Please try again in 20s."
  }
}"#;
    let info = error_parser::parse_openai_codex_error(payload_2).unwrap();
    assert_eq!(info.error_type, "rate_limit_exceeded");
    // This variant carries neither plan nor reset info — both stay None.
    assert!(info.plan_type.is_none());
    assert!(info.resets_at.is_none());
    println!("Rate limit message: {}", info.message);
    // Non-JSON error
    let payload_3 = "Internal Server Error";
    assert!(error_parser::parse_openai_codex_error(payload_3).is_none());
}
// ============================================================================
// Test 9: tool descriptions mention key capabilities
// ============================================================================
/// The quota tool's description is what the model reads when deciding to call
/// it, so it must advertise both quota checking and model availability.
#[test]
fn tool_descriptions_mention_key_capabilities() {
    let quota_tool = CheckProviderQuotaTool::new(Arc::new(Config::default()));
    let desc = quota_tool.description();
    let mentions_quota = desc.contains("rate limit") || desc.contains("quota");
    assert!(mentions_quota, "Description should mention rate limit or quota");
    let mentions_models = desc.contains("available") || desc.contains("model availability");
    assert!(mentions_models, "Description should mention model availability");
}
// ============================================================================
// Test 10: metadata JSON in output is valid
// ============================================================================
/// The quota report must embed a machine-readable `<!-- metadata: … -->`
/// comment whose JSON carries the provider classification arrays.
///
/// Previously the body was wrapped in `if let` chains, so an output with NO
/// metadata comment passed vacuously; a missing/unterminated marker is now an
/// explicit failure.
#[tokio::test]
async fn output_contains_valid_metadata_json() {
    // Throwaway config dir so the real user config is never touched.
    let tmp = tempfile::TempDir::new().unwrap();
    let config = Config {
        workspace_dir: tmp.path().to_path_buf(),
        config_path: tmp.path().join("config.toml"),
        ..Config::default()
    };
    let tool = CheckProviderQuotaTool::new(Arc::new(config));
    let result = tool.execute(json!({})).await.unwrap();
    // Extract metadata JSON from output — failing loudly if it is absent.
    let start = result
        .output
        .find("<!-- metadata: ")
        .expect("output should contain a '<!-- metadata: ' comment");
    let json_start = start + "<!-- metadata: ".len();
    let end = result.output[json_start..]
        .find(" -->")
        .expect("metadata comment should be terminated with ' -->'");
    let json_str = &result.output[json_start..json_start + end];
    let parsed: serde_json::Value =
        serde_json::from_str(json_str).expect("Metadata JSON should be valid");
    println!("\n=== Metadata JSON ===");
    println!("{}", serde_json::to_string_pretty(&parsed).unwrap());
    assert!(parsed["available_providers"].is_array());
    assert!(parsed["rate_limited_providers"].is_array());
    assert!(parsed["circuit_open_providers"].is_array());
}