Merge remote-tracking branch 'origin/main'

# Conflicts:
#	src/channels/mod.rs
#	src/config/mod.rs
#	src/config/schema.rs
This commit is contained in:
VirtualHotBar 2026-02-28 16:12:02 +08:00
commit fae10cd5c4
91 changed files with 9374 additions and 895 deletions

34
.github/CODEOWNERS vendored
View File

@@ -1,5 +1,5 @@
# Default owner for all files
* @theonlyhennygod
* @theonlyhennygod @chumyin
# Important functional modules
/src/agent/** @theonlyhennygod
@@ -8,25 +8,25 @@
/src/tools/** @theonlyhennygod
/src/gateway/** @theonlyhennygod
/src/runtime/** @theonlyhennygod
/src/memory/** @theonlyhennygod
/src/memory/** @theonlyhennygod @chumyin
/Cargo.toml @theonlyhennygod
/Cargo.lock @theonlyhennygod
# Security / tests / CI-CD ownership
/src/security/** @theonlyhennygod
/tests/** @theonlyhennygod
/.github/** @theonlyhennygod
/.github/workflows/** @theonlyhennygod
/.github/codeql/** @theonlyhennygod
/.github/dependabot.yml @theonlyhennygod
/SECURITY.md @theonlyhennygod
/docs/actions-source-policy.md @theonlyhennygod
/docs/ci-map.md @theonlyhennygod
/src/security/** @chumyin
/tests/** @chumyin
/.github/** @chumyin
/.github/workflows/** @chumyin
/.github/codeql/** @chumyin
/.github/dependabot.yml @chumyin
/SECURITY.md @chumyin
/docs/actions-source-policy.md @chumyin
/docs/ci-map.md @chumyin
# Docs & governance
/docs/** @theonlyhennygod
/AGENTS.md @theonlyhennygod
/CLAUDE.md @theonlyhennygod
/CONTRIBUTING.md @theonlyhennygod
/docs/pr-workflow.md @theonlyhennygod
/docs/reviewer-playbook.md @theonlyhennygod
/docs/** @chumyin
/AGENTS.md @chumyin
/CLAUDE.md @chumyin
/CONTRIBUTING.md @chumyin
/docs/pr-workflow.md @chumyin
/docs/reviewer-playbook.md @chumyin

View File

@@ -5,7 +5,7 @@ updates:
directory: "/"
schedule:
interval: daily
target-branch: dev
target-branch: main
open-pull-requests-limit: 3
labels:
- "dependencies"
@@ -21,7 +21,7 @@ updates:
directory: "/"
schedule:
interval: daily
target-branch: dev
target-branch: main
open-pull-requests-limit: 1
labels:
- "ci"
@@ -38,7 +38,7 @@ updates:
directory: "/"
schedule:
interval: daily
target-branch: dev
target-branch: main
open-pull-requests-limit: 1
labels:
- "ci"

View File

@@ -2,7 +2,7 @@
Describe this PR in 2-5 bullets:
- Base branch target (`dev` for normal contributions; `main` only for `dev` promotion):
- Base branch target (`main` or `dev`; direct `main` PRs are allowed):
- Problem:
- Why it matters:
- What changed:

View File

@@ -23,7 +23,6 @@
"Nightly Summary & Routing"
],
"stable": [
"Main Promotion Gate",
"CI Required Gate",
"Security Audit",
"Feature Matrix Summary",

View File

@@ -30,6 +30,7 @@ jobs:
docs_changed: ${{ steps.scope.outputs.docs_changed }}
rust_changed: ${{ steps.scope.outputs.rust_changed }}
workflow_changed: ${{ steps.scope.outputs.workflow_changed }}
ci_cd_changed: ${{ steps.scope.outputs.ci_cd_changed }}
docs_files: ${{ steps.scope.outputs.docs_files }}
base_sha: ${{ steps.scope.outputs.base_sha }}
steps:
@@ -50,7 +51,7 @@ jobs:
needs: [changes]
if: needs.changes.outputs.rust_changed == 'true'
runs-on: [self-hosted, aws-india]
timeout-minutes: 25
timeout-minutes: 40
steps:
- uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
with:
@@ -74,7 +75,7 @@ jobs:
needs: [changes]
if: needs.changes.outputs.rust_changed == 'true'
runs-on: [self-hosted, aws-india]
timeout-minutes: 35
timeout-minutes: 60
steps:
- uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
- uses: dtolnay/rust-toolchain@631a55b12751854ce901bb631d5902ceb48146f7 # stable
@@ -135,7 +136,7 @@ jobs:
needs: [changes]
if: needs.changes.outputs.rust_changed == 'true'
runs-on: [self-hosted, aws-india]
timeout-minutes: 20
timeout-minutes: 35
steps:
- uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
@@ -251,9 +252,9 @@ jobs:
await script({github, context, core});
workflow-owner-approval:
name: Workflow Owner Approval
name: CI/CD Owner Approval (@chumyin)
needs: [changes]
if: github.event_name == 'pull_request' && needs.changes.outputs.workflow_changed == 'true'
if: github.event_name == 'pull_request' && needs.changes.outputs.ci_cd_changed == 'true'
runs-on: [self-hosted, aws-india]
permissions:
contents: read
@@ -262,38 +263,13 @@ jobs:
- name: Checkout repository
uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
- name: Require owner approval for workflow file changes
- name: Require @chumyin approval for CI/CD related changes
uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8
env:
WORKFLOW_OWNER_LOGINS: ${{ vars.WORKFLOW_OWNER_LOGINS }}
with:
script: |
const script = require('./.github/workflows/scripts/ci_workflow_owner_approval.js');
await script({ github, context, core });
human-review-approval:
name: Human Review Approval
needs: [changes]
if: github.event_name == 'pull_request'
runs-on: [self-hosted, aws-india]
permissions:
contents: read
pull-requests: read
steps:
- name: Checkout repository
uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
with:
ref: ${{ github.event.pull_request.base.sha }}
- name: Require at least one human approving review
uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8
env:
HUMAN_REVIEW_BOT_LOGINS: ${{ vars.HUMAN_REVIEW_BOT_LOGINS }}
with:
script: |
const script = require('./.github/workflows/scripts/ci_human_review_guard.js');
await script({ github, context, core });
license-file-owner-guard:
name: License File Owner Guard
needs: [changes]
@@ -315,7 +291,7 @@ jobs:
ci-required:
name: CI Required Gate
if: always()
needs: [changes, lint, test, build, docs-only, non-rust, docs-quality, lint-feedback, workflow-owner-approval, human-review-approval, license-file-owner-guard]
needs: [changes, lint, test, build, docs-only, non-rust, docs-quality, lint-feedback, workflow-owner-approval, license-file-owner-guard]
runs-on: [self-hosted, aws-india]
steps:
- name: Enforce required status
@@ -326,21 +302,16 @@ jobs:
event_name="${{ github.event_name }}"
rust_changed="${{ needs.changes.outputs.rust_changed }}"
docs_changed="${{ needs.changes.outputs.docs_changed }}"
workflow_changed="${{ needs.changes.outputs.workflow_changed }}"
ci_cd_changed="${{ needs.changes.outputs.ci_cd_changed }}"
docs_result="${{ needs.docs-quality.result }}"
workflow_owner_result="${{ needs.workflow-owner-approval.result }}"
human_review_result="${{ needs.human-review-approval.result }}"
license_owner_result="${{ needs.license-file-owner-guard.result }}"
# --- Helper: enforce PR governance gates ---
check_pr_governance() {
if [ "$event_name" != "pull_request" ]; then return 0; fi
if [ "$workflow_changed" = "true" ] && [ "$workflow_owner_result" != "success" ]; then
echo "Workflow files changed but workflow owner approval gate did not pass."
exit 1
fi
if [ "$human_review_result" != "success" ]; then
echo "Human review approval guard did not pass."
if [ "$ci_cd_changed" = "true" ] && [ "$workflow_owner_result" != "success" ]; then
echo "CI/CD related files changed but required @chumyin approval gate did not pass."
exit 1
fi
if [ "$license_owner_result" != "success" ]; then
@@ -382,7 +353,6 @@ jobs:
echo "build=${build_result}"
echo "docs=${docs_result}"
echo "workflow_owner_approval=${workflow_owner_result}"
echo "human_review_approval=${human_review_result}"
echo "license_file_owner_guard=${license_owner_result}"
check_pr_governance

View File

@@ -1,6 +1,6 @@
# Main Branch Delivery Flows
This document explains what runs when code is proposed to `dev`, promoted to `main`, and released.
This document explains what runs when code is proposed to `dev`/`main`, merged to `main`, and released.
Use this with:
@@ -13,7 +13,7 @@ Use this with:
| Event | Main workflows |
| --- | --- |
| PR activity (`pull_request_target`) | `pr-intake-checks.yml`, `pr-labeler.yml`, `pr-auto-response.yml` |
| PR activity (`pull_request`) | `ci-run.yml`, `sec-audit.yml`, `main-promotion-gate.yml` (for `main` PRs), plus path-scoped workflows |
| PR activity (`pull_request`) | `ci-run.yml`, `sec-audit.yml`, plus path-scoped workflows |
| Push to `dev`/`main` | `ci-run.yml`, `sec-audit.yml`, plus path-scoped workflows |
| Tag push (`v*`) | `pub-release.yml` publish mode, `pub-docker-img.yml` publish job |
| Scheduled/manual | `pub-release.yml` verification mode, `sec-codeql.yml`, `feature-matrix.yml`, `test-fuzz.yml`, `pr-check-stale.yml`, `pr-check-status.yml`, `sync-contributors.yml`, `test-benchmarks.yml`, `test-e2e.yml` |
@@ -120,14 +120,12 @@ Notes:
- repeated `pull_request_target` reruns from label churn causing noisy signals.
9. After merge, normal `push` workflows on `dev` execute (scenario 4).
### 3) Promotion PR `dev` -> `main`
### 3) PR to `main` (direct or from `dev`)
1. Maintainer opens PR with head `dev` and base `main`.
2. `main-promotion-gate.yml` runs and fails unless PR author is `willsarg` or `theonlyhennygod`.
3. `main-promotion-gate.yml` also fails if head repo/branch is not `<this-repo>:dev`.
4. `ci-run.yml` and `sec-audit.yml` run on the promotion PR.
5. Maintainer merges PR once checks and review policy pass.
6. Merge emits a `push` event on `main`.
1. Contributor or maintainer opens PR with base `main`.
2. `ci-run.yml` and `sec-audit.yml` run on the PR, plus any path-scoped workflows.
3. Maintainer merges PR once checks and review policy pass.
4. Merge emits a `push` event on `main`.
### 4) Push/Merge Queue to `dev` or `main` (including after merge)
@@ -240,29 +238,29 @@ flowchart TD
G --> H["push event on dev"]
```
### Promotion and Release
### Main Delivery and Release
```mermaid
flowchart TD
D0["Commit reaches dev"] --> B0["ci-run.yml"]
D0 --> C0["sec-audit.yml"]
P["Promotion PR dev -> main"] --> PG["main-promotion-gate.yml"]
PG --> M["Merge to main"]
PRM["PR to main"] --> QM["ci-run.yml + sec-audit.yml (+ path-scoped)"]
QM --> M["Merge to main"]
M --> A["Commit reaches main"]
A --> B["ci-run.yml"]
A --> C["sec-audit.yml"]
A --> D["path-scoped workflows (if matched)"]
T["Tag push v*"] --> R["pub-release.yml"]
W["Manual/Scheduled release verify"] --> R
T --> P["pub-docker-img.yml publish job"]
T --> DP["pub-docker-img.yml publish job"]
R --> R1["Artifacts + SBOM + checksums + signatures + GitHub Release"]
W --> R2["Verification build only (no GitHub Release publish)"]
P --> P1["Push ghcr image tags (version + sha + latest)"]
DP --> P1["Push ghcr image tags (version + sha + latest)"]
```
## Quick Troubleshooting
1. Unexpected skipped jobs: inspect `scripts/ci/detect_change_scope.sh` outputs.
2. Workflow-change PR blocked: verify `WORKFLOW_OWNER_LOGINS` and approvals.
2. CI/CD-change PR blocked: verify `@chumyin` approved review is present.
3. Fork PR appears stalled: check whether Actions run approval is pending.
4. Docker not published: confirm a `v*` tag was pushed to the intended commit.

View File

@@ -1,64 +0,0 @@
name: Main Promotion Gate
on:
pull_request:
branches: [main]
concurrency:
group: main-promotion-${{ github.event.pull_request.number || github.sha }}
cancel-in-progress: true
permissions:
contents: read
env:
GIT_CONFIG_COUNT: "1"
GIT_CONFIG_KEY_0: core.hooksPath
GIT_CONFIG_VALUE_0: /dev/null
jobs:
enforce-dev-promotion:
name: Enforce Dev -> Main Promotion
runs-on: [self-hosted, aws-india]
steps:
- name: Validate PR source branch
shell: bash
env:
HEAD_REF: ${{ github.head_ref }}
HEAD_REPO: ${{ github.event.pull_request.head.repo.full_name }}
BASE_REPO: ${{ github.repository }}
PR_AUTHOR: ${{ github.event.pull_request.user.login }}
run: |
set -euo pipefail
pr_author_lc="$(echo "${PR_AUTHOR}" | tr '[:upper:]' '[:lower:]')"
allowed_authors=("willsarg" "theonlyhennygod")
if [[ "$HEAD_REPO" != "$BASE_REPO" ]]; then
echo "::error::PRs into main must originate from ${BASE_REPO}:dev or ${BASE_REPO}:release/*. Current head repo: ${HEAD_REPO}."
exit 1
fi
if [[ "$HEAD_REF" != "dev" && ! "$HEAD_REF" =~ ^release/ ]]; then
echo "::error::PRs into main must use head branch 'dev' or 'release/*'. Current head branch: ${HEAD_REF}."
exit 1
fi
# Keep strict author allowlist for dev -> main, but allow release/* promotion from same repo.
if [[ "$HEAD_REF" == "dev" ]]; then
is_allowed_author=false
for allowed in "${allowed_authors[@]}"; do
if [[ "$pr_author_lc" == "$allowed" ]]; then
is_allowed_author=true
break
fi
done
if [[ "$is_allowed_author" != "true" ]]; then
echo "::error::dev -> main PRs are restricted to: willsarg, theonlyhennygod. PR author: ${PR_AUTHOR}."
exit 1
fi
fi
echo "Promotion policy satisfied: author=${PR_AUTHOR}, source=${HEAD_REPO}:${HEAD_REF} -> main"

View File

@@ -1,28 +1,15 @@
// Extracted from ci-run.yml step: Require owner approval for workflow file changes
// Extracted from ci-run.yml step: Require @chumyin approval for CI/CD related changes
module.exports = async ({ github, context, core }) => {
const owner = context.repo.owner;
const repo = context.repo.repo;
const prNumber = context.payload.pull_request?.number;
const prAuthor = context.payload.pull_request?.user?.login?.toLowerCase() || "";
if (!prNumber) {
core.setFailed("Missing pull_request context.");
return;
}
const baseOwners = ["theonlyhennygod", "willsarg", "chumyin"];
const configuredOwners = (process.env.WORKFLOW_OWNER_LOGINS || "")
.split(",")
.map((login) => login.trim().toLowerCase())
.filter(Boolean);
const ownerAllowlist = [...new Set([...baseOwners, ...configuredOwners])];
if (ownerAllowlist.length === 0) {
core.setFailed("Workflow owner allowlist is empty.");
return;
}
core.info(`Workflow owner allowlist: ${ownerAllowlist.join(", ")}`);
const requiredApprover = "chumyin";
const files = await github.paginate(github.rest.pulls.listFiles, {
owner,
@@ -31,21 +18,29 @@ module.exports = async ({ github, context, core }) => {
per_page: 100,
});
const workflowFiles = files
const ciCdFiles = files
.map((file) => file.filename)
.filter((name) => name.startsWith(".github/workflows/"));
.filter((name) =>
name.startsWith(".github/workflows/") ||
name.startsWith(".github/codeql/") ||
name.startsWith(".github/connectivity/") ||
name.startsWith(".github/release/") ||
name.startsWith(".github/security/") ||
name.startsWith("scripts/ci/") ||
name === ".github/actionlint.yaml" ||
name === ".github/dependabot.yml" ||
name === "docs/ci-map.md" ||
name === "docs/actions-source-policy.md" ||
name === "docs/operations/self-hosted-runner-remediation.md",
);
if (workflowFiles.length === 0) {
core.info("No workflow files changed in this PR.");
if (ciCdFiles.length === 0) {
core.info("No CI/CD related files changed in this PR.");
return;
}
core.info(`Workflow files changed:\n- ${workflowFiles.join("\n- ")}`);
if (prAuthor && ownerAllowlist.includes(prAuthor)) {
core.info(`Workflow PR authored by allowlisted owner: @${prAuthor}`);
return;
}
core.info(`CI/CD related files changed:\n- ${ciCdFiles.join("\n- ")}`);
core.info(`Required approver: @${requiredApprover}`);
const reviews = await github.paginate(github.rest.pulls.listReviews, {
owner,
@@ -66,18 +61,17 @@ module.exports = async ({ github, context, core }) => {
.map(([login]) => login);
if (approvedUsers.length === 0) {
core.setFailed("Workflow files changed but no approving review is present.");
core.setFailed("CI/CD related files changed but no approving review is present.");
return;
}
const ownerApprover = approvedUsers.find((login) => ownerAllowlist.includes(login));
if (!ownerApprover) {
if (!approvedUsers.includes(requiredApprover)) {
core.setFailed(
`Workflow files changed. Approvals found (${approvedUsers.join(", ")}), but none match workflow owner allowlist.`,
`CI/CD related files changed. Approvals found (${approvedUsers.join(", ")}), but @${requiredApprover} approval is required.`,
);
return;
}
core.info(`Workflow owner approval present: @${ownerApprover}`);
core.info(`Required CI/CD approval present: @${requiredApprover}`);
};

View File

@@ -6,8 +6,6 @@ module.exports = async ({ github, context, core }) => {
const repo = context.repo.repo;
const pr = context.payload.pull_request;
if (!pr) return;
const prAuthor = (pr.user?.login || "").toLowerCase();
const prBaseRef = pr.base?.ref || "";
const marker = "<!-- pr-intake-checks -->";
const legacyMarker = "<!-- pr-intake-sanity -->";
@@ -89,22 +87,12 @@ module.exports = async ({ github, context, core }) => {
if (dangerousProblems.length > 0) {
blockingFindings.push(`Dangerous patch markers found (${dangerousProblems.length})`);
}
const promotionAuthorAllowlist = new Set(["willsarg", "theonlyhennygod"]);
const shouldRetargetToDev =
prBaseRef === "main" && !promotionAuthorAllowlist.has(prAuthor);
if (linearKeys.length === 0) {
blockingFindings.push(
"Missing Linear issue key reference (`RMN-<id>`, `CDV-<id>`, or `COM-<id>`) in PR title/body.",
);
}
if (shouldRetargetToDev) {
advisoryFindings.push(
"This PR targets `main`, but normal contributions must target `dev`. Retarget this PR to `dev` unless this is an authorized promotion PR.",
);
}
const comments = await github.paginate(github.rest.issues.listComments, {
owner,
repo,
@@ -176,9 +164,6 @@ module.exports = async ({ github, context, core }) => {
" - `./scripts/ci/rust_quality_gate.sh`",
" - `./scripts/ci/rust_strict_delta_gate.sh`",
" - `./scripts/ci/docs_quality_gate.sh`",
...(shouldRetargetToDev
? ["5. Retarget this PR base branch from `main` to `dev`."]
: []),
"",
`Detected Linear keys: ${linearKeys.length > 0 ? linearKeys.join(", ") : "none"}`,
"",

View File

@@ -44,7 +44,7 @@ jobs:
codeql:
name: CodeQL Analysis
runs-on: [self-hosted, aws-india]
timeout-minutes: 30
timeout-minutes: 60
steps:
- name: Checkout repository
uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4

View File

@@ -1,62 +0,0 @@
name: Test Rust Build
on:
workflow_call:
inputs:
run_command:
description: "Shell command(s) to execute."
required: true
type: string
timeout_minutes:
description: "Job timeout in minutes."
required: false
default: 20
type: number
toolchain:
description: "Rust toolchain channel/version."
required: false
default: "stable"
type: string
components:
description: "Optional rustup components."
required: false
default: ""
type: string
targets:
description: "Optional rustup targets."
required: false
default: ""
type: string
use_cache:
description: "Whether to enable rust-cache."
required: false
default: true
type: boolean
permissions:
contents: read
jobs:
run:
runs-on: [self-hosted, aws-india]
timeout-minutes: ${{ inputs.timeout_minutes }}
steps:
- name: Checkout repository
uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
- name: Setup Rust toolchain
uses: dtolnay/rust-toolchain@631a55b12751854ce901bb631d5902ceb48146f7 # stable
with:
toolchain: ${{ inputs.toolchain }}
components: ${{ inputs.components }}
targets: ${{ inputs.targets }}
- name: Restore Rust cache
if: inputs.use_cache
uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v3
- name: Run command
shell: bash
run: |
set -euo pipefail
${{ inputs.run_command }}

16
.github/workflows/test-self-hosted.yml vendored Normal file
View File

@@ -0,0 +1,16 @@
name: Test Self-Hosted Runner
on:
workflow_dispatch:
jobs:
test-runner:
runs-on: self-hosted
steps:
- name: Check runner info
run: |
echo "Runner: $(hostname)"
echo "OS: $(uname -a)"
echo "Docker: $(docker --version)"
- name: Test Docker
run: docker run --rm hello-world

View File

@@ -240,8 +240,8 @@ All contributors (human or agent) must follow the same collaboration flow:
- Create and work from a non-`main` branch.
- Commit changes to that branch with clear, scoped commit messages.
- Open a PR to `dev`; do not push directly to `dev` or `main`.
- `main` is reserved for release promotion PRs from `dev`.
- Open a PR to `main` by default (`dev` is optional for integration batching); do not push directly to `dev` or `main`.
- `main` accepts direct PR merges after required checks and review policy pass.
- Wait for required checks and review outcomes before merging.
- Merge via PR controls (squash/rebase/merge as repository policy allows).
- After merge/close, clean up task branches/worktrees that are no longer needed.
@@ -251,7 +251,7 @@ All contributors (human or agent) must follow the same collaboration flow:
- Decide merge/close outcomes from repository-local authority in this order: `.github/workflows/**`, GitHub branch protection/rulesets, `docs/pr-workflow.md`, then this `AGENTS.md`.
- External agent skills/templates are execution aids only; they must not override repository-local policy.
- A normal contributor PR targeting `main` is a routing defect, not by itself a closure reason; if intent and content are legitimate, retarget to `dev`.
- A normal contributor PR targeting `main` is valid under the main-first flow when required checks and review policy are satisfied; use `dev` only for explicit integration batching.
- Direct-close the PR (do not supersede/replay) when high-confidence integrity-risk signals exist:
- unapproved or unrelated repository rebranding attempts (for example replacing project logo/identity assets)
- unauthorized platform-surface expansion (for example introducing `web` apps, dashboards, frontend stacks, or UI surfaces not requested by maintainers)
@@ -350,7 +350,6 @@ Use these rules to keep the trait/factory architecture stable under growth.
- Apply `docs/i18n-guide.md` completion checklist before merge and include i18n status in PR notes.
- For docs snapshots, add new date-stamped files for new sprints rather than rewriting historical context.
## 8) Validation Matrix
Default local checks for code changes:

View File

@@ -27,6 +27,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
Legacy values are still decrypted for backward compatibility but should be migrated.
### Fixed
- **Gemini thinking model support** — Responses from thinking models (e.g. `gemini-3-pro-preview`)
are now handled correctly. The provider skips internal reasoning parts (`thought: true`) and
signature parts (`thoughtSignature`), extracting only the final answer text. Falls back to

View File

@@ -240,8 +240,8 @@ All contributors (human or agent) must follow the same collaboration flow:
- Create and work from a non-`main` branch.
- Commit changes to that branch with clear, scoped commit messages.
- Open a PR to `dev`; do not push directly to `dev` or `main`.
- `main` is reserved for release promotion PRs from `dev`.
- Open a PR to `main` by default (`dev` is optional for integration batching); do not push directly to `dev` or `main`.
- `main` accepts direct PR merges after required checks and review policy pass.
- Wait for required checks and review outcomes before merging.
- Merge via PR controls (squash/rebase/merge as repository policy allows).
- After merge/close, clean up task branches/worktrees that are no longer needed.
@@ -251,7 +251,7 @@ All contributors (human or agent) must follow the same collaboration flow:
- Decide merge/close outcomes from repository-local authority in this order: `.github/workflows/**`, GitHub branch protection/rulesets, `docs/pr-workflow.md`, then this `CLAUDE.md`.
- External agent skills/templates are execution aids only; they must not override repository-local policy.
- A normal contributor PR targeting `main` is a routing defect, not by itself a closure reason; if intent and content are legitimate, retarget to `dev`.
- A normal contributor PR targeting `main` is valid under the main-first flow when required checks and review policy are satisfied; use `dev` only for explicit integration batching.
- Direct-close the PR (do not supersede/replay) when high-confidence integrity-risk signals exist:
- unapproved or unrelated repository rebranding attempts (for example replacing project logo/identity assets)
- unauthorized platform-surface expansion (for example introducing `web` apps, dashboards, frontend stacks, or UI surfaces not requested by maintainers)
@@ -350,7 +350,6 @@ Use these rules to keep the trait/factory architecture stable under growth.
- Apply `docs/i18n-guide.md` completion checklist before merge and include i18n status in PR notes.
- For docs snapshots, add new date-stamped files for new sprints rather than rewriting historical context.
## 8) Validation Matrix
Default local checks for code changes:

335
Cargo.lock generated

File diff suppressed because it is too large Load Diff

51
PR_DESCRIPTION_UPDATE.md Normal file
View File

@@ -0,0 +1,51 @@
## Android Phase 3 - Agent Integration
This PR implements the Android client for ZeroClaw with full agent integration, including foreground service, Quick Settings tile, boot receiver, and background heartbeat support.
### Changes
- `ZeroClawApp.kt` - Application setup with notification channels and WorkManager
- `SettingsRepository.kt` - DataStore + EncryptedSharedPreferences for secure settings
- `SettingsScreen.kt` - Compose UI for configuring the agent
- `BootReceiver.kt` - Auto-start on boot when enabled
- `HeartbeatWorker.kt` - Background periodic tasks via WorkManager
- `ZeroClawTileService.kt` - Quick Settings tile for agent control
- `ShareHandler.kt` - Handle content shared from other apps
- `ci-android.yml` - GitHub Actions workflow for Android builds
- `proguard-rules.pro` - R8 optimization rules
---
## Validation Evidence
- [x] All HIGH and MEDIUM CodeRabbit issues addressed
- [x] DataStore IOException handling added to prevent crashes on corrupted preferences
- [x] BootReceiver double `pendingResult.finish()` call removed
- [x] `text/uri-list` MIME type routed correctly in ShareHandler
- [x] API 34+ PendingIntent overload added to TileService
- [x] Kotlin Intrinsics null checks preserved in ProGuard rules
- [x] HeartbeatWorker enforces 15-minute minimum and uses UPDATE policy
- [x] SettingsScreen refreshes battery optimization state on resume
- [x] ZeroClawApp listens for settings changes to update heartbeat schedule
- [x] Trailing whitespace removed from all Kotlin files
- [ ] Manual testing: Build and install on Android 14 device (pending)
## Security Impact
- **API Keys**: Stored in Android Keystore via EncryptedSharedPreferences (AES-256-GCM)
- **Permissions**: RECEIVE_BOOT_COMPLETED, FOREGROUND_SERVICE, POST_NOTIFICATIONS
- **Data in Transit**: All API calls use HTTPS
- **No New Vulnerabilities**: No raw SQL, no WebView JavaScript, no exported components without protection
## Privacy and Data Hygiene
- **Local Storage Only**: All settings stored on-device, nothing transmitted except to configured AI provider
- **No Analytics**: No third-party analytics or tracking SDKs
- **User Control**: API key can be cleared via settings, auto-start is opt-in
- **Minimal Permissions**: Only requests permissions necessary for core functionality
## Rollback Plan
1. **Feature Flag**: Not yet implemented; can be added if needed
2. **Version Pinning**: Users can stay on previous APK version
3. **Clean Uninstall**: All data stored in app's private directory, removed on uninstall
4. **Server-Side**: No backend changes required; rollback is client-only

View File

@@ -13,13 +13,31 @@ name = "zeroclaw_android"
# Note: zeroclaw dep commented out until we integrate properly
# zeroclaw = { path = "../.." }
uniffi = { version = "0.27" }
tokio = { version = "1", features = ["rt-multi-thread", "sync"] }
# Minimal tokio - only what we need
tokio = { version = "1", default-features = false, features = ["rt", "rt-multi-thread", "sync"] }
anyhow = "1"
serde = { version = "1", features = ["derive"] }
serde = { version = "1", default-features = false, features = ["derive"] }
serde_json = "1"
tracing = "0.1"
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
# Minimal tracing for mobile
tracing = { version = "0.1", default-features = false }
tracing-subscriber = { version = "0.3", default-features = false, features = ["fmt", "env-filter"] }
[[bin]]
name = "uniffi-bindgen"
path = "uniffi-bindgen.rs"
# ============================================
# BINARY SIZE OPTIMIZATION
# ============================================
# Target: <3MB native library per ABI
[profile.release]
# Optimize for size over speed
opt-level = "z" # Smallest binary (was "3" for speed)
lto = true # Link-time optimization - removes dead code
codegen-units = 1 # Better optimization, slower compile
panic = "abort" # No unwinding = smaller binary
strip = true # Strip symbols
[profile.release.package."*"]
opt-level = "z" # Apply to all dependencies too

View File

@@ -112,7 +112,7 @@ impl ZeroClawController {
/// Start the ZeroClaw gateway
pub fn start(&self) -> Result<(), ZeroClawError> {
let mut status = self.status.lock().map_err(|_| ZeroClawError::LockError)?;
if matches!(*status, AgentStatus::Running | AgentStatus::Starting) {
return Ok(());
}
@@ -138,7 +138,7 @@ impl ZeroClawController {
/// Stop the gateway
pub fn stop(&self) -> Result<(), ZeroClawError> {
let mut status = self.status.lock().map_err(|_| ZeroClawError::LockError)?;
// TODO: Actually stop the gateway
// if let Some(gateway) = self.gateway.lock()?.take() {
// gateway.shutdown();
@@ -162,7 +162,7 @@ impl ZeroClawController {
/// Send a message to the agent
pub fn send_message(&self, content: String) -> SendResult {
let msg_id = uuid_v4();
// Add user message
if let Ok(mut messages) = self.messages.lock() {
messages.push(ChatMessage {
@@ -296,7 +296,7 @@ mod tests {
let controller = ZeroClawController::with_defaults("/tmp/zeroclaw".to_string());
let result = controller.send_message("Hello".to_string());
assert!(result.success);
let messages = controller.get_messages();
assert_eq!(messages.len(), 2); // User + assistant
}

View File

@@ -5,7 +5,7 @@ Native Android client for ZeroClaw - run your autonomous AI assistant on Android
## Features
- 🚀 **Native Performance** - Kotlin/Jetpack Compose, not a webview
- 🔋 **Battery Efficient** - WorkManager, Doze-aware, minimal wake locks
- 🔋 **Battery Efficient** - WorkManager, Doze-aware, minimal wake locks
- 🔐 **Security First** - Android Keystore for secrets, sandboxed execution
- 🦀 **ZeroClaw Core** - Full Rust binary via UniFFI/JNI
- 🎨 **Material You** - Dynamic theming, modern Android UX
@@ -80,12 +80,24 @@ cargo ndk -t arm64-v8a -o app/src/main/jniLibs build --release
- [x] Chat UI scaffold
- [x] Theme system (Material 3)
🚧 **Phase 3: Integration** (Next)
- [ ] Cargo NDK build integration
- [ ] Native library loading
- [ ] WorkManager for cron
- [ ] DataStore persistence
- [ ] Quick Settings tile
**Phase 3: Integration** (Complete)
- [x] WorkManager for cron/heartbeat
- [x] DataStore + encrypted preferences
- [x] Quick Settings tile
- [x] Share intent handling
- [x] Battery optimization helpers
- [x] CI workflow for Android builds
**Phase 4: Polish** (Complete)
- [x] Home screen widget
- [x] Accessibility utilities (TalkBack support)
- [x] One-liner install scripts (Termux, ADB)
- [x] Web installer page
🚀 **Ready for Production**
- [ ] Cargo NDK CI integration
- [ ] F-Droid submission
- [ ] Google Play submission
## Contributing

97
clients/android/SIZE.md Normal file
View File

@ -0,0 +1,97 @@
# ZeroClaw Android - Binary Size Optimization
## Target Sizes
| Component | Target | Notes |
|-----------|--------|-------|
| Native lib (per ABI) | <3MB | Rust, optimized for size |
| APK (arm64-v8a) | <10MB | Single ABI, most users |
| APK (universal) | <20MB | All ABIs, fallback |
## Optimization Strategy
### 1. Rust Native Library
```toml
[profile.release]
opt-level = "z" # Optimize for size
lto = true # Link-time optimization
codegen-units = 1 # Better optimization
panic = "abort" # No unwinding overhead
strip = true # Remove symbols
```
**Expected savings:** ~40% reduction vs default release
### 2. Android APK
**Enabled:**
- R8 minification (`isMinifyEnabled = true`)
- Resource shrinking (`isShrinkResources = true`)
- ABI splits (users download only their arch)
- Aggressive ProGuard rules
**Removed:**
- `material-icons-extended` (~5MB → 0MB)
- `kotlinx-serialization` (~300KB, unused)
- `ui-tooling-preview` (~100KB, debug only)
- Debug symbols in release
### 3. Dependencies Audit
| Dependency | Size | Keep? |
|------------|------|-------|
| Compose BOM | ~3MB | ✅ Required |
| Material3 | ~1MB | ✅ Required |
| material-icons-extended | ~5MB | ❌ Removed |
| Navigation | ~200KB | ✅ Required |
| DataStore | ~100KB | ✅ Required |
| WorkManager | ~300KB | ✅ Required |
| Security-crypto | ~100KB | ✅ Required |
| Coroutines | ~200KB | ✅ Required |
| Serialization | ~300KB | ❌ Removed (unused) |
### 4. Split APKs
```kotlin
splits {
abi {
isEnable = true
include("arm64-v8a", "armeabi-v7a", "x86_64")
isUniversalApk = true
}
}
```
**Result:**
- `app-arm64-v8a-release.apk` → ~10MB (90% of users)
- `app-armeabi-v7a-release.apk` → ~9MB (older devices)
- `app-x86_64-release.apk` → ~10MB (emulators)
- `app-universal-release.apk` → ~18MB (fallback)
## Measuring Size
```bash
# Build release APK
./gradlew assembleRelease
# Check sizes
ls -lh app/build/outputs/apk/release/
# Analyze APK contents
$ANDROID_HOME/build-tools/34.0.0/apkanalyzer apk summary app-release.apk
```
## Future Optimizations
1. **Baseline Profiles** - Pre-compile hot paths
2. **R8 full mode** - More aggressive shrinking
3. **Custom Compose compiler** - Smaller runtime
4. **WebP images** - Smaller than PNG
5. **Dynamic delivery** - On-demand features
## Philosophy
> "Zero overhead. Zero compromise."
Every KB matters. We ship what users need, nothing more.

View File

@ -15,7 +15,7 @@ android {
versionName = "0.1.0"
testInstrumentationRunner = "androidx.test.runner.AndroidJUnitRunner"
vectorDrawables {
useSupportLibrary = true
}
@ -33,6 +33,10 @@ android {
getDefaultProguardFile("proguard-android-optimize.txt"),
"proguard-rules.pro"
)
// Aggressive optimization
ndk {
debugSymbolLevel = "NONE"
}
}
debug {
isDebuggable = true
@ -40,6 +44,16 @@ android {
}
}
// Split APKs by ABI - users only download what they need
splits {
abi {
isEnable = true
reset()
include("arm64-v8a", "armeabi-v7a", "x86_64")
isUniversalApk = true // Also build universal for fallback
}
}
compileOptions {
sourceCompatibility = JavaVersion.VERSION_17
targetCompatibility = JavaVersion.VERSION_17
@ -63,15 +77,15 @@ android {
excludes += "/META-INF/{AL2.0,LGPL2.1}"
}
}
// Task to build native library before APK
tasks.register("buildRustLibrary") {
doLast {
exec {
workingDir = rootProject.projectDir.parentFile.parentFile // zeroclaw root
commandLine("cargo", "ndk",
commandLine("cargo", "ndk",
"-t", "arm64-v8a",
"-t", "armeabi-v7a",
"-t", "armeabi-v7a",
"-t", "x86_64",
"-o", "clients/android/app/src/main/jniLibs",
"build", "--release", "-p", "zeroclaw-android-bridge")
@ -86,40 +100,40 @@ dependencies {
implementation("androidx.lifecycle:lifecycle-runtime-ktx:2.7.0")
implementation("androidx.lifecycle:lifecycle-viewmodel-compose:2.7.0")
implementation("androidx.activity:activity-compose:1.8.2")
// Compose
// Compose - minimal set
implementation(platform("androidx.compose:compose-bom:2024.02.00"))
implementation("androidx.compose.ui:ui")
implementation("androidx.compose.ui:ui-graphics")
implementation("androidx.compose.ui:ui-tooling-preview")
implementation("androidx.compose.material3:material3")
implementation("androidx.compose.material:material-icons-extended")
// NOTE: Using material-icons-core (small) instead of extended (5MB+)
// Add individual icons via drawable if needed
// Navigation
implementation("androidx.navigation:navigation-compose:2.7.7")
// DataStore (preferences)
implementation("androidx.datastore:datastore-preferences:1.0.0")
// WorkManager (background tasks)
implementation("androidx.work:work-runtime-ktx:2.9.0")
// Security (Keystore)
implementation("androidx.security:security-crypto:1.1.0-alpha06")
// Coroutines
implementation("org.jetbrains.kotlinx:kotlinx-coroutines-android:1.7.3")
// Serialization
implementation("org.jetbrains.kotlinx:kotlinx-serialization-json:1.6.2")
// NOTE: Serialization removed - not used yet, saves ~300KB
// Add back when needed: implementation("org.jetbrains.kotlinx:kotlinx-serialization-json:1.6.2")
// Testing
testImplementation("junit:junit:4.13.2")
androidTestImplementation("androidx.test.ext:junit:1.1.5")
androidTestImplementation("androidx.test.espresso:espresso-core:3.5.1")
androidTestImplementation(platform("androidx.compose:compose-bom:2024.02.00"))
androidTestImplementation("androidx.compose.ui:ui-test-junit4")
// Debug
debugImplementation("androidx.compose.ui:ui-tooling")
debugImplementation("androidx.compose.ui:ui-test-manifest")

View File

@ -1,6 +1,9 @@
# ZeroClaw Android ProGuard Rules
# Goal: Smallest possible APK
# Keep native bridge
# ============================================
# KEEP NATIVE BRIDGE
# ============================================
-keep class ai.zeroclaw.android.bridge.** { *; }
-keepclassmembers class ai.zeroclaw.android.bridge.** { *; }
@ -9,12 +12,56 @@
native <methods>;
}
# Keep data classes for serialization
-keep class ai.zeroclaw.android.**.data.** { *; }
-keepclassmembers class ai.zeroclaw.android.**.data.** { *; }
# ============================================
# KEEP DATA CLASSES
# ============================================
-keep class ai.zeroclaw.android.data.** { *; }
-keepclassmembers class ai.zeroclaw.android.data.** { *; }
# Kotlin serialization
# ============================================
# KOTLIN SERIALIZATION
# ============================================
-keepattributes *Annotation*, InnerClasses
-dontnote kotlinx.serialization.AnnotationsKt
-keepclassmembers class kotlinx.serialization.json.** { *** Companion; }
-keepclasseswithmembers class kotlinx.serialization.json.** { kotlinx.serialization.KSerializer serializer(...); }
-keepclassmembers class kotlinx.serialization.json.** {
*** Companion;
}
-keepclasseswithmembers class kotlinx.serialization.json.** {
kotlinx.serialization.KSerializer serializer(...);
}
# ============================================
# AGGRESSIVE OPTIMIZATIONS
# ============================================
# Remove logging in release
-assumenosideeffects class android.util.Log {
public static int v(...);
public static int d(...);
public static int i(...);
}
# KEEP Kotlin null checks - stripping them hides bugs and causes crashes
# (Previously removed; CodeRabbit HIGH severity fix)
# -assumenosideeffects class kotlin.jvm.internal.Intrinsics { ... }
# Optimize enums
-optimizations !code/simplification/enum*
# Remove unused Compose stuff
-dontwarn androidx.compose.**
# ============================================
# SIZE OPTIMIZATIONS
# ============================================
# Merge classes where possible
-repackageclasses ''
-allowaccessmodification
# Remove unused code paths
-optimizationpasses 5
# Don't keep attributes we don't need
-keepattributes SourceFile,LineNumberTable # Keep for crash reports
-renamesourcefileattribute SourceFile

View File

@ -2,14 +2,21 @@
<manifest xmlns:android="http://schemas.android.com/apk/res/android"
xmlns:tools="http://schemas.android.com/tools">
<!-- Permissions -->
<!-- Network -->
<uses-permission android:name="android.permission.INTERNET" />
<uses-permission android:name="android.permission.ACCESS_NETWORK_STATE" />
<!-- Background execution -->
<uses-permission android:name="android.permission.FOREGROUND_SERVICE" />
<uses-permission android:name="android.permission.FOREGROUND_SERVICE_DATA_SYNC" />
<uses-permission android:name="android.permission.POST_NOTIFICATIONS" />
<uses-permission android:name="android.permission.RECEIVE_BOOT_COMPLETED" />
<uses-permission android:name="android.permission.WAKE_LOCK" />
<!-- Notifications -->
<uses-permission android:name="android.permission.POST_NOTIFICATIONS" />
<!-- Battery optimization (optional - for requesting exemption) -->
<uses-permission android:name="android.permission.REQUEST_IGNORE_BATTERY_OPTIMIZATIONS" />
<application
android:name=".ZeroClawApp"
@ -26,18 +33,33 @@
android:name=".MainActivity"
android:exported="true"
android:label="@string/app_name"
android:theme="@style/Theme.ZeroClaw">
android:theme="@style/Theme.ZeroClaw"
android:launchMode="singleTop">
<intent-filter>
<action android:name="android.intent.action.MAIN" />
<category android:name="android.intent.category.LAUNCHER" />
</intent-filter>
<!-- Handle share intents -->
<!-- Handle text share intents -->
<intent-filter>
<action android:name="android.intent.action.SEND" />
<category android:name="android.intent.category.DEFAULT" />
<data android:mimeType="text/plain" />
</intent-filter>
<!-- Handle URL share intents -->
<intent-filter>
<action android:name="android.intent.action.SEND" />
<category android:name="android.intent.category.DEFAULT" />
<data android:mimeType="text/uri-list" />
</intent-filter>
<!-- Handle image share intents -->
<intent-filter>
<action android:name="android.intent.action.SEND" />
<category android:name="android.intent.category.DEFAULT" />
<data android:mimeType="image/*" />
</intent-filter>
</activity>
<!-- Background Service -->
@ -46,6 +68,21 @@
android:exported="false"
android:foregroundServiceType="dataSync" />
<!-- Quick Settings Tile -->
<service
android:name=".tile.ZeroClawTileService"
android:exported="true"
android:icon="@drawable/ic_notification"
android:label="@string/app_name"
android:permission="android.permission.BIND_QUICK_SETTINGS_TILE">
<intent-filter>
<action android:name="android.service.quicksettings.action.QS_TILE" />
</intent-filter>
<meta-data
android:name="android.service.quicksettings.ACTIVE_TILE"
android:value="false" />
</service>
<!-- Boot Receiver -->
<receiver
android:name=".receiver.BootReceiver"
@ -54,10 +91,28 @@
<intent-filter>
<action android:name="android.intent.action.BOOT_COMPLETED" />
<action android:name="android.intent.action.QUICKBOOT_POWERON" />
<action android:name="android.intent.action.MY_PACKAGE_REPLACED" />
</intent-filter>
</receiver>
<!-- WorkManager Initialization -->
<!-- Home Screen Widget -->
<receiver
android:name=".widget.ZeroClawWidget"
android:exported="true"
android:label="@string/app_name">
<intent-filter>
<action android:name="android.appwidget.action.APPWIDGET_UPDATE" />
</intent-filter>
<intent-filter>
<action android:name="ai.zeroclaw.widget.TOGGLE" />
<action android:name="ai.zeroclaw.widget.QUICK_MESSAGE" />
</intent-filter>
<meta-data
android:name="android.appwidget.provider"
android:resource="@xml/widget_info" />
</receiver>
<!-- WorkManager Initialization (disable default, we initialize manually) -->
<provider
android:name="androidx.startup.InitializationProvider"
android:authorities="${applicationId}.androidx-startup"
@ -65,7 +120,8 @@
tools:node="merge">
<meta-data
android:name="androidx.work.WorkManagerInitializer"
android:value="androidx.startup" />
android:value="androidx.startup"
tools:node="remove" />
</provider>
</application>

View File

@ -34,7 +34,7 @@ fun ZeroClawApp() {
var agentStatus by remember { mutableStateOf(AgentStatus.Stopped) }
var messages by remember { mutableStateOf(listOf<ChatMessage>()) }
var inputText by remember { mutableStateOf("") }
Scaffold(
topBar = {
TopAppBar(
@ -88,7 +88,7 @@ fun StatusIndicator(status: AgentStatus) {
AgentStatus.Stopped -> MaterialTheme.colorScheme.outline to "Stopped"
AgentStatus.Error -> MaterialTheme.colorScheme.error to "Error"
}
Surface(
color = color.copy(alpha = 0.2f),
shape = MaterialTheme.shapes.small
@ -128,7 +128,7 @@ fun EmptyState(status: AgentStatus, onStart: () -> Unit) {
textAlign = TextAlign.Center
)
Spacer(modifier = Modifier.height(32.dp))
if (status == AgentStatus.Stopped) {
Button(onClick = onStart) {
Text("Start Agent")
@ -180,11 +180,11 @@ fun ChatMessageList(messages: List<ChatMessage>, modifier: Modifier = Modifier)
@Composable
fun ChatBubble(message: ChatMessage) {
val alignment = if (message.isUser) Alignment.End else Alignment.Start
val color = if (message.isUser)
MaterialTheme.colorScheme.primaryContainer
else
val color = if (message.isUser)
MaterialTheme.colorScheme.primaryContainer
else
MaterialTheme.colorScheme.surfaceVariant
Box(
modifier = Modifier.fillMaxWidth(),
contentAlignment = if (message.isUser) Alignment.CenterEnd else Alignment.CenterStart

View File

@ -0,0 +1,104 @@
package ai.zeroclaw.android
import android.content.Intent
import android.net.Uri
/**
 * Handles content shared TO ZeroClaw from other apps.
 *
 * Supports:
 * - Plain text
 * - URLs
 * - Images (future)
 * - Files (future)
 */
object ShareHandler {

    /** Discriminated result of parsing a share intent. */
    sealed class SharedContent {
        data class Text(val text: String) : SharedContent()
        data class Url(val url: String, val title: String? = null) : SharedContent()
        data class Image(val uri: Uri) : SharedContent()
        data class File(val uri: Uri, val mimeType: String) : SharedContent()
        object None : SharedContent()
    }

    /**
     * Parse an incoming share intent.
     *
     * Only ACTION_SEND is handled; any other action (including
     * ACTION_SEND_MULTIPLE) or a missing MIME type yields [SharedContent.None].
     */
    fun parseIntent(intent: Intent): SharedContent {
        if (intent.action != Intent.ACTION_SEND) {
            return SharedContent.None
        }
        val type = intent.type ?: return SharedContent.None
        return when {
            type == "text/plain" -> parseTextIntent(intent)
            type == "text/uri-list" -> parseUriListIntent(intent)
            type.startsWith("image/") -> parseImageIntent(intent)
            else -> parseFileIntent(intent, type)
        }
    }

    /** text/plain payload; promoted to [SharedContent.Url] when it looks like an http(s) URL. */
    private fun parseTextIntent(intent: Intent): SharedContent {
        val text = intent.getStringExtra(Intent.EXTRA_TEXT) ?: return SharedContent.None
        // Check if it's a URL
        if (text.startsWith("http://") || text.startsWith("https://")) {
            val title = intent.getStringExtra(Intent.EXTRA_SUBJECT)
            return SharedContent.Url(text, title)
        }
        return SharedContent.Text(text)
    }

    /** text/uri-list payload: newline-separated URLs; the first http(s) entry wins. */
    private fun parseUriListIntent(intent: Intent): SharedContent {
        val text = intent.getStringExtra(Intent.EXTRA_TEXT) ?: return SharedContent.None
        val firstUrl = text.lines().firstOrNull { it.startsWith("http://") || it.startsWith("https://") }
        return if (firstUrl != null) {
            val title = intent.getStringExtra(Intent.EXTRA_SUBJECT)
            SharedContent.Url(firstUrl, title)
        } else {
            SharedContent.Text(text)
        }
    }

    private fun parseImageIntent(intent: Intent): SharedContent {
        return streamUri(intent)?.let { SharedContent.Image(it) } ?: SharedContent.None
    }

    private fun parseFileIntent(intent: Intent, mimeType: String): SharedContent {
        return streamUri(intent)?.let { SharedContent.File(it, mimeType) } ?: SharedContent.None
    }

    /**
     * Version-aware extraction of [Intent.EXTRA_STREAM].
     *
     * The typed two-argument overload is required on API 33+ (TIRAMISU);
     * the untyped overload is deprecated there but remains the only option
     * on older releases. Shared by image and generic file parsing.
     */
    private fun streamUri(intent: Intent): Uri? {
        return if (android.os.Build.VERSION.SDK_INT >= android.os.Build.VERSION_CODES.TIRAMISU) {
            intent.getParcelableExtra(Intent.EXTRA_STREAM, Uri::class.java)
        } else {
            @Suppress("DEPRECATION")
            intent.getParcelableExtra(Intent.EXTRA_STREAM)
        }
    }

    /**
     * Generate a chat prompt from shared content.
     * Returns an empty string for [SharedContent.None].
     */
    fun generatePrompt(content: SharedContent): String {
        return when (content) {
            is SharedContent.Text -> "I'm sharing this text with you:\n\n${content.text}"
            is SharedContent.Url -> {
                val title = content.title?.let { "\"$it\"\n" } ?: ""
                "${title}I'm sharing this URL: ${content.url}\n\nPlease summarize or help me with this."
            }
            is SharedContent.Image -> "I'm sharing an image with you. [Image attached]"
            is SharedContent.File -> "I'm sharing a file with you. [File: ${content.mimeType}]"
            SharedContent.None -> ""
        }
    }
}

View File

@ -4,50 +4,113 @@ import android.app.Application
import android.app.NotificationChannel
import android.app.NotificationManager
import android.os.Build
import androidx.work.Configuration
import androidx.work.WorkManager
import ai.zeroclaw.android.data.SettingsRepository
import ai.zeroclaw.android.worker.HeartbeatWorker
import kotlinx.coroutines.CoroutineScope
import kotlinx.coroutines.Dispatchers
import kotlinx.coroutines.SupervisorJob
import kotlinx.coroutines.flow.distinctUntilChanged
import kotlinx.coroutines.flow.first
import kotlinx.coroutines.flow.map
import kotlinx.coroutines.launch
class ZeroClawApp : Application(), Configuration.Provider {
class ZeroClawApp : Application() {
companion object {
const val CHANNEL_ID = "zeroclaw_service"
const val CHANNEL_NAME = "ZeroClaw Agent"
const val AGENT_CHANNEL_ID = "zeroclaw_agent"
const val AGENT_CHANNEL_NAME = "Agent Messages"
// Singleton instance for easy access
lateinit var instance: ZeroClawApp
private set
}
// Application scope for coroutines
private val applicationScope = CoroutineScope(SupervisorJob() + Dispatchers.Main)
// Lazy initialized repositories
val settingsRepository by lazy { SettingsRepository(this) }
override fun onCreate() {
super.onCreate()
instance = this
createNotificationChannels()
initializeWorkManager()
// Schedule heartbeat if auto-start is enabled
applicationScope.launch {
val settings = settingsRepository.settings.first()
if (settings.autoStart && settings.isConfigured()) {
HeartbeatWorker.scheduleHeartbeat(
this@ZeroClawApp,
settings.heartbeatIntervalMinutes.toLong()
)
}
}
// Listen for settings changes and update heartbeat schedule
applicationScope.launch {
settingsRepository.settings
.map { Triple(it.autoStart, it.isConfigured(), it.heartbeatIntervalMinutes) }
.distinctUntilChanged()
.collect { (autoStart, isConfigured, intervalMinutes) ->
if (autoStart && isConfigured) {
HeartbeatWorker.scheduleHeartbeat(this@ZeroClawApp, intervalMinutes.toLong())
} else {
HeartbeatWorker.cancelHeartbeat(this@ZeroClawApp)
}
}
}
// TODO: Initialize native library
// System.loadLibrary("zeroclaw")
// System.loadLibrary("zeroclaw_android")
}
private fun createNotificationChannels() {
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.O) {
val manager = getSystemService(NotificationManager::class.java)
// Service channel (foreground service)
// Service channel (foreground service - low priority, silent)
val serviceChannel = NotificationChannel(
CHANNEL_ID,
CHANNEL_NAME,
NotificationManager.IMPORTANCE_LOW
).apply {
description = "ZeroClaw background service"
description = "ZeroClaw background service notification"
setShowBadge(false)
enableVibration(false)
setSound(null, null)
}
// Agent messages channel
// Agent messages channel (high priority for important messages)
val agentChannel = NotificationChannel(
AGENT_CHANNEL_ID,
AGENT_CHANNEL_NAME,
NotificationManager.IMPORTANCE_HIGH
).apply {
description = "Messages from your AI agent"
description = "Messages and alerts from your AI agent"
enableVibration(true)
setShowBadge(true)
}
manager.createNotificationChannel(serviceChannel)
manager.createNotificationChannel(agentChannel)
}
}
private fun initializeWorkManager() {
// WorkManager is initialized via Configuration.Provider
// This ensures it's ready before any work is scheduled
}
// Configuration.Provider implementation for custom WorkManager setup
override val workManagerConfiguration: Configuration
get() = Configuration.Builder()
.setMinimumLoggingLevel(android.util.Log.INFO)
.build()
}

View File

@ -0,0 +1,123 @@
package ai.zeroclaw.android.accessibility
import android.content.Context
import android.view.accessibility.AccessibilityManager
import androidx.compose.runtime.Composable
import androidx.compose.runtime.remember
import androidx.compose.ui.platform.LocalContext
import androidx.compose.ui.semantics.SemanticsPropertyKey
import androidx.compose.ui.semantics.SemanticsPropertyReceiver
/**
 * Accessibility helpers for ZeroClaw Android.
 *
 * Keeps the app usable with:
 * - TalkBack (screen reader)
 * - Switch Access
 * - Voice Access
 * - Large text/display size
 */
object AccessibilityUtils {

    /** Shared lookup of the system [AccessibilityManager]. */
    private fun manager(context: Context): AccessibilityManager =
        context.getSystemService(Context.ACCESSIBILITY_SERVICE) as AccessibilityManager

    /** True when TalkBack or a similar touch-exploration screen reader is active. */
    fun isScreenReaderEnabled(context: Context): Boolean {
        val am = manager(context)
        return am.isEnabled && am.isTouchExplorationEnabled
    }

    /** True when any accessibility service at all is enabled. */
    fun isAccessibilityEnabled(context: Context): Boolean = manager(context).isEnabled

    /** Content description for the agent's current status indicator. */
    fun getStatusDescription(isRunning: Boolean, isThinking: Boolean = false): String =
        if (isThinking) {
            "Agent is thinking and processing your request"
        } else if (isRunning) {
            "Agent is running and ready to help"
        } else {
            "Agent is stopped. Tap to start"
        }

    /** Content description for a single chat message, including sender and time. */
    fun getMessageDescription(
        content: String,
        isUser: Boolean,
        timestamp: String
    ): String {
        val sender = if (isUser) "You said" else "Agent replied"
        return "$sender at $timestamp: $content"
    }

    /** Push an announcement through the accessibility event stream (no-op when disabled). */
    fun announceForAccessibility(context: Context, message: String) {
        val am = manager(context)
        if (!am.isEnabled) return
        val event = android.view.accessibility.AccessibilityEvent.obtain(
            android.view.accessibility.AccessibilityEvent.TYPE_ANNOUNCEMENT
        )
        event.text.add(message)
        am.sendAccessibilityEvent(event)
    }
}
/**
 * Custom semantics property marking a node as a live region, so screen
 * readers announce content changes without the user moving focus to it.
 */
val LiveRegion = SemanticsPropertyKey<LiveRegionMode>("LiveRegion")
// Delegated accessor so composables can write `liveRegion = ...` inside semantics {}.
var SemanticsPropertyReceiver.liveRegion by LiveRegion
// Politeness levels, mirroring the ARIA live-region model.
enum class LiveRegionMode {
    None,
    Polite, // Announce when the user is idle
    Assertive // Announce immediately, interrupting current speech
}
/**
 * Remember a snapshot of accessibility state for this composition.
 *
 * Captured once via [remember]; it does not live-update if the user
 * toggles accessibility services while the screen stays visible.
 */
@Composable
fun rememberAccessibilityState(): AccessibilityState {
    val ctx = LocalContext.current
    return remember {
        val screenReader = AccessibilityUtils.isScreenReaderEnabled(ctx)
        val anyService = AccessibilityUtils.isAccessibilityEnabled(ctx)
        AccessibilityState(
            isScreenReaderEnabled = screenReader,
            isAccessibilityEnabled = anyService
        )
    }
}
// Immutable snapshot of accessibility service state for the current composition.
data class AccessibilityState(
    val isScreenReaderEnabled: Boolean, // TalkBack-style touch exploration active
    val isAccessibilityEnabled: Boolean // any accessibility service enabled
)
/**
 * Canonical content-description strings for common UI elements, so every
 * screen exposes the same labels to screen readers. Reference these
 * constants instead of writing inline description literals.
 */
object ContentDescriptions {
    const val TOGGLE_AGENT = "Toggle agent on or off"
    const val SEND_MESSAGE = "Send message"
    const val CLEAR_CHAT = "Clear conversation"
    const val OPEN_SETTINGS = "Open settings"
    const val BACK = "Go back"
    const val AGENT_STATUS = "Agent status"
    const val MESSAGE_INPUT = "Type your message here"
    const val PROVIDER_DROPDOWN = "Select AI provider"
    const val MODEL_DROPDOWN = "Select AI model"
    const val API_KEY_INPUT = "Enter your API key"
    const val SHOW_API_KEY = "Show API key"
    const val HIDE_API_KEY = "Hide API key"
}

View File

@ -2,17 +2,17 @@ package ai.zeroclaw.android.bridge
/**
* JNI bridge to ZeroClaw Rust library.
*
*
* This class will be replaced by UniFFI-generated bindings.
* For now, it provides stub implementations.
*
*
* Native library: libzeroclaw.so
* Build command: cargo ndk -t arm64-v8a -o app/src/main/jniLibs build --release
*/
object ZeroClawBridge {
private var initialized = false
/**
* Initialize the ZeroClaw runtime.
* Must be called before any other methods.
@ -25,7 +25,7 @@ object ZeroClawBridge {
initialized = true
}
}
/**
* Start the ZeroClaw gateway.
* @param configPath Path to zeroclaw.toml config file
@ -36,7 +36,7 @@ object ZeroClawBridge {
// TODO: nativeStart(configPath)
}
}
/**
* Stop the ZeroClaw gateway.
*/
@ -45,7 +45,7 @@ object ZeroClawBridge {
// TODO: nativeStop()
}
}
/**
* Send a message to the agent.
*/
@ -55,7 +55,7 @@ object ZeroClawBridge {
// TODO: nativeSendMessage(message)
}
}
/**
* Poll for the next message from the agent.
* Returns null if no message available.
@ -65,7 +65,7 @@ object ZeroClawBridge {
// TODO: return nativePollMessage()
return null
}
/**
* Get current agent status.
*/
@ -74,12 +74,12 @@ object ZeroClawBridge {
// TODO: return nativeGetStatus()
return AgentStatus.Stopped
}
/**
* Check if the native library is loaded.
*/
fun isLoaded(): Boolean = initialized
// Native method declarations (to be implemented)
// private external fun nativeInit(dataDir: String)
// private external fun nativeStart(configPath: String)

View File

@ -0,0 +1,156 @@
package ai.zeroclaw.android.data
import android.content.Context
import androidx.datastore.core.DataStore
import androidx.datastore.preferences.core.*
import androidx.datastore.preferences.preferencesDataStore
import androidx.security.crypto.EncryptedSharedPreferences
import androidx.security.crypto.MasterKey
import kotlinx.coroutines.flow.Flow
import kotlinx.coroutines.flow.catch
import kotlinx.coroutines.flow.map
import java.io.IOException
// Extension for DataStore
private val Context.dataStore: DataStore<Preferences> by preferencesDataStore(name = "zeroclaw_settings")
/**
 * Repository for persisting ZeroClaw settings.
 *
 * General settings live in the Preferences DataStore; the API key is kept
 * separately in EncryptedSharedPreferences so it never touches plain storage.
 */
class SettingsRepository(private val context: Context) {

    // DataStore keys
    private object Keys {
        val PROVIDER = stringPreferencesKey("provider")
        val MODEL = stringPreferencesKey("model")
        val AUTO_START = booleanPreferencesKey("auto_start")
        val NOTIFICATIONS_ENABLED = booleanPreferencesKey("notifications_enabled")
        val SYSTEM_PROMPT = stringPreferencesKey("system_prompt")
        val HEARTBEAT_INTERVAL = intPreferencesKey("heartbeat_interval")
        val FIRST_RUN = booleanPreferencesKey("first_run")
    }

    // Encrypted storage for the API key (AES256-GCM, Keystore-backed master key)
    private val encryptedPrefs by lazy {
        val masterKey = MasterKey.Builder(context)
            .setKeyScheme(MasterKey.KeyScheme.AES256_GCM)
            .build()
        EncryptedSharedPreferences.create(
            context,
            "zeroclaw_secure",
            masterKey,
            EncryptedSharedPreferences.PrefKeyEncryptionScheme.AES256_SIV,
            EncryptedSharedPreferences.PrefValueEncryptionScheme.AES256_GCM
        )
    }

    // Single corruption-tolerant view of the DataStore, shared by all flows
    // below: an IOException (corrupt/unreadable file) degrades to empty
    // preferences instead of crashing every collector; other errors rethrow.
    private val safePreferences: Flow<Preferences> = context.dataStore.data
        .catch { exception ->
            if (exception is IOException) {
                android.util.Log.e("SettingsRepository", "Error reading DataStore", exception)
                emit(emptyPreferences())
            } else {
                throw exception
            }
        }

    // Flow of the full settings snapshot, with per-field defaults for unset keys.
    val settings: Flow<ZeroClawSettings> = safePreferences.map { prefs ->
        ZeroClawSettings(
            provider = prefs[Keys.PROVIDER] ?: "anthropic",
            model = prefs[Keys.MODEL] ?: "claude-sonnet-4-5",
            apiKey = getApiKey(),
            autoStart = prefs[Keys.AUTO_START] ?: false,
            notificationsEnabled = prefs[Keys.NOTIFICATIONS_ENABLED] ?: true,
            systemPrompt = prefs[Keys.SYSTEM_PROMPT] ?: "",
            heartbeatIntervalMinutes = prefs[Keys.HEARTBEAT_INTERVAL] ?: 15
        )
    }

    // True until setFirstRunComplete() is called (defaults to true on fresh installs).
    val isFirstRun: Flow<Boolean> = safePreferences.map { prefs ->
        prefs[Keys.FIRST_RUN] ?: true
    }

    /** Persist a full settings snapshot (API key to encrypted prefs, rest to DataStore). */
    suspend fun updateSettings(settings: ZeroClawSettings) {
        // Save API key to encrypted storage
        saveApiKey(settings.apiKey)
        // Save other settings to DataStore
        context.dataStore.edit { prefs ->
            prefs[Keys.PROVIDER] = settings.provider
            prefs[Keys.MODEL] = settings.model
            prefs[Keys.AUTO_START] = settings.autoStart
            prefs[Keys.NOTIFICATIONS_ENABLED] = settings.notificationsEnabled
            prefs[Keys.SYSTEM_PROMPT] = settings.systemPrompt
            prefs[Keys.HEARTBEAT_INTERVAL] = settings.heartbeatIntervalMinutes
        }
    }

    /** Mark onboarding as done so isFirstRun emits false from now on. */
    suspend fun setFirstRunComplete() {
        context.dataStore.edit { prefs ->
            prefs[Keys.FIRST_RUN] = false
        }
    }

    suspend fun updateProvider(provider: String) {
        context.dataStore.edit { prefs ->
            prefs[Keys.PROVIDER] = provider
        }
    }

    suspend fun updateModel(model: String) {
        context.dataStore.edit { prefs ->
            prefs[Keys.MODEL] = model
        }
    }

    suspend fun updateAutoStart(enabled: Boolean) {
        context.dataStore.edit { prefs ->
            prefs[Keys.AUTO_START] = enabled
        }
    }

    // --- Encrypted API key storage ---

    private fun saveApiKey(apiKey: String) {
        encryptedPrefs.edit().putString("api_key", apiKey).apply()
    }

    private fun getApiKey(): String {
        return encryptedPrefs.getString("api_key", "") ?: ""
    }

    /** True when a non-blank API key has been stored. */
    fun hasApiKey(): Boolean {
        return getApiKey().isNotBlank()
    }

    /** Remove the stored API key (e.g. on logout/reset). */
    fun clearApiKey() {
        encryptedPrefs.edit().remove("api_key").apply()
    }
}
/**
 * Settings data class with all configurable options.
 * Defaults here mirror the per-field fallbacks SettingsRepository applies
 * when a DataStore key is unset.
 */
data class ZeroClawSettings(
    val provider: String = "anthropic", // LLM provider id
    val model: String = "claude-sonnet-4-5", // provider-specific model name
    val apiKey: String = "", // held in encrypted prefs; blank means not configured
    val autoStart: Boolean = false, // opt-in auto start (boot / heartbeat scheduling)
    val notificationsEnabled: Boolean = true,
    val systemPrompt: String = "", // empty means no custom prompt
    val heartbeatIntervalMinutes: Int = 15 // periodic heartbeat worker interval
) {
    // Minimal readiness check: only an API key is required to run.
    fun isConfigured(): Boolean = apiKey.isNotBlank()
}

View File

@ -3,27 +3,79 @@ package ai.zeroclaw.android.receiver
import android.content.BroadcastReceiver
import android.content.Context
import android.content.Intent
import android.os.Build
import ai.zeroclaw.android.ZeroClawApp
import ai.zeroclaw.android.service.ZeroClawService
import ai.zeroclaw.android.worker.HeartbeatWorker
import kotlinx.coroutines.CoroutineScope
import kotlinx.coroutines.Dispatchers
import kotlinx.coroutines.flow.first
import kotlinx.coroutines.launch
/**
* Receives boot completed broadcast to auto-start ZeroClaw.
*
* Requires user opt-in via settings.
*
* Also handles:
* - Package updates (MY_PACKAGE_REPLACED)
* - Quick boot on some devices (QUICKBOOT_POWERON)
*
* Respects user's auto-start preference from settings.
*/
class BootReceiver : BroadcastReceiver() {
override fun onReceive(context: Context, intent: Intent) {
if (intent.action == Intent.ACTION_BOOT_COMPLETED ||
intent.action == "android.intent.action.QUICKBOOT_POWERON") {
// TODO: Check if auto-start is enabled in preferences
// val prefs = context.getSharedPreferences("zeroclaw", Context.MODE_PRIVATE)
// if (!prefs.getBoolean("auto_start", false)) return
val serviceIntent = Intent(context, ZeroClawService::class.java).apply {
action = ZeroClawService.ACTION_START
when (intent.action) {
Intent.ACTION_BOOT_COMPLETED,
"android.intent.action.QUICKBOOT_POWERON",
Intent.ACTION_MY_PACKAGE_REPLACED -> {
handleBoot(context)
}
context.startForegroundService(serviceIntent)
}
}
private fun handleBoot(context: Context) {
// Use goAsync() to get more time for async operations
val pendingResult = goAsync()
CoroutineScope(Dispatchers.IO).launch {
try {
val app = context.applicationContext as? ZeroClawApp
val settingsRepo = app?.settingsRepository ?: return@launch
val settings = settingsRepo.settings.first()
// Only auto-start if enabled and configured
if (settings.autoStart && settings.isConfigured()) {
// Start the foreground service
val serviceIntent = Intent(context, ZeroClawService::class.java).apply {
action = ZeroClawService.ACTION_START
}
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.O) {
context.startForegroundService(serviceIntent)
} else {
context.startService(serviceIntent)
}
// Schedule heartbeat worker
HeartbeatWorker.scheduleHeartbeat(
context,
settings.heartbeatIntervalMinutes.toLong()
)
android.util.Log.i(TAG, "ZeroClaw auto-started on boot")
} else {
android.util.Log.d(TAG, "Auto-start disabled or not configured, skipping")
}
} catch (e: Exception) {
android.util.Log.e(TAG, "Error during boot handling", e)
} finally {
pendingResult.finish()
}
}
}
companion object {
private const val TAG = "BootReceiver"
}
}

View File

@ -15,7 +15,7 @@ import kotlinx.coroutines.flow.StateFlow
/**
* Foreground service that keeps ZeroClaw running in the background.
*
*
* This service:
* - Runs the ZeroClaw Rust binary via JNI
* - Maintains a persistent notification
@ -23,27 +23,27 @@ import kotlinx.coroutines.flow.StateFlow
* - Survives app backgrounding (within Android limits)
*/
class ZeroClawService : Service() {
// Binder handed to bound clients (in-process binding only).
private val binder = LocalBinder()
// Service-scoped coroutine scope; SupervisorJob keeps sibling jobs alive
// when one fails. Cancelled in onDestroy.
private val scope = CoroutineScope(Dispatchers.Default + SupervisorJob())
// Current agent status, exposed read-only to observers.
private val _status = MutableStateFlow(Status.Stopped)
val status: StateFlow<Status> = _status
// Most recent message from the agent, if any.
private val _lastMessage = MutableStateFlow<String?>(null)
val lastMessage: StateFlow<String?> = _lastMessage
// Simple same-process binder that exposes the service instance directly.
inner class LocalBinder : Binder() {
fun getService(): ZeroClawService = this@ZeroClawService
}
override fun onBind(intent: Intent): IBinder = binder
override fun onCreate() {
super.onCreate()
// Promote to foreground immediately so the service is not killed while
// starting up (required shortly after startForegroundService on API 26+).
startForeground(NOTIFICATION_ID, createNotification())
}
override fun onStartCommand(intent: Intent?, flags: Int, startId: Int): Int {
when (intent?.action) {
ACTION_START -> startAgent()
@ -52,24 +52,24 @@ class ZeroClawService : Service() {
}
return START_STICKY
}
override fun onDestroy() {
// Cancel all in-flight coroutines before tearing the service down.
scope.cancel()
super.onDestroy()
}
private fun startAgent() {
if (_status.value == Status.Running) return
_status.value = Status.Starting
scope.launch {
try {
// TODO: Initialize and start ZeroClaw native library
// ZeroClawBridge.start(configPath)
_status.value = Status.Running
// TODO: Start message loop
// while (isActive) {
// val message = ZeroClawBridge.pollMessage()
@ -80,20 +80,20 @@ class ZeroClawService : Service() {
}
}
}
// Stops the agent. Native teardown is not wired up yet; currently only
// flips the published status.
private fun stopAgent() {
scope.launch {
// TODO: ZeroClawBridge.stop()
_status.value = Status.Stopped
}
}
// Forwards a user message to the agent. No-op until the bridge exists.
private fun sendMessage(message: String) {
scope.launch {
// TODO: ZeroClawBridge.sendMessage(message)
}
}
private fun createNotification(): Notification {
val pendingIntent = PendingIntent.getActivity(
this,
@ -101,7 +101,7 @@ class ZeroClawService : Service() {
Intent(this, MainActivity::class.java),
PendingIntent.FLAG_IMMUTABLE
)
return NotificationCompat.Builder(this, ZeroClawApp.CHANNEL_ID)
.setContentTitle("ZeroClaw is running")
.setContentText("Your AI assistant is active")
@ -111,7 +111,7 @@ class ZeroClawService : Service() {
.setSilent(true)
.build()
}
companion object {
private const val NOTIFICATION_ID = 1001
const val ACTION_START = "ai.zeroclaw.action.START"
@ -119,7 +119,7 @@ class ZeroClawService : Service() {
const val ACTION_SEND = "ai.zeroclaw.action.SEND"
const val EXTRA_MESSAGE = "message"
}
sealed class Status {
object Stopped : Status()
object Starting : Status()

View File

@ -0,0 +1,120 @@
package ai.zeroclaw.android.tile
import android.app.PendingIntent
import android.content.Intent
import android.os.Build
import android.service.quicksettings.Tile
import android.service.quicksettings.TileService
import ai.zeroclaw.android.MainActivity
import ai.zeroclaw.android.service.ZeroClawService
/**
* Quick Settings tile for ZeroClaw.
*
* Allows users to:
* - See agent status at a glance
* - Toggle agent on/off from notification shade
* - Quick access to the app
*/
class ZeroClawTileService : TileService() {

    override fun onStartListening() {
        super.onStartListening()
        updateTile()
    }

    override fun onClick() {
        super.onClick()
        val tile = qsTile ?: return
        when (tile.state) {
            Tile.STATE_ACTIVE -> {
                // Stop the agent
                stopAgent()
                tile.state = Tile.STATE_INACTIVE
                setSubtitleCompat(tile, "Stopped")
            }
            Tile.STATE_INACTIVE -> {
                // Start the agent
                startAgent()
                tile.state = Tile.STATE_ACTIVE
                setSubtitleCompat(tile, "Running")
            }
            else -> {
                // STATE_UNAVAILABLE: open app for configuration
                openApp()
            }
        }
        tile.updateTile()
    }

    override fun onTileAdded() {
        super.onTileAdded()
        updateTile()
    }

    /** Sync tile state, label, and subtitle with the (reported) agent status. */
    private fun updateTile() {
        val tile = qsTile ?: return
        // TODO: Check actual agent status from bridge
        // val isRunning = ZeroClawBridge.isRunning()
        val isRunning = isServiceRunning()
        tile.state = if (isRunning) Tile.STATE_ACTIVE else Tile.STATE_INACTIVE
        tile.label = "ZeroClaw"
        setSubtitleCompat(tile, if (isRunning) "Running" else "Tap to start")
        tile.updateTile()
    }

    /**
     * Tile.subtitle only exists on API 29 (Q) and above; invoking the setter
     * on older platforms throws NoSuchMethodError at runtime, so every
     * subtitle write is funneled through this version-guarded helper.
     */
    private fun setSubtitleCompat(tile: Tile, text: String) {
        if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.Q) {
            tile.subtitle = text
        }
    }

    /** Start the agent foreground service (startForegroundService is API 26+). */
    private fun startAgent() {
        val intent = Intent(this, ZeroClawService::class.java).apply {
            action = ZeroClawService.ACTION_START
        }
        if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.O) {
            startForegroundService(intent)
        } else {
            startService(intent)
        }
    }

    /** Ask the (already-foreground) service to stop the agent. */
    private fun stopAgent() {
        val intent = Intent(this, ZeroClawService::class.java).apply {
            action = ZeroClawService.ACTION_STOP
        }
        startService(intent)
    }

    /** Open MainActivity and collapse the notification shade. */
    private fun openApp() {
        val intent = Intent(this, MainActivity::class.java).apply {
            flags = Intent.FLAG_ACTIVITY_NEW_TASK
        }
        if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.UPSIDE_DOWN_CAKE) {
            // API 34+ requires PendingIntent overload
            val pendingIntent = PendingIntent.getActivity(
                this,
                0,
                intent,
                PendingIntent.FLAG_UPDATE_CURRENT or PendingIntent.FLAG_IMMUTABLE
            )
            startActivityAndCollapse(pendingIntent)
        } else {
            @Suppress("DEPRECATION")
            startActivityAndCollapse(intent)
        }
    }

    /** Whether the agent service is running. Placeholder until bridged. */
    private fun isServiceRunning(): Boolean {
        // Simple check - in production would check actual service state
        // TODO: Implement proper service state checking
        return false
    }
}

View File

@ -10,19 +10,14 @@ import androidx.compose.material3.*
import androidx.compose.runtime.*
import androidx.compose.ui.Alignment
import androidx.compose.ui.Modifier
import androidx.compose.ui.platform.LocalContext
import androidx.compose.ui.text.input.KeyboardType
import androidx.compose.ui.text.input.PasswordVisualTransformation
import androidx.compose.ui.text.input.VisualTransformation
import androidx.compose.ui.unit.dp
data class ZeroClawSettings(
val provider: String = "anthropic",
val model: String = "claude-sonnet-4-5",
val apiKey: String = "",
val autoStart: Boolean = false,
val notificationsEnabled: Boolean = true,
val systemPrompt: String = ""
)
import androidx.lifecycle.repeatOnLifecycle
import ai.zeroclaw.android.data.ZeroClawSettings
import ai.zeroclaw.android.util.BatteryUtils
@OptIn(ExperimentalMaterial3Api::class)
@Composable
@ -34,7 +29,7 @@ fun SettingsScreen(
) {
var showApiKey by remember { mutableStateOf(false) }
var localSettings by remember(settings) { mutableStateOf(settings) }
Scaffold(
topBar = {
TopAppBar(
@ -118,7 +113,7 @@ fun SettingsScreen(
)
else -> listOf("auto" to "Auto")
}
ExposedDropdownMenuBox(
expanded = modelExpanded,
onExpandedChange = { modelExpanded = it }
@ -170,7 +165,7 @@ fun SettingsScreen(
modifier = Modifier.fillMaxWidth(),
singleLine = true
)
Text(
text = "Your API key is stored securely in Android Keystore",
style = MaterialTheme.typography.bodySmall,
@ -187,7 +182,7 @@ fun SettingsScreen(
checked = localSettings.autoStart,
onCheckedChange = { localSettings = localSettings.copy(autoStart = it) }
)
SettingsSwitch(
title = "Notifications",
description = "Show agent messages as notifications",
@ -210,6 +205,51 @@ fun SettingsScreen(
)
}
// Battery Optimization Section
val context = LocalContext.current
val lifecycleOwner = androidx.lifecycle.compose.LocalLifecycleOwner.current
var isOptimized by remember { mutableStateOf(BatteryUtils.isIgnoringBatteryOptimizations(context)) }
// Refresh battery optimization state when screen resumes
LaunchedEffect(lifecycleOwner) {
lifecycleOwner.lifecycle.repeatOnLifecycle(androidx.lifecycle.Lifecycle.State.RESUMED) {
isOptimized = BatteryUtils.isIgnoringBatteryOptimizations(context)
}
}
SettingsSection(title = "Battery") {
Row(
modifier = Modifier.fillMaxWidth(),
horizontalArrangement = Arrangement.SpaceBetween,
verticalAlignment = Alignment.CenterVertically
) {
Column(modifier = Modifier.weight(1f)) {
Text("Battery Optimization")
Text(
text = if (isOptimized) "Unrestricted ✓" else "Restricted - may affect background tasks",
style = MaterialTheme.typography.bodySmall,
color = if (isOptimized) MaterialTheme.colorScheme.primary else MaterialTheme.colorScheme.error
)
}
if (!isOptimized) {
TextButton(onClick = {
BatteryUtils.requestBatteryOptimizationExemption(context)
}) {
Text("Fix")
}
}
}
if (BatteryUtils.hasAggressiveBatteryOptimization()) {
Spacer(modifier = Modifier.height(8.dp))
Text(
text = "⚠️ Your device may have aggressive battery management. If ZeroClaw stops working in background, check manufacturer battery settings.",
style = MaterialTheme.typography.bodySmall,
color = MaterialTheme.colorScheme.onSurfaceVariant
)
}
}
// About Section
SettingsSection(title = "About") {
Row(

View File

@ -58,7 +58,7 @@ fun ZeroClawTheme(
darkTheme -> DarkColorScheme
else -> LightColorScheme
}
val view = LocalView.current
if (!view.isInEditMode) {
SideEffect {

View File

@ -0,0 +1,141 @@
package ai.zeroclaw.android.util
import android.content.Context
import android.content.Intent
import android.net.Uri
import android.os.Build
import android.os.PowerManager
import android.provider.Settings
/**
* Utilities for handling battery optimization.
*
* ZeroClaw needs to run reliably in the background for:
* - Heartbeat checks
* - Cron job execution
* - Notification monitoring
*
* This helper manages battery optimization exemption requests.
*/
object BatteryUtils {
    /**
     * Check if app is exempt from battery optimization.
     */
    fun isIgnoringBatteryOptimizations(context: Context): Boolean {
        val powerManager = context.getSystemService(Context.POWER_SERVICE) as PowerManager
        return powerManager.isIgnoringBatteryOptimizations(context.packageName)
    }

    /**
     * Request battery optimization exemption.
     *
     * Note: This shows a system dialog - use sparingly and explain to user first.
     * Google Play policy requires justification for this permission.
     */
    fun requestBatteryOptimizationExemption(context: Context) {
        if (isIgnoringBatteryOptimizations(context)) {
            return // Already exempt
        }
        val intent = Intent(Settings.ACTION_REQUEST_IGNORE_BATTERY_OPTIMIZATIONS).apply {
            data = Uri.parse("package:${context.packageName}")
            flags = Intent.FLAG_ACTIVITY_NEW_TASK
        }
        try {
            context.startActivity(intent)
        } catch (_: Exception) {
            // Some OEM builds strip this settings action entirely.
            // Fallback to battery settings
            openBatterySettings(context)
        }
    }

    /**
     * Open battery optimization settings page
     */
    fun openBatterySettings(context: Context) {
        val intent = Intent(Settings.ACTION_IGNORE_BATTERY_OPTIMIZATION_SETTINGS).apply {
            flags = Intent.FLAG_ACTIVITY_NEW_TASK
        }
        try {
            context.startActivity(intent)
        } catch (_: Exception) {
            // Fallback to general settings
            openAppSettings(context)
        }
    }

    /**
     * Open app's settings page. This is the last link in the fallback chain,
     * so failures are swallowed rather than crashing the caller — there is
     * nothing further to fall back to.
     */
    fun openAppSettings(context: Context) {
        val intent = Intent(Settings.ACTION_APPLICATION_DETAILS_SETTINGS).apply {
            data = Uri.parse("package:${context.packageName}")
            flags = Intent.FLAG_ACTIVITY_NEW_TASK
        }
        try {
            context.startActivity(intent)
        } catch (_: Exception) {
            // No activity can handle the request; nothing else to open.
        }
    }

    /**
     * Check if device has aggressive battery optimization (common on Chinese OEMs).
     *
     * NOTE(review): this uses an exact match on Build.MANUFACTURER while
     * getManufacturerBatteryIntent uses contains() — confirm sub-brands like
     * "redmi"/"poco" actually report those manufacturer strings.
     */
    fun hasAggressiveBatteryOptimization(): Boolean {
        val manufacturer = Build.MANUFACTURER.lowercase()
        return manufacturer in listOf(
            "xiaomi", "redmi", "poco",
            "huawei", "honor",
            "oppo", "realme", "oneplus",
            "vivo", "iqoo",
            "samsung", // Some Samsung models
            "meizu",
            "asus"
        )
    }

    /**
     * Get manufacturer-specific battery settings intent.
     *
     * Returns null for manufacturers without a known vendor battery screen;
     * the component names below are undocumented and may vary per OS version,
     * so callers should handle start failures.
     */
    fun getManufacturerBatteryIntent(context: Context): Intent? {
        val manufacturer = Build.MANUFACTURER.lowercase()
        return when {
            manufacturer.contains("xiaomi") || manufacturer.contains("redmi") -> {
                Intent().apply {
                    component = android.content.ComponentName(
                        "com.miui.powerkeeper",
                        "com.miui.powerkeeper.ui.HiddenAppsConfigActivity"
                    )
                    putExtra("package_name", context.packageName)
                    putExtra("package_label", "ZeroClaw")
                }
            }
            manufacturer.contains("huawei") || manufacturer.contains("honor") -> {
                Intent().apply {
                    component = android.content.ComponentName(
                        "com.huawei.systemmanager",
                        "com.huawei.systemmanager.startupmgr.ui.StartupNormalAppListActivity"
                    )
                }
            }
            manufacturer.contains("samsung") -> {
                Intent().apply {
                    component = android.content.ComponentName(
                        "com.samsung.android.lool",
                        "com.samsung.android.sm.battery.ui.BatteryActivity"
                    )
                }
            }
            manufacturer.contains("oppo") || manufacturer.contains("realme") -> {
                Intent().apply {
                    component = android.content.ComponentName(
                        "com.coloros.safecenter",
                        "com.coloros.safecenter.permission.startup.StartupAppListActivity"
                    )
                }
            }
            else -> null
        }
    }
}

View File

@ -0,0 +1,128 @@
package ai.zeroclaw.android.widget
import ai.zeroclaw.android.MainActivity
import ai.zeroclaw.android.R
import ai.zeroclaw.android.service.ZeroClawService
import android.app.PendingIntent
import android.appwidget.AppWidgetManager
import android.appwidget.AppWidgetProvider
import android.content.Context
import android.content.Intent
import android.os.Build
import android.widget.RemoteViews
/**
* Home screen widget for ZeroClaw.
*
* Features:
* - Shows agent status (running/stopped)
* - Quick action button to toggle or send message
* - Tap to open app
*
* Widget sizes:
* - Small (2x1): Status + toggle button
* - Medium (4x1): Status + quick message
* - Large (4x2): Status + recent message + input
*/
class ZeroClawWidget : AppWidgetProvider() {

    override fun onUpdate(
        context: Context,
        appWidgetManager: AppWidgetManager,
        appWidgetIds: IntArray
    ) {
        // Refresh every placed instance of this widget.
        for (appWidgetId in appWidgetIds) {
            updateAppWidget(context, appWidgetManager, appWidgetId)
        }
    }

    override fun onEnabled(context: Context) {
        // First widget placed
    }

    override fun onDisabled(context: Context) {
        // Last widget removed
    }

    override fun onReceive(context: Context, intent: Intent) {
        super.onReceive(context, intent)
        when (intent.action) {
            ACTION_TOGGLE -> {
                toggleAgent(context)
            }
            ACTION_QUICK_MESSAGE -> {
                openAppWithMessage(context, intent.getStringExtra(EXTRA_MESSAGE))
            }
        }
    }

    /** Start the agent service from a widget tap. */
    private fun toggleAgent(context: Context) {
        // TODO: Check actual status and toggle
        val serviceIntent = Intent(context, ZeroClawService::class.java).apply {
            action = ZeroClawService.ACTION_START
        }
        // startForegroundService only exists on API 26+; mirror the version
        // guard used by BootReceiver and ZeroClawTileService.
        if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.O) {
            context.startForegroundService(serviceIntent)
        } else {
            context.startService(serviceIntent)
        }
    }

    /** Open MainActivity, optionally carrying a pre-filled message extra. */
    private fun openAppWithMessage(context: Context, message: String?) {
        val intent = Intent(context, MainActivity::class.java).apply {
            flags = Intent.FLAG_ACTIVITY_NEW_TASK or Intent.FLAG_ACTIVITY_CLEAR_TOP
            message?.let { putExtra(EXTRA_MESSAGE, it) }
        }
        context.startActivity(intent)
    }

    companion object {
        const val ACTION_TOGGLE = "ai.zeroclaw.widget.TOGGLE"
        const val ACTION_QUICK_MESSAGE = "ai.zeroclaw.widget.QUICK_MESSAGE"
        const val EXTRA_MESSAGE = "message"

        /** Render one widget instance: status text plus tap/toggle intents. */
        internal fun updateAppWidget(
            context: Context,
            appWidgetManager: AppWidgetManager,
            appWidgetId: Int
        ) {
            // Create RemoteViews
            val views = RemoteViews(context.packageName, R.layout.widget_zeroclaw)
            // Set status text
            // TODO: Get actual status from bridge
            val isRunning = false
            views.setTextViewText(
                R.id.widget_status,
                if (isRunning) "🟢 Running" else "⚪ Stopped"
            )
            // Open app on tap
            val openIntent = Intent(context, MainActivity::class.java)
            val openPendingIntent = PendingIntent.getActivity(
                context, 0, openIntent,
                PendingIntent.FLAG_UPDATE_CURRENT or PendingIntent.FLAG_IMMUTABLE
            )
            views.setOnClickPendingIntent(R.id.widget_container, openPendingIntent)
            // Toggle button broadcasts back to this receiver.
            val toggleIntent = Intent(context, ZeroClawWidget::class.java).apply {
                action = ACTION_TOGGLE
            }
            val togglePendingIntent = PendingIntent.getBroadcast(
                context, 1, toggleIntent,
                PendingIntent.FLAG_UPDATE_CURRENT or PendingIntent.FLAG_IMMUTABLE
            )
            views.setOnClickPendingIntent(R.id.widget_toggle_button, togglePendingIntent)
            // Update widget
            appWidgetManager.updateAppWidget(appWidgetId, views)
        }

        /**
         * Request widget update from anywhere in the app
         */
        fun requestUpdate(context: Context) {
            val intent = Intent(context, ZeroClawWidget::class.java).apply {
                action = AppWidgetManager.ACTION_APPWIDGET_UPDATE
            }
            context.sendBroadcast(intent)
        }
    }
}

View File

@ -0,0 +1,141 @@
package ai.zeroclaw.android.worker
import android.content.Context
import androidx.work.*
import kotlinx.coroutines.Dispatchers
import kotlinx.coroutines.withContext
import java.util.concurrent.TimeUnit
/**
* WorkManager worker that runs periodic heartbeat checks.
*
* This handles:
* - Cron job execution
* - Health checks
* - Scheduled agent tasks
*
* Respects Android's Doze mode and battery optimization.
*/
class HeartbeatWorker(
context: Context,
params: WorkerParameters
) : CoroutineWorker(context, params) {
// WorkManager entry point. Dispatched onto Dispatchers.IO because the
// eventual bridge calls will perform I/O.
override suspend fun doWork(): Result = withContext(Dispatchers.IO) {
try {
// Get task type from input data
val taskType = inputData.getString(KEY_TASK_TYPE) ?: TASK_HEARTBEAT
when (taskType) {
TASK_HEARTBEAT -> runHeartbeat()
TASK_CRON -> runCronJob()
TASK_HEALTH_CHECK -> runHealthCheck()
else -> runHeartbeat()
}
Result.success()
} catch (e: Exception) {
// Retry (with the configured backoff) for the first few attempts,
// then fail and surface the error message in the output data.
if (runAttemptCount < 3) {
Result.retry()
} else {
Result.failure(workDataOf(KEY_ERROR to e.message))
}
}
}
// Periodic liveness ping. Bridge call not wired up yet; logs only.
private suspend fun runHeartbeat() {
// TODO: Connect to ZeroClaw bridge
// val bridge = ZeroClawBridge
// bridge.sendHeartbeat()
// For now, just log
android.util.Log.d(TAG, "Heartbeat executed")
}
// Executes one scheduled cron job identified by input data.
private suspend fun runCronJob() {
val jobId = inputData.getString(KEY_JOB_ID)
// NOTE(review): `prompt` is read but unused until the bridge call below
// is implemented.
val prompt = inputData.getString(KEY_PROMPT)
// TODO: Execute cron job via bridge
// ZeroClawBridge.executeCronJob(jobId, prompt)
android.util.Log.d(TAG, "Cron job executed: $jobId")
}
// Agent health probe. Bridge call not wired up yet; logs only.
private suspend fun runHealthCheck() {
// TODO: Check agent status
// val status = ZeroClawBridge.getStatus()
android.util.Log.d(TAG, "Health check executed")
}
companion object {
private const val TAG = "HeartbeatWorker"
// Input/output data keys.
const val KEY_TASK_TYPE = "task_type"
const val KEY_JOB_ID = "job_id"
const val KEY_PROMPT = "prompt"
const val KEY_ERROR = "error"
// Task type values routed in doWork().
const val TASK_HEARTBEAT = "heartbeat"
const val TASK_CRON = "cron"
const val TASK_HEALTH_CHECK = "health_check"
// Unique name so repeated scheduling replaces rather than duplicates.
const val WORK_NAME_HEARTBEAT = "zeroclaw_heartbeat"
/**
* Schedule periodic heartbeat (every 15 minutes minimum for WorkManager)
*/
fun scheduleHeartbeat(context: Context, intervalMinutes: Long = 15) {
// WorkManager enforces 15-minute minimum for periodic work
val effectiveInterval = maxOf(intervalMinutes, 15L)
// Heartbeats are pointless offline, so require connectivity.
val constraints = Constraints.Builder()
.setRequiredNetworkType(NetworkType.CONNECTED)
.build()
val request = PeriodicWorkRequestBuilder<HeartbeatWorker>(
effectiveInterval, TimeUnit.MINUTES
)
.setConstraints(constraints)
.setInputData(workDataOf(KEY_TASK_TYPE to TASK_HEARTBEAT))
.setBackoffCriteria(BackoffPolicy.EXPONENTIAL, 1, TimeUnit.MINUTES)
.build()
// Use UPDATE policy to apply new interval settings immediately
WorkManager.getInstance(context).enqueueUniquePeriodicWork(
WORK_NAME_HEARTBEAT,
ExistingPeriodicWorkPolicy.UPDATE,
request
)
}
/**
* Schedule a one-time cron job
*/
fun scheduleCronJob(
context: Context,
jobId: String,
prompt: String,
delayMs: Long
) {
val request = OneTimeWorkRequestBuilder<HeartbeatWorker>()
.setInputData(workDataOf(
KEY_TASK_TYPE to TASK_CRON,
KEY_JOB_ID to jobId,
KEY_PROMPT to prompt
))
.setInitialDelay(delayMs, TimeUnit.MILLISECONDS)
.build()
WorkManager.getInstance(context).enqueue(request)
}
/**
* Cancel heartbeat
*/
fun cancelHeartbeat(context: Context) {
WorkManager.getInstance(context).cancelUniqueWork(WORK_NAME_HEARTBEAT)
}
}
}

View File

@ -0,0 +1,9 @@
<?xml version="1.0" encoding="utf-8"?>
<shape xmlns:android="http://schemas.android.com/apk/res/android"
android:shape="rectangle">
<solid android:color="#CC1A1A2E" />
<corners android:radius="16dp" />
<stroke
android:width="1dp"
android:color="#33FFFFFF" />
</shape>

View File

@ -0,0 +1,5 @@
<?xml version="1.0" encoding="utf-8"?>
<shape xmlns:android="http://schemas.android.com/apk/res/android"
android:shape="oval">
<solid android:color="#E85C0D" />
</shape>

View File

@ -0,0 +1,48 @@
<?xml version="1.0" encoding="utf-8"?>
<!-- ZeroClaw home-screen widget: status column on the left, toggle button
     on the right. Status/description text comes from string resources
     (widget_status_stopped, cd_toggle_agent) instead of hardcoded literals
     so it stays localizable and consistent with ZeroClawWidget.kt. -->
<LinearLayout xmlns:android="http://schemas.android.com/apk/res/android"
    android:id="@+id/widget_container"
    android:layout_width="match_parent"
    android:layout_height="match_parent"
    android:orientation="horizontal"
    android:padding="12dp"
    android:background="@drawable/widget_background"
    android:gravity="center_vertical">

    <!-- Status Section -->
    <LinearLayout
        android:layout_width="0dp"
        android:layout_height="wrap_content"
        android:layout_weight="1"
        android:orientation="vertical">

        <TextView
            android:layout_width="wrap_content"
            android:layout_height="wrap_content"
            android:text="ZeroClaw"
            android:textColor="#FFFFFF"
            android:textSize="14sp"
            android:textStyle="bold" />

        <TextView
            android:id="@+id/widget_status"
            android:layout_width="wrap_content"
            android:layout_height="wrap_content"
            android:text="@string/widget_status_stopped"
            android:textColor="#B0B0B0"
            android:textSize="12sp"
            android:layout_marginTop="2dp" />
    </LinearLayout>

    <!-- Toggle Button -->
    <ImageButton
        android:id="@+id/widget_toggle_button"
        android:layout_width="48dp"
        android:layout_height="48dp"
        android:src="@android:drawable/ic_media_play"
        android:background="@drawable/widget_button_background"
        android:contentDescription="@string/cd_toggle_agent"
        android:scaleType="centerInside"
        android:padding="12dp" />
</LinearLayout>

View File

@ -5,4 +5,14 @@
<string name="notification_channel_agent">Agent Messages</string>
<string name="service_notification_title">ZeroClaw is running</string>
<string name="service_notification_text">Your AI assistant is active</string>
<!-- Widget -->
<string name="widget_description">Quick access to your AI assistant</string>
<string name="widget_status_running">🟢 Running</string>
<string name="widget_status_stopped">⚪ Stopped</string>
<!-- Accessibility -->
<string name="cd_toggle_agent">Toggle agent on or off</string>
<string name="cd_open_settings">Open settings</string>
<string name="cd_send_message">Send message to agent</string>
</resources>

View File

@ -0,0 +1,18 @@
<?xml version="1.0" encoding="utf-8"?>
<appwidget-provider xmlns:android="http://schemas.android.com/apk/res/android"
android:minWidth="180dp"
android:minHeight="40dp"
android:targetCellWidth="3"
android:targetCellHeight="1"
android:minResizeWidth="110dp"
android:minResizeHeight="40dp"
android:maxResizeWidth="530dp"
android:maxResizeHeight="110dp"
android:resizeMode="horizontal|vertical"
android:widgetCategory="home_screen"
android:initialLayout="@layout/widget_zeroclaw"
android:previewLayout="@layout/widget_zeroclaw"
android:updatePeriodMillis="1800000"
android:description="@string/widget_description"
android:widgetFeatures="reconfigurable">
</appwidget-provider>

View File

@ -0,0 +1,111 @@
# Android CI Workflow
#
# This workflow builds the Android client and native Rust library.
# Place in .github/workflows/ci-android.yml when ready for CI.
name: Android Build
on:
push:
branches: [main, dev]
paths:
- 'clients/android/**'
- 'clients/android-bridge/**'
pull_request:
paths:
- 'clients/android/**'
- 'clients/android-bridge/**'
env:
CARGO_TERM_COLOR: always
jobs:
build-native:
name: Build Native Library
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Install Rust
uses: dtolnay/rust-toolchain@stable
with:
targets: aarch64-linux-android,armv7-linux-androideabi,x86_64-linux-android
- name: Install cargo-ndk
run: cargo install cargo-ndk
- name: Setup Android NDK
uses: android-actions/setup-android@v3
with:
packages: 'ndk;25.2.9519653'
- name: Build native library
run: |
export ANDROID_NDK_HOME=$ANDROID_SDK_ROOT/ndk/25.2.9519653
cargo ndk -t arm64-v8a -t armeabi-v7a -t x86_64 \
-o clients/android/app/src/main/jniLibs \
build --release -p zeroclaw-android-bridge
- name: Upload native libs
uses: actions/upload-artifact@v4
with:
name: native-libs
path: clients/android/app/src/main/jniLibs/
build-android:
name: Build Android APK
needs: build-native
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Download native libs
uses: actions/download-artifact@v4
with:
name: native-libs
path: clients/android/app/src/main/jniLibs/
- name: Setup JDK
uses: actions/setup-java@v4
with:
distribution: 'temurin'
java-version: '17'
- name: Setup Gradle
uses: gradle/actions/setup-gradle@v3
- name: Build Debug APK
working-directory: clients/android
run: ./gradlew assembleDebug
- name: Upload APK
uses: actions/upload-artifact@v4
with:
name: zeroclaw-debug-apk
path: clients/android/app/build/outputs/apk/debug/*.apk
lint:
name: Lint Android
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Setup JDK
uses: actions/setup-java@v4
with:
distribution: 'temurin'
java-version: '17'
- name: Setup Gradle
uses: gradle/actions/setup-gradle@v3
- name: Run Lint
working-directory: clients/android
run: ./gradlew lint
- name: Upload Lint Report
if: always()
uses: actions/upload-artifact@v4
with:
name: lint-report
path: clients/android/app/build/reports/lint-results-*.html

View File

@ -41,6 +41,7 @@ Last refreshed: **February 25, 2026**.
- [reference/README.md](reference/README.md)
- [commands-reference.md](commands-reference.md)
- [cron-scheduling.md](cron-scheduling.md)
- [providers-reference.md](providers-reference.md)
- [channels-reference.md](channels-reference.md)
- [nextcloud-talk-setup.md](nextcloud-talk-setup.md)

View File

@ -89,12 +89,15 @@ cargo build --release --target aarch64-linux-android
## Troubleshooting
### "Permission denied"
```bash
chmod +x zeroclaw
```
### "not found" or linker errors
Make sure you downloaded the correct architecture for your device.
### Old Android (4.x)
Use the `armv7-linux-androideabi` build with API level 16+.

View File

@ -16,7 +16,7 @@ Merge-blocking checks should stay small and deterministic. Optional checks are u
- Additional behavior: `lint`, `test`, and `build` run in parallel (all depend only on `changes` job) to minimize critical path duration
- Additional behavior: rust-cache is shared between `lint` and `test` via unified `prefix-key` (`ci-run-check`) to reduce redundant compilation; `build` uses a separate key for release-fast profile
- Additional behavior: flake detection is integrated into the `test` job via single-retry probe; emits `test-flake-probe` artifact when flake is suspected; optional blocking can be enabled with repository variable `CI_BLOCK_ON_FLAKE_SUSPECTED=true`
- Additional behavior: PRs that change `.github/workflows/**` require at least one approving review from a login in `WORKFLOW_OWNER_LOGINS` (repository variable fallback: `theonlyhennygod,willsarg`)
- Additional behavior: PRs that change CI/CD-governed paths require an explicit approving review from `@chumyin` (`.github/workflows/**`, `.github/codeql/**`, `.github/connectivity/**`, `.github/release/**`, `.github/security/**`, `.github/actionlint.yaml`, `.github/dependabot.yml`, `scripts/ci/**`, and CI governance docs)
- Additional behavior: PRs that change root license files (`LICENSE-APACHE`, `LICENSE-MIT`) must be authored by `willsarg`
- Additional behavior: when lint/docs gates fail on PRs, CI posts an actionable feedback comment with failing gate names and local fix commands
- Merge gate: `CI Required Gate`
@ -25,8 +25,6 @@ Merge-blocking checks should stay small and deterministic. Optional checks are u
- Recommended for workflow-changing PRs
- `.github/workflows/pr-intake-checks.yml` (`PR Intake Checks`)
- Purpose: safe pre-CI PR checks (template completeness, added-line tabs/trailing-whitespace/conflict markers) with immediate sticky feedback comment
- `.github/workflows/main-promotion-gate.yml` (`Main Promotion Gate`)
- Purpose: enforce stable-branch policy by allowing only `dev` -> `main` PR promotion authored by `willsarg` or `theonlyhennygod`
### Non-Blocking but Important
@ -65,8 +63,6 @@ Merge-blocking checks should stay small and deterministic. Optional checks are u
- Purpose: build release artifacts in verification mode (manual/scheduled) and publish GitHub releases on tag push or manual publish mode
- `.github/workflows/pr-label-policy-check.yml` (`Label Policy Sanity`)
- Purpose: validate shared contributor-tier policy in `.github/label-policy.json` and ensure label workflows consume that policy
- `.github/workflows/test-rust-build.yml` (`Rust Reusable Job`)
- Purpose: reusable Rust setup/cache + command runner for workflow-call consumers
### Optional Repository Automation
@ -105,8 +101,7 @@ Merge-blocking checks should stay small and deterministic. Optional checks are u
- `Security Audit`: push to `dev` and `main`, PRs to `dev` and `main`, weekly schedule
- `Sec Vorpal Reviewdog`: manual dispatch only
- `Workflow Sanity`: PR/push when `.github/workflows/**`, `.github/*.yml`, or `.github/*.yaml` change
- `Main Promotion Gate`: PRs to `main` only; requires PR author `willsarg`/`theonlyhennygod` and head branch `dev` in the same repository
- `Dependabot`: all update PRs target `dev` (not `main`)
- `Dependabot`: all update PRs target `main` (not `dev`)
- `PR Intake Checks`: `pull_request_target` on opened/reopened/synchronize/ready_for_review
- `Label Policy Sanity`: PR/push when `.github/label-policy.json`, `.github/workflows/pr-labeler.yml`, or `.github/workflows/pr-auto-response.yml` changes
- `PR Labeler`: `pull_request_target` on opened/reopened/synchronize/ready_for_review

View File

@ -2,7 +2,7 @@
This reference is derived from the current CLI surface (`zeroclaw --help`).
Last verified: **February 25, 2026**.
Last verified: **February 28, 2026**.
## Top-Level Commands
@ -116,7 +116,7 @@ Notes:
- `zeroclaw models refresh --provider <ID>`
- `zeroclaw models refresh --force`
`models refresh` currently supports live catalog refresh for provider IDs: `openrouter`, `openai`, `anthropic`, `groq`, `mistral`, `deepseek`, `xai`, `together-ai`, `gemini`, `ollama`, `llamacpp`, `sglang`, `vllm`, `astrai`, `venice`, `fireworks`, `cohere`, `moonshot`, `glm`, `zai`, `qwen`, and `nvidia`.
`models refresh` currently supports live catalog refresh for provider IDs: `openrouter`, `openai`, `anthropic`, `groq`, `mistral`, `deepseek`, `xai`, `together-ai`, `gemini`, `ollama`, `llamacpp`, `sglang`, `vllm`, `astrai`, `venice`, `fireworks`, `cohere`, `moonshot`, `glm`, `zai`, `qwen`, `volcengine` (`doubao`/`ark` aliases), `siliconflow`, and `nvidia`.
### `doctor`

View File

@ -94,6 +94,9 @@ Operational note for container users:
| `max_history_messages` | `50` | Maximum conversation history messages retained per session |
| `parallel_tools` | `false` | Enable parallel tool execution within a single iteration |
| `tool_dispatcher` | `auto` | Tool dispatch strategy |
| `loop_detection_no_progress_threshold` | `3` | Same tool+args producing identical output this many times triggers loop detection. `0` disables |
| `loop_detection_ping_pong_cycles` | `2` | A→B→A→B alternating pattern cycle count threshold. `0` disables |
| `loop_detection_failure_streak` | `3` | Same tool consecutive failure count threshold. `0` disables |
Notes:
@ -101,6 +104,7 @@ Notes:
- If a channel message exceeds this value, the runtime returns: `Agent exceeded maximum tool iterations (<value>)`.
- In CLI, gateway, and channel tool loops, multiple independent tool calls are executed concurrently by default when the pending calls do not require approval gating; result order remains stable.
- `parallel_tools` applies to the `Agent::turn()` API surface. It does not gate the runtime loop used by CLI, gateway, or channel handlers.
- **Loop detection** intervenes before `max_tool_iterations` is exhausted. On first detection the agent receives a self-correction prompt; if the loop persists the agent is stopped early. Detection is result-aware: repeated calls with *different* outputs (genuine progress) do not trigger. Set any threshold to `0` to disable that detector.
## `[security.otp]`
@ -382,6 +386,7 @@ WASM profile templates:
| Key | Default | Purpose |
|---|---|---|
| `reasoning_level` | unset (`None`) | Reasoning effort/level override for providers that support explicit levels (currently OpenAI Codex `/responses`) |
| `transport` | unset (`None`) | Provider transport override (`auto`, `websocket`, `sse`) |
Notes:
@ -389,6 +394,14 @@ Notes:
- When set, overrides `ZEROCLAW_CODEX_REASONING_EFFORT` for OpenAI Codex requests.
- Unset falls back to `ZEROCLAW_CODEX_REASONING_EFFORT` if present, otherwise defaults to `xhigh`.
- If both `provider.reasoning_level` and deprecated `runtime.reasoning_level` are set, provider-level value wins.
- `provider.transport` is normalized case-insensitively (`ws` aliases to `websocket`; `http` aliases to `sse`).
- For OpenAI Codex, default transport mode is `auto` (WebSocket-first with SSE fallback).
- Transport override precedence for OpenAI Codex:
1. `[[model_routes]].transport` (route-specific)
2. `PROVIDER_TRANSPORT` / `ZEROCLAW_PROVIDER_TRANSPORT` / `ZEROCLAW_CODEX_TRANSPORT`
3. `provider.transport`
4. legacy `ZEROCLAW_RESPONSES_WEBSOCKET` (boolean)
- Environment overrides replace configured `provider.transport` when set.
## `[skills]`
@ -668,6 +681,7 @@ Use route hints so integrations can keep stable names while model IDs evolve.
| `model` | _required_ | Model to use with that provider |
| `max_tokens` | unset | Optional per-route output token cap forwarded to provider APIs |
| `api_key` | unset | Optional API key override for this route's provider |
| `transport` | unset | Optional per-route transport override (`auto`, `websocket`, `sse`) |
### `[[embedding_routes]]`

241
docs/cron-scheduling.md Normal file
View File

@ -0,0 +1,241 @@
# Cron & Scheduling System
ZeroClaw includes a full-featured job scheduling system for running tasks on a schedule, at specific times, or at regular intervals.
## Quick Start
```bash
# Add a cron job (runs every day at 9 AM)
zeroclaw cron add '0 9 * * *' 'echo "Good morning!"'
# Add a one-shot reminder (runs in 30 minutes)
zeroclaw cron once 30m 'notify-send "Time is up!"'
# Add an interval job (runs every 5 minutes)
zeroclaw cron add-every 300000 'curl -s http://api.example.com/health'
# List all jobs
zeroclaw cron list
# Remove a job
zeroclaw cron remove <job-id>
```
## Schedule Types
### Cron Expressions (`kind: "cron"`)
Standard cron expressions with optional timezone support.
```bash
# Every weekday at 9 AM Pacific
zeroclaw cron add '0 9 * * 1-5' --tz 'America/Los_Angeles' 'echo "Work time"'
# Every hour
zeroclaw cron add '0 * * * *' 'echo "Hourly check"'
# Every 15 minutes
zeroclaw cron add '*/15 * * * *' 'curl http://localhost:8080/ping'
```
**Format:** `minute hour day-of-month month day-of-week`
| Field | Values |
|-------|--------|
| minute | 0-59 |
| hour | 0-23 |
| day-of-month | 1-31 |
| month | 1-12 |
| day-of-week | 0-6 (Sun-Sat) |
### One-Shot (`kind: "at"`)
Run exactly once at a specific time.
```bash
# At a specific ISO timestamp
zeroclaw cron add-at '2026-03-15T14:30:00Z' 'echo "Meeting starts!"'
# Relative delay (human-friendly)
zeroclaw cron once 2h 'echo "Two hours later"'
zeroclaw cron once 30m 'echo "Half hour reminder"'
zeroclaw cron once 1d 'echo "Tomorrow"'
```
**Delay units:** `s` (seconds), `m` (minutes), `h` (hours), `d` (days)
### Interval (`kind: "every"`)
Run repeatedly at a fixed interval.
```bash
# Every 5 minutes (300000 ms)
zeroclaw cron add-every 300000 'echo "Ping"'
# Every hour (3600000 ms)
zeroclaw cron add-every 3600000 'curl http://api.example.com/sync'
```
## Job Types
### Shell Jobs
Execute shell commands directly:
```bash
zeroclaw cron add '0 6 * * *' 'backup.sh && notify-send "Backup done"'
```
### Agent Jobs
Send prompts to the AI agent:
```toml
# In zeroclaw.toml
[[cron.jobs]]
schedule = { kind = "cron", expr = "0 9 * * *", tz = "America/Los_Angeles" }
job_type = "agent"
prompt = "Check my calendar and summarize today's events"
session_target = "main" # or "isolated"
```
## Session Targeting
Control where agent jobs run:
| Target | Behavior |
|--------|----------|
| `isolated` (default) | Spawns new session, no history |
| `main` | Runs in main session with full context |
```toml
[[cron.jobs]]
schedule = { kind = "every", every_ms = 1800000 } # 30 min
job_type = "agent"
prompt = "Check for new emails and summarize any urgent ones"
session_target = "main" # Has access to conversation history
```
## Delivery Configuration
Route job output to channels:
```toml
[[cron.jobs]]
schedule = { kind = "cron", expr = "0 8 * * *" }
job_type = "agent"
prompt = "Generate a morning briefing"
session_target = "isolated"
[cron.jobs.delivery]
mode = "channel"
channel = "telegram"
to = "123456789" # Telegram chat ID
best_effort = true # Don't fail if delivery fails
```
**Delivery modes:**
- `none` - No output delivery (default)
- `channel` - Send to a specific channel
- `notify` - System notification
## CLI Commands
| Command | Description |
|---------|-------------|
| `zeroclaw cron list` | Show all scheduled jobs |
| `zeroclaw cron add <expr> <cmd>` | Add cron-expression job |
| `zeroclaw cron add-at <time> <cmd>` | Add one-shot at time |
| `zeroclaw cron add-every <ms> <cmd>` | Add interval job |
| `zeroclaw cron once <delay> <cmd>` | Add one-shot with delay |
| `zeroclaw cron update <id> [opts]` | Update job settings |
| `zeroclaw cron remove <id>` | Delete a job |
| `zeroclaw cron pause <id>` | Pause (disable) job |
| `zeroclaw cron resume <id>` | Resume (enable) job |
## Configuration File
Define jobs in `zeroclaw.toml`:
```toml
[[cron.jobs]]
name = "morning-briefing"
schedule = { kind = "cron", expr = "0 8 * * 1-5", tz = "America/New_York" }
job_type = "agent"
prompt = "Good morning! Check my calendar, emails, and weather."
session_target = "main"
enabled = true
[[cron.jobs]]
name = "health-check"
schedule = { kind = "every", every_ms = 60000 }
job_type = "shell"
command = "curl -sf http://localhost:8080/health || notify-send 'Service down!'"
enabled = true
[[cron.jobs]]
name = "daily-backup"
schedule = { kind = "cron", expr = "0 2 * * *" }
job_type = "shell"
command = "/home/user/scripts/backup.sh"
enabled = true
```
## Tool Integration
The cron system is also available as agent tools:
| Tool | Description |
|------|-------------|
| `cron_add` | Create a new cron job |
| `cron_list` | List all jobs |
| `cron_remove` | Delete a job |
| `cron_update` | Modify a job |
| `cron_run` | Force-run a job immediately |
| `cron_runs` | Show recent run history |
### Example: Agent creating a reminder
```
User: Remind me to call mom in 2 hours
Agent: [uses cron_add with kind="at" and delay="2h"]
Done! I'll remind you to call mom at 4:30 PM.
```
## Migration from OpenClaw
ZeroClaw's cron system is compatible with OpenClaw's scheduling:
| OpenClaw | ZeroClaw |
|----------|----------|
| `kind: "cron"` | `kind = "cron"` ✅ |
| `kind: "every"` | `kind = "every"` ✅ |
| `kind: "at"` | `kind = "at"` ✅ |
| `sessionTarget: "main"` | `session_target = "main"` ✅ |
| `sessionTarget: "isolated"` | `session_target = "isolated"` ✅ |
| `payload.kind: "systemEvent"` | `job_type = "agent"` |
| `payload.kind: "agentTurn"` | `job_type = "agent"` |
**Key difference:** ZeroClaw uses TOML config format, OpenClaw uses JSON.
## Best Practices
1. **Use timezones** for user-facing schedules (meetings, reminders)
2. **Use intervals** for background tasks (health checks, syncs)
3. **Use one-shots** for reminders and delayed actions
4. **Set `session_target = "main"`** when the agent needs conversation context
5. **Use `delivery`** to route output to the right channel
## Troubleshooting
**Job not running?**
- Check `zeroclaw cron list` - is it enabled?
- Verify the cron expression is correct
- Check timezone settings
**Agent job has no context?**
- Change `session_target` from `"isolated"` to `"main"`
**Output not delivered?**
- Verify `delivery.channel` is configured
- Check that the target channel is active

View File

@ -22,6 +22,7 @@
- [reference/README.md](reference/README.md)
- [commands-reference.md](commands-reference.md)
- [cron-scheduling.md](cron-scheduling.md)
- [providers-reference.md](providers-reference.md)
- [channels-reference.md](channels-reference.md)
- [nextcloud-talk-setup.md](nextcloud-talk-setup.md)

View File

@ -14,7 +14,7 @@ Các kiểm tra chặn merge nên giữ nhỏ và mang tính quyết định. C
- Mục đích: Rust validation (`cargo fmt --all -- --check`, `cargo clippy --locked --all-targets -- -D clippy::correctness`, strict delta lint gate trên các dòng Rust thay đổi, `test`, kiểm tra smoke release build) + kiểm tra chất lượng tài liệu khi tài liệu thay đổi (`markdownlint` chỉ chặn các vấn đề trên dòng thay đổi; link check chỉ quét các link mới được thêm trên dòng thay đổi)
- Hành vi bổ sung: rust-cache được phân vùng theo vai trò job qua `prefix-key` để giảm cache churn giữa các lane lint/test/build/flake-probe
- Hành vi bổ sung: sinh artifact `test-flake-probe` từ cơ chế retry một lần khi test fail; có thể bật chế độ chặn bằng biến repository `CI_BLOCK_ON_FLAKE_SUSPECTED=true`
- Hành vi bổ sung: các PR thay đổi `.github/workflows/**` yêu cầu ít nhất một review phê duyệt từ login trong `WORKFLOW_OWNER_LOGINS` (fallback biến repository: `theonlyhennygod,willsarg`)
- Hành vi bổ sung: các PR thay đổi đường dẫn CI/CD được quản trị yêu cầu review phê duyệt tường minh từ `@chumyin` (`.github/workflows/**`, `.github/codeql/**`, `.github/connectivity/**`, `.github/release/**`, `.github/security/**`, `.github/actionlint.yaml`, `.github/dependabot.yml`, `scripts/ci/**` và tài liệu CI governance)
- Hành vi bổ sung: lint gate chạy trước `test`/`build`; khi lint/docs gate thất bại trên PR, CI đăng comment phản hồi hành động được với tên gate thất bại và các lệnh sửa cục bộ
- Merge gate: `CI Required Gate`
- `.github/workflows/workflow-sanity.yml` (`Workflow Sanity`)
@ -45,8 +45,6 @@ Các kiểm tra chặn merge nên giữ nhỏ và mang tính quyết định. C
- Mục đích: build release artifact ở chế độ xác minh (thủ công/theo lịch) và publish GitHub release khi push tag hoặc chế độ publish thủ công
- `.github/workflows/pr-label-policy-check.yml` (`Label Policy Sanity`)
- Mục đích: xác thực chính sách bậc contributor dùng chung trong `.github/label-policy.json` và đảm bảo các label workflow sử dụng chính sách đó
- `.github/workflows/test-rust-build.yml` (`Rust Reusable Job`)
- Mục đích: Rust setup/cache có thể tái sử dụng + trình chạy lệnh cho các workflow-call consumer
### Tự động hóa repository tùy chọn

View File

@ -2,7 +2,7 @@
Dựa trên CLI hiện tại (`zeroclaw --help`).
Xác minh lần cuối: **2026-02-20**.
Xác minh lần cuối: **2026-02-28**.
## Lệnh cấp cao nhất
@ -77,7 +77,7 @@ Xác minh lần cuối: **2026-02-20**.
- `zeroclaw models refresh --provider <ID>`
- `zeroclaw models refresh --force`
`models refresh` hiện hỗ trợ làm mới danh mục trực tiếp cho các provider: `openrouter`, `openai`, `anthropic`, `groq`, `mistral`, `deepseek`, `xai`, `together-ai`, `gemini`, `ollama`, `astrai`, `venice`, `fireworks`, `cohere`, `moonshot`, `glm`, `zai`, `qwen` và `nvidia`.
`models refresh` hiện hỗ trợ làm mới danh mục trực tiếp cho các provider: `openrouter`, `openai`, `anthropic`, `groq`, `mistral`, `deepseek`, `xai`, `together-ai`, `gemini`, `ollama`, `llamacpp`, `sglang`, `vllm`, `astrai`, `venice`, `fireworks`, `cohere`, `moonshot`, `glm`, `zai`, `qwen`, `volcengine` (alias `doubao`/`ark`), `siliconflow` và `nvidia`.
### `channel`

View File

@ -0,0 +1,241 @@
# Hệ thống Cron & Lập lịch
ZeroClaw bao gồm hệ thống lập lịch công việc đầy đủ tính năng để chạy các tác vụ theo lịch trình, tại thời điểm cụ thể, hoặc theo khoảng thời gian đều đặn.
## Bắt đầu nhanh
```bash
# Thêm cron job (chạy mỗi ngày lúc 9 giờ sáng)
zeroclaw cron add '0 9 * * *' 'echo "Chào buổi sáng!"'
# Thêm nhắc nhở một lần (chạy sau 30 phút)
zeroclaw cron once 30m 'notify-send "Hết giờ!"'
# Thêm job theo khoảng thời gian (chạy mỗi 5 phút)
zeroclaw cron add-every 300000 'curl -s http://api.example.com/health'
# Liệt kê tất cả jobs
zeroclaw cron list
# Xóa một job
zeroclaw cron remove <job-id>
```
## Loại lịch trình
### Biểu thức Cron (`kind: "cron"`)
Biểu thức cron tiêu chuẩn với hỗ trợ múi giờ tùy chọn.
```bash
# Mỗi ngày làm việc lúc 9 giờ sáng giờ Pacific
zeroclaw cron add '0 9 * * 1-5' --tz 'America/Los_Angeles' 'echo "Giờ làm việc"'
# Mỗi giờ
zeroclaw cron add '0 * * * *' 'echo "Kiểm tra hàng giờ"'
# Mỗi 15 phút
zeroclaw cron add '*/15 * * * *' 'curl http://localhost:8080/ping'
```
**Định dạng:** `phút giờ ngày-trong-tháng tháng ngày-trong-tuần`
| Trường | Giá trị |
|--------|---------|
| phút | 0-59 |
| giờ | 0-23 |
| ngày-trong-tháng | 1-31 |
| tháng | 1-12 |
| ngày-trong-tuần | 0-6 (CN-T7) |
### Chạy một lần (`kind: "at"`)
Chạy đúng một lần tại thời điểm cụ thể.
```bash
# Tại thời điểm ISO cụ thể
zeroclaw cron add-at '2026-03-15T14:30:00Z' 'echo "Cuộc họp bắt đầu!"'
# Độ trễ tương đối (thân thiện với người dùng)
zeroclaw cron once 2h 'echo "Hai giờ sau"'
zeroclaw cron once 30m 'echo "Nhắc nhở nửa giờ"'
zeroclaw cron once 1d 'echo "Ngày mai"'
```
**Đơn vị độ trễ:** `s` (giây), `m` (phút), `h` (giờ), `d` (ngày)
### Khoảng thời gian (`kind: "every"`)
Chạy lặp lại theo khoảng thời gian cố định.
```bash
# Mỗi 5 phút (300000 ms)
zeroclaw cron add-every 300000 'echo "Ping"'
# Mỗi giờ (3600000 ms)
zeroclaw cron add-every 3600000 'curl http://api.example.com/sync'
```
## Loại công việc
### Shell Jobs
Thực thi lệnh shell trực tiếp:
```bash
zeroclaw cron add '0 6 * * *' 'backup.sh && notify-send "Sao lưu xong"'
```
### Agent Jobs
Gửi prompt đến AI agent:
```toml
# Trong zeroclaw.toml
[[cron.jobs]]
schedule = { kind = "cron", expr = "0 9 * * *", tz = "America/Los_Angeles" }
job_type = "agent"
prompt = "Kiểm tra lịch của tôi và tóm tắt các sự kiện hôm nay"
session_target = "main" # hoặc "isolated"
```
## Nhắm mục tiêu phiên
Kiểm soát nơi agent jobs chạy:
| Mục tiêu | Hành vi |
|----------|---------|
| `isolated` (mặc định) | Tạo phiên mới, không có lịch sử |
| `main` | Chạy trong phiên chính với ngữ cảnh đầy đủ |
```toml
[[cron.jobs]]
schedule = { kind = "every", every_ms = 1800000 } # 30 phút
job_type = "agent"
prompt = "Kiểm tra email mới và tóm tắt những email khẩn cấp"
session_target = "main" # Có quyền truy cập lịch sử hội thoại
```
## Cấu hình gửi kết quả
Định tuyến output của job đến các kênh:
```toml
[[cron.jobs]]
schedule = { kind = "cron", expr = "0 8 * * *" }
job_type = "agent"
prompt = "Tạo bản tóm tắt buổi sáng"
session_target = "isolated"
[cron.jobs.delivery]
mode = "channel"
channel = "telegram"
to = "123456789" # Telegram chat ID
best_effort = true # Không thất bại nếu gửi thất bại
```
**Các chế độ gửi:**
- `none` - Không gửi output (mặc định)
- `channel` - Gửi đến kênh cụ thể
- `notify` - Thông báo hệ thống
## Lệnh CLI
| Lệnh | Mô tả |
|------|-------|
| `zeroclaw cron list` | Hiển thị tất cả jobs đã lập lịch |
| `zeroclaw cron add <expr> <cmd>` | Thêm job với biểu thức cron |
| `zeroclaw cron add-at <time> <cmd>` | Thêm job chạy một lần tại thời điểm |
| `zeroclaw cron add-every <ms> <cmd>` | Thêm job theo khoảng thời gian |
| `zeroclaw cron once <delay> <cmd>` | Thêm job chạy một lần với độ trễ |
| `zeroclaw cron update <id> [opts]` | Cập nhật cài đặt job |
| `zeroclaw cron remove <id>` | Xóa một job |
| `zeroclaw cron pause <id>` | Tạm dừng (vô hiệu hóa) job |
| `zeroclaw cron resume <id>` | Tiếp tục (kích hoạt) job |
## Tệp cấu hình
Định nghĩa jobs trong `zeroclaw.toml`:
```toml
[[cron.jobs]]
name = "morning-briefing"
schedule = { kind = "cron", expr = "0 8 * * 1-5", tz = "America/New_York" }
job_type = "agent"
prompt = "Chào buổi sáng! Kiểm tra lịch, email và thời tiết của tôi."
session_target = "main"
enabled = true
[[cron.jobs]]
name = "health-check"
schedule = { kind = "every", every_ms = 60000 }
job_type = "shell"
command = "curl -sf http://localhost:8080/health || notify-send 'Dịch vụ ngừng hoạt động!'"
enabled = true
[[cron.jobs]]
name = "daily-backup"
schedule = { kind = "cron", expr = "0 2 * * *" }
job_type = "shell"
command = "/home/user/scripts/backup.sh"
enabled = true
```
## Tích hợp công cụ
Hệ thống cron cũng có sẵn dưới dạng agent tools:
| Tool | Mô tả |
|------|-------|
| `cron_add` | Tạo cron job mới |
| `cron_list` | Liệt kê tất cả jobs |
| `cron_remove` | Xóa một job |
| `cron_update` | Sửa đổi một job |
| `cron_run` | Chạy ngay một job |
| `cron_runs` | Hiển thị lịch sử chạy gần đây |
### Ví dụ: Agent tạo nhắc nhở
```
Người dùng: Nhắc tôi gọi điện cho mẹ sau 2 giờ
Agent: [sử dụng cron_add với kind="at" và delay="2h"]
Xong! Tôi sẽ nhắc bạn gọi điện cho mẹ lúc 4:30 chiều.
```
## Di chuyển từ OpenClaw
Hệ thống cron của ZeroClaw tương thích với lập lịch của OpenClaw:
| OpenClaw | ZeroClaw |
|----------|----------|
| `kind: "cron"` | `kind = "cron"` ✅ |
| `kind: "every"` | `kind = "every"` ✅ |
| `kind: "at"` | `kind = "at"` ✅ |
| `sessionTarget: "main"` | `session_target = "main"` ✅ |
| `sessionTarget: "isolated"` | `session_target = "isolated"` ✅ |
| `payload.kind: "systemEvent"` | `job_type = "agent"` |
| `payload.kind: "agentTurn"` | `job_type = "agent"` |
**Khác biệt chính:** ZeroClaw sử dụng định dạng TOML, OpenClaw sử dụng JSON.
## Thực hành tốt nhất
1. **Sử dụng múi giờ** cho lịch trình hướng đến người dùng (cuộc họp, nhắc nhở)
2. **Sử dụng khoảng thời gian** cho các tác vụ nền (kiểm tra sức khỏe, đồng bộ)
3. **Sử dụng chạy một lần** cho nhắc nhở và hành động trì hoãn
4. **Đặt `session_target = "main"`** khi agent cần ngữ cảnh hội thoại
5. **Sử dụng `delivery`** để định tuyến output đến kênh phù hợp
## Xử lý sự cố
**Job không chạy?**
- Kiểm tra `zeroclaw cron list` - nó có được bật không?
- Xác minh biểu thức cron đúng
- Kiểm tra cài đặt múi giờ
**Agent job không có ngữ cảnh?**
- Thay đổi `session_target` từ `"isolated"` sang `"main"`
**Output không được gửi?**
- Xác minh `delivery.channel` đã được cấu hình
- Kiểm tra kênh đích đang hoạt động

View File

@ -99,7 +99,7 @@ Duy trì các quy tắc branch protection sau trên `main`:
- Yêu cầu check `CI Required Gate`.
- Yêu cầu review pull request trước khi merge.
- Yêu cầu review CODEOWNERS cho các đường dẫn được bảo vệ.
- Với `.github/workflows/**`, yêu cầu phê duyệt từ owner qua `CI Required Gate` (`WORKFLOW_OWNER_LOGINS`) và giới hạn quyền bypass branch/ruleset cho org owner.
- Với các đường dẫn CI/CD được quản trị (`.github/workflows/**`, `.github/codeql/**`, `.github/connectivity/**`, `.github/release/**`, `.github/security/**`, `.github/actionlint.yaml`, `.github/dependabot.yml`, `scripts/ci/**` và tài liệu CI governance), yêu cầu review phê duyệt tường minh từ `@chumyin` qua `CI Required Gate`.
- Hủy bỏ approval cũ khi có commit mới được đẩy lên.
- Hạn chế force-push trên các branch được bảo vệ.

View File

@ -2,7 +2,7 @@
Tài liệu này liệt kê các provider ID, alias và biến môi trường chứa thông tin xác thực.
Cập nhật lần cuối: **2026-02-19**.
Cập nhật lần cuối: **2026-02-28**.
## Cách liệt kê các Provider
@ -41,6 +41,8 @@ Với chuỗi provider dự phòng (`reliability.fallback_providers`), mỗi pro
| `minimax` | `minimax-intl`, `minimax-io`, `minimax-global`, `minimax-cn`, `minimaxi`, `minimax-oauth`, `minimax-oauth-cn`, `minimax-portal`, `minimax-portal-cn` | Không | `MINIMAX_OAUTH_TOKEN`, `MINIMAX_API_KEY` |
| `bedrock` | `aws-bedrock` | Không | `AWS_ACCESS_KEY_ID` + `AWS_SECRET_ACCESS_KEY` (tùy chọn: `AWS_REGION`) |
| `qianfan` | `baidu` | Không | `QIANFAN_API_KEY` |
| `doubao` | `volcengine`, `ark`, `doubao-cn` | Không | `ARK_API_KEY`, `DOUBAO_API_KEY` |
| `siliconflow` | `silicon-cloud`, `siliconcloud` | Không | `SILICONFLOW_API_KEY` |
| `qwen` | `dashscope`, `qwen-intl`, `dashscope-intl`, `qwen-us`, `dashscope-us`, `qwen-code`, `qwen-oauth`, `qwen_oauth` | Không | `QWEN_OAUTH_TOKEN`, `DASHSCOPE_API_KEY` |
| `groq` | — | Không | `GROQ_API_KEY` |
| `mistral` | — | Không | `MISTRAL_API_KEY` |
@ -61,6 +63,53 @@ Với chuỗi provider dự phòng (`reliability.fallback_providers`), mỗi pro
- Request bằng API key dùng endpoint `generativelanguage.googleapis.com/v1beta`
- Request OAuth qua Gemini CLI dùng endpoint `cloudcode-pa.googleapis.com/v1internal` theo chuẩn Code Assist request envelope
### Ghi chú về Volcengine ARK (Doubao)
- Runtime provider ID: `doubao` (alias: `volcengine`, `ark`, `doubao-cn`)
- Tên hiển thị/canonical trong onboarding: `volcengine`
- Base API URL: `https://ark.cn-beijing.volces.com/api/v3`
- Chat endpoint: `/chat/completions`
- Model discovery endpoint: `/models`
- Xác thực: `ARK_API_KEY` (fallback: `DOUBAO_API_KEY`)
- Model mặc định: `doubao-1-5-pro-32k-250115`
Ví dụ thiết lập nhanh:
```bash
export ARK_API_KEY="your-ark-api-key"
zeroclaw onboard --provider volcengine --api-key "$ARK_API_KEY" --model doubao-1-5-pro-32k-250115 --force
```
Kiểm tra nhanh:
```bash
zeroclaw models refresh --provider volcengine
zeroclaw agent --provider volcengine --model doubao-1-5-pro-32k-250115 -m "ping"
```
### Ghi chú về SiliconFlow
- Provider ID: `siliconflow` (alias: `silicon-cloud`, `siliconcloud`)
- Base API URL: `https://api.siliconflow.cn/v1`
- Chat endpoint: `/chat/completions`
- Model discovery endpoint: `/models`
- Xác thực: `SILICONFLOW_API_KEY`
- Model mặc định: `Pro/zai-org/GLM-4.7`
Ví dụ thiết lập nhanh:
```bash
export SILICONFLOW_API_KEY="your-siliconflow-api-key"
zeroclaw onboard --provider siliconflow --api-key "$SILICONFLOW_API_KEY" --model Pro/zai-org/GLM-4.7 --force
```
Kiểm tra nhanh:
```bash
zeroclaw models refresh --provider siliconflow
zeroclaw agent --provider siliconflow --model Pro/zai-org/GLM-4.7 -m "ping"
```
### Ghi chú về Ollama Vision
- Provider ID: `ollama`

View File

@ -26,7 +26,7 @@ Policy: `.github/release/prerelease-stage-gates.json`
| `alpha` | - | `CI Required Gate`, `Security Audit` |
| `beta` | `alpha` | `CI Required Gate`, `Security Audit`, `Feature Matrix Summary` |
| `rc` | `beta` | `CI Required Gate`, `Security Audit`, `Feature Matrix Summary`, `Nightly Summary & Routing` |
| `stable` | `rc` | `Main Promotion Gate`, `CI Required Gate`, `Security Audit`, `Feature Matrix Summary`, `Verify Artifact Set`, `Nightly Summary & Routing` |
| `stable` | `rc` | `CI Required Gate`, `Security Audit`, `Feature Matrix Summary`, `Verify Artifact Set`, `Nightly Summary & Routing` |
The guard validates that the policy file defines this matrix shape completely. Missing or malformed matrix configuration fails validation.

View File

@ -18,14 +18,6 @@ Feature matrix lane check names (informational, non-required):
- `Matrix Lane (browser-native)`
- `Matrix Lane (nightly-all-features)`
## Promotion to `main`
| Required check name | Source workflow | Scope |
| --- | --- | --- |
| `Main Promotion Gate` | `.github/workflows/main-promotion-gate.yml` | branch + actor policy |
| `CI Required Gate` | `.github/workflows/ci-run.yml` | baseline quality gate |
| `Security Audit` | `.github/workflows/sec-audit.yml` | security baseline |
## Release / Pre-release
| Required check name | Source workflow | Scope |
@ -47,5 +39,5 @@ Feature matrix lane check names (informational, non-required):
- Use pinned `uses:` references for all workflow actions.
- Keep check names stable; renaming check jobs can break branch protection rules.
- GitHub scheduled/manual discovery for workflows is default-branch driven. If a release/nightly workflow only exists on `dev`, promotion to `main` is required before default-branch schedule visibility is expected.
- GitHub scheduled/manual discovery for workflows is default-branch driven. If a release/nightly workflow only exists on a non-default branch, merge it into the default branch before expecting schedule visibility.
- Update this mapping whenever merge-critical workflows/jobs are added or renamed.

View File

@ -99,12 +99,12 @@ Maintain these branch protection rules on `dev` and `main`:
- Require check `CI Required Gate`.
- Require pull request reviews before merge.
- Require CODEOWNERS review for protected paths.
- For `.github/workflows/**`, require owner approval via `CI Required Gate` (`WORKFLOW_OWNER_LOGINS`) and keep branch/ruleset bypass limited to org owners.
- Default workflow-owner allowlist includes `theonlyhennygod`, `willsarg`, and `chumyin` (plus any comma-separated additions from `WORKFLOW_OWNER_LOGINS`).
- For CI/CD-related paths (`.github/workflows/**`, `.github/codeql/**`, `.github/connectivity/**`, `.github/release/**`, `.github/security/**`, `.github/actionlint.yaml`, `.github/dependabot.yml`, `scripts/ci/**`, and CI governance docs), require an explicit approving review from `@chumyin` via `CI Required Gate`.
- Keep branch/ruleset bypass limited to org owners.
- Dismiss stale approvals when new commits are pushed.
- Restrict force-push on protected branches.
- Route normal contributor PRs to `dev`.
- Allow `main` merges only through a promotion PR from `dev` (enforced by `Main Promotion Gate`).
- Route normal contributor PRs to `main` by default (`dev` is optional for dedicated integration batching).
- Allow direct merges to `main` once required checks and review policy pass.
---

View File

@ -2,7 +2,7 @@
This document maps provider IDs, aliases, and credential environment variables.
Last verified: **February 24, 2026**.
Last verified: **February 28, 2026**.
## How to List Providers
@ -44,6 +44,7 @@ credential is not reused for fallback providers.
| `bedrock` | `aws-bedrock` | No | `AWS_ACCESS_KEY_ID` + `AWS_SECRET_ACCESS_KEY` (optional: `AWS_REGION`) |
| `qianfan` | `baidu` | No | `QIANFAN_API_KEY` |
| `doubao` | `volcengine`, `ark`, `doubao-cn` | No | `ARK_API_KEY`, `DOUBAO_API_KEY` |
| `siliconflow` | `silicon-cloud`, `siliconcloud` | No | `SILICONFLOW_API_KEY` |
| `hunyuan` | `tencent` | No | `HUNYUAN_API_KEY` |
| `qwen` | `dashscope`, `qwen-intl`, `dashscope-intl`, `qwen-us`, `dashscope-us`, `qwen-code`, `qwen-oauth`, `qwen_oauth` | No | `QWEN_OAUTH_TOKEN`, `DASHSCOPE_API_KEY` |
| `groq` | — | No | `GROQ_API_KEY` |
@ -101,6 +102,53 @@ credential is not reused for fallback providers.
- OAuth free tier limited to 1 model and 1000 requests/day
- See test report: `docs/qwen-provider-test-report.md`
### Volcengine ARK (Doubao) Notes
- Runtime provider ID: `doubao` (aliases: `volcengine`, `ark`, `doubao-cn`)
- Onboarding display/canonical name: `volcengine`
- Base API URL: `https://ark.cn-beijing.volces.com/api/v3`
- Chat endpoint: `/chat/completions`
- Model discovery endpoint: `/models`
- Authentication: `ARK_API_KEY` (fallback: `DOUBAO_API_KEY`)
- Default model preset: `doubao-1-5-pro-32k-250115`
Minimal setup example:
```bash
export ARK_API_KEY="your-ark-api-key"
zeroclaw onboard --provider volcengine --api-key "$ARK_API_KEY" --model doubao-1-5-pro-32k-250115 --force
```
Quick validation:
```bash
zeroclaw models refresh --provider volcengine
zeroclaw agent --provider volcengine --model doubao-1-5-pro-32k-250115 -m "ping"
```
### SiliconFlow Notes
- Provider ID: `siliconflow` (aliases: `silicon-cloud`, `siliconcloud`)
- Base API URL: `https://api.siliconflow.cn/v1`
- Chat endpoint: `/chat/completions`
- Model discovery endpoint: `/models`
- Authentication: `SILICONFLOW_API_KEY`
- Default model preset: `Pro/zai-org/GLM-4.7`
Minimal setup example:
```bash
export SILICONFLOW_API_KEY="your-siliconflow-api-key"
zeroclaw onboard --provider siliconflow --api-key "$SILICONFLOW_API_KEY" --model Pro/zai-org/GLM-4.7 --force
```
Quick validation:
```bash
zeroclaw models refresh --provider siliconflow
zeroclaw agent --provider siliconflow --model Pro/zai-org/GLM-4.7 -m "ping"
```
### Ollama Vision Notes
- Provider ID: `ollama`

View File

@ -24,29 +24,43 @@ if [ -z "$BASE" ] || ! git cat-file -e "$BASE^{commit}" 2>/dev/null; then
echo "docs_changed=false"
echo "rust_changed=true"
echo "workflow_changed=false"
echo "ci_cd_changed=false"
echo "base_sha="
} >> "$GITHUB_OUTPUT"
write_empty_docs_files
exit 0
fi
# Use merge-base to avoid false positives when the base branch has advanced
# and the PR branch is temporarily behind. This limits scope to changes
# introduced by the head branch itself.
DIFF_BASE="$BASE"
if MERGE_BASE="$(git merge-base "$BASE" HEAD 2>/dev/null)"; then
if [ -n "$MERGE_BASE" ]; then
DIFF_BASE="$MERGE_BASE"
DIFF_HEAD="HEAD"
# For pull_request events, checkout usually points to refs/pull/*/merge.
# In that case HEAD is a synthetic merge commit:
# - HEAD^1 => latest base branch tip
# - HEAD => merged result used for CI
# Diffing HEAD^1..HEAD isolates only PR-introduced changes, even when the
# BASE_SHA from the event payload is stale.
if [ "$EVENT_NAME" = "pull_request" ] && git rev-parse --verify HEAD^2 >/dev/null 2>&1; then
DIFF_BASE="$(git rev-parse HEAD^1)"
DIFF_HEAD="HEAD"
else
# Fallback: use merge-base to avoid false positives when the base branch has
# advanced and the PR branch is temporarily behind.
if MERGE_BASE="$(git merge-base "$BASE" HEAD 2>/dev/null)"; then
if [ -n "$MERGE_BASE" ]; then
DIFF_BASE="$MERGE_BASE"
fi
fi
fi
CHANGED="$(git diff --name-only "$DIFF_BASE" HEAD || true)"
CHANGED="$(git diff --name-only "$DIFF_BASE" "$DIFF_HEAD" || true)"
if [ -z "$CHANGED" ]; then
{
echo "docs_only=false"
echo "docs_changed=false"
echo "rust_changed=false"
echo "workflow_changed=false"
echo "ci_cd_changed=false"
echo "base_sha=$DIFF_BASE"
} >> "$GITHUB_OUTPUT"
write_empty_docs_files
@ -57,6 +71,7 @@ docs_only=true
docs_changed=false
rust_changed=false
workflow_changed=false
ci_cd_changed=false
docs_files=()
while IFS= read -r file; do
[ -z "$file" ] && continue
@ -65,6 +80,20 @@ while IFS= read -r file; do
workflow_changed=true
fi
if [[ "$file" == .github/workflows/* ]] \
|| [[ "$file" == .github/codeql/* ]] \
|| [[ "$file" == .github/connectivity/* ]] \
|| [[ "$file" == .github/release/* ]] \
|| [[ "$file" == .github/security/* ]] \
|| [[ "$file" == .github/actionlint.yaml ]] \
|| [[ "$file" == .github/dependabot.yml ]] \
|| [[ "$file" == scripts/ci/* ]] \
|| [[ "$file" == docs/ci-map.md ]] \
|| [[ "$file" == docs/actions-source-policy.md ]] \
|| [[ "$file" == docs/operations/self-hosted-runner-remediation.md ]]; then
ci_cd_changed=true
fi
if [[ "$file" == docs/* ]] \
|| [[ "$file" == *.md ]] \
|| [[ "$file" == *.mdx ]] \
@ -98,8 +127,11 @@ done <<< "$CHANGED"
echo "docs_changed=$docs_changed"
echo "rust_changed=$rust_changed"
echo "workflow_changed=$workflow_changed"
echo "ci_cd_changed=$ci_cd_changed"
echo "base_sha=$DIFF_BASE"
echo "docs_files<<EOF"
printf '%s\n' "${docs_files[@]}"
if [ "${#docs_files[@]}" -gt 0 ]; then
printf '%s\n' "${docs_files[@]}"
fi
echo "EOF"
} >> "$GITHUB_OUTPUT"

View File

@ -3053,7 +3053,6 @@ class CiScriptsBehaviorTest(unittest.TestCase):
"Nightly Summary & Routing",
],
"stable": [
"Main Promotion Gate",
"CI Required Gate",
"Security Audit",
"Feature Matrix Summary",
@ -3151,7 +3150,6 @@ class CiScriptsBehaviorTest(unittest.TestCase):
"Nightly Summary & Routing",
],
"stable": [
"Main Promotion Gate",
"CI Required Gate",
"Security Audit",
"Feature Matrix Summary",
@ -3246,7 +3244,6 @@ class CiScriptsBehaviorTest(unittest.TestCase):
"Nightly Summary & Routing",
],
"stable": [
"Main Promotion Gate",
"CI Required Gate",
"Security Audit",
"Feature Matrix Summary",

View File

@ -0,0 +1,156 @@
#!/usr/bin/env python3
"""Focused tests for detect_change_scope.sh."""
from __future__ import annotations
import os
import shutil
import subprocess
import tempfile
import unittest
from pathlib import Path
ROOT = Path(__file__).resolve().parents[3]
SCRIPT = ROOT / "scripts" / "ci" / "detect_change_scope.sh"
def run_cmd(cmd: list[str], *, cwd: Path, env: dict[str, str] | None = None) -> subprocess.CompletedProcess[str]:
return subprocess.run(
cmd,
cwd=str(cwd),
env=env,
text=True,
capture_output=True,
check=False,
)
def parse_github_output(output_path: Path) -> dict[str, str | list[str]]:
lines = output_path.read_text(encoding="utf-8").splitlines()
parsed: dict[str, str | list[str]] = {}
i = 0
while i < len(lines):
line = lines[i]
if line.endswith("<<EOF"):
key = line.split("<<", 1)[0]
i += 1
values: list[str] = []
while i < len(lines) and lines[i] != "EOF":
if lines[i] != "":
values.append(lines[i])
i += 1
parsed[key] = values
elif "=" in line:
key, value = line.split("=", 1)
parsed[key] = value
i += 1
return parsed
class DetectChangeScopeTest(unittest.TestCase):
def setUp(self) -> None:
    """Create a throwaway git repo on branch ``main`` with a test identity.

    The repo lives in a temp dir removed via ``addCleanup``; each git step is
    run through ``_assert_cmd_ok`` so a broken fixture fails loudly.
    """
    self.tmp = Path(tempfile.mkdtemp(prefix="zc-detect-scope-"))
    self.addCleanup(lambda: shutil.rmtree(self.tmp, ignore_errors=True))
    fixture_steps = (
        (["git", "init", "-q"], "git init"),
        (["git", "checkout", "-q", "-b", "main"], "git checkout -b main"),
        (["git", "config", "user.name", "CI Test"], "git config user.name"),
        (["git", "config", "user.email", "ci@example.com"], "git config user.email"),
    )
    for cmd, desc in fixture_steps:
        self._assert_cmd_ok(cmd, desc)
def _assert_cmd_ok(self, cmd: list[str], desc: str) -> None:
    """Run *cmd* in the temp repo and fail the test if it exits nonzero."""
    result = run_cmd(cmd, cwd=self.tmp)
    failure_detail = f"{desc} failed: {result.stderr}\n{result.stdout}"
    self.assertEqual(result.returncode, 0, msg=failure_detail)
def _commit(self, message: str) -> str:
    """Commit currently staged changes with *message*; return the new HEAD sha."""
    commit_proc = run_cmd(["git", "commit", "-q", "-m", message], cwd=self.tmp)
    self.assertEqual(commit_proc.returncode, 0, msg=commit_proc.stderr)
    head = run_cmd(["git", "rev-parse", "HEAD"], cwd=self.tmp)
    self.assertEqual(head.returncode, 0, msg=head.stderr)
    return head.stdout.strip()
def _run_scope(self, *, event_name: str, base_sha: str) -> dict[str, str | list[str]]:
output_path = self.tmp / "github_output.txt"
env = {
"PATH": os.environ.get("PATH") or "/usr/bin:/bin",
"GITHUB_OUTPUT": str(output_path),
"EVENT_NAME": event_name,
"BASE_SHA": base_sha,
}
proc = run_cmd(["bash", str(SCRIPT)], cwd=self.tmp, env=env)
self.assertEqual(proc.returncode, 0, msg=f"{proc.stderr}\n{proc.stdout}")
return parse_github_output(output_path)
def test_pull_request_merge_commit_uses_merge_parents(self) -> None:
(self.tmp / "src").mkdir(parents=True, exist_ok=True)
(self.tmp / "src" / "lib.rs").write_text("pub fn answer() -> i32 { 42 }\n", encoding="utf-8")
self._assert_cmd_ok(["git", "add", "src/lib.rs"], "git add src/lib.rs")
stale_base = self._commit("base")
self._assert_cmd_ok(
["git", "checkout", "-q", "-b", "feature/workflow-only"],
"git checkout -b feature/workflow-only",
)
(self.tmp / ".github" / "workflows").mkdir(parents=True, exist_ok=True)
(self.tmp / ".github" / "workflows" / "ci-example.yml").write_text(
"name: Example\non: pull_request\njobs: {}\n",
encoding="utf-8",
)
self._assert_cmd_ok(
["git", "add", ".github/workflows/ci-example.yml"],
"git add .github/workflows/ci-example.yml",
)
self._commit("feature: workflow only")
self._assert_cmd_ok(["git", "checkout", "-q", "main"], "git checkout main")
(self.tmp / "src" / "lib.rs").write_text("pub fn answer() -> i32 { 43 }\n", encoding="utf-8")
self._assert_cmd_ok(["git", "add", "src/lib.rs"], "git add src/lib.rs")
main_tip = self._commit("main: rust change after feature fork")
merge_proc = run_cmd(
["git", "merge", "--no-ff", "-q", "feature/workflow-only", "-m", "merge feature"],
cwd=self.tmp,
)
self.assertEqual(merge_proc.returncode, 0, msg=merge_proc.stderr)
out = self._run_scope(event_name="pull_request", base_sha=stale_base)
self.assertEqual(out["rust_changed"], "false")
self.assertEqual(out["workflow_changed"], "true")
self.assertEqual(out["docs_changed"], "false")
self.assertEqual(out["docs_only"], "false")
self.assertEqual(out["base_sha"], main_tip)
self.assertEqual(out["docs_files"], [])
def test_push_event_falls_back_to_merge_base(self) -> None:
(self.tmp / "src").mkdir(parents=True, exist_ok=True)
(self.tmp / "src" / "lib.rs").write_text("pub fn alpha() {}\n", encoding="utf-8")
self._assert_cmd_ok(["git", "add", "src/lib.rs"], "git add src/lib.rs")
common_base = self._commit("base")
self._assert_cmd_ok(
["git", "checkout", "-q", "-b", "feature/rust-change"],
"git checkout -b feature/rust-change",
)
(self.tmp / "src" / "lib.rs").write_text("pub fn alpha() {}\npub fn beta() {}\n", encoding="utf-8")
self._assert_cmd_ok(["git", "add", "src/lib.rs"], "git add src/lib.rs")
self._commit("feature: rust change")
self._assert_cmd_ok(["git", "checkout", "-q", "main"], "git checkout main")
(self.tmp / "README.md").write_text("# docs touch\n", encoding="utf-8")
self._assert_cmd_ok(["git", "add", "README.md"], "git add README.md")
advanced_base = self._commit("main advanced")
self._assert_cmd_ok(
["git", "checkout", "-q", "feature/rust-change"],
"git checkout feature/rust-change",
)
out = self._run_scope(event_name="push", base_sha=advanced_base)
self.assertEqual(out["rust_changed"], "true")
self.assertEqual(out["workflow_changed"], "false")
self.assertEqual(out["docs_changed"], "false")
self.assertEqual(out["docs_only"], "false")
self.assertEqual(out["base_sha"], common_base)
# Allow running this module directly without a test runner.
if __name__ == "__main__":
    unittest.main()

View File

@ -1,6 +1,7 @@
use crate::agent::dispatcher::{
NativeToolDispatcher, ParsedToolCall, ToolDispatcher, ToolExecutionResult, XmlToolDispatcher,
};
use crate::agent::loop_::detection::{DetectionVerdict, LoopDetectionConfig, LoopDetector};
use crate::agent::memory_loader::{DefaultMemoryLoader, MemoryLoader};
use crate::agent::prompt::{PromptContext, SystemPromptBuilder};
use crate::agent::research;
@ -557,8 +558,13 @@ impl Agent {
.push(ConversationMessage::Chat(ChatMessage::user(enriched)));
let effective_model = self.classify_model(user_message);
let mut loop_detector = LoopDetector::new(LoopDetectionConfig {
no_progress_threshold: self.config.loop_detection_no_progress_threshold,
ping_pong_cycles: self.config.loop_detection_ping_pong_cycles,
failure_streak_threshold: self.config.loop_detection_failure_streak,
});
for _ in 0..self.config.max_tool_iterations {
for iteration in 0..self.config.max_tool_iterations {
let messages = self.tool_dispatcher.to_provider_messages(&self.history);
let response = match self
.provider
@ -613,9 +619,34 @@ impl Agent {
});
let results = self.execute_tools(&calls).await;
// ── Loop detection: record calls ─────────────────────
for (call, result) in calls.iter().zip(results.iter()) {
let args_sig =
serde_json::to_string(&call.arguments).unwrap_or_else(|_| "{}".into());
loop_detector.record_call(&call.name, &args_sig, &result.output, result.success);
}
let formatted = self.tool_dispatcher.format_results(&results);
self.history.push(formatted);
self.trim_history();
// ── Loop detection: check verdict ────────────────────
match loop_detector.check() {
DetectionVerdict::Continue => {}
DetectionVerdict::InjectWarning(warning) => {
self.history
.push(ConversationMessage::Chat(ChatMessage::user(warning)));
}
DetectionVerdict::HardStop(reason) => {
anyhow::bail!(
"Agent stopped early due to detected loop pattern (iteration {}/{}): {}",
iteration + 1,
self.config.max_tool_iterations,
reason
);
}
}
}
anyhow::bail!(

View File

@ -28,12 +28,14 @@ use tokio_util::sync::CancellationToken;
use uuid::Uuid;
mod context;
pub(crate) mod detection;
mod execution;
mod history;
mod parsing;
use crate::agent::session::{create_session_manager, resolve_session_id, SessionManager};
use context::{build_context, build_hardware_context};
use detection::{DetectionVerdict, LoopDetectionConfig, LoopDetector};
use execution::{
execute_tools_parallel, execute_tools_sequential, should_execute_tools_in_parallel,
ToolExecutionOutcome,
@ -314,6 +316,7 @@ pub(crate) struct NonCliApprovalContext {
tokio::task_local! {
static TOOL_LOOP_NON_CLI_APPROVAL_CONTEXT: Option<NonCliApprovalContext>;
static LOOP_DETECTION_CONFIG: LoopDetectionConfig;
}
/// Extract a short hint from tool call arguments for progress display.
@ -599,6 +602,14 @@ pub(crate) fn is_tool_iteration_limit_error(err: &anyhow::Error) -> bool {
})
}
pub(crate) fn is_loop_detection_error(err: &anyhow::Error) -> bool {
err.chain().any(|source| {
source
.to_string()
.contains("Agent stopped early due to detected loop pattern")
})
}
/// Execute a single turn of the agent loop: send messages, parse tool calls,
/// execute tools, and loop until the LLM produces a final text response.
/// When `silent` is true, suppresses stdout (for channel use).
@ -799,6 +810,11 @@ pub(crate) async fn run_tool_call_loop(
let mut seen_tool_signatures: HashSet<(String, String)> = HashSet::new();
let mut missing_tool_call_retry_used = false;
let mut missing_tool_call_retry_prompt: Option<String> = None;
let ld_config = LOOP_DETECTION_CONFIG
.try_with(Clone::clone)
.unwrap_or_default();
let mut loop_detector = LoopDetector::new(ld_config);
let mut loop_detection_prompt: Option<String> = None;
let bypass_non_cli_approval_for_turn =
approval.is_some_and(|mgr| channel_name != "cli" && mgr.consume_non_cli_allow_all_once());
if bypass_non_cli_approval_for_turn {
@ -842,6 +858,9 @@ pub(crate) async fn run_tool_call_loop(
if let Some(prompt) = missing_tool_call_retry_prompt.take() {
request_messages.push(ChatMessage::user(prompt));
}
if let Some(prompt) = loop_detection_prompt.take() {
request_messages.push(ChatMessage::user(prompt));
}
// ── Progress: LLM thinking ────────────────────────────
if let Some(ref tx) = on_delta {
@ -1469,6 +1488,12 @@ pub(crate) async fn run_tool_call_loop(
.await;
}
// ── Loop detection: record call ──────────────────────
{
let sig = tool_call_signature(&call.name, &call.arguments);
loop_detector.record_call(&sig.0, &sig.1, &outcome.output, outcome.success);
}
ordered_results[*idx] = Some((call.name.clone(), call.tool_call_id.clone(), outcome));
}
@ -1514,6 +1539,49 @@ pub(crate) async fn run_tool_call_loop(
history.push(ChatMessage::tool(tool_msg.to_string()));
}
}
// ── Loop detection: check verdict ────────────────────────
match loop_detector.check() {
DetectionVerdict::Continue => {}
DetectionVerdict::InjectWarning(warning) => {
runtime_trace::record_event(
"loop_detected_warning",
Some(channel_name),
Some(provider_name),
Some(model),
Some(&turn_id),
Some(false),
Some("loop pattern detected, injecting self-correction prompt"),
serde_json::json!({ "iteration": iteration + 1, "warning": &warning }),
);
if let Some(ref tx) = on_delta {
let _ = tx
.send(format!(
"{DRAFT_PROGRESS_SENTINEL}\u{26a0}\u{fe0f} Loop detected, attempting self-correction\n"
))
.await;
}
loop_detection_prompt = Some(warning);
}
DetectionVerdict::HardStop(reason) => {
runtime_trace::record_event(
"loop_detected_hard_stop",
Some(channel_name),
Some(provider_name),
Some(model),
Some(&turn_id),
Some(false),
Some("loop persisted after warning, stopping early"),
serde_json::json!({ "iteration": iteration + 1, "reason": &reason }),
);
anyhow::bail!(
"Agent stopped early due to detected loop pattern (iteration {}/{}): {}",
iteration + 1,
max_iterations,
reason
);
}
}
}
runtime_trace::record_event(
@ -1732,6 +1800,7 @@ pub async fn run(
let provider_runtime_options = providers::ProviderRuntimeOptions {
auth_profile_override: None,
provider_api_url: config.api_url.clone(),
provider_transport: config.effective_provider_transport(),
zeroclaw_dir: config.config_path.parent().map(std::path::PathBuf::from),
secrets_encrypt: config.secrets.encrypt,
reasoning_enabled: config.runtime.reasoning_enabled,
@ -1956,25 +2025,34 @@ pub async fn run(
ChatMessage::user(&enriched),
];
let response = run_tool_call_loop(
provider.as_ref(),
&mut history,
&tools_registry,
observer.as_ref(),
provider_name,
model_name,
temperature,
false,
approval_manager.as_ref(),
channel_name,
&config.multimodal,
config.agent.max_tool_iterations,
None,
None,
None,
&[],
)
.await?;
let ld_cfg = LoopDetectionConfig {
no_progress_threshold: config.agent.loop_detection_no_progress_threshold,
ping_pong_cycles: config.agent.loop_detection_ping_pong_cycles,
failure_streak_threshold: config.agent.loop_detection_failure_streak,
};
let response = LOOP_DETECTION_CONFIG
.scope(
ld_cfg,
run_tool_call_loop(
provider.as_ref(),
&mut history,
&tools_registry,
observer.as_ref(),
provider_name,
model_name,
temperature,
false,
approval_manager.as_ref(),
channel_name,
&config.multimodal,
config.agent.max_tool_iterations,
None,
None,
None,
&[],
),
)
.await?;
final_output = response.clone();
println!("{response}");
observer.record_event(&ObserverEvent::TurnComplete);
@ -2081,25 +2159,34 @@ pub async fn run(
history.push(ChatMessage::user(&enriched));
let response = match run_tool_call_loop(
provider.as_ref(),
&mut history,
&tools_registry,
observer.as_ref(),
provider_name,
model_name,
temperature,
false,
approval_manager.as_ref(),
channel_name,
&config.multimodal,
config.agent.max_tool_iterations,
None,
None,
None,
&[],
)
.await
let ld_cfg = LoopDetectionConfig {
no_progress_threshold: config.agent.loop_detection_no_progress_threshold,
ping_pong_cycles: config.agent.loop_detection_ping_pong_cycles,
failure_streak_threshold: config.agent.loop_detection_failure_streak,
};
let response = match LOOP_DETECTION_CONFIG
.scope(
ld_cfg,
run_tool_call_loop(
provider.as_ref(),
&mut history,
&tools_registry,
observer.as_ref(),
provider_name,
model_name,
temperature,
false,
approval_manager.as_ref(),
channel_name,
&config.multimodal,
config.agent.max_tool_iterations,
None,
None,
None,
&[],
),
)
.await
{
Ok(resp) => resp,
Err(e) => {
@ -2113,6 +2200,15 @@ pub async fn run(
eprintln!("\n{pause_notice}\n");
continue;
}
if is_loop_detection_error(&e) {
let notice =
"\u{26a0}\u{fe0f} Loop pattern detected and agent stopped early. \
Context preserved. Reply \"continue\" to resume, or adjust \
loop_detection_* thresholds in config.";
history.push(ChatMessage::assistant(notice));
eprintln!("\n{notice}\n");
continue;
}
eprintln!("\nError: {e}\n");
continue;
}
@ -2218,6 +2314,7 @@ pub async fn process_message(
let provider_runtime_options = providers::ProviderRuntimeOptions {
auth_profile_override: None,
provider_api_url: config.api_url.clone(),
provider_transport: config.effective_provider_transport(),
zeroclaw_dir: config.config_path.parent().map(std::path::PathBuf::from),
secrets_encrypt: config.secrets.encrypt,
reasoning_enabled: config.runtime.reasoning_enabled,

View File

@ -0,0 +1,389 @@
//! Loop detection for the agent tool-call loop.
//!
//! Detects three patterns of unproductive looping:
//! 1. **No-progress repeat** — same tool + same args + same output hash.
//! 2. **Ping-pong** — two calls alternating (A→B→A→B) with no progress.
//! 3. **Consecutive failure streak** — same tool failing repeatedly.
//!
//! On first detection an `InjectWarning` verdict gives the LLM a chance to
//! self-correct. If the pattern persists the next check returns `HardStop`.
use std::collections::HashMap;
use std::hash::{DefaultHasher, Hash, Hasher};
/// Maximum bytes of tool output considered when hashing results.
/// Keeps hashing fast and bounded for large outputs.
/// Outputs that differ only beyond this prefix therefore hash as equal —
/// an accepted trade-off for loop detection.
const OUTPUT_HASH_PREFIX_BYTES: usize = 4096;
// ─── Configuration ───────────────────────────────────────────────────────────
/// Tuning knobs for each detection strategy.
#[derive(Debug, Clone)]
pub(crate) struct LoopDetectionConfig {
    /// How many identical (tool + args + output) repetitions trigger
    /// detection. `0` = disabled. Default: `3`.
    pub no_progress_threshold: usize,
    /// How many full A-B alternation cycles trigger ping-pong detection.
    /// `0` = disabled. Default: `2`.
    pub ping_pong_cycles: usize,
    /// How many consecutive failures of the *same* tool trigger detection.
    /// `0` = disabled. Default: `3`.
    pub failure_streak_threshold: usize,
}

impl Default for LoopDetectionConfig {
    /// Conservative defaults: warn after 3 identical repeats, 2 ping-pong
    /// cycles, or 3 consecutive failures of a single tool.
    fn default() -> Self {
        LoopDetectionConfig {
            no_progress_threshold: 3,
            ping_pong_cycles: 2,
            failure_streak_threshold: 3,
        }
    }
}
// ─── Verdict ─────────────────────────────────────────────────────────────────
/// Action the caller should take after `LoopDetector::check()`.
#[derive(Debug, PartialEq, Eq)]
pub(crate) enum DetectionVerdict {
    /// No loop detected — proceed normally.
    Continue,
    /// First detection — inject this self-correction prompt, then continue.
    /// The payload is the full prompt text produced by `format_warning`.
    InjectWarning(String),
    /// Pattern persisted after warning — terminate the loop.
    /// The payload is the human-readable detection reason.
    HardStop(String),
}
// ─── Internal record ─────────────────────────────────────────────────────────

/// One completed tool invocation, reduced to the fields detection compares.
struct CallRecord {
    tool_name: String,   // canonical tool name (as passed by the caller)
    args_sig: String,    // canonical JSON signature of the arguments
    result_hash: u64,    // hash of (a prefix of) the tool output
    success: bool,       // whether the tool reported success
}
// ─── Detector ────────────────────────────────────────────────────────────────

/// Stateful detector: fed one record per tool call, queried once per turn.
pub(crate) struct LoopDetector {
    config: LoopDetectionConfig,
    // Chronological log of every call recorded this turn.
    history: Vec<CallRecord>,
    // Per-tool count of consecutive failures; an entry is cleared on success.
    consecutive_failures: HashMap<String, usize>,
    // Set once a warning verdict has been issued; the next detection hard-stops.
    warning_injected: bool,
}
impl LoopDetector {
pub fn new(config: LoopDetectionConfig) -> Self {
Self {
config,
history: Vec::new(),
consecutive_failures: HashMap::new(),
warning_injected: false,
}
}
/// Record a completed tool invocation.
///
/// * `tool_name` — canonical tool name (lowercased by caller).
/// * `args_sig` — canonical JSON args string from `tool_call_signature()`.
/// * `output` — raw tool output text.
/// * `success` — whether the tool reported success.
pub fn record_call(&mut self, tool_name: &str, args_sig: &str, output: &str, success: bool) {
let result_hash = hash_output(output);
self.history.push(CallRecord {
tool_name: tool_name.to_owned(),
args_sig: args_sig.to_owned(),
result_hash,
success,
});
if success {
self.consecutive_failures.remove(tool_name);
} else {
*self
.consecutive_failures
.entry(tool_name.to_owned())
.or_insert(0) += 1;
}
}
/// Evaluate the current history and return a verdict.
pub fn check(&mut self) -> DetectionVerdict {
let reason = self
.check_no_progress_repeat()
.or_else(|| self.check_ping_pong())
.or_else(|| self.check_failure_streak());
match reason {
None => DetectionVerdict::Continue,
Some(msg) => {
if self.warning_injected {
DetectionVerdict::HardStop(msg)
} else {
self.warning_injected = true;
DetectionVerdict::InjectWarning(format_warning(&msg))
}
}
}
}
// ── Strategy 1: no-progress repeat ───────────────────────────────────
fn check_no_progress_repeat(&self) -> Option<String> {
let threshold = self.config.no_progress_threshold;
if threshold == 0 || self.history.is_empty() {
return None;
}
let last = self.history.last().unwrap();
let streak = self
.history
.iter()
.rev()
.take_while(|r| {
r.tool_name == last.tool_name
&& r.args_sig == last.args_sig
&& r.result_hash == last.result_hash
})
.count();
if streak >= threshold {
Some(format!(
"Tool '{}' called {} times with identical arguments and identical results \
no progress detected",
last.tool_name, streak
))
} else {
None
}
}
// ── Strategy 2: ping-pong ────────────────────────────────────────────
fn check_ping_pong(&self) -> Option<String> {
let cycles = self.config.ping_pong_cycles;
if cycles == 0 || self.history.len() < 4 {
return None;
}
let len = self.history.len();
let a = &self.history[len - 2];
let b = &self.history[len - 1];
// The two sides of the ping-pong must differ.
if a.tool_name == b.tool_name && a.args_sig == b.args_sig {
return None;
}
let min_entries = cycles * 2;
if len < min_entries {
return None;
}
let tail = &self.history[len - min_entries..];
let is_ping_pong = tail.chunks(2).all(|pair| {
pair.len() == 2
&& pair[0].tool_name == a.tool_name
&& pair[0].args_sig == a.args_sig
&& pair[0].result_hash == a.result_hash
&& pair[1].tool_name == b.tool_name
&& pair[1].args_sig == b.args_sig
&& pair[1].result_hash == b.result_hash
});
if is_ping_pong {
Some(format!(
"Ping-pong loop detected: '{}' and '{}' alternating {} times with no progress",
a.tool_name, b.tool_name, cycles
))
} else {
None
}
}
// ── Strategy 3: consecutive failure streak ───────────────────────────
fn check_failure_streak(&self) -> Option<String> {
let threshold = self.config.failure_streak_threshold;
if threshold == 0 {
return None;
}
for (tool, count) in &self.consecutive_failures {
if *count >= threshold {
return Some(format!(
"Tool '{}' failed {} consecutive times",
tool, count
));
}
}
None
}
}
// ─── Helpers ─────────────────────────────────────────────────────────────────
fn hash_output(output: &str) -> u64 {
let prefix = if output.len() > OUTPUT_HASH_PREFIX_BYTES {
&output[..OUTPUT_HASH_PREFIX_BYTES]
} else {
output
};
let mut hasher = DefaultHasher::new();
prefix.hash(&mut hasher);
hasher.finish()
}
/// Build the self-correction prompt injected into the conversation the first
/// time a loop pattern is detected. `reason` is the human-readable diagnosis
/// produced by one of the detection strategies.
fn format_warning(reason: &str) -> String {
    let mut msg =
        String::from("IMPORTANT: A loop pattern has been detected in your tool usage. ");
    msg.push_str(reason);
    msg.push_str(
        ". You must change your approach: \
         (1) Try a different tool or different arguments, \
         (2) If polling a process, increase wait time or check if it's stuck, \
         (3) If the task cannot be completed, explain why and stop. \
         Do NOT repeat the same tool call with the same arguments.",
    );
    msg
}
// ─── Unit tests ──────────────────────────────────────────────────────────────

/// Covers every strategy (no-progress, ping-pong, failure streak), the
/// warning→hard-stop escalation, resets, disabled thresholds, and
/// false-positive guards.
#[cfg(test)]
mod tests {
    use super::*;

    // Detector with all strategies at their default thresholds (3 / 2 / 3).
    fn default_config() -> LoopDetectionConfig {
        LoopDetectionConfig::default()
    }

    // Detector with every strategy switched off (threshold 0 = disabled).
    fn disabled_config() -> LoopDetectionConfig {
        LoopDetectionConfig {
            no_progress_threshold: 0,
            ping_pong_cycles: 0,
            failure_streak_threshold: 0,
        }
    }

    // 1. Below threshold → Continue
    #[test]
    fn below_threshold_does_not_trigger() {
        let mut det = LoopDetector::new(default_config());
        det.record_call("echo", r#"{"msg":"hi"}"#, "hello", true);
        det.record_call("echo", r#"{"msg":"hi"}"#, "hello", true);
        assert_eq!(det.check(), DetectionVerdict::Continue);
    }

    // 2. No-progress repeat triggers warning at threshold
    #[test]
    fn no_progress_repeat_triggers_warning() {
        let mut det = LoopDetector::new(default_config());
        for _ in 0..3 {
            det.record_call("echo", r#"{"msg":"hi"}"#, "hello", true);
        }
        match det.check() {
            DetectionVerdict::InjectWarning(msg) => {
                assert!(msg.contains("no progress"), "msg: {msg}");
            }
            other => panic!("expected InjectWarning, got {other:?}"),
        }
    }

    // 3. Same input but different output → no trigger (progress detected)
    #[test]
    fn same_input_different_output_does_not_trigger() {
        let mut det = LoopDetector::new(default_config());
        det.record_call("echo", r#"{"msg":"hi"}"#, "result_1", true);
        det.record_call("echo", r#"{"msg":"hi"}"#, "result_2", true);
        det.record_call("echo", r#"{"msg":"hi"}"#, "result_3", true);
        assert_eq!(det.check(), DetectionVerdict::Continue);
    }

    // 4. Warning then continued loop → HardStop
    #[test]
    fn warning_then_continued_loop_triggers_hard_stop() {
        let mut det = LoopDetector::new(default_config());
        for _ in 0..3 {
            det.record_call("echo", r#"{"msg":"hi"}"#, "same", true);
        }
        assert!(matches!(det.check(), DetectionVerdict::InjectWarning(_)));
        // One more identical call — the warning did not change behavior.
        det.record_call("echo", r#"{"msg":"hi"}"#, "same", true);
        match det.check() {
            DetectionVerdict::HardStop(msg) => {
                assert!(msg.contains("no progress"), "msg: {msg}");
            }
            other => panic!("expected HardStop, got {other:?}"),
        }
    }

    // 5. Ping-pong detection
    #[test]
    fn ping_pong_triggers_warning() {
        let mut det = LoopDetector::new(default_config());
        // 2 cycles: A-B-A-B
        det.record_call("tool_a", r#"{"x":1}"#, "out_a", true);
        det.record_call("tool_b", r#"{"y":2}"#, "out_b", true);
        det.record_call("tool_a", r#"{"x":1}"#, "out_a", true);
        det.record_call("tool_b", r#"{"y":2}"#, "out_b", true);
        match det.check() {
            DetectionVerdict::InjectWarning(msg) => {
                assert!(msg.contains("Ping-pong"), "msg: {msg}");
            }
            other => panic!("expected InjectWarning, got {other:?}"),
        }
    }

    // 6. Ping-pong with progress does not trigger
    #[test]
    fn ping_pong_with_progress_does_not_trigger() {
        let mut det = LoopDetector::new(default_config());
        det.record_call("tool_a", r#"{"x":1}"#, "out_a_1", true);
        det.record_call("tool_b", r#"{"y":2}"#, "out_b_1", true);
        det.record_call("tool_a", r#"{"x":1}"#, "out_a_2", true); // different output
        det.record_call("tool_b", r#"{"y":2}"#, "out_b_2", true); // different output
        assert_eq!(det.check(), DetectionVerdict::Continue);
    }

    // 7. Consecutive failure streak (different args each time to avoid no-progress trigger)
    #[test]
    fn failure_streak_triggers_warning() {
        let mut det = LoopDetector::new(default_config());
        det.record_call("shell", r#"{"cmd":"bad1"}"#, "error: not found 1", false);
        det.record_call("shell", r#"{"cmd":"bad2"}"#, "error: not found 2", false);
        det.record_call("shell", r#"{"cmd":"bad3"}"#, "error: not found 3", false);
        match det.check() {
            DetectionVerdict::InjectWarning(msg) => {
                assert!(msg.contains("failed 3 consecutive"), "msg: {msg}");
            }
            other => panic!("expected InjectWarning, got {other:?}"),
        }
    }

    // 8. Failure streak resets on success
    #[test]
    fn failure_streak_resets_on_success() {
        let mut det = LoopDetector::new(default_config());
        det.record_call("shell", r#"{"cmd":"bad"}"#, "err", false);
        det.record_call("shell", r#"{"cmd":"bad"}"#, "err", false);
        det.record_call("shell", r#"{"cmd":"good"}"#, "ok", true); // resets
        det.record_call("shell", r#"{"cmd":"bad"}"#, "err", false);
        det.record_call("shell", r#"{"cmd":"bad"}"#, "err", false);
        assert_eq!(det.check(), DetectionVerdict::Continue);
    }

    // 9. All thresholds zero → disabled
    #[test]
    fn all_disabled_never_triggers() {
        let mut det = LoopDetector::new(disabled_config());
        for _ in 0..20 {
            det.record_call("echo", r#"{"msg":"hi"}"#, "same", true);
        }
        assert_eq!(det.check(), DetectionVerdict::Continue);
    }

    // 10. Mixed tools → no false positive
    #[test]
    fn mixed_tools_no_false_positive() {
        let mut det = LoopDetector::new(default_config());
        det.record_call("file_read", r#"{"path":"a.rs"}"#, "content_a", true);
        det.record_call("shell", r#"{"cmd":"ls"}"#, "file_list", true);
        det.record_call("memory_store", r#"{"key":"x"}"#, "stored", true);
        det.record_call("file_read", r#"{"path":"b.rs"}"#, "content_b", true);
        det.record_call("shell", r#"{"cmd":"cargo test"}"#, "ok", true);
        assert_eq!(det.check(), DetectionVerdict::Continue);
    }
}

View File

@ -999,6 +999,7 @@ fn runtime_perplexity_filter_snapshot(
return state.perplexity_filter.clone();
}
}
crate::config::PerplexityFilterConfig::default()
}
@ -2168,54 +2169,6 @@ async fn handle_runtime_command_if_needed(
)
}
}
ChannelRuntimeCommand::ApprovePendingRequest(raw_request_id) => {
let request_id = raw_request_id.trim().to_string();
if request_id.is_empty() {
"Usage: `/approve-allow <request-id>`".to_string()
} else {
match ctx.approval_manager.confirm_non_cli_pending_request(
&request_id,
sender,
source_channel,
reply_target,
) {
Ok(req) => {
ctx.approval_manager
.record_non_cli_pending_resolution(&request_id, ApprovalResponse::Yes);
runtime_trace::record_event(
"approval_request_allowed",
Some(source_channel),
None,
None,
None,
Some(true),
Some("pending request allowed for current tool invocation"),
serde_json::json!({
"request_id": request_id,
"tool_name": req.tool_name,
"sender": sender,
"channel": source_channel,
}),
);
format!(
"Approved pending request `{}` for this invocation of `{}`.",
req.request_id, req.tool_name
)
}
Err(PendingApprovalError::NotFound) => {
format!("Pending approval request `{request_id}` was not found.")
}
Err(PendingApprovalError::Expired) => {
format!("Pending approval request `{request_id}` has expired.")
}
Err(PendingApprovalError::RequesterMismatch) => {
format!(
"Pending approval request `{request_id}` can only be approved by the same sender in the same chat/channel that created it."
)
}
}
}
}
ChannelRuntimeCommand::ConfirmToolApproval(raw_request_id) => {
let request_id = raw_request_id.trim().to_string();
if request_id.is_empty() {
@ -2228,8 +2181,6 @@ async fn handle_runtime_command_if_needed(
reply_target,
) {
Ok(req) => {
ctx.approval_manager
.record_non_cli_pending_resolution(&request_id, ApprovalResponse::Yes);
let tool_name = req.tool_name;
let mut approval_message = if tool_name == APPROVAL_ALL_TOOLS_ONCE_TOKEN {
let remaining = ctx.approval_manager.grant_non_cli_allow_all_once();
@ -2336,10 +2287,54 @@ async fn handle_runtime_command_if_needed(
}
}
}
ChannelRuntimeCommand::ApprovePendingRequest(raw_request_id) => {
let request_id = raw_request_id.trim().to_string();
if request_id.is_empty() {
"Usage: `/approve-allow <request-id>`".to_string()
} else if !ctx
.approval_manager
.is_non_cli_approval_actor_allowed(source_channel, sender)
{
"You are not allowed to approve pending non-CLI tool requests.".to_string()
} else {
match ctx.approval_manager.confirm_non_cli_pending_request(
&request_id,
sender,
source_channel,
reply_target,
) {
Ok(req) => {
ctx.approval_manager.record_non_cli_pending_resolution(
&request_id,
ApprovalResponse::Yes,
);
format!(
"Approved pending request `{}` for `{}`.",
request_id,
approval_target_label(&req.tool_name)
)
}
Err(PendingApprovalError::NotFound) => {
format!("Pending approval request `{request_id}` was not found.")
}
Err(PendingApprovalError::Expired) => {
format!("Pending approval request `{request_id}` has expired.")
}
Err(PendingApprovalError::RequesterMismatch) => format!(
"Pending approval request `{request_id}` can only be approved by the same sender in the same chat/channel that created it."
),
}
}
}
ChannelRuntimeCommand::DenyToolApproval(raw_request_id) => {
let request_id = raw_request_id.trim().to_string();
if request_id.is_empty() {
"Usage: `/approve-deny <request-id>`".to_string()
} else if !ctx
.approval_manager
.is_non_cli_approval_actor_allowed(source_channel, sender)
{
"You are not allowed to deny pending non-CLI tool requests.".to_string()
} else {
match ctx.approval_manager.reject_non_cli_pending_request(
&request_id,
@ -2348,81 +2343,25 @@ async fn handle_runtime_command_if_needed(
reply_target,
) {
Ok(req) => {
ctx.approval_manager
.record_non_cli_pending_resolution(&request_id, ApprovalResponse::No);
runtime_trace::record_event(
"approval_request_denied",
Some(source_channel),
None,
None,
None,
Some(true),
Some("pending request denied"),
serde_json::json!({
"request_id": request_id,
"tool_name": req.tool_name,
"sender": sender,
"channel": source_channel,
}),
ctx.approval_manager.record_non_cli_pending_resolution(
&request_id,
ApprovalResponse::No,
);
format!(
"Denied pending approval request `{}` for tool `{}`.",
req.request_id, req.tool_name
"Denied pending request `{}` for `{}`.",
request_id,
approval_target_label(&req.tool_name)
)
}
Err(PendingApprovalError::NotFound) => {
runtime_trace::record_event(
"approval_request_denied",
Some(source_channel),
None,
None,
None,
Some(false),
Some("pending request not found"),
serde_json::json!({
"request_id": request_id,
"sender": sender,
"channel": source_channel,
}),
);
format!("Pending approval request `{request_id}` was not found.")
}
Err(PendingApprovalError::Expired) => {
runtime_trace::record_event(
"approval_request_denied",
Some(source_channel),
None,
None,
None,
Some(false),
Some("pending request expired"),
serde_json::json!({
"request_id": request_id,
"sender": sender,
"channel": source_channel,
}),
);
format!("Pending approval request `{request_id}` has expired.")
}
Err(PendingApprovalError::RequesterMismatch) => {
runtime_trace::record_event(
"approval_request_denied",
Some(source_channel),
None,
None,
None,
Some(false),
Some("pending request denier mismatch"),
serde_json::json!({
"request_id": request_id,
"sender": sender,
"channel": source_channel,
}),
);
format!(
"Pending approval request `{request_id}` can only be denied by the same sender in the same chat/channel that created it."
)
}
Err(PendingApprovalError::RequesterMismatch) => format!(
"Pending approval request `{request_id}` can only be denied by the same sender in the same chat/channel that created it."
),
}
}
}
@ -3909,7 +3848,7 @@ async fn run_message_dispatch_loop(
}
}
process_channel_message(worker_ctx, msg, cancellation_token).await;
Box::pin(process_channel_message(worker_ctx, msg, cancellation_token)).await;
if interrupt_enabled {
let mut active = in_flight.lock().await;
@ -4894,6 +4833,7 @@ pub async fn start_channels(config: Config) -> Result<()> {
let provider_runtime_options = providers::ProviderRuntimeOptions {
auth_profile_override: None,
provider_api_url: config.api_url.clone(),
provider_transport: config.effective_provider_transport(),
zeroclaw_dir: config.config_path.parent().map(std::path::PathBuf::from),
secrets_encrypt: config.secrets.encrypt,
reasoning_enabled: config.runtime.reasoning_enabled,
@ -5301,6 +5241,7 @@ pub async fn start_channels(config: Config) -> Result<()> {
}
#[cfg(test)]
#[allow(clippy::large_futures)]
mod tests {
use super::*;
use crate::memory::{Memory, MemoryCategory, SqliteMemory};
@ -10411,6 +10352,25 @@ BTC is currently around $65,000 based on latest tool output."#;
.any(|entry| entry.channel.name() == "mattermost"));
}
#[test]
fn collect_configured_channels_includes_dingtalk_when_configured() {
let mut config = Config::default();
config.channels_config.dingtalk = Some(crate::config::schema::DingTalkConfig {
client_id: "ding-app-key".to_string(),
client_secret: "ding-app-secret".to_string(),
allowed_users: vec!["*".to_string()],
});
let channels = collect_configured_channels(&config, "test");
assert!(channels
.iter()
.any(|entry| entry.display_name == "DingTalk"));
assert!(channels
.iter()
.any(|entry| entry.channel.name() == "dingtalk"));
}
struct AlwaysFailChannel {
name: &'static str,
calls: Arc<AtomicUsize>,

View File

@ -8,20 +8,21 @@ pub use schema::{
AgentConfig, AgentSessionBackend, AgentSessionConfig, AgentSessionStrategy, AgentsIpcConfig,
AuditConfig, AutonomyConfig, BrowserComputerUseConfig, BrowserConfig, BuiltinHooksConfig,
ChannelsConfig, ClassificationRule, ComposioConfig, Config, CoordinationConfig, CostConfig,
CronConfig, DelegateAgentConfig, DiscordConfig, DockerRuntimeConfig, EmbeddingRouteConfig,
EstopConfig, FeishuConfig, GatewayConfig, GroupReplyConfig, GroupReplyMode, HardwareConfig,
HardwareTransport, HeartbeatConfig, HooksConfig, HttpRequestConfig, IMessageConfig,
IdentityConfig, LarkConfig, MatrixConfig, MemoryConfig, ModelRouteConfig, MultimodalConfig,
NextcloudTalkConfig, NonCliNaturalLanguageApprovalMode, ObservabilityConfig,
OtpChallengeDelivery, OtpConfig, OtpMethod, PeripheralBoardConfig, PeripheralsConfig,
PerplexityFilterConfig, PluginEntryConfig, PluginsConfig, ProviderConfig, ProxyConfig,
ProxyScope, QdrantConfig, QueryClassificationConfig, ReliabilityConfig, ResearchPhaseConfig,
ResearchTrigger, ResourceLimitsConfig, RuntimeConfig, SandboxBackend, SandboxConfig,
SchedulerConfig, SecretsConfig, SecurityConfig, SecurityRoleConfig, SkillsConfig,
SkillsPromptInjectionMode, SlackConfig, StorageConfig, StorageProviderConfig,
StorageProviderSection, StreamMode, SyscallAnomalyConfig, TelegramConfig, TranscriptionConfig,
TunnelConfig, UrlAccessConfig, WasmCapabilityEscalationMode, WasmConfig, WasmModuleHashPolicy,
WasmRuntimeConfig, WasmSecurityConfig, WebFetchConfig, WebSearchConfig, WebhookConfig,
CronConfig, DelegateAgentConfig, DiscordConfig, DockerRuntimeConfig, EconomicConfig,
EconomicTokenPricing, EmbeddingRouteConfig, EstopConfig, FeishuConfig, GatewayConfig,
GroupReplyConfig, GroupReplyMode, HardwareConfig, HardwareTransport, HeartbeatConfig,
HooksConfig, HttpRequestConfig, IMessageConfig, IdentityConfig, LarkConfig, MatrixConfig,
MemoryConfig, ModelRouteConfig, MultimodalConfig, NextcloudTalkConfig,
NonCliNaturalLanguageApprovalMode, ObservabilityConfig, OtpChallengeDelivery, OtpConfig,
OtpMethod, PeripheralBoardConfig, PeripheralsConfig, PerplexityFilterConfig, PluginEntryConfig,
PluginsConfig, ProviderConfig, ProxyConfig, ProxyScope, QdrantConfig,
QueryClassificationConfig, ReliabilityConfig, ResearchPhaseConfig, ResearchTrigger,
ResourceLimitsConfig, RuntimeConfig, SandboxBackend, SandboxConfig, SchedulerConfig,
SecretsConfig, SecurityConfig, SecurityRoleConfig, SkillsConfig, SkillsPromptInjectionMode,
SlackConfig, StorageConfig, StorageProviderConfig, StorageProviderSection, StreamMode,
SyscallAnomalyConfig, TelegramConfig, TranscriptionConfig, TunnelConfig, UrlAccessConfig,
WasmCapabilityEscalationMode, WasmConfig, WasmModuleHashPolicy, WasmRuntimeConfig,
WasmSecurityConfig, WebFetchConfig, WebSearchConfig, WebhookConfig,
};
pub fn name_and_presence<T: traits::ChannelConfig>(channel: Option<&T>) -> (&'static str, bool) {

View File

@ -237,6 +237,11 @@ pub struct Config {
#[serde(default)]
pub cost: CostConfig,
/// Economic agent survival tracking (`[economic]`).
/// Tracks balance, token costs, work income, and survival status.
#[serde(default)]
pub economic: EconomicConfig,
/// Peripheral board configuration for hardware integration (`[peripherals]`).
#[serde(default)]
pub peripherals: PeripheralsConfig,
@ -309,6 +314,20 @@ pub struct ProviderConfig {
/// (e.g. OpenAI Codex `/responses` reasoning effort).
#[serde(default)]
pub reasoning_level: Option<String>,
/// Optional transport override for providers that support multiple transports.
/// Supported values: "auto", "websocket", "sse".
///
/// Resolution order:
/// 1) `model_routes[].transport` (route-specific)
/// 2) env overrides (`PROVIDER_TRANSPORT`, `ZEROCLAW_PROVIDER_TRANSPORT`, `ZEROCLAW_CODEX_TRANSPORT`)
/// 3) `provider.transport`
/// 4) runtime default (`auto`, WebSocket-first with SSE fallback for OpenAI Codex)
///
/// Note: env overrides replace configured `provider.transport` when set.
///
/// Existing configs that omit `provider.transport` remain valid and fall back to defaults.
#[serde(default)]
pub transport: Option<String>,
}
// ── Delegate Agents ──────────────────────────────────────────────
@ -716,6 +735,21 @@ pub struct AgentConfig {
/// Tool dispatch strategy (e.g. `"auto"`). Default: `"auto"`.
#[serde(default = "default_agent_tool_dispatcher")]
pub tool_dispatcher: String,
/// Loop detection: no-progress repeat threshold.
/// Triggers when the same tool+args produces identical output this many times.
/// Set to `0` to disable. Default: `3`.
#[serde(default = "default_loop_detection_no_progress_threshold")]
pub loop_detection_no_progress_threshold: usize,
/// Loop detection: ping-pong cycle threshold.
/// Detects A→B→A→B alternating patterns with no progress.
/// Value is number of full cycles (A-B = 1 cycle). Set to `0` to disable. Default: `2`.
#[serde(default = "default_loop_detection_ping_pong_cycles")]
pub loop_detection_ping_pong_cycles: usize,
/// Loop detection: consecutive failure streak threshold.
/// Triggers when the same tool fails this many times in a row.
/// Set to `0` to disable. Default: `3`.
#[serde(default = "default_loop_detection_failure_streak")]
pub loop_detection_failure_streak: usize,
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, JsonSchema)]
@ -787,6 +821,18 @@ fn default_agent_session_max_messages() -> usize {
default_agent_max_history_messages()
}
/// Serde default for `AgentConfig::loop_detection_no_progress_threshold`.
fn default_loop_detection_no_progress_threshold() -> usize {
    3
}
/// Serde default for `AgentConfig::loop_detection_ping_pong_cycles`.
fn default_loop_detection_ping_pong_cycles() -> usize {
    2
}
/// Serde default for `AgentConfig::loop_detection_failure_streak`.
fn default_loop_detection_failure_streak() -> usize {
    3
}
impl Default for AgentConfig {
fn default() -> Self {
Self {
@ -796,6 +842,9 @@ impl Default for AgentConfig {
max_history_messages: default_agent_max_history_messages(),
parallel_tools: false,
tool_dispatcher: default_agent_tool_dispatcher(),
loop_detection_no_progress_threshold: default_loop_detection_no_progress_threshold(),
loop_detection_ping_pong_cycles: default_loop_detection_ping_pong_cycles(),
loop_detection_failure_streak: default_loop_detection_failure_streak(),
}
}
}
@ -1160,6 +1209,83 @@ pub struct PeripheralBoardConfig {
pub baud: u32,
}
// ── Economic Agent Config ─────────────────────────────────────────
/// Token pricing configuration for economic tracking.
///
/// Defaults mirror the serde field defaults (`default_input_price` /
/// `default_output_price`), which correspond to Claude Sonnet 4 list pricing.
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
pub struct EconomicTokenPricing {
    /// Price per million input tokens (USD)
    #[serde(default = "default_input_price")]
    pub input_price_per_million: f64,
    /// Price per million output tokens (USD)
    #[serde(default = "default_output_price")]
    pub output_price_per_million: f64,
}
/// Serde default: USD per million input tokens.
fn default_input_price() -> f64 {
    3.0 // Claude Sonnet 4 input price
}
/// Serde default: USD per million output tokens.
fn default_output_price() -> f64 {
    15.0 // Claude Sonnet 4 output price
}
// Programmatic default pricing uses the same functions as the serde field
// defaults, so constructed and deserialized configs agree.
impl Default for EconomicTokenPricing {
    fn default() -> Self {
        Self {
            input_price_per_million: default_input_price(),
            output_price_per_million: default_output_price(),
        }
    }
}
/// Economic agent survival tracking configuration (`[economic]` section).
///
/// Implements the ClawWork economic model for AI agents, tracking
/// balance, costs, income, and survival status.
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
pub struct EconomicConfig {
    /// Enable economic tracking (default: false)
    #[serde(default)]
    pub enabled: bool,
    /// Starting balance in USD (default: 1000.0)
    #[serde(default = "default_initial_balance")]
    pub initial_balance: f64,
    /// Token pricing configuration
    #[serde(default)]
    pub token_pricing: EconomicTokenPricing,
    /// Minimum evaluation score (0.0-1.0) to receive payment (default: 0.6)
    /// NOTE(review): the 0.0-1.0 range is not validated here — presumably
    /// enforced by the consumer; confirm before relying on it.
    #[serde(default = "default_min_evaluation_threshold")]
    pub min_evaluation_threshold: f64,
    /// Data directory for economic state persistence (relative to workspace)
    /// `None` leaves the location to the runtime — TODO confirm the default.
    #[serde(default)]
    pub data_path: Option<String>,
}
/// Serde default: starting balance in USD.
fn default_initial_balance() -> f64 {
    1000.0
}
/// Serde default: minimum evaluation score required to receive payment.
fn default_min_evaluation_threshold() -> f64 {
    0.6
}
// Economic tracking is opt-in: disabled by default, with all other fields
// matching their serde defaults.
impl Default for EconomicConfig {
    fn default() -> Self {
        Self {
            enabled: false,
            initial_balance: default_initial_balance(),
            token_pricing: EconomicTokenPricing::default(),
            min_evaluation_threshold: default_min_evaluation_threshold(),
            data_path: None,
        }
    }
}
/// Serde default for the peripheral transport: `"serial"`.
fn default_peripheral_transport() -> String {
    String::from("serial")
}
@ -3266,6 +3392,14 @@ pub struct ModelRouteConfig {
/// Optional API key override for this route's provider
#[serde(default)]
pub api_key: Option<String>,
/// Optional route-specific transport override for this route.
/// Supported values: "auto", "websocket", "sse".
///
/// When `model_routes[].transport` is unset, the route inherits `provider.transport`.
/// If both are unset, runtime defaults are used (`auto` for OpenAI Codex).
/// Existing configs without this field remain valid.
#[serde(default)]
pub transport: Option<String>,
}
// ── Embedding routing ───────────────────────────────────────────
@ -5135,6 +5269,7 @@ impl Default for Config {
proxy: ProxyConfig::default(),
identity: IdentityConfig::default(),
cost: CostConfig::default(),
economic: EconomicConfig::default(),
peripherals: PeripheralsConfig::default(),
agents: HashMap::new(),
coordination: CoordinationConfig::default(),
@ -6115,6 +6250,28 @@ impl Config {
}
}
/// Canonicalize a transport override to `auto`, `websocket`, or `sse`.
///
/// Matching is ASCII-case-insensitive and ignores `-` / `_` separators;
/// `ws` and `http` are accepted aliases for `websocket` and `sse`.
/// Unset or blank input yields `None`; unrecognized values are logged
/// (with `source` identifying where the override came from) and dropped.
fn normalize_provider_transport(raw: Option<&str>, source: &str) -> Option<String> {
    let trimmed = raw.map(str::trim).filter(|v| !v.is_empty())?;
    // Fold case and strip separators so "Web-Socket" / "web_socket" both match.
    let canonical: String = trimmed
        .to_ascii_lowercase()
        .chars()
        .filter(|&c| c != '-' && c != '_')
        .collect();
    match canonical.as_str() {
        "auto" => Some("auto".to_string()),
        "ws" | "websocket" => Some("websocket".to_string()),
        "http" | "sse" => Some("sse".to_string()),
        _ => {
            tracing::warn!(
                transport = %trimmed,
                source,
                "Ignoring invalid provider transport override"
            );
            None
        }
    }
}
/// Resolve provider reasoning level with backward-compatible runtime alias.
///
/// Priority:
@ -6158,6 +6315,16 @@ impl Config {
}
}
/// Resolve provider transport mode (`provider.transport`).
///
/// Supported values:
/// - `auto`
/// - `websocket`
/// - `sse`
///
/// Returns `None` when the field is unset, blank, or not a recognized
/// transport (invalid values are logged and ignored by
/// `normalize_provider_transport`). Env overrides are applied to
/// `provider.transport` earlier, in `apply_env_overrides`.
pub fn effective_provider_transport(&self) -> Option<String> {
    Self::normalize_provider_transport(self.provider.transport.as_deref(), "provider.transport")
}
fn lookup_model_provider_profile(
&self,
provider_name: &str,
@ -6521,6 +6688,32 @@ impl Config {
if route.max_tokens == Some(0) {
anyhow::bail!("model_routes[{i}].max_tokens must be greater than 0");
}
if route
.transport
.as_deref()
.is_some_and(|value| !value.trim().is_empty())
&& Self::normalize_provider_transport(
route.transport.as_deref(),
"model_routes[].transport",
)
.is_none()
{
anyhow::bail!("model_routes[{i}].transport must be one of: auto, websocket, sse");
}
}
if self
.provider
.transport
.as_deref()
.is_some_and(|value| !value.trim().is_empty())
&& Self::normalize_provider_transport(
self.provider.transport.as_deref(),
"provider.transport",
)
.is_none()
{
anyhow::bail!("provider.transport must be one of: auto, websocket, sse");
}
if self.provider_api.is_some()
@ -6852,6 +7045,17 @@ impl Config {
}
}
// Provider transport override: ZEROCLAW_PROVIDER_TRANSPORT or PROVIDER_TRANSPORT
if let Ok(transport) = std::env::var("ZEROCLAW_PROVIDER_TRANSPORT")
.or_else(|_| std::env::var("PROVIDER_TRANSPORT"))
{
if let Some(normalized) =
Self::normalize_provider_transport(Some(&transport), "env:provider_transport")
{
self.provider.transport = Some(normalized);
}
}
// Vision support override: ZEROCLAW_MODEL_SUPPORT_VISION or MODEL_SUPPORT_VISION
if let Ok(flag) = std::env::var("ZEROCLAW_MODEL_SUPPORT_VISION")
.or_else(|_| std::env::var("MODEL_SUPPORT_VISION"))
@ -7700,6 +7904,7 @@ default_temperature = 0.7
agent: AgentConfig::default(),
identity: IdentityConfig::default(),
cost: CostConfig::default(),
economic: EconomicConfig::default(),
peripherals: PeripheralsConfig::default(),
agents: HashMap::new(),
hooks: HooksConfig::default(),
@ -8074,6 +8279,7 @@ tool_dispatcher = "xml"
agent: AgentConfig::default(),
identity: IdentityConfig::default(),
cost: CostConfig::default(),
economic: EconomicConfig::default(),
peripherals: PeripheralsConfig::default(),
agents: HashMap::new(),
hooks: HooksConfig::default(),
@ -9453,6 +9659,7 @@ provider_api = "not-a-real-mode"
model: "anthropic/claude-sonnet-4.6".to_string(),
max_tokens: Some(0),
api_key: None,
transport: None,
}];
let err = config
@ -9463,6 +9670,48 @@ provider_api = "not-a-real-mode"
.contains("model_routes[0].max_tokens must be greater than 0"));
}
#[test]
async fn provider_transport_normalizes_aliases() {
let mut config = Config::default();
config.provider.transport = Some("WS".to_string());
assert_eq!(
config.effective_provider_transport().as_deref(),
Some("websocket")
);
}
#[test]
async fn provider_transport_invalid_is_rejected() {
let mut config = Config::default();
config.provider.transport = Some("udp".to_string());
let err = config
.validate()
.expect_err("provider.transport should reject invalid values");
assert!(err
.to_string()
.contains("provider.transport must be one of: auto, websocket, sse"));
}
#[test]
async fn model_route_transport_invalid_is_rejected() {
let mut config = Config::default();
config.model_routes = vec![ModelRouteConfig {
hint: "reasoning".to_string(),
provider: "openrouter".to_string(),
model: "anthropic/claude-sonnet-4.6".to_string(),
max_tokens: None,
api_key: None,
transport: Some("udp".to_string()),
}];
let err = config
.validate()
.expect_err("model_routes[].transport should reject invalid values");
assert!(err
.to_string()
.contains("model_routes[0].transport must be one of: auto, websocket, sse"));
}
#[test]
async fn env_override_glm_api_key_for_regional_aliases() {
let _env_guard = env_override_lock().await;
@ -10103,6 +10352,60 @@ default_model = "legacy-model"
std::env::remove_var("ZEROCLAW_REASONING_LEVEL");
}
#[test]
async fn env_override_provider_transport_normalizes_zeroclaw_alias() {
let _env_guard = env_override_lock().await;
let mut config = Config::default();
std::env::remove_var("PROVIDER_TRANSPORT");
std::env::set_var("ZEROCLAW_PROVIDER_TRANSPORT", "WS");
config.apply_env_overrides();
assert_eq!(config.provider.transport.as_deref(), Some("websocket"));
std::env::remove_var("ZEROCLAW_PROVIDER_TRANSPORT");
}
#[test]
async fn env_override_provider_transport_normalizes_legacy_alias() {
let _env_guard = env_override_lock().await;
let mut config = Config::default();
std::env::remove_var("ZEROCLAW_PROVIDER_TRANSPORT");
std::env::set_var("PROVIDER_TRANSPORT", "HTTP");
config.apply_env_overrides();
assert_eq!(config.provider.transport.as_deref(), Some("sse"));
std::env::remove_var("PROVIDER_TRANSPORT");
}
#[test]
async fn env_override_provider_transport_invalid_zeroclaw_does_not_override_existing() {
let _env_guard = env_override_lock().await;
let mut config = Config::default();
config.provider.transport = Some("sse".to_string());
std::env::remove_var("PROVIDER_TRANSPORT");
std::env::set_var("ZEROCLAW_PROVIDER_TRANSPORT", "udp");
config.apply_env_overrides();
assert_eq!(config.provider.transport.as_deref(), Some("sse"));
std::env::remove_var("ZEROCLAW_PROVIDER_TRANSPORT");
}
#[test]
async fn env_override_provider_transport_invalid_legacy_does_not_override_existing() {
let _env_guard = env_override_lock().await;
let mut config = Config::default();
config.provider.transport = Some("auto".to_string());
std::env::remove_var("ZEROCLAW_PROVIDER_TRANSPORT");
std::env::set_var("PROVIDER_TRANSPORT", "udp");
config.apply_env_overrides();
assert_eq!(config.provider.transport.as_deref(), Some("auto"));
std::env::remove_var("PROVIDER_TRANSPORT");
}
#[test]
async fn env_override_model_support_vision() {
let _env_guard = env_override_lock().await;
@ -10597,6 +10900,46 @@ default_model = "legacy-model"
assert_eq!(parsed.allowed_users, vec!["*"]);
}
#[test]
async fn dingtalk_config_defaults_allowed_users_to_empty() {
let json = r#"{"client_id":"ding-app-key","client_secret":"ding-app-secret"}"#;
let parsed: DingTalkConfig = serde_json::from_str(json).unwrap();
assert_eq!(parsed.client_id, "ding-app-key");
assert_eq!(parsed.client_secret, "ding-app-secret");
assert!(parsed.allowed_users.is_empty());
}
#[test]
async fn dingtalk_config_toml_roundtrip() {
let dc = DingTalkConfig {
client_id: "ding-app-key".into(),
client_secret: "ding-app-secret".into(),
allowed_users: vec!["*".into(), "staff123".into()],
};
let toml_str = toml::to_string(&dc).unwrap();
let parsed: DingTalkConfig = toml::from_str(&toml_str).unwrap();
assert_eq!(parsed.client_id, "ding-app-key");
assert_eq!(parsed.client_secret, "ding-app-secret");
assert_eq!(parsed.allowed_users, vec!["*", "staff123"]);
}
#[test]
async fn channels_except_webhook_reports_dingtalk_as_enabled() {
let mut channels = ChannelsConfig::default();
channels.dingtalk = Some(DingTalkConfig {
client_id: "ding-app-key".into(),
client_secret: "ding-app-secret".into(),
allowed_users: vec!["*".into()],
});
let dingtalk_state = channels
.channels_except_webhook()
.iter()
.find_map(|(handle, enabled)| (handle.name() == "DingTalk").then_some(*enabled));
assert_eq!(dingtalk_state, Some(true));
}
#[test]
async fn nextcloud_talk_config_serde() {
let nc = NextcloudTalkConfig {

View File

@ -22,6 +22,10 @@ const MIN_POLL_SECONDS: u64 = 5;
const SHELL_JOB_TIMEOUT_SECS: u64 = 120;
const SCHEDULER_COMPONENT: &str = "scheduler";
/// True when the output is exactly the `NO_REPLY` sentinel, ignoring
/// surrounding whitespace and ASCII case.
pub(crate) fn is_no_reply_sentinel(output: &str) -> bool {
    let candidate = output.trim();
    candidate.eq_ignore_ascii_case("NO_REPLY")
}
pub async fn run(config: Config) -> Result<()> {
let poll_secs = config.reliability.scheduler_poll_secs.max(MIN_POLL_SECONDS);
let mut interval = time::interval(Duration::from_secs(poll_secs));
@ -292,6 +296,13 @@ async fn deliver_if_configured(config: &Config, job: &CronJob, output: &str) ->
if !delivery.mode.eq_ignore_ascii_case("announce") {
return Ok(());
}
if is_no_reply_sentinel(output) {
tracing::debug!(
"Cron job '{}' returned NO_REPLY sentinel; skipping announce delivery",
job.id
);
return Ok(());
}
let channel = delivery
.channel
@ -1136,6 +1147,31 @@ mod tests {
assert!(err.to_string().contains("unsupported delivery channel"));
}
// NO_REPLY must short-circuit before the delivery channel is resolved:
// the channel here is deliberately invalid, so reaching channel resolution
// would return an error instead of Ok(()).
#[tokio::test]
async fn deliver_if_configured_skips_no_reply_sentinel() {
    let tmp = TempDir::new().unwrap();
    let config = test_config(&tmp).await;
    let mut job = test_job("echo ok");
    job.delivery = DeliveryConfig {
        mode: "announce".into(),
        channel: Some("invalid".into()),
        to: Some("target".into()),
        best_effort: true,
    };
    // Padded, lowercase sentinel still counts (matching is trimmed/case-insensitive).
    assert!(deliver_if_configured(&config, &job, " no_reply ")
        .await
        .is_ok());
}
#[test]
fn no_reply_sentinel_matching_is_trimmed_and_case_insensitive() {
    // The sentinel matches regardless of case and surrounding whitespace…
    for hit in ["NO_REPLY", " no_reply "] {
        assert!(is_no_reply_sentinel(hit), "expected sentinel match: {hit:?}");
    }
    // …but extra words or empty output are not the sentinel.
    for miss in ["NO_REPLY please", ""] {
        assert!(!is_no_reply_sentinel(miss), "unexpected sentinel match: {miss:?}");
    }
}
#[tokio::test]
async fn deliver_if_configured_whatsapp_web_requires_live_session_in_web_mode() {
let tmp = TempDir::new().unwrap();

View File

@ -227,26 +227,25 @@ async fn run_heartbeat_worker(config: Config) -> Result<()> {
{
Ok(output) => {
crate::health::mark_component_ok("heartbeat");
let announcement = if output.trim().is_empty() {
"heartbeat task executed".to_string()
} else {
output
};
if let Some((channel, target)) = &delivery {
if let Err(e) = crate::cron::scheduler::deliver_announcement(
&config,
channel,
target,
&announcement,
)
.await
{
crate::health::mark_component_error(
"heartbeat",
format!("delivery failed: {e}"),
);
tracing::warn!("Heartbeat delivery failed: {e}");
if let Some(announcement) = heartbeat_announcement_text(&output) {
if let Some((channel, target)) = &delivery {
if let Err(e) = crate::cron::scheduler::deliver_announcement(
&config,
channel,
target,
&announcement,
)
.await
{
crate::health::mark_component_error(
"heartbeat",
format!("delivery failed: {e}"),
);
tracing::warn!("Heartbeat delivery failed: {e}");
}
}
} else {
tracing::debug!("Heartbeat returned NO_REPLY sentinel; skipping delivery");
}
}
Err(e) => {
@ -258,6 +257,16 @@ async fn run_heartbeat_worker(config: Config) -> Result<()> {
}
}
/// Map heartbeat task output to the text that should be announced.
///
/// * `NO_REPLY` sentinel (trimmed, case-insensitive) → `None`, skip delivery
/// * blank output → the default "heartbeat task executed" message
/// * anything else → the output verbatim
fn heartbeat_announcement_text(output: &str) -> Option<String> {
    if crate::cron::scheduler::is_no_reply_sentinel(output) {
        None
    } else if output.trim().is_empty() {
        Some("heartbeat task executed".to_string())
    } else {
        Some(output.to_string())
    }
}
fn heartbeat_tasks_for_tick(
file_tasks: Vec<String>,
fallback_message: Option<&str>,
@ -553,6 +562,27 @@ mod tests {
assert!(tasks.is_empty());
}
// The NO_REPLY sentinel (any case, padded) suppresses the announcement.
#[test]
fn heartbeat_announcement_text_skips_no_reply_sentinel() {
    assert!(heartbeat_announcement_text(" NO_reply ").is_none());
}
// Whitespace-only output falls back to the generic heartbeat message.
#[test]
fn heartbeat_announcement_text_uses_default_for_empty_output() {
    assert_eq!(
        heartbeat_announcement_text(" \n\t "),
        Some("heartbeat task executed".to_string())
    );
}
// Non-empty, non-sentinel output is announced verbatim.
#[test]
fn heartbeat_announcement_text_keeps_regular_output() {
    assert_eq!(
        heartbeat_announcement_text("system nominal"),
        Some("system nominal".to_string())
    );
}
#[test]
fn heartbeat_delivery_target_none_when_unset() {
let config = Config::default();

View File

@ -1167,6 +1167,7 @@ mod tests {
model: String::new(),
max_tokens: None,
api_key: None,
transport: None,
}];
let mut items = Vec::new();
check_config_semantics(&config, &mut items);

874
src/economic/classifier.rs Normal file
View File

@ -0,0 +1,874 @@
//! Task Classifier for ZeroClaw Economic Agents
//!
//! Classifies work instructions into 44 BLS occupations with wage data
//! to estimate task value for agent economics.
//!
//! ## Overview
//!
//! The classifier matches task instructions to standardized occupation
//! categories using keyword matching and heuristics, then calculates
//! expected payment based on BLS hourly wage data.
//!
//! ## Example
//!
//! ```rust,ignore
//! use zeroclaw::economic::classifier::{TaskClassifier, OccupationCategory};
//!
//! let classifier = TaskClassifier::new();
//! let result = classifier.classify("Write a REST API in Rust").await?;
//!
//! println!("Occupation: {}", result.occupation);
//! println!("Hourly wage: ${:.2}", result.hourly_wage);
//! println!("Estimated hours: {:.2}", result.estimated_hours);
//! println!("Max payment: ${:.2}", result.max_payment);
//! ```
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
/// Occupation category groupings based on BLS major groups.
///
/// Buckets the embedded occupations for reporting; see
/// `OccupationCategory::display_name` for the human-readable labels.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub enum OccupationCategory {
    /// Software, IT, engineering roles
    TechnologyEngineering,
    /// Finance, accounting, management, sales
    BusinessFinance,
    /// Medical, nursing, social work
    HealthcareSocialServices,
    /// Legal, media, operations, other professional
    LegalMediaOperations,
}
impl OccupationCategory {
/// Returns a human-readable name for the category
pub fn display_name(&self) -> &'static str {
match self {
Self::TechnologyEngineering => "Technology & Engineering",
Self::BusinessFinance => "Business & Finance",
Self::HealthcareSocialServices => "Healthcare & Social Services",
Self::LegalMediaOperations => "Legal, Media & Operations",
}
}
}
/// A single occupation with BLS wage data.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Occupation {
    /// Official BLS occupation name
    pub name: String,
    /// Hourly wage in USD (BLS median)
    pub hourly_wage: f64,
    /// Category grouping
    pub category: OccupationCategory,
    /// Keywords for matching.
    /// Skipped during (de)serialization — keywords are embedded in the
    /// binary, so a deserialized `Occupation` has an empty keyword list.
    #[serde(skip)]
    pub keywords: Vec<&'static str>,
}
/// Result of classifying a task instruction.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ClassificationResult {
    /// Matched occupation name
    pub occupation: String,
    /// BLS hourly wage for this occupation
    pub hourly_wage: f64,
    /// Estimated hours to complete task
    pub estimated_hours: f64,
    /// Maximum payment (hours × wage)
    pub max_payment: f64,
    /// Classification confidence (0.0 - 1.0); heuristic, not a probability
    pub confidence: f64,
    /// Category of the matched occupation
    pub category: OccupationCategory,
    /// Brief reasoning for the classification
    pub reasoning: String,
}
/// Task classifier that maps instructions to BLS occupations.
#[derive(Debug)]
pub struct TaskClassifier {
    // Embedded occupation table (populated by `load_occupations`).
    occupations: Vec<Occupation>,
    // Index from keyword to positions in `occupations`
    // (built by `build_keyword_index`).
    keyword_index: HashMap<&'static str, Vec<usize>>,
    // NOTE(review): presumably used when no keyword matches — confirm
    // against `classify` (not shown in this chunk).
    fallback_occupation: String,
    fallback_wage: f64,
}
/// `Default` delegates to [`TaskClassifier::new`], so `default()` yields a
/// classifier preloaded with the embedded occupation data.
impl Default for TaskClassifier {
    fn default() -> Self {
        Self::new()
    }
}
impl TaskClassifier {
/// Create a classifier preloaded with the embedded BLS occupation data.
pub fn new() -> Self {
    let occupations = Self::load_occupations();
    Self {
        // Index is derived from the table before the table itself is moved in.
        keyword_index: Self::build_keyword_index(&occupations),
        occupations,
        fallback_occupation: "General and Operations Managers".to_string(),
        fallback_wage: 64.0,
    }
}
/// Load all 44 BLS occupations with wage data
fn load_occupations() -> Vec<Occupation> {
use OccupationCategory::{
BusinessFinance, HealthcareSocialServices, LegalMediaOperations, TechnologyEngineering,
};
vec![
// Technology & Engineering
Occupation {
name: "Software Developers".into(),
hourly_wage: 69.50,
category: TechnologyEngineering,
keywords: vec![
"software",
"code",
"programming",
"developer",
"rust",
"python",
"javascript",
"api",
"backend",
"frontend",
"fullstack",
"app",
"application",
"debug",
"refactor",
"implement",
"algorithm",
],
},
Occupation {
name: "Computer and Information Systems Managers".into(),
hourly_wage: 90.38,
category: TechnologyEngineering,
keywords: vec![
"it manager",
"cto",
"tech lead",
"infrastructure",
"systems",
"devops",
"cloud",
"architecture",
"platform",
"enterprise",
],
},
Occupation {
name: "Industrial Engineers".into(),
hourly_wage: 51.87,
category: TechnologyEngineering,
keywords: vec![
"industrial",
"process",
"optimization",
"efficiency",
"workflow",
"manufacturing",
"lean",
"six sigma",
"production",
],
},
Occupation {
name: "Mechanical Engineers".into(),
hourly_wage: 52.92,
category: TechnologyEngineering,
keywords: vec![
"mechanical",
"cad",
"solidworks",
"machinery",
"thermal",
"hvac",
"automotive",
"robotics",
],
},
// Business & Finance
Occupation {
name: "Accountants and Auditors".into(),
hourly_wage: 44.96,
category: BusinessFinance,
keywords: vec![
"accounting",
"audit",
"tax",
"bookkeeping",
"financial statements",
"gaap",
"ledger",
"reconciliation",
"cpa",
],
},
Occupation {
name: "Administrative Services Managers".into(),
hourly_wage: 60.59,
category: BusinessFinance,
keywords: vec![
"administrative",
"office manager",
"facilities",
"operations",
"scheduling",
"coordination",
],
},
Occupation {
name: "Buyers and Purchasing Agents".into(),
hourly_wage: 39.29,
category: BusinessFinance,
keywords: vec![
"procurement",
"purchasing",
"vendor",
"supplier",
"sourcing",
"negotiation",
"contracts",
],
},
Occupation {
name: "Compliance Officers".into(),
hourly_wage: 40.86,
category: BusinessFinance,
keywords: vec![
"compliance",
"regulatory",
"audit",
"policy",
"governance",
"risk",
"sox",
"gdpr",
],
},
Occupation {
name: "Financial Managers".into(),
hourly_wage: 86.76,
category: BusinessFinance,
keywords: vec![
"cfo",
"finance director",
"treasury",
"budget",
"financial planning",
"investment management",
],
},
Occupation {
name: "Financial and Investment Analysts".into(),
hourly_wage: 56.01,
category: BusinessFinance,
keywords: vec![
"financial analysis",
"investment",
"portfolio",
"stock",
"equity",
"valuation",
"modeling",
"dcf",
"market research",
],
},
Occupation {
name: "General and Operations Managers".into(),
hourly_wage: 64.00,
category: BusinessFinance,
keywords: vec![
"operations",
"general manager",
"director",
"oversee",
"manage",
"strategy",
"leadership",
"business",
],
},
Occupation {
name: "Market Research Analysts and Marketing Specialists".into(),
hourly_wage: 41.58,
category: BusinessFinance,
keywords: vec![
"market research",
"marketing",
"campaign",
"branding",
"seo",
"advertising",
"analytics",
"customer",
"segment",
],
},
Occupation {
name: "Personal Financial Advisors".into(),
hourly_wage: 77.02,
category: BusinessFinance,
keywords: vec![
"financial advisor",
"wealth",
"retirement",
"401k",
"ira",
"estate planning",
"insurance",
],
},
Occupation {
name: "Project Management Specialists".into(),
hourly_wage: 51.97,
category: BusinessFinance,
keywords: vec![
"project manager",
"pmp",
"agile",
"scrum",
"sprint",
"milestone",
"timeline",
"stakeholder",
"deliverable",
],
},
Occupation {
name: "Property, Real Estate, and Community Association Managers".into(),
hourly_wage: 39.77,
category: BusinessFinance,
keywords: vec![
"property",
"real estate",
"landlord",
"tenant",
"lease",
"hoa",
"community",
],
},
Occupation {
name: "Sales Managers".into(),
hourly_wage: 77.37,
category: BusinessFinance,
keywords: vec![
"sales manager",
"revenue",
"quota",
"pipeline",
"crm",
"account executive",
"territory",
],
},
Occupation {
name: "Marketing and Sales Managers".into(),
hourly_wage: 79.35,
category: BusinessFinance,
keywords: vec!["vp sales", "cmo", "growth", "go-to-market", "demand gen"],
},
Occupation {
name: "Financial Specialists".into(),
hourly_wage: 48.12,
category: BusinessFinance,
keywords: vec!["financial specialist", "credit", "loan", "underwriting"],
},
Occupation {
name: "Securities, Commodities, and Financial Services Sales Agents".into(),
hourly_wage: 48.12,
category: BusinessFinance,
keywords: vec!["broker", "securities", "commodities", "trading", "series 7"],
},
Occupation {
name: "Business Operations Specialists, All Other".into(),
hourly_wage: 44.41,
category: BusinessFinance,
keywords: vec![
"business analyst",
"operations specialist",
"process improvement",
],
},
Occupation {
name: "Claims Adjusters, Examiners, and Investigators".into(),
hourly_wage: 37.87,
category: BusinessFinance,
keywords: vec!["claims", "insurance", "adjuster", "investigator", "fraud"],
},
Occupation {
name: "Transportation, Storage, and Distribution Managers".into(),
hourly_wage: 55.77,
category: BusinessFinance,
keywords: vec![
"logistics",
"supply chain",
"warehouse",
"distribution",
"shipping",
"inventory",
"fulfillment",
],
},
Occupation {
name: "Industrial Production Managers".into(),
hourly_wage: 62.11,
category: BusinessFinance,
keywords: vec![
"production manager",
"plant manager",
"manufacturing operations",
],
},
Occupation {
name: "Lodging Managers".into(),
hourly_wage: 37.24,
category: BusinessFinance,
keywords: vec!["hotel", "hospitality", "lodging", "resort", "concierge"],
},
Occupation {
name: "Real Estate Brokers".into(),
hourly_wage: 39.77,
category: BusinessFinance,
keywords: vec!["real estate broker", "realtor", "mls", "listing"],
},
Occupation {
name: "Managers, All Other".into(),
hourly_wage: 72.06,
category: BusinessFinance,
keywords: vec!["manager", "supervisor", "team lead"],
},
// Healthcare & Social Services
Occupation {
name: "Medical and Health Services Managers".into(),
hourly_wage: 66.22,
category: HealthcareSocialServices,
keywords: vec![
"healthcare",
"hospital",
"clinic",
"medical",
"health services",
"patient",
"hipaa",
],
},
Occupation {
name: "Social and Community Service Managers".into(),
hourly_wage: 41.39,
category: HealthcareSocialServices,
keywords: vec![
"social services",
"community",
"nonprofit",
"outreach",
"case management",
"welfare",
],
},
Occupation {
name: "Child, Family, and School Social Workers".into(),
hourly_wage: 41.39,
category: HealthcareSocialServices,
keywords: vec![
"social worker",
"child welfare",
"family services",
"school counselor",
],
},
Occupation {
name: "Registered Nurses".into(),
hourly_wage: 66.22,
category: HealthcareSocialServices,
keywords: vec!["nurse", "rn", "nursing", "patient care", "clinical"],
},
Occupation {
name: "Nurse Practitioners".into(),
hourly_wage: 66.22,
category: HealthcareSocialServices,
keywords: vec!["np", "nurse practitioner", "aprn", "prescribe"],
},
Occupation {
name: "Pharmacists".into(),
hourly_wage: 66.22,
category: HealthcareSocialServices,
keywords: vec![
"pharmacy",
"pharmacist",
"medication",
"prescription",
"drug",
],
},
Occupation {
name: "Medical Secretaries and Administrative Assistants".into(),
hourly_wage: 66.22,
category: HealthcareSocialServices,
keywords: vec![
"medical secretary",
"medical records",
"ehr",
"scheduling appointments",
],
},
// Legal, Media & Operations
Occupation {
name: "Lawyers".into(),
hourly_wage: 44.41,
category: LegalMediaOperations,
keywords: vec![
"lawyer",
"attorney",
"legal",
"contract",
"litigation",
"counsel",
"law",
"paralegal",
],
},
Occupation {
name: "Editors".into(),
hourly_wage: 72.06,
category: LegalMediaOperations,
keywords: vec![
"editor",
"editing",
"proofread",
"copy edit",
"manuscript",
"publication",
],
},
Occupation {
name: "Film and Video Editors".into(),
hourly_wage: 68.15,
category: LegalMediaOperations,
keywords: vec![
"video editor",
"film",
"premiere",
"final cut",
"davinci",
"post-production",
],
},
Occupation {
name: "Audio and Video Technicians".into(),
hourly_wage: 41.86,
category: LegalMediaOperations,
keywords: vec![
"audio",
"video",
"av",
"broadcast",
"streaming",
"recording",
],
},
Occupation {
name: "Producers and Directors".into(),
hourly_wage: 41.86,
category: LegalMediaOperations,
keywords: vec![
"producer",
"director",
"production",
"creative director",
"content",
"show",
],
},
Occupation {
name: "News Analysts, Reporters, and Journalists".into(),
hourly_wage: 68.15,
category: LegalMediaOperations,
keywords: vec![
"journalist",
"reporter",
"news",
"article",
"press",
"interview",
"story",
],
},
Occupation {
name: "Entertainment and Recreation Managers, Except Gambling".into(),
hourly_wage: 41.86,
category: LegalMediaOperations,
keywords: vec!["entertainment", "recreation", "event", "venue", "concert"],
},
Occupation {
name: "Recreation Workers".into(),
hourly_wage: 41.86,
category: LegalMediaOperations,
keywords: vec!["recreation", "activity", "fitness", "sports"],
},
Occupation {
name: "Customer Service Representatives".into(),
hourly_wage: 44.41,
category: LegalMediaOperations,
keywords: vec!["customer service", "support", "helpdesk", "ticket", "chat"],
},
Occupation {
name: "Private Detectives and Investigators".into(),
hourly_wage: 37.87,
category: LegalMediaOperations,
keywords: vec![
"detective",
"investigator",
"background check",
"surveillance",
],
},
Occupation {
name: "First-Line Supervisors of Police and Detectives".into(),
hourly_wage: 72.06,
category: LegalMediaOperations,
keywords: vec!["police", "law enforcement", "security supervisor"],
},
]
}
/// Build the reverse lookup table mapping each keyword to the indices of
/// every occupation that lists it, so `classify` can score in one pass.
fn build_keyword_index(occupations: &[Occupation]) -> HashMap<&'static str, Vec<usize>> {
    occupations
        .iter()
        .enumerate()
        .flat_map(|(idx, occ)| occ.keywords.iter().map(move |&kw| (kw, idx)))
        .fold(HashMap::new(), |mut table, (kw, idx)| {
            table.entry(kw).or_default().push(idx);
            table
        })
}
/// Classify a task instruction into an occupation with estimated value
///
/// This is a synchronous keyword-based classifier. For LLM-based
/// classification, use `classify_with_llm` instead.
pub fn classify(&self, instruction: &str) -> ClassificationResult {
    let lower = instruction.to_lowercase();
    let mut scores: HashMap<usize, f64> = HashMap::new();
    // Score each occupation by keyword matches
    for (keyword, occ_indices) in &self.keyword_index {
        if lower.contains(keyword) {
            for &idx in occ_indices {
                *scores.entry(idx).or_default() += 1.0;
            }
        }
    }
    // Find the best match. Ties are broken by the LOWEST occupation index:
    // `HashMap` iteration order is unspecified, so without an explicit
    // tie-break, equal scores could yield a different winner on each run.
    let (best_idx, best_score) = scores
        .iter()
        .map(|(&idx, &score)| (idx, score))
        .max_by(|a, b| {
            a.1.partial_cmp(&b.1)
                .expect("keyword match scores are never NaN")
                // Reversed index compare: on equal scores the lower-index
                // entry sorts as "greater" and wins `max_by`.
                .then(b.0.cmp(&a.0))
        })
        .unwrap_or((usize::MAX, 0.0));
    let (occupation, hourly_wage, category, confidence, reasoning) =
        if best_idx < self.occupations.len() {
            let occ = &self.occupations[best_idx];
            // Normalize confidence: three or more keyword hits count as 1.0.
            let confidence = (best_score / 3.0).min(1.0);
            (
                occ.name.clone(),
                occ.hourly_wage,
                occ.category,
                confidence,
                format!("Matched {:.0} keywords", best_score),
            )
        } else {
            // Fallback when no keyword matched at all.
            (
                self.fallback_occupation.clone(),
                self.fallback_wage,
                OccupationCategory::BusinessFinance,
                0.3,
                "Fallback classification - no strong keyword match".to_string(),
            )
        };
    let estimated_hours = Self::estimate_hours(instruction);
    // Round the payment cap to whole cents.
    let max_payment = (estimated_hours * hourly_wage * 100.0).round() / 100.0;
    ClassificationResult {
        occupation,
        hourly_wage,
        estimated_hours,
        max_payment,
        confidence,
        category,
        reasoning,
    }
}
/// Estimate hours based on instruction complexity.
///
/// A base estimate is picked from complexity markers in the text and then
/// scaled by word count. All marker checks are case-insensitive.
///
/// Returns a value clamped to [0.25, 40.0] hours.
fn estimate_hours(instruction: &str) -> f64 {
    let lower = instruction.to_lowercase();
    let word_count = instruction.split_whitespace().count();
    // Bug fix: previously only the first marker of each group was compared
    // against the lowercased text; the rest used the raw string, so
    // "Build", "Create", "Update", etc. were silently missed.
    const COMPLEX_MARKERS: [&str; 5] = ["implement", "build", "create", "design", "develop"];
    const SIMPLE_MARKERS: [&str; 4] = ["fix", "update", "change", "review"];
    let base_hours = if COMPLEX_MARKERS.iter().any(|m| lower.contains(m)) {
        2.0
    } else if SIMPLE_MARKERS.iter().any(|m| lower.contains(m)) {
        0.5
    } else {
        1.0
    };
    // Scale by instruction length: 20 words is the neutral point.
    let length_factor = (word_count as f64 / 20.0).clamp(0.5, 2.0);
    // Clamp to the valid range.
    (base_hours * length_factor).clamp(0.25, 40.0)
}
/// Read-only view of every occupation in the wage table.
pub fn occupations(&self) -> &[Occupation] {
    self.occupations.as_slice()
}
/// Collect references to every occupation belonging to `category`.
pub fn occupations_by_category(&self, category: OccupationCategory) -> Vec<&Occupation> {
    let mut matching = Vec::new();
    for occ in &self.occupations {
        if occ.category == category {
            matching.push(occ);
        }
    }
    matching
}
/// Name of the occupation used when classification finds no strong match.
pub fn fallback_occupation(&self) -> &str {
    self.fallback_occupation.as_str()
}
/// Hourly wage (USD) applied when the fallback occupation is used.
pub fn fallback_wage(&self) -> f64 {
    self.fallback_wage
}
/// Look up an occupation whose name matches `name` exactly (case-sensitive).
pub fn get_occupation(&self, name: &str) -> Option<&Occupation> {
    for occ in &self.occupations {
        if occ.name == name {
            return Some(occ);
        }
    }
    None
}
/// Fuzzy match an occupation name.
///
/// Tries, in order: exact match, case-insensitive match, then a
/// bidirectional substring match on the lowercased names.
pub fn fuzzy_match(&self, name: &str) -> Option<&Occupation> {
    let lower = name.to_lowercase();
    // 1. Exact, case-sensitive.
    let exact = self.occupations.iter().find(|o| o.name == name);
    if exact.is_some() {
        return exact;
    }
    // 2. Case-insensitive equality.
    let case_insensitive = self
        .occupations
        .iter()
        .find(|o| o.name.to_lowercase() == lower);
    if case_insensitive.is_some() {
        return case_insensitive;
    }
    // 3. Substring in either direction.
    self.occupations.iter().find(|o| {
        let occ_lower = o.name.to_lowercase();
        lower.contains(&occ_lower) || occ_lower.contains(&lower)
    })
}
}
#[cfg(test)]
mod tests {
    use super::*;
    // The built-in wage table currently holds exactly 44 occupations.
    #[test]
    fn test_classifier_new() {
        let classifier = TaskClassifier::new();
        assert_eq!(classifier.occupations.len(), 44);
    }
    // Strong software keywords ("rust", "api", ...) should map to developers.
    #[test]
    fn test_classify_software() {
        let classifier = TaskClassifier::new();
        let result = classifier.classify("Write a REST API in Rust with authentication");
        assert_eq!(result.occupation, "Software Developers");
        assert!((result.hourly_wage - 69.50).abs() < 0.01);
        assert!(result.confidence > 0.0);
    }
    // Finance wording should land in an accounting/financial occupation.
    #[test]
    fn test_classify_finance() {
        let classifier = TaskClassifier::new();
        let result = classifier.classify("Prepare quarterly financial statements and audit trail");
        assert!(
            result.occupation.contains("Account") || result.occupation.contains("Financial"),
            "Expected finance occupation, got: {}",
            result.occupation
        );
    }
    // Nonsense input triggers the fallback occupation at fixed 0.3 confidence.
    #[test]
    fn test_classify_fallback() {
        let classifier = TaskClassifier::new();
        let result = classifier.classify("xyzzy foobar baz");
        assert_eq!(result.occupation, "General and Operations Managers");
        assert_eq!(result.confidence, 0.3);
    }
    // "Implement" is a complexity marker, so the estimate is at least 1h.
    #[test]
    fn test_estimate_hours_complex() {
        let hours = TaskClassifier::estimate_hours(
            "Implement a complete microservices architecture with event sourcing",
        );
        assert!(hours >= 1.0, "Complex task should estimate >= 1 hour");
    }
    // "Fix" is a simplicity marker, so the estimate stays at or below 1h.
    #[test]
    fn test_estimate_hours_simple() {
        let hours = TaskClassifier::estimate_hours("Fix typo");
        assert!(hours <= 1.0, "Simple task should estimate <= 1 hour");
    }
    // Exercises all three fuzzy_match tiers: exact, case-insensitive, substring.
    #[test]
    fn test_fuzzy_match() {
        let classifier = TaskClassifier::new();
        // Exact match
        assert!(classifier.fuzzy_match("Software Developers").is_some());
        // Case insensitive
        assert!(classifier.fuzzy_match("software developers").is_some());
        // Substring
        assert!(classifier.fuzzy_match("Software").is_some());
    }
    // Category filter returns a non-empty, correctly filtered subset.
    #[test]
    fn test_occupations_by_category() {
        let classifier = TaskClassifier::new();
        let tech = classifier.occupations_by_category(OccupationCategory::TechnologyEngineering);
        assert!(!tech.is_empty());
        assert!(tech.iter().any(|o| o.name == "Software Developers"));
    }
}

369
src/economic/costs.rs Normal file
View File

@ -0,0 +1,369 @@
//! Token cost tracking types for economic agents.
//!
//! Separates costs by channel (LLM, search API, OCR, etc.) following
//! the ClawWork economic model.
use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
/// Channel-separated cost breakdown for a task or session.
///
/// All amounts are in USD. Each field tracks one spending channel so
/// reports can show where an agent's budget went.
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct CostBreakdown {
    /// Cost from LLM token usage
    pub llm_tokens: f64,
    /// Cost from search API calls (Brave, JINA, Tavily, etc.)
    pub search_api: f64,
    /// Cost from OCR API calls
    pub ocr_api: f64,
    /// Cost from other API calls
    pub other_api: f64,
}
impl CostBreakdown {
    /// Construct a breakdown with every channel at zero.
    pub fn new() -> Self {
        Self::default()
    }
    /// Sum of all four channels, in USD.
    pub fn total(&self) -> f64 {
        [self.llm_tokens, self.search_api, self.ocr_api, self.other_api]
            .into_iter()
            .sum()
    }
    /// Accumulate `other`'s channels into this breakdown, channel by channel.
    pub fn add(&mut self, other: &CostBreakdown) {
        self.llm_tokens += other.llm_tokens;
        self.search_api += other.search_api;
        self.ocr_api += other.ocr_api;
        self.other_api += other.other_api;
    }
    /// Zero out every channel.
    pub fn reset(&mut self) {
        *self = Self::default();
    }
}
/// Token pricing configuration.
///
/// Prices are expressed in USD per million tokens, matching common
/// provider rate cards.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TokenPricing {
    /// Price per million input tokens (USD)
    pub input_price_per_million: f64,
    /// Price per million output tokens (USD)
    pub output_price_per_million: f64,
}
impl Default for TokenPricing {
    /// $3 / $15 per million input/output tokens.
    fn default() -> Self {
        // Default to Claude Sonnet 4 pricing via OpenRouter
        Self {
            input_price_per_million: 3.0,
            output_price_per_million: 15.0,
        }
    }
}
impl TokenPricing {
    /// Cost in USD for a call that consumed the given token counts,
    /// pro-rating each per-million price over the tokens actually used.
    pub fn calculate_cost(&self, input_tokens: u64, output_tokens: u64) -> f64 {
        const TOKENS_PER_PRICE_UNIT: f64 = 1_000_000.0;
        let input_cost = (input_tokens as f64 / TOKENS_PER_PRICE_UNIT) * self.input_price_per_million;
        let output_cost = (output_tokens as f64 / TOKENS_PER_PRICE_UNIT) * self.output_price_per_million;
        input_cost + output_cost
    }
}
/// A single LLM call record with token details.
///
/// One record is appended per LLM request; these are aggregated into
/// `LlmUsageSummary` at task end.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LlmCallRecord {
    /// Timestamp of the call (UTC)
    pub timestamp: DateTime<Utc>,
    /// API name/source (e.g., "agent", "wrapup", "research")
    pub api_name: String,
    /// Number of input tokens
    pub input_tokens: u64,
    /// Number of output tokens
    pub output_tokens: u64,
    /// Cost in USD
    pub cost: f64,
}
/// A single API call record (non-LLM).
///
/// `tokens` and `price_per_million` are serialized only when present,
/// i.e. for token-priced calls; flat-rate calls omit them.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ApiCallRecord {
    /// Timestamp of the call (UTC)
    pub timestamp: DateTime<Utc>,
    /// API name (e.g., "tavily_search", "jina_reader")
    pub api_name: String,
    /// Pricing model used
    pub pricing_model: PricingModel,
    /// Number of tokens (if token-based pricing)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tokens: Option<u64>,
    /// Price per million tokens (if token-based)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub price_per_million: Option<f64>,
    /// Cost in USD
    pub cost: f64,
}
/// Pricing model for API calls.
///
/// Serialized in snake_case ("per_token" / "flat_rate").
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum PricingModel {
    /// Token-based pricing (cost = tokens / 1M * price_per_million)
    PerToken,
    /// Flat rate per call
    FlatRate,
}
/// Comprehensive task cost record (one per task).
///
/// Persisted as one JSONL line per completed task; all monetary values
/// are in USD. Note the serialized field order puts `timestamp_end`
/// before `timestamp_start`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TaskCostRecord {
    /// Task end timestamp
    pub timestamp_end: DateTime<Utc>,
    /// Task start timestamp
    pub timestamp_start: DateTime<Utc>,
    /// Date the task was assigned (YYYY-MM-DD)
    pub date: String,
    /// Unique task identifier
    pub task_id: String,
    /// LLM usage summary
    pub llm_usage: LlmUsageSummary,
    /// API usage summary
    pub api_usage: ApiUsageSummary,
    /// Cost summary by channel
    pub cost_summary: CostBreakdown,
    /// Balance after this task
    pub balance_after: f64,
    /// Session cost so far
    pub session_cost: f64,
    /// Daily cost so far
    pub daily_cost: f64,
}
/// Aggregated LLM usage for a task.
///
/// `calls_detail` defaults to empty on deserialization so older records
/// without per-call detail still load.
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct LlmUsageSummary {
    /// Number of LLM calls made
    pub total_calls: usize,
    /// Total input tokens
    pub total_input_tokens: u64,
    /// Total output tokens
    pub total_output_tokens: u64,
    /// Total tokens (input + output)
    pub total_tokens: u64,
    /// Total cost in USD
    pub total_cost: f64,
    /// Input token price used (USD per million)
    pub input_price_per_million: f64,
    /// Output token price used (USD per million)
    pub output_price_per_million: f64,
    /// Detailed call records
    #[serde(default)]
    pub calls_detail: Vec<LlmCallRecord>,
}
/// Aggregated API usage (non-LLM) for a task.
///
/// Costs are split by channel (search/OCR/other) in USD; call counts are
/// additionally split by pricing model.
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct ApiUsageSummary {
    /// Number of API calls made
    pub total_calls: usize,
    /// Search API costs
    pub search_api_cost: f64,
    /// OCR API costs
    pub ocr_api_cost: f64,
    /// Other API costs
    pub other_api_cost: f64,
    /// Number of token-based calls
    pub token_based_calls: usize,
    /// Number of flat-rate calls
    pub flat_rate_calls: usize,
    /// Detailed call records
    #[serde(default)]
    pub calls_detail: Vec<ApiCallRecord>,
}
/// Work income record.
///
/// Records the outcome of one paid task: the payment offered, the
/// evaluation score, and whether the score cleared the payment threshold.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct WorkIncomeRecord {
    /// Timestamp (UTC)
    pub timestamp: DateTime<Utc>,
    /// Date (YYYY-MM-DD)
    pub date: String,
    /// Task identifier
    pub task_id: String,
    /// Base payment amount offered (USD)
    pub base_amount: f64,
    /// Actual payment received (0 if below threshold)
    pub actual_payment: f64,
    /// Evaluation score (0.0-1.0)
    pub evaluation_score: f64,
    /// Minimum threshold required for payment
    pub threshold: f64,
    /// Whether payment was awarded
    pub payment_awarded: bool,
    /// Optional description (empty string when absent)
    #[serde(default)]
    pub description: String,
    /// Balance after this income
    pub balance_after: f64,
}
/// Daily balance record for persistence.
///
/// One JSONL line per day (plus one "initialization" line on first run).
/// Delta fields cover the period; `total_*` fields are lifetime
/// cumulative values.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BalanceRecord {
    /// Date (YYYY-MM-DD or "initialization")
    pub date: String,
    /// Current balance
    pub balance: f64,
    /// Token cost delta for this period
    pub token_cost_delta: f64,
    /// Work income delta for this period
    pub work_income_delta: f64,
    /// Trading profit delta for this period
    pub trading_profit_delta: f64,
    /// Cumulative total token cost
    pub total_token_cost: f64,
    /// Cumulative total work income
    pub total_work_income: f64,
    /// Cumulative total trading profit
    pub total_trading_profit: f64,
    /// Net worth (balance + portfolio value)
    pub net_worth: f64,
    /// Current survival status
    pub survival_status: String,
    /// Tasks completed in this period
    #[serde(default)]
    pub completed_tasks: Vec<String>,
    /// Primary task ID for the day (omitted from JSON when None)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub task_id: Option<String>,
    /// Time to complete tasks (seconds; omitted from JSON when None)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub task_completion_time_seconds: Option<f64>,
    /// Whether session was aborted by API error
    #[serde(default)]
    pub api_error: bool,
}
/// Task completion record for analytics.
///
/// One record per attempt; `attempt` distinguishes retries of the same
/// task on the same date.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TaskCompletionRecord {
    /// Task identifier
    pub task_id: String,
    /// Date (YYYY-MM-DD)
    pub date: String,
    /// Attempt number (1-based)
    pub attempt: u32,
    /// Whether work was submitted
    pub work_submitted: bool,
    /// Evaluation score (0.0-1.0)
    pub evaluation_score: f64,
    /// Money earned from this task (USD)
    pub money_earned: f64,
    /// Wall-clock time in seconds
    pub wall_clock_seconds: f64,
    /// Timestamp of completion (UTC)
    pub timestamp: DateTime<Utc>,
}
/// Economic analytics summary.
///
/// Rollup across all persisted records: lifetime totals plus per-date and
/// per-task breakdowns keyed by date string / task id.
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct EconomicAnalytics {
    /// Total costs by channel
    pub total_costs: CostBreakdown,
    /// Costs broken down by date (key: YYYY-MM-DD)
    pub by_date: HashMap<String, DateCostSummary>,
    /// Costs broken down by task (key: task id)
    pub by_task: HashMap<String, TaskCostSummary>,
    /// Total number of tasks
    pub total_tasks: usize,
    /// Total income earned (USD)
    pub total_income: f64,
    /// Number of tasks that received payment
    pub tasks_paid: usize,
    /// Number of tasks rejected (below threshold)
    pub tasks_rejected: usize,
}
/// Cost summary for a single date.
///
/// `#[serde(flatten)]` inlines the channel fields into this object when
/// serialized, so JSON consumers see them at the top level.
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct DateCostSummary {
    /// Costs by channel
    #[serde(flatten)]
    pub costs: CostBreakdown,
    /// Total cost (USD)
    pub total: f64,
    /// Income earned (USD)
    pub income: f64,
}
/// Cost summary for a single task.
///
/// Like `DateCostSummary`, the channel fields are flattened into the
/// serialized object.
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct TaskCostSummary {
    /// Costs by channel
    #[serde(flatten)]
    pub costs: CostBreakdown,
    /// Total cost (USD)
    pub total: f64,
    /// Date of the task (YYYY-MM-DD)
    pub date: String,
}
#[cfg(test)]
mod tests {
    use super::*;
    // total() sums all four channels: 1.0 + 0.5 + 0.25 + 0.1 = 1.85.
    #[test]
    fn cost_breakdown_total() {
        let breakdown = CostBreakdown {
            llm_tokens: 1.0,
            search_api: 0.5,
            ocr_api: 0.25,
            other_api: 0.1,
        };
        assert!((breakdown.total() - 1.85).abs() < f64::EPSILON);
    }
    // add() must accumulate channel-by-channel, not just the total.
    #[test]
    fn cost_breakdown_add() {
        let mut a = CostBreakdown {
            llm_tokens: 1.0,
            search_api: 0.5,
            ocr_api: 0.0,
            other_api: 0.0,
        };
        let b = CostBreakdown {
            llm_tokens: 0.5,
            search_api: 0.25,
            ocr_api: 0.1,
            other_api: 0.05,
        };
        a.add(&b);
        assert!((a.llm_tokens - 1.5).abs() < f64::EPSILON);
        assert!((a.search_api - 0.75).abs() < f64::EPSILON);
        assert!((a.total() - 2.4).abs() < f64::EPSILON);
    }
    // Per-million prices are pro-rated over the actual token counts.
    #[test]
    fn token_pricing_calculation() {
        let pricing = TokenPricing {
            input_price_per_million: 3.0,
            output_price_per_million: 15.0,
        };
        // 1000 input, 500 output
        // (1000/1M)*3 + (500/1M)*15 = 0.003 + 0.0075 = 0.0105
        let cost = pricing.calculate_cost(1000, 500);
        assert!((cost - 0.0105).abs() < 0.0001);
    }
    // Defaults pin the documented $3/$15 per-million rates.
    #[test]
    fn default_token_pricing() {
        let pricing = TokenPricing::default();
        assert!((pricing.input_price_per_million - 3.0).abs() < f64::EPSILON);
        assert!((pricing.output_price_per_million - 15.0).abs() < f64::EPSILON);
    }
}

83
src/economic/mod.rs Normal file
View File

@ -0,0 +1,83 @@
//! Economic tracking module for agent survival economics.
//!
//! This module implements the ClawWork economic model for AI agents,
//! tracking balance, costs, income, and survival status. Agents start
//! with initial capital and must manage their resources while completing
//! tasks.
//!
//! ## Overview
//!
//! The economic system models agent viability:
//! - **Balance**: Starting capital minus costs plus earned income
//! - **Costs**: LLM tokens, search APIs, OCR, and other service usage
//! - **Income**: Payments for completed tasks (with quality threshold)
//! - **Status**: Health indicator based on remaining capital percentage
//!
//! ## Example
//!
//! ```rust,ignore
//! use zeroclaw::economic::{EconomicTracker, EconomicConfig, SurvivalStatus};
//!
//! let config = EconomicConfig {
//! enabled: true,
//! initial_balance: 1000.0,
//! ..Default::default()
//! };
//!
//! let tracker = EconomicTracker::new("my-agent", config, None);
//! tracker.initialize()?;
//!
//! // Start a task
//! tracker.start_task("task-001", None);
//!
//! // Track LLM usage
//! let cost = tracker.track_tokens(1000, 500, "agent", None);
//!
//! // Complete task and earn income
//! tracker.end_task()?;
//! let payment = tracker.add_work_income(10.0, "task-001", 0.85, "Completed task")?;
//!
//! // Check survival status
//! match tracker.get_survival_status() {
//! SurvivalStatus::Thriving => println!("Agent is healthy!"),
//! SurvivalStatus::Bankrupt => println!("Agent needs intervention!"),
//! _ => {}
//! }
//! ```
//!
//! ## Persistence
//!
//! Economic state is persisted to JSONL files:
//! - `balance.jsonl`: Daily balance snapshots and cumulative totals
//! - `token_costs.jsonl`: Detailed per-task cost records
//! - `task_completions.jsonl`: Task completion statistics
//!
//! ## Configuration
//!
//! Add to `config.toml`:
//!
//! ```toml
//! [economic]
//! enabled = true
//! initial_balance = 1000.0
//! min_evaluation_threshold = 0.6
//!
//! [economic.token_pricing]
//! input_price_per_million = 3.0
//! output_price_per_million = 15.0
//! ```
pub mod classifier;
pub mod costs;
pub mod status;
pub mod tracker;
// Re-exports for convenient access
pub use classifier::{ClassificationResult, Occupation, OccupationCategory, TaskClassifier};
pub use costs::{
ApiCallRecord, ApiUsageSummary, BalanceRecord, CostBreakdown, DateCostSummary,
EconomicAnalytics, LlmCallRecord, LlmUsageSummary, PricingModel, TaskCompletionRecord,
TaskCostRecord, TaskCostSummary, TokenPricing, WorkIncomeRecord,
};
pub use status::SurvivalStatus;
pub use tracker::{EconomicConfig, EconomicSummary, EconomicTracker};

207
src/economic/status.rs Normal file
View File

@ -0,0 +1,207 @@
//! Survival status tracking for economic agents.
//!
//! Defines the health states an agent can be in based on remaining balance
//! as a percentage of initial capital.
use serde::{Deserialize, Serialize};
use std::fmt;
/// Survival status based on balance percentage relative to initial capital.
///
/// Mirrors the ClawWork LiveBench agent survival states. Bands are
/// half-open: a percentage exactly at a threshold falls into the HIGHER
/// band (see `from_balance`: exactly 80% is `Thriving`, exactly 40% is
/// `Stable`, exactly 10% is `Struggling`).
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize, Default)]
#[serde(rename_all = "snake_case")]
pub enum SurvivalStatus {
    /// Balance >= 80% of initial - Agent is profitable and healthy
    Thriving,
    /// Balance in [40%, 80%) of initial - Agent is maintaining stability
    #[default]
    Stable,
    /// Balance in [10%, 40%) of initial - Agent is losing money, needs attention
    Struggling,
    /// Balance in (0%, 10%) of initial - Agent is near death, urgent intervention needed
    Critical,
    /// Balance <= 0 - Agent has exhausted resources and cannot operate
    Bankrupt,
}
impl SurvivalStatus {
    /// Calculate survival status from current and initial balance.
    ///
    /// # Arguments
    /// * `current_balance` - Current remaining balance
    /// * `initial_balance` - Starting balance
    ///
    /// # Returns
    /// The appropriate `SurvivalStatus` based on the percentage remaining.
    /// Band boundaries are half-open; a value exactly at a threshold falls
    /// into the higher band.
    pub fn from_balance(current_balance: f64, initial_balance: f64) -> Self {
        if initial_balance <= 0.0 {
            // Edge case: if initial was zero or negative, can't calculate percentage
            return if current_balance <= 0.0 {
                Self::Bankrupt
            } else {
                Self::Thriving
            };
        }
        let percentage = (current_balance / initial_balance) * 100.0;
        match percentage {
            p if p <= 0.0 => Self::Bankrupt,
            p if p < 10.0 => Self::Critical,
            p if p < 40.0 => Self::Struggling,
            p if p < 80.0 => Self::Stable,
            _ => Self::Thriving,
        }
    }
    /// Check if the agent can still operate (not bankrupt).
    pub fn is_operational(&self) -> bool {
        !matches!(self, Self::Bankrupt)
    }
    /// Check if the agent needs urgent attention.
    pub fn needs_intervention(&self) -> bool {
        matches!(self, Self::Critical | Self::Bankrupt)
    }
    /// Get a human-readable emoji indicator.
    pub fn emoji(&self) -> &'static str {
        match self {
            Self::Thriving => "🌟",
            // Bug fix: `Stable` previously returned an empty string, so status
            // lines showed no indicator at all for a stable agent, contradicting
            // this method's contract of an indicator per state.
            Self::Stable => "✅",
            Self::Struggling => "⚠️",
            Self::Critical => "🚨",
            Self::Bankrupt => "💀",
        }
    }
    /// Get a color code for terminal output (ANSI).
    pub fn ansi_color(&self) -> &'static str {
        match self {
            Self::Thriving => "\x1b[32m",   // Green
            Self::Stable => "\x1b[34m",     // Blue
            Self::Struggling => "\x1b[33m", // Yellow
            Self::Critical => "\x1b[31m",   // Red
            Self::Bankrupt => "\x1b[35m",   // Magenta
        }
    }
}
impl fmt::Display for SurvivalStatus {
    /// Write the human-readable state name (e.g. "Thriving").
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str(match self {
            Self::Thriving => "Thriving",
            Self::Stable => "Stable",
            Self::Struggling => "Struggling",
            Self::Critical => "Critical",
            Self::Bankrupt => "Bankrupt",
        })
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Exactly-at-threshold values belong to the higher band, so 80.001% is
    // Thriving; being in profit (>100%) is also Thriving.
    #[test]
    fn thriving_above_80_percent() {
        assert_eq!(
            SurvivalStatus::from_balance(900.0, 1000.0),
            SurvivalStatus::Thriving
        );
        assert_eq!(
            SurvivalStatus::from_balance(1500.0, 1000.0), // Profit!
            SurvivalStatus::Thriving
        );
        assert_eq!(
            SurvivalStatus::from_balance(800.01, 1000.0),
            SurvivalStatus::Thriving
        );
    }
    // [40%, 80%) band.
    #[test]
    fn stable_between_40_and_80_percent() {
        assert_eq!(
            SurvivalStatus::from_balance(799.99, 1000.0),
            SurvivalStatus::Stable
        );
        assert_eq!(
            SurvivalStatus::from_balance(500.0, 1000.0),
            SurvivalStatus::Stable
        );
        assert_eq!(
            SurvivalStatus::from_balance(400.01, 1000.0),
            SurvivalStatus::Stable
        );
    }
    // [10%, 40%) band.
    #[test]
    fn struggling_between_10_and_40_percent() {
        assert_eq!(
            SurvivalStatus::from_balance(399.99, 1000.0),
            SurvivalStatus::Struggling
        );
        assert_eq!(
            SurvivalStatus::from_balance(200.0, 1000.0),
            SurvivalStatus::Struggling
        );
        assert_eq!(
            SurvivalStatus::from_balance(100.01, 1000.0),
            SurvivalStatus::Struggling
        );
    }
    // (0%, 10%) band.
    #[test]
    fn critical_between_0_and_10_percent() {
        assert_eq!(
            SurvivalStatus::from_balance(99.99, 1000.0),
            SurvivalStatus::Critical
        );
        assert_eq!(
            SurvivalStatus::from_balance(50.0, 1000.0),
            SurvivalStatus::Critical
        );
        assert_eq!(
            SurvivalStatus::from_balance(0.01, 1000.0),
            SurvivalStatus::Critical
        );
    }
    // Zero or negative balance is Bankrupt regardless of magnitude.
    #[test]
    fn bankrupt_at_zero_or_negative() {
        assert_eq!(
            SurvivalStatus::from_balance(0.0, 1000.0),
            SurvivalStatus::Bankrupt
        );
        assert_eq!(
            SurvivalStatus::from_balance(-100.0, 1000.0),
            SurvivalStatus::Bankrupt
        );
    }
    // Only Bankrupt blocks operation.
    #[test]
    fn is_operational() {
        assert!(SurvivalStatus::Thriving.is_operational());
        assert!(SurvivalStatus::Stable.is_operational());
        assert!(SurvivalStatus::Struggling.is_operational());
        assert!(SurvivalStatus::Critical.is_operational());
        assert!(!SurvivalStatus::Bankrupt.is_operational());
    }
    // Intervention is flagged for the two worst states only.
    #[test]
    fn needs_intervention() {
        assert!(!SurvivalStatus::Thriving.needs_intervention());
        assert!(!SurvivalStatus::Stable.needs_intervention());
        assert!(!SurvivalStatus::Struggling.needs_intervention());
        assert!(SurvivalStatus::Critical.needs_intervention());
        assert!(SurvivalStatus::Bankrupt.needs_intervention());
    }
    // Display yields the plain state name.
    #[test]
    fn display_format() {
        assert_eq!(format!("{}", SurvivalStatus::Thriving), "Thriving");
        assert_eq!(format!("{}", SurvivalStatus::Bankrupt), "Bankrupt");
    }
}

992
src/economic/tracker.rs Normal file
View File

@ -0,0 +1,992 @@
//! Economic tracker for agent survival economics.
//!
//! Tracks balance, token costs, work income, and survival status following
//! the ClawWork LiveBench economic model. Persists state to JSONL files.
use super::costs::{
ApiCallRecord, ApiUsageSummary, BalanceRecord, CostBreakdown, LlmCallRecord, LlmUsageSummary,
PricingModel, TaskCompletionRecord, TaskCostRecord, TokenPricing, WorkIncomeRecord,
};
use super::status::SurvivalStatus;
use anyhow::{Context, Result};
use chrono::{DateTime, Utc};
use parking_lot::Mutex;
use serde::{Deserialize, Serialize};
use std::fs::{self, File, OpenOptions};
use std::io::{BufRead, BufReader, Write};
use std::path::PathBuf;
use std::sync::Arc;
/// Economic configuration options.
///
/// Every field has a serde default, so a partial `[economic]` section in
/// config.toml deserializes cleanly.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct EconomicConfig {
    /// Enable economic tracking (defaults to false)
    #[serde(default)]
    pub enabled: bool,
    /// Starting balance in USD (defaults to 1000.0)
    #[serde(default = "default_initial_balance")]
    pub initial_balance: f64,
    /// Token pricing configuration
    #[serde(default)]
    pub token_pricing: TokenPricing,
    /// Minimum evaluation score to receive payment (0.0-1.0, defaults to 0.6)
    #[serde(default = "default_min_threshold")]
    pub min_evaluation_threshold: f64,
}
/// Serde default for `EconomicConfig::initial_balance` ($1000).
fn default_initial_balance() -> f64 {
    1000.0
}
/// Serde default for `EconomicConfig::min_evaluation_threshold` (0.6).
fn default_min_threshold() -> f64 {
    0.6
}
impl Default for EconomicConfig {
    /// Tracking disabled, with the standard balance, pricing, and threshold.
    fn default() -> Self {
        Self {
            enabled: false,
            initial_balance: default_initial_balance(),
            token_pricing: TokenPricing::default(),
            min_evaluation_threshold: default_min_threshold(),
        }
    }
}
/// Task-level tracking state (in-memory during task execution).
///
/// Cleared via `reset` between tasks; only consolidated records are
/// persisted.
#[derive(Debug, Clone, Default)]
struct TaskState {
    /// Current task ID (None when no task is active)
    task_id: Option<String>,
    /// Date the task was assigned (YYYY-MM-DD)
    task_date: Option<String>,
    /// Task start timestamp
    start_time: Option<DateTime<Utc>>,
    /// Costs accumulated for this task
    costs: CostBreakdown,
    /// LLM call records
    llm_calls: Vec<LlmCallRecord>,
    /// API call records
    api_calls: Vec<ApiCallRecord>,
}
impl TaskState {
    /// Clear all per-task tracking (call record Vecs keep their capacity).
    fn reset(&mut self) {
        self.llm_calls.clear();
        self.api_calls.clear();
        self.costs.reset();
        self.task_id = None;
        self.task_date = None;
        self.start_time = None;
    }
}
/// Daily tracking state (accumulated across tasks within one day).
#[derive(Debug, Clone, Default)]
struct DailyState {
    /// Task IDs completed today
    task_ids: Vec<String>,
    /// First task start time (set once per day)
    first_task_start: Option<DateTime<Utc>>,
    /// Last task end time
    last_task_end: Option<DateTime<Utc>>,
    /// Daily cost accumulator (USD)
    cost: f64,
}
impl DailyState {
    /// Return the accumulator to its start-of-day state.
    fn reset(&mut self) {
        self.cost = 0.0;
        self.first_task_start = None;
        self.last_task_end = None;
        self.task_ids.clear();
    }
}
/// Session tracking state (reset per agent session, not per task).
#[derive(Debug, Clone, Default)]
struct SessionState {
    /// Input tokens this session
    input_tokens: u64,
    /// Output tokens this session
    output_tokens: u64,
    /// Cost this session (USD)
    cost: f64,
}
impl SessionState {
    /// Zero every session counter (all fields' defaults are zero).
    fn reset(&mut self) {
        *self = Self::default();
    }
}
/// Economic tracker for managing agent survival economics.
///
/// Tracks:
/// - Balance (starting capital minus costs plus income)
/// - Token costs separated by channel (LLM, search, OCR, etc.)
/// - Work income with evaluation threshold
/// - Trading profits/losses
/// - Survival status
///
/// Persists records to JSONL files for durability and analysis.
/// All mutable state lives behind one `parking_lot::Mutex`, so the tracker
/// can be shared across threads.
pub struct EconomicTracker {
    /// Configuration (immutable after construction)
    config: EconomicConfig,
    /// Agent signature/name
    signature: String,
    /// Data directory for persistence
    data_path: PathBuf,
    /// Current balance (protected by mutex for thread safety)
    state: Arc<Mutex<TrackerState>>,
}
/// Internal mutable state, always accessed under the tracker's mutex.
struct TrackerState {
    /// Current balance (USD)
    balance: f64,
    /// Initial balance (for status calculation)
    initial_balance: f64,
    /// Cumulative total token cost (USD)
    total_token_cost: f64,
    /// Cumulative total work income (USD)
    total_work_income: f64,
    /// Cumulative total trading profit (USD)
    total_trading_profit: f64,
    /// Task-level tracking
    task: TaskState,
    /// Daily tracking
    daily: DailyState,
    /// Session tracking
    session: SessionState,
}
impl EconomicTracker {
/// Create a new economic tracker.
///
/// # Arguments
/// * `signature` - Agent signature/name for identification
/// * `config` - Economic configuration
/// * `data_path` - Optional custom data path (defaults to `./data/agent_data/{signature}/economic`)
pub fn new(
    signature: impl Into<String>,
    config: EconomicConfig,
    data_path: Option<PathBuf>,
) -> Self {
    let signature = signature.into();
    let data_path = match data_path {
        Some(path) => path,
        None => PathBuf::from(format!("./data/agent_data/{}/economic", signature)),
    };
    // Both balance fields start from the configured initial capital.
    let starting_balance = config.initial_balance;
    Self {
        signature,
        state: Arc::new(Mutex::new(TrackerState {
            balance: starting_balance,
            initial_balance: starting_balance,
            total_token_cost: 0.0,
            total_work_income: 0.0,
            total_trading_profit: 0.0,
            task: TaskState::default(),
            daily: DailyState::default(),
            session: SessionState::default(),
        })),
        config,
        data_path,
    }
}
/// Initialize the tracker, loading existing state or creating new.
///
/// Creates the data directory if needed. When a balance file already
/// exists, the latest persisted state is restored; otherwise a baseline
/// "initialization" balance record is written so later sessions can resume.
///
/// # Errors
/// Returns an error if the data directory cannot be created, or if
/// loading/saving the balance record fails.
pub fn initialize(&self) -> Result<()> {
    fs::create_dir_all(&self.data_path).with_context(|| {
        format!(
            "Failed to create data directory: {}",
            self.data_path.display()
        )
    })?;
    let balance_file = self.balance_file_path();
    if balance_file.exists() {
        self.load_latest_state()?;
        let state = self.state.lock();
        tracing::info!(
            "📊 Loaded economic state for {}: balance=${:.2}, status={}",
            self.signature,
            state.balance,
            self.get_survival_status_inner(&state)
        );
    } else {
        // First run: persist a baseline record before any costs accrue.
        self.save_balance_record("initialization", 0.0, 0.0, 0.0, Vec::new(), false)?;
        tracing::info!(
            "✅ Initialized economic tracker for {}: starting balance=${:.2}",
            self.signature,
            self.config.initial_balance
        );
    }
    Ok(())
}
/// Start tracking costs for a new task.
///
/// Resets the per-task accumulators, records the task in today's list,
/// and stamps the start of the daily work window on the first task.
/// `date` defaults to today's UTC date (YYYY-MM-DD) when not given.
pub fn start_task(&self, task_id: impl Into<String>, date: Option<String>) {
    let task_id = task_id.into();
    let task_date = date.unwrap_or_else(|| Utc::now().format("%Y-%m-%d").to_string());
    let now = Utc::now();
    let mut state = self.state.lock();
    // Fresh per-task slate.
    state.task.task_id = Some(task_id.clone());
    state.task.task_date = Some(task_date);
    state.task.start_time = Some(now);
    state.task.costs.reset();
    state.task.llm_calls.clear();
    state.task.api_calls.clear();
    // Track daily window: only the first task of the day sets the start.
    if state.daily.first_task_start.is_none() {
        state.daily.first_task_start = Some(now);
    }
    state.daily.task_ids.push(task_id);
}
/// End tracking for the current task and save a consolidated record.
///
/// A no-op when no task is active. On success the daily window's end
/// timestamp is advanced and the per-task state is cleared.
pub fn end_task(&self) -> Result<()> {
    let mut state = self.state.lock();
    if state.task.task_id.is_none() {
        return Ok(());
    }
    self.save_task_record_inner(&state)?;
    state.daily.last_task_end = Some(Utc::now());
    state.task.reset();
    Ok(())
}
/// Track LLM token usage.
///
/// # Arguments
/// * `input_tokens` - Number of input tokens
/// * `output_tokens` - Number of output tokens
/// * `api_name` - Origin of the call (e.g., "agent", "wrapup")
/// * `cost` - Pre-computed cost (if provided, skips local calculation)
///
/// # Returns
/// The cost in USD for this call.
pub fn track_tokens(
&self,
input_tokens: u64,
output_tokens: u64,
api_name: impl Into<String>,
cost: Option<f64>,
) -> f64 {
let api_name = api_name.into();
let cost = cost.unwrap_or_else(|| {
self.config
.token_pricing
.calculate_cost(input_tokens, output_tokens)
});
let mut state = self.state.lock();
// Update session tracking
state.session.input_tokens += input_tokens;
state.session.output_tokens += output_tokens;
state.session.cost += cost;
state.daily.cost += cost;
// Update task-level tracking
state.task.costs.llm_tokens += cost;
state.task.llm_calls.push(LlmCallRecord {
timestamp: Utc::now(),
api_name,
input_tokens,
output_tokens,
cost,
});
// Update totals
state.total_token_cost += cost;
state.balance -= cost;
cost
}
/// Track token-based API call cost.
///
/// # Arguments
/// * `tokens` - Number of tokens used
/// * `price_per_million` - Price per million tokens
/// * `api_name` - Name of the API
///
/// # Returns
/// The cost in USD for this call.
pub fn track_api_call(
&self,
tokens: u64,
price_per_million: f64,
api_name: impl Into<String>,
) -> f64 {
let api_name = api_name.into();
let cost = (tokens as f64 / 1_000_000.0) * price_per_million;
self.record_api_cost(
&api_name,
cost,
Some(tokens),
Some(price_per_million),
PricingModel::PerToken,
);
cost
}
/// Track flat-rate API call cost.
///
/// # Arguments
/// * `cost` - Flat cost in USD
/// * `api_name` - Name of the API
///
/// # Returns
/// The cost (same as input).
pub fn track_flat_api_call(&self, cost: f64, api_name: impl Into<String>) -> f64 {
let api_name = api_name.into();
self.record_api_cost(&api_name, cost, None, None, PricingModel::FlatRate);
cost
}
fn record_api_cost(
&self,
api_name: &str,
cost: f64,
tokens: Option<u64>,
price_per_million: Option<f64>,
pricing_model: PricingModel,
) {
let mut state = self.state.lock();
// Update session/daily
state.session.cost += cost;
state.daily.cost += cost;
// Categorize by API type
let api_lower = api_name.to_lowercase();
if api_lower.contains("search")
|| api_lower.contains("jina")
|| api_lower.contains("tavily")
{
state.task.costs.search_api += cost;
} else if api_lower.contains("ocr") {
state.task.costs.ocr_api += cost;
} else {
state.task.costs.other_api += cost;
}
// Record detailed call
state.task.api_calls.push(ApiCallRecord {
timestamp: Utc::now(),
api_name: api_name.to_string(),
pricing_model,
tokens,
price_per_million,
cost,
});
// Update totals
state.total_token_cost += cost;
state.balance -= cost;
}
/// Add income from completed work with evaluation threshold.
///
/// Payment is only awarded if `evaluation_score >= min_evaluation_threshold`.
///
/// # Arguments
/// * `amount` - Base payment amount in USD
/// * `task_id` - Task identifier
/// * `evaluation_score` - Score from 0.0 to 1.0
/// * `description` - Optional description
///
/// # Returns
/// Actual payment received (0.0 if below threshold).
pub fn add_work_income(
&self,
amount: f64,
task_id: impl Into<String>,
evaluation_score: f64,
description: impl Into<String>,
) -> Result<f64> {
let task_id = task_id.into();
let description = description.into();
let threshold = self.config.min_evaluation_threshold;
let actual_payment = if evaluation_score >= threshold {
amount
} else {
0.0
};
{
let mut state = self.state.lock();
if actual_payment > 0.0 {
state.balance += actual_payment;
state.total_work_income += actual_payment;
tracing::info!(
"💰 Work income: +${:.2} (Task: {}, Score: {:.2})",
actual_payment,
task_id,
evaluation_score
);
} else {
tracing::warn!(
"⚠️ Work below threshold (score: {:.2} < {:.2}), no payment for task: {}",
evaluation_score,
threshold,
task_id
);
}
}
self.log_work_income(
&task_id,
amount,
actual_payment,
evaluation_score,
&description,
)?;
Ok(actual_payment)
}
/// Add profit/loss from trading.
pub fn add_trading_profit(&self, profit: f64, _description: impl Into<String>) {
let mut state = self.state.lock();
state.balance += profit;
state.total_trading_profit += profit;
let sign = if profit >= 0.0 { "+" } else { "" };
tracing::info!(
"📈 Trading P&L: {}${:.2}, new balance: ${:.2}",
sign,
profit,
state.balance
);
}
/// Save end-of-day economic state.
pub fn save_daily_state(
&self,
date: &str,
work_income: f64,
trading_profit: f64,
completed_tasks: Vec<String>,
api_error: bool,
) -> Result<()> {
let daily_cost = {
let state = self.state.lock();
state.daily.cost
};
self.save_balance_record(
date,
daily_cost,
work_income,
trading_profit,
completed_tasks,
api_error,
)?;
// Reset daily tracking
{
let mut state = self.state.lock();
state.daily.reset();
state.session.reset();
}
tracing::info!("💾 Saved daily state for {}", date);
Ok(())
}
/// Get current balance.
pub fn get_balance(&self) -> f64 {
self.state.lock().balance
}
/// Get net worth (balance + portfolio value).
pub fn get_net_worth(&self) -> f64 {
// TODO: Add trading portfolio value
self.get_balance()
}
/// Get current survival status.
pub fn get_survival_status(&self) -> SurvivalStatus {
let state = self.state.lock();
self.get_survival_status_inner(&state)
}
    /// Derive survival status from an already-locked state snapshot, so
    /// callers holding the lock avoid re-acquiring it.
    fn get_survival_status_inner(&self, state: &TrackerState) -> SurvivalStatus {
        SurvivalStatus::from_balance(state.balance, state.initial_balance)
    }
/// Check if agent is bankrupt.
pub fn is_bankrupt(&self) -> bool {
self.get_survival_status() == SurvivalStatus::Bankrupt
}
/// Get session cost so far.
pub fn get_session_cost(&self) -> f64 {
self.state.lock().session.cost
}
/// Get daily cost so far.
pub fn get_daily_cost(&self) -> f64 {
self.state.lock().daily.cost
}
/// Get comprehensive economic summary.
pub fn get_summary(&self) -> EconomicSummary {
let state = self.state.lock();
EconomicSummary {
signature: self.signature.clone(),
balance: state.balance,
initial_balance: state.initial_balance,
net_worth: state.balance, // TODO: Add portfolio
total_token_cost: state.total_token_cost,
total_work_income: state.total_work_income,
total_trading_profit: state.total_trading_profit,
session_cost: state.session.cost,
daily_cost: state.daily.cost,
session_input_tokens: state.session.input_tokens,
session_output_tokens: state.session.output_tokens,
survival_status: self.get_survival_status_inner(&state),
is_bankrupt: self.get_survival_status_inner(&state) == SurvivalStatus::Bankrupt,
min_evaluation_threshold: self.config.min_evaluation_threshold,
}
}
/// Reset session tracking (for new decision/activity).
pub fn reset_session(&self) {
self.state.lock().session.reset();
}
/// Record task completion statistics.
pub fn record_task_completion(
&self,
task_id: impl Into<String>,
work_submitted: bool,
wall_clock_seconds: f64,
evaluation_score: f64,
money_earned: f64,
attempt: u32,
date: Option<String>,
) -> Result<()> {
let task_id = task_id.into();
let date = date
.or_else(|| self.state.lock().task.task_date.clone())
.unwrap_or_else(|| Utc::now().format("%Y-%m-%d").to_string());
let record = TaskCompletionRecord {
task_id: task_id.clone(),
date,
attempt,
work_submitted,
evaluation_score,
money_earned,
wall_clock_seconds,
timestamp: Utc::now(),
};
// Read existing records, filter out this task_id
let completions_file = self.task_completions_file_path();
let mut existing: Vec<String> = Vec::new();
if completions_file.exists() {
let file = File::open(&completions_file)?;
let reader = BufReader::new(file);
for line in reader.lines() {
let line = line?;
if line.trim().is_empty() {
continue;
}
if let Ok(entry) = serde_json::from_str::<TaskCompletionRecord>(&line) {
if entry.task_id != task_id {
existing.push(line);
}
} else {
existing.push(line);
}
}
}
// Rewrite with updated record
let mut file = File::create(&completions_file)?;
for line in existing {
writeln!(file, "{}", line)?;
}
writeln!(file, "{}", serde_json::to_string(&record)?)?;
file.sync_all()?;
Ok(())
}
    // ── Private helpers ──
    /// Path of the append-only JSONL balance history file.
    fn balance_file_path(&self) -> PathBuf {
        self.data_path.join("balance.jsonl")
    }
    /// Path of the JSONL file holding per-task cost records and
    /// work-income records (both record types share this file).
    fn token_costs_file_path(&self) -> PathBuf {
        self.data_path.join("token_costs.jsonl")
    }
    /// Path of the JSONL file holding one completion record per task.
    fn task_completions_file_path(&self) -> PathBuf {
        self.data_path.join("task_completions.jsonl")
    }
fn load_latest_state(&self) -> Result<()> {
let balance_file = self.balance_file_path();
let file = File::open(&balance_file)?;
let reader = BufReader::new(file);
let mut last_record: Option<BalanceRecord> = None;
for line in reader.lines() {
let line = line?;
if let Ok(record) = serde_json::from_str::<BalanceRecord>(&line) {
last_record = Some(record);
}
}
if let Some(record) = last_record {
let mut state = self.state.lock();
state.balance = record.balance;
state.total_token_cost = record.total_token_cost;
state.total_work_income = record.total_work_income;
state.total_trading_profit = record.total_trading_profit;
}
Ok(())
}
    /// Append a consolidated cost record for the current task to the
    /// token-costs JSONL file. No-op when no task is active.
    ///
    /// Called with the state lock already held by the caller (e.g. `end_task`).
    fn save_task_record_inner(&self, state: &TrackerState) -> Result<()> {
        let Some(ref task_id) = state.task.task_id else {
            return Ok(());
        };
        // Aggregate LLM token counts across all calls recorded for this task.
        let total_input = state.task.llm_calls.iter().map(|c| c.input_tokens).sum();
        let total_output = state.task.llm_calls.iter().map(|c| c.output_tokens).sum();
        let llm_call_count = state.task.llm_calls.len();
        // Count API calls per pricing model for the summary.
        let token_based = state
            .task
            .api_calls
            .iter()
            .filter(|c| c.pricing_model == PricingModel::PerToken)
            .count();
        let flat_rate = state
            .task
            .api_calls
            .iter()
            .filter(|c| c.pricing_model == PricingModel::FlatRate)
            .count();
        let record = TaskCostRecord {
            timestamp_end: Utc::now(),
            // Missing start time falls back to "now" (zero-length task).
            timestamp_start: state.task.start_time.unwrap_or_else(Utc::now),
            date: state
                .task
                .task_date
                .clone()
                .unwrap_or_else(|| Utc::now().format("%Y-%m-%d").to_string()),
            task_id: task_id.clone(),
            llm_usage: LlmUsageSummary {
                total_calls: llm_call_count,
                total_input_tokens: total_input,
                total_output_tokens: total_output,
                total_tokens: total_input + total_output,
                total_cost: state.task.costs.llm_tokens,
                input_price_per_million: self.config.token_pricing.input_price_per_million,
                output_price_per_million: self.config.token_pricing.output_price_per_million,
                calls_detail: state.task.llm_calls.clone(),
            },
            api_usage: ApiUsageSummary {
                total_calls: state.task.api_calls.len(),
                search_api_cost: state.task.costs.search_api,
                ocr_api_cost: state.task.costs.ocr_api,
                other_api_cost: state.task.costs.other_api,
                token_based_calls: token_based,
                flat_rate_calls: flat_rate,
                calls_detail: state.task.api_calls.clone(),
            },
            cost_summary: state.task.costs.clone(),
            balance_after: state.balance,
            session_cost: state.session.cost,
            daily_cost: state.daily.cost,
        };
        // Append one JSON line per task record.
        let mut file = OpenOptions::new()
            .create(true)
            .append(true)
            .open(self.token_costs_file_path())?;
        writeln!(file, "{}", serde_json::to_string(&record)?)?;
        file.sync_all()?;
        Ok(())
    }
fn save_balance_record(
&self,
date: &str,
token_cost_delta: f64,
work_income_delta: f64,
trading_profit_delta: f64,
completed_tasks: Vec<String>,
api_error: bool,
) -> Result<()> {
let state = self.state.lock();
let task_completion_time = match (state.daily.first_task_start, state.daily.last_task_end) {
(Some(start), Some(end)) => Some((end - start).num_seconds() as f64),
_ => None,
};
let record = BalanceRecord {
date: date.to_string(),
balance: state.balance,
token_cost_delta,
work_income_delta,
trading_profit_delta,
total_token_cost: state.total_token_cost,
total_work_income: state.total_work_income,
total_trading_profit: state.total_trading_profit,
net_worth: state.balance,
survival_status: self.get_survival_status_inner(&state).to_string(),
completed_tasks,
task_id: state.daily.task_ids.first().cloned(),
task_completion_time_seconds: task_completion_time,
api_error,
};
drop(state); // Release lock before IO
let mut file = OpenOptions::new()
.create(true)
.append(true)
.open(self.balance_file_path())?;
writeln!(file, "{}", serde_json::to_string(&record)?)?;
file.sync_all()?;
Ok(())
}
fn log_work_income(
&self,
task_id: &str,
base_amount: f64,
actual_payment: f64,
evaluation_score: f64,
description: &str,
) -> Result<()> {
let state = self.state.lock();
let record = WorkIncomeRecord {
timestamp: Utc::now(),
date: state
.task
.task_date
.clone()
.unwrap_or_else(|| Utc::now().format("%Y-%m-%d").to_string()),
task_id: task_id.to_string(),
base_amount,
actual_payment,
evaluation_score,
threshold: self.config.min_evaluation_threshold,
payment_awarded: actual_payment > 0.0,
description: description.to_string(),
balance_after: state.balance,
};
drop(state);
let mut file = OpenOptions::new()
.create(true)
.append(true)
.open(self.token_costs_file_path())?;
writeln!(file, "{}", serde_json::to_string(&record)?)?;
file.sync_all()?;
Ok(())
}
}
/// Human-readable one-line summary: signature, balance, and survival status.
impl std::fmt::Display for EconomicTracker {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let state = self.state.lock();
        let status = self.get_survival_status_inner(&state);
        write!(
            f,
            "EconomicTracker(signature='{}', balance=${:.2}, status={})",
            self.signature, state.balance, status
        )
    }
}
/// Comprehensive economic summary.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct EconomicSummary {
    /// Agent signature this summary was produced for.
    pub signature: String,
    /// Current cash balance in USD.
    pub balance: f64,
    /// Balance the tracker started with, in USD.
    pub initial_balance: f64,
    /// Currently equals `balance`; portfolio value is not yet included.
    pub net_worth: f64,
    /// Lifetime LLM/API spend in USD.
    pub total_token_cost: f64,
    /// Lifetime income from completed work, in USD.
    pub total_work_income: f64,
    /// Lifetime trading profit/loss, in USD.
    pub total_trading_profit: f64,
    /// Spend accumulated in the current session, in USD.
    pub session_cost: f64,
    /// Spend accumulated in the current day, in USD.
    pub daily_cost: f64,
    /// Input tokens consumed during the current session.
    pub session_input_tokens: u64,
    /// Output tokens produced during the current session.
    pub session_output_tokens: u64,
    /// Survival status derived from balance vs. initial balance.
    pub survival_status: SurvivalStatus,
    /// True when `survival_status` is `Bankrupt`.
    pub is_bankrupt: bool,
    /// Minimum evaluation score required for work payment.
    pub min_evaluation_threshold: f64,
}
#[cfg(test)]
mod tests {
    use super::*;
    use tempfile::TempDir;
    /// Shared config: $1000 starting balance, $3/$15 per 1M input/output
    /// tokens, 0.6 evaluation threshold.
    fn test_config() -> EconomicConfig {
        EconomicConfig {
            enabled: true,
            initial_balance: 1000.0,
            token_pricing: TokenPricing {
                input_price_per_million: 3.0,
                output_price_per_million: 15.0,
            },
            min_evaluation_threshold: 0.6,
        }
    }
    // Fresh tracker starts at the configured balance and Thriving status.
    #[test]
    fn tracker_initialization() {
        let tmp = TempDir::new().unwrap();
        let config = test_config();
        let tracker = EconomicTracker::new("test-agent", config, Some(tmp.path().to_path_buf()));
        tracker.initialize().unwrap();
        assert!((tracker.get_balance() - 1000.0).abs() < f64::EPSILON);
        assert_eq!(tracker.get_survival_status(), SurvivalStatus::Thriving);
    }
    // Locally-computed token cost is debited from the balance.
    #[test]
    fn track_tokens_reduces_balance() {
        let tmp = TempDir::new().unwrap();
        let tracker =
            EconomicTracker::new("test-agent", test_config(), Some(tmp.path().to_path_buf()));
        tracker.initialize().unwrap();
        tracker.start_task("task-1", None);
        let cost = tracker.track_tokens(1000, 500, "agent", None);
        tracker.end_task().unwrap();
        // (1000/1M)*3 + (500/1M)*15 = 0.003 + 0.0075 = 0.0105
        assert!((cost - 0.0105).abs() < 0.0001);
        assert!((tracker.get_balance() - (1000.0 - 0.0105)).abs() < 0.0001);
    }
    // Payment is awarded only when the score meets the 0.6 threshold.
    #[test]
    fn work_income_with_threshold() {
        let tmp = TempDir::new().unwrap();
        let tracker =
            EconomicTracker::new("test-agent", test_config(), Some(tmp.path().to_path_buf()));
        tracker.initialize().unwrap();
        // Below threshold - no payment
        let payment = tracker.add_work_income(100.0, "task-1", 0.5, "").unwrap();
        assert!((payment - 0.0).abs() < f64::EPSILON);
        assert!((tracker.get_balance() - 1000.0).abs() < f64::EPSILON);
        // At threshold - payment awarded
        let payment = tracker.add_work_income(100.0, "task-2", 0.6, "").unwrap();
        assert!((payment - 100.0).abs() < f64::EPSILON);
        assert!((tracker.get_balance() - 1100.0).abs() < f64::EPSILON);
    }
    // Cumulative spend walks the status down: balance 100 -> 70 -> 35 -> 10 -> -10.
    #[test]
    fn survival_status_changes() {
        let tmp = TempDir::new().unwrap();
        let mut config = test_config();
        config.initial_balance = 100.0;
        let tracker = EconomicTracker::new("test-agent", config, Some(tmp.path().to_path_buf()));
        tracker.initialize().unwrap();
        assert_eq!(tracker.get_survival_status(), SurvivalStatus::Thriving);
        // Spend 30% - should be stable
        tracker.track_tokens(10_000_000, 0, "agent", Some(30.0));
        assert_eq!(tracker.get_survival_status(), SurvivalStatus::Stable);
        // Spend more to reach struggling
        tracker.track_tokens(10_000_000, 0, "agent", Some(35.0));
        assert_eq!(tracker.get_survival_status(), SurvivalStatus::Struggling);
        // Spend more to reach critical
        tracker.track_tokens(10_000_000, 0, "agent", Some(25.0));
        assert_eq!(tracker.get_survival_status(), SurvivalStatus::Critical);
        // Bankrupt
        tracker.track_tokens(10_000_000, 0, "agent", Some(20.0));
        assert_eq!(tracker.get_survival_status(), SurvivalStatus::Bankrupt);
        assert!(tracker.is_bankrupt());
    }
    // A second tracker instance over the same data dir resumes the saved balance.
    #[test]
    fn state_persistence() {
        let tmp = TempDir::new().unwrap();
        let config = test_config();
        // Create tracker, do some work, save state
        {
            let tracker =
                EconomicTracker::new("test-agent", config.clone(), Some(tmp.path().to_path_buf()));
            tracker.initialize().unwrap();
            tracker.track_tokens(1000, 500, "agent", Some(10.0));
            tracker
                .save_daily_state("2025-01-01", 0.0, 0.0, vec![], false)
                .unwrap();
        }
        // Create new tracker, should load state
        {
            let tracker =
                EconomicTracker::new("test-agent", config, Some(tmp.path().to_path_buf()));
            tracker.initialize().unwrap();
            assert!((tracker.get_balance() - 990.0).abs() < 0.01);
        }
    }
    // Search/OCR/other API calls are all debited, regardless of category bucket.
    #[test]
    fn api_call_categorization() {
        let tmp = TempDir::new().unwrap();
        let tracker =
            EconomicTracker::new("test-agent", test_config(), Some(tmp.path().to_path_buf()));
        tracker.initialize().unwrap();
        tracker.start_task("task-1", None);
        // Search API
        tracker.track_flat_api_call(0.001, "tavily_search");
        // OCR API
        tracker.track_api_call(1000, 1.0, "ocr_reader");
        // Other API
        tracker.track_flat_api_call(0.01, "some_api");
        tracker.end_task().unwrap();
        // Balance should reflect all costs
        let expected_reduction = 0.001 + 0.001 + 0.01; // search + ocr + other
        assert!((tracker.get_balance() - (1000.0 - expected_reduction)).abs() < 0.0001);
    }
}

View File

@ -362,6 +362,7 @@ pub async fn run_gateway(host: &str, port: u16, config: Config) -> Result<()> {
&providers::ProviderRuntimeOptions {
auth_profile_override: None,
provider_api_url: config.api_url.clone(),
provider_transport: config.effective_provider_transport(),
zeroclaw_dir: config.config_path.parent().map(std::path::PathBuf::from),
secrets_encrypt: config.secrets.encrypt,
reasoning_enabled: config.runtime.reasoning_enabled,
@ -662,11 +663,14 @@ pub async fn run_gateway(host: &str, port: u16, config: Config) -> Result<()> {
}
// Wrap observer with broadcast capability for SSE
// Use cost-tracking observer when cost tracking is enabled
let base_observer = crate::observability::create_observer_with_cost_tracking(
&config.observability,
cost_tracker.clone(),
&config.cost,
);
let broadcast_observer: Arc<dyn crate::observability::Observer> =
Arc::new(sse::BroadcastObserver::new(
crate::observability::create_observer(&config.observability),
event_tx.clone(),
));
Arc::new(sse::BroadcastObserver::new(base_observer, event_tx.clone()));
let state = AppState {
config: config_state,

View File

@ -1,7 +1,7 @@
use super::{IntegrationCategory, IntegrationEntry, IntegrationStatus};
use crate::providers::{
is_glm_alias, is_minimax_alias, is_moonshot_alias, is_qianfan_alias, is_qwen_alias,
is_zai_alias,
is_doubao_alias, is_glm_alias, is_minimax_alias, is_moonshot_alias, is_qianfan_alias,
is_qwen_alias, is_siliconflow_alias, is_zai_alias,
};
/// Returns the full catalog of integrations
@ -436,6 +436,33 @@ pub fn all_integrations() -> Vec<IntegrationEntry> {
}
},
},
IntegrationEntry {
name: "Volcengine ARK",
description: "Doubao and ARK model catalog",
category: IntegrationCategory::AiModel,
status_fn: |c| {
if c.default_provider.as_deref().is_some_and(is_doubao_alias) {
IntegrationStatus::Active
} else {
IntegrationStatus::Available
}
},
},
IntegrationEntry {
name: "SiliconFlow",
description: "OpenAI-compatible hosted models and reasoning",
category: IntegrationCategory::AiModel,
status_fn: |c| {
if c.default_provider
.as_deref()
.is_some_and(is_siliconflow_alias)
{
IntegrationStatus::Active
} else {
IntegrationStatus::Available
}
},
},
IntegrationEntry {
name: "Groq",
description: "Llama 3.3 70B Versatile and low-latency models",
@ -991,5 +1018,31 @@ mod tests {
(qianfan.status_fn)(&config),
IntegrationStatus::Active
));
config.default_provider = Some("ark".to_string());
let volcengine = entries.iter().find(|e| e.name == "Volcengine ARK").unwrap();
assert!(matches!(
(volcengine.status_fn)(&config),
IntegrationStatus::Active
));
config.default_provider = Some("volcengine".to_string());
assert!(matches!(
(volcengine.status_fn)(&config),
IntegrationStatus::Active
));
config.default_provider = Some("siliconflow".to_string());
let siliconflow = entries.iter().find(|e| e.name == "SiliconFlow").unwrap();
assert!(matches!(
(siliconflow.status_fn)(&config),
IntegrationStatus::Active
));
config.default_provider = Some("silicon-cloud".to_string());
assert!(matches!(
(siliconflow.status_fn)(&config),
IntegrationStatus::Active
));
}
}

View File

@ -49,6 +49,7 @@ pub(crate) mod cost;
pub(crate) mod cron;
pub(crate) mod daemon;
pub(crate) mod doctor;
pub mod economic;
pub mod gateway;
pub mod goals;
pub(crate) mod hardware;

259
src/observability/cost.rs Normal file
View File

@ -0,0 +1,259 @@
//! Cost-tracking observer that wires provider token usage to the cost tracker.
//!
//! Intercepts `LlmResponse` events and records usage to the `CostTracker`,
//! calculating costs based on model pricing configuration.
use super::traits::{Observer, ObserverEvent, ObserverMetric};
use crate::config::schema::ModelPricing;
use crate::cost::{CostTracker, TokenUsage};
use std::collections::HashMap;
use std::sync::Arc;
/// Observer that records token usage to a CostTracker.
///
/// Listens for `LlmResponse` events and calculates costs using model pricing.
pub struct CostObserver {
    // Destination for recorded token usage.
    tracker: Arc<CostTracker>,
    // Pricing table keyed by "provider/model" or bare model name.
    prices: HashMap<String, ModelPricing>,
    /// Default pricing for unknown models (USD per 1M tokens)
    default_input_price: f64,
    default_output_price: f64,
}
impl CostObserver {
    /// Create a new cost observer with the given tracker and pricing config.
    pub fn new(tracker: Arc<CostTracker>, prices: HashMap<String, ModelPricing>) -> Self {
        Self {
            tracker,
            prices,
            // Conservative defaults for unknown models
            default_input_price: 3.0,
            default_output_price: 15.0,
        }
    }
    /// Look up pricing for a model, trying various name formats.
    ///
    /// Resolution order: exact "provider/model" key, bare model key, a fuzzy
    /// family match over all configured keys, then the built-in defaults.
    ///
    /// NOTE(review): the fuzzy pass iterates a `HashMap`, so when several keys
    /// family-match the same model the winner is not deterministic — confirm
    /// pricing tables keep family keys non-overlapping.
    fn get_pricing(&self, provider: &str, model: &str) -> (f64, f64) {
        // Try exact match first: "provider/model"
        let full_name = format!("{provider}/{model}");
        if let Some(pricing) = self.prices.get(&full_name) {
            return (pricing.input, pricing.output);
        }
        // Try just the model name
        if let Some(pricing) = self.prices.get(model) {
            return (pricing.input, pricing.output);
        }
        // Hoisted out of the loop below — it depends only on `model`.
        // e.g. "claude-3-5-sonnet-20241022" should match "claude-3.5-sonnet".
        let normalized_model = model.replace('-', ".");
        // Try model family matching (e.g., "claude-sonnet-4" matches any claude-sonnet-4-*)
        for (key, pricing) in &self.prices {
            // Strip provider prefix if present
            let key_model = key.split('/').next_back().unwrap_or(key);
            // Check if model starts with the key (family match)
            if model.starts_with(key_model) || key_model.starts_with(model) {
                return (pricing.input, pricing.output);
            }
            // Check for common model name patterns via '-'/'.' normalization.
            let normalized_key = key_model.replace('-', ".");
            if normalized_model.contains(&normalized_key)
                || normalized_key.contains(&normalized_model)
            {
                return (pricing.input, pricing.output);
            }
        }
        // Fall back to defaults
        tracing::debug!(
            "No pricing found for {}/{}, using defaults (${}/{} per 1M tokens)",
            provider,
            model,
            self.default_input_price,
            self.default_output_price
        );
        (self.default_input_price, self.default_output_price)
    }
}
impl Observer for CostObserver {
    fn record_event(&self, event: &ObserverEvent) {
        // Only successful LLM responses carry billable usage.
        let ObserverEvent::LlmResponse {
            provider,
            model,
            success: true,
            input_tokens,
            output_tokens,
            ..
        } = event
        else {
            return;
        };
        let input = input_tokens.unwrap_or(0);
        let output = output_tokens.unwrap_or(0);
        // Nothing to bill without token counts.
        if input == 0 && output == 0 {
            return;
        }
        let (input_price, output_price) = self.get_pricing(provider, model);
        let full_model_name = format!("{provider}/{model}");
        let usage = TokenUsage::new(full_model_name, input, output, input_price, output_price);
        if let Err(e) = self.tracker.record_usage(usage) {
            tracing::warn!("Failed to record cost usage: {e}");
        }
    }
    /// Cost tracking has no metric-based data to record.
    fn record_metric(&self, _metric: &ObserverMetric) {}
    fn name(&self) -> &str {
        "cost"
    }
    fn as_any(&self) -> &dyn std::any::Any {
        self
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::config::schema::CostConfig;
    use std::time::Duration;
    use tempfile::TempDir;
    /// Builds an enabled CostTracker backed by a temp dir; the TempDir is
    /// returned alongside so it outlives the tracker.
    fn create_test_tracker() -> (TempDir, Arc<CostTracker>) {
        let tmp = TempDir::new().unwrap();
        let config = CostConfig {
            enabled: true,
            ..Default::default()
        };
        let tracker = Arc::new(CostTracker::new(config, tmp.path()).unwrap());
        (tmp, tracker)
    }
    // A successful LlmResponse with an exact "provider/model" price key is
    // recorded with the configured pricing.
    #[test]
    fn cost_observer_records_llm_response_usage() {
        let (_tmp, tracker) = create_test_tracker();
        let mut prices = HashMap::new();
        prices.insert(
            "anthropic/claude-sonnet-4-20250514".into(),
            ModelPricing {
                input: 3.0,
                output: 15.0,
            },
        );
        let observer = CostObserver::new(tracker.clone(), prices);
        observer.record_event(&ObserverEvent::LlmResponse {
            provider: "anthropic".into(),
            model: "claude-sonnet-4-20250514".into(),
            duration: Duration::from_millis(100),
            success: true,
            error_message: None,
            input_tokens: Some(1000),
            output_tokens: Some(500),
        });
        let summary = tracker.get_summary().unwrap();
        assert_eq!(summary.request_count, 1);
        // Cost: (1000/1M)*3 + (500/1M)*15 = 0.003 + 0.0075 = 0.0105
        assert!((summary.session_cost_usd - 0.0105).abs() < 0.0001);
    }
    // Failed responses (success: false) must not be billed.
    #[test]
    fn cost_observer_ignores_failed_responses() {
        let (_tmp, tracker) = create_test_tracker();
        let observer = CostObserver::new(tracker.clone(), HashMap::new());
        observer.record_event(&ObserverEvent::LlmResponse {
            provider: "anthropic".into(),
            model: "claude-sonnet-4".into(),
            duration: Duration::from_millis(100),
            success: false,
            error_message: Some("API error".into()),
            input_tokens: Some(1000),
            output_tokens: Some(500),
        });
        let summary = tracker.get_summary().unwrap();
        assert_eq!(summary.request_count, 0);
    }
    // Responses without token counts are skipped entirely.
    #[test]
    fn cost_observer_ignores_zero_token_responses() {
        let (_tmp, tracker) = create_test_tracker();
        let observer = CostObserver::new(tracker.clone(), HashMap::new());
        observer.record_event(&ObserverEvent::LlmResponse {
            provider: "anthropic".into(),
            model: "claude-sonnet-4".into(),
            duration: Duration::from_millis(100),
            success: true,
            error_message: None,
            input_tokens: None,
            output_tokens: None,
        });
        let summary = tracker.get_summary().unwrap();
        assert_eq!(summary.request_count, 0);
    }
    // Unknown models fall back to the built-in $3/$15 per 1M defaults.
    #[test]
    fn cost_observer_uses_default_pricing_for_unknown_models() {
        let (_tmp, tracker) = create_test_tracker();
        let observer = CostObserver::new(tracker.clone(), HashMap::new());
        observer.record_event(&ObserverEvent::LlmResponse {
            provider: "unknown".into(),
            model: "mystery-model".into(),
            duration: Duration::from_millis(100),
            success: true,
            error_message: None,
            input_tokens: Some(1_000_000), // 1M tokens
            output_tokens: Some(1_000_000),
        });
        let summary = tracker.get_summary().unwrap();
        assert_eq!(summary.request_count, 1);
        // Default: $3 input + $15 output = $18 for 1M each
        assert!((summary.session_cost_usd - 18.0).abs() < 0.01);
    }
    // A versioned model name ("gpt-4o-2024-05-13") matches its family key.
    #[test]
    fn cost_observer_matches_model_family() {
        let (_tmp, tracker) = create_test_tracker();
        let mut prices = HashMap::new();
        prices.insert(
            "openai/gpt-4o".into(),
            ModelPricing {
                input: 5.0,
                output: 15.0,
            },
        );
        let observer = CostObserver::new(tracker.clone(), prices);
        // Model name with version suffix should still match
        observer.record_event(&ObserverEvent::LlmResponse {
            provider: "openai".into(),
            model: "gpt-4o-2024-05-13".into(),
            duration: Duration::from_millis(100),
            success: true,
            error_message: None,
            input_tokens: Some(1_000_000),
            output_tokens: Some(0),
        });
        let summary = tracker.get_summary().unwrap();
        // Should use $5 input price, not default $3
        assert!((summary.session_cost_usd - 5.0).abs() < 0.01);
    }
}

View File

@ -1,3 +1,4 @@
pub mod cost;
pub mod log;
pub mod multi;
pub mod noop;
@ -12,6 +13,7 @@ pub mod verbose;
pub use self::log::LogObserver;
#[allow(unused_imports)]
pub use self::multi::MultiObserver;
pub use cost::CostObserver;
pub use noop::NoopObserver;
#[cfg(feature = "observability-otel")]
pub use otel::OtelObserver;
@ -20,10 +22,40 @@ pub use traits::{Observer, ObserverEvent};
#[allow(unused_imports)]
pub use verbose::VerboseObserver;
use crate::config::schema::CostConfig;
use crate::config::ObservabilityConfig;
use crate::cost::CostTracker;
use std::sync::Arc;
/// Factory: create the right observer from config
pub fn create_observer(config: &ObservabilityConfig) -> Box<dyn Observer> {
create_observer_internal(config)
}
/// Create an observer stack with optional cost tracking.
///
/// When cost tracking is enabled, wraps the base observer in a MultiObserver
/// that also includes a CostObserver for recording token usage.
pub fn create_observer_with_cost_tracking(
config: &ObservabilityConfig,
cost_tracker: Option<Arc<CostTracker>>,
cost_config: &CostConfig,
) -> Box<dyn Observer> {
let base_observer = create_observer_internal(config);
match cost_tracker {
Some(tracker) if cost_config.enabled => {
let cost_observer = CostObserver::new(tracker, cost_config.prices.clone());
Box::new(MultiObserver::new(vec![
base_observer,
Box::new(cost_observer),
]))
}
_ => base_observer,
}
}
fn create_observer_internal(config: &ObservabilityConfig) -> Box<dyn Observer> {
match config.backend.as_str() {
"log" => Box::new(LogObserver::new()),
"prometheus" => Box::new(PrometheusObserver::new()),

View File

@ -18,9 +18,9 @@ use crate::memory::{
selectable_memory_backends, MemoryBackendKind,
};
use crate::providers::{
canonical_china_provider_name, is_glm_alias, is_glm_cn_alias, is_minimax_alias,
is_moonshot_alias, is_qianfan_alias, is_qwen_alias, is_qwen_oauth_alias, is_zai_alias,
is_zai_cn_alias,
canonical_china_provider_name, is_doubao_alias, is_glm_alias, is_glm_cn_alias,
is_minimax_alias, is_moonshot_alias, is_qianfan_alias, is_qwen_alias, is_qwen_oauth_alias,
is_siliconflow_alias, is_zai_alias, is_zai_cn_alias,
};
use anyhow::{bail, Context, Result};
use console::style;
@ -186,6 +186,7 @@ pub async fn run_wizard(force: bool) -> Result<Config> {
proxy: crate::config::ProxyConfig::default(),
identity: identity_config,
cost: crate::config::CostConfig::default(),
economic: crate::config::EconomicConfig::default(),
peripherals: crate::config::PeripheralsConfig::default(),
agents: std::collections::HashMap::new(),
hooks: crate::config::HooksConfig::default(),
@ -550,6 +551,7 @@ async fn run_quick_setup_with_home(
proxy: crate::config::ProxyConfig::default(),
identity: crate::config::IdentityConfig::default(),
cost: crate::config::CostConfig::default(),
economic: crate::config::EconomicConfig::default(),
peripherals: crate::config::PeripheralsConfig::default(),
agents: std::collections::HashMap::new(),
hooks: crate::config::HooksConfig::default(),
@ -710,6 +712,9 @@ fn canonical_provider_name(provider_name: &str) -> &str {
}
if let Some(canonical) = canonical_china_provider_name(provider_name) {
if canonical == "doubao" {
return "volcengine";
}
return canonical;
}
@ -775,6 +780,8 @@ fn default_model_for_provider(provider: &str) -> String {
"glm" | "zai" => "glm-5".into(),
"minimax" => "MiniMax-M2.5".into(),
"qwen" => "qwen-plus".into(),
"volcengine" => "doubao-1-5-pro-32k-250115".into(),
"siliconflow" => "Pro/zai-org/GLM-4.7".into(),
"qwen-code" => "qwen3-coder-plus".into(),
"ollama" => "llama3.2".into(),
"llamacpp" => "ggml-org/gpt-oss-20b-GGUF".into(),
@ -1088,6 +1095,31 @@ fn curated_models_for_provider(provider_name: &str) -> Vec<(String, String)> {
"Qwen Turbo (fast and cost-efficient)".to_string(),
),
],
"volcengine" => vec![
(
"doubao-1-5-pro-32k-250115".to_string(),
"Doubao 1.5 Pro 32K (official sample model)".to_string(),
),
(
"doubao-seed-1-6-250615".to_string(),
"Doubao Seed 1.6 (reasoning flagship)".to_string(),
),
(
"deepseek-v3.2".to_string(),
"DeepSeek V3.2 (available in ARK catalog)".to_string(),
),
],
"siliconflow" => vec![
(
"Pro/zai-org/GLM-4.7".to_string(),
"GLM-4.7 Pro (official API example)".to_string(),
),
(
"Pro/deepseek-ai/DeepSeek-V3.2".to_string(),
"DeepSeek V3.2 Pro".to_string(),
),
("Qwen/Qwen3-32B".to_string(), "Qwen3 32B".to_string()),
],
"qwen-code" => vec![
(
"qwen3-coder-plus".to_string(),
@ -1264,6 +1296,8 @@ fn supports_live_model_fetch(provider_name: &str) -> bool {
| "glm"
| "zai"
| "qwen"
| "volcengine"
| "siliconflow"
| "nvidia"
)
}
@ -1276,6 +1310,9 @@ fn models_endpoint_for_provider(provider_name: &str) -> Option<&'static str> {
"moonshot-cn" | "kimi-cn" => Some("https://api.moonshot.cn/v1/models"),
"glm-cn" | "bigmodel" => Some("https://open.bigmodel.cn/api/paas/v4/models"),
"zai-cn" | "z.ai-cn" => Some("https://open.bigmodel.cn/api/coding/paas/v4/models"),
"volcengine" | "ark" | "doubao" | "doubao-cn" => {
Some("https://ark.cn-beijing.volces.com/api/v3/models")
}
_ => match canonical_provider_name(provider_name) {
"openai-codex" | "openai" => Some("https://api.openai.com/v1/models"),
"venice" => Some("https://api.venice.ai/api/v1/models"),
@ -1291,6 +1328,7 @@ fn models_endpoint_for_provider(provider_name: &str) -> Option<&'static str> {
"glm" => Some("https://api.z.ai/api/paas/v4/models"),
"zai" => Some("https://api.z.ai/api/coding/paas/v4/models"),
"qwen" => Some("https://dashscope.aliyuncs.com/compatible-mode/v1/models"),
"siliconflow" => Some("https://api.siliconflow.cn/v1/models"),
"nvidia" => Some("https://integrate.api.nvidia.com/v1/models"),
"astrai" => Some("https://as-trai.com/v1/models"),
"llamacpp" => Some("http://localhost:8080/v1/models"),
@ -2303,6 +2341,11 @@ async fn setup_provider(workspace_dir: &Path) -> Result<(String, String, String,
("qwen-us", "Qwen — DashScope US endpoint"),
("hunyuan", "Hunyuan — Tencent large models (T1, Turbo, Pro)"),
("qianfan", "Qianfan — Baidu AI models (China endpoint)"),
("volcengine", "Volcengine ARK — Doubao model family"),
(
"siliconflow",
"SiliconFlow — OpenAI-compatible hosted models",
),
("zai", "Z.AI — global coding endpoint"),
("zai-cn", "Z.AI — China coding endpoint (open.bigmodel.cn)"),
("synthetic", "Synthetic — Synthetic AI models"),
@ -2697,6 +2740,10 @@ async fn setup_provider(workspace_dir: &Path) -> Result<(String, String, String,
"https://help.aliyun.com/zh/model-studio/developer-reference/get-api-key"
} else if is_qianfan_alias(provider_name) {
"https://cloud.baidu.com/doc/WENXINWORKSHOP/s/7lm0vxo78"
} else if is_doubao_alias(provider_name) {
"https://console.volcengine.com/ark/region:ark+cn-beijing/apiKey"
} else if is_siliconflow_alias(provider_name) {
"https://cloud.siliconflow.cn/account/ak"
} else {
match provider_name {
"openrouter" => "https://openrouter.ai/keys",
@ -3005,6 +3052,8 @@ fn provider_env_var(name: &str) -> &'static str {
"glm" => "GLM_API_KEY",
"minimax" => "MINIMAX_API_KEY",
"qwen" => "DASHSCOPE_API_KEY",
"volcengine" => "ARK_API_KEY",
"siliconflow" => "SILICONFLOW_API_KEY",
"hunyuan" => "HUNYUAN_API_KEY",
"qianfan" => "QIANFAN_API_KEY",
"zai" => "ZAI_API_KEY",
@ -7305,6 +7354,14 @@ mod tests {
assert_eq!(default_model_for_provider("moonshot"), "kimi-k2.5");
assert_eq!(default_model_for_provider("hunyuan"), "hunyuan-t1-latest");
assert_eq!(default_model_for_provider("tencent"), "hunyuan-t1-latest");
assert_eq!(
default_model_for_provider("siliconflow"),
"Pro/zai-org/GLM-4.7"
);
assert_eq!(
default_model_for_provider("volcengine"),
"doubao-1-5-pro-32k-250115"
);
assert_eq!(
default_model_for_provider("nvidia"),
"meta/llama-3.3-70b-instruct"
@ -7343,6 +7400,10 @@ mod tests {
assert_eq!(canonical_provider_name("minimax-cn"), "minimax");
assert_eq!(canonical_provider_name("zai-cn"), "zai");
assert_eq!(canonical_provider_name("z.ai-global"), "zai");
assert_eq!(canonical_provider_name("doubao"), "volcengine");
assert_eq!(canonical_provider_name("ark"), "volcengine");
assert_eq!(canonical_provider_name("silicon-cloud"), "siliconflow");
assert_eq!(canonical_provider_name("siliconcloud"), "siliconflow");
assert_eq!(canonical_provider_name("nvidia-nim"), "nvidia");
assert_eq!(canonical_provider_name("aws-bedrock"), "bedrock");
assert_eq!(canonical_provider_name("build.nvidia.com"), "nvidia");
@ -7485,6 +7546,23 @@ mod tests {
assert!(ids.contains(&"qwen3-max-2026-01-23".to_string()));
}
#[test]
// Verifies that the curated model pickers for the newly added Volcengine ARK
// and SiliconFlow providers include their documented default/flagship models.
fn curated_models_for_volcengine_and_siliconflow_include_expected_defaults() {
    // Keep only the model IDs; the human-readable descriptions are irrelevant here.
    let volcengine_ids: Vec<String> = curated_models_for_provider("volcengine")
        .into_iter()
        .map(|(id, _)| id)
        .collect();
    assert!(volcengine_ids.contains(&"doubao-1-5-pro-32k-250115".to_string()));
    assert!(volcengine_ids.contains(&"doubao-seed-1-6-250615".to_string()));

    let siliconflow_ids: Vec<String> = curated_models_for_provider("siliconflow")
        .into_iter()
        .map(|(id, _)| id)
        .collect();
    assert!(siliconflow_ids.contains(&"Pro/zai-org/GLM-4.7".to_string()));
    assert!(siliconflow_ids.contains(&"Pro/deepseek-ai/DeepSeek-V3.2".to_string()));
}
#[test]
fn supports_live_model_fetch_for_supported_and_unsupported_providers() {
assert!(supports_live_model_fetch("openai"));
@ -7506,6 +7584,11 @@ mod tests {
assert!(supports_live_model_fetch("glm-cn"));
assert!(supports_live_model_fetch("qwen-intl"));
assert!(supports_live_model_fetch("qwen-coding-plan"));
assert!(supports_live_model_fetch("siliconflow"));
assert!(supports_live_model_fetch("silicon-cloud"));
assert!(supports_live_model_fetch("volcengine"));
assert!(supports_live_model_fetch("doubao"));
assert!(supports_live_model_fetch("ark"));
assert!(!supports_live_model_fetch("minimax-cn"));
assert!(!supports_live_model_fetch("unknown-provider"));
}
@ -7564,6 +7647,22 @@ mod tests {
curated_models_for_provider("bedrock"),
curated_models_for_provider("aws-bedrock")
);
assert_eq!(
curated_models_for_provider("volcengine"),
curated_models_for_provider("doubao")
);
assert_eq!(
curated_models_for_provider("volcengine"),
curated_models_for_provider("ark")
);
assert_eq!(
curated_models_for_provider("siliconflow"),
curated_models_for_provider("silicon-cloud")
);
assert_eq!(
curated_models_for_provider("siliconflow"),
curated_models_for_provider("siliconcloud")
);
}
#[test]
@ -7596,6 +7695,18 @@ mod tests {
models_endpoint_for_provider("qwen-coding-plan"),
Some("https://coding.dashscope.aliyuncs.com/v1/models")
);
assert_eq!(
models_endpoint_for_provider("volcengine"),
Some("https://ark.cn-beijing.volces.com/api/v3/models")
);
assert_eq!(
models_endpoint_for_provider("doubao"),
Some("https://ark.cn-beijing.volces.com/api/v3/models")
);
assert_eq!(
models_endpoint_for_provider("ark"),
Some("https://ark.cn-beijing.volces.com/api/v3/models")
);
}
#[test]
@ -7616,6 +7727,14 @@ mod tests {
models_endpoint_for_provider("moonshot"),
Some("https://api.moonshot.ai/v1/models")
);
assert_eq!(
models_endpoint_for_provider("siliconflow"),
Some("https://api.siliconflow.cn/v1/models")
);
assert_eq!(
models_endpoint_for_provider("silicon-cloud"),
Some("https://api.siliconflow.cn/v1/models")
);
assert_eq!(
models_endpoint_for_provider("llamacpp"),
Some("http://localhost:8080/v1/models")
@ -7914,6 +8033,12 @@ mod tests {
assert_eq!(provider_env_var("minimax-oauth-cn"), "MINIMAX_API_KEY");
assert_eq!(provider_env_var("moonshot-intl"), "MOONSHOT_API_KEY");
assert_eq!(provider_env_var("zai-cn"), "ZAI_API_KEY");
assert_eq!(provider_env_var("doubao"), "ARK_API_KEY");
assert_eq!(provider_env_var("volcengine"), "ARK_API_KEY");
assert_eq!(provider_env_var("ark"), "ARK_API_KEY");
assert_eq!(provider_env_var("siliconflow"), "SILICONFLOW_API_KEY");
assert_eq!(provider_env_var("silicon-cloud"), "SILICONFLOW_API_KEY");
assert_eq!(provider_env_var("siliconcloud"), "SILICONFLOW_API_KEY");
assert_eq!(provider_env_var("nvidia"), "NVIDIA_API_KEY");
assert_eq!(provider_env_var("nvidia-nim"), "NVIDIA_API_KEY"); // alias
assert_eq!(provider_env_var("build.nvidia.com"), "NVIDIA_API_KEY"); // alias
@ -8006,13 +8131,14 @@ mod tests {
}
#[test]
fn channel_menu_choices_include_signal_and_nextcloud_talk() {
fn channel_menu_choices_include_signal_nextcloud_and_dingtalk() {
assert!(channel_menu_choices().contains(&ChannelMenuChoice::Signal));
assert!(channel_menu_choices().contains(&ChannelMenuChoice::NextcloudTalk));
assert!(channel_menu_choices().contains(&ChannelMenuChoice::DingTalk));
}
#[test]
fn launchable_channels_include_signal_mattermost_qq_and_nextcloud_talk() {
fn launchable_channels_include_signal_mattermost_qq_nextcloud_and_dingtalk() {
let mut channels = ChannelsConfig::default();
assert!(!has_launchable_channels(&channels));
@ -8056,5 +8182,13 @@ mod tests {
allowed_users: vec!["*".into()],
});
assert!(has_launchable_channels(&channels));
channels.nextcloud_talk = None;
channels.dingtalk = Some(crate::config::schema::DingTalkConfig {
client_id: "client-id".into(),
client_secret: "client-secret".into(),
allowed_users: vec!["*".into()],
});
assert!(has_launchable_channels(&channels));
}
}

View File

@ -74,6 +74,7 @@ const QWEN_OAUTH_DEFAULT_CLIENT_ID: &str = "f0304373b74a44d2b584a3fb70ca9e56";
const QWEN_OAUTH_CREDENTIAL_FILE: &str = ".qwen/oauth_creds.json";
const ZAI_GLOBAL_BASE_URL: &str = "https://api.z.ai/api/coding/paas/v4";
const ZAI_CN_BASE_URL: &str = "https://open.bigmodel.cn/api/coding/paas/v4";
const SILICONFLOW_BASE_URL: &str = "https://api.siliconflow.cn/v1";
const VERCEL_AI_GATEWAY_BASE_URL: &str = "https://ai-gateway.vercel.sh/v1";
pub(crate) fn is_minimax_intl_alias(name: &str) -> bool {
@ -179,6 +180,10 @@ pub(crate) fn is_doubao_alias(name: &str) -> bool {
matches!(name, "doubao" | "volcengine" | "ark" | "doubao-cn")
}
/// Returns `true` when `name` is one of the accepted spellings for the
/// SiliconFlow provider: "siliconflow", "silicon-cloud", or "siliconcloud".
pub(crate) fn is_siliconflow_alias(name: &str) -> bool {
    const SILICONFLOW_ALIASES: [&str; 3] = ["siliconflow", "silicon-cloud", "siliconcloud"];
    SILICONFLOW_ALIASES.contains(&name)
}
#[derive(Clone, Copy, Debug)]
enum MinimaxOauthRegion {
Global,
@ -618,6 +623,8 @@ pub(crate) fn canonical_china_provider_name(name: &str) -> Option<&'static str>
Some("qianfan")
} else if is_doubao_alias(name) {
Some("doubao")
} else if is_siliconflow_alias(name) {
Some("siliconflow")
} else if matches!(name, "hunyuan" | "tencent") {
Some("hunyuan")
} else {
@ -683,6 +690,7 @@ fn zai_base_url(name: &str) -> Option<&'static str> {
pub struct ProviderRuntimeOptions {
pub auth_profile_override: Option<String>,
pub provider_api_url: Option<String>,
pub provider_transport: Option<String>,
pub zeroclaw_dir: Option<PathBuf>,
pub secrets_encrypt: bool,
pub reasoning_enabled: Option<bool>,
@ -697,6 +705,7 @@ impl Default for ProviderRuntimeOptions {
Self {
auth_profile_override: None,
provider_api_url: None,
provider_transport: None,
zeroclaw_dir: None,
secrets_encrypt: true,
reasoning_enabled: None,
@ -872,6 +881,7 @@ fn resolve_provider_credential(name: &str, credential_override: Option<&str>) ->
"hunyuan" | "tencent" => vec!["HUNYUAN_API_KEY"],
name if is_qianfan_alias(name) => vec!["QIANFAN_API_KEY"],
name if is_doubao_alias(name) => vec!["ARK_API_KEY", "DOUBAO_API_KEY"],
name if is_siliconflow_alias(name) => vec!["SILICONFLOW_API_KEY"],
name if is_qwen_alias(name) => vec!["DASHSCOPE_API_KEY"],
name if is_zai_alias(name) => vec!["ZAI_API_KEY"],
"nvidia" | "nvidia-nim" | "build.nvidia.com" => vec!["NVIDIA_API_KEY"],
@ -1181,6 +1191,13 @@ fn create_provider_with_url_and_options(
key,
AuthStyle::Bearer,
))),
name if is_siliconflow_alias(name) => Ok(Box::new(OpenAiCompatibleProvider::new_with_vision(
"SiliconFlow",
SILICONFLOW_BASE_URL,
key,
AuthStyle::Bearer,
true,
))),
name if qwen_base_url(name).is_some() => Ok(Box::new(OpenAiCompatibleProvider::new_with_vision(
"Qwen",
qwen_base_url(name).expect("checked in guard"),
@ -1512,7 +1529,15 @@ pub fn create_routed_provider_with_options(
.then_some(api_url)
.flatten();
let route_options = options.clone();
let mut route_options = options.clone();
if let Some(transport) = route
.transport
.as_deref()
.map(str::trim)
.filter(|value| !value.is_empty())
{
route_options.provider_transport = Some(transport.to_string());
}
match create_resilient_provider_with_options(
&route.provider,
@ -1542,19 +1567,8 @@ pub fn create_routed_provider_with_options(
}
}
// Build route table
let routes: Vec<(String, router::Route)> = model_routes
.iter()
.map(|r| {
(
r.hint.clone(),
router::Route {
provider_name: r.provider.clone(),
model: r.model.clone(),
},
)
})
.collect();
// Keep only successfully initialized routed providers and preserve
// their provider-id bindings (e.g. "<provider>#<hint>").
Ok(Box::new(
router::RouterProvider::new(providers, routes, default_model.to_string())
@ -1712,6 +1726,12 @@ pub fn list_providers() -> Vec<ProviderInfo> {
aliases: &["volcengine", "ark", "doubao-cn"],
local: false,
},
ProviderInfo {
name: "siliconflow",
display_name: "SiliconFlow",
aliases: &["silicon-cloud", "siliconcloud"],
local: false,
},
ProviderInfo {
name: "qwen",
display_name: "Qwen (DashScope / Qwen Code OAuth)",
@ -2069,6 +2089,9 @@ mod tests {
assert!(is_doubao_alias("volcengine"));
assert!(is_doubao_alias("ark"));
assert!(is_doubao_alias("doubao-cn"));
assert!(is_siliconflow_alias("siliconflow"));
assert!(is_siliconflow_alias("silicon-cloud"));
assert!(is_siliconflow_alias("siliconcloud"));
assert!(!is_moonshot_alias("openrouter"));
assert!(!is_glm_alias("openai"));
@ -2076,6 +2099,7 @@ mod tests {
assert!(!is_zai_alias("anthropic"));
assert!(!is_qianfan_alias("cohere"));
assert!(!is_doubao_alias("deepseek"));
assert!(!is_siliconflow_alias("volcengine"));
}
#[test]
@ -2099,6 +2123,14 @@ mod tests {
assert_eq!(canonical_china_provider_name("baidu"), Some("qianfan"));
assert_eq!(canonical_china_provider_name("doubao"), Some("doubao"));
assert_eq!(canonical_china_provider_name("volcengine"), Some("doubao"));
assert_eq!(
canonical_china_provider_name("siliconflow"),
Some("siliconflow")
);
assert_eq!(
canonical_china_provider_name("silicon-cloud"),
Some("siliconflow")
);
assert_eq!(canonical_china_provider_name("hunyuan"), Some("hunyuan"));
assert_eq!(canonical_china_provider_name("tencent"), Some("hunyuan"));
assert_eq!(canonical_china_provider_name("openai"), None);
@ -2316,6 +2348,13 @@ mod tests {
assert!(create_provider("doubao-cn", Some("key")).is_ok());
}
#[test]
// Every SiliconFlow alias must resolve to a working provider instance
// when an API key is supplied.
fn factory_siliconflow() {
    assert!(create_provider("siliconflow", Some("key")).is_ok());
    assert!(create_provider("silicon-cloud", Some("key")).is_ok());
    assert!(create_provider("siliconcloud", Some("key")).is_ok());
}
#[test]
fn factory_qwen() {
assert!(create_provider("qwen", Some("key")).is_ok());
@ -2776,6 +2815,8 @@ mod tests {
"bedrock",
"qianfan",
"doubao",
"volcengine",
"siliconflow",
"qwen",
"qwen-intl",
"qwen-cn",
@ -3049,6 +3090,7 @@ mod tests {
model: "anthropic/claude-sonnet-4.6".to_string(),
max_tokens: Some(4096),
api_key: None,
transport: None,
}];
let provider = create_routed_provider_with_options(

View File

@ -4,21 +4,81 @@ use crate::multimodal;
use crate::providers::traits::{ChatMessage, Provider, ProviderCapabilities};
use crate::providers::ProviderRuntimeOptions;
use async_trait::async_trait;
use futures_util::{SinkExt, StreamExt};
use reqwest::Client;
use serde::{Deserialize, Serialize};
use serde_json::Value;
use std::path::PathBuf;
use std::time::Duration;
use tokio::time::timeout;
use tokio_tungstenite::{
connect_async,
tungstenite::{
client::IntoClientRequest,
http::{
header::{AUTHORIZATION, USER_AGENT},
HeaderValue as WsHeaderValue,
},
Message as WsMessage,
},
};
/// Default ChatGPT backend endpoint for the Codex Responses API.
const DEFAULT_CODEX_RESPONSES_URL: &str = "https://chatgpt.com/backend-api/codex/responses";
/// Env var overriding the full Codex responses URL.
const CODEX_RESPONSES_URL_ENV: &str = "ZEROCLAW_CODEX_RESPONSES_URL";
/// Env var overriding the Codex base URL.
const CODEX_BASE_URL_ENV: &str = "ZEROCLAW_CODEX_BASE_URL";
/// Env var selecting the Codex transport (auto / websocket / sse).
const CODEX_TRANSPORT_ENV: &str = "ZEROCLAW_CODEX_TRANSPORT";
/// Generic provider-transport env var; lower priority than the Codex-specific one.
const CODEX_PROVIDER_TRANSPORT_ENV: &str = "ZEROCLAW_PROVIDER_TRANSPORT";
/// Deprecated boolean toggle for the websocket transport; superseded by the
/// transport env vars above but still honored (with a warning).
const CODEX_RESPONSES_WEBSOCKET_ENV_LEGACY: &str = "ZEROCLAW_RESPONSES_WEBSOCKET";
/// Fallback system instructions used when the caller supplies no system prompt.
const DEFAULT_CODEX_INSTRUCTIONS: &str =
    "You are ZeroClaw, a concise and helpful coding assistant.";
/// Websocket handshake timeout.
const CODEX_WS_CONNECT_TIMEOUT: Duration = Duration::from_secs(20);
/// Timeout for sending the initial `response.create` frame.
const CODEX_WS_SEND_TIMEOUT: Duration = Duration::from_secs(15);
/// Per-frame read timeout while waiting for stream events.
const CODEX_WS_READ_TIMEOUT: Duration = Duration::from_secs(60);
/// Transport used to reach the OpenAI Codex Responses API.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum CodexTransport {
    /// Try the websocket transport first; fall back to SSE only when the
    /// websocket is unavailable (connect/handshake failure), not on stream errors.
    Auto,
    /// Force the websocket transport.
    WebSocket,
    /// Force the HTTP + server-sent-events transport.
    Sse,
}
/// Error from the Codex websocket transport, split by whether the failure
/// happened before the stream produced usable data.
#[derive(Debug)]
enum WebsocketRequestError {
    /// The websocket could not be established or the request could not be
    /// sent (bad URL, connect/send timeout, handshake failure). In
    /// `CodexTransport::Auto` mode this variant triggers the SSE fallback.
    TransportUnavailable(anyhow::Error),
    /// The websocket connected but the event stream itself failed; this is
    /// surfaced to the caller without an SSE retry.
    Stream(anyhow::Error),
}

impl WebsocketRequestError {
    /// Wraps any error type as `TransportUnavailable`.
    fn transport_unavailable<E>(error: E) -> Self
    where
        E: Into<anyhow::Error>,
    {
        Self::TransportUnavailable(error.into())
    }

    /// Wraps any error type as `Stream`.
    fn stream<E>(error: E) -> Self
    where
        E: Into<anyhow::Error>,
    {
        Self::Stream(error.into())
    }
}

impl std::fmt::Display for WebsocketRequestError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Both variants render as their wrapped error; the variant only
        // matters for fallback control flow, not for display.
        match self {
            Self::TransportUnavailable(error) | Self::Stream(error) => error.fmt(f),
        }
    }
}

impl std::error::Error for WebsocketRequestError {}
pub struct OpenAiCodexProvider {
auth: AuthService,
auth_profile_override: Option<String>,
responses_url: String,
transport: CodexTransport,
custom_endpoint: bool,
gateway_api_key: Option<String>,
reasoning_level: Option<String>,
@ -104,6 +164,7 @@ impl OpenAiCodexProvider {
auth_profile_override: options.auth_profile_override.clone(),
custom_endpoint: !is_default_responses_url(&responses_url),
responses_url,
transport: resolve_transport_mode(options)?,
gateway_api_key: gateway_api_key.map(ToString::to_string),
reasoning_level: normalize_reasoning_level(
options.reasoning_level.as_deref(),
@ -204,6 +265,72 @@ fn first_nonempty(text: Option<&str>) -> Option<String> {
})
}
/// Parses an explicit transport override string into a [`CodexTransport`].
///
/// Returns `Ok(None)` when the value is absent or blank, and an error naming
/// `source` when the value is present but unrecognized. Matching is
/// case-insensitive and ignores `-`/`_` separators (so "web-socket" and
/// "web_socket" both select the websocket transport).
fn parse_transport_override(
    raw: Option<&str>,
    source: &str,
) -> anyhow::Result<Option<CodexTransport>> {
    let value = match raw {
        Some(text) => text.trim(),
        None => return Ok(None),
    };
    if value.is_empty() {
        return Ok(None);
    }
    let canonical = value.to_ascii_lowercase().replace(['-', '_'], "");
    let mode = match canonical.as_str() {
        "auto" => CodexTransport::Auto,
        "websocket" | "ws" => CodexTransport::WebSocket,
        "sse" | "http" => CodexTransport::Sse,
        _ => anyhow::bail!(
            "Invalid OpenAI Codex transport override '{value}' from {source}; expected one of: auto, websocket, sse"
        ),
    };
    Ok(Some(mode))
}
/// Interprets the deprecated boolean websocket toggle: truthy values select
/// the websocket transport, falsy values select SSE, anything else is ignored.
fn parse_legacy_websocket_flag(raw: &str) -> Option<CodexTransport> {
    match raw.trim().to_ascii_lowercase().as_str() {
        "1" | "true" | "on" | "yes" => Some(CodexTransport::WebSocket),
        "0" | "false" | "off" | "no" => Some(CodexTransport::Sse),
        _ => None,
    }
}
/// Resolves the Codex transport mode, honoring overrides from most to least
/// specific:
/// 1. the `provider_transport` runtime option (per-route/config override),
/// 2. `ZEROCLAW_CODEX_TRANSPORT`,
/// 3. `ZEROCLAW_PROVIDER_TRANSPORT`,
/// 4. the deprecated boolean `ZEROCLAW_RESPONSES_WEBSOCKET` toggle,
/// defaulting to `CodexTransport::Auto` when none is set.
///
/// # Errors
/// Fails when an explicit override (1-3) is present but not one of
/// `auto`, `websocket`/`ws`, `sse`/`http`. The legacy boolean env is lenient:
/// unrecognized values fall through instead of erroring.
fn resolve_transport_mode(options: &ProviderRuntimeOptions) -> anyhow::Result<CodexTransport> {
    // Highest priority: explicit runtime option from provider configuration.
    if let Some(mode) = parse_transport_override(
        options.provider_transport.as_deref(),
        "provider.transport runtime override",
    )? {
        return Ok(mode);
    }
    // Codex-specific env var beats the generic provider-transport env var.
    if let Ok(value) = std::env::var(CODEX_TRANSPORT_ENV) {
        if let Some(mode) = parse_transport_override(Some(&value), CODEX_TRANSPORT_ENV)? {
            return Ok(mode);
        }
    }
    if let Ok(value) = std::env::var(CODEX_PROVIDER_TRANSPORT_ENV) {
        if let Some(mode) = parse_transport_override(Some(&value), CODEX_PROVIDER_TRANSPORT_ENV)? {
            return Ok(mode);
        }
    }
    // Deprecated boolean toggle: still honored, but logs a deprecation warning.
    if let Some(mode) = std::env::var(CODEX_RESPONSES_WEBSOCKET_ENV_LEGACY)
        .ok()
        .and_then(|value| parse_legacy_websocket_flag(&value))
    {
        tracing::warn!(
            env = CODEX_RESPONSES_WEBSOCKET_ENV_LEGACY,
            "Using deprecated websocket toggle env for OpenAI Codex transport"
        );
        return Ok(mode);
    }
    Ok(CodexTransport::Auto)
}
/// Picks the system instructions for a request: the caller's system prompt
/// when it contains non-empty text, otherwise the built-in default.
fn resolve_instructions(system_prompt: Option<&str>) -> String {
    match first_nonempty(system_prompt) {
        Some(text) => text,
        None => DEFAULT_CODEX_INSTRUCTIONS.to_string(),
    }
}
@ -526,6 +653,283 @@ async fn decode_responses_body(response: reqwest::Response) -> anyhow::Result<St
}
impl OpenAiCodexProvider {
    /// Derives the websocket endpoint from the configured HTTP responses URL:
    /// https -> wss and http -> ws (ws/wss pass through unchanged), then
    /// appends a `model` query parameter unless one is already present.
    ///
    /// # Errors
    /// Fails for URL schemes other than http/https/ws/wss, or when the scheme
    /// cannot be rewritten on the parsed URL.
    fn responses_websocket_url(&self, model: &str) -> anyhow::Result<String> {
        let mut url = reqwest::Url::parse(&self.responses_url)?;
        let next_scheme: &'static str = match url.scheme() {
            "https" | "wss" => "wss",
            "http" | "ws" => "ws",
            other => {
                anyhow::bail!(
                    "OpenAI Codex websocket transport does not support URL scheme: {}",
                    other
                );
            }
        };
        // `Url::set_scheme` reports failure as a bare `Err(())`; attach a message.
        url.set_scheme(next_scheme)
            .map_err(|()| anyhow::anyhow!("failed to set websocket URL scheme"))?;
        // Respect a model pinned by a custom endpoint; only add one if missing.
        if !url.query_pairs().any(|(k, _)| k == "model") {
            url.query_pairs_mut().append_pair("model", model);
        }
        Ok(url.into())
    }
    /// Installs the auth and protocol headers on the websocket handshake
    /// request, mirroring the SSE transport's header set plus a User-Agent.
    ///
    /// When `use_gateway_api_key_auth` is set, the ChatGPT access token and
    /// account id are additionally passed through as `x-openai-*` headers so
    /// a gateway in front can forward them.
    ///
    /// # Errors
    /// Fails when a token/account value is not a valid HTTP header value.
    fn apply_auth_headers_ws(
        &self,
        request: &mut tokio_tungstenite::tungstenite::http::Request<()>,
        bearer_token: &str,
        account_id: Option<&str>,
        access_token: Option<&str>,
        use_gateway_api_key_auth: bool,
    ) -> anyhow::Result<()> {
        let headers = request.headers_mut();
        headers.insert(
            AUTHORIZATION,
            WsHeaderValue::from_str(&format!("Bearer {bearer_token}"))?,
        );
        headers.insert(
            "OpenAI-Beta",
            WsHeaderValue::from_static("responses=experimental"),
        );
        headers.insert("originator", WsHeaderValue::from_static("pi"));
        headers.insert("accept", WsHeaderValue::from_static("text/event-stream"));
        headers.insert(USER_AGENT, WsHeaderValue::from_static("zeroclaw"));
        if let Some(account_id) = account_id {
            headers.insert("chatgpt-account-id", WsHeaderValue::from_str(account_id)?);
        }
        if use_gateway_api_key_auth {
            if let Some(access_token) = access_token {
                headers.insert(
                    "x-openai-access-token",
                    WsHeaderValue::from_str(access_token)?,
                );
            }
            if let Some(account_id) = account_id {
                headers.insert("x-openai-account-id", WsHeaderValue::from_str(account_id)?);
            }
        }
        Ok(())
    }
    /// Sends one Responses API request over a websocket and returns the
    /// assistant text.
    ///
    /// Flow: derive the ws(s) URL, attach auth headers, connect, send a single
    /// `response.create` frame, then consume stream events. Text is gathered
    /// from `response.output_text.delta` events (preferred) with the first
    /// non-delta text event kept as a fallback; a `response.completed` /
    /// `response.done` event may carry a full `response` object which, when
    /// parseable, wins outright.
    ///
    /// # Errors
    /// - `TransportUnavailable` for failures before/while establishing the
    ///   stream (URL, headers, connect/send timeouts) — these let `Auto` mode
    ///   fall back to SSE.
    /// - `Stream` for failures after the stream is live (server error event,
    ///   bad frame, read timeout with no usable text).
    async fn send_responses_websocket_request(
        &self,
        request: &ResponsesRequest,
        model: &str,
        bearer_token: &str,
        account_id: Option<&str>,
        access_token: Option<&str>,
        use_gateway_api_key_auth: bool,
    ) -> Result<String, WebsocketRequestError> {
        let ws_url = self
            .responses_websocket_url(model)
            .map_err(WebsocketRequestError::transport_unavailable)?;
        let mut ws_request = ws_url.into_client_request().map_err(|error| {
            WebsocketRequestError::transport_unavailable(anyhow::anyhow!(
                "invalid websocket request URL: {error}"
            ))
        })?;
        self.apply_auth_headers_ws(
            &mut ws_request,
            bearer_token,
            account_id,
            access_token,
            use_gateway_api_key_auth,
        )
        .map_err(WebsocketRequestError::transport_unavailable)?;
        // Single request frame; the server streams events back on this socket.
        let payload = serde_json::json!({
            "type": "response.create",
            "model": &request.model,
            "input": &request.input,
            "instructions": &request.instructions,
            "store": request.store,
            "text": &request.text,
            "reasoning": &request.reasoning,
            "include": &request.include,
            "tool_choice": &request.tool_choice,
            "parallel_tool_calls": request.parallel_tool_calls,
        });
        // Connect and send failures are transport-level: Auto mode may retry via SSE.
        let (mut ws_stream, _) = timeout(CODEX_WS_CONNECT_TIMEOUT, connect_async(ws_request))
            .await
            .map_err(|_| {
                WebsocketRequestError::transport_unavailable(anyhow::anyhow!(
                    "OpenAI Codex websocket connect timed out after {}s",
                    CODEX_WS_CONNECT_TIMEOUT.as_secs()
                ))
            })?
            .map_err(WebsocketRequestError::transport_unavailable)?;
        timeout(
            CODEX_WS_SEND_TIMEOUT,
            ws_stream.send(WsMessage::Text(
                serde_json::to_string(&payload)
                    .map_err(WebsocketRequestError::transport_unavailable)?
                    .into(),
            )),
        )
        .await
        .map_err(|_| {
            WebsocketRequestError::transport_unavailable(anyhow::anyhow!(
                "OpenAI Codex websocket send timed out after {}s",
                CODEX_WS_SEND_TIMEOUT.as_secs()
            ))
        })?
        .map_err(WebsocketRequestError::transport_unavailable)?;
        // Stream state: accumulated delta text, first non-delta text, and
        // whether we timed out after already receiving something usable.
        let mut saw_delta = false;
        let mut delta_accumulator = String::new();
        let mut fallback_text: Option<String> = None;
        let mut timed_out = false;
        loop {
            let frame = match timeout(CODEX_WS_READ_TIMEOUT, ws_stream.next()).await {
                Ok(frame) => frame,
                Err(_) => {
                    let _ = ws_stream.close(None).await;
                    // A timeout after partial text is salvageable; with no
                    // text at all it is a hard stream error.
                    if saw_delta || fallback_text.is_some() {
                        timed_out = true;
                        break;
                    }
                    return Err(WebsocketRequestError::stream(anyhow::anyhow!(
                        "OpenAI Codex websocket stream timed out after {}s waiting for events",
                        CODEX_WS_READ_TIMEOUT.as_secs()
                    )));
                }
            };
            // `None` means the server closed the connection.
            let Some(frame) = frame else {
                break;
            };
            let frame = frame.map_err(WebsocketRequestError::stream)?;
            // Decode the frame into a JSON event; answer pings, stop on close.
            let event: Value = match frame {
                WsMessage::Text(text) => {
                    serde_json::from_str(text.as_ref()).map_err(WebsocketRequestError::stream)?
                }
                WsMessage::Binary(binary) => {
                    let text = String::from_utf8(binary.to_vec()).map_err(|error| {
                        WebsocketRequestError::stream(anyhow::anyhow!(
                            "invalid UTF-8 websocket frame from OpenAI Codex: {error}"
                        ))
                    })?;
                    serde_json::from_str(&text).map_err(WebsocketRequestError::stream)?
                }
                WsMessage::Ping(payload) => {
                    ws_stream
                        .send(WsMessage::Pong(payload))
                        .await
                        .map_err(WebsocketRequestError::stream)?;
                    continue;
                }
                WsMessage::Close(_) => break,
                _ => continue,
            };
            if let Some(message) = extract_stream_error_message(&event) {
                return Err(WebsocketRequestError::stream(anyhow::anyhow!(
                    "OpenAI Codex websocket stream error: {message}"
                )));
            }
            if let Some(text) = extract_stream_event_text(&event, saw_delta) {
                let event_type = event.get("type").and_then(Value::as_str);
                if event_type == Some("response.output_text.delta") {
                    // Deltas are concatenated; once seen they take precedence.
                    saw_delta = true;
                    delta_accumulator.push_str(&text);
                } else if fallback_text.is_none() {
                    fallback_text = Some(text);
                }
            }
            let event_type = event.get("type").and_then(Value::as_str);
            if event_type == Some("response.completed") || event_type == Some("response.done") {
                // Prefer the full response object embedded in the terminal event.
                if let Some(response_value) = event.get("response").cloned() {
                    if let Ok(parsed) = serde_json::from_value::<ResponsesResponse>(response_value)
                    {
                        if let Some(text) = extract_responses_text(&parsed) {
                            let _ = ws_stream.close(None).await;
                            return Ok(text);
                        }
                    }
                }
                if saw_delta {
                    let _ = ws_stream.close(None).await;
                    return nonempty_preserve(Some(&delta_accumulator)).ok_or_else(|| {
                        WebsocketRequestError::stream(anyhow::anyhow!(
                            "No response from OpenAI Codex"
                        ))
                    });
                }
                if let Some(text) = fallback_text.clone() {
                    let _ = ws_stream.close(None).await;
                    return Ok(text);
                }
            }
        }
        // Connection ended (close or salvageable timeout) without a terminal
        // event carrying text: fall back to whatever was accumulated.
        if saw_delta {
            return nonempty_preserve(Some(&delta_accumulator)).ok_or_else(|| {
                WebsocketRequestError::stream(anyhow::anyhow!("No response from OpenAI Codex"))
            });
        }
        if let Some(text) = fallback_text {
            return Ok(text);
        }
        if timed_out {
            return Err(WebsocketRequestError::stream(anyhow::anyhow!(
                "No response from OpenAI Codex websocket stream before timeout"
            )));
        }
        Err(WebsocketRequestError::stream(anyhow::anyhow!(
            "No response from OpenAI Codex websocket stream"
        )))
    }
async fn send_responses_sse_request(
&self,
request: &ResponsesRequest,
bearer_token: &str,
account_id: Option<&str>,
access_token: Option<&str>,
use_gateway_api_key_auth: bool,
) -> anyhow::Result<String> {
let mut request_builder = self
.client
.post(&self.responses_url)
.header("Authorization", format!("Bearer {bearer_token}"))
.header("OpenAI-Beta", "responses=experimental")
.header("originator", "pi")
.header("accept", "text/event-stream")
.header("Content-Type", "application/json");
if let Some(account_id) = account_id {
request_builder = request_builder.header("chatgpt-account-id", account_id);
}
if use_gateway_api_key_auth {
if let Some(access_token) = access_token {
request_builder = request_builder.header("x-openai-access-token", access_token);
}
if let Some(account_id) = account_id {
request_builder = request_builder.header("x-openai-account-id", account_id);
}
}
let response = request_builder.json(request).send().await?;
if !response.status().is_success() {
return Err(super::api_error("OpenAI Codex", response).await);
}
decode_responses_body(response).await
}
async fn send_responses_request(
&self,
input: Vec<ResponsesInput>,
@ -613,35 +1017,59 @@ impl OpenAiCodexProvider {
access_token.as_deref().unwrap_or_default()
};
let mut request_builder = self
.client
.post(&self.responses_url)
.header("Authorization", format!("Bearer {bearer_token}"))
.header("OpenAI-Beta", "responses=experimental")
.header("originator", "pi")
.header("accept", "text/event-stream")
.header("Content-Type", "application/json");
if let Some(account_id) = account_id.as_deref() {
request_builder = request_builder.header("chatgpt-account-id", account_id);
}
if use_gateway_api_key_auth {
if let Some(access_token) = access_token.as_deref() {
request_builder = request_builder.header("x-openai-access-token", access_token);
match self.transport {
CodexTransport::WebSocket => self
.send_responses_websocket_request(
&request,
normalized_model,
bearer_token,
account_id.as_deref(),
access_token.as_deref(),
use_gateway_api_key_auth,
)
.await
.map_err(Into::into),
CodexTransport::Sse => {
self.send_responses_sse_request(
&request,
bearer_token,
account_id.as_deref(),
access_token.as_deref(),
use_gateway_api_key_auth,
)
.await
}
if let Some(account_id) = account_id.as_deref() {
request_builder = request_builder.header("x-openai-account-id", account_id);
CodexTransport::Auto => {
match self
.send_responses_websocket_request(
&request,
normalized_model,
bearer_token,
account_id.as_deref(),
access_token.as_deref(),
use_gateway_api_key_auth,
)
.await
{
Ok(text) => Ok(text),
Err(WebsocketRequestError::TransportUnavailable(error)) => {
tracing::warn!(
error = %error,
"OpenAI Codex websocket request failed; falling back to SSE"
);
self.send_responses_sse_request(
&request,
bearer_token,
account_id.as_deref(),
access_token.as_deref(),
use_gateway_api_key_auth,
)
.await
}
Err(WebsocketRequestError::Stream(error)) => Err(error),
}
}
}
let response = request_builder.json(&request).send().await?;
if !response.status().is_success() {
return Err(super::api_error("OpenAI Codex", response).await);
}
decode_responses_body(response).await
}
}
@ -809,6 +1237,85 @@ mod tests {
);
}
#[test]
// With no runtime override and all transport env vars cleared, the resolver
// must fall back to `CodexTransport::Auto`. Guards keep the process env clean.
fn resolve_transport_mode_defaults_to_auto() {
    let _env_lock = env_lock();
    let _transport_guard = EnvGuard::set(CODEX_TRANSPORT_ENV, None);
    let _legacy_guard = EnvGuard::set(CODEX_RESPONSES_WEBSOCKET_ENV_LEGACY, None);
    // Use the named constant instead of repeating the env-var literal.
    let _provider_guard = EnvGuard::set(CODEX_PROVIDER_TRANSPORT_ENV, None);
    assert_eq!(
        resolve_transport_mode(&ProviderRuntimeOptions::default()).unwrap(),
        CodexTransport::Auto
    );
}
#[test]
// The per-provider runtime override must win over the Codex transport env
// var: env says "sse" but the runtime option forces websocket.
fn resolve_transport_mode_accepts_runtime_override() {
    let _env_lock = env_lock();
    let _transport_guard = EnvGuard::set(CODEX_TRANSPORT_ENV, Some("sse"));
    let options = ProviderRuntimeOptions {
        provider_transport: Some("websocket".to_string()),
        ..ProviderRuntimeOptions::default()
    };
    assert_eq!(
        resolve_transport_mode(&options).unwrap(),
        CodexTransport::WebSocket
    );
}
#[test]
// With the modern transport envs cleared, the deprecated boolean toggle must
// still be honored: "false" selects the SSE transport.
fn resolve_transport_mode_legacy_bool_env_is_supported() {
    let _env_lock = env_lock();
    let _transport_guard = EnvGuard::set(CODEX_TRANSPORT_ENV, None);
    // Use the named constant instead of repeating the env-var literal.
    let _provider_guard = EnvGuard::set(CODEX_PROVIDER_TRANSPORT_ENV, None);
    let _legacy_guard = EnvGuard::set(CODEX_RESPONSES_WEBSOCKET_ENV_LEGACY, Some("false"));
    assert_eq!(
        resolve_transport_mode(&ProviderRuntimeOptions::default()).unwrap(),
        CodexTransport::Sse
    );
}
#[test]
// An unrecognized runtime transport value ("udp") must produce a hard error
// that names the offending value, rather than silently falling back.
fn resolve_transport_mode_rejects_invalid_runtime_override() {
    let _env_lock = env_lock();
    let _transport_guard = EnvGuard::set(CODEX_TRANSPORT_ENV, None);
    // Use the named constant instead of repeating the env-var literal.
    let _provider_guard = EnvGuard::set(CODEX_PROVIDER_TRANSPORT_ENV, None);
    let _legacy_guard = EnvGuard::set(CODEX_RESPONSES_WEBSOCKET_ENV_LEGACY, None);
    let options = ProviderRuntimeOptions {
        provider_transport: Some("udp".to_string()),
        ..ProviderRuntimeOptions::default()
    };
    let err =
        resolve_transport_mode(&options).expect_err("invalid runtime transport must fail");
    assert!(err
        .to_string()
        .contains("Invalid OpenAI Codex transport override 'udp'"));
}
#[test]
// The websocket URL derived from the default https responses endpoint must
// switch to wss and carry the requested model as a query parameter.
fn websocket_url_uses_ws_scheme_and_model_query() {
    let _env_lock = env_lock();
    // Clear endpoint overrides so the provider uses the default responses URL.
    let _endpoint_guard = EnvGuard::set(CODEX_RESPONSES_URL_ENV, None);
    let _base_guard = EnvGuard::set(CODEX_BASE_URL_ENV, None);
    let options = ProviderRuntimeOptions::default();
    let provider = OpenAiCodexProvider::new(&options, None).expect("provider should init");
    let ws_url = provider
        .responses_websocket_url("gpt-5.3-codex")
        .expect("websocket URL should be derived");
    assert_eq!(
        ws_url,
        "wss://chatgpt.com/backend-api/codex/responses?model=gpt-5.3-codex"
    );
}
#[test]
fn default_responses_url_detector_handles_equivalent_urls() {
assert!(is_default_responses_url(DEFAULT_CODEX_RESPONSES_URL));
@ -1077,6 +1584,7 @@ data: [DONE]
fn capabilities_includes_vision() {
let options = ProviderRuntimeOptions {
provider_api_url: None,
provider_transport: None,
zeroclaw_dir: None,
secrets_encrypt: false,
auth_profile_override: None,

1683
src/tools/feishu_doc.rs Normal file

File diff suppressed because it is too large Load Diff

View File

@ -31,6 +31,8 @@ pub mod cron_update;
pub mod delegate;
pub mod delegate_coordination_status;
pub mod docx_read;
#[cfg(feature = "channel-lark")]
pub mod feishu_doc;
pub mod file_edit;
pub mod file_read;
pub mod file_write;
@ -86,6 +88,8 @@ pub use cron_update::CronUpdateTool;
pub use delegate::DelegateTool;
pub use delegate_coordination_status::DelegateCoordinationStatusTool;
pub use docx_read::DocxReadTool;
#[cfg(feature = "channel-lark")]
pub use feishu_doc::FeishuDocTool;
pub use file_edit::FileEditTool;
pub use file_read::FileReadTool;
pub use file_write::FileWriteTool;
@ -428,6 +432,7 @@ pub fn all_tools_with_runtime(
let provider_runtime_options = crate::providers::ProviderRuntimeOptions {
auth_profile_override: None,
provider_api_url: root_config.api_url.clone(),
provider_transport: root_config.effective_provider_transport(),
zeroclaw_dir: root_config
.config_path
.parent()
@ -512,38 +517,41 @@ pub fn all_tools_with_runtime(
)));
}
// Inter-process agent communication (opt-in)
if root_config.agents_ipc.enabled {
match agents_ipc::IpcDb::open(workspace_dir, &root_config.agents_ipc) {
Ok(ipc_db) => {
let ipc_db = Arc::new(ipc_db);
tool_arcs.push(Arc::new(agents_ipc::AgentsListTool::new(ipc_db.clone())));
tool_arcs.push(Arc::new(agents_ipc::AgentsSendTool::new(
ipc_db.clone(),
// Feishu document tools (enabled when channel-lark feature is active)
#[cfg(feature = "channel-lark")]
{
let feishu_creds = root_config
.channels_config
.feishu
.as_ref()
.map(|fs| (fs.app_id.clone(), fs.app_secret.clone(), true))
.or_else(|| {
root_config
.channels_config
.lark
.as_ref()
.map(|lk| (lk.app_id.clone(), lk.app_secret.clone(), lk.use_feishu))
});
if let Some((app_id, app_secret, use_feishu)) = feishu_creds {
let app_id = app_id.trim().to_string();
let app_secret = app_secret.trim().to_string();
if app_id.is_empty() || app_secret.is_empty() {
tracing::warn!(
"feishu_doc: skipped registration because app credentials are empty"
);
} else {
tool_arcs.push(Arc::new(FeishuDocTool::new(
app_id,
app_secret,
use_feishu,
security.clone(),
)));
tool_arcs.push(Arc::new(agents_ipc::AgentsInboxTool::new(ipc_db.clone())));
tool_arcs.push(Arc::new(agents_ipc::StateGetTool::new(ipc_db.clone())));
tool_arcs.push(Arc::new(agents_ipc::StateSetTool::new(
ipc_db,
security.clone(),
)));
}
Err(e) => {
tracing::warn!("agents_ipc: failed to open IPC database: {e}");
}
}
}
// Load WASM plugin tools from the skills directory.
// Each installed skill package may ship one or more WASM tools under
// `<skill-dir>/tools/<tool-name>/{tool.wasm, manifest.json}`.
// Failures are logged and skipped — a broken plugin must not block startup.
let skills_dir = workspace_dir.join("skills");
let mut boxed = boxed_registry_from_arcs(tool_arcs);
let wasm_tools = wasm_tool::load_wasm_tools_from_skills(&skills_dir);
boxed.extend(wasm_tools);
boxed
boxed_registry_from_arcs(tool_arcs)
}
#[cfg(test)]

View File

@ -125,6 +125,42 @@ impl ModelRoutingConfigTool {
Ok(output)
}
/// Canonicalize a user-supplied transport string.
///
/// Trims, lowercases, and strips `-`/`_` separators before matching, so
/// variants like `"Web-Socket"`, `"WS"`, or `"HTTP"` resolve to the canonical
/// `"websocket"` / `"sse"` forms. Fails with a message naming the allowed
/// values (`auto`, `websocket`, `sse`) for anything unrecognized.
fn normalize_transport_value(raw: &str, field: &str) -> anyhow::Result<String> {
    let mut key = raw.trim().to_ascii_lowercase();
    key.retain(|c| c != '-' && c != '_');
    let canonical = match key.as_str() {
        "auto" => "auto",
        "websocket" | "ws" => "websocket",
        "sse" | "http" => "sse",
        _ => anyhow::bail!("'{field}' must be one of: auto, websocket, sse"),
    };
    Ok(canonical.to_string())
}
/// Read an optional transport override from the tool arguments.
///
/// Three-state result:
/// - field absent            → `MaybeSet::Unset` (leave existing value alone)
/// - explicit `null` or `""` → `MaybeSet::Null`  (clear the override)
/// - non-empty string        → `MaybeSet::Set` with the canonicalized value
///
/// Non-string, non-null values and unrecognized transports are errors.
fn parse_optional_transport_update(
    args: &Value,
    field: &str,
) -> anyhow::Result<MaybeSet<String>> {
    let raw = match args.get(field) {
        Some(value) => value,
        None => return Ok(MaybeSet::Unset),
    };
    if raw.is_null() {
        return Ok(MaybeSet::Null);
    }
    let text = raw
        .as_str()
        .ok_or_else(|| anyhow::anyhow!("'{field}' must be a string or null"))?;
    let trimmed = text.trim();
    // A whitespace-only string is treated the same as an explicit null.
    if trimmed.is_empty() {
        Ok(MaybeSet::Null)
    } else {
        Ok(MaybeSet::Set(Self::normalize_transport_value(
            trimmed, field,
        )?))
    }
}
fn parse_optional_f64_update(args: &Value, field: &str) -> anyhow::Result<MaybeSet<f64>> {
let Some(raw) = args.get(field) else {
return Ok(MaybeSet::Unset);
@ -217,6 +253,7 @@ impl ModelRoutingConfigTool {
"hint": route.hint,
"provider": route.provider,
"model": route.model,
"transport": route.transport,
"api_key_configured": has_provider_credential(&route.provider, route.api_key.as_deref()),
"classification": classification,
})
@ -429,6 +466,7 @@ impl ModelRoutingConfigTool {
let provider = Self::parse_non_empty_string(args, "provider")?;
let model = Self::parse_non_empty_string(args, "model")?;
let api_key_update = Self::parse_optional_string_update(args, "api_key")?;
let transport_update = Self::parse_optional_transport_update(args, "transport")?;
let keywords_update = if let Some(raw) = args.get("keywords") {
Some(Self::parse_string_list(raw, "keywords")?)
@ -466,6 +504,7 @@ impl ModelRoutingConfigTool {
model: model.clone(),
max_tokens: None,
api_key: None,
transport: None,
});
next_route.hint = hint.clone();
@ -478,6 +517,12 @@ impl ModelRoutingConfigTool {
MaybeSet::Unset => {}
}
match transport_update {
MaybeSet::Set(transport) => next_route.transport = Some(transport),
MaybeSet::Null => next_route.transport = None,
MaybeSet::Unset => {}
}
cfg.model_routes.retain(|route| route.hint != hint);
cfg.model_routes.push(next_route);
Self::normalize_and_sort_routes(&mut cfg.model_routes);
@ -782,6 +827,11 @@ impl Tool for ModelRoutingConfigTool {
"type": ["string", "null"],
"description": "Optional API key override for scenario route or delegate agent"
},
"transport": {
"type": ["string", "null"],
"enum": ["auto", "websocket", "sse", "ws", "http", null],
"description": "Optional route transport override for upsert_scenario (auto, websocket, sse)"
},
"keywords": {
"description": "Classification keywords for upsert_scenario (string or string array)",
"oneOf": [
@ -1003,6 +1053,7 @@ mod tests {
"hint": "coding",
"provider": "openai",
"model": "gpt-5.3-codex",
"transport": "websocket",
"classification_enabled": true,
"keywords": ["code", "bug", "refactor"],
"patterns": ["```"],
@ -1024,9 +1075,58 @@ mod tests {
item["hint"] == json!("coding")
&& item["provider"] == json!("openai")
&& item["model"] == json!("gpt-5.3-codex")
&& item["transport"] == json!("websocket")
}));
}
#[tokio::test]
async fn upsert_scenario_transport_alias_is_canonicalized() {
    let tmp = TempDir::new().unwrap();
    let tool = ModelRoutingConfigTool::new(test_config(&tmp).await, test_security());

    // "WS" is an accepted alias and must be persisted as canonical "websocket".
    let upsert = tool
        .execute(json!({
            "action": "upsert_scenario",
            "hint": "analysis",
            "provider": "openai",
            "model": "gpt-5.3-codex",
            "transport": "WS"
        }))
        .await
        .unwrap();
    assert!(upsert.success, "{:?}", upsert.error);

    let fetched = tool.execute(json!({"action": "get"})).await.unwrap();
    assert!(fetched.success);
    let output: Value = serde_json::from_str(&fetched.output).unwrap();
    let stored = output["scenarios"].as_array().unwrap().iter().any(|item| {
        item["hint"] == json!("analysis") && item["transport"] == json!("websocket")
    });
    assert!(stored);
}
#[tokio::test]
async fn upsert_scenario_rejects_invalid_transport() {
    let tmp = TempDir::new().unwrap();
    let tool = ModelRoutingConfigTool::new(test_config(&tmp).await, test_security());

    // "udp" is not a valid transport: the upsert must fail and the error
    // must name the accepted values.
    let result = tool
        .execute(json!({
            "action": "upsert_scenario",
            "hint": "analysis",
            "provider": "openai",
            "model": "gpt-5.3-codex",
            "transport": "udp"
        }))
        .await
        .unwrap();
    assert!(!result.success);
    let message = result.error.unwrap_or_default();
    assert!(message.contains("'transport' must be one of: auto, websocket, sse"));
}
#[tokio::test]
async fn remove_scenario_also_removes_rule() {
let tmp = TempDir::new().unwrap();

View File

@ -448,3 +448,141 @@ async fn agent_handles_sequential_tool_then_text() {
"should produce final text after tool execution"
);
}
// ═════════════════════════════════════════════════════════════════════════════
// TG4.6: Loop detection
// ═════════════════════════════════════════════════════════════════════════════
/// No-progress repeat: provider returns same tool call every turn with identical
/// output (EchoTool with fixed input). Loop detection should stop early.
#[tokio::test]
async fn loop_detection_no_progress_repeat_stops_early() {
let responses: Vec<ChatResponse> = (0..10)
.map(|i| {
tool_response(vec![ToolCall {
id: format!("tc_{i}"),
name: "echo".into(),
arguments: r#"{"message": "same"}"#.into(),
}])
})
.collect();
let provider = Box::new(MockProvider::new(responses));
let mut agent = build_agent(provider, vec![Box::new(EchoTool)]);
let result = agent.turn("repeat forever").await;
assert!(result.is_err(), "should error due to loop detection");
let err_msg = result.unwrap_err().to_string();
assert!(
err_msg.contains("detected loop pattern"),
"error should mention loop pattern: {err_msg}"
);
}
/// Repeated calls with *different* outputs should NOT trigger loop detection.
/// EchoTool returns the input, so varying inputs → varying outputs = progress.
#[tokio::test]
async fn loop_detection_different_outputs_no_false_positive() {
    // Five echo calls with distinct messages, then a terminal text reply.
    let responses: Vec<ChatResponse> = (0..5)
        .map(|i| {
            tool_response(vec![ToolCall {
                id: format!("tc_{i}"),
                name: "echo".into(),
                arguments: format!(r#"{{"message": "msg_{i}"}}"#),
            }])
        })
        .chain(std::iter::once(text_response("All done")))
        .collect();

    let mut agent = build_agent(
        Box::new(MockProvider::new(responses)),
        vec![Box::new(EchoTool)],
    );
    let result = agent.turn("varying calls").await;
    assert!(
        result.is_ok(),
        "should complete normally with varying outputs: {:?}",
        result.err()
    );
}
/// Ping-pong: alternating between two tools with fixed input/output.
#[tokio::test]
async fn loop_detection_ping_pong_stops_early() {
    // A-B-A-B-A-B pattern (3 cycles, threshold=2).
    // Note: ping-pong detection works when tool names differ OR args differ.
    // Here we use the same tool with alternating args, which counts as
    // different signatures.
    let responses: Vec<ChatResponse> = (0..6)
        .map(|i| {
            let args = if i % 2 == 0 {
                r#"{"message": "ping"}"#
            } else {
                r#"{"message": "pong"}"#
            };
            tool_response(vec![ToolCall {
                id: format!("tc_{i}"),
                name: "echo".into(),
                arguments: args.into(),
            }])
        })
        .collect();

    let mut agent = build_agent(
        Box::new(MockProvider::new(responses)),
        vec![Box::new(EchoTool)],
    );
    let result = agent.turn("ping pong").await;
    // The detector should fire (warning then hard stop) within the iterations.
    assert!(
        result.is_err(),
        "should error due to ping-pong loop detection"
    );
}
/// Consecutive failures trigger loop detection.
#[tokio::test]
async fn loop_detection_failure_streak_stops_early() {
let responses: Vec<ChatResponse> = (0..10)
.map(|i| {
tool_response(vec![ToolCall {
id: format!("tc_{i}"),
name: "failing_tool".into(),
arguments: "{}".into(),
}])
})
.collect();
let provider = Box::new(MockProvider::new(responses));
let mut agent = build_agent(provider, vec![Box::new(FailingTool)]);
let result = agent.turn("keep failing").await;
assert!(
result.is_err(),
"should error due to failure streak detection"
);
let err_msg = result.unwrap_err().to_string();
assert!(
err_msg.contains("detected loop pattern"),
"error should mention loop pattern: {err_msg}"
);
}
/// Normal varied tool usage should not trigger any detection.
#[tokio::test]
async fn loop_detection_normal_flow_no_false_positive() {
    // Two distinct echo calls followed by a terminal text response.
    let mut responses = Vec::new();
    for (id, message) in [("tc1", "hello"), ("tc2", "world")] {
        responses.push(tool_response(vec![ToolCall {
            id: id.into(),
            name: "echo".into(),
            arguments: format!(r#"{{"message": "{message}"}}"#),
        }]));
    }
    responses.push(text_response("Final answer"));

    let mut agent = build_agent(
        Box::new(MockProvider::new(responses)),
        vec![Box::new(EchoTool)],
    );
    let result = agent.turn("normal usage").await;
    assert!(
        result.is_ok(),
        "normal varied flow should complete: {:?}",
        result.err()
    );
}

View File

@ -148,6 +148,7 @@ async fn openai_codex_second_vision_support() -> Result<()> {
let opts = ProviderRuntimeOptions {
auth_profile_override: Some("second".to_string()),
provider_api_url: None,
provider_transport: None,
zeroclaw_dir: None,
secrets_encrypt: false,
reasoning_enabled: None,

View File

@ -333,6 +333,19 @@ fn factory_resolves_doubao_provider() {
assert_provider_ok("doubao", Some("test-key"), None);
}
/// The Volcengine provider must resolve under both its canonical name and alias.
#[test]
fn factory_resolves_volcengine_provider() {
    for name in ["volcengine", "ark"] {
        assert_provider_ok(name, Some("test-key"), None);
    }
}
/// The SiliconFlow provider must resolve under all of its accepted aliases.
#[test]
fn factory_resolves_siliconflow_provider() {
    for name in ["siliconflow", "silicon-cloud", "siliconcloud"] {
        assert_provider_ok(name, Some("test-key"), None);
    }
}
#[test]
fn factory_resolves_qianfan_provider() {
assert_provider_ok("qianfan", Some("test-key"), None);