Merge branch 'main' into chore_landing_readme_i18n_20260304

This commit is contained in:
JordanTheJet 2026-03-04 14:48:50 -05:00 committed by GitHub
commit 83767cbacc
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
300 changed files with 49153 additions and 3393 deletions

View File

@ -1,3 +1,12 @@
# macOS targets — pin minimum OS version so binaries run on supported releases.
# Intel (x86_64): target macOS 10.15 Catalina and later.
# Apple Silicon (aarch64): target macOS 11.0 Big Sur and later (no Apple Silicon Mac ever shipped with Catalina).
[target.x86_64-apple-darwin]
rustflags = ["-C", "link-arg=-mmacosx-version-min=10.15"]
[target.aarch64-apple-darwin]
rustflags = ["-C", "link-arg=-mmacosx-version-min=11.0"]
[target.x86_64-unknown-linux-musl]
rustflags = ["-C", "link-arg=-static"]
@ -15,3 +24,10 @@ linker = "clang"
[target.aarch64-linux-android]
linker = "clang"
# Windows targets — increase stack size for large JsonSchema derives
[target.x86_64-pc-windows-msvc]
rustflags = ["-C", "link-args=/STACK:8388608"]
[target.aarch64-pc-windows-msvc]
rustflags = ["-C", "link-args=/STACK:8388608"]

View File

@ -4,3 +4,9 @@ self-hosted-runner:
- X64
- racknerd
- aws-india
- light
- cpu40
- codeql
- codeql-general
- blacksmith-2vcpu-ubuntu-2404
- hetzner

View File

@ -89,7 +89,7 @@ env:
jobs:
canary-plan:
name: Canary Plan
runs-on: [self-hosted, aws-india]
runs-on: [self-hosted, Linux, X64, aws-india, blacksmith-2vcpu-ubuntu-2404, hetzner]
timeout-minutes: 20
outputs:
mode: ${{ steps.inputs.outputs.mode }}
@ -122,7 +122,8 @@ jobs:
trigger_rollback_on_abort="true"
rollback_branch="dev"
rollback_target_ref=""
fail_on_violation="true"
# Scheduled audits may not have live canary telemetry; report violations without failing by default.
fail_on_violation="false"
if [ "${GITHUB_EVENT_NAME}" = "workflow_dispatch" ]; then
mode="${{ github.event.inputs.mode || 'dry-run' }}"
@ -237,7 +238,7 @@ jobs:
name: Canary Execute
needs: [canary-plan]
if: github.event_name == 'workflow_dispatch' && needs.canary-plan.outputs.mode == 'execute' && needs.canary-plan.outputs.ready_to_execute == 'true'
runs-on: [self-hosted, aws-india]
runs-on: [self-hosted, Linux, X64, aws-india, blacksmith-2vcpu-ubuntu-2404, hetzner]
timeout-minutes: 10
permissions:
contents: write

View File

@ -50,7 +50,7 @@ env:
jobs:
audit:
name: CI Change Audit
runs-on: ubuntu-22.04
runs-on: [self-hosted, Linux, X64, aws-india, light, cpu40]
timeout-minutes: 15
steps:
- name: Checkout
@ -59,9 +59,10 @@ jobs:
fetch-depth: 0
- name: Setup Python
uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5
with:
python-version: "3.12"
shell: bash
run: |
set -euo pipefail
python3 --version
- name: Resolve base/head commits
id: refs

View File

@ -0,0 +1,88 @@
---
# Post-Release Validation
#
# Runs after a GitHub release is published. Verifies that:
#   1. the release is reachable via the API (and warns if still a draft),
#   2. the asset count meets the artifact contract,
#   3. a sample archive's SHA256 matches the published SHA256SUMS,
#   4. the extracted binary reports the release's version.
name: Post-Release Validation

on:
  release:
    types: [published]

permissions:
  contents: read

jobs:
  validate:
    name: Validate Published Release
    runs-on: [self-hosted, Linux, X64, aws-india, blacksmith-2vcpu-ubuntu-2404, hetzner]
    timeout-minutes: 15
    steps:
      - uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4

      - name: Download and verify release assets
        shell: bash
        env:
          RELEASE_TAG: ${{ github.event.release.tag_name }}
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          set -euo pipefail
          echo "Validating release: ${RELEASE_TAG}"

          # Self-hosted runners persist /tmp across runs. Start from a clean
          # directory so stale assets from an earlier run cannot poison the
          # checksum verification below, and so `gh release download` does
          # not fail on already-existing files.
          rm -rf /tmp/release-check
          mkdir -p /tmp/release-check

          # 1. Check release exists and is not draft.
          # NOTE(review): python3 prints the JSON bool as "True"/"False",
          # hence the capitalized comparison. Draft releases are normally
          # not resolvable via the tags endpoint — TODO confirm this warning
          # branch is actually reachable.
          release_json="$(gh api \
            "repos/${GITHUB_REPOSITORY}/releases/tags/${RELEASE_TAG}")"
          is_draft="$(echo "$release_json" \
            | python3 -c "import sys,json; print(json.load(sys.stdin)['draft'])")"
          if [ "$is_draft" = "True" ]; then
            echo "::warning::Release ${RELEASE_TAG} is still in draft."
          fi

          # 2. Check expected assets against the artifact contract.
          # Every key in the contract except "schema_version" holds a list
          # of expected assets; the expected total is the sum of their sizes.
          asset_count="$(echo "$release_json" \
            | python3 -c "import sys,json; print(len(json.load(sys.stdin)['assets']))")"
          contract=".github/release/release-artifact-contract.json"
          expected_count="$(python3 -c "
          import json
          c = json.load(open('$contract'))
          total = sum(len(c[k]) for k in c if k != 'schema_version')
          print(total)
          ")"
          echo "Release has ${asset_count} assets (contract expects ${expected_count})"
          if [ "$asset_count" -lt "$expected_count" ]; then
            echo "::error::Expected >=${expected_count} release assets (from ${contract}), found ${asset_count}"
            exit 1
          fi

          # 3. Download checksum file and one archive
          gh release download "${RELEASE_TAG}" \
            --pattern "SHA256SUMS" \
            --dir /tmp/release-check
          gh release download "${RELEASE_TAG}" \
            --pattern "zeroclaw-x86_64-unknown-linux-gnu.tar.gz" \
            --dir /tmp/release-check

          # 4. Verify checksum. --ignore-missing: SHA256SUMS lists every
          # release asset but only one archive was downloaded here.
          cd /tmp/release-check
          if sha256sum --check --ignore-missing SHA256SUMS; then
            echo "SHA256 checksum verification: passed"
          else
            echo "::error::SHA256 checksum verification failed"
            exit 1
          fi

          # 5. Extract binary for the smoke-test step below.
          tar xzf zeroclaw-x86_64-unknown-linux-gnu.tar.gz

      - name: Smoke-test release binary
        shell: bash
        env:
          RELEASE_TAG: ${{ github.event.release.tag_name }}
        run: |
          set -euo pipefail
          cd /tmp/release-check
          # The release tag is "vX.Y.Z" while the binary reports "X.Y.Z",
          # so strip the leading "v" before matching.
          if ./zeroclaw --version | grep -Fq "${RELEASE_TAG#v}"; then
            echo "Binary version check: passed (${RELEASE_TAG})"
          else
            actual="$(./zeroclaw --version)"
            echo "::error::Binary --version mismatch: ${actual}"
            exit 1
          fi
          echo "Post-release validation: all checks passed"

View File

@ -39,7 +39,7 @@ env:
jobs:
probe:
name: Provider Connectivity Probe
runs-on: [self-hosted, aws-india]
runs-on: [self-hosted, Linux, X64, aws-india, blacksmith-2vcpu-ubuntu-2404, hetzner]
timeout-minutes: 20
steps:
- name: Checkout

View File

@ -2,13 +2,13 @@ name: CI Queue Hygiene
on:
schedule:
- cron: "*/15 * * * *"
- cron: "*/5 * * * *"
workflow_dispatch:
inputs:
apply:
description: "Cancel selected queued runs (false = dry-run report only)"
required: true
default: true
default: false
type: boolean
status:
description: "Queued-run status scope"
@ -42,7 +42,7 @@ env:
jobs:
hygiene:
name: Queue Hygiene
runs-on: ubuntu-22.04
runs-on: [self-hosted, Linux, X64, aws-india, light, cpu40]
timeout-minutes: 15
steps:
- name: Checkout
@ -51,6 +51,8 @@ jobs:
- name: Run queue hygiene policy
id: hygiene
shell: bash
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
set -euo pipefail
mkdir -p artifacts
@ -61,18 +63,24 @@ jobs:
if [ "${GITHUB_EVENT_NAME}" = "workflow_dispatch" ]; then
status_scope="${{ github.event.inputs.status || 'queued' }}"
max_cancel="${{ github.event.inputs.max_cancel || '120' }}"
apply_mode="${{ github.event.inputs.apply || 'true' }}"
apply_mode="${{ github.event.inputs.apply || 'false' }}"
fi
cmd=(python3 scripts/ci/queue_hygiene.py
--repo "${{ github.repository }}"
--status "${status_scope}"
--max-cancel "${max_cancel}"
--dedupe-workflow "CI Run"
--dedupe-workflow "Test E2E"
--dedupe-workflow "Docs Deploy"
--dedupe-workflow "PR Intake Checks"
--dedupe-workflow "PR Labeler"
--dedupe-workflow "PR Auto Responder"
--dedupe-workflow "Workflow Sanity"
--dedupe-workflow "PR Label Policy Check"
--priority-branch-prefix "release/"
--dedupe-include-non-pr
--non-pr-key branch
--output-json artifacts/queue-hygiene-report.json
--verbose)

View File

@ -8,7 +8,11 @@ on:
- "Cargo.lock"
- "src/**"
- "crates/**"
- "scripts/ci/ensure_c_toolchain.sh"
- "scripts/ci/ensure_cargo_component.sh"
- "scripts/ci/ensure_cc.sh"
- "scripts/ci/reproducible_build_check.sh"
- "scripts/ci/self_heal_rust_toolchain.sh"
- ".github/workflows/ci-reproducible-build.yml"
pull_request:
branches: [dev, main]
@ -17,7 +21,11 @@ on:
- "Cargo.lock"
- "src/**"
- "crates/**"
- "scripts/ci/ensure_c_toolchain.sh"
- "scripts/ci/ensure_cargo_component.sh"
- "scripts/ci/ensure_cc.sh"
- "scripts/ci/reproducible_build_check.sh"
- "scripts/ci/self_heal_rust_toolchain.sh"
- ".github/workflows/ci-reproducible-build.yml"
schedule:
- cron: "45 5 * * 1" # Weekly Monday 05:45 UTC
@ -50,17 +58,37 @@ env:
jobs:
reproducibility:
name: Reproducible Build Probe
runs-on: [self-hosted, aws-india]
timeout-minutes: 45
runs-on: [self-hosted, Linux, X64, aws-india, blacksmith-2vcpu-ubuntu-2404, hetzner]
timeout-minutes: 75
env:
CARGO_HOME: ${{ github.workspace }}/.ci-rust/${{ github.run_id }}-${{ github.run_attempt }}-${{ github.job }}/cargo
RUSTUP_HOME: ${{ github.workspace }}/.ci-rust/${{ github.run_id }}-${{ github.run_attempt }}-${{ github.job }}/rustup
CARGO_TARGET_DIR: ${{ github.workspace }}/.ci-rust/${{ github.run_id }}-${{ github.run_attempt }}-${{ github.job }}/target
steps:
- name: Checkout
uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
- name: Self-heal Rust toolchain cache
shell: bash
run: ./scripts/ci/self_heal_rust_toolchain.sh 1.92.0
- name: Ensure C toolchain
shell: bash
run: bash ./scripts/ci/ensure_c_toolchain.sh
- name: Setup Rust
uses: dtolnay/rust-toolchain@631a55b12751854ce901bb631d5902ceb48146f7 # stable
with:
toolchain: 1.92.0
- name: Ensure C toolchain for Rust builds
run: ./scripts/ci/ensure_cc.sh
- name: Ensure cargo component
shell: bash
env:
ENSURE_CARGO_COMPONENT_STRICT: "true"
run: bash ./scripts/ci/ensure_cargo_component.sh 1.92.0
- name: Run reproducible build check
shell: bash
run: |

View File

@ -48,7 +48,7 @@ on:
- cron: "15 7 * * 1" # Weekly Monday 07:15 UTC
concurrency:
group: ci-rollback-${{ github.event.inputs.branch || 'dev' }}
group: ci-rollback-${{ github.event_name == 'workflow_dispatch' && (github.event.inputs.branch || 'dev') || github.ref_name }}
cancel-in-progress: false
permissions:
@ -64,7 +64,7 @@ env:
jobs:
rollback-plan:
name: Rollback Guard Plan
runs-on: [self-hosted, aws-india]
runs-on: [self-hosted, Linux, X64, aws-india, blacksmith-2vcpu-ubuntu-2404, hetzner]
timeout-minutes: 20
outputs:
branch: ${{ steps.plan.outputs.branch }}
@ -77,7 +77,7 @@ jobs:
uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
with:
fetch-depth: 0
ref: ${{ github.event.inputs.branch || 'dev' }}
ref: ${{ github.event_name == 'workflow_dispatch' && (github.event.inputs.branch || 'dev') || github.ref_name }}
- name: Build rollback plan
id: plan
@ -86,11 +86,12 @@ jobs:
set -euo pipefail
mkdir -p artifacts
branch_input="dev"
branch_input="${GITHUB_REF_NAME}"
mode_input="dry-run"
target_ref_input=""
allow_non_ancestor="false"
fail_on_violation="true"
# Scheduled audits can surface historical rollback violations; report without blocking by default.
fail_on_violation="false"
if [ "${GITHUB_EVENT_NAME}" = "workflow_dispatch" ]; then
branch_input="${{ github.event.inputs.branch || 'dev' }}"
@ -188,7 +189,7 @@ jobs:
name: Rollback Execute Actions
needs: [rollback-plan]
if: github.event_name == 'workflow_dispatch' && needs.rollback-plan.outputs.mode == 'execute' && needs.rollback-plan.outputs.ready_to_execute == 'true'
runs-on: [self-hosted, aws-india]
runs-on: [self-hosted, Linux, X64, aws-india, blacksmith-2vcpu-ubuntu-2404, hetzner]
timeout-minutes: 15
permissions:
contents: write

View File

@ -9,7 +9,7 @@ on:
branches: [dev, main]
concurrency:
group: ci-${{ github.event.pull_request.number || github.sha }}
group: ci-run-${{ github.event_name }}-${{ github.event.pull_request.number || github.ref_name || github.sha }}
cancel-in-progress: true
permissions:
@ -24,7 +24,7 @@ env:
jobs:
changes:
name: Detect Change Scope
runs-on: ubuntu-22.04
runs-on: [self-hosted, Linux, X64, aws-india, light, cpu40]
outputs:
docs_only: ${{ steps.scope.outputs.docs_only }}
docs_changed: ${{ steps.scope.outputs.docs_changed }}
@ -50,19 +50,35 @@ jobs:
name: Lint Gate (Format + Clippy + Strict Delta)
needs: [changes]
if: needs.changes.outputs.rust_changed == 'true'
runs-on: [self-hosted, aws-india]
timeout-minutes: 40
runs-on: [self-hosted, Linux, X64, aws-india, blacksmith-2vcpu-ubuntu-2404, hetzner]
timeout-minutes: 75
env:
CARGO_HOME: ${{ github.workspace }}/.ci-rust/${{ github.run_id }}-${{ github.run_attempt }}-${{ github.job }}/cargo
RUSTUP_HOME: ${{ github.workspace }}/.ci-rust/${{ github.run_id }}-${{ github.run_attempt }}-${{ github.job }}/rustup
CARGO_TARGET_DIR: ${{ github.workspace }}/.ci-rust/${{ github.run_id }}-${{ github.run_attempt }}-${{ github.job }}/target
steps:
- uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
with:
fetch-depth: 0
- name: Self-heal Rust toolchain cache
shell: bash
run: ./scripts/ci/self_heal_rust_toolchain.sh 1.92.0
- name: Ensure C toolchain
shell: bash
run: bash ./scripts/ci/ensure_c_toolchain.sh
- uses: dtolnay/rust-toolchain@631a55b12751854ce901bb631d5902ceb48146f7 # stable
with:
toolchain: 1.92.0
components: rustfmt, clippy
- name: Ensure C toolchain for Rust builds
run: ./scripts/ci/ensure_cc.sh
- name: Ensure cargo component
shell: bash
run: bash ./scripts/ci/ensure_cargo_component.sh 1.92.0
- uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v3
with:
prefix-key: ci-run-check
cache-bin: false
- name: Run rust quality gate
run: ./scripts/ci/rust_quality_gate.sh
- name: Run strict lint delta gate
@ -70,20 +86,82 @@ jobs:
BASE_SHA: ${{ needs.changes.outputs.base_sha }}
run: ./scripts/ci/rust_strict_delta_gate.sh
test:
name: Test
workspace-check:
name: Workspace Check
needs: [changes]
if: needs.changes.outputs.rust_changed == 'true'
runs-on: [self-hosted, aws-india]
timeout-minutes: 60
runs-on: [self-hosted, Linux, X64, aws-india, blacksmith-2vcpu-ubuntu-2404, hetzner]
timeout-minutes: 45
steps:
- uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
- name: Self-heal Rust toolchain cache
shell: bash
run: ./scripts/ci/self_heal_rust_toolchain.sh 1.92.0
- uses: dtolnay/rust-toolchain@631a55b12751854ce901bb631d5902ceb48146f7 # stable
with:
toolchain: 1.92.0
- uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v3
with:
prefix-key: ci-run-workspace-check
cache-bin: false
- name: Check workspace
run: cargo check --workspace --locked
package-check:
name: Package Check (${{ matrix.package }})
needs: [changes]
if: needs.changes.outputs.rust_changed == 'true'
runs-on: [self-hosted, Linux, X64, aws-india, blacksmith-2vcpu-ubuntu-2404, hetzner]
timeout-minutes: 25
strategy:
fail-fast: false
matrix:
package: [zeroclaw-types, zeroclaw-core]
steps:
- uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
- name: Self-heal Rust toolchain cache
shell: bash
run: ./scripts/ci/self_heal_rust_toolchain.sh 1.92.0
- uses: dtolnay/rust-toolchain@631a55b12751854ce901bb631d5902ceb48146f7 # stable
with:
toolchain: 1.92.0
- uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v3
with:
prefix-key: ci-run-package-check
cache-bin: false
- name: Check package
run: cargo check -p ${{ matrix.package }} --locked
test:
name: Test
needs: [changes]
if: needs.changes.outputs.rust_changed == 'true'
runs-on: [self-hosted, Linux, X64, aws-india, blacksmith-2vcpu-ubuntu-2404, hetzner]
timeout-minutes: 120
env:
CARGO_HOME: ${{ github.workspace }}/.ci-rust/${{ github.run_id }}-${{ github.run_attempt }}-${{ github.job }}/cargo
RUSTUP_HOME: ${{ github.workspace }}/.ci-rust/${{ github.run_id }}-${{ github.run_attempt }}-${{ github.job }}/rustup
CARGO_TARGET_DIR: ${{ github.workspace }}/.ci-rust/${{ github.run_id }}-${{ github.run_attempt }}-${{ github.job }}/target
steps:
- uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
- name: Ensure C toolchain
shell: bash
run: bash ./scripts/ci/ensure_c_toolchain.sh
- name: Self-heal Rust toolchain cache
shell: bash
run: ./scripts/ci/self_heal_rust_toolchain.sh 1.92.0
- uses: dtolnay/rust-toolchain@631a55b12751854ce901bb631d5902ceb48146f7 # stable
with:
toolchain: 1.92.0
- name: Ensure C toolchain for Rust builds
run: ./scripts/ci/ensure_cc.sh
- name: Ensure cargo component
shell: bash
run: bash ./scripts/ci/ensure_cargo_component.sh 1.92.0
- uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v3
with:
prefix-key: ci-run-check
cache-bin: false
- name: Run tests with flake detection
shell: bash
env:
@ -92,6 +170,20 @@ jobs:
set -euo pipefail
mkdir -p artifacts
toolchain_bin=""
if [ -n "${CARGO:-}" ]; then
toolchain_bin="$(dirname "${CARGO}")"
elif [ -n "${RUSTC:-}" ]; then
toolchain_bin="$(dirname "${RUSTC}")"
fi
if [ -n "${toolchain_bin}" ] && [ -d "${toolchain_bin}" ]; then
case ":$PATH:" in
*":${toolchain_bin}:"*) ;;
*) export PATH="${toolchain_bin}:$PATH" ;;
esac
fi
if cargo test --locked --verbose; then
echo '{"flake_suspected":false,"status":"success"}' > artifacts/flake-probe.json
exit 0
@ -137,28 +229,51 @@ jobs:
name: Build (Smoke)
needs: [changes]
if: needs.changes.outputs.rust_changed == 'true'
runs-on: [self-hosted, aws-india]
timeout-minutes: 35
runs-on: [self-hosted, Linux, X64, aws-india, blacksmith-2vcpu-ubuntu-2404, hetzner]
timeout-minutes: 90
env:
CARGO_HOME: ${{ github.workspace }}/.ci-rust/${{ github.run_id }}-${{ github.run_attempt }}-${{ github.job }}/cargo
RUSTUP_HOME: ${{ github.workspace }}/.ci-rust/${{ github.run_id }}-${{ github.run_attempt }}-${{ github.job }}/rustup
CARGO_TARGET_DIR: ${{ github.workspace }}/.ci-rust/${{ github.run_id }}-${{ github.run_attempt }}-${{ github.job }}/target
steps:
- uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
- name: Ensure C toolchain
shell: bash
run: bash ./scripts/ci/ensure_c_toolchain.sh
- name: Self-heal Rust toolchain cache
shell: bash
run: ./scripts/ci/self_heal_rust_toolchain.sh 1.92.0
- uses: dtolnay/rust-toolchain@631a55b12751854ce901bb631d5902ceb48146f7 # stable
with:
toolchain: 1.92.0
- name: Ensure C toolchain for Rust builds
run: ./scripts/ci/ensure_cc.sh
- name: Ensure cargo component
shell: bash
run: bash ./scripts/ci/ensure_cargo_component.sh 1.92.0
- uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v3
with:
prefix-key: ci-run-build
cache-targets: true
cache-bin: false
- name: Build binary (smoke check)
run: cargo build --profile release-fast --locked --verbose
env:
CARGO_BUILD_JOBS: 2
CI_SMOKE_BUILD_ATTEMPTS: 3
run: bash scripts/ci/smoke_build_retry.sh
- name: Check binary size
env:
BINARY_SIZE_HARD_LIMIT_MB: 28
BINARY_SIZE_ADVISORY_MB: 20
BINARY_SIZE_TARGET_MB: 5
run: bash scripts/ci/check_binary_size.sh target/release-fast/zeroclaw
docs-only:
name: Docs-Only Fast Path
needs: [changes]
if: needs.changes.outputs.docs_only == 'true'
runs-on: ubuntu-22.04
runs-on: [self-hosted, Linux, X64, aws-india, light, cpu40]
steps:
- name: Skip heavy jobs for docs-only change
run: echo "Docs-only change detected. Rust lint/test/build skipped."
@ -167,7 +282,7 @@ jobs:
name: Non-Rust Fast Path
needs: [changes]
if: needs.changes.outputs.docs_only != 'true' && needs.changes.outputs.rust_changed != 'true'
runs-on: ubuntu-22.04
runs-on: [self-hosted, Linux, X64, aws-india, light, cpu40]
steps:
- name: Skip Rust jobs for non-Rust change scope
run: echo "No Rust-impacting files changed. Rust lint/test/build skipped."
@ -176,12 +291,16 @@ jobs:
name: Docs Quality
needs: [changes]
if: needs.changes.outputs.docs_changed == 'true'
runs-on: ubuntu-22.04
runs-on: [self-hosted, Linux, X64, aws-india, light, cpu40]
timeout-minutes: 15
steps:
- uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
with:
fetch-depth: 0
- name: Setup Node.js for markdown lint
uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4
with:
node-version: "22"
- name: Markdown lint (changed lines only)
env:
@ -231,7 +350,7 @@ jobs:
name: Lint Feedback
if: github.event_name == 'pull_request'
needs: [changes, lint, docs-quality]
runs-on: ubuntu-22.04
runs-on: [self-hosted, Linux, X64, aws-india, light, cpu40]
permissions:
contents: read
pull-requests: write
@ -257,7 +376,7 @@ jobs:
name: License File Owner Guard
needs: [changes]
if: github.event_name == 'pull_request'
runs-on: ubuntu-22.04
runs-on: [self-hosted, Linux, X64, aws-india, light, cpu40]
permissions:
contents: read
pull-requests: read
@ -274,8 +393,8 @@ jobs:
ci-required:
name: CI Required Gate
if: always()
needs: [changes, lint, test, build, docs-only, non-rust, docs-quality, lint-feedback, license-file-owner-guard]
runs-on: ubuntu-22.04
needs: [changes, lint, workspace-check, package-check, test, build, docs-only, non-rust, docs-quality, lint-feedback, license-file-owner-guard]
runs-on: [self-hosted, Linux, X64, aws-india, light, cpu40]
steps:
- name: Enforce required status
shell: bash
@ -322,10 +441,14 @@ jobs:
# --- Rust change path ---
lint_result="${{ needs.lint.result }}"
workspace_check_result="${{ needs.workspace-check.result }}"
package_check_result="${{ needs.package-check.result }}"
test_result="${{ needs.test.result }}"
build_result="${{ needs.build.result }}"
echo "lint=${lint_result}"
echo "workspace-check=${workspace_check_result}"
echo "package-check=${package_check_result}"
echo "test=${test_result}"
echo "build=${build_result}"
echo "docs=${docs_result}"
@ -333,8 +456,8 @@ jobs:
check_pr_governance
if [ "$lint_result" != "success" ] || [ "$test_result" != "success" ] || [ "$build_result" != "success" ]; then
echo "Required CI jobs did not pass: lint=${lint_result} test=${test_result} build=${build_result}"
if [ "$lint_result" != "success" ] || [ "$workspace_check_result" != "success" ] || [ "$package_check_result" != "success" ] || [ "$test_result" != "success" ] || [ "$build_result" != "success" ]; then
echo "Required CI jobs did not pass: lint=${lint_result} workspace-check=${workspace_check_result} package-check=${package_check_result} test=${test_result} build=${build_result}"
exit 1
fi

View File

@ -8,6 +8,7 @@ on:
- "Cargo.lock"
- "src/**"
- "crates/**"
- "scripts/ci/ensure_cc.sh"
- "scripts/ci/generate_provenance.py"
- ".github/workflows/ci-supply-chain-provenance.yml"
workflow_dispatch:
@ -31,7 +32,7 @@ env:
jobs:
provenance:
name: Build + Provenance Bundle
runs-on: [self-hosted, aws-india]
runs-on: [self-hosted, Linux, X64, aws-india, blacksmith-2vcpu-ubuntu-2404, hetzner]
timeout-minutes: 60
steps:
- name: Checkout
@ -42,12 +43,51 @@ jobs:
with:
toolchain: 1.92.0
- name: Ensure cargo component
shell: bash
env:
ENSURE_CARGO_COMPONENT_STRICT: "true"
run: bash ./scripts/ci/ensure_cargo_component.sh 1.92.0
- name: Activate toolchain binaries on PATH
shell: bash
run: |
set -euo pipefail
toolchain_bin="$(dirname "$(rustup which --toolchain 1.92.0 cargo)")"
echo "$toolchain_bin" >> "$GITHUB_PATH"
- name: Resolve host target
id: rust-meta
shell: bash
run: |
set -euo pipefail
host_target="$(rustup run 1.92.0 rustc -vV | sed -n 's/^host: //p')"
if [ -z "${host_target}" ]; then
echo "::error::Unable to resolve Rust host target."
exit 1
fi
echo "host_target=${host_target}" >> "$GITHUB_OUTPUT"
- name: Runner preflight (compiler + disk)
shell: bash
run: |
set -euo pipefail
./scripts/ci/ensure_cc.sh
echo "Runner: ${RUNNER_NAME:-unknown} (${RUNNER_OS:-unknown}/${RUNNER_ARCH:-unknown})"
free_kb="$(df -Pk . | awk 'NR==2 {print $4}')"
min_kb=$((10 * 1024 * 1024))
if [ "${free_kb}" -lt "${min_kb}" ]; then
echo "::error::Insufficient disk space on runner (<10 GiB free)."
df -h .
exit 1
fi
- name: Build release-fast artifact
shell: bash
run: |
set -euo pipefail
mkdir -p artifacts
host_target="$(rustc -vV | sed -n 's/^host: //p')"
host_target="${{ steps.rust-meta.outputs.host_target }}"
cargo build --profile release-fast --locked --target "$host_target"
cp "target/${host_target}/release-fast/zeroclaw" "artifacts/zeroclaw-${host_target}"
sha256sum "artifacts/zeroclaw-${host_target}" > "artifacts/zeroclaw-${host_target}.sha256"
@ -56,7 +96,7 @@ jobs:
shell: bash
run: |
set -euo pipefail
host_target="$(rustc -vV | sed -n 's/^host: //p')"
host_target="${{ steps.rust-meta.outputs.host_target }}"
python3 scripts/ci/generate_provenance.py \
--artifact "artifacts/zeroclaw-${host_target}" \
--subject-name "zeroclaw-${host_target}" \
@ -69,7 +109,7 @@ jobs:
shell: bash
run: |
set -euo pipefail
host_target="$(rustc -vV | sed -n 's/^host: //p')"
host_target="${{ steps.rust-meta.outputs.host_target }}"
statement="artifacts/provenance-${host_target}.intoto.json"
cosign sign-blob --yes \
--bundle="${statement}.sigstore.json" \
@ -81,7 +121,7 @@ jobs:
shell: bash
run: |
set -euo pipefail
host_target="$(rustc -vV | sed -n 's/^host: //p')"
host_target="${{ steps.rust-meta.outputs.host_target }}"
python3 scripts/ci/emit_audit_event.py \
--event-type supply_chain_provenance \
--input-json "artifacts/provenance-${host_target}.intoto.json" \
@ -100,7 +140,7 @@ jobs:
shell: bash
run: |
set -euo pipefail
host_target="$(rustc -vV | sed -n 's/^host: //p')"
host_target="${{ steps.rust-meta.outputs.host_target }}"
{
echo "### Supply Chain Provenance"
echo "- Target: \`${host_target}\`"

View File

@ -2,7 +2,7 @@ name: Deploy Web to GitHub Pages
on:
push:
branches: [main, dev]
branches: [main]
paths:
- 'web/**'
workflow_dispatch:
@ -18,7 +18,7 @@ concurrency:
jobs:
build:
runs-on: ubuntu-latest
runs-on: [self-hosted, Linux, X64, aws-india, blacksmith-2vcpu-ubuntu-2404, hetzner]
steps:
- name: Checkout
uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
@ -48,7 +48,7 @@ jobs:
environment:
name: github-pages
url: ${{ steps.deployment.outputs.page_url }}
runs-on: ubuntu-latest
runs-on: [self-hosted, Linux, X64, aws-india, blacksmith-2vcpu-ubuntu-2404, hetzner]
needs: build
steps:
- name: Deploy to GitHub Pages

View File

@ -41,7 +41,7 @@ on:
default: ""
concurrency:
group: docs-deploy-${{ github.event.pull_request.number || github.sha }}
group: docs-deploy-${{ github.event_name }}-${{ github.event.pull_request.number || github.ref_name || github.sha }}
cancel-in-progress: true
permissions:
@ -56,7 +56,7 @@ env:
jobs:
docs-quality:
name: Docs Quality Gate
runs-on: [self-hosted, aws-india]
runs-on: [self-hosted, Linux, X64, aws-india, blacksmith-2vcpu-ubuntu-2404, hetzner]
timeout-minutes: 20
outputs:
docs_files: ${{ steps.scope.outputs.docs_files }}
@ -73,6 +73,11 @@ jobs:
with:
fetch-depth: 0
- name: Setup Node.js
uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4
with:
node-version: "22"
- name: Resolve docs diff scope
id: scope
shell: bash
@ -160,6 +165,11 @@ jobs:
if-no-files-found: ignore
retention-days: ${{ steps.deploy_guard.outputs.docs_guard_artifact_retention_days || 21 }}
- name: Setup Node.js for markdown lint
uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4
with:
node-version: "22"
- name: Markdown quality gate
env:
BASE_SHA: ${{ steps.scope.outputs.base_sha }}
@ -203,7 +213,7 @@ jobs:
name: Docs Preview Artifact
needs: [docs-quality]
if: github.event_name == 'pull_request' || (github.event_name == 'workflow_dispatch' && github.event.inputs.deploy_target == 'preview')
runs-on: [self-hosted, aws-india]
runs-on: [self-hosted, Linux, X64, aws-india, blacksmith-2vcpu-ubuntu-2404, hetzner]
timeout-minutes: 15
steps:
- uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
@ -237,7 +247,7 @@ jobs:
name: Deploy Docs to GitHub Pages
needs: [docs-quality]
if: needs.docs-quality.outputs.deploy_target == 'production' && needs.docs-quality.outputs.ready_to_deploy == 'true'
runs-on: [self-hosted, aws-india]
runs-on: [self-hosted, Linux, X64, aws-india, blacksmith-2vcpu-ubuntu-2404, hetzner]
timeout-minutes: 20
permissions:
contents: read

View File

@ -51,7 +51,7 @@ env:
jobs:
resolve-profile:
name: Resolve Matrix Profile
runs-on: [self-hosted, aws-india]
runs-on: [self-hosted, Linux, X64, aws-india, blacksmith-2vcpu-ubuntu-2404, hetzner]
outputs:
profile: ${{ steps.resolve.outputs.profile }}
lane_job_prefix: ${{ steps.resolve.outputs.lane_job_prefix }}
@ -127,7 +127,7 @@ jobs:
github.event_name != 'pull_request' ||
contains(github.event.pull_request.labels.*.name, 'ci:full') ||
contains(github.event.pull_request.labels.*.name, 'ci:feature-matrix')
runs-on: [self-hosted, aws-india]
runs-on: [self-hosted, Linux, X64, aws-india, blacksmith-2vcpu-ubuntu-2404, hetzner]
timeout-minutes: ${{ fromJSON(needs.resolve-profile.outputs.lane_timeout_minutes) }}
strategy:
fail-fast: false
@ -155,6 +155,11 @@ jobs:
- uses: dtolnay/rust-toolchain@631a55b12751854ce901bb631d5902ceb48146f7 # stable
with:
toolchain: 1.92.0
- name: Ensure cargo component
shell: bash
env:
ENSURE_CARGO_COMPONENT_STRICT: "true"
run: bash ./scripts/ci/ensure_cargo_component.sh 1.92.0
- uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v3
with:
@ -278,7 +283,7 @@ jobs:
name: ${{ needs.resolve-profile.outputs.summary_job_name }}
needs: [resolve-profile, feature-check]
if: always()
runs-on: [self-hosted, aws-india]
runs-on: [self-hosted, Linux, X64, aws-india, blacksmith-2vcpu-ubuntu-2404, hetzner]
steps:
- uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4

View File

@ -27,7 +27,7 @@ env:
jobs:
nightly-lanes:
name: Nightly Lane (${{ matrix.name }})
runs-on: [self-hosted, aws-india]
runs-on: [self-hosted, Linux, X64, aws-india, blacksmith-2vcpu-ubuntu-2404, hetzner]
timeout-minutes: 70
strategy:
fail-fast: false
@ -53,6 +53,11 @@ jobs:
- uses: dtolnay/rust-toolchain@631a55b12751854ce901bb631d5902ceb48146f7 # stable
with:
toolchain: 1.92.0
- name: Ensure cargo component
shell: bash
env:
ENSURE_CARGO_COMPONENT_STRICT: "true"
run: bash ./scripts/ci/ensure_cargo_component.sh 1.92.0
- uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v3
with:
@ -137,7 +142,7 @@ jobs:
name: Nightly Summary & Routing
needs: [nightly-lanes]
if: always()
runs-on: [self-hosted, aws-india]
runs-on: [self-hosted, Linux, X64, aws-india, blacksmith-2vcpu-ubuntu-2404, hetzner]
steps:
- uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4

View File

@ -22,7 +22,7 @@ concurrency:
jobs:
build:
runs-on: ubuntu-latest
runs-on: [self-hosted, Linux, X64, aws-india, blacksmith-2vcpu-ubuntu-2404, hetzner]
steps:
- name: Checkout
@ -53,7 +53,7 @@ jobs:
deploy:
needs: build
runs-on: ubuntu-latest
runs-on: [self-hosted, Linux, X64, aws-india, blacksmith-2vcpu-ubuntu-2404, hetzner]
environment:
name: github-pages
url: ${{ steps.deployment.outputs.page_url }}

View File

@ -8,7 +8,9 @@ on:
types: [opened, labeled, unlabeled]
concurrency:
group: pr-auto-response-${{ github.event.pull_request.number || github.event.issue.number || github.run_id }}
# Keep cancellation within the same lifecycle action to avoid `labeled`
# events canceling an in-flight `opened` run for the same issue/PR.
group: pr-auto-response-${{ github.event.pull_request.number || github.event.issue.number || github.run_id }}-${{ github.event.action || 'unknown' }}
cancel-in-progress: true
permissions: {}
@ -21,12 +23,11 @@ env:
jobs:
contributor-tier-issues:
# Only run for opened/reopened events to avoid duplicate runs with labeled-routes job
if: >-
(github.event_name == 'issues' &&
(github.event.action == 'opened' || github.event.action == 'reopened' || github.event.action == 'labeled' || github.event.action == 'unlabeled')) ||
(github.event_name == 'pull_request_target' &&
(github.event.action == 'labeled' || github.event.action == 'unlabeled'))
runs-on: ubuntu-22.04
(github.event.action == 'opened' || github.event.action == 'reopened'))
runs-on: [self-hosted, Linux, X64, aws-india, light, cpu40]
permissions:
contents: read
issues: write
@ -45,7 +46,7 @@ jobs:
await script({ github, context, core });
first-interaction:
if: github.event.action == 'opened'
runs-on: ubuntu-22.04
runs-on: [self-hosted, Linux, X64, aws-india, light, cpu40]
permissions:
issues: write
pull-requests: write
@ -76,7 +77,7 @@ jobs:
labeled-routes:
if: github.event.action == 'labeled'
runs-on: ubuntu-22.04
runs-on: [self-hosted, Linux, X64, aws-india, light, cpu40]
permissions:
contents: read
issues: write

View File

@ -17,7 +17,7 @@ jobs:
permissions:
issues: write
pull-requests: write
runs-on: ubuntu-22.04
runs-on: [self-hosted, Linux, X64, aws-india, light, cpu40]
timeout-minutes: 10
steps:
- name: Mark stale issues and pull requests

View File

@ -18,7 +18,7 @@ env:
jobs:
nudge-stale-prs:
runs-on: ubuntu-22.04
runs-on: [self-hosted, Linux, X64, aws-india, light, cpu40]
timeout-minutes: 10
permissions:
contents: read

View File

@ -23,7 +23,7 @@ env:
jobs:
intake:
name: Intake Checks
runs-on: ubuntu-22.04
runs-on: [self-hosted, Linux, X64, aws-india, light, cpu40]
timeout-minutes: 10
steps:
- name: Checkout repository

View File

@ -28,7 +28,7 @@ env:
jobs:
contributor-tier-consistency:
runs-on: ubuntu-22.04
runs-on: [self-hosted, Linux, X64, aws-india, light, cpu40]
timeout-minutes: 10
steps:
- name: Checkout

View File

@ -32,7 +32,7 @@ env:
jobs:
label:
runs-on: ubuntu-22.04
runs-on: [self-hosted, Linux, X64, aws-india, light, cpu40]
steps:
- name: Checkout repository
uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4

View File

@ -17,6 +17,11 @@ on:
- "scripts/ci/ghcr_publish_contract_guard.py"
- "scripts/ci/ghcr_vulnerability_gate.py"
workflow_dispatch:
inputs:
release_tag:
description: "Existing release tag to publish (e.g. v0.2.0). Leave empty for smoke-only run."
required: false
type: string
concurrency:
group: docker-${{ github.event.pull_request.number || github.ref }}
@ -32,8 +37,8 @@ env:
jobs:
pr-smoke:
name: PR Docker Smoke
if: github.event_name == 'workflow_dispatch' || (github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name == github.repository)
runs-on: [self-hosted, aws-india]
if: (github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name == github.repository) || (github.event_name == 'workflow_dispatch' && inputs.release_tag == '')
runs-on: [self-hosted, Linux, X64, aws-india, blacksmith-2vcpu-ubuntu-2404, hetzner]
timeout-minutes: 25
permissions:
contents: read
@ -41,6 +46,20 @@ jobs:
- name: Checkout repository
uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
- name: Resolve Docker API version
shell: bash
run: |
set -euo pipefail
server_api="$(docker version --format '{{.Server.APIVersion}}')"
min_api="$(docker version --format '{{.Server.MinAPIVersion}}' 2>/dev/null || true)"
if [[ -z "${server_api}" || "${server_api}" == "<no value>" ]]; then
echo "::error::Unable to detect Docker server API version."
docker version || true
exit 1
fi
echo "DOCKER_API_VERSION=${server_api}" >> "$GITHUB_ENV"
echo "Using Docker API version ${server_api} (server min: ${min_api:-unknown})"
- name: Setup Buildx
uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # v3
@ -72,9 +91,9 @@ jobs:
publish:
name: Build and Push Docker Image
if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v') && github.repository == 'zeroclaw-labs/zeroclaw'
runs-on: [self-hosted, aws-india]
timeout-minutes: 45
if: github.repository == 'zeroclaw-labs/zeroclaw' && ((github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v')) || (github.event_name == 'workflow_dispatch' && inputs.release_tag != ''))
runs-on: [self-hosted, Linux, X64, aws-india, blacksmith-2vcpu-ubuntu-2404, hetzner]
timeout-minutes: 90
permissions:
contents: read
packages: write
@ -82,6 +101,22 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
with:
ref: ${{ github.event_name == 'workflow_dispatch' && format('refs/tags/{0}', inputs.release_tag) || github.ref }}
- name: Resolve Docker API version
shell: bash
run: |
set -euo pipefail
server_api="$(docker version --format '{{.Server.APIVersion}}')"
min_api="$(docker version --format '{{.Server.MinAPIVersion}}' 2>/dev/null || true)"
if [[ -z "${server_api}" || "${server_api}" == "<no value>" ]]; then
echo "::error::Unable to detect Docker server API version."
docker version || true
exit 1
fi
echo "DOCKER_API_VERSION=${server_api}" >> "$GITHUB_ENV"
echo "Using Docker API version ${server_api} (server min: ${min_api:-unknown})"
- name: Setup Buildx
uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # v3
@ -99,22 +134,42 @@ jobs:
run: |
set -euo pipefail
IMAGE="${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}"
SHA_SUFFIX="sha-${GITHUB_SHA::12}"
if [[ "${GITHUB_EVENT_NAME}" == "push" ]]; then
if [[ "${GITHUB_REF}" != refs/tags/v* ]]; then
echo "::error::Docker publish is restricted to v* tag pushes."
exit 1
fi
RELEASE_TAG="${GITHUB_REF#refs/tags/}"
elif [[ "${GITHUB_EVENT_NAME}" == "workflow_dispatch" ]]; then
RELEASE_TAG="${{ inputs.release_tag }}"
if [[ -z "${RELEASE_TAG}" ]]; then
echo "::error::workflow_dispatch publish requires inputs.release_tag"
exit 1
fi
if [[ ! "${RELEASE_TAG}" =~ ^v[0-9]+\.[0-9]+\.[0-9]+([.-][0-9A-Za-z.-]+)?$ ]]; then
echo "::error::release_tag must be vX.Y.Z or vX.Y.Z-suffix (received: ${RELEASE_TAG})"
exit 1
fi
if ! git rev-parse --verify "refs/tags/${RELEASE_TAG}" >/dev/null 2>&1; then
echo "::error::release tag not found in checkout: ${RELEASE_TAG}"
exit 1
fi
else
echo "::error::Unsupported event for publish: ${GITHUB_EVENT_NAME}"
exit 1
fi
RELEASE_SHA="$(git rev-parse HEAD)"
SHA_SUFFIX="sha-${RELEASE_SHA::12}"
SHA_TAG="${IMAGE}:${SHA_SUFFIX}"
LATEST_SUFFIX="latest"
LATEST_TAG="${IMAGE}:${LATEST_SUFFIX}"
if [[ "${GITHUB_REF}" != refs/tags/v* ]]; then
echo "::error::Docker publish is restricted to v* tag pushes."
exit 1
fi
RELEASE_TAG="${GITHUB_REF#refs/tags/}"
VERSION_TAG="${IMAGE}:${RELEASE_TAG}"
TAGS="${VERSION_TAG},${SHA_TAG},${LATEST_TAG}"
{
echo "tags=${TAGS}"
echo "release_tag=${RELEASE_TAG}"
echo "release_sha=${RELEASE_SHA}"
echo "sha_tag=${SHA_SUFFIX}"
echo "latest_tag=${LATEST_SUFFIX}"
} >> "$GITHUB_OUTPUT"
@ -124,6 +179,8 @@ jobs:
with:
context: .
push: true
build-args: |
ZEROCLAW_CARGO_ALL_FEATURES=true
tags: ${{ steps.meta.outputs.tags }}
platforms: linux/amd64,linux/arm64
cache-from: type=gha
@ -173,7 +230,7 @@ jobs:
python3 scripts/ci/ghcr_publish_contract_guard.py \
--repository "${GITHUB_REPOSITORY,,}" \
--release-tag "${{ steps.meta.outputs.release_tag }}" \
--sha "${GITHUB_SHA}" \
--sha "${{ steps.meta.outputs.release_sha }}" \
--policy-file .github/release/ghcr-tag-policy.json \
--output-json artifacts/ghcr-publish-contract.json \
--output-md artifacts/ghcr-publish-contract.md \
@ -328,11 +385,25 @@ jobs:
if-no-files-found: ignore
retention-days: 21
- name: Upload Trivy SARIF
- name: Detect Trivy SARIF report
id: trivy-sarif
if: always()
shell: bash
run: |
set -euo pipefail
sarif_path="artifacts/trivy-${{ steps.meta.outputs.release_tag }}.sarif"
if [ -f "${sarif_path}" ]; then
echo "exists=true" >> "$GITHUB_OUTPUT"
else
echo "exists=false" >> "$GITHUB_OUTPUT"
echo "::notice::Trivy SARIF report not found at ${sarif_path}; skipping SARIF upload."
fi
- name: Upload Trivy SARIF
if: always() && steps.trivy-sarif.outputs.exists == 'true'
uses: github/codeql-action/upload-sarif@89a39a4e59826350b863aa6b6252a07ad50cf83e # v4
with:
sarif_file: artifacts/trivy-${{ github.ref_name }}.sarif
sarif_file: artifacts/trivy-${{ steps.meta.outputs.release_tag }}.sarif
category: ghcr-trivy
- name: Upload Trivy report artifacts
@ -341,9 +412,9 @@ jobs:
with:
name: ghcr-trivy-report
path: |
artifacts/trivy-${{ github.ref_name }}.sarif
artifacts/trivy-${{ github.ref_name }}.txt
artifacts/trivy-${{ github.ref_name }}.json
artifacts/trivy-${{ steps.meta.outputs.release_tag }}.sarif
artifacts/trivy-${{ steps.meta.outputs.release_tag }}.txt
artifacts/trivy-${{ steps.meta.outputs.release_tag }}.json
artifacts/trivy-sha-*.txt
artifacts/trivy-sha-*.json
artifacts/trivy-latest.txt

View File

@ -43,7 +43,7 @@ env:
jobs:
prerelease-guard:
name: Pre-release Guard
runs-on: [self-hosted, aws-india]
runs-on: [self-hosted, Linux, X64, aws-india, blacksmith-2vcpu-ubuntu-2404, hetzner]
timeout-minutes: 20
outputs:
release_tag: ${{ steps.vars.outputs.release_tag }}
@ -177,7 +177,7 @@ jobs:
needs: [prerelease-guard]
# Keep GNU Linux prerelease artifacts on Ubuntu 22.04 so runtime GLIBC
# symbols remain compatible with Debian 12 / Ubuntu 22.04 hosts.
runs-on: ubuntu-22.04
runs-on: [self-hosted, Linux, X64, aws-india, blacksmith-2vcpu-ubuntu-2404, hetzner]
timeout-minutes: 45
steps:
- name: Checkout tag
@ -239,7 +239,7 @@ jobs:
name: Publish GitHub Pre-release
needs: [prerelease-guard, build-prerelease]
if: needs.prerelease-guard.outputs.ready_to_publish == 'true'
runs-on: [self-hosted, aws-india]
runs-on: [self-hosted, Linux, X64, aws-india, blacksmith-2vcpu-ubuntu-2404, hetzner]
timeout-minutes: 15
steps:
- name: Download prerelease artifacts

View File

@ -47,7 +47,8 @@ env:
jobs:
prepare:
name: Prepare Release Context
runs-on: [self-hosted, aws-india]
if: github.event_name != 'push' || !contains(github.ref_name, '-')
runs-on: [self-hosted, Linux, X64, aws-india, blacksmith-2vcpu-ubuntu-2404, hetzner]
outputs:
release_ref: ${{ steps.vars.outputs.release_ref }}
release_tag: ${{ steps.vars.outputs.release_tag }}
@ -106,7 +107,35 @@ jobs:
} >> "$GITHUB_STEP_SUMMARY"
- name: Checkout
uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
- name: Install gh CLI
shell: bash
run: |
set -euo pipefail
if command -v gh &>/dev/null; then
echo "gh already available: $(gh --version | head -1)"
exit 0
fi
echo "Installing gh CLI..."
curl -fsSL https://cli.github.com/packages/githubcli-archive-keyring.gpg \
| sudo dd of=/usr/share/keyrings/githubcli-archive-keyring.gpg
echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/githubcli-archive-keyring.gpg] https://cli.github.com/packages stable main" \
| sudo tee /etc/apt/sources.list.d/github-cli.list > /dev/null
for i in {1..60}; do
if sudo fuser /var/lib/apt/lists/lock >/dev/null 2>&1 \
|| sudo fuser /var/lib/dpkg/lock-frontend >/dev/null 2>&1 \
|| sudo fuser /var/lib/dpkg/lock >/dev/null 2>&1; then
echo "apt/dpkg locked; waiting ($i/60)..."
sleep 5
else
break
fi
done
sudo apt-get -o DPkg::Lock::Timeout=600 -o Acquire::Retries=3 update -qq
sudo apt-get -o DPkg::Lock::Timeout=600 -o Acquire::Retries=3 install -y gh
env:
GH_TOKEN: ${{ github.token }}
- name: Validate release trigger and authorization guard
shell: bash
@ -127,6 +156,8 @@ jobs:
--output-json artifacts/release-trigger-guard.json \
--output-md artifacts/release-trigger-guard.md \
--fail-on-violation
env:
GH_TOKEN: ${{ github.token }}
- name: Emit release trigger audit event
if: always()
@ -164,20 +195,24 @@ jobs:
needs: [prepare]
runs-on: ${{ matrix.os }}
timeout-minutes: 40
env:
CARGO_HOME: ${{ github.workspace }}/.ci-rust/${{ github.run_id }}-${{ github.run_attempt }}-${{ github.job }}-${{ matrix.target }}/cargo
RUSTUP_HOME: ${{ github.workspace }}/.ci-rust/${{ github.run_id }}-${{ github.run_attempt }}-${{ github.job }}-${{ matrix.target }}/rustup
CARGO_TARGET_DIR: ${{ github.workspace }}/target
strategy:
fail-fast: false
matrix:
include:
# Keep GNU Linux release artifacts on Ubuntu 22.04 to preserve
# a broadly compatible GLIBC baseline for user distributions.
- os: ubuntu-22.04
- os: [self-hosted, Linux, X64, aws-india, blacksmith-2vcpu-ubuntu-2404, hetzner]
target: x86_64-unknown-linux-gnu
artifact: zeroclaw
archive_ext: tar.gz
cross_compiler: ""
linker_env: ""
linker: ""
- os: self-hosted
- os: [self-hosted, Linux, X64, aws-india, blacksmith-2vcpu-ubuntu-2404, hetzner]
target: x86_64-unknown-linux-musl
artifact: zeroclaw
archive_ext: tar.gz
@ -185,14 +220,14 @@ jobs:
linker_env: ""
linker: ""
use_cross: true
- os: ubuntu-22.04
- os: [self-hosted, Linux, X64, aws-india, blacksmith-2vcpu-ubuntu-2404, hetzner]
target: aarch64-unknown-linux-gnu
artifact: zeroclaw
archive_ext: tar.gz
cross_compiler: gcc-aarch64-linux-gnu
linker_env: CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER
linker: aarch64-linux-gnu-gcc
- os: self-hosted
- os: [self-hosted, Linux, X64, aws-india, blacksmith-2vcpu-ubuntu-2404, hetzner]
target: aarch64-unknown-linux-musl
artifact: zeroclaw
archive_ext: tar.gz
@ -200,14 +235,14 @@ jobs:
linker_env: ""
linker: ""
use_cross: true
- os: ubuntu-22.04
- os: [self-hosted, Linux, X64, aws-india, blacksmith-2vcpu-ubuntu-2404, hetzner]
target: armv7-unknown-linux-gnueabihf
artifact: zeroclaw
archive_ext: tar.gz
cross_compiler: gcc-arm-linux-gnueabihf
linker_env: CARGO_TARGET_ARMV7_UNKNOWN_LINUX_GNUEABIHF_LINKER
linker: arm-linux-gnueabihf-gcc
- os: self-hosted
- os: [self-hosted, Linux, X64, aws-india, blacksmith-2vcpu-ubuntu-2404, hetzner]
target: armv7-linux-androideabi
artifact: zeroclaw
archive_ext: tar.gz
@ -216,7 +251,7 @@ jobs:
linker: ""
android_ndk: true
android_api: 21
- os: self-hosted
- os: [self-hosted, Linux, X64, aws-india, blacksmith-2vcpu-ubuntu-2404, hetzner]
target: aarch64-linux-android
artifact: zeroclaw
archive_ext: tar.gz
@ -225,7 +260,7 @@ jobs:
linker: ""
android_ndk: true
android_api: 21
- os: self-hosted
- os: [self-hosted, Linux, X64, aws-india, blacksmith-2vcpu-ubuntu-2404, hetzner]
target: x86_64-unknown-freebsd
artifact: zeroclaw
archive_ext: tar.gz
@ -260,6 +295,10 @@ jobs:
with:
ref: ${{ needs.prepare.outputs.release_ref }}
- name: Self-heal Rust toolchain cache
shell: bash
run: ./scripts/ci/self_heal_rust_toolchain.sh 1.92.0
- uses: dtolnay/rust-toolchain@631a55b12751854ce901bb631d5902ceb48146f7 # stable
with:
toolchain: 1.92.0
@ -270,14 +309,38 @@ jobs:
- name: Install cross for cross-built targets
if: matrix.use_cross
shell: bash
run: |
cargo install cross --git https://github.com/cross-rs/cross
set -euo pipefail
echo "${CARGO_HOME:-$HOME/.cargo}/bin" >> "$GITHUB_PATH"
cargo install cross --locked --version 0.2.5
command -v cross
cross --version
- name: Install cross-compilation toolchain (Linux)
if: runner.os == 'Linux' && matrix.cross_compiler != ''
run: |
sudo apt-get update -qq
sudo apt-get install -y "${{ matrix.cross_compiler }}"
set -euo pipefail
for i in {1..60}; do
if sudo fuser /var/lib/apt/lists/lock >/dev/null 2>&1 \
|| sudo fuser /var/lib/dpkg/lock-frontend >/dev/null 2>&1 \
|| sudo fuser /var/lib/dpkg/lock >/dev/null 2>&1; then
echo "apt/dpkg locked; waiting ($i/60)..."
sleep 5
else
break
fi
done
sudo apt-get -o DPkg::Lock::Timeout=600 -o Acquire::Retries=3 update -qq
sudo apt-get -o DPkg::Lock::Timeout=600 -o Acquire::Retries=3 install -y "${{ matrix.cross_compiler }}"
# Install matching libc dev headers for cross targets
# (required by ring/aws-lc-sys C compilation)
case "${{ matrix.target }}" in
armv7-unknown-linux-gnueabihf)
sudo apt-get -o DPkg::Lock::Timeout=600 -o Acquire::Retries=3 install -y libc6-dev-armhf-cross ;;
aarch64-unknown-linux-gnu)
sudo apt-get -o DPkg::Lock::Timeout=600 -o Acquire::Retries=3 install -y libc6-dev-arm64-cross ;;
esac
- name: Setup Android NDK
if: matrix.android_ndk
@ -290,8 +353,18 @@ jobs:
NDK_ROOT="${RUNNER_TEMP}/android-ndk"
NDK_HOME="${NDK_ROOT}/android-ndk-${NDK_VERSION}"
sudo apt-get update -qq
sudo apt-get install -y unzip
for i in {1..60}; do
if sudo fuser /var/lib/apt/lists/lock >/dev/null 2>&1 \
|| sudo fuser /var/lib/dpkg/lock-frontend >/dev/null 2>&1 \
|| sudo fuser /var/lib/dpkg/lock >/dev/null 2>&1; then
echo "apt/dpkg locked; waiting ($i/60)..."
sleep 5
else
break
fi
done
sudo apt-get -o DPkg::Lock::Timeout=600 -o Acquire::Retries=3 update -qq
sudo apt-get -o DPkg::Lock::Timeout=600 -o Acquire::Retries=3 install -y unzip
mkdir -p "${NDK_ROOT}"
curl -fsSL "${NDK_URL}" -o "${RUNNER_TEMP}/${NDK_ZIP}"
@ -362,6 +435,10 @@ jobs:
- name: Check binary size (Unix)
if: runner.os != 'Windows'
env:
BINARY_SIZE_HARD_LIMIT_MB: 28
BINARY_SIZE_ADVISORY_MB: 20
BINARY_SIZE_TARGET_MB: 5
run: bash scripts/ci/check_binary_size.sh "target/${{ matrix.target }}/release-fast/${{ matrix.artifact }}" "${{ matrix.target }}"
- name: Package (Unix)
@ -386,7 +463,7 @@ jobs:
verify-artifacts:
name: Verify Artifact Set
needs: [prepare, build-release]
runs-on: [self-hosted, aws-india]
runs-on: [self-hosted, Linux, X64, aws-india, blacksmith-2vcpu-ubuntu-2404, hetzner]
steps:
- uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
with:
@ -447,7 +524,7 @@ jobs:
name: Publish Release
if: needs.prepare.outputs.publish_release == 'true'
needs: [prepare, verify-artifacts]
runs-on: [self-hosted, aws-india]
runs-on: [self-hosted, Linux, X64, aws-india, blacksmith-2vcpu-ubuntu-2404, hetzner]
timeout-minutes: 45
steps:
- uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4

102
.github/workflows/release-build.yml vendored Normal file
View File

@ -0,0 +1,102 @@
name: Production Release Build
on:
push:
branches: ["main"]
tags: ["v*"]
workflow_dispatch:
concurrency:
group: production-release-build-${{ github.ref || github.run_id }}
cancel-in-progress: false
permissions:
contents: read
env:
GIT_CONFIG_COUNT: "1"
GIT_CONFIG_KEY_0: core.hooksPath
GIT_CONFIG_VALUE_0: /dev/null
CARGO_TERM_COLOR: always
jobs:
build-and-test:
name: Build and Test (Linux x86_64)
runs-on: [self-hosted, Linux, X64, aws-india, blacksmith-2vcpu-ubuntu-2404, hetzner]
timeout-minutes: 120
steps:
- name: Checkout
uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
- name: Ensure C toolchain
shell: bash
run: bash ./scripts/ci/ensure_c_toolchain.sh
- name: Self-heal Rust toolchain cache
shell: bash
run: ./scripts/ci/self_heal_rust_toolchain.sh 1.92.0
- name: Setup Rust
uses: dtolnay/rust-toolchain@631a55b12751854ce901bb631d5902ceb48146f7 # stable
with:
toolchain: 1.92.0
components: rustfmt, clippy
- name: Ensure C toolchain for Rust builds
shell: bash
run: ./scripts/ci/ensure_cc.sh
- name: Ensure cargo component
shell: bash
env:
ENSURE_CARGO_COMPONENT_STRICT: "true"
run: bash ./scripts/ci/ensure_cargo_component.sh 1.92.0
- name: Ensure rustfmt and clippy components
shell: bash
run: rustup component add rustfmt clippy --toolchain 1.92.0
- name: Activate toolchain binaries on PATH
shell: bash
run: |
set -euo pipefail
toolchain_bin="$(dirname "$(rustup which --toolchain 1.92.0 cargo)")"
echo "$toolchain_bin" >> "$GITHUB_PATH"
- name: Cache Cargo registry and target
uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v3
with:
prefix-key: production-release-build
shared-key: ${{ runner.os }}-${{ hashFiles('Cargo.lock') }}
cache-targets: true
cache-bin: false
- name: Rust quality gates
shell: bash
run: |
set -euo pipefail
./scripts/ci/rust_quality_gate.sh
cargo test --locked --lib --bins --verbose
- name: Build production binary (canonical)
shell: bash
run: cargo build --release --locked
- name: Prepare artifact bundle
shell: bash
run: |
set -euo pipefail
mkdir -p artifacts
cp target/release/zeroclaw artifacts/zeroclaw
sha256sum artifacts/zeroclaw > artifacts/zeroclaw.sha256
- name: Upload production artifact
uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
with:
name: zeroclaw-linux-amd64
path: |
artifacts/zeroclaw
artifacts/zeroclaw.sha256
if-no-files-found: error
retention-days: 21

View File

@ -88,8 +88,8 @@ module.exports = async ({ github, context, core }) => {
blockingFindings.push(`Dangerous patch markers found (${dangerousProblems.length})`);
}
if (linearKeys.length === 0) {
blockingFindings.push(
"Missing Linear issue key reference (`RMN-<id>`, `CDV-<id>`, or `COM-<id>`) in PR title/body.",
advisoryFindings.push(
"Missing Linear issue key reference (`RMN-<id>`, `CDV-<id>`, or `COM-<id>`) in PR title/body (recommended for traceability, non-blocking).",
);
}
@ -156,7 +156,7 @@ module.exports = async ({ github, context, core }) => {
"",
"Action items:",
"1. Complete required PR template sections/fields.",
"2. Link this PR to exactly one active Linear issue key (`RMN-xxx`/`CDV-xxx`/`COM-xxx`).",
"2. (Recommended) Link this PR to one active Linear issue key (`RMN-xxx`/`CDV-xxx`/`COM-xxx`) for traceability.",
"3. Remove tabs, trailing whitespace, and merge conflict markers from added lines.",
"4. Re-run local checks before pushing:",
" - `./scripts/ci/rust_quality_gate.sh`",

View File

@ -15,6 +15,9 @@ on:
- ".github/security/unsafe-audit-governance.json"
- "scripts/ci/install_gitleaks.sh"
- "scripts/ci/install_syft.sh"
- "scripts/ci/ensure_c_toolchain.sh"
- "scripts/ci/ensure_cargo_component.sh"
- "scripts/ci/self_heal_rust_toolchain.sh"
- "scripts/ci/deny_policy_guard.py"
- "scripts/ci/secrets_governance_guard.py"
- "scripts/ci/unsafe_debt_audit.py"
@ -22,29 +25,12 @@ on:
- "scripts/ci/config/unsafe_debt_policy.toml"
- "scripts/ci/emit_audit_event.py"
- "scripts/ci/security_regression_tests.sh"
- "scripts/ci/ensure_cc.sh"
- ".github/workflows/sec-audit.yml"
pull_request:
branches: [dev, main]
paths:
- "Cargo.toml"
- "Cargo.lock"
- "src/**"
- "crates/**"
- "deny.toml"
- ".gitleaks.toml"
- ".github/security/gitleaks-allowlist-governance.json"
- ".github/security/deny-ignore-governance.json"
- ".github/security/unsafe-audit-governance.json"
- "scripts/ci/install_gitleaks.sh"
- "scripts/ci/install_syft.sh"
- "scripts/ci/deny_policy_guard.py"
- "scripts/ci/secrets_governance_guard.py"
- "scripts/ci/unsafe_debt_audit.py"
- "scripts/ci/unsafe_policy_guard.py"
- "scripts/ci/config/unsafe_debt_policy.toml"
- "scripts/ci/emit_audit_event.py"
- "scripts/ci/security_regression_tests.sh"
- ".github/workflows/sec-audit.yml"
# Do not gate pull_request by paths: main branch protection requires
# "Security Required Gate" to always report a status on PRs.
merge_group:
branches: [dev, main]
schedule:
@ -86,14 +72,34 @@ env:
jobs:
audit:
name: Security Audit
runs-on: [self-hosted, aws-india]
timeout-minutes: 20
runs-on: [self-hosted, Linux, X64, aws-india, blacksmith-2vcpu-ubuntu-2404, hetzner]
timeout-minutes: 45
env:
CARGO_HOME: ${{ github.workspace }}/.ci-rust/${{ github.run_id }}-${{ github.run_attempt }}-${{ github.job }}/cargo
RUSTUP_HOME: ${{ github.workspace }}/.ci-rust/${{ github.run_id }}-${{ github.run_attempt }}-${{ github.job }}/rustup
CARGO_TARGET_DIR: ${{ github.workspace }}/.ci-rust/${{ github.run_id }}-${{ github.run_attempt }}-${{ github.job }}/target
steps:
- uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
- name: Self-heal Rust toolchain cache
shell: bash
run: ./scripts/ci/self_heal_rust_toolchain.sh 1.92.0
- name: Ensure C toolchain
shell: bash
run: bash ./scripts/ci/ensure_c_toolchain.sh
- uses: dtolnay/rust-toolchain@631a55b12751854ce901bb631d5902ceb48146f7 # stable
with:
toolchain: 1.92.0
- name: Ensure C toolchain for Rust builds
run: ./scripts/ci/ensure_cc.sh
- name: Ensure cargo component
shell: bash
env:
ENSURE_CARGO_COMPONENT_STRICT: "true"
run: bash ./scripts/ci/ensure_cargo_component.sh 1.92.0
- uses: rustsec/audit-check@69366f33c96575abad1ee0dba8212993eecbe998 # v2.0.0
with:
@ -101,11 +107,28 @@ jobs:
deny:
name: License & Supply Chain
runs-on: [self-hosted, aws-india]
runs-on: [self-hosted, Linux, X64, aws-india, blacksmith-2vcpu-ubuntu-2404, hetzner]
timeout-minutes: 20
env:
CARGO_HOME: ${{ github.workspace }}/.ci-rust/${{ github.run_id }}-${{ github.run_attempt }}-${{ github.job }}/cargo
RUSTUP_HOME: ${{ github.workspace }}/.ci-rust/${{ github.run_id }}-${{ github.run_attempt }}-${{ github.job }}/rustup
CARGO_TARGET_DIR: ${{ github.workspace }}/.ci-rust/${{ github.run_id }}-${{ github.run_attempt }}-${{ github.job }}/target
steps:
- uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
- name: Ensure C toolchain
shell: bash
run: bash ./scripts/ci/ensure_c_toolchain.sh
- uses: dtolnay/rust-toolchain@631a55b12751854ce901bb631d5902ceb48146f7 # stable
with:
toolchain: 1.92.0
- name: Ensure cargo component
shell: bash
env:
ENSURE_CARGO_COMPONENT_STRICT: "true"
run: bash ./scripts/ci/ensure_cargo_component.sh 1.92.0
- name: Enforce deny policy hygiene
shell: bash
run: |
@ -118,9 +141,46 @@ jobs:
--output-md artifacts/deny-policy-guard.md \
--fail-on-violation
- uses: EmbarkStudios/cargo-deny-action@3fd3802e88374d3fe9159b834c7714ec57d6c979 # v2
with:
command: check advisories licenses sources
- name: Install cargo-deny
shell: bash
run: |
set -euo pipefail
version="0.19.0"
arch="$(uname -m)"
case "${arch}" in
x86_64|amd64)
target="x86_64-unknown-linux-musl"
expected_sha256="0e8c2aa59128612c90d9e09c02204e912f29a5b8d9a64671b94608cbe09e064f"
;;
aarch64|arm64)
target="aarch64-unknown-linux-musl"
expected_sha256="2b3567a60b7491c159d1cef8b7d8479d1ad2a31e29ef49462634ad4552fcc77d"
;;
*)
echo "Unsupported runner architecture for cargo-deny: ${arch}" >&2
exit 1
;;
esac
install_dir="${RUNNER_TEMP}/cargo-deny-${version}"
archive="${RUNNER_TEMP}/cargo-deny-${version}-${target}.tar.gz"
mkdir -p "${install_dir}"
curl --proto '=https' --tlsv1.2 --fail --location --silent --show-error \
--output "${archive}" \
"https://github.com/EmbarkStudios/cargo-deny/releases/download/${version}/cargo-deny-${version}-${target}.tar.gz"
actual_sha256="$(sha256sum "${archive}" | awk '{print $1}')"
if [ "${actual_sha256}" != "${expected_sha256}" ]; then
echo "Checksum mismatch for cargo-deny ${version} (${target})" >&2
echo "Expected: ${expected_sha256}" >&2
echo "Actual: ${actual_sha256}" >&2
exit 1
fi
tar -xzf "${archive}" -C "${install_dir}" --strip-components=1
echo "${install_dir}" >> "${GITHUB_PATH}"
"${install_dir}/cargo-deny" --version
- name: Run cargo-deny checks
shell: bash
run: cargo-deny check advisories licenses sources
- name: Emit deny audit event
if: always()
@ -156,23 +216,42 @@ jobs:
security-regressions:
name: Security Regression Tests
runs-on: [self-hosted, aws-india]
runs-on: [self-hosted, Linux, X64, aws-india, blacksmith-2vcpu-ubuntu-2404, hetzner]
timeout-minutes: 30
env:
CARGO_HOME: ${{ github.workspace }}/.ci-rust/${{ github.run_id }}-${{ github.run_attempt }}-${{ github.job }}/cargo
RUSTUP_HOME: ${{ github.workspace }}/.ci-rust/${{ github.run_id }}-${{ github.run_attempt }}-${{ github.job }}/rustup
CARGO_TARGET_DIR: ${{ github.workspace }}/.ci-rust/${{ github.run_id }}-${{ github.run_attempt }}-${{ github.job }}/target
steps:
- uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
- name: Ensure C toolchain
shell: bash
run: bash ./scripts/ci/ensure_c_toolchain.sh
- name: Self-heal Rust toolchain cache
shell: bash
run: ./scripts/ci/self_heal_rust_toolchain.sh 1.92.0
- uses: dtolnay/rust-toolchain@631a55b12751854ce901bb631d5902ceb48146f7 # stable
with:
toolchain: 1.92.0
- name: Ensure C toolchain for Rust builds
run: ./scripts/ci/ensure_cc.sh
- name: Ensure cargo component
shell: bash
env:
ENSURE_CARGO_COMPONENT_STRICT: "true"
run: bash ./scripts/ci/ensure_cargo_component.sh 1.92.0
- uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v3
with:
prefix-key: sec-audit-security-regressions
cache-bin: false
- name: Run security regression suite
shell: bash
run: ./scripts/ci/security_regression_tests.sh
secrets:
name: Secrets Governance (Gitleaks)
runs-on: [self-hosted, aws-india]
runs-on: [self-hosted, Linux, X64, aws-india, light, cpu40]
timeout-minutes: 20
steps:
- uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
@ -367,7 +446,7 @@ jobs:
sbom:
name: SBOM Snapshot
runs-on: [self-hosted, aws-india]
runs-on: [self-hosted, Linux, X64, aws-india, light, cpu40]
timeout-minutes: 20
steps:
- uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
@ -432,11 +511,17 @@ jobs:
unsafe-debt:
name: Unsafe Debt Audit
runs-on: [self-hosted, aws-india]
runs-on: [self-hosted, Linux, X64, aws-india, light, cpu40]
timeout-minutes: 20
steps:
- uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
- name: Setup Python 3.11
shell: bash
run: |
set -euo pipefail
python3 --version
- name: Enforce unsafe policy governance
shell: bash
run: |
@ -571,7 +656,7 @@ jobs:
name: Security Required Gate
if: always() && (github.event_name == 'pull_request' || github.event_name == 'push' || github.event_name == 'merge_group')
needs: [audit, deny, security-regressions, secrets, sbom, unsafe-debt]
runs-on: [self-hosted, aws-india]
runs-on: [self-hosted, Linux, X64, aws-india, light, cpu40]
steps:
- name: Enforce security gate
shell: bash

View File

@ -8,7 +8,11 @@ on:
- "Cargo.lock"
- "src/**"
- "crates/**"
- "scripts/ci/ensure_c_toolchain.sh"
- "scripts/ci/ensure_cargo_component.sh"
- ".github/codeql/**"
- "scripts/ci/self_heal_rust_toolchain.sh"
- "scripts/ci/ensure_cc.sh"
- ".github/workflows/sec-codeql.yml"
pull_request:
branches: [dev, main]
@ -17,7 +21,11 @@ on:
- "Cargo.lock"
- "src/**"
- "crates/**"
- "scripts/ci/ensure_c_toolchain.sh"
- "scripts/ci/ensure_cargo_component.sh"
- ".github/codeql/**"
- "scripts/ci/self_heal_rust_toolchain.sh"
- "scripts/ci/ensure_cc.sh"
- ".github/workflows/sec-codeql.yml"
merge_group:
branches: [dev, main]
@ -41,16 +49,46 @@ env:
jobs:
select-runner:
name: Select CodeQL Runner Lane
runs-on: [self-hosted, Linux, X64, aws-india, light, cpu40]
outputs:
labels: ${{ steps.lane.outputs.labels }}
lane: ${{ steps.lane.outputs.lane }}
steps:
- name: Resolve branch lane
id: lane
shell: bash
run: |
set -euo pipefail
branch="${GITHUB_HEAD_REF:-${GITHUB_REF_NAME}}"
if [[ "$branch" == release/* ]]; then
echo 'labels=["self-hosted","Linux","X64","hetzner","codeql"]' >> "$GITHUB_OUTPUT"
echo 'lane=release' >> "$GITHUB_OUTPUT"
else
echo 'labels=["self-hosted","Linux","X64","hetzner","codeql","codeql-general"]' >> "$GITHUB_OUTPUT"
echo 'lane=general' >> "$GITHUB_OUTPUT"
fi
codeql:
name: CodeQL Analysis
runs-on: [self-hosted, aws-india]
timeout-minutes: 60
needs: [select-runner]
runs-on: ${{ fromJSON(needs.select-runner.outputs.labels) }}
timeout-minutes: 120
env:
CARGO_HOME: ${{ github.workspace }}/.ci-rust/${{ github.run_id }}-${{ github.run_attempt }}-${{ github.job }}/cargo
RUSTUP_HOME: ${{ github.workspace }}/.ci-rust/${{ github.run_id }}-${{ github.run_attempt }}-${{ github.job }}/rustup
CARGO_TARGET_DIR: ${{ github.workspace }}/.ci-rust/${{ github.run_id }}-${{ github.run_attempt }}-${{ github.job }}/target
steps:
- name: Checkout repository
uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
with:
fetch-depth: 0
- name: Ensure C toolchain
shell: bash
run: bash ./scripts/ci/ensure_c_toolchain.sh
- name: Initialize CodeQL
uses: github/codeql-action/init@89a39a4e59826350b863aa6b6252a07ad50cf83e # v4
with:
@ -59,10 +97,26 @@ jobs:
queries: security-and-quality
- name: Set up Rust
shell: bash
run: ./scripts/ci/self_heal_rust_toolchain.sh 1.92.0
- name: Install Rust toolchain
uses: dtolnay/rust-toolchain@631a55b12751854ce901bb631d5902ceb48146f7 # stable
with:
toolchain: 1.92.0
- name: Ensure C toolchain for Rust builds
run: ./scripts/ci/ensure_cc.sh
- name: Ensure cargo component
shell: bash
run: bash ./scripts/ci/ensure_cargo_component.sh 1.92.0
- uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v3
with:
prefix-key: sec-codeql-build
cache-targets: true
cache-bin: false
- name: Build
run: cargo build --workspace --all-targets --locked
@ -70,3 +124,14 @@ jobs:
uses: github/codeql-action/analyze@89a39a4e59826350b863aa6b6252a07ad50cf83e # v4
with:
category: "/language:rust"
- name: Summarize lane
if: always()
shell: bash
run: |
{
echo "### CodeQL Runner Lane"
echo "- Branch: \`${GITHUB_HEAD_REF:-${GITHUB_REF_NAME}}\`"
echo "- Lane: \`${{ needs.select-runner.outputs.lane }}\`"
echo "- Labels: \`${{ needs.select-runner.outputs.labels }}\`"
} >> "$GITHUB_STEP_SUMMARY"

View File

@ -91,7 +91,7 @@ env:
jobs:
vorpal:
name: Vorpal Reviewdog Scan
runs-on: [self-hosted, aws-india]
runs-on: [self-hosted, Linux, X64, aws-india, blacksmith-2vcpu-ubuntu-2404, hetzner]
timeout-minutes: 20
steps:
- name: Checkout

View File

@ -17,7 +17,7 @@ permissions:
jobs:
update-notice:
name: Update NOTICE with new contributors
runs-on: ubuntu-22.04
runs-on: [self-hosted, Linux, X64, aws-india, light, cpu40]
timeout-minutes: 20
steps:
- name: Checkout repository

View File

@ -22,7 +22,7 @@ env:
jobs:
benchmarks:
name: Criterion Benchmarks
runs-on: [self-hosted, aws-india]
runs-on: [self-hosted, Linux, X64, aws-india, blacksmith-2vcpu-ubuntu-2404, hetzner]
timeout-minutes: 30
steps:
- uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4

View File

@ -10,11 +10,12 @@ on:
- "crates/**"
- "tests/**"
- "scripts/**"
- "scripts/ci/ensure_cc.sh"
- ".github/workflows/test-e2e.yml"
workflow_dispatch:
concurrency:
group: e2e-${{ github.event.pull_request.number || github.sha }}
group: test-e2e-${{ github.event_name }}-${{ github.event.pull_request.number || github.ref_name || github.sha }}
cancel-in-progress: true
permissions:
@ -29,13 +30,37 @@ env:
jobs:
integration-tests:
name: Integration / E2E Tests
runs-on: [self-hosted, aws-india]
runs-on: [self-hosted, Linux, X64, aws-india, blacksmith-2vcpu-ubuntu-2404, hetzner]
timeout-minutes: 30
steps:
- uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
- uses: dtolnay/rust-toolchain@631a55b12751854ce901bb631d5902ceb48146f7 # stable
with:
toolchain: 1.92.0
- name: Ensure cargo component
shell: bash
env:
ENSURE_CARGO_COMPONENT_STRICT: "true"
run: bash ./scripts/ci/ensure_cargo_component.sh 1.92.0
- name: Ensure C toolchain for Rust builds
run: ./scripts/ci/ensure_cc.sh
- uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v3
- name: Runner preflight (compiler + disk)
shell: bash
run: |
set -euo pipefail
echo "Runner: ${RUNNER_NAME:-unknown} (${RUNNER_OS:-unknown}/${RUNNER_ARCH:-unknown})"
if ! command -v cc >/dev/null 2>&1; then
echo "::error::Missing 'cc' compiler on runner. Install build-essential (Debian/Ubuntu) or equivalent."
exit 1
fi
cc --version | head -n1
free_kb="$(df -Pk . | awk 'NR==2 {print $4}')"
min_kb=$((10 * 1024 * 1024))
if [ "${free_kb}" -lt "${min_kb}" ]; then
echo "::error::Insufficient disk space on runner (<10 GiB free)."
df -h .
exit 1
fi
- name: Run integration / E2E tests
run: cargo test --test agent_e2e --locked --verbose

View File

@ -27,7 +27,7 @@ env:
jobs:
fuzz:
name: Fuzz (${{ matrix.target }})
runs-on: [self-hosted, aws-india]
runs-on: [self-hosted, Linux, X64, aws-india, blacksmith-2vcpu-ubuntu-2404, hetzner]
timeout-minutes: 60
strategy:
fail-fast: false

View File

@ -2,15 +2,89 @@ name: Test Self-Hosted Runner
on:
workflow_dispatch:
schedule:
- cron: "30 2 * * *"
permissions:
contents: read
jobs:
test-runner:
runs-on: self-hosted
runner-health:
name: Runner Health / self-hosted aws-india
runs-on: [self-hosted, Linux, X64, aws-india, blacksmith-2vcpu-ubuntu-2404, hetzner]
timeout-minutes: 10
steps:
- name: Check runner info
run: |
echo "Runner: $(hostname)"
echo "OS: $(uname -a)"
echo "Docker: $(docker --version)"
echo "User: $(whoami)"
if command -v rustc >/dev/null 2>&1; then
echo "Rust: $(rustc --version)"
else
echo "Rust: <not installed>"
fi
if command -v cargo >/dev/null 2>&1; then
echo "Cargo: $(cargo --version)"
else
echo "Cargo: <not installed>"
fi
if command -v cc >/dev/null 2>&1; then
echo "CC: $(cc --version | head -n1)"
else
echo "CC: <not installed>"
fi
if command -v gcc >/dev/null 2>&1; then
echo "GCC: $(gcc --version | head -n1)"
else
echo "GCC: <not installed>"
fi
if command -v clang >/dev/null 2>&1; then
echo "Clang: $(clang --version | head -n1)"
else
echo "Clang: <not installed>"
fi
if command -v docker >/dev/null 2>&1; then
echo "Docker: $(docker --version)"
else
echo "Docker: <not installed>"
fi
- name: Verify compiler + disk prerequisites
shell: bash
run: |
set -euo pipefail
failed=0
if ! command -v cc >/dev/null 2>&1; then
echo "::error::Missing 'cc'. Install build-essential (or gcc/clang + symlink)."
failed=1
fi
free_kb="$(df -Pk . | awk 'NR==2 {print $4}')"
min_kb=$((10 * 1024 * 1024))
if [ "${free_kb}" -lt "${min_kb}" ]; then
echo "::error::Disk free below 10 GiB; clean runner workspace/cache."
df -h .
failed=1
fi
inode_used_pct="$(df -Pi . | awk 'NR==2 {gsub(/%/, "", $5); print $5}')"
if [ "${inode_used_pct}" -ge 95 ]; then
echo "::error::Inode usage >=95%; clean files to avoid ENOSPC."
df -i .
failed=1
fi
if [ "${failed}" -ne 0 ]; then
exit 1
fi
- name: Test Docker
run: docker run --rm hello-world
shell: bash
run: |
set -euo pipefail
if ! command -v docker >/dev/null 2>&1; then
echo "::notice::Docker is not installed on this self-hosted runner. Skipping docker smoke test."
exit 0
fi
docker run --rm hello-world

View File

@ -28,7 +28,7 @@ env:
jobs:
no-tabs:
runs-on: ubuntu-22.04
runs-on: [self-hosted, Linux, X64, aws-india, light, cpu40]
timeout-minutes: 10
steps:
- name: Normalize git global hooks config
@ -67,7 +67,7 @@ jobs:
PY
actionlint:
runs-on: ubuntu-22.04
runs-on: [self-hosted, Linux, X64, aws-india, light, cpu40]
timeout-minutes: 10
steps:
- name: Normalize git global hooks config

8
.gitignore vendored
View File

@ -1,4 +1,6 @@
/target
/target_ci
/target_review*
firmware/*/target
*.db
*.db-journal
@ -12,7 +14,9 @@ site/node_modules/
site/.vite/
site/public/docs-content/
gh-pages/
.idea
.claude
# Environment files (may contain secrets)
.env
@ -30,10 +34,12 @@ venv/
# Secret keys and credentials
.secret_key
otp-secret
*.key
*.pem
credentials.json
/config.toml
.worktrees/
# Nix
result
result

View File

@ -3,6 +3,22 @@
This file defines the default working protocol for coding agents in this repository.
Scope: entire repository.
## 0) Session Default Target (Mandatory)
- When operator intent does not explicitly specify another repository/path, treat the active coding target as this repository (`/home/ubuntu/zeroclaw`).
- Do not switch to or implement in other repositories unless the operator explicitly requests that scope in the current conversation.
- Ambiguous wording (for example "这个仓库", "当前项目", "the repo") is resolved to `/home/ubuntu/zeroclaw` by default.
- Context mentioning external repositories does not authorize cross-repo edits; explicit current-turn override is required.
- Before any repo-affecting action, verify target lock (`pwd` + git root) to prevent accidental execution in sibling repositories.
## 0.1) Clean Worktree First Gate (Mandatory)
- Before handling any repository content (analysis, debugging, coding, tests, docs, CI), create a **new clean dedicated git worktree** for the active task.
- Do not perform substantive task work in a dirty workspace.
- Do not reuse a previously dirty worktree for a new task track.
- If the current location is dirty, stop and bootstrap a clean worktree/branch first.
- If worktree bootstrap fails, stop and report the blocker; do not continue in-place.
## 1) Project Snapshot (Read First)
ZeroClaw is a Rust-first autonomous agent runtime optimized for:

View File

@ -18,6 +18,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
value if the input used the legacy `enc:` format
- `SecretStore::needs_migration()` — Check if a value uses the legacy `enc:` format
- `SecretStore::is_secure_encrypted()` — Check if a value uses the secure `enc2:` format
- `feishu_doc` tool — Feishu/Lark document operations (`read`, `write`, `append`, `create`, `list_blocks`, `get_block`, `update_block`, `delete_block`, `create_table`, `write_table_cells`, `create_table_with_values`, `upload_image`, `upload_file`)
- Agent session persistence guidance now includes explicit backend/strategy/TTL key names for rollout notes.
- **Telegram mention_only mode** — New config option `mention_only` for Telegram channel.
When enabled, bot only responds to messages that @-mention the bot in group chats.
Direct messages always work regardless of this setting. Default: `false`.
@ -65,4 +67,4 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- Workspace escape prevention
- Forbidden system path protection (`/etc`, `/root`, `~/.ssh`)
[0.1.0]: https://github.com/theonlyhennygod/zeroclaw/releases/tag/v0.1.0
[0.1.0]: https://github.com/zeroclaw-labs/zeroclaw/releases/tag/v0.1.0

1158
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@ -1,11 +1,17 @@
[workspace]
members = [".", "crates/robot-kit"]
members = [
".",
"crates/robot-kit",
"crates/zeroclaw-types",
"crates/zeroclaw-core",
]
resolver = "2"
[package]
name = "zeroclaw"
version = "0.1.7"
version = "0.1.8"
edition = "2021"
build = "build.rs"
authors = ["theonlyhennygod"]
license = "MIT OR Apache-2.0"
description = "Zero overhead. Zero compromise. 100% Rust. The fastest, smallest AI assistant."
@ -34,7 +40,6 @@ matrix-sdk = { version = "0.16", optional = true, default-features = false, feat
# Serialization
serde = { version = "1.0", default-features = false, features = ["derive"] }
serde_json = { version = "1.0", default-features = false, features = ["std"] }
serde_ignored = "0.1"
# Config
directories = "6.0"
@ -58,8 +63,9 @@ image = { version = "0.25", default-features = false, features = ["jpeg", "png"]
# URL encoding for web search
urlencoding = "2.1"
# HTML to plain text conversion (web_fetch tool)
# HTML to plain text / markdown conversion (web_fetch tool)
nanohtml2text = "0.2"
html2md = { package = "fast_html2md", version = "0.0.58", optional = true }
# Zip archive extraction
zip = { version = "8.1", default-features = false, features = ["deflate"] }
@ -119,6 +125,8 @@ cron = "0.15"
dialoguer = { version = "0.12", features = ["fuzzy-select"] }
rustyline = "17.0"
console = "0.16"
crossterm = "0.29"
ratatui = { version = "0.29", default-features = false, features = ["crossterm"] }
# Hardware discovery (device path globbing)
glob = "0.3"
@ -163,6 +171,11 @@ opentelemetry = { version = "0.31", default-features = false, features = ["trace
opentelemetry_sdk = { version = "0.31", default-features = false, features = ["trace", "metrics"], optional = true }
opentelemetry-otlp = { version = "0.31", default-features = false, features = ["trace", "metrics", "http-proto", "reqwest-blocking-client", "reqwest-rustls-webpki-roots"], optional = true }
# WASM runtime for plugin execution
# Keep this on a RustSec-patched line that remains compatible with the
# workspace rust-version = "1.87".
wasmtime = { version = "36.0.6", default-features = false, features = ["runtime", "cranelift"] }
# Serial port for peripheral communication (STM32, etc.)
tokio-serial = { version = "5", default-features = false, optional = true }
@ -180,8 +193,7 @@ tempfile = "3.14"
# WASM plugin runtime (optional, enable with --features wasm-tools)
# Uses WASI stdio protocol — tools read JSON from stdin, write JSON to stdout.
wasmtime = { version = "24.0.6", optional = true, default-features = false, features = ["cranelift", "runtime"] }
wasmtime-wasi = { version = "24.0.6", optional = true, default-features = false, features = ["preview1"] }
wasmtime-wasi = { version = "36.0.6", optional = true, default-features = false, features = ["preview1"] }
# Terminal QR rendering for WhatsApp Web pairing flow.
qrcode = { version = "0.14", optional = true }
@ -205,9 +217,8 @@ landlock = { version = "0.4", optional = true }
libc = "0.2"
[features]
# Default enables wasm-tools where platform runtime dependencies are available.
# Unsupported targets (for example Android/Termux) use a stub implementation.
default = ["wasm-tools"]
# Keep default minimal for widest host compatibility (including macOS 10.15).
default = []
hardware = ["nusb", "tokio-serial"]
channel-matrix = ["dep:matrix-sdk"]
channel-lark = ["dep:prost"]
@ -231,13 +242,13 @@ probe = ["dep:probe-rs"]
rag-pdf = ["dep:pdf-extract"]
# wasm-tools = WASM plugin engine for dynamically-loaded tool packages (WASI stdio protocol)
# Runtime implementation is active on Linux/macOS/Windows; unsupported targets use stubs.
wasm-tools = ["dep:wasmtime", "dep:wasmtime-wasi"]
wasm-tools = ["dep:wasmtime-wasi"]
# whatsapp-web = Native WhatsApp Web client with custom rusqlite storage backend
whatsapp-web = ["dep:wa-rs", "dep:wa-rs-core", "dep:wa-rs-binary", "dep:wa-rs-proto", "dep:wa-rs-ureq-http", "dep:wa-rs-tokio-transport", "dep:serde-big-array", "dep:prost", "dep:qrcode"]
# Optional provider feature flags used by cfg(feature = "...") guards.
# Keep disabled by default to preserve current runtime behavior.
firecrawl = []
web-fetch-html2md = []
web-fetch-html2md = ["dep:html2md"]
[profile.release]
opt-level = "z" # Optimize for size
@ -249,8 +260,9 @@ panic = "abort" # Reduce binary size
[profile.release-fast]
inherits = "release"
codegen-units = 8 # Parallel codegen for faster builds on powerful machines (16GB+ RAM recommended)
# Use: cargo build --profile release-fast
# Keep release-fast under CI binary size safeguard (20MB hard gate).
# Using 1 codegen unit preserves release-level size characteristics.
codegen-units = 1
[profile.dist]
inherits = "release"

View File

@ -5,31 +5,40 @@ FROM rust:1.93-slim@sha256:7e6fa79cf81be23fd45d857f75f583d80cfdbb11c91fa06180fd7
WORKDIR /app
ARG ZEROCLAW_CARGO_FEATURES=""
ARG ZEROCLAW_CARGO_ALL_FEATURES="false"
# Install build dependencies
RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
apt-get update && apt-get install -y \
libudev-dev \
pkg-config \
&& rm -rf /var/lib/apt/lists/*
# 1. Copy manifests to cache dependencies
COPY Cargo.toml Cargo.lock ./
COPY build.rs build.rs
COPY crates/robot-kit/Cargo.toml crates/robot-kit/Cargo.toml
COPY crates/zeroclaw-types/Cargo.toml crates/zeroclaw-types/Cargo.toml
COPY crates/zeroclaw-core/Cargo.toml crates/zeroclaw-core/Cargo.toml
# Create dummy targets declared in Cargo.toml so manifest parsing succeeds.
RUN mkdir -p src benches crates/robot-kit/src \
RUN mkdir -p src benches crates/robot-kit/src crates/zeroclaw-types/src crates/zeroclaw-core/src \
&& echo "fn main() {}" > src/main.rs \
&& echo "fn main() {}" > benches/agent_benchmarks.rs \
&& echo "pub fn placeholder() {}" > crates/robot-kit/src/lib.rs
&& echo "pub fn placeholder() {}" > crates/robot-kit/src/lib.rs \
&& echo "pub fn placeholder() {}" > crates/zeroclaw-types/src/lib.rs \
&& echo "pub fn placeholder() {}" > crates/zeroclaw-core/src/lib.rs
RUN --mount=type=cache,id=zeroclaw-cargo-registry,target=/usr/local/cargo/registry,sharing=locked \
--mount=type=cache,id=zeroclaw-cargo-git,target=/usr/local/cargo/git,sharing=locked \
--mount=type=cache,id=zeroclaw-target,target=/app/target,sharing=locked \
if [ -n "$ZEROCLAW_CARGO_FEATURES" ]; then \
cargo build --release --features "$ZEROCLAW_CARGO_FEATURES"; \
if [ "$ZEROCLAW_CARGO_ALL_FEATURES" = "true" ]; then \
cargo build --release --locked --all-features; \
elif [ -n "$ZEROCLAW_CARGO_FEATURES" ]; then \
cargo build --release --locked --features "$ZEROCLAW_CARGO_FEATURES"; \
else \
cargo build --release --locked; \
fi
RUN rm -rf src benches crates/robot-kit/src
RUN rm -rf src benches crates/robot-kit/src crates/zeroclaw-types/src crates/zeroclaw-core/src
# 2. Copy only build-relevant source paths (avoid cache-busting on docs/tests/scripts)
COPY src/ src/
@ -58,8 +67,10 @@ RUN mkdir -p web/dist && \
RUN --mount=type=cache,id=zeroclaw-cargo-registry,target=/usr/local/cargo/registry,sharing=locked \
--mount=type=cache,id=zeroclaw-cargo-git,target=/usr/local/cargo/git,sharing=locked \
--mount=type=cache,id=zeroclaw-target,target=/app/target,sharing=locked \
if [ -n "$ZEROCLAW_CARGO_FEATURES" ]; then \
cargo build --release --features "$ZEROCLAW_CARGO_FEATURES"; \
if [ "$ZEROCLAW_CARGO_ALL_FEATURES" = "true" ]; then \
cargo build --release --locked --all-features; \
elif [ -n "$ZEROCLAW_CARGO_FEATURES" ]; then \
cargo build --release --locked --features "$ZEROCLAW_CARGO_FEATURES"; \
else \
cargo build --release --locked; \
fi && \

View File

@ -25,12 +25,12 @@ Built by students and members of the Harvard, MIT, and Sundai.Club communities.
</p>
<p align="center">
🌐 <strong>Languages:</strong> <a href="README.md">English</a> · <a href="docs/i18n/zh-CN/README.md">简体中文</a> · <a href="docs/i18n/ja/README.md">日本語</a> · <a href="docs/i18n/ru/README.md">Русский</a> · <a href="docs/i18n/fr/README.md">Français</a> · <a href="docs/i18n/vi/README.md">Tiếng Việt</a> · <a href="docs/i18n/el/README.md">Ελληνικά</a>
🌐 <strong>Languages:</strong> <a href="README.md">English</a> · <a href="docs/i18n/zh-CN/README.md">简体中文</a> · <a href="docs/i18n/es/README.md">Español</a> · <a href="docs/i18n/pt/README.md">Português</a> · <a href="docs/i18n/it/README.md">Italiano</a> · <a href="docs/i18n/ja/README.md">日本語</a> · <a href="docs/i18n/ru/README.md">Русский</a> · <a href="docs/i18n/fr/README.md">Français</a> · <a href="docs/i18n/vi/README.md">Tiếng Việt</a> · <a href="docs/i18n/el/README.md">Ελληνικά</a>
</p>
<p align="center">
<a href="#quick-start">Getting Started</a> |
<a href="bootstrap.sh">One-Click Setup</a> |
<a href="docs/one-click-bootstrap.md">One-Click Setup</a> |
<a href="docs/README.md">Docs Hub</a> |
<a href="docs/SUMMARY.md">Docs TOC</a>
</p>
@ -46,12 +46,12 @@ Built by students and members of the Harvard, MIT, and Sundai.Club communities.
</p>
<p align="center">
<strong>Fast, small, and fully autonomous Operating System</strong><br />
<strong>Fast, small, and fully autonomous Framework</strong><br />
Deploy anywhere. Swap anything.
</p>
<p align="center">
ZeroClaw is the <strong>runtime operating system</strong> for agentic workflows — infrastructure that abstracts models, tools, memory, and execution so agents can be built once and run anywhere.
ZeroClaw is the <strong>runtime framework</strong> for agentic workflows — infrastructure that abstracts models, tools, memory, and execution so agents can be built once and run anywhere.
</p>
<p align="center"><code>Trait-driven architecture · secure-by-default runtime · provider/channel/tool swappable · pluggable everything</code></p>
@ -83,6 +83,12 @@ Use this board for important notices (breaking changes, security advisories, mai
## Quick Start
### Option 0: One-line Installer (Default TUI Onboarding)
```bash
curl -fsSL https://zeroclawlabs.ai/install.sh | bash
```
### Option 1: Homebrew (macOS/Linuxbrew)
```bash
@ -108,11 +114,11 @@ cargo install zeroclaw
### First Run
```bash
# Start the gateway daemon
zeroclaw gateway start
# Start the gateway (serves the Web Dashboard API/UI)
zeroclaw gateway
# Open the web UI
zeroclaw dashboard
# Open the dashboard URL shown in startup logs
# (default: http://127.0.0.1:3000/)
# Or chat directly
zeroclaw chat "Hello!"
@ -120,6 +126,16 @@ zeroclaw chat "Hello!"
For detailed setup options, see [docs/one-click-bootstrap.md](docs/one-click-bootstrap.md).
### Installation Docs (Canonical Source)
Use repository docs as the source of truth for install/setup instructions:
- [README Quick Start](#quick-start)
- [docs/one-click-bootstrap.md](docs/one-click-bootstrap.md)
- [docs/getting-started/README.md](docs/getting-started/README.md)
Issue comments can provide context, but they are not canonical installation documentation.
## Benchmark Snapshot (ZeroClaw vs OpenClaw, Reproducible)
Local machine quick benchmark (macOS arm64, Feb 2026) normalized for 0.8GHz edge hardware.

View File

@ -13,6 +13,8 @@
cargo test telegram --lib
```
Toolchain note: CI/release metadata is aligned with Rust `1.88`; use the same stable toolchain when reproducing release-facing checks locally.
## 📝 What Was Created For You
### 1. **test_telegram_integration.sh** (Main Test Suite)
@ -298,6 +300,6 @@ If all tests pass:
## 📞 Support
- Issues: https://github.com/theonlyhennygod/zeroclaw/issues
- Issues: https://github.com/zeroclaw-labs/zeroclaw/issues
- Docs: `./TESTING_TELEGRAM.md`
- Help: `zeroclaw --help`

View File

@ -115,6 +115,9 @@ After running automated tests, perform these manual checks:
- Send message with @botname mention
- Verify: Bot responds and mention is stripped
- DM/private chat should always work regardless of mention_only
- Regression check (group non-text): verify group media without mention does not trigger bot reply
- Regression command:
`cargo test -q telegram_mention_only_group_photo_without_caption_is_ignored`
6. **Error logging**
@ -349,4 +352,4 @@ zeroclaw channel doctor
- [Telegram Bot API Documentation](https://core.telegram.org/bots/api)
- [ZeroClaw Main README](README.md)
- [Contributing Guide](CONTRIBUTING.md)
- [Issue Tracker](https://github.com/theonlyhennygod/zeroclaw/issues)
- [Issue Tracker](https://github.com/zeroclaw-labs/zeroclaw/issues)

View File

@ -42,6 +42,8 @@ impl BenchProvider {
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: None,
raw_stop_reason: None,
}]),
}
}
@ -59,6 +61,8 @@ impl BenchProvider {
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: None,
raw_stop_reason: None,
},
ChatResponse {
text: Some("done".into()),
@ -66,6 +70,8 @@ impl BenchProvider {
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: None,
raw_stop_reason: None,
},
]),
}
@ -98,6 +104,8 @@ impl Provider for BenchProvider {
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: None,
raw_stop_reason: None,
});
}
Ok(guard.remove(0))
@ -166,6 +174,8 @@ Let me know if you need more."#
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: None,
raw_stop_reason: None,
};
let multi_tool = ChatResponse {
@ -185,6 +195,8 @@ Let me know if you need more."#
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: None,
raw_stop_reason: None,
};
c.bench_function("xml_parse_single_tool_call", |b| {
@ -220,6 +232,8 @@ fn bench_native_parsing(c: &mut Criterion) {
usage: None,
reasoning_content: None,
quota_metadata: None,
stop_reason: None,
raw_stop_reason: None,
};
c.bench_function("native_parse_tool_calls", |b| {

80
build.rs Normal file
View File

@ -0,0 +1,80 @@
use std::env;
use std::path::PathBuf;
use std::process::Command;
/// Resolve the short git commit hash for the repository at `manifest_dir`.
///
/// Returns `None` when git is unavailable, the command exits unsuccessfully,
/// its output is not valid UTF-8, or the trimmed result is empty.
fn git_short_sha(manifest_dir: &str) -> Option<String> {
    let result = Command::new("git")
        .args(["rev-parse", "--short", "HEAD"])
        .current_dir(manifest_dir)
        .output()
        .ok()?;
    if !result.status.success() {
        return None;
    }
    let text = String::from_utf8(result.stdout).ok()?;
    let sha = text.trim();
    // Treat an empty (whitespace-only) answer the same as a failure.
    (!sha.is_empty()).then(|| sha.to_string())
}
/// Emit `cargo:rerun-if-changed` hints for the git `HEAD` and `refs` paths,
/// so the build script reruns (and the embedded commit hash refreshes) when
/// the checked-out commit changes.
///
/// Silently does nothing when git is missing, the command fails, or its
/// output is unusable — a missing hint only means fewer rebuild triggers.
fn emit_git_rerun_hints(manifest_dir: &str) {
    let probe = Command::new("git")
        .args(["rev-parse", "--git-dir"])
        .current_dir(manifest_dir)
        .output();
    let raw = match probe {
        Ok(out) if out.status.success() => match String::from_utf8(out.stdout) {
            Ok(text) => text,
            Err(_) => return,
        },
        _ => return,
    };
    let trimmed = raw.trim();
    if trimmed.is_empty() {
        return;
    }
    // `git rev-parse --git-dir` may report a relative path; anchor it to the
    // manifest directory so the rerun hints reference real filesystem paths.
    let mut git_dir = PathBuf::from(trimmed);
    if !git_dir.is_absolute() {
        git_dir = PathBuf::from(manifest_dir).join(git_dir);
    }
    println!("cargo:rerun-if-changed={}", git_dir.join("HEAD").display());
    println!("cargo:rerun-if-changed={}", git_dir.join("refs").display());
}
/// Build-script entry point: computes a human-readable build version string
/// (`<pkg-version> (<short-sha>)` when a commit hash is known) and exposes it
/// to the crate via the `ZEROCLAW_BUILD_VERSION` and `ZEROCLAW_GIT_SHORT_SHA`
/// compile-time environment variables.
fn main() {
    println!("cargo:rerun-if-changed=build.rs");
    println!("cargo:rerun-if-env-changed=ZEROCLAW_GIT_SHORT_SHA");

    let manifest_dir = env::var("CARGO_MANIFEST_DIR").unwrap_or_else(|_| ".".to_string());
    emit_git_rerun_hints(&manifest_dir);

    let package_version = env::var("CARGO_PKG_VERSION").unwrap_or_else(|_| "0.0.0".to_string());

    // A non-empty ZEROCLAW_GIT_SHORT_SHA env override wins over asking git,
    // so release pipelines can inject the hash without a full checkout.
    let short_sha = env::var("ZEROCLAW_GIT_SHORT_SHA")
        .ok()
        .filter(|v| !v.trim().is_empty())
        .or_else(|| git_short_sha(&manifest_dir));

    let build_version = match short_sha.as_deref() {
        Some(sha) => format!("{package_version} ({sha})"),
        None => package_version,
    };
    println!("cargo:rustc-env=ZEROCLAW_BUILD_VERSION={build_version}");
    println!(
        "cargo:rustc-env=ZEROCLAW_GIT_SHORT_SHA={}",
        short_sha.unwrap_or_default()
    );
}

View File

@ -171,7 +171,7 @@ sudo usermod -aG dialout $USER
```bash
# Clone repo (or copy from USB)
git clone https://github.com/theonlyhennygod/zeroclaw
git clone https://github.com/zeroclaw-labs/zeroclaw
cd zeroclaw
# Build robot kit

View File

@ -0,0 +1,12 @@
[package]
name = "zeroclaw-core"
version = "0.1.0"
edition = "2021"
license = "MIT OR Apache-2.0"
description = "Core contracts and boundaries for staged multi-crate extraction."
[lib]
path = "src/lib.rs"
[dependencies]
zeroclaw-types = { path = "../zeroclaw-types" }

View File

@ -0,0 +1,8 @@
#![forbid(unsafe_code)]

//! Core contracts for the staged workspace split.
//!
//! This crate is intentionally minimal in PR-1 (scaffolding only).
//! Its single item exists to exercise the dependency edge to
//! `zeroclaw-types` in workspace-wide builds and checks.

/// Marker constant proving dependency linkage to `zeroclaw-types`.
/// Re-exports the value of `zeroclaw_types::CRATE_ID` ("zeroclaw-types").
pub const CORE_CRATE_ID: &str = zeroclaw_types::CRATE_ID;

View File

@ -0,0 +1,9 @@
[package]
name = "zeroclaw-types"
version = "0.1.0"
edition = "2021"
license = "MIT OR Apache-2.0"
description = "Foundational shared types for staged multi-crate extraction."
[lib]
path = "src/lib.rs"

View File

@ -0,0 +1,8 @@
#![forbid(unsafe_code)]

//! Shared foundational types for the staged workspace split.
//!
//! This crate is intentionally minimal in PR-1 (scaffolding only).
//! Downstream crates (e.g. `zeroclaw-core`) depend on it to validate
//! workspace wiring before real types are extracted here.

/// Marker constant proving the crate is linked in workspace checks.
/// The value matches the package name declared in this crate's Cargo.toml.
pub const CRATE_ID: &str = "zeroclaw-types";

View File

@ -29,6 +29,8 @@ Localized hubs: [简体中文](i18n/zh-CN/README.md) · [日本語](i18n/ja/READ
| See project PR/issue docs snapshot | [project-triage-snapshot-2026-02-18.md](project-triage-snapshot-2026-02-18.md) |
| Perform i18n completion for docs changes | [i18n-guide.md](i18n-guide.md) |
Installation source-of-truth: keep install/run instructions in repository docs and README pages; issue comments are supplemental context only.
## Quick Decision Tree (10 seconds)
- Need first-time setup or install? → [getting-started/README.md](getting-started/README.md)

View File

@ -94,6 +94,7 @@ Last refreshed: **February 28, 2026**.
- [pr-workflow.md](pr-workflow.md)
- [reviewer-playbook.md](reviewer-playbook.md)
- [ci-map.md](ci-map.md)
- [ci-blacksmith.md](ci-blacksmith.md)
- [actions-source-policy.md](actions-source-policy.md)
- [cargo-slicer-speedup.md](cargo-slicer-speedup.md)
@ -111,5 +112,7 @@ Last refreshed: **February 28, 2026**.
- [project-triage-snapshot-2026-02-18.md](project-triage-snapshot-2026-02-18.md)
- [docs-audit-2026-02-24.md](docs-audit-2026-02-24.md)
- [project/m4-5-rfi-spike-2026-02-28.md](project/m4-5-rfi-spike-2026-02-28.md)
- [project/f1-3-agent-lifecycle-state-machine-rfi-2026-03-01.md](project/f1-3-agent-lifecycle-state-machine-rfi-2026-03-01.md)
- [project/q0-3-stop-reason-state-machine-rfi-2026-03-01.md](project/q0-3-stop-reason-state-machine-rfi-2026-03-01.md)
- [i18n-gap-backlog.md](i18n-gap-backlog.md)
- [docs-inventory.md](docs-inventory.md)

View File

@ -66,7 +66,7 @@ sudo apt-get update
sudo apt-get install -y pkg-config libssl-dev
# Clone zeroclaw (or scp your project)
git clone https://github.com/theonlyhennygod/zeroclaw.git
git clone https://github.com/zeroclaw-labs/zeroclaw.git
cd zeroclaw
# Build (takes ~15–30 min on Uno Q)
@ -199,7 +199,7 @@ Now when you message your Telegram bot *"Turn on the LED"* or *"Set pin 13 high"
| 2 | `ssh arduino@<IP>` |
| 3 | `curl -sSf https://sh.rustup.rs \| sh -s -- -y && source ~/.cargo/env` |
| 4 | `sudo apt-get install -y pkg-config libssl-dev` |
| 5 | `git clone https://github.com/theonlyhennygod/zeroclaw.git && cd zeroclaw` |
| 5 | `git clone https://github.com/zeroclaw-labs/zeroclaw.git && cd zeroclaw` |
| 6 | `cargo build --release --features hardware` |
| 7 | `zeroclaw onboard --api-key KEY --provider openrouter` |
| 8 | Edit `~/.zeroclaw/config.toml` (add Telegram bot_token) |

View File

@ -56,6 +56,8 @@ Telegram/Discord sender-scoped model routing:
Supervised tool approvals (all non-CLI channels):
- `/approve-request <tool-name>` — create a pending approval request
- `/approve-confirm <request-id>` — confirm pending request (same sender + same chat/channel only)
- `/approve-allow <request-id>` — approve the current pending runtime execution request once (no policy persistence)
- `/approve-deny <request-id>` — deny the current pending runtime execution request
- `/approve-pending` — list pending requests for your current sender+chat/channel scope
- `/approve <tool-name>` — direct one-step approve + persist (`autonomy.auto_approve`, compatibility path)
- `/unapprove <tool-name>` — revoke and remove persisted approval
@ -76,6 +78,7 @@ Notes:
- You can restrict who can use approval-management commands via `[autonomy].non_cli_approval_approvers`.
- Configure natural-language approval mode via `[autonomy].non_cli_natural_language_approval_mode`.
- `autonomy.non_cli_excluded_tools` is reloaded from `config.toml` at runtime; `/approvals` shows the currently effective list.
- Default non-CLI exclusions include both `shell` and `process`; remove `process` from `[autonomy].non_cli_excluded_tools` only when you explicitly want background command execution in chat channels.
- Each incoming message injects a runtime tool-availability snapshot into the system prompt, derived from the same exclusion policy used by execution.
## Inbound Image Marker Protocol
@ -145,6 +148,7 @@ If `[channels_config.matrix]`, `[channels_config.lark]`, or `[channels_config.fe
| QQ | bot gateway | No |
| Napcat | websocket receive + HTTP send (OneBot) | No (typically local/LAN) |
| Linq | webhook (`/linq`) | Yes (public HTTPS callback) |
| WATI | webhook (`/wati`) | Yes (public HTTPS callback) |
| iMessage | local integration | No |
| ACP | stdio (JSON-RPC 2.0) | No |
| Nostr | relay websocket (NIP-04 / NIP-17) | No |
@ -163,7 +167,7 @@ Field names differ by channel:
- `allowed_users` (Telegram/Discord/Slack/Mattermost/Matrix/IRC/Lark/Feishu/DingTalk/QQ/Napcat/Nextcloud Talk/ACP)
- `allowed_from` (Signal)
- `allowed_numbers` (WhatsApp)
- `allowed_numbers` (WhatsApp/WATI)
- `allowed_senders` (Email/Linq)
- `allowed_contacts` (iMessage)
- `allowed_pubkeys` (Nostr)
@ -199,7 +203,7 @@ allowed_sender_ids = ["123456789", "987"] # optional; "*" allowed
[channels_config.telegram]
bot_token = "123456:telegram-token"
allowed_users = ["*"]
stream_mode = "off" # optional: off | partial
stream_mode = "off" # optional: off | partial | on
draft_update_interval_ms = 1000 # optional: edit throttle for partial streaming
mention_only = false # legacy fallback; used when group_reply.mode is not set
interrupt_on_new_message = false # optional: cancel in-flight same-sender same-chat request
@ -215,6 +219,7 @@ Telegram notes:
- `interrupt_on_new_message = true` preserves interrupted user turns in conversation history, then restarts generation on the newest message.
- Interruption scope is strict: same sender in the same chat. Messages from different chats are processed independently.
- `ack_enabled = false` disables the emoji reaction (⚡️, 👌, 👀, 🔥, 👍) sent to incoming messages as acknowledgment.
- `stream_mode = "on"` uses Telegram's native `sendMessageDraft` flow for private chats. Non-private chats, or runtime `sendMessageDraft` API failures, automatically fall back to `partial`.
### 4.2 Discord
@ -541,7 +546,29 @@ Notes:
allowed_contacts = ["*"]
```
### 4.18 ACP
### 4.20 WATI
```toml
[channels_config.wati]
api_token = "wati-api-token"
api_url = "https://live-mt-server.wati.io" # optional
webhook_secret = "required-shared-secret"
tenant_id = "tenant-id" # optional
allowed_numbers = ["*"] # optional, "*" = allow all
```
Notes:
- Inbound webhook endpoint: `POST /wati`.
- WATI webhook auth is fail-closed:
- `500` when `webhook_secret` is not configured.
- `401` when signature/bearer auth is missing or invalid.
- Accepted auth methods:
- `X-Hub-Signature-256`, `X-Wati-Signature`, or `X-Webhook-Signature` HMAC-SHA256 (`sha256=<hex>` or raw hex)
- `Authorization: Bearer <webhook_secret>` fallback
- `ZEROCLAW_WATI_WEBHOOK_SECRET` overrides `webhook_secret` when set.
### 4.21 ACP
ACP (Agent Client Protocol) enables ZeroClaw to act as a client for OpenCode ACP server,
allowing remote control of OpenCode behavior through JSON-RPC 2.0 communication over stdio.

64
docs/ci-blacksmith.md Normal file
View File

@ -0,0 +1,64 @@
# Blacksmith Production Build Pipeline
This document describes the production binary build lane for ZeroClaw on Blacksmith-backed GitHub Actions runners.
## Workflow
- File: `.github/workflows/release-build.yml`
- Workflow name: `Production Release Build`
- Triggers:
- Push to `main`
- Push tags matching `v*`
- Manual dispatch (`workflow_dispatch`)
## Runner Labels
The workflow runs on the same Blacksmith self-hosted runner label-set used by the rest of CI:
`[self-hosted, Linux, X64, aws-india, blacksmith-2vcpu-ubuntu-2404, hetzner]`
This keeps runner routing consistent with existing CI jobs and actionlint policy.
## Canonical Commands
Quality gates (must pass before release build):
```bash
cargo fmt --all -- --check
cargo clippy --locked --all-targets -- -D warnings
cargo test --locked --verbose
```
Production build command (canonical):
```bash
cargo build --release --locked
```
## Artifact Output
- Binary path: `target/release/zeroclaw`
- Uploaded artifact name: `zeroclaw-linux-amd64`
- Uploaded files:
- `artifacts/zeroclaw`
- `artifacts/zeroclaw.sha256`
## Re-run and Debug
1. Open Actions run for `Production Release Build`.
2. Use `Re-run failed jobs` (or full rerun) from the run page.
3. Inspect step logs in this order: `Rust quality gates` -> `Build production binary (canonical)` -> `Prepare artifact bundle`.
4. Download `zeroclaw-linux-amd64` from the run artifacts and verify checksum:
```bash
sha256sum -c zeroclaw.sha256
```
5. Reproduce locally from repository root with the same command set:
```bash
cargo fmt --all -- --check
cargo clippy --locked --all-targets -- -D warnings
cargo test --locked --verbose
cargo build --release --locked
```

View File

@ -61,6 +61,11 @@ Merge-blocking checks should stay small and deterministic. Optional checks are u
- Noise control: excludes common test/fixture paths and test file patterns by default (`include_tests=false`)
- `.github/workflows/pub-release.yml` (`Release`)
- Purpose: build release artifacts in verification mode (manual/scheduled) and publish GitHub releases on tag push or manual publish mode
- `.github/workflows/release-build.yml` (`Production Release Build`)
- Purpose: build reproducible Linux x86_64 production binaries on `main` pushes and `v*` tags using Blacksmith runners
- Canonical build command: `cargo build --release --locked`
- Quality gates: `cargo fmt --all -- --check`, `cargo clippy --locked --all-targets -- -D warnings`, and `cargo test --locked --verbose` before release build
- Artifact output: `zeroclaw-linux-amd64` (`target/release/zeroclaw` + `.sha256`)
- `.github/workflows/pr-label-policy-check.yml` (`Label Policy Sanity`)
- Purpose: validate shared contributor-tier policy in `.github/label-policy.json` and ensure label workflows consume that policy
@ -98,6 +103,7 @@ Merge-blocking checks should stay small and deterministic. Optional checks are u
- `Feature Matrix`: push on Rust + workflow paths to `dev`, merge queue, weekly schedule, manual dispatch; PRs only when `ci:full` or `ci:feature-matrix` label is applied
- `Nightly All-Features`: daily schedule and manual dispatch
- `Release`: tag push (`v*`), weekly schedule (verification-only), manual dispatch (verification or publish)
- `Production Release Build`: push to `main`, push tags matching `v*`, manual dispatch
- `Security Audit`: push to `dev` and `main`, PRs to `dev` and `main`, weekly schedule
- `Sec Vorpal Reviewdog`: manual dispatch only
- `Workflow Sanity`: PR/push when `.github/workflows/**`, `.github/*.yml`, or `.github/*.yaml` change
@ -116,18 +122,20 @@ Merge-blocking checks should stay small and deterministic. Optional checks are u
2. Docker failures on PRs: inspect `.github/workflows/pub-docker-img.yml` `pr-smoke` job.
- For tag-publish failures, inspect `ghcr-publish-contract.json` / `audit-event-ghcr-publish-contract.json`, `ghcr-vulnerability-gate.json` / `audit-event-ghcr-vulnerability-gate.json`, and Trivy artifacts from `pub-docker-img.yml`.
3. Release failures (tag/manual/scheduled): inspect `.github/workflows/pub-release.yml` and the `prepare` job outputs.
4. Security failures: inspect `.github/workflows/sec-audit.yml` and `deny.toml`.
5. Workflow syntax/lint failures: inspect `.github/workflows/workflow-sanity.yml`.
6. PR intake failures: inspect `.github/workflows/pr-intake-checks.yml` sticky comment and run logs.
7. Label policy parity failures: inspect `.github/workflows/pr-label-policy-check.yml`.
8. Docs failures in CI: inspect `docs-quality` job logs in `.github/workflows/ci-run.yml`.
9. Strict delta lint failures in CI: inspect `lint-strict-delta` job logs and compare with `BASE_SHA` diff scope.
4. Production release build failures (`main`/`v*`): inspect `.github/workflows/release-build.yml` quality-gate + build steps.
5. Security failures: inspect `.github/workflows/sec-audit.yml` and `deny.toml`.
6. Workflow syntax/lint failures: inspect `.github/workflows/workflow-sanity.yml`.
7. PR intake failures: inspect `.github/workflows/pr-intake-checks.yml` sticky comment and run logs. If intake policy changed recently, trigger a fresh `pull_request_target` event (for example close/reopen PR) because `Re-run jobs` can reuse the original workflow snapshot.
8. Label policy parity failures: inspect `.github/workflows/pr-label-policy-check.yml`.
9. Docs failures in CI: inspect `docs-quality` job logs in `.github/workflows/ci-run.yml`.
10. Strict delta lint failures in CI: inspect `lint-strict-delta` job logs and compare with `BASE_SHA` diff scope.
## Maintenance Rules
- Keep merge-blocking checks deterministic and reproducible (`--locked` where applicable).
- Keep merge-queue compatibility explicit by supporting `merge_group` on required workflows (`ci-run`, `sec-audit`, and `sec-codeql`).
- Keep PRs mapped to Linear issue keys (`RMN-*`/`CDV-*`/`COM-*`) via PR intake checks.
- Keep PRs mapped to Linear issue keys (`RMN-*`/`CDV-*`/`COM-*`) when available for traceability (recommended by PR intake checks, non-blocking).
- Keep PR intake backfills event-driven: when intake logic changes, prefer triggering a fresh PR event over rerunning old runs so checks evaluate against the latest workflow/script snapshot.
- Keep `deny.toml` advisory ignore entries in object form with explicit reasons (enforced by `deny_policy_guard.py`).
- Keep deny ignore governance metadata current in `.github/security/deny-ignore-governance.json` (owner/reason/expiry/ticket enforced by `deny_policy_guard.py`).
- Keep gitleaks allowlist governance metadata current in `.github/security/gitleaks-allowlist-governance.json` (owner/reason/expiry/ticket enforced by `secrets_governance_guard.py`).
@ -139,6 +147,7 @@ Merge-blocking checks should stay small and deterministic. Optional checks are u
- Keep pre-release stage transition policy + matrix coverage + transition audit semantics current in `.github/release/prerelease-stage-gates.json`.
- Keep required check naming stable and documented in `docs/operations/required-check-mapping.md` before changing branch protection settings.
- Follow `docs/release-process.md` for verify-before-publish release cadence and tag discipline.
- Keep production build reproducibility anchored to `cargo build --release --locked` in `.github/workflows/release-build.yml`.
- Keep merge-blocking rust quality policy aligned across `.github/workflows/ci-run.yml`, `dev/ci.sh`, and `.githooks/pre-push` (`./scripts/ci/rust_quality_gate.sh` + `./scripts/ci/rust_strict_delta_gate.sh`).
- Use `./scripts/ci/rust_strict_delta_gate.sh` (or `./dev/ci.sh lint-delta`) as the incremental strict merge gate for changed Rust lines.
- Run full strict lint audits regularly via `./scripts/ci/rust_quality_gate.sh --strict` (for example through `./dev/ci.sh lint-strict`) and track cleanup in focused PRs.

View File

@ -15,6 +15,7 @@ Last verified: **February 28, 2026**.
| `service` | Manage user-level OS service lifecycle |
| `doctor` | Run diagnostics and freshness checks |
| `status` | Print current configuration and system summary |
| `update` | Check or install latest ZeroClaw release |
| `estop` | Engage/resume emergency stop levels and inspect estop state |
| `cron` | Manage scheduled tasks |
| `models` | Refresh provider model catalogs |
@ -40,6 +41,8 @@ Last verified: **February 28, 2026**.
- `zeroclaw onboard --api-key <KEY> --provider <ID> --memory <sqlite|lucid|markdown|none>`
- `zeroclaw onboard --api-key <KEY> --provider <ID> --model <MODEL_ID> --memory <sqlite|lucid|markdown|none>`
- `zeroclaw onboard --api-key <KEY> --provider <ID> --model <MODEL_ID> --memory <sqlite|lucid|markdown|none> --force`
- `zeroclaw onboard --migrate-openclaw`
- `zeroclaw onboard --migrate-openclaw --openclaw-source <PATH> --openclaw-config <PATH>`
`onboard` safety behavior:
@ -48,6 +51,8 @@ Last verified: **February 28, 2026**.
- Provider-only update (update provider/model/API key while preserving existing channels, tunnel, memory, hooks, and other settings)
- In non-interactive environments, existing `config.toml` causes a safe refusal unless `--force` is passed.
- Use `zeroclaw onboard --channels-only` when you only need to rotate channel tokens/allowlists.
- OpenClaw migration mode is merge-first by design: existing ZeroClaw data/config is preserved, missing fields are filled, and list-like values are union-merged with de-duplication.
- Interactive onboarding can auto-detect `~/.openclaw` and prompt for optional merge migration even without `--migrate-openclaw`.
### `agent`
@ -59,9 +64,11 @@ Last verified: **February 28, 2026**.
Tip:
- In interactive chat, you can ask for route changes in natural language (for example “conversation uses kimi, coding uses gpt-5.3-codex”); the assistant can persist this via tool `model_routing_config`.
- In interactive chat, you can also ask for runtime orchestration changes in natural language (for example “disable agent teams”, “enable subagents”, “set max concurrent subagents to 24”, “use least_loaded strategy”); the assistant can persist this via `model_routing_config` action `set_orchestration`.
- In interactive chat, you can also ask to:
- switch web search provider/fallbacks (`web_search_config`)
- inspect or update domain access policy (`web_access_config`)
- preview/apply OpenClaw merge migration (`openclaw_migration`)
### `gateway` / `daemon`
@ -98,6 +105,18 @@ Notes:
- `zeroclaw service status`
- `zeroclaw service uninstall`
### `update`
- `zeroclaw update --check` (check for new release, no install)
- `zeroclaw update` (install latest release binary for current platform)
- `zeroclaw update --force` (reinstall even if current version matches latest)
- `zeroclaw update --instructions` (print install-method-specific guidance)
Notes:
- If ZeroClaw is installed via Homebrew, prefer `brew upgrade zeroclaw`.
- `update --instructions` detects common install methods and prints the safest path.
### `cron`
- `zeroclaw cron list`
@ -120,7 +139,7 @@ Notes:
- `zeroclaw models refresh --provider <ID>`
- `zeroclaw models refresh --force`
`models refresh` currently supports live catalog refresh for provider IDs: `openrouter`, `openai`, `anthropic`, `groq`, `mistral`, `deepseek`, `xai`, `together-ai`, `gemini`, `ollama`, `llamacpp`, `sglang`, `vllm`, `astrai`, `venice`, `fireworks`, `cohere`, `moonshot`, `glm`, `zai`, `qwen`, `volcengine` (`doubao`/`ark` aliases), `siliconflow`, and `nvidia`.
`models refresh` currently supports live catalog refresh for provider IDs: `openrouter`, `openai`, `anthropic`, `groq`, `mistral`, `deepseek`, `xai`, `together-ai`, `gemini`, `ollama`, `llamacpp`, `sglang`, `vllm`, `astrai`, `venice`, `fireworks`, `cohere`, `moonshot`, `stepfun`, `glm`, `zai`, `qwen`, `volcengine` (`doubao`/`ark` aliases), `siliconflow`, and `nvidia`.
#### Live model availability test
@ -173,6 +192,8 @@ Runtime in-chat commands while channel server is running:
- Supervised tool approvals (all non-CLI channels):
- `/approve-request <tool-name>` (create pending approval request)
- `/approve-confirm <request-id>` (confirm pending request; same sender + same chat/channel only)
- `/approve-allow <request-id>` (approve current pending runtime execution request once; no policy persistence)
- `/approve-deny <request-id>` (deny current pending runtime execution request)
- `/approve-pending` (list pending requests in current sender+chat/channel scope)
- `/approve <tool-name>` (direct one-step grant + persist to `autonomy.auto_approve`, compatibility path)
- `/unapprove <tool-name>` (revoke + remove from `autonomy.auto_approve`)
@ -259,11 +280,24 @@ Registry packages are installed to `~/.zeroclaw/workspace/skills/<name>/`.
Use `skills audit` to manually validate a candidate skill directory (or an installed skill by name) before sharing it.
Workspace symlink policy:
- Symlinked entries under `~/.zeroclaw/workspace/skills/` are blocked by default.
- To allow shared local skill directories, set `[skills].trusted_skill_roots` in `config.toml`.
- A symlinked skill is accepted only when its resolved canonical target is inside one of the trusted roots.
Skill manifests (`SKILL.toml`) support `prompts` and `[[tools]]`; both are injected into the agent system prompt at runtime, so the model can follow skill instructions without manually reading skill files.
### `migrate`
- `zeroclaw migrate openclaw [--source <path>] [--dry-run]`
- `zeroclaw migrate openclaw [--source <path>] [--source-config <path>] [--dry-run] [--no-memory] [--no-config]`
`migrate openclaw` behavior:
- Default mode migrates both memory and config/agents with merge-first semantics.
- Existing ZeroClaw values are preserved; migration does not overwrite existing user content.
- Memory migration de-duplicates repeated content during merge while keeping existing entries intact.
- `--dry-run` prints a migration report without writing data.
- `--no-memory` or `--no-config` scopes migration to selected modules.
### `config`

View File

@ -46,6 +46,7 @@ Use named profiles to map a logical provider id to a provider name/base URL and
|---|---|---|
| `name` | unset | Optional provider id override (for example `openai`, `openai-codex`) |
| `base_url` | unset | Optional OpenAI-compatible endpoint URL |
| `auth_header` | unset | Optional auth header for `custom:` endpoints (for example `api-key` for Azure OpenAI) |
| `wire_api` | unset | Optional protocol mode: `responses` or `chat_completions` |
| `model` | unset | Optional profile-scoped default model |
| `api_key` | unset | Optional profile-scoped API key (used when top-level `api_key` is empty) |
@ -55,6 +56,7 @@ Notes:
- If both top-level `api_key` and profile `api_key` are present, top-level `api_key` wins.
- If top-level `default_model` is still the global OpenRouter default, profile `model` is used as an automatic compatibility override.
- `auth_header` is only applied when the resolved provider is `custom:<url>` and the profile `base_url` matches that custom URL.
- Secrets encryption applies to profile API keys when `secrets.encrypt = true`.
Example:
@ -129,6 +131,8 @@ Operational note for container users:
| `max_history_messages` | `50` | Maximum conversation history messages retained per session |
| `parallel_tools` | `false` | Enable parallel tool execution within a single iteration |
| `tool_dispatcher` | `auto` | Tool dispatch strategy |
| `allowed_tools` | `[]` | Primary-agent tool allowlist. When non-empty, only listed tools are exposed in context |
| `denied_tools` | `[]` | Primary-agent tool denylist applied after `allowed_tools` |
| `loop_detection_no_progress_threshold` | `3` | Same tool+args producing identical output this many times triggers loop detection. `0` disables |
| `loop_detection_ping_pong_cycles` | `2` | A→B→A→B alternating pattern cycle count threshold. `0` disables |
| `loop_detection_failure_streak` | `3` | Same tool consecutive failure count threshold. `0` disables |
@ -139,8 +143,126 @@ Notes:
- If a channel message exceeds this value, the runtime returns: `Agent exceeded maximum tool iterations (<value>)`.
- In CLI, gateway, and channel tool loops, multiple independent tool calls are executed concurrently by default when the pending calls do not require approval gating; result order remains stable.
- `parallel_tools` applies to the `Agent::turn()` API surface. It does not gate the runtime loop used by CLI, gateway, or channel handlers.
- `allowed_tools` / `denied_tools` are applied at startup before prompt construction. Excluded tools are omitted from system prompt context and tool specs.
- Unknown entries in `allowed_tools` are skipped and logged at debug level.
- If both `allowed_tools` and `denied_tools` are configured and the denylist removes all allowlisted matches, startup fails fast with a clear config error.
- **Loop detection** intervenes before `max_tool_iterations` is exhausted. On first detection the agent receives a self-correction prompt; if the loop persists the agent is stopped early. Detection is result-aware: repeated calls with *different* outputs (genuine progress) do not trigger. Set any threshold to `0` to disable that detector.
Example:
```toml
[agent]
allowed_tools = [
"delegate",
"subagent_spawn",
"subagent_list",
"subagent_manage",
"memory_recall",
"memory_store",
"task_plan",
]
denied_tools = ["shell", "file_write", "browser_open"]
```
## `[agent.teams]`
Controls synchronous team delegation behavior (`delegate` tool).
| Key | Default | Purpose |
|---|---|---|
| `enabled` | `true` | Enable/disable agent-team delegation runtime |
| `auto_activate` | `true` | Allow automatic team-agent selection when `delegate.agent` is omitted or `"auto"` |
| `max_agents` | `32` | Max active delegate profiles considered for team selection |
| `strategy` | `adaptive` | Load-balancing strategy: `semantic`, `adaptive`, `least_loaded` |
| `load_window_secs` | `120` | Sliding window used for recent load/failure scoring |
| `inflight_penalty` | `8` | Score penalty per in-flight task |
| `recent_selection_penalty` | `2` | Score penalty per recent assignment within the load window |
| `recent_failure_penalty` | `12` | Score penalty per recent failure within the load window |
Notes:
- `semantic` preserves lexical/metadata matching priority.
- `adaptive` blends semantic signals with runtime load and recent outcomes (default).
- `least_loaded` prioritizes healthy least-loaded agents before semantic tie-breakers.
- `max_agents` has no hard-coded upper cap in tooling; use any positive integer that fits the platform.
- `max_agents` and `load_window_secs` must be greater than `0`.
Example:
```toml
[agent.teams]
enabled = true
auto_activate = true
max_agents = 48
strategy = "adaptive"
load_window_secs = 180
inflight_penalty = 10
recent_selection_penalty = 3
recent_failure_penalty = 14
```
## `[agent.subagents]`
Controls asynchronous/background delegation (`subagent_spawn`, `subagent_list`, `subagent_manage`).
| Key | Default | Purpose |
|---|---|---|
| `enabled` | `true` | Enable/disable background sub-agent runtime |
| `auto_activate` | `true` | Allow automatic sub-agent selection when `subagent_spawn.agent` is omitted or `"auto"` |
| `max_concurrent` | `10` | Max number of concurrently running background sub-agents |
| `strategy` | `adaptive` | Load-balancing strategy: `semantic`, `adaptive`, `least_loaded` |
| `load_window_secs` | `180` | Sliding window used for recent load/failure scoring |
| `inflight_penalty` | `10` | Score penalty per in-flight task |
| `recent_selection_penalty` | `3` | Score penalty per recent assignment within the load window |
| `recent_failure_penalty` | `16` | Score penalty per recent failure within the load window |
| `queue_wait_ms` | `15000` | Wait duration for free concurrency slot before failing (`0` = fail-fast) |
| `queue_poll_ms` | `200` | Poll interval while waiting for a slot |
Notes:
- `max_concurrent` has no hard-coded upper cap in tooling; use any positive integer that fits the platform.
- `max_concurrent`, `load_window_secs`, and `queue_poll_ms` must be greater than `0`.
- `queue_wait_ms = 0` is valid and forces immediate failure when at capacity.
Example:
```toml
[agent.subagents]
enabled = true
auto_activate = true
max_concurrent = 24
strategy = "least_loaded"
load_window_secs = 240
inflight_penalty = 12
recent_selection_penalty = 4
recent_failure_penalty = 18
queue_wait_ms = 30000
queue_poll_ms = 250
```
## Runtime Orchestration Updates (Natural Language + Tool)
You can update the orchestration controls in interactive chat with natural language requests (for example: "disable subagents", "set subagents max concurrent to 20", "switch team strategy to least-loaded").
The runtime persists these updates via `model_routing_config` (`action = "set_orchestration"`), and delegation tools hot-apply them without requiring a process restart.
Example tool payload:
```json
{
"action": "set_orchestration",
"teams_enabled": true,
"teams_strategy": "adaptive",
"max_team_agents": 64,
"subagents_enabled": true,
"subagents_auto_activate": true,
"max_concurrent_subagents": 32,
"subagents_strategy": "least_loaded",
"subagents_queue_wait_ms": 15000,
"subagents_queue_poll_ms": 200
}
```
## `[security.otp]`
| Key | Default | Purpose |
@ -278,6 +400,18 @@ Environment overrides:
- `ZEROCLAW_URL_ACCESS_DOMAIN_BLOCKLIST` / `URL_ACCESS_DOMAIN_BLOCKLIST` (comma-separated)
- `ZEROCLAW_URL_ACCESS_APPROVED_DOMAINS` / `URL_ACCESS_APPROVED_DOMAINS` (comma-separated)
## `[security]`
| Key | Default | Purpose |
|---|---|---|
| `canary_tokens` | `true` | Inject per-turn canary token into system prompt and block responses that echo it |
Notes:
- Canary tokens are generated per turn and are redacted from runtime traces.
- This guard is additive to `security.outbound_leak_guard`: canary catches prompt-context leakage, while outbound leak guard catches credential-like material.
- Set `canary_tokens = false` to disable this layer.
## `[security.syscall_anomaly]`
| Key | Default | Purpose |
@ -536,6 +670,7 @@ Notes:
|---|---|---|
| `open_skills_enabled` | `false` | Opt-in loading/sync of community `open-skills` repository |
| `open_skills_dir` | unset | Optional local path for `open-skills` (defaults to `$HOME/open-skills` when enabled) |
| `trusted_skill_roots` | `[]` | Allowlist of directory roots for symlink targets in `workspace/skills/*` |
| `prompt_injection_mode` | `full` | Skill prompt verbosity: `full` (inline instructions/tools) or `compact` (name/description/location only) |
| `clawhub_token` | unset | Optional Bearer token for authenticated ClawHub skill downloads |
@ -548,7 +683,8 @@ Notes:
- `ZEROCLAW_SKILLS_PROMPT_MODE` accepts `full` or `compact`.
- Precedence for enable flag: `ZEROCLAW_OPEN_SKILLS_ENABLED` → `skills.open_skills_enabled` in `config.toml` → default `false`.
- `prompt_injection_mode = "compact"` is recommended on low-context local models to reduce startup prompt size while keeping skill files available on demand.
- Skill loading and `zeroclaw skills install` both apply a static security audit. Skills that contain symlinks, script-like files, high-risk shell payload snippets, or unsafe markdown link traversal are rejected.
- Symlinked workspace skills are blocked by default. Set `trusted_skill_roots` to allow local shared-skill directories after explicit trust review.
- `zeroclaw skills install` and `zeroclaw skills audit` apply a static security audit. Skills that contain script-like files, high-risk shell payload snippets, or unsafe markdown link traversal are rejected.
- `clawhub_token` is sent as `Authorization: Bearer <token>` when downloading from ClawHub. Obtain a token from [https://clawhub.ai](https://clawhub.ai) after signing in. Required if the API returns 429 (rate-limited) or 401 (unauthorized) for anonymous requests.
**ClawHub token example:**
@ -620,6 +756,11 @@ Notes:
- Remote URL only when `allow_remote_fetch = true`
- Allowed MIME types: `image/png`, `image/jpeg`, `image/webp`, `image/gif`, `image/bmp`.
- When the active provider does not support vision, requests fail with a structured capability error (`capability=vision`) instead of silently dropping images.
- In `proxy.scope = "services"` mode, remote image fetch uses service-key routing. For best compatibility include relevant selectors/keys such as:
- `channel.qq` (QQ media hosts like `multimedia.nt.qq.com.cn`)
- `tool.multimodal` (dedicated multimodal fetch path)
- `tool.http_request` (compatibility fallback path)
- `provider.*` or the active provider key (for example `provider.openai`)
## `[browser]`
@ -710,8 +851,8 @@ When using `credential_profile`, do not also set the same header key in `args.he
| Key | Default | Purpose |
|---|---|---|
| `enabled` | `false` | Enable `web_fetch` for page-to-text extraction |
| `provider` | `fast_html2md` | Fetch/render backend: `fast_html2md`, `nanohtml2text`, `firecrawl` |
| `api_key` | unset | API key for provider backends that require it (e.g. `firecrawl`) |
| `provider` | `fast_html2md` | Fetch/render backend: `fast_html2md`, `nanohtml2text`, `firecrawl`, `tavily` |
| `api_key` | unset | API key for provider backends that require it (e.g. `firecrawl`, `tavily`) |
| `api_url` | unset | Optional API URL override (self-hosted/alternate endpoint) |
| `allowed_domains` | `["*"]` | Domain allowlist (`"*"` allows all public domains) |
| `blocked_domains` | `[]` | Denylist applied before allowlist |
@ -855,6 +996,7 @@ Environment overrides:
| `level` | `supervised` | `read_only`, `supervised`, or `full` |
| `workspace_only` | `true` | reject absolute path inputs unless explicitly disabled |
| `allowed_commands` | _required for shell execution_ | allowlist of executable names, explicit executable paths, or `"*"` |
| `command_context_rules` | `[]` | per-command context-aware allow/deny/require-approval rules (domain/path constraints, optional high-risk override) |
| `forbidden_paths` | built-in protected list | explicit path denylist (system paths + sensitive dotdirs by default) |
| `allowed_roots` | `[]` | additional roots allowed outside workspace after canonicalization |
| `max_actions_per_hour` | `20` | per-policy action budget |
@ -865,7 +1007,7 @@ Environment overrides:
| `allow_sensitive_file_writes` | `false` | allow `file_write`/`file_edit` on sensitive files/dirs (for example `.env`, `.aws/credentials`, private keys) |
| `auto_approve` | `[]` | tool operations always auto-approved |
| `always_ask` | `[]` | tool operations that always require approval |
| `non_cli_excluded_tools` | `[]` | tools hidden from non-CLI channel tool specs |
| `non_cli_excluded_tools` | built-in denylist (includes `shell`, `process`, `file_write`, ...) | tools hidden from non-CLI channel tool specs |
| `non_cli_approval_approvers` | `[]` | optional allowlist for who can run non-CLI approval-management commands |
| `non_cli_natural_language_approval_mode` | `direct` | natural-language behavior for approval-management commands (`direct`, `request_confirm`, `disabled`) |
| `non_cli_natural_language_approval_mode_by_channel` | `{}` | per-channel override map for natural-language approval mode |
@ -876,6 +1018,11 @@ Notes:
- Access outside the workspace requires `allowed_roots`, even when `workspace_only = false`.
- `allowed_roots` supports absolute paths, `~/...`, and workspace-relative paths.
- `allowed_commands` entries can be command names (for example, `"git"`), explicit executable paths (for example, `"/usr/bin/antigravity"`), or `"*"` to allow any command name/path (risk gates still apply).
- `command_context_rules` can narrow or override `allowed_commands` for matching commands:
- `action = "allow"` rules are restrictive when present for a command: at least one allow rule must match.
- `action = "deny"` rules explicitly block matching contexts.
- `action = "require_approval"` forces explicit approval (`approved=true`) in supervised mode for matching segments, even if `shell` is in `auto_approve`.
- `allow_high_risk = true` allows a matching high-risk command to pass the hard block, but supervised mode still requires `approved=true`.
- `file_read` blocks sensitive secret-bearing files/directories by default. Set `allow_sensitive_file_reads = true` only for controlled debugging sessions.
- `file_write` and `file_edit` block sensitive secret-bearing files/directories by default. Set `allow_sensitive_file_writes = true` only for controlled break-glass sessions.
- `file_read`, `file_write`, and `file_edit` refuse multiply-linked files (hard-link guard) to reduce workspace path bypass risk via hard-link escapes.
@ -885,6 +1032,10 @@ Notes:
- One-step flow: `/approve <tool>`.
- Two-step flow: `/approve-request <tool>` then `/approve-confirm <request-id>` (same sender + same chat/channel).
Both paths write to `autonomy.auto_approve` and remove the tool from `autonomy.always_ask`.
- For pending runtime execution prompts (including Telegram inline approval buttons), use:
- `/approve-allow <request-id>` to approve only the current pending request.
- `/approve-deny <request-id>` to reject the current pending request.
This path does not modify `autonomy.auto_approve` or `autonomy.always_ask`.
- `non_cli_natural_language_approval_mode` controls how strict natural-language approval intents are:
- `direct` (default): natural-language approval grants immediately (private-chat friendly).
- `request_confirm`: natural-language approval creates a pending request that needs explicit confirm.
@ -897,6 +1048,7 @@ Notes:
- `telegram:alice` allows only that channel+sender pair.
- `telegram:*` allows any sender on Telegram.
- `*:alice` allows `alice` on any channel.
- By default, `process` is excluded on non-CLI channels alongside `shell`. To opt in intentionally, remove `"process"` from `[autonomy].non_cli_excluded_tools` in `config.toml`.
- Use `/unapprove <tool>` to remove persisted approval from `autonomy.auto_approve`.
- `/approve-pending` lists pending requests for the current sender+chat/channel scope.
- If a tool remains unavailable after approval, check `autonomy.non_cli_excluded_tools` (runtime `/approvals` shows this list). Channel runtime reloads this list from `config.toml` automatically.
@ -906,6 +1058,22 @@ Notes:
workspace_only = false
forbidden_paths = ["/etc", "/root", "/proc", "/sys", "~/.ssh", "~/.gnupg", "~/.aws"]
allowed_roots = ["~/Desktop/projects", "/opt/shared-repo"]
[[autonomy.command_context_rules]]
command = "curl"
action = "allow"
allowed_domains = ["api.github.com", "*.example.internal"]
allow_high_risk = true
[[autonomy.command_context_rules]]
command = "rm"
action = "allow"
allowed_path_prefixes = ["/tmp"]
allow_high_risk = true
[[autonomy.command_context_rules]]
command = "rm"
action = "require_approval"
```
## `[memory]`
@ -1063,10 +1231,70 @@ Notes:
- `mode = "all_messages"` or `mode = "mention_only"`
- `allowed_sender_ids = ["..."]` to bypass mention gating in groups
- `allowed_users` allowlist checks still run first
- Telegram/Discord/Lark/Feishu ACK emoji reactions are configurable under
`[channels_config.ack_reaction.<channel>]` with switchable enable state,
custom emoji pools, and conditional rules.
- Legacy `mention_only` flags (Telegram/Discord/Mattermost/Lark) remain supported as fallback only.
If `group_reply.mode` is set, it takes precedence over legacy `mention_only`.
- While `zeroclaw channel start` is running, updates to `default_provider`, `default_model`, `default_temperature`, `api_key`, `api_url`, and `reliability.*` are hot-applied from `config.toml` on the next inbound message.
### `[channels_config.ack_reaction.<channel>]`
Per-channel ACK reaction policy (`<channel>`: `telegram`, `discord`, `lark`, `feishu`).
| Key | Default | Purpose |
|---|---|---|
| `enabled` | `true` | Master switch for ACK reactions on this channel |
| `strategy` | `random` | Pool selection strategy: `random` or `first` |
| `sample_rate` | `1.0` | Probabilistic gate in `[0.0, 1.0]` for channel fallback ACKs |
| `emojis` | `[]` | Channel-level custom fallback pool (uses built-in pool when empty) |
| `rules` | `[]` | Ordered conditional rules; first matching rule can react or suppress |
Rule object fields (`[[channels_config.ack_reaction.<channel>.rules]]`):
| Key | Default | Purpose |
|---|---|---|
| `enabled` | `true` | Enable/disable this single rule |
| `contains_any` | `[]` | Match when message contains any keyword (case-insensitive) |
| `contains_all` | `[]` | Match when message contains all keywords (case-insensitive) |
| `contains_none` | `[]` | Match only when message contains none of these keywords |
| `regex_any` | `[]` | Match when any regex pattern matches |
| `regex_all` | `[]` | Match only when all regex patterns match |
| `regex_none` | `[]` | Match only when none of these regex patterns match |
| `sender_ids` | `[]` | Match only these sender IDs (`"*"` matches all) |
| `chat_ids` | `[]` | Match only these chat/channel IDs (`"*"` matches all) |
| `chat_types` | `[]` | Restrict to `group` and/or `direct` |
| `locale_any` | `[]` | Restrict by locale tag (prefix supported, e.g. `zh`) |
| `action` | `react` | `react` to emit ACK, `suppress` to force no ACK when matched |
| `sample_rate` | unset | Optional rule-level gate in `[0.0, 1.0]` (overrides channel `sample_rate`) |
| `strategy` | unset | Optional per-rule strategy override |
| `emojis` | `[]` | Emoji pool used when this rule matches |
Example:
```toml
[channels_config.ack_reaction.telegram]
enabled = true
strategy = "random"
sample_rate = 1.0
emojis = ["✅", "👌", "🔥"]
[[channels_config.ack_reaction.telegram.rules]]
contains_any = ["deploy", "release"]
contains_none = ["dry-run"]
regex_none = ["panic|fatal"]
chat_ids = ["-100200300"]
chat_types = ["group"]
strategy = "first"
sample_rate = 0.9
emojis = ["🚀"]
[[channels_config.ack_reaction.telegram.rules]]
contains_any = ["error", "failed"]
action = "suppress"
sample_rate = 1.0
```
### `[channels_config.nostr]`
| Key | Default | Purpose |

View File

@ -2,7 +2,7 @@
This inventory classifies documentation by intent and canonical location.
Last reviewed: **February 28, 2026**.
Last reviewed: **March 1, 2026**.
## Classification Legend
@ -125,6 +125,8 @@ These are valuable context, but **not strict runtime contracts**.
| `docs/project-triage-snapshot-2026-02-18.md` | Snapshot |
| `docs/docs-audit-2026-02-24.md` | Snapshot (docs architecture audit) |
| `docs/project/m4-5-rfi-spike-2026-02-28.md` | Snapshot (M4-5 workspace split RFI baseline and execution plan) |
| `docs/project/f1-3-agent-lifecycle-state-machine-rfi-2026-03-01.md` | Snapshot (F1-3 lifecycle state machine RFI) |
| `docs/project/q0-3-stop-reason-state-machine-rfi-2026-03-01.md` | Snapshot (Q0-3 stop-reason/continuation RFI) |
| `docs/i18n-gap-backlog.md` | Snapshot (i18n depth gap tracking) |
## Maintenance Contract

View File

@ -18,7 +18,7 @@ For first-time setup and quick orientation.
| I want guided prompts | `zeroclaw onboard --interactive` |
| Config exists, just fix channels | `zeroclaw onboard --channels-only` |
| Config exists, I intentionally want full overwrite | `zeroclaw onboard --force` |
| Using subscription auth | See [Subscription Auth](../../README.md#subscription-auth-openai-codex--claude-code) |
| Using OpenAI Codex subscription auth | See [OpenAI Codex OAuth Quick Setup](#openai-codex-oauth-quick-setup) |
## Onboarding and Validation
@ -28,6 +28,50 @@ For first-time setup and quick orientation.
- Ollama cloud models (`:cloud`) require a remote `api_url` and API key (for example `api_url = "https://ollama.com"`).
- Validate environment: `zeroclaw status` + `zeroclaw doctor`
## OpenAI Codex OAuth Quick Setup
Use this path when you want `openai-codex` with subscription OAuth credentials (no API key required).
1. Authenticate:
```bash
zeroclaw auth login --provider openai-codex
```
2. Verify auth material is loaded:
```bash
zeroclaw auth status --provider openai-codex
```
3. Set provider/model defaults:
```toml
default_provider = "openai-codex"
default_model = "gpt-5.3-codex"
default_temperature = 0.2
[provider]
transport = "auto"
reasoning_level = "high"
```
4. Optional stable fallback model (if your account/region does not currently expose `gpt-5.3-codex`):
```toml
default_model = "gpt-5.2-codex"
```
5. Start chat:
```bash
zeroclaw chat
```
Notes:
- You do not need to define a custom `[model_providers."openai-codex"]` block for normal OAuth usage.
- If you see raw `<tool_call>` tags in output, first verify you are on the built-in `openai-codex` provider path above and not a custom OpenAI-compatible provider override.
## Next
- Runtime operations: [../operations/README.md](../operations/README.md)

View File

@ -20,6 +20,13 @@ If both exist, your shell `PATH` order decides which one runs.
## 2) Update on macOS
Quick way to get install-method-specific guidance:
```bash
zeroclaw update --instructions
zeroclaw update --check
```
### A) Homebrew install
```bash
@ -54,6 +61,13 @@ Re-run your download/install flow with the latest release asset, then verify:
zeroclaw --version
```
You can also use the built-in updater for manual/local installs:
```bash
zeroclaw update
zeroclaw --version
```
## 3) Uninstall on macOS
### A) Stop and remove background service first

View File

@ -66,7 +66,7 @@ ssh arduino@<UNO_Q_IP>
4. **Λήψη και Μεταγλώττιση**:
```bash
git clone https://github.com/theonlyhennygod/zeroclaw.git
git clone https://github.com/zeroclaw-labs/zeroclaw.git
cd zeroclaw
cargo build --release --features hardware
```

View File

@ -44,6 +44,15 @@
- `zeroclaw daemon [--host <HOST>] [--port <PORT>]`
- Το `--new-pairing` καθαρίζει όλα τα αποθηκευμένα paired tokens και δημιουργεί νέο pairing code κατά την εκκίνηση του gateway.
### 2.2 OpenClaw Migration Surface
- `zeroclaw onboard --migrate-openclaw`
- `zeroclaw onboard --migrate-openclaw --openclaw-source <PATH> --openclaw-config <PATH>`
- `zeroclaw migrate openclaw --dry-run`
- `zeroclaw migrate openclaw`
Σημείωση: στο agent runtime υπάρχει επίσης το εργαλείο `openclaw_migration` για controlled preview/apply migration flows.
### 3. `cron` (Προγραμματισμός Εργασιών)
Δυνατότητα αυτοματισμού εντολών:

View File

@ -68,4 +68,13 @@ allowed_users = ["το-όνομά-σας"] # Ποιοι επιτρέπεται
- Αν αλλάξετε το αρχείο `config.toml`, πρέπει να κάνετε επανεκκίνηση το ZeroClaw για να δει τις αλλαγές.
- Χρησιμοποιήστε την εντολή `zeroclaw doctor` για να βεβαιωθείτε ότι οι ρυθμίσεις σας είναι σωστές.
## Ενημέρωση (2026-03-03)
- Στην ενότητα `[agent]` προστέθηκαν τα `allowed_tools` και `denied_tools`.
- Αν το `allowed_tools` δεν είναι κενό, ο primary agent βλέπει μόνο τα εργαλεία της λίστας.
- Το `denied_tools` εφαρμόζεται μετά το allowlist και αφαιρεί επιπλέον εργαλεία.
- Άγνωστες τιμές στο `allowed_tools` αγνοούνται (με debug log) και δεν μπλοκάρουν την εκκίνηση.
- Αν `allowed_tools` και `denied_tools` καταλήξουν να αφαιρέσουν όλα τα εκτελέσιμα εργαλεία, η εκκίνηση αποτυγχάνει άμεσα με σαφές μήνυμα ρύθμισης.
- Για πλήρη πίνακα πεδίων και παράδειγμα, δείτε το αγγλικό `config-reference.md` στην ενότητα `[agent]`.
- Μην μοιράζεστε ποτέ το αρχείο `config.toml` με άλλους, καθώς περιέχει τα μυστικά κλειδιά σας (tokens).

127
docs/i18n/es/README.md Normal file
View File

@ -0,0 +1,127 @@
<p align="center">
<img src="../../../zeroclaw.png" alt="ZeroClaw" width="200" />
</p>
<h1 align="center">ZeroClaw 🦀</h1>
<p align="center">
<strong>Sobrecarga cero. Compromiso cero. 100% Rust. 100% Agnóstico.</strong><br>
⚡️ <strong>Funciona en cualquier hardware con <5MB RAM: ¡99% menos memoria que OpenClaw y 98% más económico que un Mac mini!</strong>
</p>
<p align="center">
<a href="../../../LICENSE-APACHE"><img src="https://img.shields.io/badge/license-MIT%20OR%20Apache%202.0-blue.svg" alt="Licencia: MIT OR Apache-2.0" /></a>
<a href="../../../NOTICE"><img src="https://img.shields.io/github/contributors/zeroclaw-labs/zeroclaw?color=green" alt="Colaboradores" /></a>
<a href="https://buymeacoffee.com/argenistherose"><img src="https://img.shields.io/badge/Buy%20Me%20a%20Coffee-Donate-yellow.svg?style=flat&logo=buy-me-a-coffee" alt="Buy Me a Coffee" /></a>
<a href="https://x.com/zeroclawlabs?s=21"><img src="https://img.shields.io/badge/X-%40zeroclawlabs-000000?style=flat&logo=x&logoColor=white" alt="X: @zeroclawlabs" /></a>
<a href="https://zeroclawlabs.cn/group.jpg"><img src="https://img.shields.io/badge/WeChat-Group-B7D7A8?logo=wechat&logoColor=white" alt="Grupo WeChat" /></a>
<a href="https://t.me/zeroclawlabs"><img src="https://img.shields.io/badge/Telegram-%40zeroclawlabs-26A5E4?style=flat&logo=telegram&logoColor=white" alt="Telegram: @zeroclawlabs" /></a>
<a href="https://www.facebook.com/groups/zeroclaw"><img src="https://img.shields.io/badge/Facebook-Group-1877F2?style=flat&logo=facebook&logoColor=white" alt="Grupo Facebook" /></a>
<a href="https://www.reddit.com/r/zeroclawlabs/"><img src="https://img.shields.io/badge/Reddit-r%2Fzeroclawlabs-FF4500?style=flat&logo=reddit&logoColor=white" alt="Reddit: r/zeroclawlabs" /></a>
</p>
<p align="center">
Desarrollado por estudiantes y miembros de las comunidades de Harvard, MIT y Sundai.Club.
</p>
<p align="center">
🌐 <strong>Idiomas:</strong> <a href="../../../README.md">English</a> · <a href="../zh-CN/README.md">简体中文</a> · <a href="README.md">Español</a> · <a href="../pt/README.md">Português</a> · <a href="../it/README.md">Italiano</a> · <a href="../ja/README.md">日本語</a> · <a href="../ru/README.md">Русский</a> · <a href="../fr/README.md">Français</a> · <a href="../vi/README.md">Tiếng Việt</a> · <a href="../el/README.md">Ελληνικά</a>
</p>
<p align="center">
<strong>Framework rápido, pequeño y totalmente autónomo</strong><br />
Despliega en cualquier lugar. Intercambia cualquier cosa.
</p>
<p align="center">
ZeroClaw es el <strong>framework de runtime</strong> para flujos de trabajo agénticos — infraestructura que abstrae modelos, herramientas, memoria y ejecución para que los agentes puedan construirse una vez y ejecutarse en cualquier lugar.
</p>
<p align="center"><code>Arquitectura basada en traits · runtime seguro por defecto · proveedor/canal/herramienta intercambiable · todo conectable</code></p>
### ✨ Características
- 🏎️ **Runtime Ligero por Defecto:** Los flujos de trabajo comunes de CLI y estado se ejecutan en una envoltura de memoria de pocos megabytes en builds de release.
- 💰 **Despliegue Económico:** Diseñado para placas de bajo costo e instancias cloud pequeñas sin dependencias de runtime pesadas.
- ⚡ **Arranques en Frío Rápidos:** El runtime Rust de binario único mantiene el inicio de comandos y daemon casi instantáneo para operaciones diarias.
- 🌍 **Arquitectura Portátil:** Un flujo de trabajo binary-first a través de ARM, x86 y RISC-V con proveedores/canales/herramientas intercambiables.
- 🔍 **Fase de Investigación:** Recopilación proactiva de información a través de herramientas antes de la generación de respuestas — reduce alucinaciones verificando hechos primero.
### Por qué los equipos eligen ZeroClaw
- **Ligero por defecto:** binario Rust pequeño, inicio rápido, huella de memoria baja.
- **Seguro por diseño:** emparejamiento, sandboxing estricto, listas de permitidos explícitas, alcance de workspace.
- **Totalmente intercambiable:** los sistemas principales son traits (proveedores, canales, herramientas, memoria, túneles).
- **Sin lock-in:** soporte de proveedor compatible con OpenAI + endpoints personalizados conectables.
## Inicio Rápido
### Opción 1: Homebrew (macOS/Linuxbrew)
```bash
brew install zeroclaw
```
### Opción 2: Clonar + Bootstrap
```bash
git clone https://github.com/zeroclaw-labs/zeroclaw.git
cd zeroclaw
./bootstrap.sh
```
> **Nota:** Las builds desde fuente requieren ~2GB RAM y ~6GB disco. Para sistemas con recursos limitados, usa `./bootstrap.sh --prefer-prebuilt` para descargar un binario pre-compilado.
### Opción 3: Cargo Install
```bash
cargo install zeroclaw
```
### Primera Ejecución
```bash
# Iniciar el gateway (sirve el API/UI del Dashboard Web)
zeroclaw gateway
# Abrir la URL del dashboard mostrada en los logs de inicio
# (por defecto: http://127.0.0.1:3000/)
# O chatear directamente
zeroclaw chat "¡Hola!"
```
Para opciones de configuración detalladas, consulta [docs/one-click-bootstrap.md](../../../docs/one-click-bootstrap.md).
---
## ⚠️ Repositorio Oficial y Advertencia de Suplantación
**Este es el único repositorio oficial de ZeroClaw:**
> https://github.com/zeroclaw-labs/zeroclaw
Cualquier otro repositorio, organización, dominio o paquete que afirme ser "ZeroClaw" o implique afiliación con ZeroClaw Labs **no está autorizado y no está afiliado con este proyecto**.
Si encuentras suplantación o uso indebido de marca, por favor [abre un issue](https://github.com/zeroclaw-labs/zeroclaw/issues).
---
## Licencia
ZeroClaw tiene doble licencia para máxima apertura y protección de colaboradores:
| Licencia | Caso de uso |
|---|---|
| [MIT](../../../LICENSE-MIT) | Open-source, investigación, académico, uso personal |
| [Apache 2.0](../../../LICENSE-APACHE) | Protección de patentes, institucional, despliegue comercial |
Puedes elegir cualquiera de las dos licencias. **Los colaboradores otorgan automáticamente derechos bajo ambas** — consulta [CLA.md](../../../CLA.md) para el acuerdo completo de colaborador.
## Contribuir
Consulta [CONTRIBUTING.md](../../../CONTRIBUTING.md) y [CLA.md](../../../CLA.md). Implementa un trait, envía un PR.
---
**ZeroClaw** — Sobrecarga cero. Compromiso cero. Despliega en cualquier lugar. Intercambia cualquier cosa. 🦀

View File

@ -20,3 +20,4 @@ Source anglaise:
## Mise à jour récente
- `zeroclaw gateway` prend en charge `--new-pairing` pour effacer les tokens appairés et générer un nouveau code d'appairage.
- Le guide anglais inclut désormais les surfaces de migration OpenClaw: `zeroclaw onboard --migrate-openclaw`, `zeroclaw migrate openclaw` et l'outil agent `openclaw_migration` (traduction complète en cours).

View File

@ -21,3 +21,8 @@ Source anglaise:
- Ajout de `provider.reasoning_level` (OpenAI Codex `/responses`). Voir la source anglaise pour les détails.
- Valeur par défaut de `agent.max_tool_iterations` augmentée à `20` (fallback sûr si `0`).
- Ajout de `agent.allowed_tools` et `agent.denied_tools` pour filtrer les outils visibles par l'agent principal.
- `allowed_tools` non vide: seuls les outils listés sont exposés.
- `denied_tools`: retrait supplémentaire appliqué après `allowed_tools`.
- Les entrées inconnues dans `allowed_tools` sont ignorées (log debug), sans échec de démarrage.
- Si `allowed_tools` + `denied_tools` suppriment tous les outils exécutables, le démarrage échoue immédiatement avec une erreur de configuration claire.

View File

@ -20,3 +20,21 @@ Source anglaise:
## Notes de mise à jour
- Ajout d'un réglage `provider.reasoning_level` pour le niveau de raisonnement OpenAI Codex. Voir la source anglaise pour les détails.
- 2026-03-01: ajout de la prise en charge du provider StepFun (`stepfun`, alias `step`, `step-ai`, `step_ai`).
## StepFun (Résumé)
- Provider ID: `stepfun`
- Aliases: `step`, `step-ai`, `step_ai`
- Base API URL: `https://api.stepfun.com/v1`
- Endpoints: `POST /v1/chat/completions`, `GET /v1/models`
- Auth env var: `STEP_API_KEY` (fallback: `STEPFUN_API_KEY`)
- Modèle par défaut: `step-3.5-flash`
Validation rapide:
```bash
export STEP_API_KEY="your-stepfun-api-key"
zeroclaw models refresh --provider stepfun
zeroclaw agent --provider stepfun --model step-3.5-flash -m "ping"
```

141
docs/i18n/it/README.md Normal file
View File

@ -0,0 +1,141 @@
<p align="center">
<img src="../../../zeroclaw.png" alt="ZeroClaw" width="200" />
</p>
<h1 align="center">ZeroClaw 🦀</h1>
<p align="center">
<strong>Zero overhead. Zero compromesso. 100% Rust. 100% Agnostico.</strong><br>
⚡️ <strong>Funziona su qualsiasi hardware con <5MB RAM: 99% meno memoria di OpenClaw e 98% più economico di un Mac mini!</strong>
</p>
<p align="center">
<a href="../../../LICENSE-APACHE"><img src="https://img.shields.io/badge/license-MIT%20OR%20Apache%202.0-blue.svg" alt="Licenza: MIT OR Apache-2.0" /></a>
<a href="../../../NOTICE"><img src="https://img.shields.io/github/contributors/zeroclaw-labs/zeroclaw?color=green" alt="Contributori" /></a>
<a href="https://buymeacoffee.com/argenistherose"><img src="https://img.shields.io/badge/Buy%20Me%20a%20Coffee-Donate-yellow.svg?style=flat&logo=buy-me-a-coffee" alt="Buy Me a Coffee" /></a>
<a href="https://x.com/zeroclawlabs?s=21"><img src="https://img.shields.io/badge/X-%40zeroclawlabs-000000?style=flat&logo=x&logoColor=white" alt="X: @zeroclawlabs" /></a>
<a href="https://zeroclawlabs.cn/group.jpg"><img src="https://img.shields.io/badge/WeChat-Group-B7D7A8?logo=wechat&logoColor=white" alt="Gruppo WeChat" /></a>
<a href="https://t.me/zeroclawlabs"><img src="https://img.shields.io/badge/Telegram-%40zeroclawlabs-26A5E4?style=flat&logo=telegram&logoColor=white" alt="Telegram: @zeroclawlabs" /></a>
<a href="https://www.facebook.com/groups/zeroclaw"><img src="https://img.shields.io/badge/Facebook-Group-1877F2?style=flat&logo=facebook&logoColor=white" alt="Gruppo Facebook" /></a>
<a href="https://www.reddit.com/r/zeroclawlabs/"><img src="https://img.shields.io/badge/Reddit-r%2Fzeroclawlabs-FF4500?style=flat&logo=reddit&logoColor=white" alt="Reddit: r/zeroclawlabs" /></a>
</p>
<p align="center">
Sviluppato da studenti e membri delle comunità Harvard, MIT e Sundai.Club.
</p>
<p align="center">
🌐 <strong>Lingue:</strong> <a href="../../../README.md">English</a> · <a href="../zh-CN/README.md">简体中文</a> · <a href="../es/README.md">Español</a> · <a href="../pt/README.md">Português</a> · <a href="README.md">Italiano</a> · <a href="../ja/README.md">日本語</a> · <a href="../ru/README.md">Русский</a> · <a href="../fr/README.md">Français</a> · <a href="../vi/README.md">Tiếng Việt</a> · <a href="../el/README.md">Ελληνικά</a>
</p>
<p align="center">
<strong>Framework veloce, piccolo e completamente autonomo</strong><br />
Distribuisci ovunque. Scambia qualsiasi cosa.
</p>
<p align="center">
ZeroClaw è il <strong>framework runtime</strong> per workflow agentici — infrastruttura che astrae modelli, strumenti, memoria ed esecuzione così gli agenti possono essere costruiti una volta ed eseguiti ovunque.
</p>
<p align="center"><code>Architettura basata su trait · runtime sicuro per impostazione predefinita · provider/canale/strumento scambiabile · tutto collegabile</code></p>
### ✨ Caratteristiche
- 🏎️ **Runtime Leggero per Impostazione Predefinita:** I comuni workflow CLI e di stato vengono eseguiti in un envelope di memoria di pochi megabyte nelle build di release.
- 💰 **Distribuzione Economica:** Progettato per schede economiche e piccole istanze cloud senza dipendenze di runtime pesanti.
- ⚡ **Avvii a Freddo Rapidi:** Il runtime Rust a singolo binario mantiene l'avvio di comandi e daemon quasi istantaneo per le operazioni quotidiane.
- 🌍 **Architettura Portatile:** Un workflow binary-first attraverso ARM, x86 e RISC-V con provider/canali/strumenti scambiabili.
- 🔍 **Fase di Ricerca:** Raccolta proattiva di informazioni attraverso gli strumenti prima della generazione della risposta — riduce le allucinazioni verificando prima i fatti.
### Perché i team scelgono ZeroClaw
- **Leggero per impostazione predefinita:** binario Rust piccolo, avvio rapido, footprint di memoria basso.
- **Sicuro per design:** pairing, sandboxing rigoroso, liste di permessi esplicite, scope del workspace.
- **Completamente scambiabile:** i sistemi core sono trait (provider, canali, strumenti, memoria, tunnel).
- **Nessun lock-in:** supporto provider compatibile con OpenAI + endpoint personalizzati collegabili.
## Avvio Rapido
### Opzione 1: Homebrew (macOS/Linuxbrew)
```bash
brew install zeroclaw
```
### Opzione 2: Clona + Bootstrap
```bash
git clone https://github.com/zeroclaw-labs/zeroclaw.git
cd zeroclaw
./bootstrap.sh
```
> **Nota:** Le build da sorgente richiedono ~2GB RAM e ~6GB disco. Per sistemi con risorse limitate, usa `./bootstrap.sh --prefer-prebuilt` per scaricare un binario precompilato.
### Opzione 3: Cargo Install
```bash
cargo install zeroclaw
```
### Prima Esecuzione
```bash
# Avvia il gateway (serve l'API/UI della Dashboard Web)
zeroclaw gateway
# Apri l'URL del dashboard mostrata nei log di avvio
# (default: http://127.0.0.1:3000/)
# O chatta direttamente
zeroclaw chat "Ciao!"
```
Per opzioni di configurazione dettagliate, consulta [docs/one-click-bootstrap.md](../../../docs/one-click-bootstrap.md).
---
## ⚠️ Repository Ufficiale e Avviso di Impersonazione
**Questo è l'unico repository ufficiale di ZeroClaw:**
> https://github.com/zeroclaw-labs/zeroclaw
Qualsiasi altro repository, organizzazione, dominio o pacchetto che affermi di essere "ZeroClaw" o implichi affiliazione con ZeroClaw Labs **non è autorizzato e non è affiliato con questo progetto**.
Se incontri impersonazione o uso improprio del marchio, per favore [apri una issue](https://github.com/zeroclaw-labs/zeroclaw/issues).
---
## Licenza
ZeroClaw è con doppia licenza per massima apertura e protezione dei contributori:
| Licenza | Caso d'uso |
|---|---|
| [MIT](../../../LICENSE-MIT) | Open-source, ricerca, accademico, uso personale |
| [Apache 2.0](../../../LICENSE-APACHE) | Protezione brevetti, istituzionale, distribuzione commerciale |
Puoi scegliere qualsiasi licenza. **I contributori concedono automaticamente diritti sotto entrambe** — consulta [CLA.md](../../../CLA.md) per l'accordo completo dei contributori.
## Contribuire
Consulta [CONTRIBUTING.md](../../../CONTRIBUTING.md) e [CLA.md](../../../CLA.md). Implementa un trait, invia un PR.
---
**ZeroClaw** — Zero overhead. Zero compromesso. Distribuisci ovunque. Scambia qualsiasi cosa. 🦀
---
## Star History
<p align="center">
<a href="https://www.star-history.com/#zeroclaw-labs/zeroclaw&type=date&legend=top-left">
<picture>
<source media="(prefers-color-scheme: dark)" srcset="https://api.star-history.com/svg?repos=zeroclaw-labs/zeroclaw&type=date&theme=dark&legend=top-left" />
<source media="(prefers-color-scheme: light)" srcset="https://api.star-history.com/svg?repos=zeroclaw-labs/zeroclaw&type=date&legend=top-left" />
<img alt="Star History Chart" src="https://api.star-history.com/svg?repos=zeroclaw-labs/zeroclaw&type=date&legend=top-left" />
</picture>
</a>
</p>

View File

@ -20,3 +20,4 @@
## 最新更新
- `zeroclaw gateway` が `--new-pairing` をサポートし、既存のペアリングトークンを消去して新しいペアリングコードを生成できます。
- OpenClaw 移行関連の英語原文が更新されました: `zeroclaw onboard --migrate-openclaw`、`zeroclaw migrate openclaw`、およびエージェントツール `openclaw_migration`(ローカライズ追従は継続中)。

View File

@ -16,3 +16,12 @@
- 設定キー名は英語のまま保持します。
- 実行時挙動の定義は英語版原文を優先します。
## 更新ノート(2026-03-03)
- `[agent]` に `allowed_tools` / `denied_tools` が追加されました。
- `allowed_tools` が空でない場合、メインエージェントには許可リストのツールのみ公開されます。
- `denied_tools` は許可リスト適用後に追加でツールを除外します。
- `allowed_tools` の未一致エントリは起動失敗にせず、debug ログのみ出力されます。
- `allowed_tools` と `denied_tools` の組み合わせで実行可能ツールが 0 件になる場合は、明確な設定エラーで fail-fast します。
- 詳細な表と例は英語版 `config-reference.md` の `[agent]` セクションを参照してください。

View File

@ -16,3 +16,24 @@
- Provider ID と環境変数名は英語のまま保持します。
- 正式な仕様は英語版原文を優先します。
## 更新ノート
- 2026-03-01: StepFun provider 対応を追加(`stepfun`、alias: `step` / `step-ai` / `step_ai`)。
## StepFun クイックガイド
- Provider ID: `stepfun`
- Aliases: `step`, `step-ai`, `step_ai`
- Base API URL: `https://api.stepfun.com/v1`
- Endpoints: `POST /v1/chat/completions`, `GET /v1/models`
- 認証 env var: `STEP_API_KEY`(fallback: `STEPFUN_API_KEY`)
- 既定モデル: `step-3.5-flash`
クイック検証:
```bash
export STEP_API_KEY="your-stepfun-api-key"
zeroclaw models refresh --provider stepfun
zeroclaw agent --provider stepfun --model step-3.5-flash -m "ping"
```

141
docs/i18n/pt/README.md Normal file
View File

@ -0,0 +1,141 @@
<p align="center">
<img src="../../../zeroclaw.png" alt="ZeroClaw" width="200" />
</p>
<h1 align="center">ZeroClaw 🦀</h1>
<p align="center">
<strong>Sobrecarga zero. Compromisso zero. 100% Rust. 100% Agnóstico.</strong><br>
⚡️ <strong>Funciona em qualquer hardware com <5MB RAM: 99% menos memória que OpenClaw e 98% mais barato que um Mac mini!</strong>
</p>
<p align="center">
<a href="../../../LICENSE-APACHE"><img src="https://img.shields.io/badge/license-MIT%20OR%20Apache%202.0-blue.svg" alt="Licença: MIT OR Apache-2.0" /></a>
<a href="../../../NOTICE"><img src="https://img.shields.io/github/contributors/zeroclaw-labs/zeroclaw?color=green" alt="Contribuidores" /></a>
<a href="https://buymeacoffee.com/argenistherose"><img src="https://img.shields.io/badge/Buy%20Me%20a%20Coffee-Donate-yellow.svg?style=flat&logo=buy-me-a-coffee" alt="Buy Me a Coffee" /></a>
<a href="https://x.com/zeroclawlabs?s=21"><img src="https://img.shields.io/badge/X-%40zeroclawlabs-000000?style=flat&logo=x&logoColor=white" alt="X: @zeroclawlabs" /></a>
<a href="https://zeroclawlabs.cn/group.jpg"><img src="https://img.shields.io/badge/WeChat-Group-B7D7A8?logo=wechat&logoColor=white" alt="Grupo WeChat" /></a>
<a href="https://t.me/zeroclawlabs"><img src="https://img.shields.io/badge/Telegram-%40zeroclawlabs-26A5E4?style=flat&logo=telegram&logoColor=white" alt="Telegram: @zeroclawlabs" /></a>
<a href="https://www.facebook.com/groups/zeroclaw"><img src="https://img.shields.io/badge/Facebook-Group-1877F2?style=flat&logo=facebook&logoColor=white" alt="Grupo Facebook" /></a>
<a href="https://www.reddit.com/r/zeroclawlabs/"><img src="https://img.shields.io/badge/Reddit-r%2Fzeroclawlabs-FF4500?style=flat&logo=reddit&logoColor=white" alt="Reddit: r/zeroclawlabs" /></a>
</p>
<p align="center">
Desenvolvido por estudantes e membros das comunidades de Harvard, MIT e Sundai.Club.
</p>
<p align="center">
🌐 <strong>Idiomas:</strong> <a href="../../../README.md">English</a> · <a href="../zh-CN/README.md">简体中文</a> · <a href="../es/README.md">Español</a> · <a href="README.md">Português</a> · <a href="../it/README.md">Italiano</a> · <a href="../ja/README.md">日本語</a> · <a href="../ru/README.md">Русский</a> · <a href="../fr/README.md">Français</a> · <a href="../vi/README.md">Tiếng Việt</a> · <a href="../el/README.md">Ελληνικά</a>
</p>
<p align="center">
<strong>Framework rápido, pequeno e totalmente autônomo</strong><br />
Implante em qualquer lugar. Troque qualquer coisa.
</p>
<p align="center">
ZeroClaw é o <strong>framework de runtime</strong> para fluxos de trabalho agênticos — infraestrutura que abstrai modelos, ferramentas, memória e execução para que agentes possam ser construídos uma vez e executados em qualquer lugar.
</p>
<p align="center"><code>Arquitetura baseada em traits · runtime seguro por padrão · provedor/canal/ferramenta trocável · tudo conectável</code></p>
### ✨ Características
- 🏎️ **Runtime Enxuto por Padrão:** Fluxos de trabalho comuns de CLI e status rodam em um envelope de memória de poucos megabytes em builds de release.
- 💰 **Implantação Econômica:** Projetado para placas de baixo custo e instâncias cloud pequenas sem dependências de runtime pesadas.
- ⚡ **Inícios a Frio Rápidos:** Runtime Rust de binário único mantém inicialização de comandos e daemon quase instantânea para operações diárias.
- 🌍 **Arquitetura Portátil:** Um fluxo de trabalho binary-first através de ARM, x86 e RISC-V com provedores/canais/ferramentas trocáveis.
- 🔍 **Fase de Pesquisa:** Coleta proativa de informações através de ferramentas antes da geração de resposta — reduz alucinações verificando fatos primeiro.
### Por que as equipes escolhem ZeroClaw
- **Enxuto por padrão:** binário Rust pequeno, inicialização rápida, pegada de memória baixa.
- **Seguro por design:** pareamento, sandboxing estrito, listas de permitidos explícitas, escopo de workspace.
- **Totalmente trocável:** sistemas principais são traits (provedores, canais, ferramentas, memória, túneis).
- **Sem lock-in:** suporte de provedor compatível com OpenAI + endpoints personalizados conectáveis.
## Início Rápido
### Opção 1: Homebrew (macOS/Linuxbrew)
```bash
brew install zeroclaw
```
### Opção 2: Clonar + Bootstrap
```bash
git clone https://github.com/zeroclaw-labs/zeroclaw.git
cd zeroclaw
./bootstrap.sh
```
> **Nota:** Builds a partir do fonte requerem ~2GB RAM e ~6GB disco. Para sistemas com recursos limitados, use `./bootstrap.sh --prefer-prebuilt` para baixar um binário pré-compilado.
### Opção 3: Cargo Install
```bash
cargo install zeroclaw
```
### Primeira Execução
```bash
# Iniciar o gateway (serve o API/UI do Dashboard Web)
zeroclaw gateway
# Abrir a URL do dashboard mostrada nos logs de inicialização
# (por padrão: http://127.0.0.1:3000/)
# Ou conversar diretamente
zeroclaw chat "Olá!"
```
Para opções de configuração detalhadas, consulte [docs/one-click-bootstrap.md](../../../docs/one-click-bootstrap.md).
---
## ⚠️ Repositório Oficial e Aviso de Representação
**Este é o único repositório oficial do ZeroClaw:**
> https://github.com/zeroclaw-labs/zeroclaw
Qualquer outro repositório, organização, domínio ou pacote que afirme ser "ZeroClaw" ou implique afiliação com ZeroClaw Labs **não está autorizado e não é afiliado com este projeto**.
Se você encontrar representação ou uso indevido de marca, por favor [abra uma issue](https://github.com/zeroclaw-labs/zeroclaw/issues).
---
## Licença
ZeroClaw tem licença dupla para máxima abertura e proteção de contribuidores:
| Licença | Caso de uso |
|---|---|
| [MIT](../../../LICENSE-MIT) | Open-source, pesquisa, acadêmico, uso pessoal |
| [Apache 2.0](../../../LICENSE-APACHE) | Proteção de patentes, institucional, implantação comercial |
Você pode escolher qualquer uma das licenças. **Os contribuidores concedem automaticamente direitos sob ambas** — consulte [CLA.md](../../../CLA.md) para o acordo completo de contribuidor.
## Contribuindo
Consulte [CONTRIBUTING.md](../../../CONTRIBUTING.md) e [CLA.md](../../../CLA.md). Implemente uma trait, envie um PR.
---
**ZeroClaw** — Sobrecarga zero. Compromisso zero. Implante em qualquer lugar. Troque qualquer coisa. 🦀
---
## Star History
<p align="center">
<a href="https://www.star-history.com/#zeroclaw-labs/zeroclaw&type=date&legend=top-left">
<picture>
<source media="(prefers-color-scheme: dark)" srcset="https://api.star-history.com/svg?repos=zeroclaw-labs/zeroclaw&type=date&theme=dark&legend=top-left" />
<source media="(prefers-color-scheme: light)" srcset="https://api.star-history.com/svg?repos=zeroclaw-labs/zeroclaw&type=date&legend=top-left" />
<img alt="Star History Chart" src="https://api.star-history.com/svg?repos=zeroclaw-labs/zeroclaw&type=date&legend=top-left" />
</picture>
</a>
</p>

View File

@ -20,3 +20,4 @@
## Последнее обновление
- `zeroclaw gateway` поддерживает `--new-pairing`: флаг очищает сохранённые paired-токены и генерирует новый код сопряжения.
- В английский оригинал добавлены поверхности миграции OpenClaw: `zeroclaw onboard --migrate-openclaw`, `zeroclaw migrate openclaw` и агентный инструмент `openclaw_migration` (полная локализация этих пунктов в процессе).

View File

@ -16,3 +16,12 @@
- Названия config keys не переводятся.
- Точное runtime-поведение определяется английским оригиналом.
## Обновление (2026-03-03)
- В секции `[agent]` добавлены `allowed_tools` и `denied_tools`.
- Если `allowed_tools` не пуст, основному агенту показываются только инструменты из allowlist.
- `denied_tools` применяется после allowlist и дополнительно исключает инструменты.
- Неизвестные элементы `allowed_tools` пропускаются (с debug-логом) и не ломают запуск.
- Если одновременно заданы `allowed_tools` и `denied_tools`, и после фильтрации не остается исполняемых инструментов, запуск завершается fail-fast с явной ошибкой конфигурации.
- Полная таблица параметров и пример остаются в английском `config-reference.md` в разделе `[agent]`.

View File

@ -16,3 +16,24 @@
- Provider ID и имена env переменных не переводятся.
- Нормативное описание поведения — в английском оригинале.
## Обновления
- 2026-03-01: добавлена поддержка провайдера StepFun (`stepfun`, алиасы `step`, `step-ai`, `step_ai`).
## StepFun (Кратко)
- Provider ID: `stepfun`
- Алиасы: `step`, `step-ai`, `step_ai`
- Base API URL: `https://api.stepfun.com/v1`
- Эндпоинты: `POST /v1/chat/completions`, `GET /v1/models`
- Переменная авторизации: `STEP_API_KEY` (fallback: `STEPFUN_API_KEY`)
- Модель по умолчанию: `step-3.5-flash`
Быстрая проверка:
```bash
export STEP_API_KEY="your-stepfun-api-key"
zeroclaw models refresh --provider stepfun
zeroclaw agent --provider stepfun --model step-3.5-flash -m "ping"
```

View File

@ -66,7 +66,7 @@ sudo apt-get update
sudo apt-get install -y pkg-config libssl-dev
# Clone zeroclaw (hoặc scp project của bạn)
git clone https://github.com/theonlyhennygod/zeroclaw.git
git clone https://github.com/zeroclaw-labs/zeroclaw.git
cd zeroclaw
# Build (~15–30 phút trên Uno Q)
@ -199,7 +199,7 @@ Giờ khi bạn nhắn tin cho Telegram bot *"Turn on the LED"* hoặc *"Set pin
| 2 | `ssh arduino@<IP>` |
| 3 | `curl -sSf https://sh.rustup.rs \| sh -s -- -y && source ~/.cargo/env` |
| 4 | `sudo apt-get install -y pkg-config libssl-dev` |
| 5 | `git clone https://github.com/theonlyhennygod/zeroclaw.git && cd zeroclaw` |
| 5 | `git clone https://github.com/zeroclaw-labs/zeroclaw.git && cd zeroclaw` |
| 6 | `cargo build --release --no-default-features` |
| 7 | `zeroclaw onboard --api-key KEY --provider openrouter` |
| 8 | Chỉnh sửa `~/.zeroclaw/config.toml` (thêm Telegram bot_token) |

View File

@ -105,7 +105,7 @@ Các kiểm tra chặn merge nên giữ nhỏ và mang tính quyết định. C
8. Cảnh báo drift tính tái lập build: kiểm tra artifact của `.github/workflows/ci-reproducible-build.yml`.
9. Lỗi provenance/ký số: kiểm tra log và bundle artifact của `.github/workflows/ci-supply-chain-provenance.yml`.
10. Sự cố lập kế hoạch/thực thi rollback: kiểm tra summary + artifact `ci-rollback-plan` của `.github/workflows/ci-rollback.yml`.
11. PR intake thất bại: kiểm tra comment sticky `.github/workflows/pr-intake-checks.yml` và run log.
11. PR intake thất bại: kiểm tra comment sticky `.github/workflows/pr-intake-checks.yml` và run log. Nếu policy intake vừa thay đổi, hãy kích hoạt sự kiện `pull_request_target` mới (ví dụ close/reopen PR) vì `Re-run jobs` có thể dùng lại snapshot workflow cũ.
12. Lỗi parity chính sách nhãn: kiểm tra `.github/workflows/pr-label-policy-check.yml`.
13. Lỗi tài liệu trong CI: kiểm tra log job `docs-quality` trong `.github/workflows/ci-run.yml`.
14. Lỗi strict delta lint trong CI: kiểm tra log job `lint-strict-delta` và so sánh với phạm vi diff `BASE_SHA`.
@ -115,7 +115,8 @@ Các kiểm tra chặn merge nên giữ nhỏ và mang tính quyết định. C
- Giữ các kiểm tra chặn merge mang tính quyết định và tái tạo được (`--locked` khi áp dụng được).
- Đảm bảo tương thích merge queue bằng cách hỗ trợ `merge_group` cho các workflow bắt buộc (`ci-run`, `sec-audit`, `sec-codeql`).
- Bắt buộc PR liên kết với Linear issue key (`RMN-*`/`CDV-*`/`COM-*`) qua PR intake checks.
- Khuyến nghị PR liên kết với Linear issue key (`RMN-*`/`CDV-*`/`COM-*`) khi có để truy vết (PR intake checks chỉ cảnh báo, không chặn merge).
- Với backfill PR intake, ưu tiên kích hoạt sự kiện PR mới thay vì rerun run cũ để đảm bảo check đánh giá theo snapshot workflow/script mới nhất.
- Bắt buộc entry `advisories.ignore` trong `deny.toml` dùng object có `id` + `reason` (được kiểm tra bởi `deny_policy_guard.py`).
- Giữ metadata governance cho deny ignore trong `.github/security/deny-ignore-governance.json` luôn cập nhật (owner/reason/expiry/ticket được kiểm tra bởi `deny_policy_guard.py`).
- Giữ metadata quản trị allowlist gitleaks trong `.github/security/gitleaks-allowlist-governance.json` luôn cập nhật (owner/reason/expiry/ticket được kiểm tra bởi `secrets_governance_guard.py`).

View File

@ -36,6 +36,8 @@ Xác minh lần cuối: **2026-02-28**.
- `zeroclaw onboard --channels-only`
- `zeroclaw onboard --api-key <KEY> --provider <ID> --memory <sqlite|lucid|markdown|none>`
- `zeroclaw onboard --api-key <KEY> --provider <ID> --model <MODEL_ID> --memory <sqlite|lucid|markdown|none>`
- `zeroclaw onboard --migrate-openclaw`
- `zeroclaw onboard --migrate-openclaw --openclaw-source <PATH> --openclaw-config <PATH>`
### `agent`
@ -77,7 +79,7 @@ Xác minh lần cuối: **2026-02-28**.
- `zeroclaw models refresh --provider <ID>`
- `zeroclaw models refresh --force`
`models refresh` hiện hỗ trợ làm mới danh mục trực tiếp cho các provider: `openrouter`, `openai`, `anthropic`, `groq`, `mistral`, `deepseek`, `xai`, `together-ai`, `gemini`, `ollama`, `llamacpp`, `sglang`, `vllm`, `astrai`, `venice`, `fireworks`, `cohere`, `moonshot`, `glm`, `zai`, `qwen`, `volcengine` (alias `doubao`/`ark`), `siliconflow` và `nvidia`.
`models refresh` hiện hỗ trợ làm mới danh mục trực tiếp cho các provider: `openrouter`, `openai`, `anthropic`, `groq`, `mistral`, `deepseek`, `xai`, `together-ai`, `gemini`, `ollama`, `llamacpp`, `sglang`, `vllm`, `astrai`, `venice`, `fireworks`, `cohere`, `moonshot`, `stepfun`, `glm`, `zai`, `qwen`, `volcengine` (alias `doubao`/`ark`), `siliconflow` và `nvidia`.
### `channel`
@ -120,7 +122,9 @@ Skill manifest (`SKILL.toml`) hỗ trợ `prompts` và `[[tools]]`; cả hai đ
### `migrate`
- `zeroclaw migrate openclaw [--source <path>] [--dry-run]`
- `zeroclaw migrate openclaw [--source <path>] [--source-config <path>] [--dry-run]`
Gợi ý: trong hội thoại agent, bề mặt tool `openclaw_migration` cho phép preview hoặc áp dụng migration bằng tool-call có kiểm soát quyền.
### `config`

View File

@ -81,6 +81,8 @@ Lưu ý cho người dùng container:
| `max_history_messages` | `50` | Số tin nhắn lịch sử tối đa giữ lại mỗi phiên |
| `parallel_tools` | `false` | Bật thực thi tool song song trong một lượt |
| `tool_dispatcher` | `auto` | Chiến lược dispatch tool |
| `allowed_tools` | `[]` | Allowlist tool cho agent chính. Khi không rỗng, chỉ các tool liệt kê mới được đưa vào context |
| `denied_tools` | `[]` | Denylist tool cho agent chính, áp dụng sau `allowed_tools` |
Lưu ý:
@ -88,6 +90,25 @@ Lưu ý:
- Nếu tin nhắn kênh vượt giá trị này, runtime trả về: `Agent exceeded maximum tool iterations (<value>)`.
- Trong vòng lặp tool của CLI, gateway và channel, các lời gọi tool độc lập được thực thi đồng thời mặc định khi không cần phê duyệt; thứ tự kết quả giữ ổn định.
- `parallel_tools` áp dụng cho API `Agent::turn()`. Không ảnh hưởng đến vòng lặp runtime của CLI, gateway hay channel.
- `allowed_tools` / `denied_tools` được áp dụng lúc khởi động trước khi dựng prompt. Tool bị loại sẽ không xuất hiện trong system prompt hoặc tool specs.
- Mục không khớp trong `allowed_tools` được bỏ qua (không làm lỗi khởi động) và ghi log mức debug.
- Nếu đồng thời đặt `allowed_tools` và `denied_tools` rồi denylist loại toàn bộ tool đã allow, tiến trình sẽ fail-fast với lỗi cấu hình rõ ràng.
Ví dụ:
```toml
[agent]
allowed_tools = [
"delegate",
"subagent_spawn",
"subagent_list",
"subagent_manage",
"memory_recall",
"memory_store",
"task_plan",
]
denied_tools = ["shell", "file_write", "browser_open"]
```
## `[agents.<name>]`
@ -530,6 +551,7 @@ Lưu ý:
- Allowlist kênh mặc định từ chối tất cả (`[]` nghĩa là từ chối tất cả)
- Gateway mặc định yêu cầu ghép nối
- Mặc định chặn public bind
- `security.canary_tokens = true` bật canary token theo từng lượt để phát hiện rò rỉ ngữ cảnh hệ thống
## Lệnh kiểm tra

View File

@ -2,7 +2,7 @@
Tài liệu này liệt kê các provider ID, alias và biến môi trường chứa thông tin xác thực.
Cập nhật lần cuối: **2026-02-28**.
Cập nhật lần cuối: **2026-03-01**.
## Cách liệt kê các Provider
@ -33,6 +33,7 @@ Với chuỗi provider dự phòng (`reliability.fallback_providers`), mỗi pro
| `vercel` | `vercel-ai` | Không | `VERCEL_API_KEY` |
| `cloudflare` | `cloudflare-ai` | Không | `CLOUDFLARE_API_KEY` |
| `moonshot` | `kimi` | Không | `MOONSHOT_API_KEY` |
| `stepfun` | `step`, `step-ai`, `step_ai` | Không | `STEP_API_KEY`, `STEPFUN_API_KEY` |
| `kimi-code` | `kimi_coding`, `kimi_for_coding` | Không | `KIMI_CODE_API_KEY`, `MOONSHOT_API_KEY` |
| `synthetic` | — | Không | `SYNTHETIC_API_KEY` |
| `opencode` | `opencode-zen` | Không | `OPENCODE_API_KEY` |
@ -87,6 +88,29 @@ zeroclaw models refresh --provider volcengine
zeroclaw agent --provider volcengine --model doubao-1-5-pro-32k-250115 -m "ping"
```
### Ghi chú về StepFun
- Provider ID: `stepfun` (alias: `step`, `step-ai`, `step_ai`)
- Base API URL: `https://api.stepfun.com/v1`
- Chat endpoint: `/chat/completions`
- Model discovery endpoint: `/models`
- Xác thực: `STEP_API_KEY` (fallback: `STEPFUN_API_KEY`)
- Model mặc định: `step-3.5-flash`
Ví dụ thiết lập nhanh:
```bash
export STEP_API_KEY="your-stepfun-api-key"
zeroclaw onboard --provider stepfun --api-key "$STEP_API_KEY" --model step-3.5-flash --force
```
Kiểm tra nhanh:
```bash
zeroclaw models refresh --provider stepfun
zeroclaw agent --provider stepfun --model step-3.5-flash -m "ping"
```
### Ghi chú về SiliconFlow
- Provider ID: `siliconflow` (alias: `silicon-cloud`, `siliconcloud`)

View File

@ -20,3 +20,4 @@
## 最近更新
- `zeroclaw gateway` 新增 `--new-pairing` 参数,可清空已配对 token 并在网关启动时生成新的配对码。
- OpenClaw 迁移相关命令已加入英文原文:`zeroclaw onboard --migrate-openclaw`、`zeroclaw migrate openclaw`,并新增 agent 工具 `openclaw_migration`(本地化条目待补全,先以英文原文为准)。

View File

@ -16,3 +16,12 @@
- 配置键保持英文,避免本地化改写键名。
- 生产行为以英文原文定义为准。
## 更新说明(2026-03-03)
- `[agent]` 新增 `allowed_tools` 和 `denied_tools`。
- `allowed_tools` 非空时,只向主代理暴露白名单工具。
- `denied_tools` 在白名单过滤后继续移除工具。
- 未匹配的 `allowed_tools` 项会被跳过(调试日志提示),不会导致启动失败。
- 若同时配置 `allowed_tools` 和 `denied_tools` 且最终将可执行工具全部移除,启动会快速失败并给出明确错误。
- 详细字段表与示例见英文原文 `config-reference.md``[agent]` 小节。

View File

@ -16,3 +16,25 @@
- Provider ID 与环境变量名称保持英文。
- 规范与行为说明以英文原文为准。
## 更新记录
- 2026-03-01:新增 StepFun provider 对齐信息(`stepfun` / `step` / `step-ai` / `step_ai`)。
## StepFun 快速说明
- Provider ID`stepfun`
- 别名:`step`、`step-ai`、`step_ai`
- Base API URL`https://api.stepfun.com/v1`
- 模型列表端点:`GET /v1/models`
- 对话端点:`POST /v1/chat/completions`
- 鉴权变量:`STEP_API_KEY`(回退:`STEPFUN_API_KEY`)
- 默认模型:`step-3.5-flash`
快速验证:
```bash
export STEP_API_KEY="your-stepfun-api-key"
zeroclaw models refresh --provider stepfun
zeroclaw agent --provider stepfun --model step-3.5-flash -m "ping"
```

View File

@ -2,7 +2,31 @@
This guide walks you through migrating an OpenClaw deployment to ZeroClaw. It covers configuration conversion, endpoint changes, and the architectural differences you need to know.
## Quick Start
## Quick Start (Built-in Merge Migration)
ZeroClaw now includes a built-in OpenClaw migration flow:
```bash
# Preview migration report (no writes)
zeroclaw migrate openclaw --dry-run
# Apply merge migration (memory + config + agents)
zeroclaw migrate openclaw
# Optional: run migration during onboarding
zeroclaw onboard --migrate-openclaw
```
Localization status: this guide currently ships in English only. Localized follow-through for `zh-CN`, `ja`, `ru`, `fr`, `vi`, and `el` is deferred; translators should carry over the exact CLI forms `zeroclaw migrate openclaw` and `zeroclaw onboard --migrate-openclaw` first.
Default migration semantics are **merge-first**:
- Existing ZeroClaw values are preserved (no blind overwrite).
- Missing provider/model/channel/agent fields are filled from OpenClaw.
- List-like fields (for example agent tools / allowlists) are union-merged with de-duplication.
- Memory import skips duplicate content to reduce noise while keeping existing data.
## Legacy Conversion Script (Optional)
```bash
# 1. Convert your OpenClaw config

View File

@ -60,9 +60,29 @@ If verification fails, the gateway returns `401 Unauthorized`.
## 5. Message routing behavior
- ZeroClaw ignores bot-originated webhook events (`actorType = bots`).
- ZeroClaw accepts both payload variants:
- legacy Talk webhook payloads (`type = "message"`)
- Activity Streams 2.0 payloads (`type = "Create"` + `object.type = "Note"`)
- ZeroClaw ignores bot-originated webhook events (`actorType = bots` or `actor.type = "Application"`).
- ZeroClaw ignores non-message/system events.
- Reply routing uses the Talk room token from the webhook payload.
- Reply routing uses the Talk room token from `object.token` (legacy) or `target.id` (AS2).
- For actor allowlists, both full (`users/alice`) and short (`alice`) IDs are accepted.
Example Activity Streams 2.0 webhook payload:
```json
{
"type": "Create",
"actor": { "type": "Person", "id": "users/test", "name": "test" },
"object": {
"type": "Note",
"id": "177",
"content": "{\"message\":\"hello\",\"parameters\":[]}",
"mediaType": "text/markdown"
},
"target": { "type": "Collection", "id": "yyrubgfp", "name": "TESTCHAT" }
}
```
## 6. Quick validation checklist

View File

@ -2,7 +2,7 @@
This page defines the fastest supported path to install and initialize ZeroClaw.
Last verified: **February 20, 2026**.
Last verified: **March 4, 2026**.
## Option 0: Homebrew (macOS/Linuxbrew)
@ -22,6 +22,7 @@ What it does by default:
1. `cargo build --release --locked`
2. `cargo install --path . --force --locked`
3. In interactive no-flag sessions, launches TUI onboarding (`zeroclaw onboard --interactive-ui`)
### Resource preflight and pre-built flow
@ -50,7 +51,8 @@ To bypass pre-built flow and force source compilation:
## Dual-mode bootstrap
Default behavior is **app-only** (build/install ZeroClaw) and expects existing Rust toolchain.
Default behavior builds/installs ZeroClaw and, for interactive no-flag runs, starts TUI onboarding.
It still expects an existing Rust toolchain unless you enable bootstrap flags below.
For fresh machines, enable environment bootstrap explicitly:
@ -69,11 +71,19 @@ Notes:
## Option B: Remote one-liner
```bash
curl -fsSL https://raw.githubusercontent.com/zeroclaw-labs/zeroclaw/main/scripts/bootstrap.sh | bash
curl -fsSL https://zeroclawlabs.ai/install.sh | bash
```
Equivalent GitHub-hosted installer entrypoint:
```bash
curl -fsSL https://raw.githubusercontent.com/zeroclaw-labs/zeroclaw/main/install.sh | bash
```
For high-security environments, prefer Option A so you can review the script before execution.
No-arg interactive runs default to full-screen TUI onboarding.
Legacy compatibility:
```bash
@ -124,6 +134,8 @@ ZEROCLAW_API_KEY="sk-..." ZEROCLAW_PROVIDER="openrouter" ./bootstrap.sh --onboar
./bootstrap.sh --interactive-onboard
```
This launches the full-screen TUI onboarding flow (`zeroclaw onboard --interactive-ui`).
## Useful flags
- `--install-system-deps`

View File

@ -0,0 +1,108 @@
# CI Runner Incident Report: main branch red on 2026-03-02
This report is for CI runner maintainers to debug runner health regressions first, before restoring self-hosted execution for critical workflows.
## Scope
- Repo: `zeroclaw-labs/zeroclaw`
- Date window: 2026-03-02 (UTC)
- Impacted checks:
- `CI Supply Chain Provenance / Build + Provenance Bundle (push)`
- `Test E2E / Integration / E2E Tests (push)`
## Executive Summary
`main` became red due to runner-environment failures in self-hosted pools.
Observed failure classes:
1. Missing C compiler linker (`cc`) causing Rust build-script compile failures.
2. Disk exhaustion (`No space left on device`) on at least one self-hosted E2E run.
These are host-level failures and were reproduced across unrelated merge commits.
## Evidence
| Time (UTC) | Workflow run | Commit | Runner | Failure signature |
|---|---|---|---|---|
| 2026-03-02T02:04:42Z | https://github.com/zeroclaw-labs/zeroclaw/actions/runs/22558446611 | `4b16ac92197d98bd64a43ae750d473b9f1c6d66d` | `runner-a` (`self-hosted-pool`) | `error: linker 'cc' not found` + `No such file or directory (os error 2)` |
| 2026-03-02T02:04:42Z | https://github.com/zeroclaw-labs/zeroclaw/actions/runs/22558446636 | `4b16ac92197d98bd64a43ae750d473b9f1c6d66d` | `runner-b` (`self-hosted-pool`) | `error: linker 'cc' not found` + `No such file or directory (os error 2)` |
| 2026-03-02T01:54:26Z | https://github.com/zeroclaw-labs/zeroclaw/actions/runs/22558247107 | `b8e5707d180004fe00fa12bfacd1bcf29f195457` | `runner-c` (`self-hosted-pool`) | `error: linker 'cc' not found` + `No such file or directory (os error 2)` |
| 2026-03-02T01:25:15Z | https://github.com/zeroclaw-labs/zeroclaw/actions/runs/22557668884 | `64a2a271c74fc84276e98231196b749f29276d17` | `runner-d` (`self-hosted-pool`) | `error: linker 'cc' not found` + `No such file or directory (os error 2)` |
| 2026-03-02T01:25:15Z | https://github.com/zeroclaw-labs/zeroclaw/actions/runs/22557668895 | `64a2a271c74fc84276e98231196b749f29276d17` | `runner-e` (`self-hosted-pool`) | `No space left on device` |
## Why this is runner infra
- Same `cc` failure appears in multiple independent merges.
- Failure happens within ~11-15 seconds during bootstrap/compile stage.
- Similar test lane succeeded in nearby window on a different runner host, indicating host drift rather than deterministic code break.
## Debug Procedure (Runner Maintainers)
Run on each affected host and attach outputs to incident ticket.
```bash
# identity
hostname
uname -a
# required build toolchain
command -v cc || true
command -v gcc || true
command -v clang || true
command -v rustc || true
command -v cargo || true
ls -l /usr/bin/cc || true
# versions
cc --version || true
gcc --version | head -n1 || true
clang --version | head -n1 || true
rustc --version || true
cargo --version || true
# disk and inode pressure
df -h /
df -h /opt/actions-runners || true
df -Pi /
df -Pi /opt/actions-runners || true
# top disk consumers
du -h /opt/actions-runners --max-depth=2 2>/dev/null | sort -h | tail -n 40
# runner service logs (service name may vary)
sudo journalctl -u actions.runner\* --since "2026-03-02 00:00:00" -n 300 --no-pager || true
```
If `cc` is missing:
```bash
sudo apt-get update
sudo apt-get install -y build-essential pkg-config clang
command -v cc || sudo ln -sf /usr/bin/gcc /usr/bin/cc
cc --version
```
If disk is low / inode pressure is high:
```bash
sudo du -h /opt/actions-runners --max-depth=3 | sort -h | tail -n 60
# clean stale _work/_temp/_diag artifacts per runner ops policy
```
## Mitigation Applied in This PR
1. Immediate unblock on `main`:
- `test-e2e.yml` moved to `ubuntu-22.04`.
- `ci-supply-chain-provenance.yml` moved to `ubuntu-22.04`.
2. Preflight hardening:
- added explicit checks for `cc` and free disk (>=10 GiB) in those jobs.
3. Root-cause visibility:
- `test-self-hosted.yml` now includes compiler + disk/inode checks and daily schedule.
## Exit Criteria to move lanes back to self-hosted
1. Self-hosted health workflow passes on representative nodes.
2. 10 consecutive critical runs pass on self-hosted without `cc` or ENOSPC failures.
3. Runner image baseline explicitly includes compiler/runtime prerequisites and cleanup policy.
4. Health checks remain stable for 24h after rollback from hosted fallback.

View File

@ -7,9 +7,14 @@ This document maps merge-critical workflows to expected check names.
| Required check name | Source workflow | Scope |
| --- | --- | --- |
| `CI Required Gate` | `.github/workflows/ci-run.yml` | core Rust/doc merge gate |
| `Security Audit` | `.github/workflows/sec-audit.yml` | dependencies, secrets, governance |
| `Feature Matrix Summary` | `.github/workflows/feature-matrix.yml` | feature-combination compile matrix |
| `Workflow Sanity` | `.github/workflows/workflow-sanity.yml` | workflow syntax and lint |
| `Security Required Gate` | `.github/workflows/sec-audit.yml` | aggregated security merge gate |
Supplemental monitors (non-blocking unless added to branch protection contexts):
- `CI Change Audit` (`.github/workflows/ci-change-audit.yml`)
- `CodeQL Analysis` (`.github/workflows/sec-codeql.yml`)
- `Workflow Sanity` (`.github/workflows/workflow-sanity.yml`)
- `Feature Matrix Summary` (`.github/workflows/feature-matrix.yml`)
Feature matrix lane check names (informational, non-required):
@ -28,12 +33,14 @@ Feature matrix lane check names (informational, non-required):
## Verification Procedure
1. Resolve latest workflow run IDs:
1. Check active branch protection required contexts:
- `gh api repos/zeroclaw-labs/zeroclaw/branches/main/protection --jq '.required_status_checks.contexts[]'`
2. Resolve latest workflow run IDs:
- `gh run list --repo zeroclaw-labs/zeroclaw --workflow feature-matrix.yml --limit 1`
- `gh run list --repo zeroclaw-labs/zeroclaw --workflow ci-run.yml --limit 1`
2. Enumerate check/job names and compare to this mapping:
3. Enumerate check/job names and compare to this mapping:
- `gh run view <run_id> --repo zeroclaw-labs/zeroclaw --json jobs --jq '.jobs[].name'`
3. If any merge-critical check name changed, update this file before changing branch protection policy.
4. If any merge-critical check name changed, update this file before changing branch protection policy.
## Notes

View File

@ -83,6 +83,20 @@ Safety behavior:
4. Drain runners, then apply cleanup.
5. Re-run health report and confirm queue/availability recovery.
## 3.1) Build Smoke Exit `143` Triage
When `CI Run / Build (Smoke)` fails with `Process completed with exit code 143`:
1. Treat it as external termination (SIGTERM), not a compile error.
2. Confirm the build step ended with `Terminated` and no Rust compiler diagnostic was emitted.
3. Check current pool pressure (`runner_health_report.py`) before retrying.
4. Re-run once after pressure drops; persistent `143` should be handled as runner-capacity remediation.
Important:
- `error: cannot install while Rust is installed` from rustup bootstrap can appear in setup logs on pre-provisioned runners.
- That message is not itself a terminal failure when subsequent `rustup toolchain install` and `rustup default` succeed.
## 4) Queue Hygiene (Dry-Run First)
Dry-run example:

View File

@ -0,0 +1,178 @@
# WASM Plugin Runtime Design (Capability-Segmented, WASI Preview 2)
## Context
ZeroClaw currently uses in-process trait/factory extension points for providers, tools, channels, memory, runtime adapters, observers, peripherals, and hooks. Hook interfaces exist, but several lifecycle events are either missing or not wired in runtime paths.
## Objective
Design and implement a production-safe system WASM plugin runtime that supports:
- hook plugins
- tool plugins
- provider plugins
- `BeforeCompaction` / `AfterCompaction` hook points
- `ToolResultPersist` modifying hook
- `ObserverBridge` (legacy observer -> hook adapter)
- `fire_gateway_stop` runtime wiring
- built-in `session_memory` and `boot_script` hooks
- hot-reload without service restart
## Chosen Direction
Capability-segmented plugin API on WASI Preview 2 + WIT.
Why:
- cleaner authoring surface than a monolithic plugin ABI
- stronger permission boundaries per capability
- easier long-term compatibility/versioning
- lower blast radius for failures and upgrades
## Architecture
### 1. Plugin Subsystem
Add `src/plugins/` as first-class subsystem:
- `src/plugins/mod.rs`
- `src/plugins/traits.rs`
- `src/plugins/manifest.rs`
- `src/plugins/runtime.rs`
- `src/plugins/registry.rs`
- `src/plugins/hot_reload.rs`
- `src/plugins/bridge/observer.rs`
### 2. WIT Contracts
Define separate contracts under `wit/zeroclaw/`:
- `hooks/v1`
- `tools/v1`
- `providers/v1`
Each contract has independent semver policy and compatibility checks.
### 3. Capability Model
Manifest-declared capabilities are deny-by-default.
Host grants capability-specific rights through config policy.
Examples:
- `hooks`
- `tools.execute`
- `providers.chat`
- optional I/O scopes (network/fs/secrets) via explicit allowlists
### 4. Runtime Lifecycle
1. Discover plugin manifests in configured directories.
2. Validate metadata (ABI version, checksum/signature policy, capabilities).
3. Instantiate plugin runtime components in immutable snapshot.
4. Register plugin-provided hook handlers, tools, and providers.
5. Atomically publish snapshot.
### 5. Dispatch Model
#### Hooks
- Void hooks: bounded parallel fanout + timeout.
- Modifying hooks: deterministic ordered pipeline (priority desc, stable plugin-id tie-breaker).
#### Tools
- Merge native and plugin tool specs.
- Route tool calls by ownership.
- Keep host-side security policy enforcement before plugin execution.
- Apply `ToolResultPersist` modifying hook before final persistence and feedback.
#### Providers
- Extend provider factory lookup to include plugin provider registry.
- Plugin providers participate in existing resilience and routing wrappers.
### 6. New Hook Points
Add and wire:
- `BeforeCompaction`
- `AfterCompaction`
- `ToolResultPersist`
- `fire_gateway_stop` call site on graceful gateway shutdown
### 7. Built-in Hooks
Provide built-ins loaded through same hook registry:
- `session_memory`
- `boot_script`
This keeps runtime behavior consistent between native and plugin hooks.
### 8. ObserverBridge
Add adapter that maps observer events into hook events, preserving legacy observer flows while enabling hook-based plugin processing.
### 9. Hot Reload
- Watch plugin files/manifests.
- Rebuild and validate candidate snapshot fully.
- Atomic swap on success.
- Keep old snapshot if reload fails.
- In-flight invocations continue on the snapshot they started with.
## Safety and Reliability
- Per-plugin memory/CPU/time/concurrency limits.
- Invocation timeout and trap isolation.
- Circuit breaker for repeatedly failing plugins.
- No plugin error may crash core runtime path.
- Sensitive payload redaction at host observability boundary.
## Compatibility Strategy
- Independent major-version compatibility checks per WIT package.
- Reject incompatible plugins at load time with clear diagnostics.
- Preserve native implementations as fallback path.
## Testing Strategy
### Unit
- manifest parsing and capability policy
- ABI compatibility checks
- hook ordering and cancellation semantics
- timeout/trap handling
### Integration
- plugin tool registration/execution
- plugin provider routing + fallback
- compaction hook sequence
- gateway stop hook firing
- hot-reload swap/rollback behavior
### Regression
- native-only mode unchanged when plugins disabled
- security policy enforcement remains intact
## Rollout Plan
1. Foundation: subsystem + config + ABI skeleton.
2. Hook integration + new hook points + built-ins.
3. Tool plugin routing.
4. Provider plugin routing.
5. Hot reload + ObserverBridge.
6. SDK + docs + example plugins.
## Non-goals (v1)
- dynamic cross-plugin dependency resolution
- distributed remote plugin registries
- automatic plugin marketplace installation
## Risks
- ABI churn if contracts are not tightly scoped.
- runtime overhead with poorly bounded plugin execution.
- operational complexity from hot-reload races.
## Mitigations
- capability segmentation + strict semver.
- hard limits and circuit breakers.
- immutable snapshot architecture for reload safety.

View File

@ -0,0 +1,415 @@
# WASM Plugin Runtime Implementation Plan
> **For Claude:** REQUIRED SUB-SKILL: Use superpowers:executing-plans to implement this plan
> task-by-task.
**Goal:** Build a WASI Preview 2 + WIT plugin runtime that supports hook/tool/provider plugins, new
hook points, ObserverBridge, and hot-reload with safe fallback.
**Architecture:** Add a capability-segmented plugin subsystem (`src/plugins/**`) and route
hook/tool/provider dispatch through immutable plugin snapshots. Keep native implementations intact
as fallback. Enforce deny-by-default capability policy with host-side limits and deterministic
modifying-hook ordering.
**Tech Stack:** Rust, Tokio, Wasmtime (component model), WASI Preview 2, WIT, serde, notify,
existing ZeroClaw traits/factories.
---
## Task 1: Add plugin config schema and defaults
**Files:**
- Modify: `src/config/schema.rs`
- Modify: `src/config/mod.rs`
- Test: `src/config/schema.rs` (inline tests)
- Step 1: Write the failing test
```rust
#[test]
fn plugins_config_defaults_safe() {
let cfg = HooksConfig::default();
// replace with PluginConfig once added
assert!(cfg.enabled);
}
```
- Step 2: Run test to verify it fails Run: `cargo test --locked config::schema -- --nocapture`
Expected: FAIL because `PluginsConfig` fields/assertions do not exist yet.
- Step 3: Write minimal implementation
- Add `PluginsConfig` with:
- `enabled: bool`
- `dirs: Vec<String>`
- `hot_reload: bool`
- `limits` (timeout/memory/concurrency)
- capability allow/deny lists
- Add defaults: disabled-by-default runtime loading, deny-by-default capabilities.
- Step 4: Run test to verify it passes Run: `cargo test --locked config::schema -- --nocapture`
Expected: PASS.
- Step 5: Commit
```bash
git add src/config/schema.rs src/config/mod.rs
git commit -m "feat(config): add plugin runtime config schema"
```
## Task 2: Scaffold plugin subsystem modules
**Files:**
- Create: `src/plugins/mod.rs`
- Create: `src/plugins/traits.rs`
- Create: `src/plugins/manifest.rs`
- Create: `src/plugins/runtime.rs`
- Create: `src/plugins/registry.rs`
- Create: `src/plugins/hot_reload.rs`
- Create: `src/plugins/bridge/mod.rs`
- Create: `src/plugins/bridge/observer.rs`
- Modify: `src/lib.rs`
- Test: inline tests in new modules
- Step 1: Write the failing test
```rust
#[test]
fn plugin_registry_empty_by_default() {
let reg = PluginRegistry::default();
assert!(reg.hooks().is_empty());
}
```
- Step 2: Run test to verify it fails Run: `cargo test --locked plugins:: -- --nocapture`
Expected: FAIL because modules/types do not exist.
- Step 3: Write minimal implementation
- Add module exports and basic structs/enums.
- Keep runtime no-op while preserving compile-time interfaces.
- Step 4: Run test to verify it passes Run: `cargo test --locked plugins:: -- --nocapture`
Expected: PASS.
- Step 5: Commit
```bash
git add src/plugins src/lib.rs
git commit -m "feat(plugins): scaffold plugin subsystem modules"
```
## Task 3: Add WIT capability contracts and ABI version checks
**Files:**
- Create: `wit/zeroclaw/hooks/v1/*.wit`
- Create: `wit/zeroclaw/tools/v1/*.wit`
- Create: `wit/zeroclaw/providers/v1/*.wit`
- Modify: `src/plugins/manifest.rs`
- Test: `src/plugins/manifest.rs` inline tests
- Step 1: Write the failing test
```rust
#[test]
fn manifest_rejects_incompatible_wit_major() {
let m = PluginManifest { wit_package: "zeroclaw:hooks@2.0.0".into(), ..Default::default() };
assert!(validate_manifest(&m).is_err());
}
```
- Step 2: Run test to verify it fails Run:
`cargo test --locked manifest_rejects_incompatible_wit_major -- --nocapture` Expected: FAIL before
validator exists.
- Step 3: Write minimal implementation
- Add WIT package declarations and version policy parser.
- Validate major compatibility per capability package.
- Step 4: Run test to verify it passes Run:
`cargo test --locked manifest_rejects_incompatible_wit_major -- --nocapture` Expected: PASS.
- Step 5: Commit
```bash
git add wit src/plugins/manifest.rs
git commit -m "feat(plugins): add wit contracts and abi compatibility checks"
```
## Task 4: Hook runtime integration and missing lifecycle wiring
**Files:**
- Modify: `src/hooks/traits.rs`
- Modify: `src/hooks/runner.rs`
- Modify: `src/gateway/mod.rs`
- Modify: `src/agent/loop_.rs`
- Modify: `src/channels/mod.rs`
- Test: inline tests in `src/hooks/runner.rs`, `src/agent/loop_.rs`
- Step 1: Write the failing test
```rust
#[tokio::test]
async fn fire_gateway_stop_is_called_on_shutdown_path() {
// assert hook observed stop signal
}
```
- Step 2: Run test to verify it fails Run:
`cargo test --locked fire_gateway_stop_is_called_on_shutdown_path -- --nocapture` Expected: FAIL due
to missing call site.
- Step 3: Write minimal implementation
- Add hook events: `BeforeCompaction`, `AfterCompaction`, `ToolResultPersist`.
- Wire `fire_gateway_stop` in graceful shutdown path.
- Trigger compaction hooks around compaction flows.
- Step 4: Run test to verify it passes Run: `cargo test --locked hooks::runner -- --nocapture`
Expected: PASS.
- Step 5: Commit
```bash
git add src/hooks src/gateway/mod.rs src/agent/loop_.rs src/channels/mod.rs
git commit -m "feat(hooks): add compaction/persist hooks and gateway stop lifecycle wiring"
```
## Task 5: Implement built-in `session_memory` and `boot_script` hooks
**Files:**
- Create: `src/hooks/builtin/session_memory.rs`
- Create: `src/hooks/builtin/boot_script.rs`
- Modify: `src/hooks/builtin/mod.rs`
- Modify: `src/config/schema.rs`
- Modify: `src/agent/loop_.rs`
- Modify: `src/channels/mod.rs`
- Test: inline tests in new builtins
- Step 1: Write the failing test
```rust
#[tokio::test]
async fn session_memory_hook_persists_and_recalls_expected_context() {}
```
- Step 2: Run test to verify it fails Run:
`cargo test --locked session_memory_hook -- --nocapture` Expected: FAIL before hook exists.
- Step 3: Write minimal implementation
- Register both built-ins through `HookRunner` initialization paths.
- `session_memory`: persist/retrieve session-scoped summaries.
- `boot_script`: mutate prompt/context at startup/session begin.
- Step 4: Run test to verify it passes Run: `cargo test --locked hooks::builtin -- --nocapture`
Expected: PASS.
- Step 5: Commit
```bash
git add src/hooks/builtin src/config/schema.rs src/agent/loop_.rs src/channels/mod.rs
git commit -m "feat(hooks): add session_memory and boot_script built-in hooks"
```
## Task 6: Add plugin tool registration and execution routing
**Files:**
- Modify: `src/tools/mod.rs`
- Modify: `src/tools/traits.rs`
- Modify: `src/agent/loop_.rs`
- Modify: `src/plugins/registry.rs`
- Modify: `src/plugins/runtime.rs`
- Test: `src/agent/loop_.rs` inline tests, `src/tools/mod.rs` tests
- Step 1: Write the failing test
```rust
#[tokio::test]
async fn plugin_tool_spec_is_visible_and_executable() {}
```
- Step 2: Run test to verify it fails Run:
`cargo test --locked plugin_tool_spec_is_visible_and_executable -- --nocapture` Expected: FAIL
before routing exists.
- Step 3: Write minimal implementation
- Merge plugin tool specs with native specs.
- Route execution by owner.
- Keep host security checks before plugin invocation.
- Apply `ToolResultPersist` before persistence/feedback.
- Step 4: Run test to verify it passes Run: `cargo test --locked agent::loop_ -- --nocapture`
Expected: PASS for plugin tool tests.
- Step 5: Commit
```bash
git add src/tools/mod.rs src/tools/traits.rs src/agent/loop_.rs src/plugins/registry.rs src/plugins/runtime.rs
git commit -m "feat(tools): support wasm plugin tool registration and execution"
```
## Task 7: Add plugin provider registration and factory integration
**Files:**
- Modify: `src/providers/mod.rs`
- Modify: `src/providers/traits.rs`
- Modify: `src/plugins/registry.rs`
- Modify: `src/plugins/runtime.rs`
- Test: `src/providers/mod.rs` inline tests
- Step 1: Write the failing test
```rust
#[test]
fn factory_can_create_plugin_provider() {}
```
- Step 2: Run test to verify it fails. Run:
`cargo test --locked factory_can_create_plugin_provider -- --nocapture` Expected: FAIL before plugin
provider lookup exists.
- Step 3: Write minimal implementation
- Extend provider factory to resolve plugin providers after native map.
- Ensure resilient/routed providers can wrap plugin providers.
- Step 4: Run test to verify it passes. Run: `cargo test --locked providers::mod -- --nocapture`
Expected: PASS.
- Step 5: Commit
```bash
git add src/providers/mod.rs src/providers/traits.rs src/plugins/registry.rs src/plugins/runtime.rs
git commit -m "feat(providers): integrate wasm plugin providers into factory and routing"
```
## Task 8: Implement ObserverBridge
**Files:**
- Modify: `src/plugins/bridge/observer.rs`
- Modify: `src/observability/mod.rs`
- Modify: `src/agent/loop_.rs`
- Modify: `src/gateway/mod.rs`
- Test: `src/plugins/bridge/observer.rs` inline tests
- Step 1: Write the failing test
```rust
#[test]
fn observer_bridge_emits_hook_events_for_legacy_observer_stream() {}
```
- Step 2: Run test to verify it fails. Run:
`cargo test --locked observer_bridge_emits_hook_events_for_legacy_observer_stream -- --nocapture`
Expected: FAIL before bridge wiring.
- Step 3: Write minimal implementation
- Implement adapter mapping observer events into hook dispatch.
- Wire where observer is created in agent/channel/gateway flows.
- Step 4: Run test to verify it passes. Run: `cargo test --locked plugins::bridge -- --nocapture`
Expected: PASS.
- Step 5: Commit
```bash
git add src/plugins/bridge/observer.rs src/observability/mod.rs src/agent/loop_.rs src/gateway/mod.rs
git commit -m "feat(observability): add observer-to-hook bridge for plugin runtime"
```
## Task 9: Implement hot reload with immutable snapshots
**Files:**
- Modify: `src/plugins/hot_reload.rs`
- Modify: `src/plugins/registry.rs`
- Modify: `src/plugins/runtime.rs`
- Modify: `src/main.rs`
- Test: `src/plugins/hot_reload.rs` inline tests
- Step 1: Write the failing test
```rust
#[tokio::test]
async fn reload_failure_keeps_previous_snapshot_active() {}
```
- Step 2: Run test to verify it fails. Run:
`cargo test --locked reload_failure_keeps_previous_snapshot_active -- --nocapture` Expected: FAIL
before atomic swap logic.
- Step 3: Write minimal implementation
- File watcher rebuilds candidate snapshot.
- Validate fully before publish.
- Atomic swap on success; rollback on failure.
- Preserve in-flight snapshot handles.
- Step 4: Run test to verify it passes. Run:
`cargo test --locked plugins::hot_reload -- --nocapture` Expected: PASS.
- Step 5: Commit
```bash
git add src/plugins/hot_reload.rs src/plugins/registry.rs src/plugins/runtime.rs src/main.rs
git commit -m "feat(plugins): add safe hot-reload with immutable snapshot swap"
```
## Task 10: Documentation and verification pass
**Files:**
- Create: `docs/plugins-runtime.md`
- Modify: `docs/config-reference.md`
- Modify: `docs/commands-reference.md`
- Modify: `docs/troubleshooting.md`
- Modify: locale docs where equivalents exist (`fr`, `vi` minimum for
config/commands/troubleshooting)
- Step 1: Write the failing doc checks
- Define link/consistency checks and navigation parity expectations.
- Step 2: Run doc checks to verify failures (if stale links exist). Run: the project markdown/link
checks used in repo CI. Expected: potential FAIL until docs updated.
- Step 3: Write minimal documentation updates
- Plugin config keys, lifecycle, safety model, hot reload behavior, operator troubleshooting.
- Step 4: Run full validation. Run:
```bash
cargo fmt --all -- --check
cargo clippy --all-targets -- -D warnings
cargo test --locked
```
Expected: PASS.
- Step 5: Commit
```bash
git add docs src
git commit -m "docs(plugins): document wasm plugin runtime config lifecycle and operations"
```
## Final Integration Checklist
- Ensure plugins disabled mode preserves existing behavior.
- Ensure security defaults remain deny-by-default.
- Ensure hook ordering and cancellation semantics are deterministic.
- Ensure provider/tool fallback behavior is unchanged for native implementations.
- Ensure hot-reload failures are non-fatal and reversible.

135
docs/plugins-runtime.md Normal file
View File

@ -0,0 +1,135 @@
# WASM Plugin Runtime (Experimental)
This document describes the current experimental plugin runtime for ZeroClaw.
## Scope
Current implementation supports:
- plugin manifest discovery from `[plugins].load_paths`
- plugin-declared tool registration into tool specs
- plugin-declared provider registration into provider factory resolution
- host-side WASM invocation bridge for tool/provider calls
- manifest fingerprint tracking scaffolding (hot-reload toggle is not yet exposed in schema)
## Config
```toml
[plugins]
enabled = true
load_paths = ["plugins"]
allow = []
deny = []
```
Defaults are deny-by-default and disabled-by-default.
Execution limits are currently conservative fixed defaults in runtime code:
- `invoke_timeout_ms = 2000`
- `memory_limit_bytes = 67108864`
- `max_concurrency = 8`
## Manifest Files
The runtime scans each configured directory for:
- `*.plugin.toml`
- `*.plugin.json`
Minimal TOML example:
```toml
id = "demo"
version = "1.0.0"
module_path = "plugins/demo.wasm"
wit_packages = ["zeroclaw:tools@1.0.0", "zeroclaw:providers@1.0.0"]
[[tools]]
name = "demo_tool"
description = "Demo tool"
providers = ["demo-provider"]
```
## WIT Package Compatibility
Supported package majors:
- `zeroclaw:hooks@1.x`
- `zeroclaw:tools@1.x`
- `zeroclaw:providers@1.x`
Unknown packages or mismatched major versions are rejected during manifest load.
## WASM Host ABI (Current Bridge)
The current bridge calls core-WASM exports directly.
Required exports:
- `memory`
- `alloc(i32) -> i32`
- `dealloc(i32, i32)`
- `zeroclaw_tool_execute(i32, i32) -> i64`
- `zeroclaw_provider_chat(i32, i32) -> i64`
Conventions:
- Input is UTF-8 JSON written by host into guest memory.
- Return value packs output pointer/length into `i64`:
- high 32 bits: pointer
- low 32 bits: length
- Host reads UTF-8 output JSON/string and deallocates buffers.
Tool call payload shape:
```json
{
"tool": "demo_tool",
"args": { "key": "value" }
}
```
Provider call payload shape:
```json
{
"provider": "demo-provider",
"system_prompt": "optional",
"message": "user prompt",
"model": "model-name",
"temperature": 0.7
}
```
Provider output may be either plain text or JSON:
```json
{
"text": "response text",
"error": null
}
```
If `error` is non-null, host treats the call as failed.
## Hot Reload
Manifest fingerprints are tracked internally, but the config schema does not currently expose a
`[plugins].hot_reload` toggle. Runtime hot-reload remains disabled by default until that schema
support is added.
## Observer Bridge
Observer creation paths route through `ObserverBridge` to keep plugin runtime event flow compatible
with existing observer backends.
## Limitations
Current bridge is intentionally minimal:
- no full WIT component-model host bindings yet
- no per-plugin sandbox isolation beyond process/runtime defaults
- no signature verification or trust policy enforcement yet
- tool/provider manifests define registration; execution ABI is currently fixed to the core-WASM
export contract above

View File

@ -96,12 +96,16 @@ Automation assists with triage and guardrails, but final merge accountability re
Maintain these branch protection rules on `dev` and `main`:
- Require status checks before merge.
- Require check `CI Required Gate`.
- Require checks `CI Required Gate` and `Security Required Gate`.
- Consider also requiring `CI Change Audit` and `CodeQL Analysis` for stricter CI/CD governance.
- Require pull request reviews before merge.
- Require at least 1 approving review.
- Require approval after the most recent push.
- Require CODEOWNERS review for protected paths.
- For CI/CD-related paths (`.github/workflows/**`, `.github/codeql/**`, `.github/connectivity/**`, `.github/release/**`, `.github/security/**`, `.github/actionlint.yaml`, `.github/dependabot.yml`, `scripts/ci/**`, and CI governance docs), require an explicit approving review from `@chumyin` via `CI Required Gate`.
- Keep branch/ruleset bypass limited to org owners.
- Dismiss stale approvals when new commits are pushed.
- For CI/CD-related paths (`.github/workflows/**`, `.github/codeql/**`, `.github/connectivity/**`, `.github/release/**`, `.github/security/**`, `.github/actionlint.yaml`, `.github/dependabot.yml`, `scripts/ci/**`, and CI governance docs), require CODEOWNERS review with `@chumyin` ownership.
- Keep bypass allowances empty by default (use time-boxed break-glass only when absolutely required).
- Enforce branch protection for admins.
- Require conversation resolution before merge.
- Restrict force-push on protected branches.
- Route normal contributor PRs to `main` by default (`dev` is optional for dedicated integration batching).
- Allow direct merges to `main` once required checks and review policy pass.
@ -123,7 +127,7 @@ Maintain these branch protection rules on `dev` and `main`:
### 4.2 Step B: Validation
- `CI Required Gate` is the merge gate.
- `CI Required Gate` and `Security Required Gate` are the merge gates.
- Docs-only PRs use fast-path and skip heavy Rust jobs.
- Non-doc PRs must pass lint, tests, and release build smoke check.
- Rust-impacting PRs use the same required gate set as `dev`/`main` pushes (no PR build-only shortcut).

View File

@ -7,6 +7,8 @@ Time-bound project status snapshots for planning documentation and operations wo
- [../project-triage-snapshot-2026-02-18.md](../project-triage-snapshot-2026-02-18.md)
- [../docs-audit-2026-02-24.md](../docs-audit-2026-02-24.md)
- [m4-5-rfi-spike-2026-02-28.md](m4-5-rfi-spike-2026-02-28.md)
- [f1-3-agent-lifecycle-state-machine-rfi-2026-03-01.md](f1-3-agent-lifecycle-state-machine-rfi-2026-03-01.md)
- [q0-3-stop-reason-state-machine-rfi-2026-03-01.md](q0-3-stop-reason-state-machine-rfi-2026-03-01.md)
## Scope

View File

@ -0,0 +1,193 @@
# F1-3 Agent Lifecycle State Machine RFI (2026-03-01)
Status: RFI complete, implementation planning ready.
GitHub issue: [#2308](https://github.com/zeroclaw-labs/zeroclaw/issues/2308)
Linear: [RMN-256](https://linear.app/zeroclawlabs/issue/RMN-256/rfi-f1-3-agent-lifecycle-state-machine)
## Summary
ZeroClaw currently has strong component supervision and health snapshots, but it does not expose a
formal agent lifecycle state model. This RFI defines a lifecycle FSM, transition contract,
synchronization model, persistence posture, and migration path that can be implemented without
changing existing daemon reliability behavior.
## Current-State Findings
### Existing behavior that already works
- `src/daemon/mod.rs` supervises gateway/channels/heartbeat/scheduler with restart backoff.
- `src/health/mod.rs` tracks per-component `status`, `last_ok`, `last_error`, and `restart_count`.
- `src/agent/session.rs` persists conversational history with memory/SQLite backends and TTL cleanup.
- `src/agent/loop_.rs` and `src/agent/agent.rs` provide bounded per-turn execution loops.
### Gaps blocking lifecycle consistency
- No typed lifecycle enum for the agent runtime (or per-session runtime state).
- No validated transition guard rails (invalid transitions are not prevented centrally).
- Health state and lifecycle state are conflated (`ok`/`error` are not full lifecycle semantics).
- Persistence only covers health snapshots and conversation history, not lifecycle transitions.
- No single integration contract for daemon, channels, supervisor, and health endpoint consumers.
## Proposed Lifecycle Model
### State definitions
- `Created`: runtime object exists but not started.
- `Starting`: dependencies are being initialized.
- `Running`: normal operation, accepting and processing work.
- `Degraded`: still running but with elevated failure/restart signals.
- `Suspended`: intentionally paused (manual pause, e-stop, or maintenance gate).
- `Backoff`: recovering after crash/error; restart cooldown active.
- `Terminating`: graceful shutdown in progress.
- `Terminated`: clean shutdown completed.
- `Crashed`: unrecoverable failure after retry budget is exhausted.
### State diagram
```mermaid
stateDiagram-v2
[*] --> Created
Created --> Starting: daemon run/start
Starting --> Running: init_ok
Starting --> Backoff: init_fail
Running --> Degraded: component_error_threshold
Degraded --> Running: recovered
Running --> Suspended: pause_or_estop
Degraded --> Suspended: pause_or_estop
Suspended --> Running: resume
Backoff --> Starting: retry_after_backoff
Backoff --> Crashed: retry_budget_exhausted
Running --> Terminating: shutdown_signal
Degraded --> Terminating: shutdown_signal
Suspended --> Terminating: shutdown_signal
Terminating --> Terminated: shutdown_complete
Crashed --> Terminating: manual_stop
```
### Transition table
| From | Trigger | Guard | To | Action |
|---|---|---|---|---|
| `Created` | daemon start | config valid | `Starting` | emit lifecycle event |
| `Starting` | init success | all required components healthy | `Running` | clear restart streak |
| `Starting` | init failure | retry budget available | `Backoff` | increment restart streak |
| `Running` | component errors | restart streak >= threshold | `Degraded` | set degraded cause |
| `Degraded` | recovery success | error window clears | `Running` | clear degraded cause |
| `Running`/`Degraded` | pause/e-stop | operator or policy signal | `Suspended` | stop intake/execution |
| `Suspended` | resume | policy allows | `Running` | re-enable intake |
| `Backoff` | retry timer | retry budget available | `Starting` | start component init |
| `Backoff` | retry exhausted | no retries left | `Crashed` | emit terminal failure event |
| non-terminal states | shutdown | signal received | `Terminating` | drain and stop workers |
| `Terminating` | done | all workers stopped | `Terminated` | persist final snapshot |
## Implementation Approach
### State representation
Add a dedicated lifecycle type in runtime/daemon scope:
```rust
enum AgentLifecycleState {
Created,
Starting,
Running,
Degraded { cause: String },
Suspended { reason: String },
Backoff { retry_in_ms: u64, attempt: u32 },
Terminating,
Terminated,
Crashed { reason: String },
}
```
### Synchronization model
- Use a single `LifecycleRegistry` (`Arc<RwLock<...>>`) owned by daemon runtime.
- Route all lifecycle writes through `transition(from, to, trigger)` with guard checks.
- Emit transition events from one place, then fan out to health snapshot and observability.
- Reject invalid transitions at runtime and log them as policy violations.
## Persistence Decision
Decision: **hybrid persistence**.
- Runtime source of truth: in-memory lifecycle registry for low-latency transitions.
- Durable checkpoint: persisted lifecycle snapshot alongside `daemon_state.json`.
- Optional append-only transition journal (`lifecycle_events.jsonl`) for audit and forensics.
Rationale:
- In-memory state keeps current daemon behavior fast and simple.
- Persistent checkpoint enables status restoration after restart and improves operator clarity.
- Event journal is valuable for post-incident analysis without changing runtime control flow.
## Integration Points
- `src/daemon/mod.rs`
- wrap supervisor start/failure/backoff/shutdown with explicit lifecycle transitions.
- `src/health/mod.rs`
- expose lifecycle state in health snapshot without replacing component-level health detail.
- `src/main.rs` (`status`, `restart`, e-stop surfaces)
- render lifecycle state and transition reason in CLI output.
- `src/channels/mod.rs` and channel workers
- gate message intake when lifecycle is `Suspended`, `Terminating`, `Crashed`, or `Terminated`.
- `src/agent/session.rs`
- keep session history semantics unchanged; add optional link from session to runtime lifecycle id.
## Migration Plan
### Phase 1: Non-breaking state plumbing
- Add lifecycle enum/registry and default transitions in daemon startup/shutdown.
- Include lifecycle state in health JSON output.
- Keep existing component health fields unchanged.
### Phase 2: Supervisor transition wiring
- Convert supervisor restart/error signals into lifecycle transitions.
- Add backoff metadata (`attempt`, `retry_in_ms`) to lifecycle snapshots.
### Phase 3: Intake gating + operator controls
- Enforce channel/gateway intake gating by lifecycle state.
- Surface lifecycle controls and richer status output in CLI.
### Phase 4: Persistence + event journal
- Persist snapshot and optional JSONL transition events.
- Add recovery behavior for daemon restart from persisted snapshot.
## Verification and Testing Plan
### Unit tests
- transition guard tests for all valid/invalid state pairs.
- lifecycle-to-health serialization tests.
- persistence round-trip tests for snapshot and event journal.
### Integration tests
- daemon startup failure -> backoff -> recovery path.
- repeated failure -> `Crashed` transition.
- suspend/resume behavior for channel intake and scheduler activity.
### Chaos/failure tests
- component panic/exit simulation under supervisor.
- rapid restart storm protection and state consistency checks.
## Risks and Mitigations
| Risk | Impact | Mitigation |
|---|---|---|
| Overlap between health and lifecycle semantics | Operator confusion | Keep both domains explicit and documented |
| Invalid transition bugs during rollout | Runtime inconsistency | Central transition API with guard checks |
| Excessive persistence I/O | Throughput impact | snapshot throttling + async event writes |
| Channel behavior regressions on suspend | Message loss | add intake gating tests and dry-run mode |
## Implementation Readiness Checklist
- [x] State diagram and transition table documented.
- [x] State representation and synchronization approach selected.
- [x] Persistence strategy documented.
- [x] Integration points and migration plan documented.

View File

@ -0,0 +1,222 @@
# Q0-3 Stop-Reason State Machine + Max-Tokens Continuation RFI (2026-03-01)
Status: RFI complete, implementation planning ready.
GitHub issue: [#2309](https://github.com/zeroclaw-labs/zeroclaw/issues/2309)
Linear: [RMN-257](https://linear.app/zeroclawlabs/issue/RMN-257/rfi-q0-3-stop-reason-state-machine-max-tokens-continuation)
## Summary
ZeroClaw currently parses text/tool calls and token usage across providers, but it does not carry a
normalized stop reason into `ChatResponse`, and there is no deterministic continuation loop for
`max_tokens` truncation. This RFI defines a provider mapping model, a continuation FSM, partial
tool-call recovery policy, and observability/testing requirements.
## Current-State Findings
### Confirmed implementation behavior
- `src/providers/traits.rs` `ChatResponse` has no stop-reason field.
- Provider adapters parse text/tool-calls/usage, but stop reason fields are mostly discarded.
- `src/agent/loop_.rs` finalizes the response if no parsed tool calls are present.
- Existing parser in `src/agent/loop_/parsing.rs` already handles many malformed/truncated
tool-call formats safely (no panic), but this is parsing recovery, not continuation policy.
### Known gap
- When a provider truncates output due to max token cap, the loop lacks a dedicated continuation
path. Result: partial responses can be returned silently.
## Proposed Stop-Reason Model
### Normalized enum
```rust
enum NormalizedStopReason {
EndTurn,
ToolCall,
MaxTokens,
ContextWindowExceeded,
SafetyBlocked,
Cancelled,
Unknown(String),
}
```
### `ChatResponse` extension
Add stop-reason payload to provider response contract:
```rust
pub struct ChatResponse {
pub text: Option<String>,
pub tool_calls: Vec<ToolCall>,
pub usage: Option<TokenUsage>,
pub reasoning_content: Option<String>,
pub quota_metadata: Option<QuotaMetadata>,
pub stop_reason: Option<NormalizedStopReason>,
pub raw_stop_reason: Option<String>,
}
```
`raw_stop_reason` preserves provider-native values for diagnostics and future mapping updates.
## Provider Mapping Matrix
This table defines implementation targets for active provider families in ZeroClaw.
| Provider family | Native field | Native values | Normalized |
|---|---|---|---|
| OpenAI / OpenRouter / OpenAI-compatible chat | `finish_reason` | `stop` | `EndTurn` |
| OpenAI / OpenRouter / OpenAI-compatible chat | `finish_reason` | `tool_calls`, `function_call` | `ToolCall` |
| OpenAI / OpenRouter / OpenAI-compatible chat | `finish_reason` | `length` | `MaxTokens` |
| OpenAI / OpenRouter / OpenAI-compatible chat | `finish_reason` | `content_filter` | `SafetyBlocked` |
| Anthropic messages | `stop_reason` | `end_turn`, `stop_sequence` | `EndTurn` |
| Anthropic messages | `stop_reason` | `tool_use` | `ToolCall` |
| Anthropic messages | `stop_reason` | `max_tokens` | `MaxTokens` |
| Anthropic messages | `stop_reason` | `model_context_window_exceeded` | `ContextWindowExceeded` |
| Gemini generateContent | `finishReason` | `STOP` | `EndTurn` |
| Gemini generateContent | `finishReason` | `MAX_TOKENS` | `MaxTokens` |
| Gemini generateContent | `finishReason` | `SAFETY`, `RECITATION` | `SafetyBlocked` |
| Bedrock Converse | `stopReason` | `end_turn` | `EndTurn` |
| Bedrock Converse | `stopReason` | `tool_use` | `ToolCall` |
| Bedrock Converse | `stopReason` | `max_tokens` | `MaxTokens` |
| Bedrock Converse | `stopReason` | `guardrail_intervened` | `SafetyBlocked` |
Notes:
- Unknown values map to `Unknown(raw)` and must be logged once per provider/model combination.
- Mapping must be unit-tested against fixture payloads for each provider adapter.
## Continuation State Machine
### Goals
- Continue only when stop reason indicates output truncation.
- Bound retries and total output growth.
- Preserve tool-call correctness (never execute partial JSON).
### State diagram
```mermaid
stateDiagram-v2
[*] --> Request
Request --> EvaluateStop: provider_response
EvaluateStop --> Complete: EndTurn
EvaluateStop --> ExecuteTools: ToolCall
EvaluateStop --> ContinuePending: MaxTokens
EvaluateStop --> Abort: SafetyBlocked/ContextWindowExceeded/UnknownFatal
ContinuePending --> RequestContinuation: under_limits
RequestContinuation --> EvaluateStop: provider_response
ContinuePending --> AbortPartial: retry_limit_or_budget_exceeded
AbortPartial --> Complete: return_partial_with_notice
ExecuteTools --> Request: tool_results_appended
```
### Hard limits (defaults)
- `max_continuations_per_turn = 3`
- `max_total_completion_tokens_per_turn = 4 * initial_max_tokens` (configurable)
- `max_total_output_chars_per_turn = 120_000` (safety cap)
## Partial Tool-Call JSON Policy
### Rules
- Never execute tool calls when parsed payload is incomplete/ambiguous.
- If `MaxTokens` and parser detects malformed/partial tool-call body:
- request deterministic re-emission of the tool call payload only.
- keep attempt budget separate (`max_tool_repair_attempts = 1`).
- If repair fails, degrade safely:
- return a partial response with explicit truncation notice.
- emit structured event for operator diagnosis.
### Recovery prompt contract
Use a strict system-side continuation hint:
```text
Previous response was truncated by token limit.
Continue exactly from where you left off.
If you intended a tool call, emit one complete tool call payload only.
Do not repeat already-sent text.
```
## Observability Requirements
Emit structured events per turn:
- `stop_reason_observed`
- provider, model, normalized reason, raw reason, turn id, iteration.
- `continuation_attempt`
- attempt index, cumulative output tokens/chars, budget remaining.
- `continuation_terminated`
- terminal reason (`completed`, `retry_limit`, `budget_exhausted`, `safety_blocked`).
- `tool_payload_repair`
- parse issue type, repair attempted, repair success/failure.
Metrics:
- counter: continuations triggered by provider/model.
- counter: truncation exits without continuation (guardrail/budget cases).
- histogram: continuation attempts per turn.
- histogram: end-to-end turn latency for continued turns.
## Implementation Outline
### Provider layer
- Parse and map native stop reason fields in each adapter.
- Populate `stop_reason` and `raw_stop_reason` in `ChatResponse`.
- Add fixture-based unit tests for mapping.
### Agent loop layer
- Introduce `ContinuationController` in `src/agent/loop_.rs`.
- Route `MaxTokens` through continuation FSM before finalization.
- Merge continuation text chunks into one coherent assistant response.
- Keep existing tool parsing and loop-detection guards intact.
### Config layer
Add config keys under `agent`:
- `continuation_max_attempts`
- `continuation_max_output_chars`
- `continuation_max_total_completion_tokens`
- `continuation_tool_repair_attempts`
## Verification and Testing Plan
### Unit tests
- stop-reason mapping tests per provider adapter.
- continuation FSM transition tests (all terminal paths).
- budget cap tests and retry-limit behavior.
### Integration tests
- mock provider returns `MaxTokens` then successful continuation.
- mock provider returns repeated `MaxTokens` until retry cap.
- mock provider emits partial tool-call JSON then repaired payload.
### Regression tests
- ensure non-truncated normal responses are unchanged.
- ensure existing parser recovery tests in `loop_/parsing.rs` remain green.
- verify no duplicate text when continuation merges.
## Risks and Mitigations
| Risk | Impact | Mitigation |
|---|---|---|
| Provider mapping drift | incorrect continuation triggers | keep `raw_stop_reason` + tests |
| Continuation repetition loops | poor UX, extra tokens | dedupe heuristics + strict caps |
| Partial tool-call execution | unsafe tool behavior | hard block on malformed payload |
| Latency growth | slower responses | cap attempts and emit metrics |
## Implementation Readiness Checklist
- [x] Provider stop-reason mapping documented.
- [x] Continuation policy and hard limits documented.
- [x] Partial tool-call handling strategy documented.
- [x] Proposed state machine documented for implementation.

View File

@ -2,7 +2,7 @@
This document maps provider IDs, aliases, and credential environment variables.
Last verified: **February 28, 2026**.
Last verified: **March 1, 2026**.
## How to List Providers
@ -35,6 +35,7 @@ credential is not reused for fallback providers.
| `vercel` | `vercel-ai` | No | `VERCEL_API_KEY` |
| `cloudflare` | `cloudflare-ai` | No | `CLOUDFLARE_API_KEY` |
| `moonshot` | `kimi` | No | `MOONSHOT_API_KEY` |
| `stepfun` | `step`, `step-ai`, `step_ai` | No | `STEP_API_KEY`, `STEPFUN_API_KEY` |
| `kimi-code` | `kimi_coding`, `kimi_for_coding` | No | `KIMI_CODE_API_KEY`, `MOONSHOT_API_KEY` |
| `synthetic` | — | No | `SYNTHETIC_API_KEY` |
| `opencode` | `opencode-zen` | No | `OPENCODE_API_KEY` |
@ -137,6 +138,33 @@ zeroclaw models refresh --provider volcengine
zeroclaw agent --provider volcengine --model doubao-1-5-pro-32k-250115 -m "ping"
```
### StepFun Notes
- Provider ID: `stepfun` (aliases: `step`, `step-ai`, `step_ai`)
- Base API URL: `https://api.stepfun.com/v1`
- Chat endpoint: `/chat/completions`
- Model discovery endpoint: `/models`
- Authentication: `STEP_API_KEY` (fallback: `STEPFUN_API_KEY`)
- Default model preset: `step-3.5-flash`
- Official docs:
- Chat Completions: <https://platform.stepfun.com/docs/zh/api-reference/chat/chat-completion-create>
- Models List: <https://platform.stepfun.com/docs/api-reference/models/list>
- OpenAI migration guide: <https://platform.stepfun.com/docs/guide/openai>
Minimal setup example:
```bash
export STEP_API_KEY="your-stepfun-api-key"
zeroclaw onboard --provider stepfun --api-key "$STEP_API_KEY" --model step-3.5-flash --force
```
Quick validation:
```bash
zeroclaw models refresh --provider stepfun
zeroclaw agent --provider stepfun --model step-3.5-flash -m "ping"
```
### SiliconFlow Notes
- Provider ID: `siliconflow` (aliases: `silicon-cloud`, `siliconcloud`)

Some files were not shown because too many files have changed in this diff Show More