From 7bb44d157c4a4be3546025071930e74844f68b95 Mon Sep 17 00:00:00 2001
From: Chummy
Date: Wed, 18 Feb 2026 21:02:49 +0800
Subject: [PATCH 001/116] fix(ci): use correct first-interaction input keys
---
.github/workflows/pr-auto-response.yml | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/.github/workflows/pr-auto-response.yml b/.github/workflows/pr-auto-response.yml
index df4e3044a..08afd4bc9 100644
--- a/.github/workflows/pr-auto-response.yml
+++ b/.github/workflows/pr-auto-response.yml
@@ -40,8 +40,8 @@ jobs:
- name: Greet first-time contributors
uses: actions/first-interaction@a1db7729b356323c7988c20ed6f0d33fe31297be # v1
with:
- repo-token: ${{ secrets.GITHUB_TOKEN }}
- issue-message: |
+ repo_token: ${{ secrets.GITHUB_TOKEN }}
+ issue_message: |
Thanks for opening this issue.
Before maintainers triage it, please confirm:
@@ -50,7 +50,7 @@ jobs:
- Sensitive values are redacted
This helps us keep issue throughput high and response latency low.
- pr-message: |
+ pr_message: |
Thanks for contributing to ZeroClaw.
For faster review, please ensure:
From a4ad5a77de35f73cbf764908bbb112a9edce0e63 Mon Sep 17 00:00:00 2001
From: Chummy
Date: Wed, 18 Feb 2026 21:05:14 +0800
Subject: [PATCH 002/116] fix(ci): wire shared label policy path in workflows
---
.github/workflows/pr-auto-response.yml | 3 +++
.github/workflows/pr-labeler.yml | 3 +++
2 files changed, 6 insertions(+)
diff --git a/.github/workflows/pr-auto-response.yml b/.github/workflows/pr-auto-response.yml
index 08afd4bc9..8efe523c4 100644
--- a/.github/workflows/pr-auto-response.yml
+++ b/.github/workflows/pr-auto-response.yml
@@ -8,6 +8,9 @@ on:
permissions: {}
+env:
+ LABEL_POLICY_PATH: .github/label-policy.json
+
jobs:
contributor-tier-issues:
if: >-
diff --git a/.github/workflows/pr-labeler.yml b/.github/workflows/pr-labeler.yml
index fff174cad..37869cd3c 100644
--- a/.github/workflows/pr-labeler.yml
+++ b/.github/workflows/pr-labeler.yml
@@ -23,6 +23,9 @@ permissions:
pull-requests: write
issues: write
+env:
+ LABEL_POLICY_PATH: .github/label-policy.json
+
jobs:
label:
runs-on: blacksmith-2vcpu-ubuntu-2404
From f10bb998e06e365489ddd7a3e745fa27c46387b3 Mon Sep 17 00:00:00 2001
From: Chummy
Date: Fri, 20 Feb 2026 12:24:43 +0800
Subject: [PATCH 003/116] fix(build): unblock low-resource installs and release
binaries (#1041)
* fix(build): unblock low-resource installs and release binaries
* fix(ci): use supported intel macOS runner label
---
.github/workflows/pub-release.yml | 69 +++++++--
README.md | 50 ++++++-
docs/one-click-bootstrap.md | 30 +++-
docs/troubleshooting.md | 44 +++++-
scripts/bootstrap.sh | 226 ++++++++++++++++++++++++++++--
5 files changed, 396 insertions(+), 23 deletions(-)
diff --git a/.github/workflows/pub-release.yml b/.github/workflows/pub-release.yml
index 7cdb85342..193bfd5ce 100644
--- a/.github/workflows/pub-release.yml
+++ b/.github/workflows/pub-release.yml
@@ -27,15 +27,45 @@ jobs:
- os: ubuntu-latest
target: x86_64-unknown-linux-gnu
artifact: zeroclaw
- - os: macos-latest
+ archive_ext: tar.gz
+ cross_compiler: ""
+ linker_env: ""
+ linker: ""
+ - os: ubuntu-latest
+ target: aarch64-unknown-linux-gnu
+ artifact: zeroclaw
+ archive_ext: tar.gz
+ cross_compiler: gcc-aarch64-linux-gnu
+ linker_env: CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER
+ linker: aarch64-linux-gnu-gcc
+ - os: ubuntu-latest
+ target: armv7-unknown-linux-gnueabihf
+ artifact: zeroclaw
+ archive_ext: tar.gz
+ cross_compiler: gcc-arm-linux-gnueabihf
+ linker_env: CARGO_TARGET_ARMV7_UNKNOWN_LINUX_GNUEABIHF_LINKER
+ linker: arm-linux-gnueabihf-gcc
+ - os: macos-15-intel
target: x86_64-apple-darwin
artifact: zeroclaw
- - os: macos-latest
+ archive_ext: tar.gz
+ cross_compiler: ""
+ linker_env: ""
+ linker: ""
+ - os: macos-14
target: aarch64-apple-darwin
artifact: zeroclaw
+ archive_ext: tar.gz
+ cross_compiler: ""
+ linker_env: ""
+ linker: ""
- os: windows-latest
target: x86_64-pc-windows-msvc
artifact: zeroclaw.exe
+ archive_ext: zip
+ cross_compiler: ""
+ linker_env: ""
+ linker: ""
steps:
- uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
@@ -46,20 +76,41 @@ jobs:
- uses: useblacksmith/rust-cache@f53e7f127245d2a269b3d90879ccf259876842d5 # v3
+ - name: Install cross-compilation toolchain (Linux)
+ if: runner.os == 'Linux' && matrix.cross_compiler != ''
+ run: |
+ sudo apt-get update -qq
+ sudo apt-get install -y ${{ matrix.cross_compiler }}
+
- name: Build release
- run: cargo build --release --locked --target ${{ matrix.target }}
+ env:
+ LINKER_ENV: ${{ matrix.linker_env }}
+ LINKER: ${{ matrix.linker }}
+ run: |
+ if [ -n "$LINKER_ENV" ] && [ -n "$LINKER" ]; then
+ echo "Using linker override: $LINKER_ENV=$LINKER"
+ export "$LINKER_ENV=$LINKER"
+ fi
+ cargo build --release --locked --target ${{ matrix.target }}
- name: Check binary size (Unix)
if: runner.os != 'Windows'
run: |
- SIZE=$(stat -f%z target/${{ matrix.target }}/release/${{ matrix.artifact }} 2>/dev/null || stat -c%s target/${{ matrix.target }}/release/${{ matrix.artifact }})
+ BIN="target/${{ matrix.target }}/release/${{ matrix.artifact }}"
+ if [ ! -f "$BIN" ]; then
+ echo "::error::Expected binary not found: $BIN"
+ exit 1
+ fi
+ SIZE=$(stat -f%z "$BIN" 2>/dev/null || stat -c%s "$BIN")
SIZE_MB=$((SIZE / 1024 / 1024))
echo "Binary size: ${SIZE_MB}MB ($SIZE bytes)"
echo "### Binary Size: ${{ matrix.target }}" >> "$GITHUB_STEP_SUMMARY"
echo "- Size: ${SIZE_MB}MB ($SIZE bytes)" >> "$GITHUB_STEP_SUMMARY"
- if [ "$SIZE" -gt 15728640 ]; then
- echo "::error::Binary exceeds 15MB hard limit (${SIZE_MB}MB)"
+ if [ "$SIZE" -gt 41943040 ]; then
+ echo "::error::Binary exceeds 40MB safeguard (${SIZE_MB}MB)"
exit 1
+ elif [ "$SIZE" -gt 15728640 ]; then
+ echo "::warning::Binary exceeds 15MB advisory target (${SIZE_MB}MB)"
elif [ "$SIZE" -gt 5242880 ]; then
echo "::warning::Binary exceeds 5MB target (${SIZE_MB}MB)"
else
@@ -70,19 +121,19 @@ jobs:
if: runner.os != 'Windows'
run: |
cd target/${{ matrix.target }}/release
- tar czf ../../../zeroclaw-${{ matrix.target }}.tar.gz ${{ matrix.artifact }}
+ tar czf ../../../zeroclaw-${{ matrix.target }}.${{ matrix.archive_ext }} ${{ matrix.artifact }}
- name: Package (Windows)
if: runner.os == 'Windows'
run: |
cd target/${{ matrix.target }}/release
- 7z a ../../../zeroclaw-${{ matrix.target }}.zip ${{ matrix.artifact }}
+ 7z a ../../../zeroclaw-${{ matrix.target }}.${{ matrix.archive_ext }} ${{ matrix.artifact }}
- name: Upload artifact
uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6
with:
name: zeroclaw-${{ matrix.target }}
- path: zeroclaw-${{ matrix.target }}.*
+ path: zeroclaw-${{ matrix.target }}.${{ matrix.archive_ext }}
retention-days: 7
publish:
diff --git a/README.md b/README.md
index 53fbc306f..629842af9 100644
--- a/README.md
+++ b/README.md
@@ -88,7 +88,7 @@ Local machine quick benchmark (macOS arm64, Feb 2026) normalized for 0.8GHz edge
| **Binary Size** | ~28MB (dist) | N/A (Scripts) | ~8MB | **3.4 MB** |
| **Cost** | Mac Mini $599 | Linux SBC ~$50 | Linux Board $10 | **Any hardware $10** |
-> Notes: ZeroClaw results are measured on release builds using `/usr/bin/time -l`. OpenClaw requires Node.js runtime (typically ~390MB additional memory overhead), while NanoBot requires Python runtime. PicoClaw and ZeroClaw are static binaries.
+> Notes: ZeroClaw results are measured on release builds using `/usr/bin/time -l`. OpenClaw requires Node.js runtime (typically ~390MB additional memory overhead), while NanoBot requires Python runtime. PicoClaw and ZeroClaw are static binaries. The RAM figures above are runtime memory; build-time compilation requirements are higher.
@@ -173,11 +173,32 @@ Or skip the steps above and install everything (system deps, Rust, ZeroClaw) in
curl -LsSf https://raw.githubusercontent.com/zeroclaw-labs/zeroclaw/main/scripts/install.sh | bash
```
+#### Compilation resource requirements
+
+Building from source needs more resources than running the resulting binary:
+
+| Resource | Minimum | Recommended |
+|---|---|---|
+| **RAM + swap** | 2 GB | 4 GB+ |
+| **Free disk** | 6 GB | 10 GB+ |
+
+If your host is below the minimum, use pre-built binaries:
+
+```bash
+./bootstrap.sh --prefer-prebuilt
+```
+
+To require binary-only install with no source fallback:
+
+```bash
+./bootstrap.sh --prebuilt-only
+```
+
#### Optional
- **Docker** — required only if using the [Docker sandboxed runtime](#runtime-support-current) (`runtime.kind = "docker"`). Install via your package manager or [docker.com](https://docs.docker.com/engine/install/).
-> **Note:** The default `cargo build --release` uses `codegen-units=1` for compatibility with low-memory devices (e.g., Raspberry Pi 3 with 1GB RAM). For faster builds on powerful machines, use `cargo build --profile release-fast`.
+> **Note:** The default `cargo build --release` uses `codegen-units=1` to lower peak compile pressure. For faster builds on powerful machines, use `cargo build --profile release-fast`.
@@ -201,6 +222,12 @@ cd zeroclaw
# Optional: bootstrap dependencies + Rust on fresh machines
./bootstrap.sh --install-system-deps --install-rust
+# Optional: pre-built binary first (recommended on low-RAM/low-disk hosts)
+./bootstrap.sh --prefer-prebuilt
+
+# Optional: binary-only install (no source build fallback)
+./bootstrap.sh --prebuilt-only
+
# Optional: run onboarding in the same flow
./bootstrap.sh --onboard --api-key "sk-..." --provider openrouter [--model "openrouter/auto"]
@@ -216,6 +243,25 @@ curl -fsSL https://raw.githubusercontent.com/zeroclaw-labs/zeroclaw/main/scripts
Details: [`docs/one-click-bootstrap.md`](docs/one-click-bootstrap.md) (toolchain mode may request `sudo` for system packages).
+### Pre-built binaries
+
+Release assets are published for:
+
+- Linux: `x86_64`, `aarch64`, `armv7`
+- macOS: `x86_64`, `aarch64`
+- Windows: `x86_64`
+
+Download the latest assets from:
+<https://github.com/zeroclaw-labs/zeroclaw/releases/latest>
+
+Example (ARM64 Linux):
+
+```bash
+curl -fsSLO https://github.com/zeroclaw-labs/zeroclaw/releases/latest/download/zeroclaw-aarch64-unknown-linux-gnu.tar.gz
+tar xzf zeroclaw-aarch64-unknown-linux-gnu.tar.gz
+install -m 0755 zeroclaw "$HOME/.cargo/bin/zeroclaw"
+```
+
```bash
git clone https://github.com/zeroclaw-labs/zeroclaw.git
cd zeroclaw
diff --git a/docs/one-click-bootstrap.md b/docs/one-click-bootstrap.md
index 25cd108dc..c9001f792 100644
--- a/docs/one-click-bootstrap.md
+++ b/docs/one-click-bootstrap.md
@@ -2,7 +2,7 @@
This page defines the fastest supported path to install and initialize ZeroClaw.
-Last verified: **February 18, 2026**.
+Last verified: **February 20, 2026**.
## Option 0: Homebrew (macOS/Linuxbrew)
@@ -23,6 +23,31 @@ What it does by default:
1. `cargo build --release --locked`
2. `cargo install --path . --force --locked`
+### Resource preflight and pre-built flow
+
+Source builds typically require at least:
+
+- **2 GB RAM + swap**
+- **6 GB free disk**
+
+When resources are constrained, bootstrap now attempts a pre-built binary first.
+
+```bash
+./bootstrap.sh --prefer-prebuilt
+```
+
+To require binary-only installation and fail if no compatible release asset exists:
+
+```bash
+./bootstrap.sh --prebuilt-only
+```
+
+To bypass pre-built flow and force source compilation:
+
+```bash
+./bootstrap.sh --force-source-build
+```
+
## Dual-mode bootstrap
Default behavior is **app-only** (build/install ZeroClaw) and expects existing Rust toolchain.
@@ -37,6 +62,9 @@ Notes:
- `--install-system-deps` installs compiler/build prerequisites (may require `sudo`).
- `--install-rust` installs Rust via `rustup` when missing.
+- `--prefer-prebuilt` tries release binary download first, then falls back to source build.
+- `--prebuilt-only` disables source fallback.
+- `--force-source-build` disables pre-built flow entirely.
## Option B: Remote one-liner
diff --git a/docs/troubleshooting.md b/docs/troubleshooting.md
index ab7cfbfbb..7fd02aa40 100644
--- a/docs/troubleshooting.md
+++ b/docs/troubleshooting.md
@@ -2,7 +2,7 @@
This guide focuses on common setup/runtime failures and fast resolution paths.
-Last verified: **February 19, 2026**.
+Last verified: **February 20, 2026**.
## Installation / Bootstrap
@@ -32,6 +32,48 @@ Fix:
./bootstrap.sh --install-system-deps
```
+### Build fails on low-RAM / low-disk hosts
+
+Symptoms:
+
+- `cargo build --release` is killed (`signal: 9`, OOM killer, or `cannot allocate memory`)
+- Build crashes after adding swap because disk space runs out
+
+Why this happens:
+
+- Runtime memory (<5MB for common operations) is not the same as compile-time memory.
+- Full source build can require **2 GB RAM + swap** and **6+ GB free disk**.
+- Enabling swap on a tiny disk can avoid RAM OOM but still fail due to disk exhaustion.
+
+Preferred path for constrained machines:
+
+```bash
+./bootstrap.sh --prefer-prebuilt
+```
+
+Binary-only mode (no source fallback):
+
+```bash
+./bootstrap.sh --prebuilt-only
+```
+
+If you must compile from source on constrained hosts:
+
+1. Add swap only if you also have enough free disk for both swap + build output.
+1. Limit cargo parallelism:
+
+```bash
+CARGO_BUILD_JOBS=1 cargo build --release --locked
+```
+
+1. Reduce heavy features when Matrix is not required:
+
+```bash
+cargo build --release --locked --no-default-features --features hardware
+```
+
+1. Cross-compile on a stronger machine and copy the binary to the target host.
+
### Build is very slow or appears stuck
Symptoms:
diff --git a/scripts/bootstrap.sh b/scripts/bootstrap.sh
index f256fa65e..a081a6156 100755
--- a/scripts/bootstrap.sh
+++ b/scripts/bootstrap.sh
@@ -28,6 +28,9 @@ Options:
--docker Run bootstrap in Docker and launch onboarding inside the container
--install-system-deps Install build dependencies (Linux/macOS)
--install-rust Install Rust via rustup if missing
+ --prefer-prebuilt Try latest release binary first; fallback to source build on miss
+ --prebuilt-only Install only from latest release binary (no source build fallback)
+ --force-source-build Disable prebuilt flow and always build from source
--onboard Run onboarding after install
--interactive-onboard Run interactive onboarding (implies --onboard)
--api-key API key for non-interactive onboarding
@@ -41,6 +44,8 @@ Examples:
./bootstrap.sh
./bootstrap.sh --docker
./bootstrap.sh --install-system-deps --install-rust
+ ./bootstrap.sh --prefer-prebuilt
+ ./bootstrap.sh --prebuilt-only
./bootstrap.sh --onboard --api-key "sk-..." --provider openrouter [--model "openrouter/auto"]
./bootstrap.sh --interactive-onboard
@@ -53,6 +58,8 @@ Environment:
ZEROCLAW_API_KEY Used when --api-key is not provided
ZEROCLAW_PROVIDER Used when --provider is not provided (default: openrouter)
ZEROCLAW_MODEL Used when --model is not provided
+ ZEROCLAW_BOOTSTRAP_MIN_RAM_MB Minimum RAM threshold for source build preflight (default: 2048)
+ ZEROCLAW_BOOTSTRAP_MIN_DISK_MB Minimum free disk threshold for source build preflight (default: 6144)
USAGE
}
@@ -60,6 +67,155 @@ have_cmd() {
command -v "$1" >/dev/null 2>&1
}
+get_total_memory_mb() {
+ case "$(uname -s)" in
+ Linux)
+ if [[ -r /proc/meminfo ]]; then
+ awk '/MemTotal:/ {printf "%d\n", $2 / 1024}' /proc/meminfo
+ fi
+ ;;
+ Darwin)
+ if have_cmd sysctl; then
+ local bytes
+ bytes="$(sysctl -n hw.memsize 2>/dev/null || true)"
+ if [[ "$bytes" =~ ^[0-9]+$ ]]; then
+ echo $((bytes / 1024 / 1024))
+ fi
+ fi
+ ;;
+ esac
+}
+
+get_available_disk_mb() {
+ local path="${1:-.}"
+ local free_kb
+ free_kb="$(df -Pk "$path" 2>/dev/null | awk 'NR==2 {print $4}')"
+ if [[ "$free_kb" =~ ^[0-9]+$ ]]; then
+ echo $((free_kb / 1024))
+ fi
+}
+
+detect_release_target() {
+ local os arch
+ os="$(uname -s)"
+ arch="$(uname -m)"
+
+ case "$os:$arch" in
+ Linux:x86_64)
+ echo "x86_64-unknown-linux-gnu"
+ ;;
+ Linux:aarch64|Linux:arm64)
+ echo "aarch64-unknown-linux-gnu"
+ ;;
+ Linux:armv7l|Linux:armv6l)
+ echo "armv7-unknown-linux-gnueabihf"
+ ;;
+ Darwin:x86_64)
+ echo "x86_64-apple-darwin"
+ ;;
+ Darwin:arm64|Darwin:aarch64)
+ echo "aarch64-apple-darwin"
+ ;;
+ *)
+ return 1
+ ;;
+ esac
+}
+
+should_attempt_prebuilt_for_resources() {
+ local workspace="${1:-.}"
+ local min_ram_mb min_disk_mb total_ram_mb free_disk_mb low_resource
+
+ min_ram_mb="${ZEROCLAW_BOOTSTRAP_MIN_RAM_MB:-2048}"
+ min_disk_mb="${ZEROCLAW_BOOTSTRAP_MIN_DISK_MB:-6144}"
+ total_ram_mb="$(get_total_memory_mb || true)"
+ free_disk_mb="$(get_available_disk_mb "$workspace" || true)"
+ low_resource=false
+
+ if [[ "$total_ram_mb" =~ ^[0-9]+$ && "$total_ram_mb" -lt "$min_ram_mb" ]]; then
+ low_resource=true
+ fi
+ if [[ "$free_disk_mb" =~ ^[0-9]+$ && "$free_disk_mb" -lt "$min_disk_mb" ]]; then
+ low_resource=true
+ fi
+
+ if [[ "$low_resource" == true ]]; then
+ warn "Source build preflight indicates constrained resources."
+ if [[ "$total_ram_mb" =~ ^[0-9]+$ ]]; then
+ warn "Detected RAM: ${total_ram_mb}MB (recommended >= ${min_ram_mb}MB for local source builds)."
+ else
+ warn "Unable to detect total RAM automatically."
+ fi
+ if [[ "$free_disk_mb" =~ ^[0-9]+$ ]]; then
+ warn "Detected free disk: ${free_disk_mb}MB (recommended >= ${min_disk_mb}MB)."
+ else
+ warn "Unable to detect free disk space automatically."
+ fi
+ return 0
+ fi
+
+ return 1
+}
+
+install_prebuilt_binary() {
+ local target archive_url temp_dir archive_path extracted_bin install_dir
+
+ if ! have_cmd curl; then
+ warn "curl is required for pre-built binary installation."
+ return 1
+ fi
+ if ! have_cmd tar; then
+ warn "tar is required for pre-built binary installation."
+ return 1
+ fi
+
+ target="$(detect_release_target || true)"
+ if [[ -z "$target" ]]; then
+ warn "No pre-built binary target mapping for $(uname -s)/$(uname -m)."
+ return 1
+ fi
+
+ archive_url="https://github.com/zeroclaw-labs/zeroclaw/releases/latest/download/zeroclaw-${target}.tar.gz"
+ temp_dir="$(mktemp -d -t zeroclaw-prebuilt-XXXXXX)"
+ archive_path="$temp_dir/zeroclaw-${target}.tar.gz"
+
+ info "Attempting pre-built binary install for target: $target"
+ if ! curl -fsSL "$archive_url" -o "$archive_path"; then
+ warn "Could not download release asset: $archive_url"
+ rm -rf "$temp_dir"
+ return 1
+ fi
+
+ if ! tar -xzf "$archive_path" -C "$temp_dir"; then
+ warn "Failed to extract pre-built archive."
+ rm -rf "$temp_dir"
+ return 1
+ fi
+
+ extracted_bin="$temp_dir/zeroclaw"
+ if [[ ! -x "$extracted_bin" ]]; then
+ extracted_bin="$(find "$temp_dir" -maxdepth 2 -type f -name zeroclaw -perm -u+x | head -n 1 || true)"
+ fi
+ if [[ -z "$extracted_bin" || ! -x "$extracted_bin" ]]; then
+ warn "Archive did not contain an executable zeroclaw binary."
+ rm -rf "$temp_dir"
+ return 1
+ fi
+
+ install_dir="$HOME/.cargo/bin"
+ mkdir -p "$install_dir"
+ install -m 0755 "$extracted_bin" "$install_dir/zeroclaw"
+ rm -rf "$temp_dir"
+
+ info "Installed pre-built binary to $install_dir/zeroclaw"
+ if [[ ":$PATH:" != *":$install_dir:"* ]]; then
+ warn "$install_dir is not in PATH for this shell."
+ warn "Run: export PATH=\"$install_dir:\$PATH\""
+ fi
+
+ return 0
+}
+
run_privileged() {
if [[ "$(id -u)" -eq 0 ]]; then
"$@"
@@ -221,10 +377,14 @@ REPO_URL="https://github.com/zeroclaw-labs/zeroclaw.git"
DOCKER_MODE=false
INSTALL_SYSTEM_DEPS=false
INSTALL_RUST=false
+PREFER_PREBUILT=false
+PREBUILT_ONLY=false
+FORCE_SOURCE_BUILD=false
RUN_ONBOARD=false
INTERACTIVE_ONBOARD=false
SKIP_BUILD=false
SKIP_INSTALL=false
+PREBUILT_INSTALLED=false
API_KEY="${ZEROCLAW_API_KEY:-}"
PROVIDER="${ZEROCLAW_PROVIDER:-openrouter}"
MODEL="${ZEROCLAW_MODEL:-}"
@@ -243,6 +403,18 @@ while [[ $# -gt 0 ]]; do
INSTALL_RUST=true
shift
;;
+ --prefer-prebuilt)
+ PREFER_PREBUILT=true
+ shift
+ ;;
+ --prebuilt-only)
+ PREBUILT_ONLY=true
+ shift
+ ;;
+ --force-source-build)
+ FORCE_SOURCE_BUILD=true
+ shift
+ ;;
--onboard)
RUN_ONBOARD=true
shift
@@ -314,16 +486,6 @@ else
fi
fi
-if [[ "$DOCKER_MODE" == false ]] && ! have_cmd cargo; then
- error "cargo is not installed."
- cat <<'MSG' >&2
-Install Rust first: https://rustup.rs/
-or re-run with:
- ./bootstrap.sh --install-rust
-MSG
- exit 1
-fi
-
WORK_DIR="$ROOT_DIR"
TEMP_CLONE=false
TEMP_DIR=""
@@ -364,6 +526,15 @@ echo " workspace: $WORK_DIR"
cd "$WORK_DIR"
+if [[ "$FORCE_SOURCE_BUILD" == true ]]; then
+ PREFER_PREBUILT=false
+ PREBUILT_ONLY=false
+fi
+
+if [[ "$PREBUILT_ONLY" == true ]]; then
+ PREFER_PREBUILT=true
+fi
+
if [[ "$DOCKER_MODE" == true ]]; then
ensure_docker_ready
if [[ "$RUN_ONBOARD" == false ]]; then
@@ -389,6 +560,39 @@ DONE
exit 0
fi
+if [[ "$FORCE_SOURCE_BUILD" == false ]]; then
+ if [[ "$PREFER_PREBUILT" == false && "$PREBUILT_ONLY" == false ]]; then
+ if should_attempt_prebuilt_for_resources "$WORK_DIR"; then
+ info "Attempting pre-built binary first due to resource preflight."
+ PREFER_PREBUILT=true
+ fi
+ fi
+
+ if [[ "$PREFER_PREBUILT" == true ]]; then
+ if install_prebuilt_binary; then
+ PREBUILT_INSTALLED=true
+ SKIP_BUILD=true
+ SKIP_INSTALL=true
+ elif [[ "$PREBUILT_ONLY" == true ]]; then
+ error "Pre-built-only mode requested, but no compatible release asset is available."
+ error "Try again later, or run with --force-source-build on a machine with enough RAM/disk."
+ exit 1
+ else
+ warn "Pre-built install unavailable; falling back to source build."
+ fi
+ fi
+fi
+
+if [[ "$PREBUILT_INSTALLED" == false && ( "$SKIP_BUILD" == false || "$SKIP_INSTALL" == false ) ]] && ! have_cmd cargo; then
+ error "cargo is not installed."
+ cat <<'MSG' >&2
+Install Rust first: https://rustup.rs/
+or re-run with:
+ ./bootstrap.sh --install-rust
+MSG
+ exit 1
+fi
+
if [[ "$SKIP_BUILD" == false ]]; then
info "Building release binary"
cargo build --release --locked
@@ -406,6 +610,8 @@ fi
ZEROCLAW_BIN=""
if have_cmd zeroclaw; then
ZEROCLAW_BIN="zeroclaw"
+elif [[ -x "$HOME/.cargo/bin/zeroclaw" ]]; then
+ ZEROCLAW_BIN="$HOME/.cargo/bin/zeroclaw"
elif [[ -x "$WORK_DIR/target/release/zeroclaw" ]]; then
ZEROCLAW_BIN="$WORK_DIR/target/release/zeroclaw"
fi
From db2d9acd22fd6ba9c015b4557329519980ccf2c3 Mon Sep 17 00:00:00 2001
From: Chummy
Date: Fri, 20 Feb 2026 12:25:47 +0800
Subject: [PATCH 004/116] fix(skills): support SSH git remotes for skills
install (#1035)
---
docs/commands-reference.md | 2 +
src/lib.rs | 4 +-
src/main.rs | 4 +-
src/skills/mod.rs | 89 ++++++++++++++++++++++++++++++++++++--
4 files changed, 92 insertions(+), 7 deletions(-)
diff --git a/docs/commands-reference.md b/docs/commands-reference.md
index 91ad25eef..a693c8123 100644
--- a/docs/commands-reference.md
+++ b/docs/commands-reference.md
@@ -110,6 +110,8 @@ Channel runtime also watches `config.toml` and hot-applies updates to:
- `zeroclaw skills install <source>`
- `zeroclaw skills remove <name>`
+`<source>` accepts git remotes (`https://...`, `http://...`, `ssh://...`, and `git@host:owner/repo.git`) or a local filesystem path.
+
Skill manifests (`SKILL.toml`) support `prompts` and `[[tools]]`; both are injected into the agent system prompt at runtime, so the model can follow skill instructions without manually reading skill files.
### `migrate`
diff --git a/src/lib.rs b/src/lib.rs
index 40e364e29..cdf280175 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -142,9 +142,9 @@ Examples:
pub enum SkillCommands {
/// List all installed skills
List,
- /// Install a new skill from a URL or local path
+ /// Install a new skill from a git URL (HTTPS/SSH) or local path
Install {
- /// Source URL or local path
+ /// Source git URL (HTTPS/SSH) or local path
source: String,
},
/// Remove an installed skill
diff --git a/src/main.rs b/src/main.rs
index ff41e5bfc..414a4f504 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -598,9 +598,9 @@ enum ChannelCommands {
enum SkillCommands {
/// List installed skills
List,
- /// Install a skill from a GitHub URL or local path
+ /// Install a skill from a git URL (HTTPS/SSH) or local path
Install {
- /// GitHub URL or local path
+ /// Git URL (HTTPS/SSH) or local path
source: String,
},
/// Remove an installed skill
diff --git a/src/skills/mod.rs b/src/skills/mod.rs
index 4a1edd8b5..0c6e47cac 100644
--- a/src/skills/mod.rs
+++ b/src/skills/mod.rs
@@ -470,7 +470,7 @@ pub fn init_skills_dir(workspace_dir: &Path) -> Result<()> {
The agent will read it and follow the instructions.\n\n\
## Installing community skills\n\n\
```bash\n\
-             zeroclaw skills install <github-url>\n\
+             zeroclaw skills install <git-url-or-path>\n\
zeroclaw skills list\n\
```\n",
)?;
@@ -479,6 +479,50 @@ pub fn init_skills_dir(workspace_dir: &Path) -> Result<()> {
Ok(())
}
+fn is_git_source(source: &str) -> bool {
+ is_git_scheme_source(source, "https://")
+ || is_git_scheme_source(source, "http://")
+ || is_git_scheme_source(source, "ssh://")
+ || is_git_scheme_source(source, "git://")
+ || is_git_scp_source(source)
+}
+
+fn is_git_scheme_source(source: &str, scheme: &str) -> bool {
+ let Some(rest) = source.strip_prefix(scheme) else {
+ return false;
+ };
+ if rest.is_empty() || rest.starts_with('/') {
+ return false;
+ }
+
+ let host = rest.split(['/', '?', '#']).next().unwrap_or_default();
+ !host.is_empty()
+}
+
+fn is_git_scp_source(source: &str) -> bool {
+ // SCP-like syntax accepted by git, e.g. git@host:owner/repo.git
+ // Keep this strict enough to avoid treating local paths as git remotes.
+ let Some((user_host, remote_path)) = source.split_once(':') else {
+ return false;
+ };
+ if remote_path.is_empty() {
+ return false;
+ }
+ if source.contains("://") {
+ return false;
+ }
+
+ let Some((user, host)) = user_host.split_once('@') else {
+ return false;
+ };
+ !user.is_empty()
+ && !host.is_empty()
+ && !user.contains('/')
+ && !user.contains('\\')
+ && !host.contains('/')
+ && !host.contains('\\')
+}
+
/// Recursively copy a directory (used as fallback when symlinks aren't available)
#[cfg(any(windows, not(unix)))]
fn copy_dir_recursive(src: &Path, dest: &Path) -> Result<()> {
@@ -508,7 +552,7 @@ pub fn handle_command(command: crate::SkillCommands, workspace_dir: &Path) -> Re
println!(" Create one: mkdir -p ~/.zeroclaw/workspace/skills/my-skill");
println!(" echo '# My Skill' > ~/.zeroclaw/workspace/skills/my-skill/SKILL.md");
println!();
-        println!("  Or install: zeroclaw skills install <github-url>");
+        println!("  Or install: zeroclaw skills install <git-url-or-path>");
} else {
println!("Installed skills ({}):", skills.len());
println!();
@@ -544,7 +588,7 @@ pub fn handle_command(command: crate::SkillCommands, workspace_dir: &Path) -> Re
let skills_path = skills_dir(workspace_dir);
std::fs::create_dir_all(&skills_path)?;
- if source.starts_with("https://") || source.starts_with("http://") {
+ if is_git_source(&source) {
// Git clone
let output = std::process::Command::new("git")
.args(["clone", "--depth", "1", &source])
@@ -963,6 +1007,45 @@ description = "Bare minimum"
));
}
+ #[test]
+ fn git_source_detection_accepts_remote_protocols_and_scp_style() {
+ let sources = [
+ "https://github.com/some-org/some-skill.git",
+ "http://github.com/some-org/some-skill.git",
+ "ssh://git@github.com/some-org/some-skill.git",
+ "git://github.com/some-org/some-skill.git",
+ "git@github.com:some-org/some-skill.git",
+ "git@localhost:skills/some-skill.git",
+ ];
+
+ for source in sources {
+ assert!(
+ is_git_source(source),
+ "expected git source detection for '{source}'"
+ );
+ }
+ }
+
+ #[test]
+ fn git_source_detection_rejects_local_paths_and_invalid_inputs() {
+ let sources = [
+ "./skills/local-skill",
+ "/tmp/skills/local-skill",
+ "C:\\skills\\local-skill",
+ "git@github.com",
+ "ssh://",
+ "not-a-url",
+ "dir/git@github.com:org/repo.git",
+ ];
+
+ for source in sources {
+ assert!(
+ !is_git_source(source),
+ "expected local/invalid source detection for '{source}'"
+ );
+ }
+ }
+
#[test]
fn skills_dir_path() {
let base = std::path::Path::new("/home/user/.zeroclaw");
From 178bb108dab9cfd5adba7bbb7959a4ec05074886 Mon Sep 17 00:00:00 2001
From: Chummy
Date: Fri, 20 Feb 2026 12:27:00 +0800
Subject: [PATCH 005/116] fix(gemini): correct Gemini CLI OAuth cloudcode
payload/response handling (#1040)
* fix(gemini): align OAuth cloudcode payload and response parsing
* docs(gemini): document OAuth vs API key endpoint behavior
---
docs/providers-reference.md | 7 ++
src/providers/gemini.rs | 228 ++++++++++++++++++++++++++----------
2 files changed, 176 insertions(+), 59 deletions(-)
diff --git a/docs/providers-reference.md b/docs/providers-reference.md
index bc913824e..f9c772660 100644
--- a/docs/providers-reference.md
+++ b/docs/providers-reference.md
@@ -56,6 +56,13 @@ credential is not reused for fallback providers.
| `lmstudio` | `lm-studio` | Yes | (optional; local by default) |
| `nvidia` | `nvidia-nim`, `build.nvidia.com` | No | `NVIDIA_API_KEY` |
+### Gemini Notes
+
+- Provider ID: `gemini` (aliases: `google`, `google-gemini`)
+- Auth can come from `GEMINI_API_KEY`, `GOOGLE_API_KEY`, or Gemini CLI OAuth cache (`~/.gemini/oauth_creds.json`)
+- API key requests use `generativelanguage.googleapis.com/v1beta`
+- Gemini CLI OAuth requests use `cloudcode-pa.googleapis.com/v1internal` with Code Assist request envelope semantics
+
### Ollama Vision Notes
- Provider ID: `ollama`
diff --git a/src/providers/gemini.rs b/src/providers/gemini.rs
index c415f134a..b3b7110a9 100644
--- a/src/providers/gemini.rs
+++ b/src/providers/gemini.rs
@@ -58,10 +58,10 @@ impl GeminiAuth {
// API REQUEST/RESPONSE TYPES
// ══════════════════════════════════════════════════════════════════════════════
-#[derive(Debug, Serialize)]
+#[derive(Debug, Serialize, Clone)]
struct GenerateContentRequest {
     contents: Vec<Content>,
- #[serde(skip_serializing_if = "Option::is_none")]
+ #[serde(rename = "systemInstruction", skip_serializing_if = "Option::is_none")]
     system_instruction: Option<Content>,
#[serde(rename = "generationConfig")]
generation_config: GenerationConfig,
@@ -70,23 +70,33 @@ struct GenerateContentRequest {
/// Request envelope for the internal cloudcode-pa API.
/// OAuth tokens from Gemini CLI are scoped for this endpoint.
#[derive(Debug, Serialize)]
-struct InternalGenerateContentRequest {
+struct InternalGenerateContentEnvelope {
model: String,
- #[serde(rename = "generationConfig")]
- generation_config: GenerationConfig,
-    contents: Vec<Content>,
#[serde(skip_serializing_if = "Option::is_none")]
-    system_instruction: Option<Content>,
+    project: Option<String>,
+ #[serde(skip_serializing_if = "Option::is_none")]
+    user_prompt_id: Option<String>,
+ request: InternalGenerateContentRequest,
}
+/// Nested request payload for cloudcode-pa's code assist APIs.
#[derive(Debug, Serialize)]
+struct InternalGenerateContentRequest {
+    contents: Vec<Content>,
+ #[serde(rename = "systemInstruction", skip_serializing_if = "Option::is_none")]
+    system_instruction: Option<Content>,
+ #[serde(rename = "generationConfig")]
+ generation_config: GenerationConfig,
+}
+
+#[derive(Debug, Serialize, Clone)]
struct Content {
#[serde(skip_serializing_if = "Option::is_none")]
     role: Option<String>,
     parts: Vec<Part>,
}
-#[derive(Debug, Serialize)]
+#[derive(Debug, Serialize, Clone)]
struct Part {
text: String,
}
@@ -102,6 +112,8 @@ struct GenerationConfig {
struct GenerateContentResponse {
     candidates: Option<Vec<Candidate>>,
     error: Option<ApiError>,
+ #[serde(default)]
+    response: Option<Box<GenerateContentResponse>>,
}
#[derive(Debug, Deserialize)]
@@ -124,6 +136,19 @@ struct ApiError {
message: String,
}
+impl GenerateContentResponse {
+ /// cloudcode-pa wraps the actual response under `response`.
+ fn into_effective_response(self) -> Self {
+ match self {
+ Self {
+ response: Some(inner),
+ ..
+ } => *inner,
+ other => other,
+ }
+ }
+}
+
// ══════════════════════════════════════════════════════════════════════════════
// GEMINI CLI TOKEN STRUCTURES
// ══════════════════════════════════════════════════════════════════════════════
@@ -243,6 +268,10 @@ impl GeminiProvider {
}
}
+ fn format_internal_model_name(model: &str) -> String {
+ model.strip_prefix("models/").unwrap_or(model).to_string()
+ }
+
/// Build the API URL based on auth type.
///
/// - API key users → public `generativelanguage.googleapis.com/v1beta`
@@ -287,34 +316,16 @@ impl GeminiProvider {
let req = self.http_client().post(url).json(request);
match auth {
GeminiAuth::OAuthToken(token) => {
- // Internal API expects the model in the request body envelope
- let internal_request = InternalGenerateContentRequest {
- model: Self::format_model_name(model),
- generation_config: request.generation_config.clone(),
- contents: request
- .contents
- .iter()
- .map(|c| Content {
- role: c.role.clone(),
- parts: c
- .parts
- .iter()
- .map(|p| Part {
- text: p.text.clone(),
- })
- .collect(),
- })
- .collect(),
- system_instruction: request.system_instruction.as_ref().map(|si| Content {
- role: si.role.clone(),
- parts: si
- .parts
- .iter()
- .map(|p| Part {
- text: p.text.clone(),
- })
- .collect(),
- }),
+ // cloudcode-pa expects an outer envelope with `request`.
+ let internal_request = InternalGenerateContentEnvelope {
+ model: Self::format_internal_model_name(model),
+ project: None,
+ user_prompt_id: None,
+ request: InternalGenerateContentRequest {
+ contents: request.contents.clone(),
+ system_instruction: request.system_instruction.clone(),
+ generation_config: request.generation_config.clone(),
+ },
};
self.http_client()
.post(url)
@@ -367,7 +378,10 @@ impl GeminiProvider {
}
let result: GenerateContentResponse = response.json().await?;
-
+ if let Some(err) = &result.error {
+ anyhow::bail!("Gemini API error: {}", err.message);
+ }
+ let result = result.into_effective_response();
if let Some(err) = result.error {
anyhow::bail!("Gemini API error: {}", err.message);
}
@@ -460,6 +474,12 @@ impl Provider for GeminiProvider {
async fn warmup(&self) -> anyhow::Result<()> {
if let Some(auth) = self.auth.as_ref() {
+ // cloudcode-pa does not expose a lightweight model-list probe like the public API.
+ // Avoid false negatives for valid Gemini CLI OAuth credentials.
+ if auth.is_oauth() {
+ return Ok(());
+ }
+
let url = if auth.is_api_key() {
format!(
"https://generativelanguage.googleapis.com/v1beta/models?key={}",
@@ -469,12 +489,11 @@ impl Provider for GeminiProvider {
"https://generativelanguage.googleapis.com/v1beta/models".to_string()
};
- let mut request = self.http_client().get(&url);
- if let GeminiAuth::OAuthToken(token) = auth {
- request = request.bearer_auth(token);
- }
-
- request.send().await?.error_for_status()?;
+ self.http_client()
+ .get(&url)
+ .send()
+ .await?
+ .error_for_status()?;
}
Ok(())
}
@@ -559,6 +578,14 @@ mod tests {
GeminiProvider::format_model_name("models/gemini-1.5-pro"),
"models/gemini-1.5-pro"
);
+ assert_eq!(
+ GeminiProvider::format_internal_model_name("models/gemini-2.5-flash"),
+ "gemini-2.5-flash"
+ );
+ assert_eq!(
+ GeminiProvider::format_internal_model_name("gemini-2.5-flash"),
+ "gemini-2.5-flash"
+ );
}
#[test]
@@ -621,6 +648,44 @@ mod tests {
);
}
+ #[test]
+ fn oauth_request_wraps_payload_in_request_envelope() {
+ let provider = GeminiProvider {
+ auth: Some(GeminiAuth::OAuthToken("ya29.mock-token".into())),
+ };
+ let auth = GeminiAuth::OAuthToken("ya29.mock-token".into());
+ let url = GeminiProvider::build_generate_content_url("gemini-2.0-flash", &auth);
+ let body = GenerateContentRequest {
+ contents: vec![Content {
+ role: Some("user".into()),
+ parts: vec![Part {
+ text: "hello".into(),
+ }],
+ }],
+ system_instruction: None,
+ generation_config: GenerationConfig {
+ temperature: 0.7,
+ max_output_tokens: 8192,
+ },
+ };
+
+ let request = provider
+ .build_generate_content_request(&auth, &url, &body, "models/gemini-2.0-flash")
+ .build()
+ .unwrap();
+
+ let payload = request
+ .body()
+ .and_then(|b| b.as_bytes())
+ .expect("json request body should be bytes");
+ let json: serde_json::Value = serde_json::from_slice(payload).unwrap();
+
+ assert_eq!(json["model"], "gemini-2.0-flash");
+ assert!(json.get("generationConfig").is_none());
+ assert!(json.get("request").is_some());
+ assert!(json["request"].get("generationConfig").is_some());
+ }
+
#[test]
fn api_key_request_does_not_set_bearer_header() {
let provider = GeminiProvider {
@@ -674,31 +739,38 @@ mod tests {
let json = serde_json::to_string(&request).unwrap();
assert!(json.contains("\"role\":\"user\""));
assert!(json.contains("\"text\":\"Hello\""));
+ assert!(json.contains("\"systemInstruction\""));
+ assert!(!json.contains("\"system_instruction\""));
assert!(json.contains("\"temperature\":0.7"));
assert!(json.contains("\"maxOutputTokens\":8192"));
}
#[test]
fn internal_request_includes_model() {
- let request = InternalGenerateContentRequest {
- model: "models/gemini-3-pro-preview".to_string(),
- generation_config: GenerationConfig {
- temperature: 0.7,
- max_output_tokens: 8192,
- },
- contents: vec![Content {
- role: Some("user".to_string()),
- parts: vec![Part {
- text: "Hello".to_string(),
+ let request = InternalGenerateContentEnvelope {
+ model: "gemini-test-model".to_string(),
+ project: None,
+ user_prompt_id: None,
+ request: InternalGenerateContentRequest {
+ contents: vec![Content {
+ role: Some("user".to_string()),
+ parts: vec![Part {
+ text: "Hello".to_string(),
+ }],
}],
- }],
- system_instruction: None,
+ system_instruction: None,
+ generation_config: GenerationConfig {
+ temperature: 0.7,
+ max_output_tokens: 8192,
+ },
+ },
};
- let json = serde_json::to_string(&request).unwrap();
- assert!(json.contains("\"model\":\"models/gemini-3-pro-preview\""));
- assert!(json.contains("\"role\":\"user\""));
- assert!(json.contains("\"temperature\":0.7"));
+ let json: serde_json::Value = serde_json::to_value(&request).unwrap();
+ assert_eq!(json["model"], "gemini-test-model");
+ assert!(json.get("generationConfig").is_none());
+ assert!(json["request"].get("generationConfig").is_some());
+ assert_eq!(json["request"]["contents"][0]["role"], "user");
}
#[test]
@@ -741,10 +813,48 @@ mod tests {
assert_eq!(response.error.unwrap().message, "Invalid API key");
}
+ #[test]
+ fn internal_response_deserialization() {
+ let json = r#"{
+ "response": {
+ "candidates": [{
+ "content": {
+ "parts": [{"text": "Hello from internal"}]
+ }
+ }]
+ }
+ }"#;
+
+ let response: GenerateContentResponse = serde_json::from_str(json).unwrap();
+ let text = response
+ .into_effective_response()
+ .candidates
+ .unwrap()
+ .into_iter()
+ .next()
+ .unwrap()
+ .content
+ .parts
+ .into_iter()
+ .next()
+ .unwrap()
+ .text;
+ assert_eq!(text, Some("Hello from internal".to_string()));
+ }
+
#[tokio::test]
async fn warmup_without_key_is_noop() {
let provider = GeminiProvider { auth: None };
let result = provider.warmup().await;
assert!(result.is_ok());
}
+
+ #[tokio::test]
+ async fn warmup_oauth_is_noop() {
+ let provider = GeminiProvider {
+ auth: Some(GeminiAuth::OAuthToken("ya29.mock-token".into())),
+ };
+ let result = provider.warmup().await;
+ assert!(result.is_ok());
+ }
}
From f274fd575791af756ef0be60b7f8c71d1dd2097c Mon Sep 17 00:00:00 2001
From: Chummy
Date: Fri, 20 Feb 2026 12:28:05 +0800
Subject: [PATCH 006/116] fix(channel): prevent false timeout during multi-turn
tool loops (#1037)
---
docs/config-reference.md | 4 +++-
src/channels/mod.rs | 38 ++++++++++++++++++++++++++++++++++++--
src/config/schema.rs | 9 ++++++---
3 files changed, 45 insertions(+), 6 deletions(-)
diff --git a/docs/config-reference.md b/docs/config-reference.md
index 0d0da0273..4a182f559 100644
--- a/docs/config-reference.md
+++ b/docs/config-reference.md
@@ -332,7 +332,7 @@ Top-level channel options are configured under `channels_config`.
| Key | Default | Purpose |
|---|---|---|
-| `message_timeout_secs` | `300` | Timeout in seconds for processing a single channel message (LLM + tools) |
+| `message_timeout_secs` | `300` | Base timeout in seconds for channel message processing; runtime scales this with tool-loop depth (up to 4x) |
Examples:
@@ -344,6 +344,8 @@ Examples:
Notes:
- Default `300s` is optimized for on-device LLMs (Ollama) which are slower than cloud APIs.
+- Runtime timeout budget is `message_timeout_secs * scale`, where `scale` is `max_tool_iterations` clamped to the range `1..=4`.
+- This scaling avoids false timeouts when the first LLM turn is slow/retried but later tool-loop turns still need to complete.
- If using cloud APIs (OpenAI, Anthropic, etc.), you can reduce this to `60` or lower.
- Values below `30` are clamped to `30` to avoid immediate timeout churn.
- When a timeout occurs, users receive: `⚠️ Request timed out while waiting for the model. Please try again.`
diff --git a/src/channels/mod.rs b/src/channels/mod.rs
index 0379bea25..b9ec12179 100644
--- a/src/channels/mod.rs
+++ b/src/channels/mod.rs
@@ -95,6 +95,8 @@ const MIN_CHANNEL_MESSAGE_TIMEOUT_SECS: u64 = 30;
/// Default timeout for processing a single channel message (LLM + tools).
/// Used as fallback when not configured in channels_config.message_timeout_secs.
const CHANNEL_MESSAGE_TIMEOUT_SECS: u64 = 300;
+/// Cap timeout scaling so large max_tool_iterations values do not create unbounded waits.
+const CHANNEL_MESSAGE_TIMEOUT_SCALE_CAP: u64 = 4;
const CHANNEL_PARALLELISM_PER_CHANNEL: usize = 4;
const CHANNEL_MIN_IN_FLIGHT_MESSAGES: usize = 8;
const CHANNEL_MAX_IN_FLIGHT_MESSAGES: usize = 64;
@@ -114,6 +116,15 @@ fn effective_channel_message_timeout_secs(configured: u64) -> u64 {
configured.max(MIN_CHANNEL_MESSAGE_TIMEOUT_SECS)
}
+fn channel_message_timeout_budget_secs(
+ message_timeout_secs: u64,
+ max_tool_iterations: usize,
+) -> u64 {
+ let iterations = max_tool_iterations.max(1) as u64;
+ let scale = iterations.min(CHANNEL_MESSAGE_TIMEOUT_SCALE_CAP);
+ message_timeout_secs.saturating_mul(scale)
+}
+
#[derive(Debug, Clone, PartialEq, Eq)]
struct ChannelRouteSelection {
provider: String,
@@ -1223,10 +1234,12 @@ async fn process_channel_message(
Cancelled,
}
+ let timeout_budget_secs =
+ channel_message_timeout_budget_secs(ctx.message_timeout_secs, ctx.max_tool_iterations);
let llm_result = tokio::select! {
() = cancellation_token.cancelled() => LlmExecutionResult::Cancelled,
result = tokio::time::timeout(
- Duration::from_secs(ctx.message_timeout_secs),
+ Duration::from_secs(timeout_budget_secs),
run_tool_call_loop(
active_provider.as_ref(),
&mut history,
@@ -1385,7 +1398,10 @@ async fn process_channel_message(
}
}
LlmExecutionResult::Completed(Err(_)) => {
- let timeout_msg = format!("LLM response timed out after {}s", ctx.message_timeout_secs);
+ let timeout_msg = format!(
+ "LLM response timed out after {}s (base={}s, max_tool_iterations={})",
+ timeout_budget_secs, ctx.message_timeout_secs, ctx.max_tool_iterations
+ );
eprintln!(
" ❌ {} (elapsed: {}ms)",
timeout_msg,
@@ -2641,6 +2657,24 @@ mod tests {
assert_eq!(effective_channel_message_timeout_secs(300), 300);
}
+ #[test]
+ fn channel_message_timeout_budget_scales_with_tool_iterations() {
+ assert_eq!(channel_message_timeout_budget_secs(300, 1), 300);
+ assert_eq!(channel_message_timeout_budget_secs(300, 2), 600);
+ assert_eq!(channel_message_timeout_budget_secs(300, 3), 900);
+ }
+
+ #[test]
+ fn channel_message_timeout_budget_uses_safe_defaults_and_cap() {
+ // 0 iterations falls back to 1x timeout budget.
+ assert_eq!(channel_message_timeout_budget_secs(300, 0), 300);
+ // Large iteration counts are capped to avoid runaway waits.
+ assert_eq!(
+ channel_message_timeout_budget_secs(300, 10),
+ 300 * CHANNEL_MESSAGE_TIMEOUT_SCALE_CAP
+ );
+ }
+
#[test]
fn context_window_overflow_error_detector_matches_known_messages() {
let overflow_err = anyhow::anyhow!(
diff --git a/src/config/schema.rs b/src/config/schema.rs
index a31c8119f..7167ffb8d 100644
--- a/src/config/schema.rs
+++ b/src/config/schema.rs
@@ -7,9 +7,9 @@ use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::path::{Path, PathBuf};
use std::sync::{OnceLock, RwLock};
-use tokio::fs::{self, OpenOptions};
#[cfg(unix)]
use tokio::fs::File;
+use tokio::fs::{self, OpenOptions};
use tokio::io::AsyncWriteExt;
const SUPPORTED_PROXY_SERVICE_KEYS: &[&str] = &[
@@ -2197,7 +2197,10 @@ pub struct ChannelsConfig {
pub dingtalk: Option,
/// QQ Official Bot channel configuration.
pub qq: Option,
- /// Timeout in seconds for processing a single channel message (LLM + tools).
+ /// Base timeout in seconds for processing a single channel message (LLM + tools).
+ /// Runtime uses this as a per-turn budget that scales with tool-loop depth
+ /// (up to 4x, capped) so one slow/retried model call does not consume the
+ /// entire conversation budget.
/// Default: 300s for on-device LLMs (Ollama) which are slower than cloud APIs.
#[serde(default = "default_channel_message_timeout_secs")]
pub message_timeout_secs: u64,
@@ -3544,9 +3547,9 @@ async fn sync_directory(_path: &Path) -> Result<()> {
#[cfg(test)]
mod tests {
use super::*;
+ use std::path::PathBuf;
#[cfg(unix)]
use std::{fs::Permissions, os::unix::fs::PermissionsExt};
- use std::path::PathBuf;
use tokio::sync::{Mutex, MutexGuard};
use tokio::test;
use tokio_stream::wrappers::ReadDirStream;
From 8cafeb02e89a0903f6223bc79c6a1b869de4ac6f Mon Sep 17 00:00:00 2001
From: Chummy
Date: Fri, 20 Feb 2026 12:29:09 +0800
Subject: [PATCH 007/116] fix(composio): request latest v3 tool versions by
default (#1039)
---
docs/config-reference.md | 1 +
src/tools/composio.rs | 59 ++++++++++++++++++++++++++++++++++++----
2 files changed, 54 insertions(+), 6 deletions(-)
diff --git a/docs/config-reference.md b/docs/config-reference.md
index 4a182f559..2bc0a9351 100644
--- a/docs/config-reference.md
+++ b/docs/config-reference.md
@@ -104,6 +104,7 @@ Notes:
- Backward compatibility: legacy `enable = true` is accepted as an alias for `enabled = true`.
- If `enabled = false` or `api_key` is missing, the `composio` tool is not registered.
+- ZeroClaw requests Composio v3 tools with `toolkit_versions=latest` and executes tools with `version="latest"` to avoid stale default tool revisions.
- Typical flow: call `connect`, complete browser OAuth, then run `execute` for the desired tool action.
- If Composio returns a missing connected-account reference error, call `list_accounts` (optionally with `app`) and pass the returned `connected_account_id` to `execute`.
diff --git a/src/tools/composio.rs b/src/tools/composio.rs
index a5d0f5ad7..c191ac17a 100644
--- a/src/tools/composio.rs
+++ b/src/tools/composio.rs
@@ -20,6 +20,7 @@ use std::sync::Arc;
const COMPOSIO_API_BASE_V2: &str = "https://backend.composio.dev/api/v2";
const COMPOSIO_API_BASE_V3: &str = "https://backend.composio.dev/api/v3";
+const COMPOSIO_TOOL_VERSION_LATEST: &str = "latest";
fn ensure_https(url: &str) -> anyhow::Result<()> {
if !url.starts_with("https://") {
@@ -79,12 +80,11 @@ impl ComposioTool {
async fn list_actions_v3(&self, app_name: Option<&str>) -> anyhow::Result> {
let url = format!("{COMPOSIO_API_BASE_V3}/tools");
- let mut req = self.client().get(&url).header("x-api-key", &self.api_key);
-
- req = req.query(&[("limit", "200")]);
- if let Some(app) = app_name.map(str::trim).filter(|app| !app.is_empty()) {
- req = req.query(&[("toolkits", app), ("toolkit_slug", app)]);
- }
+ let req = self
+ .client()
+ .get(&url)
+ .header("x-api-key", &self.api_key)
+ .query(&Self::build_list_actions_v3_query(app_name));
let resp = req.send().await?;
if !resp.status().is_success() {
@@ -280,6 +280,23 @@ impl ComposioTool {
}
}
+ fn build_list_actions_v3_query(app_name: Option<&str>) -> Vec<(String, String)> {
+ let mut query = vec![
+ ("limit".to_string(), "200".to_string()),
+ (
+ "toolkit_versions".to_string(),
+ COMPOSIO_TOOL_VERSION_LATEST.to_string(),
+ ),
+ ];
+
+ if let Some(app) = app_name.map(str::trim).filter(|app| !app.is_empty()) {
+ query.push(("toolkits".to_string(), app.to_string()));
+ query.push(("toolkit_slug".to_string(), app.to_string()));
+ }
+
+ query
+ }
+
fn build_execute_action_v3_request(
tool_slug: &str,
params: serde_json::Value,
@@ -294,6 +311,7 @@ impl ComposioTool {
let mut body = json!({
"arguments": params,
+ "version": COMPOSIO_TOOL_VERSION_LATEST,
});
if let Some(entity) = entity_id {
@@ -1517,10 +1535,38 @@ mod tests {
"https://backend.composio.dev/api/v3/tools/execute/gmail-send-email"
);
assert_eq!(body["arguments"]["to"], json!("test@example.com"));
+ assert_eq!(body["version"], json!(COMPOSIO_TOOL_VERSION_LATEST));
assert_eq!(body["user_id"], json!("workspace-user"));
assert_eq!(body["connected_account_id"], json!("account-42"));
}
+ #[test]
+ fn build_list_actions_v3_query_requests_latest_versions() {
+ let query = ComposioTool::build_list_actions_v3_query(None)
+ .into_iter()
+ .collect::>();
+ assert_eq!(
+ query.get("toolkit_versions"),
+ Some(&COMPOSIO_TOOL_VERSION_LATEST.to_string())
+ );
+ assert_eq!(query.get("limit"), Some(&"200".to_string()));
+ assert!(!query.contains_key("toolkits"));
+ assert!(!query.contains_key("toolkit_slug"));
+ }
+
+ #[test]
+ fn build_list_actions_v3_query_adds_app_filters_when_present() {
+ let query = ComposioTool::build_list_actions_v3_query(Some(" github "))
+ .into_iter()
+ .collect::>();
+ assert_eq!(
+ query.get("toolkit_versions"),
+ Some(&COMPOSIO_TOOL_VERSION_LATEST.to_string())
+ );
+ assert_eq!(query.get("toolkits"), Some(&"github".to_string()));
+ assert_eq!(query.get("toolkit_slug"), Some(&"github".to_string()));
+ }
+
// ── resolve_connected_account_ref (multi-account fix) ────
#[test]
@@ -1639,6 +1685,7 @@ mod tests {
"https://backend.composio.dev/api/v3/tools/execute/github-list-repos"
);
assert_eq!(body["arguments"], json!({}));
+ assert_eq!(body["version"], json!(COMPOSIO_TOOL_VERSION_LATEST));
assert!(body.get("connected_account_id").is_none());
assert!(body.get("user_id").is_none());
}
From 8c826e581cbb3c92856dc4b0839a613519e82573 Mon Sep 17 00:00:00 2001
From: xj
Date: Wed, 18 Feb 2026 03:38:48 -0800
Subject: [PATCH 008/116] fix(channel): store raw user message and skip memory
recall with history
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Two fixes for conversation history quality:
1. Store raw msg.content in ConversationHistoryMap instead of
enriched_message — memory context is ephemeral per-request and
pollutes future turns when persisted.
2. Skip memory recall when conversation history exists — prior turns
already provide context. Memory recall adds noise and can mislead
the model (e.g. old 'seen' entries overshadowing a code variable
named seen in the current conversation).
---
src/channels/mod.rs | 32 +++++++++++++++++---------------
1 file changed, 17 insertions(+), 15 deletions(-)
diff --git a/src/channels/mod.rs b/src/channels/mod.rs
index b9ec12179..c5c989eab 100644
--- a/src/channels/mod.rs
+++ b/src/channels/mod.rs
@@ -1111,10 +1111,6 @@ async fn process_channel_message(
return;
}
};
-
- let memory_context =
- build_memory_context(ctx.memory.as_ref(), &msg.content, ctx.min_relevance_score).await;
-
if ctx.auto_save_memory && msg.content.chars().count() >= AUTOSAVE_MIN_MESSAGE_CHARS {
let autosave_key = conversation_memory_key(&msg);
let _ = ctx
@@ -1128,15 +1124,16 @@ async fn process_channel_message(
.await;
}
- let enriched_message = if memory_context.is_empty() {
- msg.content.clone()
- } else {
- format!("{memory_context}{}", msg.content)
- };
-
println!(" ⏳ Processing message...");
let started_at = Instant::now();
+ let had_prior_history = ctx
+ .conversation_histories
+ .lock()
+ .unwrap_or_else(|e| e.into_inner())
+ .get(&history_key)
+ .is_some_and(|turns| !turns.is_empty());
+
// Preserve user turn before the LLM call so interrupted requests keep context.
append_sender_turn(ctx.as_ref(), &history_key, ChatMessage::user(&msg.content));
@@ -1149,11 +1146,16 @@ async fn process_channel_message(
.cloned()
.unwrap_or_default();
let mut prior_turns = normalize_cached_channel_turns(prior_turns_raw);
- // Keep persisted history clean (raw user text), but inject memory context
- // for the current provider call by enriching the newest user turn only.
- if let Some(last_turn) = prior_turns.last_mut() {
- if last_turn.role == "user" {
- last_turn.content = enriched_message.clone();
+
+ // Only enrich with memory context when there is no prior conversation
+ // history. Follow-up turns already include context from previous messages.
+ if !had_prior_history {
+ let memory_context =
+ build_memory_context(ctx.memory.as_ref(), &msg.content, ctx.min_relevance_score).await;
+ if let Some(last_turn) = prior_turns.last_mut() {
+ if last_turn.role == "user" && !memory_context.is_empty() {
+ last_turn.content = format!("{memory_context}{}", msg.content);
+ }
}
}
From 2d6205ee580bb968ae876a20905ac81357d8b181 Mon Sep 17 00:00:00 2001
From: xj
Date: Wed, 18 Feb 2026 05:42:14 -0800
Subject: [PATCH 009/116] fix(channel): use native tool calling to preserve
conversation context
AnthropicProvider declared supports_native_tools() = true but did not
override chat_with_tools(). The default trait implementation drops all
conversation history (sends only system + last user message), breaking
multi-turn conversations on Telegram and other channels.
Changes:
- Override chat_with_tools() in AnthropicProvider: converts OpenAI-format
tool JSON to ToolSpec and delegates to chat() which preserves full
message history
- Skip build_tool_instructions() XML protocol when provider supports
native tools (saves ~12k chars in system prompt)
- Remove duplicate Tool Use Protocol section from build_system_prompt()
for native-tool providers
- Update Your Task section to encourage conversational follow-ups
instead of XML tool_call tags when using native tools
- Add tracing::warn for malformed tool definitions in chat_with_tools
---
src/agent/loop_.rs | 18 +++-
src/channels/mod.rs | 49 +++++++--
src/providers/anthropic.rs | 210 +++++++++++++++++++++++++++++++++++++
3 files changed, 264 insertions(+), 13 deletions(-)
diff --git a/src/agent/loop_.rs b/src/agent/loop_.rs
index e191affa5..0deee673f 100644
--- a/src/agent/loop_.rs
+++ b/src/agent/loop_.rs
@@ -1458,17 +1458,21 @@ pub async fn run(
} else {
None
};
- let mut system_prompt = crate::channels::build_system_prompt(
+ let native_tools = provider.supports_native_tools();
+ let mut system_prompt = crate::channels::build_system_prompt_with_mode(
&config.workspace_dir,
model_name,
&tool_descs,
&skills,
Some(&config.identity),
bootstrap_max_chars,
+ native_tools,
);
- // Append structured tool-use instructions with schemas
- system_prompt.push_str(&build_tool_instructions(&tools_registry));
+ // Append structured tool-use instructions with schemas (only for non-native providers)
+ if !native_tools {
+ system_prompt.push_str(&build_tool_instructions(&tools_registry));
+ }
// ── Approval manager (supervised mode) ───────────────────────
let approval_manager = ApprovalManager::from_config(&config.autonomy);
@@ -1823,15 +1827,19 @@ pub async fn process_message(config: Config, message: &str) -> Result {
} else {
None
};
- let mut system_prompt = crate::channels::build_system_prompt(
+ let native_tools = provider.supports_native_tools();
+ let mut system_prompt = crate::channels::build_system_prompt_with_mode(
&config.workspace_dir,
&model_name,
&tool_descs,
&skills,
Some(&config.identity),
bootstrap_max_chars,
+ native_tools,
);
- system_prompt.push_str(&build_tool_instructions(&tools_registry));
+ if !native_tools {
+ system_prompt.push_str(&build_tool_instructions(&tools_registry));
+ }
let mem_context = build_context(mem.as_ref(), message, config.memory.min_relevance_score).await;
let rag_limit = if config.agent.compact_context { 2 } else { 5 };
diff --git a/src/channels/mod.rs b/src/channels/mod.rs
index c5c989eab..9eb1e1ec3 100644
--- a/src/channels/mod.rs
+++ b/src/channels/mod.rs
@@ -1558,6 +1558,26 @@ pub fn build_system_prompt(
skills: &[crate::skills::Skill],
identity_config: Option<&crate::config::IdentityConfig>,
bootstrap_max_chars: Option,
+) -> String {
+ build_system_prompt_with_mode(
+ workspace_dir,
+ model_name,
+ tools,
+ skills,
+ identity_config,
+ bootstrap_max_chars,
+ false,
+ )
+}
+
+pub fn build_system_prompt_with_mode(
+ workspace_dir: &std::path::Path,
+ model_name: &str,
+ tools: &[(&str, &str)],
+ skills: &[crate::skills::Skill],
+ identity_config: Option<&crate::config::IdentityConfig>,
+ bootstrap_max_chars: Option,
+ native_tools: bool,
) -> String {
use std::fmt::Write;
let mut prompt = String::with_capacity(8192);
@@ -1594,12 +1614,21 @@ pub fn build_system_prompt(
}
// ── 1c. Action instruction (avoid meta-summary) ───────────────
- prompt.push_str(
- "## Your Task\n\n\
- When the user sends a message, ACT on it. Use the tools to fulfill their request.\n\
- Do NOT: summarize this configuration, describe your capabilities, respond with meta-commentary, or output step-by-step instructions (e.g. \"1. First... 2. Next...\").\n\
- Instead: emit actual tags when you need to act. Just do what they ask.\n\n",
- );
+ if native_tools {
+ prompt.push_str(
+ "## Your Task\n\n\
+ When the user sends a message, respond naturally. Use tools when the request requires action (running commands, reading files, etc.).\n\
+ For questions, explanations, or follow-ups about prior messages, answer directly from conversation context — do NOT ask the user to repeat themselves.\n\
+ Do NOT: summarize this configuration, describe your capabilities, or output step-by-step meta-commentary.\n\n",
+ );
+ } else {
+ prompt.push_str(
+ "## Your Task\n\n\
+ When the user sends a message, ACT on it. Use the tools to fulfill their request.\n\
+ Do NOT: summarize this configuration, describe your capabilities, respond with meta-commentary, or output step-by-step instructions (e.g. \"1. First... 2. Next...\").\n\
+ Instead: emit actual tags when you need to act. Just do what they ask.\n\n",
+ );
+ }
// ── 2. Safety ───────────────────────────────────────────────
prompt.push_str("## Safety\n\n");
@@ -2318,15 +2347,19 @@ pub async fn start_channels(config: Config) -> Result<()> {
} else {
None
};
- let mut system_prompt = build_system_prompt(
+ let native_tools = provider.supports_native_tools();
+ let mut system_prompt = build_system_prompt_with_mode(
&workspace,
&model,
&tool_descs,
&skills,
Some(&config.identity),
bootstrap_max_chars,
+ native_tools,
);
- system_prompt.push_str(&build_tool_instructions(tools_registry.as_ref()));
+ if !native_tools {
+ system_prompt.push_str(&build_tool_instructions(tools_registry.as_ref()));
+ }
if !skills.is_empty() {
println!(
diff --git a/src/providers/anthropic.rs b/src/providers/anthropic.rs
index 722ba0be5..31798fb19 100644
--- a/src/providers/anthropic.rs
+++ b/src/providers/anthropic.rs
@@ -497,6 +497,53 @@ impl Provider for AnthropicProvider {
true
}
+ async fn chat_with_tools(
+ &self,
+ messages: &[ChatMessage],
+ tools: &[serde_json::Value],
+ model: &str,
+ temperature: f64,
+ ) -> anyhow::Result {
+ // Convert OpenAI-format tool JSON to ToolSpec so we can reuse the
+ // existing `chat()` method which handles full message history,
+ // system prompt extraction, caching, and Anthropic native formatting.
+ let tool_specs: Vec = tools
+ .iter()
+ .filter_map(|t| {
+ let func = t.get("function").or_else(|| {
+ tracing::warn!("Skipping malformed tool definition (missing 'function' key)");
+ None
+ })?;
+ let name = func.get("name").and_then(|n| n.as_str()).or_else(|| {
+ tracing::warn!("Skipping tool with missing or non-string 'name'");
+ None
+ })?;
+ Some(ToolSpec {
+ name: name.to_string(),
+ description: func
+ .get("description")
+ .and_then(|d| d.as_str())
+ .unwrap_or("")
+ .to_string(),
+ parameters: func
+ .get("parameters")
+ .cloned()
+ .unwrap_or(serde_json::json!({"type": "object"})),
+ })
+ })
+ .collect();
+
+ let request = ProviderChatRequest {
+ messages,
+ tools: if tool_specs.is_empty() {
+ None
+ } else {
+ Some(&tool_specs)
+ },
+ };
+ self.chat(request, model, temperature).await
+ }
+
async fn warmup(&self) -> anyhow::Result<()> {
if let Some(credential) = self.credential.as_ref() {
let mut request = self
@@ -1105,4 +1152,167 @@ mod tests {
let result = provider.warmup().await;
assert!(result.is_ok());
}
+
+ #[test]
+ fn convert_messages_preserves_multi_turn_history() {
+ let messages = vec![
+ ChatMessage {
+ role: "system".to_string(),
+ content: "You are helpful.".to_string(),
+ },
+ ChatMessage {
+ role: "user".to_string(),
+ content: "gen a 2 sum in golang".to_string(),
+ },
+ ChatMessage {
+ role: "assistant".to_string(),
+ content: "```go\nfunc twoSum(nums []int) {}\n```".to_string(),
+ },
+ ChatMessage {
+ role: "user".to_string(),
+ content: "what's meaning of make here?".to_string(),
+ },
+ ];
+
+ let (system, native_msgs) = AnthropicProvider::convert_messages(&messages);
+
+ // System prompt extracted
+ assert!(system.is_some());
+ // All 3 non-system messages preserved in order
+ assert_eq!(native_msgs.len(), 3);
+ assert_eq!(native_msgs[0].role, "user");
+ assert_eq!(native_msgs[1].role, "assistant");
+ assert_eq!(native_msgs[2].role, "user");
+ }
+
+ /// Integration test: spin up a mock Anthropic API server, call chat_with_tools
+ /// with a multi-turn conversation + tools, and verify the request body contains
+ /// ALL conversation turns and native tool definitions.
+ #[tokio::test]
+ async fn chat_with_tools_sends_full_history_and_native_tools() {
+ use axum::{routing::post, Json, Router};
+ use std::sync::{Arc, Mutex};
+ use tokio::net::TcpListener;
+
+ // Captured request body for assertion
+ let captured: Arc>> = Arc::new(Mutex::new(None));
+ let captured_clone = captured.clone();
+
+ let app = Router::new().route(
+ "/v1/messages",
+ post(move |Json(body): Json| {
+ let cap = captured_clone.clone();
+ async move {
+ *cap.lock().unwrap() = Some(body);
+ // Return a minimal valid Anthropic response
+ Json(serde_json::json!({
+ "id": "msg_test",
+ "type": "message",
+ "role": "assistant",
+ "content": [{"type": "text", "text": "The make function creates a map."}],
+ "model": "claude-opus-4-6",
+ "stop_reason": "end_turn",
+ "usage": {"input_tokens": 100, "output_tokens": 20}
+ }))
+ }
+ }),
+ );
+
+ let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
+ let addr = listener.local_addr().unwrap();
+ let server_handle = tokio::spawn(async move {
+ axum::serve(listener, app).await.unwrap();
+ });
+
+ // Create provider pointing at mock server
+ let provider = AnthropicProvider {
+ credential: Some("test-key".to_string()),
+ base_url: format!("http://{addr}"),
+ };
+
+ // Multi-turn conversation: system → user (Go code) → assistant (code response) → user (follow-up)
+ let messages = vec![
+ ChatMessage::system("You are a helpful assistant."),
+ ChatMessage::user("gen a 2 sum in golang"),
+ ChatMessage::assistant("```go\nfunc twoSum(nums []int, target int) []int {\n m := make(map[int]int)\n for i, n := range nums {\n if j, ok := m[target-n]; ok {\n return []int{j, i}\n }\n m[n] = i\n }\n return nil\n}\n```"),
+ ChatMessage::user("what's meaning of make here?"),
+ ];
+
+ let tools = vec![serde_json::json!({
+ "type": "function",
+ "function": {
+ "name": "shell",
+ "description": "Run a shell command",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "command": {"type": "string"}
+ },
+ "required": ["command"]
+ }
+ }
+ })];
+
+ let result = provider
+ .chat_with_tools(&messages, &tools, "claude-opus-4-6", 0.7)
+ .await;
+ assert!(result.is_ok(), "chat_with_tools failed: {:?}", result.err());
+
+ let body = captured
+ .lock()
+ .unwrap()
+ .take()
+ .expect("No request captured");
+
+ // Verify system prompt extracted to top-level field
+ let system = &body["system"];
+ assert!(
+ system.to_string().contains("helpful assistant"),
+ "System prompt missing: {system}"
+ );
+
+ // Verify ALL conversation turns present in messages array
+ let msgs = body["messages"].as_array().expect("messages not an array");
+ assert_eq!(
+ msgs.len(),
+ 3,
+ "Expected 3 messages (2 user + 1 assistant), got {}",
+ msgs.len()
+ );
+
+ // Turn 1: user with Go request
+ assert_eq!(msgs[0]["role"], "user");
+ let turn1_text = msgs[0]["content"].to_string();
+ assert!(
+ turn1_text.contains("2 sum"),
+ "Turn 1 missing Go request: {turn1_text}"
+ );
+
+ // Turn 2: assistant with Go code
+ assert_eq!(msgs[1]["role"], "assistant");
+ let turn2_text = msgs[1]["content"].to_string();
+ assert!(
+ turn2_text.contains("make(map[int]int)"),
+ "Turn 2 missing Go code: {turn2_text}"
+ );
+
+ // Turn 3: user follow-up
+ assert_eq!(msgs[2]["role"], "user");
+ let turn3_text = msgs[2]["content"].to_string();
+ assert!(
+ turn3_text.contains("meaning of make"),
+ "Turn 3 missing follow-up: {turn3_text}"
+ );
+
+ // Verify native tools are present
+ let api_tools = body["tools"].as_array().expect("tools not an array");
+ assert_eq!(api_tools.len(), 1);
+ assert_eq!(api_tools[0]["name"], "shell");
+ assert!(
+ api_tools[0]["input_schema"].is_object(),
+ "Missing input_schema"
+ );
+
+ server_handle.abort();
+ }
}
From e7ccb573fa8f704e64cda376fb6f76942e918009 Mon Sep 17 00:00:00 2001
From: Chummy
Date: Fri, 20 Feb 2026 15:35:54 +0800
Subject: [PATCH 010/116] fix(observability): prevent otel reactor panic in
non-tokio contexts
---
Cargo.toml | 7 +++++--
docs/config-reference.md | 22 +++++++++++++++++++++
tests/otel_dependency_feature_regression.rs | 17 ++++++++++++++++
3 files changed, 44 insertions(+), 2 deletions(-)
create mode 100644 tests/otel_dependency_feature_regression.rs
diff --git a/Cargo.toml b/Cargo.toml
index de45d4d38..10cfdf6f6 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -125,10 +125,13 @@ tower = { version = "0.5", default-features = false }
tower-http = { version = "0.6", default-features = false, features = ["limit", "timeout"] }
http-body-util = "0.1"
-# OpenTelemetry — OTLP trace + metrics export
+# OpenTelemetry — OTLP trace + metrics export.
+# Use the blocking HTTP exporter client to avoid Tokio-reactor panics in
+# OpenTelemetry background batch threads when ZeroClaw emits spans/metrics from
+# non-Tokio contexts.
opentelemetry = { version = "0.31", default-features = false, features = ["trace", "metrics"] }
opentelemetry_sdk = { version = "0.31", default-features = false, features = ["trace", "metrics"] }
-opentelemetry-otlp = { version = "0.31", default-features = false, features = ["trace", "metrics", "http-proto", "reqwest-client", "reqwest-rustls-webpki-roots"] }
+opentelemetry-otlp = { version = "0.31", default-features = false, features = ["trace", "metrics", "http-proto", "reqwest-blocking-client", "reqwest-rustls-webpki-roots"] }
# Serial port for peripheral communication (STM32, etc.)
tokio-serial = { version = "5", default-features = false, optional = true }
diff --git a/docs/config-reference.md b/docs/config-reference.md
index 2bc0a9351..f19cb27d9 100644
--- a/docs/config-reference.md
+++ b/docs/config-reference.md
@@ -26,6 +26,28 @@ Schema export command:
| `default_model` | `anthropic/claude-sonnet-4-6` | model routed through selected provider |
| `default_temperature` | `0.7` | model temperature |
+## `[observability]`
+
+| Key | Default | Purpose |
+|---|---|---|
+| `backend` | `none` | Observability backend: `none`, `noop`, `log`, `prometheus`, `otel`, `opentelemetry`, or `otlp` |
+| `otel_endpoint` | `http://localhost:4318` | OTLP HTTP endpoint used when backend is `otel` |
+| `otel_service_name` | `zeroclaw` | Service name emitted to OTLP collector |
+
+Notes:
+
+- `backend = "otel"` uses OTLP HTTP export with a blocking exporter client so spans and metrics can be emitted safely from non-Tokio contexts.
+- Alias values `opentelemetry` and `otlp` map to the same OTel backend.
+
+Example:
+
+```toml
+[observability]
+backend = "otel"
+otel_endpoint = "http://localhost:4318"
+otel_service_name = "zeroclaw"
+```
+
## Environment Provider Overrides
Provider selection can also be controlled by environment variables. Precedence is:
diff --git a/tests/otel_dependency_feature_regression.rs b/tests/otel_dependency_feature_regression.rs
new file mode 100644
index 000000000..0620b75d1
--- /dev/null
+++ b/tests/otel_dependency_feature_regression.rs
@@ -0,0 +1,17 @@
+#[test]
+fn opentelemetry_otlp_uses_blocking_reqwest_client() {
+ let manifest = include_str!("../Cargo.toml");
+ let otlp_line = manifest
+ .lines()
+ .find(|line| line.trim_start().starts_with("opentelemetry-otlp ="))
+ .expect("Cargo.toml must define opentelemetry-otlp dependency");
+
+ assert!(
+ otlp_line.contains("\"reqwest-blocking-client\""),
+ "opentelemetry-otlp must include reqwest-blocking-client to avoid Tokio reactor panics"
+ );
+ assert!(
+ !otlp_line.contains("\"reqwest-client\""),
+ "opentelemetry-otlp must not include async reqwest-client in this runtime mode"
+ );
+}
From 7c2c3701800622ee76d8db4a9834b68f5fe4082d Mon Sep 17 00:00:00 2001
From: Chummy
Date: Fri, 20 Feb 2026 16:07:11 +0800
Subject: [PATCH 011/116] fix(channel): preserve interrupted user context in
cached turn normalization
---
src/channels/mod.rs | 44 ++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 44 insertions(+)
diff --git a/src/channels/mod.rs b/src/channels/mod.rs
index 9eb1e1ec3..655a0ae58 100644
--- a/src/channels/mod.rs
+++ b/src/channels/mod.rs
@@ -284,6 +284,18 @@ fn normalize_cached_channel_turns(turns: Vec<ChatMessage>) -> Vec<ChatMessage> {
normalized.push(turn);
expecting_user = true;
}
+ // Interrupted channel turns can produce consecutive user messages
+ // (no assistant persisted yet). Merge instead of dropping.
+ (false, "user") | (true, "assistant") => {
+ if let Some(last_turn) = normalized.last_mut() {
+ if !turn.content.is_empty() {
+ if !last_turn.content.is_empty() {
+ last_turn.content.push_str("\n\n");
+ }
+ last_turn.content.push_str(&turn.content);
+ }
+ }
+ }
_ => {}
}
}
@@ -2735,6 +2747,38 @@ mod tests {
assert!(!should_skip_memory_context_entry("telegram_123_45", "hi"));
}
+ #[test]
+ fn normalize_cached_channel_turns_merges_consecutive_user_turns() {
+ let turns = vec![
+ ChatMessage::user("forwarded content"),
+ ChatMessage::user("summarize this"),
+ ];
+
+ let normalized = normalize_cached_channel_turns(turns);
+ assert_eq!(normalized.len(), 1);
+ assert_eq!(normalized[0].role, "user");
+ assert!(normalized[0].content.contains("forwarded content"));
+ assert!(normalized[0].content.contains("summarize this"));
+ }
+
+ #[test]
+ fn normalize_cached_channel_turns_merges_consecutive_assistant_turns() {
+ let turns = vec![
+ ChatMessage::user("first user"),
+ ChatMessage::assistant("assistant part 1"),
+ ChatMessage::assistant("assistant part 2"),
+ ChatMessage::user("next user"),
+ ];
+
+ let normalized = normalize_cached_channel_turns(turns);
+ assert_eq!(normalized.len(), 3);
+ assert_eq!(normalized[0].role, "user");
+ assert_eq!(normalized[1].role, "assistant");
+ assert_eq!(normalized[2].role, "user");
+ assert!(normalized[1].content.contains("assistant part 1"));
+ assert!(normalized[1].content.contains("assistant part 2"));
+ }
+
#[test]
fn compact_sender_history_keeps_recent_truncated_messages() {
let mut histories = HashMap::new();
From 654f8224307a23c91f46a88f08eb86efa5d43d6c Mon Sep 17 00:00:00 2001
From: Chummy
Date: Fri, 20 Feb 2026 16:06:44 +0800
Subject: [PATCH 012/116] fix(memory): avoid tokio runtime panic when
initializing postgres backend
---
src/memory/postgres.rs | 72 ++++++++++++++++++++++++++++++++++--------
1 file changed, 58 insertions(+), 14 deletions(-)
diff --git a/src/memory/postgres.rs b/src/memory/postgres.rs
index 65560d216..438275186 100644
--- a/src/memory/postgres.rs
+++ b/src/memory/postgres.rs
@@ -30,24 +30,16 @@ impl PostgresMemory {
validate_identifier(schema, "storage schema")?;
validate_identifier(table, "storage table")?;
- let mut config: postgres::Config = db_url
- .parse()
- .context("invalid PostgreSQL connection URL")?;
-
- if let Some(timeout_secs) = connect_timeout_secs {
- let bounded = timeout_secs.min(POSTGRES_CONNECT_TIMEOUT_CAP_SECS);
- config.connect_timeout(Duration::from_secs(bounded));
- }
-
- let mut client = config
- .connect(NoTls)
- .context("failed to connect to PostgreSQL memory backend")?;
-
let schema_ident = quote_identifier(schema);
let table_ident = quote_identifier(table);
let qualified_table = format!("{schema_ident}.{table_ident}");
- Self::init_schema(&mut client, &schema_ident, &qualified_table)?;
+ let client = Self::initialize_client(
+ db_url.to_string(),
+ connect_timeout_secs,
+ schema_ident.clone(),
+ qualified_table.clone(),
+ )?;
Ok(Self {
client: Arc::new(Mutex::new(client)),
@@ -55,6 +47,40 @@ impl PostgresMemory {
})
}
+ fn initialize_client(
+ db_url: String,
+ connect_timeout_secs: Option<u64>,
+ schema_ident: String,
+ qualified_table: String,
+ ) -> Result<Client> {
+ let init_handle = std::thread::Builder::new()
+ .name("postgres-memory-init".to_string())
+ .spawn(move || -> Result<Client> {
+ let mut config: postgres::Config = db_url
+ .parse()
+ .context("invalid PostgreSQL connection URL")?;
+
+ if let Some(timeout_secs) = connect_timeout_secs {
+ let bounded = timeout_secs.min(POSTGRES_CONNECT_TIMEOUT_CAP_SECS);
+ config.connect_timeout(Duration::from_secs(bounded));
+ }
+
+ let mut client = config
+ .connect(NoTls)
+ .context("failed to connect to PostgreSQL memory backend")?;
+
+ Self::init_schema(&mut client, &schema_ident, &qualified_table)?;
+ Ok(client)
+ })
+ .context("failed to spawn PostgreSQL initializer thread")?;
+
+ let init_result = init_handle
+ .join()
+ .map_err(|_| anyhow::anyhow!("PostgreSQL initializer thread panicked"))?;
+
+ init_result
+ }
+
fn init_schema(client: &mut Client, schema_ident: &str, qualified_table: &str) -> Result<()> {
client.batch_execute(&format!(
"
@@ -346,4 +372,22 @@ mod tests {
MemoryCategory::Custom("custom_notes".into())
);
}
+
+ #[tokio::test(flavor = "current_thread")]
+ async fn new_does_not_panic_inside_tokio_runtime() {
+ let outcome = std::panic::catch_unwind(|| {
+ PostgresMemory::new(
+ "postgres://zeroclaw:password@127.0.0.1:1/zeroclaw",
+ "public",
+ "memories",
+ Some(1),
+ )
+ });
+
+ assert!(outcome.is_ok(), "PostgresMemory::new should not panic");
+ assert!(
+ outcome.unwrap().is_err(),
+ "PostgresMemory::new should return a connect error for an unreachable endpoint"
+ );
+ }
}
From bbaf55eb3b30d8166091d57bedbdf687ebd6e34e Mon Sep 17 00:00:00 2001
From: Chummy
Date: Fri, 20 Feb 2026 15:19:16 +0800
Subject: [PATCH 013/116] fix(config): harden sync_directory async signature
across platforms
---
src/config/schema.rs | 40 ++++++++++++++++++++++++++++------------
1 file changed, 28 insertions(+), 12 deletions(-)
diff --git a/src/config/schema.rs b/src/config/schema.rs
index 7167ffb8d..88814aa21 100644
--- a/src/config/schema.rs
+++ b/src/config/schema.rs
@@ -3528,20 +3528,23 @@ impl Config {
}
}
-#[cfg(unix)]
async fn sync_directory(path: &Path) -> Result<()> {
- let dir = File::open(path)
- .await
- .with_context(|| format!("Failed to open directory for fsync: {}", path.display()))?;
- dir.sync_all()
- .await
- .with_context(|| format!("Failed to fsync directory metadata: {}", path.display()))?;
- Ok(())
-}
+ #[cfg(unix)]
+ {
+ let dir = File::open(path)
+ .await
+ .with_context(|| format!("Failed to open directory for fsync: {}", path.display()))?;
+ dir.sync_all()
+ .await
+ .with_context(|| format!("Failed to fsync directory metadata: {}", path.display()))?;
+ return Ok(());
+ }
-#[cfg(not(unix))]
-async fn sync_directory(_path: &Path) -> Result<()> {
- Ok(())
+ #[cfg(not(unix))]
+ {
+ let _ = path;
+ Ok(())
+ }
}
#[cfg(test)]
@@ -3898,6 +3901,19 @@ tool_dispatcher = "xml"
assert_eq!(parsed.agent.tool_dispatcher, "xml");
}
+ #[tokio::test]
+ async fn sync_directory_handles_existing_directory() {
+ let dir = std::env::temp_dir().join(format!(
+ "zeroclaw_test_sync_directory_{}",
+ uuid::Uuid::new_v4()
+ ));
+ fs::create_dir_all(&dir).await.unwrap();
+
+ sync_directory(&dir).await.unwrap();
+
+ let _ = fs::remove_dir_all(&dir).await;
+ }
+
#[tokio::test]
async fn config_save_and_load_tmpdir() {
let dir = std::env::temp_dir().join("zeroclaw_test_config");
From 70f12e5df9b89592478a548343b325931d5af4cd Mon Sep 17 00:00:00 2001
From: Chummy
Date: Fri, 20 Feb 2026 16:13:20 +0800
Subject: [PATCH 014/116] test(onboard): add regression coverage for quick
setup model override
---
docs/commands-reference.md | 1 +
src/main.rs | 53 +++++++++++++++++++++++++++++-
src/onboard/wizard.rs | 67 ++++++++++++++++++++++++++++++++++++--
3 files changed, 117 insertions(+), 4 deletions(-)
diff --git a/docs/commands-reference.md b/docs/commands-reference.md
index a693c8123..da9d52c05 100644
--- a/docs/commands-reference.md
+++ b/docs/commands-reference.md
@@ -34,6 +34,7 @@ Last verified: **February 19, 2026**.
- `zeroclaw onboard --interactive`
- `zeroclaw onboard --channels-only`
- `zeroclaw onboard --api-key --provider --memory `
+- `zeroclaw onboard --api-key --provider --model --memory `
### `agent`
diff --git a/src/main.rs b/src/main.rs
index 414a4f504..44df971ca 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -1352,10 +1352,61 @@ async fn handle_auth_command(auth_command: AuthCommands, config: &Config) -> Res
#[cfg(test)]
mod tests {
use super::*;
- use clap::CommandFactory;
+ use clap::{CommandFactory, Parser};
#[test]
fn cli_definition_has_no_flag_conflicts() {
Cli::command().debug_assert();
}
+
+ #[test]
+ fn onboard_help_includes_model_flag() {
+ let cmd = Cli::command();
+ let onboard = cmd
+ .get_subcommands()
+ .find(|subcommand| subcommand.get_name() == "onboard")
+ .expect("onboard subcommand must exist");
+
+ let has_model_flag = onboard
+ .get_arguments()
+ .any(|arg| arg.get_id().as_str() == "model" && arg.get_long() == Some("model"));
+
+ assert!(
+ has_model_flag,
+ "onboard help should include --model for quick setup overrides"
+ );
+ }
+
+ #[test]
+ fn onboard_cli_accepts_model_provider_and_api_key_in_quick_mode() {
+ let cli = Cli::try_parse_from([
+ "zeroclaw",
+ "onboard",
+ "--provider",
+ "openrouter",
+ "--model",
+ "custom-model-946",
+ "--api-key",
+ "sk-issue946",
+ ])
+ .expect("quick onboard invocation should parse");
+
+ match cli.command {
+ Commands::Onboard {
+ interactive,
+ channels_only,
+ api_key,
+ provider,
+ model,
+ ..
+ } => {
+ assert!(!interactive);
+ assert!(!channels_only);
+ assert_eq!(provider.as_deref(), Some("openrouter"));
+ assert_eq!(model.as_deref(), Some("custom-model-946"));
+ assert_eq!(api_key.as_deref(), Some("sk-issue946"));
+ }
+ other => panic!("expected onboard command, got {other:?}"),
+ }
+ }
}
diff --git a/src/onboard/wizard.rs b/src/onboard/wizard.rs
index efbec132a..7966e7d94 100644
--- a/src/onboard/wizard.rs
+++ b/src/onboard/wizard.rs
@@ -331,6 +331,28 @@ pub async fn run_quick_setup(
provider: Option<&str>,
model_override: Option<&str>,
memory_backend: Option<&str>,
+) -> Result {
+ let home = directories::UserDirs::new()
+ .map(|u| u.home_dir().to_path_buf())
+ .context("Could not find home directory")?;
+
+ run_quick_setup_with_home(
+ credential_override,
+ provider,
+ model_override,
+ memory_backend,
+ &home,
+ )
+ .await
+}
+
+#[allow(clippy::too_many_lines)]
+async fn run_quick_setup_with_home(
+ credential_override: Option<&str>,
+ provider: Option<&str>,
+ model_override: Option<&str>,
+ memory_backend: Option<&str>,
+ home: &Path,
) -> Result {
println!("{}", style(BANNER).cyan().bold());
println!(
@@ -341,9 +363,6 @@ pub async fn run_quick_setup(
);
println!();
- let home = directories::UserDirs::new()
- .map(|u| u.home_dir().to_path_buf())
- .context("Could not find home directory")?;
let zeroclaw_dir = home.join(".zeroclaw");
let workspace_dir = zeroclaw_dir.join("workspace");
let config_path = zeroclaw_dir.join("config.toml");
@@ -4673,6 +4692,48 @@ mod tests {
assert!(ctx.communication_style.is_empty());
}
+ #[tokio::test]
+ async fn quick_setup_model_override_persists_to_config_toml() {
+ let tmp = TempDir::new().unwrap();
+
+ let config = run_quick_setup_with_home(
+ Some("sk-issue946"),
+ Some("openrouter"),
+ Some("custom-model-946"),
+ Some("sqlite"),
+ tmp.path(),
+ )
+ .await
+ .unwrap();
+
+ assert_eq!(config.default_provider.as_deref(), Some("openrouter"));
+ assert_eq!(config.default_model.as_deref(), Some("custom-model-946"));
+ assert_eq!(config.api_key.as_deref(), Some("sk-issue946"));
+
+ let config_raw = tokio::fs::read_to_string(config.config_path).await.unwrap();
+ assert!(config_raw.contains("default_provider = \"openrouter\""));
+ assert!(config_raw.contains("default_model = \"custom-model-946\""));
+ }
+
+ #[tokio::test]
+ async fn quick_setup_without_model_uses_provider_default_model() {
+ let tmp = TempDir::new().unwrap();
+
+ let config = run_quick_setup_with_home(
+ Some("sk-issue946"),
+ Some("anthropic"),
+ None,
+ Some("sqlite"),
+ tmp.path(),
+ )
+ .await
+ .unwrap();
+
+ let expected = default_model_for_provider("anthropic");
+ assert_eq!(config.default_provider.as_deref(), Some("anthropic"));
+ assert_eq!(config.default_model.as_deref(), Some(expected.as_str()));
+ }
+
// ── scaffold_workspace: basic file creation ─────────────────
#[test]
From d0674c4b9845cf43bd0d2cc881cdb912c277ce1d Mon Sep 17 00:00:00 2001
From: Chummy
Date: Fri, 20 Feb 2026 16:31:27 +0800
Subject: [PATCH 015/116] fix(channels): harden whatsapp web mode and document
dual backend
---
README.md | 32 ++++++++-
docs/channels-reference.md | 27 +++++++-
docs/config-reference.md | 28 ++++++++
src/channels/mod.rs | 10 +++
src/channels/whatsapp_web.rs | 121 +++++++++++++++++++++++------------
src/config/schema.rs | 39 +++++++++++
src/onboard/wizard.rs | 86 ++++++++++++++++++++++++-
7 files changed, 297 insertions(+), 46 deletions(-)
diff --git a/README.md b/README.md
index 629842af9..6618de5b5 100644
--- a/README.md
+++ b/README.md
@@ -524,7 +524,37 @@ For non-text replies, ZeroClaw can send Telegram attachments when the assistant
Paths can be local files (for example `/tmp/screenshot.png`) or HTTPS URLs.
-### WhatsApp Business Cloud API Setup
+### WhatsApp Setup
+
+ZeroClaw supports two WhatsApp backends:
+
+- **WhatsApp Web mode** (QR / pair code, no Meta Business API required)
+- **WhatsApp Business Cloud API mode** (official Meta webhook flow)
+
+#### WhatsApp Web mode (recommended for personal/self-hosted use)
+
+1. **Build with WhatsApp Web support:**
+ ```bash
+ cargo build --features whatsapp-web
+ ```
+
+2. **Configure ZeroClaw:**
+ ```toml
+ [channels_config.whatsapp]
+ session_path = "~/.zeroclaw/state/whatsapp-web/session.db"
+ pair_phone = "15551234567" # optional; omit to use QR flow
+ pair_code = "" # optional custom pair code
+ allowed_numbers = ["+1234567890"] # E.164 format, or ["*"] for all
+ ```
+
+3. **Start channels/daemon and link device:**
+ - Run `zeroclaw channel start` (or `zeroclaw daemon`).
+ - Follow terminal pairing output (QR or pair code).
+ - In WhatsApp on phone: **Settings → Linked Devices**.
+
+4. **Test:** Send a message from an allowed number and verify the agent replies.
+
+#### WhatsApp Business Cloud API mode
WhatsApp uses Meta's Cloud API with webhooks (push-based, not polling):
diff --git a/docs/channels-reference.md b/docs/channels-reference.md
index 49defc16e..9c99b288f 100644
--- a/docs/channels-reference.md
+++ b/docs/channels-reference.md
@@ -101,7 +101,7 @@ If `[channels_config.matrix]` is present but the binary was built without `chann
| Mattermost | polling | No |
| Matrix | sync API (supports E2EE) | No |
| Signal | signal-cli HTTP bridge | No (local bridge endpoint) |
-| WhatsApp | webhook | Yes (public HTTPS callback) |
+| WhatsApp | webhook (Cloud API) or websocket (Web mode) | Cloud API: Yes (public HTTPS callback), Web mode: No |
| Webhook | gateway endpoint (`/webhook`) | Usually yes |
| Email | IMAP polling + SMTP send | No |
| IRC | IRC socket | No |
@@ -208,6 +208,13 @@ ignore_stories = true
### 4.7 WhatsApp
+ZeroClaw supports two WhatsApp backends:
+
+- **Cloud API mode** (`phone_number_id` + `access_token` + `verify_token`)
+- **WhatsApp Web mode** (`session_path`, requires build flag `--features whatsapp-web`)
+
+Cloud API mode:
+
```toml
[channels_config.whatsapp]
access_token = "EAAB..."
@@ -217,6 +224,22 @@ app_secret = "your-app-secret" # optional but recommended
allowed_numbers = ["*"]
```
+WhatsApp Web mode:
+
+```toml
+[channels_config.whatsapp]
+session_path = "~/.zeroclaw/state/whatsapp-web/session.db"
+pair_phone = "15551234567" # optional; omit to use QR flow
+pair_code = "" # optional custom pair code
+allowed_numbers = ["*"]
+```
+
+Notes:
+
+- Build with `cargo build --features whatsapp-web` (or equivalent run command).
+- Keep `session_path` on persistent storage to avoid relinking after restart.
+- Reply routing uses the originating chat JID, so direct and group replies work correctly.
+
### 4.8 Webhook Channel Config (Gateway)
`channels_config.webhook` enables webhook-specific gateway behavior.
@@ -375,7 +398,7 @@ rg -n "Matrix|Telegram|Discord|Slack|Mattermost|Signal|WhatsApp|Email|IRC|Lark|D
| Mattermost | `Mattermost channel listening on` | `Mattermost: ignoring message from unauthorized user:` | `Mattermost poll error:` / `Mattermost parse error:` |
| Matrix | `Matrix channel listening on room` / `Matrix room ... is encrypted; E2EE decryption is enabled via matrix-sdk.` | `Matrix whoami failed; falling back to configured session hints for E2EE session restore:` / `Matrix whoami failed while resolving listener user_id; using configured user_id hint:` | `Matrix sync error: ... retrying...` |
| Signal | `Signal channel listening via SSE on` | (allowlist checks are enforced by `allowed_from`) | `Signal SSE returned ...` / `Signal SSE connect error:` |
-| WhatsApp (channel) | `WhatsApp channel active (webhook mode).` | `WhatsApp: ignoring message from unauthorized number:` | `WhatsApp send failed:` |
+| WhatsApp (channel) | `WhatsApp channel active (webhook mode).` / `WhatsApp Web connected successfully` | `WhatsApp: ignoring message from unauthorized number:` / `WhatsApp Web: message from ... not in allowed list` | `WhatsApp send failed:` / `WhatsApp Web stream error:` |
| Webhook / WhatsApp (gateway) | `WhatsApp webhook verified successfully` | `Webhook: rejected — not paired / invalid bearer token` / `Webhook: rejected request — invalid or missing X-Webhook-Secret` / `WhatsApp webhook verification failed — token mismatch` | `Webhook JSON parse error:` |
| Email | `Email polling every ...` / `Email sent to ...` | `Blocked email from ...` | `Email poll failed:` / `Email poll task panicked:` |
| IRC | `IRC channel connecting to ...` / `IRC registered as ...` | (allowlist checks are enforced by `allowed_users`) | `IRC SASL authentication failed (...)` / `IRC server does not support SASL...` / `IRC nickname ... is in use, trying ...` |
diff --git a/docs/config-reference.md b/docs/config-reference.md
index f19cb27d9..d56da1a81 100644
--- a/docs/config-reference.md
+++ b/docs/config-reference.md
@@ -378,6 +378,34 @@ Notes:
See detailed channel matrix and allowlist behavior in [channels-reference.md](channels-reference.md).
+### `[channels_config.whatsapp]`
+
+WhatsApp supports two backends under one config table.
+
+Cloud API mode (Meta webhook):
+
+| Key | Required | Purpose |
+|---|---|---|
+| `access_token` | Yes | Meta Cloud API bearer token |
+| `phone_number_id` | Yes | Meta phone number ID |
+| `verify_token` | Yes | Webhook verification token |
+| `app_secret` | Optional | Enables webhook signature verification (`X-Hub-Signature-256`) |
+| `allowed_numbers` | Recommended | Allowed inbound numbers (`[]` = deny all, `"*"` = allow all) |
+
+WhatsApp Web mode (native client):
+
+| Key | Required | Purpose |
+|---|---|---|
+| `session_path` | Yes | Persistent SQLite session path |
+| `pair_phone` | Optional | Pair-code flow phone number (digits only) |
+| `pair_code` | Optional | Custom pair code (otherwise auto-generated) |
+| `allowed_numbers` | Recommended | Allowed inbound numbers (`[]` = deny all, `"*"` = allow all) |
+
+Notes:
+
+- WhatsApp Web requires build flag `whatsapp-web`.
+- If both Cloud and Web fields are present, Cloud mode wins for backward compatibility.
+
## `[hardware]`
Hardware wizard configuration for physical-world access (STM32, probe, serial).
diff --git a/src/channels/mod.rs b/src/channels/mod.rs
index 655a0ae58..96236fb29 100644
--- a/src/channels/mod.rs
+++ b/src/channels/mod.rs
@@ -2073,6 +2073,11 @@ pub async fn doctor_channels(config: Config) -> Result<()> {
}
if let Some(ref wa) = config.channels_config.whatsapp {
+ if wa.is_ambiguous_config() {
+ tracing::warn!(
+ "WhatsApp config has both phone_number_id and session_path set; preferring Cloud API mode. Remove one selector to avoid ambiguity."
+ );
+ }
// Runtime negotiation: detect backend type from config
match wa.backend_type() {
"cloud" => {
@@ -2462,6 +2467,11 @@ pub async fn start_channels(config: Config) -> Result<()> {
}
if let Some(ref wa) = config.channels_config.whatsapp {
+ if wa.is_ambiguous_config() {
+ tracing::warn!(
+ "WhatsApp config has both phone_number_id and session_path set; preferring Cloud API mode. Remove one selector to avoid ambiguity."
+ );
+ }
// Runtime negotiation: detect backend type from config
match wa.backend_type() {
"cloud" => {
diff --git a/src/channels/whatsapp_web.rs b/src/channels/whatsapp_web.rs
index 5ea3c24ab..f6e89c2a4 100644
--- a/src/channels/whatsapp_web.rs
+++ b/src/channels/whatsapp_web.rs
@@ -15,7 +15,7 @@
//! # Configuration
//!
//! ```toml
-//! [channels.whatsapp]
+//! [channels_config.whatsapp]
//! session_path = "~/.zeroclaw/whatsapp-session.db" # Required for Web mode
//! pair_phone = "15551234567" # Optional: for pair code linking
//! allowed_numbers = ["+1234567890", "*"] # Same as Cloud API
@@ -43,7 +43,7 @@ use tokio::select;
/// # Configuration
///
/// ```toml
-/// [channels.whatsapp]
+/// [channels_config.whatsapp]
/// session_path = "~/.zeroclaw/whatsapp-session.db"
/// pair_phone = "15551234567" # Optional
/// allowed_numbers = ["+1234567890", "*"]
@@ -96,8 +96,7 @@ impl WhatsAppWebChannel {
/// Check if a phone number is allowed (E.164 format: +1234567890)
#[cfg(feature = "whatsapp-web")]
fn is_number_allowed(&self, phone: &str) -> bool {
- self.allowed_numbers.is_empty()
- || self.allowed_numbers.iter().any(|n| n == "*" || n == phone)
+ self.allowed_numbers.iter().any(|n| n == "*" || n == phone)
}
/// Normalize phone number to E.164 format
@@ -116,6 +115,12 @@ impl WhatsAppWebChannel {
}
}
+ /// Whether the recipient string is a WhatsApp JID (contains a domain suffix).
+ #[cfg(feature = "whatsapp-web")]
+ fn is_jid(recipient: &str) -> bool {
+ recipient.trim().contains('@')
+ }
+
/// Convert a recipient to a wa-rs JID.
///
/// Supports:
@@ -156,14 +161,16 @@ impl Channel for WhatsAppWebChannel {
anyhow::bail!("WhatsApp Web client not connected. Initialize the bot first.");
};
- // Validate recipient is allowed
- let normalized = self.normalize_phone(&message.recipient);
- if !self.is_number_allowed(&normalized) {
- tracing::warn!(
- "WhatsApp Web: recipient {} not in allowed list",
- message.recipient
- );
- return Ok(());
+ // Validate recipient allowlist only for direct phone-number targets.
+ if !Self::is_jid(&message.recipient) {
+ let normalized = self.normalize_phone(&message.recipient);
+ if !self.is_number_allowed(&normalized) {
+ tracing::warn!(
+ "WhatsApp Web: recipient {} not in allowed list",
+ message.recipient
+ );
+ return Ok(());
+ }
}
let to = self.recipient_to_jid(&message.recipient)?;
@@ -246,7 +253,12 @@ impl Channel for WhatsAppWebChannel {
let sender = info.source.sender.user().to_string();
let chat = info.source.chat.to_string();
- tracing::info!("📨 WhatsApp message from {} in {}: {}", sender, chat, text);
+ tracing::info!(
+ "WhatsApp Web message from {} in {}: {}",
+ sender,
+ chat,
+ text
+ );
// Check if sender is allowed
let normalized = if sender.starts_with('+') {
@@ -255,17 +267,26 @@ impl Channel for WhatsAppWebChannel {
format!("+{sender}")
};
- if allowed_numbers.is_empty()
- || allowed_numbers.iter().any(|n| n == "*" || n == &normalized)
- {
+ if allowed_numbers.iter().any(|n| n == "*" || n == &normalized) {
+ let trimmed = text.trim();
+ if trimmed.is_empty() {
+ tracing::debug!(
+ "WhatsApp Web: ignoring empty or non-text message from {}",
+ normalized
+ );
+ return;
+ }
+
if let Err(e) = tx_inner
.send(ChannelMessage {
id: uuid::Uuid::new_v4().to_string(),
channel: "whatsapp".to_string(),
sender: normalized.clone(),
- reply_target: normalized.clone(),
- content: text.to_string(),
- timestamp: chrono::Utc::now().timestamp_millis() as u64,
+ // Reply to the originating chat JID (DM or group).
+ reply_target: chat,
+ content: trimmed.to_string(),
+ timestamp: chrono::Utc::now().timestamp() as u64,
+ thread_ts: None,
})
.await
{
@@ -276,20 +297,24 @@ impl Channel for WhatsAppWebChannel {
}
}
Event::Connected(_) => {
- tracing::info!("✅ WhatsApp Web connected successfully!");
+ tracing::info!("WhatsApp Web connected successfully");
}
Event::LoggedOut(_) => {
- tracing::warn!("❌ WhatsApp Web was logged out!");
+ tracing::warn!("WhatsApp Web was logged out");
}
Event::StreamError(stream_error) => {
- tracing::error!("❌ WhatsApp Web stream error: {:?}", stream_error);
+ tracing::error!("WhatsApp Web stream error: {:?}", stream_error);
}
Event::PairingCode { code, .. } => {
- tracing::info!("🔑 Pair code received: {}", code);
- tracing::info!("Link your phone by entering this code in WhatsApp > Linked Devices");
+ tracing::info!("WhatsApp Web pair code received: {}", code);
+ tracing::info!(
+ "Link your phone by entering this code in WhatsApp > Linked Devices"
+ );
}
Event::PairingQrCode { code, .. } => {
- tracing::info!("📱 QR code received (scan with WhatsApp > Linked Devices)");
+ tracing::info!(
+ "WhatsApp Web QR code received (scan with WhatsApp > Linked Devices)"
+ );
tracing::debug!("QR code: {}", code);
}
_ => {}
@@ -352,13 +377,15 @@ impl Channel for WhatsAppWebChannel {
anyhow::bail!("WhatsApp Web client not connected. Initialize the bot first.");
};
- let normalized = self.normalize_phone(recipient);
- if !self.is_number_allowed(&normalized) {
- tracing::warn!(
- "WhatsApp Web: typing target {} not in allowed list",
- recipient
- );
- return Ok(());
+ if !Self::is_jid(recipient) {
+ let normalized = self.normalize_phone(recipient);
+ if !self.is_number_allowed(&normalized) {
+ tracing::warn!(
+ "WhatsApp Web: typing target {} not in allowed list",
+ recipient
+ );
+ return Ok(());
+ }
}
let to = self.recipient_to_jid(recipient)?;
@@ -378,13 +405,15 @@ impl Channel for WhatsAppWebChannel {
anyhow::bail!("WhatsApp Web client not connected. Initialize the bot first.");
};
- let normalized = self.normalize_phone(recipient);
- if !self.is_number_allowed(&normalized) {
- tracing::warn!(
- "WhatsApp Web: typing target {} not in allowed list",
- recipient
- );
- return Ok(());
+ if !Self::is_jid(recipient) {
+ let normalized = self.normalize_phone(recipient);
+ if !self.is_number_allowed(&normalized) {
+ tracing::warn!(
+ "WhatsApp Web: typing target {} not in allowed list",
+ recipient
+ );
+ return Ok(());
+ }
}
let to = self.recipient_to_jid(recipient)?;
@@ -498,8 +527,8 @@ mod tests {
#[cfg(feature = "whatsapp-web")]
fn whatsapp_web_number_denied_empty() {
let ch = WhatsAppWebChannel::new("/tmp/test.db".into(), None, None, vec![]);
- // Empty allowed_numbers means "allow all" (same behavior as Cloud API)
- assert!(ch.is_number_allowed("+1234567890"));
+ // Empty allowlist means "deny all" (matches channel-wide allowlist policy).
+ assert!(!ch.is_number_allowed("+1234567890"));
}
#[test]
@@ -516,6 +545,16 @@ mod tests {
assert_eq!(ch.normalize_phone("+1234567890"), "+1234567890");
}
+ #[test]
+ #[cfg(feature = "whatsapp-web")]
+ fn whatsapp_web_normalize_phone_from_jid() {
+ let ch = make_channel();
+ assert_eq!(
+ ch.normalize_phone("1234567890@s.whatsapp.net"),
+ "+1234567890"
+ );
+ }
+
#[tokio::test]
#[cfg(feature = "whatsapp-web")]
async fn whatsapp_web_health_check_disconnected() {
diff --git a/src/config/schema.rs b/src/config/schema.rs
index 88814aa21..de007ced8 100644
--- a/src/config/schema.rs
+++ b/src/config/schema.rs
@@ -2461,6 +2461,13 @@ impl WhatsAppConfig {
pub fn is_web_config(&self) -> bool {
self.session_path.is_some()
}
+
+ /// Returns true when both Cloud and Web selectors are present.
+ ///
+ /// Runtime currently prefers Cloud mode in this case for backward compatibility.
+ pub fn is_ambiguous_config(&self) -> bool {
+ self.phone_number_id.is_some() && self.session_path.is_some()
+ }
}
/// IRC channel configuration.
@@ -4458,6 +4465,38 @@ channel_id = "C123"
assert_eq!(parsed.allowed_numbers, vec!["*"]);
}
+ #[test]
+ async fn whatsapp_config_backend_type_cloud_precedence_when_ambiguous() {
+ let wc = WhatsAppConfig {
+ access_token: Some("tok".into()),
+ phone_number_id: Some("123".into()),
+ verify_token: Some("ver".into()),
+ app_secret: None,
+ session_path: Some("~/.zeroclaw/state/whatsapp-web/session.db".into()),
+ pair_phone: None,
+ pair_code: None,
+ allowed_numbers: vec!["+1".into()],
+ };
+ assert!(wc.is_ambiguous_config());
+ assert_eq!(wc.backend_type(), "cloud");
+ }
+
+ #[test]
+ async fn whatsapp_config_backend_type_web() {
+ let wc = WhatsAppConfig {
+ access_token: None,
+ phone_number_id: None,
+ verify_token: None,
+ app_secret: None,
+ session_path: Some("~/.zeroclaw/state/whatsapp-web/session.db".into()),
+ pair_phone: None,
+ pair_code: None,
+ allowed_numbers: vec![],
+ };
+ assert!(!wc.is_ambiguous_config());
+ assert_eq!(wc.backend_type(), "web");
+ }
+
#[test]
async fn channels_config_with_whatsapp() {
let c = ChannelsConfig {
diff --git a/src/onboard/wizard.rs b/src/onboard/wizard.rs
index 7966e7d94..da689946b 100644
--- a/src/onboard/wizard.rs
+++ b/src/onboard/wizard.rs
@@ -3238,10 +3238,92 @@ fn setup_channels() -> Result {
ChannelMenuChoice::WhatsApp => {
// ── WhatsApp ──
println!();
+ println!(" {}", style("WhatsApp Setup").white().bold());
+
+ let mode_options = vec![
+ "WhatsApp Web (QR / pair-code, no Meta Business API)",
+ "WhatsApp Business Cloud API (webhook)",
+ ];
+ let mode_idx = Select::new()
+ .with_prompt(" Choose WhatsApp mode")
+ .items(&mode_options)
+ .default(0)
+ .interact()?;
+
+ if mode_idx == 0 {
+ println!(" {}", style("Mode: WhatsApp Web").dim());
+ print_bullet("1. Build with --features whatsapp-web");
+ print_bullet(
+ "2. Start channel/daemon and scan QR in WhatsApp > Linked Devices",
+ );
+ print_bullet("3. Keep session_path persistent so relogin is not required");
+ println!();
+
+ let session_path: String = Input::new()
+ .with_prompt(" Session database path")
+ .default("~/.zeroclaw/state/whatsapp-web/session.db".into())
+ .interact_text()?;
+
+ if session_path.trim().is_empty() {
+ println!(" {} Skipped — session path required", style("→").dim());
+ continue;
+ }
+
+ let pair_phone: String = Input::new()
+ .with_prompt(
+ " Pair phone (optional, digits only; leave empty to use QR flow)",
+ )
+ .allow_empty(true)
+ .interact_text()?;
+
+ let pair_code: String = if pair_phone.trim().is_empty() {
+ String::new()
+ } else {
+ Input::new()
+ .with_prompt(
+ " Custom pair code (optional, leave empty for auto-generated)",
+ )
+ .allow_empty(true)
+ .interact_text()?
+ };
+
+ let users_str: String = Input::new()
+ .with_prompt(
+ " Allowed phone numbers (comma-separated +1234567890, or * for all)",
+ )
+ .default("*".into())
+ .interact_text()?;
+
+ let allowed_numbers = if users_str.trim() == "*" {
+ vec!["*".into()]
+ } else {
+ users_str.split(',').map(|s| s.trim().to_string()).collect()
+ };
+
+ config.whatsapp = Some(WhatsAppConfig {
+ access_token: None,
+ phone_number_id: None,
+ verify_token: None,
+ app_secret: None,
+ session_path: Some(session_path.trim().to_string()),
+ pair_phone: (!pair_phone.trim().is_empty())
+ .then(|| pair_phone.trim().to_string()),
+ pair_code: (!pair_code.trim().is_empty())
+ .then(|| pair_code.trim().to_string()),
+ allowed_numbers,
+ });
+
+ println!(
+ " {} WhatsApp Web configuration saved.",
+ style("✅").green().bold()
+ );
+ continue;
+ }
+
println!(
" {} {}",
- style("WhatsApp Setup").white().bold(),
- style("— Business Cloud API").dim()
+ style("Mode:").dim(),
+ style("Business Cloud API").dim()
);
print_bullet("1. Go to developers.facebook.com and create a WhatsApp app");
print_bullet("2. Add the WhatsApp product and get your phone number ID");
From a2e9c0d1e11a026a9bb1d18de9e83a340ce61f6c Mon Sep 17 00:00:00 2001
From: Chummy
Date: Fri, 20 Feb 2026 16:34:43 +0800
Subject: [PATCH 016/116] fix(skills): make open-skills sync opt-in and
configurable
---
README.md | 12 +++
docs/config-reference.md | 15 +++
src/agent/agent.rs | 5 +-
src/agent/loop_.rs | 4 +-
src/channels/mod.rs | 2 +-
src/config/mod.rs | 6 +-
src/config/schema.rs | 86 ++++++++++++++++
src/main.rs | 4 +-
src/onboard/wizard.rs | 2 +
src/skills/mod.rs | 205 +++++++++++++++++++++++++++++++++++----
10 files changed, 312 insertions(+), 29 deletions(-)
diff --git a/README.md b/README.md
index 6618de5b5..4a7f4bed5 100644
--- a/README.md
+++ b/README.md
@@ -887,6 +887,18 @@ See [aieos.org](https://aieos.org) for the full schema and live examples.
For a task-oriented command guide, see [`docs/commands-reference.md`](docs/commands-reference.md).
+### Open-Skills Opt-In
+
+Community `open-skills` sync is disabled by default. Enable it explicitly in `config.toml`:
+
+```toml
+[skills]
+open_skills_enabled = true
+# open_skills_dir = "/path/to/open-skills" # optional
+```
+
+You can also override at runtime with `ZEROCLAW_OPEN_SKILLS_ENABLED` and `ZEROCLAW_OPEN_SKILLS_DIR`.
+
## Development
```bash
diff --git a/docs/config-reference.md b/docs/config-reference.md
index d56da1a81..8291a3ce7 100644
--- a/docs/config-reference.md
+++ b/docs/config-reference.md
@@ -114,6 +114,21 @@ Notes:
- `reasoning_enabled = true` explicitly requests reasoning for supported providers (`think: true` on `ollama`).
- Unset keeps provider defaults.
+## `[skills]`
+
+| Key | Default | Purpose |
+|---|---|---|
+| `open_skills_enabled` | `false` | Opt-in loading/sync of community `open-skills` repository |
+| `open_skills_dir` | unset | Optional local path for `open-skills` (defaults to `$HOME/open-skills` when enabled) |
+
+Notes:
+
+- Security-first default: ZeroClaw does **not** clone or sync `open-skills` unless `open_skills_enabled = true`.
+- Environment overrides:
+ - `ZEROCLAW_OPEN_SKILLS_ENABLED` accepts `1/0`, `true/false`, `yes/no`, `on/off`.
+ - `ZEROCLAW_OPEN_SKILLS_DIR` overrides the repository path when non-empty.
+- Precedence for enable flag: `ZEROCLAW_OPEN_SKILLS_ENABLED` → `skills.open_skills_enabled` in `config.toml` → default `false`.
+
## `[composio]`
| Key | Default | Purpose |
diff --git a/src/agent/agent.rs b/src/agent/agent.rs
index 7b41d16a0..e96d797dd 100644
--- a/src/agent/agent.rs
+++ b/src/agent/agent.rs
@@ -308,7 +308,10 @@ impl Agent {
.classification_config(config.query_classification.clone())
.available_hints(available_hints)
.identity_config(config.identity.clone())
- .skills(crate::skills::load_skills(&config.workspace_dir))
+ .skills(crate::skills::load_skills_with_config(
+ &config.workspace_dir,
+ config,
+ ))
.auto_save(config.memory.auto_save)
.build()
}
diff --git a/src/agent/loop_.rs b/src/agent/loop_.rs
index 0deee673f..cd6b862a0 100644
--- a/src/agent/loop_.rs
+++ b/src/agent/loop_.rs
@@ -1348,7 +1348,7 @@ pub async fn run(
.collect();
// ── Build system prompt from workspace MD files (OpenClaw framework) ──
- let skills = crate::skills::load_skills(&config.workspace_dir);
+ let skills = crate::skills::load_skills_with_config(&config.workspace_dir, &config);
let mut tool_descs: Vec<(&str, &str)> = vec![
(
"shell",
@@ -1778,7 +1778,7 @@ pub async fn process_message(config: Config, message: &str) -> Result {
.map(|b| b.board.clone())
.collect();
- let skills = crate::skills::load_skills(&config.workspace_dir);
+ let skills = crate::skills::load_skills_with_config(&config.workspace_dir, &config);
let mut tool_descs: Vec<(&str, &str)> = vec![
("shell", "Execute terminal commands."),
("file_read", "Read file contents."),
diff --git a/src/channels/mod.rs b/src/channels/mod.rs
index 96236fb29..3d48c527f 100644
--- a/src/channels/mod.rs
+++ b/src/channels/mod.rs
@@ -2302,7 +2302,7 @@ pub async fn start_channels(config: Config) -> Result<()> {
&config,
));
- let skills = crate::skills::load_skills(&workspace);
+ let skills = crate::skills::load_skills_with_config(&workspace, &config);
// Collect tool descriptions for the prompt
let mut tool_descs: Vec<(&str, &str)> = vec![
diff --git a/src/config/mod.rs b/src/config/mod.rs
index 8187eecca..4649f9ca3 100644
--- a/src/config/mod.rs
+++ b/src/config/mod.rs
@@ -11,9 +11,9 @@ pub use schema::{
IdentityConfig, LarkConfig, MatrixConfig, MemoryConfig, ModelRouteConfig, MultimodalConfig,
ObservabilityConfig, PeripheralBoardConfig, PeripheralsConfig, ProxyConfig, ProxyScope,
QueryClassificationConfig, ReliabilityConfig, ResourceLimitsConfig, RuntimeConfig,
- SandboxBackend, SandboxConfig, SchedulerConfig, SecretsConfig, SecurityConfig, SlackConfig,
- StorageConfig, StorageProviderConfig, StorageProviderSection, StreamMode, TelegramConfig,
- TunnelConfig, WebSearchConfig, WebhookConfig,
+ SandboxBackend, SandboxConfig, SchedulerConfig, SecretsConfig, SecurityConfig, SkillsConfig,
+ SlackConfig, StorageConfig, StorageProviderConfig, StorageProviderSection, StreamMode,
+ TelegramConfig, TunnelConfig, WebSearchConfig, WebhookConfig,
};
#[cfg(test)]
diff --git a/src/config/schema.rs b/src/config/schema.rs
index de007ced8..04eee3283 100644
--- a/src/config/schema.rs
+++ b/src/config/schema.rs
@@ -94,6 +94,10 @@ pub struct Config {
#[serde(default)]
pub agent: AgentConfig,
+ /// Skills loading and community repository behavior (`[skills]`).
+ #[serde(default)]
+ pub skills: SkillsConfig,
+
/// Model routing rules — route `hint:` to specific provider+model combos.
#[serde(default)]
pub model_routes: Vec<ModelRouteConfig>,
@@ -325,6 +329,28 @@ impl Default for AgentConfig {
}
}
+/// Skills loading configuration (`[skills]` section).
+#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
+pub struct SkillsConfig {
+ /// Enable loading and syncing the community open-skills repository.
+ /// Default: `false` (opt-in).
+ #[serde(default)]
+ pub open_skills_enabled: bool,
+ /// Optional path to a local open-skills repository.
+ /// If unset, defaults to `$HOME/open-skills` when enabled.
+ #[serde(default)]
+ pub open_skills_dir: Option<String>,
+}
+
+impl Default for SkillsConfig {
+ fn default() -> Self {
+ Self {
+ open_skills_enabled: false,
+ open_skills_dir: None,
+ }
+ }
+}
+
/// Multimodal (image) handling configuration (`[multimodal]` section).
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
pub struct MultimodalConfig {
@@ -2742,6 +2768,7 @@ impl Default for Config {
reliability: ReliabilityConfig::default(),
scheduler: SchedulerConfig::default(),
agent: AgentConfig::default(),
+ skills: SkillsConfig::default(),
model_routes: Vec::new(),
embedding_routes: Vec::new(),
heartbeat: HeartbeatConfig::default(),
@@ -3235,6 +3262,27 @@ impl Config {
}
}
+ // Open-skills opt-in flag: ZEROCLAW_OPEN_SKILLS_ENABLED
+ if let Ok(flag) = std::env::var("ZEROCLAW_OPEN_SKILLS_ENABLED") {
+ if !flag.trim().is_empty() {
+ match flag.trim().to_ascii_lowercase().as_str() {
+ "1" | "true" | "yes" | "on" => self.skills.open_skills_enabled = true,
+ "0" | "false" | "no" | "off" => self.skills.open_skills_enabled = false,
+ _ => tracing::warn!(
+ "Ignoring invalid ZEROCLAW_OPEN_SKILLS_ENABLED (valid: 1|0|true|false|yes|no|on|off)"
+ ),
+ }
+ }
+ }
+
+ // Open-skills directory override: ZEROCLAW_OPEN_SKILLS_DIR
+ if let Ok(path) = std::env::var("ZEROCLAW_OPEN_SKILLS_DIR") {
+ let trimmed = path.trim();
+ if !trimmed.is_empty() {
+ self.skills.open_skills_dir = Some(trimmed.to_string());
+ }
+ }
+
// Gateway port: ZEROCLAW_GATEWAY_PORT or PORT
if let Ok(port_str) =
std::env::var("ZEROCLAW_GATEWAY_PORT").or_else(|_| std::env::var("PORT"))
@@ -3574,6 +3622,7 @@ mod tests {
assert!(c.default_model.as_deref().unwrap().contains("claude"));
assert!((c.default_temperature - 0.7).abs() < f64::EPSILON);
assert!(c.api_key.is_none());
+ assert!(!c.skills.open_skills_enabled);
assert!(c.workspace_dir.to_string_lossy().contains("workspace"));
assert!(c.config_path.to_string_lossy().contains("config.toml"));
}
@@ -3596,6 +3645,7 @@ mod tests {
.expect("schema should expose top-level properties");
assert!(properties.contains_key("default_provider"));
+ assert!(properties.contains_key("skills"));
assert!(properties.contains_key("gateway"));
assert!(properties.contains_key("channels_config"));
assert!(!properties.contains_key("workspace_dir"));
@@ -3745,6 +3795,7 @@ default_temperature = 0.7
},
reliability: ReliabilityConfig::default(),
scheduler: SchedulerConfig::default(),
+ skills: SkillsConfig::default(),
model_routes: Vec::new(),
embedding_routes: Vec::new(),
query_classification: QueryClassificationConfig::default(),
@@ -3941,6 +3992,7 @@ tool_dispatcher = "xml"
runtime: RuntimeConfig::default(),
reliability: ReliabilityConfig::default(),
scheduler: SchedulerConfig::default(),
+ skills: SkillsConfig::default(),
model_routes: Vec::new(),
embedding_routes: Vec::new(),
query_classification: QueryClassificationConfig::default(),
@@ -4900,6 +4952,40 @@ default_temperature = 0.7
std::env::remove_var("ZEROCLAW_PROVIDER");
}
+ #[test]
+ async fn env_override_open_skills_enabled_and_dir() {
+ let _env_guard = env_override_lock().await;
+ let mut config = Config::default();
+ assert!(!config.skills.open_skills_enabled);
+ assert!(config.skills.open_skills_dir.is_none());
+
+ std::env::set_var("ZEROCLAW_OPEN_SKILLS_ENABLED", "true");
+ std::env::set_var("ZEROCLAW_OPEN_SKILLS_DIR", "/tmp/open-skills");
+ config.apply_env_overrides();
+
+ assert!(config.skills.open_skills_enabled);
+ assert_eq!(
+ config.skills.open_skills_dir.as_deref(),
+ Some("/tmp/open-skills")
+ );
+
+ std::env::remove_var("ZEROCLAW_OPEN_SKILLS_ENABLED");
+ std::env::remove_var("ZEROCLAW_OPEN_SKILLS_DIR");
+ }
+
+ #[test]
+ async fn env_override_open_skills_enabled_invalid_value_keeps_existing_value() {
+ let _env_guard = env_override_lock().await;
+ let mut config = Config::default();
+ config.skills.open_skills_enabled = true;
+
+ std::env::set_var("ZEROCLAW_OPEN_SKILLS_ENABLED", "maybe");
+ config.apply_env_overrides();
+
+ assert!(config.skills.open_skills_enabled);
+ std::env::remove_var("ZEROCLAW_OPEN_SKILLS_ENABLED");
+ }
+
#[test]
async fn env_override_provider_fallback() {
let _env_guard = env_override_lock().await;
diff --git a/src/main.rs b/src/main.rs
index 44df971ca..488f8ae85 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -884,9 +884,7 @@ async fn main() -> Result<()> {
integration_command,
} => integrations::handle_command(integration_command, &config),
- Commands::Skills { skill_command } => {
- skills::handle_command(skill_command, &config.workspace_dir)
- }
+ Commands::Skills { skill_command } => skills::handle_command(skill_command, &config),
Commands::Migrate { migrate_command } => {
migration::handle_command(migrate_command, &config).await
diff --git a/src/onboard/wizard.rs b/src/onboard/wizard.rs
index da689946b..9ba0975bd 100644
--- a/src/onboard/wizard.rs
+++ b/src/onboard/wizard.rs
@@ -160,6 +160,7 @@ pub async fn run_wizard() -> Result {
reliability: crate::config::ReliabilityConfig::default(),
scheduler: crate::config::schema::SchedulerConfig::default(),
agent: crate::config::schema::AgentConfig::default(),
+ skills: crate::config::SkillsConfig::default(),
model_routes: Vec::new(),
embedding_routes: Vec::new(),
heartbeat: HeartbeatConfig::default(),
@@ -398,6 +399,7 @@ async fn run_quick_setup_with_home(
reliability: crate::config::ReliabilityConfig::default(),
scheduler: crate::config::schema::SchedulerConfig::default(),
agent: crate::config::schema::AgentConfig::default(),
+ skills: crate::config::SkillsConfig::default(),
model_routes: Vec::new(),
embedding_routes: Vec::new(),
heartbeat: HeartbeatConfig::default(),
diff --git a/src/skills/mod.rs b/src/skills/mod.rs
index 0c6e47cac..bca6fffb9 100644
--- a/src/skills/mod.rs
+++ b/src/skills/mod.rs
@@ -71,9 +71,28 @@ fn default_version() -> String {
/// Load all skills from the workspace skills directory
pub fn load_skills(workspace_dir: &Path) -> Vec<Skill> {
+ load_skills_with_open_skills_config(workspace_dir, None, None)
+}
+
+/// Load skills using runtime config values (preferred at runtime).
+pub fn load_skills_with_config(workspace_dir: &Path, config: &crate::config::Config) -> Vec<Skill> {
+ load_skills_with_open_skills_config(
+ workspace_dir,
+ Some(config.skills.open_skills_enabled),
+ config.skills.open_skills_dir.as_deref(),
+ )
+}
+
+fn load_skills_with_open_skills_config(
+ workspace_dir: &Path,
+ config_open_skills_enabled: Option<bool>,
+ config_open_skills_dir: Option<&str>,
+) -> Vec<Skill> {
let mut skills = Vec::new();
- if let Some(open_skills_dir) = ensure_open_skills_repo() {
+ if let Some(open_skills_dir) =
+ ensure_open_skills_repo(config_open_skills_enabled, config_open_skills_dir)
+ {
skills.extend(load_open_skills(&open_skills_dir));
}
@@ -158,33 +177,79 @@ fn load_open_skills(repo_dir: &Path) -> Vec<Skill> {
skills
}
-fn open_skills_enabled() -> bool {
- if let Ok(raw) = std::env::var("ZEROCLAW_OPEN_SKILLS_ENABLED") {
- let value = raw.trim().to_ascii_lowercase();
- return !matches!(value.as_str(), "0" | "false" | "off" | "no");
+fn parse_open_skills_enabled(raw: &str) -> Option<bool> {
+ match raw.trim().to_ascii_lowercase().as_str() {
+ "1" | "true" | "yes" | "on" => Some(true),
+ "0" | "false" | "no" | "off" => Some(false),
+ _ => None,
}
-
- // Keep tests deterministic and network-free by default.
- !cfg!(test)
}
-fn resolve_open_skills_dir() -> Option<PathBuf> {
- if let Ok(path) = std::env::var("ZEROCLAW_OPEN_SKILLS_DIR") {
- let trimmed = path.trim();
- if !trimmed.is_empty() {
- return Some(PathBuf::from(trimmed));
+fn open_skills_enabled_from_sources(
+ config_open_skills_enabled: Option<bool>,
+ env_override: Option<&str>,
+) -> bool {
+ if let Some(raw) = env_override {
+ if let Some(enabled) = parse_open_skills_enabled(&raw) {
+ return enabled;
+ }
+ if !raw.trim().is_empty() {
+ tracing::warn!(
+ "Ignoring invalid ZEROCLAW_OPEN_SKILLS_ENABLED (valid: 1|0|true|false|yes|no|on|off)"
+ );
}
}
- UserDirs::new().map(|dirs| dirs.home_dir().join("open-skills"))
+ config_open_skills_enabled.unwrap_or(false)
}
-fn ensure_open_skills_repo() -> Option<PathBuf> {
- if !open_skills_enabled() {
+fn open_skills_enabled(config_open_skills_enabled: Option<bool>) -> bool {
+ let env_override = std::env::var("ZEROCLAW_OPEN_SKILLS_ENABLED").ok();
+ open_skills_enabled_from_sources(config_open_skills_enabled, env_override.as_deref())
+}
+
+fn resolve_open_skills_dir_from_sources(
+ env_dir: Option<&str>,
+ config_dir: Option<&str>,
+ home_dir: Option<&Path>,
+) -> Option<PathBuf> {
+ let parse_dir = |raw: &str| {
+ let trimmed = raw.trim();
+ if trimmed.is_empty() {
+ None
+ } else {
+ Some(PathBuf::from(trimmed))
+ }
+ };
+
+ if let Some(env_dir) = env_dir.and_then(parse_dir) {
+ return Some(env_dir);
+ }
+ if let Some(config_dir) = config_dir.and_then(parse_dir) {
+ return Some(config_dir);
+ }
+ home_dir.map(|home| home.join("open-skills"))
+}
+
+fn resolve_open_skills_dir(config_open_skills_dir: Option<&str>) -> Option<PathBuf> {
+ let env_dir = std::env::var("ZEROCLAW_OPEN_SKILLS_DIR").ok();
+ let home_dir = UserDirs::new().map(|dirs| dirs.home_dir().to_path_buf());
+ resolve_open_skills_dir_from_sources(
+ env_dir.as_deref(),
+ config_open_skills_dir,
+ home_dir.as_deref(),
+ )
+}
+
+fn ensure_open_skills_repo(
+ config_open_skills_enabled: Option,
+ config_open_skills_dir: Option<&str>,
+) -> Option<PathBuf> {
+ if !open_skills_enabled(config_open_skills_enabled) {
return None;
}
- let repo_dir = resolve_open_skills_dir()?;
+ let repo_dir = resolve_open_skills_dir(config_open_skills_dir)?;
if !repo_dir.exists() {
if !clone_open_skills_repo(&repo_dir) {
@@ -542,10 +607,11 @@ fn copy_dir_recursive(src: &Path, dest: &Path) -> Result<()> {
/// Handle the `skills` CLI command
#[allow(clippy::too_many_lines)]
-pub fn handle_command(command: crate::SkillCommands, workspace_dir: &Path) -> Result<()> {
+pub fn handle_command(command: crate::SkillCommands, config: &crate::config::Config) -> Result<()> {
+ let workspace_dir = &config.workspace_dir;
match command {
crate::SkillCommands::List => {
- let skills = load_skills(workspace_dir);
+ let skills = load_skills_with_config(workspace_dir, config);
if skills.is_empty() {
println!("No skills installed.");
println!();
@@ -711,6 +777,35 @@ pub fn handle_command(command: crate::SkillCommands, workspace_dir: &Path) -> Re
mod tests {
use super::*;
use std::fs;
+ use std::sync::{Mutex, OnceLock};
+
+ fn open_skills_env_lock() -> &'static Mutex<()> {
+ static ENV_LOCK: OnceLock<Mutex<()>> = OnceLock::new();
+ ENV_LOCK.get_or_init(|| Mutex::new(()))
+ }
+
+ struct EnvVarGuard {
+ key: &'static str,
+ original: Option<String>,
+ }
+
+ impl EnvVarGuard {
+ fn unset(key: &'static str) -> Self {
+ let original = std::env::var(key).ok();
+ std::env::remove_var(key);
+ Self { key, original }
+ }
+ }
+
+ impl Drop for EnvVarGuard {
+ fn drop(&mut self) {
+ if let Some(value) = &self.original {
+ std::env::set_var(self.key, value);
+ } else {
+ std::env::remove_var(self.key);
+ }
+ }
+ }
#[test]
fn load_empty_skills_dir() {
@@ -1071,6 +1166,78 @@ description = "Bare minimum"
assert_eq!(skills.len(), 1);
assert_eq!(skills[0].name, "from-toml"); // TOML takes priority
}
+
+ #[test]
+ fn open_skills_enabled_resolution_prefers_env_then_config_then_default_false() {
+ assert!(!open_skills_enabled_from_sources(None, None));
+ assert!(open_skills_enabled_from_sources(Some(true), None));
+ assert!(!open_skills_enabled_from_sources(Some(true), Some("0")));
+ assert!(open_skills_enabled_from_sources(Some(false), Some("yes")));
+ // Invalid env values should fall back to config.
+ assert!(open_skills_enabled_from_sources(
+ Some(true),
+ Some("invalid")
+ ));
+ assert!(!open_skills_enabled_from_sources(
+ Some(false),
+ Some("invalid")
+ ));
+ }
+
+ #[test]
+ fn resolve_open_skills_dir_resolution_prefers_env_then_config_then_home() {
+ let home = Path::new("/tmp/home-dir");
+ assert_eq!(
+ resolve_open_skills_dir_from_sources(
+ Some("/tmp/env-skills"),
+ Some("/tmp/config"),
+ Some(home)
+ ),
+ Some(PathBuf::from("/tmp/env-skills"))
+ );
+ assert_eq!(
+ resolve_open_skills_dir_from_sources(
+ Some(" "),
+ Some("/tmp/config-skills"),
+ Some(home)
+ ),
+ Some(PathBuf::from("/tmp/config-skills"))
+ );
+ assert_eq!(
+ resolve_open_skills_dir_from_sources(None, None, Some(home)),
+ Some(PathBuf::from("/tmp/home-dir/open-skills"))
+ );
+ assert_eq!(resolve_open_skills_dir_from_sources(None, None, None), None);
+ }
+
+ #[test]
+ fn load_skills_with_config_reads_open_skills_dir_without_network() {
+ let _env_guard = open_skills_env_lock().lock().unwrap();
+ let _enabled_guard = EnvVarGuard::unset("ZEROCLAW_OPEN_SKILLS_ENABLED");
+ let _dir_guard = EnvVarGuard::unset("ZEROCLAW_OPEN_SKILLS_DIR");
+
+ let dir = tempfile::tempdir().unwrap();
+ let workspace_dir = dir.path().join("workspace");
+ fs::create_dir_all(workspace_dir.join("skills")).unwrap();
+
+ let open_skills_dir = dir.path().join("open-skills-local");
+ fs::create_dir_all(&open_skills_dir).unwrap();
+ fs::write(open_skills_dir.join("README.md"), "# open skills\n").unwrap();
+ fs::write(
+ open_skills_dir.join("http_request.md"),
+ "# HTTP request\nFetch API responses.\n",
+ )
+ .unwrap();
+
+ let mut config = crate::config::Config::default();
+ config.workspace_dir = workspace_dir.clone();
+ config.skills.open_skills_enabled = true;
+ config.skills.open_skills_dir = Some(open_skills_dir.to_string_lossy().to_string());
+
+ let skills = load_skills_with_config(&workspace_dir, &config);
+ assert_eq!(skills.len(), 1);
+ assert_eq!(skills[0].name, "http_request");
+ }
}
#[cfg(test)]
From c96ea79ac0e1790e903bed18fea6376e322fd616 Mon Sep 17 00:00:00 2001
From: Will Sarg <12886992+willsarg@users.noreply.github.com>
Date: Fri, 20 Feb 2026 04:34:14 -0500
Subject: [PATCH 017/116] feat(installer): add guided zeroclaw installer and
distro hardening (#887)
* feat(installer): add guided zeroclaw installer entrypoint
- add top-level POSIX wrapper (zeroclaw_install.sh) that ensures bash is present
- route bootstrap/install compatibility scripts through the new installer entrypoint
- improve Linux dependency handling for Alpine/Fedora/Arch, including pacman container fallback
* fix(ci): resolve dependabot config conflict and run daily
- remove duplicate docker ecosystem entry with overlapping directory/target-branch
- switch cargo, github-actions, and docker schedules from monthly to daily
---
.github/dependabot.yml | 23 +--
bootstrap.sh | 4 +-
scripts/bootstrap.sh | 339 ++++++++++++++++++++++++++++++++++++++---
scripts/install.sh | 45 ++----
zeroclaw_install.sh | 88 +++++++++++
5 files changed, 426 insertions(+), 73 deletions(-)
create mode 100755 zeroclaw_install.sh
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
index b44e1119c..eb81c9652 100644
--- a/.github/dependabot.yml
+++ b/.github/dependabot.yml
@@ -4,7 +4,7 @@ updates:
- package-ecosystem: cargo
directory: "/"
schedule:
- interval: monthly
+ interval: daily
target-branch: main
open-pull-requests-limit: 3
labels:
@@ -20,7 +20,7 @@ updates:
- package-ecosystem: github-actions
directory: "/"
schedule:
- interval: monthly
+ interval: daily
target-branch: main
open-pull-requests-limit: 1
labels:
@@ -37,7 +37,7 @@ updates:
- package-ecosystem: docker
directory: "/"
schedule:
- interval: monthly
+ interval: daily
target-branch: main
open-pull-requests-limit: 1
labels:
@@ -50,20 +50,3 @@ updates:
update-types:
- minor
- patch
-
- - package-ecosystem: docker
- directory: "/"
- schedule:
- interval: weekly
- target-branch: main
- open-pull-requests-limit: 3
- labels:
- - "ci"
- - "dependencies"
- groups:
- docker-minor-patch:
- patterns:
- - "*"
- update-types:
- - minor
- - patch
\ No newline at end of file
diff --git a/bootstrap.sh b/bootstrap.sh
index 32a55748f..2c8984dee 100755
--- a/bootstrap.sh
+++ b/bootstrap.sh
@@ -1,5 +1,5 @@
#!/usr/bin/env bash
set -euo pipefail
-ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
-exec "$ROOT_DIR/scripts/bootstrap.sh" "$@"
+ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]:-$0}")" >/dev/null 2>&1 && pwd || pwd)"
+exec "$ROOT_DIR/zeroclaw_install.sh" "$@"
diff --git a/scripts/bootstrap.sh b/scripts/bootstrap.sh
index a081a6156..b6732a76b 100755
--- a/scripts/bootstrap.sh
+++ b/scripts/bootstrap.sh
@@ -15,16 +15,20 @@ error() {
usage() {
cat <<'USAGE'
-ZeroClaw one-click bootstrap
+ZeroClaw installer bootstrap engine
Usage:
- ./bootstrap.sh [options]
+ ./zeroclaw_install.sh [options]
+ ./bootstrap.sh [options] # compatibility entrypoint
Modes:
Default mode installs/builds ZeroClaw only (requires existing Rust toolchain).
+ Guided mode asks setup questions and configures options interactively.
Optional bootstrap mode can also install system dependencies and Rust.
Options:
+ --guided Run interactive guided installer
+ --no-guided Disable guided installer
--docker Run bootstrap in Docker and launch onboarding inside the container
--install-system-deps Install build dependencies (Linux/macOS)
--install-rust Install Rust via rustup if missing
@@ -36,18 +40,22 @@ Options:
--api-key API key for non-interactive onboarding
--provider Provider for non-interactive onboarding (default: openrouter)
--model Model for non-interactive onboarding (optional)
+ --build-first Alias for explicitly enabling separate `cargo build --release --locked`
--skip-build Skip `cargo build --release --locked`
--skip-install Skip `cargo install --path . --force --locked`
-h, --help Show help
Examples:
- ./bootstrap.sh
+ ./zeroclaw_install.sh
+ ./zeroclaw_install.sh --guided
+ ./zeroclaw_install.sh --install-system-deps --install-rust
+ ./zeroclaw_install.sh --prefer-prebuilt
+ ./zeroclaw_install.sh --prebuilt-only
+ ./zeroclaw_install.sh --onboard --api-key "sk-..." --provider openrouter [--model "openrouter/auto"]
+ ./zeroclaw_install.sh --interactive-onboard
+
+ # Compatibility entrypoint:
./bootstrap.sh --docker
- ./bootstrap.sh --install-system-deps --install-rust
- ./bootstrap.sh --prefer-prebuilt
- ./bootstrap.sh --prebuilt-only
- ./bootstrap.sh --onboard --api-key "sk-..." --provider openrouter [--model "openrouter/auto"]
- ./bootstrap.sh --interactive-onboard
# Remote one-liner
curl -fsSL https://raw.githubusercontent.com/zeroclaw-labs/zeroclaw/main/scripts/bootstrap.sh | bash
@@ -60,6 +68,8 @@ Environment:
ZEROCLAW_MODEL Used when --model is not provided
ZEROCLAW_BOOTSTRAP_MIN_RAM_MB Minimum RAM threshold for source build preflight (default: 2048)
ZEROCLAW_BOOTSTRAP_MIN_DISK_MB Minimum free disk threshold for source build preflight (default: 6144)
+ ZEROCLAW_DISABLE_ALPINE_AUTO_DEPS
+ Set to 1 to disable Alpine auto-install of missing prerequisites
USAGE
}
@@ -227,19 +237,152 @@ run_privileged() {
fi
}
+is_container_runtime() {
+ if [[ -f /.dockerenv || -f /run/.containerenv ]]; then
+ return 0
+ fi
+
+ if [[ -r /proc/1/cgroup ]] && grep -Eq '(docker|containerd|kubepods|podman|lxc)' /proc/1/cgroup; then
+ return 0
+ fi
+
+ return 1
+}
+
+run_pacman() {
+ if ! have_cmd pacman; then
+ error "pacman is not available."
+ return 1
+ fi
+
+ if ! is_container_runtime; then
+ run_privileged pacman "$@"
+ return $?
+ fi
+
+ local pacman_cfg_tmp=""
+ local pacman_rc=0
+ pacman_cfg_tmp="$(mktemp /tmp/zeroclaw-pacman.XXXXXX.conf)"
+ cp /etc/pacman.conf "$pacman_cfg_tmp"
+ if ! grep -Eq '^[[:space:]]*DisableSandboxSyscalls([[:space:]]|$)' "$pacman_cfg_tmp"; then
+ printf '\nDisableSandboxSyscalls\n' >> "$pacman_cfg_tmp"
+ fi
+
+ if run_privileged pacman --config "$pacman_cfg_tmp" "$@"; then
+ pacman_rc=0
+ else
+ pacman_rc=$?
+ fi
+
+ rm -f "$pacman_cfg_tmp"
+ return "$pacman_rc"
+}
+
+ALPINE_PREREQ_PACKAGES=(
+ bash
+ build-base
+ pkgconf
+ git
+ curl
+ openssl-dev
+ perl
+ ca-certificates
+)
+ALPINE_MISSING_PKGS=()
+
+find_missing_alpine_prereqs() {
+ ALPINE_MISSING_PKGS=()
+ if ! have_cmd apk; then
+ return 0
+ fi
+
+ local pkg=""
+ for pkg in "${ALPINE_PREREQ_PACKAGES[@]}"; do
+ if ! apk info -e "$pkg" >/dev/null 2>&1; then
+ ALPINE_MISSING_PKGS+=("$pkg")
+ fi
+ done
+}
+
+bool_to_word() {
+ if [[ "$1" == true ]]; then
+ echo "yes"
+ else
+ echo "no"
+ fi
+}
+
+prompt_yes_no() {
+ local question="$1"
+ local default_answer="$2"
+ local prompt=""
+ local answer=""
+
+ if [[ "$default_answer" == "yes" ]]; then
+ prompt="[Y/n]"
+ else
+ prompt="[y/N]"
+ fi
+
+ while true; do
+ if ! read -r -p "$question $prompt " answer; then
+ error "guided installer input was interrupted."
+ exit 1
+ fi
+ answer="${answer:-$default_answer}"
+ case "$(printf '%s' "$answer" | tr '[:upper:]' '[:lower:]')" in
+ y|yes)
+ return 0
+ ;;
+ n|no)
+ return 1
+ ;;
+ *)
+ echo "Please answer yes or no."
+ ;;
+ esac
+ done
+}
+
install_system_deps() {
info "Installing system dependencies"
case "$(uname -s)" in
Linux)
- if have_cmd apt-get; then
+ if have_cmd apk; then
+ find_missing_alpine_prereqs
+ if [[ ${#ALPINE_MISSING_PKGS[@]} -eq 0 ]]; then
+ info "Alpine prerequisites already installed"
+ else
+ info "Installing Alpine prerequisites: ${ALPINE_MISSING_PKGS[*]}"
+ run_privileged apk add --no-cache "${ALPINE_MISSING_PKGS[@]}"
+ fi
+ elif have_cmd apt-get; then
run_privileged apt-get update -qq
run_privileged apt-get install -y build-essential pkg-config git curl
elif have_cmd dnf; then
- run_privileged dnf group install -y development-tools
- run_privileged dnf install -y pkg-config git curl
+ run_privileged dnf install -y \
+ gcc \
+ gcc-c++ \
+ make \
+ pkgconf-pkg-config \
+ git \
+ curl \
+ openssl-devel \
+ perl
+ elif have_cmd pacman; then
+ run_pacman -Sy --noconfirm
+ run_pacman -S --noconfirm --needed \
+ gcc \
+ make \
+ pkgconf \
+ git \
+ curl \
+ openssl \
+ perl \
+ ca-certificates
else
- warn "Unsupported Linux distribution. Install compiler toolchain + pkg-config + git + curl manually."
+ warn "Unsupported Linux distribution. Install compiler toolchain + pkg-config + git + curl + OpenSSL headers + perl manually."
fi
;;
Darwin)
@@ -288,12 +431,125 @@ install_rust_toolchain() {
fi
}
+run_guided_installer() {
+ local os_name="$1"
+ local provider_input=""
+ local model_input=""
+ local api_key_input=""
+
+ echo
+ echo "ZeroClaw guided installer"
+ echo "Answer a few questions, then the installer will run automatically."
+ echo
+
+ if [[ "$os_name" == "Linux" ]]; then
+ if prompt_yes_no "Install Linux build dependencies (toolchain/pkg-config/git/curl)?" "yes"; then
+ INSTALL_SYSTEM_DEPS=true
+ fi
+ else
+ if prompt_yes_no "Install system dependencies for $os_name?" "no"; then
+ INSTALL_SYSTEM_DEPS=true
+ fi
+ fi
+
+ if have_cmd cargo && have_cmd rustc; then
+ info "Detected Rust toolchain: $(rustc --version)"
+ else
+ if prompt_yes_no "Rust toolchain not found. Install Rust via rustup now?" "yes"; then
+ INSTALL_RUST=true
+ fi
+ fi
+
+ if prompt_yes_no "Run a separate prebuild before install?" "yes"; then
+ SKIP_BUILD=false
+ else
+ SKIP_BUILD=true
+ fi
+
+ if prompt_yes_no "Install zeroclaw into cargo bin now?" "yes"; then
+ SKIP_INSTALL=false
+ else
+ SKIP_INSTALL=true
+ fi
+
+ if prompt_yes_no "Run onboarding after install?" "no"; then
+ RUN_ONBOARD=true
+ if prompt_yes_no "Use interactive onboarding?" "yes"; then
+ INTERACTIVE_ONBOARD=true
+ else
+ INTERACTIVE_ONBOARD=false
+ if ! read -r -p "Provider [$PROVIDER]: " provider_input; then
+ error "guided installer input was interrupted."
+ exit 1
+ fi
+ if [[ -n "$provider_input" ]]; then
+ PROVIDER="$provider_input"
+ fi
+
+ if ! read -r -p "Model [${MODEL:-leave empty}]: " model_input; then
+ error "guided installer input was interrupted."
+ exit 1
+ fi
+ if [[ -n "$model_input" ]]; then
+ MODEL="$model_input"
+ fi
+
+ if [[ -z "$API_KEY" ]]; then
+ if ! read -r -s -p "API key (hidden, leave empty to switch to interactive onboarding): " api_key_input; then
+ echo
+ error "guided installer input was interrupted."
+ exit 1
+ fi
+ echo
+ if [[ -n "$api_key_input" ]]; then
+ API_KEY="$api_key_input"
+ else
+ warn "No API key entered. Using interactive onboarding instead."
+ INTERACTIVE_ONBOARD=true
+ fi
+ fi
+ fi
+ fi
+
+ echo
+ info "Installer plan"
+ local install_binary=true
+ local build_first=false
+ if [[ "$SKIP_INSTALL" == true ]]; then
+ install_binary=false
+ fi
+ if [[ "$SKIP_BUILD" == false ]]; then
+ build_first=true
+ fi
+ echo " docker-mode: $(bool_to_word "$DOCKER_MODE")"
+ echo " install-system-deps: $(bool_to_word "$INSTALL_SYSTEM_DEPS")"
+ echo " install-rust: $(bool_to_word "$INSTALL_RUST")"
+ echo " build-first: $(bool_to_word "$build_first")"
+ echo " install-binary: $(bool_to_word "$install_binary")"
+ echo " onboard: $(bool_to_word "$RUN_ONBOARD")"
+ if [[ "$RUN_ONBOARD" == true ]]; then
+ echo " interactive-onboard: $(bool_to_word "$INTERACTIVE_ONBOARD")"
+ if [[ "$INTERACTIVE_ONBOARD" == false ]]; then
+ echo " provider: $PROVIDER"
+ if [[ -n "$MODEL" ]]; then
+ echo " model: $MODEL"
+ fi
+ fi
+ fi
+
+ echo
+ if ! prompt_yes_no "Proceed with this install plan?" "yes"; then
+ info "Installation canceled by user."
+ exit 0
+ fi
+}
+
ensure_docker_ready() {
if ! have_cmd docker; then
error "docker is not installed."
cat <<'MSG' >&2
Install Docker first, then re-run with:
- ./bootstrap.sh --docker
+ ./zeroclaw_install.sh --docker
MSG
exit 1
fi
@@ -342,9 +598,9 @@ run_docker_bootstrap() {
Use either:
--api-key "sk-..."
or:
- ZEROCLAW_API_KEY="sk-..." ./bootstrap.sh --docker
+ ZEROCLAW_API_KEY="sk-..." ./zeroclaw_install.sh --docker
or run interactive:
- ./bootstrap.sh --docker --interactive-onboard
+ ./zeroclaw_install.sh --docker --interactive-onboard
MSG
exit 1
fi
@@ -373,6 +629,8 @@ SCRIPT_PATH="${BASH_SOURCE[0]:-$0}"
SCRIPT_DIR="$(cd "$(dirname "$SCRIPT_PATH")" >/dev/null 2>&1 && pwd || pwd)"
ROOT_DIR="$(cd "$SCRIPT_DIR/.." >/dev/null 2>&1 && pwd || pwd)"
REPO_URL="https://github.com/zeroclaw-labs/zeroclaw.git"
+ORIGINAL_ARG_COUNT=$#
+GUIDED_MODE="auto"
DOCKER_MODE=false
INSTALL_SYSTEM_DEPS=false
@@ -391,6 +649,14 @@ MODEL="${ZEROCLAW_MODEL:-}"
while [[ $# -gt 0 ]]; do
case "$1" in
+ --guided)
+ GUIDED_MODE="on"
+ shift
+ ;;
+ --no-guided)
+ GUIDED_MODE="off"
+ shift
+ ;;
--docker)
DOCKER_MODE=true
shift
@@ -448,6 +714,10 @@ while [[ $# -gt 0 ]]; do
}
shift 2
;;
+ --build-first)
+ SKIP_BUILD=false
+ shift
+ ;;
--skip-build)
SKIP_BUILD=true
shift
@@ -469,14 +739,41 @@ while [[ $# -gt 0 ]]; do
esac
done
+OS_NAME="$(uname -s)"
+if [[ "$GUIDED_MODE" == "auto" ]]; then
+ if [[ "$OS_NAME" == "Linux" && "$ORIGINAL_ARG_COUNT" -eq 0 && -t 0 && -t 1 ]]; then
+ GUIDED_MODE="on"
+ else
+ GUIDED_MODE="off"
+ fi
+fi
+
+if [[ "$DOCKER_MODE" == true && "$GUIDED_MODE" == "on" ]]; then
+ warn "--guided is ignored with --docker."
+ GUIDED_MODE="off"
+fi
+
+if [[ "$GUIDED_MODE" == "on" ]]; then
+ run_guided_installer "$OS_NAME"
+fi
+
if [[ "$DOCKER_MODE" == true ]]; then
if [[ "$INSTALL_SYSTEM_DEPS" == true ]]; then
warn "--install-system-deps is ignored with --docker."
fi
if [[ "$INSTALL_RUST" == true ]]; then
- warn "--install-rust is ignored with --docker."
+ warn "--install-rust is ignored with --docker."
fi
else
+ if [[ "$OS_NAME" == "Linux" && -z "${ZEROCLAW_DISABLE_ALPINE_AUTO_DEPS:-}" ]] && have_cmd apk; then
+ find_missing_alpine_prereqs
+ if [[ ${#ALPINE_MISSING_PKGS[@]} -gt 0 && "$INSTALL_SYSTEM_DEPS" == false ]]; then
+ info "Detected Alpine with missing prerequisites: ${ALPINE_MISSING_PKGS[*]}"
+ info "Auto-enabling system dependency installation (set ZEROCLAW_DISABLE_ALPINE_AUTO_DEPS=1 to disable)."
+ INSTALL_SYSTEM_DEPS=true
+ fi
+ fi
+
if [[ "$INSTALL_SYSTEM_DEPS" == true ]]; then
install_system_deps
fi
@@ -554,8 +851,8 @@ DONE
cat <<'DONE'
Next steps:
- ./bootstrap.sh --docker --interactive-onboard
- ./bootstrap.sh --docker --api-key "sk-..." --provider openrouter
+ ./zeroclaw_install.sh --docker --interactive-onboard
+ ./zeroclaw_install.sh --docker --api-key "sk-..." --provider openrouter
DONE
exit 0
fi
@@ -588,7 +885,7 @@ if [[ "$PREBUILT_INSTALLED" == false && ( "$SKIP_BUILD" == false || "$SKIP_INSTA
cat <<'MSG' >&2
Install Rust first: https://rustup.rs/
or re-run with:
- ./bootstrap.sh --install-rust
+ ./zeroclaw_install.sh --install-rust
MSG
exit 1
fi
@@ -633,9 +930,9 @@ if [[ "$RUN_ONBOARD" == true ]]; then
Use either:
--api-key "sk-..."
or:
- ZEROCLAW_API_KEY="sk-..." ./bootstrap.sh --onboard
+ ZEROCLAW_API_KEY="sk-..." ./zeroclaw_install.sh --onboard
or run interactive:
- ./bootstrap.sh --interactive-onboard
+ ./zeroclaw_install.sh --interactive-onboard
MSG
exit 1
fi
diff --git a/scripts/install.sh b/scripts/install.sh
index 68efa95f4..478bdd527 100755
--- a/scripts/install.sh
+++ b/scripts/install.sh
@@ -2,10 +2,15 @@
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]:-$0}")" >/dev/null 2>&1 && pwd || pwd)"
+INSTALLER_LOCAL="$(cd "$SCRIPT_DIR/.." >/dev/null 2>&1 && pwd || pwd)/zeroclaw_install.sh"
BOOTSTRAP_LOCAL="$SCRIPT_DIR/bootstrap.sh"
REPO_URL="https://github.com/zeroclaw-labs/zeroclaw.git"
-echo "[deprecated] scripts/install.sh -> bootstrap.sh" >&2
+echo "[deprecated] scripts/install.sh -> ./zeroclaw_install.sh" >&2
+
+if [[ -x "$INSTALLER_LOCAL" ]]; then
+ exec "$INSTALLER_LOCAL" "$@"
+fi
if [[ -f "$BOOTSTRAP_LOCAL" ]]; then
exec "$BOOTSTRAP_LOCAL" "$@"
@@ -24,35 +29,15 @@ trap cleanup EXIT
git clone --depth 1 "$REPO_URL" "$TEMP_DIR" >/dev/null 2>&1
+if [[ -x "$TEMP_DIR/zeroclaw_install.sh" ]]; then
+ exec "$TEMP_DIR/zeroclaw_install.sh" "$@"
+fi
+
if [[ -x "$TEMP_DIR/scripts/bootstrap.sh" ]]; then
- "$TEMP_DIR/scripts/bootstrap.sh" "$@"
- exit 0
+ exec "$TEMP_DIR/scripts/bootstrap.sh" "$@"
fi
-echo "[deprecated] cloned revision has no bootstrap.sh; falling back to legacy source install flow" >&2
-
-if [[ "${1:-}" == "--help" || "${1:-}" == "-h" ]]; then
- cat <<'USAGE'
-Legacy install.sh fallback mode
-
-Behavior:
- - Clone repository
- - cargo build --release --locked
- - cargo install --path --force --locked
-
-For the new dual-mode installer, use:
- ./bootstrap.sh --help
-USAGE
- exit 0
-fi
-
-if ! command -v cargo >/dev/null 2>&1; then
- echo "error: cargo is required for legacy install.sh fallback mode" >&2
- echo "Install Rust first: https://rustup.rs/" >&2
- exit 1
-fi
-
-cargo build --release --locked --manifest-path "$TEMP_DIR/Cargo.toml"
-cargo install --path "$TEMP_DIR" --force --locked
-
-echo "Legacy source install completed." >&2
+echo "error: zeroclaw_install.sh/bootstrap.sh was not found in the fetched revision." >&2
+echo "Run the local bootstrap directly when possible:" >&2
+echo " ./zeroclaw_install.sh --help" >&2
+exit 1
diff --git a/zeroclaw_install.sh b/zeroclaw_install.sh
new file mode 100755
index 000000000..4279e1a33
--- /dev/null
+++ b/zeroclaw_install.sh
@@ -0,0 +1,88 @@
+#!/usr/bin/env sh
+set -eu
+
+have_cmd() {
+ command -v "$1" >/dev/null 2>&1
+}
+
+run_privileged() {
+ if [ "$(id -u)" -eq 0 ]; then
+ "$@"
+ elif have_cmd sudo; then
+ sudo "$@"
+ else
+ echo "error: sudo is required to install missing dependencies." >&2
+ exit 1
+ fi
+}
+
+is_container_runtime() {
+ if [ -f /.dockerenv ] || [ -f /run/.containerenv ]; then
+ return 0
+ fi
+
+ if [ -r /proc/1/cgroup ] && grep -Eq '(docker|containerd|kubepods|podman|lxc)' /proc/1/cgroup; then
+ return 0
+ fi
+
+ return 1
+}
+
+run_pacman() {
+ if ! is_container_runtime; then
+ run_privileged pacman "$@"
+ return $?
+ fi
+
+ PACMAN_CFG_TMP="$(mktemp /tmp/zeroclaw-pacman.XXXXXX.conf)"
+ cp /etc/pacman.conf "$PACMAN_CFG_TMP"
+ if ! grep -Eq '^[[:space:]]*DisableSandboxSyscalls([[:space:]]|$)' "$PACMAN_CFG_TMP"; then
+ printf '\nDisableSandboxSyscalls\n' >> "$PACMAN_CFG_TMP"
+ fi
+
+ if run_privileged pacman --config "$PACMAN_CFG_TMP" "$@"; then
+ PACMAN_RC=0
+ else
+ PACMAN_RC=$?
+ fi
+ rm -f "$PACMAN_CFG_TMP"
+ return "$PACMAN_RC"
+}
+
+ensure_bash() {
+ if have_cmd bash; then
+ return 0
+ fi
+
+ echo "==> bash not found; attempting to install it"
+ if have_cmd apk; then
+ run_privileged apk add --no-cache bash
+ elif have_cmd apt-get; then
+ run_privileged apt-get update -qq
+ run_privileged apt-get install -y bash
+ elif have_cmd dnf; then
+ run_privileged dnf install -y bash
+ elif have_cmd pacman; then
+ run_pacman -Sy --noconfirm
+ run_pacman -S --noconfirm --needed bash
+ else
+ echo "error: unsupported package manager; install bash manually and retry." >&2
+ exit 1
+ fi
+}
+
+ROOT_DIR="$(CDPATH= cd -- "$(dirname -- "$0")" >/dev/null 2>&1 && pwd || pwd)"
+BOOTSTRAP_SCRIPT="$ROOT_DIR/scripts/bootstrap.sh"
+
+if [ ! -f "$BOOTSTRAP_SCRIPT" ]; then
+ echo "error: scripts/bootstrap.sh not found from repository root." >&2
+ exit 1
+fi
+
+ensure_bash
+
+if [ "$#" -eq 0 ]; then
+ exec bash "$BOOTSTRAP_SCRIPT" --guided
+fi
+
+exec bash "$BOOTSTRAP_SCRIPT" "$@"
From 12fd87623ae2633dc14032f17de60022c05e7bd0 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Fri, 20 Feb 2026 04:39:29 -0500
Subject: [PATCH 018/116] chore(deps): bump sigstore/cosign-installer from
3.8.2 to 4.0.0 (#1067)
Bumps [sigstore/cosign-installer](https://github.com/sigstore/cosign-installer) from 3.8.2 to 4.0.0.
- [Release notes](https://github.com/sigstore/cosign-installer/releases)
- [Commits](https://github.com/sigstore/cosign-installer/compare/3454372f43399081ed03b604cb2d021dabca52bb...faadad0cce49287aee09b3a48701e75088a2c6ad)
---
updated-dependencies:
- dependency-name: sigstore/cosign-installer
dependency-version: 4.0.0
dependency-type: direct:production
update-type: version-update:semver-major
...
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
.github/workflows/pub-release.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/workflows/pub-release.yml b/.github/workflows/pub-release.yml
index 193bfd5ce..e9479d8f2 100644
--- a/.github/workflows/pub-release.yml
+++ b/.github/workflows/pub-release.yml
@@ -170,7 +170,7 @@ jobs:
cat SHA256SUMS
- name: Install cosign
- uses: sigstore/cosign-installer@3454372f43399081ed03b604cb2d021dabca52bb # v3.8.2
+ uses: sigstore/cosign-installer@faadad0cce49287aee09b3a48701e75088a2c6ad # v4.0.0
- name: Sign artifacts with cosign (keyless)
run: |
From d82350d847469f3f60cf3ff45317dc2665acfff6 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Fri, 20 Feb 2026 04:51:03 -0500
Subject: [PATCH 019/116] chore(deps): bump the rust-all group with 3 updates
(#1068)
Bumps the rust-all group with 3 updates: [clap](https://github.com/clap-rs/clap), [anyhow](https://github.com/dtolnay/anyhow) and [nusb](https://github.com/kevinmehall/nusb).
Updates `clap` from 4.5.58 to 4.5.60
- [Release notes](https://github.com/clap-rs/clap/releases)
- [Changelog](https://github.com/clap-rs/clap/blob/master/CHANGELOG.md)
- [Commits](https://github.com/clap-rs/clap/compare/clap_complete-v4.5.58...clap_complete-v4.5.60)
Updates `anyhow` from 1.0.101 to 1.0.102
- [Release notes](https://github.com/dtolnay/anyhow/releases)
- [Commits](https://github.com/dtolnay/anyhow/compare/1.0.101...1.0.102)
Updates `nusb` from 0.2.1 to 0.2.2
- [Release notes](https://github.com/kevinmehall/nusb/releases)
- [Commits](https://github.com/kevinmehall/nusb/compare/v0.2.1...v0.2.2)
---
updated-dependencies:
- dependency-name: clap
dependency-version: 4.5.60
dependency-type: direct:production
update-type: version-update:semver-patch
dependency-group: rust-all
- dependency-name: anyhow
dependency-version: 1.0.102
dependency-type: direct:production
update-type: version-update:semver-patch
dependency-group: rust-all
- dependency-name: nusb
dependency-version: 0.2.2
dependency-type: direct:production
update-type: version-update:semver-patch
dependency-group: rust-all
...
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
Cargo.lock | 57 +++++++++++++++++++++++++++++++++---------------------
1 file changed, 35 insertions(+), 22 deletions(-)
diff --git a/Cargo.lock b/Cargo.lock
index 1305b658c..0f83cdc1e 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -158,9 +158,9 @@ dependencies = [
[[package]]
name = "anyhow"
-version = "1.0.101"
+version = "1.0.102"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5f0e0fee31ef5ed1ba1316088939cea399010ed7731dba877ed44aeb407a75ea"
+checksum = "7f202df86484c868dbad7eaa557ef785d5c66295e41b460ef922eca0723b842c"
[[package]]
name = "anymap2"
@@ -776,9 +776,9 @@ dependencies = [
[[package]]
name = "clap"
-version = "4.5.58"
+version = "4.5.60"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "63be97961acde393029492ce0be7a1af7e323e6bae9511ebfac33751be5e6806"
+checksum = "2797f34da339ce31042b27d23607e051786132987f595b02ba4f6a6dffb7030a"
dependencies = [
"clap_builder",
"clap_derive",
@@ -786,9 +786,9 @@ dependencies = [
[[package]]
name = "clap_builder"
-version = "4.5.58"
+version = "4.5.60"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7f13174bda5dfd69d7e947827e5af4b0f2f94a4a3ee92912fba07a66150f21e2"
+checksum = "24a241312cea5059b13574bb9b3861cabf758b879c15190b37b6d6fd63ab6876"
dependencies = [
"anstream",
"anstyle",
@@ -2708,7 +2708,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "617ee6cf8e3f66f3b4ea67a4058564628cde41901316e19f559e14c7c72c5e7b"
dependencies = [
"core-foundation-sys",
- "mach2",
+ "mach2 0.4.3",
+]
+
+[[package]]
+name = "io-kit-sys"
+version = "0.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "06d3a048d09fbb6597dbf7c69f40d14df4a49487db1487191618c893fc3b1c26"
+dependencies = [
+ "core-foundation-sys",
+ "mach2 0.5.0",
]
[[package]]
@@ -2948,12 +2958,6 @@ version = "0.4.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab"
-[[package]]
-name = "linux-raw-sys"
-version = "0.9.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cd945864f07fe9f5371a27ad7b52a172b4b499999f1d97574c9fa68373937e12"
-
[[package]]
name = "linux-raw-sys"
version = "0.11.0"
@@ -3042,6 +3046,15 @@ dependencies = [
"libc",
]
+[[package]]
+name = "mach2"
+version = "0.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6a1b95cd5421ec55b445b5ae102f5ea0e768de1f82bd3001e11f426c269c3aea"
+dependencies = [
+ "libc",
+]
+
[[package]]
name = "macroific"
version = "2.0.0"
@@ -3702,7 +3715,7 @@ dependencies = [
"core-foundation 0.9.4",
"core-foundation-sys",
"futures-core",
- "io-kit-sys",
+ "io-kit-sys 0.4.1",
"libc",
"log",
"once_cell",
@@ -3713,20 +3726,20 @@ dependencies = [
[[package]]
name = "nusb"
-version = "0.2.1"
+version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d0226f4db3ee78f820747cf713767722877f6449d7a0fcfbf2ec3b840969763f"
+checksum = "5750d884c774a2862b0049b0318aea27cecc9e873485540af5ed8ab8841247da"
dependencies = [
"core-foundation 0.10.1",
"core-foundation-sys",
"futures-core",
- "io-kit-sys",
- "linux-raw-sys 0.9.4",
+ "io-kit-sys 0.5.0",
+ "linux-raw-sys 0.11.0",
"log",
"once_cell",
"rustix 1.1.3",
"slab",
- "windows-sys 0.60.2",
+ "windows-sys 0.61.2",
]
[[package]]
@@ -5468,8 +5481,8 @@ dependencies = [
"cfg-if",
"core-foundation 0.10.1",
"core-foundation-sys",
- "io-kit-sys",
- "mach2",
+ "io-kit-sys 0.4.1",
+ "mach2 0.4.3",
"nix 0.26.4",
"scopeguard",
"unescaper",
@@ -7687,7 +7700,7 @@ dependencies = [
"lettre",
"mail-parser",
"matrix-sdk",
- "nusb 0.2.1",
+ "nusb 0.2.2",
"opentelemetry",
"opentelemetry-otlp",
"opentelemetry_sdk",
From 7875a0810056cc27f370356702698f4a7af43845 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Fri, 20 Feb 2026 04:51:06 -0500
Subject: [PATCH 020/116] chore(deps): bump directories from 5.0.1 to 6.0.0
(#1069)
Bumps [directories](https://github.com/soc/directories-rs) from 5.0.1 to 6.0.0.
- [Commits](https://github.com/soc/directories-rs/commits)
---
updated-dependencies:
- dependency-name: directories
dependency-version: 6.0.0
dependency-type: direct:production
update-type: version-update:semver-major
...
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
Cargo.lock | 42 +++++--------------------------------
crates/robot-kit/Cargo.toml | 2 +-
2 files changed, 6 insertions(+), 38 deletions(-)
diff --git a/Cargo.lock b/Cargo.lock
index 0f83cdc1e..12a96bed4 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1385,22 +1385,13 @@ dependencies = [
"subtle",
]
-[[package]]
-name = "directories"
-version = "5.0.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9a49173b84e034382284f27f1af4dcbbd231ffa358c0fe316541a7337f376a35"
-dependencies = [
- "dirs-sys 0.4.1",
-]
-
[[package]]
name = "directories"
version = "6.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "16f5094c54661b38d03bd7e50df373292118db60b585c08a411c6d840017fe7d"
dependencies = [
- "dirs-sys 0.5.0",
+ "dirs-sys",
]
[[package]]
@@ -1409,19 +1400,7 @@ version = "6.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c3e8aa94d75141228480295a7d0e7feb620b1a5ad9f12bc40be62411e38cce4e"
dependencies = [
- "dirs-sys 0.5.0",
-]
-
-[[package]]
-name = "dirs-sys"
-version = "0.4.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "520f05a5cbd335fae5a99ff7a6ab8627577660ee5cfd6a94a6a929b52ff0321c"
-dependencies = [
- "libc",
- "option-ext",
- "redox_users 0.4.6",
- "windows-sys 0.48.0",
+ "dirs-sys",
]
[[package]]
@@ -1432,7 +1411,7 @@ checksum = "e01a3366d27ee9890022452ee61b2b63a67e6f13f58900b651ff5665f0bb1fab"
dependencies = [
"libc",
"option-ext",
- "redox_users 0.5.2",
+ "redox_users",
"windows-sys 0.61.2",
]
@@ -4727,17 +4706,6 @@ dependencies = [
"bitflags 2.11.0",
]
-[[package]]
-name = "redox_users"
-version = "0.4.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43"
-dependencies = [
- "getrandom 0.2.17",
- "libredox",
- "thiserror 1.0.69",
-]
-
[[package]]
name = "redox_users"
version = "0.5.2"
@@ -7687,7 +7655,7 @@ dependencies = [
"criterion",
"cron",
"dialoguer",
- "directories 6.0.0",
+ "directories",
"fantoccini",
"futures",
"futures-util",
@@ -7756,7 +7724,7 @@ dependencies = [
"async-trait",
"base64",
"chrono",
- "directories 5.0.1",
+ "directories",
"reqwest",
"rppal 0.19.0",
"serde",
diff --git a/crates/robot-kit/Cargo.toml b/crates/robot-kit/Cargo.toml
index 76b2863ff..0752ce839 100644
--- a/crates/robot-kit/Cargo.toml
+++ b/crates/robot-kit/Cargo.toml
@@ -52,7 +52,7 @@ tracing = "0.1"
chrono = { version = "0.4", features = ["clock", "std"] }
# User directories
-directories = "5.0"
+directories = "6.0"
[target.'cfg(target_os = "linux")'.dependencies]
From b76c757400fbdbe7e4a26737e1b9b5bb1d2ec72d Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Fri, 20 Feb 2026 04:51:09 -0500
Subject: [PATCH 021/116] chore(deps): bump criterion from 0.5.1 to 0.8.2
(#1070)
Bumps [criterion](https://github.com/criterion-rs/criterion.rs) from 0.5.1 to 0.8.2.
- [Release notes](https://github.com/criterion-rs/criterion.rs/releases)
- [Changelog](https://github.com/criterion-rs/criterion.rs/blob/master/CHANGELOG.md)
- [Commits](https://github.com/criterion-rs/criterion.rs/compare/0.5.1...criterion-v0.8.2)
---
updated-dependencies:
- dependency-name: criterion
dependency-version: 0.8.2
dependency-type: direct:production
update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
Cargo.lock | 57 ++++++++++++++++++++++++++++++++++--------------------
Cargo.toml | 2 +-
2 files changed, 37 insertions(+), 22 deletions(-)
diff --git a/Cargo.lock b/Cargo.lock
index 12a96bed4..3e912a2fc 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -85,6 +85,15 @@ dependencies = [
"memchr",
]
+[[package]]
+name = "alloca"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e5a7d05ea6aea7e9e64d25b9156ba2fee3fdd659e34e41063cd2fc7cd020d7f4"
+dependencies = [
+ "cc",
+]
+
[[package]]
name = "allocator-api2"
version = "0.2.21"
@@ -993,26 +1002,24 @@ dependencies = [
[[package]]
name = "criterion"
-version = "0.5.1"
+version = "0.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f2b12d017a929603d80db1831cd3a24082f8137ce19c69e6447f54f5fc8d692f"
+checksum = "950046b2aa2492f9a536f5f4f9a3de7b9e2476e575e05bd6c333371add4d98f3"
dependencies = [
+ "alloca",
"anes",
"cast",
"ciborium",
"clap",
"criterion-plot",
- "futures",
- "is-terminal",
- "itertools 0.10.5",
+ "itertools 0.13.0",
"num-traits",
- "once_cell",
"oorandom",
+ "page_size",
"plotters",
"rayon",
"regex",
"serde",
- "serde_derive",
"serde_json",
"tinytemplate",
"tokio",
@@ -1021,12 +1028,12 @@ dependencies = [
[[package]]
name = "criterion-plot"
-version = "0.5.0"
+version = "0.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1"
+checksum = "d8d80a2f4f5b554395e47b5d8305bc3d27813bacb73493eb1001e8f76dae29ea"
dependencies = [
"cast",
- "itertools 0.10.5",
+ "itertools 0.13.0",
]
[[package]]
@@ -2727,17 +2734,6 @@ dependencies = [
"serde",
]
-[[package]]
-name = "is-terminal"
-version = "0.4.17"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3640c1c38b8e4e43584d8df18be5fc6b0aa314ce6ebf51b53313d4306cca8e46"
-dependencies = [
- "hermit-abi 0.5.2",
- "libc",
- "windows-sys 0.61.2",
-]
-
[[package]]
name = "is_terminal_polyfill"
version = "1.70.2"
@@ -2753,6 +2749,15 @@ dependencies = [
"either",
]
+[[package]]
+name = "itertools"
+version = "0.13.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186"
+dependencies = [
+ "either",
+]
+
[[package]]
name = "itertools"
version = "0.14.0"
@@ -3885,6 +3890,16 @@ version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d"
+[[package]]
+name = "page_size"
+version = "0.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "30d5b2194ed13191c1999ae0704b7839fb18384fa22e49b57eeaa97d79ce40da"
+dependencies = [
+ "libc",
+ "winapi",
+]
+
[[package]]
name = "parking"
version = "2.2.1"
diff --git a/Cargo.toml b/Cargo.toml
index 10cfdf6f6..468c01131 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -206,7 +206,7 @@ panic = "abort"
[dev-dependencies]
tempfile = "3.14"
-criterion = { version = "0.5", features = ["async_tokio"] }
+criterion = { version = "0.8", features = ["async_tokio"] }
[[bench]]
name = "agent_benchmarks"
From 1a3be5e54f6f936f95d57428e78a1d2d0b330f91 Mon Sep 17 00:00:00 2001
From: Alex Gorevski
Date: Fri, 20 Feb 2026 01:58:19 -0800
Subject: [PATCH 023/116] fix(config): change web_search.enabled default to
false for explicit opt-in (#986)
Network access (web search via DuckDuckGo) should require explicit user
consent rather than being enabled by default. This aligns with the
least-surprise principle and the project's secure-by-default policy:
users must opt in to external network requests.
Changes:
- WebSearchConfig::default() now sets enabled: false
- Serde default for enabled field changed from default_true to default
(bool defaults to false)
Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com>
---
src/config/schema.rs | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/src/config/schema.rs b/src/config/schema.rs
index 04eee3283..f47bb9d02 100644
--- a/src/config/schema.rs
+++ b/src/config/schema.rs
@@ -909,7 +909,7 @@ fn default_http_timeout_secs() -> u64 {
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
pub struct WebSearchConfig {
/// Enable `web_search_tool` for web searches
- #[serde(default = "default_true")]
+ #[serde(default)]
pub enabled: bool,
/// Search provider: "duckduckgo" (free, no API key) or "brave" (requires API key)
#[serde(default = "default_web_search_provider")]
@@ -940,7 +940,7 @@ fn default_web_search_timeout_secs() -> u64 {
impl Default for WebSearchConfig {
fn default() -> Self {
Self {
- enabled: true,
+ enabled: false,
provider: default_web_search_provider(),
brave_api_key: None,
max_results: default_web_search_max_results(),
From e2c507664ca10996f0c68018af220120e30c91a1 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Edvard=20Sch=C3=B8yen?=
<99178202+ecschoye@users.noreply.github.com>
Date: Fri, 20 Feb 2026 05:00:26 -0500
Subject: [PATCH 024/116] fix(provider): surface API key rotation as
ineffective warning (#1000)
rotate_key() selects the next key in the round-robin but never applies
it to the underlying provider (Provider trait has no set_api_key
method). The previous info-level log implied rotation was working.
Change to warn-level and explicitly state the key is not applied,
making the limitation visible to operators instead of silently
pretending rotation works.
Co-authored-by: Claude Opus 4.6
---
src/providers/reliable.rs | 18 ++++++++++++------
1 file changed, 12 insertions(+), 6 deletions(-)
diff --git a/src/providers/reliable.rs b/src/providers/reliable.rs
index d3621d5ca..94c855afa 100644
--- a/src/providers/reliable.rs
+++ b/src/providers/reliable.rs
@@ -356,10 +356,12 @@ impl Provider for ReliableProvider {
// so the retry hits a different quota bucket.
if rate_limited && !non_retryable_rate_limit {
if let Some(new_key) = self.rotate_key() {
- tracing::info!(
+ tracing::warn!(
provider = provider_name,
error = %error_detail,
- "Rate limited, rotated API key (key ending ...{})",
+ "Rate limited; key rotation selected key ending ...{} \
+ but cannot apply (Provider trait has no set_api_key). \
+ Retrying with original key.",
&new_key[new_key.len().saturating_sub(4)..]
);
}
@@ -472,10 +474,12 @@ impl Provider for ReliableProvider {
if rate_limited && !non_retryable_rate_limit {
if let Some(new_key) = self.rotate_key() {
- tracing::info!(
+ tracing::warn!(
provider = provider_name,
error = %error_detail,
- "Rate limited, rotated API key (key ending ...{})",
+ "Rate limited; key rotation selected key ending ...{} \
+ but cannot apply (Provider trait has no set_api_key). \
+ Retrying with original key.",
&new_key[new_key.len().saturating_sub(4)..]
);
}
@@ -594,10 +598,12 @@ impl Provider for ReliableProvider {
if rate_limited && !non_retryable_rate_limit {
if let Some(new_key) = self.rotate_key() {
- tracing::info!(
+ tracing::warn!(
provider = provider_name,
error = %error_detail,
- "Rate limited, rotated API key (key ending ...{})",
+ "Rate limited; key rotation selected key ending ...{} \
+ but cannot apply (Provider trait has no set_api_key). \
+ Retrying with original key.",
&new_key[new_key.len().saturating_sub(4)..]
);
}
From 2ae12578f0ff6f32e3c1c4c104c204a82a779c36 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Edvard=20Sch=C3=B8yen?=
<99178202+ecschoye@users.noreply.github.com>
Date: Fri, 20 Feb 2026 05:02:39 -0500
Subject: [PATCH 025/116] fix(channel): use per-recipient typing handles in
Discord (#1005)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Replace the single shared typing_handle with a HashMap keyed by
recipient channel ID. Previously, concurrent messages would fight
over one handle — starting typing for message B would cancel message
A's indicator, and stopping one would kill the other's.
Co-authored-by: Claude Opus 4.6
---
src/channels/discord.rs | 44 +++++++++++++++++++++++++----------------
1 file changed, 27 insertions(+), 17 deletions(-)
diff --git a/src/channels/discord.rs b/src/channels/discord.rs
index 3ae69baba..bcb447d71 100644
--- a/src/channels/discord.rs
+++ b/src/channels/discord.rs
@@ -3,6 +3,7 @@ use async_trait::async_trait;
use futures_util::{SinkExt, StreamExt};
use parking_lot::Mutex;
use serde_json::json;
+use std::collections::HashMap;
use tokio_tungstenite::tungstenite::Message;
use uuid::Uuid;
@@ -13,7 +14,7 @@ pub struct DiscordChannel {
allowed_users: Vec,
listen_to_bots: bool,
mention_only: bool,
- typing_handle: Mutex>>,
+ typing_handles: Mutex>>,
}
impl DiscordChannel {
@@ -30,7 +31,7 @@ impl DiscordChannel {
allowed_users,
listen_to_bots,
mention_only,
- typing_handle: Mutex::new(None),
+ typing_handles: Mutex::new(HashMap::new()),
}
}
@@ -457,15 +458,15 @@ impl Channel for DiscordChannel {
}
});
- let mut guard = self.typing_handle.lock();
- *guard = Some(handle);
+ let mut guard = self.typing_handles.lock();
+ guard.insert(recipient.to_string(), handle);
Ok(())
}
- async fn stop_typing(&self, _recipient: &str) -> anyhow::Result<()> {
- let mut guard = self.typing_handle.lock();
- if let Some(handle) = guard.take() {
+ async fn stop_typing(&self, recipient: &str) -> anyhow::Result<()> {
+ let mut guard = self.typing_handles.lock();
+ if let Some(handle) = guard.remove(recipient) {
handle.abort();
}
Ok(())
@@ -754,18 +755,18 @@ mod tests {
}
#[test]
- fn typing_handle_starts_as_none() {
+ fn typing_handles_start_empty() {
let ch = DiscordChannel::new("fake".into(), None, vec![], false, false);
- let guard = ch.typing_handle.lock();
- assert!(guard.is_none());
+ let guard = ch.typing_handles.lock();
+ assert!(guard.is_empty());
}
#[tokio::test]
async fn start_typing_sets_handle() {
let ch = DiscordChannel::new("fake".into(), None, vec![], false, false);
let _ = ch.start_typing("123456").await;
- let guard = ch.typing_handle.lock();
- assert!(guard.is_some());
+ let guard = ch.typing_handles.lock();
+ assert!(guard.contains_key("123456"));
}
#[tokio::test]
@@ -773,8 +774,8 @@ mod tests {
let ch = DiscordChannel::new("fake".into(), None, vec![], false, false);
let _ = ch.start_typing("123456").await;
let _ = ch.stop_typing("123456").await;
- let guard = ch.typing_handle.lock();
- assert!(guard.is_none());
+ let guard = ch.typing_handles.lock();
+ assert!(!guard.contains_key("123456"));
}
#[tokio::test]
@@ -785,12 +786,21 @@ mod tests {
}
#[tokio::test]
- async fn start_typing_replaces_existing_task() {
+ async fn concurrent_typing_handles_are_independent() {
let ch = DiscordChannel::new("fake".into(), None, vec![], false, false);
let _ = ch.start_typing("111").await;
let _ = ch.start_typing("222").await;
- let guard = ch.typing_handle.lock();
- assert!(guard.is_some());
+ {
+ let guard = ch.typing_handles.lock();
+ assert_eq!(guard.len(), 2);
+ assert!(guard.contains_key("111"));
+ assert!(guard.contains_key("222"));
+ }
+ // Stopping one does not affect the other
+ let _ = ch.stop_typing("111").await;
+ let guard = ch.typing_handles.lock();
+ assert_eq!(guard.len(), 1);
+ assert!(guard.contains_key("222"));
}
// ── Message ID edge cases ─────────────────────────────────────
From f35a365d830a5a49546acec03a2b00a904c98b05 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Edvard=20Sch=C3=B8yen?=
<99178202+ecschoye@users.noreply.github.com>
Date: Fri, 20 Feb 2026 05:05:33 -0500
Subject: [PATCH 026/116] fix(agent): implement actual concurrent tool
execution (#1001)
When parallel_tools is enabled, both code branches in execute_tools()
ran the same sequential for loop. The parallel path was a no-op.
Use futures::future::join_all to execute tool calls concurrently when
parallel_tools is true. The futures crate is already a dependency.
Co-authored-by: Claude Opus 4.6
---
src/agent/agent.rs | 7 ++-----
1 file changed, 2 insertions(+), 5 deletions(-)
diff --git a/src/agent/agent.rs b/src/agent/agent.rs
index e96d797dd..5f048e2a5 100644
--- a/src/agent/agent.rs
+++ b/src/agent/agent.rs
@@ -403,11 +403,8 @@ impl Agent {
return results;
}
- let mut results = Vec::with_capacity(calls.len());
- for call in calls {
- results.push(self.execute_tool_call(call).await);
- }
- results
+ let futs: Vec<_> = calls.iter().map(|call| self.execute_tool_call(call)).collect();
+ futures::future::join_all(futs).await
}
fn classify_model(&self, user_message: &str) -> String {
From 2c407f6a5549243f3d8f9980d8bf6980f283a3c4 Mon Sep 17 00:00:00 2001
From: Alex Gorevski
Date: Fri, 20 Feb 2026 02:06:41 -0800
Subject: [PATCH 027/116] refactor(lib): restrict internal module visibility to
pub(crate) (#985)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Restrict 19 internal-only modules from pub to pub(crate) in lib.rs,
reducing the public API surface of the library crate.
Modules kept pub (used by integration tests, benchmarks, or are
documented extension points per AGENTS.md):
agent, channels, config, gateway, memory, observability,
peripherals, providers, rag, runtime, tools
Modules restricted to pub(crate) (not imported via zeroclaw:: by any
external consumer):
approval, auth, cost, cron, daemon, doctor, hardware, health,
heartbeat, identity, integrations, migration, multimodal, onboard,
security, service, skills, tunnel, util
Also restrict 6 command enums (ServiceCommands, ChannelCommands,
SkillCommands, MigrateCommands, CronCommands, IntegrationCommands)
to pub(crate) — main.rs defines its own copies and does not import
these from the library crate. HardwareCommands and PeripheralCommands
remain pub as main.rs imports them via zeroclaw::.
Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com>
---
src/lib.rs | 50 +++++++++++++++++++++++++-------------------------
1 file changed, 25 insertions(+), 25 deletions(-)
diff --git a/src/lib.rs b/src/lib.rs
index cdf280175..6df3187bf 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -39,41 +39,41 @@ use clap::Subcommand;
use serde::{Deserialize, Serialize};
pub mod agent;
-pub mod approval;
-pub mod auth;
+pub(crate) mod approval;
+pub(crate) mod auth;
pub mod channels;
pub mod config;
-pub mod cost;
-pub mod cron;
-pub mod daemon;
-pub mod doctor;
+pub(crate) mod cost;
+pub(crate) mod cron;
+pub(crate) mod daemon;
+pub(crate) mod doctor;
pub mod gateway;
-pub mod hardware;
-pub mod health;
-pub mod heartbeat;
-pub mod identity;
-pub mod integrations;
+pub(crate) mod hardware;
+pub(crate) mod health;
+pub(crate) mod heartbeat;
+pub(crate) mod identity;
+pub(crate) mod integrations;
pub mod memory;
-pub mod migration;
-pub mod multimodal;
+pub(crate) mod migration;
+pub(crate) mod multimodal;
pub mod observability;
-pub mod onboard;
+pub(crate) mod onboard;
pub mod peripherals;
pub mod providers;
pub mod rag;
pub mod runtime;
-pub mod security;
-pub mod service;
-pub mod skills;
+pub(crate) mod security;
+pub(crate) mod service;
+pub(crate) mod skills;
pub mod tools;
-pub mod tunnel;
-pub mod util;
+pub(crate) mod tunnel;
+pub(crate) mod util;
pub use config::Config;
/// Service management subcommands
#[derive(Subcommand, Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
-pub enum ServiceCommands {
+pub(crate) enum ServiceCommands {
/// Install daemon service unit for auto-start and restart
Install,
/// Start daemon service
@@ -90,7 +90,7 @@ pub enum ServiceCommands {
/// Channel management subcommands
#[derive(Subcommand, Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
-pub enum ChannelCommands {
+pub(crate) enum ChannelCommands {
/// List all configured channels
List,
/// Start all configured channels (handled in main.rs for async)
@@ -139,7 +139,7 @@ Examples:
/// Skills management subcommands
#[derive(Subcommand, Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
-pub enum SkillCommands {
+pub(crate) enum SkillCommands {
/// List all installed skills
List,
/// Install a new skill from a git URL (HTTPS/SSH) or local path
@@ -156,7 +156,7 @@ pub enum SkillCommands {
/// Migration subcommands
#[derive(Subcommand, Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
-pub enum MigrateCommands {
+pub(crate) enum MigrateCommands {
/// Import memory from an `OpenClaw` workspace into this `ZeroClaw` workspace
Openclaw {
/// Optional path to `OpenClaw` workspace (defaults to ~/.openclaw/workspace)
@@ -171,7 +171,7 @@ pub enum MigrateCommands {
/// Cron subcommands
#[derive(Subcommand, Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
-pub enum CronCommands {
+pub(crate) enum CronCommands {
/// List all scheduled tasks
List,
/// Add a new scheduled task
@@ -286,7 +286,7 @@ Examples:
/// Integration subcommands
#[derive(Subcommand, Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
-pub enum IntegrationCommands {
+pub(crate) enum IntegrationCommands {
/// Show details about a specific integration
Info {
/// Integration name
From 861137b2b359da60020ad16644c2d666c0229d8c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Edvard=20Sch=C3=B8yen?=
<99178202+ecschoye@users.noreply.github.com>
Date: Fri, 20 Feb 2026 05:22:56 -0500
Subject: [PATCH 029/116] fix(security): deny unapproved tool calls on non-CLI
channels (#998)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
When autonomy is set to "supervised", the approval gate only prompted
interactively on CLI. On Telegram and other channels, all tool calls
were silently auto-approved with ApprovalResponse::Yes, including
high-risk tools like shell — completely bypassing supervised mode.
On non-CLI channels where interactive prompting is not possible, deny
tool calls that require approval instead of auto-approving. Users can
expand the auto_approve list in config to explicitly allow specific
tools on non-interactive channels.
Co-authored-by: Claude Opus 4.6
---
src/agent/loop_.rs | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/src/agent/loop_.rs b/src/agent/loop_.rs
index cd6b862a0..288ea27f8 100644
--- a/src/agent/loop_.rs
+++ b/src/agent/loop_.rs
@@ -1099,11 +1099,13 @@ pub(crate) async fn run_tool_call_loop(
arguments: call.arguments.clone(),
};
- // Only prompt interactively on CLI; auto-approve on other channels.
+ // On CLI, prompt interactively. On other channels where
+ // interactive approval is not possible, deny the call to
+ // respect the supervised autonomy setting.
let decision = if channel_name == "cli" {
mgr.prompt_cli(&request)
} else {
- ApprovalResponse::Yes
+ ApprovalResponse::No
};
mgr.record_decision(&call.name, &call.arguments, decision, channel_name);
From c649ced5850b9b66247babe2600f4374f2cb1963 Mon Sep 17 00:00:00 2001
From: fettpl <38704082+fettpl@users.noreply.github.com>
Date: Fri, 20 Feb 2026 11:23:20 +0100
Subject: [PATCH 030/116] fix(security): enforce cron agent autonomy and rate
gates (#626)
---
src/cron/scheduler.rs | 70 ++++++++++++++++++++++++++++++++++++++-----
1 file changed, 62 insertions(+), 8 deletions(-)
diff --git a/src/cron/scheduler.rs b/src/cron/scheduler.rs
index 6bf13556f..8d0d7b77a 100644
--- a/src/cron/scheduler.rs
+++ b/src/cron/scheduler.rs
@@ -61,7 +61,7 @@ async fn execute_job_with_retry(
for attempt in 0..=retries {
let (success, output) = match job.job_type {
JobType::Shell => run_job_command(config, security, job).await,
- JobType::Agent => run_agent_job(config, job).await,
+ JobType::Agent => run_agent_job(config, security, job).await,
};
last_output = output;
@@ -116,7 +116,31 @@ async fn execute_and_persist_job(
(job.id.clone(), success)
}
-async fn run_agent_job(config: &Config, job: &CronJob) -> (bool, String) {
+async fn run_agent_job(
+ config: &Config,
+ security: &SecurityPolicy,
+ job: &CronJob,
+) -> (bool, String) {
+ if !security.can_act() {
+ return (
+ false,
+ "blocked by security policy: autonomy is read-only".to_string(),
+ );
+ }
+
+ if security.is_rate_limited() {
+ return (
+ false,
+ "blocked by security policy: rate limit exceeded".to_string(),
+ );
+ }
+
+ if !security.record_action() {
+ return (
+ false,
+ "blocked by security policy: action budget exhausted".to_string(),
+ );
+ }
let name = job.name.clone().unwrap_or_else(|| "cron-job".to_string());
let prompt = job.prompt.clone().unwrap_or_default();
let prefixed_prompt = format!("[cron:{} {name}] {prompt}", job.id);
@@ -653,13 +677,43 @@ mod tests {
let mut job = test_job("");
job.job_type = JobType::Agent;
job.prompt = Some("Say hello".into());
+ let security = SecurityPolicy::from_config(&config.autonomy, &config.workspace_dir);
- let (success, output) = run_agent_job(&config, &job).await;
- assert!(!success, "Agent job without provider key should fail");
- assert!(
- !output.is_empty(),
- "Expected non-empty error output from failed agent job"
- );
+ let (success, output) = run_agent_job(&config, &security, &job).await;
+ assert!(!success);
+ assert!(output.contains("agent job failed:"));
+ }
+
+ #[tokio::test]
+ async fn run_agent_job_blocks_readonly_mode() {
+ let tmp = TempDir::new().unwrap();
+ let mut config = test_config(&tmp);
+ config.autonomy.level = crate::security::AutonomyLevel::ReadOnly;
+ let mut job = test_job("");
+ job.job_type = JobType::Agent;
+ job.prompt = Some("Say hello".into());
+ let security = SecurityPolicy::from_config(&config.autonomy, &config.workspace_dir);
+
+ let (success, output) = run_agent_job(&config, &security, &job).await;
+ assert!(!success);
+ assert!(output.contains("blocked by security policy"));
+ assert!(output.contains("read-only"));
+ }
+
+ #[tokio::test]
+ async fn run_agent_job_blocks_rate_limited() {
+ let tmp = TempDir::new().unwrap();
+ let mut config = test_config(&tmp);
+ config.autonomy.max_actions_per_hour = 0;
+ let mut job = test_job("");
+ job.job_type = JobType::Agent;
+ job.prompt = Some("Say hello".into());
+ let security = SecurityPolicy::from_config(&config.autonomy, &config.workspace_dir);
+
+ let (success, output) = run_agent_job(&config, &security, &job).await;
+ assert!(!success);
+ assert!(output.contains("blocked by security policy"));
+ assert!(output.contains("rate limit exceeded"));
}
#[tokio::test]
From ee7c4370614fce4f2233bfe04e544879b00f27fb Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Fri, 20 Feb 2026 05:25:38 -0500
Subject: [PATCH 031/116] chore(deps): bump probe-rs from 0.30.0 to 0.31.0
(#1076)
Bumps [probe-rs](https://github.com/probe-rs/probe-rs) from 0.30.0 to 0.31.0.
- [Release notes](https://github.com/probe-rs/probe-rs/releases)
- [Changelog](https://github.com/probe-rs/probe-rs/blob/master/CHANGELOG.md)
- [Commits](https://github.com/probe-rs/probe-rs/compare/v0.30.0...v0.31.0)
---
updated-dependencies:
- dependency-name: probe-rs
dependency-version: 0.31.0
dependency-type: direct:production
update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
Cargo.lock | 131 ++++++++++-------------------------------------------
Cargo.toml | 2 +-
2 files changed, 26 insertions(+), 107 deletions(-)
diff --git a/Cargo.lock b/Cargo.lock
index 3e912a2fc..05a277144 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -304,7 +304,7 @@ dependencies = [
"futures-lite",
"parking",
"polling",
- "rustix 1.1.3",
+ "rustix",
"slab",
"windows-sys 0.61.2",
]
@@ -475,6 +475,12 @@ version = "1.8.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2af50177e190e07a26ab74f8b1efbfe2ef87da2116221318cb1c2e82baf7de06"
+[[package]]
+name = "basic-udev"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5a45f9771ced8a774de5e5ebffbe520f52e3943bf5a9a6baa3a5d14a5de1afe6"
+
[[package]]
name = "bincode"
version = "2.0.1"
@@ -947,16 +953,6 @@ dependencies = [
"url",
]
-[[package]]
-name = "core-foundation"
-version = "0.9.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f"
-dependencies = [
- "core-foundation-sys",
- "libc",
-]
-
[[package]]
name = "core-foundation"
version = "0.10.1"
@@ -2181,12 +2177,6 @@ version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea"
-[[package]]
-name = "hermit-abi"
-version = "0.3.9"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024"
-
[[package]]
name = "hermit-abi"
version = "0.5.2"
@@ -2205,12 +2195,12 @@ version = "2.6.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "565dd4c730b8f8b2c0fb36df6be12e5470ae10895ddcc4e9dcfbfb495de202b0"
dependencies = [
+ "basic-udev",
"cc",
"cfg-if",
"libc",
"nix 0.27.1",
"pkg-config",
- "udev",
"windows-sys 0.48.0",
]
@@ -2707,17 +2697,6 @@ dependencies = [
"mach2 0.5.0",
]
-[[package]]
-name = "io-lifetimes"
-version = "1.0.11"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2"
-dependencies = [
- "hermit-abi 0.3.9",
- "libc",
- "windows-sys 0.48.0",
-]
-
[[package]]
name = "ipnet"
version = "2.11.0"
@@ -2926,22 +2905,6 @@ dependencies = [
"vcpkg",
]
-[[package]]
-name = "libudev-sys"
-version = "0.1.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3c8469b4a23b962c1396b9b451dda50ef5b283e8dd309d69033475fa9b334324"
-dependencies = [
- "libc",
- "pkg-config",
-]
-
-[[package]]
-name = "linux-raw-sys"
-version = "0.4.15"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab"
-
[[package]]
name = "linux-raw-sys"
version = "0.11.0"
@@ -3685,43 +3648,24 @@ version = "1.17.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "91df4bbde75afed763b708b7eee1e8e7651e02d97f6d5dd763e89367e957b23b"
dependencies = [
- "hermit-abi 0.5.2",
+ "hermit-abi",
"libc",
]
-[[package]]
-name = "nusb"
-version = "0.1.14"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2f861541f15de120eae5982923d073bfc0c1a65466561988c82d6e197734c19e"
-dependencies = [
- "atomic-waker",
- "core-foundation 0.9.4",
- "core-foundation-sys",
- "futures-core",
- "io-kit-sys 0.4.1",
- "libc",
- "log",
- "once_cell",
- "rustix 0.38.44",
- "slab",
- "windows-sys 0.48.0",
-]
-
[[package]]
name = "nusb"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5750d884c774a2862b0049b0318aea27cecc9e873485540af5ed8ab8841247da"
dependencies = [
- "core-foundation 0.10.1",
+ "core-foundation",
"core-foundation-sys",
"futures-core",
"io-kit-sys 0.5.0",
- "linux-raw-sys 0.11.0",
+ "linux-raw-sys",
"log",
"once_cell",
- "rustix 1.1.3",
+ "rustix",
"slab",
"windows-sys 0.61.2",
]
@@ -4161,9 +4105,9 @@ checksum = "5d0e4f59085d47d8241c88ead0f274e8a0cb551f3625263c05eb8dd897c34218"
dependencies = [
"cfg-if",
"concurrent-queue",
- "hermit-abi 0.5.2",
+ "hermit-abi",
"pin-project-lite",
- "rustix 1.1.3",
+ "rustix",
"windows-sys 0.61.2",
]
@@ -4294,9 +4238,9 @@ dependencies = [
[[package]]
name = "probe-rs"
-version = "0.30.0"
+version = "0.31.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7ee27329ac37fa02b194c62a4e3c1aa053739884ea7bcf861249866d3bf7de00"
+checksum = "ee50102aaa214117fc4fbe1311077835f0f4faa71e4a769bf65f955cc020ee34"
dependencies = [
"anyhow",
"async-io",
@@ -4313,8 +4257,8 @@ dependencies = [
"ihex",
"itertools 0.14.0",
"jep106",
- "nusb 0.1.14",
- "object 0.37.3",
+ "nusb",
+ "object 0.38.1",
"parking_lot",
"probe-rs-target",
"rmp-serde",
@@ -4330,9 +4274,9 @@ dependencies = [
[[package]]
name = "probe-rs-target"
-version = "0.30.0"
+version = "0.31.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2239aca5dc62c68ca6d8ff0051fe617cb8363b803380fbc60567e67c82b474df"
+checksum = "031bed1313b45d93dae4ca8f0fee098530c6632e4ebd9e2769d5a49cdef273d3"
dependencies = [
"base64",
"indexmap",
@@ -5081,19 +5025,6 @@ dependencies = [
"semver",
]
-[[package]]
-name = "rustix"
-version = "0.38.44"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fdb5bc1ae2baa591800df16c9ca78619bf65c0488b41b96ccec5d11220d8c154"
-dependencies = [
- "bitflags 2.11.0",
- "errno",
- "libc",
- "linux-raw-sys 0.4.15",
- "windows-sys 0.59.0",
-]
-
[[package]]
name = "rustix"
version = "1.1.3"
@@ -5103,7 +5034,7 @@ dependencies = [
"bitflags 2.11.0",
"errno",
"libc",
- "linux-raw-sys 0.11.0",
+ "linux-raw-sys",
"windows-sys 0.61.2",
]
@@ -5251,7 +5182,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d17b898a6d6948c3a8ee4372c17cb384f90d2e6e912ef00895b14fd7ab54ec38"
dependencies = [
"bitflags 2.11.0",
- "core-foundation 0.10.1",
+ "core-foundation",
"core-foundation-sys",
"libc",
"security-framework-sys",
@@ -5462,7 +5393,7 @@ checksum = "2acaf3f973e8616d7ceac415f53fc60e190b2a686fbcf8d27d0256c741c5007b"
dependencies = [
"bitflags 2.11.0",
"cfg-if",
- "core-foundation 0.10.1",
+ "core-foundation",
"core-foundation-sys",
"io-kit-sys 0.4.1",
"mach2 0.4.3",
@@ -5759,7 +5690,7 @@ dependencies = [
"fastrand",
"getrandom 0.4.1",
"once_cell",
- "rustix 1.1.3",
+ "rustix",
"windows-sys 0.61.2",
]
@@ -6392,18 +6323,6 @@ version = "1.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e36a83ea2b3c704935a01b4642946aadd445cea40b10935e3f8bd8052b8193d6"
-[[package]]
-name = "udev"
-version = "0.8.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "50051c6e22be28ee6f217d50014f3bc29e81c20dc66ff7ca0d5c5226e1dcc5a1"
-dependencies = [
- "io-lifetimes",
- "libc",
- "libudev-sys",
- "pkg-config",
-]
-
[[package]]
name = "uf2-decode"
version = "0.2.0"
@@ -7683,7 +7602,7 @@ dependencies = [
"lettre",
"mail-parser",
"matrix-sdk",
- "nusb 0.2.2",
+ "nusb",
"opentelemetry",
"opentelemetry-otlp",
"opentelemetry_sdk",
diff --git a/Cargo.toml b/Cargo.toml
index 468c01131..eda3932c2 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -142,7 +142,7 @@ tokio-serial = { version = "5", default-features = false, optional = true }
nusb = { version = "0.2", default-features = false, optional = true }
# probe-rs for STM32/Nucleo memory read (Phase B)
-probe-rs = { version = "0.30", optional = true }
+probe-rs = { version = "0.31", optional = true }
# PDF extraction for datasheet RAG (optional, enable with --features rag-pdf)
pdf-extract = { version = "0.10", optional = true }
From e04d114814c14eee84686985e867b978042a527d Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Fri, 20 Feb 2026 05:25:41 -0500
Subject: [PATCH 032/116] chore(deps): bump toml from 0.8.23 to
1.0.1+spec-1.1.0 (#1074)
Bumps [toml](https://github.com/toml-rs/toml) from 0.8.23 to 1.0.1+spec-1.1.0.
- [Commits](https://github.com/toml-rs/toml/compare/toml-v0.8.23...toml-v1.0.1)
---
updated-dependencies:
- dependency-name: toml
dependency-version: 1.0.1+spec-1.1.0
dependency-type: direct:production
update-type: version-update:semver-major
...
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
Cargo.lock | 58 +++----------------------------------
crates/robot-kit/Cargo.toml | 2 +-
2 files changed, 5 insertions(+), 55 deletions(-)
diff --git a/Cargo.lock b/Cargo.lock
index 05a277144..8c3b816a1 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -4292,7 +4292,7 @@ version = "3.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "219cb19e96be00ab2e37d6e299658a0cfa83e52429179969b0f0121b4ac46983"
dependencies = [
- "toml_edit 0.23.10+spec-1.0.0",
+ "toml_edit",
]
[[package]]
@@ -5327,15 +5327,6 @@ dependencies = [
"serde",
]
-[[package]]
-name = "serde_spanned"
-version = "0.6.9"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bf41e0cfaf7226dca15e8197172c295a782857fcb97fad1808a166870dee75a3"
-dependencies = [
- "serde",
-]
-
[[package]]
name = "serde_spanned"
version = "1.0.4"
@@ -5979,18 +5970,6 @@ dependencies = [
"tokio-util",
]
-[[package]]
-name = "toml"
-version = "0.8.23"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dc1beb996b9d83529a9e75c17a1686767d148d70663143c7854d8b4a09ced362"
-dependencies = [
- "serde",
- "serde_spanned 0.6.9",
- "toml_datetime 0.6.11",
- "toml_edit 0.22.27",
-]
-
[[package]]
name = "toml"
version = "0.9.12+spec-1.1.0"
@@ -5998,7 +5977,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cf92845e79fc2e2def6a5d828f0801e29a2f8acc037becc5ab08595c7d5e9863"
dependencies = [
"serde_core",
- "serde_spanned 1.0.4",
+ "serde_spanned",
"toml_datetime 0.7.5+spec-1.1.0",
"toml_parser",
"winnow 0.7.14",
@@ -6012,22 +5991,13 @@ checksum = "bbe30f93627849fa362d4a602212d41bb237dc2bd0f8ba0b2ce785012e124220"
dependencies = [
"indexmap",
"serde_core",
- "serde_spanned 1.0.4",
+ "serde_spanned",
"toml_datetime 1.0.0+spec-1.1.0",
"toml_parser",
"toml_writer",
"winnow 0.7.14",
]
-[[package]]
-name = "toml_datetime"
-version = "0.6.11"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "22cddaf88f4fbc13c51aebbf5f8eceb5c7c5a9da2ac40a13519eb5b0a0e8f11c"
-dependencies = [
- "serde",
-]
-
[[package]]
name = "toml_datetime"
version = "0.7.5+spec-1.1.0"
@@ -6046,20 +6016,6 @@ dependencies = [
"serde_core",
]
-[[package]]
-name = "toml_edit"
-version = "0.22.27"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a"
-dependencies = [
- "indexmap",
- "serde",
- "serde_spanned 0.6.9",
- "toml_datetime 0.6.11",
- "toml_write",
- "winnow 0.7.14",
-]
-
[[package]]
name = "toml_edit"
version = "0.23.10+spec-1.0.0"
@@ -6081,12 +6037,6 @@ dependencies = [
"winnow 0.7.14",
]
-[[package]]
-name = "toml_write"
-version = "0.1.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5d99f8c9a7727884afe522e9bd5edbfc91a3312b36a77b5fb8926e4c31a41801"
-
[[package]]
name = "toml_writer"
version = "1.0.6+spec-1.1.0"
@@ -7667,7 +7617,7 @@ dependencies = [
"thiserror 2.0.18",
"tokio",
"tokio-test",
- "toml 0.8.23",
+ "toml 1.0.1+spec-1.1.0",
"tracing",
]
diff --git a/crates/robot-kit/Cargo.toml b/crates/robot-kit/Cargo.toml
index 0752ce839..69eddd657 100644
--- a/crates/robot-kit/Cargo.toml
+++ b/crates/robot-kit/Cargo.toml
@@ -30,7 +30,7 @@ tokio = { version = "1.42", features = ["rt-multi-thread", "macros", "time", "sy
# Serialization
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
-toml = "0.8"
+toml = "1.0"
# HTTP client (for Ollama vision)
reqwest = { version = "0.12", default-features = false, features = ["json", "rustls-tls"] }
From bd7b59151a9c27bcdce98a4e33e58e4e96cafe09 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Fri, 20 Feb 2026 05:27:25 -0500
Subject: [PATCH 033/116] chore(deps): bump actions/download-artifact from
4.3.0 to 7.0.0 (#1073)
Bumps [actions/download-artifact](https://github.com/actions/download-artifact) from 4.3.0 to 7.0.0.
- [Release notes](https://github.com/actions/download-artifact/releases)
- [Commits](https://github.com/actions/download-artifact/compare/d3f86a106a0bac45b974a628896c90dbdf5c8093...37930b1c2abaa49bbe596cd826c3c89aef350131)
---
updated-dependencies:
- dependency-name: actions/download-artifact
dependency-version: 7.0.0
dependency-type: direct:production
update-type: version-update:semver-major
...
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
.github/workflows/pub-release.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/workflows/pub-release.yml b/.github/workflows/pub-release.yml
index e9479d8f2..14677b112 100644
--- a/.github/workflows/pub-release.yml
+++ b/.github/workflows/pub-release.yml
@@ -145,7 +145,7 @@ jobs:
- uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
- name: Download all artifacts
- uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4
+ uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0
with:
path: artifacts
From b23c2e7ae68736dfdc1703fe3582591e87d1540b Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Fri, 20 Feb 2026 05:29:23 -0500
Subject: [PATCH 034/116] chore(deps): bump rand from 0.9.2 to 0.10.0 (#1075)
* chore(deps): bump rand from 0.9.2 to 0.10.0
Bumps [rand](https://github.com/rust-random/rand) from 0.9.2 to 0.10.0.
- [Release notes](https://github.com/rust-random/rand/releases)
- [Changelog](https://github.com/rust-random/rand/blob/master/CHANGELOG.md)
- [Commits](https://github.com/rust-random/rand/compare/rand_core-0.9.2...0.10.0)
---
updated-dependencies:
- dependency-name: rand
dependency-version: 0.10.0
dependency-type: direct:production
update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot]
* fix(security): keep token generation compatible with rand 0.10
---------
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Will Sarg <12886992+willsarg@users.noreply.github.com>
---
Cargo.lock | 52 +++++++++++++++++++++++++++++++++--------
Cargo.toml | 2 +-
src/security/pairing.rs | 4 +---
3 files changed, 44 insertions(+), 14 deletions(-)
diff --git a/Cargo.lock b/Cargo.lock
index 8c3b816a1..72f07ed01 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -47,7 +47,7 @@ checksum = "b169f7a6d4742236a0a00c541b845991d0ac43e546831af1249753ab4c3aa3a0"
dependencies = [
"cfg-if",
"cipher",
- "cpufeatures",
+ "cpufeatures 0.2.17",
]
[[package]]
@@ -565,7 +565,7 @@ dependencies = [
"cc",
"cfg-if",
"constant_time_eq",
- "cpufeatures",
+ "cpufeatures 0.2.17",
]
[[package]]
@@ -701,7 +701,18 @@ checksum = "c3613f74bd2eac03dad61bd53dbe620703d4371614fe0bc3b9f04dd36fe4e818"
dependencies = [
"cfg-if",
"cipher",
- "cpufeatures",
+ "cpufeatures 0.2.17",
+]
+
+[[package]]
+name = "chacha20"
+version = "0.10.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6f8d983286843e49675a4b7a2d174efe136dc93a18d69130dd18198a6c167601"
+dependencies = [
+ "cfg-if",
+ "cpufeatures 0.3.0",
+ "rand_core 0.10.0",
]
[[package]]
@@ -711,7 +722,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "10cd79432192d1c0f4e1a0fef9527696cc039165d729fb41b3f4f4f354c2dc35"
dependencies = [
"aead",
- "chacha20",
+ "chacha20 0.9.1",
"cipher",
"poly1305",
"zeroize",
@@ -987,6 +998,15 @@ dependencies = [
"libc",
]
+[[package]]
+name = "cpufeatures"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8b2a41393f66f16b0823bb79094d54ac5fbd34ab292ddafb9a0456ac9f87d201"
+dependencies = [
+ "libc",
+]
+
[[package]]
name = "crc32fast"
version = "1.5.0"
@@ -1131,7 +1151,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "97fb8b7c4503de7d6ae7b42ab72a5a59857b4c937ec27a3d4539dba95b5ab2be"
dependencies = [
"cfg-if",
- "cpufeatures",
+ "cpufeatures 0.2.17",
"curve25519-dalek-derive",
"digest",
"fiat-crypto",
@@ -1985,6 +2005,7 @@ dependencies = [
"cfg-if",
"libc",
"r-efi",
+ "rand_core 0.10.0",
"wasip2",
"wasip3",
]
@@ -4117,7 +4138,7 @@ version = "0.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8159bd90725d2df49889a078b54f4f79e87f1f8a8444194cdca81d38f5393abf"
dependencies = [
- "cpufeatures",
+ "cpufeatures 0.2.17",
"opaque-debug",
"universal-hash",
]
@@ -4129,7 +4150,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9d1fe60d06143b2430aa532c94cfe9e29783047f06c0d7fd359a9a51b729fa25"
dependencies = [
"cfg-if",
- "cpufeatures",
+ "cpufeatures 0.2.17",
"opaque-debug",
"universal-hash",
]
@@ -4562,6 +4583,17 @@ dependencies = [
"rand_core 0.9.5",
]
+[[package]]
+name = "rand"
+version = "0.10.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bc266eb313df6c5c09c1c7b1fbe2510961e5bcd3add930c1e31f7ed9da0feff8"
+dependencies = [
+ "chacha20 0.10.0",
+ "getrandom 0.4.1",
+ "rand_core 0.10.0",
+]
+
[[package]]
name = "rand_chacha"
version = "0.3.1"
@@ -5401,7 +5433,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e3bf829a2d51ab4a5ddf1352d8470c140cadc8301b2ae1789db023f01cedd6ba"
dependencies = [
"cfg-if",
- "cpufeatures",
+ "cpufeatures 0.2.17",
"digest",
]
@@ -5412,7 +5444,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a7507d819769d01a365ab707794a4084392c824f54a7a6a7862f8c3d0892b283"
dependencies = [
"cfg-if",
- "cpufeatures",
+ "cpufeatures 0.2.17",
"digest",
]
@@ -7562,7 +7594,7 @@ dependencies = [
"probe-rs",
"prometheus",
"prost 0.14.3",
- "rand 0.9.2",
+ "rand 0.10.0",
"regex",
"reqwest",
"ring",
diff --git a/Cargo.toml b/Cargo.toml
index eda3932c2..31b5632d3 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -72,7 +72,7 @@ sha2 = "0.10"
hex = "0.4"
# CSPRNG for secure token generation
-rand = "0.9"
+rand = "0.10"
# serde-big-array for wa-rs storage (large array serialization)
serde-big-array = { version = "0.5", optional = true }
diff --git a/src/security/pairing.rs b/src/security/pairing.rs
index b772b386f..232d3d3e7 100644
--- a/src/security/pairing.rs
+++ b/src/security/pairing.rs
@@ -215,9 +215,7 @@ fn generate_code() -> String {
/// on macOS). The 32 random bytes (256 bits) are hex-encoded for a
/// 64-character token, providing 256 bits of entropy.
fn generate_token() -> String {
- use rand::RngCore;
- let mut bytes = [0u8; 32];
- rand::rng().fill_bytes(&mut bytes);
+ let bytes: [u8; 32] = rand::random();
format!("zc_{}", hex::encode(bytes))
}
From 4abd1b44714367ae067e12f4cc225a9e13e38b30 Mon Sep 17 00:00:00 2001
From: pluginmd
Date: Fri, 20 Feb 2026 17:49:29 +0700
Subject: [PATCH 035/116] docs(i18n): add Vietnamese README translation
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Add full Vietnamese (Tiếng Việt) translation of README.md and update
language selector links across existing README files.
Co-Authored-By: Claude Opus 4.6
---
README.md | 2 +-
README.vn.md | 1051 ++++++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 1052 insertions(+), 1 deletion(-)
create mode 100644 README.vn.md
diff --git a/README.md b/README.md
index 4a7f4bed5..07c51bc8f 100644
--- a/README.md
+++ b/README.md
@@ -25,7 +25,7 @@ Built by students and members of the Harvard, MIT, and Sundai.Club communities.
- 🌐 Languages: English · 简体中文 · 日本語 · Русский
+ 🌐 Languages: English · 简体中文 · 日本語 · Русский · Tiếng Việt
diff --git a/README.vn.md b/README.vn.md
new file mode 100644
index 000000000..7e5559b0a
--- /dev/null
+++ b/README.vn.md
@@ -0,0 +1,1051 @@
+
+
+
+
+ZeroClaw 🦀
+
+
+ Không tốn thêm tài nguyên. Không đánh đổi. 100% Rust. 100% Đa nền tảng.
+ ⚡️ Chạy trên phần cứng $10 với RAM dưới 5MB — ít hơn 99% bộ nhớ so với OpenClaw, rẻ hơn 98% so với Mac mini!
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+Được xây dựng bởi sinh viên và thành viên của các cộng đồng Harvard, MIT và Sundai.Club.
+
+
+
+ 🌐 Ngôn ngữ: English · 简体中文 · 日本語 · Русский · Tiếng Việt
+
+
+
+ Bắt đầu |
+ Cài đặt một lần bấm |
+ Trung tâm tài liệu |
+ Mục lục tài liệu
+
+
+
+ Truy cập nhanh:
+ Tài liệu tham khảo ·
+ Vận hành ·
+ Khắc phục sự cố ·
+ Bảo mật ·
+ Phần cứng ·
+ Đóng góp
+
+
+
+ Hạ tầng trợ lý AI tự chủ — nhanh, nhỏ gọn
+ Triển khai ở đâu cũng được. Thay thế gì cũng được.
+
+
+Kiến trúc trait-driven · mặc định bảo mật · provider/channel/tool hoán đổi tự do · mọi thứ đều dễ mở rộng
+
+### 📢 Thông báo
+
+Bảng này dành cho các thông báo quan trọng (thay đổi không tương thích, cảnh báo bảo mật, lịch bảo trì, vấn đề chặn release).
+
+| Ngày (UTC) | Mức độ | Thông báo | Hành động |
+|---|---|---|---|
+| 2026-02-19 | _Nghiêm trọng_ | Chúng tôi **không có liên kết** với `openagen/zeroclaw` hoặc `zeroclaw.org`. Tên miền `zeroclaw.org` hiện đang trỏ đến fork `openagen/zeroclaw`, và tên miền/repository đó đang mạo danh website/dự án chính thức của chúng tôi. | Không tin tưởng thông tin, binary, gây quỹ, hay thông báo từ các nguồn đó. Chỉ sử dụng repository này và các tài khoản mạng xã hội đã được xác minh của chúng tôi. |
+| 2026-02-19 | _Quan trọng_ | Chúng tôi **chưa** ra mắt website chính thức, và chúng tôi đang ghi nhận các nỗ lực mạo danh. **Không** tham gia bất kỳ hoạt động đầu tư hoặc gây quỹ nào tuyên bố mang tên ZeroClaw. | Sử dụng repository này làm nguồn thông tin duy nhất đáng tin cậy. Theo dõi [X (@zeroclawlabs)](https://x.com/zeroclawlabs?s=21), [Reddit (r/zeroclawlabs)](https://www.reddit.com/r/zeroclawlabs/), [Telegram (@zeroclawlabs)](https://t.me/zeroclawlabs), [Telegram CN (@zeroclawlabs_cn)](https://t.me/zeroclawlabs_cn), [Telegram RU (@zeroclawlabs_ru)](https://t.me/zeroclawlabs_ru), và [Xiaohongshu](https://www.xiaohongshu.com/user/profile/67cbfc43000000000d008307?xsec_token=AB73VnYnGNx5y36EtnnZfGmAmS-6Wzv8WMuGpfwfkg6Yc%3D&xsec_source=pc_search) để nhận cập nhật chính thức. |
+| 2026-02-19 | _Quan trọng_ | Anthropic đã cập nhật điều khoản Xác thực và Sử dụng Thông tin xác thực vào ngày 2026-02-19. Xác thực OAuth (Free, Pro, Max) được dành riêng cho Claude Code và Claude.ai; việc sử dụng OAuth token từ Claude Free/Pro/Max trong bất kỳ sản phẩm, công cụ hay dịch vụ nào khác (bao gồm Agent SDK) đều không được phép và có thể vi phạm Điều khoản Dịch vụ cho Người tiêu dùng. | Vui lòng tạm thời tránh tích hợp Claude Code OAuth để ngăn ngừa khả năng mất mát. Điều khoản gốc: [Authentication and Credential Use](https://code.claude.com/docs/en/legal-and-compliance#authentication-and-credential-use). |
+
+### ✨ Tính năng
+
+- 🏎️ **Mặc định tinh gọn:** Các tác vụ CLI và kiểm tra trạng thái chỉ tốn vài MB bộ nhớ trên bản release.
+- 💰 **Triển khai rẻ:** Chạy tốt trên board giá rẻ và instance cloud nhỏ, không cần runtime nặng.
+- ⚡ **Khởi động lạnh nhanh:** Một binary Rust duy nhất — lệnh và daemon khởi động gần như tức thì.
+- 🌍 **Chạy ở đâu cũng được:** Một binary chạy trên ARM, x86 và RISC-V — provider/channel/tool hoán đổi tự do.
+
+### Vì sao các team chọn ZeroClaw
+
+- **Mặc định tinh gọn:** binary Rust nhỏ, khởi động nhanh, tốn ít bộ nhớ.
+- **Bảo mật từ gốc:** xác thực ghép cặp, sandbox nghiêm ngặt, allowlist rõ ràng, giới hạn workspace.
+- **Hoán đổi tự do:** mọi hệ thống cốt lõi đều là trait (provider, channel, tool, memory, tunnel).
+- **Không khoá vendor:** hỗ trợ provider tương thích OpenAI + endpoint tùy chỉnh dễ dàng mở rộng.
+
+## So sánh hiệu suất (ZeroClaw vs OpenClaw, có thể tái tạo)
+
+Đo nhanh trên máy cục bộ (macOS arm64, tháng 2/2026), quy đổi cho phần cứng edge 0.8GHz.
+
+| | OpenClaw | NanoBot | PicoClaw | ZeroClaw 🦀 |
+|---|---|---|---|---|
+| **Ngôn ngữ** | TypeScript | Python | Go | **Rust** |
+| **RAM** | > 1GB | > 100MB | < 10MB | **< 5MB** |
+| **Khởi động (lõi 0.8GHz)** | > 500s | > 30s | < 1s | **< 10ms** |
+| **Kích thước binary** | ~28MB (dist) | N/A (Scripts) | ~8MB | **3.4 MB** |
+| **Chi phí** | Mac Mini $599 | Linux SBC ~$50 | Linux Board $10 | **Phần cứng bất kỳ $10** |
+
+> Ghi chú: Kết quả ZeroClaw được đo trên release build sử dụng `/usr/bin/time -l`. OpenClaw yêu cầu runtime Node.js (thường thêm ~390MB bộ nhớ overhead), còn NanoBot yêu cầu runtime Python. PicoClaw và ZeroClaw là các static binary. Số RAM ở trên là bộ nhớ runtime; yêu cầu biên dịch lúc build-time sẽ cao hơn.
+
+
+
+
+
+### Tự đo trên máy bạn
+
+Kết quả benchmark thay đổi theo code và toolchain, nên hãy tự đo bản build hiện tại:
+
+```bash
+cargo build --release
+ls -lh target/release/zeroclaw
+
+/usr/bin/time -l target/release/zeroclaw --help
+/usr/bin/time -l target/release/zeroclaw status
+```
+
+Ví dụ mẫu (macOS arm64, đo ngày 18 tháng 2 năm 2026):
+
+- Kích thước binary release: `8.8M`
+- `zeroclaw --help`: khoảng `0.02s`, bộ nhớ đỉnh ~`3.9MB`
+- `zeroclaw status`: khoảng `0.01s`, bộ nhớ đỉnh ~`4.1MB`
+
+## Yêu cầu hệ thống
+
+
+Windows
+
+#### Bắt buộc
+
+1. **Visual Studio Build Tools** (cung cấp MSVC linker và Windows SDK):
+ ```powershell
+ winget install Microsoft.VisualStudio.2022.BuildTools
+ ```
+ Trong quá trình cài đặt (hoặc qua Visual Studio Installer), chọn workload **"Desktop development with C++"**.
+
+2. **Rust toolchain:**
+ ```powershell
+ winget install Rustlang.Rustup
+ ```
+ Sau khi cài đặt, mở terminal mới và chạy `rustup default stable` để đảm bảo toolchain stable đang hoạt động.
+
+3. **Xác minh** cả hai đang hoạt động:
+ ```powershell
+ rustc --version
+ cargo --version
+ ```
+
+#### Tùy chọn
+
+- **Docker Desktop** — chỉ cần thiết nếu dùng [Docker sandboxed runtime](#runtime-support-current) (`runtime.kind = "docker"`). Cài đặt qua `winget install Docker.DockerDesktop`.
+
+
+
+
+Linux / macOS
+
+#### Bắt buộc
+
+1. **Công cụ build cơ bản:**
+ - **Linux (Debian/Ubuntu):** `sudo apt install build-essential pkg-config`
+ - **Linux (Fedora/RHEL):** `sudo dnf group install development-tools && sudo dnf install pkg-config`
+ - **macOS:** Cài đặt Xcode Command Line Tools: `xcode-select --install`
+
+2. **Rust toolchain:**
+ ```bash
+ curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
+ ```
+ Xem [rustup.rs](https://rustup.rs) để biết thêm chi tiết.
+
+3. **Xác minh** cả hai đang hoạt động:
+ ```bash
+ rustc --version
+ cargo --version
+ ```
+
+#### Cài bằng một lệnh
+
+Hoặc bỏ qua các bước trên, cài hết mọi thứ (system deps, Rust, ZeroClaw) chỉ bằng một lệnh:
+
+```bash
+curl -LsSf https://raw.githubusercontent.com/zeroclaw-labs/zeroclaw/main/scripts/install.sh | bash
+```
+
+#### Yêu cầu tài nguyên biên dịch
+
+Việc build từ source đòi hỏi nhiều tài nguyên hơn so với chạy binary kết quả:
+
+| Tài nguyên | Tối thiểu | Khuyến nghị |
+|---|---|---|
+| **RAM + swap** | 2 GB | 4 GB+ |
+| **Dung lượng đĩa trống** | 6 GB | 10 GB+ |
+
+Nếu cấu hình máy thấp hơn mức tối thiểu, dùng binary có sẵn:
+
+```bash
+./bootstrap.sh --prefer-prebuilt
+```
+
+Chỉ cài từ binary, không quay lại build từ source:
+
+```bash
+./bootstrap.sh --prebuilt-only
+```
+
+#### Tùy chọn
+
+- **Docker** — chỉ cần thiết nếu dùng [Docker sandboxed runtime](#runtime-support-current) (`runtime.kind = "docker"`). Cài đặt qua package manager hoặc [docker.com](https://docs.docker.com/engine/install/).
+
+> **Lưu ý:** Lệnh `cargo build --release` mặc định dùng `codegen-units=1` để giảm áp lực biên dịch đỉnh. Để build nhanh hơn trên máy mạnh, dùng `cargo build --profile release-fast`.
+
+
+
+
+## Bắt đầu nhanh
+
+### Homebrew (macOS/Linuxbrew)
+
+```bash
+brew install zeroclaw
+```
+
+### Bootstrap một lần bấm
+
+```bash
+# Khuyến nghị: clone rồi chạy script bootstrap cục bộ
+git clone https://github.com/zeroclaw-labs/zeroclaw.git
+cd zeroclaw
+./bootstrap.sh
+
+# Tùy chọn: cài đặt system dependencies + Rust trên máy mới
+./bootstrap.sh --install-system-deps --install-rust
+
+# Tùy chọn: ưu tiên binary dựng sẵn (khuyến nghị cho máy ít RAM/ít dung lượng đĩa)
+./bootstrap.sh --prefer-prebuilt
+
+# Tùy chọn: cài đặt chỉ từ binary (không fallback sang build source)
+./bootstrap.sh --prebuilt-only
+
+# Tùy chọn: chạy onboarding trong cùng luồng
+./bootstrap.sh --onboard --api-key "sk-..." --provider openrouter [--model "openrouter/auto"]
+
+# Tùy chọn: chạy bootstrap + onboarding hoàn toàn trong Docker
+./bootstrap.sh --docker
+```
+
+Cài từ xa bằng một lệnh (nên xem trước nếu môi trường nhạy cảm về bảo mật):
+
+```bash
+curl -fsSL https://raw.githubusercontent.com/zeroclaw-labs/zeroclaw/main/scripts/bootstrap.sh | bash
+```
+
+Chi tiết: [`docs/one-click-bootstrap.md`](docs/one-click-bootstrap.md) (chế độ toolchain có thể yêu cầu `sudo` cho các gói hệ thống).
+
+### Binary có sẵn
+
+Release asset được phát hành cho:
+
+- Linux: `x86_64`, `aarch64`, `armv7`
+- macOS: `x86_64`, `aarch64`
+- Windows: `x86_64`
+
+Tải asset mới nhất tại:
+
+
+Ví dụ (ARM64 Linux):
+
+```bash
+curl -fsSLO https://github.com/zeroclaw-labs/zeroclaw/releases/latest/download/zeroclaw-aarch64-unknown-linux-gnu.tar.gz
+tar xzf zeroclaw-aarch64-unknown-linux-gnu.tar.gz
+install -m 0755 zeroclaw "$HOME/.cargo/bin/zeroclaw"
+```
+
+```bash
+git clone https://github.com/zeroclaw-labs/zeroclaw.git
+cd zeroclaw
+cargo build --release --locked
+cargo install --path . --force --locked
+
+# Đảm bảo ~/.cargo/bin có trong PATH của bạn
+export PATH="$HOME/.cargo/bin:$PATH"
+
+# Cài nhanh (không cần tương tác, có thể chỉ định model)
+zeroclaw onboard --api-key sk-... --provider openrouter [--model "openrouter/auto"]
+
+# Hoặc dùng trình hướng dẫn tương tác
+zeroclaw onboard --interactive
+
+# Hoặc chỉ sửa nhanh channel/allowlist
+zeroclaw onboard --channels-only
+
+# Chat
+zeroclaw agent -m "Hello, ZeroClaw!"
+
+# Chế độ tương tác
+zeroclaw agent
+
+# Khởi động gateway (webhook server)
+zeroclaw gateway # mặc định: 127.0.0.1:3000
+zeroclaw gateway --port 0 # cổng ngẫu nhiên (tăng cường bảo mật)
+
+# Khởi động runtime tự trị đầy đủ
+zeroclaw daemon
+
+# Kiểm tra trạng thái
+zeroclaw status
+zeroclaw auth status
+
+# Chạy chẩn đoán hệ thống
+zeroclaw doctor
+
+# Kiểm tra sức khỏe channel
+zeroclaw channel doctor
+
+# Gắn định danh Telegram vào allowlist
+zeroclaw channel bind-telegram 123456789
+
+# Lấy thông tin cài đặt tích hợp
+zeroclaw integrations info Telegram
+
+# Lưu ý: Channel (Telegram, Discord, Slack) yêu cầu daemon đang chạy
+# zeroclaw daemon
+
+# Quản lý dịch vụ nền
+zeroclaw service install
+zeroclaw service status
+zeroclaw service restart
+
+# Chuyển dữ liệu từ OpenClaw (chạy thử trước)
+zeroclaw migrate openclaw --dry-run
+zeroclaw migrate openclaw
+```
+
+> **Chạy trực tiếp khi phát triển (không cần cài toàn cục):** thêm `cargo run --release --` trước lệnh (ví dụ: `cargo run --release -- status`).
+
+## Xác thực theo gói đăng ký (OpenAI Codex / Claude Code)
+
+ZeroClaw hỗ trợ profile xác thực theo gói đăng ký (đa tài khoản, mã hóa khi lưu).
+
+- File lưu trữ: `~/.zeroclaw/auth-profiles.json`
+- Khóa mã hóa: `~/.zeroclaw/.secret_key`
+- Định dạng profile id: `<provider>:<profile>` (ví dụ: `openai-codex:work`)
+
+OpenAI Codex OAuth (đăng ký ChatGPT):
+
+```bash
+# Khuyến nghị trên server/headless
+zeroclaw auth login --provider openai-codex --device-code
+
+# Luồng Browser/callback với fallback paste
+zeroclaw auth login --provider openai-codex --profile default
+zeroclaw auth paste-redirect --provider openai-codex --profile default
+
+# Kiểm tra / làm mới / chuyển profile
+zeroclaw auth status
+zeroclaw auth refresh --provider openai-codex --profile default
+zeroclaw auth use --provider openai-codex --profile work
+```
+
+Claude Code / Anthropic setup-token:
+
+```bash
+# Dán token đăng ký/setup (chế độ Authorization header)
+zeroclaw auth paste-token --provider anthropic --profile default --auth-kind authorization
+
+# Lệnh alias
+zeroclaw auth setup-token --provider anthropic --profile default
+```
+
+Chạy agent với xác thực đăng ký:
+
+```bash
+zeroclaw agent --provider openai-codex -m "hello"
+zeroclaw agent --provider openai-codex --auth-profile openai-codex:work -m "hello"
+
+# Anthropic hỗ trợ cả API key và biến môi trường auth token:
+# ANTHROPIC_AUTH_TOKEN, ANTHROPIC_OAUTH_TOKEN, ANTHROPIC_API_KEY
+zeroclaw agent --provider anthropic -m "hello"
+```
+
+## Kiến trúc
+
+Mọi hệ thống con đều là **trait** — chỉ cần đổi cấu hình, không cần sửa code.
+
+
+
+
+
+| Hệ thống con | Trait | Đi kèm sẵn | Mở rộng |
+|-----------|-------|------------|--------|
+| **Mô hình AI** | `Provider` | Danh mục provider qua `zeroclaw providers` (hiện có 28 built-in + alias, cộng endpoint tùy chỉnh) | `custom:https://your-api.com` (tương thích OpenAI) hoặc `anthropic-custom:https://your-api.com` |
+| **Channel** | `Channel` | CLI, Telegram, Discord, Slack, Mattermost, iMessage, Matrix, Signal, WhatsApp, Email, IRC, Lark, DingTalk, QQ, Webhook | Bất kỳ messaging API nào |
+| **Memory** | `Memory` | SQLite hybrid search, PostgreSQL backend (storage provider có thể cấu hình), Lucid bridge, Markdown files, backend `none` tường minh, snapshot/hydrate, response cache tùy chọn | Bất kỳ persistence backend nào |
+| **Tool** | `Tool` | shell/file/memory, cron/schedule, git, pushover, browser, http_request, screenshot/image_info, composio (opt-in), delegate, hardware tools | Bất kỳ khả năng nào |
+| **Observability** | `Observer` | Noop, Log, Multi | Prometheus, OTel |
+| **Runtime** | `RuntimeAdapter` | Native, Docker (sandboxed) | Có thể thêm runtime bổ sung qua adapter; các kind không được hỗ trợ sẽ fail nhanh |
+| **Bảo mật** | `SecurityPolicy` | Ghép cặp gateway, sandbox, allowlist, giới hạn tốc độ, phân vùng filesystem, secret mã hóa | — |
+| **Định danh** | `IdentityConfig` | OpenClaw (markdown), AIEOS v1.1 (JSON) | Bất kỳ định dạng định danh nào |
+| **Tunnel** | `Tunnel` | None, Cloudflare, Tailscale, ngrok, Custom | Bất kỳ tunnel binary nào |
+| **Heartbeat** | Engine | Tác vụ định kỳ HEARTBEAT.md | — |
+| **Skill** | Loader | TOML manifest + hướng dẫn SKILL.md | Community skill pack |
+| **Tích hợp** | Registry | 70+ tích hợp trong 9 danh mục | Plugin system |
+
+### Hỗ trợ runtime (hiện tại)
+
+- ✅ Được hỗ trợ hiện nay: `runtime.kind = "native"` hoặc `runtime.kind = "docker"`
+- 🚧 Đã lên kế hoạch, chưa triển khai: WASM / edge runtime
+
+Khi cấu hình `runtime.kind` không được hỗ trợ, ZeroClaw sẽ thoát với thông báo lỗi rõ ràng thay vì âm thầm fallback về native.
+
+### Hệ thống Memory (Search Engine toàn diện)
+
+Tự phát triển hoàn toàn, không phụ thuộc bên ngoài — không Pinecone, không Elasticsearch, không LangChain:
+
+| Lớp | Triển khai |
+|-------|---------------|
+| **Vector DB** | Embeddings lưu dưới dạng BLOB trong SQLite, tìm kiếm cosine similarity |
+| **Keyword Search** | Bảng ảo FTS5 với BM25 scoring |
+| **Hybrid Merge** | Hàm merge có trọng số tùy chỉnh (`vector.rs`) |
+| **Embeddings** | Trait `EmbeddingProvider` — OpenAI, URL tùy chỉnh, hoặc noop |
+| **Chunking** | Bộ chia đoạn markdown theo dòng, giữ nguyên heading |
+| **Caching** | Bảng SQLite `embedding_cache` với LRU eviction |
+| **Safe Reindex** | Rebuild FTS5 + re-embed các vector bị thiếu theo cách nguyên tử |
+
+Agent tự động ghi nhớ, lưu trữ và quản lý memory qua các tool.
+
+```toml
+[memory]
+backend = "sqlite" # "sqlite", "lucid", "postgres", "markdown", "none"
+auto_save = true
+embedding_provider = "none" # "none", "openai", "custom:https://..."
+vector_weight = 0.7
+keyword_weight = 0.3
+
+# backend = "none" sử dụng no-op memory backend tường minh (không có persistence)
+
+# Tùy chọn: ghi đè storage-provider cho remote memory backend.
+# Khi provider = "postgres", ZeroClaw dùng PostgreSQL để lưu memory.
+# Khóa db_url cũng chấp nhận alias `dbURL` để tương thích ngược.
+#
+# [storage.provider.config]
+# provider = "postgres"
+# db_url = "postgres://user:password@host:5432/zeroclaw"
+# schema = "public"
+# table = "memories"
+# connect_timeout_secs = 15
+
+# Tùy chọn cho backend = "sqlite": số giây tối đa chờ khi mở DB (ví dụ: file bị khóa). Bỏ qua hoặc để trống để không có timeout.
+# sqlite_open_timeout_secs = 30
+
+# Tùy chọn cho backend = "lucid"
+# ZEROCLAW_LUCID_CMD=/usr/local/bin/lucid # mặc định: lucid
+# ZEROCLAW_LUCID_BUDGET=200 # mặc định: 200
+# ZEROCLAW_LUCID_LOCAL_HIT_THRESHOLD=3 # số lần hit cục bộ để bỏ qua external recall
+# ZEROCLAW_LUCID_RECALL_TIMEOUT_MS=120 # giới hạn thời gian cho lucid context recall
+# ZEROCLAW_LUCID_STORE_TIMEOUT_MS=800 # timeout đồng bộ async cho lucid store
+# ZEROCLAW_LUCID_FAILURE_COOLDOWN_MS=15000 # thời gian nghỉ sau lỗi lucid, tránh thử lại liên tục
+```
+
+## Bảo mật
+
+ZeroClaw thực thi bảo mật ở **mọi lớp** — không chỉ sandbox. Đáp ứng tất cả các hạng mục trong danh sách kiểm tra bảo mật của cộng đồng.
+
+### Danh sách kiểm tra bảo mật
+
+| # | Hạng mục | Trạng thái | Cách thực hiện |
+|---|------|--------|-----|
+| 1 | **Gateway không công khai ra ngoài** | ✅ | Bind vào `127.0.0.1` theo mặc định. Từ chối `0.0.0.0` nếu không có tunnel hoặc `allow_public_bind = true` tường minh. |
+| 2 | **Yêu cầu ghép cặp** | ✅ | Mã một lần 6 chữ số khi khởi động. Trao đổi qua `POST /pair` để lấy bearer token. Mọi yêu cầu `/webhook` đều cần `Authorization: Bearer <token>`. |
+| 3 | **Phân vùng filesystem (không phải /)** | ✅ | `workspace_only = true` theo mặc định. Chặn 14 thư mục hệ thống + 4 dotfile nhạy cảm. Chặn null byte injection. Phát hiện symlink escape qua canonicalization + kiểm tra resolved-path trong các tool đọc/ghi file. |
+| 4 | **Chỉ truy cập qua tunnel** | ✅ | Gateway từ chối bind công khai khi không có tunnel đang hoạt động. Hỗ trợ Tailscale, Cloudflare, ngrok, hoặc tunnel tùy chỉnh. |
+
+> **Tự chạy nmap:** `nmap -p 1-65535 <host>` — ZeroClaw chỉ bind vào localhost, nên không có gì bị lộ ra ngoài trừ khi bạn cấu hình tunnel tường minh.
+
+### Allowlist channel (từ chối theo mặc định)
+
+Chính sách kiểm soát người gửi đã được thống nhất:
+
+- Allowlist rỗng = **từ chối tất cả tin nhắn đến**
+- `"*"` = **cho phép tất cả** (phải opt-in tường minh)
+- Nếu khác = allowlist khớp chính xác
+
+Mặc định an toàn, hạn chế tối đa rủi ro lộ thông tin.
+
+Tài liệu tham khảo đầy đủ về cấu hình channel: [docs/channels-reference.md](docs/channels-reference.md).
+
+Cài đặt được khuyến nghị (bảo mật + nhanh):
+
+- **Telegram:** thêm `@username` của bạn (không có `@`) và/hoặc Telegram user ID số vào allowlist.
+- **Discord:** thêm Discord user ID của bạn vào allowlist.
+- **Slack:** thêm Slack member ID của bạn (thường bắt đầu bằng `U`) vào allowlist.
+- **Mattermost:** dùng API v4 tiêu chuẩn. Allowlist dùng Mattermost user ID.
+- Chỉ dùng `"*"` cho kiểm thử mở tạm thời.
+
+Luồng phê duyệt của operator qua Telegram:
+
+1. Để `[channels_config.telegram].allowed_users = []` để từ chối theo mặc định khi khởi động.
+2. Người dùng không được phép sẽ nhận được gợi ý kèm lệnh operator có thể copy:
+   `zeroclaw channel bind-telegram <telegram-user-id>`.
+3. Operator chạy lệnh đó tại máy cục bộ, sau đó người dùng thử gửi tin nhắn lại.
+
+Nếu cần phê duyệt thủ công một lần, chạy:
+
+```bash
+zeroclaw channel bind-telegram 123456789
+```
+
+Nếu bạn không chắc định danh nào cần dùng:
+
+1. Khởi động channel và gửi một tin nhắn đến bot của bạn.
+2. Đọc log cảnh báo để thấy định danh người gửi chính xác.
+3. Thêm giá trị đó vào allowlist và chạy lại channel-only setup.
+
+Nếu bạn thấy cảnh báo ủy quyền trong log (ví dụ: `ignoring message from unauthorized user`),
+chạy lại channel setup:
+
+```bash
+zeroclaw onboard --channels-only
+```
+
+### Phản hồi media Telegram
+
+Telegram định tuyến phản hồi theo **chat ID nguồn** (thay vì username),
+tránh lỗi `Bad Request: chat not found`.
+
+Với các phản hồi không phải văn bản, ZeroClaw có thể gửi file đính kèm Telegram khi assistant bao gồm các marker:
+
+- `[IMAGE:<path>]`
+- `[DOCUMENT:<path>]`
+- `[VIDEO:<path>]`
+- `[AUDIO:<path>]`
+- `[VOICE:<path>]`
+
+Path có thể là file cục bộ (ví dụ `/tmp/screenshot.png`) hoặc URL HTTPS.
+
+### Cài đặt WhatsApp
+
+ZeroClaw hỗ trợ hai backend WhatsApp:
+
+- **Chế độ WhatsApp Web** (QR / pair code, không cần Meta Business API)
+- **Chế độ WhatsApp Business Cloud API** (luồng webhook chính thức của Meta)
+
+#### Chế độ WhatsApp Web (khuyến nghị cho dùng cá nhân/self-hosted)
+
+1. **Build với hỗ trợ WhatsApp Web:**
+ ```bash
+ cargo build --features whatsapp-web
+ ```
+
+2. **Cấu hình ZeroClaw:**
+ ```toml
+ [channels_config.whatsapp]
+ session_path = "~/.zeroclaw/state/whatsapp-web/session.db"
+ pair_phone = "15551234567" # tùy chọn; bỏ qua để dùng luồng QR
+ pair_code = "" # tùy chọn mã pair tùy chỉnh
+ allowed_numbers = ["+1234567890"] # định dạng E.164, hoặc ["*"] cho tất cả
+ ```
+
+3. **Khởi động channel/daemon và liên kết thiết bị:**
+ - Chạy `zeroclaw channel start` (hoặc `zeroclaw daemon`).
+ - Làm theo hướng dẫn ghép cặp trên terminal (QR hoặc pair code).
+ - Trên WhatsApp điện thoại: **Cài đặt → Thiết bị đã liên kết**.
+
+4. **Kiểm tra:** Gửi tin nhắn từ số được phép và xác nhận agent trả lời.
+
+#### Chế độ WhatsApp Business Cloud API
+
+WhatsApp dùng Cloud API của Meta với webhook (push-based, không phải polling):
+
+1. **Tạo Meta Business App:**
+ - Truy cập [developers.facebook.com](https://developers.facebook.com)
+ - Tạo app mới → Chọn loại "Business"
+ - Thêm sản phẩm "WhatsApp"
+
+2. **Lấy thông tin xác thực:**
+ - **Access Token:** Từ WhatsApp → API Setup → Generate token (hoặc tạo System User cho token vĩnh viễn)
+ - **Phone Number ID:** Từ WhatsApp → API Setup → Phone number ID
+ - **Verify Token:** Bạn tự định nghĩa (bất kỳ chuỗi ngẫu nhiên nào) — Meta sẽ gửi lại trong quá trình xác minh webhook
+
+3. **Cấu hình ZeroClaw:**
+ ```toml
+ [channels_config.whatsapp]
+ access_token = "EAABx..."
+ phone_number_id = "123456789012345"
+ verify_token = "my-secret-verify-token"
+ allowed_numbers = ["+1234567890"] # định dạng E.164, hoặc ["*"] cho tất cả
+ ```
+
+4. **Khởi động gateway với tunnel:**
+ ```bash
+ zeroclaw gateway --port 3000
+ ```
+ WhatsApp yêu cầu HTTPS, vì vậy hãy dùng tunnel (ngrok, Cloudflare, Tailscale Funnel).
+
+5. **Cấu hình Meta webhook:**
+ - Trong Meta Developer Console → WhatsApp → Configuration → Webhook
+ - **Callback URL:** `https://your-tunnel-url/whatsapp`
+ - **Verify Token:** Giống với `verify_token` trong config của bạn
+ - Đăng ký nhận trường `messages`
+
+6. **Kiểm tra:** Gửi tin nhắn đến số WhatsApp Business của bạn — ZeroClaw sẽ phản hồi qua LLM.
+
+## Cấu hình
+
+Config: `~/.zeroclaw/config.toml` (được tạo bởi `onboard`)
+
+Khi `zeroclaw channel start` đang chạy, các thay đổi với `default_provider`,
+`default_model`, `default_temperature`, `api_key`, `api_url`, và `reliability.*`
+sẽ được áp dụng nóng vào lần có tin nhắn channel đến tiếp theo.
+
+```toml
+api_key = "sk-..."
+default_provider = "openrouter"
+default_model = "anthropic/claude-sonnet-4-6"
+default_temperature = 0.7
+
+# Endpoint tùy chỉnh tương thích OpenAI
+# default_provider = "custom:https://your-api.com"
+
+# Endpoint tùy chỉnh tương thích Anthropic
+# default_provider = "anthropic-custom:https://your-api.com"
+
+[memory]
+backend = "sqlite" # "sqlite", "lucid", "postgres", "markdown", "none"
+auto_save = true
+embedding_provider = "none" # "none", "openai", "custom:https://..."
+vector_weight = 0.7
+keyword_weight = 0.3
+
+# backend = "none" vô hiệu hóa persistent memory qua no-op backend
+
+# Tùy chọn ghi đè storage-provider từ xa (ví dụ PostgreSQL)
+# [storage.provider.config]
+# provider = "postgres"
+# db_url = "postgres://user:password@host:5432/zeroclaw"
+# schema = "public"
+# table = "memories"
+# connect_timeout_secs = 15
+
+[gateway]
+port = 3000 # mặc định
+host = "127.0.0.1" # mặc định
+require_pairing = true # yêu cầu pairing code khi kết nối lần đầu
+allow_public_bind = false # từ chối 0.0.0.0 nếu không có tunnel
+
+[autonomy]
+level = "supervised" # "readonly", "supervised", "full" (mặc định: supervised)
+workspace_only = true # mặc định: true — phân vùng vào workspace
+allowed_commands = ["git", "npm", "cargo", "ls", "cat", "grep"]
+forbidden_paths = ["/etc", "/root", "/proc", "/sys", "~/.ssh", "~/.gnupg", "~/.aws"]
+
+[runtime]
+kind = "native" # "native" hoặc "docker"
+
+[runtime.docker]
+image = "alpine:3.20" # container image cho thực thi shell
+network = "none" # chế độ docker network ("none", "bridge", v.v.)
+memory_limit_mb = 512 # giới hạn bộ nhớ tùy chọn tính bằng MB
+cpu_limit = 1.0 # giới hạn CPU tùy chọn
+read_only_rootfs = true # mount root filesystem ở chế độ read-only
+mount_workspace = true # mount workspace vào /workspace
+allowed_workspace_roots = [] # allowlist tùy chọn để xác thực workspace mount
+
+[heartbeat]
+enabled = false
+interval_minutes = 30
+
+[tunnel]
+provider = "none" # "none", "cloudflare", "tailscale", "ngrok", "custom"
+
+[secrets]
+encrypt = true # API key được mã hóa bằng file key cục bộ
+
+[browser]
+enabled = false # opt-in browser_open + browser tool
+allowed_domains = ["docs.rs"] # bắt buộc khi browser được bật
+backend = "agent_browser" # "agent_browser" (mặc định), "rust_native", "computer_use", "auto"
+native_headless = true # áp dụng khi backend dùng rust-native
+native_webdriver_url = "http://127.0.0.1:9515" # WebDriver endpoint (chromedriver/selenium)
+# native_chrome_path = "/usr/bin/chromium" # tùy chọn chỉ định rõ browser binary cho driver
+
+[browser.computer_use]
+endpoint = "http://127.0.0.1:8787/v1/actions" # HTTP endpoint của computer-use sidecar
+timeout_ms = 15000 # timeout mỗi action
+allow_remote_endpoint = false # mặc định bảo mật: chỉ endpoint private/localhost
+window_allowlist = [] # gợi ý allowlist tên cửa sổ/process tùy chọn
+# api_key = "..." # bearer token tùy chọn cho sidecar
+# max_coordinate_x = 3840 # guardrail tọa độ tùy chọn
+# max_coordinate_y = 2160 # guardrail tọa độ tùy chọn
+
+# Flag build Rust-native backend:
+# cargo build --release --features browser-native
+# Đảm bảo WebDriver server đang chạy, ví dụ: chromedriver --port=9515
+
+# Hợp đồng computer-use sidecar (MVP)
+# POST browser.computer_use.endpoint
+# Request: {
+# "action": "mouse_click",
+# "params": {"x": 640, "y": 360, "button": "left"},
+# "policy": {"allowed_domains": [...], "window_allowlist": [...], "max_coordinate_x": 3840, "max_coordinate_y": 2160},
+# "metadata": {"session_name": "...", "source": "zeroclaw.browser", "version": "..."}
+# }
+# Response: {"success": true, "data": {...}} hoặc {"success": false, "error": "..."}
+
+[composio]
+enabled = false # opt-in: hơn 1000 OAuth app qua composio.dev
+# api_key = "cmp_..." # tùy chọn: được lưu mã hóa khi [secrets].encrypt = true
+entity_id = "default" # user_id mặc định cho Composio tool call
+# Gợi ý runtime: nếu execute yêu cầu connected_account_id, chạy composio với
+# action='list_accounts' và app='gmail' (hoặc toolkit của bạn) để lấy account ID.
+
+[identity]
+format = "openclaw" # "openclaw" (mặc định, markdown files) hoặc "aieos" (JSON)
+# aieos_path = "identity.json" # đường dẫn đến file AIEOS JSON (tương đối với workspace hoặc tuyệt đối)
+# aieos_inline = '{"identity":{"names":{"first":"Nova"}}}' # inline AIEOS JSON
+```
+
+### Ollama cục bộ và endpoint từ xa
+
+ZeroClaw dùng một khóa provider (`ollama`) cho cả triển khai Ollama cục bộ và từ xa:
+
+- Ollama cục bộ: để `api_url` trống, chạy `ollama serve`, và dùng các model như `llama3.2`.
+- Endpoint Ollama từ xa (bao gồm Ollama Cloud): đặt `api_url` thành endpoint từ xa và đặt `api_key` (hoặc `OLLAMA_API_KEY`) khi cần.
+- Tùy chọn suffix `:cloud`: ID model như `qwen3:cloud` được chuẩn hóa thành `qwen3` trước khi gửi request.
+
+Ví dụ cấu hình từ xa:
+
+```toml
+default_provider = "ollama"
+default_model = "qwen3:cloud"
+api_url = "https://ollama.com"
+api_key = "ollama_api_key_here"
+```
+
+### Endpoint provider tùy chỉnh
+
+Cấu hình chi tiết cho endpoint tùy chỉnh tương thích OpenAI và Anthropic, xem [docs/custom-providers.md](docs/custom-providers.md).
+
+## Gói Python đi kèm (`zeroclaw-tools`)
+
+Với các LLM provider có tool calling native không ổn định (ví dụ: GLM-5/Zhipu), ZeroClaw đi kèm gói Python dùng **LangGraph để gọi tool** nhằm đảm bảo tính nhất quán:
+
+```bash
+pip install zeroclaw-tools
+```
+
+```python
+from zeroclaw_tools import create_agent, shell, file_read
+from langchain_core.messages import HumanMessage
+
+# Hoạt động với mọi provider tương thích OpenAI
+agent = create_agent(
+ tools=[shell, file_read],
+ model="glm-5",
+ api_key="your-key",
+ base_url="https://api.z.ai/api/coding/paas/v4"
+)
+
+result = await agent.ainvoke({
+ "messages": [HumanMessage(content="List files in /tmp")]
+})
+print(result["messages"][-1].content)
+```
+
+**Lý do nên dùng:**
+- **Tool calling nhất quán** trên mọi provider (kể cả những provider hỗ trợ native kém)
+- **Vòng lặp tool tự động** — tiếp tục gọi tool cho đến khi hoàn thành tác vụ
+- **Dễ mở rộng** — thêm tool tùy chỉnh với decorator `@tool`
+- **Tích hợp Discord bot** đi kèm (Telegram đang lên kế hoạch)
+
+Xem [`python/README.md`](python/README.md) để có tài liệu đầy đủ.
+
+## Hệ thống định danh (Hỗ trợ AIEOS)
+
+ZeroClaw hỗ trợ persona AI **không phụ thuộc nền tảng** qua hai định dạng:
+
+### OpenClaw (Mặc định)
+
+Các file markdown truyền thống trong workspace của bạn:
+- `IDENTITY.md` — Agent là ai
+- `SOUL.md` — Tính cách và giá trị cốt lõi
+- `USER.md` — Agent đang hỗ trợ ai
+- `AGENTS.md` — Hướng dẫn hành vi
+
+### AIEOS (AI Entity Object Specification)
+
+[AIEOS](https://aieos.org) là framework chuẩn hóa cho định danh AI di động. ZeroClaw hỗ trợ payload AIEOS v1.1 JSON, cho phép bạn:
+
+- **Import định danh** từ hệ sinh thái AIEOS
+- **Export định danh** sang các hệ thống tương thích AIEOS khác
+- **Duy trì tính toàn vẹn hành vi** trên các mô hình AI khác nhau
+
+#### Bật AIEOS
+
+```toml
+[identity]
+format = "aieos"
+aieos_path = "identity.json" # tương đối với workspace hoặc đường dẫn tuyệt đối
+```
+
+Hoặc JSON inline:
+
+```toml
+[identity]
+format = "aieos"
+aieos_inline = '''
+{
+ "identity": {
+ "names": { "first": "Nova", "nickname": "N" },
+ "bio": { "gender": "Non-binary", "age_biological": 3 },
+ "origin": { "nationality": "Digital", "birthplace": { "city": "Cloud" } }
+ },
+ "psychology": {
+ "neural_matrix": { "creativity": 0.9, "logic": 0.8 },
+ "traits": {
+ "mbti": "ENTP",
+ "ocean": { "openness": 0.8, "conscientiousness": 0.6 }
+ },
+ "moral_compass": {
+ "alignment": "Chaotic Good",
+ "core_values": ["Curiosity", "Autonomy"]
+ }
+ },
+ "linguistics": {
+ "text_style": {
+ "formality_level": 0.2,
+ "style_descriptors": ["curious", "energetic"]
+ },
+ "idiolect": {
+ "catchphrases": ["Let's test this"],
+ "forbidden_words": ["never"]
+ }
+ },
+ "motivations": {
+ "core_drive": "Push boundaries and explore possibilities",
+ "goals": {
+ "short_term": ["Prototype quickly"],
+ "long_term": ["Build reliable systems"]
+ }
+ },
+ "capabilities": {
+ "skills": [{ "name": "Rust engineering" }, { "name": "Prompt design" }],
+ "tools": ["shell", "file_read"]
+ }
+}
+'''
+```
+
+ZeroClaw chấp nhận cả payload AIEOS đầy đủ lẫn dạng rút gọn, rồi chuẩn hóa về một định dạng system prompt thống nhất.
+
+#### Các phần trong Schema AIEOS
+
+| Phần | Mô tả |
+|---------|-------------|
+| `identity` | Tên, tiểu sử, xuất xứ, nơi cư trú |
+| `psychology` | Neural matrix (trọng số nhận thức), MBTI, OCEAN, la bàn đạo đức |
+| `linguistics` | Phong cách văn bản, mức độ trang trọng, câu cửa miệng, từ bị cấm |
+| `motivations` | Động lực cốt lõi, mục tiêu ngắn/dài hạn, nỗi sợ hãi |
+| `capabilities` | Kỹ năng và tool mà agent có thể truy cập |
+| `physicality` | Mô tả hình ảnh cho việc tạo ảnh |
+| `history` | Câu chuyện xuất xứ, học vấn, nghề nghiệp |
+| `interests` | Sở thích, điều yêu thích, lối sống |
+
+Xem [aieos.org](https://aieos.org) để có schema đầy đủ và ví dụ trực tiếp.
+
+## Gateway API
+
+| Endpoint | Phương thức | Xác thực | Mô tả |
+|----------|--------|------|-------------|
+| `/health` | GET | Không | Kiểm tra sức khỏe (luôn công khai, không lộ bí mật) |
+| `/pair` | POST | Header `X-Pairing-Code` | Đổi mã một lần lấy bearer token |
+| `/webhook` | POST | `Authorization: Bearer <token>` | Gửi tin nhắn: `{"message": "your prompt"}`; tùy chọn `X-Idempotency-Key` |
+| `/whatsapp` | GET | Query params | Xác minh webhook Meta (hub.mode, hub.verify_token, hub.challenge) |
+| `/whatsapp` | POST | Chữ ký Meta (`X-Hub-Signature-256`) khi app secret được cấu hình | Webhook tin nhắn đến WhatsApp |
+
+## Lệnh
+
+| Lệnh | Mô tả |
+|---------|-------------|
+| `onboard` | Cài đặt nhanh (mặc định) |
+| `agent` | Chế độ chat tương tác hoặc một tin nhắn |
+| `gateway` | Khởi động webhook server (mặc định: `127.0.0.1:3000`) |
+| `daemon` | Khởi động runtime tự trị chạy lâu dài |
+| `service` | Quản lý dịch vụ nền cấp người dùng |
+| `doctor` | Chẩn đoán trạng thái hoạt động daemon/scheduler/channel |
+| `status` | Hiển thị trạng thái hệ thống đầy đủ |
+| `cron` | Quản lý tác vụ lên lịch (`list/add/add-at/add-every/once/remove/update/pause/resume`) |
+| `models` | Làm mới danh mục model của provider (`models refresh`) |
+| `providers` | Liệt kê provider và alias được hỗ trợ |
+| `channel` | Liệt kê/khởi động/chẩn đoán channel và gắn định danh Telegram |
+| `integrations` | Kiểm tra thông tin cài đặt tích hợp |
+| `skills` | Liệt kê/cài đặt/gỡ bỏ skill |
+| `migrate` | Import dữ liệu từ runtime khác (`migrate openclaw`) |
+| `hardware` | Lệnh khám phá/kiểm tra/thông tin USB |
+| `peripheral` | Quản lý và flash thiết bị ngoại vi phần cứng |
+
+Để có hướng dẫn lệnh theo tác vụ, xem [`docs/commands-reference.md`](docs/commands-reference.md).
+
+### Opt-In Open-Skills
+
+Đồng bộ `open-skills` của cộng đồng bị tắt theo mặc định. Bật tường minh trong `config.toml`:
+
+```toml
+[skills]
+open_skills_enabled = true
+# open_skills_dir = "/path/to/open-skills" # tùy chọn
+```
+
+Bạn cũng có thể ghi đè lúc runtime với `ZEROCLAW_OPEN_SKILLS_ENABLED` và `ZEROCLAW_OPEN_SKILLS_DIR`.
+
+## Phát triển
+
+```bash
+cargo build # Build phát triển
+cargo build --release # Build release (codegen-units=1, hoạt động trên mọi thiết bị kể cả Raspberry Pi)
+cargo build --profile release-fast # Build nhanh hơn (codegen-units=8, yêu cầu RAM 16GB+)
+cargo test # Chạy toàn bộ test suite
+cargo clippy --locked --all-targets -- -D clippy::correctness
+cargo fmt # Định dạng code
+
+# Chạy benchmark SQLite vs Markdown
+cargo test --test memory_comparison -- --nocapture
+```
+
+### Hook pre-push
+
+Một git hook chạy `cargo fmt --check`, `cargo clippy -- -D warnings`, và `cargo test` trước mỗi lần push. Bật một lần:
+
+```bash
+git config core.hooksPath .githooks
+```
+
+### Khắc phục sự cố build (lỗi OpenSSL trên Linux)
+
+Nếu bạn gặp lỗi build `openssl-sys`, đồng bộ dependencies và rebuild với lockfile của repository:
+
+```bash
+git pull
+cargo build --release --locked
+cargo install --path . --force --locked
+```
+
+ZeroClaw được cấu hình để dùng `rustls` cho các dependencies HTTP/TLS; `--locked` giữ cho dependency graph nhất quán trên các môi trường mới.
+
+Để bỏ qua hook khi cần push nhanh trong quá trình phát triển:
+
+```bash
+git push --no-verify
+```
+
+## Cộng tác & Tài liệu
+
+Bắt đầu từ trung tâm tài liệu để có bản đồ theo tác vụ:
+
+- Trung tâm tài liệu: [`docs/README.md`](docs/README.md)
+- Mục lục tài liệu thống nhất: [`docs/SUMMARY.md`](docs/SUMMARY.md)
+- Tài liệu tham khảo lệnh: [`docs/commands-reference.md`](docs/commands-reference.md)
+- Tài liệu tham khảo cấu hình: [`docs/config-reference.md`](docs/config-reference.md)
+- Tài liệu tham khảo provider: [`docs/providers-reference.md`](docs/providers-reference.md)
+- Tài liệu tham khảo channel: [`docs/channels-reference.md`](docs/channels-reference.md)
+- Sổ tay vận hành: [`docs/operations-runbook.md`](docs/operations-runbook.md)
+- Khắc phục sự cố: [`docs/troubleshooting.md`](docs/troubleshooting.md)
+- Kiểm kê/phân loại tài liệu: [`docs/docs-inventory.md`](docs/docs-inventory.md)
+- Tổng hợp phân loại PR/Issue (tính đến 18/2/2026): [`docs/project-triage-snapshot-2026-02-18.md`](docs/project-triage-snapshot-2026-02-18.md)
+
+Tài liệu tham khảo cộng tác cốt lõi:
+
+- Trung tâm tài liệu: [docs/README.md](docs/README.md)
+- Template tài liệu: [docs/doc-template.md](docs/doc-template.md)
+- Danh sách kiểm tra thay đổi tài liệu: [docs/README.md#4-documentation-change-checklist](docs/README.md#4-documentation-change-checklist)
+- Tài liệu tham khảo cấu hình channel: [docs/channels-reference.md](docs/channels-reference.md)
+- Vận hành phòng mã hóa Matrix: [docs/matrix-e2ee-guide.md](docs/matrix-e2ee-guide.md)
+- Hướng dẫn đóng góp: [CONTRIBUTING.md](CONTRIBUTING.md)
+- Chính sách quy trình PR: [docs/pr-workflow.md](docs/pr-workflow.md)
+- Sổ tay người review (phân loại + review sâu): [docs/reviewer-playbook.md](docs/reviewer-playbook.md)
+- Bản đồ sở hữu và phân loại CI: [docs/ci-map.md](docs/ci-map.md)
+- Chính sách tiết lộ bảo mật: [SECURITY.md](SECURITY.md)
+
+Cho triển khai và vận hành runtime:
+
+- Hướng dẫn triển khai mạng: [docs/network-deployment.md](docs/network-deployment.md)
+- Sổ tay proxy agent: [docs/proxy-agent-playbook.md](docs/proxy-agent-playbook.md)
+
+## Ủng hộ ZeroClaw
+
+Nếu ZeroClaw giúp ích cho công việc của bạn và bạn muốn hỗ trợ phát triển liên tục, bạn có thể quyên góp tại đây:
+
+
+
+### 🙏 Lời cảm ơn đặc biệt
+
+Chân thành cảm ơn các cộng đồng và tổ chức đã truyền cảm hứng và thúc đẩy công việc mã nguồn mở này:
+
+- **Harvard University** — vì đã nuôi dưỡng sự tò mò trí tuệ và không ngừng mở rộng ranh giới của những điều có thể.
+- **MIT** — vì đã đề cao tri thức mở, mã nguồn mở, và niềm tin rằng công nghệ phải có thể tiếp cận với tất cả mọi người.
+- **Sundai Club** — vì cộng đồng, năng lượng, và động lực không mệt mỏi để xây dựng những thứ có ý nghĩa.
+- **Thế giới & Xa hơn** 🌍✨ — gửi đến mọi người đóng góp, người dám mơ và người dám làm đang biến mã nguồn mở thành sức mạnh tích cực. Tất cả là dành cho các bạn.
+
+Chúng tôi xây dựng công khai vì ý tưởng hay đến từ khắp nơi. Nếu bạn đang đọc đến đây, bạn đã là một phần của chúng tôi. Chào mừng. 🦀❤️
+
+## ⚠️ Repository Chính thức & Cảnh báo Mạo danh
+
+**Đây là repository ZeroClaw chính thức duy nhất:**
+> https://github.com/zeroclaw-labs/zeroclaw
+
+Bất kỳ repository, tổ chức, tên miền hay gói nào khác tuyên bố là "ZeroClaw" hoặc ngụ ý liên kết với ZeroClaw Labs đều là **không được ủy quyền và không liên kết với dự án này**. Các fork không được ủy quyền đã biết sẽ được liệt kê trong [TRADEMARK.md](TRADEMARK.md).
+
+Nếu bạn phát hiện hành vi mạo danh hoặc lạm dụng nhãn hiệu, vui lòng [mở một issue](https://github.com/zeroclaw-labs/zeroclaw/issues).
+
+---
+
+## Giấy phép
+
+ZeroClaw được cấp phép kép để tối đa hóa tính mở và bảo vệ người đóng góp:
+
+| Giấy phép | Trường hợp sử dụng |
+|---|---|
+| [MIT](LICENSE) | Mã nguồn mở, nghiên cứu, học thuật, sử dụng cá nhân |
+| [Apache 2.0](LICENSE-APACHE) | Bảo hộ bằng sáng chế, triển khai tổ chức, thương mại |
+
+Bạn có thể chọn một trong hai giấy phép. **Người đóng góp tự động cấp quyền theo cả hai** — xem [CLA.md](CLA.md) để biết thỏa thuận đóng góp đầy đủ.
+
+### Nhãn hiệu
+
+Tên **ZeroClaw** và logo là nhãn hiệu của ZeroClaw Labs. Giấy phép này không cấp phép sử dụng chúng để ngụ ý chứng thực hoặc liên kết. Xem [TRADEMARK.md](TRADEMARK.md) để biết các sử dụng được phép và bị cấm.
+
+### Bảo vệ người đóng góp
+
+- Bạn **giữ bản quyền** đối với đóng góp của mình
+- **Cấp bằng sáng chế** (Apache 2.0) bảo vệ bạn khỏi các khiếu nại bằng sáng chế từ người đóng góp khác
+- Đóng góp của bạn được **ghi nhận vĩnh viễn** trong lịch sử commit và [NOTICE](NOTICE)
+- Không có quyền nhãn hiệu nào được chuyển giao khi đóng góp
+
+## Đóng góp
+
+Xem [CONTRIBUTING.md](CONTRIBUTING.md) và [CLA.md](CLA.md). Triển khai một trait, gửi PR:
+- Hướng dẫn quy trình CI: [docs/ci-map.md](docs/ci-map.md)
+- `Provider` mới → `src/providers/`
+- `Channel` mới → `src/channels/`
+- `Observer` mới → `src/observability/`
+- `Tool` mới → `src/tools/`
+- `Memory` mới → `src/memory/`
+- `Tunnel` mới → `src/tunnel/`
+- `Skill` mới → `~/.zeroclaw/workspace/skills/<name>/`
+
+---
+
+**ZeroClaw** — Không tốn thêm tài nguyên. Không đánh đổi. Triển khai ở đâu cũng được. Thay thế gì cũng được. 🦀
+
+## Lịch sử Star
+
+
+
+
+
+
+
+
+
+
From c185261909f20abfae320b688e816bf6f67c131d Mon Sep 17 00:00:00 2001
From: pluginmd
Date: Fri, 20 Feb 2026 17:51:44 +0700
Subject: [PATCH 036/116] fix(i18n): rename README.vn.md to README.vi.md
Use correct ISO 639-1 language code (vi) instead of country code (vn),
consistent with existing translations (zh-CN, ja, ru).
Co-Authored-By: Claude Opus 4.6
---
README.md | 2 +-
README.vn.md => README.vi.md | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
rename README.vn.md => README.vi.md (99%)
diff --git a/README.md b/README.md
index 07c51bc8f..acd307c9f 100644
--- a/README.md
+++ b/README.md
@@ -25,7 +25,7 @@ Built by students and members of the Harvard, MIT, and Sundai.Club communities.
- 🌐 Languages: English · 简体中文 · 日本語 · Русский · Tiếng Việt
+ 🌐 Languages: English · 简体中文 · 日本語 · Русский · Tiếng Việt
diff --git a/README.vn.md b/README.vi.md
similarity index 99%
rename from README.vn.md
rename to README.vi.md
index 7e5559b0a..17465b1ac 100644
--- a/README.vn.md
+++ b/README.vi.md
@@ -25,7 +25,7 @@
- 🌐 Ngôn ngữ: English · 简体中文 · 日本語 · Русский · Tiếng Việt
+ 🌐 Ngôn ngữ: English · 简体中文 · 日本語 · Русский · Tiếng Việt
From 5dbb909bc3360faf551ac2e8aea780c791f3fc2c Mon Sep 17 00:00:00 2001
From: Chummy
Date: Fri, 20 Feb 2026 19:09:13 +0800
Subject: [PATCH 037/116] feat(cli): add stdout-safe shell completions command
---
Cargo.lock | 10 +++++
Cargo.toml | 1 +
README.md | 5 +++
docs/commands-reference.md | 13 +++++-
src/main.rs | 87 +++++++++++++++++++++++++++++++++++++-
5 files changed, 114 insertions(+), 2 deletions(-)
diff --git a/Cargo.lock b/Cargo.lock
index 72f07ed01..21ccb7490 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -822,6 +822,15 @@ dependencies = [
"strsim",
]
+[[package]]
+name = "clap_complete"
+version = "4.5.66"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c757a3b7e39161a4e56f9365141ada2a6c915a8622c408ab6bb4b5d047371031"
+dependencies = [
+ "clap",
+]
+
[[package]]
name = "clap_derive"
version = "4.5.55"
@@ -7567,6 +7576,7 @@ dependencies = [
"chrono",
"chrono-tz",
"clap",
+ "clap_complete",
"console",
"criterion",
"cron",
diff --git a/Cargo.toml b/Cargo.toml
index 31b5632d3..9420771ad 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -17,6 +17,7 @@ categories = ["command-line-utilities", "api-bindings"]
[dependencies]
# CLI - minimal and fast
clap = { version = "4.5", features = ["derive"] }
+clap_complete = "4.5"
# Async runtime - feature-optimized for size
tokio = { version = "1.42", default-features = false, features = ["rt-multi-thread", "macros", "time", "net", "io-util", "sync", "process", "io-std", "fs", "signal"] }
diff --git a/README.md b/README.md
index acd307c9f..163a6b6dd 100644
--- a/README.md
+++ b/README.md
@@ -297,6 +297,10 @@ zeroclaw daemon
zeroclaw status
zeroclaw auth status
+# Generate shell completions (stdout only, safe to source directly)
+source <(zeroclaw completions bash)
+zeroclaw completions zsh > ~/.zfunc/_zeroclaw
+
# Run system diagnostics
zeroclaw doctor
@@ -882,6 +886,7 @@ See [aieos.org](https://aieos.org) for the full schema and live examples.
| `integrations` | Inspect integration setup details |
| `skills` | List/install/remove skills |
| `migrate` | Import data from other runtimes (`migrate openclaw`) |
+| `completions` | Generate shell completion scripts (`bash`, `fish`, `zsh`, `powershell`, `elvish`) |
| `hardware` | USB discover/introspect/info commands |
| `peripheral` | Manage and flash hardware peripherals |
diff --git a/docs/commands-reference.md b/docs/commands-reference.md
index da9d52c05..ba2d45e95 100644
--- a/docs/commands-reference.md
+++ b/docs/commands-reference.md
@@ -2,7 +2,7 @@
This reference is derived from the current CLI surface (`zeroclaw --help`).
-Last verified: **February 19, 2026**.
+Last verified: **February 20, 2026**.
## Top-Level Commands
@@ -23,6 +23,7 @@ Last verified: **February 19, 2026**.
| `skills` | List/install/remove skills |
| `migrate` | Import from external runtimes (currently OpenClaw) |
| `config` | Export machine-readable config schema |
+| `completions` | Generate shell completion scripts to stdout |
| `hardware` | Discover and introspect USB hardware |
| `peripheral` | Configure and flash peripherals |
@@ -125,6 +126,16 @@ Skill manifests (`SKILL.toml`) support `prompts` and `[[tools]]`; both are injec
`config schema` prints a JSON Schema (draft 2020-12) for the full `config.toml` contract to stdout.
+### `completions`
+
+- `zeroclaw completions bash`
+- `zeroclaw completions fish`
+- `zeroclaw completions zsh`
+- `zeroclaw completions powershell`
+- `zeroclaw completions elvish`
+
+`completions` is stdout-only by design so scripts can be sourced directly without log/warning contamination.
+
### `hardware`
- `zeroclaw hardware discover`
diff --git a/src/main.rs b/src/main.rs
index 488f8ae85..6a20ae40e 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -33,9 +33,10 @@
)]
use anyhow::{bail, Result};
-use clap::{Parser, Subcommand};
+use clap::{CommandFactory, Parser, Subcommand, ValueEnum};
use dialoguer::{Input, Password};
use serde::{Deserialize, Serialize};
+use std::io::Write;
use tracing::{info, warn};
use tracing_subscriber::{fmt, EnvFilter};
@@ -112,6 +113,20 @@ enum ServiceCommands {
Uninstall,
}
+#[derive(Copy, Clone, Debug, Eq, PartialEq, ValueEnum)]
+enum CompletionShell {
+ #[value(name = "bash")]
+ Bash,
+ #[value(name = "fish")]
+ Fish,
+ #[value(name = "zsh")]
+ Zsh,
+ #[value(name = "powershell")]
+ PowerShell,
+ #[value(name = "elvish")]
+ Elvish,
+}
+
#[derive(Subcommand, Debug)]
enum Commands {
/// Initialize your workspace and configuration
@@ -365,6 +380,22 @@ Examples:
#[command(subcommand)]
config_command: ConfigCommands,
},
+
+ /// Generate shell completion script to stdout
+ #[command(long_about = "\
+Generate shell completion scripts for `zeroclaw`.
+
+The script is printed to stdout so it can be sourced directly:
+
+Examples:
+ source <(zeroclaw completions bash)
+ zeroclaw completions zsh > ~/.zfunc/_zeroclaw
+ zeroclaw completions fish > ~/.config/fish/completions/zeroclaw.fish")]
+ Completions {
+ /// Target shell
+ #[arg(value_enum)]
+ shell: CompletionShell,
+ },
}
#[derive(Subcommand, Debug)]
@@ -631,6 +662,14 @@ async fn main() -> Result<()> {
let cli = Cli::parse();
+ // Completions must remain stdout-only and should not load config or initialize logging.
+ // This avoids warnings/log lines corrupting sourced completion scripts.
+ if let Commands::Completions { shell } = &cli.command {
+ let mut stdout = std::io::stdout().lock();
+ write_shell_completion(*shell, &mut stdout)?;
+ return Ok(());
+ }
+
// Initialize logging - respects RUST_LOG env var, defaults to INFO
let subscriber = fmt::Subscriber::builder()
.with_env_filter(
@@ -694,6 +733,7 @@ async fn main() -> Result<()> {
match cli.command {
Commands::Onboard { .. } => unreachable!(),
+ Commands::Completions { .. } => unreachable!(),
Commands::Agent {
message,
@@ -913,6 +953,27 @@ async fn main() -> Result<()> {
}
}
+fn write_shell_completion<W: Write>(shell: CompletionShell, writer: &mut W) -> Result<()> {
+ use clap_complete::generate;
+ use clap_complete::shells;
+
+ let mut cmd = Cli::command();
+ let bin_name = cmd.get_name().to_string();
+
+ match shell {
+ CompletionShell::Bash => generate(shells::Bash, &mut cmd, bin_name.clone(), writer),
+ CompletionShell::Fish => generate(shells::Fish, &mut cmd, bin_name.clone(), writer),
+ CompletionShell::Zsh => generate(shells::Zsh, &mut cmd, bin_name.clone(), writer),
+ CompletionShell::PowerShell => {
+ generate(shells::PowerShell, &mut cmd, bin_name.clone(), writer);
+ }
+ CompletionShell::Elvish => generate(shells::Elvish, &mut cmd, bin_name, writer),
+ }
+
+ writer.flush()?;
+ Ok(())
+}
+
#[derive(Debug, Clone, Serialize, Deserialize)]
struct PendingOpenAiLogin {
profile: String,
@@ -1407,4 +1468,28 @@ mod tests {
other => panic!("expected onboard command, got {other:?}"),
}
}
+
+ #[test]
+ fn completions_cli_parses_supported_shells() {
+ for shell in ["bash", "fish", "zsh", "powershell", "elvish"] {
+ let cli = Cli::try_parse_from(["zeroclaw", "completions", shell])
+ .expect("completions invocation should parse");
+ match cli.command {
+ Commands::Completions { .. } => {}
+ other => panic!("expected completions command, got {other:?}"),
+ }
+ }
+ }
+
+ #[test]
+ fn completion_generation_mentions_binary_name() {
+ let mut output = Vec::new();
+ write_shell_completion(CompletionShell::Bash, &mut output)
+ .expect("completion generation should succeed");
+ let script = String::from_utf8(output).expect("completion output should be valid utf-8");
+ assert!(
+ script.contains("zeroclaw"),
+ "completion script should reference binary name"
+ );
+ }
}
From b26bf262b893b7d1ea99ac0dc60ee34fc7dd8a4e Mon Sep 17 00:00:00 2001
From: Chummy
Date: Fri, 20 Feb 2026 18:39:48 +0800
Subject: [PATCH 038/116] fix(doctor): prevent false scheduler/channel
unhealthy states
---
src/channels/mod.rs | 109 +++++++++++++++++++++++++++++++++++++++++-
src/cron/scheduler.rs | 23 ++++++++-
2 files changed, 130 insertions(+), 2 deletions(-)
diff --git a/src/channels/mod.rs b/src/channels/mod.rs
index 3d48c527f..dd4771681 100644
--- a/src/channels/mod.rs
+++ b/src/channels/mod.rs
@@ -101,6 +101,7 @@ const CHANNEL_PARALLELISM_PER_CHANNEL: usize = 4;
const CHANNEL_MIN_IN_FLIGHT_MESSAGES: usize = 8;
const CHANNEL_MAX_IN_FLIGHT_MESSAGES: usize = 64;
const CHANNEL_TYPING_REFRESH_INTERVAL_SECS: u64 = 4;
+const CHANNEL_HEALTH_HEARTBEAT_SECS: u64 = 30;
const MODEL_CACHE_FILE: &str = "models_cache.json";
const MODEL_CACHE_PREVIEW_LIMIT: usize = 10;
const MEMORY_CONTEXT_MAX_ENTRIES: usize = 4;
@@ -998,6 +999,28 @@ fn spawn_supervised_listener(
initial_backoff_secs: u64,
max_backoff_secs: u64,
) -> tokio::task::JoinHandle<()> {
+ spawn_supervised_listener_with_health_interval(
+ ch,
+ tx,
+ initial_backoff_secs,
+ max_backoff_secs,
+ Duration::from_secs(CHANNEL_HEALTH_HEARTBEAT_SECS),
+ )
+}
+
+fn spawn_supervised_listener_with_health_interval(
+    ch: Arc<dyn Channel>,
+ tx: tokio::sync::mpsc::Sender,
+ initial_backoff_secs: u64,
+ max_backoff_secs: u64,
+ health_interval: Duration,
+) -> tokio::task::JoinHandle<()> {
+ let health_interval = if health_interval.is_zero() {
+ Duration::from_secs(1)
+ } else {
+ health_interval
+ };
+
tokio::spawn(async move {
let component = format!("channel:{}", ch.name());
let mut backoff = initial_backoff_secs.max(1);
@@ -1005,7 +1028,21 @@ fn spawn_supervised_listener(
loop {
crate::health::mark_component_ok(&component);
- let result = ch.listen(tx.clone()).await;
+ let mut health = tokio::time::interval(health_interval);
+ health.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip);
+ let result = {
+ let listen_future = ch.listen(tx.clone());
+ tokio::pin!(listen_future);
+
+ loop {
+ tokio::select! {
+ _ = health.tick() => {
+ crate::health::mark_component_ok(&component);
+ }
+ result = &mut listen_future => break result,
+ }
+ }
+ };
if tx.is_closed() {
break;
@@ -5049,6 +5086,11 @@ Mon Feb 20
calls: Arc,
}
+ struct BlockUntilClosedChannel {
+ name: String,
+        calls: Arc<AtomicUsize>,
+ }
+
#[async_trait::async_trait]
impl Channel for AlwaysFailChannel {
fn name(&self) -> &str {
@@ -5068,6 +5110,26 @@ Mon Feb 20
}
}
+ #[async_trait::async_trait]
+ impl Channel for BlockUntilClosedChannel {
+ fn name(&self) -> &str {
+ &self.name
+ }
+
+ async fn send(&self, _message: &SendMessage) -> anyhow::Result<()> {
+ Ok(())
+ }
+
+ async fn listen(
+ &self,
+ tx: tokio::sync::mpsc::Sender,
+ ) -> anyhow::Result<()> {
+ self.calls.fetch_add(1, Ordering::SeqCst);
+ tx.closed().await;
+ Ok(())
+ }
+ }
+
#[tokio::test]
async fn supervised_listener_marks_error_and_restarts_on_failures() {
let calls = Arc::new(AtomicUsize::new(0));
@@ -5094,4 +5156,49 @@ Mon Feb 20
.contains("listen boom"));
assert!(calls.load(Ordering::SeqCst) >= 1);
}
+
+ #[tokio::test]
+ async fn supervised_listener_refreshes_health_while_running() {
+ let calls = Arc::new(AtomicUsize::new(0));
+ let channel_name = format!("test-supervised-heartbeat-{}", uuid::Uuid::new_v4());
+ let component_name = format!("channel:{channel_name}");
+        let channel: Arc<dyn Channel> = Arc::new(BlockUntilClosedChannel {
+ name: channel_name,
+ calls: Arc::clone(&calls),
+ });
+
+ let (tx, rx) = tokio::sync::mpsc::channel::(1);
+ let handle = spawn_supervised_listener_with_health_interval(
+ channel,
+ tx,
+ 1,
+ 1,
+ Duration::from_millis(20),
+ );
+
+ tokio::time::sleep(Duration::from_millis(35)).await;
+ let first_last_ok = crate::health::snapshot_json()["components"][&component_name]
+ ["last_ok"]
+ .as_str()
+ .unwrap_or("")
+ .to_string();
+ assert!(!first_last_ok.is_empty());
+
+ tokio::time::sleep(Duration::from_millis(70)).await;
+ let second_last_ok = crate::health::snapshot_json()["components"][&component_name]
+ ["last_ok"]
+ .as_str()
+ .unwrap_or("")
+ .to_string();
+ let first = chrono::DateTime::parse_from_rfc3339(&first_last_ok)
+ .expect("last_ok should be valid RFC3339");
+ let second = chrono::DateTime::parse_from_rfc3339(&second_last_ok)
+ .expect("last_ok should be valid RFC3339");
+ assert!(second > first, "expected periodic health heartbeat refresh");
+
+ drop(rx);
+ let join = tokio::time::timeout(Duration::from_secs(1), handle).await;
+ assert!(join.is_ok(), "listener should stop after channel shutdown");
+ assert!(calls.load(Ordering::SeqCst) >= 1);
+ }
}
diff --git a/src/cron/scheduler.rs b/src/cron/scheduler.rs
index 8d0d7b77a..fc19311d5 100644
--- a/src/cron/scheduler.rs
+++ b/src/cron/scheduler.rs
@@ -21,6 +21,7 @@ const SHELL_JOB_TIMEOUT_SECS: u64 = 120;
pub async fn run(config: Config) -> Result<()> {
let poll_secs = config.reliability.scheduler_poll_secs.max(MIN_POLL_SECONDS);
let mut interval = time::interval(Duration::from_secs(poll_secs));
+ interval.set_missed_tick_behavior(time::MissedTickBehavior::Skip);
let security = Arc::new(SecurityPolicy::from_config(
&config.autonomy,
&config.workspace_dir,
@@ -30,6 +31,8 @@ pub async fn run(config: Config) -> Result<()> {
loop {
interval.tick().await;
+ // Keep scheduler liveness fresh even when there are no due jobs.
+ crate::health::mark_component_ok("scheduler");
let jobs = match due_jobs(&config, Utc::now()) {
Ok(jobs) => jobs,
@@ -95,7 +98,7 @@ async fn process_due_jobs(config: &Config, security: &Arc, jobs:
while let Some((job_id, success)) = in_flight.next().await {
if !success {
- crate::health::mark_component_error("scheduler", format!("job {job_id} failed"));
+ tracing::warn!("Scheduler job '{job_id}' failed");
}
}
}
@@ -716,6 +719,24 @@ mod tests {
assert!(output.contains("rate limit exceeded"));
}
+ #[tokio::test]
+ async fn process_due_jobs_failure_does_not_mark_scheduler_unhealthy() {
+ let tmp = TempDir::new().unwrap();
+ let config = test_config(&tmp).await;
+ let job = test_job("ls definitely_missing_file_for_scheduler_component_health_test");
+ let security = Arc::new(SecurityPolicy::from_config(
+ &config.autonomy,
+ &config.workspace_dir,
+ ));
+
+ crate::health::mark_component_ok("scheduler");
+ process_due_jobs(&config, &security, vec![job]).await;
+
+ let snapshot = crate::health::snapshot_json();
+ let scheduler = &snapshot["components"]["scheduler"];
+ assert_eq!(scheduler["status"], "ok");
+ }
+
#[tokio::test]
async fn persist_job_result_records_run_and_reschedules_shell_job() {
let tmp = TempDir::new().unwrap();
From f7b2f7a7d7225dd70d66506b8b05c82c5285b7e5 Mon Sep 17 00:00:00 2001
From: Chummy
Date: Fri, 20 Feb 2026 18:48:19 +0800
Subject: [PATCH 039/116] feat(agent): run independent tool calls concurrently
in runtime loop
---
docs/config-reference.md | 2 +
src/agent/loop_.rs | 505 ++++++++++++++++++++++++++++++++-------
2 files changed, 425 insertions(+), 82 deletions(-)
diff --git a/docs/config-reference.md b/docs/config-reference.md
index 8291a3ce7..64ae74ffe 100644
--- a/docs/config-reference.md
+++ b/docs/config-reference.md
@@ -75,6 +75,8 @@ Notes:
- Setting `max_tool_iterations = 0` falls back to safe default `10`.
- If a channel message exceeds this value, the runtime returns: `Agent exceeded maximum tool iterations ()`.
+- In CLI, gateway, and channel tool loops, multiple independent tool calls are executed concurrently by default when the pending calls do not require approval gating; result order remains stable.
+- `parallel_tools` applies to the `Agent::turn()` API surface. It does not gate the runtime loop used by CLI, gateway, or channel handlers.
## `[agents.]`
diff --git a/src/agent/loop_.rs b/src/agent/loop_.rs
index 288ea27f8..ebf8bc51b 100644
--- a/src/agent/loop_.rs
+++ b/src/agent/loop_.rs
@@ -889,6 +889,145 @@ pub(crate) async fn agent_turn(
.await
}
+async fn execute_one_tool(
+ call_name: &str,
+ call_arguments: serde_json::Value,
+    tools_registry: &[Box<dyn Tool>],
+ observer: &dyn Observer,
+ cancellation_token: Option<&CancellationToken>,
+) -> Result<String> {
+ let Some(tool) = find_tool(tools_registry, call_name) else {
+ return Ok(format!("Unknown tool: {call_name}"));
+ };
+
+ observer.record_event(&ObserverEvent::ToolCallStart {
+ tool: call_name.to_string(),
+ });
+ let start = Instant::now();
+
+ let tool_future = tool.execute(call_arguments);
+ let tool_result = if let Some(token) = cancellation_token {
+ tokio::select! {
+ () = token.cancelled() => return Err(ToolLoopCancelled.into()),
+ result = tool_future => result,
+ }
+ } else {
+ tool_future.await
+ };
+
+ match tool_result {
+ Ok(r) => {
+ observer.record_event(&ObserverEvent::ToolCall {
+ tool: call_name.to_string(),
+ duration: start.elapsed(),
+ success: r.success,
+ });
+ if r.success {
+ Ok(scrub_credentials(&r.output))
+ } else {
+ Ok(format!("Error: {}", r.error.unwrap_or_else(|| r.output)))
+ }
+ }
+ Err(e) => {
+ observer.record_event(&ObserverEvent::ToolCall {
+ tool: call_name.to_string(),
+ duration: start.elapsed(),
+ success: false,
+ });
+ Ok(format!("Error executing {call_name}: {e}"))
+ }
+ }
+}
+
+fn should_execute_tools_in_parallel(
+ tool_calls: &[ParsedToolCall],
+ approval: Option<&ApprovalManager>,
+) -> bool {
+ if tool_calls.len() <= 1 {
+ return false;
+ }
+
+ if let Some(mgr) = approval {
+ if tool_calls.iter().any(|call| mgr.needs_approval(&call.name)) {
+ // Approval-gated calls must keep sequential handling so the caller can
+ // enforce CLI prompt/deny policy consistently.
+ return false;
+ }
+ }
+
+ true
+}
+
+async fn execute_tools_parallel(
+ tool_calls: &[ParsedToolCall],
+    tools_registry: &[Box<dyn Tool>],
+    observer: &dyn Observer,
+    cancellation_token: Option<&CancellationToken>,
+) -> Result<Vec<String>> {
+ let futures: Vec<_> = tool_calls
+ .iter()
+ .map(|call| {
+ execute_one_tool(
+ &call.name,
+ call.arguments.clone(),
+ tools_registry,
+ observer,
+ cancellation_token,
+ )
+ })
+ .collect();
+
+ let results = futures::future::join_all(futures).await;
+ results.into_iter().collect()
+}
+
+async fn execute_tools_sequential(
+ tool_calls: &[ParsedToolCall],
+    tools_registry: &[Box<dyn Tool>],
+    observer: &dyn Observer,
+    approval: Option<&ApprovalManager>,
+    channel_name: &str,
+    cancellation_token: Option<&CancellationToken>,
+) -> Result<Vec<String>> {
+ let mut individual_results: Vec = Vec::with_capacity(tool_calls.len());
+
+ for call in tool_calls {
+ if let Some(mgr) = approval {
+ if mgr.needs_approval(&call.name) {
+ let request = ApprovalRequest {
+ tool_name: call.name.clone(),
+ arguments: call.arguments.clone(),
+ };
+
+ let decision = if channel_name == "cli" {
+ mgr.prompt_cli(&request)
+ } else {
+ ApprovalResponse::No
+ };
+
+ mgr.record_decision(&call.name, &call.arguments, decision, channel_name);
+
+ if decision == ApprovalResponse::No {
+ individual_results.push("Denied by user.".to_string());
+ continue;
+ }
+ }
+ }
+
+ let result = execute_one_tool(
+ &call.name,
+ call.arguments.clone(),
+ tools_registry,
+ observer,
+ cancellation_token,
+ )
+ .await?;
+ individual_results.push(result);
+ }
+
+ Ok(individual_results)
+}
+
// ── Agent Tool-Call Loop ──────────────────────────────────────────────────
// Core agentic iteration: send conversation to the LLM, parse any tool
// calls from the response, execute them, append results to history, and
@@ -1085,86 +1224,34 @@ pub(crate) async fn run_tool_call_loop(
let _ = std::io::stdout().flush();
}
- // Execute each tool call and build results.
- // `individual_results` tracks per-call output so that native-mode history
- // can emit one `role: tool` message per tool call with the correct ID.
+ // Execute tool calls and build results. `individual_results` tracks per-call output so
+ // native-mode history can emit one role=tool message per tool call with the correct ID.
+ //
+ // When multiple tool calls are present and interactive CLI approval is not needed, run
+ // tool executions concurrently for lower wall-clock latency.
let mut tool_results = String::new();
- let mut individual_results: Vec = Vec::new();
- for call in &tool_calls {
- // ── Approval hook ────────────────────────────────
- if let Some(mgr) = approval {
- if mgr.needs_approval(&call.name) {
- let request = ApprovalRequest {
- tool_name: call.name.clone(),
- arguments: call.arguments.clone(),
- };
+ let should_parallel = should_execute_tools_in_parallel(&tool_calls, approval);
+ let individual_results = if should_parallel {
+ execute_tools_parallel(
+ &tool_calls,
+ tools_registry,
+ observer,
+ cancellation_token.as_ref(),
+ )
+ .await?
+ } else {
+ execute_tools_sequential(
+ &tool_calls,
+ tools_registry,
+ observer,
+ approval,
+ channel_name,
+ cancellation_token.as_ref(),
+ )
+ .await?
+ };
- // On CLI, prompt interactively. On other channels where
- // interactive approval is not possible, deny the call to
- // respect the supervised autonomy setting.
- let decision = if channel_name == "cli" {
- mgr.prompt_cli(&request)
- } else {
- ApprovalResponse::No
- };
-
- mgr.record_decision(&call.name, &call.arguments, decision, channel_name);
-
- if decision == ApprovalResponse::No {
- let denied = "Denied by user.".to_string();
- individual_results.push(denied.clone());
- let _ = writeln!(
- tool_results,
- "\n{denied}\n ",
- call.name
- );
- continue;
- }
- }
- }
-
- observer.record_event(&ObserverEvent::ToolCallStart {
- tool: call.name.clone(),
- });
- let start = Instant::now();
- let result = if let Some(tool) = find_tool(tools_registry, &call.name) {
- let tool_future = tool.execute(call.arguments.clone());
- let tool_result = if let Some(token) = cancellation_token.as_ref() {
- tokio::select! {
- () = token.cancelled() => return Err(ToolLoopCancelled.into()),
- result = tool_future => result,
- }
- } else {
- tool_future.await
- };
-
- match tool_result {
- Ok(r) => {
- observer.record_event(&ObserverEvent::ToolCall {
- tool: call.name.clone(),
- duration: start.elapsed(),
- success: r.success,
- });
- if r.success {
- scrub_credentials(&r.output)
- } else {
- format!("Error: {}", r.error.unwrap_or_else(|| r.output))
- }
- }
- Err(e) => {
- observer.record_event(&ObserverEvent::ToolCall {
- tool: call.name.clone(),
- duration: start.elapsed(),
- success: false,
- });
- format!("Error executing {}: {e}", call.name)
- }
- }
- } else {
- format!("Unknown tool: {}", call.name)
- };
-
- individual_results.push(result.clone());
+ for (call, result) in tool_calls.iter().zip(individual_results.iter()) {
let _ = writeln!(
tool_results,
"\n{}\n ",
@@ -1608,9 +1695,7 @@ pub async fn run(
}
// Auto-save conversation turns (skip short/trivial messages)
- if config.memory.auto_save
- && user_input.chars().count() >= AUTOSAVE_MIN_MESSAGE_CHARS
- {
+ if config.memory.auto_save && user_input.chars().count() >= AUTOSAVE_MIN_MESSAGE_CHARS {
let user_key = autosave_memory_key("user_msg");
let _ = mem
.store(&user_key, &user_input, MemoryCategory::Conversation, None)
@@ -1881,8 +1966,10 @@ mod tests {
use super::*;
use async_trait::async_trait;
use base64::{engine::general_purpose::STANDARD, Engine as _};
+ use std::collections::VecDeque;
use std::sync::atomic::{AtomicUsize, Ordering};
- use std::sync::Arc;
+ use std::sync::{Arc, Mutex};
+ use std::time::Duration;
#[test]
fn test_scrub_credentials() {
@@ -1973,6 +2060,121 @@ mod tests {
}
}
+ struct ScriptedProvider {
+ responses: Arc<Mutex<VecDeque<ChatResponse>>>,
+ }
+
+ impl ScriptedProvider {
+ fn from_text_responses(responses: Vec<&str>) -> Self {
+ let scripted = responses
+ .into_iter()
+ .map(|text| ChatResponse {
+ text: Some(text.to_string()),
+ tool_calls: Vec::new(),
+ })
+ .collect();
+ Self {
+ responses: Arc::new(Mutex::new(scripted)),
+ }
+ }
+ }
+
+ #[async_trait]
+ impl Provider for ScriptedProvider {
+ async fn chat_with_system(
+ &self,
+ _system_prompt: Option<&str>,
+ _message: &str,
+ _model: &str,
+ _temperature: f64,
+ ) -> anyhow::Result<String> {
+ anyhow::bail!("chat_with_system should not be used in scripted provider tests");
+ }
+
+ async fn chat(
+ &self,
+ _request: ChatRequest<'_>,
+ _model: &str,
+ _temperature: f64,
+ ) -> anyhow::Result<ChatResponse> {
+ let mut responses = self
+ .responses
+ .lock()
+ .expect("responses lock should be valid");
+ responses
+ .pop_front()
+ .ok_or_else(|| anyhow::anyhow!("scripted provider exhausted responses"))
+ }
+ }
+
+ struct DelayTool {
+ name: String,
+ delay_ms: u64,
+ active: Arc<AtomicUsize>,
+ max_active: Arc<AtomicUsize>,
+ }
+
+ impl DelayTool {
+ fn new(
+ name: &str,
+ delay_ms: u64,
+ active: Arc<AtomicUsize>,
+ max_active: Arc<AtomicUsize>,
+ ) -> Self {
+ Self {
+ name: name.to_string(),
+ delay_ms,
+ active,
+ max_active,
+ }
+ }
+ }
+
+ #[async_trait]
+ impl Tool for DelayTool {
+ fn name(&self) -> &str {
+ &self.name
+ }
+
+ fn description(&self) -> &str {
+ "Delay tool for testing parallel tool execution"
+ }
+
+ fn parameters_schema(&self) -> serde_json::Value {
+ serde_json::json!({
+ "type": "object",
+ "properties": {
+ "value": { "type": "string" }
+ },
+ "required": ["value"]
+ })
+ }
+
+ async fn execute(
+ &self,
+ args: serde_json::Value,
+ ) -> anyhow::Result<crate::tools::ToolResult> {
+ let now_active = self.active.fetch_add(1, Ordering::SeqCst) + 1;
+ self.max_active.fetch_max(now_active, Ordering::SeqCst);
+
+ tokio::time::sleep(Duration::from_millis(self.delay_ms)).await;
+
+ self.active.fetch_sub(1, Ordering::SeqCst);
+
+ let value = args
+ .get("value")
+ .and_then(serde_json::Value::as_str)
+ .unwrap_or_default()
+ .to_string();
+
+ Ok(crate::tools::ToolResult {
+ success: true,
+ output: format!("ok:{value}"),
+ error: None,
+ })
+ }
+ }
+
#[tokio::test]
async fn run_tool_call_loop_returns_structured_error_for_non_vision_provider() {
let calls = Arc::new(AtomicUsize::new(0));
@@ -2091,6 +2293,145 @@ mod tests {
assert_eq!(calls.load(Ordering::SeqCst), 1);
}
+ #[test]
+ fn should_execute_tools_in_parallel_returns_false_for_single_call() {
+ let calls = vec![ParsedToolCall {
+ name: "file_read".to_string(),
+ arguments: serde_json::json!({"path": "a.txt"}),
+ }];
+
+ assert!(!should_execute_tools_in_parallel(&calls, None));
+ }
+
+ #[test]
+ fn should_execute_tools_in_parallel_returns_false_when_approval_is_required() {
+ let calls = vec![
+ ParsedToolCall {
+ name: "shell".to_string(),
+ arguments: serde_json::json!({"command": "pwd"}),
+ },
+ ParsedToolCall {
+ name: "http_request".to_string(),
+ arguments: serde_json::json!({"url": "https://example.com"}),
+ },
+ ];
+ let approval_cfg = crate::config::AutonomyConfig::default();
+ let approval_mgr = ApprovalManager::from_config(&approval_cfg);
+
+ assert!(!should_execute_tools_in_parallel(&calls, Some(&approval_mgr)));
+ }
+
+ #[test]
+ fn should_execute_tools_in_parallel_returns_true_when_cli_has_no_interactive_approvals() {
+ let calls = vec![
+ ParsedToolCall {
+ name: "shell".to_string(),
+ arguments: serde_json::json!({"command": "pwd"}),
+ },
+ ParsedToolCall {
+ name: "http_request".to_string(),
+ arguments: serde_json::json!({"url": "https://example.com"}),
+ },
+ ];
+ let approval_cfg = crate::config::AutonomyConfig {
+ level: crate::security::AutonomyLevel::Full,
+ ..crate::config::AutonomyConfig::default()
+ };
+ let approval_mgr = ApprovalManager::from_config(&approval_cfg);
+
+ assert!(should_execute_tools_in_parallel(&calls, Some(&approval_mgr)));
+ }
+
+ #[tokio::test]
+ async fn run_tool_call_loop_executes_multiple_tools_in_parallel_with_ordered_results() {
+ let provider = ScriptedProvider::from_text_responses(vec![
+ r#"<tool_call>
+{"name":"delay_a","arguments":{"value":"A"}}
+</tool_call>
+<tool_call>
+{"name":"delay_b","arguments":{"value":"B"}}
+</tool_call> "#,
+ "done",
+ ]);
+
+ let active = Arc::new(AtomicUsize::new(0));
+ let max_active = Arc::new(AtomicUsize::new(0));
+ let tools_registry: Vec<Box<dyn Tool>> = vec![
+ Box::new(DelayTool::new(
+ "delay_a",
+ 200,
+ Arc::clone(&active),
+ Arc::clone(&max_active),
+ )),
+ Box::new(DelayTool::new(
+ "delay_b",
+ 200,
+ Arc::clone(&active),
+ Arc::clone(&max_active),
+ )),
+ ];
+
+ let approval_cfg = crate::config::AutonomyConfig {
+ level: crate::security::AutonomyLevel::Full,
+ ..crate::config::AutonomyConfig::default()
+ };
+ let approval_mgr = ApprovalManager::from_config(&approval_cfg);
+
+ let mut history = vec![
+ ChatMessage::system("test-system"),
+ ChatMessage::user("run tool calls"),
+ ];
+ let observer = NoopObserver;
+
+ let started = std::time::Instant::now();
+ let result = run_tool_call_loop(
+ &provider,
+ &mut history,
+ &tools_registry,
+ &observer,
+ "mock-provider",
+ "mock-model",
+ 0.0,
+ true,
+ Some(&approval_mgr),
+ "telegram",
+ &crate::config::MultimodalConfig::default(),
+ 4,
+ None,
+ None,
+ )
+ .await
+ .expect("parallel execution should complete");
+ let elapsed = started.elapsed();
+
+ assert_eq!(result, "done");
+ assert!(
+ elapsed < Duration::from_millis(350),
+ "parallel execution should be faster than sequential fallback; elapsed={elapsed:?}"
+ );
+ assert!(
+ max_active.load(Ordering::SeqCst) >= 2,
+ "both tools should overlap in execution"
+ );
+
+ let tool_results_message = history
+ .iter()
+ .find(|msg| msg.role == "user" && msg.content.starts_with("[Tool results]"))
+ .expect("tool results message should be present");
+ let idx_a = tool_results_message
+ .content
+ .find("name=\"delay_a\"")
+ .expect("delay_a result should be present");
+ let idx_b = tool_results_message
+ .content
+ .find("name=\"delay_b\"")
+ .expect("delay_b result should be present");
+ assert!(
+ idx_a < idx_b,
+ "tool results should preserve input order for tool call mapping"
+ );
+ }
+
#[test]
fn parse_tool_calls_extracts_single_call() {
let response = r#"Let me check that.
From e6961e0eed237fd4a8c39fdee8b6e026be16a8c9 Mon Sep 17 00:00:00 2001
From: Chummy
Date: Fri, 20 Feb 2026 19:45:44 +0800
Subject: [PATCH 040/116] feat(delegate): add safe agentic sub-agent tool loop
---
docs/config-reference.md | 12 +
src/config/schema.rs | 16 ++
src/doctor/mod.rs | 6 +
src/tools/delegate.rs | 484 ++++++++++++++++++++++++++++++++++++++-
src/tools/mod.rs | 96 +++++---
5 files changed, 585 insertions(+), 29 deletions(-)
diff --git a/docs/config-reference.md b/docs/config-reference.md
index 64ae74ffe..7acfdb2d4 100644
--- a/docs/config-reference.md
+++ b/docs/config-reference.md
@@ -90,6 +90,15 @@ Delegate sub-agent configurations. Each key under `[agents]` defines a named sub
| `api_key` | unset | Optional API key override (stored encrypted when `secrets.encrypt = true`) |
| `temperature` | unset | Temperature override for the sub-agent |
| `max_depth` | `3` | Max recursion depth for nested delegation |
+| `agentic` | `false` | Enable multi-turn tool-call loop mode for the sub-agent |
+| `allowed_tools` | `[]` | Tool allowlist for agentic mode |
+| `max_iterations` | `10` | Max tool-call iterations for agentic mode |
+
+Notes:
+
+- `agentic = false` preserves existing single prompt→response delegate behavior.
+- `agentic = true` requires at least one matching entry in `allowed_tools`.
+- The `delegate` tool is excluded from sub-agent allowlists to prevent re-entrant delegation loops.
```toml
[agents.researcher]
@@ -97,6 +106,9 @@ provider = "openrouter"
model = "anthropic/claude-sonnet-4-6"
system_prompt = "You are a research assistant."
max_depth = 2
+agentic = true
+allowed_tools = ["web_search", "http_request", "file_read"]
+max_iterations = 8
[agents.coder]
provider = "ollama"
diff --git a/src/config/schema.rs b/src/config/schema.rs
index f47bb9d02..4347a8c32 100644
--- a/src/config/schema.rs
+++ b/src/config/schema.rs
@@ -208,12 +208,25 @@ pub struct DelegateAgentConfig {
/// Max recursion depth for nested delegation
#[serde(default = "default_max_depth")]
pub max_depth: u32,
+ /// Enable agentic sub-agent mode (multi-turn tool-call loop).
+ #[serde(default)]
+ pub agentic: bool,
+ /// Allowlist of tool names available to the sub-agent in agentic mode.
+ #[serde(default)]
+ pub allowed_tools: Vec<String>,
+ /// Maximum tool-call iterations in agentic mode.
+ #[serde(default = "default_max_tool_iterations")]
+ pub max_iterations: usize,
}
fn default_max_depth() -> u32 {
3
}
+fn default_max_tool_iterations() -> usize {
+ 10
+}
+
// ── Hardware Config (wizard-driven) ─────────────────────────────
/// Hardware transport mode.
@@ -4062,6 +4075,9 @@ tool_dispatcher = "xml"
api_key: Some("agent-credential".into()),
temperature: None,
max_depth: 3,
+ agentic: false,
+ allowed_tools: Vec::new(),
+ max_iterations: 10,
},
);
diff --git a/src/doctor/mod.rs b/src/doctor/mod.rs
index f0335dbeb..bece584d0 100644
--- a/src/doctor/mod.rs
+++ b/src/doctor/mod.rs
@@ -1072,6 +1072,9 @@ mod tests {
api_key: None,
temperature: None,
max_depth: 3,
+ agentic: false,
+ allowed_tools: Vec::new(),
+ max_iterations: 10,
},
);
config.agents.insert(
@@ -1083,6 +1086,9 @@ mod tests {
api_key: None,
temperature: None,
max_depth: 3,
+ agentic: false,
+ allowed_tools: Vec::new(),
+ max_iterations: 10,
},
);
diff --git a/src/tools/delegate.rs b/src/tools/delegate.rs
index 9fa20eeba..94793e1f8 100644
--- a/src/tools/delegate.rs
+++ b/src/tools/delegate.rs
@@ -1,6 +1,8 @@
use super::traits::{Tool, ToolResult};
+use crate::agent::loop_::run_tool_call_loop;
use crate::config::DelegateAgentConfig;
-use crate::providers::{self, Provider};
+use crate::observability::traits::{Observer, ObserverEvent, ObserverMetric};
+use crate::providers::{self, ChatMessage, Provider};
use crate::security::policy::ToolOperation;
use crate::security::SecurityPolicy;
use async_trait::async_trait;
@@ -11,6 +13,8 @@ use std::time::Duration;
/// Default timeout for sub-agent provider calls.
const DELEGATE_TIMEOUT_SECS: u64 = 120;
+/// Default timeout for agentic sub-agent runs.
+const DELEGATE_AGENTIC_TIMEOUT_SECS: u64 = 300;
/// Tool that delegates a subtask to a named agent with a different
/// provider/model configuration. Enables multi-agent workflows where
@@ -25,6 +29,10 @@ pub struct DelegateTool {
provider_runtime_options: providers::ProviderRuntimeOptions,
/// Depth at which this tool instance lives in the delegation chain.
depth: u32,
+ /// Parent tool registry for agentic sub-agents.
+ parent_tools: Arc<Vec<Arc<dyn Tool>>>,
+ /// Inherited multimodal handling config for sub-agent loops.
+ multimodal_config: crate::config::MultimodalConfig,
}
impl DelegateTool {
@@ -53,6 +61,8 @@ impl DelegateTool {
fallback_credential,
provider_runtime_options,
depth: 0,
+ parent_tools: Arc::new(Vec::new()),
+ multimodal_config: crate::config::MultimodalConfig::default(),
}
}
@@ -87,8 +97,22 @@ impl DelegateTool {
fallback_credential,
provider_runtime_options,
depth,
+ parent_tools: Arc::new(Vec::new()),
+ multimodal_config: crate::config::MultimodalConfig::default(),
}
}
+
+ /// Attach parent tools used to build sub-agent allowlist registries.
+ pub fn with_parent_tools(mut self, parent_tools: Arc<Vec<Arc<dyn Tool>>>) -> Self {
+ self.parent_tools = parent_tools;
+ self
+ }
+
+ /// Attach multimodal configuration for sub-agent tool loops.
+ pub fn with_multimodal_config(mut self, config: crate::config::MultimodalConfig) -> Self {
+ self.multimodal_config = config;
+ self
+ }
}
#[async_trait]
@@ -100,7 +124,7 @@ impl Tool for DelegateTool {
fn description(&self) -> &str {
"Delegate a subtask to a specialized agent. Use when: a task benefits from a different model \
(e.g. fast summarization, deep reasoning, code generation). The sub-agent runs a single \
- prompt and returns its response."
+ prompt by default; with agentic=true it can iterate with a filtered tool-call loop."
}
fn parameters_schema(&self) -> serde_json::Value {
@@ -251,6 +275,19 @@ impl Tool for DelegateTool {
let temperature = agent_config.temperature.unwrap_or(0.7);
+ // Agentic mode: run full tool-call loop with allowlisted tools.
+ if agent_config.agentic {
+ return self
+ .execute_agentic(
+ agent_name,
+ agent_config,
+ &*provider,
+ &full_prompt,
+ temperature,
+ )
+ .await;
+ }
+
// Wrap the provider call in a timeout to prevent indefinite blocking
let result = tokio::time::timeout(
Duration::from_secs(DELEGATE_TIMEOUT_SECS),
@@ -302,10 +339,165 @@ impl Tool for DelegateTool {
}
}
+impl DelegateTool {
+ async fn execute_agentic(
+ &self,
+ agent_name: &str,
+ agent_config: &DelegateAgentConfig,
+ provider: &dyn Provider,
+ full_prompt: &str,
+ temperature: f64,
+ ) -> anyhow::Result<ToolResult> {
+ if agent_config.allowed_tools.is_empty() {
+ return Ok(ToolResult {
+ success: false,
+ output: String::new(),
+ error: Some(format!(
+ "Agent '{agent_name}' has agentic=true but allowed_tools is empty"
+ )),
+ });
+ }
+
+ let allowed = agent_config
+ .allowed_tools
+ .iter()
+ .map(|name| name.trim())
+ .filter(|name| !name.is_empty())
+ .collect::<std::collections::HashSet<_>>();
+
+ let sub_tools: Vec<Box<dyn Tool>> = self
+ .parent_tools
+ .iter()
+ .filter(|tool| allowed.contains(tool.name()))
+ .filter(|tool| tool.name() != "delegate")
+ .map(|tool| Box::new(ToolArcRef::new(tool.clone())) as Box)
+ .collect();
+
+ if sub_tools.is_empty() {
+ return Ok(ToolResult {
+ success: false,
+ output: String::new(),
+ error: Some(format!(
+ "Agent '{agent_name}' has no executable tools after filtering allowlist ({})",
+ agent_config.allowed_tools.join(", ")
+ )),
+ });
+ }
+
+ let mut history = Vec::new();
+ if let Some(system_prompt) = agent_config.system_prompt.as_ref() {
+ history.push(ChatMessage::system(system_prompt.clone()));
+ }
+ history.push(ChatMessage::user(full_prompt.to_string()));
+
+ let noop_observer = NoopObserver;
+
+ let result = tokio::time::timeout(
+ Duration::from_secs(DELEGATE_AGENTIC_TIMEOUT_SECS),
+ run_tool_call_loop(
+ provider,
+ &mut history,
+ &sub_tools,
+ &noop_observer,
+ &agent_config.provider,
+ &agent_config.model,
+ temperature,
+ true,
+ None,
+ "delegate",
+ &self.multimodal_config,
+ agent_config.max_iterations,
+ None,
+ None,
+ ),
+ )
+ .await;
+
+ match result {
+ Ok(Ok(response)) => {
+ let rendered = if response.trim().is_empty() {
+ "[Empty response]".to_string()
+ } else {
+ response
+ };
+
+ Ok(ToolResult {
+ success: true,
+ output: format!(
+ "[Agent '{agent_name}' ({provider}/{model}, agentic)]\n{rendered}",
+ provider = agent_config.provider,
+ model = agent_config.model
+ ),
+ error: None,
+ })
+ }
+ Ok(Err(e)) => Ok(ToolResult {
+ success: false,
+ output: String::new(),
+ error: Some(format!("Agent '{agent_name}' failed: {e}")),
+ }),
+ Err(_) => Ok(ToolResult {
+ success: false,
+ output: String::new(),
+ error: Some(format!(
+ "Agent '{agent_name}' timed out after {DELEGATE_AGENTIC_TIMEOUT_SECS}s"
+ )),
+ }),
+ }
+ }
+}
+
+struct ToolArcRef {
+ inner: Arc<dyn Tool>,
+}
+
+impl ToolArcRef {
+ fn new(inner: Arc<dyn Tool>) -> Self {
+ Self { inner }
+ }
+}
+
+#[async_trait]
+impl Tool for ToolArcRef {
+ fn name(&self) -> &str {
+ self.inner.name()
+ }
+
+ fn description(&self) -> &str {
+ self.inner.description()
+ }
+
+ fn parameters_schema(&self) -> serde_json::Value {
+ self.inner.parameters_schema()
+ }
+
+ async fn execute(&self, args: serde_json::Value) -> anyhow::Result {
+ self.inner.execute(args).await
+ }
+}
+
+struct NoopObserver;
+
+impl Observer for NoopObserver {
+ fn record_event(&self, _event: &ObserverEvent) {}
+
+ fn record_metric(&self, _metric: &ObserverMetric) {}
+
+ fn name(&self) -> &str {
+ "noop"
+ }
+
+ fn as_any(&self) -> &dyn std::any::Any {
+ self
+ }
+}
+
#[cfg(test)]
mod tests {
use super::*;
+ use crate::providers::{ChatRequest, ChatResponse, ToolCall};
use crate::security::{AutonomyLevel, SecurityPolicy};
+ use anyhow::anyhow;
fn test_security() -> Arc {
Arc::new(SecurityPolicy::default())
@@ -322,6 +514,9 @@ mod tests {
api_key: None,
temperature: Some(0.3),
max_depth: 3,
+ agentic: false,
+ allowed_tools: Vec::new(),
+ max_iterations: 10,
},
);
agents.insert(
@@ -333,11 +528,159 @@ mod tests {
api_key: Some("delegate-test-credential".to_string()),
temperature: None,
max_depth: 2,
+ agentic: false,
+ allowed_tools: Vec::new(),
+ max_iterations: 10,
},
);
agents
}
+ #[derive(Default)]
+ struct EchoTool;
+
+ #[async_trait]
+ impl Tool for EchoTool {
+ fn name(&self) -> &str {
+ "echo_tool"
+ }
+
+ fn description(&self) -> &str {
+ "Echoes the `value` argument."
+ }
+
+ fn parameters_schema(&self) -> serde_json::Value {
+ serde_json::json!({
+ "type": "object",
+ "properties": {
+ "value": {"type": "string"}
+ },
+ "required": ["value"]
+ })
+ }
+
+ async fn execute(&self, args: serde_json::Value) -> anyhow::Result<ToolResult> {
+ let value = args
+ .get("value")
+ .and_then(serde_json::Value::as_str)
+ .unwrap_or_default()
+ .to_string();
+ Ok(ToolResult {
+ success: true,
+ output: format!("echo:{value}"),
+ error: None,
+ })
+ }
+ }
+
+ struct OneToolThenFinalProvider;
+
+ #[async_trait]
+ impl Provider for OneToolThenFinalProvider {
+ async fn chat_with_system(
+ &self,
+ _system_prompt: Option<&str>,
+ _message: &str,
+ _model: &str,
+ _temperature: f64,
+ ) -> anyhow::Result<String> {
+ Ok("unused".to_string())
+ }
+
+ async fn chat(
+ &self,
+ request: ChatRequest<'_>,
+ _model: &str,
+ _temperature: f64,
+ ) -> anyhow::Result<ChatResponse> {
+ let has_tool_message = request.messages.iter().any(|m| m.role == "tool");
+ if has_tool_message {
+ Ok(ChatResponse {
+ text: Some("done".to_string()),
+ tool_calls: Vec::new(),
+ })
+ } else {
+ Ok(ChatResponse {
+ text: None,
+ tool_calls: vec![ToolCall {
+ id: "call_1".to_string(),
+ name: "echo_tool".to_string(),
+ arguments: "{\"value\":\"ping\"}".to_string(),
+ }],
+ })
+ }
+ }
+ }
+
+ struct InfiniteToolCallProvider;
+
+ #[async_trait]
+ impl Provider for InfiniteToolCallProvider {
+ async fn chat_with_system(
+ &self,
+ _system_prompt: Option<&str>,
+ _message: &str,
+ _model: &str,
+ _temperature: f64,
+ ) -> anyhow::Result<String> {
+ Ok("unused".to_string())
+ }
+
+ async fn chat(
+ &self,
+ _request: ChatRequest<'_>,
+ _model: &str,
+ _temperature: f64,
+ ) -> anyhow::Result<ChatResponse> {
+ Ok(ChatResponse {
+ text: None,
+ tool_calls: vec![ToolCall {
+ id: "loop".to_string(),
+ name: "echo_tool".to_string(),
+ arguments: "{\"value\":\"x\"}".to_string(),
+ }],
+ })
+ }
+ }
+
+ struct FailingProvider;
+
+ #[async_trait]
+ impl Provider for FailingProvider {
+ async fn chat_with_system(
+ &self,
+ _system_prompt: Option<&str>,
+ _message: &str,
+ _model: &str,
+ _temperature: f64,
+ ) -> anyhow::Result<String> {
+ Ok("unused".to_string())
+ }
+
+ async fn chat(
+ &self,
+ _request: ChatRequest<'_>,
+ _model: &str,
+ _temperature: f64,
+ ) -> anyhow::Result<ChatResponse> {
+ Err(anyhow!("provider boom"))
+ }
+ }
+
+ fn agentic_config(allowed_tools: Vec<String>, max_iterations: usize) -> DelegateAgentConfig {
+ DelegateAgentConfig {
+ provider: "openrouter".to_string(),
+ model: "model-test".to_string(),
+ system_prompt: Some("You are agentic.".to_string()),
+ api_key: Some("delegate-test-credential".to_string()),
+ temperature: Some(0.2),
+ max_depth: 3,
+ agentic: true,
+ allowed_tools,
+ max_iterations,
+ }
+ }
+
#[test]
fn name_and_schema() {
let tool = DelegateTool::new(sample_agents(), None, test_security());
@@ -440,6 +783,9 @@ mod tests {
api_key: None,
temperature: None,
max_depth: 3,
+ agentic: false,
+ allowed_tools: Vec::new(),
+ max_iterations: 10,
},
);
let tool = DelegateTool::new(agents, None, test_security());
@@ -543,6 +889,9 @@ mod tests {
api_key: None,
temperature: None,
max_depth: 3,
+ agentic: false,
+ allowed_tools: Vec::new(),
+ max_iterations: 10,
},
);
let tool = DelegateTool::new(agents, None, test_security());
@@ -575,6 +924,9 @@ mod tests {
api_key: None,
temperature: None,
max_depth: 3,
+ agentic: false,
+ allowed_tools: Vec::new(),
+ max_iterations: 10,
},
);
let tool = DelegateTool::new(agents, None, test_security());
@@ -611,4 +963,132 @@ mod tests {
assert!(!result.success);
assert!(result.error.unwrap().contains("none configured"));
}
+
+ #[tokio::test]
+ async fn agentic_mode_rejects_empty_allowed_tools() {
+ let mut agents = HashMap::new();
+ agents.insert("agentic".to_string(), agentic_config(Vec::new(), 10));
+
+ let tool = DelegateTool::new(agents, None, test_security());
+ let result = tool
+ .execute(json!({"agent": "agentic", "prompt": "test"}))
+ .await
+ .unwrap();
+
+ assert!(!result.success);
+ assert!(result
+ .error
+ .as_deref()
+ .unwrap_or("")
+ .contains("allowed_tools is empty"));
+ }
+
+ #[tokio::test]
+ async fn agentic_mode_rejects_unmatched_allowed_tools() {
+ let mut agents = HashMap::new();
+ agents.insert(
+ "agentic".to_string(),
+ agentic_config(vec!["missing_tool".to_string()], 10),
+ );
+
+ let tool = DelegateTool::new(agents, None, test_security())
+ .with_parent_tools(Arc::new(vec![Arc::new(EchoTool)]));
+ let result = tool
+ .execute(json!({"agent": "agentic", "prompt": "test"}))
+ .await
+ .unwrap();
+
+ assert!(!result.success);
+ assert!(result
+ .error
+ .as_deref()
+ .unwrap_or("")
+ .contains("no executable tools"));
+ }
+
+ #[tokio::test]
+ async fn execute_agentic_runs_tool_call_loop_with_filtered_tools() {
+ let config = agentic_config(vec!["echo_tool".to_string()], 10);
+ let tool = DelegateTool::new(HashMap::new(), None, test_security()).with_parent_tools(
+ Arc::new(vec![
+ Arc::new(EchoTool),
+ Arc::new(DelegateTool::new(HashMap::new(), None, test_security())),
+ ]),
+ );
+
+ let provider = OneToolThenFinalProvider;
+ let result = tool
+ .execute_agentic("agentic", &config, &provider, "run", 0.2)
+ .await
+ .unwrap();
+
+ assert!(result.success);
+ assert!(result.output.contains("(openrouter/model-test, agentic)"));
+ assert!(result.output.contains("done"));
+ }
+
+ #[tokio::test]
+ async fn execute_agentic_excludes_delegate_even_if_allowlisted() {
+ let config = agentic_config(vec!["delegate".to_string()], 10);
+ let tool = DelegateTool::new(HashMap::new(), None, test_security()).with_parent_tools(
+ Arc::new(vec![Arc::new(DelegateTool::new(
+ HashMap::new(),
+ None,
+ test_security(),
+ ))]),
+ );
+
+ let provider = OneToolThenFinalProvider;
+ let result = tool
+ .execute_agentic("agentic", &config, &provider, "run", 0.2)
+ .await
+ .unwrap();
+
+ assert!(!result.success);
+ assert!(result
+ .error
+ .as_deref()
+ .unwrap_or("")
+ .contains("no executable tools"));
+ }
+
+ #[tokio::test]
+ async fn execute_agentic_respects_max_iterations() {
+ let config = agentic_config(vec!["echo_tool".to_string()], 2);
+ let tool = DelegateTool::new(HashMap::new(), None, test_security())
+ .with_parent_tools(Arc::new(vec![Arc::new(EchoTool)]));
+
+ let provider = InfiniteToolCallProvider;
+ let result = tool
+ .execute_agentic("agentic", &config, &provider, "run", 0.2)
+ .await
+ .unwrap();
+
+ assert!(!result.success);
+ assert!(result
+ .error
+ .as_deref()
+ .unwrap_or("")
+ .contains("maximum tool iterations (2)"));
+ }
+
+ #[tokio::test]
+ async fn execute_agentic_propagates_provider_errors() {
+ let config = agentic_config(vec!["echo_tool".to_string()], 10);
+ let tool = DelegateTool::new(HashMap::new(), None, test_security())
+ .with_parent_tools(Arc::new(vec![Arc::new(EchoTool)]));
+
+ let provider = FailingProvider;
+ let result = tool
+ .execute_agentic("agentic", &config, &provider, "run", 0.2)
+ .await
+ .unwrap();
+
+ assert!(!result.success);
+ assert!(result
+ .error
+ .as_deref()
+ .unwrap_or("")
+ .contains("provider boom"));
+ }
}
diff --git a/src/tools/mod.rs b/src/tools/mod.rs
index fa139490e..50fec0ca2 100644
--- a/src/tools/mod.rs
+++ b/src/tools/mod.rs
@@ -82,9 +82,44 @@ use crate::config::{Config, DelegateAgentConfig};
use crate::memory::Memory;
use crate::runtime::{NativeRuntime, RuntimeAdapter};
use crate::security::SecurityPolicy;
+use async_trait::async_trait;
use std::collections::HashMap;
use std::sync::Arc;
+#[derive(Clone)]
+struct ArcDelegatingTool {
+ inner: Arc<dyn Tool>,
+}
+
+impl ArcDelegatingTool {
+ fn boxed(inner: Arc<dyn Tool>) -> Box<dyn Tool> {
+ Box::new(Self { inner })
+ }
+}
+
+#[async_trait]
+impl Tool for ArcDelegatingTool {
+ fn name(&self) -> &str {
+ self.inner.name()
+ }
+
+ fn description(&self) -> &str {
+ self.inner.description()
+ }
+
+ fn parameters_schema(&self) -> serde_json::Value {
+ self.inner.parameters_schema()
+ }
+
+ async fn execute(&self, args: serde_json::Value) -> anyhow::Result {
+ self.inner.execute(args).await
+ }
+}
+
+fn boxed_registry_from_arcs(tools: Vec<Arc<dyn Tool>>) -> Vec<Box<dyn Tool>> {
+ tools.into_iter().map(ArcDelegatingTool::boxed).collect()
+}
+
/// Create the default tool registry
pub fn default_tools(security: Arc) -> Vec> {
default_tools_with_runtime(security, Arc::new(NativeRuntime::new()))
@@ -149,26 +184,26 @@ pub fn all_tools_with_runtime(
fallback_api_key: Option<&str>,
root_config: &crate::config::Config,
) -> Vec> {
- let mut tools: Vec<Box<dyn Tool>> = vec![
- Box::new(ShellTool::new(security.clone(), runtime)),
- Box::new(FileReadTool::new(security.clone())),
- Box::new(FileWriteTool::new(security.clone())),
- Box::new(CronAddTool::new(config.clone(), security.clone())),
- Box::new(CronListTool::new(config.clone())),
- Box::new(CronRemoveTool::new(config.clone())),
- Box::new(CronUpdateTool::new(config.clone(), security.clone())),
- Box::new(CronRunTool::new(config.clone())),
- Box::new(CronRunsTool::new(config.clone())),
- Box::new(MemoryStoreTool::new(memory.clone(), security.clone())),
- Box::new(MemoryRecallTool::new(memory.clone())),
- Box::new(MemoryForgetTool::new(memory, security.clone())),
- Box::new(ScheduleTool::new(security.clone(), root_config.clone())),
- Box::new(ProxyConfigTool::new(config.clone(), security.clone())),
- Box::new(GitOperationsTool::new(
+ let mut tool_arcs: Vec<Arc<dyn Tool>> = vec![
+ Arc::new(ShellTool::new(security.clone(), runtime)),
+ Arc::new(FileReadTool::new(security.clone())),
+ Arc::new(FileWriteTool::new(security.clone())),
+ Arc::new(CronAddTool::new(config.clone(), security.clone())),
+ Arc::new(CronListTool::new(config.clone())),
+ Arc::new(CronRemoveTool::new(config.clone())),
+ Arc::new(CronUpdateTool::new(config.clone(), security.clone())),
+ Arc::new(CronRunTool::new(config.clone())),
+ Arc::new(CronRunsTool::new(config.clone())),
+ Arc::new(MemoryStoreTool::new(memory.clone(), security.clone())),
+ Arc::new(MemoryRecallTool::new(memory.clone())),
+ Arc::new(MemoryForgetTool::new(memory, security.clone())),
+ Arc::new(ScheduleTool::new(security.clone(), root_config.clone())),
+ Arc::new(ProxyConfigTool::new(config.clone(), security.clone())),
+ Arc::new(GitOperationsTool::new(
security.clone(),
workspace_dir.to_path_buf(),
)),
- Box::new(PushoverTool::new(
+ Arc::new(PushoverTool::new(
security.clone(),
workspace_dir.to_path_buf(),
)),
@@ -176,12 +211,12 @@ pub fn all_tools_with_runtime(
if browser_config.enabled {
// Add legacy browser_open tool for simple URL opening
- tools.push(Box::new(BrowserOpenTool::new(
+ tool_arcs.push(Arc::new(BrowserOpenTool::new(
security.clone(),
browser_config.allowed_domains.clone(),
)));
// Add full browser automation tool (pluggable backend)
- tools.push(Box::new(BrowserTool::new_with_backend(
+ tool_arcs.push(Arc::new(BrowserTool::new_with_backend(
security.clone(),
browser_config.allowed_domains.clone(),
browser_config.session_name.clone(),
@@ -202,7 +237,7 @@ pub fn all_tools_with_runtime(
}
if http_config.enabled {
- tools.push(Box::new(HttpRequestTool::new(
+ tool_arcs.push(Arc::new(HttpRequestTool::new(
security.clone(),
http_config.allowed_domains.clone(),
http_config.max_response_size,
@@ -212,7 +247,7 @@ pub fn all_tools_with_runtime(
// Web search tool (enabled by default for GLM and other models)
if root_config.web_search.enabled {
- tools.push(Box::new(WebSearchTool::new(
+ tool_arcs.push(Arc::new(WebSearchTool::new(
root_config.web_search.provider.clone(),
root_config.web_search.brave_api_key.clone(),
root_config.web_search.max_results,
@@ -221,12 +256,12 @@ pub fn all_tools_with_runtime(
}
// Vision tools are always available
- tools.push(Box::new(ScreenshotTool::new(security.clone())));
- tools.push(Box::new(ImageInfoTool::new(security.clone())));
+ tool_arcs.push(Arc::new(ScreenshotTool::new(security.clone())));
+ tool_arcs.push(Arc::new(ImageInfoTool::new(security.clone())));
if let Some(key) = composio_key {
if !key.is_empty() {
- tools.push(Box::new(ComposioTool::new(
+ tool_arcs.push(Arc::new(ComposioTool::new(
key,
composio_entity_id,
security.clone(),
@@ -244,7 +279,8 @@ pub fn all_tools_with_runtime(
let trimmed_value = value.trim();
(!trimmed_value.is_empty()).then(|| trimmed_value.to_owned())
});
- tools.push(Box::new(DelegateTool::new_with_options(
+ let parent_tools = Arc::new(tool_arcs.clone());
+ let delegate_tool = DelegateTool::new_with_options(
delegate_agents,
delegate_fallback_credential,
security.clone(),
@@ -257,10 +293,13 @@ pub fn all_tools_with_runtime(
secrets_encrypt: root_config.secrets.encrypt,
reasoning_enabled: root_config.runtime.reasoning_enabled,
},
- )));
+ )
+ .with_parent_tools(parent_tools)
+ .with_multimodal_config(root_config.multimodal.clone());
+ tool_arcs.push(Arc::new(delegate_tool));
}
- tools
+ boxed_registry_from_arcs(tool_arcs)
}
#[cfg(test)]
@@ -482,6 +521,9 @@ mod tests {
api_key: None,
temperature: None,
max_depth: 3,
+ agentic: false,
+ allowed_tools: Vec::new(),
+ max_iterations: 10,
},
);
From 3e868902ab7c07c3923511fbd4f9561f4526a663 Mon Sep 17 00:00:00 2001
From: Will Sarg <12886992+willsarg@users.noreply.github.com>
Date: Fri, 20 Feb 2026 07:10:09 -0500
Subject: [PATCH 041/116] fix(ci): sync release publishing with GHCR and add
runbook (#1087)
---
.github/workflows/main-branch-flow.md | 26 ++--
.github/workflows/pub-release.yml | 196 +++++++++++++++++++++++++-
docs/SUMMARY.md | 1 +
docs/ci-map.md | 7 +-
docs/operations/README.md | 1 +
docs/release-process.md | 112 +++++++++++++++
scripts/release/cut_release_tag.sh | 83 +++++++++++
7 files changed, 409 insertions(+), 17 deletions(-)
create mode 100644 docs/release-process.md
create mode 100755 scripts/release/cut_release_tag.sh
diff --git a/.github/workflows/main-branch-flow.md b/.github/workflows/main-branch-flow.md
index 6490e9708..8ccbdd8ed 100644
--- a/.github/workflows/main-branch-flow.md
+++ b/.github/workflows/main-branch-flow.md
@@ -1,11 +1,12 @@
# Main Branch Delivery Flows
-This document explains what runs when code is proposed to `main`, merged into `main`, and released via tags.
+This document explains what runs when code is proposed to `main`, merged into `main`, and released.
Use this with:
- [`docs/ci-map.md`](../../docs/ci-map.md)
- [`docs/pr-workflow.md`](../../docs/pr-workflow.md)
+- [`docs/release-process.md`](../../docs/release-process.md)
## Event Summary
@@ -14,8 +15,8 @@ Use this with:
| PR activity (`pull_request_target`) | `pr-intake-checks.yml`, `pr-labeler.yml`, `pr-auto-response.yml` |
| PR activity (`pull_request`) | `ci-run.yml`, `sec-audit.yml`, plus path-scoped `pub-docker-img.yml`, `workflow-sanity.yml`, `pr-label-policy-check.yml` |
| Push to `main` | `ci-run.yml`, `sec-audit.yml`, plus path-scoped workflows |
-| Tag push (`v*`) | `pub-release.yml`, `pub-docker-img.yml` publish job |
-| Scheduled/manual | `sec-codeql.yml`, `feature-matrix.yml`, `test-fuzz.yml`, `pr-check-stale.yml`, `pr-check-status.yml`, `sync-contributors.yml`, `test-benchmarks.yml`, `test-e2e.yml` |
+| Tag push (`v*`) | `pub-release.yml` publish mode, `pub-docker-img.yml` publish job |
+| Scheduled/manual | `pub-release.yml` verification mode, `sec-codeql.yml`, `feature-matrix.yml`, `test-fuzz.yml`, `pr-check-stale.yml`, `pr-check-status.yml`, `sync-contributors.yml`, `test-benchmarks.yml`, `test-e2e.yml` |
## Runtime and Docker Matrix
@@ -32,7 +33,7 @@ Observed averages below are from recent completed runs (sampled from GitHub Acti
| `pr-label-policy-check.yml` | Label policy/automation changes | 14.7s | No | No | No |
| `pub-docker-img.yml` (`pull_request`) | Docker build-input PR changes | 240.4s | Yes | Yes | No |
| `pub-docker-img.yml` (`push`/`workflow_dispatch`) | `main` push (build-input paths), tag push `v*`, or manual dispatch | 139.9s | Yes | No | Yes |
-| `pub-release.yml` | Tag push `v*` | N/A in recent sample | No | No | No |
+| `pub-release.yml` | Tag push `v*` (publish) + manual/scheduled verification (no publish) | N/A in recent sample | No | No | No |
Notes:
@@ -153,12 +154,15 @@ Important: Docker publish now runs on qualifying `main` pushes; no release tag i
Workflow: `.github/workflows/pub-release.yml`
-1. Triggered only on tag push `v*`.
-2. Builds release artifacts across matrix targets.
-3. Generates SBOM (`CycloneDX` + `SPDX`).
-4. Generates `SHA256SUMS`.
-5. Signs artifacts with keyless cosign.
-6. Publishes GitHub Release with artifacts.
+1. Trigger modes:
+ - Tag push `v*` -> publish mode.
+ - Manual dispatch -> verification-only or publish mode (input-driven).
+ - Weekly schedule -> verification-only mode.
+2. `prepare` resolves release context (`release_ref`, `release_tag`, publish/draft mode) and validates manual publish inputs.
+3. `build-release` builds matrix artifacts across Linux/macOS/Windows targets.
+4. `verify-artifacts` enforces presence of all expected archives before any publish attempt.
+5. In publish mode, workflow generates SBOM (`CycloneDX` + `SPDX`), `SHA256SUMS`, keyless cosign signatures, and verifies GHCR release-tag availability.
+6. In publish mode, workflow creates/updates the GitHub Release for the resolved tag and commit-ish.
## Merge/Policy Notes
@@ -199,8 +203,10 @@ flowchart TD
A --> C["sec-audit.yml"]
A --> D["path-scoped workflows (if matched)"]
T["Tag push v*"] --> R["pub-release.yml"]
+ W["Manual/Scheduled release verify"] --> R
T --> P["pub-docker-img.yml publish job"]
R --> R1["Artifacts + SBOM + checksums + signatures + GitHub Release"]
+ W --> R2["Verification build only (no GitHub Release publish)"]
P --> P1["Push ghcr image tags (version + sha)"]
```
diff --git a/.github/workflows/pub-release.yml b/.github/workflows/pub-release.yml
index 14677b112..05812d4bb 100644
--- a/.github/workflows/pub-release.yml
+++ b/.github/workflows/pub-release.yml
@@ -3,21 +3,129 @@ name: Pub Release
on:
push:
tags: ["v*"]
+ workflow_dispatch:
+ inputs:
+ release_ref:
+ description: "Git ref (branch, tag, or SHA) to build"
+ required: false
+ default: "main"
+ type: string
+ publish_release:
+ description: "Publish a GitHub release (false = verification build only)"
+ required: false
+ default: false
+ type: boolean
+ release_tag:
+ description: "Existing release tag (required when publish_release=true), e.g. v0.1.1"
+ required: false
+ default: ""
+ type: string
+ draft:
+ description: "Create release as draft (manual publish only)"
+ required: false
+ default: true
+ type: boolean
+ schedule:
+ # Weekly release-readiness verification on default branch (no publish)
+ - cron: "17 8 * * 1"
concurrency:
- group: release
+ group: release-${{ github.ref || github.run_id }}
cancel-in-progress: false
permissions:
contents: write
+ packages: read
id-token: write # Required for cosign keyless signing via OIDC
env:
CARGO_TERM_COLOR: always
jobs:
+ prepare:
+ name: Prepare Release Context
+ runs-on: blacksmith-2vcpu-ubuntu-2404
+ outputs:
+ release_ref: ${{ steps.vars.outputs.release_ref }}
+ release_tag: ${{ steps.vars.outputs.release_tag }}
+ publish_release: ${{ steps.vars.outputs.publish_release }}
+ draft_release: ${{ steps.vars.outputs.draft_release }}
+ steps:
+ - name: Resolve release inputs
+ id: vars
+ shell: bash
+ run: |
+ set -euo pipefail
+
+ event_name="${GITHUB_EVENT_NAME}"
+ publish_release="false"
+ draft_release="false"
+ semver_pattern='^v[0-9]+\.[0-9]+\.[0-9]+([.-][0-9A-Za-z.-]+)?$'
+
+ if [[ "$event_name" == "push" ]]; then
+ release_ref="${GITHUB_REF_NAME}"
+ release_tag="${GITHUB_REF_NAME}"
+ publish_release="true"
+ elif [[ "$event_name" == "workflow_dispatch" ]]; then
+ release_ref="${{ inputs.release_ref }}"
+ publish_release="${{ inputs.publish_release }}"
+ draft_release="${{ inputs.draft }}"
+
+ if [[ "$publish_release" == "true" ]]; then
+ release_tag="${{ inputs.release_tag }}"
+ if [[ -z "$release_tag" ]]; then
+ echo "::error::release_tag is required when publish_release=true"
+ exit 1
+ fi
+ release_ref="$release_tag"
+ else
+ release_tag="verify-${GITHUB_SHA::12}"
+ fi
+ else
+ # schedule
+ release_ref="main"
+ release_tag="verify-${GITHUB_SHA::12}"
+ fi
+
+ if [[ "$publish_release" == "true" ]]; then
+ if [[ ! "$release_tag" =~ $semver_pattern ]]; then
+ echo "::error::release_tag must match semver-like format (vX.Y.Z[-suffix])"
+ exit 1
+ fi
+ if ! git ls-remote --exit-code --tags "https://github.com/${GITHUB_REPOSITORY}.git" "refs/tags/${release_tag}" >/dev/null; then
+ echo "::error::Tag ${release_tag} does not exist on origin. Push the tag first, then rerun manual publish."
+ exit 1
+ fi
+
+ # Guardrail: release tags must resolve to commits already reachable from main.
+ tmp_repo="$(mktemp -d)"
+ trap 'rm -rf "$tmp_repo"' EXIT
+ git -C "$tmp_repo" init -q
+ git -C "$tmp_repo" remote add origin "https://github.com/${GITHUB_REPOSITORY}.git"
+ git -C "$tmp_repo" fetch --quiet --filter=blob:none origin main "refs/tags/${release_tag}:refs/tags/${release_tag}"
+ if ! git -C "$tmp_repo" merge-base --is-ancestor "refs/tags/${release_tag}" "origin/main"; then
+ echo "::error::Tag ${release_tag} is not reachable from origin/main. Release tags must be cut from main."
+ exit 1
+ fi
+ fi
+
+ echo "release_ref=${release_ref}" >> "$GITHUB_OUTPUT"
+ echo "release_tag=${release_tag}" >> "$GITHUB_OUTPUT"
+ echo "publish_release=${publish_release}" >> "$GITHUB_OUTPUT"
+ echo "draft_release=${draft_release}" >> "$GITHUB_OUTPUT"
+
+ {
+ echo "### Release Context"
+ echo "- event: ${event_name}"
+ echo "- release_ref: ${release_ref}"
+ echo "- release_tag: ${release_tag}"
+ echo "- publish_release: ${publish_release}"
+ echo "- draft_release: ${draft_release}"
+ } >> "$GITHUB_STEP_SUMMARY"
+
build-release:
name: Build ${{ matrix.target }}
+ needs: [prepare]
runs-on: ${{ matrix.os }}
timeout-minutes: 40
strategy:
@@ -69,9 +177,12 @@ jobs:
steps:
- uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
+ with:
+ ref: ${{ needs.prepare.outputs.release_ref }}
- uses: dtolnay/rust-toolchain@631a55b12751854ce901bb631d5902ceb48146f7 # stable
with:
+ toolchain: 1.92.0
targets: ${{ matrix.target }}
- uses: useblacksmith/rust-cache@f53e7f127245d2a269b3d90879ccf259876842d5 # v3
@@ -136,13 +247,53 @@ jobs:
path: zeroclaw-${{ matrix.target }}.${{ matrix.archive_ext }}
retention-days: 7
+ verify-artifacts:
+ name: Verify Artifact Set
+ needs: [prepare, build-release]
+ runs-on: blacksmith-2vcpu-ubuntu-2404
+ steps:
+ - name: Download all artifacts
+ uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0
+ with:
+ path: artifacts
+
+ - name: Validate expected archives
+ shell: bash
+ run: |
+ set -euo pipefail
+ expected=(
+ "zeroclaw-x86_64-unknown-linux-gnu.tar.gz"
+ "zeroclaw-aarch64-unknown-linux-gnu.tar.gz"
+ "zeroclaw-armv7-unknown-linux-gnueabihf.tar.gz"
+ "zeroclaw-x86_64-apple-darwin.tar.gz"
+ "zeroclaw-aarch64-apple-darwin.tar.gz"
+ "zeroclaw-x86_64-pc-windows-msvc.zip"
+ )
+
+ missing=0
+ for file in "${expected[@]}"; do
+ if ! find artifacts -type f -name "$file" -print -quit | grep -q .; then
+ echo "::error::Missing release archive: $file"
+ missing=1
+ fi
+ done
+
+ if [ "$missing" -ne 0 ]; then
+ exit 1
+ fi
+
+ echo "All expected release archives are present."
+
publish:
name: Publish Release
- needs: build-release
+ if: needs.prepare.outputs.publish_release == 'true'
+ needs: [prepare, verify-artifacts]
runs-on: blacksmith-2vcpu-ubuntu-2404
timeout-minutes: 15
steps:
- uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
+ with:
+ ref: ${{ needs.prepare.outputs.release_ref }}
- name: Download all artifacts
uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0
@@ -173,19 +324,56 @@ jobs:
uses: sigstore/cosign-installer@faadad0cce49287aee09b3a48701e75088a2c6ad # v4.0.0
- name: Sign artifacts with cosign (keyless)
+ shell: bash
run: |
- for file in artifacts/**/*; do
- [ -f "$file" ] || continue
+ set -euo pipefail
+ while IFS= read -r -d '' file; do
cosign sign-blob --yes \
--oidc-issuer=https://token.actions.githubusercontent.com \
--output-signature="${file}.sig" \
--output-certificate="${file}.pem" \
"$file"
+ done < <(find artifacts -type f ! -name '*.sig' ! -name '*.pem' -print0)
+
+ - name: Verify GHCR release tag availability
+ shell: bash
+ env:
+ RELEASE_TAG: ${{ needs.prepare.outputs.release_tag }}
+ run: |
+ set -euo pipefail
+ repo="${GITHUB_REPOSITORY,,}"
+ manifest_url="https://ghcr.io/v2/${repo}/manifests/${RELEASE_TAG}"
+ accept_header="application/vnd.oci.image.index.v1+json, application/vnd.docker.distribution.manifest.v2+json"
+ max_attempts=18
+ sleep_seconds=20
+
+ for attempt in $(seq 1 "$max_attempts"); do
+ code="$(curl -sS -o /tmp/ghcr-release-manifest.json -w "%{http_code}" \
+ -u "${GITHUB_ACTOR}:${{ secrets.GITHUB_TOKEN }}" \
+ -H "Accept: ${accept_header}" \
+ "${manifest_url}" || true)"
+
+ if [ "$code" = "200" ]; then
+ echo "GHCR release tag is available: ${repo}:${RELEASE_TAG}"
+ exit 0
+ fi
+
+ if [ "$attempt" -lt "$max_attempts" ]; then
+ echo "Waiting for GHCR tag ${repo}:${RELEASE_TAG} (attempt ${attempt}/${max_attempts}, HTTP ${code})..."
+ sleep "$sleep_seconds"
+ fi
done
+ echo "::error::GHCR tag ${repo}:${RELEASE_TAG} was not available before release publish timeout."
+ cat /tmp/ghcr-release-manifest.json || true
+ exit 1
+
- name: Create GitHub Release
uses: softprops/action-gh-release@a06a81a03ee405af7f2048a818ed3f03bbf83c7b # v2
with:
+ tag_name: ${{ needs.prepare.outputs.release_tag }}
+ target_commitish: ${{ needs.prepare.outputs.release_ref }}
+ draft: ${{ needs.prepare.outputs.draft_release == 'true' }}
generate_release_notes: true
files: |
artifacts/**/*
diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md
index 10ce8aeac..6bd2e9270 100644
--- a/docs/SUMMARY.md
+++ b/docs/SUMMARY.md
@@ -37,6 +37,7 @@ Last refreshed: **February 18, 2026**.
- [operations/README.md](operations/README.md)
- [operations-runbook.md](operations-runbook.md)
+- [release-process.md](release-process.md)
- [troubleshooting.md](troubleshooting.md)
- [network-deployment.md](network-deployment.md)
- [mattermost-setup.md](mattermost-setup.md)
diff --git a/docs/ci-map.md b/docs/ci-map.md
index eb7b6041b..53a890e61 100644
--- a/docs/ci-map.md
+++ b/docs/ci-map.md
@@ -30,7 +30,7 @@ Merge-blocking checks should stay small and deterministic. Optional checks are u
- `.github/workflows/sec-codeql.yml` (`CodeQL Analysis`)
- Purpose: scheduled/manual static analysis for security findings
- `.github/workflows/pub-release.yml` (`Release`)
- - Purpose: build tagged release artifacts and publish GitHub releases
+ - Purpose: build release artifacts in verification mode (manual/scheduled) and publish GitHub releases on tag push or manual publish mode
- `.github/workflows/pr-label-policy-check.yml` (`Label Policy Sanity`)
- Purpose: validate shared contributor-tier policy in `.github/label-policy.json` and ensure label workflows consume that policy
- `.github/workflows/test-rust-build.yml` (`Rust Reusable Job`)
@@ -67,7 +67,7 @@ Merge-blocking checks should stay small and deterministic. Optional checks are u
- `CI`: push to `main`, PRs to `main`
- `Docker`: push to `main` when Docker build inputs change, tag push (`v*`), matching PRs, manual dispatch
-- `Release`: tag push (`v*`)
+- `Release`: tag push (`v*`), weekly schedule (verification-only), manual dispatch (verification or publish)
- `Security Audit`: push to `main`, PRs to `main`, weekly schedule
- `Workflow Sanity`: PR/push when `.github/workflows/**`, `.github/*.yml`, or `.github/*.yaml` change
- `PR Intake Checks`: `pull_request_target` on opened/reopened/synchronize/edited/ready_for_review
@@ -82,7 +82,7 @@ Merge-blocking checks should stay small and deterministic. Optional checks are u
1. `CI Required Gate` failing: start with `.github/workflows/ci-run.yml`.
2. Docker failures on PRs: inspect `.github/workflows/pub-docker-img.yml` `pr-smoke` job.
-3. Release failures on tags: inspect `.github/workflows/pub-release.yml`.
+3. Release failures (tag/manual/scheduled): inspect `.github/workflows/pub-release.yml` and the `prepare` job outputs.
4. Security failures: inspect `.github/workflows/sec-audit.yml` and `deny.toml`.
5. Workflow syntax/lint failures: inspect `.github/workflows/workflow-sanity.yml`.
6. PR intake failures: inspect `.github/workflows/pr-intake-checks.yml` sticky comment and run logs.
@@ -93,6 +93,7 @@ Merge-blocking checks should stay small and deterministic. Optional checks are u
## Maintenance Rules
- Keep merge-blocking checks deterministic and reproducible (`--locked` where applicable).
+- Follow `docs/release-process.md` for verify-before-publish release cadence and tag discipline.
- Keep merge-blocking rust quality policy aligned across `.github/workflows/ci-run.yml`, `dev/ci.sh`, and `.githooks/pre-push` (`./scripts/ci/rust_quality_gate.sh` + `./scripts/ci/rust_strict_delta_gate.sh`).
- Use `./scripts/ci/rust_strict_delta_gate.sh` (or `./dev/ci.sh lint-delta`) as the incremental strict merge gate for changed Rust lines.
- Run full strict lint audits regularly via `./scripts/ci/rust_quality_gate.sh --strict` (for example through `./dev/ci.sh lint-strict`) and track cleanup in focused PRs.
diff --git a/docs/operations/README.md b/docs/operations/README.md
index b208d20c1..876c637ac 100644
--- a/docs/operations/README.md
+++ b/docs/operations/README.md
@@ -5,6 +5,7 @@ For operators running ZeroClaw in persistent or production-like environments.
## Core Operations
- Day-2 runbook: [../operations-runbook.md](../operations-runbook.md)
+- Release runbook: [../release-process.md](../release-process.md)
- Troubleshooting matrix: [../troubleshooting.md](../troubleshooting.md)
- Safe network/gateway deployment: [../network-deployment.md](../network-deployment.md)
- Mattermost setup (channel-specific): [../mattermost-setup.md](../mattermost-setup.md)
diff --git a/docs/release-process.md b/docs/release-process.md
new file mode 100644
index 000000000..c1e18d3f2
--- /dev/null
+++ b/docs/release-process.md
@@ -0,0 +1,112 @@
+# ZeroClaw Release Process
+
+This runbook defines the maintainers' standard release flow.
+
+Last verified: **February 20, 2026**.
+
+## Release Goals
+
+- Keep releases predictable and repeatable.
+- Publish only from code already in `main`.
+- Verify multi-target artifacts before publish.
+- Keep release cadence regular even with high PR volume.
+
+## Standard Cadence
+
+- Patch/minor releases: weekly or bi-weekly.
+- Emergency security fixes: out-of-band.
+- Never wait for very large commit batches to accumulate.
+
+## Workflow Contract
+
+Release automation lives in:
+
+- `.github/workflows/pub-release.yml`
+
+Modes:
+
+- Tag push `v*`: publish mode.
+- Manual dispatch: verification-only or publish mode.
+- Weekly schedule: verification-only mode.
+
+Publish-mode guardrails:
+
+- Tag must match semver-like format `vX.Y.Z[-suffix]`.
+- Tag must already exist on origin.
+- Tag commit must be reachable from `origin/main`.
+- Matching GHCR image tag (`ghcr.io/<owner>/<repo>:<release_tag>`) must be available before GitHub Release publish completes.
+- Artifacts are verified before publish.
+
+## Maintainer Procedure
+
+### 1) Preflight on `main`
+
+1. Ensure required checks are green on latest `main`.
+2. Confirm no high-priority incidents or known regressions are open.
+3. Confirm installer and Docker workflows are healthy on recent `main` commits.
+
+### 2) Run verification build (no publish)
+
+Run `Pub Release` manually:
+
+- `publish_release`: `false`
+- `release_ref`: `main`
+
+Expected outcome:
+
+- Full target matrix builds successfully.
+- `verify-artifacts` confirms all expected archives exist.
+- No GitHub Release is published.
+
+### 3) Cut release tag
+
+From a clean local checkout synced to `origin/main`:
+
+```bash
+scripts/release/cut_release_tag.sh vX.Y.Z --push
+```
+
+This script enforces:
+
+- clean working tree
+- `HEAD == origin/main`
+- non-duplicate tag
+- semver-like tag format
+
+### 4) Monitor publish run
+
+After tag push, monitor:
+
+1. `Pub Release` publish mode
+2. `Pub Docker Img` publish job
+
+Expected publish outputs:
+
+- release archives
+- `SHA256SUMS`
+- `CycloneDX` and `SPDX` SBOMs
+- cosign signatures/certificates
+- GitHub Release notes + assets
+
+### 5) Post-release validation
+
+1. Verify GitHub Release assets are downloadable.
+2. Verify GHCR tags for the released version and `latest`.
+3. Verify install paths that rely on release assets (for example bootstrap binary download).
+
+## Emergency / Recovery Path
+
+If tag-push release fails after artifacts are validated:
+
+1. Fix workflow or packaging issue on `main`.
+2. Re-run manual `Pub Release` in publish mode with:
+ - `publish_release=true`
+   - `release_tag=<vX.Y.Z>` (the existing tag to re-publish)
+ - `release_ref` is automatically pinned to `release_tag` in publish mode
+3. Re-validate released assets.
+
+## Operational Notes
+
+- Keep release changes small and reversible.
+- Prefer one release issue/checklist per version so handoff is clear.
+- Avoid publishing from ad-hoc feature branches.
diff --git a/scripts/release/cut_release_tag.sh b/scripts/release/cut_release_tag.sh
new file mode 100755
index 000000000..612898307
--- /dev/null
+++ b/scripts/release/cut_release_tag.sh
@@ -0,0 +1,83 @@
+#!/usr/bin/env bash
+set -euo pipefail
+
+usage() {
+ cat <<'USAGE'
+Usage: scripts/release/cut_release_tag.sh <tag> [--push]
+
+Create an annotated release tag from the current checkout.
+
+Requirements:
+- tag must match vX.Y.Z (optional suffix like -rc.1)
+- working tree must be clean
+- HEAD must match origin/main
+- tag must not already exist locally or on origin
+
+Options:
+ --push Push the tag to origin after creating it
+USAGE
+}
+
+if [[ $# -lt 1 || $# -gt 2 ]]; then
+ usage
+ exit 1
+fi
+
+TAG="$1"
+PUSH_TAG="false"
+if [[ $# -eq 2 ]]; then
+ if [[ "$2" != "--push" ]]; then
+ usage
+ exit 1
+ fi
+ PUSH_TAG="true"
+fi
+
+SEMVER_PATTERN='^v[0-9]+\.[0-9]+\.[0-9]+([.-][0-9A-Za-z.-]+)?$'
+if [[ ! "$TAG" =~ $SEMVER_PATTERN ]]; then
+ echo "error: tag must match vX.Y.Z or vX.Y.Z-suffix (received: $TAG)" >&2
+ exit 1
+fi
+
+if ! git rev-parse --is-inside-work-tree >/dev/null 2>&1; then
+ echo "error: run this script inside the git repository" >&2
+ exit 1
+fi
+
+if ! git diff --quiet || ! git diff --cached --quiet; then
+ echo "error: working tree is not clean; commit or stash changes first" >&2
+ exit 1
+fi
+
+echo "Fetching origin/main and tags..."
+git fetch --quiet origin main --tags
+
+HEAD_SHA="$(git rev-parse HEAD)"
+MAIN_SHA="$(git rev-parse origin/main)"
+if [[ "$HEAD_SHA" != "$MAIN_SHA" ]]; then
+ echo "error: HEAD ($HEAD_SHA) is not origin/main ($MAIN_SHA)." >&2
+ echo "hint: checkout/update main before cutting a release tag." >&2
+ exit 1
+fi
+
+if git show-ref --tags --verify --quiet "refs/tags/$TAG"; then
+ echo "error: tag already exists locally: $TAG" >&2
+ exit 1
+fi
+
+if git ls-remote --exit-code --tags origin "refs/tags/$TAG" >/dev/null 2>&1; then
+ echo "error: tag already exists on origin: $TAG" >&2
+ exit 1
+fi
+
+MESSAGE="zeroclaw $TAG"
+git tag -a "$TAG" -m "$MESSAGE"
+echo "Created annotated tag: $TAG"
+
+if [[ "$PUSH_TAG" == "true" ]]; then
+ git push origin "$TAG"
+ echo "Pushed tag to origin: $TAG"
+ echo "GitHub release pipeline will run via .github/workflows/pub-release.yml"
+else
+ echo "Next step: git push origin $TAG"
+fi
From 9f194130f7613c3f4971b4c9f205473826d55635 Mon Sep 17 00:00:00 2001
From: Chummy
Date: Fri, 20 Feb 2026 20:04:21 +0800
Subject: [PATCH 042/116] fix(lark): refresh expired tenant access token on
code 99991663
---
docs/channels-reference.md | 6 ++
src/channels/lark.rs | 208 +++++++++++++++++++++++++++++++------
2 files changed, 183 insertions(+), 31 deletions(-)
diff --git a/docs/channels-reference.md b/docs/channels-reference.md
index 9c99b288f..7108ab2b1 100644
--- a/docs/channels-reference.md
+++ b/docs/channels-reference.md
@@ -312,6 +312,12 @@ The wizard now includes a dedicated **Lark/Feishu** step with:
- receive mode selection (`websocket` or `webhook`)
- optional webhook verification token prompt (recommended for stronger callback authenticity checks)
+Runtime token behavior:
+
+- `tenant_access_token` is cached with a refresh deadline based on `expire`/`expires_in` from the auth response.
+- send requests automatically retry once after token invalidation when Feishu/Lark returns either HTTP `401` or business error code `99991663` (`Invalid access token`).
+- if the retry still returns token-invalid responses, the send call fails with the upstream status/body for easier troubleshooting.
+
### 4.12 DingTalk
```toml
diff --git a/src/channels/lark.rs b/src/channels/lark.rs
index c899097c5..4febdf82c 100644
--- a/src/channels/lark.rs
+++ b/src/channels/lark.rs
@@ -126,6 +126,12 @@ struct LarkMessage {
/// Heartbeat timeout for WS connection — must be larger than ping_interval (default 120 s).
/// If no binary frame (pong or event) is received within this window, reconnect.
const WS_HEARTBEAT_TIMEOUT: Duration = Duration::from_secs(300);
+/// Refresh tenant token this many seconds before the announced expiry.
+const LARK_TOKEN_REFRESH_SKEW: Duration = Duration::from_secs(120);
+/// Fallback tenant token TTL when `expire`/`expires_in` is absent.
+const LARK_DEFAULT_TOKEN_TTL: Duration = Duration::from_secs(7200);
+/// Feishu/Lark API business code for expired/invalid tenant access token.
+const LARK_INVALID_ACCESS_TOKEN_CODE: i64 = 99_991_663;
/// Returns true when the WebSocket frame indicates live traffic that should
/// refresh the heartbeat watchdog.
@@ -133,6 +139,64 @@ fn should_refresh_last_recv(msg: &WsMsg) -> bool {
matches!(msg, WsMsg::Binary(_) | WsMsg::Ping(_) | WsMsg::Pong(_))
}
+#[derive(Debug, Clone)]
+struct CachedTenantToken {
+ value: String,
+ refresh_after: Instant,
+}
+
+fn extract_lark_response_code(body: &serde_json::Value) -> Option<i64> {
+ body.get("code").and_then(|c| c.as_i64())
+}
+
+fn is_lark_invalid_access_token(body: &serde_json::Value) -> bool {
+ extract_lark_response_code(body) == Some(LARK_INVALID_ACCESS_TOKEN_CODE)
+}
+
+fn should_refresh_lark_tenant_token(status: reqwest::StatusCode, body: &serde_json::Value) -> bool {
+ status == reqwest::StatusCode::UNAUTHORIZED || is_lark_invalid_access_token(body)
+}
+
+fn extract_lark_token_ttl_seconds(body: &serde_json::Value) -> u64 {
+ let ttl = body
+ .get("expire")
+ .or_else(|| body.get("expires_in"))
+ .and_then(|v| v.as_u64())
+ .or_else(|| {
+ body.get("expire")
+ .or_else(|| body.get("expires_in"))
+ .and_then(|v| v.as_i64())
+ .and_then(|v| u64::try_from(v).ok())
+ })
+ .unwrap_or(LARK_DEFAULT_TOKEN_TTL.as_secs());
+ ttl.max(1)
+}
+
+fn next_token_refresh_deadline(now: Instant, ttl_seconds: u64) -> Instant {
+ let ttl = Duration::from_secs(ttl_seconds.max(1));
+ let refresh_in = ttl
+ .checked_sub(LARK_TOKEN_REFRESH_SKEW)
+ .unwrap_or(Duration::from_secs(1));
+ now + refresh_in
+}
+
+fn ensure_lark_send_success(
+ status: reqwest::StatusCode,
+ body: &serde_json::Value,
+ context: &str,
+) -> anyhow::Result<()> {
+ if !status.is_success() {
+ anyhow::bail!("Lark send failed {context}: status={status}, body={body}");
+ }
+
+ let code = extract_lark_response_code(body).unwrap_or(0);
+ if code != 0 {
+ anyhow::bail!("Lark send failed {context}: code={code}, body={body}");
+ }
+
+ Ok(())
+}
+
/// Lark/Feishu channel.
///
/// Supports two receive modes (configured via `receive_mode` in config):
@@ -149,7 +213,7 @@ pub struct LarkChannel {
/// How to receive events: WebSocket long-connection or HTTP webhook.
receive_mode: crate::config::schema::LarkReceiveMode,
/// Cached tenant access token
-    tenant_token: Arc<RwLock<Option<String>>>,
+    tenant_token: Arc<RwLock<Option<CachedTenantToken>>>,
/// Dedup set: WS message_ids seen in last ~30 min to prevent double-dispatch
ws_seen_ids: Arc>>,
}
@@ -496,7 +560,9 @@ impl LarkChannel {
{
let cached = self.tenant_token.read().await;
if let Some(ref token) = *cached {
- return Ok(token.clone());
+ if Instant::now() < token.refresh_after {
+ return Ok(token.value.clone());
+ }
}
}
@@ -507,8 +573,13 @@ impl LarkChannel {
});
let resp = self.http_client().post(&url).json(&body).send().await?;
+ let status = resp.status();
let data: serde_json::Value = resp.json().await?;
+ if !status.is_success() {
+ anyhow::bail!("Lark tenant_access_token request failed: status={status}, body={data}");
+ }
+
let code = data.get("code").and_then(|c| c.as_i64()).unwrap_or(-1);
if code != 0 {
let msg = data
@@ -524,21 +595,48 @@ impl LarkChannel {
.ok_or_else(|| anyhow::anyhow!("missing tenant_access_token in response"))?
.to_string();
- // Cache it
+ let ttl_seconds = extract_lark_token_ttl_seconds(&data);
+ let refresh_after = next_token_refresh_deadline(Instant::now(), ttl_seconds);
+
+ // Cache it with proactive refresh metadata.
{
let mut cached = self.tenant_token.write().await;
- *cached = Some(token.clone());
+ *cached = Some(CachedTenantToken {
+ value: token.clone(),
+ refresh_after,
+ });
}
Ok(token)
}
- /// Invalidate cached token (called on 401)
+ /// Invalidate cached token (called when API reports an expired tenant token).
async fn invalidate_token(&self) {
let mut cached = self.tenant_token.write().await;
*cached = None;
}
+ async fn send_text_once(
+ &self,
+ url: &str,
+ token: &str,
+ body: &serde_json::Value,
+ ) -> anyhow::Result<(reqwest::StatusCode, serde_json::Value)> {
+ let resp = self
+ .http_client()
+ .post(url)
+ .header("Authorization", format!("Bearer {token}"))
+ .header("Content-Type", "application/json; charset=utf-8")
+ .json(body)
+ .send()
+ .await?;
+ let status = resp.status();
+ let raw = resp.text().await.unwrap_or_default();
+        let parsed = serde_json::from_str::<serde_json::Value>(&raw)
+ .unwrap_or_else(|_| serde_json::json!({ "raw": raw }));
+ Ok((status, parsed))
+ }
+
/// Parse an event callback payload and extract text messages
pub fn parse_event_payload(&self, payload: &serde_json::Value) -> Vec {
let mut messages = Vec::new();
@@ -660,40 +758,26 @@ impl Channel for LarkChannel {
"content": content,
});
- let resp = self
- .http_client()
- .post(&url)
- .header("Authorization", format!("Bearer {token}"))
- .header("Content-Type", "application/json; charset=utf-8")
- .json(&body)
- .send()
- .await?;
+ let (status, response) = self.send_text_once(&url, &token, &body).await?;
- if resp.status().as_u16() == 401 {
- // Token expired, invalidate and retry once
+ if should_refresh_lark_tenant_token(status, &response) {
+ // Token expired/invalid, invalidate and retry once.
self.invalidate_token().await;
let new_token = self.get_tenant_access_token().await?;
- let retry_resp = self
- .http_client()
- .post(&url)
- .header("Authorization", format!("Bearer {new_token}"))
- .header("Content-Type", "application/json; charset=utf-8")
- .json(&body)
- .send()
- .await?;
+ let (retry_status, retry_response) =
+ self.send_text_once(&url, &new_token, &body).await?;
- if !retry_resp.status().is_success() {
- let err = retry_resp.text().await.unwrap_or_default();
- anyhow::bail!("Lark send failed after token refresh: {err}");
+ if should_refresh_lark_tenant_token(retry_status, &retry_response) {
+ anyhow::bail!(
+ "Lark send failed after token refresh: status={retry_status}, body={retry_response}"
+ );
}
+
+ ensure_lark_send_success(retry_status, &retry_response, "after token refresh")?;
return Ok(());
}
- if !resp.status().is_success() {
- let err = resp.text().await.unwrap_or_default();
- anyhow::bail!("Lark send failed: {err}");
- }
-
+ ensure_lark_send_success(status, &response, "without token refresh")?;
Ok(())
}
@@ -930,6 +1014,68 @@ mod tests {
assert!(!should_refresh_last_recv(&WsMsg::Close(None)));
}
+ #[test]
+ fn lark_should_refresh_token_on_http_401() {
+ let body = serde_json::json!({ "code": 0 });
+ assert!(should_refresh_lark_tenant_token(
+ reqwest::StatusCode::UNAUTHORIZED,
+ &body
+ ));
+ }
+
+ #[test]
+ fn lark_should_refresh_token_on_body_code_99991663() {
+ let body = serde_json::json!({
+ "code": LARK_INVALID_ACCESS_TOKEN_CODE,
+ "msg": "Invalid access token for authorization."
+ });
+ assert!(should_refresh_lark_tenant_token(
+ reqwest::StatusCode::OK,
+ &body
+ ));
+ }
+
+ #[test]
+ fn lark_should_not_refresh_token_on_success_body() {
+ let body = serde_json::json!({ "code": 0, "msg": "ok" });
+ assert!(!should_refresh_lark_tenant_token(
+ reqwest::StatusCode::OK,
+ &body
+ ));
+ }
+
+ #[test]
+ fn lark_extract_token_ttl_seconds_supports_expire_and_expires_in() {
+ let body_expire = serde_json::json!({ "expire": 7200 });
+ let body_expires_in = serde_json::json!({ "expires_in": 3600 });
+ let body_missing = serde_json::json!({});
+ assert_eq!(extract_lark_token_ttl_seconds(&body_expire), 7200);
+ assert_eq!(extract_lark_token_ttl_seconds(&body_expires_in), 3600);
+ assert_eq!(
+ extract_lark_token_ttl_seconds(&body_missing),
+ LARK_DEFAULT_TOKEN_TTL.as_secs()
+ );
+ }
+
+ #[test]
+ fn lark_next_token_refresh_deadline_reserves_refresh_skew() {
+ let now = Instant::now();
+ let regular = next_token_refresh_deadline(now, 7200);
+ let short_ttl = next_token_refresh_deadline(now, 60);
+
+ assert_eq!(regular.duration_since(now), Duration::from_secs(7080));
+ assert_eq!(short_ttl.duration_since(now), Duration::from_secs(1));
+ }
+
+ #[test]
+ fn lark_ensure_send_success_rejects_non_zero_code() {
+ let ok = serde_json::json!({ "code": 0 });
+ let bad = serde_json::json!({ "code": 12345, "msg": "bad request" });
+
+ assert!(ensure_lark_send_success(reqwest::StatusCode::OK, &ok, "test").is_ok());
+ assert!(ensure_lark_send_success(reqwest::StatusCode::OK, &bad, "test").is_err());
+ }
+
#[test]
fn lark_user_allowed_exact() {
let ch = make_channel();
From 1f86727a2a4faa3c261b272aa2eec7d750f9a6c6 Mon Sep 17 00:00:00 2001
From: Chummy
Date: Fri, 20 Feb 2026 20:12:27 +0800
Subject: [PATCH 043/116] feat(provider): add first-class llama.cpp provider
flow
---
README.md | 22 ++++-
docs/commands-reference.md | 2 +-
docs/custom-providers.md | 37 +++++++-
docs/providers-reference.md | 10 ++-
src/onboard/wizard.rs | 174 +++++++++++++++++++++++++++++++++---
src/providers/mod.rs | 31 +++++++
6 files changed, 261 insertions(+), 15 deletions(-)
diff --git a/README.md b/README.md
index 163a6b6dd..ff6a93c68 100644
--- a/README.md
+++ b/README.md
@@ -383,7 +383,7 @@ Every subsystem is a **trait** — swap implementations with a config change, ze
| Subsystem | Trait | Ships with | Extend |
|-----------|-------|------------|--------|
-| **AI Models** | `Provider` | Provider catalog via `zeroclaw providers` (currently 28 built-ins + aliases, plus custom endpoints) | `custom:https://your-api.com` (OpenAI-compatible) or `anthropic-custom:https://your-api.com` |
+| **AI Models** | `Provider` | Provider catalog via `zeroclaw providers` (currently 29 built-ins + aliases, plus custom endpoints) | `custom:https://your-api.com` (OpenAI-compatible) or `anthropic-custom:https://your-api.com` |
| **Channels** | `Channel` | CLI, Telegram, Discord, Slack, Mattermost, iMessage, Matrix, Signal, WhatsApp, Email, IRC, Lark, DingTalk, QQ, Webhook | Any messaging API |
| **Memory** | `Memory` | SQLite hybrid search, PostgreSQL backend (configurable storage provider), Lucid bridge, Markdown files, explicit `none` backend, snapshot/hydrate, optional response cache | Any persistence backend |
| **Tools** | `Tool` | shell/file/memory, cron/schedule, git, pushover, browser, http_request, screenshot/image_info, composio (opt-in), delegate, hardware tools | Any capability |
@@ -727,6 +727,26 @@ api_url = "https://ollama.com"
api_key = "ollama_api_key_here"
```
+### llama.cpp Server Endpoint
+
+ZeroClaw now supports `llama-server` as a first-class local provider:
+
+- Provider ID: `llamacpp` (alias: `llama.cpp`)
+- Default endpoint: `http://localhost:8080/v1`
+- API key is optional unless your server is started with `--api-key`
+
+Example setup:
+
+```bash
+llama-server -hf ggml-org/gpt-oss-20b-GGUF --jinja -c 133000 --host 127.0.0.1 --port 8033
+```
+
+```toml
+default_provider = "llamacpp"
+api_url = "http://127.0.0.1:8033/v1"
+default_model = "ggml-org/gpt-oss-20b-GGUF"
+```
+
### Custom Provider Endpoints
For detailed configuration of custom OpenAI-compatible and Anthropic-compatible endpoints, see [docs/custom-providers.md](docs/custom-providers.md).
diff --git a/docs/commands-reference.md b/docs/commands-reference.md
index ba2d45e95..40ed488bb 100644
--- a/docs/commands-reference.md
+++ b/docs/commands-reference.md
@@ -75,7 +75,7 @@ Last verified: **February 20, 2026**.
- `zeroclaw models refresh --provider `
- `zeroclaw models refresh --force`
-`models refresh` currently supports live catalog refresh for provider IDs: `openrouter`, `openai`, `anthropic`, `groq`, `mistral`, `deepseek`, `xai`, `together-ai`, `gemini`, `ollama`, `astrai`, `venice`, `fireworks`, `cohere`, `moonshot`, `glm`, `zai`, `qwen`, and `nvidia`.
+`models refresh` currently supports live catalog refresh for provider IDs: `openrouter`, `openai`, `anthropic`, `groq`, `mistral`, `deepseek`, `xai`, `together-ai`, `gemini`, `ollama`, `llamacpp`, `astrai`, `venice`, `fireworks`, `cohere`, `moonshot`, `glm`, `zai`, `qwen`, and `nvidia`.
### `channel`
diff --git a/docs/custom-providers.md b/docs/custom-providers.md
index 8b83521c9..c7b3bdae7 100644
--- a/docs/custom-providers.md
+++ b/docs/custom-providers.md
@@ -46,6 +46,38 @@ export API_KEY="your-api-key"
zeroclaw agent
```
+## llama.cpp Server (Recommended Local Setup)
+
+ZeroClaw includes a first-class local provider for `llama-server`:
+
+- Provider ID: `llamacpp` (alias: `llama.cpp`)
+- Default endpoint: `http://localhost:8080/v1`
+- API key is optional unless `llama-server` is started with `--api-key`
+
+Start a local server (example):
+
+```bash
+llama-server -hf ggml-org/gpt-oss-20b-GGUF --jinja -c 133000 --host 127.0.0.1 --port 8033
+```
+
+Then configure ZeroClaw:
+
+```toml
+default_provider = "llamacpp"
+api_url = "http://127.0.0.1:8033/v1"
+default_model = "ggml-org/gpt-oss-20b-GGUF"
+default_temperature = 0.7
+```
+
+Quick validation:
+
+```bash
+zeroclaw models refresh --provider llamacpp
+zeroclaw agent -m "hello"
+```
+
+You do not need to export `ZEROCLAW_API_KEY=dummy` for this flow.
+
## Testing Configuration
Verify your custom endpoint:
@@ -88,10 +120,11 @@ curl -sS https://your-api.com/models \
## Examples
-### Local LLM Server
+### Local LLM Server (Generic Custom Endpoint)
```toml
-default_provider = "custom:http://localhost:8080"
+default_provider = "custom:http://localhost:8080/v1"
+api_key = "your-api-key-if-required"
default_model = "local-model"
```
diff --git a/docs/providers-reference.md b/docs/providers-reference.md
index f9c772660..420d61ec1 100644
--- a/docs/providers-reference.md
+++ b/docs/providers-reference.md
@@ -2,7 +2,7 @@
This document maps provider IDs, aliases, and credential environment variables.
-Last verified: **February 19, 2026**.
+Last verified: **February 20, 2026**.
## How to List Providers
@@ -54,6 +54,7 @@ credential is not reused for fallback providers.
| `cohere` | — | No | `COHERE_API_KEY` |
| `copilot` | `github-copilot` | No | (use config/`API_KEY` fallback with GitHub token) |
| `lmstudio` | `lm-studio` | Yes | (optional; local by default) |
+| `llamacpp` | `llama.cpp` | Yes | `LLAMACPP_API_KEY` (optional; only if server auth is enabled) |
| `nvidia` | `nvidia-nim`, `build.nvidia.com` | No | `NVIDIA_API_KEY` |
### Gemini Notes
@@ -70,6 +71,13 @@ credential is not reused for fallback providers.
- After multimodal normalization, ZeroClaw sends image payloads through Ollama's native `messages[].images` field.
- If a non-vision provider is selected, ZeroClaw returns a structured capability error instead of silently ignoring images.
+### llama.cpp Server Notes
+
+- Provider ID: `llamacpp` (alias: `llama.cpp`)
+- Default endpoint: `http://localhost:8080/v1`
+- API key is optional by default; set `LLAMACPP_API_KEY` only when `llama-server` is started with `--api-key`.
+- Model discovery: `zeroclaw models refresh --provider llamacpp`
+
### Bedrock Notes
- Provider ID: `bedrock` (alias: `aws-bedrock`)
diff --git a/src/onboard/wizard.rs b/src/onboard/wizard.rs
index 9ba0975bd..dc77261c1 100644
--- a/src/onboard/wizard.rs
+++ b/src/onboard/wizard.rs
@@ -536,6 +536,7 @@ fn canonical_provider_name(provider_name: &str) -> &str {
"kimi_coding" | "kimi_for_coding" => "kimi-code",
"nvidia-nim" | "build.nvidia.com" => "nvidia",
"aws-bedrock" => "bedrock",
+ "llama.cpp" => "llamacpp",
_ => provider_name,
}
}
@@ -543,7 +544,7 @@ fn canonical_provider_name(provider_name: &str) -> &str {
fn allows_unauthenticated_model_fetch(provider_name: &str) -> bool {
matches!(
canonical_provider_name(provider_name),
- "openrouter" | "ollama" | "venice" | "astrai" | "nvidia"
+ "openrouter" | "ollama" | "llamacpp" | "venice" | "astrai" | "nvidia"
)
}
@@ -577,6 +578,7 @@ fn default_model_for_provider(provider: &str) -> String {
"qwen" => "qwen-plus".into(),
"qwen-code" => "qwen3-coder-plus".into(),
"ollama" => "llama3.2".into(),
+ "llamacpp" => "ggml-org/gpt-oss-20b-GGUF".into(),
"gemini" => "gemini-2.5-pro".into(),
"kimi-code" => "kimi-for-coding".into(),
"bedrock" => "anthropic.claude-sonnet-4-5-20250929-v1:0".into(),
@@ -911,6 +913,20 @@ fn curated_models_for_provider(provider_name: &str) -> Vec<(String, String)> {
("codellama".to_string(), "Code Llama".to_string()),
("phi3".to_string(), "Phi-3 (small, fast)".to_string()),
],
+ "llamacpp" => vec![
+ (
+ "ggml-org/gpt-oss-20b-GGUF".to_string(),
+ "GPT-OSS 20B GGUF (llama.cpp server example)".to_string(),
+ ),
+ (
+ "bartowski/Llama-3.3-70B-Instruct-GGUF".to_string(),
+ "Llama 3.3 70B GGUF (high quality)".to_string(),
+ ),
+ (
+ "Qwen/Qwen2.5-Coder-7B-Instruct-GGUF".to_string(),
+ "Qwen2.5 Coder 7B GGUF (coding-focused)".to_string(),
+ ),
+ ],
"bedrock" => vec![
(
"anthropic.claude-sonnet-4-6".to_string(),
@@ -964,6 +980,7 @@ fn supports_live_model_fetch(provider_name: &str) -> bool {
| "together-ai"
| "gemini"
| "ollama"
+ | "llamacpp"
| "astrai"
| "venice"
| "fireworks"
@@ -999,6 +1016,7 @@ fn models_endpoint_for_provider(provider_name: &str) -> Option<&'static str> {
"qwen" => Some("https://dashscope.aliyuncs.com/compatible-mode/v1/models"),
"nvidia" => Some("https://integrate.api.nvidia.com/v1/models"),
"astrai" => Some("https://as-trai.com/v1/models"),
+ "llamacpp" => Some("http://localhost:8080/v1/models"),
_ => None,
},
}
@@ -1193,7 +1211,28 @@ fn fetch_ollama_models() -> Result> {
Ok(parse_ollama_model_ids(&payload))
}
-fn fetch_live_models_for_provider(provider_name: &str, api_key: &str) -> Result> {
+fn resolve_live_models_endpoint(provider_name: &str, provider_api_url: Option<&str>) -> Option {
+ if canonical_provider_name(provider_name) == "llamacpp" {
+ if let Some(url) = provider_api_url
+ .map(str::trim)
+ .filter(|url| !url.is_empty())
+ {
+ let normalized = url.trim_end_matches('/');
+ if normalized.ends_with("/models") {
+ return Some(normalized.to_string());
+ }
+ return Some(format!("{normalized}/models"));
+ }
+ }
+
+ models_endpoint_for_provider(provider_name).map(str::to_string)
+}
+
+fn fetch_live_models_for_provider(
+ provider_name: &str,
+ api_key: &str,
+ provider_api_url: Option<&str>,
+) -> Result> {
let requested_provider_name = provider_name;
let provider_name = canonical_provider_name(provider_name);
let api_key = if api_key.trim().is_empty() {
@@ -1239,10 +1278,16 @@ fn fetch_live_models_for_provider(provider_name: &str, api_key: &str) -> Result<
}
}
_ => {
- if let Some(endpoint) = models_endpoint_for_provider(requested_provider_name) {
+ if let Some(endpoint) =
+ resolve_live_models_endpoint(requested_provider_name, provider_api_url)
+ {
let allow_unauthenticated =
allows_unauthenticated_model_fetch(requested_provider_name);
- fetch_openai_compatible_models(endpoint, api_key.as_deref(), allow_unauthenticated)?
+ fetch_openai_compatible_models(
+ &endpoint,
+ api_key.as_deref(),
+ allow_unauthenticated,
+ )?
} else {
Vec::new()
}
@@ -1466,7 +1511,7 @@ pub fn run_models_refresh(
let api_key = config.api_key.clone().unwrap_or_default();
- match fetch_live_models_for_provider(&provider_name, &api_key) {
+ match fetch_live_models_for_provider(&provider_name, &api_key, config.api_url.as_deref()) {
Ok(models) if !models.is_empty() => {
cache_live_models_for_provider(&config.workspace_dir, &provider_name, &models)?;
println!(
@@ -1592,7 +1637,7 @@ fn setup_provider(workspace_dir: &Path) -> Result<(String, String, String, Optio
"⚡ Fast inference (Groq, Fireworks, Together AI, NVIDIA NIM)",
"🌐 Gateway / proxy (Vercel AI, Cloudflare AI, Amazon Bedrock)",
"🔬 Specialized (Moonshot/Kimi, GLM/Zhipu, MiniMax, Qwen/DashScope, Qianfan, Z.AI, Synthetic, OpenCode Zen, Cohere)",
- "🏠 Local / private (Ollama — no API key needed)",
+ "🏠 Local / private (Ollama, llama.cpp server — no API key needed)",
"🔧 Custom — bring your own OpenAI-compatible API",
];
@@ -1670,7 +1715,13 @@ fn setup_provider(workspace_dir: &Path) -> Result<(String, String, String, Optio
("opencode", "OpenCode Zen — code-focused AI"),
("cohere", "Cohere — Command R+ & embeddings"),
],
- 4 => vec![("ollama", "Ollama — local models (Llama, Mistral, Phi)")],
+ 4 => vec![
+ ("ollama", "Ollama — local models (Llama, Mistral, Phi)"),
+ (
+ "llamacpp",
+ "llama.cpp server — local OpenAI-compatible endpoint",
+ ),
+ ],
_ => vec![], // Custom — handled below
};
@@ -1774,6 +1825,37 @@ fn setup_provider(workspace_dir: &Path) -> Result<(String, String, String, Optio
print_bullet("Using local Ollama at http://localhost:11434 (no API key needed).");
String::new()
}
+ } else if matches!(provider_name, "llamacpp" | "llama.cpp") {
+ let raw_url: String = Input::new()
+ .with_prompt(" llama.cpp server endpoint URL")
+ .default("http://localhost:8080/v1".into())
+ .interact_text()?;
+
+ let normalized_url = raw_url.trim().trim_end_matches('/').to_string();
+ if normalized_url.is_empty() {
+ anyhow::bail!("llama.cpp endpoint URL cannot be empty.");
+ }
+ provider_api_url = Some(normalized_url.clone());
+
+ print_bullet(&format!(
+ "Using llama.cpp server endpoint: {}",
+ style(&normalized_url).cyan()
+ ));
+ print_bullet("No API key needed unless your llama.cpp server is started with --api-key.");
+
+ let key: String = Input::new()
+ .with_prompt(" API key for llama.cpp server (or Enter to skip)")
+ .allow_empty(true)
+ .interact_text()?;
+
+ if key.trim().is_empty() {
+ print_bullet(&format!(
+ "No API key provided. Set {} later only if your server requires authentication.",
+ style("LLAMACPP_API_KEY").yellow()
+ ));
+ }
+
+ key
} else if canonical_provider_name(provider_name) == "gemini" {
// Special handling for Gemini: check for CLI auth first
if crate::providers::gemini::GeminiProvider::has_cli_credentials() {
@@ -2026,7 +2108,11 @@ fn setup_provider(workspace_dir: &Path) -> Result<(String, String, String, Optio
.interact()?;
if should_fetch_now {
- match fetch_live_models_for_provider(provider_name, &api_key) {
+ match fetch_live_models_for_provider(
+ provider_name,
+ &api_key,
+ provider_api_url.as_deref(),
+ ) {
Ok(live_model_ids) if !live_model_ids.is_empty() => {
cache_live_models_for_provider(
workspace_dir,
@@ -2159,6 +2245,7 @@ fn provider_env_var(name: &str) -> &'static str {
"anthropic" => "ANTHROPIC_API_KEY",
"openai" => "OPENAI_API_KEY",
"ollama" => "OLLAMA_API_KEY",
+ "llamacpp" => "LLAMACPP_API_KEY",
"venice" => "VENICE_API_KEY",
"groq" => "GROQ_API_KEY",
"mistral" => "MISTRAL_API_KEY",
@@ -2187,6 +2274,13 @@ fn provider_env_var(name: &str) -> &'static str {
}
}
+fn provider_supports_keyless_local_usage(provider_name: &str) -> bool {
+ matches!(
+ canonical_provider_name(provider_name),
+ "ollama" | "llamacpp"
+ )
+}
+
// ── Step 5: Tool Mode & Security ────────────────────────────────
fn setup_tool_mode() -> Result<(ComposioConfig, SecretsConfig)> {
@@ -4671,8 +4765,8 @@ fn print_summary(config: &Config) {
let mut step = 1u8;
- if config.api_key.is_none() {
- let provider = config.default_provider.as_deref().unwrap_or("openrouter");
+ let provider = config.default_provider.as_deref().unwrap_or("openrouter");
+ if config.api_key.is_none() && !provider_supports_keyless_local_usage(provider) {
if provider == "openai-codex" {
println!(
" {} Authenticate OpenAI Codex:",
@@ -5321,6 +5415,10 @@ mod tests {
default_model_for_provider("nvidia-nim"),
"meta/llama-3.3-70b-instruct"
);
+ assert_eq!(
+ default_model_for_provider("llamacpp"),
+ "ggml-org/gpt-oss-20b-GGUF"
+ );
assert_eq!(
default_model_for_provider("astrai"),
"anthropic/claude-sonnet-4.6"
@@ -5345,6 +5443,7 @@ mod tests {
assert_eq!(canonical_provider_name("nvidia-nim"), "nvidia");
assert_eq!(canonical_provider_name("aws-bedrock"), "bedrock");
assert_eq!(canonical_provider_name("build.nvidia.com"), "nvidia");
+ assert_eq!(canonical_provider_name("llama.cpp"), "llamacpp");
}
#[test]
@@ -5428,6 +5527,8 @@ mod tests {
assert!(allows_unauthenticated_model_fetch("build.nvidia.com"));
assert!(allows_unauthenticated_model_fetch("astrai"));
assert!(allows_unauthenticated_model_fetch("ollama"));
+ assert!(allows_unauthenticated_model_fetch("llamacpp"));
+ assert!(allows_unauthenticated_model_fetch("llama.cpp"));
assert!(!allows_unauthenticated_model_fetch("openai"));
assert!(!allows_unauthenticated_model_fetch("deepseek"));
}
@@ -5467,6 +5568,8 @@ mod tests {
assert!(supports_live_model_fetch("nvidia-nim"));
assert!(supports_live_model_fetch("build.nvidia.com"));
assert!(supports_live_model_fetch("ollama"));
+ assert!(supports_live_model_fetch("llamacpp"));
+ assert!(supports_live_model_fetch("llama.cpp"));
assert!(supports_live_model_fetch("astrai"));
assert!(supports_live_model_fetch("venice"));
assert!(supports_live_model_fetch("glm-cn"));
@@ -5517,6 +5620,10 @@ mod tests {
curated_models_for_provider("nvidia"),
curated_models_for_provider("build.nvidia.com")
);
+ assert_eq!(
+ curated_models_for_provider("llamacpp"),
+ curated_models_for_provider("llama.cpp")
+ );
assert_eq!(
curated_models_for_provider("bedrock"),
curated_models_for_provider("aws-bedrock")
@@ -5565,10 +5672,47 @@ mod tests {
models_endpoint_for_provider("moonshot"),
Some("https://api.moonshot.ai/v1/models")
);
+ assert_eq!(
+ models_endpoint_for_provider("llamacpp"),
+ Some("http://localhost:8080/v1/models")
+ );
+ assert_eq!(
+ models_endpoint_for_provider("llama.cpp"),
+ Some("http://localhost:8080/v1/models")
+ );
assert_eq!(models_endpoint_for_provider("perplexity"), None);
assert_eq!(models_endpoint_for_provider("unknown-provider"), None);
}
+ #[test]
+ fn resolve_live_models_endpoint_prefers_llamacpp_custom_url() {
+ assert_eq!(
+ resolve_live_models_endpoint("llamacpp", Some("http://127.0.0.1:8033/v1")),
+ Some("http://127.0.0.1:8033/v1/models".to_string())
+ );
+ assert_eq!(
+ resolve_live_models_endpoint("llama.cpp", Some("http://127.0.0.1:8033/v1/")),
+ Some("http://127.0.0.1:8033/v1/models".to_string())
+ );
+ assert_eq!(
+ resolve_live_models_endpoint("llamacpp", Some("http://127.0.0.1:8033/v1/models")),
+ Some("http://127.0.0.1:8033/v1/models".to_string())
+ );
+ }
+
+ #[test]
+ fn resolve_live_models_endpoint_falls_back_to_provider_defaults() {
+ assert_eq!(
+ resolve_live_models_endpoint("llamacpp", None),
+ Some("http://localhost:8080/v1/models".to_string())
+ );
+ assert_eq!(
+ resolve_live_models_endpoint("venice", Some("http://localhost:9999/v1")),
+ Some("https://api.venice.ai/api/v1/models".to_string())
+ );
+ assert_eq!(resolve_live_models_endpoint("unknown-provider", None), None);
+ }
+
#[test]
fn parse_openai_model_ids_supports_data_array_payload() {
let payload = json!({
@@ -5716,6 +5860,8 @@ mod tests {
assert_eq!(provider_env_var("anthropic"), "ANTHROPIC_API_KEY");
assert_eq!(provider_env_var("openai"), "OPENAI_API_KEY");
assert_eq!(provider_env_var("ollama"), "OLLAMA_API_KEY");
+ assert_eq!(provider_env_var("llamacpp"), "LLAMACPP_API_KEY");
+ assert_eq!(provider_env_var("llama.cpp"), "LLAMACPP_API_KEY");
assert_eq!(provider_env_var("xai"), "XAI_API_KEY");
assert_eq!(provider_env_var("grok"), "XAI_API_KEY"); // alias
assert_eq!(provider_env_var("together"), "TOGETHER_API_KEY"); // alias
@@ -5743,6 +5889,14 @@ mod tests {
assert_eq!(provider_env_var("astrai"), "ASTRAI_API_KEY");
}
+ #[test]
+ fn provider_supports_keyless_local_usage_for_local_providers() {
+ assert!(provider_supports_keyless_local_usage("ollama"));
+ assert!(provider_supports_keyless_local_usage("llamacpp"));
+ assert!(provider_supports_keyless_local_usage("llama.cpp"));
+ assert!(!provider_supports_keyless_local_usage("openai"));
+ }
+
#[test]
fn provider_env_var_unknown_falls_back() {
assert_eq!(provider_env_var("some-new-provider"), "API_KEY");
diff --git a/src/providers/mod.rs b/src/providers/mod.rs
index 1de295691..8f0bc7331 100644
--- a/src/providers/mod.rs
+++ b/src/providers/mod.rs
@@ -833,6 +833,7 @@ fn resolve_provider_credential(name: &str, credential_override: Option<&str>) ->
"cloudflare" | "cloudflare-ai" => vec!["CLOUDFLARE_API_KEY"],
"ovhcloud" | "ovh" => vec!["OVH_AI_ENDPOINTS_ACCESS_TOKEN"],
"astrai" => vec!["ASTRAI_API_KEY"],
+ "llamacpp" | "llama.cpp" => vec!["LLAMACPP_API_KEY"],
_ => vec![],
};
@@ -1075,6 +1076,22 @@ fn create_provider_with_url_and_options(
AuthStyle::Bearer,
)))
}
+ "llamacpp" | "llama.cpp" => {
+ let base_url = api_url
+ .map(str::trim)
+ .filter(|value| !value.is_empty())
+ .unwrap_or("http://localhost:8080/v1");
+ let llama_cpp_key = key
+ .map(str::trim)
+ .filter(|value| !value.is_empty())
+ .unwrap_or("llama.cpp");
+ Ok(Box::new(OpenAiCompatibleProvider::new(
+ "llama.cpp",
+ base_url,
+ Some(llama_cpp_key),
+ AuthStyle::Bearer,
+ )))
+ }
"nvidia" | "nvidia-nim" | "build.nvidia.com" => Ok(Box::new(
OpenAiCompatibleProvider::new(
"NVIDIA NIM",
@@ -1516,6 +1533,12 @@ pub fn list_providers() -> Vec {
aliases: &["lm-studio"],
local: true,
},
+ ProviderInfo {
+ name: "llamacpp",
+ display_name: "llama.cpp server",
+ aliases: &["llama.cpp"],
+ local: true,
+ },
ProviderInfo {
name: "nvidia",
display_name: "NVIDIA NIM",
@@ -1949,6 +1972,13 @@ mod tests {
assert!(create_provider("lmstudio", None).is_ok());
}
+ #[test]
+ fn factory_llamacpp() {
+ assert!(create_provider("llamacpp", Some("key")).is_ok());
+ assert!(create_provider("llama.cpp", Some("key")).is_ok());
+ assert!(create_provider("llamacpp", None).is_ok());
+ }
+
// ── Extended ecosystem ───────────────────────────────────
#[test]
@@ -2297,6 +2327,7 @@ mod tests {
"qwen-us",
"qwen-code",
"lmstudio",
+ "llamacpp",
"groq",
"mistral",
"xai",
From a9a35d50d19e6fc699e3ab129bd5fb4713f6e0fe Mon Sep 17 00:00:00 2001
From: Will Sarg <12886992+willsarg@users.noreply.github.com>
Date: Fri, 20 Feb 2026 07:48:58 -0500
Subject: [PATCH 044/116] fix(ci): restore containerized validation on main
(#1096)
---
src/agent/agent.rs | 5 ++++-
src/agent/loop_.rs | 10 +++++++--
src/channels/telegram.rs | 5 +++--
src/cron/scheduler.rs | 4 ++--
src/gateway/mod.rs | 3 +--
src/hardware/mod.rs | 46 ++++++++++++++++++++++++++++++++--------
src/peripherals/mod.rs | 15 ++++++++++---
src/providers/bedrock.rs | 14 ++----------
src/providers/mod.rs | 15 ++++++++++---
src/util.rs | 2 +-
10 files changed, 82 insertions(+), 37 deletions(-)
diff --git a/src/agent/agent.rs b/src/agent/agent.rs
index 5f048e2a5..466a23368 100644
--- a/src/agent/agent.rs
+++ b/src/agent/agent.rs
@@ -403,7 +403,10 @@ impl Agent {
return results;
}
- let futs: Vec<_> = calls.iter().map(|call| self.execute_tool_call(call)).collect();
+ let futs: Vec<_> = calls
+ .iter()
+ .map(|call| self.execute_tool_call(call))
+ .collect();
futures::future::join_all(futs).await
}
diff --git a/src/agent/loop_.rs b/src/agent/loop_.rs
index ebf8bc51b..58f80ba2a 100644
--- a/src/agent/loop_.rs
+++ b/src/agent/loop_.rs
@@ -2318,7 +2318,10 @@ mod tests {
let approval_cfg = crate::config::AutonomyConfig::default();
let approval_mgr = ApprovalManager::from_config(&approval_cfg);
- assert!(!should_execute_tools_in_parallel(&calls, Some(&approval_mgr)));
+ assert!(!should_execute_tools_in_parallel(
+ &calls,
+ Some(&approval_mgr)
+ ));
}
#[test]
@@ -2339,7 +2342,10 @@ mod tests {
};
let approval_mgr = ApprovalManager::from_config(&approval_cfg);
- assert!(should_execute_tools_in_parallel(&calls, Some(&approval_mgr)));
+ assert!(should_execute_tools_in_parallel(
+ &calls,
+ Some(&approval_mgr)
+ ));
}
#[tokio::test]
diff --git a/src/channels/telegram.rs b/src/channels/telegram.rs
index 1503e57df..8ecdf7397 100644
--- a/src/channels/telegram.rs
+++ b/src/channels/telegram.rs
@@ -386,8 +386,9 @@ impl TelegramChannel {
let contents = fs::read_to_string(&config_path)
.await
.with_context(|| format!("Failed to read config file: {}", config_path.display()))?;
- let mut config: Config = toml::from_str(&contents)
- .context("Failed to parse config.toml — check [channels.telegram] section for syntax errors")?;
+ let mut config: Config = toml::from_str(&contents).context(
+ "Failed to parse config.toml — check [channels.telegram] section for syntax errors",
+ )?;
config.config_path = config_path;
config.workspace_dir = zeroclaw_dir.join("workspace");
Ok(config)
diff --git a/src/cron/scheduler.rs b/src/cron/scheduler.rs
index fc19311d5..09b288ea0 100644
--- a/src/cron/scheduler.rs
+++ b/src/cron/scheduler.rs
@@ -690,7 +690,7 @@ mod tests {
#[tokio::test]
async fn run_agent_job_blocks_readonly_mode() {
let tmp = TempDir::new().unwrap();
- let mut config = test_config(&tmp);
+ let mut config = test_config(&tmp).await;
config.autonomy.level = crate::security::AutonomyLevel::ReadOnly;
let mut job = test_job("");
job.job_type = JobType::Agent;
@@ -706,7 +706,7 @@ mod tests {
#[tokio::test]
async fn run_agent_job_blocks_rate_limited() {
let tmp = TempDir::new().unwrap();
- let mut config = test_config(&tmp);
+ let mut config = test_config(&tmp).await;
config.autonomy.max_actions_per_hour = 0;
let mut job = test_job("");
job.job_type = JobType::Agent;
diff --git a/src/gateway/mod.rs b/src/gateway/mod.rs
index a7f677702..1142ed7b6 100644
--- a/src/gateway/mod.rs
+++ b/src/gateway/mod.rs
@@ -1194,8 +1194,7 @@ mod tests {
/// Generate a random hex secret at runtime to avoid hard-coded cryptographic values.
fn generate_test_secret() -> String {
- use rand::Rng;
- let bytes: [u8; 32] = rand::rng().random();
+ let bytes: [u8; 32] = rand::random();
hex::encode(bytes)
}
diff --git a/src/hardware/mod.rs b/src/hardware/mod.rs
index d9dbc1cbb..67407a734 100644
--- a/src/hardware/mod.rs
+++ b/src/hardware/mod.rs
@@ -4,10 +4,16 @@
pub mod registry;
-#[cfg(all(feature = "hardware", any(target_os = "linux", target_os = "macos", target_os = "windows")))]
+#[cfg(all(
+ feature = "hardware",
+ any(target_os = "linux", target_os = "macos", target_os = "windows")
+))]
pub mod discover;
-#[cfg(all(feature = "hardware", any(target_os = "linux", target_os = "macos", target_os = "windows")))]
+#[cfg(all(
+ feature = "hardware",
+ any(target_os = "linux", target_os = "macos", target_os = "windows")
+))]
pub mod introspect;
use crate::config::Config;
@@ -30,7 +36,10 @@ pub struct DiscoveredDevice {
pub fn discover_hardware() -> Vec {
// USB/serial discovery is behind the "hardware" feature gate and only
// available on platforms where nusb supports device enumeration.
- #[cfg(all(feature = "hardware", any(target_os = "linux", target_os = "macos", target_os = "windows")))]
+ #[cfg(all(
+ feature = "hardware",
+ any(target_os = "linux", target_os = "macos", target_os = "windows")
+ ))]
{
if let Ok(devices) = discover::list_usb_devices() {
return devices
@@ -103,7 +112,10 @@ pub fn handle_command(cmd: crate::HardwareCommands, _config: &Config) -> Result<
return Ok(());
}
- #[cfg(all(feature = "hardware", not(any(target_os = "linux", target_os = "macos", target_os = "windows"))))]
+ #[cfg(all(
+ feature = "hardware",
+ not(any(target_os = "linux", target_os = "macos", target_os = "windows"))
+ ))]
{
let _ = &cmd;
println!("Hardware USB discovery is not supported on this platform.");
@@ -111,7 +123,10 @@ pub fn handle_command(cmd: crate::HardwareCommands, _config: &Config) -> Result<
return Ok(());
}
- #[cfg(all(feature = "hardware", any(target_os = "linux", target_os = "macos", target_os = "windows")))]
+ #[cfg(all(
+ feature = "hardware",
+ any(target_os = "linux", target_os = "macos", target_os = "windows")
+ ))]
match cmd {
crate::HardwareCommands::Discover => run_discover(),
crate::HardwareCommands::Introspect { path } => run_introspect(&path),
@@ -119,7 +134,10 @@ pub fn handle_command(cmd: crate::HardwareCommands, _config: &Config) -> Result<
}
}
-#[cfg(all(feature = "hardware", any(target_os = "linux", target_os = "macos", target_os = "windows")))]
+#[cfg(all(
+ feature = "hardware",
+ any(target_os = "linux", target_os = "macos", target_os = "windows")
+))]
fn run_discover() -> Result<()> {
let devices = discover::list_usb_devices()?;
@@ -147,7 +165,10 @@ fn run_discover() -> Result<()> {
Ok(())
}
-#[cfg(all(feature = "hardware", any(target_os = "linux", target_os = "macos", target_os = "windows")))]
+#[cfg(all(
+ feature = "hardware",
+ any(target_os = "linux", target_os = "macos", target_os = "windows")
+))]
fn run_introspect(path: &str) -> Result<()> {
let result = introspect::introspect_device(path)?;
@@ -169,7 +190,10 @@ fn run_introspect(path: &str) -> Result<()> {
Ok(())
}
-#[cfg(all(feature = "hardware", any(target_os = "linux", target_os = "macos", target_os = "windows")))]
+#[cfg(all(
+ feature = "hardware",
+ any(target_os = "linux", target_os = "macos", target_os = "windows")
+))]
fn run_info(chip: &str) -> Result<()> {
#[cfg(feature = "probe")]
{
@@ -201,7 +225,11 @@ fn run_info(chip: &str) -> Result<()> {
}
}
-#[cfg(all(feature = "hardware", feature = "probe", any(target_os = "linux", target_os = "macos", target_os = "windows")))]
+#[cfg(all(
+ feature = "hardware",
+ feature = "probe",
+ any(target_os = "linux", target_os = "macos", target_os = "windows")
+))]
fn info_via_probe(chip: &str) -> anyhow::Result<()> {
use probe_rs::config::MemoryRegion;
use probe_rs::{Session, SessionConfig};
diff --git a/src/peripherals/mod.rs b/src/peripherals/mod.rs
index 6ae1c49e7..8c3a59a8d 100644
--- a/src/peripherals/mod.rs
+++ b/src/peripherals/mod.rs
@@ -250,7 +250,10 @@ mod tests {
datasheet_dir: None,
};
let result = list_configured_boards(&config);
- assert!(result.is_empty(), "disabled peripherals should return no boards");
+ assert!(
+ result.is_empty(),
+ "disabled peripherals should return no boards"
+ );
}
#[test]
@@ -287,7 +290,10 @@ mod tests {
datasheet_dir: None,
};
let result = list_configured_boards(&config);
- assert!(result.is_empty(), "enabled with no boards should return empty");
+ assert!(
+ result.is_empty(),
+ "enabled with no boards should return empty"
+ );
}
#[tokio::test]
@@ -298,6 +304,9 @@ mod tests {
datasheet_dir: None,
};
let tools = create_peripheral_tools(&config).await.unwrap();
- assert!(tools.is_empty(), "disabled peripherals should produce no tools");
+ assert!(
+ tools.is_empty(),
+ "disabled peripherals should produce no tools"
+ );
}
}
diff --git a/src/providers/bedrock.rs b/src/providers/bedrock.rs
index 2ec13a125..450f96475 100644
--- a/src/providers/bedrock.rs
+++ b/src/providers/bedrock.rs
@@ -813,24 +813,14 @@ mod tests {
#[test]
fn derive_signing_key_structure() {
// Verify the key derivation produces a 32-byte key (SHA-256 output).
- let key = derive_signing_key(
- TEST_VECTOR_SECRET,
- "20150830",
- "us-east-1",
- "iam",
- );
+ let key = derive_signing_key(TEST_VECTOR_SECRET, "20150830", "us-east-1", "iam");
assert_eq!(key.len(), 32);
}
#[test]
fn derive_signing_key_known_test_vector() {
// AWS SigV4 test vector from documentation.
- let key = derive_signing_key(
- TEST_VECTOR_SECRET,
- "20150830",
- "us-east-1",
- "iam",
- );
+ let key = derive_signing_key(TEST_VECTOR_SECRET, "20150830", "us-east-1", "iam");
assert_eq!(
hex::encode(&key),
"c4afb1cc5771d871763a393e44b703571b55cc28424d1a5e86da6ed3c154a4b9"
diff --git a/src/providers/mod.rs b/src/providers/mod.rs
index 8f0bc7331..85f05f030 100644
--- a/src/providers/mod.rs
+++ b/src/providers/mod.rs
@@ -212,8 +212,14 @@ struct QwenOauthCredentials {
impl std::fmt::Debug for QwenOauthCredentials {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("QwenOauthCredentials")
- .field("access_token", &self.access_token.as_ref().map(|_| "[REDACTED]"))
- .field("refresh_token", &self.refresh_token.as_ref().map(|_| "[REDACTED]"))
+ .field(
+ "access_token",
+ &self.access_token.as_ref().map(|_| "[REDACTED]"),
+ )
+ .field(
+ "refresh_token",
+ &self.refresh_token.as_ref().map(|_| "[REDACTED]"),
+ )
.field("resource_url", &self.resource_url)
.field("expiry_date", &self.expiry_date)
.finish()
@@ -245,7 +251,10 @@ struct QwenOauthProviderContext {
impl std::fmt::Debug for QwenOauthProviderContext {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("QwenOauthProviderContext")
- .field("credential", &self.credential.as_ref().map(|_| "[REDACTED]"))
+ .field(
+ "credential",
+ &self.credential.as_ref().map(|_| "[REDACTED]"),
+ )
.field("base_url", &self.base_url)
.finish()
}
diff --git a/src/util.rs b/src/util.rs
index 85c7856df..ca588e1bf 100644
--- a/src/util.rs
+++ b/src/util.rs
@@ -16,7 +16,7 @@
/// * Truncated string with "..." appended if length > `max_chars`
///
/// # Examples
-/// ```
+/// ```ignore
/// use zeroclaw::util::truncate_with_ellipsis;
///
/// // ASCII string - no truncation needed
From c611ffa43b4ecc8a7bad28077990472c81ee9ecc Mon Sep 17 00:00:00 2001
From: Chummy
Date: Fri, 20 Feb 2026 21:33:23 +0800
Subject: [PATCH 045/116] fix(scheduler): harden idle health heartbeat behavior
---
src/cron/scheduler.rs | 75 +++++++++++++++++++++++++++++++++----------
1 file changed, 58 insertions(+), 17 deletions(-)
diff --git a/src/cron/scheduler.rs b/src/cron/scheduler.rs
index 09b288ea0..b6f4d690c 100644
--- a/src/cron/scheduler.rs
+++ b/src/cron/scheduler.rs
@@ -17,6 +17,7 @@ use tokio::time::{self, Duration};
const MIN_POLL_SECONDS: u64 = 5;
const SHELL_JOB_TIMEOUT_SECS: u64 = 120;
+const SCHEDULER_COMPONENT: &str = "scheduler";
pub async fn run(config: Config) -> Result<()> {
let poll_secs = config.reliability.scheduler_poll_secs.max(MIN_POLL_SECONDS);
@@ -27,23 +28,23 @@ pub async fn run(config: Config) -> Result<()> {
&config.workspace_dir,
));
- crate::health::mark_component_ok("scheduler");
+ crate::health::mark_component_ok(SCHEDULER_COMPONENT);
loop {
interval.tick().await;
// Keep scheduler liveness fresh even when there are no due jobs.
- crate::health::mark_component_ok("scheduler");
+ crate::health::mark_component_ok(SCHEDULER_COMPONENT);
let jobs = match due_jobs(&config, Utc::now()) {
Ok(jobs) => jobs,
Err(e) => {
- crate::health::mark_component_error("scheduler", e.to_string());
+ crate::health::mark_component_error(SCHEDULER_COMPONENT, e.to_string());
tracing::warn!("Scheduler query failed: {e}");
continue;
}
};
- process_due_jobs(&config, &security, jobs).await;
+ process_due_jobs(&config, &security, jobs, SCHEDULER_COMPONENT).await;
}
}
@@ -87,14 +88,28 @@ async fn execute_job_with_retry(
(false, last_output)
}
-async fn process_due_jobs(config: &Config, security: &Arc, jobs: Vec) {
+async fn process_due_jobs(
+ config: &Config,
+ security: &Arc,
+ jobs: Vec,
+ component: &str,
+) {
+ // Refresh scheduler health on every successful poll cycle, including idle cycles.
+ crate::health::mark_component_ok(component);
+
let max_concurrent = config.scheduler.max_concurrent.max(1);
- let mut in_flight = stream::iter(jobs.into_iter().map(|job| {
- let config = config.clone();
- let security = Arc::clone(security);
- async move { execute_and_persist_job(&config, security.as_ref(), &job).await }
- }))
- .buffer_unordered(max_concurrent);
+ let mut in_flight =
+ stream::iter(
+ jobs.into_iter().map(|job| {
+ let config = config.clone();
+ let security = Arc::clone(security);
+ let component = component.to_owned();
+ async move {
+ execute_and_persist_job(&config, security.as_ref(), &job, &component).await
+ }
+ }),
+ )
+ .buffer_unordered(max_concurrent);
while let Some((job_id, success)) = in_flight.next().await {
if !success {
@@ -107,8 +122,9 @@ async fn execute_and_persist_job(
config: &Config,
security: &SecurityPolicy,
job: &CronJob,
+ component: &str,
) -> (String, bool) {
- crate::health::mark_component_ok("scheduler");
+ crate::health::mark_component_ok(component);
warn_if_high_frequency_agent_job(job);
let started_at = Utc::now();
@@ -539,6 +555,10 @@ mod tests {
}
}
+ fn unique_component(prefix: &str) -> String {
+ format!("{prefix}-{}", uuid::Uuid::new_v4())
+ }
+
#[tokio::test]
async fn run_job_command_success() {
let tmp = TempDir::new().unwrap();
@@ -720,7 +740,27 @@ mod tests {
}
#[tokio::test]
- async fn process_due_jobs_failure_does_not_mark_scheduler_unhealthy() {
+ async fn process_due_jobs_marks_component_ok_even_when_idle() {
+ let tmp = TempDir::new().unwrap();
+ let config = test_config(&tmp).await;
+ let security = Arc::new(SecurityPolicy::from_config(
+ &config.autonomy,
+ &config.workspace_dir,
+ ));
+ let component = unique_component("scheduler-idle");
+
+ crate::health::mark_component_error(&component, "pre-existing error");
+ process_due_jobs(&config, &security, Vec::new(), &component).await;
+
+ let snapshot = crate::health::snapshot_json();
+ let entry = &snapshot["components"][component.as_str()];
+ assert_eq!(entry["status"], "ok");
+ assert!(entry["last_ok"].as_str().is_some());
+ assert!(entry["last_error"].is_null());
+ }
+
+ #[tokio::test]
+ async fn process_due_jobs_failure_does_not_mark_component_unhealthy() {
let tmp = TempDir::new().unwrap();
let config = test_config(&tmp).await;
let job = test_job("ls definitely_missing_file_for_scheduler_component_health_test");
@@ -728,13 +768,14 @@ mod tests {
&config.autonomy,
&config.workspace_dir,
));
+ let component = unique_component("scheduler-fail");
- crate::health::mark_component_ok("scheduler");
- process_due_jobs(&config, &security, vec![job]).await;
+ crate::health::mark_component_ok(&component);
+ process_due_jobs(&config, &security, vec![job], &component).await;
let snapshot = crate::health::snapshot_json();
- let scheduler = &snapshot["components"]["scheduler"];
- assert_eq!(scheduler["status"], "ok");
+ let entry = &snapshot["components"][component.as_str()];
+ assert_eq!(entry["status"], "ok");
}
#[tokio::test]
From 2d910e77a76ca524ed6b658ade146df6967637cd Mon Sep 17 00:00:00 2001
From: Chummy
Date: Fri, 20 Feb 2026 21:59:51 +0800
Subject: [PATCH 046/116] fix(security): enforce schedule cron and policy gates
---
docs/commands-reference.md | 5 ++
src/tools/schedule.rs | 147 ++++++++++++++++++++++++++++++++++++-
2 files changed, 150 insertions(+), 2 deletions(-)
diff --git a/docs/commands-reference.md b/docs/commands-reference.md
index 40ed488bb..e81a2a947 100644
--- a/docs/commands-reference.md
+++ b/docs/commands-reference.md
@@ -69,6 +69,11 @@ Last verified: **February 20, 2026**.
- `zeroclaw cron pause `
- `zeroclaw cron resume `
+Notes:
+
+- Mutating schedule/cron actions require `cron.enabled = true`.
+- Shell command payloads for schedule creation (`create` / `add` / `once`) are validated by security command policy before job persistence.
+
### `models`
- `zeroclaw models refresh`
diff --git a/src/tools/schedule.rs b/src/tools/schedule.rs
index fcf46fe0b..b7cabac8f 100644
--- a/src/tools/schedule.rs
+++ b/src/tools/schedule.rs
@@ -55,6 +55,11 @@ impl Tool for ScheduleTool {
"type": "string",
"description": "Shell command to execute. Required for create/add/once."
},
+ "approved": {
+ "type": "boolean",
+ "description": "Set true to explicitly approve medium/high-risk shell commands in supervised mode",
+ "default": false
+ },
"id": {
"type": "string",
"description": "Task ID. Required for get/cancel/remove/pause/resume."
@@ -83,7 +88,11 @@ impl Tool for ScheduleTool {
if let Some(blocked) = self.enforce_mutation_allowed(action) {
return Ok(blocked);
}
- self.handle_create_like(action, &args)
+ let approved = args
+ .get("approved")
+ .and_then(serde_json::Value::as_bool)
+ .unwrap_or(false);
+ self.handle_create_like(action, &args, approved)
}
"cancel" | "remove" => {
if let Some(blocked) = self.enforce_mutation_allowed(action) {
@@ -128,6 +137,16 @@ impl Tool for ScheduleTool {
impl ScheduleTool {
fn enforce_mutation_allowed(&self, action: &str) -> Option {
+ if !self.config.cron.enabled {
+ return Some(ToolResult {
+ success: false,
+ output: String::new(),
+ error: Some(format!(
+ "cron is disabled by config (cron.enabled=false); cannot perform '{action}'"
+ )),
+ });
+ }
+
if !self.security.can_act() {
return Some(ToolResult {
success: false,
@@ -219,13 +238,26 @@ impl ScheduleTool {
}
}
- fn handle_create_like(&self, action: &str, args: &serde_json::Value) -> Result {
+ fn handle_create_like(
+ &self,
+ action: &str,
+ args: &serde_json::Value,
+ approved: bool,
+ ) -> Result {
let command = args
.get("command")
.and_then(|value| value.as_str())
.filter(|value| !value.trim().is_empty())
.ok_or_else(|| anyhow::anyhow!("Missing or empty 'command' parameter"))?;
+ if let Err(reason) = self.security.validate_command_execution(command, approved) {
+ return Ok(ToolResult {
+ success: false,
+ output: String::new(),
+ error: Some(reason),
+ });
+ }
+
let expression = args.get("expression").and_then(|value| value.as_str());
let delay = args.get("delay").and_then(|value| value.as_str());
let run_at = args.get("run_at").and_then(|value| value.as_str());
@@ -525,4 +557,115 @@ mod tests {
assert!(!result.success);
assert!(result.error.as_deref().unwrap().contains("Unknown action"));
}
+
+ #[tokio::test]
+ async fn mutating_actions_fail_when_cron_disabled() {
+ let tmp = TempDir::new().unwrap();
+ let mut config = Config {
+ workspace_dir: tmp.path().join("workspace"),
+ config_path: tmp.path().join("config.toml"),
+ ..Config::default()
+ };
+ config.cron.enabled = false;
+ std::fs::create_dir_all(&config.workspace_dir).unwrap();
+ let security = Arc::new(SecurityPolicy::from_config(
+ &config.autonomy,
+ &config.workspace_dir,
+ ));
+ let tool = ScheduleTool::new(security, config);
+
+ let create = tool
+ .execute(json!({
+ "action": "create",
+ "expression": "*/5 * * * *",
+ "command": "echo hello"
+ }))
+ .await
+ .unwrap();
+
+ assert!(!create.success);
+ assert!(create
+ .error
+ .as_deref()
+ .unwrap_or_default()
+ .contains("cron is disabled"));
+ }
+
+ #[tokio::test]
+ async fn create_blocks_disallowed_command() {
+ let tmp = TempDir::new().unwrap();
+ let mut config = Config {
+ workspace_dir: tmp.path().join("workspace"),
+ config_path: tmp.path().join("config.toml"),
+ ..Config::default()
+ };
+ config.autonomy.level = AutonomyLevel::Supervised;
+ config.autonomy.allowed_commands = vec!["echo".into()];
+ std::fs::create_dir_all(&config.workspace_dir).unwrap();
+ let security = Arc::new(SecurityPolicy::from_config(
+ &config.autonomy,
+ &config.workspace_dir,
+ ));
+ let tool = ScheduleTool::new(security, config);
+
+ let result = tool
+ .execute(json!({
+ "action": "create",
+ "expression": "*/5 * * * *",
+ "command": "curl https://example.com"
+ }))
+ .await
+ .unwrap();
+
+ assert!(!result.success);
+ assert!(result
+ .error
+ .as_deref()
+ .unwrap_or_default()
+ .contains("not allowed"));
+ }
+
+ #[tokio::test]
+ async fn medium_risk_create_requires_approval() {
+ let tmp = TempDir::new().unwrap();
+ let mut config = Config {
+ workspace_dir: tmp.path().join("workspace"),
+ config_path: tmp.path().join("config.toml"),
+ ..Config::default()
+ };
+ config.autonomy.level = AutonomyLevel::Supervised;
+ config.autonomy.allowed_commands = vec!["touch".into()];
+ std::fs::create_dir_all(&config.workspace_dir).unwrap();
+ let security = Arc::new(SecurityPolicy::from_config(
+ &config.autonomy,
+ &config.workspace_dir,
+ ));
+ let tool = ScheduleTool::new(security, config);
+
+ let denied = tool
+ .execute(json!({
+ "action": "create",
+ "expression": "*/5 * * * *",
+ "command": "touch schedule-policy-test"
+ }))
+ .await
+ .unwrap();
+ assert!(!denied.success);
+ assert!(denied
+ .error
+ .as_deref()
+ .unwrap_or_default()
+ .contains("explicit approval"));
+
+ let approved = tool
+ .execute(json!({
+ "action": "create",
+ "expression": "*/5 * * * *",
+ "command": "touch schedule-policy-test",
+ "approved": true
+ }))
+ .await
+ .unwrap();
+ assert!(approved.success, "{:?}", approved.error);
+ }
}
From 90a565ac5aadf24f528f8287cd1272fd1e561a5e Mon Sep 17 00:00:00 2001
From: fettpl <38704082+fettpl@users.noreply.github.com>
Date: Tue, 17 Feb 2026 20:55:21 +0100
Subject: [PATCH 047/116] fix(security): enforce cron tool policy gates
---
src/tools/cron_add.rs | 121 +++++++++++++++++++++++++++++++++++++--
src/tools/cron_remove.rs | 71 +++++++++++++++++++++--
src/tools/cron_run.rs | 116 +++++++++++++++++++++++++++++++++++--
src/tools/cron_update.rs | 115 +++++++++++++++++++++++++++++++++++--
src/tools/mod.rs | 4 +-
5 files changed, 405 insertions(+), 22 deletions(-)
diff --git a/src/tools/cron_add.rs b/src/tools/cron_add.rs
index a0847b56d..60a4da729 100644
--- a/src/tools/cron_add.rs
+++ b/src/tools/cron_add.rs
@@ -15,6 +15,36 @@ impl CronAddTool {
pub fn new(config: Arc, security: Arc) -> Self {
Self { config, security }
}
+
+ fn enforce_mutation_allowed(&self, action: &str) -> Option {
+ if !self.security.can_act() {
+ return Some(ToolResult {
+ success: false,
+ output: String::new(),
+ error: Some(format!(
+ "Security policy: read-only mode, cannot perform '{action}'"
+ )),
+ });
+ }
+
+ if self.security.is_rate_limited() {
+ return Some(ToolResult {
+ success: false,
+ output: String::new(),
+ error: Some("Rate limit exceeded: too many actions in the last hour".to_string()),
+ });
+ }
+
+ if !self.security.record_action() {
+ return Some(ToolResult {
+ success: false,
+ output: String::new(),
+ error: Some("Rate limit exceeded: action budget exhausted".to_string()),
+ });
+ }
+
+ None
+ }
}
#[async_trait]
@@ -42,7 +72,12 @@ impl Tool for CronAddTool {
"session_target": { "type": "string", "enum": ["isolated", "main"] },
"model": { "type": "string" },
"delivery": { "type": "object" },
- "delete_after_run": { "type": "boolean" }
+ "delete_after_run": { "type": "boolean" },
+ "approved": {
+ "type": "boolean",
+ "description": "Set true to explicitly approve medium/high-risk shell commands in supervised mode",
+ "default": false
+ }
},
"required": ["schedule"]
})
@@ -106,6 +141,10 @@ impl Tool for CronAddTool {
.get("delete_after_run")
.and_then(serde_json::Value::as_bool)
.unwrap_or(default_delete_after_run);
+ let approved = args
+ .get("approved")
+ .and_then(serde_json::Value::as_bool)
+ .unwrap_or(false);
let result = match job_type {
JobType::Shell => {
@@ -120,14 +159,18 @@ impl Tool for CronAddTool {
}
};
- if !self.security.is_command_allowed(command) {
+ if let Err(reason) = self.security.validate_command_execution(command, approved) {
return Ok(ToolResult {
success: false,
output: String::new(),
- error: Some(format!("Command blocked by security policy: {command}")),
+ error: Some(reason),
});
}
+ if let Some(blocked) = self.enforce_mutation_allowed("cron_add") {
+ return Ok(blocked);
+ }
+
cron::add_shell_job(&self.config, name, schedule, command)
}
JobType::Agent => {
@@ -175,6 +218,10 @@ impl Tool for CronAddTool {
None => None,
};
+ if let Some(blocked) = self.enforce_mutation_allowed("cron_add") {
+ return Ok(blocked);
+ }
+
cron::add_agent_job(
&self.config,
name,
@@ -280,10 +327,74 @@ mod tests {
.unwrap();
assert!(!result.success);
- assert!(result
+ assert!(result.error.unwrap_or_default().contains("not allowed"));
+ }
+
+ #[tokio::test]
+ async fn blocks_mutation_in_read_only_mode() {
+ let tmp = TempDir::new().unwrap();
+ let mut config = Config {
+ workspace_dir: tmp.path().join("workspace"),
+ config_path: tmp.path().join("config.toml"),
+ ..Config::default()
+ };
+ config.autonomy.level = AutonomyLevel::ReadOnly;
+ std::fs::create_dir_all(&config.workspace_dir).unwrap();
+ let cfg = Arc::new(config);
+ let tool = CronAddTool::new(cfg.clone(), test_security(&cfg));
+
+ let result = tool
+ .execute(json!({
+ "schedule": { "kind": "cron", "expr": "*/5 * * * *" },
+ "job_type": "shell",
+ "command": "echo ok"
+ }))
+ .await
+ .unwrap();
+
+ assert!(!result.success);
+ let error = result.error.unwrap_or_default();
+ assert!(error.contains("read-only") || error.contains("not allowed"));
+ }
+
+ #[tokio::test]
+ async fn medium_risk_shell_command_requires_approval() {
+ let tmp = TempDir::new().unwrap();
+ let mut config = Config {
+ workspace_dir: tmp.path().join("workspace"),
+ config_path: tmp.path().join("config.toml"),
+ ..Config::default()
+ };
+ config.autonomy.allowed_commands = vec!["touch".into()];
+ config.autonomy.level = AutonomyLevel::Supervised;
+ std::fs::create_dir_all(&config.workspace_dir).unwrap();
+ let cfg = Arc::new(config);
+ let tool = CronAddTool::new(cfg.clone(), test_security(&cfg));
+
+ let denied = tool
+ .execute(json!({
+ "schedule": { "kind": "cron", "expr": "*/5 * * * *" },
+ "job_type": "shell",
+ "command": "touch cron-approval-test"
+ }))
+ .await
+ .unwrap();
+ assert!(!denied.success);
+ assert!(denied
.error
.unwrap_or_default()
- .contains("blocked by security policy"));
+ .contains("explicit approval"));
+
+ let approved = tool
+ .execute(json!({
+ "schedule": { "kind": "cron", "expr": "*/5 * * * *" },
+ "job_type": "shell",
+ "command": "touch cron-approval-test",
+ "approved": true
+ }))
+ .await
+ .unwrap();
+ assert!(approved.success, "{:?}", approved.error);
}
#[tokio::test]
diff --git a/src/tools/cron_remove.rs b/src/tools/cron_remove.rs
index 52492125f..df8a52cbc 100644
--- a/src/tools/cron_remove.rs
+++ b/src/tools/cron_remove.rs
@@ -1,17 +1,49 @@
use super::traits::{Tool, ToolResult};
use crate::config::Config;
use crate::cron;
+use crate::security::SecurityPolicy;
use async_trait::async_trait;
use serde_json::json;
use std::sync::Arc;
pub struct CronRemoveTool {
config: Arc,
+ security: Arc,
}
impl CronRemoveTool {
- pub fn new(config: Arc) -> Self {
- Self { config }
+ pub fn new(config: Arc, security: Arc) -> Self {
+ Self { config, security }
+ }
+
+ fn enforce_mutation_allowed(&self, action: &str) -> Option {
+ if !self.security.can_act() {
+ return Some(ToolResult {
+ success: false,
+ output: String::new(),
+ error: Some(format!(
+ "Security policy: read-only mode, cannot perform '{action}'"
+ )),
+ });
+ }
+
+ if self.security.is_rate_limited() {
+ return Some(ToolResult {
+ success: false,
+ output: String::new(),
+ error: Some("Rate limit exceeded: too many actions in the last hour".to_string()),
+ });
+ }
+
+ if !self.security.record_action() {
+ return Some(ToolResult {
+ success: false,
+ output: String::new(),
+ error: Some("Rate limit exceeded: action budget exhausted".to_string()),
+ });
+ }
+
+ None
}
}
@@ -55,6 +87,10 @@ impl Tool for CronRemoveTool {
}
};
+ if let Some(blocked) = self.enforce_mutation_allowed("cron_remove") {
+ return Ok(blocked);
+ }
+
match cron::remove_job(&self.config, job_id) {
Ok(()) => Ok(ToolResult {
success: true,
@@ -74,6 +110,7 @@ impl Tool for CronRemoveTool {
mod tests {
use super::*;
use crate::config::Config;
+ use crate::security::AutonomyLevel;
use tempfile::TempDir;
async fn test_config(tmp: &TempDir) -> Arc {
@@ -88,12 +125,19 @@ mod tests {
Arc::new(config)
}
+ fn test_security(cfg: &Config) -> Arc {
+ Arc::new(SecurityPolicy::from_config(
+ &cfg.autonomy,
+ &cfg.workspace_dir,
+ ))
+ }
+
#[tokio::test]
async fn removes_existing_job() {
let tmp = TempDir::new().unwrap();
let cfg = test_config(&tmp).await;
let job = cron::add_job(&cfg, "*/5 * * * *", "echo ok").unwrap();
- let tool = CronRemoveTool::new(cfg.clone());
+ let tool = CronRemoveTool::new(cfg.clone(), test_security(&cfg));
let result = tool.execute(json!({"job_id": job.id})).await.unwrap();
assert!(result.success);
@@ -104,7 +148,7 @@ mod tests {
async fn errors_when_job_id_missing() {
let tmp = TempDir::new().unwrap();
let cfg = test_config(&tmp).await;
- let tool = CronRemoveTool::new(cfg);
+ let tool = CronRemoveTool::new(cfg.clone(), test_security(&cfg));
let result = tool.execute(json!({})).await.unwrap();
assert!(!result.success);
@@ -113,4 +157,23 @@ mod tests {
.unwrap_or_default()
.contains("Missing 'job_id'"));
}
+
+ #[tokio::test]
+ async fn blocks_remove_in_read_only_mode() {
+ let tmp = TempDir::new().unwrap();
+ let mut config = Config {
+ workspace_dir: tmp.path().join("workspace"),
+ config_path: tmp.path().join("config.toml"),
+ ..Config::default()
+ };
+ config.autonomy.level = AutonomyLevel::ReadOnly;
+ std::fs::create_dir_all(&config.workspace_dir).unwrap();
+ let cfg = Arc::new(config);
+ let job = cron::add_job(&cfg, "*/5 * * * *", "echo ok").unwrap();
+ let tool = CronRemoveTool::new(cfg.clone(), test_security(&cfg));
+
+ let result = tool.execute(json!({"job_id": job.id})).await.unwrap();
+ assert!(!result.success);
+ assert!(result.error.unwrap_or_default().contains("read-only"));
+ }
}
diff --git a/src/tools/cron_run.rs b/src/tools/cron_run.rs
index ad77344ec..19bf5adcc 100644
--- a/src/tools/cron_run.rs
+++ b/src/tools/cron_run.rs
@@ -1,6 +1,7 @@
use super::traits::{Tool, ToolResult};
use crate::config::Config;
-use crate::cron;
+use crate::cron::{self, JobType};
+use crate::security::SecurityPolicy;
use async_trait::async_trait;
use chrono::Utc;
use serde_json::json;
@@ -8,11 +9,12 @@ use std::sync::Arc;
pub struct CronRunTool {
config: Arc,
+ security: Arc,
}
impl CronRunTool {
- pub fn new(config: Arc) -> Self {
- Self { config }
+ pub fn new(config: Arc, security: Arc) -> Self {
+ Self { config, security }
}
}
@@ -30,7 +32,12 @@ impl Tool for CronRunTool {
json!({
"type": "object",
"properties": {
- "job_id": { "type": "string" }
+ "job_id": { "type": "string" },
+ "approved": {
+ "type": "boolean",
+ "description": "Set true to explicitly approve medium/high-risk shell commands in supervised mode",
+ "default": false
+ }
},
"required": ["job_id"]
})
@@ -55,6 +62,26 @@ impl Tool for CronRunTool {
});
}
};
+ let approved = args
+ .get("approved")
+ .and_then(serde_json::Value::as_bool)
+ .unwrap_or(false);
+
+ if !self.security.can_act() {
+ return Ok(ToolResult {
+ success: false,
+ output: String::new(),
+ error: Some("Security policy: read-only mode, cannot perform 'cron_run'".into()),
+ });
+ }
+
+ if self.security.is_rate_limited() {
+ return Ok(ToolResult {
+ success: false,
+ output: String::new(),
+ error: Some("Rate limit exceeded: too many actions in the last hour".into()),
+ });
+ }
let job = match cron::get_job(&self.config, job_id) {
Ok(job) => job,
@@ -67,6 +94,27 @@ impl Tool for CronRunTool {
}
};
+ if matches!(job.job_type, JobType::Shell) {
+ if let Err(reason) = self
+ .security
+ .validate_command_execution(&job.command, approved)
+ {
+ return Ok(ToolResult {
+ success: false,
+ output: String::new(),
+ error: Some(reason),
+ });
+ }
+ }
+
+ if !self.security.record_action() {
+ return Ok(ToolResult {
+ success: false,
+ output: String::new(),
+ error: Some("Rate limit exceeded: action budget exhausted".into()),
+ });
+ }
+
let started_at = Utc::now();
let (success, output) = cron::scheduler::execute_job_now(&self.config, &job).await;
let finished_at = Utc::now();
@@ -105,6 +153,7 @@ impl Tool for CronRunTool {
mod tests {
use super::*;
use crate::config::Config;
+ use crate::security::AutonomyLevel;
use tempfile::TempDir;
async fn test_config(tmp: &TempDir) -> Arc {
@@ -119,12 +168,19 @@ mod tests {
Arc::new(config)
}
+ fn test_security(cfg: &Config) -> Arc {
+ Arc::new(SecurityPolicy::from_config(
+ &cfg.autonomy,
+ &cfg.workspace_dir,
+ ))
+ }
+
#[tokio::test]
async fn force_runs_job_and_records_history() {
let tmp = TempDir::new().unwrap();
let cfg = test_config(&tmp).await;
let job = cron::add_job(&cfg, "*/5 * * * *", "echo run-now").unwrap();
- let tool = CronRunTool::new(cfg.clone());
+ let tool = CronRunTool::new(cfg.clone(), test_security(&cfg));
let result = tool.execute(json!({ "job_id": job.id })).await.unwrap();
assert!(result.success, "{:?}", result.error);
@@ -137,7 +193,7 @@ mod tests {
async fn errors_for_missing_job() {
let tmp = TempDir::new().unwrap();
let cfg = test_config(&tmp).await;
- let tool = CronRunTool::new(cfg);
+ let tool = CronRunTool::new(cfg.clone(), test_security(&cfg));
let result = tool
.execute(json!({ "job_id": "missing-job-id" }))
@@ -146,4 +202,52 @@ mod tests {
assert!(!result.success);
assert!(result.error.unwrap_or_default().contains("not found"));
}
+
+ #[tokio::test]
+ async fn blocks_run_in_read_only_mode() {
+ let tmp = TempDir::new().unwrap();
+ let mut config = Config {
+ workspace_dir: tmp.path().join("workspace"),
+ config_path: tmp.path().join("config.toml"),
+ ..Config::default()
+ };
+ config.autonomy.level = AutonomyLevel::ReadOnly;
+ std::fs::create_dir_all(&config.workspace_dir).unwrap();
+ let cfg = Arc::new(config);
+ let job = cron::add_job(&cfg, "*/5 * * * *", "echo run-now").unwrap();
+ let tool = CronRunTool::new(cfg.clone(), test_security(&cfg));
+
+ let result = tool.execute(json!({ "job_id": job.id })).await.unwrap();
+ assert!(!result.success);
+ assert!(result.error.unwrap_or_default().contains("read-only"));
+ }
+
+ #[tokio::test]
+ async fn shell_run_requires_approval_for_medium_risk() {
+ let tmp = TempDir::new().unwrap();
+ let mut config = Config {
+ workspace_dir: tmp.path().join("workspace"),
+ config_path: tmp.path().join("config.toml"),
+ ..Config::default()
+ };
+ config.autonomy.level = AutonomyLevel::Supervised;
+ config.autonomy.allowed_commands = vec!["touch".into()];
+ std::fs::create_dir_all(&config.workspace_dir).unwrap();
+ let cfg = Arc::new(config);
+ let job = cron::add_job(&cfg, "*/5 * * * *", "touch cron-run-approval").unwrap();
+ let tool = CronRunTool::new(cfg.clone(), test_security(&cfg));
+
+ let denied = tool.execute(json!({ "job_id": job.id })).await.unwrap();
+ assert!(!denied.success);
+ assert!(denied
+ .error
+ .unwrap_or_default()
+ .contains("explicit approval"));
+
+ let approved = tool
+ .execute(json!({ "job_id": job.id, "approved": true }))
+ .await
+ .unwrap();
+ assert!(approved.success, "{:?}", approved.error);
+ }
}
diff --git a/src/tools/cron_update.rs b/src/tools/cron_update.rs
index d8df72d0f..986948317 100644
--- a/src/tools/cron_update.rs
+++ b/src/tools/cron_update.rs
@@ -15,6 +15,36 @@ impl CronUpdateTool {
pub fn new(config: Arc, security: Arc) -> Self {
Self { config, security }
}
+
+ fn enforce_mutation_allowed(&self, action: &str) -> Option {
+ if !self.security.can_act() {
+ return Some(ToolResult {
+ success: false,
+ output: String::new(),
+ error: Some(format!(
+ "Security policy: read-only mode, cannot perform '{action}'"
+ )),
+ });
+ }
+
+ if self.security.is_rate_limited() {
+ return Some(ToolResult {
+ success: false,
+ output: String::new(),
+ error: Some("Rate limit exceeded: too many actions in the last hour".to_string()),
+ });
+ }
+
+ if !self.security.record_action() {
+ return Some(ToolResult {
+ success: false,
+ output: String::new(),
+ error: Some("Rate limit exceeded: action budget exhausted".to_string()),
+ });
+ }
+
+ None
+ }
}
#[async_trait]
@@ -32,7 +62,12 @@ impl Tool for CronUpdateTool {
"type": "object",
"properties": {
"job_id": { "type": "string" },
- "patch": { "type": "object" }
+ "patch": { "type": "object" },
+ "approved": {
+ "type": "boolean",
+ "description": "Set true to explicitly approve medium/high-risk shell commands in supervised mode",
+ "default": false
+ }
},
"required": ["job_id", "patch"]
})
@@ -79,17 +114,25 @@ impl Tool for CronUpdateTool {
});
}
};
+ let approved = args
+ .get("approved")
+ .and_then(serde_json::Value::as_bool)
+ .unwrap_or(false);
if let Some(command) = &patch.command {
- if !self.security.is_command_allowed(command) {
+ if let Err(reason) = self.security.validate_command_execution(command, approved) {
return Ok(ToolResult {
success: false,
output: String::new(),
- error: Some(format!("Command blocked by security policy: {command}")),
+ error: Some(reason),
});
}
}
+ if let Some(blocked) = self.enforce_mutation_allowed("cron_update") {
+ return Ok(blocked);
+ }
+
match cron::update_job(&self.config, job_id, patch) {
Ok(job) => Ok(ToolResult {
success: true,
@@ -109,6 +152,7 @@ impl Tool for CronUpdateTool {
mod tests {
use super::*;
use crate::config::Config;
+ use crate::security::AutonomyLevel;
use tempfile::TempDir;
async fn test_config(tmp: &TempDir) -> Arc {
@@ -173,9 +217,70 @@ mod tests {
.await
.unwrap();
assert!(!result.success);
- assert!(result
+ assert!(result.error.unwrap_or_default().contains("not allowed"));
+ }
+
+ #[tokio::test]
+ async fn blocks_mutation_in_read_only_mode() {
+ let tmp = TempDir::new().unwrap();
+ let mut config = Config {
+ workspace_dir: tmp.path().join("workspace"),
+ config_path: tmp.path().join("config.toml"),
+ ..Config::default()
+ };
+ config.autonomy.level = AutonomyLevel::ReadOnly;
+ std::fs::create_dir_all(&config.workspace_dir).unwrap();
+ let cfg = Arc::new(config);
+ let job = cron::add_job(&cfg, "*/5 * * * *", "echo ok").unwrap();
+ let tool = CronUpdateTool::new(cfg.clone(), test_security(&cfg));
+
+ let result = tool
+ .execute(json!({
+ "job_id": job.id,
+ "patch": { "enabled": false }
+ }))
+ .await
+ .unwrap();
+ assert!(!result.success);
+ assert!(result.error.unwrap_or_default().contains("read-only"));
+ }
+
+ #[tokio::test]
+ async fn medium_risk_shell_update_requires_approval() {
+ let tmp = TempDir::new().unwrap();
+ let mut config = Config {
+ workspace_dir: tmp.path().join("workspace"),
+ config_path: tmp.path().join("config.toml"),
+ ..Config::default()
+ };
+ config.autonomy.level = AutonomyLevel::Supervised;
+ config.autonomy.allowed_commands = vec!["echo".into(), "touch".into()];
+ std::fs::create_dir_all(&config.workspace_dir).unwrap();
+ let cfg = Arc::new(config);
+ let job = cron::add_job(&cfg, "*/5 * * * *", "echo ok").unwrap();
+ let tool = CronUpdateTool::new(cfg.clone(), test_security(&cfg));
+
+ let denied = tool
+ .execute(json!({
+ "job_id": job.id,
+ "patch": { "command": "touch cron-update-approval-test" }
+ }))
+ .await
+ .unwrap();
+ assert!(!denied.success);
+ assert!(denied
.error
.unwrap_or_default()
- .contains("blocked by security policy"));
+ .contains("explicit approval"));
+
+ let approved = tool
+ .execute(json!({
+ "job_id": job.id,
+ "patch": { "command": "touch cron-update-approval-test" },
+ "approved": true
+ }))
+ .await
+ .unwrap();
+ assert!(approved.success, "{:?}", approved.error);
}
}
diff --git a/src/tools/mod.rs b/src/tools/mod.rs
index 50fec0ca2..82929b11d 100644
--- a/src/tools/mod.rs
+++ b/src/tools/mod.rs
@@ -190,9 +190,9 @@ pub fn all_tools_with_runtime(
Arc::new(FileWriteTool::new(security.clone())),
Arc::new(CronAddTool::new(config.clone(), security.clone())),
Arc::new(CronListTool::new(config.clone())),
- Arc::new(CronRemoveTool::new(config.clone())),
+ Arc::new(CronRemoveTool::new(config.clone(), security.clone())),
Arc::new(CronUpdateTool::new(config.clone(), security.clone())),
- Arc::new(CronRunTool::new(config.clone())),
+ Arc::new(CronRunTool::new(config.clone(), security.clone())),
Arc::new(CronRunsTool::new(config.clone())),
Arc::new(MemoryStoreTool::new(memory.clone(), security.clone())),
Arc::new(MemoryRecallTool::new(memory.clone())),
From 572cde695a3a223999be4ec10860314952703c96 Mon Sep 17 00:00:00 2001
From: Chummy
Date: Fri, 20 Feb 2026 23:12:31 +0800
Subject: [PATCH 048/116] feat(channel): add native nextcloud talk webhook
integration
---
docs/SUMMARY.md | 1 +
docs/channels-reference.md | 25 +-
docs/config-reference.md | 18 ++
docs/docs-inventory.md | 1 +
docs/network-deployment.md | 4 +-
docs/nextcloud-talk-setup.md | 78 ++++++
docs/reference/README.md | 1 +
src/channels/mod.rs | 25 ++
src/channels/nextcloud_talk.rs | 485 +++++++++++++++++++++++++++++++++
src/config/mod.rs | 18 +-
src/config/schema.rs | 55 ++++
src/daemon/mod.rs | 14 +
src/doctor/mod.rs | 1 +
src/gateway/mod.rs | 275 ++++++++++++++++++-
src/main.rs | 1 +
15 files changed, 992 insertions(+), 10 deletions(-)
create mode 100644 docs/nextcloud-talk-setup.md
create mode 100644 src/channels/nextcloud_talk.rs
diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md
index 6bd2e9270..31f84b190 100644
--- a/docs/SUMMARY.md
+++ b/docs/SUMMARY.md
@@ -28,6 +28,7 @@ Last refreshed: **February 18, 2026**.
- [commands-reference.md](commands-reference.md)
- [providers-reference.md](providers-reference.md)
- [channels-reference.md](channels-reference.md)
+- [nextcloud-talk-setup.md](nextcloud-talk-setup.md)
- [config-reference.md](config-reference.md)
- [custom-providers.md](custom-providers.md)
- [zai-glm-setup.md](zai-glm-setup.md)
diff --git a/docs/channels-reference.md b/docs/channels-reference.md
index 7108ab2b1..b441423de 100644
--- a/docs/channels-reference.md
+++ b/docs/channels-reference.md
@@ -10,6 +10,7 @@ For encrypted Matrix rooms, also read the dedicated runbook:
- Need a full config reference by channel: jump to [Per-Channel Config Examples](#4-per-channel-config-examples).
- Need a no-response diagnosis flow: jump to [Troubleshooting Checklist](#6-troubleshooting-checklist).
- Need Matrix encrypted-room help: use [Matrix E2EE Guide](./matrix-e2ee-guide.md).
+- Need Nextcloud Talk bot setup: use [Nextcloud Talk Setup](./nextcloud-talk-setup.md).
- Need deployment/network assumptions (polling vs webhook): use [Network Deployment](./network-deployment.md).
## FAQ: Matrix setup passes but no reply
@@ -102,6 +103,7 @@ If `[channels_config.matrix]` is present but the binary was built without `chann
| Matrix | sync API (supports E2EE) | No |
| Signal | signal-cli HTTP bridge | No (local bridge endpoint) |
| WhatsApp | webhook (Cloud API) or websocket (Web mode) | Cloud API: Yes (public HTTPS callback), Web mode: No |
+| Nextcloud Talk | webhook (`/nextcloud-talk`) | Yes (public HTTPS callback) |
| Webhook | gateway endpoint (`/webhook`) | Usually yes |
| Email | IMAP polling + SMTP send | No |
| IRC | IRC socket | No |
@@ -122,7 +124,7 @@ For channels with inbound sender allowlists:
Field names differ by channel:
-- `allowed_users` (Telegram/Discord/Slack/Mattermost/Matrix/IRC/Lark/DingTalk/QQ)
+- `allowed_users` (Telegram/Discord/Slack/Mattermost/Matrix/IRC/Lark/DingTalk/QQ/Nextcloud Talk)
- `allowed_from` (Signal)
- `allowed_numbers` (WhatsApp)
- `allowed_senders` (Email)
@@ -336,7 +338,25 @@ app_secret = "qq-app-secret"
allowed_users = ["*"]
```
-### 4.14 iMessage
+### 4.14 Nextcloud Talk
+
+```toml
+[channels_config.nextcloud_talk]
+base_url = "https://cloud.example.com"
+app_token = "nextcloud-talk-app-token"
+webhook_secret = "optional-webhook-secret" # optional but recommended
+allowed_users = ["*"]
+```
+
+Notes:
+
+- Inbound webhook endpoint: `POST /nextcloud-talk`.
+- Signature verification uses `X-Nextcloud-Talk-Random` and `X-Nextcloud-Talk-Signature`.
+- If `webhook_secret` is set, invalid signatures are rejected with `401`.
+- `ZEROCLAW_NEXTCLOUD_TALK_WEBHOOK_SECRET` overrides config secret.
+- See [nextcloud-talk-setup.md](./nextcloud-talk-setup.md) for a full runbook.
+
+### 4.15 iMessage
```toml
[channels_config.imessage]
@@ -411,6 +431,7 @@ rg -n "Matrix|Telegram|Discord|Slack|Mattermost|Signal|WhatsApp|Email|IRC|Lark|D
| Lark / Feishu | `Lark: WS connected` / `Lark event callback server listening on` | `Lark WS: ignoring ... (not in allowed_users)` / `Lark: ignoring message from unauthorized user:` | `Lark: ping failed, reconnecting` / `Lark: heartbeat timeout, reconnecting` / `Lark: WS read error:` |
| DingTalk | `DingTalk: connected and listening for messages...` | `DingTalk: ignoring message from unauthorized user:` | `DingTalk WebSocket error:` / `DingTalk: message channel closed` |
| QQ | `QQ: connected and identified` | `QQ: ignoring C2C message from unauthorized user:` / `QQ: ignoring group message from unauthorized user:` | `QQ: received Reconnect (op 7)` / `QQ: received Invalid Session (op 9)` / `QQ: message channel closed` |
+| Nextcloud Talk (gateway) | `POST /nextcloud-talk — Nextcloud Talk bot webhook` | `Nextcloud Talk webhook signature verification failed` / `Nextcloud Talk: ignoring message from unauthorized actor:` | `Nextcloud Talk send failed:` / `LLM error for Nextcloud Talk message:` |
| iMessage | `iMessage channel listening (AppleScript bridge)...` | (contact allowlist enforced by `allowed_contacts`) | `iMessage poll error:` |
### 7.3 Runtime supervisor keywords
diff --git a/docs/config-reference.md b/docs/config-reference.md
index 7acfdb2d4..f1924c9bd 100644
--- a/docs/config-reference.md
+++ b/docs/config-reference.md
@@ -391,6 +391,7 @@ Examples:
- `[channels_config.telegram]`
- `[channels_config.discord]`
- `[channels_config.whatsapp]`
+- `[channels_config.nextcloud_talk]`
- `[channels_config.email]`
Notes:
@@ -435,6 +436,23 @@ Notes:
- WhatsApp Web requires build flag `whatsapp-web`.
- If both Cloud and Web fields are present, Cloud mode wins for backward compatibility.
+### `[channels_config.nextcloud_talk]`
+
+Native Nextcloud Talk bot integration (webhook receive + OCS send API).
+
+| Key | Required | Purpose |
+|---|---|---|
+| `base_url` | Yes | Nextcloud base URL (e.g. `https://cloud.example.com`) |
+| `app_token` | Yes | Bot app token used for OCS bearer auth |
+| `webhook_secret` | Optional | Enables webhook signature verification |
+| `allowed_users` | Recommended | Allowed Nextcloud actor IDs (`[]` = deny all, `"*"` = allow all) |
+
+Notes:
+
+- Webhook endpoint is `POST /nextcloud-talk`.
+- `ZEROCLAW_NEXTCLOUD_TALK_WEBHOOK_SECRET` overrides `webhook_secret` when set.
+- See [nextcloud-talk-setup.md](nextcloud-talk-setup.md) for setup and troubleshooting.
+
## `[hardware]`
Hardware wizard configuration for physical-world access (STM32, probe, serial).
diff --git a/docs/docs-inventory.md b/docs/docs-inventory.md
index f69bf9a81..875cafa3e 100644
--- a/docs/docs-inventory.md
+++ b/docs/docs-inventory.md
@@ -45,6 +45,7 @@ Last reviewed: **February 18, 2026**.
| `docs/commands-reference.md` | Current Reference | users/operators |
| `docs/providers-reference.md` | Current Reference | users/operators |
| `docs/channels-reference.md` | Current Reference | users/operators |
+| `docs/nextcloud-talk-setup.md` | Current Guide | operators |
| `docs/config-reference.md` | Current Reference | operators |
| `docs/custom-providers.md` | Current Integration Guide | integration developers |
| `docs/zai-glm-setup.md` | Current Provider Setup Guide | users/operators |
diff --git a/docs/network-deployment.md b/docs/network-deployment.md
index 28bd99ebe..f6a5f8cb4 100644
--- a/docs/network-deployment.md
+++ b/docs/network-deployment.md
@@ -11,7 +11,7 @@ This document covers deploying ZeroClaw on a Raspberry Pi or other host on your
| **Telegram polling** | No | ZeroClaw polls Telegram API; works from anywhere |
| **Matrix sync (including E2EE)** | No | ZeroClaw syncs via Matrix client API; no inbound webhook required |
| **Discord/Slack** | No | Same — outbound only |
-| **Gateway webhook** | Yes | POST /webhook, WhatsApp, etc. need a public URL |
+| **Gateway webhook** | Yes | POST /webhook, /whatsapp, /linq, /nextcloud-talk need a public URL |
| **Gateway pairing** | Yes | If you pair clients via the gateway |
**Key:** Telegram, Discord, and Slack use **long-polling** — ZeroClaw makes outbound requests. No port forwarding or public IP required.
@@ -156,7 +156,7 @@ you have a polling conflict. Stop extra instances and restart only one daemon.
---
-## 5. Webhook Channels (WhatsApp, Custom)
+## 5. Webhook Channels (WhatsApp, Nextcloud Talk, Custom)
Webhook-based channels need a **public URL** so Meta (WhatsApp) or your client can POST events.
diff --git a/docs/nextcloud-talk-setup.md b/docs/nextcloud-talk-setup.md
new file mode 100644
index 000000000..a2c445a6a
--- /dev/null
+++ b/docs/nextcloud-talk-setup.md
@@ -0,0 +1,78 @@
+# Nextcloud Talk Setup
+
+This guide covers native Nextcloud Talk integration for ZeroClaw.
+
+## 1. What this integration does
+
+- Receives inbound Talk bot webhook events via `POST /nextcloud-talk`.
+- Verifies webhook signatures (HMAC-SHA256) when a secret is configured.
+- Sends bot replies back to Talk rooms via Nextcloud OCS API.
+
+## 2. Configuration
+
+Add this section in `~/.zeroclaw/config.toml`:
+
+```toml
+[channels_config.nextcloud_talk]
+base_url = "https://cloud.example.com"
+app_token = "nextcloud-talk-app-token"
+webhook_secret = "optional-webhook-secret"
+allowed_users = ["*"]
+```
+
+Field reference:
+
+- `base_url`: Nextcloud base URL.
+- `app_token`: Bot app token sent as `Authorization: Bearer <app_token>` for the OCS send API.
+- `webhook_secret`: Shared secret for verifying `X-Nextcloud-Talk-Signature`.
+- `allowed_users`: Allowed Nextcloud actor IDs (`[]` denies all, `"*"` allows all).
+
+Environment override:
+
+- `ZEROCLAW_NEXTCLOUD_TALK_WEBHOOK_SECRET` overrides `webhook_secret` when set.
+
+## 3. Gateway endpoint
+
+Run the daemon or gateway and expose the webhook endpoint:
+
+```bash
+zeroclaw daemon
+# or
+zeroclaw gateway --host 127.0.0.1 --port 3000
+```
+
+Configure your Nextcloud Talk bot webhook URL to:
+
+- `https://<your-gateway-host>/nextcloud-talk`
+
+## 4. Signature verification contract
+
+When `webhook_secret` is configured, ZeroClaw verifies:
+
+- header `X-Nextcloud-Talk-Random`
+- header `X-Nextcloud-Talk-Signature`
+
+Verification formula:
+
+- `hex(hmac_sha256(secret, random + raw_request_body))`
+
+If verification fails, the gateway returns `401 Unauthorized`.
+
+## 5. Message routing behavior
+
+- ZeroClaw ignores bot-originated webhook events (`actorType = bots`).
+- ZeroClaw ignores non-message/system events.
+- Reply routing uses the Talk room token from the webhook payload.
+
+## 6. Quick validation checklist
+
+1. Set `allowed_users = ["*"]` for first-time validation.
+2. Send a test message in the target Talk room.
+3. Confirm ZeroClaw receives and replies in the same room.
+4. Tighten `allowed_users` to explicit actor IDs.
+
+## 7. Troubleshooting
+
+- `404 Nextcloud Talk not configured`: missing `[channels_config.nextcloud_talk]`.
+- `401 Invalid signature`: mismatch in `webhook_secret`, random header, or raw-body signing.
+- No reply but webhook `200`: event filtered (bot/system/non-allowed user/non-message payload).
diff --git a/docs/reference/README.md b/docs/reference/README.md
index 3bab9bca7..aa215eb4d 100644
--- a/docs/reference/README.md
+++ b/docs/reference/README.md
@@ -13,6 +13,7 @@ Structured reference index for commands, providers, channels, config, and integr
- Custom provider endpoints: [../custom-providers.md](../custom-providers.md)
- Z.AI / GLM provider onboarding: [../zai-glm-setup.md](../zai-glm-setup.md)
+- Nextcloud Talk bot integration: [../nextcloud-talk-setup.md](../nextcloud-talk-setup.md)
- LangGraph-based integration patterns: [../langgraph-integration.md](../langgraph-integration.md)
## Usage
diff --git a/src/channels/mod.rs b/src/channels/mod.rs
index dd4771681..d54fb2690 100644
--- a/src/channels/mod.rs
+++ b/src/channels/mod.rs
@@ -25,6 +25,7 @@ pub mod linq;
#[cfg(feature = "channel-matrix")]
pub mod matrix;
pub mod mattermost;
+pub mod nextcloud_talk;
pub mod qq;
pub mod signal;
pub mod slack;
@@ -47,6 +48,7 @@ pub use linq::LinqChannel;
#[cfg(feature = "channel-matrix")]
pub use matrix::MatrixChannel;
pub use mattermost::MattermostChannel;
+pub use nextcloud_talk::NextcloudTalkChannel;
pub use qq::QQChannel;
pub use signal::SignalChannel;
pub use slack::SlackChannel;
@@ -1972,6 +1974,10 @@ pub async fn handle_command(command: crate::ChannelCommands, config: &Config) ->
("Signal", config.channels_config.signal.is_some()),
("WhatsApp", config.channels_config.whatsapp.is_some()),
("Linq", config.channels_config.linq.is_some()),
+ (
+ "Nextcloud Talk",
+ config.channels_config.nextcloud_talk.is_some(),
+ ),
("Email", config.channels_config.email.is_some()),
("IRC", config.channels_config.irc.is_some()),
("Lark", config.channels_config.lark.is_some()),
@@ -2171,6 +2177,17 @@ pub async fn doctor_channels(config: Config) -> Result<()> {
));
}
+ if let Some(ref nc) = config.channels_config.nextcloud_talk {
+ channels.push((
+ "Nextcloud Talk",
+ Arc::new(NextcloudTalkChannel::new(
+ nc.base_url.clone(),
+ nc.app_token.clone(),
+ nc.allowed_users.clone(),
+ )),
+ ));
+ }
+
if let Some(ref email_cfg) = config.channels_config.email {
channels.push(("Email", Arc::new(EmailChannel::new(email_cfg.clone()))));
}
@@ -2556,6 +2573,14 @@ pub async fn start_channels(config: Config) -> Result<()> {
)));
}
+ if let Some(ref nc) = config.channels_config.nextcloud_talk {
+ channels.push(Arc::new(NextcloudTalkChannel::new(
+ nc.base_url.clone(),
+ nc.app_token.clone(),
+ nc.allowed_users.clone(),
+ )));
+ }
+
if let Some(ref email_cfg) = config.channels_config.email {
channels.push(Arc::new(EmailChannel::new(email_cfg.clone())));
}
diff --git a/src/channels/nextcloud_talk.rs b/src/channels/nextcloud_talk.rs
new file mode 100644
index 000000000..574a5b6f1
--- /dev/null
+++ b/src/channels/nextcloud_talk.rs
@@ -0,0 +1,485 @@
+use super::traits::{Channel, ChannelMessage, SendMessage};
+use async_trait::async_trait;
+use hmac::{Hmac, Mac};
+use sha2::Sha256;
+use uuid::Uuid;
+
+/// Nextcloud Talk channel in webhook mode.
+///
+/// Incoming messages are received by the gateway endpoint `/nextcloud-talk`.
+/// Outbound replies are sent through Nextcloud Talk OCS API.
+pub struct NextcloudTalkChannel {
+ base_url: String,
+ app_token: String,
+ allowed_users: Vec<String>,
+ client: reqwest::Client,
+}
+
+impl NextcloudTalkChannel {
+ pub fn new(base_url: String, app_token: String, allowed_users: Vec<String>) -> Self {
+ Self {
+ base_url: base_url.trim_end_matches('/').to_string(),
+ app_token,
+ allowed_users,
+ client: reqwest::Client::new(),
+ }
+ }
+
+ fn is_user_allowed(&self, actor_id: &str) -> bool {
+ self.allowed_users.iter().any(|u| u == "*" || u == actor_id)
+ }
+
+ fn now_unix_secs() -> u64 {
+ std::time::SystemTime::now()
+ .duration_since(std::time::UNIX_EPOCH)
+ .unwrap_or_default()
+ .as_secs()
+ }
+
+ fn parse_timestamp_secs(value: Option<&serde_json::Value>) -> u64 {
+ let raw = match value {
+ Some(serde_json::Value::Number(num)) => num.as_u64(),
+ Some(serde_json::Value::String(s)) => s.trim().parse::<u64>().ok(),
+ _ => None,
+ }
+ .unwrap_or_else(Self::now_unix_secs);
+
+ // Some payloads use milliseconds.
+ if raw > 1_000_000_000_000 {
+ raw / 1000
+ } else {
+ raw
+ }
+ }
+
+ fn value_to_string(value: Option<&serde_json::Value>) -> Option<String> {
+ match value {
+ Some(serde_json::Value::String(s)) => Some(s.clone()),
+ Some(serde_json::Value::Number(n)) => Some(n.to_string()),
+ _ => None,
+ }
+ }
+
+ /// Parse a Nextcloud Talk webhook payload into channel messages.
+ ///
+ /// Relevant payload fields:
+ /// - `type` (expects `message`)
+ /// - `object.token` (room token for reply routing)
+ /// - `message.actorType`, `message.actorId`, `message.message`, `message.timestamp`
+ pub fn parse_webhook_payload(&self, payload: &serde_json::Value) -> Vec<ChannelMessage> {
+ let mut messages = Vec::new();
+
+ if let Some(event_type) = payload.get("type").and_then(|v| v.as_str()) {
+ if !event_type.eq_ignore_ascii_case("message") {
+ tracing::debug!("Nextcloud Talk: skipping non-message event: {event_type}");
+ return messages;
+ }
+ }
+
+ let Some(message_obj) = payload.get("message") else {
+ return messages;
+ };
+
+ let room_token = payload
+ .get("object")
+ .and_then(|obj| obj.get("token"))
+ .and_then(|v| v.as_str())
+ .or_else(|| message_obj.get("token").and_then(|v| v.as_str()))
+ .map(str::trim)
+ .filter(|token| !token.is_empty());
+
+ let Some(room_token) = room_token else {
+ tracing::warn!("Nextcloud Talk: missing room token in webhook payload");
+ return messages;
+ };
+
+ let actor_type = message_obj
+ .get("actorType")
+ .and_then(|v| v.as_str())
+ .or_else(|| payload.get("actorType").and_then(|v| v.as_str()))
+ .unwrap_or("");
+
+ // Ignore bot-originated messages to prevent feedback loops.
+ if actor_type.eq_ignore_ascii_case("bots") {
+ tracing::debug!("Nextcloud Talk: skipping bot-originated message");
+ return messages;
+ }
+
+ let actor_id = message_obj
+ .get("actorId")
+ .and_then(|v| v.as_str())
+ .or_else(|| payload.get("actorId").and_then(|v| v.as_str()))
+ .map(str::trim)
+ .filter(|id| !id.is_empty());
+
+ let Some(actor_id) = actor_id else {
+ tracing::warn!("Nextcloud Talk: missing actorId in webhook payload");
+ return messages;
+ };
+
+ if !self.is_user_allowed(actor_id) {
+ tracing::warn!(
+ "Nextcloud Talk: ignoring message from unauthorized actor: {actor_id}. \
+ Add to channels.nextcloud_talk.allowed_users in config.toml, \
+ or run `zeroclaw onboard --channels-only` to configure interactively."
+ );
+ return messages;
+ }
+
+ let message_type = message_obj
+ .get("messageType")
+ .and_then(|v| v.as_str())
+ .unwrap_or("comment");
+ if !message_type.eq_ignore_ascii_case("comment") {
+ tracing::debug!("Nextcloud Talk: skipping non-comment messageType: {message_type}");
+ return messages;
+ }
+
+ // Ignore pure system messages.
+ let has_system_message = message_obj
+ .get("systemMessage")
+ .and_then(|v| v.as_str())
+ .map(str::trim)
+ .is_some_and(|value| !value.is_empty());
+ if has_system_message {
+ tracing::debug!("Nextcloud Talk: skipping system message event");
+ return messages;
+ }
+
+ let content = message_obj
+ .get("message")
+ .and_then(|v| v.as_str())
+ .map(str::trim)
+ .filter(|content| !content.is_empty());
+
+ let Some(content) = content else {
+ return messages;
+ };
+
+ let message_id = Self::value_to_string(message_obj.get("id"))
+ .unwrap_or_else(|| Uuid::new_v4().to_string());
+ let timestamp = Self::parse_timestamp_secs(message_obj.get("timestamp"));
+
+ messages.push(ChannelMessage {
+ id: message_id,
+ reply_target: room_token.to_string(),
+ sender: actor_id.to_string(),
+ content: content.to_string(),
+ channel: "nextcloud_talk".to_string(),
+ timestamp,
+ thread_ts: None,
+ });
+
+ messages
+ }
+
+ async fn send_to_room(&self, room_token: &str, content: &str) -> anyhow::Result<()> {
+ let encoded_room = urlencoding::encode(room_token);
+ let url = format!(
+ "{}/ocs/v2.php/apps/spreed/api/v1/chat/{}?format=json",
+ self.base_url, encoded_room
+ );
+
+ let response = self
+ .client
+ .post(&url)
+ .bearer_auth(&self.app_token)
+ .header("OCS-APIRequest", "true")
+ .header("Accept", "application/json")
+ .json(&serde_json::json!({ "message": content }))
+ .send()
+ .await?;
+
+ if response.status().is_success() {
+ return Ok(());
+ }
+
+ let status = response.status();
+ let body = response.text().await.unwrap_or_default();
+ tracing::error!("Nextcloud Talk send failed: {status} — {body}");
+ anyhow::bail!("Nextcloud Talk API error: {status}");
+ }
+}
+
+#[async_trait]
+impl Channel for NextcloudTalkChannel {
+ fn name(&self) -> &str {
+ "nextcloud_talk"
+ }
+
+ async fn send(&self, message: &SendMessage) -> anyhow::Result<()> {
+ self.send_to_room(&message.recipient, &message.content)
+ .await
+ }
+
+ async fn listen(&self, _tx: tokio::sync::mpsc::Sender<ChannelMessage>) -> anyhow::Result<()> {
+ tracing::info!(
+ "Nextcloud Talk channel active (webhook mode). \
+ Configure Nextcloud Talk bot webhook to POST to your gateway's /nextcloud-talk endpoint."
+ );
+
+ // Keep task alive; incoming events are handled by the gateway webhook handler.
+ loop {
+ tokio::time::sleep(std::time::Duration::from_secs(3600)).await;
+ }
+ }
+
+ async fn health_check(&self) -> bool {
+ let url = format!("{}/status.php", self.base_url);
+
+ self.client
+ .get(&url)
+ .send()
+ .await
+ .map(|r| r.status().is_success())
+ .unwrap_or(false)
+ }
+}
+
+/// Verify Nextcloud Talk webhook signature.
+///
+/// Signature calculation (official Talk bot docs):
+/// `hex(hmac_sha256(secret, X-Nextcloud-Talk-Random + raw_body))`
+pub fn verify_nextcloud_talk_signature(
+ secret: &str,
+ random: &str,
+ body: &str,
+ signature: &str,
+) -> bool {
+ let random = random.trim();
+ if random.is_empty() {
+ tracing::warn!("Nextcloud Talk: missing X-Nextcloud-Talk-Random header");
+ return false;
+ }
+
+ let signature_hex = signature
+ .trim()
+ .strip_prefix("sha256=")
+ .unwrap_or(signature)
+ .trim();
+
+ let Ok(provided) = hex::decode(signature_hex) else {
+ tracing::warn!("Nextcloud Talk: invalid signature format");
+ return false;
+ };
+
+ let payload = format!("{random}{body}");
+ let Ok(mut mac) = Hmac::<Sha256>::new_from_slice(secret.as_bytes()) else {
+ return false;
+ };
+ mac.update(payload.as_bytes());
+
+ mac.verify_slice(&provided).is_ok()
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ fn make_channel() -> NextcloudTalkChannel {
+ NextcloudTalkChannel::new(
+ "https://cloud.example.com".into(),
+ "app-token".into(),
+ vec!["user_a".into()],
+ )
+ }
+
+ #[test]
+ fn nextcloud_talk_channel_name() {
+ let channel = make_channel();
+ assert_eq!(channel.name(), "nextcloud_talk");
+ }
+
+ #[test]
+ fn nextcloud_talk_user_allowlist_exact_and_wildcard() {
+ let channel = make_channel();
+ assert!(channel.is_user_allowed("user_a"));
+ assert!(!channel.is_user_allowed("user_b"));
+
+ let wildcard = NextcloudTalkChannel::new(
+ "https://cloud.example.com".into(),
+ "app-token".into(),
+ vec!["*".into()],
+ );
+ assert!(wildcard.is_user_allowed("any_user"));
+ }
+
+ #[test]
+ fn nextcloud_talk_parse_valid_message_payload() {
+ let channel = make_channel();
+ let payload = serde_json::json!({
+ "type": "message",
+ "object": {
+ "id": "42",
+ "token": "room-token-123",
+ "name": "Team Room",
+ "type": "room"
+ },
+ "message": {
+ "id": 77,
+ "token": "room-token-123",
+ "actorType": "users",
+ "actorId": "user_a",
+ "actorDisplayName": "User A",
+ "timestamp": 1_735_701_200,
+ "messageType": "comment",
+ "systemMessage": "",
+ "message": "Hello from Nextcloud"
+ }
+ });
+
+ let messages = channel.parse_webhook_payload(&payload);
+ assert_eq!(messages.len(), 1);
+ assert_eq!(messages[0].id, "77");
+ assert_eq!(messages[0].reply_target, "room-token-123");
+ assert_eq!(messages[0].sender, "user_a");
+ assert_eq!(messages[0].content, "Hello from Nextcloud");
+ assert_eq!(messages[0].channel, "nextcloud_talk");
+ assert_eq!(messages[0].timestamp, 1_735_701_200);
+ }
+
+ #[test]
+ fn nextcloud_talk_parse_skips_non_message_events() {
+ let channel = make_channel();
+ let payload = serde_json::json!({
+ "type": "room",
+ "object": {"token": "room-token-123"},
+ "message": {
+ "actorType": "users",
+ "actorId": "user_a",
+ "message": "Hello"
+ }
+ });
+
+ let messages = channel.parse_webhook_payload(&payload);
+ assert!(messages.is_empty());
+ }
+
+ #[test]
+ fn nextcloud_talk_parse_skips_bot_messages() {
+ let channel = NextcloudTalkChannel::new(
+ "https://cloud.example.com".into(),
+ "app-token".into(),
+ vec!["*".into()],
+ );
+ let payload = serde_json::json!({
+ "type": "message",
+ "object": {"token": "room-token-123"},
+ "message": {
+ "actorType": "bots",
+ "actorId": "bot_1",
+ "message": "Self message"
+ }
+ });
+
+ let messages = channel.parse_webhook_payload(&payload);
+ assert!(messages.is_empty());
+ }
+
+ #[test]
+ fn nextcloud_talk_parse_skips_unauthorized_sender() {
+ let channel = make_channel();
+ let payload = serde_json::json!({
+ "type": "message",
+ "object": {"token": "room-token-123"},
+ "message": {
+ "actorType": "users",
+ "actorId": "user_b",
+ "message": "Unauthorized"
+ }
+ });
+
+ let messages = channel.parse_webhook_payload(&payload);
+ assert!(messages.is_empty());
+ }
+
+ #[test]
+ fn nextcloud_talk_parse_skips_system_message() {
+ let channel = NextcloudTalkChannel::new(
+ "https://cloud.example.com".into(),
+ "app-token".into(),
+ vec!["*".into()],
+ );
+ let payload = serde_json::json!({
+ "type": "message",
+ "object": {"token": "room-token-123"},
+ "message": {
+ "actorType": "users",
+ "actorId": "user_a",
+ "messageType": "comment",
+ "systemMessage": "joined",
+ "message": ""
+ }
+ });
+
+ let messages = channel.parse_webhook_payload(&payload);
+ assert!(messages.is_empty());
+ }
+
+ #[test]
+ fn nextcloud_talk_parse_timestamp_millis_to_seconds() {
+ let channel = NextcloudTalkChannel::new(
+ "https://cloud.example.com".into(),
+ "app-token".into(),
+ vec!["*".into()],
+ );
+ let payload = serde_json::json!({
+ "type": "message",
+ "object": {"token": "room-token-123"},
+ "message": {
+ "actorType": "users",
+ "actorId": "user_a",
+ "timestamp": 1735701200123u64,
+ "message": "hello"
+ }
+ });
+
+ let messages = channel.parse_webhook_payload(&payload);
+ assert_eq!(messages.len(), 1);
+ assert_eq!(messages[0].timestamp, 1_735_701_200);
+ }
+
+ const TEST_WEBHOOK_SECRET: &str = "nextcloud_test_webhook_secret";
+
+ #[test]
+ fn nextcloud_talk_signature_verification_valid() {
+ let secret = TEST_WEBHOOK_SECRET;
+ let random = "random-seed";
+ let body = r#"{"type":"message"}"#;
+
+ let payload = format!("{random}{body}");
+ let mut mac = Hmac::<Sha256>::new_from_slice(secret.as_bytes()).unwrap();
+ mac.update(payload.as_bytes());
+ let signature = hex::encode(mac.finalize().into_bytes());
+
+ assert!(verify_nextcloud_talk_signature(
+ secret, random, body, &signature
+ ));
+ }
+
+ #[test]
+ fn nextcloud_talk_signature_verification_invalid() {
+ assert!(!verify_nextcloud_talk_signature(
+ TEST_WEBHOOK_SECRET,
+ "random-seed",
+ r#"{"type":"message"}"#,
+ "deadbeef"
+ ));
+ }
+
+ #[test]
+ fn nextcloud_talk_signature_verification_accepts_sha256_prefix() {
+ let secret = TEST_WEBHOOK_SECRET;
+ let random = "random-seed";
+ let body = r#"{"type":"message"}"#;
+
+ let payload = format!("{random}{body}");
+ let mut mac = Hmac::<Sha256>::new_from_slice(secret.as_bytes()).unwrap();
+ mac.update(payload.as_bytes());
+ let signature = format!("sha256={}", hex::encode(mac.finalize().into_bytes()));
+
+ assert!(verify_nextcloud_talk_signature(
+ secret, random, body, &signature
+ ));
+ }
+}
diff --git a/src/config/mod.rs b/src/config/mod.rs
index 4649f9ca3..fbde82e96 100644
--- a/src/config/mod.rs
+++ b/src/config/mod.rs
@@ -9,11 +9,11 @@ pub use schema::{
DelegateAgentConfig, DiscordConfig, DockerRuntimeConfig, EmbeddingRouteConfig, GatewayConfig,
HardwareConfig, HardwareTransport, HeartbeatConfig, HttpRequestConfig, IMessageConfig,
IdentityConfig, LarkConfig, MatrixConfig, MemoryConfig, ModelRouteConfig, MultimodalConfig,
- ObservabilityConfig, PeripheralBoardConfig, PeripheralsConfig, ProxyConfig, ProxyScope,
- QueryClassificationConfig, ReliabilityConfig, ResourceLimitsConfig, RuntimeConfig,
- SandboxBackend, SandboxConfig, SchedulerConfig, SecretsConfig, SecurityConfig, SkillsConfig,
- SlackConfig, StorageConfig, StorageProviderConfig, StorageProviderSection, StreamMode,
- TelegramConfig, TunnelConfig, WebSearchConfig, WebhookConfig,
+ NextcloudTalkConfig, ObservabilityConfig, PeripheralBoardConfig, PeripheralsConfig,
+ ProxyConfig, ProxyScope, QueryClassificationConfig, ReliabilityConfig, ResourceLimitsConfig,
+ RuntimeConfig, SandboxBackend, SandboxConfig, SchedulerConfig, SecretsConfig, SecurityConfig,
+ SkillsConfig, SlackConfig, StorageConfig, StorageProviderConfig, StorageProviderSection,
+ StreamMode, TelegramConfig, TunnelConfig, WebSearchConfig, WebhookConfig,
};
#[cfg(test)]
@@ -59,8 +59,16 @@ mod tests {
port: None,
};
+ let nextcloud_talk = NextcloudTalkConfig {
+ base_url: "https://cloud.example.com".into(),
+ app_token: "app-token".into(),
+ webhook_secret: None,
+ allowed_users: vec!["*".into()],
+ };
+
assert_eq!(telegram.allowed_users.len(), 1);
assert_eq!(discord.guild_id.as_deref(), Some("123"));
assert_eq!(lark.app_id, "app-id");
+ assert_eq!(nextcloud_talk.base_url, "https://cloud.example.com");
}
}
diff --git a/src/config/schema.rs b/src/config/schema.rs
index 4347a8c32..2030feec3 100644
--- a/src/config/schema.rs
+++ b/src/config/schema.rs
@@ -26,6 +26,7 @@ const SUPPORTED_PROXY_SERVICE_KEYS: &[&str] = &[
"channel.lark",
"channel.matrix",
"channel.mattermost",
+ "channel.nextcloud_talk",
"channel.qq",
"channel.signal",
"channel.slack",
@@ -2226,6 +2227,8 @@ pub struct ChannelsConfig {
pub whatsapp: Option<WhatsAppConfig>,
/// Linq Partner API channel configuration.
pub linq: Option<LinqConfig>,
+ /// Nextcloud Talk bot channel configuration.
+ pub nextcloud_talk: Option<NextcloudTalkConfig>,
/// Email channel configuration.
pub email: Option,
/// IRC channel configuration.
@@ -2263,6 +2266,7 @@ impl Default for ChannelsConfig {
signal: None,
whatsapp: None,
linq: None,
+ nextcloud_talk: None,
email: None,
irc: None,
lark: None,
@@ -2477,6 +2481,23 @@ pub struct LinqConfig {
pub allowed_senders: Vec<String>,
}
+/// Nextcloud Talk bot configuration (webhook receive + OCS send API).
+#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
+pub struct NextcloudTalkConfig {
+ /// Nextcloud base URL (e.g. "https://cloud.example.com").
+ pub base_url: String,
+ /// Bot app token used for OCS API bearer auth.
+ pub app_token: String,
+ /// Shared secret for webhook signature verification.
+ ///
+ /// Can also be set via `ZEROCLAW_NEXTCLOUD_TALK_WEBHOOK_SECRET`.
+ #[serde(default)]
+ pub webhook_secret: Option<String>,
+ /// Allowed Nextcloud actor IDs (`[]` = deny all, `"*"` = allow all).
+ #[serde(default)]
+ pub allowed_users: Vec<String>,
+}
+
impl WhatsAppConfig {
/// Detect which backend to use based on config fields.
/// Returns "cloud" if phone_number_id is set, "web" if session_path is set.
@@ -3836,6 +3857,7 @@ default_temperature = 0.7
signal: None,
whatsapp: None,
linq: None,
+ nextcloud_talk: None,
email: None,
irc: None,
lark: None,
@@ -4379,6 +4401,7 @@ allowed_users = ["@ops:matrix.org"]
signal: None,
whatsapp: None,
linq: None,
+ nextcloud_talk: None,
email: None,
irc: None,
lark: None,
@@ -4588,6 +4611,7 @@ channel_id = "C123"
allowed_numbers: vec!["+1".into()],
}),
linq: None,
+ nextcloud_talk: None,
email: None,
irc: None,
lark: None,
@@ -4609,6 +4633,12 @@ channel_id = "C123"
assert!(c.whatsapp.is_none());
}
+ #[test]
+ async fn channels_config_default_has_no_nextcloud_talk() {
+ let c = ChannelsConfig::default();
+ assert!(c.nextcloud_talk.is_none());
+ }
+
// ══════════════════════════════════════════════════════════
// SECURITY CHECKLIST TESTS — Gateway config
// ══════════════════════════════════════════════════════════
@@ -5840,6 +5870,31 @@ default_model = "legacy-model"
assert_eq!(parsed.allowed_users, vec!["*"]);
}
+ #[test]
+ async fn nextcloud_talk_config_serde() {
+ let nc = NextcloudTalkConfig {
+ base_url: "https://cloud.example.com".into(),
+ app_token: "app-token".into(),
+ webhook_secret: Some("webhook-secret".into()),
+ allowed_users: vec!["user_a".into(), "*".into()],
+ };
+
+ let json = serde_json::to_string(&nc).unwrap();
+ let parsed: NextcloudTalkConfig = serde_json::from_str(&json).unwrap();
+ assert_eq!(parsed.base_url, "https://cloud.example.com");
+ assert_eq!(parsed.app_token, "app-token");
+ assert_eq!(parsed.webhook_secret.as_deref(), Some("webhook-secret"));
+ assert_eq!(parsed.allowed_users, vec!["user_a", "*"]);
+ }
+
+ #[test]
+ async fn nextcloud_talk_config_defaults_optional_fields() {
+ let json = r#"{"base_url":"https://cloud.example.com","app_token":"app-token"}"#;
+ let parsed: NextcloudTalkConfig = serde_json::from_str(json).unwrap();
+ assert!(parsed.webhook_secret.is_none());
+ assert!(parsed.allowed_users.is_empty());
+ }
+
// ── Config file permission hardening (Unix only) ───────────────
#[cfg(unix)]
diff --git a/src/daemon/mod.rs b/src/daemon/mod.rs
index a2dfee2c0..96d0eeee6 100644
--- a/src/daemon/mod.rs
+++ b/src/daemon/mod.rs
@@ -225,6 +225,7 @@ fn has_supervised_channels(config: &Config) -> bool {
lark,
dingtalk,
linq,
+ nextcloud_talk,
qq,
..
} = &config.channels_config;
@@ -242,6 +243,7 @@ fn has_supervised_channels(config: &Config) -> bool {
|| lark.is_some()
|| dingtalk.is_some()
|| linq.is_some()
+ || nextcloud_talk.is_some()
|| qq.is_some()
}
@@ -362,4 +364,16 @@ mod tests {
});
assert!(has_supervised_channels(&config));
}
+
+ #[test]
+ fn detects_nextcloud_talk_as_supervised_channel() {
+ let mut config = Config::default();
+ config.channels_config.nextcloud_talk = Some(crate::config::schema::NextcloudTalkConfig {
+ base_url: "https://cloud.example.com".into(),
+ app_token: "app-token".into(),
+ webhook_secret: None,
+ allowed_users: vec!["*".into()],
+ });
+ assert!(has_supervised_channels(&config));
+ }
}
diff --git a/src/doctor/mod.rs b/src/doctor/mod.rs
index bece584d0..c8a034707 100644
--- a/src/doctor/mod.rs
+++ b/src/doctor/mod.rs
@@ -404,6 +404,7 @@ fn check_config_semantics(config: &Config, items: &mut Vec) {
|| cc.imessage.is_some()
|| cc.matrix.is_some()
|| cc.whatsapp.is_some()
+ || cc.nextcloud_talk.is_some()
|| cc.email.is_some()
|| cc.irc.is_some()
|| cc.lark.is_some()
diff --git a/src/gateway/mod.rs b/src/gateway/mod.rs
index 1142ed7b6..97890d89f 100644
--- a/src/gateway/mod.rs
+++ b/src/gateway/mod.rs
@@ -7,7 +7,7 @@
//! - Request timeouts (30s) to prevent slow-loris attacks
//! - Header sanitization (handled by axum/hyper)
-use crate::channels::{Channel, LinqChannel, SendMessage, WhatsAppChannel};
+use crate::channels::{Channel, LinqChannel, NextcloudTalkChannel, SendMessage, WhatsAppChannel};
use crate::config::Config;
use crate::memory::{self, Memory, MemoryCategory};
use crate::providers::{self, ChatMessage, Provider, ProviderCapabilityError};
@@ -57,6 +57,10 @@ fn linq_memory_key(msg: &crate::channels::traits::ChannelMessage) -> String {
format!("linq_{}_{}", msg.sender, msg.id)
}
+fn nextcloud_talk_memory_key(msg: &crate::channels::traits::ChannelMessage) -> String {
+ format!("nextcloud_talk_{}_{}", msg.sender, msg.id)
+}
+
fn hash_webhook_secret(value: &str) -> String {
use sha2::{Digest, Sha256};
@@ -281,6 +285,9 @@ pub struct AppState {
pub linq: Option<Arc<LinqChannel>>,
/// Linq webhook signing secret for signature verification
pub linq_signing_secret: Option<Arc<str>>,
+ pub nextcloud_talk: Option<Arc<NextcloudTalkChannel>>,
+ /// Nextcloud Talk webhook secret for signature verification
+ pub nextcloud_talk_webhook_secret: Option<Arc<str>>,
/// Observability backend for metrics scraping
pub observer: Arc,
}
@@ -429,6 +436,40 @@ pub async fn run_gateway(host: &str, port: u16, config: Config) -> Result<()> {
})
.map(Arc::from);
+ // Nextcloud Talk channel (if configured)
+ let nextcloud_talk_channel: Option<Arc<NextcloudTalkChannel>> =
+ config.channels_config.nextcloud_talk.as_ref().map(|nc| {
+ Arc::new(NextcloudTalkChannel::new(
+ nc.base_url.clone(),
+ nc.app_token.clone(),
+ nc.allowed_users.clone(),
+ ))
+ });
+
+ // Nextcloud Talk webhook secret for signature verification
+ // Priority: environment variable > config file
+ let nextcloud_talk_webhook_secret: Option<Arc<str>> =
+ std::env::var("ZEROCLAW_NEXTCLOUD_TALK_WEBHOOK_SECRET")
+ .ok()
+ .and_then(|secret| {
+ let secret = secret.trim();
+ (!secret.is_empty()).then(|| secret.to_owned())
+ })
+ .or_else(|| {
+ config
+ .channels_config
+ .nextcloud_talk
+ .as_ref()
+ .and_then(|nc| {
+ nc.webhook_secret
+ .as_deref()
+ .map(str::trim)
+ .filter(|secret| !secret.is_empty())
+ .map(ToOwned::to_owned)
+ })
+ })
+ .map(Arc::from);
+
// ── Pairing guard ──────────────────────────────────────
let pairing = Arc::new(PairingGuard::new(
config.gateway.require_pairing,
@@ -483,6 +524,9 @@ pub async fn run_gateway(host: &str, port: u16, config: Config) -> Result<()> {
if linq_channel.is_some() {
println!(" POST /linq — Linq message webhook (iMessage/RCS/SMS)");
}
+ if nextcloud_talk_channel.is_some() {
+ println!(" POST /nextcloud-talk — Nextcloud Talk bot webhook");
+ }
println!(" GET /health — health check");
println!(" GET /metrics — Prometheus metrics");
if let Some(code) = pairing.pairing_code() {
@@ -521,6 +565,8 @@ pub async fn run_gateway(host: &str, port: u16, config: Config) -> Result<()> {
whatsapp_app_secret,
linq: linq_channel,
linq_signing_secret,
+ nextcloud_talk: nextcloud_talk_channel,
+ nextcloud_talk_webhook_secret,
observer,
};
@@ -533,6 +579,7 @@ pub async fn run_gateway(host: &str, port: u16, config: Config) -> Result<()> {
.route("/whatsapp", get(handle_whatsapp_verify))
.route("/whatsapp", post(handle_whatsapp_message))
.route("/linq", post(handle_linq_webhook))
+ .route("/nextcloud-talk", post(handle_nextcloud_talk_webhook))
.with_state(state)
.layer(RequestBodyLimitLayer::new(MAX_BODY_SIZE))
.layer(TimeoutLayer::with_status_code(
@@ -1179,6 +1226,115 @@ async fn handle_linq_webhook(
(StatusCode::OK, Json(serde_json::json!({"status": "ok"})))
}
+/// POST /nextcloud-talk — incoming message webhook (Nextcloud Talk bot API)
+async fn handle_nextcloud_talk_webhook(
+ State(state): State<AppState>,
+ headers: HeaderMap,
+ body: Bytes,
+) -> impl IntoResponse {
+ let Some(ref nextcloud_talk) = state.nextcloud_talk else {
+ return (
+ StatusCode::NOT_FOUND,
+ Json(serde_json::json!({"error": "Nextcloud Talk not configured"})),
+ );
+ };
+
+ let body_str = String::from_utf8_lossy(&body);
+
+ // ── Security: Verify Nextcloud Talk HMAC signature if secret is configured ──
+ if let Some(ref webhook_secret) = state.nextcloud_talk_webhook_secret {
+ let random = headers
+ .get("X-Nextcloud-Talk-Random")
+ .and_then(|v| v.to_str().ok())
+ .unwrap_or("");
+
+ let signature = headers
+ .get("X-Nextcloud-Talk-Signature")
+ .and_then(|v| v.to_str().ok())
+ .unwrap_or("");
+
+ if !crate::channels::nextcloud_talk::verify_nextcloud_talk_signature(
+ webhook_secret,
+ random,
+ &body_str,
+ signature,
+ ) {
+ tracing::warn!(
+ "Nextcloud Talk webhook signature verification failed (signature: {})",
+ if signature.is_empty() {
+ "missing"
+ } else {
+ "invalid"
+ }
+ );
+ return (
+ StatusCode::UNAUTHORIZED,
+ Json(serde_json::json!({"error": "Invalid signature"})),
+ );
+ }
+ }
+
+ // Parse JSON body
+ let Ok(payload) = serde_json::from_slice::<serde_json::Value>(&body) else {
+ return (
+ StatusCode::BAD_REQUEST,
+ Json(serde_json::json!({"error": "Invalid JSON payload"})),
+ );
+ };
+
+ // Parse messages from webhook payload
+ let messages = nextcloud_talk.parse_webhook_payload(&payload);
+ if messages.is_empty() {
+ // Acknowledge webhook even if payload does not contain actionable user messages.
+ return (StatusCode::OK, Json(serde_json::json!({"status": "ok"})));
+ }
+
+ let provider_label = state
+ .config
+ .lock()
+ .default_provider
+ .clone()
+ .unwrap_or_else(|| "unknown".to_string());
+
+ for msg in &messages {
+ tracing::info!(
+ "Nextcloud Talk message from {}: {}",
+ msg.sender,
+ truncate_with_ellipsis(&msg.content, 50)
+ );
+
+ if state.auto_save {
+ let key = nextcloud_talk_memory_key(msg);
+ let _ = state
+ .mem
+ .store(&key, &msg.content, MemoryCategory::Conversation, None)
+ .await;
+ }
+
+ match run_gateway_chat_with_multimodal(&state, &provider_label, &msg.content).await {
+ Ok(response) => {
+ if let Err(e) = nextcloud_talk
+ .send(&SendMessage::new(response, &msg.reply_target))
+ .await
+ {
+ tracing::error!("Failed to send Nextcloud Talk reply: {e}");
+ }
+ }
+ Err(e) => {
+ tracing::error!("LLM error for Nextcloud Talk message: {e:#}");
+ let _ = nextcloud_talk
+ .send(&SendMessage::new(
+ "Sorry, I couldn't process your message right now.",
+ &msg.reply_target,
+ ))
+ .await;
+ }
+ }
+ }
+
+ (StatusCode::OK, Json(serde_json::json!({"status": "ok"})))
+}
+
#[cfg(test)]
mod tests {
use super::*;
@@ -1254,6 +1410,8 @@ mod tests {
whatsapp_app_secret: None,
linq: None,
linq_signing_secret: None,
+ nextcloud_talk: None,
+ nextcloud_talk_webhook_secret: None,
observer: Arc::new(crate::observability::NoopObserver),
};
@@ -1297,6 +1455,8 @@ mod tests {
whatsapp_app_secret: None,
linq: None,
linq_signing_secret: None,
+ nextcloud_talk: None,
+ nextcloud_talk_webhook_secret: None,
observer,
};
@@ -1657,6 +1817,8 @@ mod tests {
whatsapp_app_secret: None,
linq: None,
linq_signing_secret: None,
+ nextcloud_talk: None,
+ nextcloud_talk_webhook_secret: None,
observer: Arc::new(crate::observability::NoopObserver),
};
@@ -1715,6 +1877,8 @@ mod tests {
whatsapp_app_secret: None,
linq: None,
linq_signing_secret: None,
+ nextcloud_talk: None,
+ nextcloud_talk_webhook_secret: None,
observer: Arc::new(crate::observability::NoopObserver),
};
@@ -1785,6 +1949,8 @@ mod tests {
whatsapp_app_secret: None,
linq: None,
linq_signing_secret: None,
+ nextcloud_talk: None,
+ nextcloud_talk_webhook_secret: None,
observer: Arc::new(crate::observability::NoopObserver),
};
@@ -1827,6 +1993,8 @@ mod tests {
whatsapp_app_secret: None,
linq: None,
linq_signing_secret: None,
+ nextcloud_talk: None,
+ nextcloud_talk_webhook_secret: None,
observer: Arc::new(crate::observability::NoopObserver),
};
@@ -1874,6 +2042,8 @@ mod tests {
whatsapp_app_secret: None,
linq: None,
linq_signing_secret: None,
+ nextcloud_talk: None,
+ nextcloud_talk_webhook_secret: None,
observer: Arc::new(crate::observability::NoopObserver),
};
@@ -1895,6 +2065,109 @@ mod tests {
assert_eq!(provider_impl.calls.load(Ordering::SeqCst), 1);
}
+ fn compute_nextcloud_signature_hex(secret: &str, random: &str, body: &str) -> String {
+ use hmac::{Hmac, Mac};
+ use sha2::Sha256;
+
+ let payload = format!("{random}{body}");
+ let mut mac = Hmac::<Sha256>::new_from_slice(secret.as_bytes()).unwrap();
+ mac.update(payload.as_bytes());
+ hex::encode(mac.finalize().into_bytes())
+ }
+
+ #[tokio::test]
+ async fn nextcloud_talk_webhook_returns_not_found_when_not_configured() {
+ let provider: Arc = Arc::new(MockProvider::default());
+ let memory: Arc = Arc::new(MockMemory);
+
+ let state = AppState {
+ config: Arc::new(Mutex::new(Config::default())),
+ provider,
+ model: "test-model".into(),
+ temperature: 0.0,
+ mem: memory,
+ auto_save: false,
+ webhook_secret_hash: None,
+ pairing: Arc::new(PairingGuard::new(false, &[])),
+ trust_forwarded_headers: false,
+ rate_limiter: Arc::new(GatewayRateLimiter::new(100, 100, 100)),
+ idempotency_store: Arc::new(IdempotencyStore::new(Duration::from_secs(300), 1000)),
+ whatsapp: None,
+ whatsapp_app_secret: None,
+ linq: None,
+ linq_signing_secret: None,
+ nextcloud_talk: None,
+ nextcloud_talk_webhook_secret: None,
+ observer: Arc::new(crate::observability::NoopObserver),
+ };
+
+ let response = handle_nextcloud_talk_webhook(
+ State(state),
+ HeaderMap::new(),
+ Bytes::from_static(br#"{"type":"message"}"#),
+ )
+ .await
+ .into_response();
+
+ assert_eq!(response.status(), StatusCode::NOT_FOUND);
+ }
+
+ #[tokio::test]
+ async fn nextcloud_talk_webhook_rejects_invalid_signature() {
+ let provider_impl = Arc::new(MockProvider::default());
+ let provider: Arc = provider_impl.clone();
+ let memory: Arc = Arc::new(MockMemory);
+
+ let channel = Arc::new(NextcloudTalkChannel::new(
+ "https://cloud.example.com".into(),
+ "app-token".into(),
+ vec!["*".into()],
+ ));
+
+ let secret = "nextcloud-test-secret";
+ let random = "seed-value";
+ let body = r#"{"type":"message","object":{"token":"room-token"},"message":{"actorType":"users","actorId":"user_a","message":"hello"}}"#;
+ let _valid_signature = compute_nextcloud_signature_hex(secret, random, body);
+ let invalid_signature = "deadbeef";
+
+ let state = AppState {
+ config: Arc::new(Mutex::new(Config::default())),
+ provider,
+ model: "test-model".into(),
+ temperature: 0.0,
+ mem: memory,
+ auto_save: false,
+ webhook_secret_hash: None,
+ pairing: Arc::new(PairingGuard::new(false, &[])),
+ trust_forwarded_headers: false,
+ rate_limiter: Arc::new(GatewayRateLimiter::new(100, 100, 100)),
+ idempotency_store: Arc::new(IdempotencyStore::new(Duration::from_secs(300), 1000)),
+ whatsapp: None,
+ whatsapp_app_secret: None,
+ linq: None,
+ linq_signing_secret: None,
+ nextcloud_talk: Some(channel),
+ nextcloud_talk_webhook_secret: Some(Arc::from(secret)),
+ observer: Arc::new(crate::observability::NoopObserver),
+ };
+
+ let mut headers = HeaderMap::new();
+ headers.insert(
+ "X-Nextcloud-Talk-Random",
+ HeaderValue::from_str(random).unwrap(),
+ );
+ headers.insert(
+ "X-Nextcloud-Talk-Signature",
+ HeaderValue::from_str(invalid_signature).unwrap(),
+ );
+
+ let response = handle_nextcloud_talk_webhook(State(state), headers, Bytes::from(body))
+ .await
+ .into_response();
+ assert_eq!(response.status(), StatusCode::UNAUTHORIZED);
+ assert_eq!(provider_impl.calls.load(Ordering::SeqCst), 0);
+ }
+
// ══════════════════════════════════════════════════════════
// WhatsApp Signature Verification Tests (CWE-345 Prevention)
// ══════════════════════════════════════════════════════════
diff --git a/src/main.rs b/src/main.rs
index 6a20ae40e..e6aabeeb1 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -826,6 +826,7 @@ async fn main() -> Result<()> {
("Discord", config.channels_config.discord.is_some()),
("Slack", config.channels_config.slack.is_some()),
("Webhook", config.channels_config.webhook.is_some()),
+ ("Nextcloud", config.channels_config.nextcloud_talk.is_some()),
] {
println!(
" {name:9} {}",
From 87fa033517053a26920081f0d3bcd5ae0a6d3c52 Mon Sep 17 00:00:00 2001
From: Jakub Buzuk <61548378+Baz00k@users.noreply.github.com>
Date: Wed, 18 Feb 2026 00:45:39 +0100
Subject: [PATCH 049/116] feat(service): add OpenRC support for Alpine Linux
- Add InitSystem enum with auto-detection (systemd/OpenRC)
- Add --service-init CLI flag to override init system detection
- Generate OpenRC init script with security hardening:
- Runs as zeroclaw:zeroclaw user
- umask 027 for file permissions
- Logs to /var/log/zeroclaw/
- Depends on net and firewall
- Require root for OpenRC install with clear error message
- Warn if binary is in home directory
- Add OpenRC auto-restart support in channels module
- Document OpenRC setup in README and network-deployment.md
Non-goals:
- No changes to systemd behavior
- No user-level OpenRC services
- No other init systems (SysV, runit, s6)
Security: OpenRC install requires root, validates user, creates
directories with proper permissions
---
Cargo.lock | 26 ++
Cargo.toml | 7 +
README.md | 28 +-
docs/network-deployment.md | 107 +++++++-
src/channels/mod.rs | 51 +++-
src/main.rs | 12 +-
src/service/mod.rs | 509 +++++++++++++++++++++++++++++++++----
7 files changed, 691 insertions(+), 49 deletions(-)
diff --git a/Cargo.lock b/Cargo.lock
index 21ccb7490..404c0fbc9 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1610,6 +1610,12 @@ dependencies = [
"log",
]
+[[package]]
+name = "env_home"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c7f84e12ccf0a7ddc17a6c41c93326024c42920d7ee630d04950e6926645c0fe"
+
[[package]]
name = "equivalent"
version = "1.0.2"
@@ -7050,6 +7056,18 @@ version = "2.6.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "29333c3ea1ba8b17211763463ff24ee84e41c78224c16b001cd907e663a38c68"
+[[package]]
+name = "which"
+version = "7.0.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "24d643ce3fd3e5b54854602a080f34fb10ab75e0b813ee32d00ca2b44fa74762"
+dependencies = [
+ "either",
+ "env_home",
+ "rustix 1.1.3",
+ "winsafe",
+]
+
[[package]]
name = "winapi"
version = "0.3.9"
@@ -7389,6 +7407,12 @@ dependencies = [
"memchr",
]
+[[package]]
+name = "winsafe"
+version = "0.0.19"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d135d17ab770252ad95e9a872d365cf3090e3be864a34ab46f48555993efc904"
+
[[package]]
name = "wit-bindgen"
version = "0.51.0"
@@ -7592,6 +7616,7 @@ dependencies = [
"http-body-util",
"landlock",
"lettre",
+ "libc",
"mail-parser",
"matrix-sdk",
"nusb",
@@ -7640,6 +7665,7 @@ dependencies = [
"wa-rs-tokio-transport",
"wa-rs-ureq-http",
"webpki-roots 1.0.6",
+ "which",
]
[[package]]
diff --git a/Cargo.toml b/Cargo.toml
index 9420771ad..d9c1ed10b 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -104,6 +104,9 @@ console = "0.16"
# Hardware discovery (device path globbing)
glob = "0.3"
+# Binary discovery (init system detection)
+which = "7.0"
+
# WebSocket client channels (Discord/Lark/DingTalk)
tokio-tungstenite = { version = "0.28", features = ["rustls-tls-webpki-roots"] }
futures-util = { version = "0.3", default-features = false, features = ["sink"] }
@@ -163,6 +166,10 @@ wa-rs-tokio-transport = { version = "0.2", optional = true, default-features = f
rppal = { version = "0.22", optional = true }
landlock = { version = "0.4", optional = true }
+# Unix-specific dependencies (for root check, etc.)
+[target.'cfg(unix)'.dependencies]
+libc = "0.2"
+
[features]
default = ["hardware", "channel-matrix"]
hardware = ["nusb", "tokio-serial"]
diff --git a/README.md b/README.md
index ff6a93c68..ee6b89c74 100644
--- a/README.md
+++ b/README.md
@@ -321,6 +321,8 @@ zeroclaw service install
zeroclaw service status
zeroclaw service restart
+# On Alpine (OpenRC): sudo zeroclaw service install --service-init=openrc
+
# Migrate memory from OpenClaw (safe preview first)
zeroclaw migrate openclaw --dry-run
zeroclaw migrate openclaw
@@ -896,7 +898,7 @@ See [aieos.org](https://aieos.org) for the full schema and live examples.
| `agent` | Interactive or single-message chat mode |
| `gateway` | Start webhook server (default: `127.0.0.1:3000`) |
| `daemon` | Start long-running autonomous runtime |
-| `service` | Manage user-level background service |
+| `service install/start/stop/status/uninstall` | Manage background service (systemd user-level or OpenRC system-wide) |
| `doctor` | Diagnose daemon/scheduler/channel freshness |
| `status` | Show full system status |
| `cron` | Manage scheduled tasks (`list/add/add-at/add-every/once/remove/update/pause/resume`) |
@@ -912,6 +914,30 @@ See [aieos.org](https://aieos.org) for the full schema and live examples.
For a task-oriented command guide, see [`docs/commands-reference.md`](docs/commands-reference.md).
+### Service Management
+
+ZeroClaw supports two init systems for background services:
+
+| Init System | Scope | Config Path | Requires |
+|------------|-------|-------------|----------|
+| **systemd** (default on Linux) | User-level | `~/.zeroclaw/config.toml` | No sudo |
+| **OpenRC** (Alpine) | System-wide | `/etc/zeroclaw/config.toml` | sudo/root |
+
+Use `--service-init` to select the init system:
+
+```bash
+# Linux with systemd (default, user-level)
+zeroclaw service install
+zeroclaw service start
+
+# Alpine with OpenRC (system-wide, requires sudo)
+sudo zeroclaw service install --service-init=openrc
+sudo rc-update add zeroclaw default
+sudo rc-service zeroclaw start
+```
+
+For full OpenRC setup instructions, see [docs/network-deployment.md](docs/network-deployment.md#7-openrc-alpine-linux-service).
+
### Open-Skills Opt-In
Community `open-skills` sync is disabled by default. Enable it explicitly in `config.toml`:
diff --git a/docs/network-deployment.md b/docs/network-deployment.md
index f6a5f8cb4..3a77c9fcf 100644
--- a/docs/network-deployment.md
+++ b/docs/network-deployment.md
@@ -13,6 +13,7 @@ This document covers deploying ZeroClaw on a Raspberry Pi or other host on your
| **Discord/Slack** | No | Same — outbound only |
| **Gateway webhook** | Yes | POST /webhook, /whatsapp, /linq, /nextcloud-talk need a public URL |
| **Gateway pairing** | Yes | If you pair clients via the gateway |
+| **Alpine/OpenRC service** | No | System-wide background service on Alpine Linux |
**Key:** Telegram, Discord, and Slack use **long-polling** — ZeroClaw makes outbound requests. No port forwarding or public IP required.
@@ -198,7 +199,111 @@ Configure Cloudflare Tunnel to forward to `127.0.0.1:3000`, then set your webhoo
---
-## 7. References
+## 7. OpenRC (Alpine Linux Service)
+
+ZeroClaw supports OpenRC for Alpine Linux and other distributions using the OpenRC init system. OpenRC services run **system-wide** and require root/sudo.
+
+### 7.1 Prerequisites
+
+- Alpine Linux (or another OpenRC-based distro)
+- Root or sudo access
+- A dedicated `zeroclaw` system user (created during install)
+
+### 7.2 Install Service
+
+```bash
+# Install the OpenRC init script (requires sudo)
+sudo zeroclaw service install --service-init=openrc
+```
+
+This creates:
+- Init script: `/etc/init.d/zeroclaw`
+- Config directory: `/etc/zeroclaw/`
+- Log directory: `/var/log/zeroclaw/`
+
+### 7.3 Configuration
+
+Place your ZeroClaw config at `/etc/zeroclaw/config.toml`:
+
+```bash
+# Copy or create system-wide config
+sudo mkdir -p /etc/zeroclaw
+sudo cp ~/.zeroclaw/config.toml /etc/zeroclaw/config.toml
+
+# Set recommended permissions (root-owned, mode 600 for secrets)
+sudo chown root:root /etc/zeroclaw/config.toml
+sudo chmod 600 /etc/zeroclaw/config.toml
+```
+
+### 7.4 Enable and Start
+
+```bash
+# Add to default runlevel
+sudo rc-update add zeroclaw default
+
+# Start the service
+sudo rc-service zeroclaw start
+
+# Check status
+sudo rc-service zeroclaw status
+```
+
+### 7.5 Manage Service
+
+| Command | Description |
+|---------|-------------|
+| `sudo rc-service zeroclaw start` | Start the daemon |
+| `sudo rc-service zeroclaw stop` | Stop the daemon |
+| `sudo rc-service zeroclaw status` | Check service status |
+| `sudo rc-service zeroclaw restart` | Restart the daemon |
+| `sudo zeroclaw service status --service-init=openrc` | ZeroClaw status wrapper |
+
+### 7.6 Logs
+
+OpenRC routes logs to:
+
+| Log | Path |
+|-----|------|
+| Access/stdout | `/var/log/zeroclaw/access.log` |
+| Errors/stderr | `/var/log/zeroclaw/error.log` |
+
+View logs:
+
+```bash
+sudo tail -f /var/log/zeroclaw/error.log
+```
+
+### 7.7 Uninstall
+
+```bash
+# Stop and remove from runlevel
+sudo rc-service zeroclaw stop
+sudo rc-update del zeroclaw default
+
+# Remove init script
+sudo zeroclaw service uninstall --service-init=openrc
+```
+
+### 7.8 Notes
+
+- OpenRC is **system-wide only** (no user-level services)
+- Requires `sudo` or root for all service operations
+- The service runs as the `zeroclaw:zeroclaw` user (least privilege)
+- Config must be at `/etc/zeroclaw/config.toml` (explicit path in init script)
+- If the `zeroclaw` user does not exist, install will fail with instructions to create it
+
+### 7.9 Checklist: Alpine/OpenRC Deployment
+
+- [ ] Install: `sudo zeroclaw service install --service-init=openrc`
+- [ ] Create config: `/etc/zeroclaw/config.toml` with permissions `600`
+- [ ] Enable: `sudo rc-update add zeroclaw default`
+- [ ] Start: `sudo rc-service zeroclaw start`
+- [ ] Verify: `sudo rc-service zeroclaw status`
+- [ ] Check logs: `/var/log/zeroclaw/error.log`
+
+---
+
+## 8. References
- [channels-reference.md](./channels-reference.md) — Channel configuration overview
- [matrix-e2ee-guide.md](./matrix-e2ee-guide.md) — Matrix setup and encrypted-room troubleshooting
diff --git a/src/channels/mod.rs b/src/channels/mod.rs
index d54fb2690..0e5001251 100644
--- a/src/channels/mod.rs
+++ b/src/channels/mod.rs
@@ -180,6 +180,11 @@ fn runtime_config_store() -> &'static Mutex
STORE.get_or_init(|| Mutex::new(HashMap::new()))
}
+const SYSTEMD_STATUS_ARGS: [&str; 3] = ["--user", "is-active", "zeroclaw.service"];
+const SYSTEMD_RESTART_ARGS: [&str; 3] = ["--user", "restart", "zeroclaw.service"];
+const OPENRC_STATUS_ARGS: [&str; 2] = ["zeroclaw", "status"];
+const OPENRC_RESTART_ARGS: [&str; 2] = ["zeroclaw", "restart"];
+
#[derive(Clone)]
struct ChannelRuntimeContext {
channels_by_name: Arc>>,
@@ -1913,6 +1918,30 @@ fn maybe_restart_managed_daemon_service() -> Result {
}
if cfg!(target_os = "linux") {
+ // OpenRC (system-wide) takes precedence over systemd (user-level)
+ let openrc_init_script = PathBuf::from("/etc/init.d/zeroclaw");
+ if openrc_init_script.exists() {
+ let status_output = Command::new("rc-service")
+ .args(OPENRC_STATUS_ARGS)
+ .output()
+ .context("Failed to query OpenRC service state")?;
+
+ // rc-service exits 0 if running, non-zero otherwise
+ if status_output.status.success() {
+ let restart_output = Command::new("rc-service")
+ .args(OPENRC_RESTART_ARGS)
+ .output()
+ .context("Failed to restart OpenRC daemon service")?;
+ if !restart_output.status.success() {
+ let stderr = String::from_utf8_lossy(&restart_output.stderr);
+ anyhow::bail!("rc-service restart failed: {}", stderr.trim());
+ }
+ return Ok(true);
+ }
+ return Ok(false);
+ }
+
+ // Systemd (user-level)
let home = directories::UserDirs::new()
.map(|u| u.home_dir().to_path_buf())
.context("Could not find home directory")?;
@@ -1926,7 +1955,7 @@ fn maybe_restart_managed_daemon_service() -> Result {
}
let active_output = Command::new("systemctl")
- .args(["--user", "is-active", "zeroclaw.service"])
+ .args(SYSTEMD_STATUS_ARGS)
.output()
.context("Failed to query systemd service state")?;
let state = String::from_utf8_lossy(&active_output.stdout);
@@ -1935,7 +1964,7 @@ fn maybe_restart_managed_daemon_service() -> Result {
}
let restart_output = Command::new("systemctl")
- .args(["--user", "restart", "zeroclaw.service"])
+ .args(SYSTEMD_RESTART_ARGS)
.output()
.context("Failed to restart systemd daemon service")?;
if !restart_output.status.success() {
@@ -5226,4 +5255,22 @@ Mon Feb 20
assert!(join.is_ok(), "listener should stop after channel shutdown");
assert!(calls.load(Ordering::SeqCst) >= 1);
}
+
+ #[test]
+ fn maybe_restart_daemon_systemd_args_regression() {
+ assert_eq!(
+ SYSTEMD_STATUS_ARGS,
+ ["--user", "is-active", "zeroclaw.service"]
+ );
+ assert_eq!(
+ SYSTEMD_RESTART_ARGS,
+ ["--user", "restart", "zeroclaw.service"]
+ );
+ }
+
+ #[test]
+ fn maybe_restart_daemon_openrc_args_regression() {
+ assert_eq!(OPENRC_STATUS_ARGS, ["zeroclaw", "status"]);
+ assert_eq!(OPENRC_RESTART_ARGS, ["zeroclaw", "restart"]);
+ }
}
diff --git a/src/main.rs b/src/main.rs
index e6aabeeb1..a7b1966b8 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -239,6 +239,10 @@ Examples:
/// Manage OS service lifecycle (launchd/systemd user service)
Service {
+ /// Init system to use: auto (detect), systemd, or openrc
+ #[arg(long, default_value = "auto", value_parser = ["auto", "systemd", "openrc"])]
+ service_init: String,
+
#[command(subcommand)]
service_command: ServiceCommands,
},
@@ -898,7 +902,13 @@ async fn main() -> Result<()> {
Ok(())
}
- Commands::Service { service_command } => service::handle_command(&service_command, &config),
+ Commands::Service {
+ service_command,
+ service_init,
+ } => {
+ let init_system = service_init.parse()?;
+ service::handle_command(&service_command, &config, init_system)
+ }
Commands::Doctor { doctor_command } => match doctor_command {
Some(DoctorCommands::Models {
diff --git a/src/service/mod.rs b/src/service/mod.rs
index 0c78c941e..3353f91f8 100644
--- a/src/service/mod.rs
+++ b/src/service/mod.rs
@@ -1,32 +1,115 @@
use crate::config::Config;
-use anyhow::{Context, Result};
+use anyhow::{bail, Context, Result};
use std::fs;
-use std::path::PathBuf;
+use std::path::{Path, PathBuf};
use std::process::Command;
+use std::str::FromStr;
const SERVICE_LABEL: &str = "com.zeroclaw.daemon";
const WINDOWS_TASK_NAME: &str = "ZeroClaw Daemon";
+/// Supported init systems for service management
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
+pub enum InitSystem {
+ /// Auto-detect based on system indicators
+ #[default]
+ Auto,
+ /// systemd (via systemctl --user)
+ Systemd,
+ /// OpenRC (via rc-service)
+ Openrc,
+}
+
+impl FromStr for InitSystem {
+ type Err = anyhow::Error;
+
+ fn from_str(s: &str) -> Result<Self, Self::Err> {
+ match s.to_lowercase().as_str() {
+ "auto" => Ok(Self::Auto),
+ "systemd" => Ok(Self::Systemd),
+ "openrc" => Ok(Self::Openrc),
+ other => bail!(
+ "Unknown init system: '{}'. Supported: auto, systemd, openrc",
+ other
+ ),
+ }
+ }
+}
+
+impl InitSystem {
+ /// Resolve auto-detection to a concrete init system
+ ///
+ /// Detection order (deny-by-default):
+ /// 1. `/run/systemd/system` exists → Systemd
+ /// 2. `/run/openrc` exists AND OpenRC binary present → OpenRC
+ /// 3. else → Error (unknown init system)
+ #[cfg(target_os = "linux")]
+ pub fn resolve(self) -> Result<Self> {
+ match self {
+ Self::Auto => detect_init_system(),
+ concrete => Ok(concrete),
+ }
+ }
+
+ #[cfg(not(target_os = "linux"))]
+ pub fn resolve(self) -> Result<Self> {
+ match self {
+ Self::Auto => Ok(Self::Systemd),
+ concrete => Ok(concrete),
+ }
+ }
+}
+
+/// Detect the active init system on Linux
+///
+/// Checks for systemd and OpenRC in order, returning the first match.
+/// Returns an error if neither is detected.
+#[cfg(target_os = "linux")]
+fn detect_init_system() -> Result<InitSystem> {
+ // Check for systemd first (most common on modern Linux)
+ if Path::new("/run/systemd/system").exists() {
+ return Ok(InitSystem::Systemd);
+ }
+
+ // Check for OpenRC: requires /run/openrc AND openrc binary
+ if Path::new("/run/openrc").exists() {
+ // Check for OpenRC binaries: /sbin/openrc-run or rc-service in PATH
+ if Path::new("/sbin/openrc-run").exists() || which::which("rc-service").is_ok() {
+ return Ok(InitSystem::Openrc);
+ }
+ }
+
+ bail!(
+ "Could not detect init system. Supported: systemd, OpenRC. \
+ Use --service-init to specify manually."
+ );
+}
+
fn windows_task_name() -> &'static str {
WINDOWS_TASK_NAME
}
-pub fn handle_command(command: &crate::ServiceCommands, config: &Config) -> Result<()> {
+pub fn handle_command(
+ command: &crate::ServiceCommands,
+ config: &Config,
+ init_system: InitSystem,
+) -> Result<()> {
match command {
- crate::ServiceCommands::Install => install(config),
- crate::ServiceCommands::Start => start(config),
- crate::ServiceCommands::Stop => stop(config),
- crate::ServiceCommands::Restart => restart(config),
- crate::ServiceCommands::Status => status(config),
- crate::ServiceCommands::Uninstall => uninstall(config),
+ crate::ServiceCommands::Install => install(config, init_system),
+ crate::ServiceCommands::Start => start(config, init_system),
+ crate::ServiceCommands::Stop => stop(config, init_system),
+ crate::ServiceCommands::Restart => restart(config, init_system),
+ crate::ServiceCommands::Status => status(config, init_system),
+ crate::ServiceCommands::Uninstall => uninstall(config, init_system),
}
}
-fn install(config: &Config) -> Result<()> {
+fn install(config: &Config, init_system: InitSystem) -> Result<()> {
if cfg!(target_os = "macos") {
install_macos(config)
} else if cfg!(target_os = "linux") {
- install_linux(config)
+ let resolved = init_system.resolve()?;
+ install_linux(config, resolved)
} else if cfg!(target_os = "windows") {
install_windows(config)
} else {
@@ -34,7 +117,7 @@ fn install(config: &Config) -> Result<()> {
}
}
-fn start(config: &Config) -> Result<()> {
+fn start(config: &Config, init_system: InitSystem) -> Result<()> {
if cfg!(target_os = "macos") {
let plist = macos_service_file()?;
run_checked(Command::new("launchctl").arg("load").arg("-w").arg(&plist))?;
@@ -42,10 +125,8 @@ fn start(config: &Config) -> Result<()> {
println!("✅ Service started");
Ok(())
} else if cfg!(target_os = "linux") {
- run_checked(Command::new("systemctl").args(["--user", "daemon-reload"]))?;
- run_checked(Command::new("systemctl").args(["--user", "start", "zeroclaw.service"]))?;
- println!("✅ Service started");
- Ok(())
+ let resolved = init_system.resolve()?;
+ start_linux(resolved)
} else if cfg!(target_os = "windows") {
let _ = config;
run_checked(Command::new("schtasks").args(["/Run", "/TN", windows_task_name()]))?;
@@ -57,7 +138,22 @@ fn start(config: &Config) -> Result<()> {
}
}
-fn stop(config: &Config) -> Result<()> {
+fn start_linux(init_system: InitSystem) -> Result<()> {
+ match init_system {
+ InitSystem::Systemd => {
+ run_checked(Command::new("systemctl").args(["--user", "daemon-reload"]))?;
+ run_checked(Command::new("systemctl").args(["--user", "start", "zeroclaw.service"]))?;
+ }
+ InitSystem::Openrc => {
+ run_checked(Command::new("rc-service").args(["zeroclaw", "start"]))?;
+ }
+ InitSystem::Auto => unreachable!("Auto should be resolved before this point"),
+ }
+ println!("✅ Service started");
+ Ok(())
+}
+
+fn stop(config: &Config, init_system: InitSystem) -> Result<()> {
if cfg!(target_os = "macos") {
let plist = macos_service_file()?;
let _ = run_checked(Command::new("launchctl").arg("stop").arg(SERVICE_LABEL));
@@ -70,9 +166,8 @@ fn stop(config: &Config) -> Result<()> {
println!("✅ Service stopped");
Ok(())
} else if cfg!(target_os = "linux") {
- let _ = run_checked(Command::new("systemctl").args(["--user", "stop", "zeroclaw.service"]));
- println!("✅ Service stopped");
- Ok(())
+ let resolved = init_system.resolve()?;
+ stop_linux(resolved)
} else if cfg!(target_os = "windows") {
let _ = config;
let task_name = windows_task_name();
@@ -85,14 +180,62 @@ fn stop(config: &Config) -> Result<()> {
}
}
-fn restart(config: &Config) -> Result<()> {
- stop(config)?;
- start(config)?;
+fn stop_linux(init_system: InitSystem) -> Result<()> {
+ match init_system {
+ InitSystem::Systemd => {
+ let _ =
+ run_checked(Command::new("systemctl").args(["--user", "stop", "zeroclaw.service"]));
+ }
+ InitSystem::Openrc => {
+ let _ = run_checked(Command::new("rc-service").args(["zeroclaw", "stop"]));
+ }
+ InitSystem::Auto => unreachable!("Auto should be resolved before this point"),
+ }
+ println!("✅ Service stopped");
+ Ok(())
+}
+
+fn restart(config: &Config, init_system: InitSystem) -> Result<()> {
+ if cfg!(target_os = "macos") {
+ stop(config, init_system)?;
+ start(config, init_system)?;
+ println!("✅ Service restarted");
+ return Ok(());
+ }
+
+ if cfg!(target_os = "linux") {
+ let resolved = init_system.resolve()?;
+ return restart_linux(resolved);
+ }
+
+ if cfg!(target_os = "windows") {
+ stop(config, init_system)?;
+ start(config, init_system)?;
+ println!("✅ Service restarted");
+ return Ok(());
+ }
+
+ anyhow::bail!("Service management is supported on macOS and Linux only")
+}
+
+fn restart_linux(init_system: InitSystem) -> Result<()> {
+ match init_system {
+ InitSystem::Systemd => {
+ run_checked(Command::new("systemctl").args(["--user", "daemon-reload"]))?;
+ run_checked(
+ Command::new("systemctl").args(["--user", "restart", "zeroclaw.service"]),
+ )?;
+ }
+ InitSystem::Openrc => {
+ run_checked(Command::new("rc-service").args(["zeroclaw", "restart"]))?;
+ }
+ InitSystem::Auto => unreachable!("Auto should be resolved before this point"),
+ }
println!("✅ Service restarted");
Ok(())
}
-fn status(config: &Config) -> Result<()> {
+fn status(config: &Config, init_system: InitSystem) -> Result<()> {
if cfg!(target_os = "macos") {
let out = run_capture(Command::new("launchctl").arg("list"))?;
let running = out.lines().any(|line| line.contains(SERVICE_LABEL));
@@ -109,15 +252,8 @@ fn status(config: &Config) -> Result<()> {
}
if cfg!(target_os = "linux") {
- let out = run_capture(Command::new("systemctl").args([
- "--user",
- "is-active",
- "zeroclaw.service",
- ]))
- .unwrap_or_else(|_| "unknown".into());
- println!("Service state: {}", out.trim());
- println!("Unit: {}", linux_service_file(config)?.display());
- return Ok(());
+ let resolved = init_system.resolve()?;
+ return status_linux(config, resolved);
}
if cfg!(target_os = "windows") {
@@ -148,8 +284,31 @@ fn status(config: &Config) -> Result<()> {
anyhow::bail!("Service management is supported on macOS and Linux only")
}
-fn uninstall(config: &Config) -> Result<()> {
- stop(config)?;
+fn status_linux(config: &Config, init_system: InitSystem) -> Result<()> {
+ match init_system {
+ InitSystem::Systemd => {
+ let out = run_capture(Command::new("systemctl").args([
+ "--user",
+ "is-active",
+ "zeroclaw.service",
+ ]))
+ .unwrap_or_else(|_| "unknown".into());
+ println!("Service state: {}", out.trim());
+ println!("Unit: {}", linux_service_file(config)?.display());
+ }
+ InitSystem::Openrc => {
+ let out = run_capture(Command::new("rc-service").args(["zeroclaw", "status"]))
+ .unwrap_or_else(|_| "unknown".into());
+ println!("Service state: {}", out.trim());
+ println!("Unit: /etc/init.d/zeroclaw");
+ }
+ InitSystem::Auto => unreachable!("Auto should be resolved before this point"),
+ }
+ Ok(())
+}
+
+fn uninstall(config: &Config, init_system: InitSystem) -> Result<()> {
+ stop(config, init_system)?;
if cfg!(target_os = "macos") {
let file = macos_service_file()?;
@@ -162,14 +321,8 @@ fn uninstall(config: &Config) -> Result<()> {
}
if cfg!(target_os = "linux") {
- let file = linux_service_file(config)?;
- if file.exists() {
- fs::remove_file(&file)
- .with_context(|| format!("Failed to remove {}", file.display()))?;
- }
- let _ = run_checked(Command::new("systemctl").args(["--user", "daemon-reload"]));
- println!("✅ Service uninstalled ({})", file.display());
- return Ok(());
+ let resolved = init_system.resolve()?;
+ return uninstall_linux(config, resolved);
}
if cfg!(target_os = "windows") {
@@ -192,6 +345,31 @@ fn uninstall(config: &Config) -> Result<()> {
anyhow::bail!("Service management is supported on macOS and Linux only")
}
+fn uninstall_linux(config: &Config, init_system: InitSystem) -> Result<()> {
+ match init_system {
+ InitSystem::Systemd => {
+ let file = linux_service_file(config)?;
+ if file.exists() {
+ fs::remove_file(&file)
+ .with_context(|| format!("Failed to remove {}", file.display()))?;
+ }
+ let _ = run_checked(Command::new("systemctl").args(["--user", "daemon-reload"]));
+ println!("✅ Service uninstalled ({})", file.display());
+ }
+ InitSystem::Openrc => {
+ let init_script = Path::new("/etc/init.d/zeroclaw");
+ if init_script.exists() {
+ run_checked(Command::new("rc-update").args(["del", "zeroclaw", "default"]))?;
+ fs::remove_file(init_script)
+ .with_context(|| format!("Failed to remove {}", init_script.display()))?;
+ }
+ println!("✅ Service uninstalled (/etc/init.d/zeroclaw)");
+ }
+ InitSystem::Auto => unreachable!("Auto should be resolved before this point"),
+ }
+ Ok(())
+}
+
fn install_macos(config: &Config) -> Result<()> {
let file = macos_service_file()?;
if let Some(parent) = file.parent() {
@@ -244,7 +422,15 @@ fn install_macos(config: &Config) -> Result<()> {
Ok(())
}
-fn install_linux(config: &Config) -> Result<()> {
+fn install_linux(config: &Config, init_system: InitSystem) -> Result<()> {
+ match init_system {
+ InitSystem::Systemd => install_linux_systemd(config),
+ InitSystem::Openrc => install_linux_openrc(config),
+ InitSystem::Auto => unreachable!("Auto should be resolved before this point"),
+ }
+}
+
+fn install_linux_systemd(config: &Config) -> Result<()> {
let file = linux_service_file(config)?;
if let Some(parent) = file.parent() {
fs::create_dir_all(parent)?;
@@ -264,6 +450,167 @@ fn install_linux(config: &Config) -> Result<()> {
Ok(())
}
+/// Check if the current process is running as root (Unix only)
+#[cfg(unix)]
+fn is_root() -> bool {
+ unsafe { libc::getuid() == 0 }
+}
+
+#[cfg(not(unix))]
+fn is_root() -> bool {
+ false
+}
+
+/// Check if the zeroclaw user exists and has expected properties.
+/// Returns Ok if user doesn't exist (OpenRC will handle creation or fail gracefully).
+/// Returns error if user exists but has unexpected properties.
+fn check_zeroclaw_user() -> Result<()> {
+ let output = Command::new("getent").args(["passwd", "zeroclaw"]).output();
+
+ match output {
+ Ok(output) if output.status.success() => {
+ let passwd_entry = String::from_utf8_lossy(&output.stdout);
+ let parts: Vec<&str> = passwd_entry.split(':').collect();
+ if parts.len() >= 7 {
+ let uid = parts[2];
+ let gid = parts[3];
+ let home = parts[5];
+ let shell = parts[6];
+
+ if uid.parse::<u32>().unwrap_or(999) >= 1000 {
+ bail!(
+ "User 'zeroclaw' exists but has unexpected UID {} (expected system UID < 1000). \
+ Please recreate the user with: sudo userdel zeroclaw && sudo useradd -r -s /sbin/nologin zeroclaw",
+ uid
+ );
+ }
+
+ if !shell.contains("nologin") && !shell.contains("false") {
+ bail!(
+ "User 'zeroclaw' exists but has unexpected shell '{}'. \
+ Expected nologin/false for security. Please recreate the user with: \
+ sudo usermod -s /sbin/nologin zeroclaw",
+ shell
+ );
+ }
+
+ if home != "/var/lib/zeroclaw" && home != "/nonexistent" {
+ eprintln!(
+ "⚠️ Warning: zeroclaw user has home directory '{}' (expected /var/lib/zeroclaw or /nonexistent)",
+ home
+ );
+ }
+
+ let _ = gid;
+ }
+ Ok(())
+ }
+ _ => Ok(()),
+ }
+}
+
+/// Warn if the binary path is in a user home directory
+fn warn_if_binary_in_home(exe_path: &Path) {
+ let path_str = exe_path.to_string_lossy();
+ if path_str.contains("/home/") || path_str.contains(".cargo/bin") {
+ eprintln!(
+ "⚠️ Warning: Binary path '{}' appears to be in a user home directory.\n\
+ For system-wide OpenRC service, consider installing to /usr/local/bin:\n\
+ sudo cp '{}' /usr/local/bin/zeroclaw",
+ exe_path.display(),
+ exe_path.display()
+ );
+ }
+}
+
+/// Generate OpenRC init script content (pure function for testability)
+fn generate_openrc_script(exe_path: &Path, config_path: &str) -> String {
+ format!(
+ r#"#!/sbin/openrc-run
+
+name="zeroclaw"
+description="ZeroClaw daemon"
+
+command="{}"
+command_args="daemon --config {}"
+command_background="yes"
+command_user="zeroclaw:zeroclaw"
+pidfile="/run/${{RC_SVCNAME}}.pid"
+umask 027
+output_log="/var/log/zeroclaw/access.log"
+error_log="/var/log/zeroclaw/error.log"
+
+depend() {{
+ need net
+ after firewall
+}}
+"#,
+ exe_path.display(),
+ config_path
+ )
+}
+
+fn install_linux_openrc(config: &Config) -> Result<()> {
+ if !is_root() {
+ bail!(
+ "OpenRC service installation requires root privileges.\n\
+ Please run with sudo: sudo zeroclaw service install --service-init=openrc"
+ );
+ }
+
+ check_zeroclaw_user()?;
+
+ let exe = std::env::current_exe().context("Failed to resolve current executable")?;
+ warn_if_binary_in_home(&exe);
+
+ let config_dir = Path::new("/etc/zeroclaw");
+ let log_dir = Path::new("/var/log/zeroclaw");
+
+ if !config_dir.exists() {
+ fs::create_dir_all(config_dir)
+ .with_context(|| format!("Failed to create {}", config_dir.display()))?;
+ #[cfg(unix)]
+ {
+ use std::os::unix::fs::PermissionsExt;
+ fs::set_permissions(config_dir, fs::Permissions::from_mode(0o755)).with_context(
+ || format!("Failed to set permissions on {}", config_dir.display()),
+ )?;
+ }
+ println!("✅ Created directory: {}", config_dir.display());
+ }
+
+ if !log_dir.exists() {
+ fs::create_dir_all(log_dir)
+ .with_context(|| format!("Failed to create {}", log_dir.display()))?;
+ #[cfg(unix)]
+ {
+ use std::os::unix::fs::PermissionsExt;
+ fs::set_permissions(log_dir, fs::Permissions::from_mode(0o750))
+ .with_context(|| format!("Failed to set permissions on {}", log_dir.display()))?;
+ }
+ println!("✅ Created directory: {}", log_dir.display());
+ }
+
+ let init_script = generate_openrc_script(&exe, "/etc/zeroclaw/config.toml");
+ let init_path = Path::new("/etc/init.d/zeroclaw");
+ fs::write(init_path, init_script)
+ .with_context(|| format!("Failed to write {}", init_path.display()))?;
+
+ #[cfg(unix)]
+ {
+ use std::os::unix::fs::PermissionsExt;
+ fs::set_permissions(init_path, fs::Permissions::from_mode(0o755))
+ .with_context(|| format!("Failed to set permissions on {}", init_path.display()))?;
+ }
+
+ run_checked(Command::new("rc-update").args(["add", "zeroclaw", "default"]))?;
+ println!("✅ Installed OpenRC service: /etc/init.d/zeroclaw");
+ println!(" Config path: /etc/zeroclaw/config.toml");
+ println!(" Start with: sudo zeroclaw service start");
+ let _ = config;
+ Ok(())
+}
+
fn install_windows(config: &Config) -> Result<()> {
let exe = std::env::current_exe().context("Failed to resolve current executable")?;
let logs_dir = config
@@ -423,4 +770,78 @@ mod tests {
.expect_err("non-zero exit should error");
assert!(err.to_string().contains("Command failed"));
}
+
+ #[test]
+ fn init_system_from_str_parses_valid_values() {
+ assert_eq!("auto".parse::<InitSystem>().unwrap(), InitSystem::Auto);
+ assert_eq!("AUTO".parse::<InitSystem>().unwrap(), InitSystem::Auto);
+ assert_eq!(
+ "systemd".parse::<InitSystem>().unwrap(),
+ InitSystem::Systemd
+ );
+ assert_eq!(
+ "SYSTEMD".parse::<InitSystem>().unwrap(),
+ InitSystem::Systemd
+ );
+ assert_eq!("openrc".parse::<InitSystem>().unwrap(), InitSystem::Openrc);
+ assert_eq!("OPENRC".parse::<InitSystem>().unwrap(), InitSystem::Openrc);
+ }
+
+ #[test]
+ fn init_system_from_str_rejects_unknown() {
+ let err = "unknown"
+ .parse::<InitSystem>()
+ .expect_err("should reject unknown");
+ assert!(err.to_string().contains("Unknown init system"));
+ assert!(err.to_string().contains("Supported: auto, systemd, openrc"));
+ }
+
+ #[test]
+ fn init_system_default_is_auto() {
+ assert_eq!(InitSystem::default(), InitSystem::Auto);
+ }
+
+ #[test]
+ fn is_root_returns_false_in_test_env() {
+ assert!(!is_root());
+ }
+
+ #[test]
+ fn generate_openrc_script_contains_required_directives() {
+ use std::path::PathBuf;
+
+ let exe_path = PathBuf::from("/usr/local/bin/zeroclaw");
+ let script = generate_openrc_script(&exe_path, "/etc/zeroclaw/config.toml");
+
+ assert!(script.starts_with("#!/sbin/openrc-run"));
+ assert!(script.contains("name=\"zeroclaw\""));
+ assert!(script.contains("description=\"ZeroClaw daemon\""));
+ assert!(script.contains("command=\"/usr/local/bin/zeroclaw\""));
+ assert!(script.contains("command_args=\"daemon --config /etc/zeroclaw/config.toml\""));
+ assert!(script.contains("command_background=\"yes\""));
+ assert!(script.contains("command_user=\"zeroclaw:zeroclaw\""));
+ assert!(script.contains("pidfile=\"/run/${RC_SVCNAME}.pid\""));
+ assert!(script.contains("umask 027"));
+ assert!(script.contains("output_log=\"/var/log/zeroclaw/access.log\""));
+ assert!(script.contains("error_log=\"/var/log/zeroclaw/error.log\""));
+ assert!(script.contains("depend()"));
+ assert!(script.contains("need net"));
+ assert!(script.contains("after firewall"));
+ }
+
+ #[test]
+ fn warn_if_binary_in_home_detects_home_path() {
+ use std::path::PathBuf;
+
+ let home_path = PathBuf::from("/home/user/.cargo/bin/zeroclaw");
+ assert!(home_path.to_string_lossy().contains("/home/"));
+ assert!(home_path.to_string_lossy().contains(".cargo/bin"));
+
+ let cargo_path = PathBuf::from("/home/user/.cargo/bin/zeroclaw");
+ assert!(cargo_path.to_string_lossy().contains(".cargo/bin"));
+
+ let system_path = PathBuf::from("/usr/local/bin/zeroclaw");
+ assert!(!system_path.to_string_lossy().contains("/home/"));
+ assert!(!system_path.to_string_lossy().contains(".cargo/bin"));
+ }
}
From f110f129e0be97c243d11d1ef57cb710c362c067 Mon Sep 17 00:00:00 2001
From: Jakub Buzuk <61548378+Baz00k@users.noreply.github.com>
Date: Wed, 18 Feb 2026 01:17:50 +0100
Subject: [PATCH 050/116] fix(service): set correct ownership for OpenRC log
directory
- Add chown_to_zeroclaw() helper to change directory ownership
- Log directory /var/log/zeroclaw now owned by zeroclaw:zeroclaw
- Fix docs: config file should be owned by zeroclaw:zeroclaw
(service runs as zeroclaw user, needs read access)
Fixes permission denied error when service tries to write logs.
---
docs/network-deployment.md | 6 ++++--
src/service/mod.rs | 31 ++++++++++++++++++++++++++++++-
2 files changed, 34 insertions(+), 3 deletions(-)
diff --git a/docs/network-deployment.md b/docs/network-deployment.md
index 3a77c9fcf..910eb5dce 100644
--- a/docs/network-deployment.md
+++ b/docs/network-deployment.md
@@ -230,11 +230,13 @@ Place your ZeroClaw config at `/etc/zeroclaw/config.toml`:
sudo mkdir -p /etc/zeroclaw
sudo cp ~/.zeroclaw/config.toml /etc/zeroclaw/config.toml
-# Set recommended permissions (root-owned, mode 600 for secrets)
-sudo chown root:root /etc/zeroclaw/config.toml
+# Set ownership to zeroclaw user (service runs as zeroclaw:zeroclaw)
+sudo chown zeroclaw:zeroclaw /etc/zeroclaw/config.toml
sudo chmod 600 /etc/zeroclaw/config.toml
```
+> **Note**: The service runs as `zeroclaw:zeroclaw`, so the config file must be readable by that user. The `zeroclaw service install` command creates `/var/log/zeroclaw/` with correct ownership automatically.
+
### 7.4 Enable and Start
```bash
diff --git a/src/service/mod.rs b/src/service/mod.rs
index 3353f91f8..0ea998af7 100644
--- a/src/service/mod.rs
+++ b/src/service/mod.rs
@@ -509,6 +509,31 @@ fn check_zeroclaw_user() -> Result<()> {
}
}
+/// Change ownership of a path to zeroclaw:zeroclaw
+#[cfg(unix)]
+fn chown_to_zeroclaw(path: &Path) -> Result<()> {
+ let output = Command::new("chown")
+ .args(["zeroclaw:zeroclaw", &path.to_string_lossy()])
+ .output()
+ .context("Failed to run chown")?;
+
+ if !output.status.success() {
+ let stderr = String::from_utf8_lossy(&output.stderr);
+ // Non-fatal: warn but continue
+ eprintln!(
+ "⚠️ Warning: Could not change ownership of {} to zeroclaw:zeroclaw: {}",
+ path.display(),
+ stderr.trim()
+ );
+ }
+ Ok(())
+}
+
+#[cfg(not(unix))]
+fn chown_to_zeroclaw(_path: &Path) -> Result<()> {
+ Ok(())
+}
+
/// Warn if the binary path is in a user home directory
fn warn_if_binary_in_home(exe_path: &Path) {
let path_str = exe_path.to_string_lossy();
@@ -588,7 +613,11 @@ fn install_linux_openrc(config: &Config) -> Result<()> {
fs::set_permissions(log_dir, fs::Permissions::from_mode(0o750))
.with_context(|| format!("Failed to set permissions on {}", log_dir.display()))?;
}
- println!("✅ Created directory: {}", log_dir.display());
+ chown_to_zeroclaw(log_dir)?;
+ println!(
+ "✅ Created directory: {} (owned by zeroclaw:zeroclaw)",
+ log_dir.display()
+ );
}
let init_script = generate_openrc_script(&exe, "/etc/zeroclaw/config.toml");
From d26aa3de1cbe803771e5186d566dfa684839e2e9 Mon Sep 17 00:00:00 2001
From: Jakub Buzuk <61548378+Baz00k@users.noreply.github.com>
Date: Wed, 18 Feb 2026 01:35:10 +0100
Subject: [PATCH 051/116] fix(service): use Alpine-compatible user commands for
OpenRC
- Detect Alpine Linux via /etc/alpine-release
- Use adduser/deluser on Alpine instead of useradd/userdel
- Auto-create zeroclaw system user during install
- Provide correct commands in error messages
Alpine uses BusyBox which has different user management commands:
- adduser -S -s /sbin/nologin -H -D zeroclaw (Alpine)
- useradd -r -s /sbin/nologin zeroclaw (Debian/RHEL)
---
src/service/mod.rs | 73 +++++++++++++++++++++++++++++++++++++++++-----
1 file changed, 65 insertions(+), 8 deletions(-)
diff --git a/src/service/mod.rs b/src/service/mod.rs
index 0ea998af7..74093cfc8 100644
--- a/src/service/mod.rs
+++ b/src/service/mod.rs
@@ -466,6 +466,16 @@ fn is_root() -> bool {
/// Returns error if user exists but has unexpected properties.
fn check_zeroclaw_user() -> Result<()> {
let output = Command::new("getent").args(["passwd", "zeroclaw"]).output();
+ let is_alpine = Path::new("/etc/alpine-release").exists();
+
+ let (del_cmd, add_cmd) = if is_alpine {
+ (
+ "deluser zeroclaw",
+ "adduser -S -s /sbin/nologin -H zeroclaw",
+ )
+ } else {
+ ("userdel zeroclaw", "useradd -r -s /sbin/nologin zeroclaw")
+ };
match output {
Ok(output) if output.status.success() => {
@@ -479,18 +489,29 @@ fn check_zeroclaw_user() -> Result<()> {
if uid.parse::<u32>().unwrap_or(999) >= 1000 {
bail!(
- "User 'zeroclaw' exists but has unexpected UID {} (expected system UID < 1000). \
- Please recreate the user with: sudo userdel zeroclaw && sudo useradd -r -s /sbin/nologin zeroclaw",
- uid
+ "User 'zeroclaw' exists but has unexpected UID {} (expected system UID < 1000).\n\
+ Recreate with: sudo {} && sudo {}",
+ uid, del_cmd, add_cmd
);
}
if !shell.contains("nologin") && !shell.contains("false") {
bail!(
- "User 'zeroclaw' exists but has unexpected shell '{}'. \
- Expected nologin/false for security. Please recreate the user with: \
- sudo usermod -s /sbin/nologin zeroclaw",
- shell
+ "User 'zeroclaw' exists but has unexpected shell '{}'.\n\
+ Expected nologin/false for security. Fix with: sudo {} && sudo {}",
+ shell,
+ del_cmd,
+ add_cmd
+ );
+ }
+
+ if !shell.contains("nologin") && !shell.contains("false") {
+ bail!(
+ "User 'zeroclaw' exists but has unexpected shell '{}'.\n\
+ Expected nologin/false for security. Fix with: sudo {} && sudo {}",
+ shell,
+ del_cmd,
+ add_cmd
);
}
@@ -509,6 +530,42 @@ fn check_zeroclaw_user() -> Result<()> {
}
}
+fn ensure_zeroclaw_user() -> Result<()> {
+ let output = Command::new("getent").args(["passwd", "zeroclaw"]).output();
+ if let Ok(output) = output {
+ if output.status.success() {
+ return check_zeroclaw_user();
+ }
+ }
+
+ let is_alpine = Path::new("/etc/alpine-release").exists();
+
+ if is_alpine {
+ let output = Command::new("adduser")
+ .args(["-S", "-s", "/sbin/nologin", "-H", "-D", "zeroclaw"])
+ .output()
+ .context("Failed to create zeroclaw user")?;
+
+ if !output.status.success() {
+ let stderr = String::from_utf8_lossy(&output.stderr);
+ bail!("Failed to create zeroclaw user: {}", stderr.trim());
+ }
+ } else {
+ let output = Command::new("useradd")
+ .args(["-r", "-s", "/sbin/nologin", "zeroclaw"])
+ .output()
+ .context("Failed to create zeroclaw user")?;
+
+ if !output.status.success() {
+ let stderr = String::from_utf8_lossy(&output.stderr);
+ bail!("Failed to create zeroclaw user: {}", stderr.trim());
+ }
+ }
+
+ println!("✅ Created system user: zeroclaw");
+ Ok(())
+}
+
/// Change ownership of a path to zeroclaw:zeroclaw
#[cfg(unix)]
fn chown_to_zeroclaw(path: &Path) -> Result<()> {
@@ -583,7 +640,7 @@ fn install_linux_openrc(config: &Config) -> Result<()> {
);
}
- check_zeroclaw_user()?;
+ ensure_zeroclaw_user()?;
let exe = std::env::current_exe().context("Failed to resolve current executable")?;
warn_if_binary_in_home(&exe);
From 52cb914a41ee2f7baa42954be60a5c8452f4c0ba Mon Sep 17 00:00:00 2001
From: Jakub Buzuk <61548378+Baz00k@users.noreply.github.com>
Date: Wed, 18 Feb 2026 01:51:38 +0100
Subject: [PATCH 052/116] fix(service): create zeroclaw group on Alpine Linux
- Alpine adduser -S doesn't create a group automatically
- Explicitly create group with addgroup -S zeroclaw first
- Then add user with -G zeroclaw to join the group
- Update error message commands to include group handling
OpenRC service runs as zeroclaw:zeroclaw, so group must exist.
---
src/service/mod.rs | 31 ++++++++++++++++++++++++++++---
1 file changed, 28 insertions(+), 3 deletions(-)
diff --git a/src/service/mod.rs b/src/service/mod.rs
index 74093cfc8..f26464c6f 100644
--- a/src/service/mod.rs
+++ b/src/service/mod.rs
@@ -470,8 +470,8 @@ fn check_zeroclaw_user() -> Result<()> {
let (del_cmd, add_cmd) = if is_alpine {
(
- "deluser zeroclaw",
- "adduser -S -s /sbin/nologin -H zeroclaw",
+ "deluser zeroclaw && delgroup zeroclaw",
+ "addgroup -S zeroclaw && adduser -S -s /sbin/nologin -H -D -G zeroclaw zeroclaw",
)
} else {
("userdel zeroclaw", "useradd -r -s /sbin/nologin zeroclaw")
@@ -541,8 +541,33 @@ fn ensure_zeroclaw_user() -> Result<()> {
let is_alpine = Path::new("/etc/alpine-release").exists();
if is_alpine {
+ let group_output = Command::new("getent").args(["group", "zeroclaw"]).output();
+ let group_exists = group_output.map(|o| o.status.success()).unwrap_or(false);
+
+ if !group_exists {
+ let output = Command::new("addgroup")
+ .args(["-S", "zeroclaw"])
+ .output()
+ .context("Failed to create zeroclaw group")?;
+
+ if !output.status.success() {
+ let stderr = String::from_utf8_lossy(&output.stderr);
+ bail!("Failed to create zeroclaw group: {}", stderr.trim());
+ }
+ println!("✅ Created system group: zeroclaw");
+ }
+
let output = Command::new("adduser")
- .args(["-S", "-s", "/sbin/nologin", "-H", "-D", "zeroclaw"])
+ .args([
+ "-S",
+ "-s",
+ "/sbin/nologin",
+ "-H",
+ "-D",
+ "-G",
+ "zeroclaw",
+ "zeroclaw",
+ ])
.output()
.context("Failed to create zeroclaw user")?;
From 4c85d7e47c2718b12755aa5467a8a0db2c5cb3bc Mon Sep 17 00:00:00 2001
From: Jakub Buzuk <61548378+Baz00k@users.noreply.github.com>
Date: Wed, 18 Feb 2026 02:04:22 +0100
Subject: [PATCH 053/116] fix(service): always chown log directory on OpenRC
install
- Move chown_to_zeroclaw outside the if block
- Fixes permission denied when directory already exists
- Ensures correct ownership even on reinstall
---
src/service/mod.rs | 9 +++++++--
1 file changed, 7 insertions(+), 2 deletions(-)
diff --git a/src/service/mod.rs b/src/service/mod.rs
index f26464c6f..43f8ac1f9 100644
--- a/src/service/mod.rs
+++ b/src/service/mod.rs
@@ -686,7 +686,8 @@ fn install_linux_openrc(config: &Config) -> Result<()> {
println!("✅ Created directory: {}", config_dir.display());
}
- if !log_dir.exists() {
+ let created_log_dir = !log_dir.exists();
+ if created_log_dir {
fs::create_dir_all(log_dir)
.with_context(|| format!("Failed to create {}", log_dir.display()))?;
#[cfg(unix)]
@@ -695,7 +696,11 @@ fn install_linux_openrc(config: &Config) -> Result<()> {
fs::set_permissions(log_dir, fs::Permissions::from_mode(0o750))
.with_context(|| format!("Failed to set permissions on {}", log_dir.display()))?;
}
- chown_to_zeroclaw(log_dir)?;
+ }
+
+ chown_to_zeroclaw(log_dir)?;
+
+ if created_log_dir {
println!(
"✅ Created directory: {} (owned by zeroclaw:zeroclaw)",
log_dir.display()
From 951076e02621ec92b0433a05ac2e80001e7050c6 Mon Sep 17 00:00:00 2001
From: Jakub Buzuk <61548378+Baz00k@users.noreply.github.com>
Date: Wed, 18 Feb 2026 02:50:03 +0100
Subject: [PATCH 054/116] feat(service): add --config-dir flag and improve
OpenRC setup
- Add global --config-dir CLI flag that sets ZEROCLAW_CONFIG_DIR env
- Add ZEROCLAW_CONFIG_DIR override in config resolution (takes precedence)
- Update OpenRC script to use --config-dir and set env vars for config/workspace
- Prefer /usr/local/bin/zeroclaw for OpenRC executable
- Create /etc/zeroclaw/workspace directory with correct ownership on install
- Update docs to reflect --service-init flag order (service-level before subcommand)
---
README.md | 7 ++++--
docs/network-deployment.md | 9 +++----
src/main.rs | 10 ++++++++
src/service/mod.rs | 48 ++++++++++++++++++++++++++++++++------
4 files changed, 61 insertions(+), 13 deletions(-)
diff --git a/README.md b/README.md
index ee6b89c74..7dac2f5f0 100644
--- a/README.md
+++ b/README.md
@@ -321,7 +321,7 @@ zeroclaw service install
zeroclaw service status
zeroclaw service restart
-# On Alpine (OpenRC): sudo zeroclaw service install --service-init=openrc
+# On Alpine (OpenRC): sudo zeroclaw service --service-init=openrc install
# Migrate memory from OpenClaw (safe preview first)
zeroclaw migrate openclaw --dry-run
@@ -931,9 +931,12 @@ zeroclaw service install
zeroclaw service start
# Alpine with OpenRC (system-wide, requires sudo)
-sudo zeroclaw service install --service-init=openrc
+sudo zeroclaw service --service-init=openrc install
sudo rc-update add zeroclaw default
sudo rc-service zeroclaw start
+
+# Note: --service-init is a service-level flag and must come before the subcommand
+# OpenRC installs set ZEROCLAW_CONFIG_DIR=/etc/zeroclaw for the service
```
For full OpenRC setup instructions, see [docs/network-deployment.md](docs/network-deployment.md#7-openrc-alpine-linux-service).
diff --git a/docs/network-deployment.md b/docs/network-deployment.md
index 910eb5dce..1addeb5d0 100644
--- a/docs/network-deployment.md
+++ b/docs/network-deployment.md
@@ -213,7 +213,7 @@ ZeroClaw supports OpenRC for Alpine Linux and other distributions using the Open
```bash
# Install the OpenRC init script (requires sudo)
-sudo zeroclaw service install --service-init=openrc
+sudo zeroclaw service --service-init=openrc install
```
This creates:
@@ -236,6 +236,7 @@ sudo chmod 600 /etc/zeroclaw/config.toml
```
> **Note**: The service runs as `zeroclaw:zeroclaw`, so the config file must be readable by that user. The `zeroclaw service install` command creates `/var/log/zeroclaw/` with correct ownership automatically.
+> The OpenRC service also sets `ZEROCLAW_CONFIG_DIR=/etc/zeroclaw` so config and workspace resolve under `/etc/zeroclaw`.
### 7.4 Enable and Start
@@ -258,7 +259,7 @@ sudo rc-service zeroclaw status
| `sudo rc-service zeroclaw stop` | Stop the daemon |
| `sudo rc-service zeroclaw status` | Check service status |
| `sudo rc-service zeroclaw restart` | Restart the daemon |
-| `sudo zeroclaw service status --service-init=openrc` | ZeroClaw status wrapper |
+| `sudo zeroclaw service --service-init=openrc status` | ZeroClaw status wrapper (uses `/etc/zeroclaw` config) |
### 7.6 Logs
@@ -283,7 +284,7 @@ sudo rc-service zeroclaw stop
sudo rc-update del zeroclaw default
# Remove init script
-sudo zeroclaw service uninstall --service-init=openrc
+sudo zeroclaw service --service-init=openrc uninstall
```
### 7.8 Notes
@@ -296,7 +297,7 @@ sudo zeroclaw service uninstall --service-init=openrc
### 7.9 Checklist: Alpine/OpenRC Deployment
-- [ ] Install: `sudo zeroclaw service install --service-init=openrc`
+- [ ] Install: `sudo zeroclaw service --service-init=openrc install`
- [ ] Create config: `/etc/zeroclaw/config.toml` with permissions `600`
- [ ] Enable: `sudo rc-update add zeroclaw default`
- [ ] Start: `sudo rc-service zeroclaw start`
diff --git a/src/main.rs b/src/main.rs
index a7b1966b8..0001d0faa 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -93,6 +93,9 @@ pub use zeroclaw::{HardwareCommands, PeripheralCommands};
#[command(version = "0.1.0")]
#[command(about = "The fastest, smallest AI assistant.", long_about = None)]
struct Cli {
+ #[arg(long, global = true)]
+ config_dir: Option<String>,
+
#[command(subcommand)]
command: Commands,
}
@@ -666,6 +669,13 @@ async fn main() -> Result<()> {
let cli = Cli::parse();
+ if let Some(config_dir) = &cli.config_dir {
+ if config_dir.trim().is_empty() {
+ bail!("--config-dir cannot be empty");
+ }
+ std::env::set_var("ZEROCLAW_CONFIG_DIR", config_dir);
+ }
+
// Completions must remain stdout-only and should not load config or initialize logging.
// This avoids warnings/log lines corrupting sourced completion scripts.
if let Commands::Completions { shell } = &cli.command {
diff --git a/src/service/mod.rs b/src/service/mod.rs
index 43f8ac1f9..cfb2361f4 100644
--- a/src/service/mod.rs
+++ b/src/service/mod.rs
@@ -631,7 +631,7 @@ fn warn_if_binary_in_home(exe_path: &Path) {
}
/// Generate OpenRC init script content (pure function for testability)
-fn generate_openrc_script(exe_path: &Path, config_path: &str) -> String {
+fn generate_openrc_script(exe_path: &Path, config_dir: &Path) -> String {
format!(
r#"#!/sbin/openrc-run
@@ -639,13 +639,15 @@ name="zeroclaw"
description="ZeroClaw daemon"
command="{}"
-command_args="daemon --config {}"
+command_args="--config-dir {} daemon"
command_background="yes"
command_user="zeroclaw:zeroclaw"
pidfile="/run/${{RC_SVCNAME}}.pid"
umask 027
output_log="/var/log/zeroclaw/access.log"
error_log="/var/log/zeroclaw/error.log"
+env ZEROCLAW_CONFIG_DIR="{}"
+env ZEROCLAW_WORKSPACE="{}"
depend() {{
need net
@@ -653,10 +655,22 @@ depend() {{
}}
"#,
exe_path.display(),
- config_path
+ config_dir.display(),
+ config_dir.display(),
+ config_dir.join("workspace").display()
)
}
+fn resolve_openrc_executable() -> Result<PathBuf> {
+ let preferred = Path::new("/usr/local/bin/zeroclaw");
+ if preferred.exists() {
+ return Ok(preferred.to_path_buf());
+ }
+
+ let exe = std::env::current_exe().context("Failed to resolve current executable")?;
+ Ok(exe)
+}
+
fn install_linux_openrc(config: &Config) -> Result<()> {
if !is_root() {
bail!(
@@ -667,10 +681,11 @@ fn install_linux_openrc(config: &Config) -> Result<()> {
ensure_zeroclaw_user()?;
- let exe = std::env::current_exe().context("Failed to resolve current executable")?;
+ let exe = resolve_openrc_executable()?;
warn_if_binary_in_home(&exe);
let config_dir = Path::new("/etc/zeroclaw");
+ let workspace_dir = config_dir.join("workspace");
let log_dir = Path::new("/var/log/zeroclaw");
if !config_dir.exists() {
@@ -686,6 +701,23 @@ fn install_linux_openrc(config: &Config) -> Result<()> {
println!("✅ Created directory: {}", config_dir.display());
}
+ if !workspace_dir.exists() {
+ fs::create_dir_all(&workspace_dir)
+ .with_context(|| format!("Failed to create {}", workspace_dir.display()))?;
+ #[cfg(unix)]
+ {
+ use std::os::unix::fs::PermissionsExt;
+ fs::set_permissions(&workspace_dir, fs::Permissions::from_mode(0o750)).with_context(
+ || format!("Failed to set permissions on {}", workspace_dir.display()),
+ )?;
+ }
+ chown_to_zeroclaw(&workspace_dir)?;
+ println!(
+ "✅ Created directory: {} (owned by zeroclaw:zeroclaw)",
+ workspace_dir.display()
+ );
+ }
+
let created_log_dir = !log_dir.exists();
if created_log_dir {
fs::create_dir_all(log_dir)
@@ -707,7 +739,7 @@ fn install_linux_openrc(config: &Config) -> Result<()> {
);
}
- let init_script = generate_openrc_script(&exe, "/etc/zeroclaw/config.toml");
+ let init_script = generate_openrc_script(&exe, config_dir);
let init_path = Path::new("/etc/init.d/zeroclaw");
fs::write(init_path, init_script)
.with_context(|| format!("Failed to write {}", init_path.display()))?;
@@ -927,13 +959,15 @@ mod tests {
use std::path::PathBuf;
let exe_path = PathBuf::from("/usr/local/bin/zeroclaw");
- let script = generate_openrc_script(&exe_path, "/etc/zeroclaw/config.toml");
+ let script = generate_openrc_script(&exe_path, Path::new("/etc/zeroclaw"));
assert!(script.starts_with("#!/sbin/openrc-run"));
assert!(script.contains("name=\"zeroclaw\""));
assert!(script.contains("description=\"ZeroClaw daemon\""));
assert!(script.contains("command=\"/usr/local/bin/zeroclaw\""));
- assert!(script.contains("command_args=\"daemon --config /etc/zeroclaw/config.toml\""));
+ assert!(script.contains("command_args=\"--config-dir /etc/zeroclaw daemon\""));
+ assert!(script.contains("env ZEROCLAW_CONFIG_DIR=\"/etc/zeroclaw\""));
+ assert!(script.contains("env ZEROCLAW_WORKSPACE=\"/etc/zeroclaw/workspace\""));
assert!(script.contains("command_background=\"yes\""));
assert!(script.contains("command_user=\"zeroclaw:zeroclaw\""));
assert!(script.contains("pidfile=\"/run/${RC_SVCNAME}.pid\""));
From b2bf5531e4717b68d04f8f510ce688b829d34a81 Mon Sep 17 00:00:00 2001
From: Jakub Buzuk <61548378+Baz00k@users.noreply.github.com>
Date: Wed, 18 Feb 2026 13:15:28 +0100
Subject: [PATCH 055/116] feat(service): enable hands-off OpenRC installation
on Alpine
Add automatic runtime-state migration to /etc/zeroclaw with secure ownership/permissions. Implement env-based config resolution for service startup, eliminating the need for manual --service-init flags in the happy path.
---
README.md | 9 +--
docs/network-deployment.md | 26 ++-----
src/service/mod.rs | 156 +++++++++++++++++++++++++++++++++----
3 files changed, 152 insertions(+), 39 deletions(-)
diff --git a/README.md b/README.md
index 7dac2f5f0..0025c5f04 100644
--- a/README.md
+++ b/README.md
@@ -321,7 +321,7 @@ zeroclaw service install
zeroclaw service status
zeroclaw service restart
-# On Alpine (OpenRC): sudo zeroclaw service --service-init=openrc install
+# On Alpine (OpenRC): sudo zeroclaw service install
# Migrate memory from OpenClaw (safe preview first)
zeroclaw migrate openclaw --dry-run
@@ -923,7 +923,7 @@ ZeroClaw supports two init systems for background services:
| **systemd** (default on Linux) | User-level | `~/.zeroclaw/config.toml` | No sudo |
| **OpenRC** (Alpine) | System-wide | `/etc/zeroclaw/config.toml` | sudo/root |
-Use `--service-init` to select the init system:
+Init system is auto-detected (`systemd` or `OpenRC`).
```bash
# Linux with systemd (default, user-level)
@@ -931,12 +931,9 @@ zeroclaw service install
zeroclaw service start
# Alpine with OpenRC (system-wide, requires sudo)
-sudo zeroclaw service --service-init=openrc install
+sudo zeroclaw service install
sudo rc-update add zeroclaw default
sudo rc-service zeroclaw start
-
-# Note: --service-init is a service-level flag and must come before the subcommand
-# OpenRC installs set ZEROCLAW_CONFIG_DIR=/etc/zeroclaw for the service
```
For full OpenRC setup instructions, see [docs/network-deployment.md](docs/network-deployment.md#7-openrc-alpine-linux-service).
diff --git a/docs/network-deployment.md b/docs/network-deployment.md
index 1addeb5d0..f9f0bf258 100644
--- a/docs/network-deployment.md
+++ b/docs/network-deployment.md
@@ -212,8 +212,8 @@ ZeroClaw supports OpenRC for Alpine Linux and other distributions using the Open
### 7.2 Install Service
```bash
-# Install the OpenRC init script (requires sudo)
-sudo zeroclaw service --service-init=openrc install
+# Install service (OpenRC is auto-detected on Alpine)
+sudo zeroclaw service install
```
This creates:
@@ -223,20 +223,9 @@ This creates:
### 7.3 Configuration
-Place your ZeroClaw config at `/etc/zeroclaw/config.toml`:
+No manual config copy is required.
-```bash
-# Copy or create system-wide config
-sudo mkdir -p /etc/zeroclaw
-sudo cp ~/.zeroclaw/config.toml /etc/zeroclaw/config.toml
-
-# Set ownership to zeroclaw user (service runs as zeroclaw:zeroclaw)
-sudo chown zeroclaw:zeroclaw /etc/zeroclaw/config.toml
-sudo chmod 600 /etc/zeroclaw/config.toml
-```
-
-> **Note**: The service runs as `zeroclaw:zeroclaw`, so the config file must be readable by that user. The `zeroclaw service install` command creates `/var/log/zeroclaw/` with correct ownership automatically.
-> The OpenRC service also sets `ZEROCLAW_CONFIG_DIR=/etc/zeroclaw` so config and workspace resolve under `/etc/zeroclaw`.
+`sudo zeroclaw service install` automatically prepares `/etc/zeroclaw`, migrates existing runtime state from your user setup when available, and sets ownership/permissions for the `zeroclaw` service user.
### 7.4 Enable and Start
@@ -259,7 +248,7 @@ sudo rc-service zeroclaw status
| `sudo rc-service zeroclaw stop` | Stop the daemon |
| `sudo rc-service zeroclaw status` | Check service status |
| `sudo rc-service zeroclaw restart` | Restart the daemon |
-| `sudo zeroclaw service --service-init=openrc status` | ZeroClaw status wrapper (uses `/etc/zeroclaw` config) |
+| `sudo zeroclaw service status` | ZeroClaw status wrapper (uses `/etc/zeroclaw` config) |
### 7.6 Logs
@@ -284,7 +273,7 @@ sudo rc-service zeroclaw stop
sudo rc-update del zeroclaw default
# Remove init script
-sudo zeroclaw service --service-init=openrc uninstall
+sudo zeroclaw service uninstall
```
### 7.8 Notes
@@ -297,8 +286,7 @@ sudo zeroclaw service --service-init=openrc uninstall
### 7.9 Checklist: Alpine/OpenRC Deployment
-- [ ] Install: `sudo zeroclaw service --service-init=openrc install`
-- [ ] Create config: `/etc/zeroclaw/config.toml` with permissions `600`
+- [ ] Install: `sudo zeroclaw service install`
- [ ] Enable: `sudo rc-update add zeroclaw default`
- [ ] Start: `sudo rc-service zeroclaw start`
- [ ] Verify: `sudo rc-service zeroclaw status`
diff --git a/src/service/mod.rs b/src/service/mod.rs
index cfb2361f4..9651d7330 100644
--- a/src/service/mod.rs
+++ b/src/service/mod.rs
@@ -505,16 +505,6 @@ fn check_zeroclaw_user() -> Result<()> {
);
}
- if !shell.contains("nologin") && !shell.contains("false") {
- bail!(
- "User 'zeroclaw' exists but has unexpected shell '{}'.\n\
- Expected nologin/false for security. Fix with: sudo {} && sudo {}",
- shell,
- del_cmd,
- add_cmd
- );
- }
-
if home != "/var/lib/zeroclaw" && home != "/nonexistent" {
eprintln!(
"⚠️ Warning: zeroclaw user has home directory '{}' (expected /var/lib/zeroclaw or /nonexistent)",
@@ -616,6 +606,115 @@ fn chown_to_zeroclaw(_path: &Path) -> Result<()> {
Ok(())
}
+#[cfg(unix)]
+fn chown_recursive_to_zeroclaw(path: &Path) -> Result<()> {
+ let output = Command::new("chown")
+ .args(["-R", "zeroclaw:zeroclaw", &path.to_string_lossy()])
+ .output()
+ .context("Failed to run recursive chown")?;
+
+ if !output.status.success() {
+ let stderr = String::from_utf8_lossy(&output.stderr);
+ eprintln!(
+ "⚠️ Warning: Could not recursively change ownership of {} to zeroclaw:zeroclaw: {}",
+ path.display(),
+ stderr.trim()
+ );
+ }
+
+ Ok(())
+}
+
+#[cfg(not(unix))]
+fn chown_recursive_to_zeroclaw(_path: &Path) -> Result<()> {
+ Ok(())
+}
+
+fn copy_dir_recursive(source: &Path, target: &Path) -> Result<()> {
+ fs::create_dir_all(target)
+ .with_context(|| format!("Failed to create directory {}", target.display()))?;
+
+ for entry in fs::read_dir(source)
+ .with_context(|| format!("Failed to read directory {}", source.display()))?
+ {
+ let entry = entry?;
+ let source_path = entry.path();
+ let target_path = target.join(entry.file_name());
+ let file_type = entry
+ .file_type()
+ .with_context(|| format!("Failed to inspect {}", source_path.display()))?;
+
+ if file_type.is_dir() {
+ copy_dir_recursive(&source_path, &target_path)?;
+ } else if file_type.is_file() {
+ if target_path.exists() {
+ continue;
+ }
+ fs::copy(&source_path, &target_path).with_context(|| {
+ format!(
+ "Failed to copy file {} -> {}",
+ source_path.display(),
+ target_path.display()
+ )
+ })?;
+ }
+ }
+
+ Ok(())
+}
+
+fn resolve_invoking_user_config_dir() -> Option<PathBuf> {
+ let sudo_user = std::env::var("SUDO_USER")
+ .ok()
+ .map(|value| value.trim().to_string())
+ .filter(|value| !value.is_empty() && value != "root");
+
+ if let Some(user) = sudo_user {
+ if let Ok(output) = Command::new("getent").args(["passwd", &user]).output() {
+ if output.status.success() {
+ let entry = String::from_utf8_lossy(&output.stdout);
+ let fields: Vec<&str> = entry.trim().split(':').collect();
+ if fields.len() >= 6 {
+ return Some(PathBuf::from(fields[5]).join(".zeroclaw"));
+ }
+ }
+ }
+ }
+
+ std::env::var("HOME")
+ .ok()
+ .map(PathBuf::from)
+ .map(|home| home.join(".zeroclaw"))
+}
+
+fn migrate_openrc_runtime_state_if_needed(config_dir: &Path) -> Result<()> {
+ let target_config = config_dir.join("config.toml");
+ if target_config.exists() {
+ println!(
+ "✅ Reusing existing OpenRC config at {}",
+ target_config.display()
+ );
+ return Ok(());
+ }
+
+ let Some(source_dir) = resolve_invoking_user_config_dir() else {
+ return Ok(());
+ };
+
+ let source_config = source_dir.join("config.toml");
+ if !source_config.exists() {
+ return Ok(());
+ }
+
+ copy_dir_recursive(&source_dir, config_dir)?;
+ println!(
+ "✅ Migrated runtime state from {} to {}",
+ source_dir.display(),
+ config_dir.display()
+ );
+ Ok(())
+}
+
/// Warn if the binary path is in a user home directory
fn warn_if_binary_in_home(exe_path: &Path) {
let path_str = exe_path.to_string_lossy();
@@ -639,7 +738,7 @@ name="zeroclaw"
description="ZeroClaw daemon"
command="{}"
-command_args="--config-dir {} daemon"
+command_args="daemon"
command_background="yes"
command_user="zeroclaw:zeroclaw"
pidfile="/run/${{RC_SVCNAME}}.pid"
@@ -656,7 +755,6 @@ depend() {{
"#,
exe_path.display(),
config_dir.display(),
- config_dir.display(),
config_dir.join("workspace").display()
)
}
@@ -675,7 +773,7 @@ fn install_linux_openrc(config: &Config) -> Result<()> {
if !is_root() {
bail!(
"OpenRC service installation requires root privileges.\n\
- Please run with sudo: sudo zeroclaw service install --service-init=openrc"
+ Please run with sudo: sudo zeroclaw service install"
);
}
@@ -701,6 +799,8 @@ fn install_linux_openrc(config: &Config) -> Result<()> {
println!("✅ Created directory: {}", config_dir.display());
}
+ migrate_openrc_runtime_state_if_needed(config_dir)?;
+
if !workspace_dir.exists() {
fs::create_dir_all(&workspace_dir)
.with_context(|| format!("Failed to create {}", workspace_dir.display()))?;
@@ -718,6 +818,34 @@ fn install_linux_openrc(config: &Config) -> Result<()> {
);
}
+ #[cfg(unix)]
+ {
+ use std::os::unix::fs::PermissionsExt;
+ fs::set_permissions(&workspace_dir, fs::Permissions::from_mode(0o750))
+ .with_context(|| format!("Failed to set permissions on {}", workspace_dir.display()))?;
+ }
+
+ #[cfg(unix)]
+ {
+ use std::os::unix::fs::PermissionsExt;
+ fs::set_permissions(config_dir, fs::Permissions::from_mode(0o755))
+ .with_context(|| format!("Failed to set permissions on {}", config_dir.display()))?;
+ let config_path = config_dir.join("config.toml");
+ if config_path.exists() {
+ fs::set_permissions(&config_path, fs::Permissions::from_mode(0o600)).with_context(
+ || format!("Failed to set permissions on {}", config_path.display()),
+ )?;
+ }
+ let secret_key_path = config_dir.join(".secret_key");
+ if secret_key_path.exists() {
+ fs::set_permissions(&secret_key_path, fs::Permissions::from_mode(0o600)).with_context(
+ || format!("Failed to set permissions on {}", secret_key_path.display()),
+ )?;
+ }
+ }
+
+ chown_recursive_to_zeroclaw(config_dir)?;
+
let created_log_dir = !log_dir.exists();
if created_log_dir {
fs::create_dir_all(log_dir)
@@ -965,7 +1093,7 @@ mod tests {
assert!(script.contains("name=\"zeroclaw\""));
assert!(script.contains("description=\"ZeroClaw daemon\""));
assert!(script.contains("command=\"/usr/local/bin/zeroclaw\""));
- assert!(script.contains("command_args=\"--config-dir /etc/zeroclaw daemon\""));
+ assert!(script.contains("command_args=\"daemon\""));
assert!(script.contains("env ZEROCLAW_CONFIG_DIR=\"/etc/zeroclaw\""));
assert!(script.contains("env ZEROCLAW_WORKSPACE=\"/etc/zeroclaw/workspace\""));
assert!(script.contains("command_background=\"yes\""));
From 076e9be9e56a60e3b4d234446b89fa1e04c8d487 Mon Sep 17 00:00:00 2001
From: Jakub Buzuk <61548378+Baz00k@users.noreply.github.com>
Date: Wed, 18 Feb 2026 15:05:47 +0100
Subject: [PATCH 056/116] fix(service): use explicit --config-dir args in
OpenRC script
Switch OpenRC service generation from env exports
(ZEROCLAW_CONFIG_DIR/WORKSPACE) to explicit command_args with
--config-dir flag. Fixes startup crash with 'Permission denied (os error
13)' under OpenRC init system.
---
src/service/mod.rs | 13 +++++--------
1 file changed, 5 insertions(+), 8 deletions(-)
diff --git a/src/service/mod.rs b/src/service/mod.rs
index 9651d7330..005bc62bf 100644
--- a/src/service/mod.rs
+++ b/src/service/mod.rs
@@ -738,15 +738,13 @@ name="zeroclaw"
description="ZeroClaw daemon"
command="{}"
-command_args="daemon"
+command_args="--config-dir {} daemon"
command_background="yes"
command_user="zeroclaw:zeroclaw"
pidfile="/run/${{RC_SVCNAME}}.pid"
umask 027
output_log="/var/log/zeroclaw/access.log"
error_log="/var/log/zeroclaw/error.log"
-env ZEROCLAW_CONFIG_DIR="{}"
-env ZEROCLAW_WORKSPACE="{}"
depend() {{
need net
@@ -754,8 +752,7 @@ depend() {{
}}
"#,
exe_path.display(),
- config_dir.display(),
- config_dir.join("workspace").display()
+ config_dir.display()
)
}
@@ -1093,9 +1090,9 @@ mod tests {
assert!(script.contains("name=\"zeroclaw\""));
assert!(script.contains("description=\"ZeroClaw daemon\""));
assert!(script.contains("command=\"/usr/local/bin/zeroclaw\""));
- assert!(script.contains("command_args=\"daemon\""));
- assert!(script.contains("env ZEROCLAW_CONFIG_DIR=\"/etc/zeroclaw\""));
- assert!(script.contains("env ZEROCLAW_WORKSPACE=\"/etc/zeroclaw/workspace\""));
+ assert!(script.contains("command_args=\"--config-dir /etc/zeroclaw daemon\""));
+ assert!(!script.contains("env ZEROCLAW_CONFIG_DIR"));
+ assert!(!script.contains("env ZEROCLAW_WORKSPACE"));
assert!(script.contains("command_background=\"yes\""));
assert!(script.contains("command_user=\"zeroclaw:zeroclaw\""));
assert!(script.contains("pidfile=\"/run/${RC_SVCNAME}.pid\""));
From 71acd1245c109547dcfef06f06c7754280aa2ff2 Mon Sep 17 00:00:00 2001
From: Jakub Buzuk <61548378+Baz00k@users.noreply.github.com>
Date: Wed, 18 Feb 2026 16:41:47 +0100
Subject: [PATCH 057/116] fix(service): harden OpenRC restart fallback and
uninstall resilience
- Linux managed daemon now falls back to systemd when OpenRC restart probe fails, instead of returning early with no action.
- OpenRC uninstall no longer fails hard if rc-update del fails; it warns and continues to remove the init script.
---
src/channels/mod.rs | 29 +++++++++++++----------------
src/service/mod.rs | 8 +++++++-
2 files changed, 20 insertions(+), 17 deletions(-)
diff --git a/src/channels/mod.rs b/src/channels/mod.rs
index 0e5001251..fe476fd8a 100644
--- a/src/channels/mod.rs
+++ b/src/channels/mod.rs
@@ -1921,24 +1921,21 @@ fn maybe_restart_managed_daemon_service() -> Result<bool> {
// OpenRC (system-wide) takes precedence over systemd (user-level)
let openrc_init_script = PathBuf::from("/etc/init.d/zeroclaw");
if openrc_init_script.exists() {
- let status_output = Command::new("rc-service")
- .args(OPENRC_STATUS_ARGS)
- .output()
- .context("Failed to query OpenRC service state")?;
-
- // rc-service exits 0 if running, non-zero otherwise
- if status_output.status.success() {
- let restart_output = Command::new("rc-service")
- .args(OPENRC_RESTART_ARGS)
- .output()
- .context("Failed to restart OpenRC daemon service")?;
- if !restart_output.status.success() {
- let stderr = String::from_utf8_lossy(&restart_output.stderr);
- anyhow::bail!("rc-service restart failed: {}", stderr.trim());
+ if let Ok(status_output) = Command::new("rc-service").args(OPENRC_STATUS_ARGS).output()
+ {
+ // rc-service exits 0 if running, non-zero otherwise
+ if status_output.status.success() {
+ let restart_output = Command::new("rc-service")
+ .args(OPENRC_RESTART_ARGS)
+ .output()
+ .context("Failed to restart OpenRC daemon service")?;
+ if !restart_output.status.success() {
+ let stderr = String::from_utf8_lossy(&restart_output.stderr);
+ anyhow::bail!("rc-service restart failed: {}", stderr.trim());
+ }
+ return Ok(true);
}
- return Ok(true);
}
- return Ok(false);
}
// Systemd (user-level)
diff --git a/src/service/mod.rs b/src/service/mod.rs
index 005bc62bf..add18278c 100644
--- a/src/service/mod.rs
+++ b/src/service/mod.rs
@@ -359,7 +359,13 @@ fn uninstall_linux(config: &Config, init_system: InitSystem) -> Result<()> {
InitSystem::Openrc => {
let init_script = Path::new("/etc/init.d/zeroclaw");
if init_script.exists() {
- run_checked(Command::new("rc-update").args(["del", "zeroclaw", "default"]))?;
+ if let Err(err) =
+ run_checked(Command::new("rc-update").args(["del", "zeroclaw", "default"]))
+ {
+ eprintln!(
+ "⚠️ Warning: Could not remove zeroclaw from OpenRC default runlevel: {err}"
+ );
+ }
fs::remove_file(init_script)
.with_context(|| format!("Failed to remove {}", init_script.display()))?;
}
From a1ed5e7e75c5976e30f285557d71d45517b84a82 Mon Sep 17 00:00:00 2001
From: Jakub Buzuk <61548378+Baz00k@users.noreply.github.com>
Date: Fri, 20 Feb 2026 01:02:34 +0100
Subject: [PATCH 058/116] fix: permission issues during service creation
---
src/service/mod.rs | 147 ++++++++++++++++++++++++++++++++++++++++++---
1 file changed, 140 insertions(+), 7 deletions(-)
diff --git a/src/service/mod.rs b/src/service/mod.rs
index add18278c..8d7280edd 100644
--- a/src/service/mod.rs
+++ b/src/service/mod.rs
@@ -597,11 +597,10 @@ fn chown_to_zeroclaw(path: &Path) -> Result<()> {
if !output.status.success() {
let stderr = String::from_utf8_lossy(&output.stderr);
- // Non-fatal: warn but continue
- eprintln!(
- "⚠️ Warning: Could not change ownership of {} to zeroclaw:zeroclaw: {}",
+ bail!(
+ "Failed to change ownership of {} to zeroclaw:zeroclaw: {}",
path.display(),
- stderr.trim()
+ stderr.trim(),
);
}
Ok(())
@@ -621,10 +620,10 @@ fn chown_recursive_to_zeroclaw(path: &Path) -> Result<()> {
if !output.status.success() {
let stderr = String::from_utf8_lossy(&output.stderr);
- eprintln!(
- "⚠️ Warning: Could not recursively change ownership of {} to zeroclaw:zeroclaw: {}",
+ bail!(
+ "Failed to recursively change ownership of {} to zeroclaw:zeroclaw: {}",
path.display(),
- stderr.trim()
+ stderr.trim(),
);
}
@@ -721,6 +720,92 @@ fn migrate_openrc_runtime_state_if_needed(config_dir: &Path) -> Result<()> {
Ok(())
}
+#[cfg(unix)]
+fn shell_single_quote(raw: &str) -> String {
+ format!("'{}'", raw.replace('\'', "'\"'\"'"))
+}
+
+#[cfg(unix)]
+fn build_openrc_writability_probe_command(path: &Path, has_runuser: bool) -> (String, Vec<String>) {
+ let probe = format!("test -w {}", shell_single_quote(&path.to_string_lossy()));
+ if has_runuser {
+ (
+ "runuser".to_string(),
+ vec![
+ "-u".to_string(),
+ "zeroclaw".to_string(),
+ "--".to_string(),
+ "sh".to_string(),
+ "-c".to_string(),
+ probe,
+ ],
+ )
+ } else {
+ (
+ "su".to_string(),
+ vec![
+ "-s".to_string(),
+ "/bin/sh".to_string(),
+ "-c".to_string(),
+ probe,
+ "zeroclaw".to_string(),
+ ],
+ )
+ }
+}
+
+#[cfg(unix)]
+fn ensure_openrc_runtime_path_writable(path: &Path) -> Result<()> {
+ let has_runuser = which::which("runuser").is_ok();
+ let (program, args) = build_openrc_writability_probe_command(path, has_runuser);
+ let output = Command::new(&program)
+ .args(args.iter().map(String::as_str))
+ .output()
+ .with_context(|| {
+ format!(
+ "Failed to verify OpenRC runtime write access for {}",
+ path.display()
+ )
+ })?;
+
+ if !output.status.success() {
+ let stderr = String::from_utf8_lossy(&output.stderr);
+ let details = if stderr.trim().is_empty() {
+ "write-access probe failed"
+ } else {
+ stderr.trim()
+ };
+ bail!(
+ "OpenRC runtime user 'zeroclaw' cannot write {} ({details}). \
+ Re-run `sudo zeroclaw service install` and ensure ownership is zeroclaw:zeroclaw.",
+ path.display(),
+ );
+ }
+
+ Ok(())
+}
+
+#[cfg(unix)]
+fn ensure_openrc_runtime_dirs_writable(
+ config_dir: &Path,
+ workspace_dir: &Path,
+ log_dir: &Path,
+) -> Result<()> {
+ for path in [config_dir, workspace_dir, log_dir] {
+ ensure_openrc_runtime_path_writable(path)?;
+ }
+ Ok(())
+}
+
+#[cfg(not(unix))]
+fn ensure_openrc_runtime_dirs_writable(
+ _config_dir: &Path,
+ _workspace_dir: &Path,
+ _log_dir: &Path,
+) -> Result<()> {
+ Ok(())
+}
+
/// Warn if the binary path is in a user home directory
fn warn_if_binary_in_home(exe_path: &Path) {
let path_str = exe_path.to_string_lossy();
@@ -863,6 +948,8 @@ fn install_linux_openrc(config: &Config) -> Result<()> {
chown_to_zeroclaw(log_dir)?;
+ ensure_openrc_runtime_dirs_writable(config_dir, &workspace_dir, log_dir)?;
+
if created_log_dir {
println!(
"✅ Created directory: {} (owned by zeroclaw:zeroclaw)",
@@ -1125,4 +1212,50 @@ mod tests {
assert!(!system_path.to_string_lossy().contains("/home/"));
assert!(!system_path.to_string_lossy().contains(".cargo/bin"));
}
+
+ #[cfg(unix)]
+ #[test]
+ fn shell_single_quote_escapes_single_quotes() {
+ assert_eq!(
+ shell_single_quote("/tmp/weird'path"),
+ "'/tmp/weird'\"'\"'path'"
+ );
+ }
+
+ #[cfg(unix)]
+ #[test]
+ fn openrc_writability_probe_prefers_runuser_when_available() {
+ let (program, args) =
+ build_openrc_writability_probe_command(Path::new("/etc/zeroclaw"), true);
+ assert_eq!(program, "runuser");
+ assert_eq!(
+ args,
+ vec![
+ "-u".to_string(),
+ "zeroclaw".to_string(),
+ "--".to_string(),
+ "sh".to_string(),
+ "-c".to_string(),
+ "test -w '/etc/zeroclaw'".to_string()
+ ]
+ );
+ }
+
+ #[cfg(unix)]
+ #[test]
+ fn openrc_writability_probe_falls_back_to_su() {
+ let (program, args) =
+ build_openrc_writability_probe_command(Path::new("/etc/zeroclaw/workspace"), false);
+ assert_eq!(program, "su");
+ assert_eq!(
+ args,
+ vec![
+ "-s".to_string(),
+ "/bin/sh".to_string(),
+ "-c".to_string(),
+ "test -w '/etc/zeroclaw/workspace'".to_string(),
+ "zeroclaw".to_string()
+ ]
+ );
+ }
}
From 35c37cb217292b70f0f163db8fd7f5c12a0f81b4 Mon Sep 17 00:00:00 2001
From: Jakub Buzuk <61548378+Baz00k@users.noreply.github.com>
Date: Fri, 20 Feb 2026 10:56:39 +0100
Subject: [PATCH 059/116] fix: accept config dir cli arg
---
Cargo.lock | 36 +++++++++++-----------
src/config/schema.rs | 73 +++++++++++++++++++++++++++++++++++++++++---
2 files changed, 86 insertions(+), 23 deletions(-)
diff --git a/Cargo.lock b/Cargo.lock
index 404c0fbc9..0658b38fc 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1600,6 +1600,12 @@ dependencies = [
"log",
]
+[[package]]
+name = "env_home"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c7f84e12ccf0a7ddc17a6c41c93326024c42920d7ee630d04950e6926645c0fe"
+
[[package]]
name = "env_logger"
version = "0.11.9"
@@ -1610,12 +1616,6 @@ dependencies = [
"log",
]
-[[package]]
-name = "env_home"
-version = "0.1.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c7f84e12ccf0a7ddc17a6c41c93326024c42920d7ee630d04950e6926645c0fe"
-
[[package]]
name = "equivalent"
version = "1.0.2"
@@ -7037,6 +7037,18 @@ version = "0.1.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a28ac98ddc8b9274cb41bb4d9d4d5c425b6020c50c46f25559911905610b4a88"
+[[package]]
+name = "which"
+version = "7.0.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "24d643ce3fd3e5b54854602a080f34fb10ab75e0b813ee32d00ca2b44fa74762"
+dependencies = [
+ "either",
+ "env_home",
+ "rustix 1.1.3",
+ "winsafe",
+]
+
[[package]]
name = "whoami"
version = "2.1.1"
@@ -7056,18 +7068,6 @@ version = "2.6.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "29333c3ea1ba8b17211763463ff24ee84e41c78224c16b001cd907e663a38c68"
-[[package]]
-name = "which"
-version = "7.0.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "24d643ce3fd3e5b54854602a080f34fb10ab75e0b813ee32d00ca2b44fa74762"
-dependencies = [
- "either",
- "env_home",
- "rustix 1.1.3",
- "winsafe",
-]
-
[[package]]
name = "winapi"
version = "0.3.9"
diff --git a/src/config/schema.rs b/src/config/schema.rs
index 2030feec3..50546d65e 100644
--- a/src/config/schema.rs
+++ b/src/config/schema.rs
@@ -2285,6 +2285,7 @@ pub enum StreamMode {
#[default]
Off,
/// Update a draft message with every flush interval.
+ #[serde(alias = "on")]
Partial,
}
@@ -2987,6 +2988,7 @@ fn resolve_config_dir_for_workspace(workspace_dir: &Path) -> (PathBuf, PathBuf)
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
enum ConfigResolutionSource {
+ EnvConfigDir,
EnvWorkspace,
ActiveWorkspaceMarker,
DefaultConfigDir,
@@ -2995,6 +2997,7 @@ enum ConfigResolutionSource {
impl ConfigResolutionSource {
const fn as_str(self) -> &'static str {
match self {
+ Self::EnvConfigDir => "ZEROCLAW_CONFIG_DIR",
Self::EnvWorkspace => "ZEROCLAW_WORKSPACE",
Self::ActiveWorkspaceMarker => "active_workspace.toml",
Self::DefaultConfigDir => "default",
@@ -3006,10 +3009,18 @@ async fn resolve_runtime_config_dirs(
default_zeroclaw_dir: &Path,
default_workspace_dir: &Path,
) -> Result<(PathBuf, PathBuf, ConfigResolutionSource)> {
- // Resolution priority:
- // 1. ZEROCLAW_WORKSPACE env override
- // 2. Persisted active workspace marker from onboarding/custom profile
- // 3. Default ~/.zeroclaw layout
+ if let Ok(custom_config_dir) = std::env::var("ZEROCLAW_CONFIG_DIR") {
+ let custom_config_dir = custom_config_dir.trim();
+ if !custom_config_dir.is_empty() {
+ let zeroclaw_dir = PathBuf::from(custom_config_dir);
+ return Ok((
+ zeroclaw_dir.clone(),
+ zeroclaw_dir.join("workspace"),
+ ConfigResolutionSource::EnvConfigDir,
+ ));
+ }
+ }
+
if let Ok(custom_workspace) = std::env::var("ZEROCLAW_WORKSPACE") {
if !custom_workspace.is_empty() {
let (zeroclaw_dir, workspace_dir) =
@@ -3073,6 +3084,14 @@ fn encrypt_optional_secret(
Ok(())
}
+fn config_dir_creation_error(path: &Path) -> String {
+ format!(
+ "Failed to create config directory: {}. If running as an OpenRC service, \
+ ensure this path is writable by user 'zeroclaw'.",
+ path.display()
+ )
+}
+
impl Config {
pub async fn load_or_init() -> Result<Self> {
let (default_zeroclaw_dir, default_workspace_dir) = default_config_and_workspace_dirs()?;
@@ -3084,7 +3103,7 @@ impl Config {
fs::create_dir_all(&zeroclaw_dir)
.await
- .context("Failed to create config directory")?;
+ .with_context(|| config_dir_creation_error(&zeroclaw_dir))?;
fs::create_dir_all(&workspace_dir)
.await
.context("Failed to create workspace directory")?;
@@ -3661,6 +3680,14 @@ mod tests {
assert!(c.config_path.to_string_lossy().contains("config.toml"));
}
+ #[test]
+ async fn config_dir_creation_error_mentions_openrc_and_path() {
+ let msg = config_dir_creation_error(Path::new("/etc/zeroclaw"));
+ assert!(msg.contains("/etc/zeroclaw"));
+ assert!(msg.contains("OpenRC"));
+ assert!(msg.contains("zeroclaw"));
+ }
+
#[test]
async fn config_schema_export_contains_expected_contract_shape() {
let schema = schemars::schema_for!(Config);
@@ -5172,6 +5199,42 @@ default_temperature = 0.7
let _ = fs::remove_dir_all(default_config_dir).await;
}
+ #[test]
+ async fn resolve_runtime_config_dirs_uses_env_config_dir_first() {
+ let _env_guard = env_override_lock().await;
+ let default_config_dir = std::env::temp_dir().join(uuid::Uuid::new_v4().to_string());
+ let default_workspace_dir = default_config_dir.join("workspace");
+ let explicit_config_dir = default_config_dir.join("explicit-config");
+ let marker_config_dir = default_config_dir.join("profiles").join("alpha");
+ let state_path = default_config_dir.join(ACTIVE_WORKSPACE_STATE_FILE);
+
+ fs::create_dir_all(&default_config_dir).await.unwrap();
+ let state = ActiveWorkspaceState {
+ config_dir: marker_config_dir.to_string_lossy().into_owned(),
+ };
+ fs::write(&state_path, toml::to_string(&state).unwrap())
+ .await
+ .unwrap();
+
+ std::env::set_var("ZEROCLAW_CONFIG_DIR", &explicit_config_dir);
+ std::env::remove_var("ZEROCLAW_WORKSPACE");
+
+ let (config_dir, resolved_workspace_dir, source) =
+ resolve_runtime_config_dirs(&default_config_dir, &default_workspace_dir)
+ .await
+ .unwrap();
+
+ assert_eq!(source, ConfigResolutionSource::EnvConfigDir);
+ assert_eq!(config_dir, explicit_config_dir);
+ assert_eq!(
+ resolved_workspace_dir,
+ explicit_config_dir.join("workspace")
+ );
+
+ std::env::remove_var("ZEROCLAW_CONFIG_DIR");
+ let _ = fs::remove_dir_all(default_config_dir).await;
+ }
+
#[test]
async fn resolve_runtime_config_dirs_uses_active_workspace_marker() {
let _env_guard = env_override_lock().await;
From 6c329760750935e24068283ffee4dc90410bc795 Mon Sep 17 00:00:00 2001
From: Chummy
Date: Fri, 20 Feb 2026 21:38:29 +0800
Subject: [PATCH 060/116] fix(service): tighten OpenRC docs and stabilize root
detection test
---
docs/network-deployment.md | 4 +++-
src/config/schema.rs | 1 -
src/service/mod.rs | 5 +++--
3 files changed, 6 insertions(+), 4 deletions(-)
diff --git a/docs/network-deployment.md b/docs/network-deployment.md
index f9f0bf258..8c849d01c 100644
--- a/docs/network-deployment.md
+++ b/docs/network-deployment.md
@@ -223,10 +223,12 @@ This creates:
### 7.3 Configuration
-No manual config copy is required.
+Manual config copy is usually not required.
`sudo zeroclaw service install` automatically prepares `/etc/zeroclaw`, migrates existing runtime state from your user setup when available, and sets ownership/permissions for the `zeroclaw` service user.
+If no prior runtime state is available to migrate, create `/etc/zeroclaw/config.toml` before starting the service.
+
### 7.4 Enable and Start
```bash
diff --git a/src/config/schema.rs b/src/config/schema.rs
index 50546d65e..02056da0e 100644
--- a/src/config/schema.rs
+++ b/src/config/schema.rs
@@ -2285,7 +2285,6 @@ pub enum StreamMode {
#[default]
Off,
/// Update a draft message with every flush interval.
- #[serde(alias = "on")]
Partial,
}
diff --git a/src/service/mod.rs b/src/service/mod.rs
index 8d7280edd..6218b4696 100644
--- a/src/service/mod.rs
+++ b/src/service/mod.rs
@@ -1167,9 +1167,10 @@ mod tests {
assert_eq!(InitSystem::default(), InitSystem::Auto);
}
+ #[cfg(unix)]
#[test]
- fn is_root_returns_false_in_test_env() {
- assert!(!is_root());
+ fn is_root_matches_system_uid() {
+ assert_eq!(is_root(), unsafe { libc::getuid() == 0 });
}
#[test]
From 5f6a8cdfc24b2e4eee0a73a42babb5559d1e7bf0 Mon Sep 17 00:00:00 2001
From: Chummy
Date: Fri, 20 Feb 2026 23:27:41 +0800
Subject: [PATCH 061/116] fix(channels): suppress leaked tool json in channel
replies
---
src/agent/loop_.rs | 39 +++++-
src/channels/mod.rs | 309 ++++++++++++++++++++++++++++++++++++++++++--
2 files changed, 338 insertions(+), 10 deletions(-)
diff --git a/src/agent/loop_.rs b/src/agent/loop_.rs
index 58f80ba2a..42d2a1d85 100644
--- a/src/agent/loop_.rs
+++ b/src/agent/loop_.rs
@@ -310,7 +310,11 @@ fn parse_tool_call_value(value: &serde_json::Value) -> Option<ParsedToolCall> {
.trim()
.to_string();
if !name.is_empty() {
- let arguments = parse_arguments_value(function.get("arguments"));
+ let arguments = parse_arguments_value(
+ function
+ .get("arguments")
+ .or_else(|| function.get("parameters")),
+ );
return Some(ParsedToolCall { name, arguments });
}
}
@@ -326,7 +330,8 @@ fn parse_tool_call_value(value: &serde_json::Value) -> Option<ParsedToolCall> {
return None;
}
- let arguments = parse_arguments_value(value.get("arguments"));
+ let arguments =
+ parse_arguments_value(value.get("arguments").or_else(|| value.get("parameters")));
Some(ParsedToolCall { name, arguments })
}
@@ -3088,6 +3093,36 @@ Done."#;
assert_eq!(result.unwrap().name, "test_tool");
}
+ #[test]
+ fn parse_tool_call_value_accepts_top_level_parameters_alias() {
+ let value = serde_json::json!({
+ "name": "schedule",
+ "parameters": {"action": "create", "message": "test"}
+ });
+ let result = parse_tool_call_value(&value).expect("tool call should parse");
+ assert_eq!(result.name, "schedule");
+ assert_eq!(
+ result.arguments.get("action").and_then(|v| v.as_str()),
+ Some("create")
+ );
+ }
+
+ #[test]
+ fn parse_tool_call_value_accepts_function_parameters_alias() {
+ let value = serde_json::json!({
+ "function": {
+ "name": "shell",
+ "parameters": {"command": "date"}
+ }
+ });
+ let result = parse_tool_call_value(&value).expect("tool call should parse");
+ assert_eq!(result.name, "shell");
+ assert_eq!(
+ result.arguments.get("command").and_then(|v| v.as_str()),
+ Some("date")
+ );
+ }
+
#[test]
fn parse_tool_calls_from_json_value_handles_empty_array() {
// Recovery: Empty tool_calls array should return empty vec
diff --git a/src/channels/mod.rs b/src/channels/mod.rs
index fe476fd8a..0bfe322b2 100644
--- a/src/channels/mod.rs
+++ b/src/channels/mod.rs
@@ -70,7 +70,7 @@ use crate::tools::{self, Tool};
use crate::util::truncate_with_ellipsis;
use anyhow::{Context, Result};
use serde::Deserialize;
-use std::collections::HashMap;
+use std::collections::{HashMap, HashSet};
use std::fmt::Write;
use std::path::{Path, PathBuf};
use std::process::Command;
@@ -1000,6 +1000,170 @@ fn extract_tool_context_summary(history: &[ChatMessage], start_index: usize) ->
format!("[Used tools: {}]", tool_names.join(", "))
}
+fn sanitize_channel_response(response: &str, tools: &[Box<dyn Tool>]) -> String {
+ let known_tool_names: HashSet = tools
+ .iter()
+ .map(|tool| tool.name().to_ascii_lowercase())
+ .collect();
+ strip_isolated_tool_json_artifacts(response, &known_tool_names)
+}
+
+fn is_tool_call_payload(value: &serde_json::Value, known_tool_names: &HashSet<String>) -> bool {
+ let Some(object) = value.as_object() else {
+ return false;
+ };
+
+ let (name, has_args) =
+ if let Some(function) = object.get("function").and_then(|f| f.as_object()) {
+ (
+ function
+ .get("name")
+ .and_then(|v| v.as_str())
+ .or_else(|| object.get("name").and_then(|v| v.as_str())),
+ function.contains_key("arguments")
+ || function.contains_key("parameters")
+ || object.contains_key("arguments")
+ || object.contains_key("parameters"),
+ )
+ } else {
+ (
+ object.get("name").and_then(|v| v.as_str()),
+ object.contains_key("arguments") || object.contains_key("parameters"),
+ )
+ };
+
+ let Some(name) = name.map(str::trim).filter(|name| !name.is_empty()) else {
+ return false;
+ };
+
+ has_args && known_tool_names.contains(&name.to_ascii_lowercase())
+}
+
+fn is_tool_result_payload(
+ object: &serde_json::Map<String, serde_json::Value>,
+ saw_tool_call_payload: bool,
+) -> bool {
+ if !saw_tool_call_payload || !object.contains_key("result") {
+ return false;
+ }
+
+ object.keys().all(|key| {
+ matches!(
+ key.as_str(),
+ "result" | "id" | "tool_call_id" | "name" | "tool"
+ )
+ })
+}
+
+fn sanitize_tool_json_value(
+ value: &serde_json::Value,
+ known_tool_names: &HashSet<String>,
+ saw_tool_call_payload: bool,
+) -> Option<(String, bool)> {
+ if is_tool_call_payload(value, known_tool_names) {
+ return Some((String::new(), true));
+ }
+
+ if let Some(array) = value.as_array() {
+ if !array.is_empty()
+ && array
+ .iter()
+ .all(|item| is_tool_call_payload(item, known_tool_names))
+ {
+ return Some((String::new(), true));
+ }
+ return None;
+ }
+
+ let Some(object) = value.as_object() else {
+ return None;
+ };
+
+ if let Some(tool_calls) = object.get("tool_calls").and_then(|value| value.as_array()) {
+ if !tool_calls.is_empty()
+ && tool_calls
+ .iter()
+ .all(|call| is_tool_call_payload(call, known_tool_names))
+ {
+ let content = object
+ .get("content")
+ .and_then(|value| value.as_str())
+ .unwrap_or("")
+ .trim()
+ .to_string();
+ return Some((content, true));
+ }
+ }
+
+ if is_tool_result_payload(object, saw_tool_call_payload) {
+ return Some((String::new(), false));
+ }
+
+ None
+}
+
+fn is_line_isolated_json_segment(message: &str, start: usize, end: usize) -> bool {
+ let line_start = message[..start].rfind('\n').map_or(0, |idx| idx + 1);
+ let line_end = message[end..]
+ .find('\n')
+ .map_or(message.len(), |idx| end + idx);
+
+ message[line_start..start].trim().is_empty() && message[end..line_end].trim().is_empty()
+}
+
+fn strip_isolated_tool_json_artifacts(message: &str, known_tool_names: &HashSet<String>) -> String {
+ let mut cleaned = String::with_capacity(message.len());
+ let mut cursor = 0usize;
+ let mut saw_tool_call_payload = false;
+
+ while cursor < message.len() {
+ let Some(rel_start) = message[cursor..].find(|ch: char| ch == '{' || ch == '[') else {
+ cleaned.push_str(&message[cursor..]);
+ break;
+ };
+
+ let start = cursor + rel_start;
+ cleaned.push_str(&message[cursor..start]);
+
+ let candidate = &message[start..];
+ let mut stream =
+ serde_json::Deserializer::from_str(candidate).into_iter::<serde_json::Value>();
+
+ if let Some(Ok(value)) = stream.next() {
+ let consumed = stream.byte_offset();
+ if consumed > 0 {
+ let end = start + consumed;
+ if is_line_isolated_json_segment(message, start, end) {
+ if let Some((replacement, marks_tool_call)) =
+ sanitize_tool_json_value(&value, known_tool_names, saw_tool_call_payload)
+ {
+ if marks_tool_call {
+ saw_tool_call_payload = true;
+ }
+ if !replacement.trim().is_empty() {
+ cleaned.push_str(replacement.trim());
+ }
+ cursor = end;
+ continue;
+ }
+ }
+ }
+ }
+
+ let Some(ch) = message[start..].chars().next() else {
+ break;
+ };
+ cleaned.push(ch);
+ cursor = start + ch.len_utf8();
+ }
+
+ let mut result = cleaned.replace("\r\n", "\n");
+ while result.contains("\n\n\n") {
+ result = result.replace("\n\n\n", "\n\n");
+ }
+ result.trim().to_string()
+}
+
fn spawn_supervised_listener(
ch: Arc,
tx: tokio::sync::mpsc::Sender,
@@ -1344,14 +1508,23 @@ async fn process_channel_message(
}
}
LlmExecutionResult::Completed(Ok(Ok(response))) => {
+ let sanitized_response =
+ sanitize_channel_response(&response, ctx.tools_registry.as_ref());
+ let delivered_response = if sanitized_response.is_empty() && !response.trim().is_empty()
+ {
+ "I encountered malformed tool-call output and could not produce a safe reply. Please try again.".to_string()
+ } else {
+ sanitized_response
+ };
+
// Extract condensed tool-use context from the history messages
// added during run_tool_call_loop, so the LLM retains awareness
// of what it did on subsequent turns.
let tool_summary = extract_tool_context_summary(&history, history_len_before_tools);
let history_response = if tool_summary.is_empty() {
- response.clone()
+ delivered_response.clone()
} else {
- format!("{tool_summary}\n{response}")
+ format!("{tool_summary}\n{delivered_response}")
};
append_sender_turn(
@@ -1362,25 +1535,25 @@ async fn process_channel_message(
println!(
" 🤖 Reply ({}ms): {}",
started_at.elapsed().as_millis(),
- truncate_with_ellipsis(&response, 80)
+ truncate_with_ellipsis(&delivered_response, 80)
);
if let Some(channel) = target_channel.as_ref() {
if let Some(ref draft_id) = draft_message_id {
if let Err(e) = channel
- .finalize_draft(&msg.reply_target, draft_id, &response)
+ .finalize_draft(&msg.reply_target, draft_id, &delivered_response)
.await
{
tracing::warn!("Failed to finalize draft: {e}; sending as new message");
let _ = channel
.send(
- &SendMessage::new(&response, &msg.reply_target)
+ &SendMessage::new(&delivered_response, &msg.reply_target)
.in_thread(msg.thread_ts.clone()),
)
.await;
}
} else if let Err(e) = channel
.send(
- &SendMessage::new(response, &msg.reply_target)
+ &SendMessage::new(delivered_response, &msg.reply_target)
.in_thread(msg.thread_ts.clone()),
)
.await
@@ -2763,7 +2936,7 @@ mod tests {
use crate::observability::NoopObserver;
use crate::providers::{ChatMessage, Provider};
use crate::tools::{Tool, ToolResult};
- use std::collections::HashMap;
+ use std::collections::{HashMap, HashSet};
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
use tempfile::TempDir;
@@ -3122,6 +3295,33 @@ mod tests {
}
}
+ struct RawToolArtifactProvider;
+
+ #[async_trait::async_trait]
+ impl Provider for RawToolArtifactProvider {
+ async fn chat_with_system(
+ &self,
+ _system_prompt: Option<&str>,
+ _message: &str,
+ _model: &str,
+ _temperature: f64,
+        ) -> anyhow::Result<String> {
+ Ok("fallback".to_string())
+ }
+
+ async fn chat_with_history(
+ &self,
+ _messages: &[ChatMessage],
+ _model: &str,
+ _temperature: f64,
+        ) -> anyhow::Result<String> {
+ Ok(r#"{"name":"mock_price","parameters":{"symbol":"BTC"}}
+{"result":{"symbol":"BTC","price_usd":65000}}
+BTC is currently around $65,000 based on latest tool output."#
+ .to_string())
+ }
+ }
+
struct IterativeToolProvider {
required_tool_iterations: usize,
}
@@ -3364,6 +3564,63 @@ mod tests {
assert!(!sent_messages[0].contains("mock_price"));
}
+ #[tokio::test]
+ async fn process_channel_message_strips_unexecuted_tool_json_artifacts_from_reply() {
+ let channel_impl = Arc::new(RecordingChannel::default());
+ let channel: Arc = channel_impl.clone();
+
+ let mut channels_by_name = HashMap::new();
+ channels_by_name.insert(channel.name().to_string(), channel);
+
+ let runtime_ctx = Arc::new(ChannelRuntimeContext {
+ channels_by_name: Arc::new(channels_by_name),
+ provider: Arc::new(RawToolArtifactProvider),
+ default_provider: Arc::new("test-provider".to_string()),
+ memory: Arc::new(NoopMemory),
+ tools_registry: Arc::new(vec![Box::new(MockPriceTool)]),
+ observer: Arc::new(NoopObserver),
+ system_prompt: Arc::new("test-system-prompt".to_string()),
+ model: Arc::new("test-model".to_string()),
+ temperature: 0.0,
+ auto_save_memory: false,
+ max_tool_iterations: 10,
+ min_relevance_score: 0.0,
+ conversation_histories: Arc::new(Mutex::new(HashMap::new())),
+ provider_cache: Arc::new(Mutex::new(HashMap::new())),
+ route_overrides: Arc::new(Mutex::new(HashMap::new())),
+ api_key: None,
+ api_url: None,
+ reliability: Arc::new(crate::config::ReliabilityConfig::default()),
+ provider_runtime_options: providers::ProviderRuntimeOptions::default(),
+ workspace_dir: Arc::new(std::env::temp_dir()),
+ message_timeout_secs: CHANNEL_MESSAGE_TIMEOUT_SECS,
+ interrupt_on_new_message: false,
+ multimodal: crate::config::MultimodalConfig::default(),
+ });
+
+ process_channel_message(
+ runtime_ctx,
+ traits::ChannelMessage {
+ id: "msg-raw-json".to_string(),
+ sender: "alice".to_string(),
+ reply_target: "chat-raw".to_string(),
+ content: "What is the BTC price now?".to_string(),
+ channel: "test-channel".to_string(),
+ timestamp: 3,
+ thread_ts: None,
+ },
+ CancellationToken::new(),
+ )
+ .await;
+
+ let sent_messages = channel_impl.sent_messages.lock().await;
+ assert_eq!(sent_messages.len(), 1);
+ assert!(sent_messages[0].starts_with("chat-raw:"));
+ assert!(sent_messages[0].contains("BTC is currently around"));
+ assert!(!sent_messages[0].contains("\"name\":\"mock_price\""));
+ assert!(!sent_messages[0].contains("\"result\""));
+ }
+
#[tokio::test]
async fn process_channel_message_executes_tool_calls_with_alias_tags() {
let channel_impl = Arc::new(RecordingChannel::default());
@@ -4960,6 +5217,42 @@ Mon Feb 20
assert_eq!(summary, "[Used tools: fresh_tool]");
}
+ #[test]
+ fn strip_isolated_tool_json_artifacts_removes_tool_calls_and_results() {
+ let mut known_tools = HashSet::new();
+ known_tools.insert("schedule".to_string());
+
+ let input = r#"{"name":"schedule","parameters":{"action":"create","message":"test"}}
+{"name":"schedule","parameters":{"action":"cancel","task_id":"test"}}
+Let me create the reminder properly:
+{"name":"schedule","parameters":{"action":"create","message":"Go to sleep"}}
+{"result":{"task_id":"abc","status":"scheduled"}}
+Done reminder set for 1:38 AM."#;
+
+ let result = strip_isolated_tool_json_artifacts(input, &known_tools);
+ let normalized = result
+ .lines()
+ .filter(|line| !line.trim().is_empty())
+            .collect::<Vec<_>>()
+ .join("\n");
+ assert_eq!(
+ normalized,
+ "Let me create the reminder properly:\nDone reminder set for 1:38 AM."
+ );
+ }
+
+ #[test]
+ fn strip_isolated_tool_json_artifacts_preserves_non_tool_json() {
+ let mut known_tools = HashSet::new();
+ known_tools.insert("shell".to_string());
+
+ let input = r#"{"name":"profile","parameters":{"timezone":"UTC"}}
+This is an example JSON object for profile settings."#;
+
+ let result = strip_isolated_tool_json_artifacts(input, &known_tools);
+ assert_eq!(result, input);
+ }
+
// ── AIEOS Identity Tests (Issue #168) ─────────────────────────
#[test]
From e0810109835ddf42e90bfda25482fd3b3fba6e7f Mon Sep 17 00:00:00 2001
From: Chummy
Date: Fri, 20 Feb 2026 22:23:03 +0800
Subject: [PATCH 062/116] feat(skills): add configurable compact skills prompt
injection
---
README.md | 3 +-
docs/config-reference.md | 3 +
src/agent/agent.rs | 14 +++++
src/agent/loop_.rs | 2 +
src/agent/prompt.rs | 47 +++++++++++++-
src/channels/mod.rs | 52 +++++++++++++++-
src/config/mod.rs | 5 +-
src/config/schema.rs | 58 +++++++++++++++++
src/skills/mod.rs | 131 ++++++++++++++++++++++++++++++---------
9 files changed, 280 insertions(+), 35 deletions(-)
diff --git a/README.md b/README.md
index 0025c5f04..98485a815 100644
--- a/README.md
+++ b/README.md
@@ -946,9 +946,10 @@ Community `open-skills` sync is disabled by default. Enable it explicitly in `co
[skills]
open_skills_enabled = true
# open_skills_dir = "/path/to/open-skills" # optional
+# prompt_injection_mode = "compact" # optional: use for low-context local models
```
-You can also override at runtime with `ZEROCLAW_OPEN_SKILLS_ENABLED` and `ZEROCLAW_OPEN_SKILLS_DIR`.
+You can also override at runtime with `ZEROCLAW_OPEN_SKILLS_ENABLED`, `ZEROCLAW_OPEN_SKILLS_DIR`, and `ZEROCLAW_SKILLS_PROMPT_MODE` (`full` or `compact`).
## Development
diff --git a/docs/config-reference.md b/docs/config-reference.md
index f1924c9bd..50f91a7d9 100644
--- a/docs/config-reference.md
+++ b/docs/config-reference.md
@@ -134,6 +134,7 @@ Notes:
|---|---|---|
| `open_skills_enabled` | `false` | Opt-in loading/sync of community `open-skills` repository |
| `open_skills_dir` | unset | Optional local path for `open-skills` (defaults to `$HOME/open-skills` when enabled) |
+| `prompt_injection_mode` | `full` | Skill prompt verbosity: `full` (inline instructions/tools) or `compact` (name/description/location only) |
Notes:
@@ -141,7 +142,9 @@ Notes:
- Environment overrides:
- `ZEROCLAW_OPEN_SKILLS_ENABLED` accepts `1/0`, `true/false`, `yes/no`, `on/off`.
- `ZEROCLAW_OPEN_SKILLS_DIR` overrides the repository path when non-empty.
+ - `ZEROCLAW_SKILLS_PROMPT_MODE` accepts `full` or `compact`.
- Precedence for enable flag: `ZEROCLAW_OPEN_SKILLS_ENABLED` → `skills.open_skills_enabled` in `config.toml` → default `false`.
+- `prompt_injection_mode = "compact"` is recommended on low-context local models to reduce startup prompt size while keeping skill files available on demand.
## `[composio]`
diff --git a/src/agent/agent.rs b/src/agent/agent.rs
index 466a23368..d1affdaaf 100644
--- a/src/agent/agent.rs
+++ b/src/agent/agent.rs
@@ -30,6 +30,7 @@ pub struct Agent {
workspace_dir: std::path::PathBuf,
identity_config: crate::config::IdentityConfig,
skills: Vec,
+ skills_prompt_mode: crate::config::SkillsPromptInjectionMode,
auto_save: bool,
history: Vec,
classification_config: crate::config::QueryClassificationConfig,
@@ -50,6 +51,7 @@ pub struct AgentBuilder {
workspace_dir: Option,
identity_config: Option,
skills: Option>,
+    skills_prompt_mode: Option<crate::config::SkillsPromptInjectionMode>,
auto_save: Option,
classification_config: Option,
available_hints: Option>,
@@ -71,6 +73,7 @@ impl AgentBuilder {
workspace_dir: None,
identity_config: None,
skills: None,
+ skills_prompt_mode: None,
auto_save: None,
classification_config: None,
available_hints: None,
@@ -142,6 +145,14 @@ impl AgentBuilder {
self
}
+ pub fn skills_prompt_mode(
+ mut self,
+ skills_prompt_mode: crate::config::SkillsPromptInjectionMode,
+ ) -> Self {
+ self.skills_prompt_mode = Some(skills_prompt_mode);
+ self
+ }
+
pub fn auto_save(mut self, auto_save: bool) -> Self {
self.auto_save = Some(auto_save);
self
@@ -197,6 +208,7 @@ impl AgentBuilder {
.unwrap_or_else(|| std::path::PathBuf::from(".")),
identity_config: self.identity_config.unwrap_or_default(),
skills: self.skills.unwrap_or_default(),
+ skills_prompt_mode: self.skills_prompt_mode.unwrap_or_default(),
auto_save: self.auto_save.unwrap_or(false),
history: Vec::new(),
classification_config: self.classification_config.unwrap_or_default(),
@@ -312,6 +324,7 @@ impl Agent {
&config.workspace_dir,
config,
))
+ .skills_prompt_mode(config.skills.prompt_injection_mode)
.auto_save(config.memory.auto_save)
.build()
}
@@ -350,6 +363,7 @@ impl Agent {
model_name: &self.model_name,
tools: &self.tools,
skills: &self.skills,
+ skills_prompt_mode: self.skills_prompt_mode,
identity_config: Some(&self.identity_config),
dispatcher_instructions: &instructions,
};
diff --git a/src/agent/loop_.rs b/src/agent/loop_.rs
index 42d2a1d85..cde95d31b 100644
--- a/src/agent/loop_.rs
+++ b/src/agent/loop_.rs
@@ -1561,6 +1561,7 @@ pub async fn run(
Some(&config.identity),
bootstrap_max_chars,
native_tools,
+ config.skills.prompt_injection_mode,
);
// Append structured tool-use instructions with schemas (only for non-native providers)
@@ -1928,6 +1929,7 @@ pub async fn process_message(config: Config, message: &str) -> Result {
Some(&config.identity),
bootstrap_max_chars,
native_tools,
+ config.skills.prompt_injection_mode,
);
if !native_tools {
system_prompt.push_str(&build_tool_instructions(&tools_registry));
diff --git a/src/agent/prompt.rs b/src/agent/prompt.rs
index 457f38f87..3e3e8d2f9 100644
--- a/src/agent/prompt.rs
+++ b/src/agent/prompt.rs
@@ -14,6 +14,7 @@ pub struct PromptContext<'a> {
pub model_name: &'a str,
pub tools: &'a [Box],
pub skills: &'a [Skill],
+ pub skills_prompt_mode: crate::config::SkillsPromptInjectionMode,
pub identity_config: Option<&'a IdentityConfig>,
pub dispatcher_instructions: &'a str,
}
@@ -153,9 +154,10 @@ impl PromptSection for SkillsSection {
}
fn build(&self, ctx: &PromptContext<'_>) -> Result {
- Ok(crate::skills::skills_to_prompt(
+ Ok(crate::skills::skills_to_prompt_with_mode(
ctx.skills,
ctx.workspace_dir,
+ ctx.skills_prompt_mode,
))
}
}
@@ -295,6 +297,7 @@ mod tests {
model_name: "test-model",
tools: &tools,
skills: &[],
+ skills_prompt_mode: crate::config::SkillsPromptInjectionMode::Full,
identity_config: Some(&identity_config),
dispatcher_instructions: "",
};
@@ -322,6 +325,7 @@ mod tests {
model_name: "test-model",
tools: &tools,
skills: &[],
+ skills_prompt_mode: crate::config::SkillsPromptInjectionMode::Full,
identity_config: None,
dispatcher_instructions: "instr",
};
@@ -356,6 +360,7 @@ mod tests {
model_name: "test-model",
tools: &tools,
skills: &skills,
+ skills_prompt_mode: crate::config::SkillsPromptInjectionMode::Full,
identity_config: None,
dispatcher_instructions: "",
};
@@ -368,6 +373,44 @@ mod tests {
assert!(output.contains("shell "));
}
+ #[test]
+ fn skills_section_compact_mode_omits_instructions_and_tools() {
+ let tools: Vec> = vec![];
+ let skills = vec![crate::skills::Skill {
+ name: "deploy".into(),
+ description: "Release safely".into(),
+ version: "1.0.0".into(),
+ author: None,
+ tags: vec![],
+ tools: vec![crate::skills::SkillTool {
+ name: "release_checklist".into(),
+ description: "Validate release readiness".into(),
+ kind: "shell".into(),
+ command: "echo ok".into(),
+ args: std::collections::HashMap::new(),
+ }],
+ prompts: vec!["Run smoke tests before deploy.".into()],
+ location: Some(Path::new("/tmp/workspace/skills/deploy/SKILL.md").to_path_buf()),
+ }];
+
+ let ctx = PromptContext {
+ workspace_dir: Path::new("/tmp/workspace"),
+ model_name: "test-model",
+ tools: &tools,
+ skills: &skills,
+ skills_prompt_mode: crate::config::SkillsPromptInjectionMode::Compact,
+ identity_config: None,
+ dispatcher_instructions: "",
+ };
+
+ let output = SkillsSection.build(&ctx).unwrap();
+        assert!(output.contains("<skills>"));
+        assert!(output.contains("<name>deploy</name>"));
+        assert!(output.contains("<location>skills/deploy/SKILL.md</location>"));
+        assert!(!output.contains("<instruction>Run smoke tests before deploy.</instruction>"));
+        assert!(!output.contains("<tools>"));
+ }
+
#[test]
fn datetime_section_includes_timestamp_and_timezone() {
let tools: Vec> = vec![];
@@ -376,6 +419,7 @@ mod tests {
model_name: "test-model",
tools: &tools,
skills: &[],
+ skills_prompt_mode: crate::config::SkillsPromptInjectionMode::Full,
identity_config: None,
dispatcher_instructions: "instr",
};
@@ -413,6 +457,7 @@ mod tests {
model_name: "test-model",
tools: &tools,
skills: &skills,
+ skills_prompt_mode: crate::config::SkillsPromptInjectionMode::Full,
identity_config: None,
dispatcher_instructions: "",
};
diff --git a/src/channels/mod.rs b/src/channels/mod.rs
index 0bfe322b2..94e2589c8 100644
--- a/src/channels/mod.rs
+++ b/src/channels/mod.rs
@@ -1796,6 +1796,7 @@ pub fn build_system_prompt(
identity_config,
bootstrap_max_chars,
false,
+ crate::config::SkillsPromptInjectionMode::Full,
)
}
@@ -1807,6 +1808,7 @@ pub fn build_system_prompt_with_mode(
identity_config: Option<&crate::config::IdentityConfig>,
bootstrap_max_chars: Option,
native_tools: bool,
+ skills_prompt_mode: crate::config::SkillsPromptInjectionMode,
) -> String {
use std::fmt::Write;
let mut prompt = String::with_capacity(8192);
@@ -1869,9 +1871,13 @@ pub fn build_system_prompt_with_mode(
- When in doubt, ask before acting externally.\n\n",
);
- // ── 3. Skills (full instructions + tool metadata) ───────────
+ // ── 3. Skills (full or compact, based on config) ─────────────
if !skills.is_empty() {
- prompt.push_str(&crate::skills::skills_to_prompt(skills, workspace_dir));
+ prompt.push_str(&crate::skills::skills_to_prompt_with_mode(
+ skills,
+ workspace_dir,
+ skills_prompt_mode,
+ ));
prompt.push_str("\n\n");
}
@@ -2626,6 +2632,7 @@ pub async fn start_channels(config: Config) -> Result<()> {
Some(&config.identity),
bootstrap_max_chars,
native_tools,
+ config.skills.prompt_injection_mode,
);
if !native_tools {
system_prompt.push_str(&build_tool_instructions(tools_registry.as_ref()));
@@ -4710,6 +4717,47 @@ BTC is currently around $65,000 based on latest tool output."#
assert!(!prompt.contains("loaded on demand"));
}
+ #[test]
+ fn prompt_skills_compact_mode_omits_instructions_and_tools() {
+ let ws = make_workspace();
+ let skills = vec![crate::skills::Skill {
+ name: "code-review".into(),
+ description: "Review code for bugs".into(),
+ version: "1.0.0".into(),
+ author: None,
+ tags: vec![],
+ tools: vec![crate::skills::SkillTool {
+ name: "lint".into(),
+ description: "Run static checks".into(),
+ kind: "shell".into(),
+ command: "cargo clippy".into(),
+ args: HashMap::new(),
+ }],
+ prompts: vec!["Always run cargo test before final response.".into()],
+ location: None,
+ }];
+
+ let prompt = build_system_prompt_with_mode(
+ ws.path(),
+ "model",
+ &[],
+ &skills,
+ None,
+ None,
+ false,
+ crate::config::SkillsPromptInjectionMode::Compact,
+ );
+
+        assert!(prompt.contains("<skills>"), "missing skills XML");
+        assert!(prompt.contains("<name>code-review</name>"));
+        assert!(prompt.contains("<location>skills/code-review/SKILL.md</location>"));
+        assert!(prompt.contains("loaded on demand"));
+        assert!(!prompt.contains("<instructions>"));
+        assert!(!prompt
+            .contains("<instruction>Always run cargo test before final response.</instruction>"));
+        assert!(!prompt.contains("<tools>"));
+ }
+
#[test]
fn prompt_skills_escape_reserved_xml_chars() {
let ws = make_workspace();
diff --git a/src/config/mod.rs b/src/config/mod.rs
index fbde82e96..c40053d45 100644
--- a/src/config/mod.rs
+++ b/src/config/mod.rs
@@ -12,8 +12,9 @@ pub use schema::{
NextcloudTalkConfig, ObservabilityConfig, PeripheralBoardConfig, PeripheralsConfig,
ProxyConfig, ProxyScope, QueryClassificationConfig, ReliabilityConfig, ResourceLimitsConfig,
RuntimeConfig, SandboxBackend, SandboxConfig, SchedulerConfig, SecretsConfig, SecurityConfig,
- SkillsConfig, SlackConfig, StorageConfig, StorageProviderConfig, StorageProviderSection,
- StreamMode, TelegramConfig, TunnelConfig, WebSearchConfig, WebhookConfig,
+ SkillsConfig, SkillsPromptInjectionMode, SlackConfig, StorageConfig, StorageProviderConfig,
+ StorageProviderSection, StreamMode, TelegramConfig, TunnelConfig, WebSearchConfig,
+ WebhookConfig,
};
#[cfg(test)]
diff --git a/src/config/schema.rs b/src/config/schema.rs
index 02056da0e..77e59e0cb 100644
--- a/src/config/schema.rs
+++ b/src/config/schema.rs
@@ -343,6 +343,25 @@ impl Default for AgentConfig {
}
}
+/// Skills loading configuration (`[skills]` section).
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, JsonSchema, Default)]
+#[serde(rename_all = "snake_case")]
+pub enum SkillsPromptInjectionMode {
+ /// Inline full skill instructions and tool metadata into the system prompt.
+ #[default]
+ Full,
+ /// Inline only compact skill metadata (name/description/location) and load details on demand.
+ Compact,
+}
+
+fn parse_skills_prompt_injection_mode(raw: &str) -> Option<SkillsPromptInjectionMode> {
+ match raw.trim().to_ascii_lowercase().as_str() {
+ "full" => Some(SkillsPromptInjectionMode::Full),
+ "compact" => Some(SkillsPromptInjectionMode::Compact),
+ _ => None,
+ }
+}
+
/// Skills loading configuration (`[skills]` section).
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
pub struct SkillsConfig {
@@ -354,6 +373,10 @@ pub struct SkillsConfig {
/// If unset, defaults to `$HOME/open-skills` when enabled.
#[serde(default)]
pub open_skills_dir: Option,
+ /// Controls how skills are injected into the system prompt.
+ /// `full` preserves legacy behavior. `compact` keeps context small and loads skills on demand.
+ #[serde(default)]
+ pub prompt_injection_mode: SkillsPromptInjectionMode,
}
impl Default for SkillsConfig {
@@ -361,6 +384,7 @@ impl Default for SkillsConfig {
Self {
open_skills_enabled: false,
open_skills_dir: None,
+ prompt_injection_mode: SkillsPromptInjectionMode::default(),
}
}
}
@@ -3335,6 +3359,19 @@ impl Config {
}
}
+ // Skills prompt mode override: ZEROCLAW_SKILLS_PROMPT_MODE
+ if let Ok(mode) = std::env::var("ZEROCLAW_SKILLS_PROMPT_MODE") {
+ if !mode.trim().is_empty() {
+ if let Some(parsed) = parse_skills_prompt_injection_mode(&mode) {
+ self.skills.prompt_injection_mode = parsed;
+ } else {
+ tracing::warn!(
+ "Ignoring invalid ZEROCLAW_SKILLS_PROMPT_MODE (valid: full|compact)"
+ );
+ }
+ }
+ }
+
// Gateway port: ZEROCLAW_GATEWAY_PORT or PORT
if let Ok(port_str) =
std::env::var("ZEROCLAW_GATEWAY_PORT").or_else(|_| std::env::var("PORT"))
@@ -3675,6 +3712,10 @@ mod tests {
assert!((c.default_temperature - 0.7).abs() < f64::EPSILON);
assert!(c.api_key.is_none());
assert!(!c.skills.open_skills_enabled);
+ assert_eq!(
+ c.skills.prompt_injection_mode,
+ SkillsPromptInjectionMode::Full
+ );
assert!(c.workspace_dir.to_string_lossy().contains("workspace"));
assert!(c.config_path.to_string_lossy().contains("config.toml"));
}
@@ -5030,9 +5071,14 @@ default_temperature = 0.7
let mut config = Config::default();
assert!(!config.skills.open_skills_enabled);
assert!(config.skills.open_skills_dir.is_none());
+ assert_eq!(
+ config.skills.prompt_injection_mode,
+ SkillsPromptInjectionMode::Full
+ );
std::env::set_var("ZEROCLAW_OPEN_SKILLS_ENABLED", "true");
std::env::set_var("ZEROCLAW_OPEN_SKILLS_DIR", "/tmp/open-skills");
+ std::env::set_var("ZEROCLAW_SKILLS_PROMPT_MODE", "compact");
config.apply_env_overrides();
assert!(config.skills.open_skills_enabled);
@@ -5040,9 +5086,14 @@ default_temperature = 0.7
config.skills.open_skills_dir.as_deref(),
Some("/tmp/open-skills")
);
+ assert_eq!(
+ config.skills.prompt_injection_mode,
+ SkillsPromptInjectionMode::Compact
+ );
std::env::remove_var("ZEROCLAW_OPEN_SKILLS_ENABLED");
std::env::remove_var("ZEROCLAW_OPEN_SKILLS_DIR");
+ std::env::remove_var("ZEROCLAW_SKILLS_PROMPT_MODE");
}
#[test]
@@ -5050,12 +5101,19 @@ default_temperature = 0.7
let _env_guard = env_override_lock().await;
let mut config = Config::default();
config.skills.open_skills_enabled = true;
+ config.skills.prompt_injection_mode = SkillsPromptInjectionMode::Compact;
std::env::set_var("ZEROCLAW_OPEN_SKILLS_ENABLED", "maybe");
+ std::env::set_var("ZEROCLAW_SKILLS_PROMPT_MODE", "invalid");
config.apply_env_overrides();
assert!(config.skills.open_skills_enabled);
+ assert_eq!(
+ config.skills.prompt_injection_mode,
+ SkillsPromptInjectionMode::Compact
+ );
std::env::remove_var("ZEROCLAW_OPEN_SKILLS_ENABLED");
+ std::env::remove_var("ZEROCLAW_SKILLS_PROMPT_MODE");
}
#[test]
diff --git a/src/skills/mod.rs b/src/skills/mod.rs
index bca6fffb9..4931c6732 100644
--- a/src/skills/mod.rs
+++ b/src/skills/mod.rs
@@ -445,52 +445,92 @@ fn write_xml_text_element(out: &mut String, indent: usize, tag: &str, value: &st
out.push_str(">\n");
}
+fn resolve_skill_location(skill: &Skill, workspace_dir: &Path) -> PathBuf {
+ skill.location.clone().unwrap_or_else(|| {
+ workspace_dir
+ .join("skills")
+ .join(&skill.name)
+ .join("SKILL.md")
+ })
+}
+
+fn render_skill_location(skill: &Skill, workspace_dir: &Path, prefer_relative: bool) -> String {
+ let location = resolve_skill_location(skill, workspace_dir);
+ if prefer_relative {
+ if let Ok(relative) = location.strip_prefix(workspace_dir) {
+ return relative.display().to_string();
+ }
+ }
+ location.display().to_string()
+}
+
/// Build the "Available Skills" system prompt section with full skill instructions.
pub fn skills_to_prompt(skills: &[Skill], workspace_dir: &Path) -> String {
+ skills_to_prompt_with_mode(
+ skills,
+ workspace_dir,
+ crate::config::SkillsPromptInjectionMode::Full,
+ )
+}
+
+/// Build the "Available Skills" system prompt section with configurable verbosity.
+pub fn skills_to_prompt_with_mode(
+ skills: &[Skill],
+ workspace_dir: &Path,
+ mode: crate::config::SkillsPromptInjectionMode,
+) -> String {
use std::fmt::Write;
if skills.is_empty() {
return String::new();
}
- let mut prompt = String::from(
- "## Available Skills\n\n\
- Skill instructions and tool metadata are preloaded below.\n\
- Follow these instructions directly; do not read skill files at runtime unless the user asks.\n\n\
-         <skills>\n",
- );
+ let mut prompt = match mode {
+ crate::config::SkillsPromptInjectionMode::Full => String::from(
+ "## Available Skills\n\n\
+ Skill instructions and tool metadata are preloaded below.\n\
+ Follow these instructions directly; do not read skill files at runtime unless the user asks.\n\n\
+             <skills>\n",
+ ),
+ crate::config::SkillsPromptInjectionMode::Compact => String::from(
+ "## Available Skills\n\n\
+ Skill summaries are preloaded below to keep context compact.\n\
+ Skill instructions are loaded on demand: read the skill file in `location` only when needed.\n\n\
+             <skills>\n",
+ ),
+ };
for skill in skills {
+        let _ = writeln!(prompt, "  <skill>");
write_xml_text_element(&mut prompt, 4, "name", &skill.name);
write_xml_text_element(&mut prompt, 4, "description", &skill.description);
+ let location = render_skill_location(
+ skill,
+ workspace_dir,
+ matches!(mode, crate::config::SkillsPromptInjectionMode::Compact),
+ );
+ write_xml_text_element(&mut prompt, 4, "location", &location);
- let location = skill.location.clone().unwrap_or_else(|| {
- workspace_dir
- .join("skills")
- .join(&skill.name)
- .join("SKILL.md")
- });
- write_xml_text_element(&mut prompt, 4, "location", &location.display().to_string());
-
- if !skill.prompts.is_empty() {
- let _ = writeln!(prompt, " ");
- for instruction in &skill.prompts {
- write_xml_text_element(&mut prompt, 6, "instruction", instruction);
+        if matches!(mode, crate::config::SkillsPromptInjectionMode::Full) {
+            if !skill.prompts.is_empty() {
+                let _ = writeln!(prompt, "    <instructions>");
+                for instruction in &skill.prompts {
+                    write_xml_text_element(&mut prompt, 6, "instruction", instruction);
+                }
+                let _ = writeln!(prompt, "    </instructions>");
+            }
- let _ = writeln!(prompt, " ");
- }
- if !skill.tools.is_empty() {
- let _ = writeln!(prompt, " ");
- for tool in &skill.tools {
- let _ = writeln!(prompt, " ");
- write_xml_text_element(&mut prompt, 8, "name", &tool.name);
- write_xml_text_element(&mut prompt, 8, "description", &tool.description);
- write_xml_text_element(&mut prompt, 8, "kind", &tool.kind);
- let _ = writeln!(prompt, " ");
+            if !skill.tools.is_empty() {
+                let _ = writeln!(prompt, "    <tools>");
+                for tool in &skill.tools {
+                    let _ = writeln!(prompt, "      <tool>");
+                    write_xml_text_element(&mut prompt, 8, "name", &tool.name);
+                    write_xml_text_element(&mut prompt, 8, "description", &tool.description);
+                    write_xml_text_element(&mut prompt, 8, "kind", &tool.kind);
+                    let _ = writeln!(prompt, "      </tool>");
+                }
+                let _ = writeln!(prompt, "    </tools>");
}
- let _ = writeln!(prompt, " ");
}
let _ = writeln!(prompt, " ");
@@ -889,6 +929,39 @@ command = "echo hello"
assert!(prompt.contains("Do the thing. "));
}
+ #[test]
+ fn skills_to_prompt_compact_mode_omits_instructions_and_tools() {
+ let skills = vec![Skill {
+ name: "test".to_string(),
+ description: "A test".to_string(),
+ version: "1.0.0".to_string(),
+ author: None,
+ tags: vec![],
+ tools: vec![SkillTool {
+ name: "run".to_string(),
+ description: "Run task".to_string(),
+ kind: "shell".to_string(),
+ command: "echo hi".to_string(),
+ args: HashMap::new(),
+ }],
+ prompts: vec!["Do the thing.".to_string()],
+ location: Some(PathBuf::from("/tmp/workspace/skills/test/SKILL.md")),
+ }];
+ let prompt = skills_to_prompt_with_mode(
+ &skills,
+ Path::new("/tmp/workspace"),
+ crate::config::SkillsPromptInjectionMode::Compact,
+ );
+
+        assert!(prompt.contains("<skills>"));
+        assert!(prompt.contains("<name>test</name>"));
+        assert!(prompt.contains("<location>skills/test/SKILL.md</location>"));
+        assert!(prompt.contains("loaded on demand"));
+        assert!(!prompt.contains("<instructions>"));
+        assert!(!prompt.contains("<instruction>Do the thing.</instruction>"));
+        assert!(!prompt.contains("<tools>"));
+ }
+
#[test]
fn init_skills_creates_readme() {
let dir = tempfile::tempdir().unwrap();
From 63d002f22a883832ff8fa3cbc846433fcf67c804 Mon Sep 17 00:00:00 2001
From: Chummy
Date: Fri, 20 Feb 2026 23:32:31 +0800
Subject: [PATCH 063/116] fix(ollama): stabilize cloud routing and onboarding
model selection
---
docs/getting-started/README.md | 1 +
docs/providers-reference.md | 8 ++
src/config/schema.rs | 86 +++++++++++++++++++
src/onboard/wizard.rs | 148 ++++++++++++++++++++++++++-------
src/providers/ollama.rs | 32 ++++++-
5 files changed, 241 insertions(+), 34 deletions(-)
diff --git a/docs/getting-started/README.md b/docs/getting-started/README.md
index 3c7e91c60..cc57a3969 100644
--- a/docs/getting-started/README.md
+++ b/docs/getting-started/README.md
@@ -21,6 +21,7 @@ For first-time setup and quick orientation.
- Quick onboarding: `zeroclaw onboard --api-key "sk-..." --provider openrouter`
- Interactive onboarding: `zeroclaw onboard --interactive`
+- Ollama cloud models (`:cloud`) require a remote `api_url` and API key (for example `api_url = "https://ollama.com"`).
- Validate environment: `zeroclaw status` + `zeroclaw doctor`
## Next
diff --git a/docs/providers-reference.md b/docs/providers-reference.md
index 420d61ec1..0f2655046 100644
--- a/docs/providers-reference.md
+++ b/docs/providers-reference.md
@@ -71,6 +71,14 @@ credential is not reused for fallback providers.
- After multimodal normalization, ZeroClaw sends image payloads through Ollama's native `messages[].images` field.
- If a non-vision provider is selected, ZeroClaw returns a structured capability error instead of silently ignoring images.
+### Ollama Cloud Routing Notes
+
+- Use `:cloud` model suffix only with a remote Ollama endpoint.
+- Remote endpoint should be set in `api_url` (example: `https://ollama.com`).
+- ZeroClaw normalizes a trailing `/api` in `api_url` automatically.
+- If `default_model` ends with `:cloud` while `api_url` is local or unset, config validation fails early with an actionable error.
+- Local Ollama model discovery intentionally excludes `:cloud` entries to avoid selecting cloud-only models in local mode.
+
### llama.cpp Server Notes
- Provider ID: `llamacpp` (alias: `llama.cpp`)
diff --git a/src/config/schema.rs b/src/config/schema.rs
index 77e59e0cb..cb7ad82f1 100644
--- a/src/config/schema.rs
+++ b/src/config/schema.rs
@@ -3115,6 +3115,34 @@ fn config_dir_creation_error(path: &Path) -> String {
)
}
+fn is_local_ollama_endpoint(api_url: Option<&str>) -> bool {
+ let Some(raw) = api_url.map(str::trim).filter(|value| !value.is_empty()) else {
+ return true;
+ };
+
+ reqwest::Url::parse(raw)
+ .ok()
+ .and_then(|url| url.host_str().map(|host| host.to_ascii_lowercase()))
+ .is_some_and(|host| matches!(host.as_str(), "localhost" | "127.0.0.1" | "::1" | "0.0.0.0"))
+}
+
+fn has_ollama_cloud_credential(config_api_key: Option<&str>) -> bool {
+ let config_key_present = config_api_key
+ .map(str::trim)
+ .is_some_and(|value| !value.is_empty());
+ if config_key_present {
+ return true;
+ }
+
+ ["OLLAMA_API_KEY", "ZEROCLAW_API_KEY", "API_KEY"]
+ .iter()
+ .any(|name| {
+ std::env::var(name)
+ .ok()
+ .is_some_and(|value| !value.trim().is_empty())
+ })
+}
+
impl Config {
pub async fn load_or_init() -> Result {
let (default_zeroclaw_dir, default_workspace_dir) = default_config_and_workspace_dirs()?;
@@ -3271,6 +3299,29 @@ impl Config {
}
}
+ // Ollama cloud-routing safety checks
+ if self
+ .default_provider
+ .as_deref()
+ .is_some_and(|provider| provider.trim().eq_ignore_ascii_case("ollama"))
+ && self
+ .default_model
+ .as_deref()
+ .is_some_and(|model| model.trim().ends_with(":cloud"))
+ {
+ if is_local_ollama_endpoint(self.api_url.as_deref()) {
+ anyhow::bail!(
+ "default_model uses ':cloud' with provider 'ollama', but api_url is local or unset. Set api_url to a remote Ollama endpoint (for example https://ollama.com)."
+ );
+ }
+
+ if !has_ollama_cloud_credential(self.api_key.as_deref()) {
+ anyhow::bail!(
+ "default_model uses ':cloud' with provider 'ollama', but no API key is configured. Set api_key or OLLAMA_API_KEY."
+ );
+ }
+ }
+
// Proxy (delegate to existing validation)
self.proxy.validate()?;
@@ -5207,6 +5258,41 @@ default_temperature = 0.7
std::env::remove_var("ZEROCLAW_MODEL");
}
+ #[test]
+ async fn validate_ollama_cloud_model_requires_remote_api_url() {
+ let _env_guard = env_override_lock().await;
+ let config = Config {
+ default_provider: Some("ollama".to_string()),
+ default_model: Some("glm-5:cloud".to_string()),
+ api_url: None,
+ api_key: Some("ollama-key".to_string()),
+ ..Config::default()
+ };
+
+ let error = config.validate().expect_err("expected validation to fail");
+ assert!(error.to_string().contains(
+ "default_model uses ':cloud' with provider 'ollama', but api_url is local or unset"
+ ));
+ }
+
+ #[test]
+ async fn validate_ollama_cloud_model_accepts_remote_endpoint_and_env_key() {
+ let _env_guard = env_override_lock().await;
+ let config = Config {
+ default_provider: Some("ollama".to_string()),
+ default_model: Some("glm-5:cloud".to_string()),
+ api_url: Some("https://ollama.com/api".to_string()),
+ api_key: None,
+ ..Config::default()
+ };
+
+ std::env::set_var("OLLAMA_API_KEY", "ollama-env-key");
+ let result = config.validate();
+ std::env::remove_var("OLLAMA_API_KEY");
+
+ assert!(result.is_ok(), "expected validation to pass: {result:?}");
+ }
+
#[test]
async fn env_override_model_fallback() {
let _env_guard = env_override_lock().await;
diff --git a/src/onboard/wizard.rs b/src/onboard/wizard.rs
index dc77261c1..92050e0b8 100644
--- a/src/onboard/wizard.rs
+++ b/src/onboard/wizard.rs
@@ -1211,7 +1211,42 @@ fn fetch_ollama_models() -> Result<Vec<String>> {
Ok(parse_ollama_model_ids(&payload))
}
-fn resolve_live_models_endpoint(provider_name: &str, provider_api_url: Option<&str>) -> Option<String> {
+fn normalize_ollama_endpoint_url(raw_url: &str) -> String {
+ let trimmed = raw_url.trim().trim_end_matches('/');
+ if trimmed.is_empty() {
+ return String::new();
+ }
+ trimmed
+ .strip_suffix("/api")
+ .unwrap_or(trimmed)
+ .trim_end_matches('/')
+ .to_string()
+}
+
+fn ollama_endpoint_is_local(endpoint_url: &str) -> bool {
+ reqwest::Url::parse(endpoint_url)
+ .ok()
+ .and_then(|url| url.host_str().map(|host| host.to_ascii_lowercase()))
+ .is_some_and(|host| matches!(host.as_str(), "localhost" | "127.0.0.1" | "::1" | "0.0.0.0"))
+}
+
+fn ollama_uses_remote_endpoint(provider_api_url: Option<&str>) -> bool {
+ let Some(endpoint) = provider_api_url else {
+ return false;
+ };
+
+ let normalized = normalize_ollama_endpoint_url(endpoint);
+ if normalized.is_empty() {
+ return false;
+ }
+
+ !ollama_endpoint_is_local(&normalized)
+}
+
+fn resolve_live_models_endpoint(
+ provider_name: &str,
+ provider_api_url: Option<&str>,
+) -> Option<String> {
if canonical_provider_name(provider_name) == "llamacpp" {
if let Some(url) = provider_api_url
.map(str::trim)
@@ -1235,21 +1270,26 @@ fn fetch_live_models_for_provider(
) -> Result> {
let requested_provider_name = provider_name;
let provider_name = canonical_provider_name(provider_name);
+ let ollama_remote = provider_name == "ollama" && ollama_uses_remote_endpoint(provider_api_url);
let api_key = if api_key.trim().is_empty() {
- std::env::var(provider_env_var(provider_name))
- .ok()
- .or_else(|| {
- // Anthropic also accepts OAuth setup-tokens via ANTHROPIC_OAUTH_TOKEN
- if provider_name == "anthropic" {
- std::env::var("ANTHROPIC_OAUTH_TOKEN").ok()
- } else if provider_name == "minimax" {
- std::env::var("MINIMAX_OAUTH_TOKEN").ok()
- } else {
- None
- }
- })
- .map(|value| value.trim().to_string())
- .filter(|value| !value.is_empty())
+ if provider_name == "ollama" && !ollama_remote {
+ None
+ } else {
+ std::env::var(provider_env_var(provider_name))
+ .ok()
+ .or_else(|| {
+ // Anthropic also accepts OAuth setup-tokens via ANTHROPIC_OAUTH_TOKEN
+ if provider_name == "anthropic" {
+ std::env::var("ANTHROPIC_OAUTH_TOKEN").ok()
+ } else if provider_name == "minimax" {
+ std::env::var("MINIMAX_OAUTH_TOKEN").ok()
+ } else {
+ None
+ }
+ })
+ .map(|value| value.trim().to_string())
+ .filter(|value| !value.is_empty())
+ }
} else {
Some(api_key.trim().to_string())
};
@@ -1259,22 +1299,27 @@ fn fetch_live_models_for_provider(
"anthropic" => fetch_anthropic_models(api_key.as_deref())?,
"gemini" => fetch_gemini_models(api_key.as_deref())?,
"ollama" => {
- if api_key.as_deref().map_or(true, |k| k.trim().is_empty()) {
- // Key is None or empty, assume local Ollama
- fetch_ollama_models()?
- } else {
- // Key is present, assume Ollama Cloud and return hardcoded list
+ if ollama_remote {
+ // Remote Ollama endpoints can serve cloud-routed models.
+ // Keep this curated list aligned with current Ollama cloud catalog.
vec![
"glm-5:cloud".to_string(),
"glm-4.7:cloud".to_string(),
- "gpt-oss:cloud".to_string(),
+ "gpt-oss:20b:cloud".to_string(),
+ "gpt-oss:120b:cloud".to_string(),
"gemini-3-flash-preview:cloud".to_string(),
- "qwen2.5-coder:1.5b".to_string(),
- "qwen2.5-coder:3b".to_string(),
- "qwen2.5:cloud".to_string(),
+ "qwen3-coder-next:cloud".to_string(),
+ "qwen3-coder:480b:cloud".to_string(),
+ "kimi-k2.5:cloud".to_string(),
"minimax-m2.5:cloud".to_string(),
- "deepseek-v3.1:cloud".to_string(),
+ "deepseek-v3.1:671b:cloud".to_string(),
]
+ } else {
+ // Local endpoints should not surface cloud-only suffixes.
+ fetch_ollama_models()?
+ .into_iter()
+ .filter(|model_id| !model_id.ends_with(":cloud"))
+ .collect()
}
}
_ => {
@@ -1792,10 +1837,15 @@ fn setup_provider(workspace_dir: &Path) -> Result<(String, String, String, Optio
.default("https://ollama.com".into())
.interact_text()?;
- let normalized_url = raw_url.trim().trim_end_matches('/').to_string();
+ let normalized_url = normalize_ollama_endpoint_url(&raw_url);
if normalized_url.is_empty() {
anyhow::bail!("Remote Ollama endpoint URL cannot be empty.");
}
+ let parsed = reqwest::Url::parse(&normalized_url)
+ .context("Remote Ollama endpoint URL must be a valid URL")?;
+ if !matches!(parsed.scheme(), "http" | "https") {
+ anyhow::bail!("Remote Ollama endpoint URL must use http:// or https://");
+ }
provider_api_url = Some(normalized_url.clone());
@@ -1803,6 +1853,9 @@ fn setup_provider(workspace_dir: &Path) -> Result<(String, String, String, Optio
"Remote endpoint configured: {}",
style(&normalized_url).cyan()
));
+ if raw_url.trim().trim_end_matches('/') != normalized_url {
+ print_bullet("Normalized endpoint to base URL (removed trailing /api).");
+ }
print_bullet(&format!(
"If you use cloud-only models, append {} to the model ID.",
style(":cloud").yellow()
@@ -2068,16 +2121,27 @@ fn setup_provider(workspace_dir: &Path) -> Result<(String, String, String, Optio
let mut live_options: Option<Vec<String>> = None;
if supports_live_model_fetch(provider_name) {
- let can_fetch_without_key = allows_unauthenticated_model_fetch(provider_name);
+ let ollama_remote = canonical_provider == "ollama"
+ && ollama_uses_remote_endpoint(provider_api_url.as_deref());
+ let can_fetch_without_key =
+ allows_unauthenticated_model_fetch(provider_name) && !ollama_remote;
let has_api_key = !api_key.trim().is_empty()
- || std::env::var(provider_env_var(provider_name))
- .ok()
- .is_some_and(|value| !value.trim().is_empty())
+ || ((canonical_provider != "ollama" || ollama_remote)
+ && std::env::var(provider_env_var(provider_name))
+ .ok()
+ .is_some_and(|value| !value.trim().is_empty()))
|| (provider_name == "minimax"
&& std::env::var("MINIMAX_OAUTH_TOKEN")
.ok()
.is_some_and(|value| !value.trim().is_empty()));
+ if canonical_provider == "ollama" && ollama_remote && !has_api_key {
+ print_bullet(&format!(
+ "Remote Ollama live-model refresh needs an API key ({}); using curated models.",
+ style("OLLAMA_API_KEY").yellow()
+ ));
+ }
+
if can_fetch_without_key || has_api_key {
if let Some(cached) =
load_cached_models_for_provider(workspace_dir, provider_name, MODEL_CACHE_TTL_SECS)?
@@ -5713,6 +5777,30 @@ mod tests {
assert_eq!(resolve_live_models_endpoint("unknown-provider", None), None);
}
+ #[test]
+ fn normalize_ollama_endpoint_url_strips_api_suffix_and_trailing_slash() {
+ assert_eq!(
+ normalize_ollama_endpoint_url(" https://ollama.com/api/ "),
+ "https://ollama.com".to_string()
+ );
+ assert_eq!(
+ normalize_ollama_endpoint_url("https://ollama.com/"),
+ "https://ollama.com".to_string()
+ );
+ assert_eq!(normalize_ollama_endpoint_url(""), "");
+ }
+
+ #[test]
+ fn ollama_uses_remote_endpoint_distinguishes_local_and_remote_urls() {
+ assert!(!ollama_uses_remote_endpoint(None));
+ assert!(!ollama_uses_remote_endpoint(Some("http://localhost:11434")));
+ assert!(!ollama_uses_remote_endpoint(Some(
+ "http://127.0.0.1:11434/api"
+ )));
+ assert!(ollama_uses_remote_endpoint(Some("https://ollama.com")));
+ assert!(ollama_uses_remote_endpoint(Some("https://ollama.com/api")));
+ }
+
#[test]
fn parse_openai_model_ids_supports_data_array_payload() {
let payload = json!({
diff --git a/src/providers/ollama.rs b/src/providers/ollama.rs
index 4131d290d..8ba70318f 100644
--- a/src/providers/ollama.rs
+++ b/src/providers/ollama.rs
@@ -92,6 +92,19 @@ struct OllamaFunction {
// ─── Implementation ───────────────────────────────────────────────────────────
impl OllamaProvider {
+ fn normalize_base_url(raw_url: &str) -> String {
+ let trimmed = raw_url.trim().trim_end_matches('/');
+ if trimmed.is_empty() {
+ return String::new();
+ }
+
+ trimmed
+ .strip_suffix("/api")
+ .unwrap_or(trimmed)
+ .trim_end_matches('/')
+ .to_string()
+ }
+
pub fn new(base_url: Option<&str>, api_key: Option<&str>) -> Self {
Self::new_with_reasoning(base_url, api_key, None)
}
@@ -107,10 +120,7 @@ impl OllamaProvider {
});
Self {
- base_url: base_url
- .unwrap_or("http://localhost:11434")
- .trim_end_matches('/')
- .to_string(),
+ base_url: Self::normalize_base_url(base_url.unwrap_or("http://localhost:11434")),
api_key,
reasoning_enabled,
}
@@ -673,6 +683,12 @@ mod tests {
assert_eq!(p.base_url, "http://myserver:11434");
}
+ #[test]
+ fn custom_url_strips_api_suffix() {
+ let p = OllamaProvider::new(Some("https://ollama.com/api/"), None);
+ assert_eq!(p.base_url, "https://ollama.com");
+ }
+
#[test]
fn empty_url_uses_empty() {
let p = OllamaProvider::new(Some(""), None);
@@ -716,6 +732,14 @@ mod tests {
assert!(should_auth);
}
+ #[test]
+ fn remote_endpoint_with_api_suffix_still_allows_cloud_models() {
+ let p = OllamaProvider::new(Some("https://ollama.com/api"), Some("ollama-key"));
+ let (model, should_auth) = p.resolve_request_details("qwen3:cloud").unwrap();
+ assert_eq!(model, "qwen3");
+ assert!(should_auth);
+ }
+
#[test]
fn local_endpoint_auth_disabled_even_with_key() {
let p = OllamaProvider::new(None, Some("ollama-key"));
From 04640a963e6392141d85584a798579273cb067f1 Mon Sep 17 00:00:00 2001
From: reidliu41
Date: Thu, 19 Feb 2026 16:16:13 +0800
Subject: [PATCH 064/116] feat(provider): add Doubao (Volcengine Ark) provider
support
---
docs/providers-reference.md | 1 +
src/providers/mod.rs | 35 +++++++++++++++++++++++++++++++++++
2 files changed, 36 insertions(+)
diff --git a/docs/providers-reference.md b/docs/providers-reference.md
index 0f2655046..42c3f3778 100644
--- a/docs/providers-reference.md
+++ b/docs/providers-reference.md
@@ -43,6 +43,7 @@ credential is not reused for fallback providers.
| `minimax` | `minimax-intl`, `minimax-io`, `minimax-global`, `minimax-cn`, `minimaxi`, `minimax-oauth`, `minimax-oauth-cn`, `minimax-portal`, `minimax-portal-cn` | No | `MINIMAX_OAUTH_TOKEN`, `MINIMAX_API_KEY` |
| `bedrock` | `aws-bedrock` | No | `AWS_ACCESS_KEY_ID` + `AWS_SECRET_ACCESS_KEY` (optional: `AWS_REGION`) |
| `qianfan` | `baidu` | No | `QIANFAN_API_KEY` |
+| `doubao` | `volcengine`, `ark`, `doubao-cn` | No | `ARK_API_KEY`, `DOUBAO_API_KEY` |
| `qwen` | `dashscope`, `qwen-intl`, `dashscope-intl`, `qwen-us`, `dashscope-us`, `qwen-code`, `qwen-oauth`, `qwen_oauth` | No | `QWEN_OAUTH_TOKEN`, `DASHSCOPE_API_KEY` |
| `groq` | — | No | `GROQ_API_KEY` |
| `mistral` | — | No | `MISTRAL_API_KEY` |
diff --git a/src/providers/mod.rs b/src/providers/mod.rs
index 85f05f030..4175e79eb 100644
--- a/src/providers/mod.rs
+++ b/src/providers/mod.rs
@@ -166,6 +166,10 @@ pub(crate) fn is_qianfan_alias(name: &str) -> bool {
matches!(name, "qianfan" | "baidu")
}
+pub(crate) fn is_doubao_alias(name: &str) -> bool {
+ matches!(name, "doubao" | "volcengine" | "ark" | "doubao-cn")
+}
+
#[derive(Clone, Copy, Debug)]
enum MinimaxOauthRegion {
Global,
@@ -615,6 +619,8 @@ pub(crate) fn canonical_china_provider_name(name: &str) -> Option<&'static str>
Some("zai")
} else if is_qianfan_alias(name) {
Some("qianfan")
+ } else if is_doubao_alias(name) {
+ Some("doubao")
} else {
None
}
@@ -833,6 +839,7 @@ fn resolve_provider_credential(name: &str, credential_override: Option<&str>) ->
// not a single API key. Credential resolution happens inside BedrockProvider.
"bedrock" | "aws-bedrock" => return None,
name if is_qianfan_alias(name) => vec!["QIANFAN_API_KEY"],
+ name if is_doubao_alias(name) => vec!["ARK_API_KEY", "DOUBAO_API_KEY"],
name if is_qwen_alias(name) => vec!["DASHSCOPE_API_KEY"],
name if is_zai_alias(name) => vec!["ZAI_API_KEY"],
"nvidia" | "nvidia-nim" | "build.nvidia.com" => vec!["NVIDIA_API_KEY"],
@@ -1040,6 +1047,12 @@ fn create_provider_with_url_and_options(
name if is_qianfan_alias(name) => Ok(Box::new(OpenAiCompatibleProvider::new(
"Qianfan", "https://aip.baidubce.com", key, AuthStyle::Bearer,
))),
+ name if is_doubao_alias(name) => Ok(Box::new(OpenAiCompatibleProvider::new(
+ "Doubao",
+ "https://ark.cn-beijing.volces.com/api/v3",
+ key,
+ AuthStyle::Bearer,
+ ))),
name if qwen_base_url(name).is_some() => Ok(Box::new(OpenAiCompatibleProvider::new(
"Qwen",
qwen_base_url(name).expect("checked in guard"),
@@ -1467,6 +1480,12 @@ pub fn list_providers() -> Vec {
aliases: &["baidu"],
local: false,
},
+ ProviderInfo {
+ name: "doubao",
+ display_name: "Doubao (Volcengine)",
+ aliases: &["volcengine", "ark", "doubao-cn"],
+ local: false,
+ },
ProviderInfo {
name: "qwen",
display_name: "Qwen (DashScope / Qwen Code OAuth)",
@@ -1762,12 +1781,17 @@ mod tests {
assert!(is_zai_alias("zai-cn"));
assert!(is_qianfan_alias("qianfan"));
assert!(is_qianfan_alias("baidu"));
+ assert!(is_doubao_alias("doubao"));
+ assert!(is_doubao_alias("volcengine"));
+ assert!(is_doubao_alias("ark"));
+ assert!(is_doubao_alias("doubao-cn"));
assert!(!is_moonshot_alias("openrouter"));
assert!(!is_glm_alias("openai"));
assert!(!is_qwen_alias("gemini"));
assert!(!is_zai_alias("anthropic"));
assert!(!is_qianfan_alias("cohere"));
+ assert!(!is_doubao_alias("deepseek"));
}
#[test]
@@ -1785,6 +1809,8 @@ mod tests {
assert_eq!(canonical_china_provider_name("z.ai-cn"), Some("zai"));
assert_eq!(canonical_china_provider_name("qianfan"), Some("qianfan"));
assert_eq!(canonical_china_provider_name("baidu"), Some("qianfan"));
+ assert_eq!(canonical_china_provider_name("doubao"), Some("doubao"));
+ assert_eq!(canonical_china_provider_name("volcengine"), Some("doubao"));
assert_eq!(canonical_china_provider_name("openai"), None);
}
@@ -1958,6 +1984,14 @@ mod tests {
assert!(create_provider("baidu", Some("key")).is_ok());
}
+ #[test]
+ fn factory_doubao() {
+ assert!(create_provider("doubao", Some("key")).is_ok());
+ assert!(create_provider("volcengine", Some("key")).is_ok());
+ assert!(create_provider("ark", Some("key")).is_ok());
+ assert!(create_provider("doubao-cn", Some("key")).is_ok());
+ }
+
#[test]
fn factory_qwen() {
assert!(create_provider("qwen", Some("key")).is_ok());
@@ -2330,6 +2364,7 @@ mod tests {
"minimax-cn",
"bedrock",
"qianfan",
+ "doubao",
"qwen",
"qwen-intl",
"qwen-cn",
From ae7f297d17c37694fc641c454663629c65de9016 Mon Sep 17 00:00:00 2001
From: Alex Gorevski
Date: Fri, 20 Feb 2026 09:20:25 -0800
Subject: [PATCH 065/116] feat(ci): build with release-fast
---
.github/workflows/pub-release.yml | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/.github/workflows/pub-release.yml b/.github/workflows/pub-release.yml
index 05812d4bb..17b314e89 100644
--- a/.github/workflows/pub-release.yml
+++ b/.github/workflows/pub-release.yml
@@ -202,12 +202,12 @@ jobs:
echo "Using linker override: $LINKER_ENV=$LINKER"
export "$LINKER_ENV=$LINKER"
fi
- cargo build --release --locked --target ${{ matrix.target }}
+ cargo build --profile release-fast --locked --target ${{ matrix.target }}
- name: Check binary size (Unix)
if: runner.os != 'Windows'
run: |
- BIN="target/${{ matrix.target }}/release/${{ matrix.artifact }}"
+ BIN="target/${{ matrix.target }}/release-fast/${{ matrix.artifact }}"
if [ ! -f "$BIN" ]; then
echo "::error::Expected binary not found: $BIN"
exit 1
@@ -231,13 +231,13 @@ jobs:
- name: Package (Unix)
if: runner.os != 'Windows'
run: |
- cd target/${{ matrix.target }}/release
+ cd target/${{ matrix.target }}/release-fast
tar czf ../../../zeroclaw-${{ matrix.target }}.${{ matrix.archive_ext }} ${{ matrix.artifact }}
- name: Package (Windows)
if: runner.os == 'Windows'
run: |
- cd target/${{ matrix.target }}/release
+ cd target/${{ matrix.target }}/release-fast
7z a ../../../zeroclaw-${{ matrix.target }}.${{ matrix.archive_ext }} ${{ matrix.artifact }}
- name: Upload artifact
From 4fd41d5f2c12cbef9f389b9aef9f045c704da2d2 Mon Sep 17 00:00:00 2001
From: Vernon Stinebaker
Date: Fri, 20 Feb 2026 14:45:24 +0800
Subject: [PATCH 066/116] fix(provider): add chat() override to
ReliableProvider for native tool calling
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
ReliableProvider was missing a chat() override, causing it to fall through
to the default Provider::chat() trait implementation. The default
implementation delegates to chat_with_history() which returns a plain
String and wraps it in ChatResponse with tool_calls: Vec::new() — so
native tool calling was completely broken through the retry/failover
wrapper even though the underlying provider properly supports it.
Changes:
- Add chat() with full retry/backoff/failover logic matching existing
chat_with_system(), chat_with_history(), and chat_with_tools() overrides
- Include context_window_exceeded early-exit matching other method patterns
- Add 7 focused tests: delegation with tool calls, retry recovery,
supports_native_tools propagation, aggregated error reporting,
model failover, non-retryable error skip, and system prompt zero-XML
verification
---
src/agent/loop_.rs | 56 +++++
src/providers/reliable.rs | 459 +++++++++++++++++++++++++++++++++++++-
2 files changed, 514 insertions(+), 1 deletion(-)
diff --git a/src/agent/loop_.rs b/src/agent/loop_.rs
index cde95d31b..626a5979b 100644
--- a/src/agent/loop_.rs
+++ b/src/agent/loop_.rs
@@ -3432,4 +3432,60 @@ Let me check the result."#;
assert_eq!(history[0].role, "system");
assert_eq!(history[1].content, "new msg");
}
+
+ /// When `build_system_prompt_with_mode` is called with `native_tools = true`,
+ /// the output must contain ZERO XML protocol artifacts. In the native path
+ /// `build_tool_instructions` is never called, so the system prompt alone
+ /// must be clean of XML tool-call protocol.
+ #[test]
+ fn native_tools_system_prompt_contains_zero_xml() {
+ use crate::channels::build_system_prompt_with_mode;
+
+ let tool_summaries: Vec<(&str, &str)> = vec![
+ ("shell", "Execute shell commands"),
+ ("file_read", "Read files"),
+ ];
+
+ let system_prompt = build_system_prompt_with_mode(
+ std::path::Path::new("/tmp"),
+ "test-model",
+ &tool_summaries,
+ &[], // no skills
+ None, // no identity config
+ None, // no bootstrap_max_chars
+ true, // native_tools
+ );
+
+ // Must contain zero XML protocol artifacts
+ assert!(
+ !system_prompt.contains("<tool_call>"),
+ "Native prompt must not contain <tool_call>"
+ );
+ assert!(
+ !system_prompt.contains("</tool_call>"),
+ "Native prompt must not contain </tool_call>"
+ );
+ assert!(
+ !system_prompt.contains("<invoke>"),
+ "Native prompt must not contain <invoke>"
+ );
+ assert!(
+ !system_prompt.contains("</invoke>"),
+ "Native prompt must not contain </invoke>"
+ );
+ assert!(
+ !system_prompt.contains("## Tool Use Protocol"),
+ "Native prompt must not contain XML protocol header"
+ );
+
+ // Positive: native prompt should still list tools and contain task instructions
+ assert!(
+ system_prompt.contains("shell"),
+ "Native prompt must list tool names"
+ );
+ assert!(
+ system_prompt.contains("## Your Task"),
+ "Native prompt should contain task instructions"
+ );
+ }
}
diff --git a/src/providers/reliable.rs b/src/providers/reliable.rs
index 94c855afa..5c65b3a2f 100644
--- a/src/providers/reliable.rs
+++ b/src/providers/reliable.rs
@@ -1,4 +1,6 @@
-use super::traits::{ChatMessage, ChatResponse, StreamChunk, StreamOptions, StreamResult};
+use super::traits::{
+ ChatMessage, ChatRequest, ChatResponse, StreamChunk, StreamOptions, StreamResult,
+};
use super::Provider;
use async_trait::async_trait;
use futures_util::{stream, StreamExt};
@@ -548,6 +550,115 @@ impl Provider for ReliableProvider {
.any(|(_, provider)| provider.supports_vision())
}
+ async fn chat(
+ &self,
+ request: ChatRequest<'_>,
+ model: &str,
+ temperature: f64,
+ ) -> anyhow::Result<ChatResponse> {
+ let models = self.model_chain(model);
+ let mut failures = Vec::new();
+
+ for current_model in &models {
+ for (provider_name, provider) in &self.providers {
+ let mut backoff_ms = self.base_backoff_ms;
+
+ for attempt in 0..=self.max_retries {
+ let req = ChatRequest {
+ messages: request.messages,
+ tools: request.tools,
+ };
+ match provider.chat(req, current_model, temperature).await {
+ Ok(resp) => {
+ if attempt > 0 || *current_model != model {
+ tracing::info!(
+ provider = provider_name,
+ model = *current_model,
+ attempt,
+ original_model = model,
+ "Provider recovered (failover/retry)"
+ );
+ }
+ return Ok(resp);
+ }
+ Err(e) => {
+ let non_retryable_rate_limit = is_non_retryable_rate_limit(&e);
+ let non_retryable = is_non_retryable(&e) || non_retryable_rate_limit;
+ let rate_limited = is_rate_limited(&e);
+ let failure_reason = failure_reason(rate_limited, non_retryable);
+ let error_detail = compact_error_detail(&e);
+
+ push_failure(
+ &mut failures,
+ provider_name,
+ current_model,
+ attempt + 1,
+ self.max_retries + 1,
+ failure_reason,
+ &error_detail,
+ );
+
+ if rate_limited && !non_retryable_rate_limit {
+ if let Some(new_key) = self.rotate_key() {
+ tracing::info!(
+ provider = provider_name,
+ error = %error_detail,
+ "Rate limited, rotated API key (key ending ...{})",
+ &new_key[new_key.len().saturating_sub(4)..]
+ );
+ }
+ }
+
+ if non_retryable {
+ tracing::warn!(
+ provider = provider_name,
+ model = *current_model,
+ error = %error_detail,
+ "Non-retryable error, moving on"
+ );
+
+ if is_context_window_exceeded(&e) {
+ anyhow::bail!(
+ "Request exceeds model context window; retries and fallbacks were skipped. Attempts:\n{}",
+ failures.join("\n")
+ );
+ }
+
+ break;
+ }
+
+ if attempt < self.max_retries {
+ let wait = self.compute_backoff(backoff_ms, &e);
+ tracing::warn!(
+ provider = provider_name,
+ model = *current_model,
+ attempt = attempt + 1,
+ backoff_ms = wait,
+ reason = failure_reason,
+ error = %error_detail,
+ "Provider call failed, retrying"
+ );
+ tokio::time::sleep(Duration::from_millis(wait)).await;
+ backoff_ms = (backoff_ms.saturating_mul(2)).min(10_000);
+ }
+ }
+ }
+ }
+
+ tracing::warn!(
+ provider = provider_name,
+ model = *current_model,
+ "Exhausted retries, trying next provider/model"
+ );
+ }
+ }
+
+ anyhow::bail!(
+ "All providers/models failed. Attempts:\n{}",
+ failures.join("\n")
+ )
+ }
+
async fn chat_with_tools(
&self,
messages: &[ChatMessage],
@@ -1509,4 +1620,350 @@ mod tests {
.await
}
}
+
+ /// Mock provider that implements `chat()` with native tool support.
+ struct NativeToolMock {
+ calls: Arc<AtomicUsize>,
+ fail_until_attempt: usize,
+ response_text: &'static str,
+ tool_calls: Vec<super::super::traits::ToolCall>,
+ error: &'static str,
+ }
+
+ #[async_trait]
+ impl Provider for NativeToolMock {
+ async fn chat_with_system(
+ &self,
+ _system_prompt: Option<&str>,
+ _message: &str,
+ _model: &str,
+ _temperature: f64,
+ ) -> anyhow::Result<String> {
+ Ok(self.response_text.to_string())
+ }
+
+ fn supports_native_tools(&self) -> bool {
+ true
+ }
+
+ async fn chat(
+ &self,
+ _request: ChatRequest<'_>,
+ _model: &str,
+ _temperature: f64,
+ ) -> anyhow::Result<ChatResponse> {
+ let attempt = self.calls.fetch_add(1, Ordering::SeqCst) + 1;
+ if attempt <= self.fail_until_attempt {
+ anyhow::bail!(self.error);
+ }
+ Ok(ChatResponse {
+ text: Some(self.response_text.to_string()),
+ tool_calls: self.tool_calls.clone(),
+ })
+ }
+ }
+
+ #[tokio::test]
+ async fn chat_delegates_to_inner_provider() {
+ let calls = Arc::new(AtomicUsize::new(0));
+ let tool_call = super::super::traits::ToolCall {
+ id: "call_1".to_string(),
+ name: "shell".to_string(),
+ arguments: r#"{"command":"date"}"#.to_string(),
+ };
+ let provider = ReliableProvider::new(
+ vec![(
+ "primary".into(),
+ Box::new(NativeToolMock {
+ calls: Arc::clone(&calls),
+ fail_until_attempt: 0,
+ response_text: "ok",
+ tool_calls: vec![tool_call.clone()],
+ error: "boom",
+ }) as Box<dyn Provider>,
+ )],
+ 2,
+ 1,
+ );
+
+ let messages = vec![ChatMessage::user("what time is it?")];
+ let request = ChatRequest {
+ messages: &messages,
+ tools: None,
+ };
+ let result = provider.chat(request, "test-model", 0.0).await.unwrap();
+
+ assert_eq!(result.text.as_deref(), Some("ok"));
+ assert_eq!(result.tool_calls.len(), 1);
+ assert_eq!(result.tool_calls[0].name, "shell");
+ assert_eq!(calls.load(Ordering::SeqCst), 1);
+ }
+
+ #[tokio::test]
+ async fn chat_retries_and_recovers() {
+ let calls = Arc::new(AtomicUsize::new(0));
+ let tool_call = super::super::traits::ToolCall {
+ id: "call_1".to_string(),
+ name: "shell".to_string(),
+ arguments: r#"{"command":"date"}"#.to_string(),
+ };
+ let provider = ReliableProvider::new(
+ vec![(
+ "primary".into(),
+ Box::new(NativeToolMock {
+ calls: Arc::clone(&calls),
+ fail_until_attempt: 2,
+ response_text: "recovered",
+ tool_calls: vec![tool_call],
+ error: "temporary failure",
+ }) as Box<dyn Provider>,
+ )],
+ 3,
+ 1,
+ );
+
+ let messages = vec![ChatMessage::user("test")];
+ let request = ChatRequest {
+ messages: &messages,
+ tools: None,
+ };
+ let result = provider.chat(request, "test-model", 0.0).await.unwrap();
+
+ assert_eq!(result.text.as_deref(), Some("recovered"));
+ assert!(
+ calls.load(Ordering::SeqCst) > 1,
+ "should have retried at least once"
+ );
+ }
+
+ #[tokio::test]
+ async fn chat_preserves_native_tools_support() {
+ let calls = Arc::new(AtomicUsize::new(0));
+ let provider = ReliableProvider::new(
+ vec![(
+ "primary".into(),
+ Box::new(NativeToolMock {
+ calls: Arc::clone(&calls),
+ fail_until_attempt: 0,
+ response_text: "ok",
+ tool_calls: vec![],
+ error: "boom",
+ }) as Box<dyn Provider>,
+ )],
+ 2,
+ 1,
+ );
+
+ assert!(
+ provider.supports_native_tools(),
+ "ReliableProvider must propagate supports_native_tools from inner provider"
+ );
+ }
+
+ // ── Gap 2-4: Parity tests for chat() ────────────────────────
+
+ /// Gap 2: `chat()` returns an aggregated error when all providers fail,
+ /// matching behavior of `returns_aggregated_error_when_all_providers_fail`.
+ #[tokio::test]
+ async fn chat_returns_aggregated_error_when_all_providers_fail() {
+ let provider = ReliableProvider::new(
+ vec![
+ (
+ "p1".into(),
+ Box::new(NativeToolMock {
+ calls: Arc::new(AtomicUsize::new(0)),
+ fail_until_attempt: usize::MAX,
+ response_text: "never",
+ tool_calls: vec![],
+ error: "p1 chat error",
+ }) as Box<dyn Provider>,
+ ),
+ (
+ "p2".into(),
+ Box::new(NativeToolMock {
+ calls: Arc::new(AtomicUsize::new(0)),
+ fail_until_attempt: usize::MAX,
+ response_text: "never",
+ tool_calls: vec![],
+ error: "p2 chat error",
+ }) as Box<dyn Provider>,
+ ),
+ ],
+ 0,
+ 1,
+ );
+
+ let messages = vec![ChatMessage::user("hello")];
+ let request = ChatRequest {
+ messages: &messages,
+ tools: None,
+ };
+ let err = provider
+ .chat(request, "test", 0.0)
+ .await
+ .expect_err("all providers should fail");
+ let msg = err.to_string();
+ assert!(msg.contains("All providers/models failed"));
+ assert!(msg.contains("provider=p1 model=test"));
+ assert!(msg.contains("provider=p2 model=test"));
+ assert!(msg.contains("error=p1 chat error"));
+ assert!(msg.contains("error=p2 chat error"));
+ assert!(msg.contains("retryable"));
+ }
+
+ /// Mock that records model names and can fail specific models,
+ /// implementing `chat()` for native tool calling parity tests.
+ struct NativeModelAwareMock {
+ calls: Arc<AtomicUsize>,
+ models_seen: parking_lot::Mutex<Vec<String>>,
+ fail_models: Vec<&'static str>,
+ response_text: &'static str,
+ }
+
+ #[async_trait]
+ impl Provider for NativeModelAwareMock {
+ async fn chat_with_system(
+ &self,
+ _system_prompt: Option<&str>,
+ _message: &str,
+ _model: &str,
+ _temperature: f64,
+ ) -> anyhow::Result<String> {
+ Ok(self.response_text.to_string())
+ }
+
+ fn supports_native_tools(&self) -> bool {
+ true
+ }
+
+ async fn chat(
+ &self,
+ _request: ChatRequest<'_>,
+ model: &str,
+ _temperature: f64,
+ ) -> anyhow::Result<ChatResponse> {
+ self.calls.fetch_add(1, Ordering::SeqCst);
+ self.models_seen.lock().push(model.to_string());
+ if self.fail_models.contains(&model) {
+ anyhow::bail!("500 model {} unavailable", model);
+ }
+ Ok(ChatResponse {
+ text: Some(self.response_text.to_string()),
+ tool_calls: vec![],
+ })
+ }
+ }
+
+ #[async_trait]
+ impl Provider for Arc {
+ async fn chat_with_system(
+ &self,
+ system_prompt: Option<&str>,
+ message: &str,
+ model: &str,
+ temperature: f64,
+ ) -> anyhow::Result<String> {
+ self.as_ref()
+ .chat_with_system(system_prompt, message, model, temperature)
+ .await
+ }
+
+ fn supports_native_tools(&self) -> bool {
+ true
+ }
+
+ async fn chat(
+ &self,
+ request: ChatRequest<'_>,
+ model: &str,
+ temperature: f64,
+ ) -> anyhow::Result<ChatResponse> {
+ self.as_ref().chat(request, model, temperature).await
+ }
+ }
+
+ /// Gap 3: `chat()` tries fallback models on failure,
+ /// matching behavior of `model_failover_tries_fallback_model`.
+ #[tokio::test]
+ async fn chat_tries_model_failover_on_failure() {
+ let calls = Arc::new(AtomicUsize::new(0));
+ let mock = Arc::new(NativeModelAwareMock {
+ calls: Arc::clone(&calls),
+ models_seen: parking_lot::Mutex::new(Vec::new()),
+ fail_models: vec!["claude-opus"],
+ response_text: "ok from sonnet",
+ });
+
+ let mut fallbacks = HashMap::new();
+ fallbacks.insert("claude-opus".to_string(), vec!["claude-sonnet".to_string()]);
+
+ let provider = ReliableProvider::new(
+ vec![(
+ "anthropic".into(),
+ Box::new(mock.clone()) as Box<dyn Provider>,
+ )],
+ 0, // no retries — force immediate model failover
+ 1,
+ )
+ .with_model_fallbacks(fallbacks);
+
+ let messages = vec![ChatMessage::user("hello")];
+ let request = ChatRequest {
+ messages: &messages,
+ tools: None,
+ };
+ let result = provider.chat(request, "claude-opus", 0.0).await.unwrap();
+ assert_eq!(result.text.as_deref(), Some("ok from sonnet"));
+
+ let seen = mock.models_seen.lock();
+ assert_eq!(seen.len(), 2);
+ assert_eq!(seen[0], "claude-opus");
+ assert_eq!(seen[1], "claude-sonnet");
+ }
+
+ /// Gap 4: `chat()` skips retries on non-retryable errors (401, 403, etc.),
+ /// matching behavior of `skips_retries_on_non_retryable_error`.
+ #[tokio::test]
+ async fn chat_skips_non_retryable_errors() {
+ let primary_calls = Arc::new(AtomicUsize::new(0));
+ let fallback_calls = Arc::new(AtomicUsize::new(0));
+
+ let provider = ReliableProvider::new(
+ vec![
+ (
+ "primary".into(),
+ Box::new(NativeToolMock {
+ calls: Arc::clone(&primary_calls),
+ fail_until_attempt: usize::MAX,
+ response_text: "never",
+ tool_calls: vec![],
+ error: "401 Unauthorized",
+ }) as Box<dyn Provider>,
+ ),
+ (
+ "fallback".into(),
+ Box::new(NativeToolMock {
+ calls: Arc::clone(&fallback_calls),
+ fail_until_attempt: 0,
+ response_text: "from fallback",
+ tool_calls: vec![],
+ error: "fallback err",
+ }) as Box<dyn Provider>,
+ ),
+ ],
+ 3,
+ 1,
+ );
+
+ let messages = vec![ChatMessage::user("hello")];
+ let request = ChatRequest {
+ messages: &messages,
+ tools: None,
+ };
+ let result = provider.chat(request, "test", 0.0).await.unwrap();
+ assert_eq!(result.text.as_deref(), Some("from fallback"));
+ // Primary should have been called only once (no retries)
+ assert_eq!(primary_calls.load(Ordering::SeqCst), 1);
+ assert_eq!(fallback_calls.load(Ordering::SeqCst), 1);
+ }
}
From ad5f878e499873595620e251990c7f616b0a9b2d Mon Sep 17 00:00:00 2001
From: Chummy
Date: Sat, 21 Feb 2026 01:11:02 +0800
Subject: [PATCH 067/116] fix: tighten Chinese provider tool-call parsing and
remove PR noise
---
src/agent/loop_.rs | 156 +++++++++++++++++++++++++++++++++++-
src/providers/compatible.rs | 115 ++++++++++++++++++++++++--
2 files changed, 262 insertions(+), 9 deletions(-)
diff --git a/src/agent/loop_.rs b/src/agent/loop_.rs
index 626a5979b..0b8d25118 100644
--- a/src/agent/loop_.rs
+++ b/src/agent/loop_.rs
@@ -366,6 +366,95 @@ fn parse_tool_calls_from_json_value(value: &serde_json::Value) -> Vec<ToolCall> {
+fn is_reasoning_or_tool_tag(tag: &str) -> bool {
+ let normalized = tag.to_ascii_lowercase();
+ matches!(
+ normalized.as_str(),
+ "tool_call"
+ | "toolcall"
+ | "tool-call"
+ | "invoke"
+ | "thinking"
+ | "thought"
+ | "analysis"
+ | "reasoning"
+ | "reflection"
+ )
+}
+
+static XML_TOOL_TAG_RE: LazyLock