Compare commits

...

6 Commits

Author SHA1 Message Date
argenis de la rosa 80213b08ef feat(workspace): add multi-client workspace isolation
Add workspace profile management, security boundary enforcement, and
a workspace management tool for isolated client engagements.

Original work by @rareba. Supersedes #3597 — rebased on latest master.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-15 22:41:18 -04:00
Argenis a695ca4b9c fix(onboard): auto-detect TTY instead of --interactive flag (#3573)
Remove the --interactive flag from `zeroclaw onboard`. The command now
auto-detects whether stdin/stdout are a TTY: if yes and no provider
flags are given, it launches the full interactive wizard; otherwise it
runs the quick (scriptable) setup path.

This means all three install methods work with a single flow:
  curl -fsSL https://zeroclawlabs.ai/install.sh | bash
  cargo install zeroclawlabs && zeroclaw onboard
  docker run … zeroclaw onboard --api-key …
2026-03-15 19:25:55 -04:00
Argenis 811fab3b87 fix(service): headless browser works in service mode (systemd/OpenRC) (#3645)
When zeroclaw runs as a service, the process inherits a minimal
environment without HOME, DISPLAY, or user namespaces. Headless
browsers (Chromium/Firefox) need HOME for profile/cache dirs and
fail with sandbox errors without user namespaces.

- Detect service environment via INVOCATION_ID, JOURNAL_STREAM,
  or missing HOME on Linux
- Auto-apply --no-sandbox and --disable-dev-shm-usage for Chrome
  in service mode
- Set HOME fallback and CHROMIUM_FLAGS on agent-browser commands
- systemd unit: add Environment=HOME=%h and PassEnvironment
- OpenRC script: export HOME=/var/lib/zeroclaw with start_pre()
  to create the directory

Closes #3584
2026-03-15 19:16:36 -04:00
Argenis 1a5d91fe69 fix(channels): wire query_classification config into channel message processing (#3619)
The QueryClassificationConfig was parsed from config but never applied
during channel message processing. This adds the query_classification
field to ChannelRuntimeContext and invokes the classifier in
process_channel_message to override the route when a classification
rule matches a model_routes hint.

Closes #3579
2026-03-15 19:16:32 -04:00
Argenis 6eec1c81b9 fix(ci): use ubuntu-22.04 for Linux release builds (#3573)
Build against glibc 2.35 to ensure binary compatibility with Ubuntu 22.04+.
2026-03-15 18:57:30 -04:00
Argenis 602db8bca1 fix: exclude name field from Mistral tool_calls (#3572)
* fix: exclude name field from Mistral tool_calls (#3572)

Add skip_serializing_if to the compatibility fields (name, arguments,
parameters) on the ToolCall struct so they are omitted from the JSON
payload when None. Mistral's API returns 422 "Extra inputs are not
permitted" when these extra null fields are present in tool_calls.

* fix: format serde attribute for CI lint compliance
2026-03-15 18:38:41 -04:00
14 changed files with 1742 additions and 36 deletions
+4 -2
View File
@@ -155,11 +155,13 @@ jobs:
fail-fast: false
matrix:
include:
- os: ubuntu-latest
# Use ubuntu-22.04 for Linux builds to link against glibc 2.35,
# ensuring compatibility with Ubuntu 22.04+ (#3573).
- os: ubuntu-22.04
target: x86_64-unknown-linux-gnu
artifact: zeroclaw
ext: tar.gz
- os: ubuntu-latest
- os: ubuntu-22.04
target: aarch64-unknown-linux-gnu
artifact: zeroclaw
ext: tar.gz
+4 -2
View File
@@ -156,11 +156,13 @@ jobs:
fail-fast: false
matrix:
include:
- os: ubuntu-latest
# Use ubuntu-22.04 for Linux builds to link against glibc 2.35,
# ensuring compatibility with Ubuntu 22.04+ (#3573).
- os: ubuntu-22.04
target: x86_64-unknown-linux-gnu
artifact: zeroclaw
ext: tar.gz
- os: ubuntu-latest
- os: ubuntu-22.04
target: aarch64-unknown-linux-gnu
artifact: zeroclaw
ext: tar.gz
+465 -1
View File
@@ -312,6 +312,7 @@ struct ChannelRuntimeContext {
non_cli_excluded_tools: Arc<Vec<String>>,
tool_call_dedup_exempt: Arc<Vec<String>>,
model_routes: Arc<Vec<crate::config::ModelRouteConfig>>,
query_classification: crate::config::QueryClassificationConfig,
ack_reactions: bool,
show_tool_calls: bool,
session_store: Option<Arc<session_store::SessionStore>>,
@@ -1792,7 +1793,31 @@ async fn process_channel_message(
}
let history_key = conversation_history_key(&msg);
let route = get_route_selection(ctx.as_ref(), &history_key);
let mut route = get_route_selection(ctx.as_ref(), &history_key);
// ── Query classification: override route when a rule matches ──
if let Some(hint) = crate::agent::classifier::classify(&ctx.query_classification, &msg.content)
{
if let Some(matched_route) = ctx
.model_routes
.iter()
.find(|r| r.hint.eq_ignore_ascii_case(&hint))
{
tracing::info!(
target: "query_classification",
hint = hint.as_str(),
provider = matched_route.provider.as_str(),
model = matched_route.model.as_str(),
channel = %msg.channel,
"Channel message classified — overriding route"
);
route = ChannelRouteSelection {
provider: matched_route.provider.clone(),
model: matched_route.model.clone(),
};
}
}
let runtime_defaults = runtime_defaults_snapshot(ctx.as_ref());
let active_provider = match get_or_create_provider(ctx.as_ref(), &route.provider).await {
Ok(provider) => provider,
@@ -3843,6 +3868,7 @@ pub async fn start_channels(config: Config) -> Result<()> {
non_cli_excluded_tools: Arc::new(config.autonomy.non_cli_excluded_tools.clone()),
tool_call_dedup_exempt: Arc::new(config.agent.tool_call_dedup_exempt.clone()),
model_routes: Arc::new(config.model_routes.clone()),
query_classification: config.query_classification.clone(),
ack_reactions: config.channels_config.ack_reactions,
show_tool_calls: config.channels_config.show_tool_calls,
session_store: if config.channels_config.session_persistence {
@@ -4145,6 +4171,7 @@ mod tests {
non_cli_excluded_tools: Arc::new(Vec::new()),
tool_call_dedup_exempt: Arc::new(Vec::new()),
model_routes: Arc::new(Vec::new()),
query_classification: crate::config::QueryClassificationConfig::default(),
ack_reactions: true,
show_tool_calls: true,
session_store: None,
@@ -4252,6 +4279,7 @@ mod tests {
non_cli_excluded_tools: Arc::new(Vec::new()),
tool_call_dedup_exempt: Arc::new(Vec::new()),
model_routes: Arc::new(Vec::new()),
query_classification: crate::config::QueryClassificationConfig::default(),
ack_reactions: true,
show_tool_calls: true,
session_store: None,
@@ -4315,6 +4343,7 @@ mod tests {
non_cli_excluded_tools: Arc::new(Vec::new()),
tool_call_dedup_exempt: Arc::new(Vec::new()),
model_routes: Arc::new(Vec::new()),
query_classification: crate::config::QueryClassificationConfig::default(),
ack_reactions: true,
show_tool_calls: true,
session_store: None,
@@ -4836,6 +4865,7 @@ BTC is currently around $65,000 based on latest tool output."#
multimodal: crate::config::MultimodalConfig::default(),
hooks: None,
model_routes: Arc::new(Vec::new()),
query_classification: crate::config::QueryClassificationConfig::default(),
ack_reactions: true,
show_tool_calls: true,
session_store: None,
@@ -4907,6 +4937,7 @@ BTC is currently around $65,000 based on latest tool output."#
multimodal: crate::config::MultimodalConfig::default(),
hooks: None,
model_routes: Arc::new(Vec::new()),
query_classification: crate::config::QueryClassificationConfig::default(),
ack_reactions: true,
show_tool_calls: true,
session_store: None,
@@ -4992,6 +5023,7 @@ BTC is currently around $65,000 based on latest tool output."#
non_cli_excluded_tools: Arc::new(Vec::new()),
tool_call_dedup_exempt: Arc::new(Vec::new()),
model_routes: Arc::new(Vec::new()),
query_classification: crate::config::QueryClassificationConfig::default(),
ack_reactions: true,
show_tool_calls: true,
session_store: None,
@@ -5062,6 +5094,7 @@ BTC is currently around $65,000 based on latest tool output."#
non_cli_excluded_tools: Arc::new(Vec::new()),
tool_call_dedup_exempt: Arc::new(Vec::new()),
model_routes: Arc::new(Vec::new()),
query_classification: crate::config::QueryClassificationConfig::default(),
ack_reactions: true,
show_tool_calls: true,
session_store: None,
@@ -5142,6 +5175,7 @@ BTC is currently around $65,000 based on latest tool output."#
non_cli_excluded_tools: Arc::new(Vec::new()),
tool_call_dedup_exempt: Arc::new(Vec::new()),
model_routes: Arc::new(Vec::new()),
query_classification: crate::config::QueryClassificationConfig::default(),
ack_reactions: true,
show_tool_calls: true,
session_store: None,
@@ -5242,6 +5276,7 @@ BTC is currently around $65,000 based on latest tool output."#
non_cli_excluded_tools: Arc::new(Vec::new()),
tool_call_dedup_exempt: Arc::new(Vec::new()),
model_routes: Arc::new(Vec::new()),
query_classification: crate::config::QueryClassificationConfig::default(),
ack_reactions: true,
show_tool_calls: true,
session_store: None,
@@ -5324,6 +5359,7 @@ BTC is currently around $65,000 based on latest tool output."#
non_cli_excluded_tools: Arc::new(Vec::new()),
tool_call_dedup_exempt: Arc::new(Vec::new()),
model_routes: Arc::new(Vec::new()),
query_classification: crate::config::QueryClassificationConfig::default(),
ack_reactions: true,
show_tool_calls: true,
session_store: None,
@@ -5421,6 +5457,7 @@ BTC is currently around $65,000 based on latest tool output."#
non_cli_excluded_tools: Arc::new(Vec::new()),
tool_call_dedup_exempt: Arc::new(Vec::new()),
model_routes: Arc::new(Vec::new()),
query_classification: crate::config::QueryClassificationConfig::default(),
ack_reactions: true,
show_tool_calls: true,
session_store: None,
@@ -5503,6 +5540,7 @@ BTC is currently around $65,000 based on latest tool output."#
non_cli_excluded_tools: Arc::new(Vec::new()),
tool_call_dedup_exempt: Arc::new(Vec::new()),
model_routes: Arc::new(Vec::new()),
query_classification: crate::config::QueryClassificationConfig::default(),
ack_reactions: true,
show_tool_calls: true,
session_store: None,
@@ -5575,6 +5613,7 @@ BTC is currently around $65,000 based on latest tool output."#
non_cli_excluded_tools: Arc::new(Vec::new()),
tool_call_dedup_exempt: Arc::new(Vec::new()),
model_routes: Arc::new(Vec::new()),
query_classification: crate::config::QueryClassificationConfig::default(),
ack_reactions: true,
show_tool_calls: true,
session_store: None,
@@ -5758,6 +5797,7 @@ BTC is currently around $65,000 based on latest tool output."#
non_cli_excluded_tools: Arc::new(Vec::new()),
tool_call_dedup_exempt: Arc::new(Vec::new()),
model_routes: Arc::new(Vec::new()),
query_classification: crate::config::QueryClassificationConfig::default(),
ack_reactions: true,
show_tool_calls: true,
session_store: None,
@@ -5849,6 +5889,7 @@ BTC is currently around $65,000 based on latest tool output."#
non_cli_excluded_tools: Arc::new(Vec::new()),
tool_call_dedup_exempt: Arc::new(Vec::new()),
model_routes: Arc::new(Vec::new()),
query_classification: crate::config::QueryClassificationConfig::default(),
ack_reactions: true,
show_tool_calls: true,
session_store: None,
@@ -5961,6 +6002,7 @@ BTC is currently around $65,000 based on latest tool output."#
approval_manager: Arc::new(ApprovalManager::for_non_interactive(
&crate::config::AutonomyConfig::default(),
)),
query_classification: crate::config::QueryClassificationConfig::default(),
});
let (tx, rx) = tokio::sync::mpsc::channel::<traits::ChannelMessage>(8);
@@ -6058,6 +6100,7 @@ BTC is currently around $65,000 based on latest tool output."#
non_cli_excluded_tools: Arc::new(Vec::new()),
tool_call_dedup_exempt: Arc::new(Vec::new()),
model_routes: Arc::new(Vec::new()),
query_classification: crate::config::QueryClassificationConfig::default(),
ack_reactions: true,
show_tool_calls: true,
session_store: None,
@@ -6143,6 +6186,7 @@ BTC is currently around $65,000 based on latest tool output."#
non_cli_excluded_tools: Arc::new(Vec::new()),
tool_call_dedup_exempt: Arc::new(Vec::new()),
model_routes: Arc::new(Vec::new()),
query_classification: crate::config::QueryClassificationConfig::default(),
ack_reactions: true,
show_tool_calls: true,
session_store: None,
@@ -6213,6 +6257,7 @@ BTC is currently around $65,000 based on latest tool output."#
non_cli_excluded_tools: Arc::new(Vec::new()),
tool_call_dedup_exempt: Arc::new(Vec::new()),
model_routes: Arc::new(Vec::new()),
query_classification: crate::config::QueryClassificationConfig::default(),
ack_reactions: true,
show_tool_calls: true,
session_store: None,
@@ -6841,6 +6886,7 @@ BTC is currently around $65,000 based on latest tool output."#
non_cli_excluded_tools: Arc::new(Vec::new()),
tool_call_dedup_exempt: Arc::new(Vec::new()),
model_routes: Arc::new(Vec::new()),
query_classification: crate::config::QueryClassificationConfig::default(),
ack_reactions: true,
show_tool_calls: true,
session_store: None,
@@ -6937,6 +6983,7 @@ BTC is currently around $65,000 based on latest tool output."#
non_cli_excluded_tools: Arc::new(Vec::new()),
tool_call_dedup_exempt: Arc::new(Vec::new()),
model_routes: Arc::new(Vec::new()),
query_classification: crate::config::QueryClassificationConfig::default(),
ack_reactions: true,
show_tool_calls: true,
session_store: None,
@@ -7033,6 +7080,7 @@ BTC is currently around $65,000 based on latest tool output."#
non_cli_excluded_tools: Arc::new(Vec::new()),
tool_call_dedup_exempt: Arc::new(Vec::new()),
model_routes: Arc::new(Vec::new()),
query_classification: crate::config::QueryClassificationConfig::default(),
ack_reactions: true,
show_tool_calls: true,
session_store: None,
@@ -7593,6 +7641,7 @@ This is an example JSON object for profile settings."#;
non_cli_excluded_tools: Arc::new(Vec::new()),
tool_call_dedup_exempt: Arc::new(Vec::new()),
model_routes: Arc::new(Vec::new()),
query_classification: crate::config::QueryClassificationConfig::default(),
ack_reactions: true,
show_tool_calls: true,
session_store: None,
@@ -7670,6 +7719,7 @@ This is an example JSON object for profile settings."#;
non_cli_excluded_tools: Arc::new(Vec::new()),
tool_call_dedup_exempt: Arc::new(Vec::new()),
model_routes: Arc::new(Vec::new()),
query_classification: crate::config::QueryClassificationConfig::default(),
ack_reactions: true,
show_tool_calls: true,
session_store: None,
@@ -7755,6 +7805,420 @@ This is an example JSON object for profile settings."#;
}
}
// ── Query classification in channel message processing ─────────
#[tokio::test]
async fn process_channel_message_applies_query_classification_route() {
    // Channel stub that records outbound messages instead of doing real I/O.
    let channel_impl = Arc::new(TelegramRecordingChannel::default());
    let channel: Arc<dyn Channel> = channel_impl.clone();
    let mut channels_by_name = HashMap::new();
    channels_by_name.insert(channel.name().to_string(), channel);
    // Two capture providers: the default route target and the vision target.
    // Keep the concrete impls so we can inspect call counts after the run.
    let default_provider_impl = Arc::new(ModelCaptureProvider::default());
    let default_provider: Arc<dyn Provider> = default_provider_impl.clone();
    let vision_provider_impl = Arc::new(ModelCaptureProvider::default());
    let vision_provider: Arc<dyn Provider> = vision_provider_impl.clone();
    // Pre-seed the provider cache so both route targets resolve by name
    // without constructing real providers.
    let mut provider_cache_seed: HashMap<String, Arc<dyn Provider>> = HashMap::new();
    provider_cache_seed.insert("test-provider".to_string(), Arc::clone(&default_provider));
    provider_cache_seed.insert("vision-provider".to_string(), vision_provider);
    // One enabled rule: the "analyze-image" keyword classifies as hint "vision".
    let classification_config = crate::config::QueryClassificationConfig {
        enabled: true,
        rules: vec![crate::config::schema::ClassificationRule {
            hint: "vision".into(),
            keywords: vec!["analyze-image".into()],
            ..Default::default()
        }],
    };
    // Model route mapping the "vision" hint onto the vision provider/model.
    let model_routes = vec![crate::config::ModelRouteConfig {
        hint: "vision".into(),
        provider: "vision-provider".into(),
        model: "gpt-4-vision".into(),
        api_key: None,
    }];
    let runtime_ctx = Arc::new(ChannelRuntimeContext {
        channels_by_name: Arc::new(channels_by_name),
        provider: Arc::clone(&default_provider),
        default_provider: Arc::new("test-provider".to_string()),
        memory: Arc::new(NoopMemory),
        tools_registry: Arc::new(vec![]),
        observer: Arc::new(NoopObserver),
        system_prompt: Arc::new("test-system-prompt".to_string()),
        model: Arc::new("default-model".to_string()),
        temperature: 0.0,
        auto_save_memory: false,
        max_tool_iterations: 5,
        min_relevance_score: 0.0,
        conversation_histories: Arc::new(Mutex::new(HashMap::new())),
        provider_cache: Arc::new(Mutex::new(provider_cache_seed)),
        route_overrides: Arc::new(Mutex::new(HashMap::new())),
        api_key: None,
        api_url: None,
        reliability: Arc::new(crate::config::ReliabilityConfig::default()),
        provider_runtime_options: providers::ProviderRuntimeOptions::default(),
        workspace_dir: Arc::new(std::env::temp_dir()),
        message_timeout_secs: CHANNEL_MESSAGE_TIMEOUT_SECS,
        interrupt_on_new_message: InterruptOnNewMessageConfig {
            telegram: false,
            slack: false,
        },
        multimodal: crate::config::MultimodalConfig::default(),
        hooks: None,
        non_cli_excluded_tools: Arc::new(Vec::new()),
        tool_call_dedup_exempt: Arc::new(Vec::new()),
        model_routes: Arc::new(model_routes),
        query_classification: classification_config,
        ack_reactions: true,
        show_tool_calls: true,
        session_store: None,
        approval_manager: Arc::new(ApprovalManager::for_non_interactive(
            &crate::config::AutonomyConfig::default(),
        )),
    });
    // The message content contains the rule keyword, so classification
    // should override the default route with the vision route.
    process_channel_message(
        runtime_ctx,
        traits::ChannelMessage {
            id: "msg-qc-1".to_string(),
            sender: "alice".to_string(),
            reply_target: "chat-1".to_string(),
            content: "please analyze-image from the dataset".to_string(),
            channel: "telegram".to_string(),
            timestamp: 1,
            thread_ts: None,
        },
        CancellationToken::new(),
    )
    .await;
    // Vision provider should have been called instead of the default.
    assert_eq!(default_provider_impl.call_count.load(Ordering::SeqCst), 0);
    assert_eq!(vision_provider_impl.call_count.load(Ordering::SeqCst), 1);
    // The rerouted call must also carry the model from the matched route.
    assert_eq!(
        vision_provider_impl
            .models
            .lock()
            .unwrap_or_else(|e| e.into_inner())
            .as_slice(),
        &["gpt-4-vision".to_string()]
    );
}
#[tokio::test]
async fn process_channel_message_classification_disabled_uses_default_route() {
    // Channel stub that records outbound messages instead of doing real I/O.
    let channel_impl = Arc::new(TelegramRecordingChannel::default());
    let channel: Arc<dyn Channel> = channel_impl.clone();
    let mut channels_by_name = HashMap::new();
    channels_by_name.insert(channel.name().to_string(), channel);
    // Capture providers for both potential routes; we assert on call counts.
    let default_provider_impl = Arc::new(ModelCaptureProvider::default());
    let default_provider: Arc<dyn Provider> = default_provider_impl.clone();
    let vision_provider_impl = Arc::new(ModelCaptureProvider::default());
    let vision_provider: Arc<dyn Provider> = vision_provider_impl.clone();
    // Pre-seed the provider cache so lookups resolve without real construction.
    let mut provider_cache_seed: HashMap<String, Arc<dyn Provider>> = HashMap::new();
    provider_cache_seed.insert("test-provider".to_string(), Arc::clone(&default_provider));
    provider_cache_seed.insert("vision-provider".to_string(), vision_provider);
    // Classification is disabled — matching keyword should NOT trigger reroute.
    let classification_config = crate::config::QueryClassificationConfig {
        enabled: false,
        rules: vec![crate::config::schema::ClassificationRule {
            hint: "vision".into(),
            keywords: vec!["analyze-image".into()],
            ..Default::default()
        }],
    };
    // Route exists for the hint, but must be ignored while disabled.
    let model_routes = vec![crate::config::ModelRouteConfig {
        hint: "vision".into(),
        provider: "vision-provider".into(),
        model: "gpt-4-vision".into(),
        api_key: None,
    }];
    let runtime_ctx = Arc::new(ChannelRuntimeContext {
        channels_by_name: Arc::new(channels_by_name),
        provider: Arc::clone(&default_provider),
        default_provider: Arc::new("test-provider".to_string()),
        memory: Arc::new(NoopMemory),
        tools_registry: Arc::new(vec![]),
        observer: Arc::new(NoopObserver),
        system_prompt: Arc::new("test-system-prompt".to_string()),
        model: Arc::new("default-model".to_string()),
        temperature: 0.0,
        auto_save_memory: false,
        max_tool_iterations: 5,
        min_relevance_score: 0.0,
        conversation_histories: Arc::new(Mutex::new(HashMap::new())),
        provider_cache: Arc::new(Mutex::new(provider_cache_seed)),
        route_overrides: Arc::new(Mutex::new(HashMap::new())),
        api_key: None,
        api_url: None,
        reliability: Arc::new(crate::config::ReliabilityConfig::default()),
        provider_runtime_options: providers::ProviderRuntimeOptions::default(),
        workspace_dir: Arc::new(std::env::temp_dir()),
        message_timeout_secs: CHANNEL_MESSAGE_TIMEOUT_SECS,
        interrupt_on_new_message: InterruptOnNewMessageConfig {
            telegram: false,
            slack: false,
        },
        multimodal: crate::config::MultimodalConfig::default(),
        hooks: None,
        non_cli_excluded_tools: Arc::new(Vec::new()),
        tool_call_dedup_exempt: Arc::new(Vec::new()),
        model_routes: Arc::new(model_routes),
        query_classification: classification_config,
        ack_reactions: true,
        show_tool_calls: true,
        session_store: None,
        approval_manager: Arc::new(ApprovalManager::for_non_interactive(
            &crate::config::AutonomyConfig::default(),
        )),
    });
    // Same keyword-bearing content as the enabled-path test above is NOT
    // rerouted here, because the config has enabled: false.
    process_channel_message(
        runtime_ctx,
        traits::ChannelMessage {
            id: "msg-qc-disabled".to_string(),
            sender: "alice".to_string(),
            reply_target: "chat-1".to_string(),
            content: "please analyze-image from the dataset".to_string(),
            channel: "telegram".to_string(),
            timestamp: 1,
            thread_ts: None,
        },
        CancellationToken::new(),
    )
    .await;
    // Default provider should be used since classification is disabled.
    assert_eq!(default_provider_impl.call_count.load(Ordering::SeqCst), 1);
    assert_eq!(vision_provider_impl.call_count.load(Ordering::SeqCst), 0);
}
#[tokio::test]
async fn process_channel_message_classification_no_match_uses_default_route() {
    // Channel stub that records outbound messages instead of doing real I/O.
    let channel_impl = Arc::new(TelegramRecordingChannel::default());
    let channel: Arc<dyn Channel> = channel_impl.clone();
    let mut channels_by_name = HashMap::new();
    channels_by_name.insert(channel.name().to_string(), channel);
    // Capture providers for both potential routes; we assert on call counts.
    let default_provider_impl = Arc::new(ModelCaptureProvider::default());
    let default_provider: Arc<dyn Provider> = default_provider_impl.clone();
    let vision_provider_impl = Arc::new(ModelCaptureProvider::default());
    let vision_provider: Arc<dyn Provider> = vision_provider_impl.clone();
    // Pre-seed the provider cache so lookups resolve without real construction.
    let mut provider_cache_seed: HashMap<String, Arc<dyn Provider>> = HashMap::new();
    provider_cache_seed.insert("test-provider".to_string(), Arc::clone(&default_provider));
    provider_cache_seed.insert("vision-provider".to_string(), vision_provider);
    // Classification enabled with a rule that won't match the message.
    let classification_config = crate::config::QueryClassificationConfig {
        enabled: true,
        rules: vec![crate::config::schema::ClassificationRule {
            hint: "vision".into(),
            keywords: vec!["analyze-image".into()],
            ..Default::default()
        }],
    };
    // Route exists, but should never be selected without a rule match.
    let model_routes = vec![crate::config::ModelRouteConfig {
        hint: "vision".into(),
        provider: "vision-provider".into(),
        model: "gpt-4-vision".into(),
        api_key: None,
    }];
    let runtime_ctx = Arc::new(ChannelRuntimeContext {
        channels_by_name: Arc::new(channels_by_name),
        provider: Arc::clone(&default_provider),
        default_provider: Arc::new("test-provider".to_string()),
        memory: Arc::new(NoopMemory),
        tools_registry: Arc::new(vec![]),
        observer: Arc::new(NoopObserver),
        system_prompt: Arc::new("test-system-prompt".to_string()),
        model: Arc::new("default-model".to_string()),
        temperature: 0.0,
        auto_save_memory: false,
        max_tool_iterations: 5,
        min_relevance_score: 0.0,
        conversation_histories: Arc::new(Mutex::new(HashMap::new())),
        provider_cache: Arc::new(Mutex::new(provider_cache_seed)),
        route_overrides: Arc::new(Mutex::new(HashMap::new())),
        api_key: None,
        api_url: None,
        reliability: Arc::new(crate::config::ReliabilityConfig::default()),
        provider_runtime_options: providers::ProviderRuntimeOptions::default(),
        workspace_dir: Arc::new(std::env::temp_dir()),
        message_timeout_secs: CHANNEL_MESSAGE_TIMEOUT_SECS,
        interrupt_on_new_message: InterruptOnNewMessageConfig {
            telegram: false,
            slack: false,
        },
        multimodal: crate::config::MultimodalConfig::default(),
        hooks: None,
        non_cli_excluded_tools: Arc::new(Vec::new()),
        tool_call_dedup_exempt: Arc::new(Vec::new()),
        model_routes: Arc::new(model_routes),
        query_classification: classification_config,
        ack_reactions: true,
        show_tool_calls: true,
        session_store: None,
        approval_manager: Arc::new(ApprovalManager::for_non_interactive(
            &crate::config::AutonomyConfig::default(),
        )),
    });
    // Content deliberately contains none of the rule keywords.
    process_channel_message(
        runtime_ctx,
        traits::ChannelMessage {
            id: "msg-qc-nomatch".to_string(),
            sender: "alice".to_string(),
            reply_target: "chat-1".to_string(),
            content: "just a regular text message".to_string(),
            channel: "telegram".to_string(),
            timestamp: 1,
            thread_ts: None,
        },
        CancellationToken::new(),
    )
    .await;
    // Default provider should be used since no classification rule matched.
    assert_eq!(default_provider_impl.call_count.load(Ordering::SeqCst), 1);
    assert_eq!(vision_provider_impl.call_count.load(Ordering::SeqCst), 0);
}
#[tokio::test]
async fn process_channel_message_classification_priority_selects_highest() {
    // Channel stub that records outbound messages instead of doing real I/O.
    let channel_impl = Arc::new(TelegramRecordingChannel::default());
    let channel: Arc<dyn Channel> = channel_impl.clone();
    let mut channels_by_name = HashMap::new();
    channels_by_name.insert(channel.name().to_string(), channel);
    // Three capture providers: default plus one per competing route.
    let default_provider_impl = Arc::new(ModelCaptureProvider::default());
    let default_provider: Arc<dyn Provider> = default_provider_impl.clone();
    let fast_provider_impl = Arc::new(ModelCaptureProvider::default());
    let fast_provider: Arc<dyn Provider> = fast_provider_impl.clone();
    let code_provider_impl = Arc::new(ModelCaptureProvider::default());
    let code_provider: Arc<dyn Provider> = code_provider_impl.clone();
    // Pre-seed the provider cache so all three names resolve without
    // constructing real providers.
    let mut provider_cache_seed: HashMap<String, Arc<dyn Provider>> = HashMap::new();
    provider_cache_seed.insert("test-provider".to_string(), Arc::clone(&default_provider));
    provider_cache_seed.insert("fast-provider".to_string(), fast_provider);
    provider_cache_seed.insert("code-provider".to_string(), code_provider);
    // Both rules match "code" keyword, but "code" rule has higher priority.
    let classification_config = crate::config::QueryClassificationConfig {
        enabled: true,
        rules: vec![
            crate::config::schema::ClassificationRule {
                hint: "fast".into(),
                keywords: vec!["code".into()],
                priority: 1,
                ..Default::default()
            },
            crate::config::schema::ClassificationRule {
                hint: "code".into(),
                keywords: vec!["code".into()],
                priority: 10,
                ..Default::default()
            },
        ],
    };
    // One route per hint so either winning rule has a concrete target.
    let model_routes = vec![
        crate::config::ModelRouteConfig {
            hint: "fast".into(),
            provider: "fast-provider".into(),
            model: "fast-model".into(),
            api_key: None,
        },
        crate::config::ModelRouteConfig {
            hint: "code".into(),
            provider: "code-provider".into(),
            model: "code-model".into(),
            api_key: None,
        },
    ];
    let runtime_ctx = Arc::new(ChannelRuntimeContext {
        channels_by_name: Arc::new(channels_by_name),
        provider: Arc::clone(&default_provider),
        default_provider: Arc::new("test-provider".to_string()),
        memory: Arc::new(NoopMemory),
        tools_registry: Arc::new(vec![]),
        observer: Arc::new(NoopObserver),
        system_prompt: Arc::new("test-system-prompt".to_string()),
        model: Arc::new("default-model".to_string()),
        temperature: 0.0,
        auto_save_memory: false,
        max_tool_iterations: 5,
        min_relevance_score: 0.0,
        conversation_histories: Arc::new(Mutex::new(HashMap::new())),
        provider_cache: Arc::new(Mutex::new(provider_cache_seed)),
        route_overrides: Arc::new(Mutex::new(HashMap::new())),
        api_key: None,
        api_url: None,
        reliability: Arc::new(crate::config::ReliabilityConfig::default()),
        provider_runtime_options: providers::ProviderRuntimeOptions::default(),
        workspace_dir: Arc::new(std::env::temp_dir()),
        message_timeout_secs: CHANNEL_MESSAGE_TIMEOUT_SECS,
        interrupt_on_new_message: InterruptOnNewMessageConfig {
            telegram: false,
            slack: false,
        },
        multimodal: crate::config::MultimodalConfig::default(),
        hooks: None,
        non_cli_excluded_tools: Arc::new(Vec::new()),
        tool_call_dedup_exempt: Arc::new(Vec::new()),
        model_routes: Arc::new(model_routes),
        query_classification: classification_config,
        ack_reactions: true,
        show_tool_calls: true,
        session_store: None,
        approval_manager: Arc::new(ApprovalManager::for_non_interactive(
            &crate::config::AutonomyConfig::default(),
        )),
    });
    // Content matches BOTH rules' shared "code" keyword.
    process_channel_message(
        runtime_ctx,
        traits::ChannelMessage {
            id: "msg-qc-prio".to_string(),
            sender: "alice".to_string(),
            reply_target: "chat-1".to_string(),
            content: "write some code for me".to_string(),
            channel: "telegram".to_string(),
            timestamp: 1,
            thread_ts: None,
        },
        CancellationToken::new(),
    )
    .await;
    // Higher-priority "code" rule (priority=10) should win over "fast" (priority=1).
    assert_eq!(default_provider_impl.call_count.load(Ordering::SeqCst), 0);
    assert_eq!(fast_provider_impl.call_count.load(Ordering::SeqCst), 0);
    assert_eq!(code_provider_impl.call_count.load(Ordering::SeqCst), 1);
    // And the winning route's model must be the one actually requested.
    assert_eq!(
        code_provider_impl
            .models
            .lock()
            .unwrap_or_else(|e| e.into_inner())
            .as_slice(),
        &["code-model".to_string()]
    );
}
#[test]
fn build_channel_by_id_unconfigured_telegram_returns_error() {
let config = Config::default();
+2 -1
View File
@@ -1,5 +1,6 @@
pub mod schema;
pub mod traits;
pub mod workspace;
#[allow(unused_imports)]
pub use schema::{
@@ -19,7 +20,7 @@ pub use schema::{
SkillsPromptInjectionMode, SlackConfig, StorageConfig, StorageProviderConfig,
StorageProviderSection, StreamMode, SwarmConfig, SwarmStrategy, TelegramConfig,
ToolFilterGroup, ToolFilterGroupMode, TranscriptionConfig, TtsConfig, TunnelConfig,
WebFetchConfig, WebSearchConfig, WebhookConfig,
WebFetchConfig, WebSearchConfig, WebhookConfig, WorkspaceConfig,
};
pub fn name_and_presence<T: traits::ChannelConfig>(channel: Option<&T>) -> (&'static str, bool) {
+55
View File
@@ -259,6 +259,58 @@ pub struct Config {
/// Dynamic node discovery configuration (`[nodes]`).
#[serde(default)]
pub nodes: NodesConfig,
/// Multi-client workspace isolation configuration (`[workspace]`).
#[serde(default)]
pub workspace: WorkspaceConfig,
}
/// Multi-client workspace isolation configuration.
///
/// When enabled, each client engagement gets an isolated workspace with
/// separate memory, audit, secrets, and tool restrictions.
#[allow(clippy::struct_excessive_bools)]
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
pub struct WorkspaceConfig {
    /// Enable workspace isolation. Default: false.
    #[serde(default)]
    pub enabled: bool,
    /// Currently active workspace name.
    /// None means no workspace is selected even when isolation is enabled.
    #[serde(default)]
    pub active_workspace: Option<String>,
    /// Base directory for workspace profiles.
    /// Default: "~/.zeroclaw/workspaces". NOTE(review): stored as a raw
    /// string — presumably `~` is expanded by a consumer; confirm, since
    /// the filesystem will not expand it automatically.
    #[serde(default = "default_workspaces_dir")]
    pub workspaces_dir: String,
    /// Isolate memory databases per workspace. Default: true.
    #[serde(default = "default_true")]
    pub isolate_memory: bool,
    /// Isolate secrets namespaces per workspace. Default: true.
    #[serde(default = "default_true")]
    pub isolate_secrets: bool,
    /// Isolate audit logs per workspace. Default: true.
    #[serde(default = "default_true")]
    pub isolate_audit: bool,
    /// Allow searching across workspaces. Default: false (security).
    #[serde(default)]
    pub cross_workspace_search: bool,
}
/// Default base directory for workspace profiles.
///
/// Used both by serde (`#[serde(default = "default_workspaces_dir")]`)
/// and by `WorkspaceConfig::default()` so the two stay in sync.
fn default_workspaces_dir() -> String {
    String::from("~/.zeroclaw/workspaces")
}
impl Default for WorkspaceConfig {
fn default() -> Self {
Self {
enabled: false,
active_workspace: None,
workspaces_dir: default_workspaces_dir(),
isolate_memory: true,
isolate_secrets: true,
isolate_audit: true,
cross_workspace_search: false,
}
}
}
/// Named provider profile definition compatible with Codex app-server style config.
@@ -4252,6 +4304,7 @@ impl Default for Config {
tts: TtsConfig::default(),
mcp: McpConfig::default(),
nodes: NodesConfig::default(),
workspace: WorkspaceConfig::default(),
}
}
}
@@ -6359,6 +6412,7 @@ default_temperature = 0.7
tts: TtsConfig::default(),
mcp: McpConfig::default(),
nodes: NodesConfig::default(),
workspace: WorkspaceConfig::default(),
};
let toml_str = toml::to_string_pretty(&config).unwrap();
@@ -6651,6 +6705,7 @@ tool_dispatcher = "xml"
tts: TtsConfig::default(),
mcp: McpConfig::default(),
nodes: NodesConfig::default(),
workspace: WorkspaceConfig::default(),
};
config.save().await.unwrap();
+382
View File
@@ -0,0 +1,382 @@
//! Workspace profile management for multi-client isolation.
//!
//! Each workspace represents an isolated client engagement with its own
//! memory namespace, audit trail, secrets scope, and tool restrictions.
//! Profiles are stored under `~/.zeroclaw/workspaces/<client_name>/`.
use anyhow::{bail, Context, Result};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::path::{Path, PathBuf};
/// A single client workspace profile.
///
/// Deserialized from a per-workspace `profile.toml`; every field except
/// `name` is optional and defaults to empty/None.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct WorkspaceProfile {
    /// Human-readable workspace name (also used as directory name).
    pub name: String,
    /// Allowed domains for network access within this workspace.
    /// Empty means "no restriction" (see `is_domain_allowed`).
    #[serde(default)]
    pub allowed_domains: Vec<String>,
    /// Credential profile name scoped to this workspace.
    #[serde(default)]
    pub credential_profile: Option<String>,
    /// Memory namespace prefix for isolation.
    /// Falls back to `name` when None (see `effective_memory_namespace`).
    #[serde(default)]
    pub memory_namespace: Option<String>,
    /// Audit namespace prefix for isolation.
    /// Falls back to `name` when None (see `effective_audit_namespace`).
    #[serde(default)]
    pub audit_namespace: Option<String>,
    /// Tool names denied in this workspace (e.g. `["shell"]` to block shell access).
    #[serde(default)]
    pub tool_restrictions: Vec<String>,
}
impl WorkspaceProfile {
    /// Effective memory namespace; defaults to the workspace name when no
    /// explicit namespace was configured.
    pub fn effective_memory_namespace(&self) -> &str {
        match self.memory_namespace.as_deref() {
            Some(ns) => ns,
            None => &self.name,
        }
    }

    /// Effective audit namespace; defaults to the workspace name when no
    /// explicit namespace was configured.
    pub fn effective_audit_namespace(&self) -> &str {
        match self.audit_namespace.as_deref() {
            Some(ns) => ns,
            None => &self.name,
        }
    }

    /// Returns true if `tool_name` appears in this workspace's restriction
    /// list (ASCII case-insensitive comparison).
    pub fn is_tool_restricted(&self, tool_name: &str) -> bool {
        self.tool_restrictions
            .iter()
            .any(|restricted| tool_name.eq_ignore_ascii_case(restricted))
    }

    /// Returns true if `domain` is allowed for this workspace.
    /// An empty allowlist means all domains are allowed; matching is ASCII
    /// case-insensitive.
    pub fn is_domain_allowed(&self, domain: &str) -> bool {
        self.allowed_domains.is_empty()
            || self
                .allowed_domains
                .iter()
                .any(|allowed| allowed.eq_ignore_ascii_case(domain))
    }
}
/// Manages loading and switching between client workspace profiles.
///
/// Profiles live under `workspaces_dir/<name>/profile.toml`; construction is
/// I/O-free — call `load_profiles` to populate the map from disk.
#[derive(Debug, Clone)]
pub struct WorkspaceManager {
    /// Base directory containing all workspace subdirectories.
    workspaces_dir: PathBuf,
    /// Loaded workspace profiles keyed by name (the `name` field from the
    /// profile file, not necessarily the directory name).
    profiles: HashMap<String, WorkspaceProfile>,
    /// Currently active workspace name.
    active: Option<String>,
}
impl WorkspaceManager {
    /// Create a new workspace manager rooted at the given directory.
    ///
    /// No I/O happens here; call [`Self::load_profiles`] to read profiles
    /// from disk.
    pub fn new(workspaces_dir: PathBuf) -> Self {
        Self {
            workspaces_dir,
            profiles: HashMap::new(),
            active: None,
        }
    }

    /// Load all workspace profiles from disk, replacing any previously
    /// loaded set.
    ///
    /// Each subdirectory of `workspaces_dir` that contains a `profile.toml`
    /// is treated as a workspace. Malformed or unreadable profiles are
    /// skipped with a warning instead of failing the whole load.
    pub async fn load_profiles(&mut self) -> Result<()> {
        self.profiles.clear();
        let dir = &self.workspaces_dir;
        if !dir.exists() {
            // A missing base directory simply means no workspaces yet.
            return Ok(());
        }
        let mut entries = tokio::fs::read_dir(dir)
            .await
            .with_context(|| format!("reading workspaces directory: {}", dir.display()))?;
        while let Some(entry) = entries.next_entry().await? {
            let path = entry.path();
            if !path.is_dir() {
                continue;
            }
            let profile_path = path.join("profile.toml");
            if !profile_path.exists() {
                continue;
            }
            match tokio::fs::read_to_string(&profile_path).await {
                Ok(contents) => match toml::from_str::<WorkspaceProfile>(&contents) {
                    Ok(profile) => {
                        // NOTE(review): profiles are keyed by the `name`
                        // field inside profile.toml, which may diverge from
                        // the directory name — confirm this is intended.
                        self.profiles.insert(profile.name.clone(), profile);
                    }
                    Err(e) => {
                        tracing::warn!(
                            "skipping malformed workspace profile {}: {e}",
                            profile_path.display()
                        );
                    }
                },
                Err(e) => {
                    tracing::warn!(
                        "skipping unreadable workspace profile {}: {e}",
                        profile_path.display()
                    );
                }
            }
        }
        Ok(())
    }

    /// Switch to the named workspace. Returns an error if it does not exist.
    pub fn switch(&mut self, name: &str) -> Result<&WorkspaceProfile> {
        // Single map lookup instead of the previous contains_key + index.
        match self.profiles.get(name) {
            Some(profile) => {
                self.active = Some(name.to_string());
                Ok(profile)
            }
            None => bail!("workspace '{}' not found", name),
        }
    }

    /// Get the currently active workspace profile, if any.
    pub fn active_profile(&self) -> Option<&WorkspaceProfile> {
        self.active
            .as_deref()
            .and_then(|name| self.profiles.get(name))
    }

    /// Get the active workspace name.
    pub fn active_name(&self) -> Option<&str> {
        self.active.as_deref()
    }

    /// List all loaded workspace names in sorted order.
    pub fn list(&self) -> Vec<&str> {
        let mut names: Vec<&str> = self.profiles.keys().map(String::as_str).collect();
        names.sort_unstable();
        names
    }

    /// Get a workspace profile by name.
    pub fn get(&self, name: &str) -> Option<&WorkspaceProfile> {
        self.profiles.get(name)
    }

    /// Create a new workspace on disk and register it.
    ///
    /// The name must be non-empty and contain only ASCII alphanumerics,
    /// hyphens, or underscores — which also rules out path traversal via
    /// `..` or separators. A default `profile.toml` is written into the new
    /// workspace directory.
    pub async fn create(&mut self, name: &str) -> Result<&WorkspaceProfile> {
        if name.is_empty() {
            bail!("workspace name must not be empty");
        }
        // Validate name: alphanumeric, hyphens, underscores only.
        if !name
            .chars()
            .all(|c| c.is_ascii_alphanumeric() || c == '-' || c == '_')
        {
            bail!(
                "workspace name must contain only alphanumeric characters, hyphens, or underscores"
            );
        }
        if self.profiles.contains_key(name) {
            bail!("workspace '{}' already exists", name);
        }
        let ws_dir = self.workspaces_dir.join(name);
        tokio::fs::create_dir_all(&ws_dir)
            .await
            .with_context(|| format!("creating workspace directory: {}", ws_dir.display()))?;
        // New workspaces get their own memory/audit namespaces by default.
        let profile = WorkspaceProfile {
            name: name.to_string(),
            allowed_domains: Vec::new(),
            credential_profile: None,
            memory_namespace: Some(name.to_string()),
            audit_namespace: Some(name.to_string()),
            tool_restrictions: Vec::new(),
        };
        let toml_str = toml::to_string_pretty(&profile).context("serializing workspace profile")?;
        let profile_path = ws_dir.join("profile.toml");
        tokio::fs::write(&profile_path, toml_str)
            .await
            .with_context(|| format!("writing workspace profile: {}", profile_path.display()))?;
        // Insert and return a reference without a second lookup
        // (previously: insert + index).
        Ok(self.profiles.entry(name.to_string()).or_insert(profile))
    }

    /// Export a workspace profile as a sanitized TOML string with the
    /// credential profile redacted to `***`.
    pub fn export(&self, name: &str) -> Result<String> {
        let profile = self
            .profiles
            .get(name)
            .with_context(|| format!("workspace '{}' not found", name))?;
        // Export-safe copy: never leak the credential profile identifier.
        let export = WorkspaceProfile {
            credential_profile: profile
                .credential_profile
                .as_ref()
                .map(|_| "***".to_string()),
            ..profile.clone()
        };
        toml::to_string_pretty(&export).context("serializing workspace profile for export")
    }

    /// Directory for a specific workspace.
    pub fn workspace_dir(&self, name: &str) -> PathBuf {
        self.workspaces_dir.join(name)
    }

    /// Base workspaces directory.
    pub fn workspaces_dir(&self) -> &Path {
        &self.workspaces_dir
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use tempfile::TempDir;

    /// Fully-populated profile used across the assertions below.
    fn sample_profile(name: &str) -> WorkspaceProfile {
        WorkspaceProfile {
            name: name.to_string(),
            allowed_domains: vec!["example.com".to_string()],
            credential_profile: Some("test-creds".to_string()),
            memory_namespace: Some(format!("{name}_mem")),
            audit_namespace: Some(format!("{name}_audit")),
            tool_restrictions: vec!["shell".to_string()],
        }
    }

    #[test]
    fn workspace_profile_tool_restriction_check() {
        let profile = sample_profile("client_a");
        assert!(profile.is_tool_restricted("shell"));
        // Restriction matching is ASCII case-insensitive.
        assert!(profile.is_tool_restricted("Shell"));
        assert!(!profile.is_tool_restricted("file_read"));
    }

    #[test]
    fn workspace_profile_domain_allowlist_empty_allows_all() {
        let mut profile = sample_profile("client_a");
        profile.allowed_domains.clear();
        assert!(profile.is_domain_allowed("anything.com"));
    }

    #[test]
    fn workspace_profile_domain_allowlist_enforced() {
        let profile = sample_profile("client_a");
        assert!(profile.is_domain_allowed("example.com"));
        assert!(!profile.is_domain_allowed("other.com"));
    }

    #[test]
    fn workspace_profile_effective_namespaces() {
        let profile = sample_profile("client_a");
        assert_eq!(profile.effective_memory_namespace(), "client_a_mem");
        assert_eq!(profile.effective_audit_namespace(), "client_a_audit");
        // With no explicit namespaces, both fall back to the workspace name.
        let fallback = WorkspaceProfile {
            name: "test_ws".to_string(),
            memory_namespace: None,
            audit_namespace: None,
            ..sample_profile("test_ws")
        };
        assert_eq!(fallback.effective_memory_namespace(), "test_ws");
        assert_eq!(fallback.effective_audit_namespace(), "test_ws");
    }

    #[tokio::test]
    async fn workspace_manager_create_and_list() {
        let tmp = TempDir::new().unwrap();
        let mut mgr = WorkspaceManager::new(tmp.path().to_path_buf());
        mgr.create("client_alpha").await.unwrap();
        mgr.create("client_beta").await.unwrap();
        // list() returns names sorted.
        let names = mgr.list();
        assert_eq!(names, vec!["client_alpha", "client_beta"]);
    }

    #[tokio::test]
    async fn workspace_manager_create_rejects_duplicate() {
        let tmp = TempDir::new().unwrap();
        let mut mgr = WorkspaceManager::new(tmp.path().to_path_buf());
        mgr.create("client_a").await.unwrap();
        let result = mgr.create("client_a").await;
        assert!(result.is_err());
    }

    #[tokio::test]
    async fn workspace_manager_create_rejects_invalid_name() {
        let tmp = TempDir::new().unwrap();
        let mut mgr = WorkspaceManager::new(tmp.path().to_path_buf());
        assert!(mgr.create("").await.is_err());
        assert!(mgr.create("bad name").await.is_err());
        assert!(mgr.create("../escape").await.is_err());
    }

    #[tokio::test]
    async fn workspace_manager_switch_and_active() {
        let tmp = TempDir::new().unwrap();
        let mut mgr = WorkspaceManager::new(tmp.path().to_path_buf());
        mgr.create("ws_one").await.unwrap();
        assert!(mgr.active_profile().is_none());
        mgr.switch("ws_one").unwrap();
        assert_eq!(mgr.active_name(), Some("ws_one"));
        assert!(mgr.active_profile().is_some());
    }

    #[test]
    fn workspace_manager_switch_nonexistent_fails() {
        // Collapsed the previous pointless immutable-then-`let mut`
        // rebinding into a single mutable binding.
        let mut mgr = WorkspaceManager::new(PathBuf::from("/tmp/nonexistent"));
        assert!(mgr.switch("no_such_ws").is_err());
    }

    #[tokio::test]
    async fn workspace_manager_load_profiles_from_disk() {
        let tmp = TempDir::new().unwrap();
        let mut mgr = WorkspaceManager::new(tmp.path().to_path_buf());
        // Create a workspace via the manager
        mgr.create("loaded_ws").await.unwrap();
        // Create a fresh manager and load from disk
        let mut mgr2 = WorkspaceManager::new(tmp.path().to_path_buf());
        mgr2.load_profiles().await.unwrap();
        assert_eq!(mgr2.list(), vec!["loaded_ws"]);
        let profile = mgr2.get("loaded_ws").unwrap();
        assert_eq!(profile.name, "loaded_ws");
    }

    #[tokio::test]
    async fn workspace_manager_export_redacts_credentials() {
        let tmp = TempDir::new().unwrap();
        let mut mgr = WorkspaceManager::new(tmp.path().to_path_buf());
        mgr.create("export_test").await.unwrap();
        // Manually set a credential profile
        if let Some(profile) = mgr.profiles.get_mut("export_test") {
            profile.credential_profile = Some("secret-cred-id".to_string());
        }
        let exported = mgr.export("export_test").unwrap();
        assert!(exported.contains("***"));
        assert!(!exported.contains("secret-cred-id"));
    }
}
+22 -24
View File
@@ -37,7 +37,7 @@ use anyhow::{bail, Context, Result};
use clap::{CommandFactory, Parser, Subcommand, ValueEnum};
use dialoguer::{Input, Password};
use serde::{Deserialize, Serialize};
use std::io::Write;
use std::io::{IsTerminal, Write};
use std::path::PathBuf;
use tracing::{info, warn};
use tracing_subscriber::{fmt, EnvFilter};
@@ -166,10 +166,6 @@ enum Commands {
#[arg(long)]
reinit: bool,
/// Run the full interactive setup wizard
#[arg(long)]
interactive: bool,
/// Reconfigure channels only (fast repair flow)
#[arg(long)]
channels_only: bool,
@@ -723,14 +719,14 @@ async fn main() -> Result<()> {
tracing::subscriber::set_global_default(subscriber).expect("setting default subscriber failed");
// Onboard runs quick setup by default, or the interactive wizard with --interactive.
// The onboard wizard uses reqwest::blocking internally, which creates its own
// Tokio runtime. To avoid "Cannot drop a runtime in a context where blocking is
// not allowed", we run the wizard on a blocking thread via spawn_blocking.
// Onboard auto-detects the environment: if stdin/stdout are a TTY and no
// provider flags were given, it runs the full interactive wizard; otherwise
// it runs the quick (scriptable) setup. This means `curl … | bash` and
// `zeroclaw onboard --api-key …` both take the fast path, while a bare
// `zeroclaw onboard` in a terminal launches the wizard.
if let Commands::Onboard {
force,
reinit,
interactive,
channels_only,
api_key,
provider,
@@ -740,7 +736,6 @@ async fn main() -> Result<()> {
{
let force = *force;
let reinit = *reinit;
let interactive = *interactive;
let channels_only = *channels_only;
let api_key = api_key.clone();
let provider = provider.clone();
@@ -750,14 +745,6 @@ async fn main() -> Result<()> {
if reinit && channels_only {
bail!("--reinit and --channels-only cannot be used together");
}
if interactive && channels_only {
bail!("--interactive and --channels-only cannot be used together");
}
if interactive
&& (api_key.is_some() || provider.is_some() || model.is_some() || memory.is_some())
{
bail!("--interactive does not accept --api-key, --provider, --model, or --memory");
}
if channels_only
&& (api_key.is_some() || provider.is_some() || model.is_some() || memory.is_some())
{
@@ -808,9 +795,15 @@ async fn main() -> Result<()> {
}
}
// Auto-detect: run the interactive wizard when in a TTY with no
// provider flags, quick setup otherwise (scriptable path).
let has_provider_flags =
api_key.is_some() || provider.is_some() || model.is_some() || memory.is_some();
let is_tty = std::io::stdin().is_terminal() && std::io::stdout().is_terminal();
let config = if channels_only {
Box::pin(onboard::run_channels_repair_wizard()).await
} else if interactive {
} else if is_tty && !has_provider_flags {
Box::pin(onboard::run_wizard(force)).await
} else {
onboard::run_quick_setup(
@@ -2224,12 +2217,17 @@ mod tests {
}
#[test]
fn onboard_cli_accepts_interactive_flag() {
let cli = Cli::try_parse_from(["zeroclaw", "onboard", "--interactive"])
.expect("onboard --interactive should parse");
fn onboard_cli_rejects_removed_interactive_flag() {
// --interactive was removed; onboard auto-detects TTY instead.
assert!(Cli::try_parse_from(["zeroclaw", "onboard", "--interactive"]).is_err());
}
#[test]
fn onboard_cli_bare_parses() {
let cli = Cli::try_parse_from(["zeroclaw", "onboard"]).expect("bare onboard should parse");
match cli.command {
Commands::Onboard { interactive, .. } => assert!(interactive),
Commands::Onboard { .. } => {}
other => panic!("expected onboard command, got {other:?}"),
}
}
+2
View File
@@ -178,6 +178,7 @@ pub async fn run_wizard(force: bool) -> Result<Config> {
tts: crate::config::TtsConfig::default(),
mcp: crate::config::McpConfig::default(),
nodes: crate::config::NodesConfig::default(),
workspace: crate::config::WorkspaceConfig::default(),
};
println!(
@@ -536,6 +537,7 @@ async fn run_quick_setup_with_home(
tts: crate::config::TtsConfig::default(),
mcp: crate::config::McpConfig::default(),
nodes: crate::config::NodesConfig::default(),
workspace: crate::config::WorkspaceConfig::default(),
};
config.save().await?;
+3
View File
@@ -38,6 +38,7 @@ pub mod policy;
pub mod prompt_guard;
pub mod secrets;
pub mod traits;
pub mod workspace_boundary;
#[allow(unused_imports)]
pub use audit::{AuditEvent, AuditEventType, AuditLogger};
@@ -60,6 +61,8 @@ pub use traits::{NoopSandbox, Sandbox};
pub use leak_detector::{LeakDetector, LeakResult};
#[allow(unused_imports)]
pub use prompt_guard::{GuardAction, GuardResult, PromptGuard};
#[allow(unused_imports)]
pub use workspace_boundary::{BoundaryVerdict, WorkspaceBoundary};
/// Redact sensitive values for safe logging. Shows first 4 chars + "***" suffix.
/// This function intentionally breaks the data-flow taint chain for static analysis.
+211
View File
@@ -0,0 +1,211 @@
//! Workspace isolation boundary enforcement.
//!
//! Prevents cross-workspace data access and enforces per-workspace
//! domain allowlists and tool restrictions.
use crate::config::workspace::WorkspaceProfile;
use std::path::Path;
/// Outcome of a workspace boundary check.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum BoundaryVerdict {
    /// Access is allowed.
    Allow,
    /// Access is denied; the payload is a human-readable reason suitable
    /// for surfacing to the agent or operator.
    Deny(String),
}
/// Enforces isolation boundaries for the active workspace.
///
/// With no profile set (see `inactive`), every check returns
/// `BoundaryVerdict::Allow`.
#[derive(Debug, Clone)]
pub struct WorkspaceBoundary {
    /// The active workspace profile (if workspace isolation is active).
    profile: Option<WorkspaceProfile>,
    /// Whether cross-workspace search is allowed.
    cross_workspace_search: bool,
}
impl WorkspaceBoundary {
    /// Create a boundary enforcer for the given active workspace.
    ///
    /// Passing `profile: None` yields an enforcer that allows everything.
    pub fn new(profile: Option<WorkspaceProfile>, cross_workspace_search: bool) -> Self {
        Self {
            profile,
            cross_workspace_search,
        }
    }

    /// Create a boundary enforcer with no active workspace (no restrictions).
    pub fn inactive() -> Self {
        Self {
            profile: None,
            cross_workspace_search: false,
        }
    }

    /// Check whether a tool is allowed in the current workspace.
    pub fn check_tool_access(&self, tool_name: &str) -> BoundaryVerdict {
        if let Some(profile) = &self.profile {
            if profile.is_tool_restricted(tool_name) {
                return BoundaryVerdict::Deny(format!(
                    "tool '{}' is restricted in workspace '{}'",
                    tool_name, profile.name
                ));
            }
        }
        BoundaryVerdict::Allow
    }

    /// Check whether a domain is allowed in the current workspace.
    pub fn check_domain_access(&self, domain: &str) -> BoundaryVerdict {
        if let Some(profile) = &self.profile {
            if !profile.is_domain_allowed(domain) {
                return BoundaryVerdict::Deny(format!(
                    "domain '{}' is not in the allowlist for workspace '{}'",
                    domain, profile.name
                ));
            }
        }
        BoundaryVerdict::Allow
    }

    /// Check whether accessing a path is allowed given workspace isolation.
    ///
    /// When a workspace is active, paths under `workspaces_base` that belong
    /// to another workspace are denied unless cross-workspace search is
    /// enabled. Any `..` component under the base is denied outright because
    /// this check is purely lexical. Paths outside the base are allowed.
    pub fn check_path_access(&self, path: &Path, workspaces_base: &Path) -> BoundaryVerdict {
        let profile = match &self.profile {
            Some(p) => p,
            None => return BoundaryVerdict::Allow,
        };
        // If the path is under the workspaces base, verify it belongs to the active workspace
        if let Ok(relative) = path.strip_prefix(workspaces_base) {
            // SECURITY FIX: `strip_prefix` is lexical, so a path like
            // `<base>/client_a/../client_b/x` was previously attributed to
            // `client_a` (its first component) while actually resolving into
            // `client_b`. Deny any parent-dir component under the base.
            if relative
                .components()
                .any(|c| matches!(c, std::path::Component::ParentDir))
            {
                return BoundaryVerdict::Deny(format!(
                    "path '{}' uses parent-directory traversal inside the workspaces directory",
                    path.display()
                ));
            }
            // The first normal path segment names the workspace being touched.
            let first_component = relative.components().find_map(|c| match c {
                std::path::Component::Normal(os) => os.to_str(),
                _ => None,
            });
            if let Some(ws_name) = first_component {
                if ws_name != profile.name {
                    if self.cross_workspace_search {
                        // NOTE(review): enabling cross_workspace_search
                        // allows ANY access (reads and writes) to other
                        // workspaces — this layer has no read/write
                        // distinction. Confirm that is acceptable.
                        return BoundaryVerdict::Allow;
                    }
                    return BoundaryVerdict::Deny(format!(
                        "access to workspace '{}' is denied from workspace '{}'",
                        ws_name, profile.name
                    ));
                }
            }
        }
        BoundaryVerdict::Allow
    }

    /// Whether workspace isolation is active.
    pub fn is_active(&self) -> bool {
        self.profile.is_some()
    }

    /// Get the active workspace name, if any.
    pub fn active_workspace_name(&self) -> Option<&str> {
        self.profile.as_ref().map(|p| p.name.as_str())
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::path::PathBuf;

    // NOTE(review): no test covers traversal paths such as
    // `<base>/client_a/../client_b/...` — `check_path_access` relies on the
    // lexical `strip_prefix`, so that case is worth pinning down.

    /// Profile with one allowed domain and one restricted tool, shared by
    /// all boundary tests below.
    fn test_profile() -> WorkspaceProfile {
        WorkspaceProfile {
            name: "client_a".to_string(),
            allowed_domains: vec!["api.example.com".to_string()],
            credential_profile: None,
            memory_namespace: Some("client_a".to_string()),
            audit_namespace: Some("client_a".to_string()),
            tool_restrictions: vec!["shell".to_string()],
        }
    }

    #[test]
    fn boundary_inactive_allows_everything() {
        let boundary = WorkspaceBoundary::inactive();
        assert_eq!(boundary.check_tool_access("shell"), BoundaryVerdict::Allow);
        assert_eq!(
            boundary.check_domain_access("any.domain"),
            BoundaryVerdict::Allow
        );
        assert!(!boundary.is_active());
    }

    #[test]
    fn boundary_denies_restricted_tool() {
        let boundary = WorkspaceBoundary::new(Some(test_profile()), false);
        assert!(matches!(
            boundary.check_tool_access("shell"),
            BoundaryVerdict::Deny(_)
        ));
        assert_eq!(
            boundary.check_tool_access("file_read"),
            BoundaryVerdict::Allow
        );
    }

    #[test]
    fn boundary_denies_unlisted_domain() {
        let boundary = WorkspaceBoundary::new(Some(test_profile()), false);
        assert_eq!(
            boundary.check_domain_access("api.example.com"),
            BoundaryVerdict::Allow
        );
        assert!(matches!(
            boundary.check_domain_access("evil.com"),
            BoundaryVerdict::Deny(_)
        ));
    }

    #[test]
    fn boundary_denies_cross_workspace_path_access() {
        let boundary = WorkspaceBoundary::new(Some(test_profile()), false);
        let base = PathBuf::from("/home/zeroclaw_user/.zeroclaw/workspaces");
        // Access to own workspace is allowed
        let own_path = base.join("client_a").join("data.db");
        assert_eq!(
            boundary.check_path_access(&own_path, &base),
            BoundaryVerdict::Allow
        );
        // Access to other workspace is denied
        let other_path = base.join("client_b").join("data.db");
        assert!(matches!(
            boundary.check_path_access(&other_path, &base),
            BoundaryVerdict::Deny(_)
        ));
    }

    #[test]
    fn boundary_allows_cross_workspace_when_enabled() {
        // With the opt-in flag set, another workspace's files are reachable.
        let boundary = WorkspaceBoundary::new(Some(test_profile()), true);
        let base = PathBuf::from("/home/zeroclaw_user/.zeroclaw/workspaces");
        let other_path = base.join("client_b").join("data.db");
        assert_eq!(
            boundary.check_path_access(&other_path, &base),
            BoundaryVerdict::Allow
        );
    }

    #[test]
    fn boundary_allows_paths_outside_workspaces_dir() {
        // Paths not under the workspaces base are out of scope for this check.
        let boundary = WorkspaceBoundary::new(Some(test_profile()), false);
        let base = PathBuf::from("/home/zeroclaw_user/.zeroclaw/workspaces");
        let outside_path = PathBuf::from("/tmp/something");
        assert_eq!(
            boundary.check_path_access(&outside_path, &base),
            BoundaryVerdict::Allow
        );
    }
}
+91 -6
View File
@@ -442,8 +442,24 @@ fn install_linux_systemd(config: &Config) -> Result<()> {
let exe = std::env::current_exe().context("Failed to resolve current executable")?;
let unit = format!(
"[Unit]\nDescription=ZeroClaw daemon\nAfter=network.target\n\n[Service]\nType=simple\nExecStart={} daemon\nRestart=always\nRestartSec=3\n\n[Install]\nWantedBy=default.target\n",
exe.display()
"[Unit]\n\
Description=ZeroClaw daemon\n\
After=network.target\n\
\n\
[Service]\n\
Type=simple\n\
ExecStart={exe} daemon\n\
Restart=always\n\
RestartSec=3\n\
# Ensure HOME is set so headless browsers can create profile/cache dirs.\n\
Environment=HOME=%h\n\
# Allow inheriting DISPLAY and XDG_RUNTIME_DIR from the user session\n\
# so graphical/headless browsers can function correctly.\n\
PassEnvironment=DISPLAY XDG_RUNTIME_DIR\n\
\n\
[Install]\n\
WantedBy=default.target\n",
exe = exe.display()
);
fs::write(&file, unit)?;
@@ -826,8 +842,8 @@ fn generate_openrc_script(exe_path: &Path, config_dir: &Path) -> String {
name="zeroclaw"
description="ZeroClaw daemon"
command="{}"
command_args="--config-dir {} daemon"
command="{exe}"
command_args="--config-dir {config_dir} daemon"
command_background="yes"
command_user="zeroclaw:zeroclaw"
pidfile="/run/${{RC_SVCNAME}}.pid"
@@ -835,13 +851,21 @@ umask 027
output_log="/var/log/zeroclaw/access.log"
error_log="/var/log/zeroclaw/error.log"
# Provide HOME so headless browsers can create profile/cache directories.
# Without this, Chromium/Firefox fail with sandbox or profile errors.
export HOME="/var/lib/zeroclaw"
depend() {{
need net
after firewall
}}
start_pre() {{
checkpath --directory --owner zeroclaw:zeroclaw --mode 0750 /var/lib/zeroclaw
}}
"#,
exe_path.display(),
config_dir.display()
exe = exe_path.display(),
config_dir = config_dir.display(),
)
}
@@ -1196,6 +1220,67 @@ mod tests {
assert!(script.contains("after firewall"));
}
#[test]
fn generate_openrc_script_sets_home_for_browser() {
    use std::path::PathBuf;
    // Headless browsers need HOME for their profile/cache directories.
    let binary = PathBuf::from("/usr/local/bin/zeroclaw");
    let rendered = generate_openrc_script(&binary, Path::new("/etc/zeroclaw"));
    let exports_home = rendered.contains("export HOME=\"/var/lib/zeroclaw\"");
    assert!(
        exports_home,
        "OpenRC script must set HOME for headless browser support"
    );
}
#[test]
fn generate_openrc_script_creates_home_directory() {
    use std::path::PathBuf;
    // The service account's HOME must exist before the daemon starts.
    let binary = PathBuf::from("/usr/local/bin/zeroclaw");
    let rendered = generate_openrc_script(&binary, Path::new("/etc/zeroclaw"));
    let has_start_pre = rendered.contains("start_pre()");
    assert!(
        has_start_pre,
        "OpenRC script must have start_pre to create HOME dir"
    );
    let creates_dir = rendered.contains("checkpath --directory --owner zeroclaw:zeroclaw");
    assert!(
        creates_dir,
        "start_pre must ensure /var/lib/zeroclaw exists with correct ownership"
    );
}
#[test]
fn systemd_unit_contains_home_and_pass_environment() {
    // NOTE(review): this test asserts against a unit string constructed
    // locally inside the test itself, not against the output of
    // install_linux_systemd(), so it cannot catch regressions in the real
    // unit-file generation. Consider extracting the unit-format logic into
    // a testable helper (mirroring generate_openrc_script) and asserting on
    // that instead.
    let unit = "[Unit]\n\
    Description=ZeroClaw daemon\n\
    After=network.target\n\
    \n\
    [Service]\n\
    Type=simple\n\
    ExecStart=/usr/local/bin/zeroclaw daemon\n\
    Restart=always\n\
    RestartSec=3\n\
    # Ensure HOME is set so headless browsers can create profile/cache dirs.\n\
    Environment=HOME=%h\n\
    # Allow inheriting DISPLAY and XDG_RUNTIME_DIR from the user session\n\
    # so graphical/headless browsers can function correctly.\n\
    PassEnvironment=DISPLAY XDG_RUNTIME_DIR\n\
    \n\
    [Install]\n\
    WantedBy=default.target\n"
    .to_string();
    assert!(
        unit.contains("Environment=HOME=%h"),
        "systemd unit must set HOME for headless browser support"
    );
    assert!(
        unit.contains("PassEnvironment=DISPLAY XDG_RUNTIME_DIR"),
        "systemd unit must pass through display/runtime env vars"
    );
}
#[test]
fn warn_if_binary_in_home_detects_home_path() {
use std::path::PathBuf;
+126
View File
@@ -440,6 +440,12 @@ impl BrowserTool {
async fn run_command(&self, args: &[&str]) -> anyhow::Result<AgentBrowserResponse> {
let mut cmd = Command::new("agent-browser");
// When running as a service (systemd/OpenRC), the process may lack
// HOME which browsers need for profile directories.
if is_service_environment() {
ensure_browser_env(&mut cmd);
}
// Add session if configured
if let Some(ref session) = self.session_name {
cmd.arg("--session").arg(session);
@@ -1461,6 +1467,14 @@ mod native_backend {
args.push(Value::String("--disable-gpu".to_string()));
}
// When running as a service (systemd/OpenRC), the browser sandbox
// fails because the process lacks a user namespace / session.
// --no-sandbox and --disable-dev-shm-usage are required in this context.
if is_service_environment() {
args.push(Value::String("--no-sandbox".to_string()));
args.push(Value::String("--disable-dev-shm-usage".to_string()));
}
if !args.is_empty() {
chrome_options.insert("args".to_string(), Value::Array(args));
}
@@ -2111,6 +2125,44 @@ fn is_non_global_v6(v6: std::net::Ipv6Addr) -> bool {
|| v6.to_ipv4_mapped().is_some_and(is_non_global_v4)
}
/// Detect whether the current process is running inside a service environment
/// (e.g. systemd or OpenRC) where the browser sandbox and environment setup
/// may be restricted.
///
/// Detection signals:
/// - `INVOCATION_ID` / `JOURNAL_STREAM`: set by systemd for spawned units.
/// - On Linux, a missing `HOME` is treated as a service context. This also
///   covers OpenRC, so the previous additional `/run/openrc` probe — which
///   required `HOME` to be unset anyway — was dead code and has been folded
///   into this single check.
fn is_service_environment() -> bool {
    if std::env::var_os("INVOCATION_ID").is_some()
        || std::env::var_os("JOURNAL_STREAM").is_some()
    {
        return true;
    }
    // NOTE(review): on Linux, any process without HOME is classified as a
    // service; confirm this is not too aggressive (e.g. cron jobs).
    #[cfg(target_os = "linux")]
    if std::env::var_os("HOME").is_none() {
        return true;
    }
    false
}
/// Ensure environment variables required by headless browsers are present
/// when running inside a service context.
///
/// - `HOME` gets a `/tmp` fallback when missing, so browsers can create
///   profile/cache directories.
/// - `CHROMIUM_FLAGS` is extended with `--no-sandbox` and
///   `--disable-dev-shm-usage`. Each flag is now checked independently, so a
///   user-supplied `CHROMIUM_FLAGS=--no-sandbox` still gains the dev-shm
///   flag (the previous logic skipped both whenever `--no-sandbox` was
///   already present).
fn ensure_browser_env(cmd: &mut Command) {
    if std::env::var_os("HOME").is_none() {
        cmd.env("HOME", "/tmp");
    }
    let mut flags = std::env::var("CHROMIUM_FLAGS").unwrap_or_default();
    for flag in ["--no-sandbox", "--disable-dev-shm-usage"] {
        // Substring check mirrors the original behavior; flags are
        // space-separated.
        if !flags.contains(flag) {
            if !flags.is_empty() {
                flags.push(' ');
            }
            flags.push_str(flag);
        }
    }
    cmd.env("CHROMIUM_FLAGS", flags);
}
fn host_matches_allowlist(host: &str, allowed: &[String]) -> bool {
allowed.iter().any(|pattern| {
if pattern == "*" {
@@ -2492,4 +2544,78 @@ mod tests {
state.reset_session().await;
});
}
#[test]
fn ensure_browser_env_sets_home_when_missing() {
    // NOTE(review): this test only proves the call does not panic — it never
    // inspects the command's environment, so a regression that stops setting
    // HOME would pass. It also mutates process-global env vars, which races
    // with other #[test] fns running in parallel threads, and the restore
    // below is skipped if anything panics first.
    let original_home = std::env::var_os("HOME");
    unsafe { std::env::remove_var("HOME") };
    let mut cmd = Command::new("true");
    ensure_browser_env(&mut cmd);
    // Function completes without panic — HOME and CHROMIUM_FLAGS set on cmd.
    if let Some(home) = original_home {
        unsafe { std::env::set_var("HOME", home) };
    }
}
#[test]
fn ensure_browser_env_sets_chromium_flags() {
    // NOTE(review): like the HOME test above, this only exercises the
    // no-panic path — it never asserts that CHROMIUM_FLAGS was set on the
    // command. Env mutation here also races with parallel tests.
    let original = std::env::var_os("CHROMIUM_FLAGS");
    unsafe { std::env::remove_var("CHROMIUM_FLAGS") };
    let mut cmd = Command::new("true");
    ensure_browser_env(&mut cmd);
    if let Some(val) = original {
        unsafe { std::env::set_var("CHROMIUM_FLAGS", val) };
    }
}
#[test]
fn is_service_environment_detects_invocation_id() {
    // systemd sets INVOCATION_ID for every spawned unit; presence alone
    // must classify the process as a service.
    // NOTE(review): mutating process-global env races with parallel tests,
    // and the restore is skipped if the assert panics — consider a shared
    // env-lock for these tests.
    let original = std::env::var_os("INVOCATION_ID");
    unsafe { std::env::set_var("INVOCATION_ID", "test-unit-id") };
    assert!(is_service_environment());
    if let Some(val) = original {
        unsafe { std::env::set_var("INVOCATION_ID", val) };
    } else {
        unsafe { std::env::remove_var("INVOCATION_ID") };
    }
}
#[test]
fn is_service_environment_detects_journal_stream() {
    // JOURNAL_STREAM is systemd's "stdout/stderr goes to the journal"
    // marker; presence alone must classify the process as a service.
    // NOTE(review): same env-race/restore-on-panic caveats as the
    // INVOCATION_ID test above.
    let original = std::env::var_os("JOURNAL_STREAM");
    unsafe { std::env::set_var("JOURNAL_STREAM", "8:12345") };
    assert!(is_service_environment());
    if let Some(val) = original {
        unsafe { std::env::set_var("JOURNAL_STREAM", val) };
    } else {
        unsafe { std::env::remove_var("JOURNAL_STREAM") };
    }
}
#[test]
fn is_service_environment_false_in_normal_context() {
    // NOTE(review): the assertion is conditional on HOME being present, so
    // on a Linux runner without HOME this test silently asserts nothing.
    // It also races with the other env-mutating tests when run in parallel.
    let inv = std::env::var_os("INVOCATION_ID");
    let journal = std::env::var_os("JOURNAL_STREAM");
    unsafe { std::env::remove_var("INVOCATION_ID") };
    unsafe { std::env::remove_var("JOURNAL_STREAM") };
    if std::env::var_os("HOME").is_some() {
        assert!(!is_service_environment());
    }
    if let Some(val) = inv {
        unsafe { std::env::set_var("INVOCATION_ID", val) };
    }
    if let Some(val) = journal {
        unsafe { std::env::set_var("JOURNAL_STREAM", val) };
    }
}
}
+19
View File
@@ -62,6 +62,7 @@ pub mod tool_search;
pub mod traits;
pub mod web_fetch;
pub mod web_search_tool;
pub mod workspace_tool;
pub use browser::{BrowserTool, ComputerUseConfig};
pub use browser_open::BrowserOpenTool;
@@ -111,6 +112,7 @@ pub use traits::Tool;
pub use traits::{ToolResult, ToolSpec};
pub use web_fetch::WebFetchTool;
pub use web_search_tool::WebSearchTool;
pub use workspace_tool::WorkspaceTool;
use crate::config::{Config, DelegateAgentConfig};
use crate::memory::Memory;
@@ -413,6 +415,23 @@ pub fn all_tools_with_runtime(
)));
}
// Workspace management tool (conditionally registered when workspace isolation is enabled)
if root_config.workspace.enabled {
let workspaces_dir = if root_config.workspace.workspaces_dir.starts_with("~/") {
let home = directories::UserDirs::new()
.map(|u| u.home_dir().to_path_buf())
.unwrap_or_else(|| std::path::PathBuf::from("."));
home.join(&root_config.workspace.workspaces_dir[2..])
} else {
std::path::PathBuf::from(&root_config.workspace.workspaces_dir)
};
let ws_manager = crate::config::workspace::WorkspaceManager::new(workspaces_dir);
tool_arcs.push(Arc::new(WorkspaceTool::new(
Arc::new(tokio::sync::RwLock::new(ws_manager)),
security.clone(),
)));
}
(boxed_registry_from_arcs(tool_arcs), delegate_handle)
}
+356
View File
@@ -0,0 +1,356 @@
//! Tool for managing multi-client workspaces.
//!
//! Provides `workspace` subcommands: list, switch, create, info, export.
use super::traits::{Tool, ToolResult};
use crate::config::workspace::WorkspaceManager;
use crate::security::policy::ToolOperation;
use crate::security::SecurityPolicy;
use async_trait::async_trait;
use serde_json::json;
use std::fmt::Write;
use std::sync::Arc;
use tokio::sync::RwLock;
/// Agent-callable tool for workspace management operations.
pub struct WorkspaceTool {
    /// Shared workspace state; guarded by an RwLock because list/info only
    /// need read access while switch/create mutate.
    manager: Arc<RwLock<WorkspaceManager>>,
    /// Security policy used to gate mutating actions (switch, create).
    security: Arc<SecurityPolicy>,
}
impl WorkspaceTool {
    /// Construct a workspace tool over shared manager state and the
    /// security policy that authorizes mutating actions.
    pub fn new(manager: Arc<RwLock<WorkspaceManager>>, security: Arc<SecurityPolicy>) -> Self {
        Self { manager, security }
    }
}
#[async_trait]
impl Tool for WorkspaceTool {
/// Tool identifier the agent uses to invoke workspace actions.
fn name(&self) -> &str {
    "workspace"
}
/// One-line capability summary surfaced to the model.
fn description(&self) -> &str {
    "Manage multi-client workspaces. Subcommands: list, switch, create, info, export. Each workspace provides isolated memory, audit, secrets, and tool restrictions."
}
/// JSON schema for the tool arguments: `action` is required; `name` is
/// needed for switch/create/export and optional for info (which falls back
/// to the active workspace).
fn parameters_schema(&self) -> serde_json::Value {
    json!({
        "type": "object",
        "properties": {
            "action": {
                "type": "string",
                "enum": ["list", "switch", "create", "info", "export"],
                "description": "Workspace action to perform"
            },
            "name": {
                "type": "string",
                "description": "Workspace name (required for switch, create, export)"
            }
        },
        "required": ["action"]
    })
}
async fn execute(&self, args: serde_json::Value) -> anyhow::Result<ToolResult> {
let action = args
.get("action")
.and_then(|v| v.as_str())
.ok_or_else(|| anyhow::anyhow!("Missing 'action' parameter"))?;
let name = args.get("name").and_then(|v| v.as_str());
match action {
"list" => {
let mgr = self.manager.read().await;
let names = mgr.list();
let active = mgr.active_name();
if names.is_empty() {
return Ok(ToolResult {
success: true,
output: "No workspaces configured.".to_string(),
error: None,
});
}
let mut output = format!("Workspaces ({}):\n", names.len());
for ws_name in &names {
let marker = if Some(*ws_name) == active {
" (active)"
} else {
""
};
let _ = writeln!(output, " - {ws_name}{marker}");
}
Ok(ToolResult {
success: true,
output,
error: None,
})
}
"switch" => {
if let Err(error) = self
.security
.enforce_tool_operation(ToolOperation::Act, "workspace")
{
return Ok(ToolResult {
success: false,
output: String::new(),
error: Some(error),
});
}
let ws_name = name.ok_or_else(|| {
anyhow::anyhow!("'name' parameter is required for switch action")
})?;
let mut mgr = self.manager.write().await;
match mgr.switch(ws_name) {
Ok(profile) => Ok(ToolResult {
success: true,
output: format!(
"Switched to workspace '{}'. Memory namespace: {}, Audit namespace: {}",
profile.name,
profile.effective_memory_namespace(),
profile.effective_audit_namespace()
),
error: None,
}),
Err(e) => Ok(ToolResult {
success: false,
output: String::new(),
error: Some(e.to_string()),
}),
}
}
"create" => {
if let Err(error) = self
.security
.enforce_tool_operation(ToolOperation::Act, "workspace")
{
return Ok(ToolResult {
success: false,
output: String::new(),
error: Some(error),
});
}
let ws_name = name.ok_or_else(|| {
anyhow::anyhow!("'name' parameter is required for create action")
})?;
let mut mgr = self.manager.write().await;
match mgr.create(ws_name).await {
Ok(profile) => {
let name = profile.name.clone();
let dir = mgr.workspace_dir(ws_name);
Ok(ToolResult {
success: true,
output: format!("Created workspace '{}' at {}", name, dir.display()),
error: None,
})
}
Err(e) => Ok(ToolResult {
success: false,
output: String::new(),
error: Some(e.to_string()),
}),
}
}
"info" => {
let mgr = self.manager.read().await;
let target_name = name.or_else(|| mgr.active_name());
match target_name {
Some(ws_name) => match mgr.get(ws_name) {
Some(profile) => {
let is_active = mgr.active_name() == Some(ws_name);
let mut output = format!("Workspace: {}\n", profile.name);
let _ = writeln!(
output,
" Status: {}",
if is_active { "active" } else { "inactive" }
);
let _ = writeln!(
output,
" Memory namespace: {}",
profile.effective_memory_namespace()
);
let _ = writeln!(
output,
" Audit namespace: {}",
profile.effective_audit_namespace()
);
if !profile.allowed_domains.is_empty() {
let _ = writeln!(
output,
" Allowed domains: {}",
profile.allowed_domains.join(", ")
);
}
if !profile.tool_restrictions.is_empty() {
let _ = writeln!(
output,
" Restricted tools: {}",
profile.tool_restrictions.join(", ")
);
}
Ok(ToolResult {
success: true,
output,
error: None,
})
}
None => Ok(ToolResult {
success: false,
output: String::new(),
error: Some(format!("workspace '{}' not found", ws_name)),
}),
},
None => Ok(ToolResult {
success: true,
output: "No workspace is currently active. Use 'workspace switch <name>' to activate one.".to_string(),
error: None,
}),
}
}
"export" => {
let mgr = self.manager.read().await;
let ws_name = name.or_else(|| mgr.active_name()).ok_or_else(|| {
anyhow::anyhow!("'name' parameter is required when no workspace is active")
})?;
match mgr.export(ws_name) {
Ok(toml_str) => Ok(ToolResult {
success: true,
output: format!(
"Exported workspace '{}' config (secrets redacted):\n\n{}",
ws_name, toml_str
),
error: None,
}),
Err(e) => Ok(ToolResult {
success: false,
output: String::new(),
error: Some(e.to_string()),
}),
}
}
other => Ok(ToolResult {
success: false,
output: String::new(),
error: Some(format!(
"unknown workspace action '{}'. Expected: list, switch, create, info, export",
other
)),
}),
}
}
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::security::SecurityPolicy;
    use tempfile::TempDir;

    /// Build a WorkspaceTool rooted at a throwaway directory, using the
    /// default (permissive) security policy.
    fn make_tool(tmp: &TempDir) -> WorkspaceTool {
        let manager = WorkspaceManager::new(tmp.path().to_path_buf());
        let policy = SecurityPolicy::default();
        WorkspaceTool::new(Arc::new(RwLock::new(manager)), Arc::new(policy))
    }

    #[tokio::test]
    async fn workspace_tool_list_empty() {
        let tmp = TempDir::new().unwrap();
        let tool = make_tool(&tmp);

        let res = tool.execute(json!({"action": "list"})).await.unwrap();

        assert!(res.success);
        assert!(res.output.contains("No workspaces"));
    }

    #[tokio::test]
    async fn workspace_tool_create_and_list() {
        let tmp = TempDir::new().unwrap();
        let tool = make_tool(&tmp);

        let created = tool
            .execute(json!({"action": "create", "name": "test_client"}))
            .await
            .unwrap();
        assert!(created.success);
        assert!(created.output.contains("test_client"));

        // The new workspace must show up in a subsequent listing.
        let listed = tool.execute(json!({"action": "list"})).await.unwrap();
        assert!(listed.success);
        assert!(listed.output.contains("test_client"));
    }

    #[tokio::test]
    async fn workspace_tool_switch_and_info() {
        let tmp = TempDir::new().unwrap();
        let tool = make_tool(&tmp);
        tool.execute(json!({"action": "create", "name": "ws_test"}))
            .await
            .unwrap();

        let switched = tool
            .execute(json!({"action": "switch", "name": "ws_test"}))
            .await
            .unwrap();
        assert!(switched.success);
        assert!(switched.output.contains("Switched to workspace"));

        // With no explicit name, info reports on the now-active workspace.
        let info = tool.execute(json!({"action": "info"})).await.unwrap();
        assert!(info.success);
        assert!(info.output.contains("ws_test"));
        assert!(info.output.contains("active"));
    }

    #[tokio::test]
    async fn workspace_tool_export_redacts() {
        let tmp = TempDir::new().unwrap();
        let tool = make_tool(&tmp);
        tool.execute(json!({"action": "create", "name": "export_ws"}))
            .await
            .unwrap();

        let exported = tool
            .execute(json!({"action": "export", "name": "export_ws"}))
            .await
            .unwrap();
        assert!(exported.success);
        assert!(exported.output.contains("export_ws"));
    }

    #[tokio::test]
    async fn workspace_tool_unknown_action() {
        let tmp = TempDir::new().unwrap();
        let tool = make_tool(&tmp);

        let res = tool.execute(json!({"action": "destroy"})).await.unwrap();

        assert!(!res.success);
        assert!(res.error.unwrap().contains("unknown workspace action"));
    }

    #[tokio::test]
    async fn workspace_tool_switch_nonexistent() {
        let tmp = TempDir::new().unwrap();
        let tool = make_tool(&tmp);

        let res = tool
            .execute(json!({"action": "switch", "name": "ghost"}))
            .await
            .unwrap();

        assert!(!res.success);
        assert!(res.error.unwrap().contains("not found"));
    }
}