Merge branch 'master' into work/openvpn-tunnel

This commit is contained in:
Argenis
2026-03-15 23:10:15 -04:00
committed by GitHub
15 changed files with 1205 additions and 4 deletions
+78 -1
View File
@@ -37,6 +37,7 @@ pub struct Agent {
classification_config: crate::config::QueryClassificationConfig,
available_hints: Vec<String>,
route_model_by_hint: HashMap<String, String>,
allowed_tools: Option<Vec<String>>,
}
pub struct AgentBuilder {
@@ -58,6 +59,7 @@ pub struct AgentBuilder {
classification_config: Option<crate::config::QueryClassificationConfig>,
available_hints: Option<Vec<String>>,
route_model_by_hint: Option<HashMap<String, String>>,
allowed_tools: Option<Vec<String>>,
}
impl AgentBuilder {
@@ -81,6 +83,7 @@ impl AgentBuilder {
classification_config: None,
available_hints: None,
route_model_by_hint: None,
allowed_tools: None,
}
}
@@ -180,10 +183,19 @@ impl AgentBuilder {
self
}
/// Sets an optional allowlist of tool names for the built agent.
///
/// `None` (the default) keeps every registered tool available; `Some(list)`
/// restricts the agent to tools whose name appears in `list`.
pub fn allowed_tools(mut self, allowed_tools: Option<Vec<String>>) -> Self {
    self.allowed_tools = allowed_tools;
    self
}
pub fn build(self) -> Result<Agent> {
let tools = self
let mut tools = self
.tools
.ok_or_else(|| anyhow::anyhow!("tools are required"))?;
let allowed = self.allowed_tools.clone();
if let Some(ref allow_list) = allowed {
tools.retain(|t| allow_list.iter().any(|name| name == t.name()));
}
let tool_specs = tools.iter().map(|tool| tool.spec()).collect();
Ok(Agent {
@@ -223,6 +235,7 @@ impl AgentBuilder {
classification_config: self.classification_config.unwrap_or_default(),
available_hints: self.available_hints.unwrap_or_default(),
route_model_by_hint: self.route_model_by_hint.unwrap_or_default(),
allowed_tools: allowed,
})
}
}
@@ -892,4 +905,68 @@ mod tests {
let seen = seen_models.lock();
assert_eq!(seen.as_slice(), &["hint:fast".to_string()]);
}
#[test]
fn builder_allowed_tools_none_keeps_all_tools() {
let provider = Box::new(MockProvider {
responses: Mutex::new(vec![]),
});
let memory_cfg = crate::config::MemoryConfig {
backend: "none".into(),
..crate::config::MemoryConfig::default()
};
let mem: Arc<dyn Memory> = Arc::from(
crate::memory::create_memory(&memory_cfg, std::path::Path::new("/tmp"), None)
.expect("memory creation should succeed with valid config"),
);
let observer: Arc<dyn Observer> = Arc::from(crate::observability::NoopObserver {});
let agent = Agent::builder()
.provider(provider)
.tools(vec![Box::new(MockTool)])
.memory(mem)
.observer(observer)
.tool_dispatcher(Box::new(NativeToolDispatcher))
.workspace_dir(std::path::PathBuf::from("/tmp"))
.allowed_tools(None)
.build()
.expect("agent builder should succeed with valid config");
assert_eq!(agent.tool_specs.len(), 1);
assert_eq!(agent.tool_specs[0].name, "echo");
}
#[test]
fn builder_allowed_tools_some_filters_tools() {
let provider = Box::new(MockProvider {
responses: Mutex::new(vec![]),
});
let memory_cfg = crate::config::MemoryConfig {
backend: "none".into(),
..crate::config::MemoryConfig::default()
};
let mem: Arc<dyn Memory> = Arc::from(
crate::memory::create_memory(&memory_cfg, std::path::Path::new("/tmp"), None)
.expect("memory creation should succeed with valid config"),
);
let observer: Arc<dyn Observer> = Arc::from(crate::observability::NoopObserver {});
let agent = Agent::builder()
.provider(provider)
.tools(vec![Box::new(MockTool)])
.memory(mem)
.observer(observer)
.tool_dispatcher(Box::new(NativeToolDispatcher))
.workspace_dir(std::path::PathBuf::from("/tmp"))
.allowed_tools(Some(vec!["nonexistent".to_string()]))
.build()
.expect("agent builder should succeed with valid config");
assert!(
agent.tool_specs.is_empty(),
"No tools should match a non-existent allowlist entry"
);
}
}
+85 -2
View File
@@ -93,6 +93,24 @@ pub(crate) fn filter_tool_specs_for_turn(
.collect()
}
/// Filters a tool spec list by an optional capability allowlist.
///
/// When `allowed` is `None`, all specs pass through unchanged.
/// When `allowed` is `Some(list)`, only specs whose name appears in the list
/// are retained (original order preserved). Unknown names in the allowlist
/// are silently ignored.
pub(crate) fn filter_by_allowed_tools(
    specs: Vec<crate::tools::ToolSpec>,
    allowed: Option<&[String]>,
) -> Vec<crate::tools::ToolSpec> {
    // No allowlist configured: leave the spec list untouched.
    let Some(allow_list) = allowed else {
        return specs;
    };
    let mut retained = specs;
    retained.retain(|spec| allow_list.iter().any(|name| name == &spec.name));
    retained
}
/// Computes the list of MCP tool names that should be excluded for a given turn
/// based on `tool_filter_groups` and the user message.
///
@@ -2994,6 +3012,7 @@ pub async fn run(
peripheral_overrides: Vec<String>,
interactive: bool,
session_state_file: Option<PathBuf>,
allowed_tools: Option<Vec<String>>,
) -> Result<String> {
// ── Wire up agnostic subsystems ──────────────────────────────
let base_observer = observability::create_observer(&config.observability);
@@ -3055,6 +3074,19 @@ pub async fn run(
tools_registry.extend(peripheral_tools);
}
// ── Capability-based tool access control ─────────────────────
// When `allowed_tools` is `Some(list)`, restrict the tool registry to only
// those tools whose name appears in the list. Unknown names are silently
// ignored. When `None`, all tools remain available (backward compatible).
if let Some(ref allow_list) = allowed_tools {
tools_registry.retain(|t| allow_list.iter().any(|name| name == t.name()));
tracing::info!(
allowed = allow_list.len(),
retained = tools_registry.len(),
"Applied capability-based tool access filter"
);
}
// ── Wire MCP tools (non-fatal) — CLI path ────────────────────
// NOTE: MCP tools are injected after built-in tool filtering
// (filter_primary_agent_tools_or_fail / agent.allowed_tools / agent.denied_tools).
@@ -3874,7 +3906,7 @@ mod tests {
use std::time::Duration;
#[test]
fn test_scrub_credentials() {
fn scrub_credentials_redacts_bearer_token() {
let input = "API_KEY=sk-1234567890abcdef; token: 1234567890; password=\"secret123456\"";
let scrubbed = scrub_credentials(input);
assert!(scrubbed.contains("API_KEY=sk-1*[REDACTED]"));
@@ -3885,7 +3917,7 @@ mod tests {
}
#[test]
fn test_scrub_credentials_json() {
fn scrub_credentials_redacts_json_api_key() {
let input = r#"{"api_key": "sk-1234567890", "other": "public"}"#;
let scrubbed = scrub_credentials(input);
assert!(scrubbed.contains("\"api_key\": \"sk-1*[REDACTED]\""));
@@ -6641,4 +6673,55 @@ Let me check the result."#;
assert_eq!(result, "I could not execute that command.");
}
// ── filter_by_allowed_tools tests ─────────────────────────────────────
#[test]
fn filter_by_allowed_tools_none_passes_all() {
let specs = vec![
make_spec("shell"),
make_spec("memory_store"),
make_spec("file_read"),
];
let result = filter_by_allowed_tools(specs, None);
assert_eq!(result.len(), 3);
}
#[test]
fn filter_by_allowed_tools_some_restricts_to_listed() {
let specs = vec![
make_spec("shell"),
make_spec("memory_store"),
make_spec("file_read"),
];
let allowed = vec!["shell".to_string(), "memory_store".to_string()];
let result = filter_by_allowed_tools(specs, Some(&allowed));
let names: Vec<&str> = result.iter().map(|s| s.name.as_str()).collect();
assert_eq!(names.len(), 2);
assert!(names.contains(&"shell"));
assert!(names.contains(&"memory_store"));
assert!(!names.contains(&"file_read"));
}
#[test]
fn filter_by_allowed_tools_unknown_names_silently_ignored() {
let specs = vec![make_spec("shell"), make_spec("file_read")];
let allowed = vec![
"shell".to_string(),
"nonexistent_tool".to_string(),
"another_missing".to_string(),
];
let result = filter_by_allowed_tools(specs, Some(&allowed));
let names: Vec<&str> = result.iter().map(|s| s.name.as_str()).collect();
assert_eq!(names.len(), 1);
assert!(names.contains(&"shell"));
}
#[test]
fn filter_by_allowed_tools_empty_list_excludes_all() {
let specs = vec![make_spec("shell"), make_spec("file_read")];
let allowed: Vec<String> = vec![];
let result = filter_by_allowed_tools(specs, Some(&allowed));
assert!(result.is_empty());
}
}
+2 -1
View File
@@ -1,5 +1,6 @@
pub mod schema;
pub mod traits;
pub mod workspace;
#[allow(unused_imports)]
pub use schema::{
@@ -19,7 +20,7 @@ pub use schema::{
SkillsConfig, SkillsPromptInjectionMode, SlackConfig, StorageConfig, StorageProviderConfig,
StorageProviderSection, StreamMode, SwarmConfig, SwarmStrategy, TelegramConfig,
ToolFilterGroup, ToolFilterGroupMode, TranscriptionConfig, TtsConfig, TunnelConfig,
WebFetchConfig, WebSearchConfig, WebhookConfig,
WebFetchConfig, WebSearchConfig, WebhookConfig, WorkspaceConfig,
};
pub fn name_and_presence<T: traits::ChannelConfig>(channel: Option<&T>) -> (&'static str, bool) {
+55
View File
@@ -259,6 +259,58 @@ pub struct Config {
/// Dynamic node discovery configuration (`[nodes]`).
#[serde(default)]
pub nodes: NodesConfig,
/// Multi-client workspace isolation configuration (`[workspace]`).
#[serde(default)]
pub workspace: WorkspaceConfig,
}
/// Multi-client workspace isolation configuration.
///
/// When enabled, each client engagement gets an isolated workspace with
/// separate memory, audit, secrets, and tool restrictions.
#[allow(clippy::struct_excessive_bools)]
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
pub struct WorkspaceConfig {
/// Enable workspace isolation. Default: false.
#[serde(default)]
pub enabled: bool,
/// Currently active workspace name.
#[serde(default)]
pub active_workspace: Option<String>,
/// Base directory for workspace profiles.
#[serde(default = "default_workspaces_dir")]
pub workspaces_dir: String,
/// Isolate memory databases per workspace. Default: true.
#[serde(default = "default_true")]
pub isolate_memory: bool,
/// Isolate secrets namespaces per workspace. Default: true.
#[serde(default = "default_true")]
pub isolate_secrets: bool,
/// Isolate audit logs per workspace. Default: true.
#[serde(default = "default_true")]
pub isolate_audit: bool,
/// Allow searching across workspaces. Default: false (security).
#[serde(default)]
pub cross_workspace_search: bool,
}
/// Serde default for `WorkspaceConfig::workspaces_dir`.
/// The `~` prefix is expanded by consumers at load time, not here.
fn default_workspaces_dir() -> String {
    "~/.zeroclaw/workspaces".to_string()
}
impl Default for WorkspaceConfig {
    /// Mirrors the per-field serde `default` attributes on the struct, so a
    /// missing `[workspace]` section and `WorkspaceConfig::default()` agree.
    fn default() -> Self {
        Self {
            enabled: false,
            active_workspace: None,
            workspaces_dir: default_workspaces_dir(),
            isolate_memory: true,
            isolate_secrets: true,
            isolate_audit: true,
            cross_workspace_search: false,
        }
    }
}
/// Named provider profile definition compatible with Codex app-server style config.
@@ -4287,6 +4339,7 @@ impl Default for Config {
tts: TtsConfig::default(),
mcp: McpConfig::default(),
nodes: NodesConfig::default(),
workspace: WorkspaceConfig::default(),
}
}
}
@@ -6408,6 +6461,7 @@ default_temperature = 0.7
tts: TtsConfig::default(),
mcp: McpConfig::default(),
nodes: NodesConfig::default(),
workspace: WorkspaceConfig::default(),
};
let toml_str = toml::to_string_pretty(&config).unwrap();
@@ -6700,6 +6754,7 @@ tool_dispatcher = "xml"
tts: TtsConfig::default(),
mcp: McpConfig::default(),
nodes: NodesConfig::default(),
workspace: WorkspaceConfig::default(),
};
config.save().await.unwrap();
+382
View File
@@ -0,0 +1,382 @@
//! Workspace profile management for multi-client isolation.
//!
//! Each workspace represents an isolated client engagement with its own
//! memory namespace, audit trail, secrets scope, and tool restrictions.
//! Profiles are stored under `~/.zeroclaw/workspaces/<client_name>/`.
use anyhow::{bail, Context, Result};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::path::{Path, PathBuf};
/// A single client workspace profile.
///
/// Deserialized from `profile.toml` inside the workspace's directory; every
/// field except `name` is optional in the TOML (`#[serde(default)]`).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct WorkspaceProfile {
    /// Human-readable workspace name (also used as directory name).
    pub name: String,
    /// Allowed domains for network access within this workspace.
    /// An empty list means all domains are allowed.
    #[serde(default)]
    pub allowed_domains: Vec<String>,
    /// Credential profile name scoped to this workspace.
    /// Redacted to `***` when the profile is exported.
    #[serde(default)]
    pub credential_profile: Option<String>,
    /// Memory namespace prefix for isolation (falls back to `name`).
    #[serde(default)]
    pub memory_namespace: Option<String>,
    /// Audit namespace prefix for isolation (falls back to `name`).
    #[serde(default)]
    pub audit_namespace: Option<String>,
    /// Tool names denied in this workspace (e.g. `["shell"]` to block shell access).
    /// Matching is ASCII case-insensitive.
    #[serde(default)]
    pub tool_restrictions: Vec<String>,
}
impl WorkspaceProfile {
    /// Effective memory namespace (falls back to workspace name).
    pub fn effective_memory_namespace(&self) -> &str {
        self.memory_namespace
            .as_deref()
            .unwrap_or(self.name.as_str())
    }

    /// Effective audit namespace (falls back to workspace name).
    pub fn effective_audit_namespace(&self) -> &str {
        self.audit_namespace
            .as_deref()
            .unwrap_or(self.name.as_str())
    }

    /// Returns true if the given tool name is restricted in this workspace.
    /// Matching is ASCII case-insensitive, so `"Shell"` matches `"shell"`.
    pub fn is_tool_restricted(&self, tool_name: &str) -> bool {
        self.tool_restrictions
            .iter()
            .any(|r| r.eq_ignore_ascii_case(tool_name))
    }

    /// Returns true if the given domain is allowed for this workspace.
    /// An empty allowlist means all domains are allowed.
    /// Matching is ASCII case-insensitive.
    pub fn is_domain_allowed(&self, domain: &str) -> bool {
        if self.allowed_domains.is_empty() {
            return true;
        }
        // `eq_ignore_ascii_case` avoids allocating a lowercased copy of the
        // query and of every allowlist entry on each call (the previous code
        // went through `to_ascii_lowercase`); the comparison result is
        // identical since only ASCII letters were being lowered.
        self.allowed_domains
            .iter()
            .any(|d| d.eq_ignore_ascii_case(domain))
    }
}
/// Manages loading and switching between client workspace profiles.
#[derive(Debug, Clone)]
pub struct WorkspaceManager {
/// Base directory containing all workspace subdirectories.
workspaces_dir: PathBuf,
/// Loaded workspace profiles keyed by name.
profiles: HashMap<String, WorkspaceProfile>,
/// Currently active workspace name.
active: Option<String>,
}
impl WorkspaceManager {
    /// Create a new workspace manager rooted at the given directory.
    ///
    /// Performs no I/O; call [`Self::load_profiles`] to populate the
    /// profile map from disk.
    pub fn new(workspaces_dir: PathBuf) -> Self {
        Self {
            workspaces_dir,
            profiles: HashMap::new(),
            active: None,
        }
    }

    /// Load all workspace profiles from disk.
    ///
    /// Each subdirectory of `workspaces_dir` that contains a `profile.toml`
    /// is treated as a workspace.
    ///
    /// # Errors
    /// Fails only when the base directory cannot be listed; individual
    /// profiles that are malformed or unreadable are skipped with a warning.
    pub async fn load_profiles(&mut self) -> Result<()> {
        // Rebuild from scratch so entries from a previous load don't linger.
        self.profiles.clear();
        let dir = &self.workspaces_dir;
        // A missing base directory simply means "no workspaces yet".
        if !dir.exists() {
            return Ok(());
        }
        let mut entries = tokio::fs::read_dir(dir)
            .await
            .with_context(|| format!("reading workspaces directory: {}", dir.display()))?;
        while let Some(entry) = entries.next_entry().await? {
            let path = entry.path();
            if !path.is_dir() {
                continue;
            }
            let profile_path = path.join("profile.toml");
            if !profile_path.exists() {
                continue;
            }
            match tokio::fs::read_to_string(&profile_path).await {
                Ok(contents) => match toml::from_str::<WorkspaceProfile>(&contents) {
                    Ok(profile) => {
                        // NOTE(review): keyed by the `name` field inside the
                        // TOML, which may differ from the directory name —
                        // confirm that divergence is intended.
                        self.profiles.insert(profile.name.clone(), profile);
                    }
                    Err(e) => {
                        tracing::warn!(
                            "skipping malformed workspace profile {}: {e}",
                            profile_path.display()
                        );
                    }
                },
                Err(e) => {
                    tracing::warn!(
                        "skipping unreadable workspace profile {}: {e}",
                        profile_path.display()
                    );
                }
            }
        }
        Ok(())
    }

    /// Switch to the named workspace. Returns an error if it does not exist.
    pub fn switch(&mut self, name: &str) -> Result<&WorkspaceProfile> {
        if !self.profiles.contains_key(name) {
            bail!("workspace '{}' not found", name);
        }
        self.active = Some(name.to_string());
        // Indexing cannot panic: existence was checked above.
        Ok(&self.profiles[name])
    }

    /// Get the currently active workspace profile, if any.
    pub fn active_profile(&self) -> Option<&WorkspaceProfile> {
        self.active
            .as_deref()
            .and_then(|name| self.profiles.get(name))
    }

    /// Get the active workspace name.
    pub fn active_name(&self) -> Option<&str> {
        self.active.as_deref()
    }

    /// List all loaded workspace names, sorted for deterministic output.
    pub fn list(&self) -> Vec<&str> {
        let mut names: Vec<&str> = self.profiles.keys().map(String::as_str).collect();
        names.sort_unstable();
        names
    }

    /// Get a workspace profile by name.
    pub fn get(&self, name: &str) -> Option<&WorkspaceProfile> {
        self.profiles.get(name)
    }

    /// Create a new workspace on disk and register it.
    ///
    /// # Errors
    /// Fails when the name is empty, contains characters outside
    /// `[A-Za-z0-9_-]` (which also blocks path traversal such as `../x`),
    /// already exists, or the directory/profile cannot be written.
    pub async fn create(&mut self, name: &str) -> Result<&WorkspaceProfile> {
        if name.is_empty() {
            bail!("workspace name must not be empty");
        }
        // Validate name: alphanumeric, hyphens, underscores only
        if !name
            .chars()
            .all(|c| c.is_ascii_alphanumeric() || c == '-' || c == '_')
        {
            bail!(
                "workspace name must contain only alphanumeric characters, hyphens, or underscores"
            );
        }
        if self.profiles.contains_key(name) {
            bail!("workspace '{}' already exists", name);
        }
        let ws_dir = self.workspaces_dir.join(name);
        tokio::fs::create_dir_all(&ws_dir)
            .await
            .with_context(|| format!("creating workspace directory: {}", ws_dir.display()))?;
        // Fresh workspaces default to namespaces named after the workspace
        // itself, with no domain or tool restrictions.
        let profile = WorkspaceProfile {
            name: name.to_string(),
            allowed_domains: Vec::new(),
            credential_profile: None,
            memory_namespace: Some(name.to_string()),
            audit_namespace: Some(name.to_string()),
            tool_restrictions: Vec::new(),
        };
        let toml_str = toml::to_string_pretty(&profile).context("serializing workspace profile")?;
        let profile_path = ws_dir.join("profile.toml");
        tokio::fs::write(&profile_path, toml_str)
            .await
            .with_context(|| format!("writing workspace profile: {}", profile_path.display()))?;
        self.profiles.insert(name.to_string(), profile);
        Ok(&self.profiles[name])
    }

    /// Export a workspace profile as a sanitized TOML string (no secrets).
    pub fn export(&self, name: &str) -> Result<String> {
        let profile = self
            .profiles
            .get(name)
            .with_context(|| format!("workspace '{}' not found", name))?;
        // Create an export-safe copy with credential_profile redacted
        let export = WorkspaceProfile {
            credential_profile: profile
                .credential_profile
                .as_ref()
                .map(|_| "***".to_string()),
            ..profile.clone()
        };
        toml::to_string_pretty(&export).context("serializing workspace profile for export")
    }

    /// Directory for a specific workspace.
    pub fn workspace_dir(&self, name: &str) -> PathBuf {
        self.workspaces_dir.join(name)
    }

    /// Base workspaces directory.
    pub fn workspaces_dir(&self) -> &Path {
        &self.workspaces_dir
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use tempfile::TempDir;

    /// Builds a fully-populated profile for exercising the accessors.
    fn sample_profile(name: &str) -> WorkspaceProfile {
        WorkspaceProfile {
            name: name.to_string(),
            allowed_domains: vec!["example.com".to_string()],
            credential_profile: Some("test-creds".to_string()),
            memory_namespace: Some(format!("{name}_mem")),
            audit_namespace: Some(format!("{name}_audit")),
            tool_restrictions: vec!["shell".to_string()],
        }
    }

    #[test]
    fn workspace_profile_tool_restriction_check() {
        let profile = sample_profile("client_a");
        assert!(profile.is_tool_restricted("shell"));
        // Restriction matching is ASCII case-insensitive.
        assert!(profile.is_tool_restricted("Shell"));
        assert!(!profile.is_tool_restricted("file_read"));
    }

    #[test]
    fn workspace_profile_domain_allowlist_empty_allows_all() {
        let mut profile = sample_profile("client_a");
        profile.allowed_domains.clear();
        assert!(profile.is_domain_allowed("anything.com"));
    }

    #[test]
    fn workspace_profile_domain_allowlist_enforced() {
        let profile = sample_profile("client_a");
        assert!(profile.is_domain_allowed("example.com"));
        assert!(!profile.is_domain_allowed("other.com"));
    }

    #[test]
    fn workspace_profile_effective_namespaces() {
        let profile = sample_profile("client_a");
        assert_eq!(profile.effective_memory_namespace(), "client_a_mem");
        assert_eq!(profile.effective_audit_namespace(), "client_a_audit");
        // With no explicit namespaces, both fall back to the workspace name.
        let fallback = WorkspaceProfile {
            name: "test_ws".to_string(),
            memory_namespace: None,
            audit_namespace: None,
            ..sample_profile("test_ws")
        };
        assert_eq!(fallback.effective_memory_namespace(), "test_ws");
        assert_eq!(fallback.effective_audit_namespace(), "test_ws");
    }

    #[tokio::test]
    async fn workspace_manager_create_and_list() {
        let tmp = TempDir::new().unwrap();
        let mut mgr = WorkspaceManager::new(tmp.path().to_path_buf());
        mgr.create("client_alpha").await.unwrap();
        mgr.create("client_beta").await.unwrap();
        let names = mgr.list();
        assert_eq!(names, vec!["client_alpha", "client_beta"]);
    }

    #[tokio::test]
    async fn workspace_manager_create_rejects_duplicate() {
        let tmp = TempDir::new().unwrap();
        let mut mgr = WorkspaceManager::new(tmp.path().to_path_buf());
        mgr.create("client_a").await.unwrap();
        let result = mgr.create("client_a").await;
        assert!(result.is_err());
    }

    #[tokio::test]
    async fn workspace_manager_create_rejects_invalid_name() {
        let tmp = TempDir::new().unwrap();
        let mut mgr = WorkspaceManager::new(tmp.path().to_path_buf());
        assert!(mgr.create("").await.is_err());
        assert!(mgr.create("bad name").await.is_err());
        assert!(mgr.create("../escape").await.is_err());
    }

    #[tokio::test]
    async fn workspace_manager_switch_and_active() {
        let tmp = TempDir::new().unwrap();
        let mut mgr = WorkspaceManager::new(tmp.path().to_path_buf());
        mgr.create("ws_one").await.unwrap();
        assert!(mgr.active_profile().is_none());
        mgr.switch("ws_one").unwrap();
        assert_eq!(mgr.active_name(), Some("ws_one"));
        assert!(mgr.active_profile().is_some());
    }

    #[test]
    fn workspace_manager_switch_nonexistent_fails() {
        // `mut` belongs on the original binding; the previous
        // immutable-binding-then-`let mut` rebinding was redundant.
        let mut mgr = WorkspaceManager::new(PathBuf::from("/tmp/nonexistent"));
        assert!(mgr.switch("no_such_ws").is_err());
    }

    #[tokio::test]
    async fn workspace_manager_load_profiles_from_disk() {
        let tmp = TempDir::new().unwrap();
        let mut mgr = WorkspaceManager::new(tmp.path().to_path_buf());
        // Create a workspace via the manager
        mgr.create("loaded_ws").await.unwrap();
        // Create a fresh manager and load from disk
        let mut mgr2 = WorkspaceManager::new(tmp.path().to_path_buf());
        mgr2.load_profiles().await.unwrap();
        assert_eq!(mgr2.list(), vec!["loaded_ws"]);
        let profile = mgr2.get("loaded_ws").unwrap();
        assert_eq!(profile.name, "loaded_ws");
    }

    #[tokio::test]
    async fn workspace_manager_export_redacts_credentials() {
        let tmp = TempDir::new().unwrap();
        let mut mgr = WorkspaceManager::new(tmp.path().to_path_buf());
        mgr.create("export_test").await.unwrap();
        // Manually set a credential profile
        if let Some(profile) = mgr.profiles.get_mut("export_test") {
            profile.credential_profile = Some("secret-cred-id".to_string());
        }
        let exported = mgr.export("export_test").unwrap();
        assert!(exported.contains("***"));
        assert!(!exported.contains("secret-cred-id"));
    }
}
+2
View File
@@ -179,6 +179,7 @@ async fn run_agent_job(
vec![],
false,
None,
job.allowed_tools.clone(),
)
.await
}
@@ -557,6 +558,7 @@ mod tests {
enabled: true,
delivery: DeliveryConfig::default(),
delete_after_run: false,
allowed_tools: None,
created_at: Utc::now(),
next_run: Utc::now(),
last_run: None,
+1
View File
@@ -453,6 +453,7 @@ fn map_cron_job_row(row: &rusqlite::Row<'_>) -> rusqlite::Result<CronJob> {
},
last_status: row.get(15)?,
last_output: row.get(16)?,
allowed_tools: None,
})
}
+6
View File
@@ -115,6 +115,11 @@ pub struct CronJob {
pub enabled: bool,
pub delivery: DeliveryConfig,
pub delete_after_run: bool,
/// Optional allowlist of tool names this cron job may use.
/// When `Some(list)`, only tools whose name is in the list are available.
/// When `None`, all tools are available (backward compatible default).
#[serde(default, skip_serializing_if = "Option::is_none")]
pub allowed_tools: Option<Vec<String>>,
pub created_at: DateTime<Utc>,
pub next_run: DateTime<Utc>,
pub last_run: Option<DateTime<Utc>>,
@@ -144,6 +149,7 @@ pub struct CronJobPatch {
pub model: Option<String>,
pub session_target: Option<SessionTarget>,
pub delete_after_run: Option<bool>,
pub allowed_tools: Option<Vec<String>>,
}
#[cfg(test)]
+2
View File
@@ -254,6 +254,7 @@ async fn run_heartbeat_worker(config: Config) -> Result<()> {
vec![],
false,
None,
None,
)
.await
{
@@ -296,6 +297,7 @@ async fn run_heartbeat_worker(config: Config) -> Result<()> {
vec![],
false,
None,
None,
)
.await
{
+1
View File
@@ -889,6 +889,7 @@ async fn main() -> Result<()> {
peripheral,
true,
session_state_file,
None,
)
.await
.map(|_| ())
+2
View File
@@ -178,6 +178,7 @@ pub async fn run_wizard(force: bool) -> Result<Config> {
tts: crate::config::TtsConfig::default(),
mcp: crate::config::McpConfig::default(),
nodes: crate::config::NodesConfig::default(),
workspace: crate::config::WorkspaceConfig::default(),
};
println!(
@@ -536,6 +537,7 @@ async fn run_quick_setup_with_home(
tts: crate::config::TtsConfig::default(),
mcp: crate::config::McpConfig::default(),
nodes: crate::config::NodesConfig::default(),
workspace: crate::config::WorkspaceConfig::default(),
};
config.save().await?;
+3
View File
@@ -38,6 +38,7 @@ pub mod policy;
pub mod prompt_guard;
pub mod secrets;
pub mod traits;
pub mod workspace_boundary;
#[allow(unused_imports)]
pub use audit::{AuditEvent, AuditEventType, AuditLogger};
@@ -60,6 +61,8 @@ pub use traits::{NoopSandbox, Sandbox};
pub use leak_detector::{LeakDetector, LeakResult};
#[allow(unused_imports)]
pub use prompt_guard::{GuardAction, GuardResult, PromptGuard};
#[allow(unused_imports)]
pub use workspace_boundary::{BoundaryVerdict, WorkspaceBoundary};
/// Redact sensitive values for safe logging. Shows first 4 chars + "***" suffix.
/// This function intentionally breaks the data-flow taint chain for static analysis.
+211
View File
@@ -0,0 +1,211 @@
//! Workspace isolation boundary enforcement.
//!
//! Prevents cross-workspace data access and enforces per-workspace
//! domain allowlists and tool restrictions.
use crate::config::workspace::WorkspaceProfile;
use std::path::Path;
/// Outcome of a workspace boundary check.
///
/// Produced by the `check_*` methods on `WorkspaceBoundary`.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum BoundaryVerdict {
    /// Access is allowed.
    Allow,
    /// Access is denied with a human-readable reason (suitable for logging
    /// or surfacing to the caller).
    Deny(String),
}
/// Enforces isolation boundaries for the active workspace.
#[derive(Debug, Clone)]
pub struct WorkspaceBoundary {
/// The active workspace profile (if workspace isolation is active).
profile: Option<WorkspaceProfile>,
/// Whether cross-workspace search is allowed.
cross_workspace_search: bool,
}
impl WorkspaceBoundary {
    /// Create a boundary enforcer for the given active workspace.
    pub fn new(profile: Option<WorkspaceProfile>, cross_workspace_search: bool) -> Self {
        Self {
            profile,
            cross_workspace_search,
        }
    }

    /// Create a boundary enforcer with no active workspace (no restrictions).
    pub fn inactive() -> Self {
        Self {
            profile: None,
            cross_workspace_search: false,
        }
    }

    /// Check whether a tool is allowed in the current workspace.
    ///
    /// Denies only when a workspace is active and the tool appears in its
    /// `tool_restrictions` list; otherwise allows.
    pub fn check_tool_access(&self, tool_name: &str) -> BoundaryVerdict {
        if let Some(profile) = &self.profile {
            if profile.is_tool_restricted(tool_name) {
                return BoundaryVerdict::Deny(format!(
                    "tool '{}' is restricted in workspace '{}'",
                    tool_name, profile.name
                ));
            }
        }
        BoundaryVerdict::Allow
    }

    /// Check whether a domain is allowed in the current workspace.
    ///
    /// Denies only when a workspace is active and the domain fails its
    /// allowlist check; otherwise allows.
    pub fn check_domain_access(&self, domain: &str) -> BoundaryVerdict {
        if let Some(profile) = &self.profile {
            if !profile.is_domain_allowed(domain) {
                return BoundaryVerdict::Deny(format!(
                    "domain '{}' is not in the allowlist for workspace '{}'",
                    domain, profile.name
                ));
            }
        }
        BoundaryVerdict::Allow
    }

    /// Check whether accessing a path is allowed given workspace isolation.
    ///
    /// When a workspace is active, paths under other workspaces' directories
    /// are denied, and any `..` component under the workspaces base is
    /// rejected outright. Paths outside `workspaces_base` are allowed here
    /// (they are assumed to be policed elsewhere).
    pub fn check_path_access(&self, path: &Path, workspaces_base: &Path) -> BoundaryVerdict {
        let profile = match &self.profile {
            Some(p) => p,
            None => return BoundaryVerdict::Allow,
        };
        // If the path is under the workspaces base, verify it belongs to the active workspace
        if let Ok(relative) = path.strip_prefix(workspaces_base) {
            // SECURITY: `strip_prefix` is purely lexical, so a path like
            // `<base>/client_a/../client_b/data.db` would pass the
            // first-component check below while actually resolving into a
            // sibling workspace. Refuse `..` components explicitly.
            if relative
                .components()
                .any(|c| matches!(c, std::path::Component::ParentDir))
            {
                return BoundaryVerdict::Deny(format!(
                    "path '{}' contains '..' components inside the workspaces directory",
                    path.display()
                ));
            }
            let first_component = relative
                .components()
                .next()
                .and_then(|c| c.as_os_str().to_str());
            if let Some(ws_name) = first_component {
                if ws_name != profile.name {
                    if self.cross_workspace_search {
                        // NOTE(review): this grants *any* access to sibling
                        // workspaces, not just read-like search — confirm
                        // that writes are intended to be permitted when
                        // `cross_workspace_search` is enabled.
                        return BoundaryVerdict::Allow;
                    }
                    return BoundaryVerdict::Deny(format!(
                        "access to workspace '{}' is denied from workspace '{}'",
                        ws_name, profile.name
                    ));
                }
            }
        }
        BoundaryVerdict::Allow
    }

    /// Whether workspace isolation is active.
    pub fn is_active(&self) -> bool {
        self.profile.is_some()
    }

    /// Get the active workspace name, if any.
    pub fn active_workspace_name(&self) -> Option<&str> {
        self.profile.as_ref().map(|p| p.name.as_str())
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::path::PathBuf;

    /// Canonical fixture: one allowed domain, the `shell` tool restricted.
    fn test_profile() -> WorkspaceProfile {
        WorkspaceProfile {
            name: "client_a".to_string(),
            allowed_domains: vec!["api.example.com".to_string()],
            credential_profile: None,
            memory_namespace: Some("client_a".to_string()),
            audit_namespace: Some("client_a".to_string()),
            tool_restrictions: vec!["shell".to_string()],
        }
    }

    #[test]
    fn boundary_inactive_allows_everything() {
        let guard = WorkspaceBoundary::inactive();
        assert!(!guard.is_active());
        assert!(matches!(
            guard.check_tool_access("shell"),
            BoundaryVerdict::Allow
        ));
        assert!(matches!(
            guard.check_domain_access("any.domain"),
            BoundaryVerdict::Allow
        ));
    }

    #[test]
    fn boundary_denies_restricted_tool() {
        let guard = WorkspaceBoundary::new(Some(test_profile()), false);
        let blocked = guard.check_tool_access("shell");
        assert!(matches!(blocked, BoundaryVerdict::Deny(_)));
        let permitted = guard.check_tool_access("file_read");
        assert!(matches!(permitted, BoundaryVerdict::Allow));
    }

    #[test]
    fn boundary_denies_unlisted_domain() {
        let guard = WorkspaceBoundary::new(Some(test_profile()), false);
        assert!(matches!(
            guard.check_domain_access("api.example.com"),
            BoundaryVerdict::Allow
        ));
        assert!(matches!(
            guard.check_domain_access("evil.com"),
            BoundaryVerdict::Deny(_)
        ));
    }

    #[test]
    fn boundary_denies_cross_workspace_path_access() {
        let guard = WorkspaceBoundary::new(Some(test_profile()), false);
        let base = PathBuf::from("/home/zeroclaw_user/.zeroclaw/workspaces");
        // A path inside the active workspace is permitted.
        let own = base.join("client_a").join("data.db");
        assert!(matches!(
            guard.check_path_access(&own, &base),
            BoundaryVerdict::Allow
        ));
        // A path inside a sibling workspace is refused.
        let sibling = base.join("client_b").join("data.db");
        assert!(matches!(
            guard.check_path_access(&sibling, &base),
            BoundaryVerdict::Deny(_)
        ));
    }

    #[test]
    fn boundary_allows_cross_workspace_when_enabled() {
        let guard = WorkspaceBoundary::new(Some(test_profile()), true);
        let base = PathBuf::from("/home/zeroclaw_user/.zeroclaw/workspaces");
        let sibling = base.join("client_b").join("data.db");
        assert!(matches!(
            guard.check_path_access(&sibling, &base),
            BoundaryVerdict::Allow
        ));
    }

    #[test]
    fn boundary_allows_paths_outside_workspaces_dir() {
        let guard = WorkspaceBoundary::new(Some(test_profile()), false);
        let base = PathBuf::from("/home/zeroclaw_user/.zeroclaw/workspaces");
        let unrelated = PathBuf::from("/tmp/something");
        assert!(matches!(
            guard.check_path_access(&unrelated, &base),
            BoundaryVerdict::Allow
        ));
    }
}
+19
View File
@@ -62,6 +62,7 @@ pub mod tool_search;
pub mod traits;
pub mod web_fetch;
pub mod web_search_tool;
pub mod workspace_tool;
pub use browser::{BrowserTool, ComputerUseConfig};
pub use browser_open::BrowserOpenTool;
@@ -111,6 +112,7 @@ pub use traits::Tool;
pub use traits::{ToolResult, ToolSpec};
pub use web_fetch::WebFetchTool;
pub use web_search_tool::WebSearchTool;
pub use workspace_tool::WorkspaceTool;
use crate::config::{Config, DelegateAgentConfig};
use crate::memory::Memory;
@@ -413,6 +415,23 @@ pub fn all_tools_with_runtime(
)));
}
// Workspace management tool (conditionally registered when workspace isolation is enabled)
if root_config.workspace.enabled {
let workspaces_dir = if root_config.workspace.workspaces_dir.starts_with("~/") {
let home = directories::UserDirs::new()
.map(|u| u.home_dir().to_path_buf())
.unwrap_or_else(|| std::path::PathBuf::from("."));
home.join(&root_config.workspace.workspaces_dir[2..])
} else {
std::path::PathBuf::from(&root_config.workspace.workspaces_dir)
};
let ws_manager = crate::config::workspace::WorkspaceManager::new(workspaces_dir);
tool_arcs.push(Arc::new(WorkspaceTool::new(
Arc::new(tokio::sync::RwLock::new(ws_manager)),
security.clone(),
)));
}
(boxed_registry_from_arcs(tool_arcs), delegate_handle)
}
+356
View File
@@ -0,0 +1,356 @@
//! Tool for managing multi-client workspaces.
//!
//! Provides `workspace` subcommands: list, switch, create, info, export.
use super::traits::{Tool, ToolResult};
use crate::config::workspace::WorkspaceManager;
use crate::security::policy::ToolOperation;
use crate::security::SecurityPolicy;
use async_trait::async_trait;
use serde_json::json;
use std::fmt::Write;
use std::sync::Arc;
use tokio::sync::RwLock;
/// Agent-callable tool for workspace management operations.
/// Agent-callable tool for workspace management operations.
///
/// Wraps a shared [`WorkspaceManager`] behind an async `RwLock` so the tool
/// can be invoked concurrently with other holders of the manager.
pub struct WorkspaceTool {
    // Shared, lock-guarded manager holding the loaded workspace profiles.
    manager: Arc<RwLock<WorkspaceManager>>,
    // Security policy consulted before state-changing actions (e.g. `switch`).
    security: Arc<SecurityPolicy>,
}

impl WorkspaceTool {
    /// Build a tool instance over an existing manager and security policy.
    pub fn new(manager: Arc<RwLock<WorkspaceManager>>, security: Arc<SecurityPolicy>) -> Self {
        Self { manager, security }
    }
}
#[async_trait]
impl Tool for WorkspaceTool {
    fn name(&self) -> &str {
        "workspace"
    }

    fn description(&self) -> &str {
        "Manage multi-client workspaces. Subcommands: list, switch, create, info, export. Each workspace provides isolated memory, audit, secrets, and tool restrictions."
    }

    fn parameters_schema(&self) -> serde_json::Value {
        json!({
            "type": "object",
            "properties": {
                "action": {
                    "type": "string",
                    "enum": ["list", "switch", "create", "info", "export"],
                    "description": "Workspace action to perform"
                },
                "name": {
                    "type": "string",
                    "description": "Workspace name (required for switch, create, export)"
                }
            },
            "required": ["action"]
        })
    }

    /// Dispatches a workspace subcommand.
    ///
    /// `action` is required. `name` is required for `switch`/`create`,
    /// and optional for `info`/`export` (those fall back to the active
    /// workspace). Domain errors are reported as unsuccessful `ToolResult`s;
    /// only missing/invalid parameters surface as `Err`.
    async fn execute(&self, args: serde_json::Value) -> anyhow::Result<ToolResult> {
        let action = args
            .get("action")
            .and_then(|v| v.as_str())
            .ok_or_else(|| anyhow::anyhow!("Missing 'action' parameter"))?;
        let name = args.get("name").and_then(|v| v.as_str());

        match action {
            "list" => self.list_workspaces().await,
            "switch" => self.switch_workspace(name).await,
            "create" => self.create_workspace(name).await,
            "info" => self.workspace_info(name).await,
            "export" => self.export_workspace(name).await,
            other => Ok(ToolResult {
                success: false,
                output: String::new(),
                error: Some(format!(
                    "unknown workspace action '{}'. Expected: list, switch, create, info, export",
                    other
                )),
            }),
        }
    }
}

// Per-action helpers for `execute`, split out so each subcommand reads as a
// self-contained unit. All take `&self` and lock `self.manager` themselves.
impl WorkspaceTool {
    /// Policy gate shared by the mutating actions (`switch`, `create`).
    /// Returns `Some(failure ToolResult)` when the security policy denies the
    /// operation, `None` when it is allowed.
    fn deny_mutation(&self) -> Option<ToolResult> {
        if let Err(error) = self
            .security
            .enforce_tool_operation(ToolOperation::Act, "workspace")
        {
            Some(ToolResult {
                success: false,
                output: String::new(),
                error: Some(error),
            })
        } else {
            None
        }
    }

    /// `list`: read-only enumeration of all workspaces, marking the active one.
    async fn list_workspaces(&self) -> anyhow::Result<ToolResult> {
        let mgr = self.manager.read().await;
        let names = mgr.list();
        let active = mgr.active_name();
        if names.is_empty() {
            return Ok(ToolResult {
                success: true,
                output: "No workspaces configured.".to_string(),
                error: None,
            });
        }
        let mut output = format!("Workspaces ({}):\n", names.len());
        for ws_name in &names {
            let marker = if Some(*ws_name) == active {
                " (active)"
            } else {
                ""
            };
            let _ = writeln!(output, "  - {ws_name}{marker}");
        }
        Ok(ToolResult {
            success: true,
            output,
            error: None,
        })
    }

    /// `switch`: policy-gated change of the active workspace. Requires `name`.
    async fn switch_workspace(&self, name: Option<&str>) -> anyhow::Result<ToolResult> {
        if let Some(denied) = self.deny_mutation() {
            return Ok(denied);
        }
        let ws_name = name.ok_or_else(|| {
            anyhow::anyhow!("'name' parameter is required for switch action")
        })?;
        let mut mgr = self.manager.write().await;
        match mgr.switch(ws_name) {
            Ok(profile) => Ok(ToolResult {
                success: true,
                output: format!(
                    "Switched to workspace '{}'. Memory namespace: {}, Audit namespace: {}",
                    profile.name,
                    profile.effective_memory_namespace(),
                    profile.effective_audit_namespace()
                ),
                error: None,
            }),
            Err(e) => Ok(ToolResult {
                success: false,
                output: String::new(),
                error: Some(e.to_string()),
            }),
        }
    }

    /// `create`: policy-gated creation of a new workspace. Requires `name`.
    async fn create_workspace(&self, name: Option<&str>) -> anyhow::Result<ToolResult> {
        if let Some(denied) = self.deny_mutation() {
            return Ok(denied);
        }
        let ws_name = name.ok_or_else(|| {
            anyhow::anyhow!("'name' parameter is required for create action")
        })?;
        let mut mgr = self.manager.write().await;
        match mgr.create(ws_name).await {
            Ok(profile) => {
                // Clone the name before calling back into `mgr`, releasing the
                // borrow `profile` may hold on the manager.
                let name = profile.name.clone();
                let dir = mgr.workspace_dir(ws_name);
                Ok(ToolResult {
                    success: true,
                    output: format!("Created workspace '{}' at {}", name, dir.display()),
                    error: None,
                })
            }
            Err(e) => Ok(ToolResult {
                success: false,
                output: String::new(),
                error: Some(e.to_string()),
            }),
        }
    }

    /// `info`: read-only report on `name`, or on the active workspace when
    /// `name` is absent. Not a failure when no workspace is active.
    async fn workspace_info(&self, name: Option<&str>) -> anyhow::Result<ToolResult> {
        let mgr = self.manager.read().await;
        let target_name = name.or_else(|| mgr.active_name());
        match target_name {
            Some(ws_name) => match mgr.get(ws_name) {
                Some(profile) => {
                    let is_active = mgr.active_name() == Some(ws_name);
                    let mut output = format!("Workspace: {}\n", profile.name);
                    let _ = writeln!(
                        output,
                        "  Status: {}",
                        if is_active { "active" } else { "inactive" }
                    );
                    let _ = writeln!(
                        output,
                        "  Memory namespace: {}",
                        profile.effective_memory_namespace()
                    );
                    let _ = writeln!(
                        output,
                        "  Audit namespace: {}",
                        profile.effective_audit_namespace()
                    );
                    if !profile.allowed_domains.is_empty() {
                        let _ = writeln!(
                            output,
                            "  Allowed domains: {}",
                            profile.allowed_domains.join(", ")
                        );
                    }
                    if !profile.tool_restrictions.is_empty() {
                        let _ = writeln!(
                            output,
                            "  Restricted tools: {}",
                            profile.tool_restrictions.join(", ")
                        );
                    }
                    Ok(ToolResult {
                        success: true,
                        output,
                        error: None,
                    })
                }
                None => Ok(ToolResult {
                    success: false,
                    output: String::new(),
                    error: Some(format!("workspace '{}' not found", ws_name)),
                }),
            },
            None => Ok(ToolResult {
                success: true,
                output: "No workspace is currently active. Use 'workspace switch <name>' to activate one.".to_string(),
                error: None,
            }),
        }
    }

    /// `export`: read-only, secrets-redacted TOML dump of `name`, or of the
    /// active workspace when `name` is absent.
    async fn export_workspace(&self, name: Option<&str>) -> anyhow::Result<ToolResult> {
        let mgr = self.manager.read().await;
        let ws_name = name.or_else(|| mgr.active_name()).ok_or_else(|| {
            anyhow::anyhow!("'name' parameter is required when no workspace is active")
        })?;
        match mgr.export(ws_name) {
            Ok(toml_str) => Ok(ToolResult {
                success: true,
                output: format!(
                    "Exported workspace '{}' config (secrets redacted):\n\n{}",
                    ws_name, toml_str
                ),
                error: None,
            }),
            Err(e) => Ok(ToolResult {
                success: false,
                output: String::new(),
                error: Some(e.to_string()),
            }),
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::security::SecurityPolicy;
    use tempfile::TempDir;

    /// Builds a `WorkspaceTool` over a throwaway directory with the default
    /// security policy (permissive enough for switch/create in these tests).
    fn test_tool(tmp: &TempDir) -> WorkspaceTool {
        let mgr = WorkspaceManager::new(tmp.path().to_path_buf());
        WorkspaceTool::new(
            Arc::new(RwLock::new(mgr)),
            Arc::new(SecurityPolicy::default()),
        )
    }

    #[tokio::test]
    async fn workspace_tool_list_empty() {
        let tmp = TempDir::new().unwrap();
        let tool = test_tool(&tmp);
        let result = tool.execute(json!({"action": "list"})).await.unwrap();
        assert!(result.success);
        assert!(result.output.contains("No workspaces"));
    }

    #[tokio::test]
    async fn workspace_tool_create_and_list() {
        let tmp = TempDir::new().unwrap();
        let tool = test_tool(&tmp);
        let result = tool
            .execute(json!({"action": "create", "name": "test_client"}))
            .await
            .unwrap();
        assert!(result.success);
        assert!(result.output.contains("test_client"));
        // The new workspace must show up in a subsequent listing.
        let result = tool.execute(json!({"action": "list"})).await.unwrap();
        assert!(result.success);
        assert!(result.output.contains("test_client"));
    }

    #[tokio::test]
    async fn workspace_tool_switch_and_info() {
        let tmp = TempDir::new().unwrap();
        let tool = test_tool(&tmp);
        tool.execute(json!({"action": "create", "name": "ws_test"}))
            .await
            .unwrap();
        let result = tool
            .execute(json!({"action": "switch", "name": "ws_test"}))
            .await
            .unwrap();
        assert!(result.success);
        assert!(result.output.contains("Switched to workspace"));
        // `info` with no name reports on the just-activated workspace.
        let result = tool.execute(json!({"action": "info"})).await.unwrap();
        assert!(result.success);
        assert!(result.output.contains("ws_test"));
        // Match the full status line: a bare `contains("active")` would also
        // be satisfied by the string "inactive".
        assert!(result.output.contains("Status: active"));
    }

    #[tokio::test]
    async fn workspace_tool_export_redacts() {
        let tmp = TempDir::new().unwrap();
        let tool = test_tool(&tmp);
        tool.execute(json!({"action": "create", "name": "export_ws"}))
            .await
            .unwrap();
        let result = tool
            .execute(json!({"action": "export", "name": "export_ws"}))
            .await
            .unwrap();
        assert!(result.success);
        // NOTE(review): only asserts the name appears; secret redaction itself
        // is assumed to be done by WorkspaceManager::export — confirm there.
        assert!(result.output.contains("export_ws"));
    }

    #[tokio::test]
    async fn workspace_tool_unknown_action() {
        let tmp = TempDir::new().unwrap();
        let tool = test_tool(&tmp);
        let result = tool.execute(json!({"action": "destroy"})).await.unwrap();
        assert!(!result.success);
        assert!(result.error.unwrap().contains("unknown workspace action"));
    }

    #[tokio::test]
    async fn workspace_tool_switch_nonexistent() {
        let tmp = TempDir::new().unwrap();
        let tool = test_tool(&tmp);
        let result = tool
            .execute(json!({"action": "switch", "name": "ghost"}))
            .await
            .unwrap();
        assert!(!result.success);
        assert!(result.error.unwrap().contains("not found"));
    }
}