Compare commits
13 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| 2539bcafe0 | |||
| 314e1d3ae8 | |||
| 82be05b1e9 | |||
| 1373659058 | |||
| c7f064e866 | |||
| 9c1d63e109 | |||
| 966edf1553 | |||
| a1af84d992 | |||
| 70e8e7ebcd | |||
| 2bcb82c5b3 | |||
| e211b5c3e3 | |||
| 8691476577 | |||
| 996dbe95cf |
Generated
+1
-1
@@ -7945,7 +7945,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "zeroclawlabs"
|
||||
version = "0.3.3"
|
||||
version = "0.3.4"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"async-imap",
|
||||
|
||||
+1
-1
@@ -4,7 +4,7 @@ resolver = "2"
|
||||
|
||||
[package]
|
||||
name = "zeroclawlabs"
|
||||
version = "0.3.3"
|
||||
version = "0.3.4"
|
||||
edition = "2021"
|
||||
authors = ["theonlyhennygod"]
|
||||
license = "MIT OR Apache-2.0"
|
||||
|
||||
+6
-4
@@ -2696,11 +2696,13 @@ pub(crate) async fn run_tool_call_loop(
|
||||
arguments: tool_args.clone(),
|
||||
};
|
||||
|
||||
// Only prompt interactively on CLI; auto-approve on other channels.
|
||||
let decision = if channel_name == "cli" {
|
||||
mgr.prompt_cli(&request)
|
||||
// Interactive CLI: prompt the operator.
|
||||
// Non-interactive (channels): auto-deny since no operator
|
||||
// is present to approve.
|
||||
let decision = if mgr.is_non_interactive() {
|
||||
ApprovalResponse::No
|
||||
} else {
|
||||
ApprovalResponse::Yes
|
||||
mgr.prompt_cli(&request)
|
||||
};
|
||||
|
||||
mgr.record_decision(&tool_name, &tool_args, decision, channel_name);
|
||||
|
||||
+128
-4
@@ -44,11 +44,18 @@ pub struct ApprovalLogEntry {
|
||||
|
||||
// ── ApprovalManager ──────────────────────────────────────────────
|
||||
|
||||
/// Manages the interactive approval workflow.
|
||||
/// Manages the approval workflow for tool calls.
|
||||
///
|
||||
/// - Checks config-level `auto_approve` / `always_ask` lists
|
||||
/// - Maintains a session-scoped "always" allowlist
|
||||
/// - Records an audit trail of all decisions
|
||||
///
|
||||
/// Two modes:
|
||||
/// - **Interactive** (CLI): tools needing approval trigger a stdin prompt.
|
||||
/// - **Non-interactive** (channels): tools needing approval are auto-denied
|
||||
/// because there is no interactive operator to approve them. `auto_approve`
|
||||
/// policy is still enforced, and `always_ask` / supervised-default tools are
|
||||
/// denied rather than silently allowed.
|
||||
pub struct ApprovalManager {
|
||||
/// Tools that never need approval (from config).
|
||||
auto_approve: HashSet<String>,
|
||||
@@ -56,6 +63,9 @@ pub struct ApprovalManager {
|
||||
always_ask: HashSet<String>,
|
||||
/// Autonomy level from config.
|
||||
autonomy_level: AutonomyLevel,
|
||||
/// When `true`, tools that would require interactive approval are
|
||||
/// auto-denied instead. Used for channel-driven (non-CLI) runs.
|
||||
non_interactive: bool,
|
||||
/// Session-scoped allowlist built from "Always" responses.
|
||||
session_allowlist: Mutex<HashSet<String>>,
|
||||
/// Audit trail of approval decisions.
|
||||
@@ -63,17 +73,40 @@ pub struct ApprovalManager {
|
||||
}
|
||||
|
||||
impl ApprovalManager {
|
||||
/// Create from autonomy config.
|
||||
/// Create an interactive (CLI) approval manager from autonomy config.
|
||||
pub fn from_config(config: &AutonomyConfig) -> Self {
|
||||
Self {
|
||||
auto_approve: config.auto_approve.iter().cloned().collect(),
|
||||
always_ask: config.always_ask.iter().cloned().collect(),
|
||||
autonomy_level: config.level,
|
||||
non_interactive: false,
|
||||
session_allowlist: Mutex::new(HashSet::new()),
|
||||
audit_log: Mutex::new(Vec::new()),
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a non-interactive approval manager for channel-driven runs.
|
||||
///
|
||||
/// Enforces the same `auto_approve` / `always_ask` / supervised policies
|
||||
/// as the CLI manager, but tools that would require interactive approval
|
||||
/// are auto-denied instead of prompting (since there is no operator).
|
||||
pub fn for_non_interactive(config: &AutonomyConfig) -> Self {
|
||||
Self {
|
||||
auto_approve: config.auto_approve.iter().cloned().collect(),
|
||||
always_ask: config.always_ask.iter().cloned().collect(),
|
||||
autonomy_level: config.level,
|
||||
non_interactive: true,
|
||||
session_allowlist: Mutex::new(HashSet::new()),
|
||||
audit_log: Mutex::new(Vec::new()),
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns `true` when this manager operates in non-interactive mode
|
||||
/// (i.e. for channel-driven runs where no operator can approve).
|
||||
pub fn is_non_interactive(&self) -> bool {
|
||||
self.non_interactive
|
||||
}
|
||||
|
||||
/// Check whether a tool call requires interactive approval.
|
||||
///
|
||||
/// Returns `true` if the call needs a prompt, `false` if it can proceed.
|
||||
@@ -147,8 +180,8 @@ impl ApprovalManager {
|
||||
|
||||
/// Prompt the user on the CLI and return their decision.
|
||||
///
|
||||
/// For non-CLI channels, returns `Yes` automatically (interactive
|
||||
/// approval is only supported on CLI for now).
|
||||
/// Only called for interactive (CLI) managers. Non-interactive managers
|
||||
/// auto-deny in the tool-call loop before reaching this point.
|
||||
pub fn prompt_cli(&self, request: &ApprovalRequest) -> ApprovalResponse {
|
||||
prompt_cli_interactive(request)
|
||||
}
|
||||
@@ -401,6 +434,97 @@ mod tests {
|
||||
assert!(summary.contains("just a string"));
|
||||
}
|
||||
|
||||
// ── non-interactive (channel) mode ────────────────────────
|
||||
|
||||
#[test]
|
||||
fn non_interactive_manager_reports_non_interactive() {
|
||||
let mgr = ApprovalManager::for_non_interactive(&supervised_config());
|
||||
assert!(mgr.is_non_interactive());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn interactive_manager_reports_interactive() {
|
||||
let mgr = ApprovalManager::from_config(&supervised_config());
|
||||
assert!(!mgr.is_non_interactive());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn non_interactive_auto_approve_tools_skip_approval() {
|
||||
let mgr = ApprovalManager::for_non_interactive(&supervised_config());
|
||||
// auto_approve tools (file_read, memory_recall) should not need approval.
|
||||
assert!(!mgr.needs_approval("file_read"));
|
||||
assert!(!mgr.needs_approval("memory_recall"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn non_interactive_always_ask_tools_need_approval() {
|
||||
let mgr = ApprovalManager::for_non_interactive(&supervised_config());
|
||||
// always_ask tools (shell) still report as needing approval,
|
||||
// so the tool-call loop will auto-deny them in non-interactive mode.
|
||||
assert!(mgr.needs_approval("shell"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn non_interactive_unknown_tools_need_approval_in_supervised() {
|
||||
let mgr = ApprovalManager::for_non_interactive(&supervised_config());
|
||||
// Unknown tools in supervised mode need approval (will be auto-denied
|
||||
// by the tool-call loop for non-interactive managers).
|
||||
assert!(mgr.needs_approval("file_write"));
|
||||
assert!(mgr.needs_approval("http_request"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn non_interactive_full_autonomy_never_needs_approval() {
|
||||
let mgr = ApprovalManager::for_non_interactive(&full_config());
|
||||
// Full autonomy means no approval needed, even in non-interactive mode.
|
||||
assert!(!mgr.needs_approval("shell"));
|
||||
assert!(!mgr.needs_approval("file_write"));
|
||||
assert!(!mgr.needs_approval("anything"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn non_interactive_readonly_never_needs_approval() {
|
||||
let config = AutonomyConfig {
|
||||
level: AutonomyLevel::ReadOnly,
|
||||
..AutonomyConfig::default()
|
||||
};
|
||||
let mgr = ApprovalManager::for_non_interactive(&config);
|
||||
// ReadOnly blocks execution elsewhere; approval manager does not prompt.
|
||||
assert!(!mgr.needs_approval("shell"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn non_interactive_session_allowlist_still_works() {
|
||||
let mgr = ApprovalManager::for_non_interactive(&supervised_config());
|
||||
assert!(mgr.needs_approval("file_write"));
|
||||
|
||||
// Simulate an "Always" decision (would come from a prior channel run
|
||||
// if the tool was auto-approved somehow, e.g. via config change).
|
||||
mgr.record_decision(
|
||||
"file_write",
|
||||
&serde_json::json!({"path": "test.txt"}),
|
||||
ApprovalResponse::Always,
|
||||
"telegram",
|
||||
);
|
||||
|
||||
assert!(!mgr.needs_approval("file_write"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn non_interactive_always_ask_overrides_session_allowlist() {
|
||||
let mgr = ApprovalManager::for_non_interactive(&supervised_config());
|
||||
|
||||
mgr.record_decision(
|
||||
"shell",
|
||||
&serde_json::json!({"command": "ls"}),
|
||||
ApprovalResponse::Always,
|
||||
"telegram",
|
||||
);
|
||||
|
||||
// shell is in always_ask, so it still needs approval even after "Always".
|
||||
assert!(mgr.needs_approval("shell"));
|
||||
}
|
||||
|
||||
// ── ApprovalResponse serde ───────────────────────────────
|
||||
|
||||
#[test]
|
||||
|
||||
@@ -888,7 +888,7 @@ impl Channel for MatrixChannel {
|
||||
sender: sender.clone(),
|
||||
reply_target: format!("{}||{}", sender, room.room_id()),
|
||||
content: body,
|
||||
channel: format!("matrix:{}", room.room_id()),
|
||||
channel: "matrix".to_string(),
|
||||
timestamp: std::time::SystemTime::now()
|
||||
.duration_since(std::time::UNIX_EPOCH)
|
||||
.unwrap_or_default()
|
||||
|
||||
+82
-1
@@ -76,6 +76,7 @@ pub use whatsapp::WhatsAppChannel;
|
||||
pub use whatsapp_web::WhatsAppWebChannel;
|
||||
|
||||
use crate::agent::loop_::{build_tool_instructions, run_tool_call_loop, scrub_credentials};
|
||||
use crate::approval::ApprovalManager;
|
||||
use crate::config::Config;
|
||||
use crate::identity;
|
||||
use crate::memory::{self, Memory};
|
||||
@@ -314,6 +315,11 @@ struct ChannelRuntimeContext {
|
||||
ack_reactions: bool,
|
||||
show_tool_calls: bool,
|
||||
session_store: Option<Arc<session_store::SessionStore>>,
|
||||
/// Non-interactive approval manager for channel-driven runs.
|
||||
/// Enforces `auto_approve` / `always_ask` / supervised policy from
|
||||
/// `[autonomy]` config; auto-denies tools that would need interactive
|
||||
/// approval since no operator is present on channel runs.
|
||||
approval_manager: Arc<ApprovalManager>,
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
@@ -2025,7 +2031,7 @@ async fn process_channel_message(
|
||||
route.model.as_str(),
|
||||
runtime_defaults.temperature,
|
||||
true,
|
||||
None,
|
||||
Some(&*ctx.approval_manager),
|
||||
msg.channel.as_str(),
|
||||
&ctx.multimodal,
|
||||
ctx.max_tool_iterations,
|
||||
@@ -3235,6 +3241,8 @@ fn collect_configured_channels(
|
||||
#[cfg(not(feature = "whatsapp-web"))]
|
||||
{
|
||||
tracing::warn!("WhatsApp Web backend requires 'whatsapp-web' feature. Enable with: cargo build --features whatsapp-web");
|
||||
eprintln!(" ⚠ WhatsApp Web is configured but the 'whatsapp-web' feature is not compiled in.");
|
||||
eprintln!(" Rebuild with: cargo build --features whatsapp-web");
|
||||
}
|
||||
}
|
||||
_ => {
|
||||
@@ -3851,6 +3859,7 @@ pub async fn start_channels(config: Config) -> Result<()> {
|
||||
} else {
|
||||
None
|
||||
},
|
||||
approval_manager: Arc::new(ApprovalManager::for_non_interactive(&config.autonomy)),
|
||||
});
|
||||
|
||||
// Hydrate in-memory conversation histories from persisted JSONL session files.
|
||||
@@ -4139,6 +4148,9 @@ mod tests {
|
||||
ack_reactions: true,
|
||||
show_tool_calls: true,
|
||||
session_store: None,
|
||||
approval_manager: Arc::new(ApprovalManager::for_non_interactive(
|
||||
&crate::config::AutonomyConfig::default(),
|
||||
)),
|
||||
};
|
||||
|
||||
assert!(compact_sender_history(&ctx, &sender));
|
||||
@@ -4243,6 +4255,9 @@ mod tests {
|
||||
ack_reactions: true,
|
||||
show_tool_calls: true,
|
||||
session_store: None,
|
||||
approval_manager: Arc::new(ApprovalManager::for_non_interactive(
|
||||
&crate::config::AutonomyConfig::default(),
|
||||
)),
|
||||
};
|
||||
|
||||
append_sender_turn(&ctx, &sender, ChatMessage::user("hello"));
|
||||
@@ -4303,6 +4318,9 @@ mod tests {
|
||||
ack_reactions: true,
|
||||
show_tool_calls: true,
|
||||
session_store: None,
|
||||
approval_manager: Arc::new(ApprovalManager::for_non_interactive(
|
||||
&crate::config::AutonomyConfig::default(),
|
||||
)),
|
||||
};
|
||||
|
||||
assert!(rollback_orphan_user_turn(&ctx, &sender, "pending"));
|
||||
@@ -4821,6 +4839,9 @@ BTC is currently around $65,000 based on latest tool output."#
|
||||
ack_reactions: true,
|
||||
show_tool_calls: true,
|
||||
session_store: None,
|
||||
approval_manager: Arc::new(ApprovalManager::for_non_interactive(
|
||||
&crate::config::AutonomyConfig::default(),
|
||||
)),
|
||||
});
|
||||
|
||||
process_channel_message(
|
||||
@@ -4889,6 +4910,9 @@ BTC is currently around $65,000 based on latest tool output."#
|
||||
ack_reactions: true,
|
||||
show_tool_calls: true,
|
||||
session_store: None,
|
||||
approval_manager: Arc::new(ApprovalManager::for_non_interactive(
|
||||
&crate::config::AutonomyConfig::default(),
|
||||
)),
|
||||
});
|
||||
|
||||
process_channel_message(
|
||||
@@ -4971,6 +4995,9 @@ BTC is currently around $65,000 based on latest tool output."#
|
||||
ack_reactions: true,
|
||||
show_tool_calls: true,
|
||||
session_store: None,
|
||||
approval_manager: Arc::new(ApprovalManager::for_non_interactive(
|
||||
&crate::config::AutonomyConfig::default(),
|
||||
)),
|
||||
});
|
||||
|
||||
process_channel_message(
|
||||
@@ -5038,6 +5065,9 @@ BTC is currently around $65,000 based on latest tool output."#
|
||||
ack_reactions: true,
|
||||
show_tool_calls: true,
|
||||
session_store: None,
|
||||
approval_manager: Arc::new(ApprovalManager::for_non_interactive(
|
||||
&crate::config::AutonomyConfig::default(),
|
||||
)),
|
||||
});
|
||||
|
||||
process_channel_message(
|
||||
@@ -5115,6 +5145,9 @@ BTC is currently around $65,000 based on latest tool output."#
|
||||
ack_reactions: true,
|
||||
show_tool_calls: true,
|
||||
session_store: None,
|
||||
approval_manager: Arc::new(ApprovalManager::for_non_interactive(
|
||||
&crate::config::AutonomyConfig::default(),
|
||||
)),
|
||||
});
|
||||
|
||||
process_channel_message(
|
||||
@@ -5212,6 +5245,9 @@ BTC is currently around $65,000 based on latest tool output."#
|
||||
ack_reactions: true,
|
||||
show_tool_calls: true,
|
||||
session_store: None,
|
||||
approval_manager: Arc::new(ApprovalManager::for_non_interactive(
|
||||
&crate::config::AutonomyConfig::default(),
|
||||
)),
|
||||
});
|
||||
|
||||
process_channel_message(
|
||||
@@ -5291,6 +5327,9 @@ BTC is currently around $65,000 based on latest tool output."#
|
||||
ack_reactions: true,
|
||||
show_tool_calls: true,
|
||||
session_store: None,
|
||||
approval_manager: Arc::new(ApprovalManager::for_non_interactive(
|
||||
&crate::config::AutonomyConfig::default(),
|
||||
)),
|
||||
});
|
||||
|
||||
process_channel_message(
|
||||
@@ -5385,6 +5424,9 @@ BTC is currently around $65,000 based on latest tool output."#
|
||||
ack_reactions: true,
|
||||
show_tool_calls: true,
|
||||
session_store: None,
|
||||
approval_manager: Arc::new(ApprovalManager::for_non_interactive(
|
||||
&crate::config::AutonomyConfig::default(),
|
||||
)),
|
||||
});
|
||||
|
||||
process_channel_message(
|
||||
@@ -5464,6 +5506,9 @@ BTC is currently around $65,000 based on latest tool output."#
|
||||
ack_reactions: true,
|
||||
show_tool_calls: true,
|
||||
session_store: None,
|
||||
approval_manager: Arc::new(ApprovalManager::for_non_interactive(
|
||||
&crate::config::AutonomyConfig::default(),
|
||||
)),
|
||||
});
|
||||
|
||||
process_channel_message(
|
||||
@@ -5533,6 +5578,9 @@ BTC is currently around $65,000 based on latest tool output."#
|
||||
ack_reactions: true,
|
||||
show_tool_calls: true,
|
||||
session_store: None,
|
||||
approval_manager: Arc::new(ApprovalManager::for_non_interactive(
|
||||
&crate::config::AutonomyConfig::default(),
|
||||
)),
|
||||
});
|
||||
|
||||
process_channel_message(
|
||||
@@ -5713,6 +5761,9 @@ BTC is currently around $65,000 based on latest tool output."#
|
||||
ack_reactions: true,
|
||||
show_tool_calls: true,
|
||||
session_store: None,
|
||||
approval_manager: Arc::new(ApprovalManager::for_non_interactive(
|
||||
&crate::config::AutonomyConfig::default(),
|
||||
)),
|
||||
});
|
||||
|
||||
let (tx, rx) = tokio::sync::mpsc::channel::<traits::ChannelMessage>(4);
|
||||
@@ -5801,6 +5852,9 @@ BTC is currently around $65,000 based on latest tool output."#
|
||||
ack_reactions: true,
|
||||
show_tool_calls: true,
|
||||
session_store: None,
|
||||
approval_manager: Arc::new(ApprovalManager::for_non_interactive(
|
||||
&crate::config::AutonomyConfig::default(),
|
||||
)),
|
||||
});
|
||||
|
||||
let (tx, rx) = tokio::sync::mpsc::channel::<traits::ChannelMessage>(8);
|
||||
@@ -5904,6 +5958,9 @@ BTC is currently around $65,000 based on latest tool output."#
|
||||
non_cli_excluded_tools: Arc::new(Vec::new()),
|
||||
tool_call_dedup_exempt: Arc::new(Vec::new()),
|
||||
model_routes: Arc::new(Vec::new()),
|
||||
approval_manager: Arc::new(ApprovalManager::for_non_interactive(
|
||||
&crate::config::AutonomyConfig::default(),
|
||||
)),
|
||||
});
|
||||
|
||||
let (tx, rx) = tokio::sync::mpsc::channel::<traits::ChannelMessage>(8);
|
||||
@@ -6004,6 +6061,9 @@ BTC is currently around $65,000 based on latest tool output."#
|
||||
ack_reactions: true,
|
||||
show_tool_calls: true,
|
||||
session_store: None,
|
||||
approval_manager: Arc::new(ApprovalManager::for_non_interactive(
|
||||
&crate::config::AutonomyConfig::default(),
|
||||
)),
|
||||
});
|
||||
|
||||
let (tx, rx) = tokio::sync::mpsc::channel::<traits::ChannelMessage>(8);
|
||||
@@ -6086,6 +6146,9 @@ BTC is currently around $65,000 based on latest tool output."#
|
||||
ack_reactions: true,
|
||||
show_tool_calls: true,
|
||||
session_store: None,
|
||||
approval_manager: Arc::new(ApprovalManager::for_non_interactive(
|
||||
&crate::config::AutonomyConfig::default(),
|
||||
)),
|
||||
});
|
||||
|
||||
process_channel_message(
|
||||
@@ -6153,6 +6216,9 @@ BTC is currently around $65,000 based on latest tool output."#
|
||||
ack_reactions: true,
|
||||
show_tool_calls: true,
|
||||
session_store: None,
|
||||
approval_manager: Arc::new(ApprovalManager::for_non_interactive(
|
||||
&crate::config::AutonomyConfig::default(),
|
||||
)),
|
||||
});
|
||||
|
||||
process_channel_message(
|
||||
@@ -6778,6 +6844,9 @@ BTC is currently around $65,000 based on latest tool output."#
|
||||
ack_reactions: true,
|
||||
show_tool_calls: true,
|
||||
session_store: None,
|
||||
approval_manager: Arc::new(ApprovalManager::for_non_interactive(
|
||||
&crate::config::AutonomyConfig::default(),
|
||||
)),
|
||||
});
|
||||
|
||||
process_channel_message(
|
||||
@@ -6871,6 +6940,9 @@ BTC is currently around $65,000 based on latest tool output."#
|
||||
ack_reactions: true,
|
||||
show_tool_calls: true,
|
||||
session_store: None,
|
||||
approval_manager: Arc::new(ApprovalManager::for_non_interactive(
|
||||
&crate::config::AutonomyConfig::default(),
|
||||
)),
|
||||
});
|
||||
|
||||
process_channel_message(
|
||||
@@ -6964,6 +7036,9 @@ BTC is currently around $65,000 based on latest tool output."#
|
||||
ack_reactions: true,
|
||||
show_tool_calls: true,
|
||||
session_store: None,
|
||||
approval_manager: Arc::new(ApprovalManager::for_non_interactive(
|
||||
&crate::config::AutonomyConfig::default(),
|
||||
)),
|
||||
});
|
||||
|
||||
process_channel_message(
|
||||
@@ -7521,6 +7596,9 @@ This is an example JSON object for profile settings."#;
|
||||
ack_reactions: true,
|
||||
show_tool_calls: true,
|
||||
session_store: None,
|
||||
approval_manager: Arc::new(ApprovalManager::for_non_interactive(
|
||||
&crate::config::AutonomyConfig::default(),
|
||||
)),
|
||||
});
|
||||
|
||||
// Simulate a photo attachment message with [IMAGE:] marker.
|
||||
@@ -7595,6 +7673,9 @@ This is an example JSON object for profile settings."#;
|
||||
ack_reactions: true,
|
||||
show_tool_calls: true,
|
||||
session_store: None,
|
||||
approval_manager: Arc::new(ApprovalManager::for_non_interactive(
|
||||
&crate::config::AutonomyConfig::default(),
|
||||
)),
|
||||
});
|
||||
|
||||
process_channel_message(
|
||||
|
||||
+3
-2
@@ -17,8 +17,9 @@ pub use schema::{
|
||||
QueryClassificationConfig, ReliabilityConfig, ResourceLimitsConfig, RuntimeConfig,
|
||||
SandboxBackend, SandboxConfig, SchedulerConfig, SecretsConfig, SecurityConfig, SkillsConfig,
|
||||
SkillsPromptInjectionMode, SlackConfig, StorageConfig, StorageProviderConfig,
|
||||
StorageProviderSection, StreamMode, TelegramConfig, ToolFilterGroup, ToolFilterGroupMode,
|
||||
TranscriptionConfig, TtsConfig, TunnelConfig, WebFetchConfig, WebSearchConfig, WebhookConfig,
|
||||
StorageProviderSection, StreamMode, SwarmConfig, SwarmStrategy, TelegramConfig,
|
||||
ToolFilterGroup, ToolFilterGroupMode, TranscriptionConfig, TtsConfig, TunnelConfig,
|
||||
WebFetchConfig, WebSearchConfig, WebhookConfig,
|
||||
};
|
||||
|
||||
pub fn name_and_presence<T: traits::ChannelConfig>(channel: Option<&T>) -> (&'static str, bool) {
|
||||
|
||||
@@ -232,6 +232,10 @@ pub struct Config {
|
||||
#[serde(default)]
|
||||
pub agents: HashMap<String, DelegateAgentConfig>,
|
||||
|
||||
/// Swarm configurations for multi-agent orchestration.
|
||||
#[serde(default)]
|
||||
pub swarms: HashMap<String, SwarmConfig>,
|
||||
|
||||
/// Hooks configuration (lifecycle hooks and built-in hook toggles).
|
||||
#[serde(default)]
|
||||
pub hooks: HooksConfig,
|
||||
@@ -319,6 +323,44 @@ pub struct DelegateAgentConfig {
|
||||
pub max_iterations: usize,
|
||||
}
|
||||
|
||||
// ── Swarms ──────────────────────────────────────────────────────
|
||||
|
||||
/// Orchestration strategy for a swarm of agents.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, PartialEq, Eq)]
|
||||
#[serde(rename_all = "snake_case")]
|
||||
pub enum SwarmStrategy {
|
||||
/// Run agents sequentially; each agent's output feeds into the next.
|
||||
Sequential,
|
||||
/// Run agents in parallel; collect all outputs.
|
||||
Parallel,
|
||||
/// Use the LLM to pick the best agent for the task.
|
||||
Router,
|
||||
}
|
||||
|
||||
/// Configuration for a swarm of coordinated agents.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
|
||||
pub struct SwarmConfig {
|
||||
/// Ordered list of agent names (must reference keys in `agents`).
|
||||
pub agents: Vec<String>,
|
||||
/// Orchestration strategy.
|
||||
pub strategy: SwarmStrategy,
|
||||
/// System prompt for router strategy (used to pick the best agent).
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
pub router_prompt: Option<String>,
|
||||
/// Optional description shown to the LLM when choosing swarms.
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
pub description: Option<String>,
|
||||
/// Maximum total timeout for the swarm execution in seconds.
|
||||
#[serde(default = "default_swarm_timeout_secs")]
|
||||
pub timeout_secs: u64,
|
||||
}
|
||||
|
||||
const DEFAULT_SWARM_TIMEOUT_SECS: u64 = 300;
|
||||
|
||||
fn default_swarm_timeout_secs() -> u64 {
|
||||
DEFAULT_SWARM_TIMEOUT_SECS
|
||||
}
|
||||
|
||||
/// Valid temperature range for all paths (config, CLI, env override).
|
||||
pub const TEMPERATURE_RANGE: std::ops::RangeInclusive<f64> = 0.0..=2.0;
|
||||
|
||||
@@ -4202,6 +4244,7 @@ impl Default for Config {
|
||||
cost: CostConfig::default(),
|
||||
peripherals: PeripheralsConfig::default(),
|
||||
agents: HashMap::new(),
|
||||
swarms: HashMap::new(),
|
||||
hooks: HooksConfig::default(),
|
||||
hardware: HardwareConfig::default(),
|
||||
query_classification: QueryClassificationConfig::default(),
|
||||
@@ -6309,6 +6352,7 @@ default_temperature = 0.7
|
||||
cost: CostConfig::default(),
|
||||
peripherals: PeripheralsConfig::default(),
|
||||
agents: HashMap::new(),
|
||||
swarms: HashMap::new(),
|
||||
hooks: HooksConfig::default(),
|
||||
hardware: HardwareConfig::default(),
|
||||
transcription: TranscriptionConfig::default(),
|
||||
@@ -6600,6 +6644,7 @@ tool_dispatcher = "xml"
|
||||
cost: CostConfig::default(),
|
||||
peripherals: PeripheralsConfig::default(),
|
||||
agents: HashMap::new(),
|
||||
swarms: HashMap::new(),
|
||||
hooks: HooksConfig::default(),
|
||||
hardware: HardwareConfig::default(),
|
||||
transcription: TranscriptionConfig::default(),
|
||||
@@ -9352,4 +9397,72 @@ require_otp_to_resume = true
|
||||
assert_eq!(&deserialized, variant);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
async fn swarm_strategy_roundtrip() {
|
||||
let cases = vec![
|
||||
(SwarmStrategy::Sequential, "\"sequential\""),
|
||||
(SwarmStrategy::Parallel, "\"parallel\""),
|
||||
(SwarmStrategy::Router, "\"router\""),
|
||||
];
|
||||
for (variant, expected_json) in &cases {
|
||||
let serialized = serde_json::to_string(variant).expect("serialize");
|
||||
assert_eq!(&serialized, expected_json, "variant: {variant:?}");
|
||||
let deserialized: SwarmStrategy =
|
||||
serde_json::from_str(expected_json).expect("deserialize");
|
||||
assert_eq!(&deserialized, variant);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
async fn swarm_config_deserializes_with_defaults() {
|
||||
let toml_str = r#"
|
||||
agents = ["researcher", "writer"]
|
||||
strategy = "sequential"
|
||||
"#;
|
||||
let config: SwarmConfig = toml::from_str(toml_str).expect("deserialize");
|
||||
assert_eq!(config.agents, vec!["researcher", "writer"]);
|
||||
assert_eq!(config.strategy, SwarmStrategy::Sequential);
|
||||
assert!(config.router_prompt.is_none());
|
||||
assert!(config.description.is_none());
|
||||
assert_eq!(config.timeout_secs, 300);
|
||||
}
|
||||
|
||||
#[test]
|
||||
async fn swarm_config_deserializes_full() {
|
||||
let toml_str = r#"
|
||||
agents = ["a", "b", "c"]
|
||||
strategy = "router"
|
||||
router_prompt = "Pick the best."
|
||||
description = "Multi-agent router"
|
||||
timeout_secs = 120
|
||||
"#;
|
||||
let config: SwarmConfig = toml::from_str(toml_str).expect("deserialize");
|
||||
assert_eq!(config.agents.len(), 3);
|
||||
assert_eq!(config.strategy, SwarmStrategy::Router);
|
||||
assert_eq!(config.router_prompt.as_deref(), Some("Pick the best."));
|
||||
assert_eq!(config.description.as_deref(), Some("Multi-agent router"));
|
||||
assert_eq!(config.timeout_secs, 120);
|
||||
}
|
||||
|
||||
#[test]
|
||||
async fn config_with_swarms_section_deserializes() {
|
||||
let toml_str = r#"
|
||||
[agents.researcher]
|
||||
provider = "ollama"
|
||||
model = "llama3"
|
||||
|
||||
[agents.writer]
|
||||
provider = "openrouter"
|
||||
model = "claude-sonnet"
|
||||
|
||||
[swarms.pipeline]
|
||||
agents = ["researcher", "writer"]
|
||||
strategy = "sequential"
|
||||
"#;
|
||||
let config: Config = toml::from_str(toml_str).expect("deserialize");
|
||||
assert_eq!(config.agents.len(), 2);
|
||||
assert_eq!(config.swarms.len(), 1);
|
||||
assert!(config.swarms.contains_key("pipeline"));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -0,0 +1,229 @@
|
||||
pub mod types;
|
||||
|
||||
pub use types::{Hand, HandContext, HandRun, HandRunStatus};
|
||||
|
||||
use anyhow::{Context, Result};
|
||||
use std::path::Path;
|
||||
|
||||
/// Load all hand definitions from TOML files in the given directory.
|
||||
///
|
||||
/// Each `.toml` file in `hands_dir` is expected to deserialize into a [`Hand`].
|
||||
/// Files that fail to parse are logged and skipped.
|
||||
pub fn load_hands(hands_dir: &Path) -> Result<Vec<Hand>> {
|
||||
if !hands_dir.is_dir() {
|
||||
return Ok(Vec::new());
|
||||
}
|
||||
|
||||
let mut hands = Vec::new();
|
||||
let entries = std::fs::read_dir(hands_dir)
|
||||
.with_context(|| format!("failed to read hands directory: {}", hands_dir.display()))?;
|
||||
|
||||
for entry in entries {
|
||||
let entry = entry?;
|
||||
let path = entry.path();
|
||||
if path.extension().and_then(|e| e.to_str()) != Some("toml") {
|
||||
continue;
|
||||
}
|
||||
let content = std::fs::read_to_string(&path)
|
||||
.with_context(|| format!("failed to read hand file: {}", path.display()))?;
|
||||
match toml::from_str::<Hand>(&content) {
|
||||
Ok(hand) => hands.push(hand),
|
||||
Err(e) => {
|
||||
tracing::warn!(path = %path.display(), error = %e, "skipping malformed hand file");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(hands)
|
||||
}
|
||||
|
||||
/// Load the rolling context for a hand.
|
||||
///
|
||||
/// Reads from `{hands_dir}/{name}/context.json`. Returns a fresh
|
||||
/// [`HandContext`] if the file does not exist yet.
|
||||
pub fn load_hand_context(hands_dir: &Path, name: &str) -> Result<HandContext> {
|
||||
let path = hands_dir.join(name).join("context.json");
|
||||
if !path.exists() {
|
||||
return Ok(HandContext::new(name));
|
||||
}
|
||||
let content = std::fs::read_to_string(&path)
|
||||
.with_context(|| format!("failed to read hand context: {}", path.display()))?;
|
||||
let ctx: HandContext = serde_json::from_str(&content)
|
||||
.with_context(|| format!("failed to parse hand context: {}", path.display()))?;
|
||||
Ok(ctx)
|
||||
}
|
||||
|
||||
/// Persist the rolling context for a hand.
|
||||
///
|
||||
/// Writes to `{hands_dir}/{name}/context.json`, creating the
|
||||
/// directory if it does not exist.
|
||||
pub fn save_hand_context(hands_dir: &Path, context: &HandContext) -> Result<()> {
|
||||
let dir = hands_dir.join(&context.hand_name);
|
||||
std::fs::create_dir_all(&dir)
|
||||
.with_context(|| format!("failed to create hand context dir: {}", dir.display()))?;
|
||||
let path = dir.join("context.json");
|
||||
let json = serde_json::to_string_pretty(context)?;
|
||||
std::fs::write(&path, json)
|
||||
.with_context(|| format!("failed to write hand context: {}", path.display()))?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use tempfile::TempDir;
|
||||
|
||||
fn write_hand_toml(dir: &Path, filename: &str, content: &str) {
|
||||
std::fs::write(dir.join(filename), content).unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn load_hands_empty_dir() {
|
||||
let tmp = TempDir::new().unwrap();
|
||||
let hands = load_hands(tmp.path()).unwrap();
|
||||
assert!(hands.is_empty());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn load_hands_nonexistent_dir() {
|
||||
let hands = load_hands(Path::new("/nonexistent/path/hands")).unwrap();
|
||||
assert!(hands.is_empty());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn load_hands_parses_valid_files() {
|
||||
let tmp = TempDir::new().unwrap();
|
||||
write_hand_toml(
|
||||
tmp.path(),
|
||||
"scanner.toml",
|
||||
r#"
|
||||
name = "scanner"
|
||||
description = "Market scanner"
|
||||
prompt = "Scan markets."
|
||||
|
||||
[schedule]
|
||||
kind = "cron"
|
||||
expr = "0 9 * * *"
|
||||
"#,
|
||||
);
|
||||
write_hand_toml(
|
||||
tmp.path(),
|
||||
"digest.toml",
|
||||
r#"
|
||||
name = "digest"
|
||||
description = "News digest"
|
||||
prompt = "Digest news."
|
||||
|
||||
[schedule]
|
||||
kind = "every"
|
||||
every_ms = 3600000
|
||||
"#,
|
||||
);
|
||||
|
||||
let hands = load_hands(tmp.path()).unwrap();
|
||||
assert_eq!(hands.len(), 2);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn load_hands_skips_malformed_files() {
|
||||
let tmp = TempDir::new().unwrap();
|
||||
write_hand_toml(tmp.path(), "bad.toml", "this is not valid toml struct");
|
||||
write_hand_toml(
|
||||
tmp.path(),
|
||||
"good.toml",
|
||||
r#"
|
||||
name = "good"
|
||||
description = "A good hand"
|
||||
prompt = "Do good things."
|
||||
|
||||
[schedule]
|
||||
kind = "every"
|
||||
every_ms = 60000
|
||||
"#,
|
||||
);
|
||||
|
||||
let hands = load_hands(tmp.path()).unwrap();
|
||||
assert_eq!(hands.len(), 1);
|
||||
assert_eq!(hands[0].name, "good");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn load_hands_ignores_non_toml_files() {
|
||||
let tmp = TempDir::new().unwrap();
|
||||
std::fs::write(tmp.path().join("readme.md"), "# Hands").unwrap();
|
||||
std::fs::write(tmp.path().join("notes.txt"), "some notes").unwrap();
|
||||
|
||||
let hands = load_hands(tmp.path()).unwrap();
|
||||
assert!(hands.is_empty());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn context_roundtrip_through_filesystem() {
|
||||
let tmp = TempDir::new().unwrap();
|
||||
let mut ctx = HandContext::new("test-hand");
|
||||
let run = HandRun {
|
||||
hand_name: "test-hand".into(),
|
||||
run_id: "run-001".into(),
|
||||
started_at: chrono::Utc::now(),
|
||||
finished_at: Some(chrono::Utc::now()),
|
||||
status: HandRunStatus::Completed,
|
||||
findings: vec!["found something".into()],
|
||||
knowledge_added: vec!["learned something".into()],
|
||||
duration_ms: Some(500),
|
||||
};
|
||||
ctx.record_run(run, 100);
|
||||
|
||||
save_hand_context(tmp.path(), &ctx).unwrap();
|
||||
let loaded = load_hand_context(tmp.path(), "test-hand").unwrap();
|
||||
|
||||
assert_eq!(loaded.hand_name, "test-hand");
|
||||
assert_eq!(loaded.total_runs, 1);
|
||||
assert_eq!(loaded.history.len(), 1);
|
||||
assert_eq!(loaded.learned_facts, vec!["learned something"]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn load_context_returns_fresh_when_missing() {
|
||||
let tmp = TempDir::new().unwrap();
|
||||
let ctx = load_hand_context(tmp.path(), "nonexistent").unwrap();
|
||||
assert_eq!(ctx.hand_name, "nonexistent");
|
||||
assert_eq!(ctx.total_runs, 0);
|
||||
assert!(ctx.history.is_empty());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn save_context_creates_directory() {
|
||||
let tmp = TempDir::new().unwrap();
|
||||
let ctx = HandContext::new("new-hand");
|
||||
save_hand_context(tmp.path(), &ctx).unwrap();
|
||||
|
||||
assert!(tmp.path().join("new-hand").join("context.json").exists());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn save_then_load_preserves_multiple_runs() {
|
||||
let tmp = TempDir::new().unwrap();
|
||||
let mut ctx = HandContext::new("multi");
|
||||
|
||||
for i in 0..5 {
|
||||
let run = HandRun {
|
||||
hand_name: "multi".into(),
|
||||
run_id: format!("run-{i:03}"),
|
||||
started_at: chrono::Utc::now(),
|
||||
finished_at: Some(chrono::Utc::now()),
|
||||
status: HandRunStatus::Completed,
|
||||
findings: vec![format!("finding-{i}")],
|
||||
knowledge_added: vec![format!("fact-{i}")],
|
||||
duration_ms: Some(100),
|
||||
};
|
||||
ctx.record_run(run, 3);
|
||||
}
|
||||
|
||||
save_hand_context(tmp.path(), &ctx).unwrap();
|
||||
let loaded = load_hand_context(tmp.path(), "multi").unwrap();
|
||||
|
||||
assert_eq!(loaded.total_runs, 5);
|
||||
assert_eq!(loaded.history.len(), 3, "history capped at max_history=3");
|
||||
assert_eq!(loaded.learned_facts.len(), 5);
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,345 @@
|
||||
use chrono::{DateTime, Utc};
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::cron::Schedule;
|
||||
|
||||
// ── Hand ───────────────────────────────────────────────────────
|
||||
|
||||
/// A Hand is an autonomous agent package that runs on a schedule,
|
||||
/// accumulates knowledge over time, and reports results.
|
||||
///
|
||||
/// Hands are defined as TOML files in `~/.zeroclaw/hands/` and each
|
||||
/// maintains a rolling context of findings across runs so the agent
|
||||
/// grows smarter with every execution.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct Hand {
|
||||
/// Unique name (also used as directory/file stem)
|
||||
pub name: String,
|
||||
/// Human-readable description of what this hand does
|
||||
pub description: String,
|
||||
/// The schedule this hand runs on (reuses cron schedule types)
|
||||
pub schedule: Schedule,
|
||||
/// System prompt / execution plan for this hand
|
||||
pub prompt: String,
|
||||
/// Domain knowledge lines to inject into context
|
||||
#[serde(default)]
|
||||
pub knowledge: Vec<String>,
|
||||
/// Tools this hand is allowed to use (None = all available)
|
||||
#[serde(default)]
|
||||
pub allowed_tools: Option<Vec<String>>,
|
||||
/// Model override for this hand (None = default provider)
|
||||
#[serde(default)]
|
||||
pub model: Option<String>,
|
||||
/// Whether this hand is currently active
|
||||
#[serde(default = "default_true")]
|
||||
pub active: bool,
|
||||
/// Maximum runs to keep in history
|
||||
#[serde(default = "default_max_runs")]
|
||||
pub max_history: usize,
|
||||
}
|
||||
|
||||
fn default_true() -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
fn default_max_runs() -> usize {
|
||||
100
|
||||
}
|
||||
|
||||
// ── Hand Run ───────────────────────────────────────────────────
|
||||
|
||||
/// The status of a single hand execution.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
|
||||
#[serde(rename_all = "snake_case", tag = "status")]
|
||||
pub enum HandRunStatus {
|
||||
Running,
|
||||
Completed,
|
||||
Failed { error: String },
|
||||
}
|
||||
|
||||
/// Record of a single hand execution.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct HandRun {
|
||||
/// Name of the hand that produced this run
|
||||
pub hand_name: String,
|
||||
/// Unique identifier for this run
|
||||
pub run_id: String,
|
||||
/// When the run started
|
||||
pub started_at: DateTime<Utc>,
|
||||
/// When the run finished (None if still running)
|
||||
pub finished_at: Option<DateTime<Utc>>,
|
||||
/// Outcome of the run
|
||||
pub status: HandRunStatus,
|
||||
/// Key findings/outputs extracted from this run
|
||||
#[serde(default)]
|
||||
pub findings: Vec<String>,
|
||||
/// New knowledge accumulated and stored to memory
|
||||
#[serde(default)]
|
||||
pub knowledge_added: Vec<String>,
|
||||
/// Wall-clock duration in milliseconds
|
||||
pub duration_ms: Option<u64>,
|
||||
}
|
||||
|
||||
// ── Hand Context ───────────────────────────────────────────────
|
||||
|
||||
/// Rolling context that accumulates across hand runs.
|
||||
///
|
||||
/// Persisted as `~/.zeroclaw/hands/{name}/context.json`.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct HandContext {
|
||||
/// Name of the hand this context belongs to
|
||||
pub hand_name: String,
|
||||
/// Past runs, most-recent first, capped at `Hand::max_history`
|
||||
#[serde(default)]
|
||||
pub history: Vec<HandRun>,
|
||||
/// Persistent facts learned across runs
|
||||
#[serde(default)]
|
||||
pub learned_facts: Vec<String>,
|
||||
/// Timestamp of the last completed run
|
||||
pub last_run: Option<DateTime<Utc>>,
|
||||
/// Total number of successful runs
|
||||
#[serde(default)]
|
||||
pub total_runs: u64,
|
||||
}
|
||||
|
||||
impl HandContext {
|
||||
/// Create a fresh, empty context for a hand.
|
||||
pub fn new(hand_name: &str) -> Self {
|
||||
Self {
|
||||
hand_name: hand_name.to_string(),
|
||||
history: Vec::new(),
|
||||
learned_facts: Vec::new(),
|
||||
last_run: None,
|
||||
total_runs: 0,
|
||||
}
|
||||
}
|
||||
|
||||
/// Record a completed run, updating counters and trimming history.
|
||||
pub fn record_run(&mut self, run: HandRun, max_history: usize) {
|
||||
if run.status == (HandRunStatus::Completed) {
|
||||
self.total_runs += 1;
|
||||
self.last_run = run.finished_at;
|
||||
}
|
||||
|
||||
// Merge new knowledge
|
||||
for fact in &run.knowledge_added {
|
||||
if !self.learned_facts.contains(fact) {
|
||||
self.learned_facts.push(fact.clone());
|
||||
}
|
||||
}
|
||||
|
||||
// Insert at the front (most-recent first)
|
||||
self.history.insert(0, run);
|
||||
|
||||
// Cap history length
|
||||
self.history.truncate(max_history);
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::cron::Schedule;
|
||||
|
||||
fn sample_hand() -> Hand {
|
||||
Hand {
|
||||
name: "market-scanner".into(),
|
||||
description: "Scans market trends and reports findings".into(),
|
||||
schedule: Schedule::Cron {
|
||||
expr: "0 9 * * 1-5".into(),
|
||||
tz: Some("America/New_York".into()),
|
||||
},
|
||||
prompt: "Scan market trends and report key findings.".into(),
|
||||
knowledge: vec!["Focus on tech sector.".into()],
|
||||
allowed_tools: Some(vec!["web_search".into(), "memory".into()]),
|
||||
model: Some("claude-opus-4-6".into()),
|
||||
active: true,
|
||||
max_history: 50,
|
||||
}
|
||||
}
|
||||
|
||||
fn sample_run(name: &str, status: HandRunStatus) -> HandRun {
|
||||
let now = Utc::now();
|
||||
HandRun {
|
||||
hand_name: name.into(),
|
||||
run_id: uuid::Uuid::new_v4().to_string(),
|
||||
started_at: now,
|
||||
finished_at: Some(now),
|
||||
status,
|
||||
findings: vec!["finding-1".into()],
|
||||
knowledge_added: vec!["learned-fact-A".into()],
|
||||
duration_ms: Some(1234),
|
||||
}
|
||||
}
|
||||
|
||||
// ── Deserialization ────────────────────────────────────────
|
||||
|
||||
#[test]
|
||||
fn hand_deserializes_from_toml() {
|
||||
let toml_str = r#"
|
||||
name = "market-scanner"
|
||||
description = "Scans market trends"
|
||||
prompt = "Scan trends."
|
||||
|
||||
[schedule]
|
||||
kind = "cron"
|
||||
expr = "0 9 * * 1-5"
|
||||
tz = "America/New_York"
|
||||
"#;
|
||||
let hand: Hand = toml::from_str(toml_str).unwrap();
|
||||
assert_eq!(hand.name, "market-scanner");
|
||||
assert!(hand.active, "active should default to true");
|
||||
assert_eq!(hand.max_history, 100, "max_history should default to 100");
|
||||
assert!(hand.knowledge.is_empty());
|
||||
assert!(hand.allowed_tools.is_none());
|
||||
assert!(hand.model.is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn hand_deserializes_full_toml() {
|
||||
let toml_str = r#"
|
||||
name = "news-digest"
|
||||
description = "Daily news digest"
|
||||
prompt = "Summarize the day's news."
|
||||
knowledge = ["focus on AI", "include funding rounds"]
|
||||
allowed_tools = ["web_search"]
|
||||
model = "claude-opus-4-6"
|
||||
active = false
|
||||
max_history = 25
|
||||
|
||||
[schedule]
|
||||
kind = "every"
|
||||
every_ms = 3600000
|
||||
"#;
|
||||
let hand: Hand = toml::from_str(toml_str).unwrap();
|
||||
assert_eq!(hand.name, "news-digest");
|
||||
assert!(!hand.active);
|
||||
assert_eq!(hand.max_history, 25);
|
||||
assert_eq!(hand.knowledge.len(), 2);
|
||||
assert_eq!(hand.allowed_tools.as_ref().unwrap().len(), 1);
|
||||
assert_eq!(hand.model.as_deref(), Some("claude-opus-4-6"));
|
||||
assert!(matches!(
|
||||
hand.schedule,
|
||||
Schedule::Every {
|
||||
every_ms: 3_600_000
|
||||
}
|
||||
));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn hand_roundtrip_json() {
|
||||
let hand = sample_hand();
|
||||
let json = serde_json::to_string(&hand).unwrap();
|
||||
let parsed: Hand = serde_json::from_str(&json).unwrap();
|
||||
assert_eq!(parsed.name, hand.name);
|
||||
assert_eq!(parsed.max_history, hand.max_history);
|
||||
}
|
||||
|
||||
// ── HandRunStatus ──────────────────────────────────────────
|
||||
|
||||
#[test]
|
||||
fn hand_run_status_serde_roundtrip() {
|
||||
let statuses = vec![
|
||||
HandRunStatus::Running,
|
||||
HandRunStatus::Completed,
|
||||
HandRunStatus::Failed {
|
||||
error: "timeout".into(),
|
||||
},
|
||||
];
|
||||
for status in statuses {
|
||||
let json = serde_json::to_string(&status).unwrap();
|
||||
let parsed: HandRunStatus = serde_json::from_str(&json).unwrap();
|
||||
assert_eq!(parsed, status);
|
||||
}
|
||||
}
|
||||
|
||||
// ── HandContext ────────────────────────────────────────────
|
||||
|
||||
#[test]
|
||||
fn context_new_is_empty() {
|
||||
let ctx = HandContext::new("test-hand");
|
||||
assert_eq!(ctx.hand_name, "test-hand");
|
||||
assert!(ctx.history.is_empty());
|
||||
assert!(ctx.learned_facts.is_empty());
|
||||
assert!(ctx.last_run.is_none());
|
||||
assert_eq!(ctx.total_runs, 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn context_record_run_increments_counters() {
|
||||
let mut ctx = HandContext::new("scanner");
|
||||
let run = sample_run("scanner", HandRunStatus::Completed);
|
||||
ctx.record_run(run, 100);
|
||||
|
||||
assert_eq!(ctx.total_runs, 1);
|
||||
assert!(ctx.last_run.is_some());
|
||||
assert_eq!(ctx.history.len(), 1);
|
||||
assert_eq!(ctx.learned_facts, vec!["learned-fact-A"]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn context_record_failed_run_does_not_increment_total() {
|
||||
let mut ctx = HandContext::new("scanner");
|
||||
let run = sample_run(
|
||||
"scanner",
|
||||
HandRunStatus::Failed {
|
||||
error: "boom".into(),
|
||||
},
|
||||
);
|
||||
ctx.record_run(run, 100);
|
||||
|
||||
assert_eq!(ctx.total_runs, 0);
|
||||
assert!(ctx.last_run.is_none());
|
||||
assert_eq!(ctx.history.len(), 1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn context_caps_history_at_max() {
|
||||
let mut ctx = HandContext::new("scanner");
|
||||
for _ in 0..10 {
|
||||
let run = sample_run("scanner", HandRunStatus::Completed);
|
||||
ctx.record_run(run, 3);
|
||||
}
|
||||
assert_eq!(ctx.history.len(), 3);
|
||||
assert_eq!(ctx.total_runs, 10);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn context_deduplicates_learned_facts() {
|
||||
let mut ctx = HandContext::new("scanner");
|
||||
let run1 = sample_run("scanner", HandRunStatus::Completed);
|
||||
let run2 = sample_run("scanner", HandRunStatus::Completed);
|
||||
ctx.record_run(run1, 100);
|
||||
ctx.record_run(run2, 100);
|
||||
|
||||
// Both runs add "learned-fact-A" but it should appear only once
|
||||
assert_eq!(ctx.learned_facts.len(), 1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn context_json_roundtrip() {
|
||||
let mut ctx = HandContext::new("scanner");
|
||||
let run = sample_run("scanner", HandRunStatus::Completed);
|
||||
ctx.record_run(run, 100);
|
||||
|
||||
let json = serde_json::to_string_pretty(&ctx).unwrap();
|
||||
let parsed: HandContext = serde_json::from_str(&json).unwrap();
|
||||
|
||||
assert_eq!(parsed.hand_name, "scanner");
|
||||
assert_eq!(parsed.total_runs, 1);
|
||||
assert_eq!(parsed.history.len(), 1);
|
||||
assert_eq!(parsed.learned_facts, vec!["learned-fact-A"]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn most_recent_run_is_first_in_history() {
|
||||
let mut ctx = HandContext::new("scanner");
|
||||
for i in 0..3 {
|
||||
let mut run = sample_run("scanner", HandRunStatus::Completed);
|
||||
run.findings = vec![format!("finding-{i}")];
|
||||
ctx.record_run(run, 100);
|
||||
}
|
||||
assert_eq!(ctx.history[0].findings[0], "finding-2");
|
||||
assert_eq!(ctx.history[2].findings[0], "finding-0");
|
||||
}
|
||||
}
|
||||
@@ -48,6 +48,7 @@ pub(crate) mod cron;
|
||||
pub(crate) mod daemon;
|
||||
pub(crate) mod doctor;
|
||||
pub mod gateway;
|
||||
pub mod hands;
|
||||
pub(crate) mod hardware;
|
||||
pub(crate) mod health;
|
||||
pub(crate) mod heartbeat;
|
||||
|
||||
+27
@@ -166,6 +166,10 @@ enum Commands {
|
||||
#[arg(long)]
|
||||
reinit: bool,
|
||||
|
||||
/// Run the full interactive setup wizard
|
||||
#[arg(long)]
|
||||
interactive: bool,
|
||||
|
||||
/// Reconfigure channels only (fast repair flow)
|
||||
#[arg(long)]
|
||||
channels_only: bool,
|
||||
@@ -726,6 +730,7 @@ async fn main() -> Result<()> {
|
||||
if let Commands::Onboard {
|
||||
force,
|
||||
reinit,
|
||||
interactive,
|
||||
channels_only,
|
||||
api_key,
|
||||
provider,
|
||||
@@ -735,6 +740,7 @@ async fn main() -> Result<()> {
|
||||
{
|
||||
let force = *force;
|
||||
let reinit = *reinit;
|
||||
let interactive = *interactive;
|
||||
let channels_only = *channels_only;
|
||||
let api_key = api_key.clone();
|
||||
let provider = provider.clone();
|
||||
@@ -744,6 +750,14 @@ async fn main() -> Result<()> {
|
||||
if reinit && channels_only {
|
||||
bail!("--reinit and --channels-only cannot be used together");
|
||||
}
|
||||
if interactive && channels_only {
|
||||
bail!("--interactive and --channels-only cannot be used together");
|
||||
}
|
||||
if interactive
|
||||
&& (api_key.is_some() || provider.is_some() || model.is_some() || memory.is_some())
|
||||
{
|
||||
bail!("--interactive does not accept --api-key, --provider, --model, or --memory");
|
||||
}
|
||||
if channels_only
|
||||
&& (api_key.is_some() || provider.is_some() || model.is_some() || memory.is_some())
|
||||
{
|
||||
@@ -796,6 +810,8 @@ async fn main() -> Result<()> {
|
||||
|
||||
let config = if channels_only {
|
||||
Box::pin(onboard::run_channels_repair_wizard()).await
|
||||
} else if interactive {
|
||||
Box::pin(onboard::run_wizard(force)).await
|
||||
} else {
|
||||
onboard::run_quick_setup(
|
||||
api_key.as_deref(),
|
||||
@@ -2207,6 +2223,17 @@ mod tests {
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn onboard_cli_accepts_interactive_flag() {
|
||||
let cli = Cli::try_parse_from(["zeroclaw", "onboard", "--interactive"])
|
||||
.expect("onboard --interactive should parse");
|
||||
|
||||
match cli.command {
|
||||
Commands::Onboard { interactive, .. } => assert!(interactive),
|
||||
other => panic!("expected onboard command, got {other:?}"),
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn cli_parses_estop_default_engage() {
|
||||
let cli = Cli::try_parse_from(["zeroclaw", "estop"]).expect("estop command should parse");
|
||||
|
||||
+2
-1
@@ -4,7 +4,7 @@ pub mod wizard;
|
||||
#[allow(unused_imports)]
|
||||
pub use wizard::{
|
||||
run_channels_repair_wizard, run_models_list, run_models_refresh, run_models_refresh_all,
|
||||
run_models_set, run_models_status, run_quick_setup,
|
||||
run_models_set, run_models_status, run_quick_setup, run_wizard,
|
||||
};
|
||||
|
||||
#[cfg(test)]
|
||||
@@ -17,6 +17,7 @@ mod tests {
|
||||
fn wizard_functions_are_reexported() {
|
||||
assert_reexport_exists(run_channels_repair_wizard);
|
||||
assert_reexport_exists(run_quick_setup);
|
||||
assert_reexport_exists(run_wizard);
|
||||
assert_reexport_exists(run_models_refresh);
|
||||
assert_reexport_exists(run_models_list);
|
||||
assert_reexport_exists(run_models_set);
|
||||
|
||||
@@ -170,6 +170,7 @@ pub async fn run_wizard(force: bool) -> Result<Config> {
|
||||
cost: crate::config::CostConfig::default(),
|
||||
peripherals: crate::config::PeripheralsConfig::default(),
|
||||
agents: std::collections::HashMap::new(),
|
||||
swarms: std::collections::HashMap::new(),
|
||||
hooks: crate::config::HooksConfig::default(),
|
||||
hardware: hardware_config,
|
||||
query_classification: crate::config::QueryClassificationConfig::default(),
|
||||
@@ -527,6 +528,7 @@ async fn run_quick_setup_with_home(
|
||||
cost: crate::config::CostConfig::default(),
|
||||
peripherals: crate::config::PeripheralsConfig::default(),
|
||||
agents: std::collections::HashMap::new(),
|
||||
swarms: std::collections::HashMap::new(),
|
||||
hooks: crate::config::HooksConfig::default(),
|
||||
hardware: crate::config::HardwareConfig::default(),
|
||||
query_classification: crate::config::QueryClassificationConfig::default(),
|
||||
@@ -4147,6 +4149,23 @@ fn setup_channels() -> Result<ChannelsConfig> {
|
||||
.interact()?;
|
||||
|
||||
if mode_idx == 0 {
|
||||
// Compile-time check: warn early if the feature is not enabled.
|
||||
#[cfg(not(feature = "whatsapp-web"))]
|
||||
{
|
||||
println!();
|
||||
println!(
|
||||
" {} {}",
|
||||
style("⚠").yellow().bold(),
|
||||
style("The 'whatsapp-web' feature is not compiled in. WhatsApp Web will not work at runtime.").yellow()
|
||||
);
|
||||
println!(
|
||||
" {} Rebuild with: {}",
|
||||
style("→").dim(),
|
||||
style("cargo build --features whatsapp-web").white().bold()
|
||||
);
|
||||
println!();
|
||||
}
|
||||
|
||||
println!(" {}", style("Mode: WhatsApp Web").dim());
|
||||
print_bullet("1. Build with --features whatsapp-web");
|
||||
print_bullet(
|
||||
|
||||
@@ -500,19 +500,23 @@ struct ToolCall {
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
id: Option<String>,
|
||||
#[serde(rename = "type")]
|
||||
#[serde(default)]
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
kind: Option<String>,
|
||||
#[serde(default)]
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
function: Option<Function>,
|
||||
|
||||
// Compatibility: Some providers (e.g., older GLM) may use 'name' directly
|
||||
#[serde(default)]
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
name: Option<String>,
|
||||
#[serde(default)]
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
arguments: Option<String>,
|
||||
|
||||
// Compatibility: DeepSeek sometimes wraps arguments differently
|
||||
#[serde(rename = "parameters", default)]
|
||||
#[serde(
|
||||
rename = "parameters",
|
||||
default,
|
||||
skip_serializing_if = "Option::is_none"
|
||||
)]
|
||||
parameters: Option<serde_json::Value>,
|
||||
}
|
||||
|
||||
@@ -3094,4 +3098,50 @@ mod tests {
|
||||
// Should not panic
|
||||
let _client = p.http_client();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn tool_call_none_fields_omitted_from_json() {
|
||||
// Ensures providers like Mistral that reject extra fields (e.g. "name": null)
|
||||
// don't receive them when the ToolCall compat fields are None.
|
||||
let tc = ToolCall {
|
||||
id: Some("call_1".to_string()),
|
||||
kind: Some("function".to_string()),
|
||||
function: Some(Function {
|
||||
name: Some("shell".to_string()),
|
||||
arguments: Some("{\"command\":\"ls\"}".to_string()),
|
||||
}),
|
||||
name: None,
|
||||
arguments: None,
|
||||
parameters: None,
|
||||
};
|
||||
let json = serde_json::to_value(&tc).unwrap();
|
||||
assert!(!json.as_object().unwrap().contains_key("name"));
|
||||
assert!(!json.as_object().unwrap().contains_key("arguments"));
|
||||
assert!(!json.as_object().unwrap().contains_key("parameters"));
|
||||
// Standard fields must be present
|
||||
assert!(json.as_object().unwrap().contains_key("id"));
|
||||
assert!(json.as_object().unwrap().contains_key("type"));
|
||||
assert!(json.as_object().unwrap().contains_key("function"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn tool_call_with_compat_fields_serializes_them() {
|
||||
// When compat fields are Some, they should appear in the output.
|
||||
let tc = ToolCall {
|
||||
id: None,
|
||||
kind: None,
|
||||
function: None,
|
||||
name: Some("shell".to_string()),
|
||||
arguments: Some("{\"command\":\"ls\"}".to_string()),
|
||||
parameters: None,
|
||||
};
|
||||
let json = serde_json::to_value(&tc).unwrap();
|
||||
assert_eq!(json["name"], "shell");
|
||||
assert_eq!(json["arguments"], "{\"command\":\"ls\"}");
|
||||
// None fields should be omitted
|
||||
assert!(!json.as_object().unwrap().contains_key("id"));
|
||||
assert!(!json.as_object().unwrap().contains_key("type"));
|
||||
assert!(!json.as_object().unwrap().contains_key("function"));
|
||||
assert!(!json.as_object().unwrap().contains_key("parameters"));
|
||||
}
|
||||
}
|
||||
|
||||
+37
-18
@@ -57,6 +57,7 @@ pub mod schedule;
|
||||
pub mod schema;
|
||||
pub mod screenshot;
|
||||
pub mod shell;
|
||||
pub mod swarm;
|
||||
pub mod tool_search;
|
||||
pub mod traits;
|
||||
pub mod web_fetch;
|
||||
@@ -103,6 +104,7 @@ pub use schedule::ScheduleTool;
|
||||
pub use schema::{CleaningStrategy, SchemaCleanr};
|
||||
pub use screenshot::ScreenshotTool;
|
||||
pub use shell::ShellTool;
|
||||
pub use swarm::SwarmTool;
|
||||
pub use tool_search::ToolSearchTool;
|
||||
pub use traits::Tool;
|
||||
#[allow(unused_imports)]
|
||||
@@ -358,6 +360,24 @@ pub fn all_tools_with_runtime(
|
||||
}
|
||||
|
||||
// Add delegation tool when agents are configured
|
||||
let delegate_fallback_credential = fallback_api_key.and_then(|value| {
|
||||
let trimmed_value = value.trim();
|
||||
(!trimmed_value.is_empty()).then(|| trimmed_value.to_owned())
|
||||
});
|
||||
let provider_runtime_options = crate::providers::ProviderRuntimeOptions {
|
||||
auth_profile_override: None,
|
||||
provider_api_url: root_config.api_url.clone(),
|
||||
zeroclaw_dir: root_config
|
||||
.config_path
|
||||
.parent()
|
||||
.map(std::path::PathBuf::from),
|
||||
secrets_encrypt: root_config.secrets.encrypt,
|
||||
reasoning_enabled: root_config.runtime.reasoning_enabled,
|
||||
provider_timeout_secs: Some(root_config.provider_timeout_secs),
|
||||
extra_headers: root_config.extra_headers.clone(),
|
||||
api_path: root_config.api_path.clone(),
|
||||
};
|
||||
|
||||
let delegate_handle: Option<DelegateParentToolsHandle> = if agents.is_empty() {
|
||||
None
|
||||
} else {
|
||||
@@ -365,28 +385,12 @@ pub fn all_tools_with_runtime(
|
||||
.iter()
|
||||
.map(|(name, cfg)| (name.clone(), cfg.clone()))
|
||||
.collect();
|
||||
let delegate_fallback_credential = fallback_api_key.and_then(|value| {
|
||||
let trimmed_value = value.trim();
|
||||
(!trimmed_value.is_empty()).then(|| trimmed_value.to_owned())
|
||||
});
|
||||
let parent_tools = Arc::new(RwLock::new(tool_arcs.clone()));
|
||||
let delegate_tool = DelegateTool::new_with_options(
|
||||
delegate_agents,
|
||||
delegate_fallback_credential,
|
||||
delegate_fallback_credential.clone(),
|
||||
security.clone(),
|
||||
crate::providers::ProviderRuntimeOptions {
|
||||
auth_profile_override: None,
|
||||
provider_api_url: root_config.api_url.clone(),
|
||||
zeroclaw_dir: root_config
|
||||
.config_path
|
||||
.parent()
|
||||
.map(std::path::PathBuf::from),
|
||||
secrets_encrypt: root_config.secrets.encrypt,
|
||||
reasoning_enabled: root_config.runtime.reasoning_enabled,
|
||||
provider_timeout_secs: Some(root_config.provider_timeout_secs),
|
||||
extra_headers: root_config.extra_headers.clone(),
|
||||
api_path: root_config.api_path.clone(),
|
||||
},
|
||||
provider_runtime_options.clone(),
|
||||
)
|
||||
.with_parent_tools(Arc::clone(&parent_tools))
|
||||
.with_multimodal_config(root_config.multimodal.clone());
|
||||
@@ -394,6 +398,21 @@ pub fn all_tools_with_runtime(
|
||||
Some(parent_tools)
|
||||
};
|
||||
|
||||
// Add swarm tool when swarms are configured
|
||||
if !root_config.swarms.is_empty() {
|
||||
let swarm_agents: HashMap<String, DelegateAgentConfig> = agents
|
||||
.iter()
|
||||
.map(|(name, cfg)| (name.clone(), cfg.clone()))
|
||||
.collect();
|
||||
tool_arcs.push(Arc::new(SwarmTool::new(
|
||||
root_config.swarms.clone(),
|
||||
swarm_agents,
|
||||
delegate_fallback_credential,
|
||||
security.clone(),
|
||||
provider_runtime_options,
|
||||
)));
|
||||
}
|
||||
|
||||
(boxed_registry_from_arcs(tool_arcs), delegate_handle)
|
||||
}
|
||||
|
||||
|
||||
@@ -0,0 +1,953 @@
|
||||
use super::traits::{Tool, ToolResult};
|
||||
use crate::config::{DelegateAgentConfig, SwarmConfig, SwarmStrategy};
|
||||
use crate::providers::{self, Provider};
|
||||
use crate::security::policy::ToolOperation;
|
||||
use crate::security::SecurityPolicy;
|
||||
use async_trait::async_trait;
|
||||
use serde_json::json;
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
/// Default timeout for individual agent calls within a swarm.
|
||||
const SWARM_AGENT_TIMEOUT_SECS: u64 = 120;
|
||||
|
||||
/// Tool that orchestrates multiple agents as a swarm. Supports sequential
|
||||
/// (pipeline), parallel (fan-out/fan-in), and router (LLM-selected) strategies.
|
||||
pub struct SwarmTool {
|
||||
swarms: Arc<HashMap<String, SwarmConfig>>,
|
||||
agents: Arc<HashMap<String, DelegateAgentConfig>>,
|
||||
security: Arc<SecurityPolicy>,
|
||||
fallback_credential: Option<String>,
|
||||
provider_runtime_options: providers::ProviderRuntimeOptions,
|
||||
}
|
||||
|
||||
impl SwarmTool {
|
||||
pub fn new(
|
||||
swarms: HashMap<String, SwarmConfig>,
|
||||
agents: HashMap<String, DelegateAgentConfig>,
|
||||
fallback_credential: Option<String>,
|
||||
security: Arc<SecurityPolicy>,
|
||||
provider_runtime_options: providers::ProviderRuntimeOptions,
|
||||
) -> Self {
|
||||
Self {
|
||||
swarms: Arc::new(swarms),
|
||||
agents: Arc::new(agents),
|
||||
security,
|
||||
fallback_credential,
|
||||
provider_runtime_options,
|
||||
}
|
||||
}
|
||||
|
||||
fn create_provider_for_agent(
|
||||
&self,
|
||||
agent_config: &DelegateAgentConfig,
|
||||
agent_name: &str,
|
||||
) -> Result<Box<dyn Provider>, ToolResult> {
|
||||
let credential = agent_config
|
||||
.api_key
|
||||
.clone()
|
||||
.or_else(|| self.fallback_credential.clone());
|
||||
|
||||
providers::create_provider_with_options(
|
||||
&agent_config.provider,
|
||||
credential.as_deref(),
|
||||
&self.provider_runtime_options,
|
||||
)
|
||||
.map_err(|e| ToolResult {
|
||||
success: false,
|
||||
output: String::new(),
|
||||
error: Some(format!(
|
||||
"Failed to create provider '{}' for agent '{agent_name}': {e}",
|
||||
agent_config.provider
|
||||
)),
|
||||
})
|
||||
}
|
||||
|
||||
async fn call_agent(
|
||||
&self,
|
||||
agent_name: &str,
|
||||
agent_config: &DelegateAgentConfig,
|
||||
prompt: &str,
|
||||
timeout_secs: u64,
|
||||
) -> Result<String, String> {
|
||||
let provider = self
|
||||
.create_provider_for_agent(agent_config, agent_name)
|
||||
.map_err(|r| r.error.unwrap_or_default())?;
|
||||
|
||||
let temperature = agent_config.temperature.unwrap_or(0.7);
|
||||
|
||||
let result = tokio::time::timeout(
|
||||
Duration::from_secs(timeout_secs),
|
||||
provider.chat_with_system(
|
||||
agent_config.system_prompt.as_deref(),
|
||||
prompt,
|
||||
&agent_config.model,
|
||||
temperature,
|
||||
),
|
||||
)
|
||||
.await;
|
||||
|
||||
match result {
|
||||
Ok(Ok(response)) => {
|
||||
if response.trim().is_empty() {
|
||||
Ok("[Empty response]".to_string())
|
||||
} else {
|
||||
Ok(response)
|
||||
}
|
||||
}
|
||||
Ok(Err(e)) => Err(format!("Agent '{agent_name}' failed: {e}")),
|
||||
Err(_) => Err(format!(
|
||||
"Agent '{agent_name}' timed out after {timeout_secs}s"
|
||||
)),
|
||||
}
|
||||
}
|
||||
|
||||
async fn execute_sequential(
|
||||
&self,
|
||||
swarm_config: &SwarmConfig,
|
||||
prompt: &str,
|
||||
context: &str,
|
||||
) -> anyhow::Result<ToolResult> {
|
||||
let mut current_input = if context.is_empty() {
|
||||
prompt.to_string()
|
||||
} else {
|
||||
format!("[Context]\n{context}\n\n[Task]\n{prompt}")
|
||||
};
|
||||
|
||||
let per_agent_timeout = swarm_config.timeout_secs / swarm_config.agents.len().max(1) as u64;
|
||||
let mut results = Vec::new();
|
||||
|
||||
for (i, agent_name) in swarm_config.agents.iter().enumerate() {
|
||||
let agent_config = match self.agents.get(agent_name) {
|
||||
Some(cfg) => cfg,
|
||||
None => {
|
||||
return Ok(ToolResult {
|
||||
success: false,
|
||||
output: String::new(),
|
||||
error: Some(format!("Swarm references unknown agent '{agent_name}'")),
|
||||
});
|
||||
}
|
||||
};
|
||||
|
||||
let agent_prompt = if i == 0 {
|
||||
current_input.clone()
|
||||
} else {
|
||||
format!("[Previous agent output]\n{current_input}\n\n[Original task]\n{prompt}")
|
||||
};
|
||||
|
||||
match self
|
||||
.call_agent(agent_name, agent_config, &agent_prompt, per_agent_timeout)
|
||||
.await
|
||||
{
|
||||
Ok(output) => {
|
||||
results.push(format!(
|
||||
"[{agent_name} ({}/{})] {output}",
|
||||
agent_config.provider, agent_config.model
|
||||
));
|
||||
current_input = output;
|
||||
}
|
||||
Err(e) => {
|
||||
return Ok(ToolResult {
|
||||
success: false,
|
||||
output: results.join("\n\n"),
|
||||
error: Some(e),
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(ToolResult {
|
||||
success: true,
|
||||
output: format!(
|
||||
"[Swarm sequential — {} agents]\n\n{}",
|
||||
swarm_config.agents.len(),
|
||||
results.join("\n\n")
|
||||
),
|
||||
error: None,
|
||||
})
|
||||
}
|
||||
|
||||
    /// Fan-out/fan-in strategy: send the same prompt to every agent in the
    /// swarm concurrently, then collect all outputs (in completion order)
    /// into a single combined report.
    ///
    /// Configuration problems (unknown agent, provider construction failure)
    /// are reported as `Ok(ToolResult { success: false, .. })` rather than
    /// `Err`, so the caller can surface the message to the model.
    async fn execute_parallel(
        &self,
        swarm_config: &SwarmConfig,
        prompt: &str,
        context: &str,
    ) -> anyhow::Result<ToolResult> {
        // Prepend optional caller-supplied context to the task prompt.
        let full_prompt = if context.is_empty() {
            prompt.to_string()
        } else {
            format!("[Context]\n{context}\n\n[Task]\n{prompt}")
        };

        let mut join_set = tokio::task::JoinSet::new();

        for agent_name in &swarm_config.agents {
            let agent_config = match self.agents.get(agent_name) {
                Some(cfg) => cfg.clone(),
                None => {
                    // NOTE(review): returning here drops `join_set`, which
                    // aborts any tasks already spawned for earlier agents.
                    return Ok(ToolResult {
                        success: false,
                        output: String::new(),
                        error: Some(format!("Swarm references unknown agent '{agent_name}'")),
                    });
                }
            };

            // Per-agent API key takes precedence; otherwise fall back to the
            // tool-wide credential (both may be None, e.g. local providers).
            let credential = agent_config
                .api_key
                .clone()
                .or_else(|| self.fallback_credential.clone());

            let provider = match providers::create_provider_with_options(
                &agent_config.provider,
                credential.as_deref(),
                &self.provider_runtime_options,
            ) {
                Ok(p) => p,
                Err(e) => {
                    return Ok(ToolResult {
                        success: false,
                        output: String::new(),
                        error: Some(format!(
                            "Failed to create provider for agent '{agent_name}': {e}"
                        )),
                    });
                }
            };

            // Clone everything the task captures: spawned futures must be
            // 'static, so they cannot borrow from `self` or the loop.
            let name = agent_name.clone();
            let prompt_clone = full_prompt.clone();
            let timeout = swarm_config.timeout_secs;
            let model = agent_config.model.clone();
            // Default sampling temperature when the agent config leaves it unset.
            let temperature = agent_config.temperature.unwrap_or(0.7);
            let system_prompt = agent_config.system_prompt.clone();
            let provider_name = agent_config.provider.clone();

            join_set.spawn(async move {
                // Bound each agent call by the swarm-level timeout.
                let result = tokio::time::timeout(
                    Duration::from_secs(timeout),
                    provider.chat_with_system(
                        system_prompt.as_deref(),
                        &prompt_clone,
                        &model,
                        temperature,
                    ),
                )
                .await;

                // Per-agent failures are folded into the output text instead
                // of failing the whole swarm; only the text differs.
                let output = match result {
                    Ok(Ok(text)) => {
                        if text.trim().is_empty() {
                            "[Empty response]".to_string()
                        } else {
                            text
                        }
                    }
                    Ok(Err(e)) => format!("[Error] {e}"),
                    Err(_) => format!("[Timed out after {timeout}s]"),
                };

                (name, provider_name, model, output)
            });
        }

        // Drain tasks as they finish — results arrive in completion order,
        // not in the order agents were listed.
        let mut results = Vec::new();
        while let Some(join_result) = join_set.join_next().await {
            match join_result {
                Ok((name, provider_name, model, output)) => {
                    results.push(format!("[{name} ({provider_name}/{model})]\n{output}"));
                }
                Err(e) => {
                    // A task panicked or was cancelled; record it rather than
                    // aborting the whole fan-in.
                    results.push(format!("[join error] {e}"));
                }
            }
        }

        Ok(ToolResult {
            success: true,
            output: format!(
                "[Swarm parallel — {} agents]\n\n{}",
                swarm_config.agents.len(),
                results.join("\n\n---\n\n")
            ),
            error: None,
        })
    }
|
||||
|
||||
    /// Router strategy: ask an LLM (using the first agent's provider) which
    /// agent is best suited for the task, then delegate the full prompt to
    /// the chosen agent.
    ///
    /// Falls back to the first agent when the router's answer does not match
    /// any configured agent name (case-insensitive comparison).
    async fn execute_router(
        &self,
        swarm_config: &SwarmConfig,
        prompt: &str,
        context: &str,
    ) -> anyhow::Result<ToolResult> {
        if swarm_config.agents.is_empty() {
            return Ok(ToolResult {
                success: false,
                output: String::new(),
                error: Some("Router swarm has no agents to choose from".into()),
            });
        }

        // Build agent descriptions for the router prompt. Agents missing from
        // the registry are silently skipped here (filter_map); only the first
        // agent is hard-checked below.
        let agent_descriptions: Vec<String> = swarm_config
            .agents
            .iter()
            .filter_map(|name| {
                self.agents.get(name).map(|cfg| {
                    let desc = cfg
                        .system_prompt
                        .as_deref()
                        .unwrap_or("General purpose agent");
                    format!(
                        "- {name}: {desc} (provider: {}, model: {})",
                        cfg.provider, cfg.model
                    )
                })
            })
            .collect();

        // Use the first agent's provider for routing
        let first_agent_name = &swarm_config.agents[0];
        let first_agent_config = match self.agents.get(first_agent_name) {
            Some(cfg) => cfg,
            None => {
                return Ok(ToolResult {
                    success: false,
                    output: String::new(),
                    error: Some(format!(
                        "Swarm references unknown agent '{first_agent_name}'"
                    )),
                });
            }
        };

        // NOTE(review): unlike every other failure path in this method (which
        // returns Ok(ToolResult { success: false, .. })), a provider-creation
        // failure here propagates as Err via `?` — confirm this asymmetry is
        // intentional.
        let router_provider = self
            .create_provider_for_agent(first_agent_config, first_agent_name)
            .map_err(|r| anyhow::anyhow!(r.error.unwrap_or_default()))?;

        let base_router_prompt = swarm_config
            .router_prompt
            .as_deref()
            .unwrap_or("Pick the single best agent for this task.");

        let routing_prompt = format!(
            "{base_router_prompt}\n\nAvailable agents:\n{}\n\nUser task: {prompt}\n\n\
             Respond with ONLY the agent name, nothing else.",
            agent_descriptions.join("\n")
        );

        // Routing call uses the global SWARM_AGENT_TIMEOUT_SECS constant, not
        // swarm_config.timeout_secs (which governs the delegated call below).
        // Temperature 0.0 keeps the routing decision deterministic.
        let chosen = tokio::time::timeout(
            Duration::from_secs(SWARM_AGENT_TIMEOUT_SECS),
            router_provider.chat_with_system(
                Some("You are a routing assistant. Respond with only the agent name."),
                &routing_prompt,
                &first_agent_config.model,
                0.0,
            ),
        )
        .await;

        let chosen_name = match chosen {
            Ok(Ok(name)) => name.trim().to_string(),
            Ok(Err(e)) => {
                return Ok(ToolResult {
                    success: false,
                    output: String::new(),
                    error: Some(format!("Router LLM call failed: {e}")),
                });
            }
            Err(_) => {
                return Ok(ToolResult {
                    success: false,
                    output: String::new(),
                    error: Some("Router LLM call timed out".into()),
                });
            }
        };

        // Case-insensitive matching with fallback to first agent
        let matched_name = swarm_config
            .agents
            .iter()
            .find(|name| name.eq_ignore_ascii_case(&chosen_name))
            .cloned()
            .unwrap_or_else(|| swarm_config.agents[0].clone());

        // Re-resolve the matched agent; can only fail if a non-first agent
        // name is listed in the swarm but absent from the registry.
        let agent_config = match self.agents.get(&matched_name) {
            Some(cfg) => cfg,
            None => {
                return Ok(ToolResult {
                    success: false,
                    output: String::new(),
                    error: Some(format!("Router selected unknown agent '{matched_name}'")),
                });
            }
        };

        // Prepend optional caller-supplied context to the delegated prompt.
        let full_prompt = if context.is_empty() {
            prompt.to_string()
        } else {
            format!("[Context]\n{context}\n\n[Task]\n{prompt}")
        };

        match self
            .call_agent(
                &matched_name,
                agent_config,
                &full_prompt,
                swarm_config.timeout_secs,
            )
            .await
        {
            Ok(output) => Ok(ToolResult {
                success: true,
                output: format!(
                    "[Swarm router — selected '{matched_name}' ({}/{})]\n{output}",
                    agent_config.provider, agent_config.model
                ),
                error: None,
            }),
            Err(e) => Ok(ToolResult {
                success: false,
                output: String::new(),
                error: Some(e),
            }),
        }
    }
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Tool for SwarmTool {
|
||||
fn name(&self) -> &str {
|
||||
"swarm"
|
||||
}
|
||||
|
||||
fn description(&self) -> &str {
|
||||
"Orchestrate a swarm of agents to collaboratively handle a task. Supports sequential \
|
||||
(pipeline), parallel (fan-out/fan-in), and router (LLM-selected) strategies."
|
||||
}
|
||||
|
||||
fn parameters_schema(&self) -> serde_json::Value {
|
||||
let swarm_names: Vec<&str> = self.swarms.keys().map(String::as_str).collect();
|
||||
json!({
|
||||
"type": "object",
|
||||
"additionalProperties": false,
|
||||
"properties": {
|
||||
"swarm": {
|
||||
"type": "string",
|
||||
"minLength": 1,
|
||||
"description": format!(
|
||||
"Name of the swarm to invoke. Available: {}",
|
||||
if swarm_names.is_empty() {
|
||||
"(none configured)".to_string()
|
||||
} else {
|
||||
swarm_names.join(", ")
|
||||
}
|
||||
)
|
||||
},
|
||||
"prompt": {
|
||||
"type": "string",
|
||||
"minLength": 1,
|
||||
"description": "The task/prompt to send to the swarm"
|
||||
},
|
||||
"context": {
|
||||
"type": "string",
|
||||
"description": "Optional context to include (e.g. relevant code, prior findings)"
|
||||
}
|
||||
},
|
||||
"required": ["swarm", "prompt"]
|
||||
})
|
||||
}
|
||||
|
||||
async fn execute(&self, args: serde_json::Value) -> anyhow::Result<ToolResult> {
|
||||
let swarm_name = args
|
||||
.get("swarm")
|
||||
.and_then(|v| v.as_str())
|
||||
.map(str::trim)
|
||||
.ok_or_else(|| anyhow::anyhow!("Missing 'swarm' parameter"))?;
|
||||
|
||||
if swarm_name.is_empty() {
|
||||
return Ok(ToolResult {
|
||||
success: false,
|
||||
output: String::new(),
|
||||
error: Some("'swarm' parameter must not be empty".into()),
|
||||
});
|
||||
}
|
||||
|
||||
let prompt = args
|
||||
.get("prompt")
|
||||
.and_then(|v| v.as_str())
|
||||
.map(str::trim)
|
||||
.ok_or_else(|| anyhow::anyhow!("Missing 'prompt' parameter"))?;
|
||||
|
||||
if prompt.is_empty() {
|
||||
return Ok(ToolResult {
|
||||
success: false,
|
||||
output: String::new(),
|
||||
error: Some("'prompt' parameter must not be empty".into()),
|
||||
});
|
||||
}
|
||||
|
||||
let context = args
|
||||
.get("context")
|
||||
.and_then(|v| v.as_str())
|
||||
.map(str::trim)
|
||||
.unwrap_or("");
|
||||
|
||||
let swarm_config = match self.swarms.get(swarm_name) {
|
||||
Some(cfg) => cfg,
|
||||
None => {
|
||||
let available: Vec<&str> = self.swarms.keys().map(String::as_str).collect();
|
||||
return Ok(ToolResult {
|
||||
success: false,
|
||||
output: String::new(),
|
||||
error: Some(format!(
|
||||
"Unknown swarm '{swarm_name}'. Available swarms: {}",
|
||||
if available.is_empty() {
|
||||
"(none configured)".to_string()
|
||||
} else {
|
||||
available.join(", ")
|
||||
}
|
||||
)),
|
||||
});
|
||||
}
|
||||
};
|
||||
|
||||
if swarm_config.agents.is_empty() {
|
||||
return Ok(ToolResult {
|
||||
success: false,
|
||||
output: String::new(),
|
||||
error: Some(format!("Swarm '{swarm_name}' has no agents configured")),
|
||||
});
|
||||
}
|
||||
|
||||
if let Err(error) = self
|
||||
.security
|
||||
.enforce_tool_operation(ToolOperation::Act, "swarm")
|
||||
{
|
||||
return Ok(ToolResult {
|
||||
success: false,
|
||||
output: String::new(),
|
||||
error: Some(error),
|
||||
});
|
||||
}
|
||||
|
||||
match swarm_config.strategy {
|
||||
SwarmStrategy::Sequential => {
|
||||
self.execute_sequential(swarm_config, prompt, context).await
|
||||
}
|
||||
SwarmStrategy::Parallel => self.execute_parallel(swarm_config, prompt, context).await,
|
||||
SwarmStrategy::Router => self.execute_router(swarm_config, prompt, context).await,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    //! Unit tests for `SwarmTool`: schema shape, argument validation,
    //! security-policy enforcement, and strategy dispatch failure paths.
    //! No network calls succeed here — provider-level tests only assert
    //! that failures are reported, not specific outputs.
    use super::*;
    use crate::security::{AutonomyLevel, SecurityPolicy};

    /// Default (permissive) security policy used by most tests.
    fn test_security() -> Arc<SecurityPolicy> {
        Arc::new(SecurityPolicy::default())
    }

    /// Two agents: "researcher" (ollama, keyless) and "writer"
    /// (openrouter, with a dummy key).
    fn sample_agents() -> HashMap<String, DelegateAgentConfig> {
        let mut agents = HashMap::new();
        agents.insert(
            "researcher".to_string(),
            DelegateAgentConfig {
                provider: "ollama".to_string(),
                model: "llama3".to_string(),
                system_prompt: Some("You are a research assistant.".to_string()),
                api_key: None,
                temperature: Some(0.3),
                max_depth: 3,
                agentic: false,
                allowed_tools: Vec::new(),
                max_iterations: 10,
            },
        );
        agents.insert(
            "writer".to_string(),
            DelegateAgentConfig {
                provider: "openrouter".to_string(),
                model: "anthropic/claude-sonnet-4-20250514".to_string(),
                system_prompt: Some("You are a technical writer.".to_string()),
                api_key: Some("test-key".to_string()),
                temperature: Some(0.5),
                max_depth: 3,
                agentic: false,
                allowed_tools: Vec::new(),
                max_iterations: 10,
            },
        );
        agents
    }

    /// One swarm per strategy, all referencing the sample agents.
    fn sample_swarms() -> HashMap<String, SwarmConfig> {
        let mut swarms = HashMap::new();
        swarms.insert(
            "pipeline".to_string(),
            SwarmConfig {
                agents: vec!["researcher".to_string(), "writer".to_string()],
                strategy: SwarmStrategy::Sequential,
                router_prompt: None,
                description: Some("Research then write".to_string()),
                timeout_secs: 300,
            },
        );
        swarms.insert(
            "fanout".to_string(),
            SwarmConfig {
                agents: vec!["researcher".to_string(), "writer".to_string()],
                strategy: SwarmStrategy::Parallel,
                router_prompt: None,
                description: None,
                timeout_secs: 300,
            },
        );
        swarms.insert(
            "router".to_string(),
            SwarmConfig {
                agents: vec!["researcher".to_string(), "writer".to_string()],
                strategy: SwarmStrategy::Router,
                router_prompt: Some("Pick the best agent.".to_string()),
                description: None,
                timeout_secs: 300,
            },
        );
        swarms
    }

    // Schema exposes all three properties and requires swarm + prompt.
    #[test]
    fn name_and_schema() {
        let tool = SwarmTool::new(
            sample_swarms(),
            sample_agents(),
            None,
            test_security(),
            providers::ProviderRuntimeOptions::default(),
        );
        assert_eq!(tool.name(), "swarm");
        let schema = tool.parameters_schema();
        assert!(schema["properties"]["swarm"].is_object());
        assert!(schema["properties"]["prompt"].is_object());
        assert!(schema["properties"]["context"].is_object());
        let required = schema["required"].as_array().unwrap();
        assert!(required.contains(&json!("swarm")));
        assert!(required.contains(&json!("prompt")));
        assert_eq!(schema["additionalProperties"], json!(false));
    }

    #[test]
    fn description_not_empty() {
        let tool = SwarmTool::new(
            sample_swarms(),
            sample_agents(),
            None,
            test_security(),
            providers::ProviderRuntimeOptions::default(),
        );
        assert!(!tool.description().is_empty());
    }

    // Configured swarm names are embedded in the schema description.
    #[test]
    fn schema_lists_swarm_names() {
        let tool = SwarmTool::new(
            sample_swarms(),
            sample_agents(),
            None,
            test_security(),
            providers::ProviderRuntimeOptions::default(),
        );
        let schema = tool.parameters_schema();
        let desc = schema["properties"]["swarm"]["description"]
            .as_str()
            .unwrap();
        assert!(desc.contains("pipeline") || desc.contains("fanout") || desc.contains("router"));
    }

    // With no swarms configured the schema says so explicitly.
    #[test]
    fn empty_swarms_schema() {
        let tool = SwarmTool::new(
            HashMap::new(),
            sample_agents(),
            None,
            test_security(),
            providers::ProviderRuntimeOptions::default(),
        );
        let schema = tool.parameters_schema();
        let desc = schema["properties"]["swarm"]["description"]
            .as_str()
            .unwrap();
        assert!(desc.contains("none configured"));
    }

    // Unknown swarm name: soft failure (Ok with success=false), not Err.
    #[tokio::test]
    async fn unknown_swarm_returns_error() {
        let tool = SwarmTool::new(
            sample_swarms(),
            sample_agents(),
            None,
            test_security(),
            providers::ProviderRuntimeOptions::default(),
        );
        let result = tool
            .execute(json!({"swarm": "nonexistent", "prompt": "test"}))
            .await
            .unwrap();
        assert!(!result.success);
        assert!(result.error.unwrap().contains("Unknown swarm"));
    }

    // Structurally missing parameters are hard errors (Err), by contrast.
    #[tokio::test]
    async fn missing_swarm_param() {
        let tool = SwarmTool::new(
            sample_swarms(),
            sample_agents(),
            None,
            test_security(),
            providers::ProviderRuntimeOptions::default(),
        );
        let result = tool.execute(json!({"prompt": "test"})).await;
        assert!(result.is_err());
    }

    #[tokio::test]
    async fn missing_prompt_param() {
        let tool = SwarmTool::new(
            sample_swarms(),
            sample_agents(),
            None,
            test_security(),
            providers::ProviderRuntimeOptions::default(),
        );
        let result = tool.execute(json!({"swarm": "pipeline"})).await;
        assert!(result.is_err());
    }

    // Whitespace-only values are trimmed and then rejected as empty.
    #[tokio::test]
    async fn blank_swarm_rejected() {
        let tool = SwarmTool::new(
            sample_swarms(),
            sample_agents(),
            None,
            test_security(),
            providers::ProviderRuntimeOptions::default(),
        );
        let result = tool
            .execute(json!({"swarm": " ", "prompt": "test"}))
            .await
            .unwrap();
        assert!(!result.success);
        assert!(result.error.unwrap().contains("must not be empty"));
    }

    #[tokio::test]
    async fn blank_prompt_rejected() {
        let tool = SwarmTool::new(
            sample_swarms(),
            sample_agents(),
            None,
            test_security(),
            providers::ProviderRuntimeOptions::default(),
        );
        let result = tool
            .execute(json!({"swarm": "pipeline", "prompt": " "}))
            .await
            .unwrap();
        assert!(!result.success);
        assert!(result.error.unwrap().contains("must not be empty"));
    }

    // A swarm referencing an agent missing from the registry fails cleanly.
    #[tokio::test]
    async fn swarm_with_missing_agent_returns_error() {
        let mut swarms = HashMap::new();
        swarms.insert(
            "broken".to_string(),
            SwarmConfig {
                agents: vec!["nonexistent_agent".to_string()],
                strategy: SwarmStrategy::Sequential,
                router_prompt: None,
                description: None,
                timeout_secs: 60,
            },
        );
        let tool = SwarmTool::new(
            swarms,
            sample_agents(),
            None,
            test_security(),
            providers::ProviderRuntimeOptions::default(),
        );
        let result = tool
            .execute(json!({"swarm": "broken", "prompt": "test"}))
            .await
            .unwrap();
        assert!(!result.success);
        assert!(result.error.unwrap().contains("unknown agent"));
    }

    // An agent-less swarm is rejected before any strategy runs.
    #[tokio::test]
    async fn swarm_with_empty_agents_returns_error() {
        let mut swarms = HashMap::new();
        swarms.insert(
            "empty".to_string(),
            SwarmConfig {
                agents: Vec::new(),
                strategy: SwarmStrategy::Parallel,
                router_prompt: None,
                description: None,
                timeout_secs: 60,
            },
        );
        let tool = SwarmTool::new(
            swarms,
            sample_agents(),
            None,
            test_security(),
            providers::ProviderRuntimeOptions::default(),
        );
        let result = tool
            .execute(json!({"swarm": "empty", "prompt": "test"}))
            .await
            .unwrap();
        assert!(!result.success);
        assert!(result.error.unwrap().contains("no agents configured"));
    }

    // Read-only autonomy blocks the Act operation before dispatch.
    #[tokio::test]
    async fn swarm_blocked_in_readonly_mode() {
        let readonly = Arc::new(SecurityPolicy {
            autonomy: AutonomyLevel::ReadOnly,
            ..SecurityPolicy::default()
        });
        let tool = SwarmTool::new(
            sample_swarms(),
            sample_agents(),
            None,
            readonly,
            providers::ProviderRuntimeOptions::default(),
        );
        let result = tool
            .execute(json!({"swarm": "pipeline", "prompt": "test"}))
            .await
            .unwrap();
        assert!(!result.success);
        assert!(result
            .error
            .as_deref()
            .unwrap_or("")
            .contains("read-only mode"));
    }

    // A zero actions-per-hour budget trips the rate limiter immediately.
    #[tokio::test]
    async fn swarm_blocked_when_rate_limited() {
        let limited = Arc::new(SecurityPolicy {
            max_actions_per_hour: 0,
            ..SecurityPolicy::default()
        });
        let tool = SwarmTool::new(
            sample_swarms(),
            sample_agents(),
            None,
            limited,
            providers::ProviderRuntimeOptions::default(),
        );
        let result = tool
            .execute(json!({"swarm": "pipeline", "prompt": "test"}))
            .await
            .unwrap();
        assert!(!result.success);
        assert!(result
            .error
            .as_deref()
            .unwrap_or("")
            .contains("Rate limit exceeded"));
    }

    #[tokio::test]
    async fn sequential_invalid_provider_returns_error() {
        let mut swarms = HashMap::new();
        swarms.insert(
            "seq".to_string(),
            SwarmConfig {
                agents: vec!["researcher".to_string()],
                strategy: SwarmStrategy::Sequential,
                router_prompt: None,
                description: None,
                timeout_secs: 60,
            },
        );
        // researcher uses "ollama" which won't be running in CI
        let tool = SwarmTool::new(
            swarms,
            sample_agents(),
            None,
            test_security(),
            providers::ProviderRuntimeOptions::default(),
        );
        let result = tool
            .execute(json!({"swarm": "seq", "prompt": "test"}))
            .await
            .unwrap();
        // Should fail at provider creation or call level
        assert!(!result.success);
    }

    #[tokio::test]
    async fn parallel_invalid_provider_returns_error() {
        let mut swarms = HashMap::new();
        swarms.insert(
            "par".to_string(),
            SwarmConfig {
                agents: vec!["researcher".to_string()],
                strategy: SwarmStrategy::Parallel,
                router_prompt: None,
                description: None,
                timeout_secs: 60,
            },
        );
        let tool = SwarmTool::new(
            swarms,
            sample_agents(),
            None,
            test_security(),
            providers::ProviderRuntimeOptions::default(),
        );
        let result = tool
            .execute(json!({"swarm": "par", "prompt": "test"}))
            .await
            .unwrap();
        // Parallel strategy returns success with error annotations in output
        assert!(result.success || result.error.is_some());
    }

    #[tokio::test]
    async fn router_invalid_provider_returns_error() {
        let mut swarms = HashMap::new();
        swarms.insert(
            "rout".to_string(),
            SwarmConfig {
                agents: vec!["researcher".to_string()],
                strategy: SwarmStrategy::Router,
                router_prompt: Some("Pick.".to_string()),
                description: None,
                timeout_secs: 60,
            },
        );
        let tool = SwarmTool::new(
            swarms,
            sample_agents(),
            None,
            test_security(),
            providers::ProviderRuntimeOptions::default(),
        );
        let result = tool
            .execute(json!({"swarm": "rout", "prompt": "test"}))
            .await
            .unwrap();
        assert!(!result.success);
    }
}
|
||||
+3
-1
@@ -71,7 +71,9 @@ export class WebSocketClient {
|
||||
params.set('session_id', sessionId);
|
||||
const url = `${this.baseUrl}/ws/chat?${params.toString()}`;
|
||||
|
||||
this.ws = new WebSocket(url, ['zeroclaw.v1']);
|
||||
const protocols: string[] = ['zeroclaw.v1'];
|
||||
if (token) protocols.push(`bearer.${token}`);
|
||||
this.ws = new WebSocket(url, protocols);
|
||||
|
||||
this.ws.onopen = () => {
|
||||
this.currentDelay = this.reconnectDelay;
|
||||
|
||||
Reference in New Issue
Block a user