Skip to content

Commit c449c4c

Browse files
committed
feat: enhance agentic workflow execution and improve response parsing
1 parent 1c98baf commit c449c4c

File tree

5 files changed: +105 additions, −168 deletions

crates/codegraph-mcp/Cargo.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -93,7 +93,7 @@ embeddings-jina = ["embeddings", "codegraph-vector/jina"]
9393
cloud = ["embeddings-jina", "codegraph-graph/surrealdb"]
9494
server-http = ["dep:axum", "dep:hyper", "dep:tower", "dep:http-body-util"]
9595
qwen-integration = []
96-
ai-enhanced = ["dep:codegraph-ai", "embeddings"]
96+
ai-enhanced = ["dep:codegraph-ai", "embeddings", "autoagents-experimental"]
9797
ai-enhanced-faiss = ["ai-enhanced", "faiss"]
9898
autoagents-experimental = ["dep:autoagents", "dep:autoagents-derive"]
9999
legacy-mcp-server = []

crates/codegraph-mcp/src/agentic_orchestrator.rs

Lines changed: 40 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -512,6 +512,13 @@ impl AgenticOrchestrator {
512512

513513
/// Parse LLM response to extract reasoning step
514514
fn parse_llm_response(&self, step_number: usize, response: &str) -> Result<ReasoningStep> {
515+
Self::parse_llm_response_internal(step_number, response)
516+
}
517+
518+
fn parse_llm_response_internal(
519+
step_number: usize,
520+
response: &str,
521+
) -> Result<ReasoningStep> {
515522
// Try to parse as JSON first
516523
if let Ok(parsed) = serde_json::from_str::<JsonValue>(response) {
517524
let reasoning = parsed["reasoning"]
@@ -523,8 +530,14 @@ impl AgenticOrchestrator {
523530

524531
let (tool_name, tool_params) = if let Some(tool_call) = parsed["tool_call"].as_object()
525532
{
526-
let name = tool_call.get("tool_name").and_then(|v| v.as_str());
527-
let params = tool_call.get("parameters").cloned();
533+
let name = tool_call
534+
.get("tool_name")
535+
.or_else(|| tool_call.get("name"))
536+
.and_then(|v| v.as_str());
537+
let params = tool_call
538+
.get("parameters")
539+
.or_else(|| tool_call.get("arguments"))
540+
.cloned();
528541
(name.map(|s| s.to_string()), params)
529542
} else {
530543
(None, None)
@@ -580,8 +593,6 @@ mod tests {
580593

581594
#[test]
582595
fn test_parse_valid_json_response() {
583-
let orchestrator = create_test_orchestrator();
584-
585596
let json_response = r#"{
586597
"reasoning": "I need to analyze dependencies",
587598
"tool_call": {
@@ -595,8 +606,7 @@ mod tests {
595606
"is_final": false
596607
}"#;
597608

598-
let step = orchestrator
599-
.parse_llm_response(1, json_response)
609+
let step = AgenticOrchestrator::parse_llm_response_internal(1, json_response)
600610
.expect("Should parse valid JSON");
601611

602612
assert_eq!(step.step_number, 1);
@@ -610,37 +620,44 @@ mod tests {
610620

611621
#[test]
612622
fn test_parse_final_response() {
613-
let orchestrator = create_test_orchestrator();
614-
615623
let json_response = r#"{
616624
"reasoning": "Based on the analysis, the answer is...",
617625
"tool_call": null,
618626
"is_final": true
619627
}"#;
620628

621-
let step = orchestrator
622-
.parse_llm_response(1, json_response)
629+
let step = AgenticOrchestrator::parse_llm_response_internal(1, json_response)
623630
.expect("Should parse final response");
624631

625632
assert_eq!(step.step_number, 1);
626633
assert!(step.is_final);
627634
assert!(step.tool_name.is_none());
628635
}
629636

630-
// Helper to create test orchestrator
631-
fn create_test_orchestrator() -> AgenticOrchestrator {
632-
use codegraph_graph::GraphFunctions;
633-
use std::sync::Arc;
634-
635-
// This is a minimal test setup - in real tests, we'd use mock objects
636-
let graph_functions =
637-
Arc::new(GraphFunctions::new_memory().expect("Failed to create test graph"));
638-
let tool_executor = Arc::new(GraphToolExecutor::new(graph_functions));
637+
#[test]
638+
fn test_parse_tool_call_with_name_arguments_fields() {
639+
let json_response = r#"{
640+
"reasoning": "Need hub nodes",
641+
"tool_call": {
642+
"name": "get_hub_nodes",
643+
"arguments": {
644+
"min_degree": 7
645+
}
646+
},
647+
"is_final": false
648+
}"#;
639649

640-
// For testing parse logic, we don't need a real LLM provider
641-
// In real tests, we'd use a mock LLM provider
642-
let llm_provider: Arc<dyn LLMProvider> = unimplemented!("Use mock in real tests");
650+
let step = AgenticOrchestrator::parse_llm_response_internal(2, json_response)
651+
.expect("Should parse alias fields");
643652

644-
AgenticOrchestrator::new(llm_provider, tool_executor, ContextTier::Medium)
653+
assert_eq!(step.step_number, 2);
654+
assert_eq!(step.tool_name, Some("get_hub_nodes".to_string()));
655+
assert_eq!(
656+
step.tool_params
657+
.and_then(|p| p["min_degree"].as_i64())
658+
.unwrap_or_default(),
659+
7
660+
);
661+
assert!(!step.is_final);
645662
}
646663
}

crates/codegraph-mcp/src/autoagents/agent_builder.rs

Lines changed: 61 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,9 @@ use autoagents::llm::embedding::EmbeddingProvider;
1010
use autoagents::llm::error::LLMError;
1111
use autoagents::llm::models::{ModelListRequest, ModelListResponse, ModelsProvider};
1212
use autoagents::llm::{FunctionCall, ToolCall};
13-
use codegraph_ai::llm_provider::{LLMProvider as CodeGraphLLM, Message, MessageRole};
13+
use codegraph_ai::llm_provider::{
14+
LLMProvider as CodeGraphLLM, LLMResponse, Message, MessageRole, ProviderCharacteristics,
15+
};
1416
use serde::Deserialize;
1517
use std::sync::atomic::{AtomicU64, Ordering};
1618
use std::sync::Arc;
@@ -220,7 +222,9 @@ struct CodeGraphLLMResponse {
220222

221223
#[derive(Debug, Deserialize)]
222224
struct CodeGraphToolCall {
225+
#[serde(alias = "name")]
223226
tool_name: String,
227+
#[serde(alias = "arguments")]
224228
parameters: serde_json::Value,
225229
}
226230

@@ -410,7 +414,9 @@ use crate::autoagents::codegraph_agent::CodeGraphAgentOutput;
410414
use autoagents::core::agent::memory::SlidingWindowMemory;
411415
use autoagents::core::agent::prebuilt::executor::ReActAgent;
412416
use autoagents::core::agent::AgentBuilder;
413-
use autoagents::core::agent::{AgentDeriveT, AgentExecutor, AgentHooks, AgentOutputT, Context, DirectAgentHandle, ExecutorConfig};
417+
use autoagents::core::agent::{
418+
AgentDeriveT, AgentExecutor, AgentHooks, Context, DirectAgentHandle, ExecutorConfig,
419+
};
414420
use autoagents::core::error::Error as AutoAgentsError;
415421
use autoagents::core::tool::{shared_tools_to_boxes, ToolT};
416422

@@ -608,6 +614,29 @@ mod tests {
608614
assert_eq!(aa_messages[1].role, ChatRole::User);
609615
}
610616

617+
#[test]
618+
fn test_tool_calls_accepts_name_arguments_fields() {
619+
let response = CodeGraphChatResponse {
620+
content: r#"{
621+
"reasoning": "Plan",
622+
"tool_call": {
623+
"name": "get_hub_nodes",
624+
"arguments": {
625+
"min_degree": 4
626+
}
627+
},
628+
"is_final": false
629+
}"#
630+
.to_string(),
631+
_total_tokens: 0,
632+
};
633+
634+
let tool_calls = response.tool_calls().expect("tool call not parsed");
635+
assert_eq!(tool_calls.len(), 1);
636+
assert_eq!(tool_calls[0].function.name, "get_hub_nodes");
637+
assert_eq!(tool_calls[0].function.arguments, "{\"min_degree\":4}");
638+
}
639+
611640
// Integration test for ChatProvider
612641
struct MockCodeGraphLLM;
613642

@@ -617,12 +646,39 @@ mod tests {
617646
&self,
618647
messages: &[Message],
619648
_config: &codegraph_ai::llm_provider::GenerationConfig,
620-
) -> codegraph_ai::llm_provider::LLMResult<codegraph_ai::llm_provider::Response> {
621-
Ok(codegraph_ai::llm_provider::Response {
649+
) -> codegraph_ai::llm_provider::LLMResult<LLMResponse> {
650+
Ok(LLMResponse {
622651
content: format!("Echo: {}", messages.last().unwrap().content),
623-
total_tokens: 10,
652+
total_tokens: Some(10),
653+
prompt_tokens: None,
654+
completion_tokens: None,
655+
finish_reason: Some("stop".to_string()),
656+
model: "mock".to_string(),
624657
})
625658
}
659+
660+
async fn is_available(&self) -> bool {
661+
true
662+
}
663+
664+
fn provider_name(&self) -> &str {
665+
"mock"
666+
}
667+
668+
fn model_name(&self) -> &str {
669+
"mock-model"
670+
}
671+
672+
fn characteristics(&self) -> ProviderCharacteristics {
673+
ProviderCharacteristics {
674+
max_tokens: 4096,
675+
avg_latency_ms: 1,
676+
rpm_limit: None,
677+
tpm_limit: None,
678+
supports_streaming: false,
679+
supports_functions: true,
680+
}
681+
}
626682
}
627683

628684
#[tokio::test]

crates/codegraph-mcp/src/heartbeat.rs

Lines changed: 2 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -231,7 +231,7 @@ impl HeartbeatManager {
231231
#[cfg(test)]
232232
mod tests {
233233
use super::*;
234-
use tokio::time::{advance, pause};
234+
use tokio::time::sleep;
235235

236236
#[tokio::test]
237237
async fn test_heartbeat_config_default() {
@@ -266,8 +266,6 @@ mod tests {
266266

267267
#[tokio::test]
268268
async fn test_pong_handling() {
269-
pause();
270-
271269
let config = HeartbeatConfig {
272270
interval: Duration::from_millis(100),
273271
timeout: Duration::from_millis(50),
@@ -278,7 +276,7 @@ mod tests {
278276
monitor.on_pong_received(1).await;
279277
assert_eq!(monitor.state().await, HeartbeatState::Healthy);
280278

281-
advance(Duration::from_millis(200)).await;
279+
sleep(Duration::from_millis(200)).await;
282280

283281
monitor.on_pong_received(100).await;
284282
assert_eq!(monitor.state().await, HeartbeatState::Healthy);

0 commit comments

Comments (0)