Skip to content

Commit 9f4c00c

Browse files
committed
feat: add clean install guide and enhance tool call parsing in agentic orchestrator
1 parent c449c4c commit 9f4c00c

File tree

5 files changed

+215
-10
lines changed

5 files changed

+215
-10
lines changed

CLEAN_INSTALL.md

Lines changed: 50 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,50 @@
1+
# Clean Install Guide
2+
3+
Follow these steps to remove every trace of a previous CodeGraph MCP installation and rebuild from scratch.
4+
5+
1. **Stop running servers**
6+
Close every terminal session that is running `codegraph start …` (Ctrl+C) so no process holds binaries or logs open.
7+
8+
2. **Remove installed binaries**
9+
If you used the installer, it dropped symlinks/binaries into `/usr/local/bin`. Remove them:
10+
```bash
11+
sudo rm -f /usr/local/bin/codegraph /usr/local/bin/codegraph-official
12+
```
13+
14+
3. **Delete the installer support directory**
15+
The installer caches logs/config under `~/.codegraph`. Remove it to avoid stale state:
16+
```bash
17+
rm -rf ~/.codegraph
18+
```
19+
20+
4. **Clean the repository build artifacts**
21+
From the repo root run both commands to guarantee a fresh rebuild:
22+
```bash
23+
cargo clean
24+
rm -rf target
25+
```
26+
27+
5. **(Optional) Remove Python test environments**
28+
If you created a virtualenv or pip install for `test_http_mcp.py`, delete that environment so you can reinstall dependencies cleanly.
29+
30+
6. **Re-run the installer**
31+
Execute the installer once (choose the script you normally use, e.g. cloud):
32+
```bash
33+
./install-codegraph-cloud.sh
34+
```
35+
This recreates `/usr/local/bin/codegraph` pointing at the freshly built release binary.
36+
37+
7. **Rebuild your development binary**
38+
Inside the repo, rebuild with the required features so the local `target/` tree matches your latest changes:
39+
```bash
40+
cargo build -p codegraph-mcp --features "ai-enhanced,server-http"
41+
```
42+
43+
8. **Start the MCP server with the new binary**
44+
Launch STDIO or HTTP using the freshly built binary (debug path shown here):
45+
```bash
46+
./target/debug/codegraph start mcp http --port 3003
47+
```
48+
Replace `http` with `stdio` or use the release path if desired.
49+
50+
Once these steps are complete, you are running on a completely clean install and can rerun the MCP tests with confidence.

crates/codegraph-mcp/src/agentic_orchestrator.rs

Lines changed: 77 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -533,10 +533,13 @@ impl AgenticOrchestrator {
533533
let name = tool_call
534534
.get("tool_name")
535535
.or_else(|| tool_call.get("name"))
536+
.or_else(|| tool_call.get("function"))
537+
.or_else(|| tool_call.get("tool"))
536538
.and_then(|v| v.as_str());
537539
let params = tool_call
538540
.get("parameters")
539541
.or_else(|| tool_call.get("arguments"))
542+
.or_else(|| tool_call.get("args"))
540543
.cloned();
541544
(name.map(|s| s.to_string()), params)
542545
} else {
@@ -660,4 +663,78 @@ mod tests {
660663
);
661664
assert!(!step.is_final);
662665
}
666+
667+
/// The orchestrator must accept `"args"` as an alias for the tool
/// parameters object when parsing an LLM tool-call response.
#[test]
fn test_parse_tool_call_with_args_field() {
    let payload = r#"{
        "reasoning": "Trace call chain",
        "tool_call": {
            "tool_name": "trace_call_chain",
            "args": {
                "from_node": "GraphToolExecutor",
                "max_depth": 4
            }
        },
        "is_final": false
    }"#;

    let step = AgenticOrchestrator::parse_llm_response_internal(3, payload)
        .expect("Should parse args field");

    assert_eq!(step.tool_name, Some("trace_call_chain".to_string()));

    // Pull the aliased parameter back out key-by-key rather than through a
    // single chained expression; same check, easier to read on failure.
    let max_depth = step
        .tool_params
        .as_ref()
        .and_then(|p| p["max_depth"].as_i64())
        .unwrap_or_default();
    assert_eq!(max_depth, 4);
}
693+
694+
/// The orchestrator must accept `"function"` as an alias for the tool
/// name when parsing an LLM tool-call response.
#[test]
fn test_parse_tool_call_with_function_field() {
    let payload = r#"{
        "reasoning": "Check hubs",
        "tool_call": {
            "function": "get_hub_nodes",
            "arguments": {
                "min_degree": 9
            }
        },
        "is_final": false
    }"#;

    let step = AgenticOrchestrator::parse_llm_response_internal(4, payload)
        .expect("Should parse function field");

    assert_eq!(step.tool_name, Some("get_hub_nodes".to_string()));

    // Verify the "arguments" alias carried the parameter object through.
    let min_degree = step
        .tool_params
        .as_ref()
        .and_then(|p| p["min_degree"].as_i64())
        .unwrap_or_default();
    assert_eq!(min_degree, 9);
}
719+
720+
/// The orchestrator must accept `"tool"` as an alias for the tool name
/// when parsing an LLM tool-call response.
#[test]
fn test_parse_tool_call_with_tool_field() {
    let json_response = r#"{
        "reasoning": "Check dependencies",
        "tool_call": {
            "tool": "get_transitive_dependencies",
            "parameters": {
                "node_id": "AgenticOrchestrator",
                "edge_type": "Imports",
                "depth": 2
            }
        },
        "is_final": false
    }"#;

    let step = AgenticOrchestrator::parse_llm_response_internal(5, json_response)
        .expect("Should parse tool field");

    assert_eq!(
        step.tool_name,
        Some("get_transitive_dependencies".to_string())
    );

    // Unlike the sibling alias tests, this one previously checked only the
    // tool name. Also verify the "parameters" object survived parsing —
    // key-by-key, so serde_json map ordering is irrelevant.
    let params = step
        .tool_params
        .as_ref()
        .expect("parameters should be parsed");
    assert_eq!(params["node_id"].as_str(), Some("AgenticOrchestrator"));
    assert_eq!(params["edge_type"].as_str(), Some("Imports"));
    assert_eq!(params["depth"].as_i64(), Some(2));
}
663740
}

crates/codegraph-mcp/src/autoagents/agent_builder.rs

Lines changed: 70 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -222,9 +222,9 @@ struct CodeGraphLLMResponse {
222222

223223
/// One tool invocation extracted from a CodeGraph LLM response.
///
/// The serde aliases widen the accepted wire format: models may emit the
/// tool name under `tool_name`, `name`, `function`, or `tool`, and the
/// argument object under `parameters`, `arguments`, or `args`.
#[derive(Debug, Deserialize)]
struct CodeGraphToolCall {
    // Canonical field is `tool_name`; the aliases cover the common
    // variations LLMs produce for tool-call JSON.
    #[serde(alias = "name", alias = "function", alias = "tool")]
    tool_name: String,
    // Raw JSON object of tool arguments; kept untyped because each tool
    // validates its own parameter schema downstream.
    #[serde(alias = "arguments", alias = "args")]
    parameters: serde_json::Value,
}
230230

@@ -637,6 +637,74 @@ mod tests {
637637
assert_eq!(tool_calls[0].function.arguments, "{\"min_degree\":4}");
638638
}
639639

640+
/// A model that labels its parameter object `"args"` must still have it
/// surfaced as the function arguments.
#[test]
fn test_tool_calls_accepts_args_field() {
    let chat = CodeGraphChatResponse {
        content: r#"{
            "reasoning": "Trace chain",
            "tool_call": {
                "tool_name": "trace_call_chain",
                "args": {
                    "from_node": "GraphToolExecutor",
                    "max_depth": 4
                }
            },
            "is_final": false
        }"#
        .to_string(),
        _total_tokens: 0,
    };

    let calls = chat.tool_calls().expect("tool call not parsed");
    let call = &calls[0];
    assert_eq!(call.function.name, "trace_call_chain");
    assert_eq!(
        call.function.arguments,
        "{\"from_node\":\"GraphToolExecutor\",\"max_depth\":4}"
    );
}
662+
663+
/// A model that puts the tool name under `"function"` must still have it
/// parsed into the tool-call name.
#[test]
fn test_tool_calls_accepts_function_field() {
    let chat = CodeGraphChatResponse {
        content: r#"{
            "reasoning": "Find hubs",
            "tool_call": {
                "function": "get_hub_nodes",
                "arguments": {
                    "min_degree": 6
                }
            },
            "is_final": false
        }"#
        .to_string(),
        _total_tokens: 0,
    };

    let calls = chat.tool_calls().expect("tool call not parsed");
    let call = &calls[0];
    assert_eq!(call.function.name, "get_hub_nodes");
    assert_eq!(call.function.arguments, "{\"min_degree\":6}");
}
684+
685+
/// A model that puts the tool name under `"tool"` must still have it
/// parsed into the tool-call name, with its parameters preserved.
#[test]
fn test_tool_calls_accepts_tool_field() {
    let response = CodeGraphChatResponse {
        content: r#"{
            "reasoning": "Dependencies",
            "tool_call": {
                "tool": "get_transitive_dependencies",
                "parameters": {
                    "node_id": "AgenticOrchestrator",
                    "edge_type": "Imports",
                    "depth": 2
                }
            },
            "is_final": false
        }"#
        .to_string(),
        _total_tokens: 0,
    };

    let tool_calls = response.tool_calls().expect("tool call not parsed");
    assert_eq!(tool_calls[0].function.name, "get_transitive_dependencies");

    // The sibling alias tests assert the serialized arguments; this one
    // previously did not. Check per-key with contains() so the assertion
    // does not depend on serde_json's map key ordering (which changes
    // under the preserve_order feature).
    let args = &tool_calls[0].function.arguments;
    assert!(args.contains("\"node_id\":\"AgenticOrchestrator\""));
    assert!(args.contains("\"edge_type\":\"Imports\""));
    assert!(args.contains("\"depth\":2"));
}
707+
640708
// Integration test for ChatProvider
641709
struct MockCodeGraphLLM;
642710

crates/codegraph-mcp/src/bin/codegraph.rs

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -2795,12 +2795,12 @@ fn optimize_for_memory(
27952795
if embedding_provider == "ollama" {
27962796
// Ollama models work better with smaller batches for stability
27972797
match memory_gb {
2798-
128.. => 1024, // 128GB+: Large but stable batch size
2799-
96..=127 => 768, // 96-127GB: Medium-large batch
2800-
64..=95 => 512, // 64-95GB: Medium batch
2801-
32..=63 => 256, // 32-63GB: Small batch
2802-
16..=31 => 128, // 16-31GB: Very small batch
2803-
_ => 64, // <16GB: Minimal batch
2798+
128.. => 64, // 128GB+: Even high-memory boxes benefit from modest batches with Ollama
2799+
96..=127 => 64, // 96-127GB: Keep batches capped for GPU/CPU stability
2800+
64..=95 => 48, // 64-95GB: Slightly leaner batch for steady throughput
2801+
32..=63 => 32, // 32-63GB: Conservative batch to prevent throttling
2802+
16..=31 => 24, // 16-31GB: Small batch keeps latency predictable
2803+
_ => 16, // <16GB: Minimal batch on constrained systems
28042804
}
28052805
} else {
28062806
// ONNX/OpenAI can handle much larger batches

crates/codegraph-vector/src/ollama_embedding_provider.rs

Lines changed: 12 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -421,8 +421,18 @@ impl EmbeddingProvider for OllamaEmbeddingProvider {
421421
// Accumulate chunk embeddings for each node
422422
for (chunk_idx, chunk_embedding) in chunk_embeddings.into_iter().enumerate() {
423423
let node_idx = chunk_to_node[chunk_idx];
424-
for (i, &val) in chunk_embedding.iter().enumerate() {
425-
node_embeddings[node_idx][i] += val;
424+
if chunk_embedding.len() != dimension {
425+
warn!(
426+
"⚠️ Ollama embedding dimension mismatch: expected {}, got {}",
427+
dimension,
428+
chunk_embedding.len()
429+
);
430+
}
431+
for (slot, value) in node_embeddings[node_idx]
432+
.iter_mut()
433+
.zip(chunk_embedding.iter())
434+
{
435+
*slot += *value;
426436
}
427437
node_chunk_counts[node_idx] += 1;
428438
}

0 commit comments

Comments
 (0)