 HTTP_PORT = os.environ.get("CODEGRAPH_HTTP_PORT", "3003")
 SERVER_URL = f"http://{HTTP_HOST}:{HTTP_PORT}/mcp"
 
-# Test cases (same as STDIO version)
+# Test cases (same as STDIO version, extended timeouts)
 AGENTIC_TESTS = [
-    ("agentic_code_search", "How is configuration loaded in this codebase? Find all config loading mechanisms.", 60),
-    ("agentic_dependency_analysis", "Analyze the dependency chain for the AgenticOrchestrator. What does it depend on?", 60),
-    ("agentic_call_chain_analysis", "Trace the call chain from execute_agentic_workflow to the graph analysis tools", 60),
-    ("agentic_architecture_analysis", "Analyze the architecture of the MCP server. Find coupling metrics and hub nodes.", 90),
-    ("agentic_api_surface_analysis", "What is the public API surface of the GraphToolExecutor?", 60),
-    ("agentic_context_builder", "Gather comprehensive context about the tier-aware prompt selection system", 90),
-    ("agentic_semantic_question", "How does the LRU cache work in GraphToolExecutor? What gets cached and when?", 60),
+    ("agentic_code_search", "How is configuration loaded in this codebase? Find all config loading mechanisms.", 300),
+    ("agentic_dependency_analysis", "Analyze the dependency chain for the AgenticOrchestrator. What does it depend on?", 300),
+    ("agentic_call_chain_analysis", "Trace the call chain from execute_agentic_workflow to the graph analysis tools", 300),
+    ("agentic_architecture_analysis", "Analyze the architecture of the MCP server. Find coupling metrics and hub nodes.", 300),
+    ("agentic_api_surface_analysis", "What is the public API surface of the GraphToolExecutor?", 300),
+    ("agentic_context_builder", "Gather comprehensive context about the tier-aware prompt selection system", 300),
+    ("agentic_semantic_question", "How does the LRU cache work in GraphToolExecutor? What gets cached and when?", 300),
 ]
 
 async def run_tests():
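The hunks below call each tool over streamable HTTP via the MCP SDK, but the connection setup itself is outside this diff. A minimal sketch of how such a session is typically opened, assuming the stock streamablehttp_client/ClientSession helpers from the mcp Python package, a locally reachable host, and a single "query" tool argument; the script's actual wiring may differ:

import asyncio

from mcp import ClientSession
from mcp.client.streamable_http import streamablehttp_client

SERVER_URL = "http://127.0.0.1:3003/mcp"  # mirrors the SERVER_URL built above; host is assumed

async def call_one(tool_name: str, query: str, timeout: float):
    # Open a streamable-HTTP transport, then an MCP client session on top of it.
    async with streamablehttp_client(SERVER_URL) as (read_stream, write_stream, _):
        async with ClientSession(read_stream, write_stream) as session:
            await session.initialize()
            # Same bounded call shape as the test loop below.
            return await asyncio.wait_for(
                session.call_tool(tool_name, {"query": query}),
                timeout=timeout,
            )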
@@ -56,13 +56,22 @@ async def run_tests():
 
     results = []
 
-    for tool_name, query, timeout in AGENTIC_TESTS:
+    # Create output directory
+    os.makedirs("test_output_http", exist_ok=True)
+
+    for i, (tool_name, query, timeout) in enumerate(AGENTIC_TESTS, 1):
         print(f"{'=' * 72}")
         print(f"Testing: {tool_name}")
         print(f"Query: {query}")
         print(f"Timeout: {timeout}s")
         print('=' * 72)
 
+        start_time = asyncio.get_event_loop().time()
+        result_text = None
+        structured_output = None
+        file_locations = []
+        success = False
+
         try:
             # Call tool with timeout
             result = await asyncio.wait_for(
@@ -71,13 +80,9 @@ async def run_tests():
             )
 
             # Parse result
-            success = False
-            structured_output = None
-            file_locations = []
-
             if result and len(result.content) > 0:
-                text = result.content[0].text
-                data = json.loads(text)
+                result_text = result.content[0].text
+                data = json.loads(result_text)
 
                 if "structured_output" in data:
                     structured_output = data["structured_output"]
@@ -90,9 +95,11 @@ async def run_tests():
                         if isinstance(item, dict) and 'file_path' in item:
                             file_locations.append(item)
 
+            duration = asyncio.get_event_loop().time() - start_time
+
             if success:
                 steps = data.get("steps_taken", "?")
-                print(f"\n✅ SUCCESS ({steps} steps)")
+                print(f"\n✅ SUCCESS in {duration:.1f}s ({steps} steps)")
                 if structured_output:
                     print(f" 📊 Structured Output: ✅ PRESENT")
                 if file_locations:
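The parsing in this hunk assumes the first content block of the tool result is a JSON document. A hypothetical payload that would satisfy it; only structured_output, steps_taken, name, file_path, and line_number are keys the script actually reads, while the "findings" container and file names are illustrative:

# Hypothetical response shape accepted by the parsing above (illustrative only).
example_payload = {
    "steps_taken": 4,
    "structured_output": {
        "findings": [  # assumed container; dicts carrying 'file_path' end up in file_locations
            {"name": "load_config", "file_path": "src/config.py", "line_number": 42},
            {"name": "Settings", "file_path": "src/settings.py"},
        ],
    },
}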
@@ -110,12 +117,56 @@ async def run_tests():
             })
 
         except asyncio.TimeoutError:
-            print(f"\n❌ TIMEOUT after {timeout}s")
+            duration = asyncio.get_event_loop().time() - start_time
+            print(f"\n❌ TIMEOUT after {duration:.1f}s")
             results.append({"test": tool_name, "success": False, "files": 0})
         except Exception as e:
+            duration = asyncio.get_event_loop().time() - start_time
             print(f"\n❌ ERROR: {e}")
             results.append({"test": tool_name, "success": False, "files": 0})
 
+        # Write log file
+        try:
+            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
+            log_filename = f"test_output_http/{str(i).zfill(2)}_{tool_name}_{timestamp}.log"
+
+            with open(log_filename, "w") as f:
+                f.write("=" * 80 + "\n")
+                f.write(f"Test: {tool_name}\n")
+                f.write(f"Transport: HTTP (MCP SDK)\n")
+                f.write(f"Timestamp: {timestamp}\n")
+                f.write(f"Timeout: {timeout}s\n")
+                f.write("=" * 80 + "\n\n")
+
+                f.write("INPUT QUERY:\n")
+                f.write("-" * 80 + "\n")
+                f.write(f"{query}\n")
+                f.write("-" * 80 + "\n\n")
+
+                f.write("OUTPUT:\n")
+                f.write("-" * 80 + "\n")
+
+                if structured_output:
+                    f.write(json.dumps(structured_output, indent=2))
+                    f.write("\n\n")
+                    f.write("FILE LOCATIONS EXTRACTED:\n")
+                    f.write("-" * 80 + "\n")
+                    for loc in file_locations:
+                        line_info = f":{loc['line_number']}" if loc.get('line_number') else ""
+                        f.write(f" {loc['name']} in {loc['file_path']}{line_info}\n")
+                elif result_text:
+                    f.write(result_text)
+                else:
+                    f.write("(No result received)\n")
+
+                f.write("-" * 80 + "\n\n")
+                f.write(f"Duration: {duration:.1f}s\n")
+                f.write(f"Status: {'SUCCESS' if success else 'FAILED'}\n")
+
+            print(f" 💾 Log saved: {log_filename}")
+        except Exception as e:
+            print(f" ⚠️ Failed to write log: {e}")
+
     # Summary
     print("\n" + "=" * 72)
     print("Test Summary")
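Each test now leaves a standalone log under test_output_http/. A small follow-up sketch for summarizing a run from those files; it relies only on the "Status: ..." footer written above:

from pathlib import Path

# Summarize the per-test logs produced by a run of this script.
for log in sorted(Path("test_output_http").glob("*.log")):
    status_lines = [line for line in log.read_text().splitlines() if line.startswith("Status:")]
    status = status_lines[-1].split(":", 1)[1].strip() if status_lines else "UNKNOWN"
    print(f"{log.name}: {status}")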