@@ -39,8 +39,8 @@
 WEIGHTS_FILE = APP_ROOT / 'weights.pt'
 app.config['UPLOAD_FOLDER'] = str(UPLOAD_FOLDER)
 app.config['RESULTS_FOLDER'] = str(RESULTS_FOLDER)
-app.config['WEIGHTS_FILE'] = str(WEIGHTS_FILE)
 app.config['ANNOT_FOLDER'] = str(ANNOT_FOLDER)
+app.config['WEIGHTS_FILE'] = str(WEIGHTS_FILE)
 app.config['ALLOWED_EXTENSIONS'] = {'png', 'jpg', 'jpeg', 'tif', 'tiff'}

 # skip these -- created dirs in dockerfile
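
For context, a config whitelist like ALLOWED_EXTENSIONS is usually consumed by a small validation helper along these lines. This is a sketch, not code from this diff, and the allowed_file name is hypothetical:

    def allowed_file(filename: str) -> bool:
        # Hypothetical helper: accept only filenames whose extension is in
        # the configured whitelist (extensions are stored lowercase).
        if '.' not in filename:
            return False
        ext = filename.rsplit('.', 1)[-1].lower()
        return ext in app.config['ALLOWED_EXTENSIONS']
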
@@ -235,6 +235,7 @@ def get_progress():
             with open(pkl_file, 'rb') as pf:
                 all_results[uuid_base] = pickle.load(pf)
         resp['results'] = all_results
+        print(f"Job executed successfully! {len(all_results)} results aggregated.")
         return jsonify(resp)

     # If still processing, update progress
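
A design note on the added line: print writes to stdout, which Docker captures, but Flask's own logger is the more conventional choice for route code. A minimal equivalent, assuming the standard app object:

    # Same message routed through Flask's logging machinery instead of a bare print.
    app.logger.info("Job executed successfully! %d results aggregated.", len(all_results))
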
@@ -283,7 +284,6 @@ def annotate_image():

     if not img_name:
         return jsonify({'error': 'File not found'}), 404
-
     # Load detections from pickle
     result_path = Path(app.config['RESULTS_FOLDER']) / session_id / f"{uuid}.pkl"
     if not result_path.exists():
@@ -367,14 +367,28 @@ def export_csv():
     try:
         data = request.json
         session_id = session['id']
-        threshold = float(data.get('confidence', 0.5))
         job_state = session.get('job_state')
+        filename_map = session.get('filename_map')
+        threshold = float(data.get('confidence', 0.5))
         if not job_state:
             return jsonify({'error': 'Job not found'}), 404
+
+        # iterate through the results
+        results_dir = Path(app.config['RESULTS_FOLDER']) / session_id
+        pkl_paths = list(results_dir.glob('*.pkl'))
+        all_results = {}
+        for path in pkl_paths:
+            uuid_base = path.stem
+            with open(path, 'rb') as pf:
+                all_results[uuid_base] = pickle.load(pf)
+
+        # populate rows for CSV conversion
         rows = []
-        for orig_name, detections in job_state['detections'].items():
-            count = sum(1 for d in detections if d['score'] >= threshold)
-            rows.append({'Filename': orig_name, 'EggsDetected': count})
+        for uuid in all_results.keys():
+            count = sum(1 for d in all_results[uuid] if d['score'] >= threshold)
+            rows.append({'Filename': filename_map[uuid], 'EggsDetected': count})
+        rows = sorted(rows, key=lambda x: x['Filename'].lower())
+        # write the CSV out
         timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
         output = io.StringIO()
         writer = csv.DictWriter(output, fieldnames=['Filename', 'EggsDetected'])
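
The hunk cuts off at the DictWriter setup. A typical continuation of this csv.DictWriter pattern, sketched under the assumption that the route returns the CSV as a download and that flask.Response is imported; the attachment filename is hypothetical:

    writer.writeheader()
    writer.writerows(rows)
    return Response(
        output.getvalue(),
        mimetype='text/csv',
        headers={'Content-Disposition': f'attachment; filename=results_{timestamp}.csv'},
    )
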