diff --git a/codeflash/code_utils/time_utils.py b/codeflash/code_utils/time_utils.py
index e44c279d3..58c486116 100644
--- a/codeflash/code_utils/time_utils.py
+++ b/codeflash/code_utils/time_utils.py
@@ -2,10 +2,12 @@
 
 import datetime as dt
 import re
+from functools import lru_cache
 
 import humanize
 
 
+@lru_cache(maxsize=1024)
 def humanize_runtime(time_in_ns: int) -> str:
     runtime_human: str = str(time_in_ns)
     units = "nanoseconds"
@@ -44,7 +46,7 @@ def humanize_runtime(time_in_ns: int) -> str:
             )
     elif len(runtime_human_parts[0]) == 2:
         if len(runtime_human_parts) > 1:
-            runtime_human = f"{runtime_human_parts[0]}.{runtime_human_parts[1][0]}"
+            runtime_human = f"{runtime_human_parts[0]}.{runtime_human_parts[1][0]}"
         else:
             runtime_human = f"{runtime_human_parts[0]}.0"
     else:
diff --git a/codeflash/github/PrComment.py b/codeflash/github/PrComment.py
index 3a1021d54..a0debd034 100644
--- a/codeflash/github/PrComment.py
+++ b/codeflash/github/PrComment.py
@@ -32,21 +32,27 @@ def to_json(self) -> dict[str, Union[str, int, dict[str, dict[str, int]], list[B
         if self.precomputed_test_report is not None:
             report_table = self.precomputed_test_report
         else:
-            report_table = {
-                test_type.to_name(): result
-                for test_type, result in self.winning_behavior_test_results.get_test_pass_fail_report_by_type().items()
-                if test_type.to_name()
-            }
+            raw_report = self.winning_behavior_test_results.get_test_pass_fail_report_by_type()
+            # Build the report table with a single to_name() call per test type
+            report_table = {}
+            for test_type, result in raw_report.items():
+                name = test_type.to_name()
+                if name:
+                    report_table[name] = result
+
         loop_count = (
             self.precomputed_loop_count
             if self.precomputed_loop_count is not None
             else self.winning_benchmarking_test_results.number_of_loops()
         )
 
+        best_runtime_human = humanize_runtime(self.best_runtime)
+        original_runtime_human = humanize_runtime(self.original_runtime)
+
         result: dict[str, Union[str, int, dict[str, dict[str, int]], list[BenchmarkDetail], None]] = {
             "optimization_explanation": self.optimization_explanation,
-            "best_runtime": humanize_runtime(self.best_runtime),
-            "original_runtime": humanize_runtime(self.original_runtime),
+            "best_runtime": best_runtime_human,
+            "original_runtime": original_runtime_human,
             "function_name": self.function_name,
             "file_path": self.relative_file_path,
             "speedup_x": self.speedup_x,
diff --git a/codeflash/models/models.py b/codeflash/models/models.py
index c269a5d93..a1b1fec37 100644
--- a/codeflash/models/models.py
+++ b/codeflash/models/models.py
@@ -14,6 +14,7 @@ if TYPE_CHECKING:
     from collections.abc import Iterator
 
+
 import enum
 import re
 import sys
@@ -24,11 +25,14 @@
 from typing import Annotated, NamedTuple, Optional, cast
 
 from jedi.api.classes import Name
-from pydantic import AfterValidator, BaseModel, ConfigDict, Field, PrivateAttr, ValidationError
+from pydantic import (AfterValidator, BaseModel, ConfigDict, Field,
+                      PrivateAttr, ValidationError)
 from pydantic.dataclasses import dataclass
 
 from codeflash.cli_cmds.console import console, logger
-from codeflash.code_utils.code_utils import diff_length, module_name_from_file_path, validate_python_code
+from codeflash.code_utils.code_utils import (diff_length,
+                                             module_name_from_file_path,
+                                             validate_python_code)
 from codeflash.code_utils.env_utils import is_end_to_end
 from codeflash.verification.comparator import comparator
 
@@ -834,9 +838,8 @@ def number_of_loops(self) -> int:
         return max(test_result.loop_index for test_result in self.test_results)
 
     def get_test_pass_fail_report_by_type(self) -> dict[TestType, dict[str, int]]:
-        report = {}
-        for test_type in TestType:
-            report[test_type] = {"passed": 0, "failed": 0}
+        # Initialize the report with all TestType keys to ensure a consistent structure
+        report = {test_type: {"passed": 0, "failed": 0} for test_type in TestType}
         for test_result in self.test_results:
             if test_result.loop_index == 1:
                 if test_result.did_pass:
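
Note: the lru_cache decorator added to humanize_runtime memoizes the formatted string by its integer argument, so repeated calls with the same runtime (for example when a PR comment is regenerated) skip the string-building work. The snippet below is a minimal standalone sketch of that pattern under assumed inputs; format_ns and its thresholds are illustrative stand-ins, not codeflash's actual formatter.

from functools import lru_cache

@lru_cache(maxsize=1024)
def format_ns(time_in_ns: int) -> str:
    # Hypothetical stand-in for humanize_runtime: a pure function of an
    # immutable int, so caching the formatted string by argument is safe.
    if time_in_ns < 1_000:
        return f"{time_in_ns} nanoseconds"
    if time_in_ns < 1_000_000:
        return f"{time_in_ns / 1_000:.2f} microseconds"
    return f"{time_in_ns / 1_000_000:.2f} milliseconds"

print(format_ns(1_500))        # computed on the first call
print(format_ns(1_500))        # served from the cache
print(format_ns.cache_info())  # CacheInfo(hits=1, misses=1, maxsize=1024, currsize=1)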