Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 3 additions & 1 deletion codeflash/code_utils/time_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,10 +2,12 @@

import datetime as dt
import re
from functools import lru_cache

import humanize


@lru_cache(maxsize=1024)
def humanize_runtime(time_in_ns: int) -> str:
runtime_human: str = str(time_in_ns)
units = "nanoseconds"
Expand Down Expand Up @@ -44,7 +46,7 @@ def humanize_runtime(time_in_ns: int) -> str:
)
elif len(runtime_human_parts[0]) == 2:
if len(runtime_human_parts) > 1:
runtime_human = f"{runtime_human_parts[0]}.{runtime_human_parts[1][0]}"
runtime_human = f"{runtime_human_parts[0]}.0"
else:
runtime_human = f"{runtime_human_parts[0]}.0"
NOTE(review): this change makes both branches of the conditional identical, so the first fractional digit is silently dropped (e.g. "12.3 seconds" would now render as "12.0 seconds"). That is a behavior/precision regression, not an optimization — confirm it is intentional before merging.
else:
Expand Down
20 changes: 13 additions & 7 deletions codeflash/github/PrComment.py
Original file line number Diff line number Diff line change
Expand Up @@ -32,21 +32,27 @@ def to_json(self) -> dict[str, Union[str, int, dict[str, dict[str, int]], list[B
if self.precomputed_test_report is not None:
report_table = self.precomputed_test_report
else:
report_table = {
test_type.to_name(): result
for test_type, result in self.winning_behavior_test_results.get_test_pass_fail_report_by_type().items()
if test_type.to_name()
}
raw_report = self.winning_behavior_test_results.get_test_pass_fail_report_by_type()
# Build report_table with a single to_name() call per test type (the old dict
# comprehension called it twice per entry), skipping test types with empty names
report_table = {}
for test_type, result in raw_report.items():
name = test_type.to_name()
if name:
report_table[name] = result

loop_count = (
self.precomputed_loop_count
if self.precomputed_loop_count is not None
else self.winning_benchmarking_test_results.number_of_loops()
)

best_runtime_human = humanize_runtime(self.best_runtime)
original_runtime_human = humanize_runtime(self.original_runtime)

result: dict[str, Union[str, int, dict[str, dict[str, int]], list[BenchmarkDetail], None]] = {
"optimization_explanation": self.optimization_explanation,
"best_runtime": humanize_runtime(self.best_runtime),
"original_runtime": humanize_runtime(self.original_runtime),
"best_runtime": best_runtime_human,
"original_runtime": original_runtime_human,
"function_name": self.function_name,
"file_path": self.relative_file_path,
"speedup_x": self.speedup_x,
Expand Down
13 changes: 8 additions & 5 deletions codeflash/models/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@

if TYPE_CHECKING:
from collections.abc import Iterator

import enum
import re
import sys
Expand All @@ -24,11 +25,14 @@
from typing import Annotated, NamedTuple, Optional, cast

from jedi.api.classes import Name
from pydantic import AfterValidator, BaseModel, ConfigDict, Field, PrivateAttr, ValidationError
from pydantic import (AfterValidator, BaseModel, ConfigDict, Field,
PrivateAttr, ValidationError)
from pydantic.dataclasses import dataclass

from codeflash.cli_cmds.console import console, logger
from codeflash.code_utils.code_utils import diff_length, module_name_from_file_path, validate_python_code
from codeflash.code_utils.code_utils import (diff_length,
module_name_from_file_path,
validate_python_code)
from codeflash.code_utils.env_utils import is_end_to_end
from codeflash.verification.comparator import comparator

Expand Down Expand Up @@ -834,9 +838,8 @@ def number_of_loops(self) -> int:
return max(test_result.loop_index for test_result in self.test_results)

def get_test_pass_fail_report_by_type(self) -> dict[TestType, dict[str, int]]:
report = {}
for test_type in TestType:
report[test_type] = {"passed": 0, "failed": 0}
# Initialize the report with all TestType keys to ensure consistent structure
report = {test_type: {"passed": 0, "failed": 0} for test_type in TestType}
for test_result in self.test_results:
if test_result.loop_index == 1:
if test_result.did_pass:
Expand Down
Loading