Merged
71 changes: 40 additions & 31 deletions compressai_vision/evaluators/evaluators.py
@@ -176,7 +176,10 @@ def results(self, save_path: str = None):

self.write_results(out)

overall_mse = None
# summary = {}
# for key, item_dict in out.items():
# summary[f"{key}"] = item_dict["AP"]

if self._mse_results:
mse_results_dict = {"per_frame_mse": self._mse_results}
overall_mse = 0.0
@@ -205,11 +208,9 @@ def results(self, save_path: str = None):
) as f:
json.dump(mse_results_dict, f, ensure_ascii=False, indent=4)

# summary = {}
# for key, item_dict in out.items():
# summary[f"{key}"] = item_dict["AP"]

return out, overall_mse
return out, overall_mse
else:
return out


@register_evaluator("OIC-EVAL")
@@ -474,7 +475,11 @@ def results(self, save_path: str = None):

self.write_results(out)

overall_mse = None
summary = {}
for key, value in out.items():
name = "-".join(key.split("/")[1:])
summary[name] = value

if self._mse_results:
mse_results_dict = {"per_frame_mse": self._mse_results}
overall_mse = 0.0
@@ -503,12 +508,9 @@ def results(self, save_path: str = None):
) as f:
json.dump(mse_results_dict, f, ensure_ascii=False, indent=4)

summary = {}
for key, value in out.items():
name = "-".join(key.split("/")[1:])
summary[name] = value

return summary, overall_mse
return summary, overall_mse
else:
return summary


@register_evaluator("SEMANTICSEG-EVAL")
@@ -607,7 +609,6 @@ def results(self, save_path: str = None):

self.write_results(class_mIoU)

overall_mse = None
if self._mse_results:
mse_results_dict = {"per_frame_mse": self._mse_results}
overall_mse = 0.0
@@ -636,7 +637,9 @@ def results(self, save_path: str = None):
) as f:
json.dump(mse_results_dict, f, ensure_ascii=False, indent=4)

return class_mIoU, overall_mse
return class_mIoU, overall_mse
else:
return class_mIoU


@register_evaluator("MOT-JDE-EVAL")
@@ -788,7 +791,6 @@ def results(self, save_path: str = None):

self.write_results(out)

overall_mse = None
if self._mse_results:
mse_results_dict = {"per_frame_mse": self._mse_results}
overall_mse = 0.0
@@ -817,7 +819,9 @@ def results(self, save_path: str = None):
) as f:
json.dump(mse_results_dict, f, ensure_ascii=False, indent=4)

return out, overall_mse
return out, overall_mse
else:
return out

@staticmethod
def digest_summary(summary):
@@ -1126,7 +1130,10 @@ def results(self, save_path: str = None):

self.write_results(eval_results)

overall_mse = None
*listed_items, summary = eval_results

self._logger.info("\n" + summary)

if self._mse_results:
mse_results_dict = {"per_frame_mse": self._mse_results}
overall_mse = 0.0
@@ -1155,11 +1162,12 @@ def results(self, save_path: str = None):
) as f:
json.dump(mse_results_dict, f, ensure_ascii=False, indent=4)

*listed_items, summary = eval_results

self._logger.info("\n" + summary)

return {"AP": listed_items[0] * 100, "AP50": listed_items[1] * 100}, overall_mse
return {
"AP": listed_items[0] * 100,
"AP50": listed_items[1] * 100,
}, overall_mse
else:
return {"AP": listed_items[0] * 100, "AP50": listed_items[1] * 100}

def _convert_to_coco_format(self, outputs, info_imgs, ids):
# reference : yolox > evaluators > coco_evaluator > convert_to_coco_format
@@ -1363,7 +1371,11 @@ def results(self, save_path: str = None):

self.write_results(eval_results)

overall_mse = None
# item_keys = list(eval_results.keys())
item_vals = list(eval_results.values())

# self._logger.info("\n" + summary)

if self._mse_results:
mse_results_dict = {"per_frame_mse": self._mse_results}
overall_mse = 0.0
@@ -1392,12 +1404,9 @@ def results(self, save_path: str = None):
) as f:
json.dump(mse_results_dict, f, ensure_ascii=False, indent=4)

# item_keys = list(eval_results.keys())
item_vals = list(eval_results.values())

# self._logger.info("\n" + summary)

return {"AP": item_vals[0] * 100, "AP50": item_vals[1] * 100}, overall_mse
return {"AP": item_vals[0] * 100, "AP50": item_vals[1] * 100}, overall_mse
else:
return {"AP": item_vals[0] * 100, "AP50": item_vals[1] * 100}


@register_evaluator("VISUAL-QUALITY-EVAL")
@@ -1444,7 +1453,7 @@ def write_results(self, path: str = None):
with open(f"{path}/{self.output_file_name}.json", "w", encoding="utf-8") as f:
json.dump(self._evaluations, f, ensure_ascii=False, indent=4)

def digest(self, gt, pred):
def digest(self, gt, pred, mse_results=None):
ref = gt[0]["image"].unsqueeze(0).cpu()
tst = pred.unsqueeze(0).cpu()

@@ -186,7 +186,7 @@ def __call__(
start = time_measure()
dec_d = {
"file_name": dec_seq["file_names"][e],
"file_origin": d[e]["file_name"],
"file_origin": d[0]["file_name"],
Collaborator

@jianwensong Is this change necessary?
I forgot which case it was, but I remember there was a case where we still needed to access this with an index.
Would you please confirm this?

Contributor Author

I think it is necessary (at least under the current remote inference settings). When the batch size is one, each 'd' enumerated from the dataloader is a list containing a single dictionary, so indexing it with d[e] makes remote inference fail. Only the case where the batch size is larger than one might need that index, but I am not sure whether remote inference always uses batch size 1 or supports larger batch sizes.
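
A minimal sketch of the batching assumption described above (illustrative only; the file names and structures are hypothetical, not the pipeline code):

# With batch_size=1 the dataloader yields a one-element list of dicts,
# so d[0] is the only valid index; d[e] raises IndexError once e > 0.
d = [{"file_name": "seq/frame_0001.png"}]  # single batch item from the dataloader
dec_seq = {"file_names": ["rec/frame_0001.png", "rec/frame_0002.png"]}  # decoded frames

for e, _ in enumerate(dec_seq["file_names"]):
    dec_d = {
        "file_name": dec_seq["file_names"][e],
        "file_origin": d[0]["file_name"],  # d[e] would fail whenever e >= len(d)
    }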

}
# dec_d = {"file_name": dec_seq[0]["file_names"][e]}
pred = vision_model.forward(org_map_func(dec_d))
8 changes: 6 additions & 2 deletions compressai_vision/run/eval_split_inference.py
@@ -267,7 +267,9 @@ def main(conf: DictConfig):
"avg_bpp": avg_bpp,
"end_accuracy": performance,
**elap_times,
"inv_mse": None if mse is None else 1.0 / mse,
**(
{"inv_mse": 0 if mse == 0 else 1.0 / mse} if mse is not None else {}
),
}
)
print(tabulate(result_df, headers="keys", tablefmt="psql"))
@@ -283,7 +285,9 @@
"bitrate (kbps)": bitrate,
"end_accuracy": performance,
**elap_times,
"inv_mse": None if mse is None else 1.0 / mse,
**(
{"inv_mse": 0 if mse == 0 else 1.0 / mse} if mse is not None else {}
),
}
)
print(tabulate(result_df, headers="keys", tablefmt="psql"))
3 changes: 2 additions & 1 deletion scripts/metrics/compute_overall_map.py
@@ -54,7 +54,7 @@

from compressai_vision.evaluators.evaluators import BaseEvaluator

CLASSES = ["CLASS-AB", "CLASS-C", "CLASS-D"]
CLASSES = ["CLASS-AB", "CLASS-C", "CLASS-D", "CLASS-AB*"]

SEQS_BY_CLASS = {
CLASSES[0]: [
@@ -67,6 +67,7 @@
],
CLASSES[1]: ["BasketballDrill", "BQMall", "PartyScene", "RaceHorses_832x480"],
CLASSES[2]: ["BasketballPass", "BQSquare", "BlowingBubbles", "RaceHorses"],
CLASSES[3]: ["Traffic", "BQTerrace"],
}

SEQUENCE_TO_OFFSET = {
69 changes: 62 additions & 7 deletions scripts/metrics/gen_mpeg_cttc_csv.py
@@ -58,7 +58,13 @@
DATASETS = ["TVD", "SFU", "OIV6", "HIEVE", "PANDASET"]


def read_df_rec(path, seq_list, nb_operation_points, fn_regex=r"summary.csv"):
def read_df_rec(
path,
seq_list,
nb_operation_points,
fn_regex=r"summary.csv",
prefix: str | None = None,
):
summary_csvs = [f for f in iglob(join(path, "**", fn_regex), recursive=True)]
if nb_operation_points > 0:
seq_names = [
@@ -70,10 +76,18 @@ def read_df_rec(path, seq_list, nb_operation_points, fn_regex=r"summary.csv"):
len([f for f in summary_csvs if sequence in f]) == nb_operation_points
), f"Did not find {nb_operation_points} results for {sequence}"

return pd.concat(
(pd.read_csv(f) for f in summary_csvs),
ignore_index=True,
)
dfs = []
for f in summary_csvs:
df = pd.read_csv(f)

seq_dir = Path(os.path.relpath(f, path)).parts[0]
if prefix and prefix in seq_dir:
df["Dataset"] = df["Dataset"].apply(
lambda x: f"{prefix}{x}" if not x.startswith(f"{prefix}") else x
)

dfs.append(df)
return pd.concat(dfs, ignore_index=True)


def df_append(df1, df2):
@@ -155,9 +169,13 @@ def generate_csv_classwise_video_map(
nb_operation_points: int = 4,
no_cactus: bool = False,
skip_classwise: bool = False,
seq_prefix: str = None,
dataset_prefix: str = None,
):
opts_metrics = {"AP": 0, "AP50": 1, "AP75": 2, "APS": 3, "APM": 4, "APL": 5}
results_df = read_df_rec(result_path, seq_list, nb_operation_points)
results_df = read_df_rec(
result_path, seq_list, nb_operation_points, prefix=seq_prefix
)

# sort
sorterIndex = dict(zip(seq_list, range(len(seq_list))))
@@ -177,6 +195,13 @@
classwise_name = list(seqs_by_class.keys())[0]
classwise_seqs = list(seqs_by_class.values())[0]

cur_seq_prefix = (
seq_prefix
if seq_prefix
and any(name.startswith(seq_prefix) for name in classwise_seqs)
else None
)

class_wise_maps = []
for q in range(nb_operation_points):
items = utils.search_items(
Expand All @@ -187,6 +212,8 @@ def generate_csv_classwise_video_map(
BaseEvaluator.get_coco_eval_info_name,
by_name=True,
gt_folder=gt_folder,
seq_prefix=cur_seq_prefix,
dataset_prefix=dataset_prefix,
)

assert (
@@ -202,7 +229,11 @@
matched_seq_names = []
for seq_info in items:
name, _, _ = get_seq_info(seq_info[utils.SEQ_INFO_KEY])
matched_seq_names.append(name)
matched_seq_names.append(
f"{seq_prefix}{name}"
if seq_prefix and seq_prefix in seq_info[utils.SEQ_NAME_KEY]
else name
)

class_wise_results_df = generate_classwise_df(
results_df, {classwise_name: matched_seq_names}
@@ -409,6 +440,12 @@ def generate_csv(result_path, seq_list, nb_operation_points):
default=False,
help="exclude Cactus sequence for FCM eval",
)
parser.add_argument(
"--add-non-scale",
action="store_true",
default=False,
help="Add non-scale option using ns_Traffic/ns_BQTerrace with original GT",
)

args = parser.parse_args()

@@ -421,6 +458,7 @@

if args.dataset_name == "SFU":
metric = args.metric
dataset_prefix = "sfu-hw-"
class_ab = {
"CLASS-AB": [
"Traffic",
@@ -476,6 +514,19 @@
"BlowingBubbles_416x240_50",
"RaceHorses_416x240_30",
]

if args.mode == "FCM" and args.add_non_scale:
ns_seq_list = ["ns_Traffic_2560x1600_30", "ns_BQTerrace_1920x1080_60"]
seq_list.extend(ns_seq_list)
seq_prefix = "ns_"
class_ab_star = {
"CLASS-AB*": [
"ns_Traffic",
"ns_BQTerrace",
]
}
classes.append(class_ab_star)

if args.mode == "VCM" and not args.include_optional:
seq_list.remove("Kimono_1920x1080_24")
seq_list.remove("Cactus_1920x1080_50")
@@ -493,6 +544,10 @@ def generate_csv(result_path, seq_list, nb_operation_points):
args.nb_operation_points,
args.no_cactus,
args.mode == "VCM", # skip classwise evaluation
seq_prefix=seq_prefix
if "seq_prefix" in locals()
else None, # adding prefix to non-scale sequence
dataset_prefix=dataset_prefix if "dataset_prefix" in locals() else None,
)

if args.mode == "VCM":