Add condition in fct get_metrics #42

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged · 4 commits · Dec 9, 2024
helpers/metrics.py: 36 changes (23 additions, 13 deletions)
```diff
@@ -140,6 +140,16 @@ def get_metrics(tp_gdf, fp_gdf, fn_gdf, mismatch_gdf, id_classes=0, method='macr
     - float: f1 score.
     """

+    by_class_dict = {key: 0 for key in id_classes}
+    tp_k = by_class_dict.copy()
+    fp_k = by_class_dict.copy()
+    fn_k = by_class_dict.copy()
+    p_k = by_class_dict.copy()
+    r_k = by_class_dict.copy()
+    count_k = by_class_dict.copy()
+    pw_k = by_class_dict.copy()
+    rw_k = by_class_dict.copy()
+
     by_class_dict = {key: None for key in id_classes}
     tp_k = by_class_dict.copy()
     fp_k = by_class_dict.copy()
```
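The added block seeds every per-class dictionary with 0 rather than relying on the None-initialised copies, which remain as unchanged context in this hunk. A minimal sketch of why the 0 default matters, assuming the dictionaries are later aggregated with sum() as in the averaging branches further down (names and values here are illustrative, not from the PR):

```python
# Illustrative sketch, not code from the PR: a 0 default survives aggregation,
# while a None default breaks it as soon as a class is never updated.
id_classes = [0, 1, 2]  # hypothetical class ids

p_k_zero = {key: 0 for key in id_classes}
p_k_none = {key: None for key in id_classes}

print(sum(p_k_zero.values()))  # 0 -> an untouched class simply contributes nothing

try:
    sum(p_k_none.values())
except TypeError as err:
    print(f"None default breaks aggregation: {err}")
```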
```diff
@@ -152,7 +162,6 @@ def get_metrics(tp_gdf, fp_gdf, fn_gdf, mismatch_gdf, id_classes=0, method='macr

     for id_cl in id_classes:

-        tp_count = 0 if tp_gdf.empty else len(tp_gdf[tp_gdf.det_class==id_cl])
         pure_fp_count = 0 if fp_gdf.empty else len(fp_gdf[fp_gdf.det_class==id_cl])
         pure_fn_count = 0 if fn_gdf.empty else len(fn_gdf[fn_gdf.label_class==id_cl+1])  # label class starting at 1 and id class at 0
```

```diff
@@ -165,31 +174,32 @@ def get_metrics(tp_gdf, fp_gdf, fn_gdf, mismatch_gdf, id_classes=0, method='macr

         fp_count = pure_fp_count + mismatched_fp_count
         fn_count = pure_fn_count + mismatched_fn_count
+        tp_count = 0 if tp_gdf.empty else len(tp_gdf[tp_gdf.det_class==id_cl])

         tp_k[id_cl] = tp_count
         fp_k[id_cl] = fp_count
         fn_k[id_cl] = fn_count

-        if tp_count == 0:
-            p_k[id_cl] = 0
-            r_k[id_cl] = 0
-        else:
-            p_k[id_cl] = tp_count / (tp_count + fp_count)
-            r_k[id_cl] = tp_count / (tp_count + fn_count)
-        count_k[id_cl] = tp_count + fn_count
+        p_k[id_cl] = 0 if tp_count == 0 else tp_count / (tp_count + fp_count)
+        r_k[id_cl] = 0 if tp_count == 0 else tp_count / (tp_count + fn_count)
+        count_k[id_cl] = 0 if tp_count == 0 else tp_count + fn_count
```
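The per-class if/else is folded into conditional expressions, and count_k is now also zeroed when a class has no true positives, so that class carries no weight in the weighted average below. A small sketch with made-up counts (not from the PR's data):

```python
# Sketch with made-up counts: a class with no true positives gets precision,
# recall and support weight of 0 via the conditional expressions.
tp_count, fp_count, fn_count = 0, 4, 2

p = 0 if tp_count == 0 else tp_count / (tp_count + fp_count)
r = 0 if tp_count == 0 else tp_count / (tp_count + fn_count)
count = 0 if tp_count == 0 else tp_count + fn_count

print(p, r, count)  # 0 0 0 -> the class is effectively ignored downstream
```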

```diff
     if method == 'macro-average':
         precision = sum(p_k.values()) / len(id_classes)
         recall = sum(r_k.values()) / len(id_classes)
-    elif method == 'macro-weighted-average':
+    elif method == 'macro-weighted-average':
         for id_cl in id_classes:
-            pw_k[id_cl] = (count_k[id_cl] / sum(count_k.values())) * p_k[id_cl]
-            rw_k[id_cl] = (count_k[id_cl] / sum(count_k.values())) * r_k[id_cl]
+            pw_k[id_cl] = 0 if sum(count_k.values()) == 0 else (count_k[id_cl] / sum(count_k.values())) * p_k[id_cl]
+            rw_k[id_cl] = 0 if sum(count_k.values()) == 0 else (count_k[id_cl] / sum(count_k.values())) * r_k[id_cl]
         precision = sum(pw_k.values()) / len(id_classes)
         recall = sum(rw_k.values()) / len(id_classes)
```
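(The repeated elif line appears to change only in whitespace.) Both weighted terms now guard against a zero total support before dividing. A hedged sketch of the failure the guard removes, assuming every class ended up with zero count_k (dict contents are hypothetical):

```python
# Sketch of the guard on sum(count_k.values()) with illustrative values.
count_k = {0: 0, 1: 0}  # every class has zero support
p_k = {0: 0.0, 1: 0.0}
total = sum(count_k.values())

# The old form divided by `total` unconditionally and raised ZeroDivisionError here.
pw_k = {k: 0 if total == 0 else (count_k[k] / total) * p_k[k] for k in count_k}
print(pw_k)  # {0: 0, 1: 0}
```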
```diff
     elif method == 'micro-average':
-        precision = sum(tp_k.values()) / (sum(tp_k.values()) + sum(fp_k.values()))
-        recall = sum(tp_k.values()) / (sum(tp_k.values()) + sum(fn_k.values()))
+        if sum(tp_k.values()) == 0 and sum(fp_k.values()) == 0:
```
Inline review comment on the added condition:

Member: Does it really happen, or is it for the hypothetical case?

Contributor Author: Yes, it has occurred.

```diff
+            precision = 0
+            recall = 0
+        else:
+            precision = sum(tp_k.values()) / (sum(tp_k.values()) + sum(fp_k.values()))
+            recall = sum(tp_k.values()) / (sum(tp_k.values()) + sum(fn_k.values()))

     if precision==0 and recall==0:
         return tp_k, fp_k, fn_k, p_k, r_k, 0, 0, 0
```
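As the review thread above confirms, the all-zero case does occur in practice. A standalone sketch of the guarded micro-average, simplified under the assumption that tp_k, fp_k and fn_k are the per-class count dicts built earlier in get_metrics:

```python
# Simplified sketch of the guarded micro-average branch; tp_k/fp_k/fn_k are
# stand-ins for the per-class count dicts, not the PR's full function.
def micro_average(tp_k, fp_k, fn_k):
    tp, fp, fn = sum(tp_k.values()), sum(fp_k.values()), sum(fn_k.values())
    if tp == 0 and fp == 0:
        # No detections at all: return zeros instead of dividing by zero.
        return 0, 0
    return tp / (tp + fp), tp / (tp + fn)

# Previously this input raised ZeroDivisionError; now it yields (0, 0), and the
# caller returns zeroed metrics through its `precision==0 and recall==0` branch.
print(micro_average({0: 0, 1: 0}, {0: 0, 1: 0}, {0: 2, 1: 3}))  # (0, 0)
```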