6
6
'get_f_score' , 'get_iou_score' , 'get_jaccard_score' ,
7
7
]
8
8
9
- SMOOTH = 1e-12
9
+ SMOOTH = 1.
10
10
11
11
12
12
# ============================ Jaccard/IoU score ============================
13
13
14
14
15
- def iou_score (gt , pr , class_weights = 1. , smooth = SMOOTH , per_image = True ):
15
+ def iou_score (gt , pr , class_weights = 1. , smooth = SMOOTH , per_image = True , threshold = None ):
16
16
r""" The `Jaccard index`_, also known as Intersection over Union and the Jaccard similarity coefficient
17
17
(originally coined coefficient de communauté by Paul Jaccard), is a statistic used for comparing the
18
18
similarity and diversity of sample sets. The Jaccard coefficient measures similarity between finite sample sets,
@@ -27,6 +27,7 @@ def iou_score(gt, pr, class_weights=1., smooth=SMOOTH, per_image=True):
27
27
smooth: value to avoid division by zero
28
28
per_image: if ``True``, metric is calculated as mean over images in batch (B),
29
29
else over whole batch
30
+ threshold: value to round predictions (use ``>`` comparison), if ``None`` predictions will not be rounded
30
31
31
32
Returns:
32
33
IoU/Jaccard score in range [0, 1]
@@ -38,6 +39,10 @@ def iou_score(gt, pr, class_weights=1., smooth=SMOOTH, per_image=True):
38
39
axes = [1 , 2 ]
39
40
else :
40
41
axes = [0 , 1 , 2 ]
42
+
43
+ if threshold is not None :
44
+ pr = K .greater (pr , threshold )
45
+ pr = K .cast (pr , K .floatx ())
41
46
42
47
intersection = K .sum (gt * pr , axis = axes )
43
48
union = K .sum (gt + pr , axis = axes ) - intersection
@@ -53,20 +58,21 @@ def iou_score(gt, pr, class_weights=1., smooth=SMOOTH, per_image=True):
53
58
return iou
54
59
55
60
56
def get_iou_score(class_weights=1., smooth=SMOOTH, per_image=True, threshold=None):
    """Change default parameters of IoU/Jaccard score.

    Factory that closes over the given defaults and returns a two-argument
    metric callable delegating to ``iou_score``.

    Args:
        class_weights: 1. or list of class weights, len(weights) = C
        smooth: value to avoid division by zero
        per_image: if ``True``, metric is calculated as mean over images in batch (B),
            else over whole batch
        threshold: value to round predictions (use ``>`` comparison),
            if ``None`` predictions will not be rounded

    Returns:
        ``callable``: IoU/Jaccard score with signature ``score(gt, pr)``
    """
    def score(gt, pr):
        # Delegate to iou_score with the captured default parameters.
        return iou_score(gt, pr, class_weights=class_weights, smooth=smooth,
                         per_image=per_image, threshold=threshold)

    return score
72
78
@@ -83,7 +89,7 @@ def score(gt, pr):
83
89
84
90
# ============================== F/Dice - score ==============================
85
91
86
- def f_score (gt , pr , class_weights = 1 , beta = 1 , smooth = SMOOTH , per_image = True ):
92
+ def f_score (gt , pr , class_weights = 1 , beta = 1 , smooth = SMOOTH , per_image = True , threshold = None ):
87
93
r"""The F-score (Dice coefficient) can be interpreted as a weighted average of the precision and recall,
88
94
where an F-score reaches its best value at 1 and worst score at 0.
89
95
The relative contribution of ``precision`` and ``recall`` to the F1-score are equal.
@@ -110,6 +116,7 @@ def f_score(gt, pr, class_weights=1, beta=1, smooth=SMOOTH, per_image=True):
110
116
smooth: value to avoid division by zero
111
117
per_image: if ``True``, metric is calculated as mean over images in batch (B),
112
118
else over whole batch
119
+ threshold: value to round predictions (use ``>`` comparison), if ``None`` predictions will not be rounded
113
120
114
121
Returns:
115
122
F-score in range [0, 1]
@@ -119,6 +126,10 @@ def f_score(gt, pr, class_weights=1, beta=1, smooth=SMOOTH, per_image=True):
119
126
axes = [1 , 2 ]
120
127
else :
121
128
axes = [0 , 1 , 2 ]
129
+
130
+ if threshold is not None :
131
+ pr = K .greater (pr , threshold )
132
+ pr = K .cast (pr , K .floatx ())
122
133
123
134
tp = K .sum (gt * pr , axis = axes )
124
135
fp = K .sum (pr , axis = axes ) - tp
@@ -137,7 +148,7 @@ def f_score(gt, pr, class_weights=1, beta=1, smooth=SMOOTH, per_image=True):
137
148
return score
138
149
139
150
140
- def get_f_score (class_weights = 1 , beta = 1 , smooth = SMOOTH , per_image = True ):
151
+ def get_f_score (class_weights = 1 , beta = 1 , smooth = SMOOTH , per_image = True , threshold = None ):
141
152
"""Change default parameters of F-score score
142
153
143
154
Args:
@@ -146,12 +157,13 @@ def get_f_score(class_weights=1, beta=1, smooth=SMOOTH, per_image=True):
146
157
beta: f-score coefficient
147
158
per_image: if ``True``, metric is calculated as mean over images in batch (B),
148
159
else over whole batch
160
+ threshold: value to round predictions (use ``>`` comparison), if ``None`` predictions will not be rounded
149
161
150
162
Returns:
151
163
``callable``: F-score
152
164
"""
153
165
def score (gt , pr ):
154
- return f_score (gt , pr , class_weights = class_weights , beta = beta , smooth = smooth , per_image = per_image )
166
+ return f_score (gt , pr , class_weights = class_weights , beta = beta , smooth = smooth , per_image = per_image , threshold = threshold )
155
167
156
168
return score
157
169
0 commit comments