Skip to content

Commit a8983dd

Browse files
committed
test: update lat benchmark test
1 parent 5a0a58e commit a8983dd

File tree

1 file changed

+27
-27
lines changed

1 file changed

+27
-27
lines changed
Lines changed: 27 additions & 27 deletions
Original file line numberDiff line numberDiff line change
@@ -1,32 +1,32 @@
1-
# from cellseg_models_pytorch.inference import ResizeInferer
2-
# from cellseg_models_pytorch.models import cellpose_plus
3-
# from cellseg_models_pytorch.utils.latency_benchmark import LatencyBenchmarker
1+
from cellseg_models_pytorch.inference import ResizeInferer
2+
from cellseg_models_pytorch.models import cellpose_plus
3+
from cellseg_models_pytorch.utils.latency_benchmark import LatencyBenchmarker
44

55

def test_latency_benchmark(img_dir):
    """Smoke-test the latency-benchmarking utilities on a cellpose_plus model.

    Runs resize-based inference over the images in ``img_dir`` with a small
    CPU-only configuration, then calls each latency-measurement method once
    so that regressions in the benchmarking API surface in this test.

    Parameters
    ----------
    img_dir:
        Directory of input images (presumably a pytest fixture providing a
        path — TODO confirm against conftest).
    """
    # NOTE(review): class counts (3/3) and long_skip="unet" look like
    # arbitrary small settings chosen for test speed — confirm.
    model = cellpose_plus(sem_classes=3, type_classes=3, long_skip="unet")

    inferer = ResizeInferer(
        model,
        img_dir,
        out_activations={"sem": "softmax", "type": "softmax", "cellpose": "tanh"},
        out_boundary_weights={"sem": False, "type": False, "cellpose": True},
        resize=(256, 256),
        padding=80,
        instance_postproc="hovernet",
        batch_size=1,
        # presumably the benchmarker's post-processing timings need the
        # intermediate maps kept around — verify against LatencyBenchmarker.
        save_intermediate=True,
        device="cpu",
        parallel=False,
    )
    inferer.infer()

    bm = LatencyBenchmarker(inferer)

    # Single repetition and zero warmup keep this a fast smoke test rather
    # than a meaningful benchmark run.
    bm.postproc_latency("inst", reps_per_img=1)
    bm.postproc_latency("type", reps_per_img=1)
    bm.postproc_latency("sem", reps_per_img=1)
    bm.inference_latency(reps=1, warmup_reps=0)
    bm.inference_postproc_latency(reps=1)
    # bm.model_latency(input_size=(64, 64), reps=1, warmup_reps=0, device="cpu")
    # bm.model_throughput(input_size=(64, 64), reps=1, warmup_reps=0, device="cpu")

0 commit comments

Comments
 (0)