from cellseg_models_pytorch.inference import ResizeInferer
from cellseg_models_pytorch.models import cellpose_plus
from cellseg_models_pytorch.utils.latency_benchmark import LatencyBenchmarker

def test_latency_benchmark(img_dir):
    """Smoke-test the LatencyBenchmarker utilities end to end on CPU.

    Builds a CellPose+ model, runs resize-based inference over the images in
    ``img_dir``, then exercises every latency-measurement entry point of
    ``LatencyBenchmarker`` with minimal repetition counts (reps=1, no warmup)
    so the test stays fast. The test passes if no call raises.

    Parameters
    ----------
    img_dir : str or Path
        Pytest fixture pointing at a directory of input images
        (presumably small test images — confirm against the fixture).
    """
    model = cellpose_plus(sem_classes=3, type_classes=3, long_skip="unet")

    # save_intermediate=True is required so the benchmarker can re-run
    # post-processing on the cached raw model outputs.
    inferer = ResizeInferer(
        model,
        img_dir,
        out_activations={"sem": "softmax", "type": "softmax", "cellpose": "tanh"},
        out_boundary_weights={"sem": False, "type": False, "cellpose": True},
        resize=(256, 256),
        padding=80,
        instance_postproc="hovernet",
        batch_size=1,
        save_intermediate=True,
        device="cpu",
        parallel=False,
    )
    inferer.infer()

    bm = LatencyBenchmarker(inferer)

    # One repetition per image keeps the benchmark calls cheap; we only
    # check that each code path executes, not that timings are meaningful.
    bm.postproc_latency("inst", reps_per_img=1)
    bm.postproc_latency("type", reps_per_img=1)
    bm.postproc_latency("sem", reps_per_img=1)
    bm.inference_latency(reps=1, warmup_reps=0)
    bm.inference_postproc_latency(reps=1)
    # bm.model_latency(input_size=(64, 64), reps=1, warmup_reps=0, device="cpu")
    # bm.model_throughput(input_size=(64, 64), reps=1, warmup_reps=0, device="cpu")
0 commit comments