@@ -1,7 +1,14 @@
+from concurrent.futures import ThreadPoolExecutor, as_completed
+import functools
 import random
+import json
+import os
+
 import numpy as np
 from PIL import Image
+from tqdm import tqdm
 import cv2
+
 from detectron2.utils.visualizer import Visualizer
 
 
@@ -47,30 +54,36 @@ def partition_coco(coco_instance_data, coco_panoptic_data = None, splits = None)
     return partitions
 
 
-def visualize_coco_examples(coco_examples, metadata_catalog, scale = 1.0, max_images = 5, resize_dims = (768, 512)):
+def visualize_object_inferences(metadata_catalog, coco_examples, predictor, scale = 1.0, max_images = 5, resize_dims = (768, 512)):
     images = []
     for idx, example in enumerate(coco_examples):
         if idx > max_images:
             break
         im = cv2.imread(example['file_name'])
+        outputs = predictor(im)
         v = Visualizer(im[:, :, ::-1], metadata_catalog, scale=scale)
-        out = v.draw_dataset_dict(example)
-        images.append(cv2.resize(out.get_image(), resize_dims))
+        out = v.draw_instance_predictions(outputs["instances"].to("cpu"))
+        images.append(cv2.resize(out.get_image()[:, :, ::-1], resize_dims))
     return Image.fromarray(np.vstack(images))
 
 
-def visualize_object_inferences(coco_examples, metadata_catalog, predictor, scale = 1.0, max_images = 5, resize_dims = (768, 512)):
-    images = []
-    for idx, example in enumerate(coco_examples):
-        if idx > max_images:
-            break
-        im = cv2.imread(example['file_name'])
-        outputs = predictor(im)
-        v = Visualizer(im[:, :, ::-1], metadata_catalog, scale=scale)
-        out = v.draw_instance_predictions(outputs["instances"].to("cpu"))
-        images.append(cv2.resize(out.get_image()[:, :, ::-1], resize_dims))
-    return Image.fromarray(np.vstack(images))
+def visualize_coco_examples(metadata_catalog, object_examples, panoptic_examples = None, scale = 1.0, max_images = 5, resize_dims = (768,512)):
+    if panoptic_examples is not None:
+        lookup = {d['file_name'] : d for d in panoptic_examples}
 
+    images = []
+    for idx, example in enumerate(object_examples):
+        if idx > max_images:
+            break
+        im = cv2.imread(example['file_name'])
+        v = Visualizer(im[:, :, ::-1], metadata_catalog, scale=scale)
+        out = v.draw_dataset_dict(example)
+        if panoptic_examples is not None:
+            example_panoptic = lookup.get(example['file_name'])
+            if example_panoptic is not None:
+                out = v.draw_dataset_dict(example_panoptic)
+        images.append(cv2.resize(out.get_image(), resize_dims))
+    return Image.fromarray(np.vstack(images))
 
 
 def _process_panoptic_to_semantic(input_panoptic, output_semantic, segments, id_map):
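
A rough usage sketch for the two visualization helpers added above (not part of this commit): the annotation paths, the dataset name, and the Mask R-CNN config are placeholder assumptions, and any detectron2 predictor would work in place of the one built here.

from detectron2 import model_zoo
from detectron2.config import get_cfg
from detectron2.data import MetadataCatalog
from detectron2.data.datasets import load_coco_json
from detectron2.engine import DefaultPredictor

# Load COCO-format annotations into detectron2 dataset dicts (paths and name are placeholders).
examples = load_coco_json("annotations/instances_val.json", "images/val", "example_val")
metadata = MetadataCatalog.get("example_val")

# Ground-truth visualization: draws the dataset dicts directly, no model needed.
gt_preview = visualize_coco_examples(metadata, examples, max_images=3)

# Inference visualization: build a pretrained COCO Mask R-CNN predictor (an assumption)
# and overlay its instance predictions on the same images.
cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")
predictor = DefaultPredictor(cfg)
pred_preview = visualize_object_inferences(metadata, examples, predictor, max_images=3)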