
Commit c7c8ac2

Commit message: cleaned up

1 parent e7e0091

9 files changed: 245 additions & 899 deletions

examples/integrations/tlt/detectnet_v2_bounding_box.ipynb

Lines changed: 200 additions & 128 deletions
Large diffs are not rendered by default.

examples/integrations/tlt/labelbox_upload.ipynb

Lines changed: 45 additions & 62 deletions
@@ -6,13 +6,12 @@
 "source": [
 "# Create training data using Labelbox\n",
 "* Download images and animal annotations\n",
-"* Upload them to labelbox using MAL\n",
-"* Label to add any missing classes"
+"* Upload them to labelbox using MAL"
 ]
 },
 {
 "cell_type": "code",
-"execution_count": 1,
+"execution_count": null,
 "metadata": {},
 "outputs": [],
 "source": [
@@ -40,7 +39,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 2,
+"execution_count": null,
 "metadata": {},
 "outputs": [],
 "source": [
@@ -53,45 +52,44 @@
 },
 {
 "cell_type": "code",
-"execution_count": 3,
+"execution_count": null,
 "metadata": {},
 "outputs": [],
 "source": [
 "# Download the annotations\n",
-"if not os.path.exists('eccv_18_annotation_files'):\n",
-"    !wget http://www.vision.caltech.edu/~sbeery/datasets/caltechcameratraps18/eccv_18_annotations.tar.gz\n",
-"    !tar -zxf eccv_18_annotations.tar.gz"
+"if not os.path.exists('eccv_18_all_annotations'):\n",
+"    !wget http://www.vision.caltech.edu/~sbeery/datasets/caltechcameratraps18/eccv_18_all_annotations.tar.gz\n",
+"    !tar -zxf eccv_18_all_annotations.tar.gz"
 ]
 },
 {
 "cell_type": "markdown",
 "metadata": {},
 "source": [
 "## Preprocess Data\n",
+"* Sample images from video sequences\n",
 "* Select only day time images and a subset of possible animals\n",
-"* Since the images are coming from video frames we split into train and eval datasets to account for this.\n",
 "* Convert the data into a format that is compatible with labelbox"
 ]
 },
 {
 "cell_type": "code",
-"execution_count": 4,
+"execution_count": null,
 "metadata": {},
 "outputs": [],
 "source": [
-"data = json.load(open('eccv_18_annotation_files/train_annotations.json'))\n",
+"data = json.load(open('CaltechCameraTrapsECCV18.json'))\n",
 "data['categories'] = {d['id'] : d for d in data['categories']}\n",
 "annotations = defaultdict(lambda: [])"
 ]
 },
 {
 "cell_type": "code",
-"execution_count": 5,
+"execution_count": null,
 "metadata": {},
 "outputs": [],
 "source": [
-"#Unique sequence of frames so the that the same animal isn't in the train and eval set by chance\n",
-"#We also want different seq_ids so that they are all from different sequences (not same animal)\n",
+"# One image per video sequence to reduce correlation between training/testing images.\n",
 "\n",
 "images = {}\n",
 "ids = set()\n",
@@ -113,19 +111,25 @@
 },
 {
 "cell_type": "code",
-"execution_count": 6,
+"execution_count": null,
 "metadata": {},
 "outputs": [],
 "source": [
+"# These ids correspond to locations with a lot of people in the images that we can label\n",
+"target_locations = {0,125,120}\n",
 "target_classes = {'dog', 'cat', 'deer', 'bobcat', 'fox'}\n",
+"min_border_distance = 50\n",
 "\n",
 "\n",
-"def process_image(image, min_bbox_height_px = 50 , min_bbox_width_px = 50):\n",
+"def process_image(image):\n",
 "    date_time_obj = datetime.datetime.strptime(image['date_captured'], '%Y-%m-%d %H:%M:%S')\n",
 "    if (not ((18 > date_time_obj.hour > 7)) or (date_time_obj.hour == 12)):\n",
 "        #Only train on day time images\n",
 "        return\n",
 "    \n",
+"    if image['location'] not in target_locations:\n",
+"        return\n",
+"    \n",
 "    annots = annotations[image['id']]\n",
 "    im = None \n",
 "    box_coords = []\n",
@@ -138,11 +142,11 @@
 "        bbox = annot.get('bbox')\n",
 "        assert bbox is not None\n",
 "        \n",
-"        if bbox[0] < min_bbox_width_px or bbox[1] < min_bbox_height_px:\n",
-"            #Ignore tiny bboxes\n",
+"        # Don't train on images where the animal is on the edge of the image\n",
+"        if bbox[0] < min_border_distance or bbox[1] < min_border_distance:\n",
 "            return\n",
 "        \n",
-"        if (w - (bbox[0] + bbox[2])) < min_bbox_width_px or (h - (bbox[1] + bbox[3])) < min_bbox_height_px:\n",
+"        if (w - (bbox[0] + bbox[2])) < min_border_distance or (h - (bbox[1] + bbox[3])) < min_border_distance:\n",
 "            return \n",
 "        \n",
 "        if im is None:\n",
@@ -158,17 +162,9 @@
 },
 {
 "cell_type": "code",
-"execution_count": 7,
+"execution_count": null,
 "metadata": {},
-"outputs": [
-{
-"name": "stdout",
-"output_type": "stream",
-"text": [
-"191\n"
-]
-}
-],
+"outputs": [],
 "source": [
 "examples = [process_image(ex) for ex in data['images'].values()]\n",
 "examples = [ex for ex in examples if ex is not None]\n",
@@ -184,7 +180,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 8,
+"execution_count": null,
 "metadata": {},
 "outputs": [],
 "source": [
@@ -195,6 +191,7 @@
 "    os.mkdir(\"labels\")\n",
 "    \n",
 "image_paths = []\n",
+"\n",
 "for idx, example in enumerate(examples):\n",
 "    imm, coords, location = example\n",
 "    image_path = os.path.join(\"uploaded_images\", f\"{idx}.jpg\")\n",
@@ -219,29 +216,18 @@
 },
 {
 "cell_type": "code",
-"execution_count": 9,
+"execution_count": null,
 "metadata": {},
 "outputs": [],
 "source": [
-"client = Client(api_key = \"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VySWQiOiJja2s0cTF2Z3djMHZwMDcwNHhoeDdtNHZrIiwib3JnYW5pemF0aW9uSWQiOiJja2s0cTF2Z2Fwc2F1MDczMjRhd25zanEyIiwiYXBpS2V5SWQiOiJja2t6bjd5dG5pZHNjMDcwNjczazIyamF1IiwiaWF0IjoxNjEyOTc0MjQ3LCJleHAiOjIyNDQxMjYyNDd9.GrGjHbN1w1X5-qLzlzp9UKCnkSffKqTQWEWIRyegHGg\")"
+"client = Client()"
 ]
 },
 {
 "cell_type": "code",
-"execution_count": 10,
+"execution_count": null,
 "metadata": {},
-"outputs": [
-{
-"data": {
-"text/plain": [
-"True"
-]
-},
-"execution_count": 10,
-"metadata": {},
-"output_type": "execute_result"
-}
-],
+"outputs": [],
 "source": [
 "project = client.create_project(name = \"animal_demo_proj\")\n",
 "dataset = client.create_dataset(name = \"animal_demo_ds\")\n",
@@ -252,7 +238,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 11,
+"execution_count": null,
 "metadata": {},
 "outputs": [],
 "source": [
@@ -273,7 +259,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 12,
+"execution_count": null,
 "metadata": {},
 "outputs": [],
 "source": [
@@ -286,7 +272,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 13,
+"execution_count": null,
 "metadata": {},
 "outputs": [],
 "source": [
@@ -295,7 +281,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 14,
+"execution_count": null,
 "metadata": {},
 "outputs": [],
 "source": [
@@ -319,7 +305,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 16,
+"execution_count": null,
 "metadata": {},
 "outputs": [],
 "source": [
@@ -331,24 +317,21 @@
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"## Go to labelbox and label\n",
-"* Most of the data is prelabeled so we just need to go through and make sure everything is correct\n",
+"## Go to Labelbox and Label\n",
+"* Most of the animal data is prelabeled we want to go through and make sure everything is correct\n",
+"    * Make sure to use the hot keys to label quickly!\n",
+"        * 'e' submits the image\n",
+"        * '1' selects the person bounding box\n",
+"        * '2' selects the animal bounding box\n",
+"        * There are other helpful ones too! Check out the keyboard shortcuts panel in the top right of the editor.\n",
 "* None of the people in the images have been labeled so we are also going to add those annotations"
 ]
 },
 {
 "cell_type": "code",
-"execution_count": 17,
+"execution_count": null,
 "metadata": {},
-"outputs": [
-{
-"name": "stdout",
-"output_type": "stream",
-"text": [
-"https://app.labelbox.com/projects/ckq6zvmwm8sko0ya4fevdgsbf/overview\n"
-]
-}
-],
+"outputs": [],
 "source": [
 "print(f\"https://app.labelbox.com/projects/{project.uid}/overview\")"
 ]
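
The security-relevant change in this file is the switch from a hardcoded `api_key` to a bare `Client()`. A minimal sketch of the intended pattern, assuming the Labelbox Python SDK's behavior of falling back to the `LABELBOX_API_KEY` environment variable when no `api_key` argument is passed:

import os
from labelbox import Client

# Assumes the key was exported in the shell beforehand, e.g.
#   export LABELBOX_API_KEY="<your api key>"
assert os.environ.get("LABELBOX_API_KEY"), "set LABELBOX_API_KEY before running"

client = Client()  # no key in source control; the SDK reads it from the environment

Keeping the key out of the notebook also keeps it out of the commit history; a key removed this way has already been published and should be treated as compromised and rotated.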

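For readers following the "Upload them to labelbox using MAL" bullet that the notebook keeps: a hedged sketch of a model-assisted-labeling upload. `enable_model_assisted_labeling()` and `upload_annotations(...)` match the Labelbox SDK of this era, but the project uid, schemaId, dataRow id, and bbox values below are placeholders for illustration, not values from this repo:

import uuid
from labelbox import Client

client = Client()                                # LABELBOX_API_KEY from the environment, as above
project = client.get_project("<project uid>")    # placeholder uid
project.enable_model_assisted_labeling()

# One NDJSON dict per predicted bounding box; the schemaId must come from
# the project's ontology and the dataRow id from the attached dataset.
predictions = [{
    "uuid": str(uuid.uuid4()),
    "schemaId": "<bbox tool schemaId>",
    "dataRow": {"id": "<data row id>"},
    "bbox": {"top": 50, "left": 50, "height": 100, "width": 100},
}]

upload_job = project.upload_annotations(name=f"mal_{uuid.uuid4()}", annotations=predictions)
upload_job.wait_until_done()
print(upload_job.statuses)                       # per-annotation import status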
examples/integrations/tlt/specs/detectnet_v2_inference_resnet18_kitti_tlt.txt

Lines changed: 0 additions & 49 deletions
This file was deleted.

examples/integrations/tlt/specs/detectnet_v2_inference_resnet18_kitti_trt.txt

Lines changed: 0 additions & 49 deletions
This file was deleted.
