|
37 | 37 | "- Loading datasets for training, validating and testing the model\n",
|
38 | 38 | "- Building a data pipeline including augmentation\n",
|
39 | 39 | "- Creating a TensorFlow model architecture\n",
|
40 | | - "- Training and validating the model with synthetic data and analyzing the loss with TensorBoard\n", |
41 | | - "- Testing the model on synthetic and real-world data and analyzing the prediction results with TensorBoard" |
| 40 | + "- Training and validating the model with synthetic data and analyzing the loss\n", |
| 41 | + "- Testing the model on synthetic and real-world data and analyzing the prediction results" |
42 | 42 | ]
|
43 | 43 | },
|
44 | 44 | {
|
|
61 | 61 | {
|
62 | 62 | "cell_type": "code",
|
63 | 63 | "execution_count": null,
|
64 | | - "metadata": { |
65 | | - "vscode": { |
66 | | - "languageId": "python" |
67 | | - } |
68 | | - }, |
| 64 | + "metadata": {}, |
69 | 65 | "outputs": [],
|
70 | 66 | "source": [
|
71 | | - "batch_size = 2 \n", |
| 67 | + "batch_size = 2\n", |
72 | 68 | "max_points_per_pillar = 100\n",
|
73 | 69 | "max_pillars = 10000\n",
|
74 | 70 | "number_features = 9\n",
|
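These constants fix the dimensions of the PointPillars input tensor. A minimal sketch of the resulting shape, assuming the common (pillars, points, features) layout — the notebook's actual encoder may order or combine axes differently:

```python
# Hedged sketch: shape of one batch of pillar features, using the
# constants from the cell above. The axis order is an assumption.
batch_size = 2
max_pillars = 10000
max_points_per_pillar = 100
number_features = 9

input_shape = (batch_size, max_pillars, max_points_per_pillar, number_features)
print(input_shape)  # (2, 10000, 100, 9)
```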
|
99 | 95 | {
|
100 | 96 | "cell_type": "code",
|
101 | 97 | "execution_count": null,
|
102 | | - "metadata": { |
103 | | - "vscode": { |
104 | | - "languageId": "python" |
105 | | - } |
106 | | - }, |
| 98 | + "metadata": {}, |
107 | 99 | "outputs": [],
|
108 | 100 | "source": [
|
109 | 101 | "import os\n",
|
|
133 | 125 | {
|
134 | 126 | "cell_type": "code",
|
135 | 127 | "execution_count": null,
|
136 | | - "metadata": { |
137 | | - "vscode": { |
138 | | - "languageId": "python" |
139 | | - } |
140 | | - }, |
| 128 | + "metadata": {}, |
141 | 129 | "outputs": [],
|
142 | 130 | "source": [
|
143 | 131 | "for sample in dataset_train.take(1):\n",
|
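Only the head of the inspection loop is visible in this hunk. A hedged sketch of what such a loop typically prints — the feature keys `point_cloud` and `grid_map` are taken from the pipeline cell further down, not from this hunk:

```python
# Pull a single example from the training dataset and print its tensor
# shapes; the dict keys are assumptions based on the later pipeline cell.
for sample in dataset_train.take(1):
    print("point cloud:", sample["point_cloud"].shape)
    print("grid map:   ", sample["grid_map"].shape)
```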
|
183 | 171 | {
|
184 | 172 | "cell_type": "code",
|
185 | 173 | "execution_count": null,
|
186 | | - "metadata": { |
187 | | - "vscode": { |
188 | | - "languageId": "python" |
189 | | - } |
190 | | - }, |
| 174 | + "metadata": {}, |
191 | 175 | "outputs": [],
|
192 | 176 | "source": [
|
193 | 177 | "from point_pillars import createPillars\n",
|
194 | 178 | "import random\n",
|
| 179 | + "from scipy.ndimage import rotate\n", |
195 | 180 | "import math\n",
|
196 | | - "import tensorflow_addons as tfa\n", |
197 | 181 | "import numpy as np\n",
|
198 | 182 | "from grid_mapping import utils\n",
|
199 | 183 | "import matplotlib.pyplot as plt\n",
|
|
206 | 190 | "# This method augments one training sample. It is called by `preprocessSample`\n",
|
207 | 191 | "def augmentSample(point_cloud, grid_map):\n",
|
208 | 192 | " angle = random.uniform(-math.pi, math.pi)\n",
|
209 | | - " grid_map = tfa.image.rotate(grid_map, angle)\n", |
| 193 | + " grid_map = rotate(grid_map, np.degrees(angle), mode='nearest', order=0)\n", |
210 | 194 | "\n",
|
211 | 195 | " # TASK 2: Create a rotation matrix that rotates the point cloud according to the grid map\n",
|
212 | 196 | " ### START CODE HERE ###\n",
|
213 | | - " rotation_matrix = np.array( [[1.0, 0.0],\n", |
214 | | - " [0.0, 1.0]] )\n", |
| 197 | + " rotation_matrix = np.array( [[1.0, 1.0],\n", |
| 198 | + " [1.0, 1.0]] )\n", |
215 | 199 | " ### END CODE HERE ###\n",
|
216 | 200 | " \n",
|
217 | 201 | " point_cloud[:, 0:2] = (rotation_matrix @ point_cloud[:, 0:2].T).T\n",
|
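Note on the hunk above: `tfa.image.rotate` takes radians while `scipy.ndimage.rotate` takes degrees, which is why the new call wraps the angle in `np.degrees`. A minimal sketch of the scipy call on a hypothetical grid map; `reshape=False` is an extra option (not used in the commit) that keeps the output at the input size:

```python
import numpy as np
from scipy.ndimage import rotate

# Hypothetical 2-channel occupancy grid map, for illustration only.
grid_map = np.zeros((100, 100, 2), dtype=np.float32)
angle = np.pi / 4  # random angle in radians, as drawn in augmentSample

# order=0 keeps nearest-neighbor interpolation so cell values are not
# blended; mode='nearest' repeats edge values instead of zero-padding.
rotated = rotate(grid_map, np.degrees(angle), mode='nearest', order=0, reshape=False)
print(rotated.shape)  # (100, 100, 2)
```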
|
225 | 209 | " grid_map = label_\n",
|
226 | 210 | " # augment training sample (method above)\n",
|
227 | 211 | " if grid_map is not None:\n",
|
228 | | - " point_cloud, grid_map = augmentSample(point_cloud, grid_map)\n", |
| 212 | + " point_cloud, grid_map = augmentSample(point_cloud, grid_map)\n", |
229 | 213 | "\n",
|
230 | 214 | " if intensity_threshold is not None:\n",
|
231 | 215 | " point_cloud[:, 3] = np.clip(point_cloud[:, 3] / intensity_threshold,\n",
|
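The `np.clip` call is cut off at the hunk boundary. A hedged reconstruction of the usual normalization pattern — the clip bounds `0.0` and `1.0` are assumptions, not taken from the notebook:

```python
import numpy as np

# Hypothetical point cloud with columns (x, y, z, intensity).
point_cloud = np.random.rand(1000, 4).astype(np.float32)
intensity_threshold = 100.0

# Scale intensities by the threshold and clip to an assumed [0, 1] range
# so the feature stays bounded regardless of sensor calibration.
point_cloud[:, 3] = np.clip(point_cloud[:, 3] / intensity_threshold, 0.0, 1.0)
```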
|
319 | 303 | {
|
320 | 304 | "cell_type": "code",
|
321 | 305 | "execution_count": null,
|
322 | | - "metadata": { |
323 | | - "vscode": { |
324 | | - "languageId": "python" |
325 | | - } |
326 | | - }, |
| 306 | + "metadata": {}, |
327 | 307 | "outputs": [],
|
328 | 308 | "source": [
|
329 | 309 | "datapipe_train = dataset_train.map(lambda ex: (ex[\"point_cloud\"], ex[\"grid_map\"]))\n",
|
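Only the first `map` of the training pipeline is shown in this hunk. A hedged sketch of how such a `tf.data` pipeline is commonly completed — wrapping `preprocessSample` with `tf.py_function` and the output dtypes are assumptions; the notebook's actual cell may differ:

```python
import tensorflow as tf

# Select the two features used for training.
datapipe_train = dataset_train.map(lambda ex: (ex["point_cloud"], ex["grid_map"]))

# preprocessSample is plain Python/NumPy, so it must be wrapped for
# graph mode; the output dtypes here are assumptions.
datapipe_train = datapipe_train.map(
    lambda pc, gm: tf.py_function(preprocessSample, [pc, gm], [tf.float32, tf.float32]))

# Batch and prefetch so preprocessing overlaps with training.
datapipe_train = datapipe_train.batch(batch_size).prefetch(tf.data.AUTOTUNE)
```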
|
364 | 344 | {
|
365 | 345 | "cell_type": "code",
|
366 | 346 | "execution_count": null,
|
367 | | - "metadata": { |
368 | | - "vscode": { |
369 | | - "languageId": "python" |
370 | | - } |
371 | | - }, |
| 347 | + "metadata": {}, |
372 | 348 | "outputs": [],
|
373 | 349 | "source": [
|
374 | 350 | "from grid_mapping.point_pillars import getPointPillarsModel\n",
|
|
407 | 383 | {
|
408 | 384 | "cell_type": "code",
|
409 | 385 | "execution_count": null,
|
410 | | - "metadata": { |
411 | | - "vscode": { |
412 | | - "languageId": "python" |
413 | | - } |
414 | | - }, |
| 386 | + "metadata": {}, |
415 | 387 | "outputs": [],
|
416 | 388 | "source": [
|
417 | 389 | "# The neural network predicts evidences for the cell being free or occupied in the range [0, inf].\n",
|
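The comment above introduces evidential predictions in [0, inf]; the test cells below convert them with `evidences_to_masses`. A minimal sketch of such a conversion under the usual subjective-logic/Dirichlet assumptions — the notebook's actual implementation may differ in details:

```python
import numpy as np

def evidences_to_masses_sketch(evidences):
    """Hedged sketch: turn per-cell class evidences into belief masses."""
    num_classes = evidences.shape[-1]
    # Dirichlet strength: total evidence plus one pseudo-count per class.
    S = np.sum(evidences, axis=-1, keepdims=True) + num_classes
    prob = evidences / S   # belief mass per class, each in [0, 1)
    u = num_classes / S    # uncertainty mass; high where evidence is low
    return prob, u, S, num_classes
```

The returned tuple mirrors the `prob, u, S, num_classes` unpacking used in the evaluation cells below.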
|
489 | 461 | {
|
490 | 462 | "cell_type": "code",
|
491 | 463 | "execution_count": null,
|
492 | | - "metadata": { |
493 | | - "vscode": { |
494 | | - "languageId": "python" |
495 | | - } |
496 | | - }, |
| 464 | + "metadata": {}, |
497 | 465 | "outputs": [],
|
498 | 466 | "source": [
|
499 | 467 | "model = getModel()\n",
|
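After building the model, a quick sanity check with standard Keras (assuming only that `getModel` returns a `tf.keras.Model`):

```python
# Print layer names, output shapes and parameter counts of the freshly
# built PointPillars model before starting the training.
model.summary()
```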
|
554 | 522 | {
|
555 | 523 | "cell_type": "code",
|
556 | 524 | "execution_count": null,
|
557 | | - "metadata": { |
558 | | - "vscode": { |
559 | | - "languageId": "python" |
560 | | - } |
561 | | - }, |
| 525 | + "metadata": {}, |
562 | 526 | "outputs": [],
|
563 | 527 | "source": [
|
564 | | - "!rm -rf tensorboard/\n", |
565 | 528 | "!rm -rf grid_mapping/checkpoints\n",
|
566 | 529 | "!mkdir -p grid_mapping/checkpoints\n",
|
567 | 530 | "\n",
|
568 | | - "tensorboard_callback = tf.keras.callbacks.TensorBoard(\n", |
569 | | - " log_dir='tensorboard',\n", |
570 | | - " update_freq='batch',\n", |
571 | | - ")\n", |
572 | 531 | "\n",
|
573 | 532 | "model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(\n",
|
574 | 533 | " filepath='grid_mapping/checkpoints/weights.{epoch:02d}.hdf5',\n",
|
575 | 534 | " save_weights_only=True)\n",
|
576 | 535 | "\n",
|
577 | | - "model.fit(datapipe_train,\n", |
578 | | - " validation_data=datapipe_valid,\n", |
579 | | - " epochs=5,\n", |
580 | | - " callbacks=[tensorboard_callback, model_checkpoint_callback])" |
| 536 | + "history = model.fit(datapipe_train,\n", |
| 537 | + " validation_data=datapipe_valid,\n", |
| 538 | + " epochs=5,\n", |
| 539 | + " callbacks=[model_checkpoint_callback])" |
581 | 540 | ]
|
582 | 541 | },
|
583 | 542 | {
|
584 | 543 | "cell_type": "markdown",
|
585 | 544 | "metadata": {},
|
586 | 545 | "source": [
|
587 | | - "As the training is performed using the CPU (and not the GPU) on the jupyter server, this will take a while. So get a coffee and **analyze how the training and validation error develops during the training process** using TensorBoard. To open TensorBoard, navigate to the folder `acdc/section_3_sensor_data_processing/tensorboard` in the file browser, click the blue \"plus\" button and select \"TensorBoard\". Select the **\"Scalars\" tab in TensorBoard** to see the the loss graphs. It may take a while until the tab appears next to the \"Graphs\" tab.\n", |
| 546 | + "Since the training runs on the CPU (not the GPU) of the Jupyter server, it will take a while. So get a coffee and **analyze how the training and validation errors develop during the training process**. After training, you can execute the cell below to plot the loss curves." |
| 547 | + ] |
| 548 | + }, |
| 549 | + { |
| 550 | + "cell_type": "code", |
| 551 | + "execution_count": null, |
| 552 | + "metadata": {}, |
| 553 | + "outputs": [], |
| 554 | + "source": [ |
| 555 | + "train_loss = history.history['loss']\n", |
| 556 | + "val_loss = history.history['val_loss']\n", |
| 557 | + "\n", |
| 558 | + "plt.figure(figsize=(10, 6))\n", |
588 | 559 | "\n",
|
589 | | - "<img src=\"grid_mapping/start-tensorboard.png\" alt=\"start tensorboard\" style=\"width: 800px;\"/>" |
| 560 | + "# Plot training and validation loss\n", |
| 561 | + "plt.subplot(2, 1, 1)\n", |
| 562 | + "plt.plot(train_loss, label='Training loss')\n", |
| 563 | + "plt.plot(val_loss, label='Validation loss')\n", |
| 564 | + "plt.legend()\n", |
| 565 | + "plt.xlabel('Epoch')\n", |
| 566 | + "plt.ylabel('Loss')" |
590 | 567 | ]
|
591 | 568 | },
|
592 | 569 | {
|
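The next code cell (TASK 4) asks for the 2-digit epoch number of a saved checkpoint. A hedged one-liner to see which files `ModelCheckpoint` actually wrote, using the directory from the training cell above:

```python
import os

# Lists e.g. ['weights.01.hdf5', ..., 'weights.05.hdf5'] so you can pick
# a valid epoch number for model.load_weights in the next cell.
print(sorted(os.listdir('grid_mapping/checkpoints')))
```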
|
604 | 581 | {
|
605 | 582 | "cell_type": "code",
|
606 | 583 | "execution_count": null,
|
607 | | - "metadata": { |
608 | | - "vscode": { |
609 | | - "languageId": "python" |
610 | | - } |
611 | | - }, |
| 584 | + "metadata": {}, |
612 | 585 | "outputs": [],
|
613 | 586 | "source": [
|
614 | | - "!rm -rf tensorboard/images_synthetic\n", |
615 | | - "\n", |
616 | 587 | "from tqdm import tqdm\n",
|
617 | 588 | "\n",
|
618 | 589 | "# TASK 4: Replace XX with the 2-digit epoch number of the saved model you want to use\n",
|
619 | 590 | "### START CODE HERE ###\n",
|
620 | | - "model.load_weights('grid_mapping/checkpoints/weights.XX.hdf5')\n", |
| 591 | + "model.load_weights('grid_mapping/checkpoints/weights.05.hdf5')\n", |
621 | 592 | "### END CODE HERE ###\n",
|
622 | 593 | "\n",
|
| 594 | + "num_plots = 5\n", |
623 | 595 | "i = 0\n",
|
| 596 | + "fig, axs = plt.subplots(num_plots, 2, figsize=(20, 40))\n", |
624 | 597 | "for sample, sample_raw in zip(tqdm(datapipe_test_syn), datapipe_test_syn_raw):\n",
|
625 | | - " point_cloud_batch = sample[0] # get first input batch from the traning pipeline (after `preprocessSample`)\n", |
626 | | - " point_cloud_raw = sample_raw[0][0] # get first input point cloud directly from the dataset (without preprocessing)\n", |
| 598 | + " point_cloud_batch = sample[0] # get first input batch from the training pipeline (after `preprocessSample`)\n", |
| 599 | + " point_cloud_raw = sample_raw[0][0] # get first input point cloud directly from the dataset (without preprocessing)\n", |
| 600 | + "\n", |
| 601 | + " # use trained model to predict grid map\n", |
| 602 | + " prediction_batch = model(point_cloud_batch, training=False)\n", |
| 603 | + " grid_map = prediction_batch[0] # get first predicted grid map from batch\n", |
627 | 604 | "\n",
|
628 | | - " # use trained model to predict grid map\n", |
629 | | - " prediction_batch = model(point_cloud_batch, training=False)\n", |
630 | | - " grid_map = prediction_batch[0] # get first predicted grid map from batch\n", |
| 605 | + " # convert predicted evidences to belief masses\n", |
| 606 | + " prob, u, S, num_classes = evidences_to_masses(grid_map)\n", |
631 | 607 | "\n",
|
632 | | - " # convert predicted evidences to belief masses\n", |
633 | | - " prob, u, S, num_classes = evidences_to_masses(grid_map)\n", |
| 608 | + " # convert point cloud and grid map to images\n", |
| 609 | + " image_point_cloud = utils.lidar_to_bird_view_img(point_cloud_raw.numpy(), x_min, x_max, y_min, y_max, step_x_size, step_y_size, intensity_threshold)\n", |
| 610 | + " image_grid_map = utils.grid_map_to_img(prob)\n", |
634 | 611 | "\n",
|
635 | | - " # convert point cloud and grid map to images\n", |
636 | | - " image_point_cloud = utils.lidar_to_bird_view_img(point_cloud_raw.numpy(), x_min, x_max, y_min, y_max, step_x_size, step_y_size, intensity_threshold)\n", |
637 | | - " image_grid_map = utils.grid_map_to_img(prob)\n", |
| 612 | + " # plot images in the grid\n", |
| 613 | + " axs[i, 0].imshow(tf.cast(image_point_cloud, dtype=tf.float32)/255.0)\n", |
| 614 | + " axs[i, 0].set_title('Synthetic Input')\n", |
| 615 | + " axs[i, 1].imshow(image_grid_map)\n", |
| 616 | + " axs[i, 1].set_title('Synthetic Prediction')\n", |
638 | 617 | "\n",
|
639 | | - " # display input and prediction in TensorBoard\n", |
640 | | - " w = tf.summary.create_file_writer('tensorboard/images_synthetic2')\n", |
641 | | - " with w.as_default():\n", |
642 | | - " tf.summary.image(\"synthetic_input\", [tf.cast(image_point_cloud, dtype=tf.float64)/255.0], step=i)\n", |
643 | | - " tf.summary.image(\"synthetic_prediction\", [image_grid_map], step=i)\n", |
644 | | - " i = i + 1\n" |
| 618 | + " i = i + 1\n", |
| 619 | + " if i >= num_plots:\n", |
| 620 | + " break\n", |
| 621 | + "\n", |
| 622 | + "# adjust layout and show plot\n", |
| 623 | + "fig.tight_layout()\n", |
| 624 | + "plt.show()" |
645 | 625 | ]
|
646 | 626 | },
|
647 | 627 | {
|
648 | 628 | "cell_type": "markdown",
|
649 | 629 | "metadata": {},
|
650 | 630 | "source": [
|
651 | | - "Select the **\"Images\" tab in TensorBoard** to see the predicted occupancy grid maps. It may take a while until the tab appears. Use the slider above the image to switch between different test samples.\n", |
652 | | - "Analyze the perfomance of the trained model. Are the predicted occupancy grid maps helpful for an automated driving function? Can you explain the results?" |
| 631 | + "#### Analyze the performance of the trained model. Are the predicted occupancy grid maps helpful for an automated driving function? Can you explain the results?" |
653 | 632 | ]
|
654 | 633 | },
|
655 | 634 | {
|
|
664 | 643 | {
|
665 | 644 | "cell_type": "code",
|
666 | 645 | "execution_count": null,
|
667 | | - "metadata": { |
668 | | - "vscode": { |
669 | | - "languageId": "python" |
670 | | - } |
671 | | - }, |
| 646 | + "metadata": {}, |
672 | 647 | "outputs": [],
|
673 | 648 | "source": [
|
674 | | - "!rm -rf tensorboard/images_real\n", |
675 | | - "\n", |
676 | 649 | "model.load_weights('grid_mapping/weights.99.hdf5')\n",
|
677 | 650 | "\n",
|
| 651 | + "num_plots = 5\n", |
678 | 652 | "i = 0\n",
|
| 653 | + "fig, axs = plt.subplots(num_plots, 2, figsize=(20, 40))\n", |
679 | 654 | "for sample, sample_raw in zip(tqdm(datapipe_test_real), datapipe_test_real_raw):\n",
|
680 | | - " point_cloud_batch = sample[0]\n", |
681 | | - " point_cloud_raw = sample_raw[0][0]\n", |
| 655 | + " point_cloud_batch = sample[0]\n", |
| 656 | + " point_cloud_raw = sample_raw[0][0]\n", |
682 | 657 | "\n",
|
683 | | - " # use trained model to predict grid map\n", |
684 | | - " prediction_batch = model(point_cloud_batch, training=False)\n", |
685 | | - " grid_map = prediction_batch[0]\n", |
| 658 | + " # use trained model to predict grid map\n", |
| 659 | + " prediction_batch = model(point_cloud_batch, training=False)\n", |
| 660 | + " grid_map = prediction_batch[0]\n", |
686 | 661 | " \n",
|
687 | | - " # convert predicted evidences to belief masses\n", |
688 | | - " prob, u, S, num_classes = evidences_to_masses(grid_map)\n", |
| 662 | + " # convert predicted evidences to belief masses\n", |
| 663 | + " prob, u, S, num_classes = evidences_to_masses(grid_map)\n", |
689 | 664 | " \n",
|
690 | | - " # convert point cloud and grid map to images\n", |
691 | | - " image_point_cloud = utils.lidar_to_bird_view_img(point_cloud_raw.numpy(), x_min, x_max, y_min, y_max, step_x_size, step_y_size, intensity_threshold)\n", |
692 | | - " image_grid_map = utils.grid_map_to_img(prob)\n", |
| 665 | + " # convert point cloud and grid map to images\n", |
| 666 | + " image_point_cloud = utils.lidar_to_bird_view_img(point_cloud_raw.numpy(), x_min, x_max, y_min, y_max, step_x_size, step_y_size, intensity_threshold)\n", |
| 667 | + " image_grid_map = utils.grid_map_to_img(prob)\n", |
693 | 668 | " \n",
|
694 | | - " # display input and prediction in TensorBoard\n", |
695 | | - " w = tf.summary.create_file_writer('tensorboard/images_real')\n", |
696 | | - " with w.as_default():\n", |
697 | | - " tf.summary.image(\"real_input\", [tf.cast(image_point_cloud, dtype=tf.float64)/255.0], step=i)\n", |
698 | | - " tf.summary.image(\"real_prediction\", [image_grid_map], step=i)\n", |
699 | | - " i = i + 1" |
| 669 | + " # plot images in the grid\n", |
| 670 | + " axs[i, 0].imshow(tf.cast(image_point_cloud, dtype=tf.float32)/255.0)\n", |
| 671 | + " axs[i, 0].set_title('Real-World Input')\n", |
| 672 | + " axs[i, 1].imshow(image_grid_map)\n", |
| 673 | + " axs[i, 1].set_title('Real-World Prediction')\n", |
| 674 | + "\n", |
| 675 | + " i = i + 1\n", |
| 676 | + " if i >= num_plots:\n", |
| 677 | + " break\n", |
| 678 | + "\n", |
| 679 | + "# adjust layout and show plot\n", |
| 680 | + "fig.tight_layout()\n", |
| 681 | + "plt.show()" |
700 | 682 | ]
|
701 | 683 | },
|
702 | 684 | {
|
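Beyond visual inspection, a hedged sketch of a simple quantitative comparison: averaging the predicted uncertainty mass `u` over a few batches of each test pipeline. All names are reused from the cells above; a markedly higher value on real data would be one symptom of the sim-to-real domain gap:

```python
import numpy as np

def mean_uncertainty(datapipe, num_batches=5):
    # Average the uncertainty mass of the first predicted grid map in
    # each of the first `num_batches` batches of the given pipeline.
    values = []
    for k, sample in enumerate(datapipe):
        prediction_batch = model(sample[0], training=False)
        _, u, _, _ = evidences_to_masses(prediction_batch[0])
        values.append(float(np.mean(u)))
        if k + 1 >= num_batches:
            break
    return float(np.mean(values))

print("synthetic:", mean_uncertainty(datapipe_test_syn))
print("real     :", mean_uncertainty(datapipe_test_real))
```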
|
764 | 746 | "\n",
|
765 | 747 | "MIT License\n",
|
766 | 748 | "\n",
|
767 | | - "Copyright 2023 Institute for Automotive Engineering of RWTH Aachen University." |
| 749 | + "Copyright 2022 Institute for Automotive Engineering of RWTH Aachen University." |
768 | 750 | ]
|
769 | | - }, |
770 | | - { |
771 | | - "cell_type": "code", |
772 | | - "execution_count": null, |
773 | | - "metadata": {}, |
774 | | - "outputs": [], |
775 | | - "source": [] |
776 | 751 | }
|
777 | 752 | ],
|
778 | 753 | "metadata": {
|
|