detector/docker.sh (2 changes: 1 addition & 1 deletion)
@@ -29,7 +29,7 @@ fi
 # ========================== BUILD CONFIGURATION / IMAGE SELECTION =======================
 
 SEMANTIC_VERSION=0.1.13
-NODE_LIB_VERSION=0.16.1
+NODE_LIB_VERSION=0.17.0
 build_args=" --build-arg NODE_LIB_VERSION=$NODE_LIB_VERSION"
 
 if [ -f /etc/nv_tegra_release ] # Check if we are on a Jetson device
detector/yolov5_detector.py (8 changes: 7 additions & 1 deletion)
@@ -9,7 +9,12 @@
 import cv2
 import numpy as np
 import yolov5
-from learning_loop_node.data_classes import BoxDetection, ImageMetadata, ImagesMetadata, PointDetection
+from learning_loop_node.data_classes import (
+    BoxDetection,
+    ImageMetadata,
+    ImagesMetadata,
+    PointDetection,
+)
 from learning_loop_node.detector.detector_logic import DetectorLogic
 from learning_loop_node.enums import CategoryType
 
@@ -86,6 +91,7 @@ def evaluate(self, image: bytes) -> ImageMetadata:
         assert self.model_info is not None, 'model_info must be set before calling evaluate()'
 
         image_metadata = ImageMetadata()
+
         try:
             t = time.time()
             cv_image = cv2.imdecode(np.frombuffer(image, np.uint8), cv2.IMREAD_COLOR)
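For reference, the decode step that evaluate() relies on (visible in the unchanged context lines above) turns the raw request bytes into an OpenCV image before inference. A minimal standalone sketch of just that step, assuming a hypothetical local file test.jpg that is not part of this repository:

# Minimal sketch of the bytes-to-image decode used in evaluate();
# 'test.jpg' is a hypothetical local file, not part of this repository.
import cv2
import numpy as np

with open('test.jpg', 'rb') as f:
    image_bytes = f.read()

# np.frombuffer wraps the raw bytes; cv2.imdecode parses the JPEG/PNG container
# and returns an HxWx3 BGR uint8 array, or None if decoding fails.
cv_image = cv2.imdecode(np.frombuffer(image_bytes, np.uint8), cv2.IMREAD_COLOR)
assert cv_image is not None, 'failed to decode image bytes'
print(cv_image.shape)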
detector_cla/yolov5_detector.py (46 changes: 28 additions & 18 deletions)
@@ -1,38 +1,45 @@
-from typing import List
-from learning_loop_node import ModelInformation, Detector
-from learning_loop_node.detector import Detections
-from learning_loop_node.detector.classification_detection import ClassificationDetection
 import logging
-import numpy as np
-import torch
+from typing import List, Tuple
+
 import cv2
+import torch
 import torch.nn.functional as F
 import torchvision.transforms as T
-
+from learning_loop_node.data_classes import (
+    ClassificationDetection,
+    ImageMetadata,
+    ImagesMetadata,
+)
+from learning_loop_node.detector.detector_logic import DetectorLogic
 
 IMAGENET_MEAN = 0.485, 0.456, 0.406
 IMAGENET_STD = 0.229, 0.224, 0.225
 
 
-def classify_transforms(size=832):
+def classify_transforms(size: Tuple[int, int] = (832, 832)):
     return T.Compose([T.ToTensor(), T.Resize(size), T.CenterCrop(size), T.Normalize(IMAGENET_MEAN, IMAGENET_STD)])
 
 
-class Yolov5Detector(Detector):
+class Yolov5Detector(DetectorLogic):
 
     def __init__(self) -> None:
         super().__init__('yolov5_pytorch')
 
-    def init(self, model_info: ModelInformation):
-        self.model_info = model_info
-        self.imgsz = (model_info.resolution, model_info.resolution)
+    def init(self):
+        assert self.model_info is not None, 'model_info must be set before calling init()'
+        assert self.model_info.resolution is not None
+
+        self.imgsz = (self.model_info.resolution, self.model_info.resolution)
         self.torch_transforms = classify_transforms(self.imgsz)
         self.model = torch.hub.load('ultralytics/yolov5', 'custom',
-                                    path=f'{model_info.model_root_path}/model.pt', force_reload=True)
+                                    path=f'{self.model_info.model_root_path}/model.pt', force_reload=True)
+
+    def evaluate(self, image: bytes) -> ImageMetadata:
+        if self.model_info is None or self.model is None:
+            return ImageMetadata()
 
-    def evaluate(self, image: List[np.uint8]) -> Detections:
         self.model.warmup(imgsz=(1, 3, *self.imgsz))
-        detections = Detections()
+        metadata = ImageMetadata()
         try:
             image = cv2.imdecode(image, cv2.IMREAD_COLOR)
             # Perform yolov5 preprocessing
@@ -49,10 +56,13 @@ def evaluate(self, image: List[np.uint8]) -> Detections:
             category_index = top_i[0]
             category = [category for category in self.model_info.categories if category.name ==
                         self.model.names[category_index]][0]
-            detections.classification_detections.append(ClassificationDetection(
+            metadata.classification_detections.append(ClassificationDetection(
                 category.name, self.model_info.version, pred[0][category_index].item(), category.id
             ))
 
-        except Exception as e:
+        except Exception:
             logging.exception('inference failed')
-        return detections
+        return metadata
+
+    def batch_evaluate(self, images: List[bytes]) -> ImagesMetadata:
+        raise NotImplementedError('batch_evaluate is not implemented for Yolov5Detector')
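Taken together, the detector_cla changes move the classifier onto the same DetectorLogic interface used by the other detectors: init() now reads self.model_info instead of receiving it as a parameter, evaluate() takes raw bytes and returns an ImageMetadata, and a batch_evaluate() stub is added. A condensed sketch of the resulting class shape, assuming the learning_loop_node API exactly as it appears in this diff (model loading and preprocessing elided):

# Condensed sketch of the migrated classification detector; bodies are reduced
# to their structure, real model loading and preprocessing are elided.
from typing import List

from learning_loop_node.data_classes import ImageMetadata, ImagesMetadata
from learning_loop_node.detector.detector_logic import DetectorLogic


class Yolov5Detector(DetectorLogic):

    def __init__(self) -> None:
        super().__init__('yolov5_pytorch')

    def init(self):
        # model_info is populated by the node before init() runs,
        # hence the asserts instead of an explicit parameter.
        assert self.model_info is not None
        assert self.model_info.resolution is not None
        self.imgsz = (self.model_info.resolution, self.model_info.resolution)
        # ... load the model from self.model_info.model_root_path ...

    def evaluate(self, image: bytes) -> ImageMetadata:
        metadata = ImageMetadata()
        # ... decode the bytes, run the classifier, and append one
        # ClassificationDetection per predicted category to
        # metadata.classification_detections ...
        return metadata

    def batch_evaluate(self, images: List[bytes]) -> ImagesMetadata:
        raise NotImplementedError('batch_evaluate is not implemented for Yolov5Detector')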
detector_cpu/docker.sh (2 changes: 1 addition & 1 deletion)
@@ -29,7 +29,7 @@ fi
 # ========================== BUILD CONFIGURATION / IMAGE SELECTION =======================
 
 SEMANTIC_VERSION=0.1.11
-NODE_LIB_VERSION=0.14.0
+NODE_LIB_VERSION=0.17.0
 build_args=" --build-arg NODE_LIB_VERSION=$NODE_LIB_VERSION"
 
 
detector_cpu/yolov5_detector.py (12 changes: 10 additions & 2 deletions)
@@ -1,12 +1,17 @@
 import logging
 import os
 import time
-from typing import Tuple
+from typing import List, Tuple
 
 import cv2
 import numpy as np
 import torch # type: ignore # pylint: disable=import-error
-from learning_loop_node.data_classes import BoxDetection, ImageMetadata, PointDetection
+from learning_loop_node.data_classes import (
+    BoxDetection,
+    ImageMetadata,
+    ImagesMetadata,
+    PointDetection,
+)
 from learning_loop_node.detector.detector_logic import DetectorLogic
 from learning_loop_node.enums import CategoryType
 
@@ -336,3 +341,6 @@ def xywh2xyxy(self, origin_h, origin_w, x):
         y /= r_h
 
         return y
+
+    def batch_evaluate(self, images: List[bytes]) -> ImagesMetadata:
+        raise NotImplementedError('batch_evaluate is not implemented yet')
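Both detector_cpu and detector_cla now declare batch_evaluate() but raise NotImplementedError, so callers that want to process several images still have to loop over evaluate(). A small sketch of such a per-image fallback, assuming an already initialized detector instance from one of the classes in this PR:

# Sketch of a per-image fallback while batch_evaluate() is still a stub;
# `detector` is assumed to be an initialized DetectorLogic subclass from this repo.
from typing import List

from learning_loop_node.data_classes import ImageMetadata


def evaluate_images(detector, images: List[bytes]) -> List[ImageMetadata]:
    results: List[ImageMetadata] = []
    for image in images:
        # evaluate(bytes) -> ImageMetadata, as defined in the detectors above
        results.append(detector.evaluate(image))
    return results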