fix engine export #2155
base: master
Changes from 1 commit
Commits: 887791c, ed08a45, fea0fe8, 79ea692, 1022f77, 492f718
Diff:

```diff
@@ -1,82 +1,193 @@
 from __future__ import annotations
 
 import json
 from pathlib import Path
 
 from boxmot.appearance.exporters.base_exporter import BaseExporter
 from boxmot.appearance.exporters.onnx_exporter import ONNXExporter
 from boxmot.utils import logger as LOGGER
 
 
 def _is_jetson() -> bool:
     """Best-effort Jetson detection."""
     try:
         return Path("/etc/nv_tegra_release").exists()
     except Exception:
         return False
 
 
 class EngineExporter(BaseExporter):
     """
     TensorRT engine exporter aligned with the provided onnx2engine pattern,
     adapted for person re-identification models.
 
     Notes
     -----
     - INT8 is intentionally omitted per request.
     - Writes optional metadata header (length + JSON) before engine bytes, matching your example.
       If you prefer a pure-TRT engine without a custom header, comment out the metadata block.
     """
 
     required_packages = ("nvidia-tensorrt",)
     cmds = "--extra-index-url https://pypi.ngc.nvidia.com"
 
-    def export(self):
+    # Optional knobs the runner/CLI may attach to this instance (safe fallbacks here):
+    dla: int | None = None        # DLA core index for Jetson devices, else None
+    verbose: bool = False         # TensorRT verbose logging
+    metadata: dict | None = None  # custom metadata to prepend to the engine file
+
+    def export(self):
         # --- Preconditions ---------------------------------------------------
         assert (
             self.im.device.type != "cpu"
         ), "export running on CPU but must be on GPU, i.e. `python export.py --device 0`"
 
         try:
-            import tensorrt as trt
-        except ImportError:
-            import tensorrt as trt
+            import tensorrt as trt  # noqa: F401
+        except ImportError as e:
+            raise ImportError(
+                "TensorRT not found. Install `nvidia-tensorrt` (often via pip + NGC index) and ensure CUDA-compatible drivers."
+            ) from e
 
         # --- Export ONNX first ----------------------------------------------
         onnx_file = self.export_onnx()
         assert Path(onnx_file).exists(), f"Failed to export ONNX file: {onnx_file}"
 
         # --- Build TRT engine ------------------------------------------------
         return self._onnx2engine(
             onnx_file=str(onnx_file),
             engine_file=str(self.file.with_suffix(".engine")),
             workspace=getattr(self, "workspace", None),  # GB (float or int); handled below
```
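The class docstring above pins down an on-disk layout: a length prefix plus JSON metadata written before the raw engine bytes. As a minimal read-back sketch of that layout, assuming the 4-byte little-endian length used in the review suggestion further down (the helper name `read_engine_with_metadata` is illustrative, not part of the PR):

```python
from __future__ import annotations

import json
from pathlib import Path


def read_engine_with_metadata(path: str) -> tuple[dict, bytes]:
    """Split a length-prefixed JSON header from the serialized TensorRT engine."""
    blob = Path(path).read_bytes()
    # 4-byte little-endian length, then that many bytes of UTF-8 JSON metadata
    meta_len = int.from_bytes(blob[:4], byteorder="little", signed=False)
    metadata = json.loads(blob[4 : 4 + meta_len].decode("utf-8"))
    # everything after the header is the engine itself
    engine_bytes = blob[4 + meta_len :]
    return metadata, engine_bytes
```

The returned `engine_bytes` are what `trt.Runtime.deserialize_cuda_engine` expects at load time, so a loader that skips the header this way stays compatible with the optional metadata block.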
| 
     | ||||||
Suggested change on the `workspace=` argument:

```diff
-            workspace=getattr(self, "workspace", None),  # GB (float or int); handled below
+            workspace=getattr(self, "workspace", None),  # workspace expected in gigabytes (float or int); will be converted to bytes in _onnx2engine
```
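The clarified comment states that `workspace` arrives in gigabytes and is converted to bytes inside `_onnx2engine`. That conversion is not visible in this excerpt, so the following is only a hedged sketch of how it is commonly done; `apply_workspace_limit` and its parameters are illustrative, and the split between `set_memory_pool_limit` (TensorRT >= 8.4) and the older `max_workspace_size` attribute is an assumption about the supported TensorRT versions:

```python
from __future__ import annotations

import tensorrt as trt  # requires a CUDA-capable environment with TensorRT installed


def apply_workspace_limit(config: trt.IBuilderConfig, workspace_gb: float | None) -> None:
    """Translate a workspace budget given in GB into the byte count TensorRT expects."""
    if workspace_gb is None:
        return  # leave TensorRT's default workspace limit untouched
    workspace_bytes = int(workspace_gb * (1 << 30))  # GB -> bytes
    if hasattr(config, "set_memory_pool_limit"):  # TensorRT >= 8.4
        config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, workspace_bytes)
    else:  # older TensorRT releases
        config.max_workspace_size = workspace_bytes
```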
    
      
    
Copilot AI (Oct 24, 2025):
The # noqa comment without a specific error code suppresses all linting warnings. Since TensorRT is already imported at line 46, this is a duplicate import. Remove this redundant import or consolidate it at the module level if needed in both methods.
```python
import tensorrt as trt  # noqa
```
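One way to act on that advice, sketched here rather than taken from the PR, is a single guarded module-level import that both `export` and `_onnx2engine` can share:

```python
# Hypothetical consolidation at module level (not the PR's code):
try:
    import tensorrt as trt
except ImportError:  # resolved lazily; callers raise a descriptive error if still missing
    trt = None
```

Each method can then check `if trt is None:` and raise the descriptive `ImportError` shown in the diff, instead of re-importing in two places.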
    
      
    
Copilot AI (Oct 24, 2025):
Using signed=True for a length value is unusual and could lead to issues if the metadata length exceeds 2GB (max signed 32-bit int). Consider using signed=False for length fields unless there's a specific protocol requirement for signed integers.
Suggested change:

```diff
-t.write(len(meta).to_bytes(4, byteorder="little", signed=True))
+t.write(len(meta).to_bytes(4, byteorder="little", signed=False))
```
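The difference only matters for very large headers, but a quick check shows why `signed=False` is the safer choice for a 4-byte length field:

```python
n = 2**31  # a metadata blob just past the signed 32-bit range (2 GiB)

n.to_bytes(4, byteorder="little", signed=False)  # b'\x00\x00\x00\x80' -- still fits
n.to_bytes(4, byteorder="little", signed=True)   # raises OverflowError: int too big to convert
```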
A separate review comment adds:
The `# noqa: F401` suppresses the unused import warning, but the import is actually used on line 86. Remove the `# noqa: F401` comment since this import is legitimately used.