Skip to content

Commit aa33f45

Browse files
authored
Merge branch 'main' into onnx-branch
2 parents 78ffa11 + fec1153 commit aa33f45

13 files changed

+47
-48
lines changed

BabbleApp/babble_model_loader.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -15,7 +15,7 @@
1515

1616

1717
def run_model(self):
18-
if self.runtime == "ONNX" or self.runtime == "Default (ONNX)":
18+
if self.runtime in ("ONNX", "Default (ONNX)"):
1919
frame = cv2.resize(self.current_image_gray, (256, 256))
2020
frame = transforms.to_tensor(frame)
2121
frame = transforms.unsqueeze(frame, 0)

BabbleApp/babble_processor.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -96,7 +96,7 @@ def __init__(
9696
self.opts.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_ALL
9797
self.opts.add_session_config_entry("session.intra_op.allow_spinning", "0") # ~3% savings worth ~6ms avg latency. Not noticeable at 60fps?
9898
self.opts.enable_mem_pattern = False
99-
if self.runtime == "ONNX" or self.runtime == "Default (ONNX)": # ONNX
99+
if self.runtime in ("ONNX", "Default (ONNX)"): # ONNX
100100
if self.use_gpu:
101101
provider = "DmlExecutionProvider"
102102
else:

BabbleApp/babbleapp.py

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -215,10 +215,10 @@ def main():
215215
# GUI Render loop
216216
while True:
217217
# First off, check for any events from the GUI
218-
event, values = window.read(timeout=2)
218+
event, values = window.read(timeout=30)
219219

220220
# If we're in either mode and someone hits q, quit immediately
221-
if event == "Exit" or event == sg.WIN_CLOSED:
221+
if event in ("Exit", sg.WIN_CLOSED):
222222
for (
223223
cam
224224
) in (

BabbleApp/camera.py

Lines changed: 19 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -1,4 +1,5 @@
11
import cv2
2+
from cv2.typing import *
23
import numpy as np
34
import queue
45
import serial
@@ -13,6 +14,7 @@
1314
from enum import Enum
1415

1516
WAIT_TIME = 0.1
17+
MAX_RESOLUTION: int = 600
1618

1719
# Serial communication protocol:
1820
# header-begin (2 bytes)
@@ -25,9 +27,9 @@
2527

2628

2729
class CameraState(Enum):
28-
CONNECTING = 0
29-
CONNECTED = 1
30-
DISCONNECTED = 2
30+
CONNECTING: int = 0
31+
CONNECTED: int = 1
32+
DISCONNECTED: int = 2
3133

3234

3335
class Camera:
@@ -304,6 +306,19 @@ def start_serial_connection(self, port):
304306
)
305307
self.camera_status = CameraState.DISCONNECTED
306308

309+
def clamp_max_res(self, image: MatLike) -> MatLike:
310+
shape = image.shape
311+
max_value = np.max(shape)
312+
if max_value > MAX_RESOLUTION:
313+
scale: float = MAX_RESOLUTION/max_value
314+
width: int = int(shape[1] * scale)
315+
height: int = int(shape[0] * scale)
316+
image = cv2.resize(image, (width, height))
317+
318+
return image
319+
else: return image
320+
321+
307322
def push_image_to_queue(self, image, frame_number, fps):
308323
# If there's backpressure, just yell. We really shouldn't have this unless we start getting
309324
# some sort of capture event conflict though.
@@ -312,5 +327,5 @@ def push_image_to_queue(self, image, frame_number, fps):
312327
print(
313328
f'{Fore.YELLOW}[{lang._instance.get_string("log.warn")}] {lang._instance.get_string("warn.backpressure1")} {qsize}. {lang._instance.get_string("warn.backpressure2")}{Fore.RESET}'
314329
)
315-
self.camera_output_outgoing.put((image, frame_number, fps))
330+
self.camera_output_outgoing.put((self.clamp_max_res(image), frame_number, fps))
316331
self.capture_event.clear()

BabbleApp/camera_widget.py

Lines changed: 11 additions & 10 deletions
Original file line number | Diff line number | Diff line change
@@ -4,7 +4,7 @@
44
import PySimpleGUI as sg
55
import cv2
66
from babble_processor import BabbleProcessor, CamInfoOrigin
7-
from camera import Camera, CameraState
7+
from camera import Camera, CameraState, MAX_RESOLUTION
88
from config import BabbleConfig
99
from osc import Tab
1010
from utils.misc_utils import (
@@ -63,7 +63,7 @@ def __init__(self, widget_id: Tab, main_config: BabbleConfig, osc_queue: Queue):
6363
self.capture_event = Event()
6464
self.capture_queue = Queue(maxsize=2)
6565
self.roi_queue = Queue(maxsize=2)
66-
self.image_queue = Queue(maxsize=500)
66+
self.image_queue = Queue(maxsize=4)
6767

6868
self.babble_cnn = BabbleProcessor(
6969
self.config,
@@ -101,9 +101,9 @@ def __init__(self, widget_id: Tab, main_config: BabbleConfig, osc_queue: Queue):
101101
],
102102
[
103103
sg.Graph(
104-
(640, 480),
105-
(0, 480),
106-
(640, 0),
104+
(MAX_RESOLUTION, MAX_RESOLUTION),
105+
(0, MAX_RESOLUTION),
106+
(MAX_RESOLUTION, 0),
107107
key=self.gui_roi_selection,
108108
drag_submits=True,
109109
enable_events=True,
@@ -212,6 +212,7 @@ def __init__(self, widget_id: Tab, main_config: BabbleConfig, osc_queue: Queue):
212212
key=self.gui_camera_addr,
213213
tooltip=lang._instance.get_string("camera.cameraAddressTooltip"),
214214
enable_events=True,
215+
size=(20,0),
215216
),
216217
sg.Button(
217218
lang._instance.get_string("camera.refreshCameraList"),
@@ -423,20 +424,20 @@ def render(self, window, event, values):
423424

424425
if event == self.gui_autoroi:
425426
print(lang._instance.get_string("info.setROI"))
426-
output = self.babble_cnn.get_framesize()
427+
output = self.maybe_image[0].shape
427428
self.config.roi_window_x = 0
428429
self.config.roi_window_y = 0
429-
self.config.roi_window_w = output[0]
430-
self.config.roi_window_h = output[1]
430+
self.config.roi_window_w = output[1]
431+
self.config.roi_window_h = output[0]
431432
self.main_config.save()
432433

433434
if event == self.gui_refresh_button:
434435
print(
435436
f'\033[94m[{lang._instance.get_string("log.info")}] {lang._instance.get_string("info.refreshedCameraList")}\033[0m'
436437
)
437438
self.camera_list = list_camera_names()
438-
print(self.camera_list)
439-
window[self.gui_camera_addr].update(values=self.camera_list)
439+
#print(self.camera_list)
440+
window[self.gui_camera_addr].update(values=self.camera_list,size=(20,0))
440441

441442
if event == self.gui_restart_calibration:
442443
if (

BabbleApp/config.py

Lines changed: 2 additions & 15 deletions
Original file line number | Diff line number | Diff line change
@@ -64,31 +64,19 @@ def load():
6464
EnsurePath()
6565

6666
if not os.path.exists(CONFIG_FILE_NAME):
67-
print(
68-
f'[{lang._instance.get_string("log.info")}] {lang._instance.get_string("config.noSettingsFile")}'
69-
)
7067
return BabbleConfig()
7168
try:
7269
with open(CONFIG_FILE_NAME, "r") as settings_file:
7370
return BabbleConfig(**json.load(settings_file))
7471
except json.JSONDecodeError:
75-
print(
76-
f'[{lang._instance.get_string("log.info")}] {lang._instance.get_string("config.failedToLoadSettings")}'
77-
)
7872
load_config = None
7973
if os.path.exists(BACKUP_CONFIG_FILE_NAME):
8074
try:
8175
with open(BACKUP_CONFIG_FILE_NAME, "r") as settings_file:
8276
load_config = BabbleConfig(**json.load(settings_file))
83-
print(
84-
f'[{lang._instance.get_string("log.info")}] {lang._instance.get_string("config.usingBackupSettings")}'
85-
)
8677
except json.JSONDecodeError:
8778
pass
8879
if load_config is None:
89-
print(
90-
f'[{lang._instance.get_string("log.info")}] {lang._instance.get_string("config.usingBaseSettings")}'
91-
)
9280
load_config = BabbleConfig()
9381
return load_config
9482

@@ -103,11 +91,10 @@ def save(self):
10391
BabbleConfig(**json.load(settings_file))
10492
shutil.copy(CONFIG_FILE_NAME, BACKUP_CONFIG_FILE_NAME)
10593
# print("Backed up settings files.") # Comment out because it's too loud.
94+
except shutil.SameFileError:
95+
pass
10696
except json.JSONDecodeError:
10797
# No backup because the saved settings file is broken.
10898
pass
10999
with open(CONFIG_FILE_NAME, "w") as settings_file:
110100
json.dump(obj=self.dict(), fp=settings_file, indent=2)
111-
print(
112-
f'[{lang._instance.get_string("log.info")}] {lang._instance.get_string("config.saved")}.'
113-
)

BabbleApp/general_settings_widget.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -336,4 +336,4 @@ def render(self, window, event, values):
336336
if changed:
337337
self.main_config.save()
338338

339-
self.osc_queue.put((Tab.SETTINGS))
339+
self.osc_queue.put(Tab.SETTINGS)

BabbleApp/landmark_model_loader.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -15,7 +15,7 @@
1515

1616

1717
def run_model(self): # Replace transforms n shit for the pfld model
18-
if self.runtime == "ONNX" or self.runtime == "Default (ONNX)":
18+
if self.runtime in ("ONNX", "Default (ONNX)"):
1919
frame = cv2.resize(self.current_image_gray, (256, 256))
2020
frame = transforms.to_tensor(frame)
2121
frame = transforms.unsqueeze(frame, 0)

BabbleApp/landmark_processor.py

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -95,8 +95,8 @@ def __init__(
9595
self.opts.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_ALL
9696
self.opts.add_session_config_entry("session.intra_op.allow_spinning", "0") # ~3% savings worth ~6ms avg latency. Not noticeable at 60fps?
9797
self.opts.enable_mem_pattern = False
98-
if self.runtime == "ONNX" or self.runtime == "Default (ONNX)": # ONNX
99-
if self.use_gpu: provider = 'DmlExecutionProvider'
98+
if self.runtime in ("ONNX", "Default (ONNX)"): # ONNX
99+
if self.use_gpu: provider = 'DmlExecutionProvider'
100100
else: provider = "CPUExecutionProvider" # Build onnxruntime to get both DML and OpenVINO
101101
self.sess = ort.InferenceSession(f'{self.model}onnx/model.onnx', self.opts, providers=[provider, ], provider_options=[{'device_id': self.gpu_index}]) # Load Babble CNN until PFLD has been converted
102102
self.input_name = self.sess.get_inputs()[0].name

BabbleApp/one_euro_filter.py

Lines changed: 1 addition & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -31,9 +31,7 @@ def __call__(self, x):
3131

3232
t = time()
3333
t_e = t - self.t_prev
34-
if (
35-
t_e != 0.0
36-
): # occasionally when switching to algos this becomes zero causing divide by zero errors crashing the filter.
34+
if t_e != 0.0: # occasionally when switching to algos this becomes zero causing divide by zero errors crashing the filter.
3735
t_e = np.full(x.shape, t_e)
3836

3937
# The filtered derivative of the signal.

0 commit comments

Comments (0)