Skip to content

Commit e85073a

Browse files
committed
Fix: Misc fixes/changes
1 parent fc56aba commit e85073a

10 files changed

+32
-32
lines changed

BabbleApp/babble_model_loader.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,7 @@
1313
from one_euro_filter import OneEuroFilter
1414

1515
def run_model(self):
16-
if self.runtime == "ONNX" or self.runtime == "Default (ONNX)":
16+
if self.runtime in ("ONNX", "Default (ONNX)"):
1717
frame = cv2.resize(self.current_image_gray, (256, 256))
1818
frame = transforms.to_tensor(frame)
1919
frame = transforms.unsqueeze(frame,0)

BabbleApp/babble_processor.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -90,10 +90,10 @@ def __init__(
9090
self.opts = ort.SessionOptions()
9191
self.opts.intra_op_num_threads = settings.gui_inference_threads
9292
self.opts.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_ALL
93-
if self.runtime == "ONNX" or self.runtime == "Default (ONNX)": # ONNX
94-
if self.use_gpu: provider = 'DmlExecutionProvider'
93+
if self.runtime in ("ONNX", "Default (ONNX)"): # ONNX
94+
if self.use_gpu: provider = 'DmlExecutionProvider'
9595
else: provider = "CPUExecutionProvider" # Build onnxruntime to get both DML and OpenVINO
96-
self.sess = ort.InferenceSession(f'{self.model}onnx/model.onnx', self.opts, providers=[provider], provider_options=[{'device_id': self.gpu_index}])
96+
self.sess = ort.InferenceSession(f'{self.model}onnx/model.onnx', self.opts, providers=[provider], provider_options=[{'device_id': self.gpu_index}])
9797
self.input_name = self.sess.get_inputs()[0].name
9898
self.output_name = self.sess.get_outputs()[0].name
9999
try:

BabbleApp/babbleapp.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -203,10 +203,10 @@ def main():
203203
# GUI Render loop
204204
while True:
205205
# First off, check for any events from the GUI
206-
event, values = window.read(timeout=2)
206+
event, values = window.read(timeout=30)
207207

208208
# If we're in either mode and someone hits q, quit immediately
209-
if event == "Exit" or event == sg.WIN_CLOSED:
209+
if event in ("Exit", sg.WIN_CLOSED):
210210
for cam in cams: #yes we only have one cam page but im just gonna leave this incase
211211
cam.stop()
212212
cancellation_event.set()

BabbleApp/camera_widget.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -54,7 +54,7 @@ def __init__(self, widget_id: Tab, main_config: BabbleConfig, osc_queue: Queue):
5454
self.capture_event = Event()
5555
self.capture_queue = Queue(maxsize=2)
5656
self.roi_queue = Queue(maxsize=2)
57-
self.image_queue = Queue(maxsize=500)
57+
self.image_queue = Queue(maxsize=4)
5858

5959
self.babble_cnn = BabbleProcessor(
6060
self.config,

BabbleApp/general_settings_widget.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -249,4 +249,4 @@ def render(self, window, event, values):
249249

250250
if changed:
251251
self.main_config.save()
252-
self.osc_queue.put((Tab.SETTINGS))
252+
self.osc_queue.put(Tab.SETTINGS)

BabbleApp/landmark_model_loader.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,7 @@
1313
from one_euro_filter import OneEuroFilter
1414

1515
def run_model(self): # Replace transforms n shit for the pfld model
16-
if self.runtime == "ONNX" or self.runtime == "Default (ONNX)":
16+
if self.runtime in ("ONNX", "Default (ONNX)"):
1717
frame = cv2.resize(self.current_image_gray, (256, 256))
1818
frame = transforms.to_tensor(frame)
1919
frame = transforms.unsqueeze(frame,0)

BabbleApp/landmark_processor.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -92,8 +92,8 @@ def __init__(
9292
self.opts = ort.SessionOptions()
9393
self.opts.intra_op_num_threads = settings.gui_inference_threads
9494
self.opts.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_ALL
95-
if self.runtime == "ONNX" or self.runtime == "Default (ONNX)": # ONNX
96-
if self.use_gpu: provider = 'DmlExecutionProvider'
95+
if self.runtime in ("ONNX", "Default (ONNX)"): # ONNX
96+
if self.use_gpu: provider = 'DmlExecutionProvider'
9797
else: provider = "CPUExecutionProvider" # Build onnxruntime to get both DML and OpenVINO
9898
self.sess = ort.InferenceSession(f'{self.model}onnx/model.onnx', self.opts, providers=[provider, ], provider_options=[{'device_id': self.gpu_index}]) # Load Babble CNN until PFLD has been converted
9999
self.input_name = self.sess.get_inputs()[0].name

BabbleApp/one_euro_filter.py

Lines changed: 14 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -32,21 +32,21 @@ def __call__(self, x):
3232
t = time()
3333
t_e = t - self.t_prev
3434
if t_e != 0.0: #occasionally when switching to algos this becomes zero causing divide by zero errors crashing the filter.
35-
t_e = np.full(x.shape, t_e)
35+
t_e = np.full(x.shape, t_e)
3636

37-
# The filtered derivative of the signal.
38-
a_d = smoothing_factor(t_e, self.d_cutoff)
39-
dx = (x - self.x_prev) / t_e
40-
dx_hat = exponential_smoothing(a_d, dx, self.dx_prev)
37+
# The filtered derivative of the signal.
38+
a_d = smoothing_factor(t_e, self.d_cutoff)
39+
dx = (x - self.x_prev) / t_e
40+
dx_hat = exponential_smoothing(a_d, dx, self.dx_prev)
4141

42-
# The filtered signal.
43-
cutoff = self.min_cutoff + self.beta * np.abs(dx_hat)
44-
a = smoothing_factor(t_e, cutoff)
45-
x_hat = exponential_smoothing(a, x, self.x_prev)
42+
# The filtered signal.
43+
cutoff = self.min_cutoff + self.beta * np.abs(dx_hat)
44+
a = smoothing_factor(t_e, cutoff)
45+
x_hat = exponential_smoothing(a, x, self.x_prev)
4646

47-
# Memorize the previous values.
48-
self.x_prev = x_hat
49-
self.dx_prev = dx_hat
50-
self.t_prev = t
47+
# Memorize the previous values.
48+
self.x_prev = x_hat
49+
self.dx_prev = dx_hat
50+
self.t_prev = t
5151

52-
return x_hat
52+
return x_hat

BabbleApp/osc.py

Lines changed: 6 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -82,15 +82,13 @@ def __init__(self, cancellation_event: threading.Event, msg_queue: queue.Queue[t
8282
self.cam = Tab.CAM
8383

8484
def run(self):
85-
while True:
86-
if self.cancellation_event.is_set():
87-
print("\033[94m[INFO] Exiting OSC Queue\033[0m")
88-
return
85+
while not self.cancellation_event.is_set():
8986
try:
9087
(self.cam_id, cam_info) = self.msg_queue.get(block=True, timeout=0.1)
91-
except:
88+
except TypeError:
89+
continue
90+
except queue.Empty:
9291
continue
93-
9492
output_osc(cam_info.output, self)
9593

9694

@@ -114,7 +112,8 @@ def shutdown(self):
114112
pass
115113

116114
def recalibrate_mouth(self, address, osc_value):
117-
if type(osc_value) != bool: return # just incase we get anything other than bool
115+
if not isinstance(osc_value, bool):
116+
return # just incase we get anything other than bool
118117
if osc_value:
119118
for cam in self.cams:
120119
cam.babble_cnn.calibration_frame_counter = 300

BabbleApp/osc_calibrate_filter.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -10,6 +10,7 @@ class CamId(IntEnum):
1010
class cal():
1111

1212
def __init__(self):
13+
self.calibration_frame_counter = None
1314
self.calibrated_array = None
1415
self.raw_array = None
1516

0 commit comments

Comments (0)