Commit 6ac566e

GPU Selection work
1 parent b3dd5df commit 6ac566e

File tree: 7 files changed, 69 additions & 18 deletions

BabbleApp/algo_settings_widget.py

Lines changed: 33 additions & 9 deletions
@@ -16,6 +16,8 @@ def __init__(self, widget_id: Tab, main_config: BabbleSettingsConfig, osc_queue:
         self.gui_speed_coefficient = f"-SPEEDCOEFFICIENT{widget_id}-"
         self.gui_min_cutoff = f"-MINCUTOFF{widget_id}-"
         self.gui_inference_threads = f"-THREADS{widget_id}-"
+        self.gui_backend = f"-BACKEND{widget_id}"
+        self.gui_gpu_index = f"GPUINDEX{widget_id}"
         self.main_config = main_config
         self.config = main_config.settings
         self.osc_queue = osc_queue
@@ -30,21 +32,35 @@ def __init__(self, widget_id: Tab, main_config: BabbleSettingsConfig, osc_queue:
                 size=(32),
                 tooltip="Name of the model file.",
             ),
+            sg.Text("Inference Threads:", background_color='#424042'),
+            sg.InputText(
+                self.config.gui_inference_threads,
+                key=self.gui_inference_threads,
+                size=(4),
+                tooltip = "How many threads to use for processing the model.",
+            ),
         ],
-        [sg.Checkbox(
-            "Use GPU (DirectML)",
+        [sg.Text("Backend:", background_color='#424042'), # Replace with Dropdown once I have internet to view docs.
+            sg.InputText(
+                self.config.gui_backend,
+                key=self.gui_backend,
+                size=(4),
+                tooltip = "Method to run the model.",
+            ),
+            sg.Text("GPU Index:", background_color='#424042'), # Replace with Dropdown once I have internet to view docs.
+            sg.InputText(
+                self.config.gui_gpu_index,
+                key=self.gui_gpu_index,
+                size=(4),
+                tooltip = "Select which device to run inference.",
+            ),
+            sg.Checkbox(
+                "Use GPU",
                 default=self.config.gui_use_gpu,
                 key=self.gui_use_gpu,
                 background_color='#424042',
                 tooltip="Toggle GPU execution.",
             ),
-            sg.Text("Inference Threads:", background_color='#424042'),
-            sg.InputText(
-                self.config.gui_inference_threads,
-                key=self.gui_inference_threads,
-                size=(4),
-                tooltip = "How many threads to use for processing the model.",
-            ),
         ],
         [sg.Text("Model output multiplier:", background_color='#424042'),
             sg.InputText(
@@ -123,6 +139,14 @@ def render(self, window, event, values):
         if self.config.gui_use_gpu != values[self.gui_use_gpu]:
             self.config.gui_use_gpu = values[self.gui_use_gpu]
             changed = True
+
+        if self.config.gui_gpu_index != int(values[self.gui_gpu_index]):
+            self.config.gui_gpu_index = int(values[self.gui_gpu_index])
+            changed = True
+
+        if self.config.gui_backend != int(values[self.gui_backend]):
+            self.config.gui_backend = int(values[self.gui_backend])
+            changed = True
 
         if self.config.gui_inference_threads != int(values[self.gui_inference_threads]):
             self.config.gui_inference_threads = int(values[self.gui_inference_threads])
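Both "Replace with Dropdown" TODOs map to PySimpleGUI's sg.Combo. Below is a minimal sketch of that swap, assuming two backends whose position in the list doubles as the stored gui_backend integer; the labels and ordering are illustrative, not from this commit. Note also that, unlike the sibling keys, the new gui_backend key drops its trailing dash and gui_gpu_index drops both dashes, which only works because PySimpleGUI keys are free-form strings matched verbatim.

import PySimpleGUI as sg

backend_names = ["OpenVINO", "ONNX (DirectML)"]  # hypothetical labels; list index == gui_backend id

backend_row = [
    sg.Text("Backend:", background_color='#424042'),
    sg.Combo(
        backend_names,
        default_value=backend_names[0],
        key='-BACKEND-',   # placeholder; the widget builds keys like f"-BACKEND{widget_id}-"
        readonly=True,     # forbid free text so the index lookup below stays valid
        tooltip="Method to run the model.",
    ),
]

# In render(): translate the selected label back to the config's integer id.
# self.config.gui_backend = backend_names.index(values['-BACKEND-'])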

BabbleApp/babble_model_loader.py

Lines changed: 2 additions & 2 deletions
@@ -16,7 +16,7 @@
 
 
 def run_model(self):
-    if not self.use_gpu:
+    if self.backend == 0:
         frame = cv2.resize(self.current_image_gray, (256, 256))
         # make it pil
         frame = Image.fromarray(frame)
@@ -38,7 +38,7 @@ def run_model(self):
             output[i] = max(min(output[i], 1), 0)
         self.output = output
 
-    else:
+    if self.backend == 1:
         frame = cv2.resize(self.current_image_gray, (256, 256))
         # make it pil
         frame = Image.fromarray(frame)
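One consequence of turning the old if/else into two independent ifs: a backend value other than 0 or 1 now falls through both branches and run_model() silently leaves self.output untouched. A guarded dispatch, sketched here as a suggestion rather than part of this commit, would fail loudly instead:

def run_model(self):
    if self.backend == 0:       # OpenVINO path
        ...                     # existing OpenVINO inference
    elif self.backend == 1:     # ONNX Runtime path
        ...                     # existing ONNX inference
    else:
        raise ValueError(f"Unknown backend id: {self.backend}")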

BabbleApp/babble_processor.py

Lines changed: 12 additions & 6 deletions
@@ -78,24 +78,30 @@ def __init__(
 
         self.current_algo = CamInfoOrigin.MODEL
         self.model = self.settings.gui_model_file
+        self.backend = self.settings.gui_backend
         self.use_gpu = self.settings.gui_use_gpu
+        self.gpu_index = self.settings.gui_gpu_index
         self.output = []
         self.val_list = []
         self.calibrate_config = np.empty((1, 45))
         self.min_max_array = np.empty((2, 45))
 
         self.opts = ort.SessionOptions()
         self.opts.intra_op_num_threads = settings.gui_inference_threads
-        self.opts.inter_op_num_threads = settings.gui_inference_threads
+        self.opts.inter_op_num_threads = settings.gui_inference_threads # Figure out how to set openvino threads
         self.opts.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_ALL
-        if not self.use_gpu:
+        if self.backend == 0: # OpenVino
+            if self.use_gpu: provider = f'GPU{self.gpu_index}'
+            else: provider = 'CPU'
             ie = IECore()
             net = ie.read_network(model=f'{self.model}openvino/model.xml', weights=f'{self.model}openvino/model.bin')
-            self.sess = ie.load_network(network=net, device_name='CPU')
+            self.sess = ie.load_network(network=net, device_name=provider)
             self.input_name = next(iter(net.input_info))
             self.output_name = next(iter(net.outputs))
-        else:
-            self.sess = ort.InferenceSession(f'{self.model}onnx/model.onnx', self.opts, providers=['DmlExecutionProvider'])
+        if self.backend == 1: # ONNX
+            if self.use_gpu: provider = 'DmlExecutionProvider' # Figure out how to set ONNX gpu index
+            else: provider = "CPUExecutionProvider"
+            self.sess = ort.InferenceSession(f'{self.model}onnx/model.onnx', self.opts, providers=[provider])
            self.input_name = self.sess.get_inputs()[0].name
            self.output_name = self.sess.get_outputs()[0].name
@@ -105,7 +111,7 @@ def __init__(
            beta = float(self.settings.gui_speed_coefficient) # 0.62
        except:
            print('\033[93m[WARN] OneEuroFilter values must be a legal number.\033[0m')
-           min_cutoff = 15.0004
+           min_cutoff = 10.0004
            beta = 0.62
        noisy_point = np.array([45])
        self.one_euro_filter = OneEuroFilter(
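Both inline TODOs in this hunk have plausible answers. The sketch below states them as assumptions to check against the pinned library versions, not as facts from this commit: OpenVINO's Inference Engine names multiple GPUs 'GPU.0', 'GPU.1' (dot-separated, so f'GPU{self.gpu_index}' would yield the invalid name 'GPU0'), and ONNX Runtime accepts per-provider options, including DirectML's device_id, as (name, options) tuples.

import onnxruntime as ort
from openvino.inference_engine import IECore

gpu_index = 0  # stand-in for self.gpu_index

# OpenVINO: enumerate devices, then address one GPU as "GPU.<index>".
ie = IECore()
print(ie.available_devices)        # e.g. ['CPU', 'GPU'] or ['CPU', 'GPU.0', 'GPU.1']
device_name = f'GPU.{gpu_index}'   # note the dot

# ONNX Runtime: pick the DirectML adapter via provider options.
providers = [('DmlExecutionProvider', {'device_id': gpu_index}),
             'CPUExecutionProvider']  # fallback when the GPU provider is unavailable
# sess = ort.InferenceSession('Models/MOS3E26MCJPEG/onnx/model.onnx', providers=providers)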

BabbleApp/config.py

Lines changed: 3 additions & 1 deletion
@@ -33,10 +33,12 @@ class BabbleSettingsConfig(BaseModel):
     gui_osc_location: str = ""
     gui_multiply: int = 1
     gui_model_file: str = 'Models/MOS3E26MCJPEG/'
+    gui_backend: int = 0
     gui_use_gpu: bool = False
+    gui_gpu_index: int = 0
     gui_inference_threads: int = 1
     gui_use_red_channel: bool = False
-    calib_array: str = None
+    calib_array: str = ""
     gui_cam_resolution_x: int = 0
     gui_cam_resolution_y: int = 0
     gui_cam_framerate: int = 0
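The bare 0/1 backend ids now appear in three files; an IntEnum would name them while still serializing as plain ints, so existing configs keep validating. A sketch under that assumption (the enum and its member names are illustrative, not part of this commit):

from enum import IntEnum
from pydantic import BaseModel

class Backend(IntEnum):
    OPENVINO = 0
    ONNX = 1

class SettingsSketch(BaseModel):
    gui_backend: Backend = Backend.OPENVINO  # round-trips as the integer 0

# IntEnum compares equal to plain ints, so checks like
# `if self.backend == Backend.ONNX:` coexist with the existing `== 1` tests.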

BabbleApp/requirements.txt

Lines changed: 4 additions & 0 deletions
@@ -6,3 +6,7 @@ PySimpleGUI
 python_osc
 torch
 torchvision
+pydantic
+pyserial
+colorama
+winotify

pyproject.toml

Lines changed: 1 addition & 0 deletions
@@ -20,6 +20,7 @@ winotify = [
 ]
 onnxruntime = "^1.13.1"
 onnxruntime-directml = "^1.15.0"
+openvino = "^2023.0.1"
 colorama = "^0.4.6"
 taskipy = "^1.10.4"
 torch = [

test.py

Lines changed: 14 additions & 0 deletions
@@ -0,0 +1,14 @@
+import onnxruntime as ort
+from openvino.inference_engine import IECore
+import openvino.inference_engine as ovie
+'''
+print(ort.get_device()) # Returns the current device
+print(ort.get_all_providers())
+print(ort.get_available_providers())
+#sess = ort.InferenceSession(f'{self.model}onnx/model.onnx', self.opts, providers=['DmlExecutionProvider'])
+'''
+ie = IECore()
+print(ie.available_devices) # devices the Inference Engine can see, e.g. ['CPU', 'GPU']
+#print(ie.get_devices())
+#net = ie.read_network(model=f'{self.model}openvino/model.xml', weights=f'{self.model}openvino/model.bin')
+#self.sess = ie.load_network(network=net, device_name='CPU')
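Run standalone, the live lines print the devices OpenVINO can see; uncommenting the quoted block additionally lists ONNX Runtime's compiled-in and usable providers. The expectation (not output captured from this commit) is that DmlExecutionProvider only appears in get_available_providers() when the onnxruntime-directml build is installed.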
