Skip to content

Use Alternative MJPEG streamer instead of OpenCV #105

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 2 commits into from
Apr 18, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
77 changes: 44 additions & 33 deletions BabbleApp/camera.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,8 @@
from vivefacialtracker.vivetracker import ViveTracker
from vivefacialtracker.camera_controller import FTCameraController

from mjpeg_streamer import MJPEGVideoCapture

WAIT_TIME = 0.1
BUFFER_SIZE = 32768
MAX_RESOLUTION: int = 600
Expand Down Expand Up @@ -61,6 +63,7 @@ def __init__(
self.current_capture_source = config.capture_source
self.cv2_camera: "cv2.VideoCapture" = None
self.vft_camera: FTCameraController = None
self.http: bool = None

self.serial_connection = None
self.last_frame_time = time.time()
Expand Down Expand Up @@ -156,6 +159,8 @@ def run(self):
if self.cancellation_event.wait(WAIT_TIME):
return
if self.config.capture_source not in self.camera_list:
if "http://" in str(self.config.capture_source): self.http=True
else: self.http=False
self.current_capture_source = self.config.capture_source
else:
self.current_capture_source = get_camera_index_by_name(self.config.capture_source)
Expand All @@ -165,23 +170,27 @@ def run(self):
self.current_capture_source, cv2.CAP_FFMPEG
)
else:
self.cv2_camera = cv2.VideoCapture()
self.cv2_camera.open(self.current_capture_source)

if not self.settings.gui_cam_resolution_x == 0:
self.cv2_camera.set(
cv2.CAP_PROP_FRAME_WIDTH,
self.settings.gui_cam_resolution_x,
)
if not self.settings.gui_cam_resolution_y == 0:
self.cv2_camera.set(
cv2.CAP_PROP_FRAME_HEIGHT,
self.settings.gui_cam_resolution_y,
)
if not self.settings.gui_cam_framerate == 0:
self.cv2_camera.set(
cv2.CAP_PROP_FPS, self.settings.gui_cam_framerate
)
if not self.http:
self.cv2_camera = cv2.VideoCapture()
self.cv2_camera.open(self.current_capture_source)
else:
self.cv2_camera = MJPEGVideoCapture(self.current_capture_source)
self.cv2_camera.open()
if not self.http:
if not self.settings.gui_cam_resolution_x == 0:
self.cv2_camera.set(
cv2.CAP_PROP_FRAME_WIDTH,
self.settings.gui_cam_resolution_x,
)
if not self.settings.gui_cam_resolution_y == 0:
self.cv2_camera.set(
cv2.CAP_PROP_FRAME_HEIGHT,
self.settings.gui_cam_resolution_y,
)
if not self.settings.gui_cam_framerate == 0:
self.cv2_camera.set(
cv2.CAP_PROP_FPS, self.settings.gui_cam_framerate
)
should_push = False
else:
# We don't have a capture source to try yet, wait for one to show up in the GUI.
Expand Down Expand Up @@ -216,22 +225,24 @@ def get_camera_picture(self, should_push):
self.frame_number = self.frame_number + 1
elif self.cv2_camera is not None and self.cv2_camera.isOpened():
ret, image = self.cv2_camera.read() # MJPEG Stream reconnects are currently limited by the hard coded 30 second timeout time on VideoCapture.read(). We can get around this by recompiling OpenCV or using a custom MJPEG stream imp.
if not ret:
self.cv2_camera.set(cv2.CAP_PROP_POS_FRAMES, 0)
raise RuntimeError(lang._instance.get_string("error.frame"))
self.frame_number = self.cv2_camera.get(cv2.CAP_PROP_POS_FRAMES) + 1
else:
# Switching from a Vive Facial Tracker to a CV2 camera
return
self.FRAME_SIZE = image.shape
# Calculate FPS
current_frame_time = time.time() # Should be using "time.perf_counter()", not worth ~3x cycles?
delta_time = current_frame_time - self.last_frame_time
self.last_frame_time = current_frame_time
current_fps = 1 / delta_time if delta_time > 0 else 0
# Exponential moving average (EMA). ~1100ns savings, delicious..
self.fps = 0.02 * current_fps + 0.98 * self.fps
self.bps = image.nbytes * self.fps
if ret and image is not None:
if not ret:
if not self.http:
self.cv2_camera.set(cv2.CAP_PROP_POS_FRAMES, 0)
raise RuntimeError(lang._instance.get_string("error.frame"))
self.frame_number = self.cv2_camera.get(cv2.CAP_PROP_POS_FRAMES) + 1
else:
# Switching from a Vive Facial Tracker to a CV2 camera
return
self.FRAME_SIZE = image.shape
# Calculate FPS
current_frame_time = time.time() # Should be using "time.perf_counter()", not worth ~3x cycles?
delta_time = current_frame_time - self.last_frame_time
self.last_frame_time = current_frame_time
current_fps = 1 / delta_time if delta_time > 0 else 0
# Exponential moving average (EMA). ~1100ns savings, delicious..
self.fps = 0.02 * current_fps + 0.98 * self.fps
self.bps = image.nbytes * self.fps

if should_push:
self.push_image_to_queue(image, self.frame_number, self.fps)
Expand Down
105 changes: 105 additions & 0 deletions BabbleApp/mjpeg_streamer.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,105 @@
import requests
import numpy as np
import cv2
import threading
import time

class MJPEGVideoCapture:
    """Minimal drop-in replacement for ``cv2.VideoCapture`` that reads an
    HTTP MJPEG stream with ``requests`` on a background daemon thread.

    Implements only the subset of the VideoCapture API the app uses:
    ``open``, ``read``, ``isOpened``, ``release``, plus a ``get`` stub and
    an extra ``isPrimed`` helper.
    """

    # JPEG start-of-image / end-of-image markers used to delimit individual
    # frames inside the multipart MJPEG byte stream.
    _SOI = b"\xff\xd8"
    _EOI = b"\xff\xd9"

    def __init__(self, url):
        self.url = url
        self.session = requests.Session()
        self.stream = None
        self.byte_buffer = b""
        self.frame = None          # latest decoded frame (BGR ndarray) or None
        self.running = False       # True while the reader thread should keep going
        self.frame_ready = False   # True when `frame` holds a not-yet-read frame
        self.thread = None

    def open(self):
        """Start the background reader thread (idempotent)."""
        if not self.running:
            self.running = True
            self.thread = threading.Thread(target=self._update, daemon=True)
            self.thread.start()

    def _update(self):
        """Reader loop: pull bytes, carve out complete JPEGs, keep the newest frame."""
        while self.running:
            try:
                self.stream = self.session.get(self.url, stream=True, timeout=1)
                for chunk in self.stream.iter_content(chunk_size=1024):
                    if not self.running:
                        break
                    self.byte_buffer += chunk
                    # Drain every complete JPEG currently buffered so `frame`
                    # always ends up holding the most recent one.
                    while True:
                        start = self.byte_buffer.find(self._SOI)
                        if start == -1:
                            # No frame start in sight: everything buffered is
                            # garbage except a possible marker byte split
                            # across the chunk boundary — keep only that.
                            self.byte_buffer = self.byte_buffer[-1:]
                            break
                        # Anchor the end-marker search *after* the start marker
                        # so a stale EOI earlier in the buffer cannot produce
                        # an empty/negative slice and desynchronize framing.
                        end = self.byte_buffer.find(self._EOI, start + 2)
                        if end == -1:
                            # Frame incomplete: drop leading garbage, wait for
                            # more data.
                            self.byte_buffer = self.byte_buffer[start:]
                            break
                        jpg = self.byte_buffer[start:end + 2]
                        self.byte_buffer = self.byte_buffer[end + 2:]

                        image = np.frombuffer(jpg, dtype=np.uint8)
                        if image.size:
                            frame = cv2.imdecode(image, cv2.IMREAD_COLOR)
                            if frame is not None:
                                self.frame = frame  # always the latest frame
                                self.frame_ready = True
            except requests.RequestException:
                # Network error: back off briefly, then reconnect.
                time.sleep(0.1)

    def read(self, timeout=1.0):
        """Return ``(True, frame_copy)`` once a fresh frame is available.

        Polls at ~120 Hz until a new frame arrives or *timeout* seconds
        elapse, then returns ``(False, None)`` — mirroring the failure
        semantics of ``cv2.VideoCapture.read``.
        """
        deadline = time.time() + timeout
        while True:
            if self.frame is not None and self.frame_ready:
                self.frame_ready = False
                return True, self.frame.copy()
            if time.time() > deadline:
                return False, None
            time.sleep(1 / 120)

    def isOpened(self):
        """True while the reader thread has been started and not released."""
        return self.running

    def isPrimed(self):
        """True once at least one frame has been successfully decoded."""
        return self.frame is not None

    def release(self):
        """Stop the reader thread and drop all buffered state."""
        self.running = False
        if self.thread is not None:
            self.thread.join()
            self.thread = None
        self.stream = None
        self.frame = None
        self.byte_buffer = b""
        self.session.close()

    def get(self, item):
        """VideoCapture property stub; callers only use this for frame counting."""
        return 1


if __name__ == "__main__":
    # Manual smoke test: connect to a local MJPEG endpoint and display
    # frames until the window receives a 'q' keypress.
    capture = MJPEGVideoCapture("http://openiristracker.local")
    capture.open()

    while capture.isOpened():
        got_frame, frame = capture.read()
        if got_frame and frame is not None:
            cv2.imshow("MJPEG Stream", frame)

        if cv2.waitKey(1) & 0xFF == ord("q"):
            break

    capture.release()
    cv2.destroyAllWindows()