
Worked yesterday, today ValueError: Could not open '~/google-coral/examples-camera/all_models/mobilenet_ssd_v1_face_quant_postprocess_edgetpu.tflite'. #110

@lamachine

Description

For the last several days I have been modifying opencv/detect.py for three USB cameras, and all went well. I also modified it to just gather images with no inference. Yesterday I grabbed a couple thousand images, tried to post-process them, and got the above error. Today, same thing. I deleted and reinstalled the entire camera examples folder, including downloading all the models again. No joy.

Changing models (to v1, and to the face model) made no difference; same issue.

RPi4, Coral USB Accelerator plugged into the blue USB 3.0 ports, updated Raspbian, etc. (like I said, it worked yesterday).

Since it worked yesterday, I don't think it is a bug in this code, but I am at a loss as to what it could be. I am running the script from a separate folder, but I changed the model folder path to the correct one. See below.
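One thing I notice while re-reading the error (only a guess on my part): the path in the message still contains a literal `~`, and Python's file APIs do not expand `~` the way the shell does, so the interpreter would be looking for a directory literally named `~`. A quick sanity check along those lines:

```python
import os

model = ('~/google-coral/examples-camera/all_models/'
         'mobilenet_ssd_v1_face_quant_postprocess_edgetpu.tflite')

# Python takes the '~' literally, so this path is not the one the shell would resolve.
print(os.path.exists(model))                      # expected False
# os.path.expanduser() replaces '~' with the real home directory.
print(os.path.exists(os.path.expanduser(model)))  # True only if the file really exists
```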

This works:
```python
import cv2
import time
import datetime
import argparse
import os


def main():
    default_save_dir = './images'

    #for i in range (1,6):
    while True:
        cameras = (0, 2, 4)
        for camera in cameras:
            # Create a VideoCapture object
            cap = cv2.VideoCapture(camera)
            # Check if the camera is opened successfully
            if not cap.isOpened():
                print("Could not open camera")
                exit()
            # Capture a frame and save it to the images folder
            ret, frame = cap.read()
            file_name = ("image" + str(camera) + str(datetime.datetime.now()) + ".jpg")
            complete_name = os.path.join(default_save_dir, file_name)
            cv2.imwrite(complete_name, frame)

            cap.release()


if __name__ == '__main__':
    main()
```

This does not; the error is below.
```python
# camera indices were found with: v4l2-ctl --list-devices
import cv2
import time
import datetime
import argparse
import os

from pycoral.adapters.common import input_size
from pycoral.adapters.detect import get_objects
from pycoral.utils.dataset import read_label_file
from pycoral.utils.edgetpu import make_interpreter
from pycoral.utils.edgetpu import run_inference


def main():
    default_model_dir = '~/google-coral/examples-camera/all_models'
    default_model = 'mobilenet_ssd_v1_face_quant_postprocess_edgetpu.tflite'
    default_labels = 'coco_labels.txt'
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', help='.tflite model path',
                        default=os.path.join(default_model_dir, default_model))
    parser.add_argument('--labels', help='label file path',
                        default=os.path.join(default_model_dir, default_labels))
    parser.add_argument('--top_k', type=int, default=3,
                        help='number of categories with highest score to display')
    parser.add_argument('--camera_idx', type=int, default=0,
                        help='Index of which video source to use.')
    parser.add_argument('--threshold', type=float, default=0.1,
                        help='classifier score threshold')
    args = parser.parse_args()
    default_save_dir = './images'

    print('Loading {} with {} labels.'.format(args.model, args.labels))
    interpreter = make_interpreter(args.model)
    interpreter.allocate_tensors()
    labels = read_label_file(args.labels)
    inference_size = input_size(interpreter)

    #for i in range (1,6):
    while True:
        cameras = (0, 2, 4)
        for camera in cameras:
            # Create a VideoCapture object
            cap = cv2.VideoCapture(camera)
            # Check if the camera is opened successfully
            if not cap.isOpened():
                print("Could not open camera")
                exit()
            # Capture a frame and save the raw image
            ret, frame = cap.read()
            file_name = ("image" + str(camera) + str(datetime.datetime.now()) + ".jpg")
            complete_name = os.path.join(default_save_dir, file_name)
            cv2.imwrite(complete_name, frame)

            # Run inference on the frame and save the annotated result
            cv2_im_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            cv2_im_rgb = cv2.resize(cv2_im_rgb, inference_size)
            run_inference(interpreter, cv2_im_rgb.tobytes())
            objs = get_objects(interpreter, args.threshold)[:args.top_k]
            final = append_objs_to_img(frame, inference_size, objs, labels)
            file_name = ("final" + str(camera) + str(datetime.datetime.now()) + ".jpg")
            complete_name = os.path.join(default_save_dir, file_name)
            cv2.imwrite(complete_name, frame)

            cap.release()


def append_objs_to_img(cv2_im, inference_size, objs, labels):
    height, width, channels = cv2_im.shape
    scale_x, scale_y = width / inference_size[0], height / inference_size[1]
    for obj in objs:
        bbox = obj.bbox.scale(scale_x, scale_y)
        x0, y0 = int(bbox.xmin), int(bbox.ymin)
        x1, y1 = int(bbox.xmax), int(bbox.ymax)

        percent = int(100 * obj.score)
        label = '{}% {}'.format(percent, labels.get(obj.id, obj.id))

        cv2_im = cv2.rectangle(cv2_im, (x0, y0), (x1, y1), (0, 255, 0), 2)
        cv2_im = cv2.putText(cv2_im, label, (x0, y0 + 30),
                             cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255, 0, 0), 2)
    return cv2_im


if __name__ == '__main__':
    main()
```
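If the tilde really is the culprit, a minimal change would be something like the following (a sketch, not something I have verified end to end; the model name is just the v2 face model from the directory listing below):

```python
import os
from pycoral.utils.edgetpu import make_interpreter

default_model_dir = '~/google-coral/examples-camera/all_models'
default_model = 'mobilenet_ssd_v2_face_quant_postprocess_edgetpu.tflite'

# Expand '~' to the real home directory before handing the path to
# make_interpreter(); judging by the traceback, the path string is passed
# straight through to the TFLite loader, which takes it literally.
model_path = os.path.expanduser(os.path.join(default_model_dir, default_model))
interpreter = make_interpreter(model_path)
interpreter.allocate_tensors()
```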


Issue Type

Support

Operating System

Ubuntu

Coral Device

USB Accelerator

Other Devices

Raspberry Pi 4

Programming Language

No response

Relevant Log Output

```
python3 image_capture_ai.py 
Loading ~/google-coral/examples-camera/all_models/mobilenet_ssd_v1_face_quant_postprocess_edgetpu.tflite with ~/google-coral/examples-camera/all_models/coco_labels.txt labels.
Traceback (most recent call last):
  File "image_capture_ai.py", line 95, in <module>
    main()
  File "image_capture_ai.py", line 40, in main
    interpreter = make_interpreter(args.model)
  File "/usr/lib/python3/dist-packages/pycoral/utils/edgetpu.py", line 93, in make_interpreter
    model_path=model_path_or_content, experimental_delegates=delegates)
  File "/usr/lib/python3/dist-packages/tflite_runtime/interpreter.py", line 351, in __init__
    experimental_preserve_all_tensors))
ValueError: Could not open '~/google-coral/examples-camera/all_models/mobilenet_ssd_v1_face_quant_postprocess_edgetpu.tflite'.
pi@AshSjDemoPi40:~/Desktop/multiple-cameras-rpi $ dir ~/google-coral/examples-camera/all_models/
coco_labels.txt
imagenet_labels.txt
inat_bird_labels.txt
inat_insect_labels.txt
inat_plant_labels.txt
inception_v1_224_quant_edgetpu.tflite
inception_v1_224_quant.tflite
inception_v2_224_quant_edgetpu.tflite
inception_v2_224_quant.tflite
inception_v3_299_quant_edgetpu.tflite
inception_v3_299_quant.tflite
inception_v4_299_quant_edgetpu.tflite
inception_v4_299_quant.tflite
mobilenet_ssd_v1_coco_quant_postprocess_edgetpu.tflite
mobilenet_ssd_v1_coco_quant_postprocess.tflite
mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite
mobilenet_ssd_v2_coco_quant_postprocess.tflite
mobilenet_ssd_v2_face_quant_postprocess_edgetpu.tflite
mobilenet_ssd_v2_face_quant_postprocess.tflite
mobilenet_v1_1.0_224_quant_edgetpu.tflite
mobilenet_v1_1.0_224_quant_embedding_extractor_edgetpu.tflite
mobilenet_v1_1.0_224_quant_embedding_extractor.tflite
mobilenet_v1_1.0_224_quant.tflite
mobilenet_v2_1.0_224_inat_bird_quant_edgetpu.tflite
mobilenet_v2_1.0_224_inat_bird_quant.tflite
mobilenet_v2_1.0_224_inat_insect_quant_edgetpu.tflite
mobilenet_v2_1.0_224_inat_insect_quant.tflite
mobilenet_v2_1.0_224_inat_plant_quant_edgetpu.tflite
mobilenet_v2_1.0_224_inat_plant_quant.tflite
mobilenet_v2_1.0_224_quant_edgetpu.tflite
mobilenet_v2_1.0_224_quant.tflite
pet_labels.txt
```
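One more thing I notice re-reading the listing above: only mobilenet_ssd_v2_face_quant_postprocess_edgetpu.tflite appears there; the v1 face model my script defaults to is not in all_models at all, so that particular default would presumably fail even with the path fixed. A quick way to double-check which face models are really present (just an illustrative snippet):

```python
import glob
import os

model_dir = os.path.expanduser('~/google-coral/examples-camera/all_models')
# Print the face-detection models that are actually on disk.
for path in sorted(glob.glob(os.path.join(model_dir, '*face*edgetpu.tflite'))):
    print(os.path.basename(path))
```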
