
Commit b8d9760

Author: Antonio Vivace
Merge pull request #81 from CoderBotOrg/deployable
The Python application is now deployable from the ground up
2 parents 8e5becc + 3701741 · commit b8d9760

6 files changed: +104 additions, -39 deletions


.gitignore

Lines changed: 9 additions & 1 deletion
@@ -65,4 +65,12 @@ ehthumbs.db
 Thumbs.db
 
 # Swap files
-*.swp
+*.swp
+
+
+# Python3 Virtual Environment folders
+
+bin/
+lib/
+share/
+pyvenv.cfg
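
The new ignore entries show that the virtual environment is created directly in the repository root (bin/, lib/, share/, pyvenv.cfg). Together with the pinned requirements.txt added at the end of this diff, the "deployable from the ground up" setup presumably boils down to the following sketch; the bootstrap script and the main.py invocation are illustrative assumptions, not part of the commit:

# deploy_sketch.py - hypothetical bootstrap illustrating the implied setup flow
import subprocess
import sys

# Create the in-repo virtual environment (produces the folders ignored above).
subprocess.check_call([sys.executable, "-m", "venv", "."])
# Install the pinned dependencies from the new requirements.txt.
subprocess.check_call(["./bin/pip", "install", "-r", "requirements.txt"])
# Start the Flask application (assumed entry point).
subprocess.check_call(["./bin/python", "main.py"])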

audio.py

Lines changed: 28 additions & 28 deletions
@@ -42,9 +42,9 @@
 import re
 import sys
 
-from google.cloud import speech
-from google.cloud.speech import enums
-from google.cloud.speech import types
+#from google.cloud import speech
+#from google.cloud.speech import enums
+#from google.cloud.speech import types
 import pyaudio
 from six.moves import queue
 # [END import_libraries]
@@ -79,7 +79,7 @@ def __init__(self):
         except Exception as e:
             logging.info("Audio: input stream not available")
 
-        self._google_speech_client = speech.SpeechClient()
+        #self._google_speech_client = speech.SpeechClient()
 
     def exit(self):
         pass
@@ -201,32 +201,32 @@ def speech_recog(self, model):
         logging.info("recog text: " + recog_text)
         return recog_text
 
-    def speech_recog_google(self, locale):
-        config = types.RecognitionConfig(
-            encoding=enums.RecognitionConfig.AudioEncoding.LINEAR16,
-            sample_rate_hertz=RATE,
-            language_code=locale)
-        streaming_config = types.StreamingRecognitionConfig(
-            config=config,
-            interim_results=False,
-            single_utterance=True)
-
-        t1 = time.time()
-        with self.stream_in as stream:
-            audio_generator = stream.generator()
-            requests = (types.StreamingRecognizeRequest(audio_content=content)
-                        for content in audio_generator)
-
-            responses = self._google_speech_client.streaming_recognize(streaming_config, requests)
+    # def speech_recog_google(self, locale):
+    #     config = types.RecognitionConfig(
+    #         encoding=enums.RecognitionConfig.AudioEncoding.LINEAR16,
+    #         sample_rate_hertz=RATE,
+    #         language_code=locale)
+    #     streaming_config = types.StreamingRecognitionConfig(
+    #         config=config,
+    #         interim_results=False,
+    #         single_utterance=True)
+    #
+    #     t1 = time.time()
+    #     with self.stream_in as stream:
+    #         audio_generator = stream.generator()
+    #         requests = (types.StreamingRecognizeRequest(audio_content=content)
+    #                     for content in audio_generator)
+    #
+    #         responses = self._google_speech_client.streaming_recognize(streaming_config, requests)
 
         # Now, put the transcription responses to use.
-        for response in responses:
-            if time.time() - t1 > 10:
-                return ""
-            if response.results:
-                result = response.results[0]
-                if result.is_final:
-                    return result.alternatives[0].transcript
+        # for response in responses:
+        #     if time.time() - t1 > 10:
+        #         return ""
+        #     if response.results:
+        #         result = response.results[0]
+        #         if result.is_final:
+        #             return result.alternatives[0].transcript
 
 class MicrophoneStream(object):
     """Opens a recording stream as a generator yielding the audio chunks."""

camera.py

Lines changed: 5 additions & 5 deletions
@@ -84,11 +84,11 @@ def __init__(self):
             self._photos.append({'name': filename})
         self.save_photo_metadata()
 
-        self._cnn_classifiers = {}
-        cnn_model = config.Config.get().get("cnn_default_model", "")
-        if cnn_model != "":
-            self._cnn_classifiers[cnn_model] = CNNManager.get_instance().load_model(cnn_model)
-            self._cnn_classifier_default = self._cnn_classifiers[cnn_model]
+        #self._cnn_classifiers = {}
+        #cnn_model = config.Config.get().get("cnn_default_model", "")
+        #if cnn_model != "":
+        #    self._cnn_classifiers[cnn_model] = CNNManager.get_instance().load_model(cnn_model)
+        #    self._cnn_classifier_default = self._cnn_classifiers[cnn_model]
 
         self._camera.grab_start()
         self._image_cv = self.get_image()

cv/image.py

Lines changed: 1 addition & 1 deletion
@@ -34,7 +34,7 @@
 }
 
 try:
-    ocr = cv2.text.OCRTesseract_create("/usr/share/tesseract-ocr/", "eng", tesseract_whitelists['unspec'], 0, cv2.text.OCR_LEVEL_TEXTLINE)
+    ocr = cv2.text.OCRTesseract_create(Null, "eng", tesseract_whitelists['unspec'], 0, cv2.text.OCR_LEVEL_TEXTLINE)
 except:
     logging.info("tesseract not availabe")

main.py

Lines changed: 37 additions & 4 deletions
@@ -47,7 +47,7 @@
 
 sh = logging.StreamHandler()
 # add a rotating handler
-fh = logging.handlers.RotatingFileHandler('/home/pi/coderbot/logs/coderbot.log', maxBytes=1000000, backupCount=5)
+fh = logging.handlers.RotatingFileHandler('./logs/coderbot.log', maxBytes=1000000, backupCount=5)
 formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
 sh.setFormatter(formatter)
 fh.setFormatter(formatter)
@@ -90,8 +90,7 @@ def handle_home():
         config=app.bot_config,
         program_level=app.bot_config.get("prog_level", "std"),
         cam=cam != None,
-        cnn_model_names=json.dumps([[name] for name in cnn.get_models().keys()]))
-
+        cnn_model_names = json.dumps({}))
 @app.route("/config", methods=["POST"])
 def handle_config():
     Config.write(request.form)
@@ -185,6 +184,21 @@ def video_stream(a_cam):
         yield frame
         yield "\r\n"
 
+@app.route("/video")
+def handle_video():
+    return """
+<html>
+    <head>
+        <style type=text/css>
+            body { background-image: url(/video/stream); background-repeat:no-repeat; background-position:center top; background-attachment:fixed; height:100% }
+        </style>
+    </head>
+    <body>
+        &nbsp;
+    </body>
+</html>
+"""
+
 @app.route("/video/stream")
 def handle_video_stream():
     try:
@@ -196,6 +210,25 @@ def handle_video_stream():
     except:
         pass
 
+def video_stream_cv(a_cam):
+    while not app.shutdown_requested:
+        frame = a_cam.get_image_cv_jpeg()
+        yield ("--BOUNDARYSTRING\r\n" +
+               "Content-type: image/jpeg\r\n" +
+               "Content-Length: " + str(len(frame)) + "\r\n\r\n" +
+               frame + "\r\n")
+
+@app.route("/video/stream/cv")
+def handle_video_stream_cv():
+    try:
+        h = Headers()
+        h.add('Age', 0)
+        h.add('Cache-Control', 'no-cache, private')
+        h.add('Pragma', 'no-cache')
+        return Response(video_stream_cv(cam), headers=h, mimetype="multipart/x-mixed-replace; boundary=--BOUNDARYSTRING")
+    except:
+        pass
+
 @app.route("/photos", methods=["GET"])
 def handle_photos():
     logging.info("photos")
@@ -350,7 +383,7 @@ def run_server():
     except picamera.exc.PiCameraError:
         logging.error("Camera not present")
 
-    cnn = CNNManager.get_instance()
+    #cnn = CNNManager.get_instance()
     event = EventManager.get_instance("coderbot")
     conv = Conversation.get_instance()
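
The new /video page and the /video/stream/cv route use the long-lived multipart/x-mixed-replace ("MJPEG") response pattern: the generator yields one JPEG frame per part and the browser replaces the displayed image in place. A minimal self-contained sketch of the same pattern, using a generic Flask app and a placeholder frame source instead of CoderBot's camera object, looks like this:

# Minimal MJPEG streaming sketch; get_jpeg_frame() is a placeholder frame source.
from flask import Flask, Response

app = Flask(__name__)

def get_jpeg_frame():
    # Placeholder: return one JPEG-encoded image as bytes (e.g. grabbed from a camera).
    with open("frame.jpg", "rb") as f:
        return f.read()

def mjpeg_generator():
    while True:
        frame = get_jpeg_frame()
        yield (b"--BOUNDARYSTRING\r\n"
               b"Content-Type: image/jpeg\r\n"
               b"Content-Length: " + str(len(frame)).encode() + b"\r\n\r\n" +
               frame + b"\r\n")

@app.route("/stream")
def stream():
    return Response(mjpeg_generator(),
                    mimetype="multipart/x-mixed-replace; boundary=--BOUNDARYSTRING")

In main.py the frame bytes come from a_cam.get_image_cv_jpeg(), and the extra Age/Cache-Control/Pragma headers keep browsers from caching individual frames.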

requirements.txt

Lines changed: 24 additions & 0 deletions
@@ -0,0 +1,24 @@
+apiai==1.2.3
+Babel==2.5.3
+click==6.7
+Flask==1.0.2
+Flask-Babel==0.11.2
+Flask-Cors==3.0.4
+itsdangerous==0.24
+Jinja2==2.10
+MarkupSafe==1.0
+numpy==1.14.3
+opencv-contrib-python==3.4.0.12
+picamera==1.13
+pigpio==1.40.post1
+Pillow==5.1.0
+pkg-resources==0.0.0
+protobuf==3.0.0
+PyAudio==0.2.11
+pycairo==1.17.0
+Pypubsub==4.0.0
+pytz==2018.4
+six==1.11.0
+smbus2==0.2.0
+tensorflow==0.11.0
+Werkzeug==0.14.1
