Commit 1ee04d3

Comment Google Cloud integration for Speech Recognition, disable related features
1 parent 9ad0294 commit 1ee04d3

File tree

1 file changed: +28 −28 lines changed


audio.py

Lines changed: 28 additions & 28 deletions
@@ -42,9 +42,9 @@
 import re
 import sys

-from google.cloud import speech
-from google.cloud.speech import enums
-from google.cloud.speech import types
+#from google.cloud import speech
+#from google.cloud.speech import enums
+#from google.cloud.speech import types
 import pyaudio
 from six.moves import queue
 # [END import_libraries]
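The commit disables the dependency by commenting the imports out. For comparison only (not part of this commit), a minimal sketch of making the same imports optional with a guard; GOOGLE_SPEECH_AVAILABLE is an assumed flag name that does not exist in audio.py:

# Hypothetical alternative, not in this commit: try the Google Cloud import and
# fall back cleanly when the google-cloud-speech package is not installed.
try:
    from google.cloud import speech
    from google.cloud.speech import enums, types
    GOOGLE_SPEECH_AVAILABLE = True          # assumed flag name, not in audio.py
except ImportError:
    speech = enums = types = None           # local recognition keeps working
    GOOGLE_SPEECH_AVAILABLE = False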
@@ -79,7 +79,7 @@ def __init__(self):
         except Exception as e:
             logging.info("Audio: input stream not available")

-        self._google_speech_client = speech.SpeechClient()
+        #self._google_speech_client = speech.SpeechClient()

     def exit(self):
         pass
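With a guard of that kind in place (again a sketch that relies on the hypothetical GOOGLE_SPEECH_AVAILABLE flag above), the client construction in __init__ could be made conditional instead of commented out:

# Sketch only: create the client when the library imported, otherwise keep a
# sentinel so the rest of the class can test whether Google recognition is usable.
if GOOGLE_SPEECH_AVAILABLE:
    self._google_speech_client = speech.SpeechClient()
else:
    self._google_speech_client = None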
@@ -201,32 +201,32 @@ def speech_recog(self, model):
         logging.info("recog text: " + recog_text)
         return recog_text

-    def speech_recog_google(self, locale):
-        config = types.RecognitionConfig(
-            encoding=enums.RecognitionConfig.AudioEncoding.LINEAR16,
-            sample_rate_hertz=RATE,
-            language_code=locale)
-        streaming_config = types.StreamingRecognitionConfig(
-            config=config,
-            interim_results=False,
-            single_utterance=True)
-
-        t1 = time.time()
-        with self.stream_in as stream:
-            audio_generator = stream.generator()
-            requests = (types.StreamingRecognizeRequest(audio_content=content)
-                        for content in audio_generator)
-
-            responses = self._google_speech_client.streaming_recognize(streaming_config, requests)
+    # def speech_recog_google(self, locale):
+    #     config = types.RecognitionConfig(
+    #         encoding=enums.RecognitionConfig.AudioEncoding.LINEAR16,
+    #         sample_rate_hertz=RATE,
+    #         language_code=locale)
+    #     streaming_config = types.StreamingRecognitionConfig(
+    #         config=config,
+    #         interim_results=False,
+    #         single_utterance=True)
+    #
+    #     t1 = time.time()
+    #     with self.stream_in as stream:
+    #         audio_generator = stream.generator()
+    #         requests = (types.StreamingRecognizeRequest(audio_content=content)
+    #                     for content in audio_generator)
+    #
+    #         responses = self._google_speech_client.streaming_recognize(streaming_config, requests)

             # Now, put the transcription responses to use.
-            for response in responses:
-                if time.time() - t1 > 10:
-                    return ""
-                if response.results:
-                    result = response.results[0]
-                    if result.is_final:
-                        return result.alternatives[0].transcript
+            # for response in responses:
+            #     if time.time() - t1 > 10:
+            #         return ""
+            #     if response.results:
+            #         result = response.results[0]
+            #         if result.is_final:
+            #             return result.alternatives[0].transcript

 class MicrophoneStream(object):
     """Opens a recording stream as a generator yielding the audio chunks."""
