As mentioned by @Carcigenicate and @AzyCrw4282 in the comments, connect is not called automatically. I am still unsure how the websocket gets connected if connect is never called, because I could see messages coming in. In any case, I made the following changes, and after that I was able to see the transcription in the console.
import base64
import json

from channels.generic.websockets import WebsocketConsumer  # Channels 1.x generic consumer
from google.cloud.speech import enums, types

# SpeechClientBridge is the helper that pushes audio chunks into Google's
# streaming recognizer; it is assumed to be defined/imported elsewhere
# (it comes from the Twilio media-streams sample this code is based on).

config = types.RecognitionConfig(
    encoding=enums.RecognitionConfig.AudioEncoding.MULAW,
    sample_rate_hertz=8000,
    language_code='en-US',
)
streaming_config = types.StreamingRecognitionConfig(config=config, interim_results=True)


def on_transcription_response(response):
    # Print the best transcription of the first result, if any.
    if not response.results:
        return
    result = response.results[0]
    if not result.alternatives:
        return
    transcription = result.alternatives[0].transcript
    print("Transcription: " + transcription)


bridge = None


def set_bridge():
    # Create the bridge lazily on the first received message,
    # since connect() is not called automatically.
    global bridge
    if bridge is None:
        bridge = SpeechClientBridge(streaming_config, on_transcription_response)


class MediaStreamConsumer(WebsocketConsumer):
    def connect(self, message, **kwargs):
        self.message.reply_channel.send({"accept": True})

    def raw_receive(self, message, **kwargs):
        set_bridge()
        data = json.loads(message['text'])
        if data["event"] in ("connected", "start"):
            print(f"Media WS: Received event '{data['event']}': {message}")
        if data["event"] == "media":
            # The audio payload arrives base64-encoded; decode it and
            # hand the raw mulaw bytes to the speech bridge.
            media = data["media"]
            chunk = base64.b64decode(media["payload"])
            bridge.add_request(chunk)
        if data["event"] == "stop":
            print(f"Media WS: Received event 'stop': {message}")
            print("Stopping...")

    def disconnect(self, message, **kwargs):
        bridge.terminate()
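
For completeness, here is a minimal sketch of what a SpeechClientBridge-style helper could look like, modelled on the Twilio media-streams sample. The class name and method names match the calls above, but the body is my assumption rather than the exact helper used: it queues audio chunks and feeds them to google-cloud-speech's streaming_recognize on a background thread, passing each response to the callback.

import queue
import threading

from google.cloud import speech


class SpeechClientBridge:
    # Assumed minimal implementation, not the original helper.
    def __init__(self, streaming_config, on_response):
        self._queue = queue.Queue()
        self._streaming_config = streaming_config
        self._on_response = on_response
        self._ended = False
        # Run the streaming recognition loop in the background so
        # add_request() never blocks the websocket consumer.
        self._thread = threading.Thread(target=self._run, daemon=True)
        self._thread.start()

    def add_request(self, chunk):
        # Called from raw_receive() with raw mulaw bytes.
        self._queue.put(chunk)

    def terminate(self):
        # Stop the request generator so streaming_recognize can finish.
        self._ended = True
        self._queue.put(None)

    def _request_generator(self):
        while not self._ended:
            chunk = self._queue.get()
            if chunk is None:
                return
            yield speech.types.StreamingRecognizeRequest(audio_content=chunk)

    def _run(self):
        client = speech.SpeechClient()
        responses = client.streaming_recognize(
            self._streaming_config, self._request_generator()
        )
        for response in responses:
            self._on_response(response)

If the real helper starts its response loop differently (for example via an explicit start() call instead of starting a thread in __init__), adjust the sketch accordingly.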