"""Transcribe a local FLAC file with Google Cloud Speech-to-Text and print
each word with its start/end time offsets, plus total wall-clock runtime."""
import io
import os
import time

# Imports the Google Cloud client library
from google.cloud import speech
from google.cloud.speech import enums
from google.cloud.speech import types

# Wall-clock timer for the whole run; must NOT be shadowed below.
start_time = time.time()

# Instantiates a client
client = speech.SpeechClient()

# The name of the audio file to transcribe.
# NOTE(review): the second component is an absolute path, so os.path.join
# discards os.path.dirname(__file__) entirely — the script always reads from
# /home/kishlay/... regardless of where it lives. Confirm this is intended.
file_name = os.path.join(
    os.path.dirname(__file__),
    '/home/kishlay/Documents/DeepDive/pythonCloudSpeech',
    'obamaLong.flac')

# Loads the audio into memory
with io.open(file_name, 'rb') as audio_file:
    content = audio_file.read()

audio = types.RecognitionAudio(content=content)
config = types.RecognitionConfig(
    # encoding=enums.RecognitionConfig.AudioEncoding.FLAC,
    # sample_rate_hertz=16000,
    language_code='en-US',
    enable_word_time_offsets=True)

# Detects speech in the audio file (synchronous recognize call).
response = client.recognize(config, audio)

for result in response.results:
    # Use the top-ranked alternative for this result.
    alternative = result.alternatives[0]
    print(u'Transcript: {}'.format(alternative.transcript))
    print('Confidence: {}'.format(alternative.confidence))

    for word_info in alternative.words:
        word = word_info.word
        # BUGFIX: the originals were named start_time/end_time, which
        # clobbered the script-level start_time timer and made the
        # total_time computation below subtract a Duration from a float.
        word_start = word_info.start_time
        word_end = word_info.end_time
        # Duration protos expose seconds + nanos; combine into float seconds.
        print('Word: {}, start_time: {}, end_time: {}'.format(
            word,
            word_start.seconds + word_start.nanos * 1e-9,
            word_end.seconds + word_end.nanos * 1e-9))

    total_time = time.time() - start_time
    print("seconds " + str(total_time))
    print("minute " + str((total_time / 60)))
    print(type(result))
    print("_________________________________________________________")
Location of the ngrok config YAML file: /home/example/.ngrok2/ngrok.yml

Contents of the config file:

authtoken: 4nq9771bPxe8ctg7LKr_2ClH7Y15Zqe4bWLWF9p
tunnels:
  app-foo:
    addr: 80
    proto: http
    host_header: app-foo.dev
  app-bar:
    addr: 80
    proto: http
    host_header: app-bar.dev

To start ngrok using this config file, run: ngrok start --all
Comments
Post a Comment