# Source scraped from a Hugging Face Spaces page — original metadata:
#   author: cadasme
#   commit: 87d4428 — "first commit: working whisper"
#   size:   1.89 kB
# Import the required libraries
import streamlit as st
import whisper
import speech_recognition as sr
import os
def transcribe_whisper(model_name, file_path):
    """Transcribe an audio file with an OpenAI Whisper model.

    Args:
        model_name: Whisper model size to load ('tiny', 'base', 'small', ...).
        file_path: Path to the audio file to transcribe.

    Returns:
        The transcribed text as a string.
    """
    # The model is loaded on every call; slow for large models, but keeps
    # this function completely stateless.
    whisper_model = whisper.load_model(model_name)
    transcription = whisper_model.transcribe(file_path)
    return transcription["text"]
def transcribe_speech_recognition(file_path):
    """Transcribe an audio file using the Google Speech Recognition API.

    Args:
        file_path: Path to the audio file (sr.AudioFile supports WAV/AIFF/FLAC).

    Returns:
        The transcribed text as a string.

    Raises:
        sr.UnknownValueError: if the speech could not be understood.
        sr.RequestError: if the API request failed.
    """
    r = sr.Recognizer()
    # BUG FIX: adjust_for_ambient_noise() reads (and consumes) audio from the
    # stream it calibrates on, so recording from the same handle would drop
    # roughly the first second of speech. Calibrate on one pass, then reopen
    # the file so record() captures the audio from the very beginning.
    with sr.AudioFile(file_path) as source:
        r.adjust_for_ambient_noise(source)
    with sr.AudioFile(file_path) as source:
        audio = r.record(source)
    return r.recognize_google(audio)
# Streamlit App: upload an audio file and transcribe it with either
# OpenAI Whisper (local model) or the Google Speech Recognition API.
st.title('Transcriptor de Audio')

uploaded_file = st.file_uploader("Sube tu archivo de audio para transcribir", type=['wav', 'mp3'])

if uploaded_file is not None:
    file_details = {"FileName": uploaded_file.name, "FileType": uploaded_file.type, "FileSize": uploaded_file.size}
    st.write(file_details)

    # Make sure the temp directory exists (idempotent).
    os.makedirs('temp', exist_ok=True)

    # SECURITY: the upload name is untrusted — strip any directory
    # components so a crafted filename like "../../x.wav" cannot escape
    # the temp/ directory.
    safe_name = os.path.basename(uploaded_file.name)
    saved_path = os.path.join("temp", safe_name)
    with open(saved_path, "wb") as f:
        f.write(uploaded_file.getbuffer())

    # FIX: restored mis-encoded (mojibake) Spanish text in the UI strings.
    st.write("Archivo de audio cargado correctamente. Por favor, selecciona el método de transcripción.")

    transcription_method = st.selectbox('Escoge el método de transcripción', ('OpenAI Whisper', 'Google Speech API'))

    # model_name is only defined (and only needed) on the Whisper path.
    if transcription_method == 'OpenAI Whisper':
        model_name = st.selectbox('Escoge el modelo de Whisper', ('base', 'small', 'medium', 'large', 'tiny'))

    if st.button('Transcribir'):
        if transcription_method == 'OpenAI Whisper':
            transcript = transcribe_whisper(model_name, saved_path)
        else:
            # NOTE(review): sr.AudioFile only reads WAV/AIFF/FLAC, so an mp3
            # upload will fail on this path — consider converting first.
            transcript = transcribe_speech_recognition(saved_path)
        st.write(transcript)