Added ZeroGPU support
app.py CHANGED

@@ -1,6 +1,7 @@
 import gradio as gr
 import numpy as np
 import torch
+import spaces
 from peft import PeftModel, PeftConfig
 from transformers import WhisperForConditionalGeneration, WhisperTokenizer, WhisperProcessor, AutomaticSpeechRecognitionPipeline
 
@@ -19,6 +20,7 @@ forced_decoder_ids = processor.get_decoder_prompt_ids(language="english", task=t
 
 pipeline = AutomaticSpeechRecognitionPipeline(model=model, tokenizer=tokenizer, feature_extractor=feature_extractor)
 
+@spaces.GPU
 def transcribe(audio):
     if audio is None:
         return "Espera a que la grabación termine de subirse al servidor !! Intentelo de nuevo en unos segundos"
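
For context, a minimal sketch of the ZeroGPU pattern this commit applies: import the `spaces` package and decorate the GPU-bound entry point with `@spaces.GPU`, so a ZeroGPU device is allocated only while that call runs. The function body below is a placeholder, not the Space's actual Whisper pipeline.

 import spaces
 import torch

 @spaces.GPU  # requests a ZeroGPU device for the duration of each call
 def transcribe(audio):
     # Placeholder body: the real app runs the Whisper ASR pipeline here.
     device = "cuda" if torch.cuda.is_available() else "cpu"
     return f"running on {device}"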