Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -6,6 +6,7 @@ from transformers import pipeline
|
|
| 6 |
from huggingface_hub import login
|
| 7 |
from streamlit.components.v1 import html
|
| 8 |
import pandas as pd
|
|
|
|
| 9 |
|
| 10 |
# Retrieve the token from environment variables
|
| 11 |
hf_token = os.environ.get("HF_TOKEN")
|
|
@@ -58,9 +59,9 @@ st.write("This model will score your reviews in your CSV file and generate a rep
|
|
| 58 |
@st.cache_resource
|
| 59 |
def load_models():
|
| 60 |
# Load the scoring model via pipeline.
|
| 61 | - score_pipe = pipeline("text-classification", model="mixedbread-ai/mxbai-rerank-base-v1")
|
| 62 |
# Load the Gemma text generation pipeline.
|
| 63 | - gemma_pipe = pipeline("text-generation", model="google/gemma-3-1b-it")
|
| 64 |
return score_pipe, gemma_pipe
|
| 65 |
|
| 66 |
score_pipe, gemma_pipe = load_models()
|
|
|
|
| 6 |
from huggingface_hub import login
|
| 7 |
from streamlit.components.v1 import html
|
| 8 |
import pandas as pd
|
| 9 | + import torch
|
| 10 |
|
| 11 |
# Retrieve the token from environment variables
|
| 12 |
hf_token = os.environ.get("HF_TOKEN")
|
|
|
|
| 59 |
@st.cache_resource
|
| 60 |
def load_models():
|
| 61 |
# Load the scoring model via pipeline.
|
| 62 | + score_pipe = pipeline("text-classification", model="mixedbread-ai/mxbai-rerank-base-v1", device=0)
|
| 63 |
# Load the Gemma text generation pipeline.
|
| 64 | + gemma_pipe = pipeline("text-generation", model="google/gemma-3-1b-it", device=0)
|
| 65 |
return score_pipe, gemma_pipe
|
| 66 |
|
| 67 |
score_pipe, gemma_pipe = load_models()
|