Update app.py
app.py (CHANGED)
@@ -274,12 +274,10 @@ def main():
 
     def process_with_gemma(prompt):
         try:
-            tokenizer = AutoTokenizer.from_pretrained("unsloth/gemma-3-1b-it")
             pipe = pipeline(
                 "text-generation",
-                model="unsloth/gemma-3-1b-it",
+                model="unsloth/Llama-3.2-1B-Instruct",
                 device="cuda" if torch.cuda.is_available() else -1,
-                tokenizer=tokenizer,
                 torch_dtype=torch.bfloat16,
             )
             result = pipe(prompt, max_new_tokens=256, return_full_text=False)
@@ -287,7 +285,7 @@ def main():
         except Exception as e:
             return None, str(e)
 
-    status_text.markdown("**📝 Generating report with Gemma...**")
+    status_text.markdown("**📝 Generating report with Llama...**")
     progress_bar.progress(80)
 
     raw_result, error = process_with_gemma(prompt)
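For context, here is a minimal, self-contained sketch of the helper as it reads after this commit. It is an illustration only: it assumes the app imports pipeline from transformers and torch, the Streamlit calls (status_text, progress_bar) are omitted, and the success-path return of result[0]["generated_text"], None is an assumed counterpart to the return None, str(e) branch visible in the diff.

import torch
from transformers import pipeline


def process_with_gemma(prompt):
    """Generate text with the swapped-in Llama checkpoint; returns (text, error)."""
    try:
        pipe = pipeline(
            "text-generation",
            model="unsloth/Llama-3.2-1B-Instruct",
            device="cuda" if torch.cuda.is_available() else -1,
            torch_dtype=torch.bfloat16,
        )
        result = pipe(prompt, max_new_tokens=256, return_full_text=False)
        # Assumption: return the generated string plus a None error, mirroring
        # the (value, error) pattern of the except branch shown in the diff.
        return result[0]["generated_text"], None
    except Exception as e:
        return None, str(e)


if __name__ == "__main__":
    text, error = process_with_gemma("Write a one-line status report.")
    print(error if error else text)

Note that the helper is still named process_with_gemma even though it now loads a Llama checkpoint; that naming is carried over unchanged from the diff.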