Update app.py
app.py CHANGED
@@ -102,8 +102,10 @@ Your task:
 Now produce the final report only, without reiterating these instructions or the query."""
 
     messages = [
-
-
+        [
+            {"role": "system", "content": [{"type": "text", "text": system_message},]},
+            {"role": "user", "content": [{"type": "text", "text": user_content},]},
+        ],
     ]
 
     return messages
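
For context on the new format: the hunk above switches the prompt-building code (presumably build_prompt, which the second hunk calls) to the typed-content chat layout used with Gemma-3 instruct models in transformers. A minimal sketch of how build_prompt could look with this change applied follows; the exact wording of system_message and user_content is not part of this diff and is assumed here.

def build_prompt(query_input, sampled_docs):
    # Assumed wording -- only the message structure below is confirmed by the diff.
    system_message = (
        "You write concise, well-structured business reports from tweets "
        "annotated with sentiment scores."
    )
    user_content = f"""**Query Question:**
"{query_input}"

**Tweets with sentiment score:**
{sampled_docs}

Now produce the final report only, without reiterating these instructions or the query."""

    # A batch holding one conversation; each message carries a list of typed
    # text parts, matching the chat format Gemma-3 instruct models expect.
    messages = [
        [
            {"role": "system", "content": [{"type": "text", "text": system_message},]},
            {"role": "user", "content": [{"type": "text", "text": user_content},]},
        ],
    ]
    return messages
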
@@ -263,30 +265,15 @@ def main():
     else:
         sampled_docs = scored_docs
 
-    prompt = [
-        {"role": "user", "content": f"""
-Generate a well-structured business report based on tweets from twitter/X with sentiment that answers Query Question and meets following Requirements.
-**Requirements:**
-- Include an introduction, key insights, and a conclusion.
-- Ensure the analysis is concise and does not cut off abruptly.
-- Summarize major findings without repeating verbatim.
-- Cover both positive and negative aspects, highlighting trends in user sentiment.
-**Query Question:**
-"{query_input}"
-**Tweets with sentiment score:**
-{sampled_docs}
-Please ensure the report is complete and reaches approximately 300 words.
-"""}
-    ]
+    prompt = build_prompt(query_input, sampled_docs)
 
 def process_with_gemma(prompt):
     try:
-        # tokenizer = AutoTokenizer.from_pretrained("unsloth/gemma-3-1b-it")
         pipe = pipeline(
             "text-generation",
             model="unsloth/gemma-3-1b-it",
-            device=
-
+            device="cuda" if torch.cuda.is_available() else -1,
+            torch_dtype=torch.bfloat16,
         )
         result = pipe(prompt, max_new_tokens=256, repetition_penalty=1.2, do_sample=True, temperature=0.5, return_full_text=False)
         return result, None
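
Read together with the hunk above, process_with_gemma now picks its device at runtime and loads the model in bfloat16. Below is a sketch of what the whole function plausibly looks like after this change; the imports and the except branch are not visible in the diff and are assumptions.

import torch
from transformers import pipeline


def process_with_gemma(prompt):
    try:
        pipe = pipeline(
            "text-generation",
            model="unsloth/gemma-3-1b-it",
            # GPU when available, otherwise CPU (-1); bfloat16 keeps memory use down.
            device="cuda" if torch.cuda.is_available() else -1,
            torch_dtype=torch.bfloat16,
        )
        result = pipe(
            prompt,
            max_new_tokens=256,
            repetition_penalty=1.2,
            do_sample=True,
            temperature=0.5,
            return_full_text=False,
        )
        return result, None
    except Exception as e:
        # Assumed error path: mirrors the (result, error) pair of the success return.
        return None, str(e)

One design note: constructing the pipeline inside the function reloads the 1B model on every call; hoisting pipe to module scope, or caching it, would avoid that cost when the Space serves repeated queries.
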
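
Finally, a rough end-to-end usage sketch under the same assumptions, with query_input and sampled_docs defined as in main(). The indexing into result assumes the list-of-lists shape the pipeline returns for a batched chat input with return_full_text=False; the exact shape can vary across transformers versions.

prompt = build_prompt(query_input, sampled_docs)
result, error = process_with_gemma(prompt)

if error is not None:
    print(f"Generation failed: {error}")
else:
    # One batch entry, one generation per entry; only newly generated text is kept.
    report = result[0][0]["generated_text"]
    print(report)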