sammy786 commited on
Commit
46daeac
·
verified ·
1 Parent(s): 84f4f37

Create llm_explainer.py

Browse files
Files changed (1) hide show
  1. utils/llm_explainer.py +337 -0
utils/llm_explainer.py ADDED
@@ -0,0 +1,337 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ LLM-powered explanation generator for RewardPilot recommendations.
3
+ Uses Hugging Face Inference API with Llama 3.2 for natural language explanations.
4
+ """
5
+
6
+ from huggingface_hub import InferenceClient
7
+ import os
8
+ from typing import Dict, List, Optional
9
+ import logging
10
+
11
+ logging.basicConfig(level=logging.INFO)
12
+ logger = logging.getLogger(__name__)
13
+
14
+
15
class LLMExplainer:
    """Generate natural language explanations for credit card recommendations using LLM.

    All public methods degrade gracefully: when no Hugging Face client could be
    created (missing HF_TOKEN or init failure) they return rule-based fallback
    text instead of raising.
    """

    def __init__(self, model: str = "meta-llama/Llama-3.2-3B-Instruct"):
        """
        Initialize LLM explainer with Hugging Face Inference API.

        Args:
            model: HuggingFace model ID to use for generation
        """
        self.model = model
        # None signals "fallback mode" to every generation method below.
        self.client = None

        # Try to initialize with token; an empty/missing HF_TOKEN is not an
        # error — it simply keeps the explainer in fallback mode.
        hf_token = os.getenv("HF_TOKEN", "")
        if hf_token:
            try:
                self.client = InferenceClient(token=hf_token)
                logger.info(f"✅ LLM Explainer initialized with model: {model}")
            except Exception as e:
                logger.warning(f"⚠️ Could not initialize HF client: {e}")
                self.client = None
        else:
            logger.warning("⚠️ No HF_TOKEN found. LLM explanations will use fallback mode.")

    def explain_recommendation(
        self,
        card: str,
        rewards: float,
        rewards_rate: str,
        merchant: str,
        category: str,
        amount: float,
        warnings: Optional[List[str]] = None,
        annual_potential: Optional[float] = None,
        alternatives: Optional[List[Dict]] = None
    ) -> str:
        """
        Generate natural language explanation for a card recommendation.

        Args:
            card: Recommended card name
            rewards: Rewards earned for this transaction
            rewards_rate: Rewards rate (e.g., "4x points")
            merchant: Merchant name
            category: Transaction category
            amount: Transaction amount
            warnings: List of warning messages
            annual_potential: Annual rewards potential
            alternatives: Alternative card options; each dict is expected to
                carry 'card' and 'rewards' keys — TODO confirm against caller

        Returns:
            Natural language explanation string (never raises; falls back to
            a rule-based explanation on any LLM failure)
        """

        # Fallback if LLM not available
        if not self.client:
            return self._generate_fallback_explanation(
                card, rewards, rewards_rate, merchant, category, amount, warnings
            )

        # Build context-aware prompt
        prompt = self._build_prompt(
            card, rewards, rewards_rate, merchant, category, amount,
            warnings, annual_potential, alternatives
        )

        try:
            # Generate explanation using LLM. text_generation returns the
            # generated text as a str with these (non-streaming) arguments.
            response = self.client.text_generation(
                prompt,
                model=self.model,
                max_new_tokens=200,
                temperature=0.7,
                do_sample=True,
                top_p=0.9,
                repetition_penalty=1.1
            )

            # Clean up response
            explanation = response.strip()

            # Remove any prompt artifacts: some models echo the prompt; keep
            # only the text after the last "Explanation:" marker.
            if "Explanation:" in explanation:
                explanation = explanation.split("Explanation:")[-1].strip()

            logger.info(f"✅ Generated LLM explanation for {card}")
            return explanation

        except Exception as e:
            # Any inference error (network, rate limit, model load) downgrades
            # to the deterministic fallback rather than propagating.
            logger.error(f"❌ LLM generation failed: {e}")
            return self._generate_fallback_explanation(
                card, rewards, rewards_rate, merchant, category, amount, warnings
            )

    def _build_prompt(
        self,
        card: str,
        rewards: float,
        rewards_rate: str,
        merchant: str,
        category: str,
        amount: float,
        warnings: Optional[List[str]],
        annual_potential: Optional[float],
        alternatives: Optional[List[Dict]]
    ) -> str:
        """Build optimized prompt for LLM."""

        prompt = f"""You are a friendly credit card rewards expert. Explain why this card is recommended.

Transaction Details:
- Merchant: {merchant}
- Category: {category}
- Amount: ${amount:.2f}

Recommendation:
- Best Card: {card}
- Rewards Earned: ${rewards:.2f} ({rewards_rate})
"""

        # Truthiness check: a 0.0 annual potential is deliberately omitted
        # from the prompt, same as None.
        if annual_potential:
            prompt += f"- Annual Potential: ${annual_potential:.2f} in this category\n"

        # Only the first warning is surfaced to keep the prompt short.
        if warnings:
            prompt += f"- Important Warning: {warnings[0]}\n"

        if alternatives:
            # Show at most the top two alternatives.
            alt_text = ", ".join([f"{alt['card']} (${alt['rewards']:.2f})"
                                  for alt in alternatives[:2]])
            prompt += f"- Alternatives: {alt_text}\n"

        prompt += """
Provide a friendly, concise explanation (2-3 sentences) that:
1. Explains why this card is the best choice
2. Highlights the key benefit
3. Mentions any important warnings if present

Keep it conversational and helpful. Don't repeat the numbers already shown."""

        return prompt

    def _generate_fallback_explanation(
        self,
        card: str,
        rewards: float,
        rewards_rate: str,
        merchant: str,
        category: str,
        amount: float,
        warnings: Optional[List[str]]
    ) -> str:
        """Generate rule-based explanation when LLM is unavailable."""

        explanation = f"The {card} is your best choice for this {category.lower()} purchase at {merchant}. "
        explanation += f"You'll earn {rewards_rate}, which gives you the highest rewards rate among your cards. "

        if warnings:
            explanation += f"⚠️ Note: {warnings[0]}"
        else:
            explanation += "This optimizes your rewards while staying within spending caps."

        return explanation

    def generate_spending_insights(
        self,
        user_id: str,
        total_spending: float,
        total_rewards: float,
        optimization_score: int,
        top_categories: List[Dict],
        recommendations_count: int
    ) -> str:
        """
        Generate personalized spending insights for analytics dashboard.

        Args:
            user_id: User identifier (not currently used in the prompt)
            total_spending: Total spending amount
            total_rewards: Total rewards earned
            optimization_score: Optimization score (0-100)
            top_categories: List of top spending categories; each dict is
                expected to carry a 'category' key — TODO confirm
            recommendations_count: Number of optimized transactions

        Returns:
            Personalized insights text (rule-based fallback on any failure)
        """

        if not self.client:
            return self._generate_fallback_insights(
                total_spending, total_rewards, optimization_score
            )

        prompt = f"""You are a financial advisor analyzing credit card usage. Provide 2-3 personalized insights.

User Spending Summary:
- Total Spending: ${total_spending:.2f}
- Total Rewards: ${total_rewards:.2f}
- Optimization Score: {optimization_score}/100
- Optimized Transactions: {recommendations_count}
- Top Categories: {', '.join([cat['category'] for cat in top_categories[:3]])}

Provide actionable insights about:
1. Their optimization performance
2. Opportunities to earn more rewards
3. One specific tip to improve their score

Be encouraging and specific. Keep it under 100 words."""

        try:
            response = self.client.text_generation(
                prompt,
                model=self.model,
                max_new_tokens=150,
                temperature=0.8,
                do_sample=True
            )

            return response.strip()

        except Exception as e:
            logger.error(f"❌ Insights generation failed: {e}")
            return self._generate_fallback_insights(
                total_spending, total_rewards, optimization_score
            )

    def _generate_fallback_insights(
        self,
        total_spending: float,
        total_rewards: float,
        optimization_score: int
    ) -> str:
        """Generate rule-based insights when LLM unavailable."""

        # Guard against division by zero for users with no recorded spending.
        rewards_rate = (total_rewards / total_spending * 100) if total_spending > 0 else 0

        insights = f"You're earning ${total_rewards:.2f} in rewards on ${total_spending:.2f} of spending "
        insights += f"({rewards_rate:.1f}% effective rate). "

        if optimization_score >= 80:
            insights += "Excellent optimization! You're maximizing your rewards effectively. "
        elif optimization_score >= 60:
            insights += "Good progress! Consider using our recommendations more consistently. "
        else:
            insights += "There's room for improvement. Follow our card suggestions to boost your rewards. "

        insights += "Keep tracking your spending to identify new optimization opportunities."

        return insights

    def chat_response(
        self,
        user_message: str,
        user_context: Dict,
        chat_history: Optional[List[tuple]] = None  # fixed: default is None, so the annotation must be Optional
    ) -> str:
        """
        Generate conversational response for chat interface.

        Args:
            user_message: User's question/message
            user_context: User's spending data and card portfolio; read keys:
                'cards', 'monthly_spending', 'top_category' (all optional)
            chat_history: Previous conversation as (user, assistant) tuples

        Returns:
            AI assistant response (static apology text on any failure)
        """

        if not self.client:
            return "I'm currently in fallback mode. Please ask specific questions about your cards or transactions."

        # Build context from user data
        context = f"""User Profile:
- Cards: {', '.join(user_context.get('cards', ['Unknown']))}
- Monthly Spending: ${user_context.get('monthly_spending', 0):.2f}
- Top Category: {user_context.get('top_category', 'Unknown')}
"""

        # Add chat history for context
        history_text = ""
        if chat_history:
            recent_history = chat_history[-3:]  # Last 3 exchanges
            history_text = "\n".join([
                f"User: {user}\nAssistant: {assistant}"
                for user, assistant in recent_history
            ])

        prompt = f"""You are RewardPilot AI, a helpful credit card rewards assistant.

{context}

Previous Conversation:
{history_text if history_text else "None"}

User Question: {user_message}

Provide a helpful, concise response (2-3 sentences). Be friendly and specific."""

        try:
            response = self.client.text_generation(
                prompt,
                model=self.model,
                max_new_tokens=150,
                temperature=0.8,
                do_sample=True
            )

            return response.strip()

        except Exception as e:
            logger.error(f"❌ Chat response failed: {e}")
            return "I'm having trouble generating a response. Please try rephrasing your question."
327
+
328
+
329
# Module-level cache backing the lazy singleton below.
_llm_explainer = None


def get_llm_explainer() -> LLMExplainer:
    """Return the process-wide LLMExplainer, creating it on first use."""
    global _llm_explainer
    if _llm_explainer is not None:
        return _llm_explainer
    _llm_explainer = LLMExplainer()
    return _llm_explainer