"""
LLM-powered explanation generator for RewardPilot recommendations.
Uses Hugging Face Inference API with Llama 3.2 for natural language explanations.
"""
from huggingface_hub import InferenceClient
import os
from typing import Dict, List, Optional
import logging

# NOTE: basicConfig here configures the root logger at import time; fine for a
# standalone app, but consider configuring logging at the entry point instead
# if this module is imported as a library.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


class LLMExplainer:
    """Generate natural language explanations for credit card recommendations using LLM"""
    
    def __init__(self, model: str = "meta-llama/Llama-3.2-3B-Instruct"):
        """
        Initialize LLM explainer with Hugging Face Inference API
        
        Args:
            model: HuggingFace model ID to use for generation
        """
        self.model = model
        self.client = None
        
        # Try to initialize with token
        hf_token = os.getenv("HF_TOKEN", "")
        
        if hf_token:
            try:
                self.client = InferenceClient(token=hf_token)
                # Note: constructing the client does not validate the token;
                # authentication errors only surface on the first request.
                logger.info(f"βœ… LLM Explainer initialized with model: {model}")
            except Exception as e:
                logger.warning(f"⚠️ Could not initialize HF client: {e}")
                self.client = None
        else:
            logger.warning("⚠️ No HF_TOKEN found. LLM explanations will use fallback mode.")
    
    def explain_recommendation(
        self,
        card: str,
        rewards: float,
        rewards_rate: str,
        merchant: str,
        category: str,
        amount: float,
        warnings: Optional[List[str]] = None,
        annual_potential: Optional[float] = None,
        alternatives: Optional[List[Dict]] = None
    ) -> str:
        """
        Generate natural language explanation for a card recommendation
        
        Args:
            card: Recommended card name
            rewards: Rewards earned for this transaction
            rewards_rate: Rewards rate (e.g., "4x points")
            merchant: Merchant name
            category: Transaction category
            amount: Transaction amount
            warnings: List of warning messages
            annual_potential: Annual rewards potential
            alternatives: Alternative card options
            
        Returns:
            Natural language explanation string
        """
        
        # Fallback if LLM not available
        if not self.client:
            return self._generate_fallback_explanation(
                card, rewards, rewards_rate, merchant, category, amount, warnings
            )
        
        # Build context-aware prompt
        prompt = self._build_prompt(
            card, rewards, rewards_rate, merchant, category, amount,
            warnings, annual_potential, alternatives
        )
        
        try:
            # Generate explanation using LLM with correct API
            messages = [
                {
                    "role": "system",
                    "content": "You are a friendly credit card rewards expert who provides concise, helpful explanations."
                },
                {
                    "role": "user",
                    "content": prompt
                }
            ]
            
            response = self.client.chat_completion(
                messages=messages,
                model=self.model,
                max_tokens=200,
                temperature=0.7,
                top_p=0.9
            )
            
            # Extract response text
            explanation = response.choices[0].message.content.strip()
            
            logger.info(f"βœ… Generated LLM explanation for {card}")
            return explanation
            
        except Exception as e:
            logger.error(f"❌ LLM generation failed: {e}")
            return self._generate_fallback_explanation(
                card, rewards, rewards_rate, merchant, category, amount, warnings
            )
    
    def _build_prompt(
        self,
        card: str,
        rewards: float,
        rewards_rate: str,
        merchant: str,
        category: str,
        amount: float,
        warnings: Optional[List[str]],
        annual_potential: Optional[float],
        alternatives: Optional[List[Dict]]
    ) -> str:
        """Build optimized prompt for LLM"""
        
        prompt = f"""Explain why this credit card is the best choice for this purchase.

Transaction Details:
- Merchant: {merchant}
- Category: {category}
- Amount: ${amount:.2f}

Recommendation:
- Best Card: {card}
- Rewards Earned: ${rewards:.2f} ({rewards_rate})
"""
        
        if annual_potential:
            prompt += f"- Annual Potential: ${annual_potential:.2f} in this category\n"
        
        if warnings:
            prompt += f"- Important Warning: {warnings[0]}\n"
        
        if alternatives:
            alt_text = ", ".join([f"{alt['card']} (${alt['rewards']:.2f})" for alt in alternatives[:2]])
            prompt += f"- Alternatives: {alt_text}\n"
        
        prompt += """
Provide a friendly, concise explanation (2-3 sentences) that:
1. Explains why this card is the best choice
2. Highlights the key benefit
3. Mentions any important warnings if present

Keep it conversational and helpful."""
        
        return prompt
    
    def _generate_fallback_explanation(
        self,
        card: str,
        rewards: float,
        rewards_rate: str,
        merchant: str,
        category: str,
        amount: float,
        warnings: Optional[List[str]]
    ) -> str:
        """Generate rule-based explanation when LLM is unavailable"""
        
        explanation = f"The **{card}** is your best choice for this {category.lower()} purchase at {merchant}. "
        explanation += f"You'll earn **{rewards_rate}**, which gives you the highest rewards rate among your cards. "
        
        if warnings:
            explanation += f"\n\n⚠️ **Note:** {warnings[0]}"
        else:
            explanation += "This optimizes your rewards while staying within spending caps."
        
        return explanation
    
    def generate_spending_insights(
        self,
        user_id: str,
        total_spending: float,
        total_rewards: float,
        optimization_score: int,
        top_categories: List[Dict],
        recommendations_count: int
    ) -> str:
        """
        Generate personalized spending insights for analytics dashboard
        
        Args:
            user_id: User identifier
            total_spending: Total spending amount
            total_rewards: Total rewards earned
            optimization_score: Optimization score (0-100)
            top_categories: List of top spending categories
            recommendations_count: Number of optimized transactions
            
        Returns:
            Personalized insights text
        """
        
        if not self.client:
            return self._generate_fallback_insights(
                total_spending, total_rewards, optimization_score
            )
        
        prompt = f"""Analyze this user's credit card spending and provide personalized insights.

User Spending Summary:
- Total Spending: ${total_spending:.2f}
- Total Rewards: ${total_rewards:.2f}
- Optimization Score: {optimization_score}/100
- Optimized Transactions: {recommendations_count}
- Top Categories: {', '.join([cat['category'] for cat in top_categories[:3]])}

Provide 2-3 actionable insights about:
1. Their optimization performance
2. Opportunities to earn more rewards
3. One specific tip to improve their score

Be encouraging and specific. Keep it under 100 words."""
        
        try:
            messages = [
                {
                    "role": "system",
                    "content": "You are a financial advisor specializing in credit card rewards optimization."
                },
                {
                    "role": "user",
                    "content": prompt
                }
            ]
            
            response = self.client.chat_completion(
                messages=messages,
                model=self.model,
                max_tokens=150,
                temperature=0.8
            )
            
            return response.choices[0].message.content.strip()
            
        except Exception as e:
            logger.error(f"❌ Insights generation failed: {e}")
            return self._generate_fallback_insights(
                total_spending, total_rewards, optimization_score
            )
    
    def _generate_fallback_insights(
        self,
        total_spending: float,
        total_rewards: float,
        optimization_score: int
    ) -> str:
        """Generate rule-based insights when LLM unavailable"""
        
        rewards_rate = (total_rewards / total_spending * 100) if total_spending > 0 else 0
        
        insights = f"You're earning **${total_rewards:.2f}** in rewards on **${total_spending:.2f}** of spending "
        insights += f"(**{rewards_rate:.1f}%** effective rate). "
        
        if optimization_score >= 80:
            insights += "🌟 **Excellent optimization!** You're maximizing your rewards effectively. "
        elif optimization_score >= 60:
            insights += "πŸ‘ **Good progress!** Consider using our recommendations more consistently. "
        else:
            insights += "πŸ’‘ **Room for improvement!** Follow our card suggestions to boost your rewards. "
        
        insights += "Keep tracking your spending to identify new optimization opportunities."
        
        return insights
    
    def chat_response(
        self,
        user_message: str,
        user_context: Dict,
        chat_history: Optional[List[tuple]] = None
    ) -> str:
        """
        Generate conversational response for chat interface
        
        Args:
            user_message: User's question/message
            user_context: User's spending data and card portfolio
            chat_history: Previous conversation history
            
        Returns:
            AI assistant response
        """
        
        if not self.client:
            return self._generate_fallback_chat(user_message, user_context)
        
        # Build context from user data
        context = f"""User Profile:
- Cards: {', '.join(user_context.get('cards', ['Unknown']))}
- Monthly Spending: ${user_context.get('monthly_spending', 0):.2f}
- Top Category: {user_context.get('top_category', 'Unknown')}
"""
        
        # Build messages with history
        messages = [
            {
                "role": "system",
                "content": f"You are RewardPilot AI, a helpful credit card rewards assistant.\n\n{context}"
            }
        ]
        
        # Add chat history
        if chat_history:
            for user_msg, assistant_msg in chat_history[-3:]:  # Last 3 exchanges
                messages.append({"role": "user", "content": user_msg})
                messages.append({"role": "assistant", "content": assistant_msg})
        
        # Add current message
        messages.append({"role": "user", "content": user_message})
        
        try:
            response = self.client.chat_completion(
                messages=messages,
                model=self.model,
                max_tokens=200,
                temperature=0.8
            )
            
            return response.choices[0].message.content.strip()
            
        except Exception as e:
            logger.error(f"❌ Chat response failed: {e}")
            return self._generate_fallback_chat(user_message, user_context)
    
    def _generate_fallback_chat(self, user_message: str, user_context: Dict) -> str:
        """Generate rule-based chat response when LLM unavailable"""
        
        message_lower = user_message.lower()
        
        # Greeting
        if any(word in message_lower for word in ['hello', 'hi', 'hey', 'greetings']):
            return "Hello! πŸ‘‹ I'm RewardPilot AI. I can help you choose the best credit card for any purchase. What would you like to know?"
        
        # Card-specific questions
        if 'amex gold' in message_lower or 'american express gold' in message_lower:
            return "The **Amex Gold** is excellent for dining and groceries, earning **4x points** in both categories. It has a $250 annual fee but comes with dining credits. Best for foodies! 🍽️"
        
        if 'chase sapphire' in message_lower:
            return "The **Chase Sapphire Reserve** is a premium travel card earning **3x points** on travel and dining. It has a $550 annual fee but offers travel credits and lounge access. Perfect for frequent travelers! ✈️"
        
        if 'costco' in message_lower:
            return "The **Costco Anywhere Visa** offers **4% cashback** on gas (up to $7,000/year), 3% on restaurants and travel, 2% at Costco, and 1% elsewhere. No annual fee beyond Costco membership! β›½"
        
        # Category questions
        if 'grocery' in message_lower or 'groceries' in message_lower:
            return "For groceries, I recommend:\n\n1. **Amex Gold** - 4x points\n2. **Blue Cash Preferred** - 6% cashback (up to $6,000/year)\n3. **Chase Freedom Flex** - 5% in rotating categories\n\nWhich sounds best for you? πŸ›’"
        
        if 'dining' in message_lower or 'restaurant' in message_lower:
            return "For dining, top choices are:\n\n1. **Capital One Savor** - 4% cashback\n2. **Amex Gold** - 4x points\n3. **Chase Sapphire Preferred** - 3x points\n\nAll great options! 🍴"
        
        if 'travel' in message_lower:
            return "For travel, consider:\n\n1. **Chase Sapphire Reserve** - 3x points\n2. **Amex Platinum** - 5x points on flights\n3. **Capital One Venture X** - 2x miles everywhere\n\nDepends on your travel style! ✈️"
        
        if 'gas' in message_lower:
            return "For gas stations:\n\n1. **Costco Visa** - 4% cashback\n2. **BofA Customized Cash** - 3% in your choice category\n3. **Citi Custom Cash** - 5% on top category (up to $500/month)\n\nSave at the pump! β›½"
        
        # Optimization
        if 'optimize' in message_lower or 'maximize' in message_lower:
            return "To optimize your rewards:\n\n1. βœ… Use category-specific cards\n2. βœ… Have a 2% cashback baseline card\n3. βœ… Track spending caps\n4. βœ… Consider annual fees vs. rewards\n\nUse the 'Get Recommendation' tab for personalized advice!"
        
        # Help
        if 'help' in message_lower or 'what can you do' in message_lower:
            return "I can help you with:\n\nπŸ’³ Choosing the best card for specific merchants\nπŸ“Š Comparing card benefits\n🎯 Understanding rewards rates\nπŸ’° Optimizing your wallet strategy\n\nWhat would you like to know?"
        
        # Default response
        return "I can help you find the best credit card for any purchase! Try asking:\n\nβ€’ 'Which card for groceries?'\nβ€’ 'Tell me about Chase Sapphire Reserve'\nβ€’ 'How can I maximize rewards?'\n\nWhat would you like to know? πŸ€”"


# Singleton instance
_llm_explainer = None


def get_llm_explainer() -> LLMExplainer:
    """Get or create singleton LLM explainer instance"""
    global _llm_explainer
    
    if _llm_explainer is None:
        _llm_explainer = LLMExplainer()
    
    return _llm_explainer
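

if __name__ == "__main__":
    # Minimal usage sketch. Without HF_TOKEN in the environment the explainer
    # runs in fallback mode, so this demo works offline; all values below are
    # illustrative, not taken from real RewardPilot data.
    explainer = get_llm_explainer()

    print(explainer.explain_recommendation(
        card="Amex Gold",
        rewards=4.50,
        rewards_rate="4x points",
        merchant="Whole Foods",
        category="Groceries",
        amount=112.50,
        warnings=["Approaching the annual 4x grocery spending cap."],
    ))

    print(explainer.chat_response(
        user_message="Which card should I use for groceries?",
        user_context={
            "cards": ["Amex Gold", "Chase Freedom Flex"],
            "monthly_spending": 2400.00,
            "top_category": "Groceries",
        },
    ))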