Malaji71 committed
Commit be92860 · verified · 1 Parent(s): 85f2f4b

Create optimizer.py

Files changed (1)
  1. optimizer.py +210 -0
optimizer.py ADDED
@@ -0,0 +1,210 @@
+ """
+ Ultra Supreme Optimizer - Main optimization engine for image analysis
+ """
+
+ import gc
+ import logging
+ from datetime import datetime
+ from typing import Tuple, Dict, Any, Optional
+
+ import torch
+ import numpy as np
+ from PIL import Image
+ from clip_interrogator import Config, Interrogator
+ import spaces
+
+ from analyzer import UltraSupremeAnalyzer
+
+ logger = logging.getLogger(__name__)
+
+
+ class UltraSupremeOptimizer:
+     """Main optimizer class for ultra supreme image analysis"""
+
+     def __init__(self):
+         self.interrogator: Optional[Interrogator] = None
+         self.analyzer = UltraSupremeAnalyzer()
+         self.usage_count = 0
+         self.device = self._get_device()
+         self.is_initialized = False
+
+     @staticmethod
+     def _get_device() -> str:
+         """Determine the best available device for computation"""
+         if torch.cuda.is_available():
+             return "cuda"
+         elif torch.backends.mps.is_available():
+             return "mps"
+         else:
+             return "cpu"
+
+     def initialize_model(self) -> bool:
+         """Initialize the CLIP interrogator model"""
+         if self.is_initialized:
+             return True
+
+         try:
+             config = Config(
+                 clip_model_name="ViT-L-14/openai",
+                 download_cache=True,
+                 chunk_size=2048,
+                 quiet=True,
+                 device=self.device
+             )
+
+             self.interrogator = Interrogator(config)
+             self.is_initialized = True
+
+             # Clean up memory after initialization
+             gc.collect()
+             if self.device == "cuda":
+                 torch.cuda.empty_cache()
+
+             return True
+
+         except Exception as e:
+             logger.error(f"Initialization error: {e}")
+             return False
+
+     def optimize_image(self, image: Any) -> Optional[Image.Image]:
+         """Optimize image for processing"""
+         if image is None:
+             return None
+
+         try:
+             # Convert to PIL Image if necessary
+             if isinstance(image, np.ndarray):
+                 image = Image.fromarray(image)
+             elif not isinstance(image, Image.Image):
+                 image = Image.open(image)
+
+             # Convert to RGB if necessary
+             if image.mode != 'RGB':
+                 image = image.convert('RGB')
+
+             # Resize if too large
+             max_size = 768 if self.device != "cpu" else 512
+             if image.size[0] > max_size or image.size[1] > max_size:
+                 image.thumbnail((max_size, max_size), Image.Resampling.LANCZOS)
+
+             return image
+
+         except Exception as e:
+             logger.error(f"Image optimization error: {e}")
+             return None
+
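+     # On Hugging Face Spaces with ZeroGPU hardware, @spaces.GPU requests a GPU
+     # allocation for the duration of this call.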
+     @spaces.GPU
+     def generate_ultra_supreme_prompt(self, image: Any) -> Tuple[str, str, int, Dict[str, int]]:
+         """
+         Generate ultra supreme prompt from image
+
+         Returns:
+             Tuple of (prompt, analysis_info, score, breakdown)
+         """
+         try:
+             # Initialize model if needed
+             if not self.is_initialized:
+                 if not self.initialize_model():
+                     return "❌ Model initialization failed.", "Please refresh and try again.", 0, {}
+
+             # Validate input
+             if image is None:
+                 return "❌ Please upload an image.", "No image provided.", 0, {}
+
+             self.usage_count += 1
+
+             # Optimize image
+             image = self.optimize_image(image)
+             if image is None:
+                 return "❌ Image processing failed.", "Invalid image format.", 0, {}
+
+             start_time = datetime.now()
+
+             # ULTRA SUPREME TRIPLE CLIP ANALYSIS
+             logger.info("ULTRA SUPREME ANALYSIS - Maximum intelligence deployment")
+
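+             # clip-interrogator modes: interrogate_fast trades quality for speed,
+             # interrogate_classic builds a classic-style prompt, and interrogate
+             # is the slowest but most thorough; all three results feed the analyzer.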
+             clip_fast = self.interrogator.interrogate_fast(image)
+             clip_classic = self.interrogator.interrogate_classic(image)
+             clip_best = self.interrogator.interrogate(image)
+
+             logger.info(f"ULTRA CLIP Results:\nFast: {clip_fast}\nClassic: {clip_classic}\nBest: {clip_best}")
+
+             # ULTRA SUPREME ANALYSIS
+             ultra_analysis = self.analyzer.ultra_supreme_analysis(clip_fast, clip_classic, clip_best)
+
+             # BUILD ULTRA SUPREME FLUX PROMPT
+             optimized_prompt = self.analyzer.build_ultra_supreme_prompt(
+                 ultra_analysis,
+                 [clip_fast, clip_classic, clip_best]
+             )
+
+             # CALCULATE ULTRA SUPREME SCORE
+             score, breakdown = self.analyzer.calculate_ultra_supreme_score(optimized_prompt, ultra_analysis)
+
+             end_time = datetime.now()
+             duration = (end_time - start_time).total_seconds()
+
+             # Memory cleanup
+             gc.collect()
+             if self.device == "cuda":
+                 torch.cuda.empty_cache()
+
+             # Generate analysis report
+             analysis_info = self._generate_analysis_report(
+                 ultra_analysis, clip_fast, clip_classic, clip_best,
+                 score, breakdown, duration
+             )
+
+             return optimized_prompt, analysis_info, score, breakdown
+
+         except Exception as e:
+             logger.error(f"Ultra supreme generation error: {e}")
+             return f"❌ Error: {str(e)}", "Please try with a different image.", 0, {}
+
165
+
166
+ def _generate_analysis_report(self, ultra_analysis: Dict[str, Any],
167
+ clip_fast: str, clip_classic: str, clip_best: str,
168
+ score: int, breakdown: Dict[str, int],
169
+ duration: float) -> str:
170
+ """Generate detailed analysis report"""
171
+
172
+ gpu_status = "⚡ ZeroGPU" if torch.cuda.is_available() else "💻 CPU"
173
+
174
+ # Format detected elements - Fixed the .title() error by checking for None
175
+ features = ", ".join(ultra_analysis["facial_ultra"]["facial_hair"]) if ultra_analysis["facial_ultra"]["facial_hair"] else "None detected"
176
+ cultural = ", ".join(ultra_analysis["demographic"]["cultural_religious"]) if ultra_analysis["demographic"]["cultural_religious"] else "None detected"
177
+ clothing = ", ".join(ultra_analysis["clothing_accessories"]["eyewear"] + ultra_analysis["clothing_accessories"]["headwear"]) if ultra_analysis["clothing_accessories"]["eyewear"] or ultra_analysis["clothing_accessories"]["headwear"] else "None detected"
178
+
179
+ # Safe access to potentially None values
180
+ age_category = ultra_analysis["demographic"].get("age_category", "Unspecified")
181
+ if age_category and age_category != "Unspecified":
182
+ age_category = age_category.replace("_", " ").title()
183
+
184
+ setting_type = ultra_analysis["environmental"].get("setting_type", "Standard")
185
+ if setting_type and setting_type != "Standard":
186
+ setting_type = setting_type.title()
187
+
188
+ primary_emotion = ultra_analysis["emotional_state"].get("primary_emotion", "Neutral")
189
+ if primary_emotion and primary_emotion != "Neutral":
190
+ primary_emotion = primary_emotion.title()
191
+
192
+ analysis_info = f"""**🚀 ULTRA SUPREME ANALYSIS COMPLETE**
193
+ **Processing:** {gpu_status} • {duration:.1f}s • Triple CLIP Ultra Intelligence
194
+ **Ultra Score:** {score}/100 • Breakdown: Structure({breakdown.get('structure',0)}) Features({breakdown.get('features',0)}) Cultural({breakdown.get('cultural',0)}) Emotional({breakdown.get('emotional',0)}) Technical({breakdown.get('technical',0)})
195
+ **Generation:** #{self.usage_count}
196
+ **🧠 ULTRA DEEP DETECTION:**
197
+ - **Age Category:** {age_category} (Confidence: {ultra_analysis["demographic"].get("age_confidence", 0)})
198
+ - **Cultural Context:** {cultural}
199
+ - **Facial Features:** {features}
200
+ - **Accessories:** {clothing}
201
+ - **Setting:** {setting_type}
202
+ - **Emotion:** {primary_emotion}
203
+ - **Total Features:** {ultra_analysis["intelligence_metrics"]["total_features_detected"]}
204
+ **📊 CLIP ANALYSIS SOURCES:**
205
+ - **Fast:** {clip_fast[:50]}...
206
+ - **Classic:** {clip_classic[:50]}...
207
+ - **Best:** {clip_best[:50]}...
208
+ **⚡ ULTRA OPTIMIZATION:** Applied absolute maximum depth analysis with Pariente AI research rules"""
209
+
210
+ return analysis_info
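
For reference, a minimal usage sketch of the class added in this commit (the input path and surrounding script are illustrative assumptions, not part of the commit):

from PIL import Image

from optimizer import UltraSupremeOptimizer

optimizer = UltraSupremeOptimizer()
image = Image.open("portrait.jpg")  # illustrative input path
prompt, report, score, breakdown = optimizer.generate_ultra_supreme_prompt(image)
print(prompt)
print(f"Ultra Supreme score: {score}/100")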