Commit · 329b20b0
Parent(s):
Added project codebase
Files changed:
- .gitignore +14 -0
- app/__init__.py +0 -0
- app/config.py +0 -0
- app/main.py +108 -0
- app/models/__init__.py +0 -0
- app/models/attribute_extractor.py +115 -0
- app/models/clothing_detector.py +63 -0
- app/models/color_analyzer.py +135 -0
- app/schemas/__init__.py +0 -0
- app/schemas/response.py +29 -0
- app/utils/__init__.py +0 -0
- app/utils/image_processing.py +35 -0
- app/utils/text_processing.py +0 -0
- frontend/index.html +77 -0
- frontend/script.js +165 -0
- frontend/style.css +231 -0
- requirements.txt +21 -0
- setup.py +11 -0
.gitignore
ADDED
@@ -0,0 +1,14 @@
+# Virtual environment
+myenv/
+venv/
+.env/
+
+# Python cache
+__pycache__/
+*.pyc
+*.pyo
+*.pyd
+
+# Build artifacts
+*.egg-info/
+Clothing_Attribute_detection_Computer_Vision.egg-info/
app/__init__.py
ADDED
File without changes
app/config.py
ADDED
File without changes
app/main.py
ADDED
@@ -0,0 +1,108 @@
+from fastapi import FastAPI, File, UploadFile, HTTPException
+from fastapi.middleware.cors import CORSMiddleware
+from fastapi.staticfiles import StaticFiles
+from fastapi.responses import HTMLResponse
+import uvicorn
+from PIL import Image
+import io
+import asyncio
+from typing import Dict, Any
+
+from app.models.clothing_detector import ClothingDetector
+from app.models.attribute_extractor import AttributeExtractor
+from app.models.color_analyzer import ColorAnalyzer
+from app.schemas.response import ClothingAnalysisResponse
+from app.utils.image_processing import preprocess_image
+
+app = FastAPI(title="Clothing Attribute Detection API", version="1.0.0")
+
+# Add CORS middleware
+app.add_middleware(
+    CORSMiddleware,
+    allow_origins=["*"],
+    allow_credentials=True,
+    allow_methods=["*"],
+    allow_headers=["*"],
+)
+
+# Mount static files
+app.mount("/static", StaticFiles(directory="frontend"), name="static")
+
+# Initialize models (loaded once at startup)
+clothing_detector = None
+attribute_extractor = None
+color_analyzer = None
+
+@app.on_event("startup")
+async def load_models():
+    global clothing_detector, attribute_extractor, color_analyzer
+    print("Loading models...")
+
+    clothing_detector = ClothingDetector()
+    attribute_extractor = AttributeExtractor()
+    color_analyzer = ColorAnalyzer()
+
+    print("Models loaded successfully!")
+
+@app.get("/", response_class=HTMLResponse)
+async def read_root():
+    with open("frontend/index.html", "r", encoding="utf-8") as f:
+        html = f.read()
+    return HTMLResponse(html)
+
+@app.get("/health")
+async def health_check():
+    return {"status": "healthy", "message": "Clothing Attribute Detection API is running"}
+
+@app.post("/analyze", response_model=ClothingAnalysisResponse)
+async def analyze_clothing(file: UploadFile = File(...)):
+    try:
+        # Validate file type
+        if not file.content_type.startswith("image/"):
+            raise HTTPException(status_code=400, detail="File must be an image")
+
+        # Read and preprocess image
+        image_bytes = await file.read()
+        image = Image.open(io.BytesIO(image_bytes))
+        processed_image = preprocess_image(image)
+
+        # Run analysis in parallel
+        detection_task = asyncio.create_task(
+            clothing_detector.detect_clothing_items(processed_image)
+        )
+        attribute_task = asyncio.create_task(
+            attribute_extractor.extract_attributes(processed_image)
+        )
+        color_task = asyncio.create_task(
+            color_analyzer.analyze_colors(processed_image)
+        )
+
+        # Wait for all tasks to complete
+        clothing_items, attributes, color_analysis = await asyncio.gather(
+            detection_task, attribute_task, color_task
+        )
+
+        # Combine results
+        result = {
+            "status": "success",
+            "clothing_items": clothing_items,
+            "style_classification": attributes.get("style", "unknown"),
+            "formality": attributes.get("formality", "unknown"),
+            "texture": attributes.get("texture", "unknown"),
+            "dominant_colors": color_analysis["dominant_colors"],
+            "color_distribution": color_analysis["color_distribution"],
+            "detailed_attributes": attributes,
+            "confidence_scores": {
+                "overall": 0.85,
+                "style": attributes.get("confidence", 0.8),
+                "color": color_analysis.get("confidence", 0.9)
+            }
+        }
+
+        return ClothingAnalysisResponse(**result)
+
+    except Exception as e:
+        raise HTTPException(status_code=500, detail=f"Analysis failed: {str(e)}")
+
+if __name__ == "__main__":
+    uvicorn.run("app.main:app", host="0.0.0.0", port=8000, reload=True)
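
A minimal client sketch for exercising the /analyze endpoint by hand. It assumes the server is already running on http://localhost:8000 and that a local test image named sample.jpg exists; both names are illustrative, not part of the commit.

import requests

# Post an image to the running API and print the headline attributes.
with open("sample.jpg", "rb") as f:  # illustrative file name
    resp = requests.post(
        "http://localhost:8000/analyze",
        files={"file": ("sample.jpg", f, "image/jpeg")},
    )
resp.raise_for_status()
data = resp.json()
print(data["style_classification"], data["formality"], data["texture"])
for color in data["dominant_colors"]:
    print(color["color_name"], color["hex"], color["percentage"])
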
app/models/__init__.py
ADDED
File without changes
app/models/attribute_extractor.py
ADDED
@@ -0,0 +1,115 @@
+import torch
+from transformers import BlipProcessor, BlipForConditionalGeneration
+from PIL import Image
+import re
+import asyncio
+from typing import Dict, Any
+
+class AttributeExtractor:
+    def __init__(self):
+        self.model_name = "Salesforce/blip-image-captioning-base"
+        self.processor = None
+        self.model = None
+        self._load_model()
+
+        # Define attribute patterns for text analysis
+        self.style_patterns = {
+            "formal": ["suit", "blazer", "dress shirt", "tie", "formal", "business", "elegant"],
+            "casual": ["t-shirt", "jeans", "sneakers", "hoodie", "casual", "relaxed", "comfortable", "leggings"],
+            "sports": ["athletic", "sports", "gym", "workout", "running", "training"]
+        }
+
+        self.texture_patterns = {
+            "cotton": ["cotton", "soft", "comfortable"],
+            "denim": ["denim", "jeans", "rugged"],
+            "silk": ["silk", "smooth", "shiny", "lustrous", "leggings", "velvet"],
+            "wool": ["wool", "warm", "thick"],
+            "leather": ["leather", "tough", "durable"],
+            "synthetic": ["polyester", "synthetic", "artificial"]
+        }
+
+    def _load_model(self):
+        """Load the BLIP model for image captioning"""
+        try:
+            print("Loading BLIP model for attribute extraction...")
+            self.processor = BlipProcessor.from_pretrained(self.model_name)
+            self.model = BlipForConditionalGeneration.from_pretrained(self.model_name)
+            self.model.eval()
+            print("BLIP model loaded successfully!")
+        except Exception as e:
+            print(f"Error loading BLIP model: {e}")
+            raise
+
+    async def extract_attributes(self, image: Image.Image) -> Dict[str, Any]:
+        """Extract clothing attributes from image"""
+        try:
+            loop = asyncio.get_event_loop()
+
+            # Generate multiple captions with different prompts
+            tasks = [
+                loop.run_in_executor(None, self._generate_caption, image, "a photo of"),
+                loop.run_in_executor(None, self._generate_caption, image, "clothing style:"),
+                loop.run_in_executor(None, self._generate_caption, image, "fabric texture:")
+            ]
+
+            captions = await asyncio.gather(*tasks)
+
+            # Analyze captions to extract attributes
+            attributes = self._analyze_captions(captions)
+            return attributes
+
+        except Exception as e:
+            print(f"Attribute extraction error: {e}")
+            return {"style": "unknown", "formality": "unknown", "texture": "unknown"}
+
+    def _generate_caption(self, image: Image.Image, prompt: str = "") -> str:
+        """Generate caption for the image"""
+        try:
+            if prompt:
+                inputs = self.processor(image, prompt, return_tensors="pt")
+            else:
+                inputs = self.processor(image, return_tensors="pt")
+
+            with torch.no_grad():
+                out = self.model.generate(**inputs, max_length=50, num_beams=4)
+                caption = self.processor.decode(out[0], skip_special_tokens=True)
+
+            return caption.lower()
+        except Exception as e:
+            print(f"Caption generation error: {e}")
+            return ""
+
+    def _analyze_captions(self, captions: list) -> Dict[str, Any]:
+        """Analyze captions to extract structured attributes"""
+        combined_text = " ".join(captions).lower()
+
+        # Determine style/formality
+        formal_score = sum(1 for word in self.style_patterns["formal"] if word in combined_text)
+        casual_score = sum(1 for word in self.style_patterns["casual"] if word in combined_text)
+        sports_score = sum(1 for word in self.style_patterns["sports"] if word in combined_text)
+
+        if formal_score > casual_score and formal_score > sports_score:
+            style = "formal"
+            formality = "formal"
+        elif sports_score > casual_score:
+            style = "athletic"
+            formality = "casual"
+        else:
+            style = "casual"
+            formality = "casual"
+
+        # Determine texture
+        texture_scores = {}
+        for texture, patterns in self.texture_patterns.items():
+            texture_scores[texture] = sum(1 for word in patterns if word in combined_text)
+
+        detected_texture = max(texture_scores, key=texture_scores.get) if max(texture_scores.values()) > 0 else "unknown"
+
+        return {
+            "style": style,
+            "formality": formality,
+            "texture": detected_texture,
+            "confidence": 0.8,
+            "raw_captions": captions,
+            "detected_keywords": [word for word in combined_text.split() if any(word in patterns for patterns in self.style_patterns.values())]
+        }
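
The style decision in _analyze_captions is a simple keyword vote: each style bucket scores one point per keyword found in the combined caption text, and the highest-scoring bucket wins. A standalone sketch of that idea, with a made-up caption string and a trimmed keyword list for illustration:

# Each bucket is scored by counting keyword hits in the caption text.
style_patterns = {
    "formal": ["suit", "blazer", "tie", "formal"],
    "casual": ["t-shirt", "jeans", "hoodie", "casual"],
    "sports": ["athletic", "gym", "running", "training"],
}

combined_text = "a man wearing a dark suit and a striped tie"
scores = {
    style: sum(1 for word in keywords if word in combined_text)
    for style, keywords in style_patterns.items()
}
print(scores)                       # {'formal': 2, 'casual': 0, 'sports': 0}
print(max(scores, key=scores.get))  # formal
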
app/models/clothing_detector.py
ADDED
@@ -0,0 +1,63 @@
+import torch
+from sklearn.cluster import KMeans
+import cv2
+from transformers import AutoImageProcessor, AutoModelForObjectDetection
+from PIL import Image
+import numpy as np
+from typing import List, Dict, Any
+import asyncio
+
+class ClothingDetector:
+    def __init__(self):
+        self.model_name = "yainage90/fashion-object-detection"
+        self.device = 'cpu'  # Force CPU usage
+        self.processor = None
+        self.model = None
+        self._load_model()
+
+
+
+    def _load_model(self):
+        """Load the pre-trained fashion detection model"""
+        try:
+            print("Loading clothing detection model...")
+            self.processor = AutoImageProcessor.from_pretrained(self.model_name)
+            self.model = AutoModelForObjectDetection.from_pretrained(self.model_name)
+            self.model.to(self.device)
+            self.model.eval()
+            print("Clothing detection model loaded successfully!")
+        except Exception as e:
+            print(f"Error loading model: {e}")
+            raise
+
+    async def detect_clothing_items(self, image: Image.Image) -> List[Dict[str, Any]]:
+        """Detect clothing items in the image"""
+        try:
+            # Run inference in thread pool to avoid blocking
+            loop = asyncio.get_event_loop()
+            results = await loop.run_in_executor(None, self._run_detection, image)
+            return results
+        except Exception as e:
+            print(f"Detection error: {e}")
+            return []
+
+    def _run_detection(self, image: Image.Image) -> List[Dict[str, Any]]:
+        """Run the actual detection"""
+        with torch.no_grad():
+            inputs = self.processor(images=[image], return_tensors="pt")
+            outputs = self.model(**inputs.to(self.device))
+
+            target_sizes = torch.tensor([[image.size[1], image.size[0]]])
+            results = self.processor.post_process_object_detection(
+                outputs, threshold=0.4, target_sizes=target_sizes
+            )[0]
+
+            items = []
+            for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
+                items.append({
+                    "item_type": self.model.config.id2label[label.item()],
+                    "confidence": round(score.item(), 3),
+                    "bounding_box": [round(i.item()) for i in box]
+                })
+
+            return items
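
A standalone sketch for trying the detector outside the API, assuming the package is importable from the repository root and a local image sample.jpg exists (both are illustrative assumptions):

import asyncio
from PIL import Image

from app.models.clothing_detector import ClothingDetector
from app.utils.image_processing import preprocess_image

async def main():
    detector = ClothingDetector()  # downloads/loads the Hugging Face model on first use
    image = preprocess_image(Image.open("sample.jpg"))  # illustrative file name
    for item in await detector.detect_clothing_items(image):
        print(item["item_type"], item["confidence"], item["bounding_box"])

asyncio.run(main())
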
app/models/color_analyzer.py
ADDED
@@ -0,0 +1,135 @@
+import cv2
+import numpy as np
+from PIL import Image
+from sklearn.cluster import KMeans
+from typing import Dict, List, Any
+import asyncio
+import webcolors
+
+class ColorAnalyzer:
+    def __init__(self):
+        self.color_names = {
+            'red': [255, 0, 0],
+            'green': [0, 255, 0],
+            'blue': [0, 0, 255],
+            'yellow': [255, 255, 0],
+            'orange': [255, 165, 0],
+            'purple': [128, 0, 128],
+            'pink': [255, 192, 203],
+            'brown': [165, 42, 42],
+            'black': [0, 0, 0],
+            'white': [255, 255, 255],
+            'gray': [128, 128, 128]
+        }
+
+    async def analyze_colors(self, image: Image.Image) -> Dict[str, Any]:
+        """Analyze colors in the clothing image"""
+        try:
+            loop = asyncio.get_event_loop()
+            result = await loop.run_in_executor(None, self._extract_colors, image)
+            return result
+        except Exception as e:
+            print(f"Color analysis error: {e}")
+            return {"dominant_colors": [], "color_distribution": {}, "confidence": 0.0}
+
+    def _extract_colors(self, image: Image.Image) -> Dict[str, Any]:
+        """Extract dominant colors from image"""
+        # Convert PIL image to OpenCV format
+        opencv_image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
+
+        # Resize image for faster processing
+        height, width = opencv_image.shape[:2]
+        if width > 300:
+            scale = 300 / width
+            new_width = int(width * scale)
+            new_height = int(height * scale)
+            opencv_image = cv2.resize(opencv_image, (new_width, new_height))
+
+        # Reshape image for KMeans clustering
+        data = opencv_image.reshape((-1, 3))
+        data = np.float32(data)
+
+        # Apply KMeans to find dominant colors
+        k = 5  # Number of dominant colors to find
+        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 20, 1.0)
+        _, labels, centers = cv2.kmeans(data, k, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
+
+        # Convert back to RGB and find closest color names
+        centers = np.uint8(centers)
+        dominant_colors = []
+        color_distribution = {}
+
+        # Count pixels for each cluster
+        unique_labels, counts = np.unique(labels.flatten(), return_counts=True)
+        total_pixels = len(labels)
+        total_pixels = len(labels)
+
+        # DEBUG: ensure labels shape matches centers
+        #print(f"[DEBUG] unique_labels: {unique_labels}, counts: {counts}")
+        for idx, (label, count) in enumerate(zip(unique_labels, counts)):
+            # In OpenCV kmeans, label indices correspond to centers rows
+            color_bgr = centers[label]
+            color_rgb = [int(color_bgr[2]), int(color_bgr[1]), int(color_bgr[0])]  # BGR to RGB
+
+            # Find closest named color
+            color_name = self._get_closest_color_name(color_rgb)
+            #print(f"[DEBUG] Cluster {idx}: RGB {color_rgb}, named as {color_name}, count {count}")
+            percentage = (count / total_pixels) * 100
+
+            dominant_colors.append({
+                "color_name": color_name,
+                "rgb": color_rgb,
+                "hex": "#{:02x}{:02x}{:02x}".format(color_rgb[0], color_rgb[1], color_rgb[2]),
+                "percentage": round(percentage, 2)
+            })
+
+            color_distribution[color_name] = round(percentage, 2)
+
+        # Sort by percentage
+        dominant_colors.sort(key=lambda x: x["percentage"], reverse=True)
+
+        return {
+            "dominant_colors": dominant_colors[:3],  # Top 3 colors
+            "color_distribution": color_distribution,
+            "confidence": 0.9
+        }
+
+    def _get_closest_color_name(self, rgb_color: List[int]) -> str:
+        """Find the closest named color to the given RGB value"""
+        # Convert RGB to HSV for hue-based matching
+        import colorsys
+        r, g, b = [c/255.0 for c in rgb_color]
+        h, s, v = colorsys.rgb_to_hsv(r, g, b)
+        # Define target hues for named colors (approximate)
+        hue_map = {
+            'red': 0.0,
+            'orange': 0.08,
+            'yellow': 0.16,
+            'green': 0.33,
+            'blue': 0.61,
+            'purple': 0.78,
+            'pink': 0.92,
+            'brown': 0.08,
+            'gray': None,
+            'black': None,
+            'white': None
+        }
+        # If low saturation or extreme brightness, treat separately
+        if v < 0.2:
+            return 'black'
+        if v > 0.9 and s < 0.15:
+            return 'white'
+        if s < 0.15 and 0.2 < v < 0.9:
+            return 'gray'
+        # Otherwise, find closest hue
+        min_diff = 1.0
+        closest = 'unknown'
+        for name, target_h in hue_map.items():
+            if target_h is None or name in ('black','white','gray'):
+                continue
+            diff = abs(h - target_h)
+            diff = min(diff, 1 - diff)
+            if diff < min_diff:
+                min_diff = diff
+                closest = name
+        return closest
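
The naming logic in _get_closest_color_name first short-circuits on value/saturation (black, white, gray) and only then compares hue distance on a wrap-around scale. A quick standalone check with arbitrary RGB triples (illustrative values only):

from app.models.color_analyzer import ColorAnalyzer

analyzer = ColorAnalyzer()
print(analyzer._get_closest_color_name([200, 30, 40]))    # saturated, hue near 0 -> red
print(analyzer._get_closest_color_name([30, 30, 30]))     # very dark -> black
print(analyzer._get_closest_color_name([245, 245, 245]))  # bright and unsaturated -> white
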
app/schemas/__init__.py
ADDED
File without changes
app/schemas/response.py
ADDED
@@ -0,0 +1,29 @@
+from pydantic import BaseModel
+from typing import List, Dict, Any, Optional
+
+class ClothingItem(BaseModel):
+    item_type: str
+    confidence: float
+    bounding_box: List[int]
+
+class DominantColor(BaseModel):
+    color_name: str
+    rgb: List[int]
+    hex: str
+    percentage: float
+
+class ConfidenceScores(BaseModel):
+    overall: float
+    style: float
+    color: float
+
+class ClothingAnalysisResponse(BaseModel):
+    status: str
+    clothing_items: List[ClothingItem]
+    style_classification: str
+    formality: str
+    texture: str
+    dominant_colors: List[DominantColor]
+    color_distribution: Dict[str, float]
+    detailed_attributes: Dict[str, Any]
+    confidence_scores: ConfidenceScores
app/utils/__init__.py
ADDED
File without changes
app/utils/image_processing.py
ADDED
@@ -0,0 +1,35 @@
+from PIL import Image
+import numpy as np
+from typing import Tuple
+
+def preprocess_image(image: Image.Image, target_size: Tuple[int, int] = (512, 512)) -> Image.Image:
+    """Preprocess image for model inference"""
+    # Convert to RGB if necessary
+    if image.mode != 'RGB':
+        image = image.convert('RGB')
+
+    # Resize while maintaining aspect ratio
+    image.thumbnail(target_size, Image.LANCZOS)
+
+    # Create a new image with the target size and paste the resized image
+    new_image = Image.new('RGB', target_size, (255, 255, 255))
+    paste_position = ((target_size[0] - image.width) // 2,
+                      (target_size[1] - image.height) // 2)
+    new_image.paste(image, paste_position)
+
+    return new_image
+
+def enhance_image_quality(image: Image.Image) -> Image.Image:
+    """Enhance image quality for better analysis"""
+    import cv2
+
+    # Convert to numpy array
+    img_array = np.array(image)
+
+    # Apply slight gaussian blur to reduce noise
+    blurred = cv2.GaussianBlur(img_array, (3, 3), 0)
+
+    # Enhance contrast
+    enhanced = cv2.addWeighted(img_array, 1.5, blurred, -0.5, 0)
+
+    return Image.fromarray(enhanced)
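
preprocess_image letterboxes any input onto a white 512x512 RGB canvas, so the downstream models always see the same shape. A small sketch of the effect (the 200x800 input size is just an example):

from PIL import Image
from app.utils.image_processing import preprocess_image

tall = Image.new("RGBA", (200, 800), (10, 20, 30, 255))  # arbitrary tall test image
out = preprocess_image(tall)
print(out.mode, out.size)  # RGB (512, 512) -- content centered, white padding on the sides
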
app/utils/text_processing.py
ADDED
File without changes
frontend/index.html
ADDED
@@ -0,0 +1,77 @@
+<!DOCTYPE html>
+<html lang="en">
+<head>
+    <meta charset="UTF-8">
+    <meta name="viewport" content="width=device-width, initial-scale=1.0">
+    <title>Clothing Attribute Detection</title>
+    <link rel="stylesheet" href="/static/style.css">
+</head>
+<body>
+    <div class="container">
+        <header>
+            <h1>Clothing Attribute Detection System</h1>
+            <p>Upload an image to analyze clothing attributes, style, and colors</p>
+        </header>
+
+        <div class="upload-section">
+            <div class="upload-area" id="uploadArea">
+                <input type="file" id="imageInput" accept="image/*" hidden>
+                <div class="upload-content">
+                    <i class="upload-icon">📁</i>
+                    <p>Click to upload or drag and drop an image</p>
+                    <p class="upload-hint">Supports: JPG, PNG, GIF</p>
+                </div>
+            </div>
+            <button id="analyzeBtn" class="analyze-btn" disabled>Analyze Image</button>
+        </div>
+
+        <div class="preview-section" id="previewSection" style="display: none;">
+            <h3>Image Preview</h3>
+            <img id="imagePreview" alt="Preview">
+        </div>
+
+        <div class="loading" id="loading" style="display: none;">
+            <div class="spinner"></div>
+            <p>Analyzing image...</p>
+        </div>
+
+        <div class="results-section" id="results" style="display: none;">
+            <h2>Analysis Results</h2>
+
+            <div class="results-grid">
+                <div class="result-card">
+                    <h3>Style Classification</h3>
+                    <div class="result-value" id="styleResult"></div>
+                </div>
+
+                <div class="result-card">
+                    <h3>Formality</h3>
+                    <div class="result-value" id="formalityResult"></div>
+                </div>
+
+                <div class="result-card">
+                    <h3>Texture</h3>
+                    <div class="result-value" id="textureResult"></div>
+                </div>
+
+                <div class="result-card">
+                    <h3>Detected Items</h3>
+                    <div class="result-value" id="itemsResult"></div>
+                </div>
+            </div>
+
+            <div class="color-analysis">
+                <h3>Color Analysis</h3>
+                <div class="color-palette" id="colorPalette"></div>
+            </div>
+
+            <div class="confidence-scores">
+                <h3>Confidence Scores</h3>
+                <div id="confidenceScores"></div>
+            </div>
+        </div>
+    </div>
+
+    <script src="/static/script.js"></script>
+</body>
+</html>
frontend/script.js
ADDED
@@ -0,0 +1,165 @@
+class ClothingAnalyzer {
+    constructor() {
+        this.imageInput = document.getElementById('imageInput');
+        this.uploadArea = document.getElementById('uploadArea');
+        this.analyzeBtn = document.getElementById('analyzeBtn');
+        this.previewSection = document.getElementById('previewSection');
+        this.imagePreview = document.getElementById('imagePreview');
+        this.loading = document.getElementById('loading');
+        this.results = document.getElementById('results');
+
+        this.initializeEventListeners();
+    }
+
+    initializeEventListeners() {
+        // Upload area click
+        this.uploadArea.addEventListener('click', () => {
+            this.imageInput.click();
+        });
+
+        // File input change
+        this.imageInput.addEventListener('change', (e) => {
+            this.handleFileSelect(e.target.files[0]);
+        });
+
+        // Drag and drop
+        this.uploadArea.addEventListener('dragover', (e) => {
+            e.preventDefault();
+            this.uploadArea.classList.add('dragover');
+        });
+
+        this.uploadArea.addEventListener('dragleave', () => {
+            this.uploadArea.classList.remove('dragover');
+        });
+
+        this.uploadArea.addEventListener('drop', (e) => {
+            e.preventDefault();
+            this.uploadArea.classList.remove('dragover');
+            this.handleFileSelect(e.dataTransfer.files[0]);
+        });
+
+        // Analyze button
+        this.analyzeBtn.addEventListener('click', () => {
+            this.analyzeImage();
+        });
+    }
+
+    handleFileSelect(file) {
+        if (!file) return;
+
+        // Validate file type
+        if (!file.type.startsWith('image/')) {
+            alert('Please select a valid image file');
+            return;
+        }
+
+        // Show preview
+        const reader = new FileReader();
+        reader.onload = (e) => {
+            this.imagePreview.src = e.target.result;
+            this.previewSection.style.display = 'block';
+            this.analyzeBtn.disabled = false;
+            this.results.style.display = 'none';
+        };
+        reader.readAsDataURL(file);
+
+        // Store file for analysis
+        this.selectedFile = file;
+    }
+
+    async analyzeImage() {
+        if (!this.selectedFile) return;
+
+        // Show loading
+        this.loading.style.display = 'block';
+        this.results.style.display = 'none';
+        this.analyzeBtn.disabled = true;
+
+        try {
+            // Create form data
+            const formData = new FormData();
+            formData.append('file', this.selectedFile);
+
+            // Send request
+            const response = await fetch('/analyze', {
+                method: 'POST',
+                body: formData
+            });
+
+            if (!response.ok) {
+                throw new Error(`HTTP error! status: ${response.status}`);
+            }
+
+            const data = await response.json();
+            this.displayResults(data);
+
+        } catch (error) {
+            console.error('Analysis failed:', error);
+            alert('Analysis failed. Please try again.');
+        } finally {
+            this.loading.style.display = 'none';
+            this.analyzeBtn.disabled = false;
+        }
+    }
+
+    displayResults(data) {
+        // Display basic attributes
+        document.getElementById('styleResult').textContent = data.style_classification;
+        document.getElementById('formalityResult').textContent = data.formality;
+        document.getElementById('textureResult').textContent = data.texture;
+
+        // Display detected items
+        const itemsHtml = data.clothing_items
+            .map(item => `<div class="item-tag">${item.item_type} (${Math.round(item.confidence * 100)}%)</div>`)
+            .join('');
+        document.getElementById('itemsResult').innerHTML = itemsHtml || 'No items detected';
+
+        // Display color palette
+        this.displayColorPalette(data.dominant_colors);
+
+        // Display confidence scores
+        this.displayConfidenceScores(data.confidence_scores);
+
+        // Show results
+        this.results.style.display = 'block';
+    }
+
+    displayColorPalette(colors) {
+        const colorPalette = document.getElementById('colorPalette');
+
+        const colorHtml = colors.map(color => `
+            <div class="color-item">
+                <div class="color-swatch" style="background-color: ${color.hex}"></div>
+                <div>
+                    <div style="font-weight: 600">${color.color_name}</div>
+                    <div style="font-size: 0.9rem; color: #64748b">${color.percentage}%</div>
+                </div>
+            </div>
+        `).join('');
+
+        colorPalette.innerHTML = colorHtml;
+    }
+
+    displayConfidenceScores(scores) {
+        const confidenceContainer = document.getElementById('confidenceScores');
+
+        const scoresHtml = Object.entries(scores).map(([key, value]) => `
+            <div style="margin-bottom: 15px">
+                <div style="display: flex; justify-content: space-between; margin-bottom: 5px">
+                    <span style="text-transform: capitalize">${key}</span>
+                    <span>${Math.round(value * 100)}%</span>
+                </div>
+                <div class="confidence-bar">
+                    <div class="confidence-fill" style="width: ${value * 100}%"></div>
+                </div>
+            </div>
+        `).join('');
+
+        confidenceContainer.innerHTML = scoresHtml;
+    }
+}
+
+// Initialize the application
+document.addEventListener('DOMContentLoaded', () => {
+    new ClothingAnalyzer();
+});
frontend/style.css
ADDED
@@ -0,0 +1,231 @@
+* {
+    margin: 0;
+    padding: 0;
+    box-sizing: border-box;
+}
+
+body {
+    font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;
+    background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
+    min-height: 100vh;
+    padding: 20px;
+}
+
+.container {
+    max-width: 1200px;
+    margin: 0 auto;
+    background: white;
+    border-radius: 20px;
+    box-shadow: 0 20px 40px rgba(0,0,0,0.1);
+    overflow: hidden;
+}
+
+header {
+    background: linear-gradient(135deg, #4f46e5, #7c3aed);
+    color: white;
+    padding: 40px;
+    text-align: center;
+}
+
+header h1 {
+    font-size: 2.5rem;
+    margin-bottom: 10px;
+}
+
+header p {
+    font-size: 1.1rem;
+    opacity: 0.9;
+}
+
+.upload-section {
+    padding: 40px;
+    text-align: center;
+}
+
+.upload-area {
+    border: 3px dashed #cbd5e1;
+    border-radius: 12px;
+    padding: 60px 20px;
+    cursor: pointer;
+    transition: all 0.3s ease;
+    background: #f8fafc;
+}
+
+.upload-area:hover {
+    border-color: #4f46e5;
+    background: #f1f5f9;
+}
+
+.upload-area.dragover {
+    border-color: #4f46e5;
+    background: #e0e7ff;
+}
+
+.upload-content {
+    display: flex;
+    flex-direction: column;
+    align-items: center;
+    gap: 15px;
+}
+
+.upload-icon {
+    font-size: 3rem;
+}
+
+.upload-hint {
+    color: #64748b;
+    font-size: 0.9rem;
+}
+
+.analyze-btn {
+    background: linear-gradient(135deg, #4f46e5, #7c3aed);
+    color: white;
+    border: none;
+    padding: 15px 40px;
+    border-radius: 50px;
+    font-size: 1.1rem;
+    cursor: pointer;
+    margin-top: 20px;
+    transition: all 0.3s ease;
+}
+
+.analyze-btn:enabled:hover {
+    transform: translateY(-2px);
+    box-shadow: 0 10px 25px rgba(79, 70, 229, 0.4);
+}
+
+.analyze-btn:disabled {
+    background: #94a3b8;
+    cursor: not-allowed;
+}
+
+.preview-section {
+    padding: 40px;
+    text-align: center;
+}
+
+.preview-section img {
+    max-width: 300px;
+    max-height: 300px;
+    border-radius: 12px;
+    box-shadow: 0 10px 25px rgba(0,0,0,0.1);
+}
+
+.loading {
+    padding: 60px;
+    text-align: center;
+}
+
+.spinner {
+    width: 50px;
+    height: 50px;
+    border: 4px solid #e2e8f0;
+    border-top: 4px solid #4f46e5;
+    border-radius: 50%;
+    animation: spin 1s linear infinite;
+    margin: 0 auto 20px;
+}
+
+@keyframes spin {
+    0% { transform: rotate(0deg); }
+    100% { transform: rotate(360deg); }
+}
+
+.results-section {
+    padding: 40px;
+}
+
+.results-grid {
+    display: grid;
+    grid-template-columns: repeat(auto-fit, minmax(250px, 1fr));
+    gap: 20px;
+    margin-bottom: 30px;
+}
+
+.result-card {
+    background: #f8fafc;
+    padding: 25px;
+    border-radius: 12px;
+    border-left: 4px solid #4f46e5;
+}
+
+.result-card h3 {
+    color: #1e293b;
+    margin-bottom: 10px;
+    font-size: 1.1rem;
+}
+
+.result-value {
+    font-size: 1.3rem;
+    font-weight: 600;
+    color: #4f46e5;
+    text-transform: capitalize;
+}
+
+.color-analysis, .confidence-scores {
+    background: #f8fafc;
+    padding: 25px;
+    border-radius: 12px;
+    margin-bottom: 20px;
+}
+
+.color-palette {
+    display: flex;
+    gap: 15px;
+    margin-top: 15px;
+    flex-wrap: wrap;
+}
+
+.color-item {
+    display: flex;
+    align-items: center;
+    gap: 10px;
+    padding: 10px 15px;
+    background: white;
+    border-radius: 25px;
+    box-shadow: 0 2px 8px rgba(0,0,0,0.1);
+}
+
+.color-swatch {
+    width: 25px;
+    height: 25px;
+    border-radius: 50%;
+    border: 2px solid #e2e8f0;
+}
+
+.confidence-bar {
+    background: #e2e8f0;
+    border-radius: 25px;
+    height: 8px;
+    margin-top: 5px;
+    overflow: hidden;
+}
+
+.confidence-fill {
+    height: 100%;
+    background: linear-gradient(90deg, #10b981, #34d399);
+    transition: width 0.3s ease;
+}
+
+@media (max-width: 768px) {
+    .container {
+        margin: 10px;
+        border-radius: 12px;
+    }
+
+    header {
+        padding: 30px 20px;
+    }
+
+    header h1 {
+        font-size: 2rem;
+    }
+
+    .upload-section, .results-section {
+        padding: 30px 20px;
+    }
+
+    .results-grid {
+        grid-template-columns: 1fr;
+    }
+}
requirements.txt
ADDED
@@ -0,0 +1,21 @@
+fastapi
+uvicorn[standard]
+python-multipart
+
+torch
+torchvision
+transformers
+pillow
+opencv-python
+
+numpy
+scikit-image
+scikit-learn
+
+requests
+aiofiles
+
+pydantic
+python-dotenv
+webcolors
+timm
setup.py
ADDED
@@ -0,0 +1,11 @@
+from setuptools import setup, find_packages
+
+with open("requirements.txt") as f:
+    requirements = f.read().splitlines()
+
+setup(
+    name = "Clothing-Attribute_detection-Computer-Vision",
+    version = "0.0.1",
+    packages = find_packages(),
+    install_requires = requirements,
+)