karthikeya1212 committed
Commit a4975c6 · verified · 1 Parent(s): 1150a03

Update app.py

Files changed (1)
  1. app.py +32 -48
app.py CHANGED
@@ -4,6 +4,7 @@ from PIL import Image
 import torch
 import torch.nn.functional as F
 
+# ===================== Startup Info =====================
 print("\n" + "="*80)
 print("🔍 BEST FREE AI IMAGE DETECTOR 2025 - ATEEQQ MODEL ONLY")
 print("="*80)
@@ -12,90 +13,73 @@ print("✓ Diffusion detection (Midjourney, DALL-E, Stable Diffusion): 88-94% ac
 print("✓ CNN + Semantic Analysis approach")
 print("="*80 + "\n")
 
-# Use only Ateeqq model
+# ===================== Load Model =====================
 MODEL_NAME = "Ateeqq/ai-vs-human-image-detector"
-MODEL_WEIGHT = 1.0  # 100% weight
-
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 print(f"🖥️ Device: {str(device).upper()}\n")
 
-# Load Ateeqq model
 try:
     processor = AutoImageProcessor.from_pretrained(MODEL_NAME)
     model = AutoModelForImageClassification.from_pretrained(MODEL_NAME).to(device)
     model.eval()
     print(f"✅ Successfully loaded model: {MODEL_NAME}")
 except Exception as e:
-    raise RuntimeError(f"❌ Failed to load model {MODEL_NAME}. Error: {e}")
+    raise RuntimeError(f"❌ Failed to load model: {str(e)}")
 
-def predict(image):
+# ===================== Prediction Function =====================
+def predict(image: Image.Image):
     if image is None:
         return "❌ No image uploaded", 0.0, "Upload an image to analyze"
 
-    try:
-        if image.mode != 'RGB':
-            image = image.convert('RGB')
+    if image.mode != "RGB":
+        image = image.convert("RGB")
 
+    try:
         inputs = processor(images=image, return_tensors="pt").to(device)
         with torch.no_grad():
            outputs = model(**inputs)
            logits = outputs.logits
            probs = F.softmax(logits, dim=1)[0].cpu().numpy()
 
-        real_prob = float(probs[0])
-        ai_prob = float(probs[1])
-
-        weighted_ai_score = ai_prob * MODEL_WEIGHT
-        threshold = 0.50
-        is_ai = weighted_ai_score > threshold
-        final_pred = "🚨 AI-GENERATED" if is_ai else "✅ REAL PHOTO"
-        confidence = max(weighted_ai_score, 1 - weighted_ai_score)
-
-        # Build report
+        real_prob, ai_prob = float(probs[0]), float(probs[1])
+        pred = "🚨 AI-GENERATED" if ai_prob > real_prob else "✅ REAL PHOTO"
+        confidence = max(ai_prob, real_prob)
+
+        # Build simple report
         report = f"""
-╔══════════════════════════════════════════════════════════════════════════╗
-║ 🔬 AI IMAGE DETECTION ANALYSIS - ATEEQQ MODEL ONLY ║
-╚══════════════════════════════════════════════════════════════════════════╝
-
-🎯 PREDICTION: {final_pred}
-━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
-Weighted AI Probability: {weighted_ai_score:.4f}
-Detection Confidence: {confidence:.4f}
-━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
-📊 MODEL ANALYSIS:
-━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
-Model: Ateeqq/ai-vs-human-image-detector
-├─ Detection Type: SigLIP + Semantic Analysis
-├─ Vote: {"🤖 AI-Generated" if ai_prob > real_prob else "✓ Real Photo"}
-├─ AI Score: {ai_prob:.4f} | Real Score: {real_prob:.4f}
-├─ Confidence: {confidence:.4f}
-├─ Proven Accuracy: 88-94% on diffusion models
-━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
-✅ WHAT IT DETECTS:
-✓ DALL-E 3, Midjourney v6+, Stable Diffusion
-✓ Flux, GANs, Hybrid AI generators
-✓ Post-processed AI images
-⚠️ LIMITATIONS:
-✗ May struggle with heavily edited/compressed images
-✗ Hybrid real+AI images are challenging
-╚══════════════════════════════════════════════════════════════════════════╝
+╔════════════════════════════════════════════════════════╗
+║ 🔬 Ateeqq AI Image Detection Report ║
+╚════════════════════════════════════════════════════════╝
+
+🎯 PREDICTION: {pred}
+━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
+AI Probability: {ai_prob:.4f}
+Real Probability: {real_prob:.4f}
+Detection Confidence: {confidence:.4f}
+
+✅ Detected by: Ateeqq/ai-vs-human-image-detector
+━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
+• High accuracy on DALL-E 3, Midjourney v6+, Stable Diffusion
+• CNN + Semantic Analysis approach
+• Robust for post-processed AI images
+• Free to use for research or analysis
 """
-        return final_pred, round(weighted_ai_score, 4), report
+        return pred, round(ai_prob, 4), report
 
     except Exception as e:
         return f"❌ Error: {str(e)}", 0.0, f"Processing failed: {str(e)}"
 
-# Gradio Interface
+# ===================== Gradio Interface =====================
 demo = gr.Interface(
     fn=predict,
     inputs=gr.Image(type="pil", label="📸 Upload Image"),
     outputs=[
         gr.Textbox(label="🎯 Detection Result"),
         gr.Number(label="📊 AI Score (0.0-1.0)"),
-        gr.Textbox(label="📋 Research-Based Analysis", lines=25)
+        gr.Textbox(label="📋 Detection Report", lines=25)
     ],
-    title="🔍 Advanced AI Image Detector 2025 - Ateeqq Only",
-    description="Detection using the proven Ateeqq model (88-94% accuracy on modern AI generators)."
+    title="🔍 Ateeqq AI Image Detector (2025)",
+    description="Detect AI-generated images using the official Ateeqq model from Hugging Face. Works best for DALL-E 3, Midjourney v6+, Stable Diffusion."
 )
 
 if __name__ == "__main__":
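
Note on the simplified scoring: the new predict still reads probs[1] as the AI probability and probs[0] as the real probability, and nothing in this diff verifies that class order. A hedged check of the checkpoint's id2label mapping (a standard field on transformers classification models) is enough to confirm which index is the AI class before the rounded ai_prob is surfaced as the "AI Score":

# Sketch: confirm the Ateeqq checkpoint's class order before trusting probs[1].
# Assumes only that the checkpoint ships an id2label mapping, as transformers
# image-classification models normally do.
from transformers import AutoModelForImageClassification

m = AutoModelForImageClassification.from_pretrained("Ateeqq/ai-vs-human-image-detector")
print(m.config.id2label)  # inspect which index corresponds to the AI label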
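
For a quick local sanity check of the new three-part return value (label, rounded AI probability, report text), a smoke test along these lines should work, assuming this app.py is importable from the working directory; by Python semantics, code under the if __name__ == "__main__": guard does not run on import, though the module-level model load still does:

# Hedged smoke test for predict(); "app" here refers to this Space's app.py.
from PIL import Image

import app  # importing builds the Gradio Interface and loads the model

label, score, report = app.predict(Image.new("RGB", (512, 512), "white"))
print(label, score)  # a label string and a probability rounded to 4 places
print(report)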