Update app.py
app.py
CHANGED
@@ -4,6 +4,7 @@ from PIL import Image
 import torch
 import torch.nn.functional as F
 
+# ===================== Startup Info =====================
 print("\n" + "="*80)
 print("🔍 BEST FREE AI IMAGE DETECTOR 2025 - ATEEQQ MODEL ONLY")
 print("="*80)
@@ -12,90 +13,73 @@ print("✅ Diffusion detection (Midjourney, DALL-E, Stable Diffusion): 88-94% ac
 print("✅ CNN + Semantic Analysis approach")
 print("="*80 + "\n")
 
-#
+# ===================== Load Model =====================
 MODEL_NAME = "Ateeqq/ai-vs-human-image-detector"
-MODEL_WEIGHT = 1.0  # 100% weight
-
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 print(f"🖥️ Device: {str(device).upper()}\n")
 
-# Load Ateeqq model
 try:
     processor = AutoImageProcessor.from_pretrained(MODEL_NAME)
     model = AutoModelForImageClassification.from_pretrained(MODEL_NAME).to(device)
     model.eval()
     print(f"✅ Successfully loaded model: {MODEL_NAME}")
 except Exception as e:
-    raise RuntimeError(f"❌ Failed to load model
+    raise RuntimeError(f"❌ Failed to load model: {str(e)}")
 
-
+# ===================== Prediction Function =====================
+def predict(image: Image.Image):
     if image is None:
         return "❌ No image uploaded", 0.0, "Upload an image to analyze"
 
-
-
-    image = image.convert('RGB')
+    if image.mode != "RGB":
+        image = image.convert("RGB")
 
+    try:
         inputs = processor(images=image, return_tensors="pt").to(device)
         with torch.no_grad():
             outputs = model(**inputs)
             logits = outputs.logits
             probs = F.softmax(logits, dim=1)[0].cpu().numpy()
 
-        real_prob = float(probs[0])
-
+        real_prob, ai_prob = float(probs[0]), float(probs[1])
+        pred = "🚨 AI-GENERATED" if ai_prob > real_prob else "✅ REAL PHOTO"
+        confidence = max(ai_prob, real_prob)
 
-
-        threshold = 0.50
-        is_ai = weighted_ai_score > threshold
-        final_pred = "🚨 AI-GENERATED" if is_ai else "✅ REAL PHOTO"
-        confidence = max(weighted_ai_score, 1 - weighted_ai_score)
-
-        # Build report
+        # Build simple report
         report = f"""
-
-
-
+──────────────────────────────────────────────────────────
+│ 🔬 Ateeqq AI Image Detection Report │
+──────────────────────────────────────────────────────────
+
+🎯 PREDICTION: {pred}
+────────────────────────────────────────────────────────
+AI Probability: {ai_prob:.4f}
+Real Probability: {real_prob:.4f}
+Detection Confidence: {confidence:.4f}
 
-
-
-
-
-
-
-──────────────────────────────────────────────────────────────────────
-Model: Ateeqq/ai-vs-human-image-detector
-  ├─ Detection Type: SigLIP + Semantic Analysis
-  ├─ Vote: {"🤖 AI-Generated" if ai_prob > real_prob else "✅ Real Photo"}
-  ├─ AI Score: {ai_prob:.4f} | Real Score: {real_prob:.4f}
-  ├─ Confidence: {confidence:.4f}
-  └─ Proven Accuracy: 88-94% on diffusion models
-──────────────────────────────────────────────────────────────────────
-✅ WHAT IT DETECTS:
-  ✓ DALL-E 3, Midjourney v6+, Stable Diffusion
-  ✓ Flux, GANs, Hybrid AI generators
-  ✓ Post-processed AI images
-⚠️ LIMITATIONS:
-  ✗ May struggle with heavily edited/compressed images
-  ✗ Hybrid real+AI images are challenging
-──────────────────────────────────────────────────────────────────────────
+✅ Detected by: Ateeqq/ai-vs-human-image-detector
+────────────────────────────────────────────────────────
+• High accuracy on DALL-E 3, Midjourney v6+, Stable Diffusion
+• CNN + Semantic Analysis approach
+• Robust for post-processed AI images
+• Free to use for research or analysis
 """
-        return
+        return pred, round(ai_prob, 4), report
 
     except Exception as e:
         return f"❌ Error: {str(e)}", 0.0, f"Processing failed: {str(e)}"
 
-# Gradio Interface
+# ===================== Gradio Interface =====================
 demo = gr.Interface(
     fn=predict,
     inputs=gr.Image(type="pil", label="📸 Upload Image"),
     outputs=[
         gr.Textbox(label="🎯 Detection Result"),
         gr.Number(label="📊 AI Score (0.0-1.0)"),
-        gr.Textbox(label="📋
+        gr.Textbox(label="📋 Detection Report", lines=25)
     ],
-    title="🔍
-    description="
+    title="🔍 Ateeqq AI Image Detector (2025)",
+    description="Detect AI-generated images using the official Ateeqq model from Hugging Face. Works best for DALL-E 3, Midjourney v6+, Stable Diffusion."
 )
 
 if __name__ == "__main__":
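
Review note on the new predict(): it unpacks probs[0] as the real/human probability and probs[1] as the AI probability. That index order is an assumption about the model's label mapping, so here is a minimal standalone sketch (same transformers/PIL stack as app.py; "test.jpg" is a hypothetical local file) to confirm the ordering from model.config.id2label before trusting the report:

import torch
import torch.nn.functional as F
from PIL import Image
from transformers import AutoImageProcessor, AutoModelForImageClassification

MODEL_NAME = "Ateeqq/ai-vs-human-image-detector"
processor = AutoImageProcessor.from_pretrained(MODEL_NAME)
model = AutoModelForImageClassification.from_pretrained(MODEL_NAME)
model.eval()

# The config's id2label mapping says which logit index is the AI class.
print(model.config.id2label)

# Run one image through the same preprocessing + softmax path as predict().
image = Image.open("test.jpg").convert("RGB")  # hypothetical sample image
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    probs = F.softmax(model(**inputs).logits, dim=1)[0]
print({model.config.id2label[i]: round(float(p), 4) for i, p in enumerate(probs)})

If id2label does not put the real/human class at index 0, the real_prob, ai_prob unpacking in predict() should be swapped to match.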