# aiimagedetector / app.py
import gradio as gr
from transformers import AutoImageProcessor, AutoModelForImageClassification
from PIL import Image
import torch
import torch.nn.functional as F
# ===================== Startup Info =====================
print("\n" + "="*80)
print("πŸ” BEST FREE AI IMAGE DETECTOR 2025 - ATEEQQ MODEL ONLY")
print("="*80)
print("\nBased on Ateeqq 2025 benchmarks:")
print("βœ“ Diffusion detection (Midjourney, DALL-E, Stable Diffusion): 88-94% accuracy")
print("βœ“ CNN + Semantic Analysis approach")
print("="*80 + "\n")
# ===================== Load Model =====================
MODEL_NAME = "Ateeqq/ai-vs-human-image-detector"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"πŸ–₯️ Device: {str(device).upper()}\n")
try:
    processor = AutoImageProcessor.from_pretrained(MODEL_NAME)
    model = AutoModelForImageClassification.from_pretrained(MODEL_NAME).to(device)
    model.eval()
    print(f"βœ… Successfully loaded model: {MODEL_NAME}")
except Exception as e:
    raise RuntimeError(f"❌ Failed to load model: {e}")
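# Optional startup check (a minimal sketch, not required by the app): print the
# label mapping the checkpoint ships with, so the index-to-class assignment used
# in predict() can be verified. The exact label names (e.g. "ai" vs "hum") depend
# on the model config and are not assumed here.
print(f"πŸ”Ž Label mapping: {model.config.id2label}\n")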
# ===================== Prediction Function =====================
def predict(image: Image.Image):
    if image is None:
        return "❌ No image uploaded", 0.0, "Upload an image to analyze"
    if image.mode != "RGB":
        image = image.convert("RGB")
    try:
        inputs = processor(images=image, return_tensors="pt").to(device)
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        probs = F.softmax(logits, dim=1)[0].cpu().numpy()

        # Resolve which output index corresponds to the AI class from the model
        # config instead of assuming a fixed order; fall back to index 1.
        id2label = {i: label.lower() for i, label in model.config.id2label.items()}
        ai_idx = next((i for i, label in id2label.items() if "ai" in label), 1)
        ai_prob = float(probs[ai_idx])
        real_prob = 1.0 - ai_prob

        pred = "🚨 AI-GENERATED" if ai_prob > real_prob else "βœ… REAL PHOTO"
        confidence = max(ai_prob, real_prob)

        # Build simple report
        report = f"""
╔════════════════════════════════════════════════════════╗
β•‘          πŸ”¬ Ateeqq AI Image Detection Report           β•‘
β•šβ•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•
🎯 PREDICTION: {pred}
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
AI Probability:        {ai_prob:.4f}
Real Probability:      {real_prob:.4f}
Detection Confidence:  {confidence:.4f}
βœ… Detected by: Ateeqq/ai-vs-human-image-detector
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
β€’ High accuracy on DALL-E 3, Midjourney v6+, Stable Diffusion
β€’ CNN + Semantic Analysis approach
β€’ Robust to post-processed AI images
β€’ Free to use for research or analysis
"""
        return pred, round(ai_prob, 4), report
    except Exception as e:
        return f"❌ Error: {e}", 0.0, f"Processing failed: {e}"
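# Quick local usage sketch (assumes an image file named "sample.jpg" next to this
# script — a hypothetical path, not part of the Space):
#   label, ai_score, report_text = predict(Image.open("sample.jpg"))
#   print(label, ai_score)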
# ===================== Gradio Interface =====================
demo = gr.Interface(
    fn=predict,
    inputs=gr.Image(type="pil", label="πŸ“Έ Upload Image"),
    outputs=[
        gr.Textbox(label="🎯 Detection Result"),
        gr.Number(label="πŸ“Š AI Score (0.0-1.0)"),
        gr.Textbox(label="πŸ“‹ Detection Report", lines=25),
    ],
    title="πŸ” Ateeqq AI Image Detector (2025)",
    description="Detect AI-generated images with the Ateeqq/ai-vs-human-image-detector model from Hugging Face. Works best on DALL-E 3, Midjourney v6+, and Stable Diffusion outputs.",
)
if __name__ == "__main__":
    demo.launch()
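# Calling the deployed Space programmatically (a sketch, assuming the gradio_client
# package is installed and the Space id is "karthikeya1212/aiimagedetector" — the
# id is inferred from the file header and may differ):
#   from gradio_client import Client, handle_file
#   client = Client("karthikeya1212/aiimagedetector")
#   result = client.predict(handle_file("path/to/image.jpg"), api_name="/predict")
#   print(result)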