import gradio as gr
from transformers import AutoImageProcessor, AutoModelForImageClassification
from PIL import Image
import torch
import torch.nn.functional as F

# ===================== Startup Info =====================
print("\n" + "=" * 80)
print("🔍 BEST FREE AI IMAGE DETECTOR 2025 - ATEEQQ MODEL ONLY")
print("=" * 80)
print("\nBased on Ateeqq 2025 benchmarks:")
print("✓ Diffusion detection (Midjourney, DALL-E, Stable Diffusion): 88-94% accuracy")
print("✓ CNN + Semantic Analysis approach")
print("=" * 80 + "\n")

# ===================== Load Model =====================
MODEL_NAME = "Ateeqq/ai-vs-human-image-detector"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"🖥️ Device: {str(device).upper()}\n")

try:
    processor = AutoImageProcessor.from_pretrained(MODEL_NAME)
    model = AutoModelForImageClassification.from_pretrained(MODEL_NAME).to(device)
    model.eval()
    print(f"✅ Successfully loaded model: {MODEL_NAME}")
    # Print the model's own class mapping so the index order used below can be verified.
    print(f"   Label mapping: {model.config.id2label}")
except Exception as e:
    raise RuntimeError(f"❌ Failed to load model: {str(e)}") from e

# ===================== Prediction Function =====================
def predict(image: Image.Image):
    if image is None:
        return "❌ No image uploaded", 0.0, "Upload an image to analyze"

    if image.mode != "RGB":
        image = image.convert("RGB")

    try:
        inputs = processor(images=image, return_tensors="pt").to(device)
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        probs = F.softmax(logits, dim=1)[0].cpu().numpy()

        # NOTE: the index-to-class order below (0 = real, 1 = AI) is assumed;
        # confirm it against model.config.id2label printed at startup.
        real_prob, ai_prob = float(probs[0]), float(probs[1])
        pred = "🚨 AI-GENERATED" if ai_prob > real_prob else "✅ REAL PHOTO"
        confidence = max(ai_prob, real_prob)

        # Build simple report
        report = f"""
╔════════════════════════════════════════════════════════╗
║          🔬 Ateeqq AI Image Detection Report            ║
╚════════════════════════════════════════════════════════╝

🎯 PREDICTION: {pred}
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
AI Probability:        {ai_prob:.4f}
Real Probability:      {real_prob:.4f}
Detection Confidence:  {confidence:.4f}

✅ Detected by: Ateeqq/ai-vs-human-image-detector
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
• High accuracy on DALL-E 3, Midjourney v6+, Stable Diffusion
• CNN + Semantic Analysis approach
• Robust for post-processed AI images
• Free to use for research or analysis
"""
        return pred, round(ai_prob, 4), report
    except Exception as e:
        return f"❌ Error: {str(e)}", 0.0, f"Processing failed: {str(e)}"

# ===================== Gradio Interface =====================
demo = gr.Interface(
    fn=predict,
    inputs=gr.Image(type="pil", label="📸 Upload Image"),
    outputs=[
        gr.Textbox(label="🎯 Detection Result"),
        gr.Number(label="📊 AI Score (0.0-1.0)"),
        gr.Textbox(label="📋 Detection Report", lines=25),
    ],
    title="🔍 Ateeqq AI Image Detector (2025)",
    description="Detect AI-generated images using the official Ateeqq model from Hugging Face. Works best for DALL-E 3, Midjourney v6+, and Stable Diffusion.",
)

if __name__ == "__main__":
    demo.launch()
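
# ---------------------------------------------------------------------------
# Optional smoke test (a minimal sketch, not part of the original script):
# calls predict() directly without launching the Gradio UI. The file name
# "sample.jpg" is a placeholder path, assumed only for illustration.
#
#     label, score, report = predict(Image.open("sample.jpg"))
#     print(label, score)
#     print(report)
#
# Uncomment and place inside the __main__ guard above to run it locally.
# ---------------------------------------------------------------------------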