Update app.py
app.py CHANGED
@@ -7,9 +7,9 @@ import numpy as np
 
 # Multiple specialized models for ensemble detection
 MODELS = [
-    "Ateeqq/ai-vs-human-image-detector",  # SigLIP-based
-    "umm-maybe/AI-image-detector",  # Vision Transformer
-    "
+    "Ateeqq/ai-vs-human-image-detector",  # SigLIP-based - Best for DALL-E 3, Midjourney
+    "umm-maybe/AI-image-detector",  # Vision Transformer - Good for Stable Diffusion
+    "facebook/dinov2-small",  # Meta's DINOv2 - Excellent feature detector
 ]
 
 print("Loading models for ensemble detection...")

@@ -51,15 +51,31 @@ def predict(image):
     # Get predictions from all models
     for i, (processor, model) in enumerate(zip(processors_list, models_list)):
         try:
[removed lines 54-62 are not captured in this view]
+            # Special handling for DINOv2 (feature extractor)
+            if i == 2:  # DINOv2
+                from torchvision import transforms
+                transform = transforms.Compose([
+                    transforms.Resize((224, 224)),
+                    transforms.ToTensor(),
+                    transforms.Normalize(mean=[0.485, 0.456, 0.406],
+                                         std=[0.229, 0.224, 0.225])
+                ])
+                img_tensor = transform(image).unsqueeze(0).to(device)
+                with torch.no_grad():
+                    features = model(img_tensor)
+                # Use feature statistics for detection
+                feature_mean = features.mean()
+                feature_std = features.std()
+                ai_prob = float((feature_std.cpu() / (feature_mean.cpu() + 1e-6)).clamp(0, 1))
+                real_prob = 1.0 - ai_prob
+            else:
+                inputs = processor(images=image, return_tensors="pt").to(device)
+                with torch.no_grad():
+                    outputs = model(**inputs)
+                logits = outputs.logits
+                probs = F.softmax(logits, dim=1)[0].cpu().numpy()
+                real_prob = float(probs[0])
+                ai_prob = float(probs[1])
 
             all_real_probs.append(real_prob)
             all_ai_probs.append(ai_prob)
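The changed hunks assume that processors_list, models_list, and device already exist elsewhere in app.py; that setup is not part of this diff. As a rough sketch only, here is one plausible way the three checkpoints could be loaded with the transformers Auto classes (the actual loading code in app.py, and therefore the exact shape of the DINOv2 output, may differ):

    import torch
    from transformers import (AutoImageProcessor, AutoModel,
                              AutoModelForImageClassification)

    MODELS = [
        "Ateeqq/ai-vs-human-image-detector",
        "umm-maybe/AI-image-detector",
        "facebook/dinov2-small",
    ]

    device = "cuda" if torch.cuda.is_available() else "cpu"

    processors_list, models_list = [], []
    for idx, name in enumerate(MODELS):
        processors_list.append(AutoImageProcessor.from_pretrained(name))
        if idx == 2:
            # facebook/dinov2-small ships as a backbone with no classification
            # head, so it is loaded as a plain feature extractor here. Note that
            # a transformers AutoModel returns a ModelOutput object, so code like
            # the DINOv2 branch above would read .last_hidden_state in that case;
            # a torch.hub DINOv2 backbone returns a raw feature tensor instead.
            model = AutoModel.from_pretrained(name)
        else:
            model = AutoModelForImageClassification.from_pretrained(name)
        models_list.append(model.to(device).eval())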
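The new DINOv2 branch does not run a classifier at all: it maps the spread of the backbone features to a pseudo-probability, ai_prob = clamp(std / (mean + 1e-6), 0, 1), a coefficient-of-variation style score. A standalone restatement of that arithmetic with a made-up feature tensor, just to show how the numbers behave:

    import torch

    def cv_score(features: torch.Tensor) -> float:
        # Same arithmetic as the DINOv2 branch in the diff: ratio of the feature
        # standard deviation to the feature mean, clamped into [0, 1].
        return float((features.std() / (features.mean() + 1e-6)).clamp(0, 1))

    # Hypothetical features with mean ~0.5 and std ~0.1, so the score lands near 0.2.
    dummy = 0.5 + 0.1 * torch.randn(1, 257, 384)
    ai_prob = cv_score(dummy)
    real_prob = 1.0 - ai_prob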
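The second hunk ends by appending each model's scores to all_real_probs and all_ai_probs; how those lists are combined happens further down in predict() and is outside this diff. A minimal sketch of one way they could be aggregated, assuming a simple unweighted mean (the actual weighting in app.py may differ):

    import numpy as np

    def combine_scores(all_real_probs, all_ai_probs):
        # If every model failed inside its try block, the lists stay empty;
        # fall back to an uninformative 50/50 split in that case.
        if not all_ai_probs:
            return {"Real": 0.5, "AI-generated": 0.5}
        return {
            "Real": float(np.mean(all_real_probs)),
            "AI-generated": float(np.mean(all_ai_probs)),
        }

    # e.g. combine_scores([0.2, 0.35, 0.8], [0.8, 0.65, 0.2])
    # -> {'Real': 0.45, 'AI-generated': 0.55}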