Unique00225 committed · verified
Commit d2315a1
1 Parent(s): bd8352b

Create app.py

Files changed (1)
  1. app.py +47 -0
app.py ADDED
@@ -0,0 +1,47 @@
+ from transformers import pipeline
+ from PIL import Image
+ import gradio as gr
+ import io
+
+ # Load pipeline on CPU
+ # device=-1 forces CPU usage
+ pipe = pipeline("image-to-text", model="dragonstar/image-text-captcha-v2", device=-1)
+
+ def run_ocr(image):
+     """
+     image: PIL Image (Gradio will give a PIL if type="pil")
+     Returns: detected text (string) or error message
+     """
+     if image is None:
+         return "No image provided"
+     try:
+         # pipeline usually expects PIL/np image
+         outputs = pipe(image)
+         # model outputs vary; try common keys
+         if isinstance(outputs, list) and len(outputs) > 0:
+             # Many vision->text models return [{'generated_text': '...'}]
+             first = outputs[0]
+             text = first.get("generated_text") or first.get("text") or str(first)
+         else:
+             text = str(outputs)
+         text = text.strip()
+         if not text:
+             return "No text extracted"
+         return text
+     except Exception as e:
+         return f"Error: {e}"
+
+ title = "Image → Text (captcha) — dragonstar/image-text-captcha-v2"
+ desc = "Upload a captcha image. Model runs on CPU in this Space. Results may be slower than cloud inference."
+
+ demo = gr.Interface(
+     fn=run_ocr,
+     inputs=gr.Image(type="pil", label="Captcha image"),
+     outputs=gr.Textbox(label="Detected text"),
+     title=title,
+     description=desc,
+     allow_flagging="never"
+ )
+
+ if __name__ == "__main__":
+     demo.launch()
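
Note: run_ocr assumes the common image-to-text pipeline return shape of [{'generated_text': '...'}] and falls back to str(outputs) otherwise. A minimal sketch of the same call outside Gradio, for local testing (the image filename is hypothetical):

from transformers import pipeline
from PIL import Image

# Same model and CPU setting as in app.py
pipe = pipeline("image-to-text", model="dragonstar/image-text-captcha-v2", device=-1)

# Hypothetical local test image; any RGB captcha image works
img = Image.open("captcha_sample.png").convert("RGB")

outputs = pipe(img)
print(outputs)  # typically a list of dicts, e.g. [{'generated_text': '...'}]
print(outputs[0].get("generated_text", "").strip())  # the text run_ocr would return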