Spaces: Running on Zero
Haozhe committed · Commit 311548d · 1 parent: b5465a3

display image

Changed files:
- .gitattributes +1 -0
- app.py +8 -3
.gitattributes CHANGED

@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+example_images/1.jpg filter=lfs diff=lfs merge=lfs -text
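The new rule tracks the example image with Git LFS, so the repository stores a pointer instead of the binary. Presumably this image backs the `examples` list that app.py passes to gr.ChatInterface; a hypothetical sketch of such an entry for a multimodal textbox follows (the prompt text is invented; only the file path comes from this commit):

    # Hypothetical: with a gr.MultimodalTextbox, each ChatInterface example
    # is a dict with "text" and "files" keys. Only the path
    # example_images/1.jpg is taken from this commit.
    examples = [
        {"text": "Zoom in and read the small sign in this photo.",
         "files": ["example_images/1.jpg"]},
    ]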
app.py CHANGED

@@ -250,8 +250,10 @@ def model_inference(input_dict, history):
     print(raw_result)
     proc_img = raw_result
     all_images += [proc_img]
-
-
+    proc_img.save("tmp.png")
+    display = [dict(text="", files=["tmp.png"])]
+    complete_assistant_response_for_gradio = complete_assistant_response_for_gradio + display
+    yield complete_assistant_response_for_gradio # Update Gradio display
 
     new_piece = dict(role='user', content=[
         dict(type='text', text="\nHere is the cropped image (Image Size: {}x{}):".format(proc_img.size[0], proc_img.size[1])),
@@ -275,9 +277,12 @@ with gr.Blocks() as demo:
 
     gr.HTML(html_header)
 
+    # image_op_display = gr.Image(label="Visual Operation Result", type="pil", height=480, show_download_button=True, interactive=False)
+
     gr.ChatInterface(
         fn=model_inference,
         description="# **Pixel Reasoner**",
+        chatbot=gr.Chatbot(label="Conversation", layout="bubble", bubble_full_width=False, show_copy_button=True, height=600),
         examples=examples,
         fill_height=True,
         textbox=gr.MultimodalTextbox(label="Query Input", file_types=["image"], file_count="multiple"),
@@ -290,4 +295,4 @@ with gr.Blocks() as demo:
     gr.Markdown(learn_more_markdown)
     gr.Markdown(bibtext)
 
-demo.launch(debug=True, share=
+demo.launch(debug=True, share=False)
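Taken together, the app.py changes stream each intermediate visual-operation image into the chat: the PIL result is saved to tmp.png, appended to the running response as a {"text", "files"} message, and yielded so Gradio refreshes the chatbot, which now gets an explicit gr.Chatbot with a copy button and a fixed height. Below is a minimal self-contained sketch of the same pattern, assuming a Gradio 4.x-style API that accepts a growing list of {"text", "files"} dicts from a multimodal chat generator (which is what app.py yields); the center-crop step and the names chat_fn, path, and img are illustrative, not from the Space:

    # Minimal sketch of the commit's display pattern; see assumptions above.
    import gradio as gr
    from PIL import Image

    def chat_fn(message, history):
        # With multimodal=True, `message` is {"text": str, "files": [paths]}.
        responses = []
        for path in message.get("files", []):
            img = Image.open(path)
            # Hypothetical visual operation: crop the center quarter.
            w, h = img.size
            proc_img = img.crop((w // 4, h // 4, 3 * w // 4, 3 * h // 4))
            proc_img.save("tmp.png")  # same temp-file trick as the commit
            responses = responses + [dict(text="Here is the cropped image:",
                                          files=["tmp.png"])]
            yield responses  # update the Gradio display mid-generation

    demo = gr.ChatInterface(
        fn=chat_fn,
        multimodal=True,
        chatbot=gr.Chatbot(label="Conversation", layout="bubble",
                           bubble_full_width=False, show_copy_button=True,
                           height=600),
        textbox=gr.MultimodalTextbox(label="Query Input", file_types=["image"],
                                     file_count="multiple"),
    )

    if __name__ == "__main__":
        demo.launch(debug=True, share=False)

One caveat worth noting: every intermediate image is written to the same tmp.png path. That appears safe because Gradio copies yielded files into its cache before serving them, but a unique temporary file per operation (e.g. via tempfile.NamedTemporaryFile) would rule out concurrent requests overwriting each other's image.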