Commit cc8f276 by mahan_ym (parent: b183ae9)

Refactor Makefile and update app.py and tools.py for improved functionality and clarity

Files changed:
- Makefile +8 -1
- src/app.py +37 -28
- src/tools.py +14 -10
- src/utils.py +0 -54
Makefile (CHANGED)

@@ -1,9 +1,16 @@
 .PHONY: test deploy_modal run dev
 
+install:
+	pip install uv && \
+	uv venv && \
+	source .venv/bin/activate && \
+	uv sync && \
+	modal setup
+
 deploy_modal:
 	modal deploy src/modal_app.py
 
-
+test_modal:
 	uv run test/test_modal.py
 
 run:
src/app.py (CHANGED)

@@ -1,64 +1,68 @@
-
+from pathlib import Path
 
-
+import gradio as gr
 
+from tools import (
+    change_color_objects_hsv,
+    change_color_objects_lab,
+    privacy_preserve_image,
+)
 
-
-    pass
+gr.set_static_paths(paths=[Path.cwd().absolute() / "assets"])
 
+icon = """<link rel="icon" type="image/x-icon" href="https://raw.githubusercontent.com/mahan-ym/ImageAlfred/main/src/assets/icons/ImageAlfredIcon.png">"""
 
-title = """
-<
-
-    <h1>Image Alfred</h1>
-    <p>Recolor and Privacy Preserving Image Tools</p>
-</div>
-"""  # noqa: E501
+title = """Image Alfred - Recolor and Privacy Preserving Image Tools
+<img src="https://raw.githubusercontent.com/mahan-ym/ImageAlfred/main/src/assets/icons/ImageAlfredIcon.png" alt="Image Alfred Logo" style="width: 120px; height: auto; margin: 0 auto;">
+"""
 
 hsv_df_input = gr.Dataframe(
     headers=["Object", "Hue", "Saturation Scale"],
     datatype=["str", "number", "number"],
-    label="
+    label="Target Objects and New Settings",
     type="array",
 )
 
 lab_df_input = gr.Dataframe(
     headers=["Object", "New A", "New B"],
     datatype=["str", "number", "number"],
-    label="
+    label="Target Objects and New Settings",
    type="array",
 )
 
 change_color_objects_hsv_tool = gr.Interface(
     fn=change_color_objects_hsv,
     inputs=[
-        hsv_df_input,
         gr.Image(label="Input Image", type="pil"),
+        hsv_df_input,
     ],
-    outputs=gr.Image(),
-    title="Image Recolor
+    outputs=gr.Image(label="Output Image"),
+    title="Image Recolor Tool (HSV)",
     description="This tool allows you to recolor objects in an image using the HSV color space. You can specify the hue and saturation scale for each object.",  # noqa: E501
 )
 
 change_color_objects_lab_tool = gr.Interface(
     fn=change_color_objects_lab,
     inputs=[
-        lab_df_input,
         gr.Image(label="Input Image", type="pil"),
+        lab_df_input,
     ],
-    outputs=gr.Image(),
-    title="Image Recolor
-    description="Recolor an image based on user input using the LAB color space. You can specify the
+    outputs=gr.Image(label="Output Image"),
+    title="Image Recolor Tool (LAB)",
+    description="Recolor an image based on user input using the LAB color space. You can specify the new A and new B values for each object.",  # noqa: E501
 )
 
 privacy_preserve_tool = gr.Interface(
     fn=privacy_preserve_image,
     inputs=[
-        gr.Textbox("user_input"),
         gr.Image(label="Input Image", type="pil"),
+        gr.Textbox(
+            label="Objects to Mask (dot-separated)",
+            placeholder="e.g., person. car. license plate",
+        ),  # noqa: E501
     ],
-    outputs=gr.Image(),
-    title="Privacy
+    outputs=gr.Image(label="Output Image"),
+    title="Privacy Preserving Tool",
     description="Upload an image and provide a prompt for the object to enforce privacy. The tool will use blurring to obscure the specified objects in the image.",  # noqa: E501
 )
 
@@ -69,14 +73,19 @@ demo = gr.TabbedInterface(
         privacy_preserve_tool,
     ],
     ["Change Color Objects HSV", "Change Color Objects LAB", "Privacy Preserving Tool"],
-    title=
+    title=title,
     theme=gr.themes.Default(
         primary_hue="blue",
-        secondary_hue="
-        font="Inter",
-        font_mono="Courier New",
+        secondary_hue="green",
+        # font="Inter",
+        # font_mono="Courier New",
     ),
+    head=icon,
 )
 
+# with gr.Blocks(title="Image Alfred", head=test) as demo:
+#     gr.HTML(header)
+#     tabs_interface.render()
+
 if __name__ == "__main__":
-    demo.launch(mcp_server=True)
+    demo.launch(mcp_server=True, max_file_size="5mb")
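Once the refactored app is running (for example via the Makefile's run target), the three interfaces can also be driven programmatically with gradio_client. The sketch below is illustrative and not part of the commit: the local URL, the input file path, and the api_name are assumptions, and client.view_api() lists the endpoint names Gradio actually assigns. Note that the new max_file_size="5mb" setting means uploads larger than 5 MB are now rejected.

from gradio_client import Client, handle_file

# Assumed local URL for the launched app; adjust to the actual host/port.
client = Client("http://127.0.0.1:7860/")

# Exercise the privacy tool: image first, then the dot-separated object prompt,
# matching the new input order in app.py. The api_name is an assumption; run
# client.view_api() to see the real endpoint names.
result = client.predict(
    handle_file("example.jpg"),   # placeholder input image path
    "license plate. face",        # objects to blur, dot-separated
    api_name="/privacy_preserve_image",
)
print(result)  # filepath of the blurred output image returned by Gradio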
src/tools.py (CHANGED)

@@ -9,13 +9,16 @@ from utils import upload_image_to_tmpfiles
 modal_app_name = "ImageAlfred"
 
 
-def preserve_privacy(input_prompt, input_img):
+def privacy_preserve_image(
+    input_img,
+    input_prompt,
+) -> np.ndarray | Image.Image | str | Path | None:
     """
     Obscure specified objects in the input image based on the input prompt.
 
     Args:
+        input_img (Image.Image): Input image in bytes format.
         input_prompt (list): List of [object:str].
-        input_img (bytes): Input image in bytes format.
 
     Returns:
         bytes: Binary image data of the modified image.
@@ -28,8 +31,8 @@ def preserve_privacy(input_prompt, input_img):
 
 
 def change_color_objects_hsv(
-    user_input,
     input_img,
+    user_input,
 ) -> np.ndarray | Image.Image | str | Path | None:
     """Changes the hue and saturation of specified objects in an image.
 
@@ -50,8 +53,6 @@ def change_color_objects_hsv(
         ValueError: If user_input format is invalid, hue values are outside [0, 179] range, saturation_scale is not positive, or image format is invalid or corrupted.
         TypeError: If input_img is not a supported type or modal function returns unexpected type.
     """  # noqa: E501
-    print("Received input image type:", type(input_img))
-    # source, input_img = validate_image_input(input_img)
     print("before processing input:", user_input)
 
     for item in user_input:
@@ -81,12 +82,15 @@
         raise TypeError(
             f"Expected Image.Image from modal remote function, got {type(output_pil)}"
         )
-    img_link = upload_image_to_tmpfiles(output_pil)
+    # img_link = upload_image_to_tmpfiles(output_pil)
 
     return output_pil
 
 
-def change_color_objects_lab(user_input, input_img):
+def change_color_objects_lab(
+    input_img,
+    user_input,
+) -> np.ndarray | Image.Image | str | Path | None:
     """Changes the color of specified objects in an image using LAB color space.
 
     Segments objects based on text prompts and alters their color in the LAB
@@ -106,7 +110,6 @@ def change_color_objects_lab(user_input, input_img):
         ValueError: If user_input format is invalid, a/b values are outside [0, 255] range, or image format is invalid or corrupted.
         TypeError: If input_img is not a supported type or modal function returns unexpected type.
     """  # noqa: E501
-    print("Received input image type:", type(input_img))
     print("before processing input:", user_input)
     for item in user_input:
         if len(item) != 3:
@@ -133,12 +136,13 @@ def change_color_objects_lab(user_input, input_img):
         raise TypeError(
             f"Expected Image.Image from modal remote function, got {type(output_pil)}"
         )
-    img_link = upload_image_to_tmpfiles(output_pil)
+    # img_link = upload_image_to_tmpfiles(output_pil)
 
     return output_pil
 
 
 if __name__ == "__main__":
+    image_pil = Image.open("./src/assets/test_image.jpg")
     change_color_objects_hsv(
-        user_input=[["hair", 30, 1.2], ["shirt", 60, 1.0]], input_img=
+        user_input=[["hair", 30, 1.2], ["shirt", 60, 1.0]], input_img=image_pil
     )
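The signature changes above move the image to the first parameter, with the per-object settings second. Below is a minimal direct-call sketch of the new order, modeled on the __main__ block added in this commit; the image path is a placeholder, the LAB values are illustrative, and both functions still depend on the deployed Modal app named by modal_app_name.

from PIL import Image

from tools import change_color_objects_hsv, change_color_objects_lab

img = Image.open("./src/assets/test_image.jpg")  # placeholder test image

# HSV recolor: each row is [object, hue in [0, 179], saturation scale > 0].
recolored_hsv = change_color_objects_hsv(
    input_img=img,
    user_input=[["hair", 30, 1.2], ["shirt", 60, 1.0]],
)

# LAB recolor: each row is [object, new A, new B], both in [0, 255].
# The 140/160 values here are made-up examples.
recolored_lab = change_color_objects_lab(
    input_img=img,
    user_input=[["hair", 140, 160]],
)

# On success both functions return the modified image as a PIL Image.
recolored_hsv.save("hair_shirt_hsv.png")
recolored_lab.save("hair_lab.png")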
src/utils.py (CHANGED)

@@ -6,59 +6,6 @@ import requests
 from PIL import Image
 
 
-class ImageSource(Enum):
-    """Enum representing different sources of image input"""
-
-    PIL = "PIL"
-    BASE64 = "base64"
-    URL = "url"
-    FILE = "file"
-
-
-def validate_image_input(url_or_data):
-    """Handle different image input formats for MCP"""
-    if isinstance(url_or_data, Image.Image):
-        print("Received input image type: PIL.Image")
-        return (ImageSource.PIL, url_or_data)
-
-    if isinstance(url_or_data, str):
-        if url_or_data.startswith("data:image"):
-            try:
-                # Handle base64 data URLs
-                print("Received input image type: base64 data")
-                header, encoded = url_or_data.split(",", 1)
-                decoded_bytes = base64.b64decode(encoded)
-                return (
-                    ImageSource.BASE64,
-                    Image.open(BytesIO(decoded_bytes)).convert("RGB"),
-                )
-            except Exception as e:
-                raise ValueError(f"Invalid base64 data URL: {e}")
-        elif url_or_data.startswith(("http://", "https://")):
-            # Handle URLs
-            try:
-                response = requests.get(url_or_data, timeout=30)
-                response.raise_for_status()
-                print("Received input image type: URL")
-                return (
-                    ImageSource.URL,
-                    Image.open(BytesIO(response.content)).convert("RGB"),
-                )
-            except requests.exceptions.RequestException as e:
-                raise ValueError(f"Could not download image from URL: {e}")
-        else:
-            # Handle file paths
-            try:
-                with open(url_or_data, "rb") as f:
-                    return (ImageSource.FILE, Image.open(f).convert("RGB"))
-            except FileNotFoundError:
-                raise ValueError(f"File not found: {url_or_data}")
-            except Exception as e:
-                raise ValueError(f"Could not read file {url_or_data}: {e}")
-
-    raise ValueError(f"Unsupported image input format: {type(url_or_data)}")
-
-
 def upload_image_to_tmpfiles(image):
     """
     Upload an image to tmpfiles.org and return the URL.
@@ -75,7 +22,6 @@ def upload_image_to_tmpfiles(image):
         raise ValueError(f"Upload failed: Status {response.status_code}")
 
     data = response.json()
-    print(f"Response from tmpfiles.org: {data}")
     if "data" in data and "url" in data["data"]:
         url = data["data"]["url"]
         if not url:
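upload_image_to_tmpfiles itself is kept by this commit (only its debug print is dropped), even though its call sites in tools.py are now commented out. A minimal usage sketch with a placeholder image path:

from PIL import Image

from utils import upload_image_to_tmpfiles

img = Image.open("./src/assets/test_image.jpg")  # placeholder path

# Uploads the image to tmpfiles.org and returns the hosted URL;
# raises ValueError if the upload fails or the response has no URL.
url = upload_image_to_tmpfiles(img)
print(url)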