Update app.py
app.py
CHANGED
@@ -13,9 +13,6 @@ if not os.path.exists('IndicTransToolkit'):
     os.system('git clone https://github.com/VarunGumma/IndicTransToolkit')
     os.system('cd IndicTransToolkit && python3 -m pip install --editable ./')
 
-# Ensure that IndicTransToolkit is installed and used properly
-from IndicTransToolkit import IndicProcessor
-
 # Initialize BLIP for image captioning
 blip_processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
 blip_model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base").to("cuda" if torch.cuda.is_available() else "cpu")

@@ -112,9 +109,16 @@ if uploaded_image is not None:
     st.write(f"Caption: {caption}")
 
     # Select target languages for translation
+    language_options = {
+        "hin_Deva": "Hindi (Devanagari)",
+        "mar_Deva": "Marathi (Devanagari)",
+        "guj_Gujr": "Gujarati (Gujrati)",
+        "urd_Arab": "Urdu (Arabic)",
+    }
+
     target_languages = st.multiselect(
         "Select target languages for translation",
-
+        list(language_options.keys()),
         ["hin_Deva", "mar_Deva"]
     )
 

@@ -123,21 +127,32 @@ if uploaded_image is not None:
     st.write("Translating Caption...")
     translations = translate_caption(caption, target_languages)
     st.write("Translations:")
-    for lang
-    st.write(f"{lang}: {
+    for lang in target_languages:
+        st.write(f"{language_options[lang]}: {translations[lang]}")
 
-    #
+    # Select audio generation method
+    audio_method = st.radio("Choose Audio Generation Method", ("gTTS (Default)", "Facebook MMS-TTS"))
+
+    # Generate audio for each target language
     for lang in target_languages:
-    st.write(f"
+        st.write(f"Generating audio for {language_options[lang]}...")
+
         lang_code = {
             "hin_Deva": "hi", # Hindi
+            "mar_Deva": "mr", # Marathi
             "guj_Gujr": "gu", # Gujarati
             "urd_Arab": "ur" # Urdu
         }.get(lang, "en")
-
-
-
-
+
+        output_file = f"{lang}_audio.mp3"
+
+        if audio_method == "gTTS (Default)":
+            audio_file = generate_audio_gtts(translations[lang], lang_code, output_file)
+        else:
+            model_name = "your_facebook_mms_model_name" # Update this to the correct model name
+            audio_file = generate_audio_fbmms(translations[lang], model_name, output_file)
+
+        st.write(f"Playing {language_options[lang]} audio:")
         st.audio(audio_file)
 else:
     st.write("Upload an image to start.")
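None of the hunks above show the captioning call that produces `caption`. For context, this is roughly how the `blip_processor` / `blip_model` pair initialized in the first hunk is normally used; the image loading, device handling, and helper name are illustrative assumptions, not code from this commit:

import torch
from PIL import Image
from transformers import BlipProcessor, BlipForConditionalGeneration

device = "cuda" if torch.cuda.is_available() else "cpu"
blip_processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
blip_model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base").to(device)

def caption_image(image_path: str) -> str:
    # Encode the image and let BLIP generate a short English caption.
    image = Image.open(image_path).convert("RGB")
    inputs = blip_processor(images=image, return_tensors="pt").to(device)
    output_ids = blip_model.generate(**inputs, max_new_tokens=40)
    return blip_processor.decode(output_ids[0], skip_special_tokens=True)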
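`generate_audio_gtts` is likewise defined outside these hunks. With the gTTS package it typically reduces to a few lines; the sketch below simply mirrors the (text, lang_code, output_file) call signature used in the hunk:

from gtts import gTTS

def generate_audio_gtts(text, lang_code, output_file):
    # gTTS expects ISO 639-1 codes, which is what the lang_code mapping
    # above produces ("hi", "mr", "gu", "ur").
    tts = gTTS(text=text, lang=lang_code)
    tts.save(output_file)
    return output_file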
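The Facebook MMS-TTS branch still carries a placeholder model name (`your_facebook_mms_model_name`). If `generate_audio_fbmms` follows the usual transformers VITS recipe, it would look roughly like the sketch below; the per-language checkpoint (e.g. facebook/mms-tts-hin for Hindi) and the WAV output are assumptions, the more so since the caller names the file *_audio.mp3:

import torch
import scipy.io.wavfile
from transformers import VitsModel, AutoTokenizer

def generate_audio_fbmms(text, model_name, output_file):
    # model_name is a per-language MMS-TTS checkpoint, e.g. "facebook/mms-tts-hin" (assumed).
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = VitsModel.from_pretrained(model_name)
    inputs = tokenizer(text, return_tensors="pt")
    with torch.no_grad():
        waveform = model(**inputs).waveform
    # VITS returns a raw waveform; write it out at the model's sampling rate.
    scipy.io.wavfile.write(output_file, rate=model.config.sampling_rate,
                           data=waveform.squeeze().cpu().numpy())
    return output_file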