Update app.py
app.py
CHANGED
@@ -9,8 +9,7 @@ import gradio as gr
| 9 | import torch
| 10 | from transformers import GPT2Tokenizer, GPT2LMHeadModel
| 11 | from keybert import KeyBERT
| 12 | -
| 13 | - from moviepy.editor import VideoFileClip, concatenate_videoclips, AudioFileClip, CompositeAudioClip, concatenate_audioclips, AudioClip
| 14 | import re
| 15 | import math
| 16 | import shutil
@@ -32,7 +31,6 @@ logger.info("INICIO DE EJECUCIÓN - GENERADOR DE VIDEOS")
| 32 | logger.info("="*80)
| 33 |
| 34 | # Diccionario de voces TTS disponibles organizadas por idioma
| 35 | - # Puedes expandir esta lista si conoces otros IDs de voz de Edge TTS
| 36 | VOCES_DISPONIBLES = {
| 37 | "Español (España)": {
| 38 | "es-ES-JuanNeural": "Juan (España) - Masculino",
@@ -100,37 +98,26 @@ def get_voice_choices():
| 100 | choices = []
| 101 | for region, voices in VOCES_DISPONIBLES.items():
| 102 | for voice_id, voice_name in voices.items():
| 103 | - # Formato: (Texto a mostrar en el dropdown, Valor que se pasa)
| 104 | choices.append((f"{voice_name} ({region})", voice_id))
| 105 | return choices
| 106 |
| 107 | # Obtener las voces al inicio del script
| 108 | -
| 109 | -
| 110 | - # AVAILABLE_VOICES = asyncio.run(get_available_voices())
| 111 | - AVAILABLE_VOICES = get_voice_choices() # <-- Usamos la lista predefinida y aplanada
| 112 | - # Establecer una voz por defecto inicial
| 113 | - DEFAULT_VOICE_ID = "es-ES-JuanNeural" # ID de Juan
| 114 | -
| 115 | - # Buscar el nombre amigable para la voz por defecto si existe
| 116 | DEFAULT_VOICE_NAME = DEFAULT_VOICE_ID
| 117 | for text, voice_id in AVAILABLE_VOICES:
| 118 | if voice_id == DEFAULT_VOICE_ID:
| 119 | DEFAULT_VOICE_NAME = text
| 120 | break
| 121 | - # Si Juan no está en la lista (ej. lista de fallback), usar la primera voz disponible
| 122 | if DEFAULT_VOICE_ID not in [v[1] for v in AVAILABLE_VOICES]:
| 123 | DEFAULT_VOICE_ID = AVAILABLE_VOICES[0][1] if AVAILABLE_VOICES else "en-US-AriaNeural"
| 124 | - DEFAULT_VOICE_NAME = AVAILABLE_VOICES[0][0] if AVAILABLE_VOICES else "Aria (United States) - Female"
| 125 | -
| 126 | logger.info(f"Voz por defecto seleccionada (ID): {DEFAULT_VOICE_ID}")
| 127 |
| 128 | -
| 129 | # Clave API de Pexels
| 130 | PEXELS_API_KEY = os.environ.get("PEXELS_API_KEY")
| 131 | if not PEXELS_API_KEY:
| 132 | logger.critical("NO SE ENCONTRÓ PEXELS_API_KEY EN VARIABLES DE ENTORNO")
| 133 | - # raise ValueError("API key de Pexels no configurada")
| 134 |
| 135 | # Inicialización de modelos
| 136 | MODEL_NAME = "datificate/gpt2-small-spanish"
@@ -160,7 +147,6 @@ def buscar_videos_pexels(query, api_key, per_page=5):
| 160 | if not api_key:
| 161 | logger.warning("No se puede buscar en Pexels: API Key no configurada.")
| 162 | return []
| 163 | -
| 164 | logger.debug(f"Buscando en Pexels: '{query}' | Resultados: {per_page}")
| 165 | headers = {"Authorization": api_key}
| 166 | try:
@@ -170,7 +156,6 @@ def buscar_videos_pexels(query, api_key, per_page=5):
| 170 | "orientation": "landscape",
| 171 | "size": "medium"
| 172 | }
| 173 | -
| 174 | response = requests.get(
| 175 | "https://api.pexels.com/videos/search",
| 176 | headers=headers,
@@ -178,19 +163,16 @@ def buscar_videos_pexels(query, api_key, per_page=5):
| 178 | timeout=20
| 179 | )
| 180 | response.raise_for_status()
| 181 | -
| 182 | data = response.json()
| 183 | videos = data.get('videos', [])
| 184 | logger.info(f"Pexels: {len(videos)} videos encontrados para '{query}'")
| 185 | return videos
| 186 | -
| 187 | except requests.exceptions.RequestException as e:
| 188 | logger.error(f"Error de conexión Pexels para '{query}': {str(e)}")
| 189 | except json.JSONDecodeError:
| 190 | logger.error(f"Pexels: JSON inválido recibido | Status: {response.status_code} | Respuesta: {response.text[:200]}...")
| 191 | except Exception as e:
| 192 | logger.error(f"Error inesperado Pexels para '{query}': {str(e)}", exc_info=True)
| 193 | -
| 194 | return []
| 195 |
| 196 | def generate_script(prompt, max_length=150):
@@ -198,16 +180,13 @@ def generate_script(prompt, max_length=150):
| 198 | if not tokenizer or not model:
| 199 | logger.warning("Modelos GPT-2 no disponibles - Usando prompt original como guion.")
| 200 | return prompt.strip()
| 201 | -
| 202 | instruction_phrase_start = "Escribe un guion corto, interesante y coherente sobre:"
| 203 | ai_prompt = f"{instruction_phrase_start} {prompt}"
| 204 | -
| 205 | try:
| 206 | inputs = tokenizer(ai_prompt, return_tensors="pt", truncation=True, max_length=512)
| 207 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
| 208 | model.to(device)
| 209 | inputs = {k: v.to(device) for k, v in inputs.items()}
| 210 | -
| 211 | outputs = model.generate(
| 212 | **inputs,
| 213 | max_length=max_length + inputs[list(inputs.keys())[0]].size(1),
@@ -220,84 +199,59 @@
| 220 | eos_token_id=tokenizer.eos_token_id,
| 221 | no_repeat_ngram_size=3
| 222 | )
| 223 | -
| 224 | text = tokenizer.decode(outputs[0], skip_special_tokens=True)
| 225 | -
| 226 | cleaned_text = text.strip()
| 227 | - # Limpieza mejorada de la frase de instrucción
| 228 | try:
| 229 | - # Buscar el índice de inicio del prompt original dentro del texto generado
| 230 | prompt_in_output_idx = text.lower().find(prompt.lower())
| 231 | if prompt_in_output_idx != -1:
| 232 | - # Tomar todo el texto DESPUÉS del prompt original
| 233 | cleaned_text = text[prompt_in_output_idx + len(prompt):].strip()
| 234 | logger.debug("Texto limpiado tomando parte después del prompt original.")
| 235 | else:
| 236 | -
| 237 | -
| 238 | -
| 239 | -
| 240 | -
| 241 | -
| 242 | -
| 243 | - # Si ni la frase de instrucción ni el prompt se encuentran, usar el texto original
| 244 | - logger.warning("No se pudo identificar el inicio del guión generado. Usando texto generado completo.")
| 245 | - cleaned_text = text.strip() # Limpieza básica
| 246 | -
| 247 | -
| 248 | except Exception as e:
| 249 | -
| 250 | -
| 251 | -
| 252 | -
| 253 | -
| 254 | - logger.warning("El guión generado parece muy corto o vacío después de la limpieza heurística. Usando el texto generado original (sin limpieza adicional).")
| 255 | - cleaned_text = re.sub(r'<[^>]+>', '', text).strip() # Fallback al texto original limpio
| 256 | -
| 257 | - # Limpieza final de caracteres especiales y espacios sobrantes
| 258 | cleaned_text = re.sub(r'<[^>]+>', '', cleaned_text).strip()
| 259 | - cleaned_text = cleaned_text.lstrip(':').strip()
| 260 | - cleaned_text = cleaned_text.lstrip('.').strip()
| 261 | -
| 262 | -
| 263 | - # Intentar obtener al menos una oración completa si es posible para un inicio más limpio
| 264 | sentences = cleaned_text.split('.')
| 265 | if sentences and sentences[0].strip():
| 266 | final_text = sentences[0].strip() + '.'
| 267 | -
| 268 | -
| 269 | -
| 270 | - final_text = final_text.replace("..", ".") # Limpiar doble punto
| 271 | -
| 272 | logger.info(f"Guion generado final (Truncado a 100 chars): '{final_text[:100]}...'")
| 273 | return final_text.strip()
| 274 | -
| 275 | logger.info(f"Guion generado final (sin oraciones completas detectadas - Truncado): '{cleaned_text[:100]}...'")
| 276 | - return cleaned_text.strip()
| 277 | -
| 278 | except Exception as e:
| 279 | - logger.error(f"Error generando guion con GPT-2
| 280 | logger.warning("Usando prompt original como guion debido al error de generación.")
| 281 | return prompt.strip()
| 282 |
| 283 | - # Función TTS ahora recibe la voz a usar
| 284 | async def text_to_speech(text, output_path, voice):
| 285 | logger.info(f"Convirtiendo texto a voz | Caracteres: {len(text)} | Voz: {voice} | Salida: {output_path}")
| 286 | if not text or not text.strip():
| 287 | logger.warning("Texto vacío para TTS")
| 288 | return False
| 289 | -
| 290 | try:
| 291 | communicate = edge_tts.Communicate(text, voice)
| 292 | await communicate.save(output_path)
| 293 | -
| 294 | if os.path.exists(output_path) and os.path.getsize(output_path) > 100:
| 295 | logger.info(f"Audio guardado exitosamente en: {output_path} | Tamaño: {os.path.getsize(output_path)} bytes")
| 296 | return True
| 297 | else:
| 298 | logger.error(f"TTS guardó un archivo pequeño o vacío en: {output_path}")
| 299 | return False
| 300 | -
| 301 | except Exception as e:
| 302 | logger.error(f"Error en TTS con voz '{voice}': {str(e)}", exc_info=True)
| 303 | return False
@@ -306,121 +260,101 @@ def download_video_file(url, temp_dir):
|
|
| 306 |
if not url:
|
| 307 |
logger.warning("URL de video no proporcionada para descargar")
|
| 308 |
return None
|
| 309 |
-
|
| 310 |
try:
|
| 311 |
logger.info(f"Descargando video desde: {url[:80]}...")
|
| 312 |
os.makedirs(temp_dir, exist_ok=True)
|
| 313 |
file_name = f"video_dl_{datetime.now().strftime('%Y%m%d_%H%M%S_%f')}.mp4"
|
| 314 |
output_path = os.path.join(temp_dir, file_name)
|
| 315 |
-
|
| 316 |
with requests.get(url, stream=True, timeout=60) as r:
|
| 317 |
r.raise_for_status()
|
| 318 |
with open(output_path, 'wb') as f:
|
| 319 |
for chunk in r.iter_content(chunk_size=8192):
|
| 320 |
f.write(chunk)
|
| 321 |
-
|
| 322 |
if os.path.exists(output_path) and os.path.getsize(output_path) > 1000:
|
| 323 |
-
|
| 324 |
-
|
| 325 |
else:
|
| 326 |
-
|
| 327 |
-
|
| 328 |
-
|
| 329 |
-
|
| 330 |
-
|
| 331 |
except requests.exceptions.RequestException as e:
|
| 332 |
logger.error(f"Error de descarga para {url[:80]}... : {str(e)}")
|
| 333 |
except Exception as e:
|
| 334 |
logger.error(f"Error inesperado descargando {url[:80]}... : {str(e)}", exc_info=True)
|
| 335 |
-
|
| 336 |
return None
|
| 337 |
|
| 338 |
def loop_audio_to_length(audio_clip, target_duration):
|
| 339 |
logger.debug(f"Ajustando audio | Duración actual: {audio_clip.duration:.2f}s | Objetivo: {target_duration:.2f}s")
|
| 340 |
-
|
| 341 |
if audio_clip is None or audio_clip.duration is None or audio_clip.duration <= 0:
|
| 342 |
logger.warning("Input audio clip is invalid (None or zero duration), cannot loop.")
|
| 343 |
try:
|
| 344 |
sr = getattr(audio_clip, 'fps', 44100) if audio_clip else 44100
|
| 345 |
-
return AudioClip(lambda t: 0, duration=target_duration,
|
| 346 |
except Exception as e:
|
| 347 |
-
|
| 348 |
-
|
| 349 |
-
|
| 350 |
if audio_clip.duration >= target_duration:
|
| 351 |
logger.debug("Audio clip already longer or equal to target. Trimming.")
|
| 352 |
trimmed_clip = audio_clip.subclip(0, target_duration)
|
| 353 |
if trimmed_clip.duration is None or trimmed_clip.duration <= 0:
|
| 354 |
-
|
| 355 |
-
|
| 356 |
-
|
| 357 |
-
|
| 358 |
return trimmed_clip
|
| 359 |
-
|
| 360 |
loops = math.ceil(target_duration / audio_clip.duration)
|
| 361 |
logger.debug(f"Creando {loops} loops de audio")
|
| 362 |
-
|
| 363 |
audio_segments = [audio_clip] * loops
|
| 364 |
looped_audio = None
|
| 365 |
final_looped_audio = None
|
| 366 |
try:
|
| 367 |
-
|
| 368 |
-
|
| 369 |
-
if looped_audio.duration is None or looped_audio.duration <= 0:
|
| 370 |
logger.error("Concatenated audio clip is invalid (None or zero duration).")
|
| 371 |
raise ValueError("Invalid concatenated audio.")
|
| 372 |
-
|
| 373 |
-
|
| 374 |
-
|
| 375 |
-
if final_looped_audio.duration is None or final_looped_audio.duration <= 0:
|
| 376 |
logger.error("Final subclipped audio clip is invalid (None or zero duration).")
|
| 377 |
raise ValueError("Invalid final subclipped audio.")
|
| 378 |
-
|
| 379 |
-
return final_looped_audio
|
| 380 |
-
|
| 381 |
except Exception as e:
|
| 382 |
logger.error(f"Error concatenating/subclipping audio clips for looping: {str(e)}", exc_info=True)
|
| 383 |
try:
|
| 384 |
-
|
| 385 |
-
|
| 386 |
-
|
| 387 |
except:
|
| 388 |
-
|
| 389 |
logger.error("Fallback to original audio clip failed.")
|
| 390 |
return AudioFileClip(filename="")
|
| 391 |
-
|
| 392 |
finally:
|
| 393 |
if looped_audio is not None and looped_audio is not final_looped_audio:
|
| 394 |
try: looped_audio.close()
|
| 395 |
except: pass
|
| 396 |
|
| 397 |
-
|
| 398 |
def extract_visual_keywords_from_script(script_text):
|
| 399 |
logger.info("Extrayendo palabras clave del guion")
|
| 400 |
if not script_text or not script_text.strip():
|
| 401 |
logger.warning("Guion vacío, no se pueden extraer palabras clave.")
|
| 402 |
-
return ["distopico", "dark", "terror", "ansiedad", "encuentros", "demonios", "siniestro",
|
| 403 |
-
"oscuro", "noche", "niebla", "abandonado", "miedo", "suspenso", "sombrio", "lluvia", "tormenta", "bosque", "cementerio",
|
| 404 |
-
"iglesia", "ruinas", "hospital", "escuela", "tunel", "puente", "carretera", "desierto", "pantano", "cueva", "paredes",
|
| 405 |
-
"ventanas rotas", "sombras", "silueta", "ojos", "susurros", "gritos", "corredor", "puerta cerrada", "escaleras",
|
| 406 |
-
"reloj parado", "matrix", "muñeca", "manchas", "sangre", "cadenas", "ritual", "velas", "libro antiguo",
|
| 407 |
-
"cruz invertida", "campanario", "campana", "nieve oscura", "cielo rojo", "luna llena", "animales muertos",
|
| 408 |
-
"cuervos", "arañas", "telarañas", "niebla densa", "luces parpadeando", "televisor estático", "radio interferencia",
|
| 409 |
-
"voz distorsionada", "figura encapuchada", "mascaras", "manos", "pies descalzos", "huellas", "ventana abierta",
|
| 410 |
"viento fuerte", "reloj de pared", "sotano"]
|
| 411 |
-
|
| 412 |
clean_text = re.sub(r'[^\w\sáéíóúñÁÉÍÓÚÑ]', '', script_text)
|
| 413 |
keywords_list = []
|
| 414 |
-
|
| 415 |
if kw_model:
|
| 416 |
try:
|
| 417 |
logger.debug("Intentando extracción con KeyBERT...")
|
| 418 |
keywords1 = kw_model.extract_keywords(clean_text, keyphrase_ngram_range=(1, 1), stop_words='spanish', top_n=5)
|
| 419 |
keywords2 = kw_model.extract_keywords(clean_text, keyphrase_ngram_range=(2, 2), stop_words='spanish', top_n=3)
|
| 420 |
-
|
| 421 |
all_keywords = keywords1 + keywords2
|
| 422 |
all_keywords.sort(key=lambda item: item[1], reverse=True)
|
| 423 |
-
|
| 424 |
seen_keywords = set()
|
| 425 |
for keyword, score in all_keywords:
|
| 426 |
formatted_keyword = keyword.lower().replace(" ", "+")
|
|
@@ -429,37 +363,28 @@ def extract_visual_keywords_from_script(script_text):
|
|
| 429 |
seen_keywords.add(formatted_keyword)
|
| 430 |
if len(keywords_list) >= 5:
|
| 431 |
break
|
| 432 |
-
|
| 433 |
if keywords_list:
|
| 434 |
logger.debug(f"Palabras clave extraídas por KeyBERT: {keywords_list}")
|
| 435 |
return keywords_list
|
| 436 |
-
|
| 437 |
except Exception as e:
|
| 438 |
logger.warning(f"KeyBERT falló: {str(e)}. Intentando método simple.")
|
| 439 |
-
|
| 440 |
logger.debug("Extrayendo palabras clave con método simple...")
|
| 441 |
words = clean_text.lower().split()
|
| 442 |
stop_words = {"el", "la", "los", "las", "de", "en", "y", "a", "que", "es", "un", "una", "con", "para", "del", "al", "por", "su", "sus", "se", "lo", "le", "me", "te", "nos", "os", "les", "mi", "tu",
|
| 443 |
"nuestro", "vuestro", "este", "ese", "aquel", "esta", "esa", "aquella", "esto", "eso", "aquello", "mis", "tus",
|
| 444 |
"nuestros", "vuestros", "estas", "esas", "aquellas", "si", "no", "más", "menos", "sin", "sobre", "bajo", "entre", "hasta", "desde", "durante", "mediante", "según", "versus", "via", "cada", "todo", "todos", "toda", "todas", "poco", "pocos", "poca", "pocas", "mucho", "muchos", "mucha", "muchas", "varios", "varias", "otro", "otros", "otra", "otras", "mismo", "misma", "mismos", "mismas", "tan", "tanto", "tanta", "tantos", "tantas", "tal", "tales", "cual", "cuales", "cuyo", "cuya", "cuyos", "cuyas", "quien", "quienes", "cuan", "cuanto", "cuanta", "cuantos", "cuantas", "como", "donde", "cuando", "porque", "aunque", "mientras", "siempre", "nunca", "jamás", "muy", "casi", "solo", "solamente", "incluso", "apenas", "quizás", "tal vez", "acaso", "claro", "cierto", "obvio", "evidentemente", "realmente", "simplemente", "generalmente", "especialmente", "principalmente", "posiblemente", "probablemente", "difícilmente", "fácilmente", "rápidamente", "lentamente", "bien", "mal", "mejor", "peor", "arriba", "abajo", "adelante", "atrás", "cerca", "lejos", "dentro", "fuera", "encima", "debajo", "frente", "detrás", "antes", "después", "luego", "pronto", "tarde", "todavía", "ya", "aun", "aún", "quizá"}
|
| 445 |
-
|
| 446 |
valid_words = [word for word in words if len(word) > 3 and word not in stop_words]
|
| 447 |
-
|
| 448 |
if not valid_words:
|
| 449 |
logger.warning("No se encontraron palabras clave válidas con método simple. Usando palabras clave predeterminadas.")
|
| 450 |
-
return ["espiritual", "terror", "matrix", "arcontes", "galaxia", "creepy", "magia", "gangstalking","conspiracy"
|
| 451 |
-
|
| 452 |
word_counts = Counter(valid_words)
|
| 453 |
top_keywords = [word.replace(" ", "+") for word, _ in word_counts.most_common(5)]
|
| 454 |
-
|
| 455 |
if not top_keywords:
|
| 456 |
-
|
| 457 |
-
|
| 458 |
-
|
| 459 |
logger.info(f"Palabras clave finales: {top_keywords}")
|
| 460 |
return top_keywords
|
| 461 |
|
| 462 |
-
# crear_video ahora recibe la voz seleccionada
|
| 463 |
def crear_video(prompt_type, input_text, selected_voice, musica_file=None):
|
| 464 |
logger.info("=" * 80)
|
| 465 |
logger.info(f"INICIANDO CREACIÓN DE VIDEO | Tipo: {prompt_type}")
|
|
@@ -468,7 +393,6 @@ def crear_video(prompt_type, input_text, selected_voice, musica_file=None):
|
|
| 468 |
|
| 469 |
start_time = datetime.now()
|
| 470 |
temp_dir_intermediate = None
|
| 471 |
-
|
| 472 |
audio_tts_original = None
|
| 473 |
musica_audio_original = None
|
| 474 |
audio_tts = None
|
|
@@ -484,9 +408,7 @@ def crear_video(prompt_type, input_text, selected_voice, musica_file=None):
|
|
| 484 |
guion = generate_script(input_text)
|
| 485 |
else:
|
| 486 |
guion = input_text.strip()
|
| 487 |
-
|
| 488 |
logger.info(f"Guion final ({len(guion)} chars): '{guion[:100]}...'")
|
| 489 |
-
|
| 490 |
if not guion.strip():
|
| 491 |
logger.error("El guion resultante está vacío o solo contiene espacios.")
|
| 492 |
raise ValueError("El guion está vacío.")
|
|
@@ -495,27 +417,16 @@ def crear_video(prompt_type, input_text, selected_voice, musica_file=None):
|
|
| 495 |
logger.info(f"Directorio temporal intermedio creado: {temp_dir_intermediate}")
|
| 496 |
temp_intermediate_files = []
|
| 497 |
|
| 498 |
-
# 2. Generar audio de voz
|
| 499 |
logger.info("Generando audio de voz...")
|
| 500 |
voz_path = os.path.join(temp_dir_intermediate, "voz.mp3")
|
| 501 |
-
|
| 502 |
-
tts_voices_to_try = [selected_voice]
|
| 503 |
-
fallback_juan = "es-ES-JuanNeural"
|
| 504 |
-
fallback_elvira = "es-ES-ElviraNeural"
|
| 505 |
-
|
| 506 |
-
if fallback_juan and fallback_juan != selected_voice and fallback_juan not in tts_voices_to_try:
|
| 507 |
-
tts_voices_to_try.append(fallback_juan)
|
| 508 |
-
if fallback_elvira and fallback_elvira != selected_voice and fallback_elvira not in tts_voices_to_try:
|
| 509 |
-
tts_voices_to_try.append(fallback_elvira)
|
| 510 |
-
|
| 511 |
tts_success = False
|
| 512 |
tried_voices = set()
|
| 513 |
-
|
| 514 |
for current_voice in tts_voices_to_try:
|
| 515 |
if not current_voice or current_voice in tried_voices:
|
| 516 |
continue
|
| 517 |
tried_voices.add(current_voice)
|
| 518 |
-
|
| 519 |
logger.info(f"Intentando TTS con voz: {current_voice}...")
|
| 520 |
try:
|
| 521 |
tts_success = asyncio.run(text_to_speech(guion, voz_path, voice=current_voice))
|
|
@@ -523,79 +434,197 @@ def crear_video(prompt_type, input_text, selected_voice, musica_file=None):
|
|
| 523 |
logger.info(f"TTS exitoso con voz '{current_voice}'.")
|
| 524 |
break
|
| 525 |
except Exception as e:
|
| 526 |
-
logger.warning(f"Fallo al generar TTS con voz '{current_voice}': {str(e)}"
|
| 527 |
-
pass
|
| 528 |
-
|
| 529 |
if not tts_success or not os.path.exists(voz_path) or os.path.getsize(voz_path) <= 100:
|
| 530 |
-
logger.error("Fallo en la generación de voz después de todos los intentos.
|
| 531 |
raise ValueError("Error generando voz a partir del guion (fallo de TTS).")
|
| 532 |
-
|
| 533 |
temp_intermediate_files.append(voz_path)
|
| 534 |
-
|
| 535 |
audio_tts_original = AudioFileClip(voz_path)
|
| 536 |
-
|
| 537 |
-
|
| 538 |
-
|
| 539 |
-
try:
|
| 540 |
-
audio_tts_original.close()
|
| 541 |
-
except:
|
| 542 |
-
pass
|
| 543 |
-
audio_tts_original = None
|
| 544 |
-
if os.path.exists(voz_path):
|
| 545 |
-
try:
|
| 546 |
-
os.remove(voz_path)
|
| 547 |
-
except:
|
| 548 |
-
pass
|
| 549 |
-
if voz_path in temp_intermediate_files:
|
| 550 |
-
temp_intermediate_files.remove(voz_path)
|
| 551 |
-
|
| 552 |
-
raise ValueError("Audio de voz generado es inválido después de procesamiento inicial.")
|
| 553 |
-
|
| 554 |
audio_tts = audio_tts_original
|
| 555 |
-
audio_duration =
|
| 556 |
logger.info(f"Duración audio voz: {audio_duration:.2f} segundos")
|
| 557 |
-
|
| 558 |
if audio_duration < 1.0:
|
| 559 |
logger.error(f"Duración audio voz ({audio_duration:.2f}s) es muy corta.")
|
| 560 |
raise ValueError("Generated voice audio is too short (min 1 second required).")
|
| 561 |
|
| 562 |
-
#
|
|
|
|
| 563 |
|
| 564 |
# 7. Crear video final
|
| 565 |
logger.info("Renderizando video final...")
|
| 566 |
-
|
| 567 |
-
|
| 568 |
-
|
| 569 |
-
|
| 570 |
-
|
|
|
|
|
| 571 |
|
| 572 |
output_filename = "final_video.mp4"
|
| 573 |
output_path = os.path.join(temp_dir_intermediate, output_filename)
|
| 574 |
logger.info(f"Escribiendo video final a: {output_path}")
|
| 575 |
-
|
| 576 |
-
|
| 577 |
-
|
| 578 |
-
|
| 579 |
-
|
| 580 |
-
|
| 581 |
-
|
| 582 |
-
|
| 583 |
-
|
| 584 |
-
|
|
|
|
|
| 585 |
|
| 586 |
total_time = (datetime.now() - start_time).total_seconds()
|
| 587 |
logger.info(f"PROCESO DE VIDEO FINALIZADO | Output: {output_path} | Tiempo total: {total_time:.2f}s")
|
| 588 |
|
| 589 |
-
# --- SOLUCIÓN: Copiar video a directorio accesible por Gradio ---
|
| 590 |
final_output_filename = "final_video.mp4"
|
| 591 |
final_output_path = os.path.join(os.getcwd(), final_output_filename)
|
| 592 |
-
|
| 593 |
if os.path.exists(final_output_path):
|
| 594 |
-
os.remove(final_output_path)
|
| 595 |
-
|
| 596 |
shutil.copy2(output_path, final_output_path)
|
| 597 |
logger.info(f"Video copiado a ruta accesible para Gradio: {final_output_path}")
|
| 598 |
-
output_path = final_output_path
|
| 599 |
|
| 600 |
return output_path
|
| 601 |
|
|
@@ -607,78 +636,97 @@ def crear_video(prompt_type, input_text, selected_voice, musica_file=None):
|
|
| 607 |
raise e
|
| 608 |
finally:
|
| 609 |
logger.info("Iniciando limpieza de clips y archivos temporales intermedios...")
|
| 610 |
-
|
| 611 |
-
|
|
|
|
|
|
| 612 |
|
| 613 |
-
|
| 614 |
-
def run_app(prompt_type, prompt_ia, prompt_manual, musica_file, selected_voice): # <-- Recibe el valor del Dropdown
|
| 615 |
logger.info("="*80)
|
| 616 |
logger.info("SOLICITUD RECIBIDA EN INTERFAZ")
|
| 617 |
-
|
| 618 |
-
# Elegir el texto de entrada basado en el prompt_type
|
| 619 |
input_text = prompt_ia if prompt_type == "Generar Guion con IA" else prompt_manual
|
| 620 |
-
|
| 621 |
output_video = None
|
| 622 |
output_file = None
|
| 623 |
status_msg = gr.update(value="⏳ Procesando...", interactive=False)
|
| 624 |
-
|
| 625 |
if not input_text or not input_text.strip():
|
| 626 |
logger.warning("Texto de entrada vacío.")
|
| 627 |
-
# Retornar None para video y archivo, actualizar estado con mensaje de error
|
| 628 |
return None, None, gr.update(value="⚠️ Por favor, ingresa texto para el guion o el tema.", interactive=False)
|
| 629 |
-
|
| 630 |
-
# Validar la voz seleccionada. Si no es válida, usar la por defecto.
|
| 631 |
-
# AVAILABLE_VOICES se obtiene al inicio. Hay que buscar si el voice_id existe en la lista de pares (nombre, id)
|
| 632 |
voice_ids_disponibles = [v[1] for v in AVAILABLE_VOICES]
|
| 633 |
if selected_voice not in voice_ids_disponibles:
|
| 634 |
-
logger.warning(f"Voz seleccionada inválida
|
| 635 |
-
selected_voice = DEFAULT_VOICE_ID
|
| 636 |
else:
|
| 637 |
logger.info(f"Voz seleccionada validada: {selected_voice}")
|
| 638 |
-
|
| 639 |
-
|
| 640 |
logger.info(f"Tipo de entrada: {prompt_type}")
|
| 641 |
logger.debug(f"Texto de entrada: '{input_text[:100]}...'")
|
| 642 |
if musica_file:
|
| 643 |
logger.info(f"Archivo de música recibido: {musica_file}")
|
| 644 |
else:
|
| 645 |
logger.info("No se proporcionó archivo de música.")
|
| 646 |
-
logger.info(f"Voz final a usar (ID): {selected_voice}")
|
| 647 |
-
|
| 648 |
try:
|
| 649 |
logger.info("Llamando a crear_video...")
|
| 650 |
-
|
| 651 |
-
video_path = crear_video(prompt_type, input_text, selected_voice, musica_file) # <-- PASAR selected_voice (ID) a crear_video
|
| 652 |
-
|
| 653 |
if video_path and os.path.exists(video_path):
|
| 654 |
logger.info(f"crear_video retornó path: {video_path}")
|
| 655 |
logger.info(f"Tamaño del archivo de video retornado: {os.path.getsize(video_path)} bytes")
|
| 656 |
-
output_video = video_path
|
| 657 |
-
output_file = video_path
|
| 658 |
status_msg = gr.update(value="✅ Video generado exitosamente.", interactive=False)
|
| 659 |
else:
|
| 660 |
logger.error(f"crear_video no retornó un path válido o el archivo no existe: {video_path}")
|
| 661 |
status_msg = gr.update(value="❌ Error: La generación del video falló o el archivo no se creó correctamente.", interactive=False)
|
| 662 |
-
|
| 663 |
except ValueError as ve:
|
| 664 |
logger.warning(f"Error de validación durante la creación del video: {str(ve)}")
|
| 665 |
status_msg = gr.update(value=f"⚠️ Error de validación: {str(ve)}", interactive=False)
|
| 666 |
except Exception as e:
|
| 667 |
logger.critical(f"Error crítico durante la creación del video: {str(e)}", exc_info=True)
|
| 668 |
status_msg = gr.update(value=f"❌ Error inesperado: {str(e)}", interactive=False)
|
| 669 |
-
|
| 670 |
-
logger.info("Fin del handler run_app.")
|
| 671 |
-
return output_video, output_file, status_msg
|
| 672 |
|
| 673 |
# Interfaz de Gradio
|
| 674 |
with gr.Blocks(title="Generador de Videos con IA", theme=gr.themes.Soft(), css="""
|
| 675 |
.gradio-container {max-width: 800px; margin: auto;}
|
| 676 |
h1 {text-align: center;}
|
| 677 |
""") as app:
|
| 678 |
-
|
| 679 |
gr.Markdown("# 🎬 Generador Automático de Videos con IA")
|
| 680 |
gr.Markdown("Genera videos cortos a partir de un tema o guion, usando imágenes de archivo de Pexels y voz generada.")
|
| 681 |
-
|
| 682 |
with gr.Row():
|
| 683 |
with gr.Column():
|
| 684 |
prompt_type = gr.Radio(
|
|
@@ -686,8 +734,6 @@ with gr.Blocks(title="Generador de Videos con IA", theme=gr.themes.Soft(), css="
|
|
| 686 |
label="Método de Entrada",
|
| 687 |
value="Generar Guion con IA"
|
| 688 |
)
|
| 689 |
-
|
| 690 |
-
# Contenedores para los campos de texto para controlar la visibilidad
|
| 691 |
with gr.Column(visible=True) as ia_guion_column:
|
| 692 |
prompt_ia = gr.Textbox(
|
| 693 |
label="Tema para IA",
|
|
@@ -695,9 +741,7 @@ with gr.Blocks(title="Generador de Videos con IA", theme=gr.themes.Soft(), css="
|
|
| 695 |
placeholder="Ej: Un paisaje natural con montañas y ríos al amanecer, mostrando la belleza de la naturaleza...",
|
| 696 |
max_lines=4,
|
| 697 |
value=""
|
| 698 |
-
# visible=... <-- ¡NO DEBE ESTAR AQUÍ!
|
| 699 |
)
|
| 700 |
-
|
| 701 |
with gr.Column(visible=False) as manual_guion_column:
|
| 702 |
prompt_manual = gr.Textbox(
|
| 703 |
label="Tu Guion Completo",
|
|
@@ -705,42 +749,30 @@ with gr.Blocks(title="Generador de Videos con IA", theme=gr.themes.Soft(), css="
|
|
| 705 |
placeholder="Ej: En este video exploraremos los misterios del océano. Veremos la vida marina fascinante y los arrecifes de coral vibrantes. ¡Acompáñanos en esta aventura subacuática!",
|
| 706 |
max_lines=10,
|
| 707 |
value=""
|
| 708 |
-
# visible=... <-- ¡NO DEBE ESTAR AQUÍ!
|
| 709 |
)
|
| 710 |
-
|
| 711 |
musica_input = gr.Audio(
|
| 712 |
label="Música de fondo (opcional)",
|
| 713 |
type="filepath",
|
| 714 |
interactive=True,
|
| 715 |
value=None
|
| 716 |
-
# visible=... <-- ¡NO DEBE ESTAR AQUÍ!
|
| 717 |
)
|
| 718 |
-
|
| 719 |
-
# --- COMPONENTE: Selección de Voz ---
|
| 720 |
voice_dropdown = gr.Dropdown(
|
| 721 |
label="Seleccionar Voz para Guion",
|
| 722 |
-
choices=AVAILABLE_VOICES,
|
| 723 |
-
value=DEFAULT_VOICE_ID,
|
| 724 |
interactive=True
|
| 725 |
-
# visible=... <-- ¡NO DEBE ESTAR AQUÍ!
|
| 726 |
)
|
| 727 |
-
# --- FIN COMPONENTE ---
|
| 728 |
-
|
| 729 |
-
|
| 730 |
generate_btn = gr.Button("✨ Generar Video", variant="primary")
|
| 731 |
-
|
| 732 |
with gr.Column():
|
| 733 |
video_output = gr.Video(
|
| 734 |
label="Previsualización del Video Generado",
|
| 735 |
interactive=False,
|
| 736 |
height=400
|
| 737 |
-
# visible=... <-- ¡NO DEBE ESTAR AQUÍ!
|
| 738 |
)
|
| 739 |
file_output = gr.File(
|
| 740 |
label="Descargar Archivo de Video",
|
| 741 |
interactive=False,
|
| 742 |
-
visible=False
|
| 743 |
-
# visible=... <-- ¡NO DEBE ESTAR AQUÍ si ya está visible=False arriba!
|
| 744 |
)
|
| 745 |
status_output = gr.Textbox(
|
| 746 |
label="Estado",
|
|
@@ -748,48 +780,36 @@ with gr.Blocks(title="Generador de Videos con IA", theme=gr.themes.Soft(), css="
|
|
| 748 |
show_label=False,
|
| 749 |
placeholder="Esperando acción...",
|
| 750 |
value="Esperando entrada..."
|
| 751 |
-
# visible=... <-- ¡NO DEBE ESTAR AQUÍ!
|
| 752 |
)
|
| 753 |
-
|
| 754 |
-
# Evento para mostrar/ocultar los campos de texto según el tipo de prompt
|
| 755 |
prompt_type.change(
|
| 756 |
lambda x: (gr.update(visible=x == "Generar Guion con IA"),
|
| 757 |
gr.update(visible=x == "Usar Mi Guion")),
|
| 758 |
inputs=prompt_type,
|
| 759 |
-
outputs=[ia_guion_column, manual_guion_column]
|
| 760 |
)
|
| 761 |
-
|
| 762 |
-
# Evento click del botón de generar video
|
| 763 |
generate_btn.click(
|
| 764 |
-
# Acción 1 (síncrona): Resetear salidas y establecer estado
|
| 765 |
lambda: (None, None, gr.update(value="⏳ Procesando... Esto puede tomar varios minutos.", interactive=False)),
|
| 766 |
outputs=[video_output, file_output, status_output],
|
| 767 |
).then(
|
| 768 |
-
# Acción 2 (asíncrona): Llamar a la función principal
|
| 769 |
run_app,
|
| 770 |
-
|
| 771 |
-
inputs=[prompt_type, prompt_ia, prompt_manual, musica_input, voice_dropdown], # <-- Pasar los 5 inputs a run_app
|
| 772 |
-
# run_app retornará los 3 outputs esperados
|
| 773 |
outputs=[video_output, file_output, status_output],
|
| 774 |
-
queue=True
|
| 775 |
).then(
|
| 776 |
-
# Acción 3 (síncrona): Hacer visible el enlace de descarga
|
| 777 |
lambda video_path, file_path, status_msg: gr.update(visible=file_path is not None),
|
| 778 |
inputs=[video_output, file_output, status_output],
|
| 779 |
outputs=[file_output]
|
| 780 |
)
|
| 781 |
-
|
| 782 |
-
|
| 783 |
gr.Markdown("### Instrucciones:")
|
| 784 |
gr.Markdown("""
|
| 785 |
-
1.
|
| 786 |
-
2.
|
| 787 |
-
3.
|
| 788 |
-
4.
|
| 789 |
-
5.
|
| 790 |
-
6.
|
| 791 |
-
7.
|
| 792 |
-
8.
|
| 793 |
""")
|
| 794 |
gr.Markdown("---")
|
| 795 |
gr.Markdown("Desarrollado por [Tu Nombre/Empresa/Alias - Opcional]")
|
|
@@ -804,13 +824,9 @@ if __name__ == "__main__":
|
|
| 804 |
logger.info("Clips base de MoviePy creados y cerrados exitosamente. FFmpeg parece accesible.")
|
| 805 |
except Exception as e:
|
| 806 |
logger.critical(f"Fallo al crear clip base de MoviePy. A menudo indica problemas con FFmpeg/ImageMagick. Error: {e}", exc_info=True)
|
| 807 |
-
|
| 808 |
except Exception as e:
|
| 809 |
-
|
| 810 |
-
|
| 811 |
-
# Solución para el timeout de Gradio - Añadir esta línea
|
| 812 |
-
os.environ['GRADIO_SERVER_TIMEOUT'] = '6000' # 600 segundos = 10 minutos
|
| 813 |
-
|
| 814 |
logger.info("Iniciando aplicación Gradio...")
|
| 815 |
try:
|
| 816 |
app.queue(max_size=1).launch(server_name="0.0.0.0", server_port=7860, share=False)
|
|
app.py — updated side of the diff (added lines are marked with "+"):
| 9 | import torch
| 10 | from transformers import GPT2Tokenizer, GPT2LMHeadModel
| 11 | from keybert import KeyBERT
| 12 | + from moviepy.editor import VideoFileClip, concatenate_videoclips, AudioFileClip, CompositeAudioClip, concatenate_audioclips, AudioClip, ColorClip
| 13 | import re
| 14 | import math
| 15 | import shutil
| 31 | logger.info("="*80)
| 32 |
| 33 | # Diccionario de voces TTS disponibles organizadas por idioma
| 34 | VOCES_DISPONIBLES = {
| 35 | "Español (España)": {
| 36 | "es-ES-JuanNeural": "Juan (España) - Masculino",
| 98 | choices = []
| 99 | for region, voices in VOCES_DISPONIBLES.items():
| 100 | for voice_id, voice_name in voices.items():
| 101 | choices.append((f"{voice_name} ({region})", voice_id))
| 102 | return choices
| 103 |
| 104 | # Obtener las voces al inicio del script
| 105 | + AVAILABLE_VOICES = get_voice_choices()
| 106 | + DEFAULT_VOICE_ID = "es-ES-JuanNeural"
| 107 | DEFAULT_VOICE_NAME = DEFAULT_VOICE_ID
| 108 | for text, voice_id in AVAILABLE_VOICES:
| 109 | if voice_id == DEFAULT_VOICE_ID:
| 110 | DEFAULT_VOICE_NAME = text
| 111 | break
| 112 | if DEFAULT_VOICE_ID not in [v[1] for v in AVAILABLE_VOICES]:
| 113 | DEFAULT_VOICE_ID = AVAILABLE_VOICES[0][1] if AVAILABLE_VOICES else "en-US-AriaNeural"
| 114 | + DEFAULT_VOICE_NAME = AVAILABLE_VOICES[0][0] if AVAILABLE_VOICES else "Aria (United States) - Female"
| 115 | logger.info(f"Voz por defecto seleccionada (ID): {DEFAULT_VOICE_ID}")
| 116 |
| 117 | # Clave API de Pexels
| 118 | PEXELS_API_KEY = os.environ.get("PEXELS_API_KEY")
| 119 | if not PEXELS_API_KEY:
| 120 | logger.critical("NO SE ENCONTRÓ PEXELS_API_KEY EN VARIABLES DE ENTORNO")
| 121 |
| 122 | # Inicialización de modelos
| 123 | MODEL_NAME = "datificate/gpt2-small-spanish"
| 147 | if not api_key:
| 148 | logger.warning("No se puede buscar en Pexels: API Key no configurada.")
| 149 | return []
| 150 | logger.debug(f"Buscando en Pexels: '{query}' | Resultados: {per_page}")
| 151 | headers = {"Authorization": api_key}
| 152 | try:
| 156 | "orientation": "landscape",
| 157 | "size": "medium"
| 158 | }
| 159 | response = requests.get(
| 160 | "https://api.pexels.com/videos/search",
| 161 | headers=headers,
| 163 | timeout=20
| 164 | )
| 165 | response.raise_for_status()
| 166 | data = response.json()
| 167 | videos = data.get('videos', [])
| 168 | logger.info(f"Pexels: {len(videos)} videos encontrados para '{query}'")
| 169 | return videos
| 170 | except requests.exceptions.RequestException as e:
| 171 | logger.error(f"Error de conexión Pexels para '{query}': {str(e)}")
| 172 | except json.JSONDecodeError:
| 173 | logger.error(f"Pexels: JSON inválido recibido | Status: {response.status_code} | Respuesta: {response.text[:200]}...")
| 174 | except Exception as e:
| 175 | logger.error(f"Error inesperado Pexels para '{query}': {str(e)}", exc_info=True)
| 176 | return []
| 177 |
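For quick manual testing, a minimal sketch of how this search helper could be exercised on its own — assuming the file is importable as `app` (hypothetical module name), `requests` is installed, and `PEXELS_API_KEY` is set; the `video_files`/`file_type`/`width`/`link` fields are the same ones `crear_video` reads further down in this diff:

    import os
    from app import buscar_videos_pexels  # hypothetical import path; adjust to the real module name

    videos = buscar_videos_pexels("niebla+bosque", os.environ.get("PEXELS_API_KEY"), per_page=3)
    for video in videos:
        # Keep only reasonably sized MP4 renditions, mirroring the filter used later in crear_video.
        mp4_files = [f for f in video.get("video_files", [])
                     if f.get("file_type", "").startswith("video/mp4") and 640 <= f.get("width", 0) <= 1920]
        if mp4_files:
            print(mp4_files[0]["link"])
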
| 178 | def generate_script(prompt, max_length=150):
| 180 | if not tokenizer or not model:
| 181 | logger.warning("Modelos GPT-2 no disponibles - Usando prompt original como guion.")
| 182 | return prompt.strip()
| 183 | instruction_phrase_start = "Escribe un guion corto, interesante y coherente sobre:"
| 184 | ai_prompt = f"{instruction_phrase_start} {prompt}"
| 185 | try:
| 186 | inputs = tokenizer(ai_prompt, return_tensors="pt", truncation=True, max_length=512)
| 187 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
| 188 | model.to(device)
| 189 | inputs = {k: v.to(device) for k, v in inputs.items()}
| 190 | outputs = model.generate(
| 191 | **inputs,
| 192 | max_length=max_length + inputs[list(inputs.keys())[0]].size(1),
| 199 | eos_token_id=tokenizer.eos_token_id,
| 200 | no_repeat_ngram_size=3
| 201 | )
| 202 | text = tokenizer.decode(outputs[0], skip_special_tokens=True)
| 203 | cleaned_text = text.strip()
| 204 | try:
| 205 | prompt_in_output_idx = text.lower().find(prompt.lower())
| 206 | if prompt_in_output_idx != -1:
| 207 | cleaned_text = text[prompt_in_output_idx + len(prompt):].strip()
| 208 | logger.debug("Texto limpiado tomando parte después del prompt original.")
| 209 | else:
| 210 | + instruction_start_idx = text.find(instruction_phrase_start)
| 211 | + if instruction_start_idx != -1:
| 212 | + cleaned_text = text[instruction_start_idx + len(instruction_phrase_start):].strip()
| 213 | + logger.debug("Texto limpiado tomando parte después de la frase de instrucción base.")
| 214 | + else:
| 215 | + logger.warning("No se pudo identificar el inicio del guión generado. Usando texto generado completo.")
| 216 | + cleaned_text = text.strip()
| 217 | except Exception as e:
| 218 | + logger.warning(f"Error durante la limpieza heurística del guión de IA: {e}. Usando texto generado sin limpieza adicional.")
| 219 | + cleaned_text = re.sub(r'<[^>]+>', '', text).strip()
| 220 | + if not cleaned_text or len(cleaned_text) < 10:
| 221 | + logger.warning("El guión generado parece muy corto o vacío después de la limpieza heurística. Usando el texto generado original (sin limpieza adicional).")
| 222 | + cleaned_text = re.sub(r'<[^>]+>', '', text).strip()
| 223 | cleaned_text = re.sub(r'<[^>]+>', '', cleaned_text).strip()
| 224 | + cleaned_text = cleaned_text.lstrip(':').strip()
| 225 | + cleaned_text = cleaned_text.lstrip('.').strip()
| 226 | sentences = cleaned_text.split('.')
| 227 | if sentences and sentences[0].strip():
| 228 | final_text = sentences[0].strip() + '.'
| 229 | + if len(sentences) > 1 and sentences[1].strip() and len(final_text.split()) < max_length * 0.7:
| 230 | + final_text += " " + sentences[1].strip() + "."
| 231 | + final_text = final_text.replace("..", ".")
| 232 | logger.info(f"Guion generado final (Truncado a 100 chars): '{final_text[:100]}...'")
| 233 | return final_text.strip()
| 234 | logger.info(f"Guion generado final (sin oraciones completas detectadas - Truncado): '{cleaned_text[:100]}...'")
| 235 | + return cleaned_text.strip()
| 236 | except Exception as e:
| 237 | + logger.error(f"Error generando guion con GPT-2: {str(e)}", exc_info=True)
| 238 | logger.warning("Usando prompt original como guion debido al error de generación.")
| 239 | return prompt.strip()
| 240 |
| 241 | async def text_to_speech(text, output_path, voice):
| 242 | logger.info(f"Convirtiendo texto a voz | Caracteres: {len(text)} | Voz: {voice} | Salida: {output_path}")
| 243 | if not text or not text.strip():
| 244 | logger.warning("Texto vacío para TTS")
| 245 | return False
| 246 | try:
| 247 | communicate = edge_tts.Communicate(text, voice)
| 248 | await communicate.save(output_path)
| 249 | if os.path.exists(output_path) and os.path.getsize(output_path) > 100:
| 250 | logger.info(f"Audio guardado exitosamente en: {output_path} | Tamaño: {os.path.getsize(output_path)} bytes")
| 251 | return True
| 252 | else:
| 253 | logger.error(f"TTS guardó un archivo pequeño o vacío en: {output_path}")
| 254 | return False
| 255 | except Exception as e:
| 256 | logger.error(f"Error en TTS con voz '{voice}': {str(e)}", exc_info=True)
| 257 | return False
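A minimal sketch of driving this coroutine from synchronous code, the same way `crear_video` does further down (via `asyncio.run`), with a fallback voice. The `narrate` wrapper name and the fallback list are illustrative only, not part of the app:

    import asyncio

    def narrate(script: str, out_path: str, preferred_voice: str = "es-ES-JuanNeural") -> bool:
        # Try the requested voice first, then a second Spanish voice, deduplicating while keeping order.
        for voice in dict.fromkeys([preferred_voice, "es-ES-ElviraNeural"]):
            try:
                if asyncio.run(text_to_speech(script, out_path, voice)):
                    return True
            except Exception as exc:
                print(f"TTS failed with voice {voice}: {exc}")
        return False
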
| 260 | if not url:
| 261 | logger.warning("URL de video no proporcionada para descargar")
| 262 | return None
| 263 | try:
| 264 | logger.info(f"Descargando video desde: {url[:80]}...")
| 265 | os.makedirs(temp_dir, exist_ok=True)
| 266 | file_name = f"video_dl_{datetime.now().strftime('%Y%m%d_%H%M%S_%f')}.mp4"
| 267 | output_path = os.path.join(temp_dir, file_name)
| 268 | with requests.get(url, stream=True, timeout=60) as r:
| 269 | r.raise_for_status()
| 270 | with open(output_path, 'wb') as f:
| 271 | for chunk in r.iter_content(chunk_size=8192):
| 272 | f.write(chunk)
| 273 | if os.path.exists(output_path) and os.path.getsize(output_path) > 1000:
| 274 | + logger.info(f"Video descargado exitosamente: {output_path} | Tamaño: {os.path.getsize(output_path)} bytes")
| 275 | + return output_path
| 276 | else:
| 277 | + logger.warning(f"Descarga parece incompleta o vacía para {url[:80]}... Archivo: {output_path} Tamaño: {os.path.getsize(output_path) if os.path.exists(output_path) else 'N/A'} bytes")
| 278 | + if os.path.exists(output_path):
| 279 | + os.remove(output_path)
| 280 | + return None
| 281 | except requests.exceptions.RequestException as e:
| 282 | logger.error(f"Error de descarga para {url[:80]}... : {str(e)}")
| 283 | except Exception as e:
| 284 | logger.error(f"Error inesperado descargando {url[:80]}... : {str(e)}", exc_info=True)
| 285 | return None
| 286 |
| 287 | def loop_audio_to_length(audio_clip, target_duration):
| 288 | logger.debug(f"Ajustando audio | Duración actual: {audio_clip.duration:.2f}s | Objetivo: {target_duration:.2f}s")
| 289 | if audio_clip is None or audio_clip.duration is None or audio_clip.duration <= 0:
| 290 | logger.warning("Input audio clip is invalid (None or zero duration), cannot loop.")
| 291 | try:
| 292 | sr = getattr(audio_clip, 'fps', 44100) if audio_clip else 44100
| 293 | + return AudioClip(lambda t: 0, duration=target_duration, fps=sr)
| 294 | except Exception as e:
| 295 | + logger.error(f"Could not create silence clip: {e}", exc_info=True)
| 296 | + return AudioFileClip(filename="")
| 297 | if audio_clip.duration >= target_duration:
| 298 | logger.debug("Audio clip already longer or equal to target. Trimming.")
| 299 | trimmed_clip = audio_clip.subclip(0, target_duration)
| 300 | if trimmed_clip.duration is None or trimmed_clip.duration <= 0:
| 301 | + logger.error("Trimmed audio clip is invalid.")
| 302 | + try: trimmed_clip.close()
| 303 | + except: pass
| 304 | + return AudioFileClip(filename="")
| 305 | return trimmed_clip
| 306 | loops = math.ceil(target_duration / audio_clip.duration)
| 307 | logger.debug(f"Creando {loops} loops de audio")
| 308 | audio_segments = [audio_clip] * loops
| 309 | looped_audio = None
| 310 | final_looped_audio = None
| 311 | try:
| 312 | + looped_audio = concatenate_audioclips(audio_segments)
| 313 | + if looped_audio.duration is None or looped_audio.duration <= 0:
| 314 | logger.error("Concatenated audio clip is invalid (None or zero duration).")
| 315 | raise ValueError("Invalid concatenated audio.")
| 316 | + final_looped_audio = looped_audio.subclip(0, target_duration)
| 317 | + if final_looped_audio.duration is None or final_looped_audio.duration <= 0:
| 318 | logger.error("Final subclipped audio clip is invalid (None or zero duration).")
| 319 | raise ValueError("Invalid final subclipped audio.")
| 320 | + return final_looped_audio
| 321 | except Exception as e:
| 322 | logger.error(f"Error concatenating/subclipping audio clips for looping: {str(e)}", exc_info=True)
| 323 | try:
| 324 | + if audio_clip.duration is not None and audio_clip.duration > 0:
| 325 | + logger.warning("Returning original audio clip (may be too short).")
| 326 | + return audio_clip.subclip(0, min(audio_clip.duration, target_duration))
| 327 | except:
| 328 | + pass
| 329 | logger.error("Fallback to original audio clip failed.")
| 330 | return AudioFileClip(filename="")
| 331 | finally:
| 332 | if looped_audio is not None and looped_audio is not final_looped_audio:
| 333 | try: looped_audio.close()
| 334 | except: pass
| 335 |
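A minimal sketch of the intended use of this helper — fitting a music bed under a narration track and mixing the two, the same pattern `crear_video` applies below. File names are placeholders and moviepy 1.x is assumed (for `volumex` and `CompositeAudioClip`):

    from moviepy.editor import AudioFileClip, CompositeAudioClip

    narration = AudioFileClip("voz.mp3")     # placeholder paths for illustration
    music = AudioFileClip("musica.mp3")
    # Loop/trim the music to the narration length, lower its volume, then mix both tracks.
    bed = loop_audio_to_length(music, narration.duration).volumex(0.3)
    mixed = CompositeAudioClip([narration, bed])
    mixed.write_audiofile("mezcla.mp3", fps=44100)
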
| 336 | def extract_visual_keywords_from_script(script_text):
| 337 | logger.info("Extrayendo palabras clave del guion")
| 338 | if not script_text or not script_text.strip():
| 339 | logger.warning("Guion vacío, no se pueden extraer palabras clave.")
| 340 | + return ["distopico", "dark", "terror", "ansiedad", "encuentros", "demonios", "siniestro",
| 341 | + "oscuro", "noche", "niebla", "abandonado", "miedo", "suspenso", "sombrio", "lluvia", "tormenta", "bosque", "cementerio",
| 342 | + "iglesia", "ruinas", "hospital", "escuela", "tunel", "puente", "carretera", "desierto", "pantano", "cueva", "paredes",
| 343 | + "ventanas rotas", "sombras", "silueta", "ojos", "susurros", "gritos", "corredor", "puerta cerrada", "escaleras",
| 344 | + "reloj parado", "matrix", "muñeca", "manchas", "sangre", "cadenas", "ritual", "velas", "libro antiguo",
| 345 | + "cruz invertida", "campanario", "campana", "nieve oscura", "cielo rojo", "luna llena", "animales muertos",
| 346 | + "cuervos", "arañas", "telarañas", "niebla densa", "luces parpadeando", "televisor estático", "radio interferencia",
| 347 | + "voz distorsionada", "figura encapuchada", "mascaras", "manos", "pies descalzos", "huellas", "ventana abierta",
| 348 | "viento fuerte", "reloj de pared", "sotano"]
| 349 | clean_text = re.sub(r'[^\w\sáéíóúñÁÉÍÓÚÑ]', '', script_text)
| 350 | keywords_list = []
| 351 | if kw_model:
| 352 | try:
| 353 | logger.debug("Intentando extracción con KeyBERT...")
| 354 | keywords1 = kw_model.extract_keywords(clean_text, keyphrase_ngram_range=(1, 1), stop_words='spanish', top_n=5)
| 355 | keywords2 = kw_model.extract_keywords(clean_text, keyphrase_ngram_range=(2, 2), stop_words='spanish', top_n=3)
| 356 | all_keywords = keywords1 + keywords2
| 357 | all_keywords.sort(key=lambda item: item[1], reverse=True)
| 358 | seen_keywords = set()
| 359 | for keyword, score in all_keywords:
| 360 | formatted_keyword = keyword.lower().replace(" ", "+")
| 363 | seen_keywords.add(formatted_keyword)
| 364 | if len(keywords_list) >= 5:
| 365 | break
| 366 | if keywords_list:
| 367 | logger.debug(f"Palabras clave extraídas por KeyBERT: {keywords_list}")
| 368 | return keywords_list
| 369 | except Exception as e:
| 370 | logger.warning(f"KeyBERT falló: {str(e)}. Intentando método simple.")
| 371 | logger.debug("Extrayendo palabras clave con método simple...")
| 372 | words = clean_text.lower().split()
| 373 | stop_words = {"el", "la", "los", "las", "de", "en", "y", "a", "que", "es", "un", "una", "con", "para", "del", "al", "por", "su", "sus", "se", "lo", "le", "me", "te", "nos", "os", "les", "mi", "tu",
| 374 | "nuestro", "vuestro", "este", "ese", "aquel", "esta", "esa", "aquella", "esto", "eso", "aquello", "mis", "tus",
| 375 |
"nuestros", "vuestros", "estas", "esas", "aquellas", "si", "no", "más", "menos", "sin", "sobre", "bajo", "entre", "hasta", "desde", "durante", "mediante", "según", "versus", "via", "cada", "todo", "todos", "toda", "todas", "poco", "pocos", "poca", "pocas", "mucho", "muchos", "mucha", "muchas", "varios", "varias", "otro", "otros", "otra", "otras", "mismo", "misma", "mismos", "mismas", "tan", "tanto", "tanta", "tantos", "tantas", "tal", "tales", "cual", "cuales", "cuyo", "cuya", "cuyos", "cuyas", "quien", "quienes", "cuan", "cuanto", "cuanta", "cuantos", "cuantas", "como", "donde", "cuando", "porque", "aunque", "mientras", "siempre", "nunca", "jamás", "muy", "casi", "solo", "solamente", "incluso", "apenas", "quizás", "tal vez", "acaso", "claro", "cierto", "obvio", "evidentemente", "realmente", "simplemente", "generalmente", "especialmente", "principalmente", "posiblemente", "probablemente", "difícilmente", "fácilmente", "rápidamente", "lentamente", "bien", "mal", "mejor", "peor", "arriba", "abajo", "adelante", "atrás", "cerca", "lejos", "dentro", "fuera", "encima", "debajo", "frente", "detrás", "antes", "después", "luego", "pronto", "tarde", "todavía", "ya", "aun", "aún", "quizá"}
| 376 | valid_words = [word for word in words if len(word) > 3 and word not in stop_words]
| 377 | if not valid_words:
| 378 | logger.warning("No se encontraron palabras clave válidas con método simple. Usando palabras clave predeterminadas.")
| 379 | + return ["espiritual", "terror", "matrix", "arcontes", "galaxia", "creepy", "magia", "gangstalking", "conspiracy"]
| 380 | word_counts = Counter(valid_words)
| 381 | top_keywords = [word.replace(" ", "+") for word, _ in word_counts.most_common(5)]
| 382 | if not top_keywords:
| 383 | + logger.warning("El método simple no produjo keywords. Usando palabras clave predeterminadas.")
| 384 | + return ["espiritual", "terror", "matrix", "arcontes", "galaxia", "creepy", "magia", "gangstalking", "conspiracy"]
| 385 | logger.info(f"Palabras clave finales: {top_keywords}")
| 386 | return top_keywords
| 387 |
| 388 | def crear_video(prompt_type, input_text, selected_voice, musica_file=None):
| 389 | logger.info("=" * 80)
| 390 | logger.info(f"INICIANDO CREACIÓN DE VIDEO | Tipo: {prompt_type}")
|
|
|
|
| 393 |
|
| 394 |
start_time = datetime.now()
|
| 395 |
temp_dir_intermediate = None
|
|
|
|
| 396 |
audio_tts_original = None
|
| 397 |
musica_audio_original = None
|
| 398 |
audio_tts = None
|
|
|
|
| 408 |
guion = generate_script(input_text)
|
| 409 |
else:
|
| 410 |
guion = input_text.strip()
|
|
|
|
| 411 |
logger.info(f"Guion final ({len(guion)} chars): '{guion[:100]}...'")
|
|
|
|
| 412 |
if not guion.strip():
|
| 413 |
logger.error("El guion resultante está vacío o solo contiene espacios.")
|
| 414 |
raise ValueError("El guion está vacío.")
|
|
|
|
| 417 |
logger.info(f"Directorio temporal intermedio creado: {temp_dir_intermediate}")
|
| 418 |
temp_intermediate_files = []
|
| 419 |
|
| 420 |
+
# 2. Generar audio de voz
|
| 421 |
logger.info("Generando audio de voz...")
|
| 422 |
voz_path = os.path.join(temp_dir_intermediate, "voz.mp3")
|
| 423 |
+
tts_voices_to_try = [selected_voice, "es-ES-JuanNeural", "es-ES-ElviraNeural"]
|
|
|
|
|
|
| 424 |
tts_success = False
|
| 425 |
tried_voices = set()
|
|
|
|
| 426 |
for current_voice in tts_voices_to_try:
|
| 427 |
if not current_voice or current_voice in tried_voices:
|
| 428 |
continue
|
| 429 |
tried_voices.add(current_voice)
|
|
|
|
| 430 |
logger.info(f"Intentando TTS con voz: {current_voice}...")
|
| 431 |
try:
|
| 432 |
tts_success = asyncio.run(text_to_speech(guion, voz_path, voice=current_voice))
|
|
|
|
| 434 |
logger.info(f"TTS exitoso con voz '{current_voice}'.")
|
| 435 |
break
|
| 436 |
except Exception as e:
|
| 437 |
+
logger.warning(f"Fallo al generar TTS con voz '{current_voice}': {str(e)}")
|
|
|
|
|
|
|
| 438 |
if not tts_success or not os.path.exists(voz_path) or os.path.getsize(voz_path) <= 100:
|
| 439 |
+
logger.error("Fallo en la generación de voz después de todos los intentos.")
|
| 440 |
raise ValueError("Error generando voz a partir del guion (fallo de TTS).")
|
|
|
|
| 441 |
temp_intermediate_files.append(voz_path)
|
|
|
|
| 442 |
audio_tts_original = AudioFileClip(voz_path)
|
| 443 |
+
if audio_tts_original is None or audio_tts_original.duration is None or audio_tts_original.duration <= 0:
|
| 444 |
+
logger.critical("Clip de audio TTS inicial es inválido.")
|
| 445 |
+
raise ValueError("Audio de voz generado es inválido.")
|
|
|
|
|
| 446 |
audio_tts = audio_tts_original
|
| 447 |
+
audio_duration = audio_tts.duration
|
| 448 |
logger.info(f"Duración audio voz: {audio_duration:.2f} segundos")
|
|
|
|
| 449 |
if audio_duration < 1.0:
|
| 450 |
logger.error(f"Duración audio voz ({audio_duration:.2f}s) es muy corta.")
|
| 451 |
raise ValueError("Generated voice audio is too short (min 1 second required).")
|
| 452 |
|
| 453 |
+
# 3. Extraer palabras clave para buscar videos
|
| 454 |
+
keywords = extract_visual_keywords_from_script(guion)
|
| 455 |
+
logger.info(f"Palabras clave para búsqueda de videos: {keywords}")
|
| 456 |
+
|
| 457 |
+
# 4. Buscar y descargar videos de Pexels
|
| 458 |
+
logger.info("Buscando y descargando videos...")
|
| 459 |
+
downloaded_videos = []
|
| 460 |
+
for keyword in keywords:
|
| 461 |
+
videos = buscar_videos_pexels(keyword, PEXELS_API_KEY, per_page=3)
|
| 462 |
+
for video in videos:
|
| 463 |
+
video_files = video.get('video_files', [])
|
| 464 |
+
if not video_files:
|
| 465 |
+
continue
|
| 466 |
+
# Priorizar videos en resolución media
|
| 467 |
+
selected_file = next((f for f in video_files if f['file_type'].startswith('video/mp4') and f['width'] >= 640 and f['width'] <= 1920), None)
|
| 468 |
+
if not selected_file:
|
| 469 |
+
continue
|
| 470 |
+
video_url = selected_file['link']
|
| 471 |
+
video_path = download_video_file(video_url, temp_dir_intermediate)
|
| 472 |
+
if video_path:
|
| 473 |
+
downloaded_videos.append(video_path)
|
| 474 |
+
temp_intermediate_files.append(video_path)
|
| 475 |
+
if not downloaded_videos:
|
| 476 |
+
logger.error("No se descargaron videos válidos de Pexels.")
|
| 477 |
+
raise ValueError("No se encontraron videos válidos para el guion.")
|
| 478 |
+
|
| 479 |
+
# 5. Procesar videos
|
| 480 |
+
logger.info(f"Procesando {len(downloaded_videos)} videos descargados...")
|
| 481 |
+
for video_path in downloaded_videos:
|
| 482 |
+
try:
|
| 483 |
+
clip = VideoFileClip(video_path)
|
| 484 |
+
if clip is None or clip.duration is None or clip.duration <= 0:
|
| 485 |
+
logger.warning(f"Clip inválido: {video_path}")
|
| 486 |
+
try: clip.close()
|
| 487 |
+
except: pass
|
| 488 |
+
continue
|
| 489 |
+
# Asegurar resolución consistente
|
| 490 |
+
clip = clip.resize((1280, 720)) # Estandarizar a 720p
|
| 491 |
+
source_clips.append(clip)
|
| 492 |
+
clips_to_concatenate.append(clip)
|
| 493 |
+
except Exception as e:
|
| 494 |
+
logger.warning(f"Error al cargar video {video_path}: {str(e)}")
|
| 495 |
+
continue
|
| 496 |
+
if not clips_to_concatenate:
|
| 497 |
+
logger.error("No se cargaron clips de video válidos para concatenar.")
|
| 498 |
+
raise ValueError("No se pudieron cargar clips de video válidos.")
|
| 499 |
+
|
| 500 |
+
# Ajustar duración de clips
|
| 501 |
+
clips_adjusted = []
|
| 502 |
+
total_video_duration = 0
|
| 503 |
+
clip_duration_target = audio_duration / len(clips_to_concatenate) if clips_to_concatenate else audio_duration
|
| 504 |
+
for clip in clips_to_concatenate:
|
| 505 |
+
try:
|
| 506 |
+
if clip.duration > clip_duration_target:
|
| 507 |
+
adjusted_clip = clip.subclip(0, clip_duration_target)
|
| 508 |
+
else:
|
| 509 |
+
adjusted_clip = clip
|
| 510 |
+
if adjusted_clip is None or adjusted_clip.duration is None or adjusted_clip.duration <= 0:
|
| 511 |
+
logger.warning("Clip ajustado tiene duración inválida.")
|
| 512 |
+
try: adjusted_clip.close()
|
| 513 |
+
except: pass
|
| 514 |
+
continue
|
| 515 |
+
clips_adjusted.append(adjusted_clip)
|
| 516 |
+
total_video_duration += adjusted_clip.duration
|
| 517 |
+
except Exception as e:
|
| 518 |
+
logger.warning(f"Error al ajustar clip: {str(e)}")
|
| 519 |
+
try: clip.close()
|
| 520 |
+
except: pass
|
| 521 |
+
continue
|
| 522 |
+
if not clips_adjusted:
|
| 523 |
+
logger.error("No hay clips ajustados válidos para concatenar.")
|
| 524 |
+
raise ValueError("No se pudieron ajustar clips de video.")
|
| 525 | +
| 526 | +        # Concatenar videos
| 527 | +        logger.info("Concatenando clips de video...")
| 528 | +        try:
| 529 | +            video_base = concatenate_videoclips(clips_adjusted, method="compose")
| 530 | +            if video_base is None or video_base.duration is None or video_base.duration <= 0:
| 531 | +                logger.critical("Concatenación de videos resultó en un clip inválido.")
| 532 | +                raise ValueError("La concatenación de videos falló.")
| 533 | +        except Exception as e:
| 534 | +            logger.error(f"Error al concatenar clips de video: {str(e)}")
| 535 | +            raise ValueError("Fallo en la concatenación de videos.")
| 536 | +        logger.info(f"Duración video concatenado: {video_base.duration:.2f} segundos")
| 537 | +
| 538 | +        # Ajustar duración del video base al audio
| 539 | +        if video_base.duration < audio_duration:
| 540 | +            logger.info("Video base más corto que audio, ajustando duración...")
| 541 | +            try:
| 542 | +                padding_duration = audio_duration - video_base.duration
| 543 | +                black_clip = ColorClip(size=(1280, 720), color=(0, 0, 0), duration=padding_duration)
| 544 | +                video_base = concatenate_videoclips([video_base, black_clip])
| 545 | +            except Exception as e:
| 546 | +                logger.error(f"Error al añadir padding al video: {str(e)}")
| 547 | +                raise ValueError("Fallo al ajustar duración del video base.")
| 548 | +        elif video_base.duration > audio_duration:
| 549 | +            logger.info("Video base más largo que audio, recortando...")
| 550 | +            try:
| 551 | +                video_base = video_base.subclip(0, audio_duration)
| 552 | +            except Exception as e:
| 553 | +                logger.error(f"Error al recortar video base: {str(e)}")
| 554 | +                raise ValueError("Fallo al recortar video base.")
| 555 | +        logger.info(f"Duración final video base: {video_base.duration:.2f} segundos")
| 556 | +
| 557 | +        # Verificar video_base antes de set_audio
| 558 | +        if video_base is None or video_base.duration is None or video_base.duration <= 0:
| 559 | +            logger.critical("video_base es inválido antes de asignar audio.")
| 560 | +            raise ValueError("El clip de video base es inválido.")
| 561 | +
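Because the trimmed clips can still add up to less (or more) than the narration, the hunk above pads the timeline with a black ColorClip or trims it to the exact audio length. A sketch of the same pad-or-trim step, assuming ColorClip is imported from moviepy.editor alongside the other clip classes:

    from moviepy.editor import ColorClip, concatenate_videoclips

    def match_video_to_audio(video_base, audio_duration, size=(1280, 720)):
        if video_base.duration < audio_duration:
            # Pad with black frames so the picture lasts exactly as long as the narration.
            pad = ColorClip(size=size, color=(0, 0, 0),
                            duration=audio_duration - video_base.duration)
            return concatenate_videoclips([video_base, pad])
        if video_base.duration > audio_duration:
            # Trim surplus footage.
            return video_base.subclip(0, audio_duration)
        return video_base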
| 562 | +        # 6. Manejar música de fondo
| 563 | +        final_audio = audio_tts
| 564 | +        if musica_file:
| 565 | +            logger.info(f"Procesando música de fondo: {musica_file}")
| 566 | +            try:
| 567 | +                musica_audio_original = AudioFileClip(musica_file)
| 568 | +                if musica_audio_original is None or musica_audio_original.duration is None or musica_audio_original.duration <= 0:
| 569 | +                    logger.warning("Archivo de música inválido.")
| 570 | +                    try: musica_audio_original.close()
| 571 | +                    except: pass
| 572 | +                else:
| 573 | +                    musica_audio = loop_audio_to_length(musica_audio_original, audio_duration)
| 574 | +                    if musica_audio is None or musica_audio.duration is None or musica_audio.duration <= 0:
| 575 | +                        logger.warning("Música ajustada es inválida.")
| 576 | +                        try: musica_audio.close()
| 577 | +                        except: pass
| 578 | +                    else:
| 579 | +                        musica_audio = musica_audio.volumex(0.3)
| 580 | +                        final_audio = CompositeAudioClip([audio_tts, musica_audio])
| 581 | +                        if final_audio is None or final_audio.duration is None or final_audio.duration <= 0:
| 582 | +                            logger.error("Audio compuesto es inválido.")
| 583 | +                            try: final_audio.close()
| 584 | +                            except: pass
| 585 | +                            final_audio = audio_tts
| 586 | +            except Exception as e:
| 587 | +                logger.warning(f"Error al procesar música de fondo: {str(e)}")
| 588 | +                final_audio = audio_tts
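For the soundtrack, the music is lowered to 30 % volume with volumex and layered under the narration with CompositeAudioClip; loop_audio_to_length is the app's own helper for stretching the track to the narration length. A self-contained sketch of the same mix that inlines the looping with concatenate_audioclips instead (an assumption for illustration, not the helper's actual implementation):

    from moviepy.editor import AudioFileClip, CompositeAudioClip, concatenate_audioclips

    def mix_narration_and_music(narration, music_path, duration, music_volume=0.3):
        try:
            music = AudioFileClip(music_path)
            # Repeat the track until it covers the narration, then trim it.
            loops = int(duration // music.duration) + 1
            music = concatenate_audioclips([music] * loops).subclip(0, duration)
            return CompositeAudioClip([narration, music.volumex(music_volume)])
        except Exception:
            # Any problem with the music falls back to narration only, as in the diff above.
            return narration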
| 589 |
| 590 |           # 7. Crear video final
| 591 |           logger.info("Renderizando video final...")
| 592 | +        try:
| 593 | +            video_final = video_base.set_audio(final_audio)
| 594 | +            if video_final is None or video_final.duration is None or video_final.duration <= 0:
| 595 | +                logger.critical("Clip de video final (con audio) es inválido.")
| 596 | +                raise ValueError("Clip de video final es inválido.")
| 597 | +        except Exception as e:
| 598 | +            logger.error(f"Error al asignar audio al video: {str(e)}")
| 599 | +            raise ValueError("Fallo al crear video final con audio.")
| 600 |
| 601 |           output_filename = "final_video.mp4"
| 602 |           output_path = os.path.join(temp_dir_intermediate, output_filename)
| 603 |           logger.info(f"Escribiendo video final a: {output_path}")
| 604 | +        try:
| 605 | +            video_final.write_videofile(
| 606 | +                output_path,
| 607 | +                fps=24,
| 608 | +                threads=4,
| 609 | +                codec="libx264",
| 610 | +                audio_codec="aac",
| 611 | +                preset="medium",
| 612 | +                logger='bar'
| 613 | +            )
| 614 | +        except Exception as e:
| 615 | +            logger.critical(f"Error al escribir video final: {str(e)}")
| 616 | +            raise ValueError("Fallo al escribir el video final.")
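The export call above re-encodes the composed timeline as H.264 video with AAC audio, a pairing that plays in browsers and in Gradio's video component; preset="medium" trades encode speed for file size and logger='bar' keeps only a progress bar. A stripped-down, runnable version of the same call on a dummy clip (illustration only, assuming MoviePy 1.x and a working FFmpeg):

    import os
    from moviepy.editor import ColorClip

    clip = ColorClip(size=(1280, 720), color=(0, 0, 0), duration=2)
    out_path = "demo_export.mp4"
    clip.write_videofile(out_path, fps=24, threads=4, codec="libx264",
                         audio_codec="aac", preset="medium", logger="bar")
    # The app checks the written file the same way before handing it to Gradio.
    assert os.path.exists(out_path) and os.path.getsize(out_path) > 0
    clip.close()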
| 617 |
| 618 |           total_time = (datetime.now() - start_time).total_seconds()
| 619 |           logger.info(f"PROCESO DE VIDEO FINALIZADO | Output: {output_path} | Tiempo total: {total_time:.2f}s")
| 620 |
| 621 |           final_output_filename = "final_video.mp4"
| 622 |           final_output_path = os.path.join(os.getcwd(), final_output_filename)
| 623 |           if os.path.exists(final_output_path):
| 624 | +            os.remove(final_output_path)
| 625 |           shutil.copy2(output_path, final_output_path)
| 626 |           logger.info(f"Video copiado a ruta accesible para Gradio: {final_output_path}")
| 627 | +        output_path = final_output_path
| 628 |
| 629 |           return output_path
| 630 |
| 636 |           raise e
| 637 |       finally:
| 638 |           logger.info("Iniciando limpieza de clips y archivos temporales intermedios...")
| 639 | +        for clip in source_clips:
| 640 | +            try: clip.close()
| 641 | +            except: pass
| 642 | +        for clip in clips_to_concatenate:
| 643 | +            if clip not in source_clips:
| 644 | +                try: clip.close()
| 645 | +                except: pass
| 646 | +        if audio_tts_original:
| 647 | +            try: audio_tts_original.close()
| 648 | +            except: pass
| 649 | +        if musica_audio_original:
| 650 | +            try: musica_audio_original.close()
| 651 | +            except: pass
| 652 | +        if musica_audio:
| 653 | +            try: musica_audio.close()
| 654 | +            except: pass
| 655 | +        if audio_tts and audio_tts != audio_tts_original:
| 656 | +            try: audio_tts.close()
| 657 | +            except: pass
| 658 | +        if final_audio and final_audio != audio_tts:
| 659 | +            try: final_audio.close()
| 660 | +            except: pass
| 661 | +        if video_base:
| 662 | +            try: video_base.close()
| 663 | +            except: pass
| 664 | +        if video_final:
| 665 | +            try: video_final.close()
| 666 | +            except: pass
| 667 | +        for temp_file in temp_intermediate_files:
| 668 | +            try:
| 669 | +                if os.path.exists(temp_file):
| 670 | +                    os.remove(temp_file)
| 671 | +            except:
| 672 | +                pass
| 673 | +        if temp_dir_intermediate and os.path.exists(temp_dir_intermediate):
| 674 | +            try:
| 675 | +                shutil.rmtree(temp_dir_intermediate)
| 676 | +            except:
| 677 | +                pass
| 678 | +        logger.info("Limpieza de recursos completada.")
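The finally block is deliberately defensive: each clip is closed inside its own try/except so one failing close() cannot skip the rest, and the whole intermediate directory is removed afterwards whether rendering succeeded or not. The same "close if possible, never raise" pattern can be written as a small helper (hypothetical, shown only to make the intent explicit):

    import shutil

    def safe_close(*clips):
        # Close whatever was actually opened; ignore None and already-closed clips.
        for clip in clips:
            if clip is None:
                continue
            try:
                clip.close()
            except Exception:
                pass

    # e.g. safe_close(video_final, video_base, final_audio, audio_tts, musica_audio)
    #      followed by shutil.rmtree(temp_dir_intermediate, ignore_errors=True)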
| 679 |
| 680 | + def run_app(prompt_type, prompt_ia, prompt_manual, musica_file, selected_voice):
| 681 |       logger.info("="*80)
| 682 |       logger.info("SOLICITUD RECIBIDA EN INTERFAZ")
| 683 |       input_text = prompt_ia if prompt_type == "Generar Guion con IA" else prompt_manual
| 684 |       output_video = None
| 685 |       output_file = None
| 686 |       status_msg = gr.update(value="⏳ Procesando...", interactive=False)
| 687 |       if not input_text or not input_text.strip():
| 688 |           logger.warning("Texto de entrada vacío.")
| 689 |           return None, None, gr.update(value="⚠️ Por favor, ingresa texto para el guion o el tema.", interactive=False)
| 690 |       voice_ids_disponibles = [v[1] for v in AVAILABLE_VOICES]
| 691 |       if selected_voice not in voice_ids_disponibles:
| 692 | +        logger.warning(f"Voz seleccionada inválida: '{selected_voice}'. Usando voz por defecto: {DEFAULT_VOICE_ID}.")
| 693 | +        selected_voice = DEFAULT_VOICE_ID
| 694 |       else:
| 695 |           logger.info(f"Voz seleccionada validada: {selected_voice}")
| 696 |       logger.info(f"Tipo de entrada: {prompt_type}")
| 697 |       logger.debug(f"Texto de entrada: '{input_text[:100]}...'")
| 698 |       if musica_file:
| 699 |           logger.info(f"Archivo de música recibido: {musica_file}")
| 700 |       else:
| 701 |           logger.info("No se proporcionó archivo de música.")
| 702 | +    logger.info(f"Voz final a usar (ID): {selected_voice}")
| 703 |       try:
| 704 |           logger.info("Llamando a crear_video...")
| 705 | +        video_path = crear_video(prompt_type, input_text, selected_voice, musica_file)
| 706 |           if video_path and os.path.exists(video_path):
| 707 |               logger.info(f"crear_video retornó path: {video_path}")
| 708 |               logger.info(f"Tamaño del archivo de video retornado: {os.path.getsize(video_path)} bytes")
| 709 | +            output_video = video_path
| 710 | +            output_file = video_path
| 711 |               status_msg = gr.update(value="✅ Video generado exitosamente.", interactive=False)
| 712 |           else:
| 713 |               logger.error(f"crear_video no retornó un path válido o el archivo no existe: {video_path}")
| 714 |               status_msg = gr.update(value="❌ Error: La generación del video falló o el archivo no se creó correctamente.", interactive=False)
| 715 |       except ValueError as ve:
| 716 |           logger.warning(f"Error de validación durante la creación del video: {str(ve)}")
| 717 |           status_msg = gr.update(value=f"⚠️ Error de validación: {str(ve)}", interactive=False)
| 718 |       except Exception as e:
| 719 |           logger.critical(f"Error crítico durante la creación del video: {str(e)}", exc_info=True)
| 720 |           status_msg = gr.update(value=f"❌ Error inesperado: {str(e)}", interactive=False)
| 721 | +    return output_video, output_file, status_msg
| 722 |
| 723 |   # Interfaz de Gradio
| 724 |   with gr.Blocks(title="Generador de Videos con IA", theme=gr.themes.Soft(), css="""
| 725 |   .gradio-container {max-width: 800px; margin: auto;}
| 726 |   h1 {text-align: center;}
| 727 |   """) as app:
| 728 |       gr.Markdown("# 🎬 Generador Automático de Videos con IA")
| 729 |       gr.Markdown("Genera videos cortos a partir de un tema o guion, usando imágenes de archivo de Pexels y voz generada.")
| 730 |       with gr.Row():
| 731 |           with gr.Column():
| 732 |               prompt_type = gr.Radio(
| 734 |                   label="Método de Entrada",
| 735 |                   value="Generar Guion con IA"
| 736 |               )
| 737 |               with gr.Column(visible=True) as ia_guion_column:
| 738 |                   prompt_ia = gr.Textbox(
| 739 |                       label="Tema para IA",
| 741 |                       placeholder="Ej: Un paisaje natural con montañas y ríos al amanecer, mostrando la belleza de la naturaleza...",
| 742 |                       max_lines=4,
| 743 |                       value=""
| 744 |                   )
| 745 |               with gr.Column(visible=False) as manual_guion_column:
| 746 |                   prompt_manual = gr.Textbox(
| 747 |                       label="Tu Guion Completo",
| 749 |                       placeholder="Ej: En este video exploraremos los misterios del océano. Veremos la vida marina fascinante y los arrecifes de coral vibrantes. ¡Acompáñanos en esta aventura subacuática!",
| 750 |                       max_lines=10,
| 751 |                       value=""
| 752 |                   )
| 753 |               musica_input = gr.Audio(
| 754 |                   label="Música de fondo (opcional)",
| 755 |                   type="filepath",
| 756 |                   interactive=True,
| 757 |                   value=None
| 758 |               )
| 759 |               voice_dropdown = gr.Dropdown(
| 760 |                   label="Seleccionar Voz para Guion",
| 761 | +                choices=AVAILABLE_VOICES,
| 762 | +                value=DEFAULT_VOICE_ID,
| 763 |                   interactive=True
| 764 |               )
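gr.Dropdown accepts (display text, value) tuples, which is why AVAILABLE_VOICES can show a friendly voice name while handing the raw Edge TTS voice ID to run_app, with DEFAULT_VOICE_ID preselected. A minimal illustration of that pattern with made-up entries (assumes a Gradio version that supports tuple choices):

    import gradio as gr

    # Hypothetical, shortened voice list in the same (label, value) shape as AVAILABLE_VOICES.
    VOICES = [("Voice A - Female (Region A)", "voice-a-id"),
              ("Voice B - Male (Region B)", "voice-b-id")]

    with gr.Blocks() as demo:
        # The dropdown displays the first tuple element and returns the second to callbacks.
        voice = gr.Dropdown(choices=VOICES, value="voice-a-id", label="Voz")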
| 765 |               generate_btn = gr.Button("✨ Generar Video", variant="primary")
| 766 |           with gr.Column():
| 767 |               video_output = gr.Video(
| 768 |                   label="Previsualización del Video Generado",
| 769 |                   interactive=False,
| 770 |                   height=400
| 771 |               )
| 772 |               file_output = gr.File(
| 773 |                   label="Descargar Archivo de Video",
| 774 |                   interactive=False,
| 775 | +                visible=False
| 776 |               )
| 777 |               status_output = gr.Textbox(
| 778 |                   label="Estado",
| 780 |                   show_label=False,
| 781 |                   placeholder="Esperando acción...",
| 782 |                   value="Esperando entrada..."
| 783 |               )
| 784 |       prompt_type.change(
| 785 |           lambda x: (gr.update(visible=x == "Generar Guion con IA"),
| 786 |                      gr.update(visible=x == "Usar Mi Guion")),
| 787 |           inputs=prompt_type,
| 788 | +        outputs=[ia_guion_column, manual_guion_column]
| 789 |       )
| 790 |       generate_btn.click(
| 791 |           lambda: (None, None, gr.update(value="⏳ Procesando... Esto puede tomar varios minutos.", interactive=False)),
| 792 |           outputs=[video_output, file_output, status_output],
| 793 |       ).then(
| 794 |           run_app,
| 795 | +        inputs=[prompt_type, prompt_ia, prompt_manual, musica_input, voice_dropdown],
| 796 |           outputs=[video_output, file_output, status_output],
| 797 | +        queue=True
| 798 |       ).then(
| 799 |           lambda video_path, file_path, status_msg: gr.update(visible=file_path is not None),
| 800 |           inputs=[video_output, file_output, status_output],
| 801 |           outputs=[file_output]
| 802 |       )
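The generate_btn.click(...).then(...).then(...) chain above runs in three stages: clear the outputs and show a "processing" status, run the long run_app job through the queue, and finally reveal the download component only when a file path came back. A stripped-down version of that pattern with placeholder components (illustration only):

    import os
    import tempfile
    import gradio as gr

    def slow_job(text):
        # Stand-in for the real work: write a small file and return its path plus a status.
        path = os.path.join(tempfile.gettempdir(), "demo_output.txt")
        with open(path, "w") as f:
            f.write(text or "demo")
        return path, "✅ Done"

    with gr.Blocks() as demo:
        txt = gr.Textbox(label="Input")
        btn = gr.Button("Run")
        file_out = gr.File(visible=False)
        status = gr.Textbox(label="Status")

        btn.click(
            lambda: (None, "⏳ Working..."), outputs=[file_out, status]
        ).then(
            slow_job, inputs=txt, outputs=[file_out, status], queue=True
        ).then(
            lambda path: gr.update(visible=path is not None),
            inputs=file_out, outputs=file_out
        )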
| 803 |       gr.Markdown("### Instrucciones:")
| 804 |       gr.Markdown("""
| 805 | +    1. **Clave API de Pexels:** Asegúrate de haber configurado la variable de entorno `PEXELS_API_KEY` con tu clave.
| 806 | +    2. **Selecciona el tipo de entrada**: "Generar Guion con IA" o "Usar Mi Guion".
| 807 | +    3. **Sube música** (opcional): Selecciona un archivo de audio (MP3, WAV, etc.).
| 808 | +    4. **Selecciona la voz** deseada del desplegable.
| 809 | +    5. **Haz clic en "✨ Generar Video"**.
| 810 | +    6. Espera a que se procese el video. Verás el estado.
| 811 | +    7. La previsualización aparecerá si es posible, y siempre un enlace **Descargar Archivo de Video** se mostrará si la generación fue exitosa.
| 812 | +    8. Revisa `video_generator_full.log` para detalles si hay errores.
| 813 |       """)
| 814 |       gr.Markdown("---")
| 815 |       gr.Markdown("Desarrollado por [Tu Nombre/Empresa/Alias - Opcional]")
| 824 |           logger.info("Clips base de MoviePy creados y cerrados exitosamente. FFmpeg parece accesible.")
| 825 |       except Exception as e:
| 826 |           logger.critical(f"Fallo al crear clip base de MoviePy. A menudo indica problemas con FFmpeg/ImageMagick. Error: {e}", exc_info=True)
| 827 |   except Exception as e:
| 828 | +    logger.critical(f"Fallo al importar MoviePy. Asegúrate de que está instalado. Error: {e}", exc_info=True)
| 829 | + os.environ['GRADIO_SERVER_TIMEOUT'] = '6000'
| 830 |   logger.info("Iniciando aplicación Gradio...")
| 831 |   try:
| 832 |       app.queue(max_size=1).launch(server_name="0.0.0.0", server_port=7860, share=False)