# app.py: Gradio front-end for the ADUC-SDR framework (pre-production and
# production pipelines).

import logging
import os
import sys
import time

import gradio as gr
import yaml
from PIL import Image

logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - [%(name)s:%(funcName)s] - %(message)s',
)
logger = logging.getLogger(__name__)

cinematic_theme = gr.themes.Base(primary_hue=gr.themes.colors.purple).set(
    body_background_fill="#111827",
    body_text_color="#D1D5DB",
    button_primary_background_fill="linear-gradient(90deg, #6D28D9, #4F46E5)",
    block_background_fill="#1F2937",
    block_border_width="1px",
    block_border_color="#374151",
)

try:
    import aduc_framework
    from aduc_framework.types import PreProductionParams, ProductionParams

    with open("config.yaml", 'r') as f:
        config = yaml.safe_load(f)
    WORKSPACE_DIR = config['application']['workspace_dir']
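
    # config.yaml only needs to expose the workspace directory for this file;
    # an illustrative minimal shape (assumed; other keys may exist):
    #
    #   application:
    #     workspace_dir: "workspace"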

    aduc = aduc_framework.create_aduc_instance(workspace_root=WORKSPACE_DIR)

    logger.info("Framework ADUC e interface Gradio inicializados com sucesso.")

except Exception as e:
    # If the framework cannot be initialized, present a minimal error UI and abort.
    logger.critical(f"ERRO CRÍTICO NA INICIALIZAÇÃO DO ADUC FRAMEWORK: {e}", exc_info=True)
    with gr.Blocks(theme=cinematic_theme) as demo_error:
        gr.Markdown("# ERRO CRÍTICO NA INICIALIZAÇÃO")
        gr.Markdown("Não foi possível iniciar o Aduc Framework. A aplicação não pode continuar. Verifique os logs.")
        gr.Textbox(value=str(e), label="Detalhes do Erro", lines=10)
    demo_error.launch()
    sys.exit(1)


def chat_beautifier(role):
    """Map an internal agent role name to a friendly display name for the production diary."""
    if "Composer2D" in role: return "Arquiteto Narrativo"
    if "Neura_Link" in role: return "Interface Neural"
    if "Planner5D" in role: return "Diretor de Set"
    return "Sistema"
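
# Illustrative usage: chat_beautifier("Composer2D") -> "Arquiteto Narrativo";
# any role that matches none of the known agents falls back to "Sistema".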


def process_chat_stream(generator, initial_chat_history=None):
    """
    Consumes a generator that yields GenerationState objects and formats the
    chat history and other data for the UI, simulating a typing effect for
    each new backend message.
    """
    chatbot_display_history = list(initial_chat_history or [])
    fully_displayed_message_count = len(chatbot_display_history)

    for dna_state in generator:
        backend_chat_history = dna_state.chat_history

        # Collect keyframe image paths for the gallery, preferring the
        # production storyboard when it is available.
        gallery_images = []
        if dna_state.storyboard_producao:
            for scene in dna_state.storyboard_producao:
                gallery_images.extend([kf.caminho_pixel for kf in scene.keyframes])
        elif dna_state.scenes:
            for scene in dna_state.scenes:
                gallery_images.extend([kf.caminho_pixel for kf in scene.keyframes])

        final_video_path = dna_state.caminho_filme_final_bruto or dna_state.caminho_filme_final_masterizado
        dna_json = dna_state.model_dump()

        # Type out, character by character, every message the backend has added
        # since the last update.
        while len(backend_chat_history) > fully_displayed_message_count:
            new_message_obj = backend_chat_history[fully_displayed_message_count]
            role = chat_beautifier(new_message_obj.get('role', 'Sistema'))
            content_to_type = new_message_obj.get('content', '')
            is_ai_message = "Sistema" not in role

            chatbot_display_history.append({"role": "assistant" if is_ai_message else "user", "content": ""})
            full_typed_message = ""
            for char in f"**{role}:** {content_to_type}":
                full_typed_message += char
                chatbot_display_history[-1]["content"] = full_typed_message + "▌"
                yield chatbot_display_history, gr.update(value=gallery_images), gr.update(value=dna_json), gr.update(value=final_video_path)
                time.sleep(0.005)
            chatbot_display_history[-1]["content"] = full_typed_message
            fully_displayed_message_count += 1

        yield chatbot_display_history, gr.update(value=gallery_images), gr.update(value=dna_json), gr.update(value=final_video_path)
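
# Each tuple yielded by process_chat_stream maps positionally onto the Gradio
# outputs wired below: [chat_history_chatbot, keyframe_gallery, dna_display,
# final_video_output]. Backend chat entries are assumed to be dicts carrying
# "role" and "content" keys.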


def run_story_and_keyframes_wrapper(project_name, prompt, num_scenes, ref_files, duration_per_fragment):
    """
    Wrapper that starts the V2 PRE-PRODUCTION pipeline (script and storyboard).
    """
    if not project_name or not project_name.strip():
        raise gr.Error("Por favor, forneça um nome para o projeto.")
    aduc.load_project(project_name.strip())

    if not ref_files:
        raise gr.Error("Por favor, forneça pelo menos uma imagem de referência.")
    ref_paths = [aduc.process_image_for_story(f.name, f"ref_{i}.jpg") for i, f in enumerate(ref_files)]

    params = PreProductionParams(
        prompt=prompt,
        num_scenes=int(num_scenes),
        ref_paths=ref_paths,
        duration_per_fragment=duration_per_fragment,
    )

    generator = aduc.task_run_story_and_keyframes(params)

    # Stream every backend update to the UI. Gradio ignores the return value of
    # a generator handler, so the final project state is read back from the
    # dna_display component in the follow-up .then() step instead.
    yield from process_chat_stream(generator)


def run_production_wrapper(project_name, state_dict, trim, handler, dest, guidance, stg, steps):
    """
    Wrapper that starts the V2 PRODUCTION pipeline (video rendering).
    """
    if not project_name or not project_name.strip():
        raise gr.Error("O nome do projeto parece ter sido perdido.")
    aduc.load_project(project_name.strip())

    params = ProductionParams(
        trim_percent=int(trim),
        handler_strength=handler,
        destination_convergence_strength=dest,
        guidance_scale=guidance,
        stg_scale=stg,
        inference_steps=int(steps),
    )

    prod_generator = aduc.task_produce_movie(params)

    # Continue the production diary from the chat history accumulated during
    # pre-production; as above, the final state is read back from dna_display.
    initial_chat = state_dict.get("chat_history", [])
    yield from process_chat_stream(prod_generator, initial_chat_history=initial_chat)


with gr.Blocks(theme=cinematic_theme, css="style.css") as demo:
    generation_state_holder = gr.State({})
    gr.Markdown("<h1>ADUC-SDR 🎬 - O Diretor de Cinema IA</h1>")

    with gr.Accordion("Configurações do Projeto", open=True):
        project_name_input = gr.Textbox(label="Nome do Projeto", value="Meu_Filme_01", info="O progresso será salvo em uma pasta com este nome.")

    with gr.Row():
        with gr.Column(scale=2):
            with gr.Accordion("Etapa 1: Pré-Produção (Roteiro e Storyboard)", open=True):
                prompt_input = gr.Textbox(label="Ideia Geral do Filme", value="Um robô solitário explora as ruínas de uma cidade coberta pela natureza.")
                ref_image_input = gr.File(label="Imagens de Referência", file_count="multiple", file_types=["image"])
                with gr.Row():
                    num_scenes_slider = gr.Slider(minimum=1, maximum=10, value=3, step=1, label="Número de Cenas")
                    duration_per_fragment_slider = gr.Slider(label="Duração de cada Ato (s)", minimum=2.0, maximum=10.0, value=5.0, step=0.1)
                start_pre_prod_button = gr.Button("1. Gerar Roteiro e Storyboard", variant="primary")

            with gr.Accordion("Etapa 2: Produção do Vídeo", open=False, visible=False) as step2_accordion:
                trim_percent_slider = gr.Slider(minimum=10, maximum=90, value=50, step=5, label="Poda Causal (%)")
                handler_strength_slider = gr.Slider(label="Força do Déjà-Vu", minimum=0.0, maximum=1.0, value=0.5, step=0.05)
                dest_strength_slider = gr.Slider(label="Força da Âncora Final", minimum=0.0, maximum=1.0, value=0.75, step=0.05)
                guidance_scale_slider = gr.Slider(minimum=1.0, maximum=10.0, value=2.0, step=0.1, label="Escala de Orientação")
                stg_scale_slider = gr.Slider(minimum=0.0, maximum=1.0, value=0.025, step=0.005, label="Escala STG")
                inference_steps_slider = gr.Slider(minimum=10, maximum=50, value=20, step=1, label="Passos de Inferência")
                produce_original_button = gr.Button("2. Produzir Vídeo", variant="primary", interactive=False)

        with gr.Column(scale=3):
            final_video_output = gr.Video(label="Último Clipe Gerado / Filme Final", interactive=False)
            chat_history_chatbot = gr.Chatbot(label="Diário da Produção", height=600, type='messages')
            keyframe_gallery = gr.Gallery(label="Keyframes Gerados", object_fit="contain", height="auto")

    with gr.Accordion("🧬 DNA Digital (Estado do Projeto)", open=False):
        dna_display = gr.JSON()

    start_pre_prod_button.click(
        fn=lambda: gr.update(interactive=False),
        outputs=[start_pre_prod_button]
    ).then(
        fn=run_story_and_keyframes_wrapper,
        inputs=[project_name_input, prompt_input, num_scenes_slider, ref_image_input, duration_per_fragment_slider],
        outputs=[chat_history_chatbot, keyframe_gallery, dna_display, final_video_output]
    ).then(
        fn=lambda data: (data, gr.update(visible=True, open=True), gr.update(interactive=True)),
        inputs=dna_display,
        outputs=[generation_state_holder, step2_accordion, produce_original_button]
    )

    produce_original_button.click(
        fn=lambda: gr.update(interactive=False),
        outputs=[produce_original_button]
    ).then(
        fn=run_production_wrapper,
        inputs=[
            project_name_input,
            generation_state_holder,
            trim_percent_slider,
            handler_strength_slider,
            dest_strength_slider,
            guidance_scale_slider,
            stg_scale_slider,
            inference_steps_slider
        ],
        outputs=[chat_history_chatbot, keyframe_gallery, dna_display, final_video_output]
    )


if __name__ == "__main__":
    os.makedirs(WORKSPACE_DIR, exist_ok=True)
    logger.info("Aplicação Gradio pronta. Lançando interface...")
    demo.queue().launch()