|
|
import os

import gradio as gr
import spaces
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

MODEL_ID = "bmiller22000/xyntrai-mistral-2.5-7b-chat-nsfw"

# Translation model (Hunyuan-MT-7B), loaded once at startup.
model_name_or_path = "tencent/Hunyuan-MT-7B"
print("Loading model... This may take a few minutes.")

tokenizer_trans = AutoTokenizer.from_pretrained(model_name_or_path)
model_trans = AutoModelForCausalLM.from_pretrained(
    model_name_or_path,
    torch_dtype=torch.bfloat16,
    device_map="auto",
)

# On Hugging Face ZeroGPU Spaces, @spaces.GPU requests a GPU allocation for up
# to `duration` seconds per call.
@spaces.GPU(duration=60)
def translate_text(text, lang=None, progress=gr.Progress(track_tqdm=True)):
    """Translate `text` into `lang` with Hunyuan-MT; return `text` unchanged if no language is given."""
    if not lang:
        return text

    system_message = "You are a helpful AI assistant."
    max_tokens = 1024
    temperature = 0.1
    top_p = 0.05

    message = "Translate to " + lang + ": " + text
    print("message : " + message)

    messages = []
    if system_message:
        messages.append({"role": "system", "content": system_message})
    messages.append({"role": "user", "content": message})

    tokenized_chat = tokenizer_trans.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_tensors="pt",
    )

    with torch.no_grad():
        outputs = model_trans.generate(
            tokenized_chat.to(model_trans.device),  # was `model.device`, i.e. the chat model's device
            max_new_tokens=max_tokens,
            temperature=temperature,
            top_p=top_p,
            do_sample=temperature > 0,
            pad_token_id=tokenizer_trans.eos_token_id,
        )

    # Decode only the newly generated tokens, not the prompt.
    response = tokenizer_trans.decode(outputs[0][tokenized_chat.shape[-1]:], skip_special_tokens=True)
    return response
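
# Usage sketch (illustrative only; real output depends on the model):
#   translate_text("Good morning", "French")  # -> a French rendering of the text
#   translate_text("Good morning")            # -> returned unchanged (no target language)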
|
|
|
|
|
|
|
|
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_ID,
    torch_dtype=torch.float16,
    device_map="auto",
    trust_remote_code=True,
)
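
# Note: the chat model and the translation model (two 7B checkpoints) are
# resident at the same time; device_map="auto" lets accelerate place or shard
# each of them across the available devices.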
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@spaces.GPU(duration=60)
def chat_with_model(prompt, system_prompt, chatbot_display, internal_history, lang, gender, progress=gr.Progress(track_tqdm=True)):
    """
    Take the new prompt, the system prompt, the display history (for gr.Chatbot)
    and the internal history (for gr.State); return the updated state plus the
    raw English response and the final prompt.
    """
    # The prompt must contain the access key stored in the `hf_key` env var.
    expected_key = os.environ.get("hf_key")
    if not expected_key or expected_key not in prompt:
        print("❌ Invalid key.")
        return "", chatbot_display, internal_history, "", ""
    prompt = prompt.replace(expected_key, "")

    if "[AUTO]" in prompt:
        # [AUTO] flags a prompt that should be sent as-is, without pre-translation.
        prompt = prompt.replace("[AUTO]", "")
    elif lang:
        # Translate the user's prompt to English before it reaches the chat model.
        prompt = translate_text(prompt, "English")

    prompt = prompt + " [Detailed description of the physical actions and expressions.]"
    print("prompt : " + prompt)

    if chatbot_display is None:
        chatbot_display = []
    if internal_history is None:
        internal_history = []

    # Rebuild the full conversation for the model: system prompt, prior turns, new turn.
    messages_for_model = [{"role": "system", "content": system_prompt}]
    messages_for_model.extend(internal_history)
    messages_for_model.append({"role": "user", "content": prompt})

    inputs = tokenizer.apply_chat_template(
        messages_for_model,
        tokenize=True,
        add_generation_prompt=True,
        return_tensors="pt",
    ).to(model.device)

    output_tokens = model.generate(
        inputs,
        max_new_tokens=5120,
        do_sample=True,
        temperature=0.7,
        top_p=0.9,
    )

    response_text = tokenizer.decode(output_tokens[0][inputs.shape[-1]:], skip_special_tokens=True)
    print("response_text : " + response_text)

    translated = response_text
    if lang:
        # Appending "; Mr." / "; Ms." nudges the translator toward the right grammatical gender.
        if gender:
            translated = translate_text(response_text + "; Mr.", lang)
        else:
            translated = translate_text(response_text + "; Ms.", lang)
    print("translated : " + translated)

    # Internal history keeps the raw English turns; the chatbot shows the translation.
    internal_history.append({"role": "user", "content": prompt})
    internal_history.append({"role": "assistant", "content": response_text})
    chatbot_display.append([prompt, translated])

    return "", chatbot_display, internal_history, response_text, prompt
|
|
|
|
|
def clear_chat():
    """Clear the chat history."""
    return None, None

with gr.Blocks(theme=gr.themes.Monochrome()) as demo:
    # Raw (untranslated) conversation, kept across turns.
    internal_history = gr.State()

    with gr.Row():
        with gr.Column(scale=3):
            chatbot_display = gr.Chatbot(
                label="Chat History",
                bubble_full_width=False,
                height=500
            )
            lang = gr.Textbox(
                label="Language",
                placeholder="Target language for translation...",
                lines=1
            )
            prompt_box = gr.Textbox(
                label="Your Message",
                placeholder="Enter your message and press Enter...",
                lines=1
            )
            # Checked = male ("Mr."), unchecked = female ("Ms.") for the translation hint.
            gender = gr.Checkbox(
                label="Gender",
                value=True,
                interactive=True
            )
            # Auxiliary fields used by the event wiring: final prompt, raw English
            # response, and ad-hoc text to translate.
            prompt = gr.Textbox(label="", placeholder="", lines=1)
            response = gr.Textbox(label="", placeholder="", lines=1)
            text_translate = gr.Textbox(label="", placeholder="", lines=1)

            with gr.Row():
                clear_button = gr.Button("Clear Chat")
                submit_button = gr.Button("Send")

        with gr.Column(scale=1):
            system_prompt_box = gr.Textbox(
                label="System Prompt (AI's Role & Rules)",
                value="",
                lines=30
            )

    prompt_box.submit(
        fn=chat_with_model,
        inputs=[prompt_box, system_prompt_box, chatbot_display, internal_history, lang, gender],
        outputs=[prompt_box, chatbot_display, internal_history, response, prompt]
    )
    text_translate.submit(
        fn=translate_text,
        inputs=[text_translate, lang],
        outputs=[prompt]
    )
    submit_button.click(
        fn=chat_with_model,
        inputs=[prompt_box, system_prompt_box, chatbot_display, internal_history, lang, gender],
        outputs=[prompt_box, chatbot_display, internal_history, response, prompt]
    )
    clear_button.click(
        fn=clear_chat,
        inputs=None,
        outputs=[chatbot_display, internal_history]
    )

if __name__ == "__main__":
    demo.launch()
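    # Assumption: if gr.Progress(track_tqdm=True) updates do not show, enabling
    # the request queue (demo.queue() before demo.launch()) is the usual fix.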