Spaces:
Sleeping
Sleeping
File size: 1,072 Bytes
bb3dda2 209a482 e6e9f4b 4ce373c bb3dda2 4ce373c bb3dda2 e6e9f4b 209a482 4ce373c 209a482 aead082 209a482 4ce373c 940c5d5 bb3dda2 940c5d5 209a482 aead082 0fa7c56 209a482 aead082 4ce373c 209a482 4ce373c 209a482 f0aa58c bb3dda2 209a482 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 |
from transformers import pipeline
import gradio as gr
# Load a small Korean GPT-2 model (fast enough for CPU inference).
_MODEL_ID = "skt/kogpt2-base-v2"
generator = pipeline(
    "text-generation",
    model=_MODEL_ID,
    tokenizer=_MODEL_ID,
)
def answer_question(prompt):
    """Generate a short Korean answer to a college-admissions question.

    Args:
        prompt: The user's question as free-form Korean text.

    Returns:
        The model's generated text with the system prompt stripped out.
    """
    # NOTE(review): the Korean literals in this copy were mojibake-damaged
    # (UTF-8 misdecoded, with line breaks inside the string). Reconstructed
    # below — confirm against the upstream file.
    # "You are an AI that provides Korean college-admissions info.
    #  Answer the question briefly and clearly."
    system_prompt = (
        "너는 한국 입시 정보를 알려주는 AI야. "
        "질문에 맞게 간단명료하게 설명해줘.\n\n"
    )
    response = generator(
        system_prompt + prompt,
        max_new_tokens=150,  # cap answer length to keep CPU latency low
        temperature=0.7,     # mild randomness
        top_p=0.9,           # nucleus sampling
        do_sample=True,
    )
    # The pipeline echoes the full input; remove the system prompt so only
    # the user-facing answer remains.
    return response[0]["generated_text"].replace(system_prompt, "").strip()
# Gradio UI: one question textbox in, one answer textbox out.
# NOTE(review): all Korean UI strings in this copy were mojibake-damaged
# (misdecoded bytes split across lines, making the literals invalid).
# Reconstructed below — confirm wording against the upstream file.
app = gr.Interface(
    fn=answer_question,
    inputs=gr.Textbox(
        lines=2,
        label="입시 질문 입력",  # "Enter an admissions question"
        placeholder="예: 수능 일정 / 한국공학대 논술 전형 / 가천대 모집인원",
    ),
    outputs=gr.Textbox(label="AI 답변"),  # "AI answer"
    title="입시정보 AI (빠른 버전)",  # "Admissions-info AI (fast version)"
    description="CPU에서도 빠르게 작동하는 입시 전문 챗봇입니다.",
)
# Start the Gradio web server only when executed as a script
# (importing this module elsewhere will not launch the UI).
if __name__ == "__main__":
    app.launch()
|