File size: 1,072 Bytes
bb3dda2
209a482
e6e9f4b
4ce373c
bb3dda2
 
4ce373c
 
bb3dda2
e6e9f4b
209a482
 
4ce373c
 
209a482
aead082
209a482
4ce373c
940c5d5
bb3dda2
940c5d5
209a482
aead082
0fa7c56
209a482
 
 
 
aead082
4ce373c
209a482
 
4ce373c
 
209a482
f0aa58c
bb3dda2
209a482
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
from transformers import pipeline
import gradio as gr

# Load the lightweight Korean GPT-2 model (fast enough for CPU inference).
MODEL_NAME = "skt/kogpt2-base-v2"
generator = pipeline(
    "text-generation",
    model=MODEL_NAME,
    tokenizer=MODEL_NAME,
)

def answer_question(prompt):
    """Generate a short Korean answer to an admissions question.

    A fixed system prompt is prepended to steer the model toward concise,
    admissions-focused replies; only the model's continuation is returned.

    Parameters
    ----------
    prompt : str
        The user's free-text question.

    Returns
    -------
    str
        The generated answer with the full input prompt stripped and
        surrounding whitespace removed.
    """
    system_prompt = (
        "๋„ˆ๋Š” ํ•œ๊ตญ ์ž…์‹œ์ •๋ณด๋ฅผ ์•Œ๋ ค์ฃผ๋Š” AI์•ผ. "
        "์งˆ๋ฌธ์— ๋งž๊ฒŒ ๊ฐ„๋‹จ๋ช…๋ฃŒํ•˜๊ฒŒ ์„ค๋ช…ํ•ด์ค˜.\n\n"
    )
    full_prompt = system_prompt + prompt
    response = generator(
        full_prompt,
        max_new_tokens=150,
        temperature=0.7,
        top_p=0.9,
        do_sample=True,
    )
    text = response[0]["generated_text"]
    # Bug fix: the original used text.replace(system_prompt, ""), which
    # (a) left the user's question duplicated at the start of the answer,
    # because the pipeline echoes the whole input, and (b) could delete
    # matching text anywhere in the generated output. Strip the exact
    # input prefix once instead, keeping only the continuation.
    if text.startswith(full_prompt):
        text = text[len(full_prompt):]
    return text.strip()

# Assemble the Gradio UI: one question textbox in, one answer textbox out.
question_box = gr.Textbox(
    lines=2,
    label="์ž…์‹œ ์งˆ๋ฌธ ์ž…๋ ฅ",
    placeholder="์˜ˆ: ์ˆ˜๋Šฅ ์ผ์ • / ํ•œ๊ตญ๊ณตํ•™๋Œ€ ๋…ผ์ˆ ์ „ํ˜• / ๊ฐ€์ฒœ๋Œ€ ๋ชจ์ง‘์ธ์›",
)
answer_box = gr.Textbox(label="AI ๋‹ต๋ณ€")

app = gr.Interface(
    fn=answer_question,
    inputs=question_box,
    outputs=answer_box,
    title="์ž…์‹œ์ •๋ณด AI (๋น ๋ฅธ ๋ฒ„์ „)",
    description="CPU์—์„œ๋„ ๋น ๋ฅด๊ฒŒ ์ž‘๋™ํ•˜๋Š” ์ž…์‹œ์ „๋ฌธ ์ฑ—๋ด‡์ž…๋‹ˆ๋‹ค.",
)

# Start the local web server only when executed as a script.
if __name__ == "__main__":
    app.launch()