import gradio as gr
from transformers import AutoTokenizer, AutoModelForSequenceClassification, AutoModelForCausalLM
import torch
import torch.nn.functional as F
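
# Demo: classify the sentiment of the user's sentence with KcELECTRA, prepend a
# mood-matched opening phrase, and let KoGPT-2 continue the text via a Gradio UI.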

# Sentiment analysis model (labels: negative / neutral / positive)
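# NOTE: "beomi/KcELECTRA-base" is a plain encoder checkpoint; the 3-way classification
# head created by num_labels=3 is randomly initialized, so fine-tune it (or load a
# fine-tuned sentiment checkpoint) before trusting the predictions.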
emotion_model = AutoModelForSequenceClassification.from_pretrained("beomi/KcELECTRA-base", num_labels=3)
emotion_tokenizer = AutoTokenizer.from_pretrained("beomi/KcELECTRA-base")
emotion_labels = ['부정', '중립', '긍정']  # negative, neutral, positive

# KoGPT-2 model for Korean text generation
gpt_model = AutoModelForCausalLM.from_pretrained("skt/kogpt2-base-v2")
gpt_tokenizer = AutoTokenizer.from_pretrained("skt/kogpt2-base-v2")

# Sentiment classification: return the most likely emotion label for the input text
def predict_emotion(text):
    inputs = emotion_tokenizer(text, return_tensors="pt", truncation=True, padding=True)
    with torch.no_grad():
        outputs = emotion_model(**inputs)
        probs = F.softmax(outputs.logits, dim=1)
        pred = torch.argmax(probs, dim=1).item()
    return emotion_labels[pred]

# Continue the user's text with KoGPT-2, seeded by a sentiment-matched opening phrase
def emotional_gpt(user_input):
    emotion = predict_emotion(user_input)

    if emotion == "긍정":  # positive
        prompt = "기분 좋은 하루였다. "  # "It was a pleasant day. "
    elif emotion == "부정":  # negative
        prompt = "우울한 기분으로 시작된 하루, "  # "A day that started in a gloomy mood, "
    else:  # neutral
        prompt = "평범한 하루가 시작되었다. "  # "An ordinary day began. "
    
    prompt += user_input
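    # The detected emotion only shapes the opening phrase; KoGPT-2 then continues freely.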

    input_ids = gpt_tokenizer.encode(prompt, return_tensors="pt")
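    # Sample a continuation; temperature/top-k sampling keeps the output varied but coherent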
    output = gpt_model.generate(input_ids, max_length=150, do_sample=True, temperature=0.8, top_k=50)
    result = gpt_tokenizer.decode(output[0], skip_special_tokens=True)

    return f"๐Ÿง  ๊ฐ์ • ๋ถ„์„ ๊ฒฐ๊ณผ: {emotion}\n\nโœ๏ธ GPT๊ฐ€ ์ด์–ด ์“ด ๊ธ€:\n{result}"

# Build and launch the Gradio interface
gr.Interface(
    fn=emotional_gpt,
    # Textbox label: "✍️ Enter a sentence that carries some emotion!"; placeholder: "e.g. I felt so lonely today"
    inputs=gr.Textbox(lines=3, label="✍️ 감정을 담은 문장을 입력해주세요!", placeholder="예: 오늘 너무 외로웠어"),
    outputs="text",
    title="๐ŸŽญ ๊ฐ์ •ํ˜• GPT ํ•œ๊ธ€ ์ž‘๋ฌธ AI",
    description="๐Ÿง  ๊ฐ์ •์„ ๋จผ์ € ํŒŒ์•…ํ•˜๊ณ  โœจ ๊ทธ ๊ฐ์ •์— ์–ด์šธ๋ฆฌ๋Š” ๋ฌธ์žฅ์„ ์ด์–ด์„œ ์ž‘์„ฑํ•ด์ค๋‹ˆ๋‹ค!",
    theme="soft",
    examples=[
        ["๊ธฐ๋ถ„์ด ๋„ˆ๋ฌด ์ข‹์•˜์–ด"],
        ["์ง„์งœ ์™ธ๋กญ๊ณ  ํž˜๋“  ํ•˜๋ฃจ์˜€์–ด"],
        ["ํšŒ์˜๊ฐ€ ๊ทธ๋ƒฅ ๊ทธ๋žฌ์–ด"]
    ]
).launch()
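
# Running this file starts a local Gradio server (http://127.0.0.1:7860 by default).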