from transformers import GPT2LMHeadModel, GPT2Tokenizer


def generate_diary(emotion, num_samples=1, max_length=100, temperature=0.7):
    # Load the tokenizer and model used to generate diary entries from an emotion
    tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
    model = GPT2LMHeadModel.from_pretrained("gpt2")
    # Build a prefix sentence matching the given emotion
    if emotion == "happy":
        prefix = "I feel good today. "
    elif emotion == "sad":
        prefix = "I feel sad. "
    elif emotion == "angry":
        prefix = "I feel anger welling up. "
    else:
        prefix = "I feel strange today. "
    # Tokenize the prefix to build the input sequence
    input_sequence = tokenizer.encode(prefix, return_tensors="pt")
    # Generate text with the model; do_sample=True is required for
    # temperature to have an effect and for num_return_sequences > 1
    output = model.generate(
        input_sequence,
        max_length=max_length,
        num_return_sequences=num_samples,
        temperature=temperature,
        do_sample=True,
        pad_token_id=tokenizer.eos_token_id,
    )
    # Decode and return the generated diary entries
    return [tokenizer.decode(output_sequence, skip_special_tokens=True) for output_sequence in output]
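

# Note: generate_diary reloads GPT-2 on every call, which is slow. A minimal
# optimization sketch, not part of the original script: cache the tokenizer
# and model at module level so repeated calls reuse them. The helper name
# _load_gpt2 is illustrative.
from functools import lru_cache


@lru_cache(maxsize=1)
def _load_gpt2():
    # Loaded once on the first call, then reused from the cache
    return GPT2Tokenizer.from_pretrained("gpt2"), GPT2LMHeadModel.from_pretrained("gpt2")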


def main():
    # Read the emotion from the user
    emotion = input("Enter today's emotion (happy, sad, angry, etc.): ")
    # Generate the diary entries
    diary_entries = generate_diary(emotion)
    # Print the generated entries
    print("Today's diary:")
    for i, entry in enumerate(diary_entries, start=1):
        print(f"{i}. {entry}")


if __name__ == "__main__":
    main()
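
# Usage sketch (illustrative; assumes this file is importable as a module
# named diary_generator):
#
#   from diary_generator import generate_diary
#   entries = generate_diary("sad", num_samples=3, temperature=0.9)
#   print(entries[0])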