import gradio as gr
import json
from datetime import datetime
from theme import TufteInspired
from huggingface_hub import CommitScheduler, get_token
from openai import OpenAI

from prompts import detailed_genre_description_prompt, basic_prompt
import random

# TODOs
# 1. Add a login button
# 2. Expand the prompt library
# 3. Log the user name if logged in


client = OpenAI(
    base_url="https://api-inference.huggingface.co/models/meta-llama/Meta-Llama-3-70B-Instruct/v1",
    api_key=get_token(),
)


def generate_prompt():
    if random.choice([True, False]):
        return detailed_genre_description_prompt()
    else:
        return basic_prompt()


def generate_blurb():
    # Vary the length so blurbs range from a short teaser to a full description
    max_tokens = random.randint(100, 1000)
    prompt = generate_prompt()
    print(prompt)  # debug: show the chosen prompt in the Space logs
    chat_completion = client.chat.completions.create(
        model="tgi",
        messages=[
            {"role": "user", "content": prompt},
        ],
        stream=True,
        max_tokens=max_tokens,
    )
    full_text = ""
    for message in chat_completion:
        # The final streamed chunk can arrive with a None delta, so guard it
        delta = message.choices[0].delta.content
        if delta is not None:
            full_text += delta
            yield full_text


# Log the blurb and vote as a JSON line, then thank the voter
def log_blurb_and_vote(blurb, vote):
    log_entry = {"timestamp": datetime.now().isoformat(), "blurb": blurb, "vote": vote}
    with open("blurb_log.jsonl", "a") as f:
        f.write(json.dumps(log_entry) + "\n")
    gr.Info("Thank you for voting!")
    return f"Logged: {vote}"


# Create custom theme
tufte_theme = TufteInspired()

# Create Gradio interface
with gr.Blocks(theme=tufte_theme) as demo:
    gr.Markdown("<h1 style='text-align: center;'>Would you read this book?</h1>")
    gr.Markdown(
        """<p style='text-align: center;'>Looking for your next summer read? 
    Would you read a book based on this LLM-generated blurb? <br> Your vote will be added to <a href="https://example.com">this</a> Hugging Face dataset.</p>"""
    )
    # gr.LoginButton(size="sm")
    user_name = gr.Textbox(label="User Name", placeholder="Enter your name")
    with gr.Row():
        generate_btn = gr.Button("Create a book", variant="primary")
    blurb_output = gr.Markdown(label="Book blurb")
    with gr.Row(visible=False) as voting_row:
        upvote_btn = gr.Button("πŸ‘ would read")
        downvote_btn = gr.Button("πŸ‘Ž wouldn't read")
    vote_output = gr.Textbox(label="Vote Status", interactive=False, visible=False)

    def show_voting_buttons(blurb):
        return blurb, gr.Row(visible=True)

    generate_btn.click(generate_blurb, outputs=blurb_output).then(
        show_voting_buttons, inputs=blurb_output, outputs=[blurb_output, voting_row]
    )
    upvote_btn.click(
        lambda x: log_blurb_and_vote(x, "upvote"),
        inputs=blurb_output,
        outputs=vote_output,
    )
    downvote_btn.click(
        lambda x: log_blurb_and_vote(x, "downvote"),
        inputs=blurb_output,
        outputs=vote_output,
    )


if __name__ == "__main__":
    demo.launch()