from huggingface_hub import InferenceClient
import gradio as gr

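# Client for the Hugging Face serverless Inference API, serving Mistral 7B Instruct v0.3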
client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.3")

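# Build a prompt in Mistral's instruct format: each past exchange is wrapped in
# [INST] ... [/INST] tags with the bot reply closed by </s>. For example,
# format_prompt("Hi", [("Hello", "Hi there!")]) returns
# "<s>[INST] Hello [/INST] Hi there!</s> [INST] Hi [/INST]"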
def format_prompt(message, history):
    prompt = "<s>"
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response}</s> "
    prompt += f"[INST] {message} [/INST]"
    return prompt

def generate(prompt, history, temperature=1.0, max_new_tokens=1024, top_p=1.0, repetition_penalty=1.0):
    # Clamp sampling parameters: the backend rejects non-positive temperatures,
    # and some TGI versions reject top_p >= 1.0
    temperature = max(float(temperature), 1e-2)
    top_p = min(max(float(top_p), 1e-2), 0.99)

    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        seed=42,  # fixed seed keeps sampling reproducible across runs
    )

    formatted_prompt = format_prompt(prompt, history)

    # Stream tokens to the UI as they arrive; yielding the growing string makes
    # Gradio update the chat message incrementally
    stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
    output = ""

    for response in stream:
        output += response.token.text
        yield output

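# Sampling controls rendered below the chat box; defaults mirror generate()'s signature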
additional_inputs = [
    gr.Slider(
        label="Temperature",
        value=1.0,
        minimum=0.0,
        maximum=1.0,
        step=0.01,
        interactive=True,
        info="Higher values produce more diverse outputs",
    ),
    gr.Slider(
        label="Max new tokens",
        value=1024,
        minimum=0,
        maximum=2048,
        step=128,
        interactive=True,
        info="Maximum number of new tokens to generate",
    ),
    gr.Slider(
        label="Top-p (nucleus sampling)",
        value=1.0,
        minimum=0.0,
        maximum=1.0,
        step=0.01,
        interactive=True,
        info="Higher values sample more low-probability tokens",
    ),
    gr.Slider(
        label="Repetition penalty",
        value=1.0,
        minimum=1.0,
        maximum=2.0,
        step=0.01,
        interactive=True,
        info="Penalize repeated tokens",
    )
]

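# Wire the streaming generator into a chat UI; the extra inputs appear in an
# accordion under the chat box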
gr.ChatInterface(
    fn=generate,
    chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel"),
    additional_inputs=additional_inputs,
    title="Mistral 7B v0.3"
).launch(show_api=False)

# These loads are unreachable: launch() above blocks until the app shuts down,
# and each call would try to start a separate server. Left disabled.
# gr.load("models/ehristoforu/dalle-3-xl-v2").launch()
# gr.load("models/microsoft/Phi-3-mini-4k-instruct").launch()