import spaces
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
import os
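# `spaces` is the Hugging Face Spaces helper package; its @spaces.GPU
# decorator (used on generate_response below) requests a GPU for the
# duration of the call when running on ZeroGPU hardware.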


Title = """# Welcome to 🌟Tonic's 🌠Lucie-7B-Instruct Demo"""

description = """
🌠Lucie-7B-Instruct is a fine-tuned version of [Lucie-7B](https://huggingface.co./OpenLLM-France/Lucie-7B), an open-source, multilingual causal language model created by OpenLLM-France.

🌠Lucie-7B-Instruct is fine-tuned on synthetic instructions produced by ChatGPT and Gemma, plus a small set of customized prompts about OpenLLM and Lucie.
"""

training = """
## Training details

### Training data

Lucie-7B-Instruct is trained on the following datasets:
* [Alpaca-cleaned](https://huggingface.co./datasets/yahma/alpaca-cleaned) (English; 51604 samples)
* [Alpaca-cleaned-fr](https://huggingface.co./datasets/cmh/alpaca_data_cleaned_fr_52k) (French; 51655 samples)
* [Magpie-Gemma](https://huggingface.co./datasets/Magpie-Align/Magpie-Gemma2-Pro-200K-Filtered) (English; 195167 samples)
* [Wildchat](https://huggingface.co./datasets/allenai/WildChat-1M) (French subset; 26436 samples)
* Hard-coded prompts concerning OpenLLM and Lucie (based on [allenai/tulu-3-hard-coded-10x](https://huggingface.co./datasets/allenai/tulu-3-hard-coded-10x))
    * French: openllm_french.jsonl (24x10 samples)
    * English: openllm_english.jsonl (24x10 samples)"""

join_us = """
## Join us:
🌟TeamTonic🌟 is always making cool demos! Join our active builders' 🛠️community 👻 
[![Join us on Discord](https://img.shields.io/discord/1109943800132010065?label=Discord&logo=discord&style=flat-square)](https://discord.gg/qdfnvSPcqP) 
On 🤗Huggingface: [MultiTransformer](https://huggingface.co./MultiTransformer) 
On 🌐Github: [Tonic-AI](https://github.com/tonic-ai) & contribute to🌟 [Build Tonic](https://git.tonic-ai.com/contribute)
🤗Big thanks to Yuvi Sharma and all the folks at Hugging Face for the community grant 🤗
"""

# Initialize model and tokenizer
model_id = "OpenLLM-France/Lucie-7B-Instruct"
device = "cuda" if torch.cuda.is_available() else "cpu"

# Get the token from environment variables
hf_token = os.getenv('READTOKEN')
if not hf_token:
    raise ValueError("Please set the READTOKEN environment variable")
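
# READTOKEN is this app's own naming convention for a Hugging Face access
# token, not a standard variable; on a Space it would be stored as a secret,
# or set locally, e.g.:
#   export READTOKEN=hf_xxxxxxxx   # placeholder value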

# Initialize tokenizer and model with token authentication
tokenizer = AutoTokenizer.from_pretrained(
    model_id,
    token=hf_token,
    trust_remote_code=True
)

model = AutoModelForCausalLM.from_pretrained(
    model_id,
    token=hf_token,
    device_map="auto",
    torch_dtype=torch.bfloat16,
    trust_remote_code=True
)
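# device_map="auto" lets accelerate decide where to place the weights, and
# bfloat16 roughly halves memory use compared to float32.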

config_json = model.config.to_dict()

def format_model_info(config):
    info = []
    important_keys = [
        "model_type", "vocab_size", "hidden_size", "num_attention_heads",
        "num_hidden_layers", "max_position_embeddings", "torch_dtype"
    ]
    for key in important_keys:
        if key in config:
            value = config[key]
            # torch_dtype may be a torch.dtype object rather than a string;
            # str() renders both consistently
            if key == "torch_dtype":
                value = str(value)
            info.append(f"**{key}:** {value}")
    return "\n".join(info)
    
@spaces.GPU
def generate_response(system_prompt, user_prompt, temperature, max_new_tokens, top_p, repetition_penalty, top_k):
    # Construct the full prompt with system and user messages
    full_prompt = f"""<|system|>{system_prompt}</s>
<|user|>{user_prompt}</s>
<|assistant|>"""
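    # The tags above mirror the model's chat-style turn format as hard-coded
    # here; if the checkpoint ships a chat template,
    # tokenizer.apply_chat_template(messages, add_generation_prompt=True)
    # would be the more robust way to build this prompt.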
    
    # Tokenize the prompt and move it to whichever device holds the model
    inputs = tokenizer(full_prompt, return_tensors="pt").to(model.device)
    
    # Generate response
    outputs = model.generate(
        **inputs,
        max_new_tokens=max_new_tokens,
        temperature=temperature,
        top_p=top_p,
        top_k=top_k,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        pad_token_id=tokenizer.eos_token_id
    )
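    # do_sample=True activates the temperature/top-p/top-k sampling above;
    # pad_token_id is set explicitly because many causal LMs define no pad
    # token, which otherwise triggers a warning at generation time.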
    
    # Decode only the newly generated tokens; splitting a full decode on
    # "<|assistant|>" would break if that marker is stripped as a special token
    new_tokens = outputs[0][inputs["input_ids"].shape[1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True).strip()

# Create the Gradio interface
with gr.Blocks() as demo:
    gr.Markdown(Title)

    with gr.Row():
        with gr.Column():
            gr.Markdown(description)
        with gr.Column():
            gr.Markdown(training)
            
    with gr.Row():
        with gr.Column():
            with gr.Group():
                gr.Markdown("### Model Configuration")
                gr.Markdown(format_model_info(config_json))
                    
        with gr.Column():
            with gr.Group():
                gr.Markdown("### Tokenizer Configuration")
                gr.Markdown(f"""
                    **Vocabulary Size:** {tokenizer.vocab_size}
                    **Model Max Length:** {tokenizer.model_max_length}
                    **Padding Token:** {tokenizer.pad_token}
                    **EOS Token:** {tokenizer.eos_token}
                    """)
    
    with gr.Row():
        with gr.Group():
            gr.Markdown(join_us)
    
    with gr.Row():
        with gr.Column():
            # System prompt
            system_prompt = gr.Textbox(
                label="Message Système",
                value="Tu es Lucie, une assistante IA française serviable et amicale. Tu réponds toujours en français de manière précise et utile. Tu es honnête et si tu ne sais pas quelque chose, tu le dis simplement.",
                lines=3
            )
            
            # User prompt
            user_prompt = gr.Textbox(
                label="🗣️Votre message",
                placeholder="Entrez votre texte ici...",
                lines=5
            )
            
            with gr.Accordion("🧪Paramètres avancés", open=False):
                temperature = gr.Slider(
                    minimum=0.1,
                    maximum=2.0,
                    value=0.7,
                    step=0.1,
                    label="🌡️Temperature"
                )
                max_new_tokens = gr.Slider(
                    minimum=1,
                    maximum=2048,
                    value=512,
                    step=1,
                    label="💶Longueur maximale"
                )
                top_p = gr.Slider(
                    minimum=0.1,
                    maximum=1.0,
                    value=0.9,
                    step=0.1,
                    label="🏅Top-p"
                )
                top_k = gr.Slider(
                    minimum=1,
                    maximum=100,
                    value=50,
                    step=1,
                    label="🏆Top-k"
                )
                repetition_penalty = gr.Slider(
                    minimum=1.0,
                    maximum=2.0,
                    value=1.2,
                    step=0.1,
                    label="🦜Pénalité de répétition"
                )
            
            generate_btn = gr.Button("🌠Générer")
        
        with gr.Column():
            # Output component
            output = gr.Textbox(
                label="🌠Lucie",
                lines=10
            )
    
    # Example prompts with all parameters
    gr.Examples(
        examples=[
            # Format: [system_prompt, user_prompt, temperature, max_tokens, top_p, rep_penalty, top_k]
            [
                "Tu es Lucie, une assistante IA française serviable et amicale.",
                "Bonjour! Comment vas-tu aujourd'hui?",
                0.7,  # temperature
                512,  # max_new_tokens
                0.9,  # top_p
                1.2,  # repetition_penalty
                50   # top_k
            ],
            [
                "Tu es une experte en intelligence artificielle.",
                "Peux-tu m'expliquer ce qu'est l'intelligence artificielle?",
                0.8,  # higher temperature for more creative explanation
                1024, # longer response
                0.95, # higher top_p for more diverse output
                1.1,  # lower repetition penalty
                40   # lower top_k for more focused output
            ],
            [
                "Tu es une poétesse française.",
                "Écris un court poème sur Paris.",
                0.9,  # higher temperature for more creativity
                256,  # shorter for poetry
                0.95, # higher top_p for more creative language
                1.3,  # higher repetition penalty for unique words
                60   # higher top_k for more varied vocabulary
            ],
            [
                "Tu es une experte en gastronomie française.",
                "Quels sont les plats traditionnels français les plus connus?",
                0.7,  # moderate temperature for factual response
                768,  # medium length
                0.9,  # balanced top_p
                1.2,  # standard repetition penalty
                50   # standard top_k
            ],
            [
                "Tu es une historienne spécialisée dans l'histoire de France.",
                "Explique-moi l'histoire de la Révolution française en quelques phrases.",
                0.6,  # lower temperature for more factual response
                1024, # longer for historical context
                0.85, # lower top_p for more focused output
                1.1,  # lower repetition penalty
                30   # lower top_k for more consistent output
            ]
        ],
        inputs=[
            system_prompt,
            user_prompt,
            temperature,
            max_new_tokens,
            top_p,
            repetition_penalty,
            top_k
        ],
        outputs=output,
        label="Exemples"
    )
    
    # Set up the generation event
    generate_btn.click(
        fn=generate_response,
        inputs=[system_prompt, user_prompt, temperature, max_new_tokens, top_p, repetition_penalty, top_k],
        outputs=output
    )

# Launch the demo
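# ssr_mode=False opts out of Gradio's server-side rendering (introduced in
# Gradio 5), keeping the plain client-rendered mode.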
if __name__ == "__main__":
    demo.launch(ssr_mode=False)