import os

import gradio as gr
from huggingface_hub import InferenceClient

# Hugging Face API token, read from the environment
HF_TOKEN = os.getenv("HF_TOKEN")

client = InferenceClient("meta-llama/Meta-Llama-3-8B-Instruct", token=HF_TOKEN)


def respond(
    message,
    history: list[tuple[str, str]],
    code: str,
):
    # System prompt defining the assistant's persona
    messages = [{"role": "system", "content": "You are an assistant named Fabrice"}]
    print(code)  # Debug: log the code snippet passed in from the gr.Code component

    # Rebuild the conversation, attaching the code snippet to each user turn
    for user_msg, assistant_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg + " \n" + code})
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})

    # Current user message, also with the code snippet appended
    messages.append({"role": "user", "content": message + " \n" + code})

    response = ""
    # Stream the completion and yield the partial response as it grows
    for chunk in client.chat_completion(
        messages,
        max_tokens=512,
        stream=True,
        temperature=0.7,
        top_p=0.4,
    ):
        token = chunk.choices[0].delta.content or ""
        response += token
        yield response


with gr.Blocks(analytics_enabled=True) as demo:
    code = gr.Code(language="python")
    gr.ChatInterface(respond, additional_inputs=code)

demo.launch()