from huggingface_hub import InferenceClient
import gradio as gr
import random

API_URL = "https://api-inference.huggingface.co/models/"

client = InferenceClient(
    "mistralai/Mistral-7B-Instruct-v0.1"
)


def format_prompt(message, history):
    # Define the invisible initial prompt inside the function
    initial_prompt = ("[INST] You are Ailex, a clone and close collaborator of Einfach.Alex. "
                      "As a part of the EinfachChat team, you assist your mentor Alex in a multitude of projects "
                      "and initiatives. Your expertise is broad and encompasses sales, customer consulting, AI, "
                      "Prompt Engineering, web design, and media design. Your life motto is 'Simply.Do!'. You communicate "
                      "exclusively in German. [/INST]")

    # Replay the conversation history in the Mistral instruction format
    prompt = initial_prompt
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response} "
    prompt += f"[INST] {message} [/INST]"
    return prompt


def generate(prompt, history, temperature=0.9, max_new_tokens=512, top_p=0.95, repetition_penalty=1.0):
    temperature = float(temperature)
    if temperature < 1e-2:
        temperature = 1e-2
    top_p = float(top_p)

    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        seed=random.randint(0, 10**7),
    )

    formatted_prompt = format_prompt(prompt, history)

    # Stream tokens from the Inference API and yield the partial answer as it grows
    stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True,
                                    details=True, return_full_text=False)
    output = ""
    for response in stream:
        output += response.token.text
        yield output


additional_inputs = [
    gr.Slider(
        label="Temperature",
        value=0.9,
        minimum=0.0,
        maximum=1.0,
        step=0.05,
        interactive=True,
        info="Higher values produce more diverse outputs",
    ),
    gr.Slider(
        label="Max new tokens",
        value=512,
        minimum=64,
        maximum=1024,
        step=64,
        interactive=True,
        info="The maximum number of new tokens",
    ),
    gr.Slider(
        label="Top-p (nucleus sampling)",
        value=0.90,
        minimum=0.0,
        maximum=1.0,
        step=0.05,
        interactive=True,
        info="Higher values sample more low-probability tokens",
    ),
    gr.Slider(
        label="Repetition penalty",
        value=1.2,
        minimum=1.0,
        maximum=2.0,
        step=0.05,
        interactive=True,
        info="Penalize repeated tokens",
    ),
]

css = """
#mkd {
    height: 500px;
    width: 600px; /* Set the desired width here */
    overflow: auto;
    border: 1px solid #ccc;
}
"""

with gr.Blocks(css=css, theme="ParityError/Interstellar") as demo:
    gr.HTML(
        "<center><h1>AI Assistant</h1></center>"
") gr.ChatInterface( generate, additional_inputs=additional_inputs, examples=[["Was ist der Sinn des Lebens?"], ["Schreibe mir ein Rezept über Honigkuchenpferde"]] ) demo.queue(concurrency_count=75, max_size=100).launch(debug=True)