import gradio as gr
import os
import spaces
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
from threading import Thread
# Read the Hugging Face access token from the environment
HF_TOKEN = os.environ.get("HF_TOKEN", None)
# Read the environment variables used for authentication and sharing
#auth_users = os.environ.get("GRADIO_AUTH_USERS")
#auth_passwords = os.environ.get("GRADIO_AUTH_PASSWORDS")
# Convert the comma-separated user and password strings into lists
#auth_users = [user.strip() for user in auth_users.split(",")]
#auth_passwords = [password.strip() for password in auth_passwords.split(",")]
# Build the authentication dictionary
#auth_credentials = dict(zip(auth_users, auth_passwords))
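# A minimal sketch of how the credentials above could be wired in (an
# assumption, not part of the original app): gradio's `auth` parameter
# also accepts a list of (username, password) tuples.
#demo.launch(auth=list(auth_credentials.items()))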
DESCRIPTION = '''
<div>
<h1 style="text-align: center;">Meta Llama3 8B</h1>
<p>This Space demonstrates the instruction-tuned model <a href="https://huggingface.co./meta-llama/Meta-Llama-3-8B-Instruct"><b>Meta Llama 3 8B Chat</b></a>. Meta Llama 3 is Meta's latest open LLM and comes in two sizes: 8B and 70B. Feel free to play with it, or duplicate the Space to run it privately!</p>
<p>🔎 For more details about the Llama3 release and how to use the model with <code>transformers</code>, take a look <a href="https://huggingface.co./blog/llama3">at our blog post</a>.</p>
<p>🦕 Looking for an even more powerful model? Check out the <a href="https://huggingface.co./chat/"><b>Hugging Chat</b></a> integration for Meta Llama 3 70B.</p>
</div>
'''
LICENSE = """
<p/>
---
CreativeWorks AI: Intelligence System for Advanced Dialogue and Organized Responses Assistance
"""
PLACEHOLDER = """
<div style="position: relative; text-align: center;">
<h1 style="font-size: 2.5em; margin-top: 20px;">CreativeWorks Ai</h1>
<img src="https://utfs.io/f/4c8a3309-2ac3-453b-8441-04e5c5a3ed0f-361e80.svg" style="width: 80%; max-width: 50%; height: auto; opacity: 0.55; position: absolute; top: 50%; left: 50%; transform: translate(-50%, -50%); z-index: 0;">
<div style="background-color: rgba(255, 255, 255, 0.8); /* Ajuste a opacidade do fundo do texto aqui */
font-size: 1.2em; text-align: center; max-width: 800px; margin: auto; position: relative; z-index: 1; padding: 20px;">
<p>This Space demonstrates a model customized for Brazilian Portuguese, based on <a href="https://huggingface.co./mistralai/Mistral-7B-v0.3"><b>Mistral-7B-v0.3</b></a>. The Mistral-7B-v0.3 Large Language Model (LLM) is a version of Mistral-7B-v0.2 with an extended vocabulary. CreativeWorks modified and fine-tuned the model to make it faster and to reach performance comparable to leading open-source models 10 times its size, including several improvements and optimizations for logical reasoning, with a focus on RAG (Retrieval-Augmented Generation).</p>
<p>🔎 For more details about the model and how to use it with <code>transformers</code>, take a look <a href="https://huggingface.co./CreativeWorksAi/CreativeWorks_Mistral_7b_Chat_V1">at our model card</a>.</p>
<p>🦕 Looking for an even more powerful model? Check out the <a href="https://huggingface.co./chat/"><b>Hugging Chat</b></a> integration for larger models.</p>
</div>
</div>
"""
css = """
h1 {
text-align: center;
display: block;
}
#duplicate-button {
margin: auto;
color: white;
background: #1565c0;
border-radius: 100vh;
}
"""
# Load the tokenizer and model
tokenizer = AutoTokenizer.from_pretrained("CreativeWorksAi/CreativeWorks_Mistral_7b_Chat_V1", token=HF_TOKEN)
model = AutoModelForCausalLM.from_pretrained("CreativeWorksAi/CreativeWorks_Mistral_7b_Chat_V1", token=HF_TOKEN, device_map="auto")
#model = AutoModelForCausalLM.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruct", device_map="auto") # to("cuda:0")
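# Optional sketch (an assumption, not in the original app): loading the weights
# in bfloat16 roughly halves GPU memory use on hardware that supports it.
#import torch
#model = AutoModelForCausalLM.from_pretrained(
#    "CreativeWorksAi/CreativeWorks_Mistral_7b_Chat_V1",
#    token=HF_TOKEN, device_map="auto", torch_dtype=torch.bfloat16)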
# Token ids that should stop generation: the eos token plus the explicit "</s>" id
terminators = [
tokenizer.eos_token_id,
tokenizer.convert_tokens_to_ids("</s>")
]
@spaces.GPU(duration=120)
def CreativeWorks_Mistral_7b_Chat_V1(message: str,
history: list,
temperature: float,
max_new_tokens: int
) -> str:
"""
Generate a streaming response using the Mistral model.
Args:
message (str): The input message.
history (list): The conversation history used by ChatInterface.
temperature (float): The temperature for generating the response.
max_new_tokens (int): The maximum number of new tokens to generate.
    Yields:
        str: The response generated so far, streamed as it is produced.
"""
    # Rebuild the chat history using the "from"/"value" message keys
    # (ShareGPT-style) consumed by this model's chat template
    conversation = []
for user, assistant in history:
conversation.extend([{"from": "human", "value": user}, {"from": "assistant", "value": assistant}])
conversation.append({"from": "human", "value": message})
input_ids = tokenizer.apply_chat_template(conversation, return_tensors="pt").to(model.device)
streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True)
generate_kwargs = dict(
input_ids=input_ids,
streamer=streamer,
max_new_tokens=max_new_tokens,
do_sample=True,
temperature=temperature,
eos_token_id=terminators,
pad_token_id=tokenizer.eos_token_id
)
    # Force greedy decoding (do_sample=False) when temperature is 0, since
    # sampling with temperature 0 would otherwise crash
if temperature == 0:
generate_kwargs['do_sample'] = False
t = Thread(target=model.generate, kwargs=generate_kwargs)
t.start()
outputs = []
for text in streamer:
        # Strip the unwanted assistant prefix if present
        text = text.replace("<|im_start|>assistant", "")
outputs.append(text)
yield "".join(outputs)
# Gradio block
chatbot = gr.Chatbot(height=450, placeholder=PLACEHOLDER, label='CreativeWorks Ai')
with gr.Blocks(fill_height=True, css=css) as demo:
#gr.Markdown(DESCRIPTION)
#gr.DuplicateButton(value="Duplicate Space for private use", elem_id="duplicate-button")
gr.ChatInterface(
fn=CreativeWorks_Mistral_7b_Chat_V1,
chatbot=chatbot,
fill_height=True,
additional_inputs_accordion=gr.Accordion(label="⚙️ Parameters", open=False, render=False),
additional_inputs=[
gr.Slider(minimum=0,
maximum=1,
step=0.1,
value=0.95,
label="Temperature",
render=False),
gr.Slider(minimum=256,
maximum=8192,
step=1,
value=512,
label="Max new tokens",
render=False ),
],
examples=[
['How to setup a human base on Mars? Give short answer.'],
['Explain theory of relativity to me like I’m 8 years old.'],
['What is 9,000 * 9,000?'],
['Write a pun-filled happy birthday message to my friend Alex.'],
['Justify why a penguin might make a good king of the jungle.']
],
cache_examples=False,
)
gr.Markdown(LICENSE)
if __name__ == "__main__":
    # Hardcoded test credentials; note that share=True has no effect on Spaces
    demo.launch(auth=("teste", "teste@teste"), share=True)