from pydantic import BaseModel
from llama_cpp import Llama
from concurrent.futures import ThreadPoolExecutor, as_completed
from fastapi import FastAPI, HTTPException
from spaces import GPU
import re
import uvicorn
global_data = {
    'models': {},
    'tokens': {
        'eos': 'eos_token',
        'pad': 'pad_token',
        'padding': 'padding_token',
        'unk': 'unk_token',
        'bos': 'bos_token',
        'sep': 'sep_token',
        'cls': 'cls_token',
        'mask': 'mask_token'
    }
}
model_configs = [
    {"repo_id": "Ffftdtd5dtft/gpt2-xl-Q2_K-GGUF", "filename": "gpt2-xl-q2_k.gguf", "name": "GPT-2 XL"},
    {"repo_id": "Ffftdtd5dtft/Meta-Llama-3.1-8B-Instruct-Q2_K-GGUF", "filename": "meta-llama-3.1-8b-instruct-q2_k.gguf", "name": "Meta Llama 3.1-8B Instruct"},
    {"repo_id": "Ffftdtd5dtft/gemma-2-9b-it-Q2_K-GGUF", "filename": "gemma-2-9b-it-q2_k.gguf", "name": "Gemma 2-9B IT"},
    {"repo_id": "Ffftdtd5dtft/gemma-2-27b-Q2_K-GGUF", "filename": "gemma-2-27b-q2_k.gguf", "name": "Gemma 2-27B"},
    {"repo_id": "Ffftdtd5dtft/Phi-3-mini-128k-instruct-Q2_K-GGUF", "filename": "phi-3-mini-128k-instruct-q2_k.gguf", "name": "Phi-3 Mini 128K Instruct"},
    {"repo_id": "Ffftdtd5dtft/Meta-Llama-3.1-8B-Q2_K-GGUF", "filename": "meta-llama-3.1-8b-q2_k.gguf", "name": "Meta Llama 3.1-8B"},
    {"repo_id": "Ffftdtd5dtft/Qwen2-7B-Instruct-Q2_K-GGUF", "filename": "qwen2-7b-instruct-q2_k.gguf", "name": "Qwen2 7B Instruct"},
    {"repo_id": "Ffftdtd5dtft/starcoder2-3b-Q2_K-GGUF", "filename": "starcoder2-3b-q2_k.gguf", "name": "Starcoder2 3B"},
    {"repo_id": "Ffftdtd5dtft/Qwen2-1.5B-Instruct-Q2_K-GGUF", "filename": "qwen2-1.5b-instruct-q2_k.gguf", "name": "Qwen2 1.5B Instruct"},
    {"repo_id": "Ffftdtd5dtft/Meta-Llama-3.1-70B-Q2_K-GGUF", "filename": "meta-llama-3.1-70b-q2_k.gguf", "name": "Meta Llama 3.1-70B"},
    {"repo_id": "Ffftdtd5dtft/Mistral-Nemo-Instruct-2407-Q2_K-GGUF", "filename": "mistral-nemo-instruct-2407-q2_k.gguf", "name": "Mistral Nemo Instruct 2407"},
    {"repo_id": "Ffftdtd5dtft/Hermes-3-Llama-3.1-8B-IQ1_S-GGUF", "filename": "hermes-3-llama-3.1-8b-iq1_s-imat.gguf", "name": "Hermes 3 Llama 3.1-8B"},
    {"repo_id": "Ffftdtd5dtft/Phi-3.5-mini-instruct-Q2_K-GGUF", "filename": "phi-3.5-mini-instruct-q2_k.gguf", "name": "Phi 3.5 Mini Instruct"},
    {"repo_id": "Ffftdtd5dtft/Meta-Llama-3.1-70B-Instruct-Q2_K-GGUF", "filename": "meta-llama-3.1-70b-instruct-q2_k.gguf", "name": "Meta Llama 3.1-70B Instruct"},
    {"repo_id": "Ffftdtd5dtft/codegemma-2b-IQ1_S-GGUF", "filename": "codegemma-2b-iq1_s-imat.gguf", "name": "Codegemma 2B"},
    {"repo_id": "Ffftdtd5dtft/Phi-3-mini-128k-instruct-IQ2_XXS-GGUF", "filename": "phi-3-mini-128k-instruct-iq2_xxs-imat.gguf", "name": "Phi 3 Mini 128K Instruct XXS"},
    {"repo_id": "Ffftdtd5dtft/TinyLlama-1.1B-Chat-v1.0-IQ1_S-GGUF", "filename": "tinyllama-1.1b-chat-v1.0-iq1_s-imat.gguf", "name": "TinyLlama 1.1B Chat"},
    {"repo_id": "Ffftdtd5dtft/Mistral-NeMo-Minitron-8B-Base-IQ1_S-GGUF", "filename": "mistral-nemo-minitron-8b-base-iq1_s-imat.gguf", "name": "Mistral NeMo Minitron 8B Base"},
{"repo_id": "Ffftdtd5dtft/Mistral-Nemo-Instruct-2407-Q2_K-GGUF", "filename": "mistral-nemo-instruct-2407-q2_k.gguf", "name": "Mistral Nemo Instruct 2407"} | |
]
class ModelManager:
    """Loads each configured GGUF model once and caches it by display name."""
    def __init__(self):
        self.models = {}

    def load_model(self, model_config):
        if model_config['name'] not in self.models:
            try:
                self.models[model_config['name']] = Llama.from_pretrained(repo_id=model_config['repo_id'], filename=model_config['filename'])
            except Exception as e:
                print(f"Error loading model {model_config['name']}: {e}")

    def load_all_models(self):
        # Download and load the models in parallel; the with-block waits for
        # every submitted load to finish before the dict is returned.
        with ThreadPoolExecutor() as executor:
            for config in model_configs:
                executor.submit(self.load_model, config)
        return self.models

model_manager = ModelManager()
global_data['models'] = model_manager.load_all_models()
class ChatRequest(BaseModel):
    message: str

def normalize_input(input_text):
    return input_text.strip()
def remove_duplicates(text):
    # Collapse repeated echoes of the prompt markers, strip the [/INST] tag,
    # and drop duplicate lines from the generated text.
    text = re.sub(r'(Hello there, how are you\? \[/INST\]){2,}', 'Hello there, how are you? [/INST]', text)
    text = re.sub(r'(How are you\? \[/INST\]){2,}', 'How are you? [/INST]', text)
    text = text.replace('[/INST]', '')
    lines = text.split('\n')
    unique_lines = []
    seen_lines = set()
    for line in lines:
        if line not in seen_lines:
            unique_lines.append(line)
            seen_lines.add(line)
    return '\n'.join(unique_lines)
def generate_model_response(model, inputs):
    try:
        response = model(inputs)
        return remove_duplicates(response['choices'][0]['text'])
    except Exception as e:
        print(f"Error generating model response: {e}")
        return ""
app = FastAPI()

@app.post("/generate")
async def generate(chat_request: ChatRequest):
    try:
        inputs = normalize_input(chat_request.message)
        # Query every loaded model in parallel, mapping each future back to its
        # model name so results cannot get misaligned as they complete.
        with ThreadPoolExecutor() as executor:
            futures = {
                executor.submit(generate_model_response, model, inputs): name
                for name, model in global_data['models'].items()
            }
            responses = [
                {'model': futures[future], 'response': future.result()}
                for future in as_completed(futures)
            ]
        return remove_repetitive_responses(responses)
    except Exception as e:
        print(f"Error handling request: {e}")
        raise HTTPException(status_code=500, detail=f"Error handling request: {e}")
def remove_repetitive_responses(responses):
    # Keep only the first response recorded for each model name.
    unique_responses = {}
    for response in responses:
        if response['model'] not in unique_responses:
            unique_responses[response['model']] = response['response']
    return unique_responses
if __name__ == "__main__": | |
port = 7860 | |
asyncio.run(run_server(port)) |
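# A minimal client sketch for local testing (an assumption for illustration,
# not part of the Space itself): with the server above running, POST a JSON
# body containing a "message" field to /generate; the reply maps each loaded
# model's name to its deduplicated completion.
#
#   import httpx
#   reply = httpx.post(
#       "http://127.0.0.1:7860/generate",
#       json={"message": "Hello there, how are you?"},
#       timeout=120.0,
#   )
#   print(reply.json())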