import fastapi
import json
import markdown
import uvicorn
from fastapi.responses import HTMLResponse
from fastapi.middleware.cors import CORSMiddleware
from sse_starlette.sse import EventSourceResponse
from ctransformers import AutoModelForCausalLM
from pydantic import BaseModel
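
# Load the quantized WizardCoder GGML weights through ctransformers.
# model_type="starcoder" tells ctransformers which architecture the GGML file uses.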
llm = AutoModelForCausalLM.from_pretrained(
    "TheBloke/WizardCoder-15B-1.0-GGML",
    model_file="WizardCoder-15B-1.0.ggmlv3.q4_0.bin",
    model_type="starcoder",
)

app = fastapi.FastAPI(title="WizardCoder")
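
# Allow cross-origin requests so browser-based clients can call the API directly.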
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
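

# Serve the project README, rendered from Markdown to HTML, at the root path.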
@app.get("/")
async def index():
    with open("README.md", "r", encoding="utf-8") as readme_file:
        md_template_string = readme_file.read()
    html_content = markdown.markdown(md_template_string)
    return HTMLResponse(content=html_content, status_code=200)
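

# Request body for the chat completions endpoint.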
class ChatCompletionRequest(BaseModel):
    prompt: str
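

# Stream a completion for the given prompt as server-sent events: the prompt is
# echoed first, then one event per generated token, and finally an empty event to
# signal the end of the stream. The optional response_mode query parameter is
# accepted but not used.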
@app.post("/v1/chat/completions")
async def chat(request: ChatCompletionRequest, response_mode=None):
    tokens = llm.tokenize(request.prompt)

    async def server_sent_events(chat_chunks, llm):
        yield request.prompt
        for chat_chunk in llm.generate(chat_chunks):
            yield llm.detokenize(chat_chunk)
        yield ""

    return EventSourceResponse(server_sent_events(tokens, llm))
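

# Run the API with uvicorn when the script is executed directly.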
if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=8000)
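
# A minimal sketch of a streaming client, assuming the server is running locally on
# port 8000 and that the httpx package is available (httpx is not a dependency of
# this script; it is used here only for illustration):
#
#   import httpx
#
#   with httpx.stream(
#       "POST",
#       "http://localhost:8000/v1/chat/completions",
#       json={"prompt": "def fibonacci(n):"},
#       timeout=None,
#   ) as response:
#       for line in response.iter_lines():
#           # sse_starlette frames each event as a "data: ..." line.
#           if line.startswith("data: "):
#               print(line[len("data: "):], end="", flush=True)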