from fastapi import FastAPI, Request
from transformers import AutoTokenizer, AutoModelForCausalLM
import uvicorn
import json
import datetime
import torch
from configs import model_path, api_port
# Device configuration
DEVICE = "cuda"  # use CUDA
DEVICE_ID = "0"  # CUDA device ID; leave empty to use the default device
CUDA_DEVICE = f"{DEVICE}:{DEVICE_ID}" if DEVICE_ID else DEVICE  # combined CUDA device string
# Free cached GPU memory
def torch_gc():
    if torch.cuda.is_available():  # check that CUDA is available
        with torch.cuda.device(CUDA_DEVICE):  # target the configured CUDA device
            torch.cuda.empty_cache()  # release cached memory back to the driver
            torch.cuda.ipc_collect()  # collect CUDA IPC memory
# Create the FastAPI application
app = FastAPI()
# Endpoint handling POST requests
@app.post("/")
async def create_item(request: Request):
    global model, tokenizer  # use the model and tokenizer loaded in the main block
    json_post = await request.json()  # parse the JSON body of the POST request
    prompt = json_post.get('prompt')  # prompt text
    history = json_post.get('history')  # conversation history (currently unused)
    max_length = json_post.get('max_length')  # maximum number of new tokens
    top_p = json_post.get('top_p')  # nucleus-sampling threshold
    temperature = json_post.get('temperature')  # sampling temperature
    # Generate a reply with the model; the Chinese preamble asks it to answer
    # the question in fewer than 25 characters
    prompt = f"请用少于25个字回答以下问题 ### Instruction:{prompt} ### Response:"
    inputs = tokenizer(prompt, return_tensors="pt").to(CUDA_DEVICE)
    generate_ids = model.generate(inputs.input_ids,
                                  max_new_tokens=max_length if max_length else 2048,
                                  do_sample=True,
                                  top_k=20,
                                  top_p=top_p,
                                  temperature=temperature if temperature else 0.84,
                                  repetition_penalty=1.15,
                                  eos_token_id=2, bos_token_id=1, pad_token_id=0)
    response = tokenizer.batch_decode(generate_ids, skip_special_tokens=True,
                                      clean_up_tokenization_spaces=False)[0]
    response = response.split("### Response:")[-1]  # keep only the generated answer
    now = datetime.datetime.now()  # current time
    time = now.strftime("%Y-%m-%d %H:%M:%S")  # formatted as a string
    # Build the JSON response
    answer = {
        "response": response,
        # "history": history,
        "status": 200,
        "time": time
    }
    # Build and print a log line
    log = "[" + time + "] " + 'prompt:"' + prompt + '", response:"' + repr(response) + '"'
    print(log)
    torch_gc()  # free cached GPU memory
    return answer  # return the JSON response
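
# Illustrative request/response shapes for the endpoint above (not enforced by a
# schema; field names follow the .get() calls in create_item):
#   request:  {"prompt": "...", "history": [], "max_length": 256,
#              "top_p": 0.9, "temperature": 0.84}
#   response: {"response": "...", "status": 200, "time": "YYYY-MM-DD HH:MM:SS"}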
# Entry point
if __name__ == '__main__':
    # Load the pretrained model and tokenizer
    model = AutoModelForCausalLM.from_pretrained(model_path, device_map=CUDA_DEVICE,
                                                 torch_dtype=torch.bfloat16, trust_remote_code=True)
    tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False, trust_remote_code=True)
    model.eval()  # switch the model to evaluation mode
    # Launch the FastAPI app
    uvicorn.run(app, host='0.0.0.0', port=api_port, workers=1)  # serve on the configured host and port
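
# Minimal client sketch (an assumption, not part of the server: it presumes the
# server is reachable on localhost at api_port and that the third-party
# `requests` package is installed; run it from a separate process):
#
#   import requests
#   from configs import api_port  # same config module the server imports
#   payload = {"prompt": "你好", "max_length": 256, "top_p": 0.9, "temperature": 0.84}
#   resp = requests.post(f"http://127.0.0.1:{api_port}/", json=payload)
#   print(resp.json()["response"])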