from fastapi import FastAPI
from pydantic import BaseModel
from transformers import GPT2LMHeadModel, GPT2Tokenizer
import uvicorn


class CodeRequest(BaseModel):
    prompt: str


app = FastAPI()

# Load the fine-tuned model and tokenizer from the local directory
model = GPT2LMHeadModel.from_pretrained('./codegen_model')
tokenizer = GPT2Tokenizer.from_pretrained('./codegen_model')


@app.post("/generate-code/")
def generate_code(request: CodeRequest):
    # Tokenize the prompt and generate a single completion
    inputs = tokenizer.encode(request.prompt, return_tensors='pt')
    outputs = model.generate(
        inputs,
        max_length=150,
        num_return_sequences=1,
        pad_token_id=tokenizer.eos_token_id,  # GPT-2 has no pad token; reuse EOS to avoid a warning
    )
    generated_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return {"generated_code": generated_code}


if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=8000)
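
# Example client call (a minimal sketch, assuming the server above is running
# locally on port 8000; the `requests` package and the sample prompt are
# illustrative and not part of the server code):
#
#   import requests
#   resp = requests.post(
#       "http://localhost:8000/generate-code/",
#       json={"prompt": "def fibonacci(n):"},
#   )
#   print(resp.json()["generated_code"])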