# LLM API — mock prediction service.
# Change: Add debugging logs to predict endpoint (commit 32777f1).
from fastapi import FastAPI, HTTPException, Request
from pydantic import BaseModel
import logging
# Set up logging: INFO level so request/response debugging lines are emitted.
logging.basicConfig(level=logging.INFO)
# Named logger shared by all handlers in this module.
logger = logging.getLogger("LLM-API")
# FastAPI application instance; route decorators below register onto it.
app = FastAPI()
# Define the input schema
class InputText(BaseModel):
    """Request body for /predict: a single free-form text field."""

    # Text to run the (currently mocked) prediction on.
    input_text: str
@app.get("/")
def root():
    """Health/landing endpoint: log the hit and return a welcome payload."""
    logger.info("Root endpoint called.")
    payload = {"message": "Welcome to the LLM API"}
    return payload
@app.post("/predict")
async def predict(data: InputText, request: Request):
    """Echo the validated input text back to the caller (mock prediction).

    Args:
        data: Request body already validated by pydantic as ``InputText``.
        request: Raw ASGI request, used only to log the incoming body.

    Returns:
        dict: ``{"response": ...}`` echoing the input text.

    Raises:
        HTTPException: 500 with the error message if processing fails.
    """
    # Safe to await: Starlette caches the body after pydantic has parsed it.
    # Lazy %-args avoid formatting work when INFO logging is disabled.
    logger.info("Received request: %s", await request.body())
    try:
        # Log the received input
        input_text = data.input_text
        logger.info("Processing input: %s", input_text)
        # Return a mock response for now
        return {"response": f"The input was: {input_text}"}
    except Exception as e:
        # logger.exception records the full traceback, not just the message.
        logger.exception("Error occurred: %s", e)
        # Chain the original cause so debugging retains the real error.
        raise HTTPException(status_code=500, detail=str(e)) from e