from fastapi import FastAPI
from pydantic import BaseModel
from sentence_transformers import SentenceTransformer
import numpy as np
import json

# Load the JSON data (expected to be a list of objects with a "file_name" field)
with open("data.json") as f:
    data = json.load(f)

# Initialize the embedding model
model = SentenceTransformer("all-MiniLM-L6-v2")

# Precompute embeddings for all file names; normalizing them makes the
# dot product below a true cosine similarity
file_names = [item["file_name"] for item in data]
file_embeddings = model.encode(file_names, normalize_embeddings=True)

app = FastAPI()


class Query(BaseModel):
    text: str


@app.post("/search")
async def search(query: Query):
    # Encode the query with the same normalization as the precomputed embeddings
    query_embedding = model.encode([query.text], normalize_embeddings=True)

    # Cosine similarity between the query and every file name
    similarities = np.dot(file_embeddings, query_embedding.T).flatten()

    # Pick the closest file name
    best_match_idx = int(np.argmax(similarities))

    return {
        "best_match": data[best_match_idx]["file_name"],
        "similarity_score": float(similarities[best_match_idx]),
    }
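

# Example usage (a minimal sketch): exercise the /search endpoint locally with
# FastAPI's TestClient instead of starting a server. This assumes data.json
# sits next to this script and contains entries like
# {"file_name": "q3_sales_report.xlsx"}; the query text is purely illustrative.
if __name__ == "__main__":
    from fastapi.testclient import TestClient

    client = TestClient(app)
    response = client.post("/search", json={"text": "quarterly sales numbers"})
    # Prints a dict with "best_match" and "similarity_score" keys
    print(response.json())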