import gradio as gr
import os
from pathlib import Path

from llama_index import (
    ServiceContext,
    SimpleDirectoryReader,
    StorageContext,
    VectorStoreIndex,
    load_index_from_storage,
)
from llama_index.embeddings import HuggingFaceEmbedding, VoyageEmbedding
from llama_index.llms import Anyscale
from llama_index.query_engine import RetrieverQueryEngine
from llama_index.retrievers import RecursiveRetriever

# Define the inference LLM, served via Anyscale Endpoints.
llm = Anyscale(model="mistralai/Mistral-7B-Instruct-v0.1", api_key=os.getenv("ANYSCALE_API_KEY"))
# Alternative instruction-tuned model:
# llm = Anyscale(model="HuggingFaceH4/zephyr-7b-beta", api_key=os.getenv("ANYSCALE_API_KEY"))

# Define the embedding model used to embed the query.
# embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-base-en-v1.5")
embed_model = VoyageEmbedding(model_name="voyage-01", voyage_api_key=os.getenv("VOYAGE_API_KEY"))
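# NOTE: the query-time embedding model must match the model that was used to
# build the persisted index in ./custom_index; mixing embedding models yields
# meaningless similarity scores.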

service_context = ServiceContext.from_defaults(llm=llm, embed_model=embed_model)
# Point the storage context at the directory where the index was persisted.
storage_context = StorageContext.from_defaults(persist_dir=Path("./custom_index"))

# Load the vector index that was created earlier.
index = load_index_from_storage(storage_context=storage_context, service_context=service_context)
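# If ./custom_index does not exist yet, it has to be built and persisted first.
# A minimal sketch, assuming the source documents live in ./data (a hypothetical
# path, not taken from this app):
#
#     documents = SimpleDirectoryReader("./data").load_data()
#     index = VectorStoreIndex.from_documents(documents, service_context=service_context)
#     index.storage_context.persist(persist_dir="./custom_index")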

# Define the retriever and the query engine built on top of it.
index_retriever = index.as_retriever(similarity_top_k=4)
recursive_retriever = RecursiveRetriever("vector", retriever_dict={"vector": index_retriever})
query_engine = RetrieverQueryEngine.from_args(recursive_retriever, service_context=service_context)
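# Quick smoke test (hypothetical query string, not part of the original app):
# print(query_engine.query("What does the indexed corpus cover?"))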


def authenticate(username, password):
    # Gradio calls this with the submitted credentials and expects a boolean.
    return username == "Gribouille" and password == "A jamais les premiers"
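# Hardcoded credentials are readable by anyone with access to this file. A safer
# variant, assuming APP_USERNAME / APP_PASSWORD secrets (hypothetical names, not
# from this app):
#
#     def authenticate(username, password):
#         return username == os.getenv("APP_USERNAME") and password == os.getenv("APP_PASSWORD")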


def predict(query):
    # Run the RAG query and return the response text.
    return str(query_engine.query(query))


iface = gr.Interface(fn=predict, inputs=["text"], outputs="text")
iface.launch(auth=authenticate, share=True)