# Pinecone team has been making a lot of changes to their code and here is how it should be used going forward :)
import os

from pinecone import Pinecone as PineconeClient
# from langchain.vectorstores import Pinecone  # This import has been replaced by the one below :)
from langchain_community.vectorstores import Pinecone
from transformers import pipeline
# from langchain.llms import OpenAI  # This import has been replaced by the one below :)
from langchain_openai import OpenAI
from langchain.chains.question_answering import load_qa_chain
# from langchain.callbacks import get_openai_callback  # This import has been replaced by the one below :)
from langchain_community.callbacks import get_openai_callback
from langchain_community.embeddings import SentenceTransformerEmbeddings
import joblib

# Hugging Face extractive QA pipeline used to answer questions over the retrieved context
qa_pipeline = pipeline("question-answering", model="distilbert-base-uncased-distilled-squad")


# Function to pull index data from Pinecone...
def pull_from_pinecone(pinecone_apikey, pinecone_environment, pinecone_index_name, embeddings):
    # The langchain_community Pinecone wrapper reads the API key from the
    # environment, so export it here instead of discarding it
    os.environ["PINECONE_API_KEY"] = pinecone_apikey
    PineconeClient(
        api_key=pinecone_apikey,
        environment=pinecone_environment
    )

    index_name = pinecone_index_name
    index = Pinecone.from_existing_index(index_name, embeddings)
    return index


def create_embeddings():
    embeddings = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2")
    return embeddings


# This function will help us fetch the top relevant documents from our vector store - the Pinecone index
def get_similar_docs(index, query, k=2):
    similar_docs = index.similarity_search(query, k=k)
    return similar_docs


def get_answer(docs, user_input):
    # Concatenate all the documents into one large context;
    # 'page_content' is how the text is stored on a LangChain 'Document' object
    context = " ".join([doc.page_content for doc in docs])

    # Use Hugging Face's QA model to get the answer
    response = qa_pipeline(question=user_input, context=context)
    return response['answer']


# Predict a class label for an embedded query using the SVM model saved on disk
def predict(query_result):
    Fitmodel = joblib.load('modelsvm.pk1')
    result = Fitmodel.predict([query_result])
    return result[0]
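

# --- Example usage: a minimal sketch of how these helpers chain together ---
# Everything below is illustrative only: the API key, environment, index name,
# and the sample question are placeholders, not values from this project.
if __name__ == "__main__":
    # Build the embedding function used both to index and to query
    embeddings = create_embeddings()

    # Connect to an existing Pinecone index (replace the placeholders with real values)
    index = pull_from_pinecone(
        pinecone_apikey="YOUR_PINECONE_API_KEY",
        pinecone_environment="YOUR_PINECONE_ENVIRONMENT",
        pinecone_index_name="your-index-name",
        embeddings=embeddings,
    )

    # Retrieve the top-k most relevant documents, then answer the question over them
    question = "What is the leave policy?"  # placeholder query
    docs = get_similar_docs(index, question, k=2)
    print(get_answer(docs, question))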