Update user_utils.py
Browse files- user_utils.py +9 -5
user_utils.py
CHANGED
@@ -2,6 +2,7 @@
|
|
2 |
from pinecone import Pinecone as PineconeClient
|
3 |
#from langchain.vectorstores import Pinecone #This import has been replaced by the below one :)
|
4 |
from langchain_community.vectorstores import Pinecone
|
|
|
5 |
|
6 |
#from langchain.llms import OpenAI #This import has been replaced by the below one :)
|
7 |
from langchain_openai import OpenAI
|
@@ -11,6 +12,7 @@ from langchain_community.callbacks import get_openai_callback
|
|
11 |
from langchain_community.embeddings import SentenceTransformerEmbeddings
|
12 |
import joblib
|
13 |
|
|
|
14 |
|
15 |
#Function to pull index data from Pinecone...
|
16 |
def pull_from_pinecone(pinecone_apikey,pinecone_environment,pinecone_index_name,embeddings):
|
def get_similar_docs(index, query, k=2):
    """Return the *k* documents in *index* most similar to *query*.

    Thin wrapper over the vector store's similarity search; the store
    ranks by embedding distance and returns Document objects.
    """
    return index.similarity_search(query, k=k)
|
37 |
|
38 |
-
def get_answer(docs, user_input):
    """Run the pre-built QA chain over *docs* to answer *user_input*.

    NOTE(review): relies on a module-level ``chain`` object being defined
    elsewhere in the file -- confirm it exists before calling, as it is not
    visible in this chunk.
    """
    # chain.run feeds the retrieved documents and the question to the LLM.
    return chain.run(input_documents=docs, question=user_input)
|
43 |
|
|
|
|
|
|
|
|
|
|
2 |
from pinecone import Pinecone as PineconeClient
|
3 |
#from langchain.vectorstores import Pinecone #This import has been replaced by the below one :)
|
4 |
from langchain_community.vectorstores import Pinecone
|
5 |
+
from transformers import pipeline
|
6 |
|
7 |
#from langchain.llms import OpenAI #This import has been replaced by the below one :)
|
8 |
from langchain_openai import OpenAI
|
|
|
12 |
from langchain_community.embeddings import SentenceTransformerEmbeddings
|
13 |
import joblib
|
14 |
|
15 |
+
qa_pipeline = pipeline("question-answering", model="distilbert-base-uncased-distilled-squad")
|
16 |
|
17 |
#Function to pull index data from Pinecone...
|
18 |
def pull_from_pinecone(pinecone_apikey,pinecone_environment,pinecone_index_name,embeddings):
|
|
|
37 |
similar_docs = index.similarity_search(query, k=k)
|
38 |
return similar_docs
|
39 |
|
40 |
+
def get_answer(docs, user_input, qa=None):
    """Answer *user_input* extractively from the text of *docs*.

    Parameters
    ----------
    docs : list
        Retrieved documents. Each item is either a LangChain ``Document``
        (text on the ``.page_content`` attribute -- what
        ``get_similar_docs`` returns) or a plain mapping with a
        ``'page_content'`` key.
    user_input : str
        The user's question.
    qa : callable, optional
        Question-answering callable with the Hugging Face pipeline
        signature ``qa(question=..., context=...) -> {'answer': ...}``.
        Defaults to the module-level ``qa_pipeline``; injectable for
        testing or model swaps.

    Returns
    -------
    str
        The answer span extracted by the QA model.
    """
    if qa is None:
        # Resolved lazily so callers can inject a stub without loading
        # the default transformers model.
        qa = qa_pipeline

    # BUG FIX: similarity_search returns Document objects whose text lives
    # on the .page_content ATTRIBUTE -- doc['page_content'] raises
    # TypeError on them. Keep dict support for callers passing mappings.
    texts = [
        doc["page_content"] if isinstance(doc, dict) else doc.page_content
        for doc in docs
    ]

    # Concatenate all documents into one large context for extractive QA.
    context = " ".join(texts)

    response = qa(question=user_input, context=context)
    return response["answer"]
|