Spaces:
Sleeping
Sleeping
Update vlog4chat.py
Browse files- vlog4chat.py +14 -9
vlog4chat.py
CHANGED
@@ -28,15 +28,20 @@ from pydantic.v1 import Extra, Field, root_validator
|
|
28 |
from typing import Any, Dict, List, Mapping, Optional
|
29 |
from langchain.memory import ConversationBufferMemory
|
30 |
from langchain import LLMChain, PromptTemplate
|
31 |
-
#from paddleocr import PaddleOCR, draw_ocr
|
32 |
|
33 |
-
|
34 |
|
35 |
-
|
36 |
-
|
37 |
-
|
38 |
-
|
39 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
40 |
|
41 |
warnings.filterwarnings("ignore", category=UserWarning)
|
42 |
B_INST, E_INST = "[INST]", "[/INST]"
|
@@ -166,8 +171,8 @@ class Vlogger4chat :
|
|
166 |
#chunks = text_splitter.split_documents(raw_documents)
|
167 |
#self.vector_storage = FAISS.from_documents(chunks, self.my_embedding)
|
168 |
with open('./BV11H4y1F7uH.pkl', 'rb') as f:
|
169 |
-
|
170 |
-
|
171 |
self.chain = ConversationalRetrievalChain.from_llm(self.llm, self.vector_storage.as_retriever(), return_source_documents=True)
|
172 |
return True
|
173 |
return False
|
|
|
28 |
from typing import Any, Dict, List, Mapping, Optional
|
29 |
from langchain.memory import ConversationBufferMemory
|
30 |
from langchain import LLMChain, PromptTemplate
|
|
|
31 |
|
32 |
+
import torch
|
33 |
|
34 |
+
def map_to_cpu(obj):
    """Recursively relocate every torch.Tensor inside *obj* onto the CPU.

    Dicts, lists, and tuples are rebuilt with each element converted;
    tensors are moved via ``Tensor.to``; any other value is returned
    unchanged. Useful for loading GPU-pickled structures on a CPU-only host.
    """
    if isinstance(obj, torch.Tensor):
        return obj.to(torch.device('cpu'))
    if isinstance(obj, dict):
        return {key: map_to_cpu(value) for key, value in obj.items()}
    if isinstance(obj, (list, tuple)):
        # Rebuild the container, preserving its concrete type.
        converted = [map_to_cpu(item) for item in obj]
        return converted if isinstance(obj, list) else tuple(converted)
    # Non-container, non-tensor values pass through untouched.
    return obj
|
45 |
|
46 |
warnings.filterwarnings("ignore", category=UserWarning)
|
47 |
B_INST, E_INST = "[INST]", "[/INST]"
|
|
|
171 |
#chunks = text_splitter.split_documents(raw_documents)
|
172 |
#self.vector_storage = FAISS.from_documents(chunks, self.my_embedding)
|
173 |
with open('./BV11H4y1F7uH.pkl', 'rb') as f:
|
174 |
+
vector_storage =pickle.load(f)
|
175 |
+
self.vector_storage = map_to_cpu(vector_storage)
|
176 |
self.chain = ConversationalRetrievalChain.from_llm(self.llm, self.vector_storage.as_retriever(), return_source_documents=True)
|
177 |
return True
|
178 |
return False
|