dj86 committed
Commit
d411c25
1 Parent(s): 8d42871

Update vlog4chat.py

Files changed (1)
  1. vlog4chat.py +14 -9
vlog4chat.py CHANGED
@@ -28,15 +28,20 @@ from pydantic.v1 import Extra, Field, root_validator
 from typing import Any, Dict, List, Mapping, Optional
 from langchain.memory import ConversationBufferMemory
 from langchain import LLMChain, PromptTemplate
-#from paddleocr import PaddleOCR, draw_ocr
 
-#sys.path.append('/root/autodl-tmp/recognize-anything')
+import torch
 
-#from ram.models import ram
-#from ram.models import tag2text
-#from ram import inference_ram as inference
-#from ram import inference_tag2text as inference
-#from ram import get_transform
+def map_to_cpu(obj):
+    if isinstance(obj, torch.Tensor):
+        return obj.to(torch.device('cpu'))
+    elif isinstance(obj, dict):
+        return {k: map_to_cpu(v) for k, v in obj.items()}
+    elif isinstance(obj, list):
+        return [map_to_cpu(i) for i in obj]
+    elif isinstance(obj, tuple):
+        return tuple(map_to_cpu(i) for i in obj)
+    else:
+        return obj
 
 warnings.filterwarnings("ignore", category=UserWarning)
 B_INST, E_INST = "[INST]", "[/INST]"
@@ -166,8 +171,8 @@ class Vlogger4chat :
 #chunks = text_splitter.split_documents(raw_documents)
 #self.vector_storage = FAISS.from_documents(chunks, self.my_embedding)
 with open('./BV11H4y1F7uH.pkl', 'rb') as f:
-    self.vector_storage =pickle.load(f)
-
+    vector_storage =pickle.load(f)
+    self.vector_storage = map_to_cpu(vector_storage)
 self.chain = ConversationalRetrievalChain.from_llm(self.llm, self.vector_storage.as_retriever(), return_source_documents=True)
 return True
 return False
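
Note: as a minimal, self-contained sketch of how the two hunks fit together (the map_to_cpu helper, the pickle path ./BV11H4y1F7uH.pkl, and the variable names are taken from the diff; everything else here is illustrative, and unpickling tensors that were saved on a CUDA device may still require CUDA to be available during the load itself):

    import pickle
    import torch

    def map_to_cpu(obj):
        # Recursively walk dicts, lists and tuples and move any
        # torch.Tensor found along the way onto the CPU.
        if isinstance(obj, torch.Tensor):
            return obj.to(torch.device('cpu'))
        elif isinstance(obj, dict):
            return {k: map_to_cpu(v) for k, v in obj.items()}
        elif isinstance(obj, list):
            return [map_to_cpu(i) for i in obj]
        elif isinstance(obj, tuple):
            return tuple(map_to_cpu(i) for i in obj)
        else:
            return obj

    # Load the pickled FAISS vector store and strip any GPU placement
    # from tensors it may carry before it is handed to the retriever.
    with open('./BV11H4y1F7uH.pkl', 'rb') as f:
        vector_storage = map_to_cpu(pickle.load(f))

The more common idiom for remapping devices, torch.load(..., map_location='cpu'), only applies to artifacts written with torch.save; because this store is a plain pickle, a recursive walk over the container types is used instead.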