batlahiya committed
Commit 075036a · verified · 1 Parent(s): 768f70c

Update app.py

Files changed (1): app.py +2 -2
app.py CHANGED

@@ -23,7 +23,7 @@ os.environ["TOKENIZERS_PARALLELISM"] = "false"
 
 # Predefined values
 predefined_pdf = "t6.pdf"
-predefined_llm = "tiiuae/falcon-7b-instruct" # Use a smaller model for faster responses
+predefined_llm = "TinyLlama/TinyLlama-1.1B-Chat-v1.0" # Use a smaller model for faster responses
 
 def load_doc(list_file_path, chunk_size, chunk_overlap):
     loaders = [PyPDFLoader(x) for x in list_file_path]
@@ -156,7 +156,7 @@ collection_name = create_collection_name(pdf_filepath)
 vector_db = create_db(doc_splits, collection_name)
 
 # Initialize the LLM chain with threading
-qa_chain = initialize_llmchain(predefined_llm, temperature=0.5, max_tokens=512, top_k=5, vector_db=vector_db)
+qa_chain = initialize_llmchain(predefined_llm, temperature=0.6, max_tokens=512, top_k=5, vector_db=vector_db)
 
 # Check if qa_chain is properly initialized
 if qa_chain is None:
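
For context, the call site above passes the model id and sampling parameters straight into initialize_llmchain, but the helper's body is not part of this diff. The following is a minimal sketch of how such a function is commonly written with transformers and LangChain; everything except the signature and argument names (model id, temperature, max_tokens, top_k, vector_db, all taken from the call site) is an assumption, not the repository's actual implementation.

# Sketch only: initialize_llmchain is not shown in this diff, so this body
# is an assumed reconstruction from its call site in app.py.
from langchain.chains import ConversationalRetrievalChain
from langchain_community.llms import HuggingFacePipeline
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

def initialize_llmchain(llm_model, temperature, max_tokens, top_k, vector_db):
    # Load the model locally; TinyLlama-1.1B-Chat is small enough to run on
    # modest hardware, which is the motivation for swapping out falcon-7b.
    tokenizer = AutoTokenizer.from_pretrained(llm_model)
    model = AutoModelForCausalLM.from_pretrained(llm_model)
    generator = pipeline(
        "text-generation",
        model=model,
        tokenizer=tokenizer,
        max_new_tokens=max_tokens,  # max_tokens=512 at the call site
        do_sample=True,
        temperature=temperature,    # 0.5 -> 0.6 in this commit
        top_k=top_k,
    )
    llm = HuggingFacePipeline(pipeline=generator)
    # Attach the retriever built from the predefined PDF (vector_db is the
    # store returned by create_db earlier in app.py).
    return ConversationalRetrievalChain.from_llm(
        llm=llm,
        retriever=vector_db.as_retriever(),
        return_source_documents=True,
    )

Read together, the two hunks suggest a single intent: dropping from a 7B-parameter model to a 1.1B one trades some answer quality for much faster responses on modest hardware, and the small bump from temperature 0.5 to 0.6 presumably compensates with slightly more varied sampling.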