holyhigh666 committed on
Commit
515b242
·
verified ·
1 Parent(s): e7463ad

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -4,7 +4,7 @@ from langchain_huggingface import HuggingFaceEmbeddings
4
  from langchain.vectorstores import FAISS
5
  import torch
6
  from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
7
- from langchain.llms import HuggingFacePipeline
8
  from langchain.prompts import PromptTemplate
9
  from transformers import pipeline
10
  from langchain_core.output_parsers import StrOutputParser
@@ -16,7 +16,7 @@ import gradio as gr
16
 
17
  md_path = glob.glob( "md_files/*.md")
18
 
19
- docs = [UnstructuredMarkdownLoader(md).load() for md in self.markdown_path]
20
  docs_list = [item for sublist in docs for item in sublist]
21
 
22
  # Split documents
@@ -116,7 +116,7 @@ def get_output(is_RAG:str,questions:str):
116
  generation2=rag_chain.invoke(questions)
117
  return generation2.content
118
  else:
119
- generation1=llm_chain.invoke({"context":"", "question": question})
120
  return generation1.content
121
 
122
  demo = gr.Interface(
 
4
  from langchain.vectorstores import FAISS
5
  import torch
6
  from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
7
+ from langchain_huggingface.llms import HuggingFacePipeline
8
  from langchain.prompts import PromptTemplate
9
  from transformers import pipeline
10
  from langchain_core.output_parsers import StrOutputParser
 
16
 
17
  md_path = glob.glob( "md_files/*.md")
18
 
19
+ docs = [UnstructuredMarkdownLoader(md).load() for md in md_path]
20
  docs_list = [item for sublist in docs for item in sublist]
21
 
22
  # Split documents
 
116
  generation2=rag_chain.invoke(questions)
117
  return generation2.content
118
  else:
119
+ generation1=llm_chain.invoke({"context":"", "question": questions})
120
  return generation1.content
121
 
122
  demo = gr.Interface(