iclalcetin committed
Commit e4e8f24
1 Parent(s): 2a86e95

Create aappppp.py

Files changed (1)
  1. aappppp.py +107 -0
aappppp.py ADDED
@@ -0,0 +1,107 @@
+ from langchain.vectorstores.chroma import Chroma
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
+ from langchain.document_loaders import TextLoader
+ from langchain.embeddings import SentenceTransformerEmbeddings
+ import gradio as gr
+ import os
+
+ from langchain.chat_models import ChatOpenAI
+ from langchain.chains import ConversationalRetrievalChain
+ from langchain.memory import ConversationBufferMemory
+ from dotenv import load_dotenv
+
+ # Load OPENAI_API_KEY (and any other settings) from a local .env file.
+ load_dotenv()
+
+
+ def create_embeddings_from_txt(file_path: str) -> None:
+     """Split the text file into chunks, embed them and persist a Chroma index in ./db."""
+     loader = TextLoader(file_path=file_path)
+     documents = loader.load()
+     text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=100)
+     texts = text_splitter.split_documents(documents)
+     embeddings = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2")
+     persist_directory = 'db'
+     vectordb = Chroma.from_documents(
+         documents=texts,
+         embedding=embeddings,
+         persist_directory=persist_directory
+     )
+     vectordb.persist()
+
+
+ def create_conversation() -> ConversationalRetrievalChain:
+     """Build a conversational retrieval chain over the persisted Chroma index."""
+     persist_directory = 'db'
+     # The embedding function must match the one used to build the index above,
+     # otherwise Chroma rejects queries because of mismatched vector dimensions.
+     embeddings = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2")
+     db = Chroma(
+         persist_directory=persist_directory,
+         embedding_function=embeddings
+     )
+     memory = ConversationBufferMemory(
+         memory_key='chat_history',
+         return_messages=False
+     )
+     qa = ConversationalRetrievalChain.from_llm(
+         llm=ChatOpenAI(openai_api_key=os.getenv('OPENAI_API_KEY')),
+         chain_type='stuff',
+         retriever=db.as_retriever(),
+         memory=memory,
+         get_chat_history=lambda h: h,
+         verbose=True
+     )
+     return qa
+
+
+ file_path = "./shipping.txt"
+ create_embeddings_from_txt(file_path)
+ qa = create_conversation()
+
+
+ def add_text(history, text):
+     # Append the user message with an empty answer slot and clear the textbox.
+     history = history + [[text, None]]
+     return history, ""
+
+
+ def bot(history):
+     # Answer the latest question and fill in the pending slot added by add_text.
+     res = qa(
+         {
+             'question': history[-1][0],
+             'chat_history': history[:-1]
+         }
+     )
+     history[-1][1] = res['answer']
+     return history
+
+
+ with gr.Blocks() as demo:
+     chatbot = gr.Chatbot([], elem_id="chatbot",
+                          label='Document GPT')
+     with gr.Row():
+         with gr.Column(scale=0.80):
+             txt = gr.Textbox(
+                 show_label=False,
+                 placeholder="Enter text and press enter",
+             )
+         with gr.Column(scale=0.10):
+             submit_btn = gr.Button(
+                 'Submit',
+                 variant='primary'
+             )
+         with gr.Column(scale=0.10):
+             clear_btn = gr.Button(
+                 'Clear',
+                 variant='stop'
+             )
+
+     txt.submit(add_text, [chatbot, txt], [chatbot, txt]).then(
+         bot, chatbot, chatbot
+     )
+
+     submit_btn.click(add_text, [chatbot, txt], [chatbot, txt]).then(
+         bot, chatbot, chatbot
+     )
+
+     clear_btn.click(lambda: None, None, chatbot, queue=False)
+
+ if __name__ == '__main__':
+     demo.launch()