updated app

Files changed:
- .chainlit/.langchain.db (+0 -0)
- .gitignore (+1 -1)
- app.py (+94 -5)
.chainlit/.langchain.db DELETED
Binary file (12.3 kB)
.gitignore CHANGED
@@ -1,2 +1,2 @@
 .env
-
+.__pycache__
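Note: the entry added here is `.__pycache__`, with a leading dot. Python's bytecode cache directory is named `__pycache__`, so if that is what this entry is meant to ignore, the conventional .gitignore pattern (an assumption about intent, not what this commit contains) would be:

__pycache__/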
app.py CHANGED
@@ -1,6 +1,9 @@
 import os
+from typing import Dict, Optional
 
 import chainlit as cl
+from chainlit.input_widget import Select, Slider, Switch
+# from chainlit import user_session
 from langchain.chains import RetrievalQAWithSourcesChain
 from langchain.chat_models import ChatOpenAI
 from langchain.embeddings.openai import OpenAIEmbeddings
@@ -12,15 +15,101 @@ from langchain.vectorstores import FAISS
 
 text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
 embeddings = OpenAIEmbeddings()
+vector_store = FAISS.load_local("docs.faiss", embeddings)
+
+
+@cl.oauth_callback
+def oauth_callback(
+    provider_id: str,
+    token: str,
+    raw_user_data: Dict[str, str],
+    default_app_user: cl.AppUser,
+) -> Optional[cl.AppUser]:
+    # set AppUser tags as regular_user
+    match default_app_user.username:
+        case "Broomva":
+            default_app_user.tags = ["admin_user"]
+            default_app_user.role = "ADMIN"
+        case _:
+            default_app_user.tags = ["regular_user"]
+            default_app_user.role = "USER"
+    print(default_app_user)
+    return default_app_user
+
+
+# @cl.set_chat_profiles
+# async def chat_profile(current_user: cl.AppUser):
+#     if "admin_user" not in current_user.tags:
+#         # Default to 3.5 when not admin
+#         return [
+#             cl.ChatProfile(
+#                 name="GPT-3.5",
+#                 markdown_description="The underlying LLM model is **GPT-3.5**.",
+#                 icon="https://picsum.photos/200",
+#             )
+#         ]
+
+#     return [
+#         cl.ChatProfile(
+#             name="GPT-3.5",
+#             markdown_description="The underlying LLM model is **GPT-3.5**.",
+#             icon="https://picsum.photos/200",
+#         ),
+#         cl.ChatProfile(
+#             name="GPT-4",
+#             markdown_description="The underlying LLM model is **GPT-4**.",
+#             icon="https://picsum.photos/250",
+#         ),
+#     ]
+
+
+@cl.on_settings_update
+async def setup_agent(settings):
+    print("on_settings_update", settings)
 
 @cl.on_chat_start
 async def init():
-
+
+    settings = await cl.ChatSettings(
+        [
+            Select(
+                id="model",
+                label="OpenAI - Model",
+                values=["gpt-3.5-turbo", "gpt-3.5-turbo-1106", "gpt-4", "gpt-4-1106-preview"],
+                initial_index=0,
+            ),
+            Switch(id="streaming", label="OpenAI - Stream Tokens", initial=True),
+            Slider(
+                id="temperature",
+                label="OpenAI - Temperature",
+                initial=1,
+                min=0,
+                max=2,
+                step=0.1,
+            ),
+            Slider(
+                id="k",
+                label="RAG - Retrieved Documents",
+                initial=3,
+                min=1,
+                max=20,
+                step=1,
+            ),
+        ]
+    ).send()
+
+
+    # print(settings)
+    # app_user = cl.user_session.get("user")
+    # chat_profile = cl.user_session.get("chat_profile")
+    # await cl.Message(
+    #     content=f"🪼 Starting chat with {app_user.username} using the {chat_profile} chat profile"
+    # ).send()
 
     chain = RetrievalQAWithSourcesChain.from_chain_type(
-        ChatOpenAI(temperature=
+        ChatOpenAI(temperature=settings['temperature'], streaming=settings['streaming'], model=settings['model']),
         chain_type="stuff",
-        retriever=vector_store.as_retriever(search_kwargs={"k":
+        retriever=vector_store.as_retriever(search_kwargs={"k": int(settings['k'])}),
     )
 
     cl.user_session.set("chain", chain)
@@ -33,7 +122,7 @@ async def main(message):
         stream_final_answer=True, answer_prefix_tokens=["FINAL", "ANSWER"]
     )
     cb.answer_reached = True
-
+
    res = await chain.acall(message.content, callbacks=[cb])
 
    if cb.has_streamed_final_answer:
@@ -42,4 +131,4 @@ async def main(message):
        answer = res["answer"]
        await cl.Message(
            content=answer,
-        ).send()
+        ).send()
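The updated app.py loads a prebuilt FAISS index from docs.faiss at import time (FAISS.load_local("docs.faiss", embeddings)), but the commit does not show how that index is produced. Below is a minimal sketch of an indexing script reusing the splitter settings from app.py; the TextLoader and the docs.txt corpus path are assumptions for illustration, not part of this repo:

# build_index.py -- sketch of producing the docs.faiss folder that app.py loads
from langchain.document_loaders import TextLoader
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import FAISS

docs = TextLoader("docs.txt").load()  # hypothetical source corpus
# Same chunking parameters as app.py
chunks = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100).split_documents(docs)
# Embed the chunks and write the index to the docs.faiss folder
FAISS.from_documents(chunks, OpenAIEmbeddings()).save_local("docs.faiss")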
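cl.ChatSettings([...]).send() returns the current widget values as a dict keyed by widget id, which is why init can read settings['model'], settings['temperature'], settings['streaming'], and settings['k'] directly. As committed, though, setup_agent only prints the updated values, so a chain built in init keeps its original model, temperature, and k for the rest of the session. One way to make the panel live would be to rebuild the chain on every update; the following is a sketch of that idea (an assumption about intended behavior, not part of this commit; it reuses app.py's module-level imports and vector_store):

@cl.on_settings_update
async def setup_agent(settings):
    # Rebuild the chain with the new widget values (same construction as init)
    chain = RetrievalQAWithSourcesChain.from_chain_type(
        ChatOpenAI(
            temperature=settings["temperature"],
            streaming=settings["streaming"],
            model=settings["model"],
        ),
        chain_type="stuff",
        retriever=vector_store.as_retriever(search_kwargs={"k": int(settings["k"])}),
    )
    cl.user_session.set("chain", chain)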