CSAle committed on
Commit
f03c543
1 Parent(s): 5795fcf

Releasing ChatLGONoData

Browse files
.chainlit/config.toml ADDED
@@ -0,0 +1,84 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [project]
2
+ # Whether to enable telemetry (default: true). No personal data is collected.
3
+ enable_telemetry = true
4
+
5
+ # List of environment variables to be provided by each user to use the app.
6
+ user_env = []
7
+
8
+ # Duration (in seconds) during which the session is saved when the connection is lost
9
+ session_timeout = 3600
10
+
11
+ # Enable third-party caching (e.g. LangChain cache)
12
+ cache = false
13
+
14
+ # Follow symlink for asset mount (see https://github.com/Chainlit/chainlit/issues/317)
15
+ # follow_symlink = false
16
+
17
+ [features]
18
+ # Show the prompt playground
19
+ prompt_playground = true
20
+
21
+ # Process and display HTML in messages. This can be a security risk (see https://stackoverflow.com/questions/19603097/why-is-it-dangerous-to-render-user-generated-html-or-javascript)
22
+ unsafe_allow_html = false
23
+
24
+ # Process and display mathematical expressions. This can clash with "$" characters in messages.
25
+ latex = false
26
+
27
+ # Authorize users to upload files with messages
28
+ multi_modal = true
29
+
30
+ # Allows users to use speech-to-text
31
+ [features.speech_to_text]
32
+ enabled = false
33
+ # See all languages here https://github.com/JamesBrill/react-speech-recognition/blob/HEAD/docs/API.md#language-string
34
+ # language = "en-US"
35
+
36
+ [UI]
37
+ # Name of the app and chatbot.
38
+ name = "Chatbot"
39
+
40
+ # Show the readme while the conversation is empty.
41
+ show_readme_as_default = true
42
+
43
+ # Description of the app and chatbot. This is used for HTML tags.
44
+ # description = ""
45
+
46
+ # Large content is collapsed by default for a cleaner UI
47
+ default_collapse_content = true
48
+
49
+ # The default value for the expand messages settings.
50
+ default_expand_messages = false
51
+
52
+ # Hide the chain of thought details from the user in the UI.
53
+ hide_cot = false
54
+
55
+ # Link to your github repo. This will add a github button in the UI's header.
56
+ # github = ""
57
+
58
+ # Specify a CSS file that can be used to customize the user interface.
59
+ # The CSS file can be served from the public directory or via an external link.
60
+ # custom_css = "/public/test.css"
61
+
62
+ # Override default MUI light theme. (Check theme.ts)
63
+ [UI.theme.light]
64
+ #background = "#FAFAFA"
65
+ #paper = "#FFFFFF"
66
+
67
+ [UI.theme.light.primary]
68
+ #main = "#F80061"
69
+ #dark = "#980039"
70
+ #light = "#FFE7EB"
71
+
72
+ # Override default MUI dark theme. (Check theme.ts)
73
+ [UI.theme.dark]
74
+ #background = "#FAFAFA"
75
+ #paper = "#FFFFFF"
76
+
77
+ [UI.theme.dark.primary]
78
+ #main = "#F80061"
79
+ #dark = "#980039"
80
+ #light = "#FFE7EB"
81
+
82
+
83
+ [meta]
84
+ generated_by = "0.7.700"
__pycache__/app.cpython-311.pyc ADDED
Binary file (5 kB). View file
 
app.py CHANGED
@@ -16,11 +16,7 @@ from llama_index.query_engine import SubQuestionQueryEngine
16
  from llama_index.embeddings import HuggingFaceEmbedding
17
  from chainlit.types import AskFileResponse
18
  from llama_index import download_loader
19
-
20
- print("Loading Storage Context...")
21
- storage_context = StorageContext.from_defaults(persist_dir="index/")
22
- print("Loading Index...")
23
- index = load_index_from_storage(storage_context)
24
 
25
 
26
  def process_file(file: AskFileResponse):
@@ -61,7 +57,13 @@ async def on_chat_start():
61
  # load the file
62
  documents = process_file(file)
63
 
64
- index = await cl.make_async(index.add_documents)(documents)
 
 
 
 
 
 
65
 
66
  llm = OpenAI(model="gpt-4-1106-preview", temperature=0)
67
 
 
16
  from llama_index.embeddings import HuggingFaceEmbedding
17
  from chainlit.types import AskFileResponse
18
  from llama_index import download_loader
19
+ from llama_index import VectorStoreIndex
 
 
 
 
20
 
21
 
22
  def process_file(file: AskFileResponse):
 
57
  # load the file
58
  documents = process_file(file)
59
 
60
+ context = ServiceContext.from_defaults(
61
+ embed_model=HuggingFaceEmbedding(model_name="ai-maker-space/chatlgo-finetuned")
62
+ )
63
+
64
+ index = VectorStoreIndex.from_documents(
65
+ documents=documents, context=context, show_progress=True
66
+ )
67
 
68
  llm = OpenAI(model="gpt-4-1106-preview", temperature=0)
69