momondi committed on
Commit
22ad66c
·
verified ·
1 Parent(s): 9c50ec2

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +1 -90
app.py CHANGED
@@ -1,92 +1,3 @@
1
- # # import warnings
2
- # # warnings.filterwarnings('ignore')
3
-
4
- # import pandas as pd
5
- # df = pd.read_json("./tourisme_chatbot.json")
6
-
7
- # context_data = []
8
- # for i in range(len(df)):
9
- # context = ""
10
- # for j in range(4):
11
- # context += df.columns[j]
12
- # context += ": "
13
- # context += df.iloc[i][j]
14
- # context += " "
15
- # context_data.append(context)
16
-
17
- # # Get the secret key from the environment
18
- # import os
19
- # groq_api_key = os.environ.get('groq_api_keys')
20
-
21
- # #LLM Used for RAG
22
- # from langchain_groq import ChatGroq
23
-
24
- # llm = ChatGroq(model="llama-3.1-70b-versatile",api_key=groq_api_key)
25
-
26
- # #Embedding model
27
- # from langchain_huggingface import HuggingFaceEmbeddings
28
- # embed_model = HuggingFaceEmbeddings(model_name="mixedbread-ai/mxbai-embed-large-v1")
29
-
30
- # # create vector store!
31
- # from langchain_chroma import Chroma
32
-
33
- # vectorstore = Chroma(
34
- # collection_name="tourism_dataset_store",
35
- # embedding_function=embed_model,
36
- # persist_directory="./",
37
- # )
38
-
39
- # # Add data to vector store
40
- # vectorstore.add_texts(context_data)
41
-
42
- # retriever = vectorstore.as_retriever()
43
-
44
- # from langchain_core.prompts import PromptTemplate
45
-
46
- # template = ("""You are a Moroccan tourism expert.
47
- # Use the provided context to answer the question.
48
- # If you don't know the answer, say so. Explain your answer in detail.
49
- # Do not discuss the context in your response; just provide the answer directly.
50
-
51
- # Context: {context}
52
-
53
- # Question: {question}
54
-
55
- # Answer:""")
56
-
57
- # rag_prompt = PromptTemplate.from_template(template)
58
-
59
- # from langchain_core.output_parsers import StrOutputParser
60
- # from langchain_core.runnables import RunnablePassthrough
61
-
62
- # rag_chain = (
63
- # {"context": retriever, "question": RunnablePassthrough()}
64
- # | rag_prompt
65
- # | llm
66
- # | StrOutputParser()
67
- # )
68
-
69
- # import gradio as gr
70
-
71
- # def rag_memory_stream(text):
72
- # partial_text = ""
73
- # for new_text in rag_chain.stream(text):
74
- # partial_text += new_text
75
- # yield partial_text
76
-
77
- # examples = ['Tourist attraction sites in Morocco', 'What are some fun activities to do in Morocco?']
78
-
79
- # title = "Real-time AI App with Groq API and LangChain to Answer Morocco Tourism questions"
80
- # demo = gr.Interface(
81
- # title=title,
82
- # fn=rag_memory_stream,
83
- # inputs="text",
84
- # outputs="text",
85
- # allow_flagging="never",
86
- # )
87
-
88
- # if __name__ == '__main__':
89
- # demo.launch()
90
  import pandas as pd
91
  import os
92
  import gradio as gr
@@ -170,7 +81,7 @@ demo = gr.Interface(
170
  fn=rag_memory_stream,
171
  inputs="text",
172
  outputs="text",
173
- examples = "examples",
174
  allow_flagging="never",
175
  )
176
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import pandas as pd
2
  import os
3
  import gradio as gr
 
81
  fn=rag_memory_stream,
82
  inputs="text",
83
  outputs="text",
84
+ examples = examples,
85
  allow_flagging="never",
86
  )
87