CamiloVega committed on
Commit 770d5ac · verified · 1 Parent(s): 597a6c8

Upload 3 files

Files changed (3)
  1. app-file.py +337 -0
  2. readme-file.md +64 -0
  3. requirements-file.txt +13 -0
app-file.py ADDED
@@ -0,0 +1,337 @@
+ import spaces
+ from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
+ import gradio as gr
+ import torch
+ import logging
+ import os
+ from typing import List, Dict
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
+ from langchain.chains import RetrievalQA
+ from langchain.prompts import PromptTemplate
+ from langchain_community.embeddings import HuggingFaceEmbeddings
+ from langchain_community.vectorstores import FAISS
+ from langchain_community.llms import HuggingFacePipeline
+ from langchain_community.document_loaders import PyPDFLoader
+
+ # Configure logging
+ logging.basicConfig(
+     level=logging.INFO,
+     format='%(asctime)s - %(levelname)s - %(message)s'
+ )
+ logger = logging.getLogger(__name__)
+
+ # Get HuggingFace token from environment variable
+ hf_token = os.environ.get('HUGGINGFACE_TOKEN')
+ if not hf_token:
+     logger.error("HUGGINGFACE_TOKEN environment variable not set")
+     raise ValueError("Please set the HUGGINGFACE_TOKEN environment variable")
+
+ # Constants
+ MODEL_NAME = "meta-llama/Llama-2-7b-chat-hf"
+ KNOWLEDGE_BASE_DIR = "knowledge_base"
+
+ class DocumentLoader:
+     """Class to manage PDF document loading."""
+
+     @staticmethod
+     def load_pdfs(directory_path: str) -> List:
+         documents = []
+         pdf_files = [f for f in os.listdir(directory_path) if f.endswith('.pdf')]
+
+         for pdf_file in pdf_files:
+             pdf_path = os.path.join(directory_path, pdf_file)
+             try:
+                 loader = PyPDFLoader(pdf_path)
+                 pdf_documents = loader.load()
+
+                 for doc in pdf_documents:
+                     doc.metadata.update({
+                         'title': pdf_file,
+                         'type': 'technical' if 'Valencia' in pdf_file else 'qa',
+                         'language': 'en',
+                         'page': doc.metadata.get('page', 0)
+                     })
+                     documents.append(doc)
+
+                 logger.info(f"Document {pdf_file} loaded successfully")
+             except Exception as e:
+                 logger.error(f"Error loading {pdf_file}: {str(e)}")
+
+         return documents
+
+ class TextProcessor:
+     """Class to process and split text into chunks."""
+
+     def __init__(self):
+         # Larger chunks with more overlap for dense technical documents
+         self.technical_splitter = RecursiveCharacterTextSplitter(
+             chunk_size=800,
+             chunk_overlap=200,
+             separators=["\n\n", "\n", ". ", " ", ""],
+             length_function=len
+         )
+
+         # Smaller chunks for Q&A-style documents
+         self.qa_splitter = RecursiveCharacterTextSplitter(
+             chunk_size=500,
+             chunk_overlap=100,
+             separators=["\n\n", "\n", ". ", " ", ""],
+             length_function=len
+         )
+
+     def process_documents(self, documents: List) -> List:
+         if not documents:
+             logger.warning("No documents to process")
+             return []
+
+         processed_chunks = []
+         for doc in documents:
+             # Choose the splitter matching the document type set by DocumentLoader
+             splitter = self.technical_splitter if doc.metadata['type'] == 'technical' else self.qa_splitter
+             chunks = splitter.split_documents([doc])
+             processed_chunks.extend(chunks)
+
+         logger.info(f"Documents processed into {len(processed_chunks)} chunks")
+         return processed_chunks
+
+ class RAGSystem:
+     """Main RAG system class."""
+
+     def __init__(self, model_name: str = MODEL_NAME):
+         self.model_name = model_name
+         self.embeddings = None
+         self.vector_store = None
+         self.qa_chain = None
+         self.tokenizer = None
+         self.model = None
+
+     def initialize_system(self):
+         """Initialize complete RAG system."""
+         try:
+             logger.info("Starting RAG system initialization...")
+
+             # Load and process documents
+             loader = DocumentLoader()
+             documents = loader.load_pdfs(KNOWLEDGE_BASE_DIR)
+
+             processor = TextProcessor()
+             processed_chunks = processor.process_documents(documents)
+
+             # Initialize embeddings
+             self.embeddings = HuggingFaceEmbeddings(
+                 model_name="intfloat/multilingual-e5-large",
+                 model_kwargs={'device': 'cuda'},
+                 encode_kwargs={'normalize_embeddings': True}
+             )
+
+             # Create vector store
+             self.vector_store = FAISS.from_documents(
+                 processed_chunks,
+                 self.embeddings
+             )
+
+             # Initialize LLM
+             self.tokenizer = AutoTokenizer.from_pretrained(
+                 self.model_name,
+                 trust_remote_code=True,
+                 token=hf_token
+             )
+
+             self.model = AutoModelForCausalLM.from_pretrained(
+                 self.model_name,
+                 torch_dtype=torch.float16,
+                 trust_remote_code=True,
+                 token=hf_token,
+                 device_map="auto"
+             )
+
+             # Create generation pipeline; the model is already dispatched by
+             # device_map="auto" above, so no device argument is passed here
+             pipe = pipeline(
+                 "text-generation",
+                 model=self.model,
+                 tokenizer=self.tokenizer,
+                 max_new_tokens=512,
+                 do_sample=True,  # enable sampling so temperature/top_p take effect
+                 temperature=0.1,
+                 top_p=0.95,
+                 repetition_penalty=1.15
+             )
+
+             llm = HuggingFacePipeline(pipeline=pipe)
+
+             # Create prompt template
+             prompt_template = """
+             Context: {context}
+
+             Based on the context above, please provide a clear and concise answer to the following question.
+             If the information is not in the context, explicitly state so.
+
+             Question: {question}
+             """
+
+             PROMPT = PromptTemplate(
+                 template=prompt_template,
+                 input_variables=["context", "question"]
+             )
+
+             # Set up QA chain
+             self.qa_chain = RetrievalQA.from_chain_type(
+                 llm=llm,
+                 chain_type="stuff",
+                 retriever=self.vector_store.as_retriever(
+                     search_kwargs={"k": 6}
+                 ),
+                 return_source_documents=True,
+                 chain_type_kwargs={"prompt": PROMPT}
+             )
+
+             logger.info("RAG system initialized successfully")
+
+         except Exception as e:
+             logger.error(f"Error during RAG system initialization: {str(e)}")
+             raise
+
+     def generate_response(self, question: str) -> Dict:
+         """Generate response for a given question."""
+         try:
+             result = self.qa_chain({"query": question})
+
+             response = {
+                 'answer': result['result'],
+                 'sources': []
+             }
+
+             for doc in result['source_documents']:
+                 source = {
+                     'title': doc.metadata.get('title', 'Unknown'),
+                     'content': doc.page_content[:200] + "..." if len(doc.page_content) > 200 else doc.page_content,
+                     'metadata': doc.metadata
+                 }
+                 response['sources'].append(source)
+
+             return response
+
+         except Exception as e:
+             logger.error(f"Error generating response: {str(e)}")
+             raise
+
+ @spaces.GPU(duration=60)
+ def process_response(user_input: str, chat_history: List) -> List:
+     """Process user input and return the updated chat history."""
+     try:
+         response = rag_system.generate_response(user_input)
+
+         # Clean and format response
+         answer = response['answer']
+         if "Answer:" in answer:
+             answer = answer.split("Answer:")[-1].strip()
+
+         # Format sources (deduplicated titles of the top retrieved documents)
+         sources = set([source['title'] for source in response['sources'][:3]])
+         if sources:
+             answer += "\n\n📚 Sources consulted:\n" + "\n".join([f"• {source}" for source in sources])
+
+         chat_history.append((user_input, answer))
+         return chat_history
+
+     except Exception as e:
+         logger.error(f"Error in process_response: {str(e)}")
+         error_message = f"Sorry, an error occurred: {str(e)}"
+         chat_history.append((user_input, error_message))
+         return chat_history
+
+ # Initialize RAG system
+ logger.info("Initializing RAG system...")
+ rag_system = RAGSystem()
+ rag_system.initialize_system()
+ logger.info("RAG system initialization completed")
+
+ # Create Gradio interface
+ try:
+     logger.info("Creating Gradio interface...")
+     with gr.Blocks(css="div.gradio-container {background-color: #f0f2f6}") as demo:
+         gr.HTML("""
+             <div style="text-align: center; max-width: 800px; margin: 0 auto; padding: 20px;">
+                 <h1 style="color: #2d333a;">📊 FislacBot</h1>
+                 <p style="color: #4a5568;">
+                     AI Assistant specialized in fiscal analysis and FISLAC documentation
+                 </p>
+             </div>
+         """)
+
+         chatbot = gr.Chatbot(
+             show_label=False,
+             container=True,
+             height=500,
+             bubble_full_width=True,
+             show_copy_button=True,
+             scale=2
+         )
+
+         with gr.Row():
+             message = gr.Textbox(
+                 placeholder="💭 Type your question here...",
+                 show_label=False,
+                 container=False,
+                 scale=8,
+                 autofocus=True
+             )
+             clear = gr.Button("🗑️ Clear", size="sm", scale=1)
+
+         # Suggested questions
+         gr.HTML('<p style="color: #2d333a; font-weight: bold; margin: 20px 0 10px 0;">💡 Suggested questions:</p>')
+         with gr.Row():
+             suggestion1 = gr.Button("What is FISLAC?", scale=1)
+             suggestion2 = gr.Button("What are the main modules of FISLAC?", scale=1)
+
+         with gr.Row():
+             suggestion3 = gr.Button("What macroeconomic variables are relevant for advanced economies?", scale=1)
+             suggestion4 = gr.Button("How does fiscal risk compare between emerging and advanced countries?", scale=1)
+
+         # Footer
+         gr.HTML("""
+             <div style="text-align: center; max-width: 800px; margin: 20px auto; padding: 20px;
+                         background-color: #f8f9fa; border-radius: 10px;">
+                 <div style="margin-bottom: 15px;">
+                     <h3 style="color: #2d333a;">🔍 About this assistant</h3>
+                     <p style="color: #666; font-size: 14px;">
+                         This bot uses RAG (Retrieval Augmented Generation) technology combining:
+                     </p>
+                     <ul style="list-style: none; color: #666; font-size: 14px;">
+                         <li>🔹 LLM Engine: Llama-2-7b-chat-hf</li>
+                         <li>🔹 Embeddings: multilingual-e5-large</li>
+                         <li>🔹 Vector Store: FAISS</li>
+                     </ul>
+                 </div>
+                 <div style="border-top: 1px solid #ddd; padding-top: 15px;">
+                     <p style="color: #666; font-size: 14px;">
+                         <strong>Current Knowledge Base:</strong><br>
+                         • Valencia et al. (2022) - "Assessing macro-fiscal risk for Latin American and Caribbean countries"<br>
+                         • FISLAC Technical Documentation
+                     </p>
+                 </div>
+                 <div style="border-top: 1px solid #ddd; margin-top: 15px; padding-top: 15px;">
+                     <p style="color: #666; font-size: 14px;">
+                         Created by <a href="https://www.linkedin.com/in/camilo-vega-169084b1/"
+                            target="_blank" style="color: #2196F3; text-decoration: none;">Camilo Vega</a>,
+                         AI Consultant 🤖
+                     </p>
+                 </div>
+             </div>
+         """)
+
+         # Configure event handlers
+         def submit(user_input, chat_history):
+             return process_response(user_input, chat_history)
+
+         message.submit(submit, [message, chatbot], [chatbot])
+         clear.click(lambda: None, None, chatbot)
+
+         # Handle suggested questions: a Button passed as an input component
+         # supplies its label text as the value
+         for btn in [suggestion1, suggestion2, suggestion3, suggestion4]:
+             btn.click(submit, [btn, chatbot], [chatbot])
+
+     logger.info("Gradio interface created successfully")
+     demo.launch()
+
+ except Exception as e:
+     logger.error(f"Error in Gradio interface creation: {str(e)}")
+     raise
readme-file.md ADDED
@@ -0,0 +1,64 @@
+ ---
+ title: FislacBot
+ emoji: 📊
+ colorFrom: blue
+ colorTo: green
+ sdk: gradio
+ sdk_version: 4.19.2
+ app_file: app.py
+ pinned: false
+ accelerator: gpu
+ ---
+
+ # FislacBot - AI Assistant for FISLAC Documentation
+
+ FislacBot is an artificial intelligence assistant specialized in FISLAC (Fiscal Latin America and Caribbean) documentation and fiscal analysis. It uses the Llama-2-7b-chat model with RAG (Retrieval Augmented Generation) to provide accurate responses grounded in official documentation.
+
+ ## Author
+ **Camilo Vega Barbosa**
+ - AI Professor and Artificial Intelligence Solutions Consultant
+ - Connect with me:
+   - [LinkedIn](https://www.linkedin.com/in/camilo-vega-169084b1/)
+   - [GitHub](https://github.com/CamiloVga)
+
+ ## Features
+ - RAG-powered responses using official FISLAC documentation
+ - Interactive chat interface using Gradio
+ - GPU-accelerated inference
+ - Context-aware responses with source tracking
+
+ ## How It Works
+ The application uses a RAG pipeline (sketched below) that:
+ 1. Processes and indexes FISLAC documentation
+ 2. Generates embeddings using multilingual-e5-large
+ 3. Uses FAISS for efficient vector storage and retrieval
+ 4. Combines retrieved context with Llama-2 for accurate responses
+
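+ The snippet below is a minimal sketch of steps 1–3 plus the retrieval half of step 4, condensed from `app.py`; the PDF path is a placeholder, and the full app additionally wires the retriever and Llama-2 into a `RetrievalQA` chain:
+
+ ```python
+ from langchain_community.document_loaders import PyPDFLoader
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
+ from langchain_community.embeddings import HuggingFaceEmbeddings
+ from langchain_community.vectorstores import FAISS
+
+ # 1. Load and split a document (placeholder filename)
+ docs = PyPDFLoader("knowledge_base/example.pdf").load()
+ chunks = RecursiveCharacterTextSplitter(chunk_size=800, chunk_overlap=200).split_documents(docs)
+
+ # 2. Embed the chunks with multilingual-e5-large
+ embeddings = HuggingFaceEmbeddings(
+     model_name="intfloat/multilingual-e5-large",
+     encode_kwargs={"normalize_embeddings": True},
+ )
+
+ # 3. Index them in FAISS
+ store = FAISS.from_documents(chunks, embeddings)
+
+ # 4. Retrieve the top-6 chunks that the LLM will see as context
+ context_docs = store.as_retriever(search_kwargs={"k": 6}).get_relevant_documents("What is FISLAC?")
+ ```
+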
+ ## Technical Details
+ - **Model**: meta-llama/Llama-2-7b-chat-hf
+ - **Embeddings**: intfloat/multilingual-e5-large
+ - **Vector Store**: FAISS
+ - **Framework**: Gradio
+ - **Dependencies**: Managed through `requirements.txt`
+ - **Device Configuration**: GPU-optimized using Accelerate
+
+ ## Installation
+ To run this application locally:
+ 1. Clone the repository
+ 2. Install dependencies:
+ ```bash
+ pip install -r requirements.txt
+ ```
+ 3. Set the `HUGGINGFACE_TOKEN` environment variable (the app requires it to download the gated Llama-2 model)
+ 4. Run the application:
+ ```bash
+ python app.py
+ ```
+
+ ## Knowledge Base
+ The system's knowledge base includes:
+ - Official FISLAC documentation
+ - Valencia et al. (2022) - "Assessing macro-fiscal risk for Latin American and Caribbean countries"
+ - Additional BID fiscal documentation
+
+ ---
+ Created by Camilo Vega Barbosa, AI Professor and Solutions Consultant. For more AI projects and collaborations, feel free to connect on [LinkedIn](https://www.linkedin.com/in/camilo-vega-169084b1/) or visit my [GitHub](https://github.com/CamiloVga).
requirements-file.txt ADDED
@@ -0,0 +1,13 @@
+ transformers==4.36.2
+ torch==2.4.0
+ accelerate==0.27.2
+ gradio==4.19.2
+ huggingface-hub==0.20.3
+ numpy==1.24.3
+ scipy==1.11.4
+ faiss-cpu==1.7.4
+ pypdf==3.17.1
+ langchain==0.1.0
+ langchain-community==0.0.13
+ sentence-transformers==2.2.2
+ pdfplumber==0.10.3
+ spaces  # required locally for the @spaces.GPU decorator (preinstalled on HF Spaces)