DrishtiSharma committed on
Commit
186f364 · verified · 1 Parent(s): 58b0285

Update app.py

Files changed (1)
  1. app.py +23 -403
app.py CHANGED
@@ -1,10 +1,7 @@
 import streamlit as st
 import pandas as pd
 import sqlite3
-import tempfile
-from fpdf import FPDF
 import os
-import re
 import json
 from pathlib import Path
 import plotly.express as px
@@ -33,6 +30,7 @@ llm = None
 # Model Selection
 model_choice = st.radio("Select LLM", ["GPT-4o", "llama-3.3-70b"], index=0, horizontal=True)
 
+
 # API Key Validation and LLM Initialization
 groq_api_key = os.getenv("GROQ_API_KEY")
 openai_api_key = os.getenv("OPENAI_API_KEY")
@@ -53,12 +51,9 @@ elif model_choice == "GPT-4o":
 # Initialize session state for data persistence
 if "df" not in st.session_state:
     st.session_state.df = None
-if "show_preview" not in st.session_state:
-    st.session_state.show_preview = False
 
 # Dataset Input
 input_option = st.radio("Select Dataset Input:", ["Use Hugging Face Dataset", "Upload CSV File"])
-
 if input_option == "Use Hugging Face Dataset":
     dataset_name = st.text_input("Enter Hugging Face Dataset Name:", value="Einstellung/demo-salaries")
     if st.button("Load Dataset"):
@@ -66,338 +61,16 @@ if input_option == "Use Hugging Face Dataset":
             with st.spinner("Loading dataset..."):
                 dataset = load_dataset(dataset_name, split="train")
                 st.session_state.df = pd.DataFrame(dataset)
-                st.session_state.show_preview = True # Show preview after loading
                 st.success(f"Dataset '{dataset_name}' loaded successfully!")
+                st.dataframe(st.session_state.df.head())
         except Exception as e:
             st.error(f"Error: {e}")
-
 elif input_option == "Upload CSV File":
     uploaded_file = st.file_uploader("Upload CSV File:", type=["csv"])
     if uploaded_file:
-        try:
-            st.session_state.df = pd.read_csv(uploaded_file)
-            st.session_state.show_preview = True # Show preview after loading
-            st.success("File uploaded successfully!")
-        except Exception as e:
-            st.error(f"Error loading file: {e}")
+        st.session_state.df = pd.read_csv(uploaded_file)
+        st.success("File uploaded successfully!")
+        st.dataframe(st.session_state.df.head())
-
-# Show Dataset Preview Only After Loading
-if st.session_state.df is not None and st.session_state.show_preview:
-    st.subheader("📂 Dataset Preview")
-    st.dataframe(st.session_state.df.head())
-
-
-
-
-def ask_gpt4o_for_visualization(query, df, llm):
-    columns = ', '.join(df.columns)
-    prompt = f"""
-    Analyze the query and suggest one or more relevant visualizations.
-    Query: "{query}"
-    Available Columns: {columns}
-    Respond in this JSON format (as a list if multiple suggestions):
-    [
-        {{
-            "chart_type": "bar/box/line/scatter",
-            "x_axis": "column_name",
-            "y_axis": "column_name",
-            "group_by": "optional_column_name"
-        }}
-    ]
-    """
-    response = llm.generate(prompt)
-    try:
-        return json.loads(response)
-    except json.JSONDecodeError:
-        st.error("⚠️ GPT-4o failed to generate a valid suggestion.")
-        return None
-
-def add_stats_to_figure(fig, df, y_axis, chart_type):
-    """
-    Add relevant statistical annotations to the visualization
-    based on the chart type.
-    """
-    # Check if the y-axis column is numeric
-    if not pd.api.types.is_numeric_dtype(df[y_axis]):
-        st.warning(f"⚠️ Cannot compute statistics for non-numeric column: {y_axis}")
-        return fig
-
-    # Compute statistics for numeric data
-    min_val = df[y_axis].min()
-    max_val = df[y_axis].max()
-    avg_val = df[y_axis].mean()
-    median_val = df[y_axis].median()
-    std_dev_val = df[y_axis].std()
-
-    # Format the stats for display
-    stats_text = (
-        f"📊 **Statistics**\n\n"
-        f"- **Min:** ${min_val:,.2f}\n"
-        f"- **Max:** ${max_val:,.2f}\n"
-        f"- **Average:** ${avg_val:,.2f}\n"
-        f"- **Median:** ${median_val:,.2f}\n"
-        f"- **Std Dev:** ${std_dev_val:,.2f}"
-    )
-
-    # Apply stats only to relevant chart types
-    if chart_type in ["bar", "line"]:
-        # Add annotation box for bar and line charts
-        fig.add_annotation(
-            text=stats_text,
-            xref="paper", yref="paper",
-            x=1.02, y=1,
-            showarrow=False,
-            align="left",
-            font=dict(size=12, color="black"),
-            bordercolor="gray",
-            borderwidth=1,
-            bgcolor="rgba(255, 255, 255, 0.85)"
-        )
-
-        # Add horizontal reference lines
-        fig.add_hline(y=min_val, line_dash="dot", line_color="red", annotation_text="Min", annotation_position="bottom right")
-        fig.add_hline(y=median_val, line_dash="dash", line_color="orange", annotation_text="Median", annotation_position="top right")
-        fig.add_hline(y=avg_val, line_dash="dashdot", line_color="green", annotation_text="Avg", annotation_position="top right")
-        fig.add_hline(y=max_val, line_dash="dot", line_color="blue", annotation_text="Max", annotation_position="top right")
-
-    elif chart_type == "scatter":
-        # Add stats annotation only, no lines for scatter plots
-        fig.add_annotation(
-            text=stats_text,
-            xref="paper", yref="paper",
-            x=1.02, y=1,
-            showarrow=False,
-            align="left",
-            font=dict(size=12, color="black"),
-            bordercolor="gray",
-            borderwidth=1,
-            bgcolor="rgba(255, 255, 255, 0.85)"
-        )
-
-    elif chart_type == "box":
-        # Box plots inherently show distribution; no extra stats needed
-        pass
-
-    elif chart_type == "pie":
-        # Pie charts represent proportions, not suitable for stats
-        st.info("📊 Pie charts represent proportions. Additional stats are not applicable.")
-
-    elif chart_type == "heatmap":
-        # Heatmaps already reflect data intensity
-        st.info("📊 Heatmaps inherently reflect distribution. No additional stats added.")
-
-    else:
-        st.warning(f"⚠️ No statistical overlays applied for unsupported chart type: '{chart_type}'.")
-
-    return fig
-
-
-# Dynamically generate Plotly visualizations based on GPT-4o suggestions
-def generate_visualization(suggestion, df):
-    """
-    Generate a Plotly visualization based on GPT-4o's suggestion.
-    If the Y-axis is missing, infer it intelligently.
-    """
-    chart_type = suggestion.get("chart_type", "bar").lower()
-    x_axis = suggestion.get("x_axis")
-    y_axis = suggestion.get("y_axis")
-    group_by = suggestion.get("group_by")
-
-    # Step 1: Infer Y-axis if not provided
-    if not y_axis:
-        numeric_columns = df.select_dtypes(include='number').columns.tolist()
-
-        # Avoid using the same column for both axes
-        if x_axis in numeric_columns:
-            numeric_columns.remove(x_axis)
-
-        # Smart guess: prioritize salary or relevant metrics if available
-        priority_columns = ["salary_in_usd", "income", "earnings", "revenue"]
-        for col in priority_columns:
-            if col in numeric_columns:
-                y_axis = col
-                break
-
-        # Fallback to the first numeric column if no priority columns exist
-        if not y_axis and numeric_columns:
-            y_axis = numeric_columns[0]
-
-    # Step 2: Validate axes
-    if not x_axis or not y_axis:
-        st.warning("⚠️ Unable to determine appropriate columns for visualization.")
-        return None
-
-    # Step 3: Dynamically select the Plotly function
-    plotly_function = getattr(px, chart_type, None)
-    if not plotly_function:
-        st.warning(f"⚠️ Unsupported chart type '{chart_type}' suggested by GPT-4o.")
-        return None
-
-    # Step 4: Prepare dynamic plot arguments
-    plot_args = {"data_frame": df, "x": x_axis, "y": y_axis}
-    if group_by and group_by in df.columns:
-        plot_args["color"] = group_by
-
-    try:
-        # Step 5: Generate the visualization
-        fig = plotly_function(**plot_args)
-        fig.update_layout(
-            title=f"{chart_type.title()} Plot of {y_axis.replace('_', ' ').title()} by {x_axis.replace('_', ' ').title()}",
-            xaxis_title=x_axis.replace('_', ' ').title(),
-            yaxis_title=y_axis.replace('_', ' ').title(),
-        )
-
-        # Step 6: Apply statistics intelligently
-        fig = add_statistics_to_visualization(fig, df, y_axis, chart_type)
-
-        return fig
-
-    except Exception as e:
-        st.error(f"⚠️ Failed to generate visualization: {e}")
-        return None
-
-
-def generate_multiple_visualizations(suggestions, df):
-    """
-    Generates one or more visualizations based on GPT-4o's suggestions.
-    Handles both single and multiple suggestions.
-    """
-    visualizations = []
-
-    for suggestion in suggestions:
-        fig = generate_visualization(suggestion, df)
-        if fig:
-            # Apply chart-specific statistics
-            fig = add_stats_to_figure(fig, df, suggestion["y_axis"], suggestion["chart_type"])
-            visualizations.append(fig)
-
-    if not visualizations and suggestions:
-        st.warning("⚠️ No valid visualization found. Displaying the most relevant one.")
-        best_suggestion = suggestions[0]
-        fig = generate_visualization(best_suggestion, df)
-        fig = add_stats_to_figure(fig, df, best_suggestion["y_axis"], best_suggestion["chart_type"])
-        visualizations.append(fig)
-
-    return visualizations
-
-
-def handle_visualization_suggestions(suggestions, df):
-    """
-    Determines whether to generate a single or multiple visualizations.
-    """
-    visualizations = []
-
-    # If multiple suggestions, generate multiple plots
-    if isinstance(suggestions, list) and len(suggestions) > 1:
-        visualizations = generate_multiple_visualizations(suggestions, df)
-
-    # If only one suggestion, generate a single plot
-    elif isinstance(suggestions, dict) or (isinstance(suggestions, list) and len(suggestions) == 1):
-        suggestion = suggestions[0] if isinstance(suggestions, list) else suggestions
-        fig = generate_visualization(suggestion, df)
-        if fig:
-            visualizations.append(fig)
-
-    # Handle cases when no visualization could be generated
-    if not visualizations:
-        st.warning("⚠️ Unable to generate any visualization based on the suggestion.")
-
-    # Display all generated visualizations
-    for fig in visualizations:
-        st.plotly_chart(fig, use_container_width=True)
-
-
-
-# Function to create TXT file
-def create_text_report_with_viz_temp(report, conclusion, visualizations):
-    content = f"### Analysis Report\n\n{report}\n\n### Visualizations\n"
-
-    for i, fig in enumerate(visualizations, start=1):
-        fig_title = fig.layout.title.text if fig.layout.title.text else f"Visualization {i}"
-        x_axis = fig.layout.xaxis.title.text if fig.layout.xaxis.title.text else "X-axis"
-        y_axis = fig.layout.yaxis.title.text if fig.layout.yaxis.title.text else "Y-axis"
-
-        content += f"\n{i}. {fig_title}\n"
-        content += f" - X-axis: {x_axis}\n"
-        content += f" - Y-axis: {y_axis}\n"
-
-        if fig.data:
-            trace_types = set(trace.type for trace in fig.data)
-            content += f" - Chart Type(s): {', '.join(trace_types)}\n"
-        else:
-            content += " - No data available in this visualization.\n"
-
-    content += f"\n\n\n{conclusion}"
-
-    with tempfile.NamedTemporaryFile(delete=False, suffix=".txt", mode='w', encoding='utf-8') as temp_txt:
-        temp_txt.write(content)
-        return temp_txt.name
-
-
-
-# Function to create PDF with report text and visualizations
-def create_pdf_report_with_viz(report, conclusion, visualizations):
-    pdf = FPDF()
-    pdf.set_auto_page_break(auto=True, margin=15)
-    pdf.add_page()
-    pdf.set_font("Arial", size=12)
-
-    # Title
-    pdf.set_font("Arial", style="B", size=18)
-    pdf.cell(0, 10, "📊 Analysis Report", ln=True, align="C")
-    pdf.ln(10)
-
-    # Report Content
-    pdf.set_font("Arial", style="B", size=14)
-    pdf.cell(0, 10, "Analysis", ln=True)
-    pdf.set_font("Arial", size=12)
-    pdf.multi_cell(0, 10, report)
-
-    pdf.ln(10)
-    pdf.set_font("Arial", style="B", size=14)
-    pdf.cell(0, 10, "Conclusion", ln=True)
-    pdf.set_font("Arial", size=12)
-    pdf.multi_cell(0, 10, conclusion)
-
-    # Add Visualizations
-    pdf.add_page()
-    pdf.set_font("Arial", style="B", size=16)
-    pdf.cell(0, 10, "📈 Visualizations", ln=True)
-    pdf.ln(5)
-
-    with tempfile.TemporaryDirectory() as temp_dir:
-        for i, fig in enumerate(visualizations, start=1):
-            fig_title = fig.layout.title.text if fig.layout.title.text else f"Visualization {i}"
-            x_axis = fig.layout.xaxis.title.text if fig.layout.xaxis.title.text else "X-axis"
-            y_axis = fig.layout.yaxis.title.text if fig.layout.yaxis.title.text else "Y-axis"
-
-            # Save each visualization as a PNG image
-            img_path = os.path.join(temp_dir, f"viz_{i}.png")
-            fig.write_image(img_path)
-
-            # Insert Title and Description
-            pdf.set_font("Arial", style="B", size=14)
-            pdf.multi_cell(0, 10, f"{i}. {fig_title}")
-            pdf.set_font("Arial", size=12)
-            pdf.multi_cell(0, 10, f"X-axis: {x_axis} | Y-axis: {y_axis}")
-            pdf.ln(3)
-
-            # Embed Visualization
-            pdf.image(img_path, w=170)
-            pdf.ln(10)
-
-    # Save PDF
-    temp_pdf = tempfile.NamedTemporaryFile(delete=False, suffix=".pdf")
-    pdf.output(temp_pdf.name)
-
-    return temp_pdf
-
-def escape_markdown(text):
-    # Ensure text is a string
-    text = str(text)
-    # Escape Markdown characters: *, _, `, ~
-    escape_chars = r"(\*|_|`|~)"
-    return re.sub(escape_chars, r"\\\1", text)
 
 # SQL-RAG Analysis
 if st.session_state.df is not None:
@@ -427,7 +100,6 @@ if st.session_state.df is not None:
         """Validate the SQL query syntax and structure before execution."""
         return QuerySQLCheckerTool(db=db, llm=llm).invoke({"query": sql_query})
 
-    # Agents for SQL data extraction and analysis
     sql_dev = Agent(
         role="Senior Database Developer",
         goal="Extract data using optimized SQL queries.",
@@ -445,19 +117,11 @@ if st.session_state.df is not None:
 
     report_writer = Agent(
         role="Technical Report Writer",
-        goal="Write a structured report with Introduction and Key Insights. DO NOT include any Conclusion or Summary.",
-        backstory="Specializes in detailed analytical reports without conclusions.",
+        goal="Summarize the insights into a clear report.",
+        backstory="An expert in summarizing data insights into readable reports.",
         llm=llm,
     )
 
-    conclusion_writer = Agent(
-        role="Conclusion Specialist",
-        goal="Summarize findings into a clear and concise 3-5 line Conclusion highlighting only the most important insights.",
-        backstory="An expert in crafting impactful and clear conclusions.",
-        llm=llm,
-    )
-
-    # Define tasks for report and conclusion
     extract_data = Task(
         description="Extract data based on the query: {query}.",
         expected_output="Database results matching the query.",
@@ -466,100 +130,56 @@ if st.session_state.df is not None:
 
     analyze_data = Task(
         description="Analyze the extracted data for query: {query}.",
-        expected_output="Key Insights and Analysis without any Introduction or Conclusion.",
+        expected_output="Analysis text summarizing findings.",
         agent=data_analyst,
         context=[extract_data],
     )
 
     write_report = Task(
-        description="Write the analysis report with Introduction and Key Insights. DO NOT include any Conclusion or Summary.",
-        expected_output="Markdown-formatted report excluding Conclusion.",
+        description="Summarize the analysis into an executive report.",
+        expected_output="Markdown report of insights.",
        agent=report_writer,
         context=[analyze_data],
     )
 
-    write_conclusion = Task(
-        description="Summarize the key findings in 3-5 impactful lines, highlighting the maximum, minimum, and average salaries."
-        "Emphasize significant insights on salary distribution and influential compensation trends for strategic decision-making.",
-        expected_output="Markdown-formatted Conclusion section with key insights and statistics.",
-        agent=conclusion_writer,
-        context=[analyze_data],
-    )
-
-
-
-    # Separate Crews for report and conclusion
-    crew_report = Crew(
+    crew = Crew(
         agents=[sql_dev, data_analyst, report_writer],
         tasks=[extract_data, analyze_data, write_report],
         process=Process.sequential,
         verbose=True,
     )
 
-    crew_conclusion = Crew(
-        agents=[data_analyst, conclusion_writer],
-        tasks=[write_conclusion],
-        process=Process.sequential,
-        verbose=True,
-    )
-
-    # Tabs for Query Results and Visualizations
+    # UI: Tabs for Query Results and General Insights
     tab1, tab2 = st.tabs(["🔍 Query Insights + Viz", "📊 Full Data Viz"])
 
-    # Query Insights + Visualization
     with tab1:
         query = st.text_area("Enter Query:", value="Provide insights into the salary of a Principal Data Scientist.")
         if st.button("Submit Query"):
             with st.spinner("Processing query..."):
-                # Step 1: Generate the analysis report
-                report_inputs = {"query": query + " Provide detailed analysis but DO NOT include Conclusion."}
-                report_result = crew_report.kickoff(inputs=report_inputs)
-
-                # Step 2: Generate only the concise conclusion
-                conclusion_inputs = {"query": query + " Provide ONLY the most important insights in 3-5 concise lines."}
-                conclusion_result = crew_conclusion.kickoff(inputs=conclusion_inputs)
-
-                # Step 3: Display the report
-                #st.markdown("### Analysis Report:")
-                st.markdown(report_result if report_result else "⚠️ No Report Generated.")
-
-                # Step 4: Generate Visualizations
-
-
-                # Step 5: Insert Visual Insights
-                st.markdown("### Visual Insights")
+                inputs = {"query": query}
+                result = crew.kickoff(inputs=inputs)
+                st.markdown("### Analysis Report:")
+                st.markdown(result)
 
+                # Query-Specific Visualization
+                if "salary" in query.lower():
+                    fig = px.box(st.session_state.df, x="job_title", y="salary_in_usd", title="Salary Distribution by Job Title")
+                    st.plotly_chart(fig)
 
-                # Step 6: Display Concise Conclusion
-                #st.markdown("#### Conclusion")
-
-                safe_conclusion = escape_markdown(conclusion_result if conclusion_result else "⚠️ No Conclusion Generated.")
-                st.markdown(safe_conclusion)
-
-    # Full Data Visualization Tab
     with tab2:
         st.subheader("📊 Comprehensive Data Visualizations")
-
+
         fig1 = px.histogram(st.session_state.df, x="job_title", title="Job Title Frequency")
         st.plotly_chart(fig1)
 
-        fig2 = px.bar(
-            st.session_state.df.groupby("experience_level")["salary_in_usd"].mean().reset_index(),
-            x="experience_level", y="salary_in_usd",
-            title="Average Salary by Experience Level"
-        )
+        fig2 = px.bar(st.session_state.df.groupby("experience_level")["salary_in_usd"].mean().reset_index(),
+                      x="experience_level", y="salary_in_usd", title="Average Salary by Experience Level")
         st.plotly_chart(fig2)
 
-        fig3 = px.box(st.session_state.df, x="employment_type", y="salary_in_usd",
-                      title="Salary Distribution by Employment Type")
-        st.plotly_chart(fig3)
-
     temp_dir.cleanup()
 else:
     st.info("Please load a dataset to proceed.")
 
-
-# Sidebar Reference
 with st.sidebar:
     st.header("📚 Reference:")
     st.markdown("[SQL Agents w CrewAI & Llama 3 - Plaban Nayak](https://github.com/plaban1981/Agents/blob/main/SQL_Agents_with_CrewAI_and_Llama_3.ipynb)")
 