import streamlit as st
import pandas as pd
import sqlite3
import os
import json
from pathlib import Path
from datetime import datetime, timezone
from crewai import Agent, Crew, Process, Task
from crewai.tools import tool
from langchain_groq import ChatGroq
from langchain.schema.output import LLMResult
from langchain_core.callbacks.base import BaseCallbackHandler
from langchain_community.tools.sql_database.tool import (
    InfoSQLDatabaseTool,
    ListSQLDatabaseTool,
    QuerySQLCheckerTool,
    QuerySQLDataBaseTool,
)
from langchain_community.utilities.sql_database import SQLDatabase
from datasets import load_dataset
import tempfile
# API Key
os.environ["GROQ_API_KEY"] = st.secrets.get("GROQ_API_KEY", "")

# Callback handler that logs every LLM prompt and response to a JSONL file
class LLMCallbackHandler(BaseCallbackHandler):
    def __init__(self, log_path: Path):
        self.log_path = log_path

    def on_llm_start(self, serialized, prompts, **kwargs):
        with self.log_path.open("a", encoding="utf-8") as file:
            file.write(json.dumps({"event": "llm_start", "text": prompts[0], "timestamp": datetime.now().isoformat()}) + "\n")

    def on_llm_end(self, response: LLMResult, **kwargs):
        generation = response.generations[-1][-1].message.content
        with self.log_path.open("a", encoding="utf-8") as file:
            file.write(json.dumps({"event": "llm_end", "text": generation, "timestamp": datetime.now().isoformat()}) + "\n")
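
# Initialize the LLM (temperature 0 for deterministic SQL; prompts/responses are logged to prompts.jsonl)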
llm = ChatGroq(
    temperature=0,
    model_name="groq/llama-3.3-70b-versatile",
    max_tokens=500,
    callbacks=[LLMCallbackHandler(Path("prompts.jsonl"))],
)

st.title("SQL-RAG Using CrewAI")
st.write("Analyze datasets using natural language queries powered by SQL and CrewAI.")
# Initialize session state for data persistence
if "df" not in st.session_state:
st.session_state.df = None
# Dataset Input
input_option = st.radio("Select Dataset Input:", ["Use Hugging Face Dataset", "Upload CSV File"])
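
# Load the chosen source into st.session_state.df so it persists across Streamlit reruns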
if input_option == "Use Hugging Face Dataset":
dataset_name = st.text_input("Enter Hugging Face Dataset Name:", value="Einstellung/demo-salaries")
if st.button("Load Dataset"):
try:
with st.spinner("Loading dataset..."):
dataset = load_dataset(dataset_name, split="train")
st.session_state.df = pd.DataFrame(dataset)
st.success(f"Dataset '{dataset_name}' loaded successfully!")
st.dataframe(st.session_state.df.head())
except Exception as e:
st.error(f"Error: {e}")
elif input_option == "Upload CSV File":
uploaded_file = st.file_uploader("Upload CSV File:", type=["csv"])
if uploaded_file:
st.session_state.df = pd.read_csv(uploaded_file)
st.success("File uploaded successfully!")
st.dataframe(st.session_state.df.head())

# SQL-RAG Analysis
if st.session_state.df is not None:
    # Materialize the loaded DataFrame as a "salaries" table in a temporary SQLite database
    temp_dir = tempfile.TemporaryDirectory()
    db_path = os.path.join(temp_dir.name, "data.db")
    connection = sqlite3.connect(db_path)
    st.session_state.df.to_sql("salaries", connection, if_exists="replace", index=False)

    db = SQLDatabase.from_uri(f"sqlite:///{db_path}")
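
    # CrewAI tools that expose LangChain's SQL database utilities to the agents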
@tool("list_tables")
def list_tables() -> str:
"""List all tables in the database."""
return ListSQLDatabaseTool(db=db).invoke("")
@tool("tables_schema")
def tables_schema(tables: str) -> str:
"""Get schema and sample rows for given tables."""
return InfoSQLDatabaseTool(db=db).invoke(tables)
@tool("execute_sql")
def execute_sql(sql_query: str) -> str:
"""Execute a SQL query against the database."""
return QuerySQLDataBaseTool(db=db).invoke(sql_query)
@tool("check_sql")
def check_sql(sql_query: str) -> str:
"""Check the validity of a SQL query."""
return QuerySQLCheckerTool(db=db, llm=llm).invoke({"query": sql_query})
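
    # Agents: a SQL developer to query the database, an analyst to interpret results, and a report writer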
    sql_dev = Agent(
        role="Senior Database Developer",
        goal="Extract data using optimized SQL queries.",
        backstory="An expert in writing optimized SQL queries for complex databases.",
        llm=llm,
        tools=[list_tables, tables_schema, execute_sql, check_sql],
    )

    data_analyst = Agent(
        role="Senior Data Analyst",
        goal="Analyze the data and produce insights.",
        backstory="A seasoned analyst who identifies trends and patterns in datasets.",
        llm=llm,
    )

    report_writer = Agent(
        role="Technical Report Writer",
        goal="Summarize the insights into a clear report.",
        backstory="An expert in summarizing data insights into readable reports.",
        llm=llm,
    )
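
    # Tasks: extraction -> analysis -> report, each later task receiving the previous one as context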
    extract_data = Task(
        description="Extract data based on the query: {query}.",
        expected_output="Database results matching the query.",
        agent=sql_dev,
    )

    analyze_data = Task(
        description="Analyze the extracted data for query: {query}.",
        expected_output="Analysis text summarizing findings.",
        agent=data_analyst,
        context=[extract_data],
    )

    write_report = Task(
        description="Summarize the analysis into an executive report.",
        expected_output="Markdown report of insights.",
        agent=report_writer,
        context=[analyze_data],
    )
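
    # Sequential crew: tasks execute in order, passing their outputs downstream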
    crew = Crew(
        agents=[sql_dev, data_analyst, report_writer],
        tasks=[extract_data, analyze_data, write_report],
        process=Process.sequential,
        verbose=True,
    )
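
    # Natural-language query box; on submit the crew generates SQL, analyzes the results, and writes a report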
    query = st.text_area("Enter Query:", placeholder="e.g., 'What is the average salary for senior employees?'")
    if st.button("Submit Query"):
        with st.spinner("Processing query..."):
            inputs = {"query": query}
            result = crew.kickoff(inputs=inputs)
        st.markdown("### Analysis Report:")
        st.markdown(result)

    temp_dir.cleanup()
else:
    st.info("Please load a dataset to proceed.")