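"""Streamlit app: SQL-RAG over tabular data using CrewAI agents, LangChain SQL tools, and a Groq-hosted LLM."""
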
import streamlit as st
import pandas as pd
import sqlite3
import os
import json
from pathlib import Path
from datetime import datetime, timezone
from crewai import Agent, Crew, Process, Task
from crewai_tools import tool
from langchain_groq import ChatGroq
from langchain.schema.output import LLMResult
from langchain_core.callbacks.base import BaseCallbackHandler
from langchain_community.tools.sql_database.tool import (
    InfoSQLDatabaseTool,
    ListSQLDatabaseTool,
    QuerySQLCheckerTool,
    QuerySQLDataBaseTool,
)
from langchain_community.utilities.sql_database import SQLDatabase
from datasets import load_dataset
import tempfile

# Environment setup
os.environ["GROQ_API_KEY"] = st.secrets.get("GROQ_API_KEY", "")

# LLM Callback Logger
class LLMCallbackHandler(BaseCallbackHandler):
    def __init__(self, log_path: Path):
        self.log_path = log_path

    def on_llm_start(self, serialized, prompts, **kwargs):
        # Log the outgoing prompt as a JSON line
        with self.log_path.open("a", encoding="utf-8") as file:
            file.write(json.dumps({"event": "llm_start", "text": prompts[0], "timestamp": datetime.now().isoformat()}) + "\n")

    def on_llm_end(self, response: LLMResult, **kwargs):
        # Log the model's final generation as a JSON line
        generation = response.generations[-1][-1].message.content
        with self.log_path.open("a", encoding="utf-8") as file:
            file.write(json.dumps({"event": "llm_end", "text": generation, "timestamp": datetime.now().isoformat()}) + "\n")

# Initialize the LLM
llm = ChatGroq(
    temperature=0,
    model_name="mixtral-8x7b-32768",
    callbacks=[LLMCallbackHandler(Path("prompts.jsonl"))],
)
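
# Streamlit UI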
st.title("SQL-RAG Using CrewAI πŸš€")
st.write("Analyze datasets using natural language queries powered by SQL and CrewAI.")

# Input Options
input_option = st.radio("Select Dataset Input:", ["Use Hugging Face Dataset", "Upload CSV File"])

# Keep the loaded DataFrame in session state so it survives Streamlit reruns
df = st.session_state.get("df")

if input_option == "Use Hugging Face Dataset":
    dataset_name = st.text_input("Enter Hugging Face Dataset Name:", value="Einstellung/demo-salaries")
    if st.button("Load Dataset"):
        try:
            with st.spinner("Loading Hugging Face dataset..."):
                dataset = load_dataset(dataset_name, split="train")
                df = pd.DataFrame(dataset)
                st.session_state.df = df
                st.success(f"Dataset '{dataset_name}' loaded successfully!")
                st.dataframe(df.head())
        except Exception as e:
            st.error(f"Error loading dataset: {e}")
else:
    uploaded_file = st.file_uploader("Upload CSV File:", type=["csv"])
    if uploaded_file:
        df = pd.read_csv(uploaded_file)
        st.session_state.df = df
        st.success("File uploaded successfully!")
        st.dataframe(df.head())

# SQL-RAG Analysis
if df is not None:
    # Materialize the DataFrame as a temporary SQLite database for the SQL tools
    temp_dir = tempfile.TemporaryDirectory()
    db_path = os.path.join(temp_dir.name, "data.db")
    connection = sqlite3.connect(db_path)
    df.to_sql("salaries", connection, if_exists="replace", index=False)
    db = SQLDatabase.from_uri(f"sqlite:///{db_path}")

    # CrewAI tool wrappers around LangChain's SQL database tools
    @tool("list_tables")
    def list_tables() -> str:
        """List all tables in the SQLite database."""
        return ListSQLDatabaseTool(db=db).invoke("")

    @tool("tables_schema")
    def tables_schema(tables: str) -> str:
        """
        Get the schema and sample rows for specific tables in the database.
        Input: Comma-separated table names.
        Example: 'salaries'
        """
        return InfoSQLDatabaseTool(db=db).invoke(tables)

    @tool("execute_sql")
    def execute_sql(sql_query: str) -> str:
        """
        Execute a valid SQL query on the database and return the results.
        Input: A SQL query string.
        Example: 'SELECT * FROM salaries LIMIT 5;'
        """
        return QuerySQLDataBaseTool(db=db).invoke(sql_query)

    @tool("check_sql")
    def check_sql(sql_query: str) -> str:
        """
        Check the validity of a SQL query before execution.
        Input: A SQL query string.
        Example: 'SELECT salary FROM salaries WHERE salary > 10000;'
        """
        return QuerySQLCheckerTool(db=db, llm=llm).invoke({"query": sql_query})

    # Agents
    sql_dev = Agent(
        role="Database Developer",
        goal="Extract relevant data by executing SQL queries.",
        backstory="A careful SQL engineer who inspects schemas before writing queries.",
        llm=llm,
        tools=[list_tables, tables_schema, execute_sql, check_sql],
    )
    data_analyst = Agent(
        role="Data Analyst",
        goal="Analyze the extracted data and generate detailed insights.",
        backstory="An analyst who turns raw query results into clear findings.",
        llm=llm,
    )
    report_writer = Agent(
        role="Report Writer",
        goal="Summarize the analysis into an executive report.",
        backstory="A writer who condenses analysis into concise executive summaries.",
        llm=llm,
    )

    # Tasks
    extract_data = Task(
        description="Extract data for the query: {query}.",
        expected_output="Database query results.",
        agent=sql_dev,
    )
    analyze_data = Task(
        description="Analyze the query results for: {query}.",
        expected_output="Analysis report.",
        agent=data_analyst,
        context=[extract_data],
    )
    write_report = Task(
        description="Summarize the analysis into an executive summary.",
        expected_output="Markdown-formatted report.",
        agent=report_writer,
        context=[analyze_data],
    )
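
    # Assemble the crew: SQL developer -> data analyst -> report writer, run sequentially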
    crew = Crew(
        agents=[sql_dev, data_analyst, report_writer],
        tasks=[extract_data, analyze_data, write_report],
        process=Process.sequential,
        verbose=True,
    )
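
    # Natural-language query input and CrewAI kickoff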
    query = st.text_area("Enter Query:", placeholder="e.g., 'What is the average salary by experience level?'")
    if st.button("Submit Query"):
        with st.spinner("Processing your query with CrewAI..."):
            inputs = {"query": query}
            result = crew.kickoff(inputs=inputs)
        st.markdown("### Analysis Report:")
        st.markdown(result)

    # Remove the temporary SQLite database once the run is finished
    temp_dir.cleanup()
else:
    st.info("Load a dataset to proceed.")