# Source: Hugging Face Space (siddhartharya) — app.py, commit bc40ada, 9.37 kB.
# (Original web-viewer header text removed so this file is valid Python.)
import gradio as gr
import requests
import os
# Load API keys from environment variables so no secrets are hard-coded.
# NOTE: os.getenv returns None when a variable is unset; the fetch methods
# below will then send "Bearer None" and fail with a non-200 status, which
# each caller handles by falling back to default placeholder data.
proxycurl_api_key = os.getenv("PROXYCURL_API_KEY") # Proxycurl API key
groq_api_key = os.getenv("GROQ_CLOUD_API_KEY") # Groq Cloud API key
firecrawl_api_key = os.getenv("FIRECRAWL_API_KEY") # Firecrawl API key
class EmailAgent:
    """ReAct-style agent that gathers candidate and company data, then drafts a
    job-application email with the Groq Cloud LLM.

    Pipeline (see ``run``): reason about the available inputs with the LLM,
    fetch the candidate's LinkedIn profile via Proxycurl, fetch company info
    via Firecrawl, then generate the final email. Every external call degrades
    gracefully to default placeholder data on failure.
    """

    # Seconds to wait on any external HTTP call before giving up.
    # The original code had no timeout, so a hung API stalled the app forever.
    REQUEST_TIMEOUT = 30

    def __init__(self, linkedin_url, company_name, role, word_limit, user_name, email, phone, linkedin):
        """Store user inputs; no I/O happens here.

        Args:
            linkedin_url: Candidate's LinkedIn profile URL (scraped via Proxycurl).
            company_name: Target company name (used to guess its website).
            role: Role being applied for.
            word_limit: Maximum word count for the generated email.
            user_name, email, phone, linkedin: Signature details for the email.
        """
        self.linkedin_url = linkedin_url
        self.company_name = company_name
        self.role = role
        self.word_limit = word_limit
        self.user_name = user_name
        self.email = email
        self.phone = phone
        self.linkedin = linkedin
        self.bio = None              # candidate summary text, filled by fetch_linkedin_data
        self.skills = []             # list of skill strings
        self.experiences = []        # Proxycurl dicts ({'title': ...}) OR plain-string fallbacks
        self.company_info = None     # filled by fetch_company_info_with_firecrawl
        self.role_description = None # never populated here; kept for interface compatibility

    def _experience_titles(self):
        """Return experience entries as a list of title strings.

        Tolerates both shapes that ``self.experiences`` can hold: Proxycurl
        dicts (title extracted via .get, so a missing key yields "") and the
        plain-string fallback entries. The original code did exp['title']
        unconditionally, which raised TypeError whenever the string fallback
        was in effect and KeyError when Proxycurl omitted 'title'.
        """
        titles = []
        for exp in self.experiences:
            if isinstance(exp, dict):
                titles.append(str(exp.get("title", "")))
            else:
                titles.append(str(exp))
        return titles

    def _apply_default_profile(self):
        """Fill bio/skills/experiences with generic placeholder data."""
        self.bio = "A professional with diverse experience."
        self.skills = ["Adaptable", "Hardworking"]
        self.experiences = ["Worked across various industries"]

    # Use the LLM to reason and reflect on the provided data
    def reason_with_llm(self):
        """Ask the LLM to evaluate data completeness and suggest next steps.

        Returns the LLM's reflection text, or an error string on failure.
        """
        print("Reasoning: Using LLM to reason about available data...")
        # Reasoning prompt that evaluates the current data and reflects on next actions.
        reasoning_prompt = f"""
        You are a reasoning agent tasked with generating a job application email. Here's what we have:
        1. Candidate's LinkedIn profile URL: {self.linkedin_url}
        2. Company Name: {self.company_name}
        3. Role: {self.role}
        4. Word Limit: {self.word_limit}
        5. Candidate's Name: {self.user_name}
        6. Candidate's Email: {self.email}
        7. Candidate's Phone: {self.phone}
        8. Candidate's LinkedIn: {self.linkedin}
        Candidate's Bio: {self.bio}
        Candidate's Skills: {', '.join(self.skills)}
        Candidate's Experiences: {', '.join(self._experience_titles())}
        Company Information: {self.company_info}
        Role Description: {self.role_description}
        Evaluate the completeness of the data. If some key data is missing, determine whether we should:
        - Scrape for more data (e.g., company info, role descriptions).
        - Proceed with the available information and generate the email using default logic.
        Reflect on whether we need more data or if the current information is sufficient to proceed.
        """
        reasoning_output = self._chat_completion(reasoning_prompt)
        if reasoning_output is None:
            return "Error: Unable to complete reasoning."
        print("LLM Reasoning Output:", reasoning_output)
        return reasoning_output

    def _chat_completion(self, prompt):
        """POST a single-turn chat prompt to Groq Cloud; return the reply text.

        Returns None on any HTTP error, non-200 status, or unexpected payload,
        after printing a diagnostic (matches the original error-path behavior).
        """
        url = "https://api.groq.com/openai/v1/chat/completions"
        headers = {
            "Authorization": f"Bearer {groq_api_key}",
            "Content-Type": "application/json",
        }
        data = {
            "messages": [{"role": "user", "content": prompt}],
            "model": "llama3-8b-8192"
        }
        try:
            response = requests.post(url, headers=headers, json=data,
                                     timeout=self.REQUEST_TIMEOUT)
        except requests.RequestException as exc:
            print(f"Error: request to Groq Cloud failed: {exc}")
            return None
        if response.status_code != 200:
            print(f"Error: {response.status_code}, {response.text}")
            return None
        try:
            return response.json()["choices"][0]["message"]["content"].strip()
        except (KeyError, IndexError, ValueError) as exc:
            print(f"Error: unexpected Groq response shape: {exc}")
            return None

    # Action: Fetch LinkedIn data via Proxycurl (acting based on reasoning)
    def fetch_linkedin_data(self):
        """Populate bio/skills/experiences from Proxycurl, or defaults.

        Uses the placeholder profile when no URL was given or when the
        Proxycurl call fails for any reason (bad status, network error).
        """
        if not self.linkedin_url:
            print("Action: No LinkedIn URL provided, using default bio.")
            self._apply_default_profile()
            return
        print("Action: Fetching LinkedIn data via Proxycurl.")
        headers = {"Authorization": f"Bearer {proxycurl_api_key}"}
        url = f"https://nubela.co/proxycurl/api/v2/linkedin?url={self.linkedin_url}"
        try:
            response = requests.get(url, headers=headers,
                                    timeout=self.REQUEST_TIMEOUT)
        except requests.RequestException as exc:
            print(f"Error: Proxycurl request failed ({exc}). Using default bio.")
            self._apply_default_profile()
            return
        if response.status_code == 200:
            data = response.json()
            self.bio = data.get("summary", "No bio available")
            self.skills = data.get("skills", [])
            self.experiences = data.get("experiences", [])
        else:
            print("Error: Unable to fetch LinkedIn profile. Using default bio.")
            self._apply_default_profile()

    # Action: Fetch company information via Firecrawl API
    def fetch_company_info_with_firecrawl(self):
        """Populate company_info via Firecrawl, or a generic default.

        The company website is guessed as https://<company_name>.com — a
        heuristic that fails for multi-word names; Firecrawl errors then fall
        through to the default description.
        """
        if not self.company_name:
            print("Action: No company name provided, using default company info.")
            self.company_info = "A leading company in its field."
            return
        print(f"Action: Fetching company info for {self.company_name} using Firecrawl.")
        headers = {"Authorization": f"Bearer {firecrawl_api_key}"}
        firecrawl_url = "https://api.firecrawl.dev/v1/scrape"
        data = {
            "url": f"https://{self.company_name}.com",
            "patterns": ["description", "about", "careers", "company overview"]
        }
        try:
            response = requests.post(firecrawl_url, json=data, headers=headers,
                                     timeout=self.REQUEST_TIMEOUT)
        except requests.RequestException as exc:
            print(f"Error: Firecrawl request failed ({exc}). Using default info.")
            self.company_info = "A leading company in its field."
            return
        if response.status_code == 200:
            firecrawl_data = response.json()
            self.company_info = firecrawl_data.get("description", "No detailed company info available.")
            print(f"Company info fetched: {self.company_info}")
        else:
            print("Error: Unable to fetch company info via Firecrawl. Using default info.")
            self.company_info = "A leading company in its field."

    # Final Action: Generate the email using Groq Cloud LLM based on gathered data
    def generate_email(self):
        """Generate the application email text from all gathered data."""
        print("Action: Generating the email with the gathered information.")
        # Mention the LinkedIn profile in the body only when one was provided.
        linkedin_text = f"Please find my LinkedIn profile at {self.linkedin}" if self.linkedin else ""
        prompt = f"""
        Write a professional email applying for the {self.role} position at {self.company_name}.
        Use the following information:
        - The candidate's LinkedIn bio: {self.bio}.
        - The candidate's most relevant skills: {', '.join(self.skills)}.
        - The candidate's professional experience: {', '.join(self._experience_titles())}.
        Please research the company's public information. If no company-specific information is available, use general knowledge about the company's industry.
        Tailor the email dynamically to the role of **{self.role}** at {self.company_name}, aligning the candidate's skills and experiences with the expected responsibilities of the role and the company's operations.
        {linkedin_text}
        Remove references to job posting sources unless provided. Use the LinkedIn URL for the candidate and do not include placeholders.
        End the email with this signature:
        Best regards,
        {self.user_name}
        Email: {self.email}
        Phone: {self.phone}
        LinkedIn: {self.linkedin}
        The email should not exceed {self.word_limit} words.
        """
        email_text = self._chat_completion(prompt)
        if email_text is None:
            return "Error generating email. Please check your API key or try again later."
        return email_text

    # Main loop following ReAct pattern
    def run(self):
        """Execute the full ReAct loop: reason, act (fetch), generate."""
        reasoning_output = self.reason_with_llm()  # LLM performs reasoning and reflection
        print("LLM Reflection:", reasoning_output)
        self.fetch_linkedin_data()                 # Fetch LinkedIn data
        self.fetch_company_info_with_firecrawl()   # Fetch company data using Firecrawl
        return self.generate_email()               # Final action: generate email
# Define the Gradio interface and the main app logic
def gradio_ui():
# Input fields
name_input = gr.Textbox(label="Your Name", placeholder="Enter your name")
company_input = gr.Textbox(label="Company Name or URL", placeholder="Enter the company name or website URL")
role_input = gr.Textbox(label="Role Applying For", placeholder="Enter the role you are applying for")
email_input = gr.Textbox(label="Your Email Address", placeholder="Enter your email address")
phone_input = gr.Textbox(label="Your Phone Number", placeholder="Enter your phone number")
linkedin_input = gr.Textbox(label="Your LinkedIn URL", placeholder="Enter your LinkedIn profile URL")
word_limit_slider = gr.Slider(minimum=50, maximum=300, step=10, label="Email Word Limit", value=150)
# Output field
email_output = gr.Textbox(label="Generated Email", placeholder="Your generated email will appear here", lines=10)