# Hugging Face Space: parallel sentiment analysis demo (Streamlit app).
# (Replaced "Spaces: / Running / Running" page-scrape residue that made the file invalid Python.)
import os

import nltk
import requests
import streamlit as st
from dotenv import load_dotenv
from fuzzywuzzy import fuzz
from nltk.corpus import stopwords
from openai import OpenAI
from rake_nltk import Rake
from transformers import pipeline
# Load environment variables (expects HFSecret to hold a Hugging Face API token).
load_dotenv()

# OpenAI-compatible client pointed at the Hugging Face Inference API;
# used below to query the Llama 3 chat model.
client = OpenAI(
    base_url="https://api-inference.huggingface.co/v1",
    api_key=os.environ.get('HFSecret')  # Hugging Face API token from the environment
)

# Model repository served through the inference endpoint.
repo_id = "meta-llama/Meta-Llama-3-8B-Instruct"
st.title("Parallel Sentiment Analysis: Transformers vs. Llama 3")

# Preset sample texts the user can analyze without typing anything.
# ("Apprecitation" typo fixed here AND in the matching `urls` key so the
# lookup in fetch_text_content still resolves.)
options = ['None', 'Appreciation Letter', 'Regret Letter', 'Kindness Tale',
           'Lost Melody Tale', 'Twitter Example 1', 'Twitter Example 2']

# Dropdown to choose one of the presets.
selected_option = st.selectbox("Select a preset option", options)

# Raw-text URLs backing each preset option ('None' intentionally absent).
urls = {
    "Appreciation Letter": "https://raw.githubusercontent.com/peteciank/public_files/main/Transformers/Appreciation_Letter.txt",
    "Regret Letter": "https://raw.githubusercontent.com/peteciank/public_files/main/Transformers/Regret_Letter.txt",
    "Kindness Tale": "https://raw.githubusercontent.com/peteciank/public_files/main/Transformers/Kindness_Tale.txt",
    "Lost Melody Tale": "https://raw.githubusercontent.com/peteciank/public_files/main/Transformers/Lost_Melody_Tale.txt",
    "Twitter Example 1": "https://raw.githubusercontent.com/peteciank/public_files/main/Transformers/Twitter_Example_1.txt",
    "Twitter Example 2": "https://raw.githubusercontent.com/peteciank/public_files/main/Transformers/Twitter_Example_2.txt"
}
# Function to fetch text content based on selected option
def fetch_text_content(selected_option):
    """Return the remote text for *selected_option*, or '' when unavailable.

    Args:
        selected_option: Label chosen in the preset dropdown; looked up in
            the module-level ``urls`` mapping ('None' and unknown labels
            yield '').

    Returns:
        The fetched text body, or '' if the option has no URL or the
        request fails.
    """
    url = urls.get(selected_option)
    if not url:
        return ""
    try:
        # Timeout so a slow/unreachable host cannot hang the Streamlit UI.
        response = requests.get(url, timeout=10)
        response.raise_for_status()
        return response.text
    except requests.RequestException:
        # Degrade gracefully: the user can still paste text manually.
        return ""
# Text for the selected preset (empty string for 'None' / fetch failures).
jd = fetch_text_content(selected_option)

# NLTK data needed by RAKE keyword extraction (tokenizer + stopword list).
# quiet=True keeps the download progress out of the app logs.
nltk.download('punkt', quiet=True)
nltk.download('stopwords', quiet=True)

# Transformer sentiment-analysis pipeline (library-default model).
pipe_sent = pipeline('sentiment-analysis')
# Function to extract keywords | |
# Function to extract keywords
def extract_keywords(text):
    """Extract up to 10 ranked keyword phrases from *text* using RAKE.

    Phrases that are exactly a single English stopword are dropped, and
    near-duplicate phrases (fuzzywuzzy ratio > 70 against an already-kept
    phrase) are merged, keeping the longest variant.

    Args:
        text: Free-form text to analyze.

    Returns:
        List of ``(score, phrase)`` tuples, highest score first, at most 10.
    """
    rake = Rake()
    rake.extract_keywords_from_text(text)
    phrases_with_scores = rake.get_ranked_phrases_with_scores()

    stop_words = set(stopwords.words('english'))
    # NOTE: this only filters phrases that ARE a stopword verbatim; RAKE
    # already strips stopwords from inside phrases, so that is sufficient.
    keywords = [(score, phrase) for score, phrase in phrases_with_scores
                if phrase.lower() not in stop_words]
    keywords.sort(key=lambda item: item[0], reverse=True)

    unique_keywords = []
    seen_phrases = set()
    for score, phrase in keywords:
        # Bug fix: skip exact repeats outright instead of re-running the
        # fuzzy merge on them (the original appended a merged entry again).
        if phrase in seen_phrases:
            continue
        similar = [seen for seen in seen_phrases if fuzz.ratio(phrase, seen) > 70]
        # Merge with near-duplicates by keeping the longest wording.
        merged = max([phrase] + similar, key=len) if similar else phrase
        unique_keywords.append((score, merged))
        seen_phrases.add(phrase)
    return unique_keywords[:10]
# Text area pre-filled with the selected preset; the user may edit freely.
text = st.text_area('Enter the text to analyze', jd)

if st.button("Start Analysis"):
    col1, col2 = st.columns(2)

    # --- Transformers pipeline (left column) ---
    with col1:
        st.header("Transformers Model")
        with st.spinner("Analyzing with Transformers..."):
            out_sentiment = pipe_sent(text)
            sentiment_score = out_sentiment[0]['score']
            sentiment_label = out_sentiment[0]['label']
            # NOTE(review): the original emoji literals were mojibake ('๐');
            # assuming happy/sad faces were intended — confirm with the author.
            sentiment_emoji = '😀' if sentiment_label == 'POSITIVE' else '😞'
            st.write(f"Sentiment Score: {sentiment_score}, Sentiment Label: {sentiment_label.capitalize()} {sentiment_emoji}")

            st.subheader("Keywords")
            keywords = extract_keywords(text)
            st.write([keyword[1] for keyword in keywords])

    # --- Llama 3 via the Hugging Face Inference API (right column) ---
    with col2:
        st.header("Llama 3 Model")
        with st.spinner("Analyzing with Llama 3..."):
            try:
                stream = client.chat.completions.create(
                    model=repo_id,
                    messages=[{"role": "user", "content": text}],
                    temperature=0.5,
                    stream=True,
                    max_tokens=3000
                )
                # Bug fix: chat-completion stream chunks are objects, not
                # dicts — the incremental text is chunk.choices[0].delta.content
                # (None on role-only/final chunks, hence the `or ''`).
                # The original chunk['choices'][0]['text'] raised TypeError.
                response = ''.join(
                    (chunk.choices[0].delta.content or '') for chunk in stream
                )
                st.write(response)
            except Exception as e:
                # Surface the underlying error so failures are diagnosable
                # (the original message discarded the exception entirely).
                st.error(f"Error occurred while fetching response from Llama 3: {e}")