peterciank committed on
Commit
5634403
·
verified ·
1 Parent(s): ad533f4

Update pages/Comparision.py

Browse files
Files changed (1) hide show
  1. pages/Comparision.py +61 -78
pages/Comparision.py CHANGED
@@ -1,105 +1,88 @@
1
  import streamlit as st
2
- import requests
3
  import os
4
  from dotenv import load_dotenv
5
- from nltk.corpus import stopwords
6
- from fuzzywuzzy import fuzz
7
- from rake_nltk import Rake
8
- import nltk
9
- from openai import OpenAI
10
 
11
  # Load environment variables
12
  load_dotenv()
13
 
14
- # Download NLTK resources
15
- nltk.download('punkt')
16
- nltk.download('stopwords')
17
-
18
- # Initialize OpenAI client for Hugging Face Llama 3
19
  client = OpenAI(
20
  base_url="https://api-inference.huggingface.co/v1",
21
- api_key=os.environ.get('HFSecret') # Replace with your token
22
  )
23
 
24
- # Define the Llama 3 model repo ID
25
  repo_id = "meta-llama/Meta-Llama-3-8B-Instruct"
26
 
27
- # Function to fetch text content based on selected option
28
- def fetch_text_content(selected_option):
29
- url_mapping = {
30
- 'Apprecitation Letter': "https://raw.githubusercontent.com/peteciank/public_files/main/Transformers/Appreciation_Letter.txt",
31
- 'Regret Letter': "https://raw.githubusercontent.com/peteciank/public_files/main/Transformers/Regret_Letter.txt",
32
- 'Kindness Tale': "https://raw.githubusercontent.com/peteciank/public_files/main/Transformers/Kindness_Tale.txt",
33
- 'Lost Melody Tale': "https://raw.githubusercontent.com/peteciank/public_files/main/Transformers/Lost_Melody_Tale.txt",
34
- 'Twitter Example 1': "https://raw.githubusercontent.com/peteciank/public_files/main/Transformers/Twitter_Example_1.txt",
35
- 'Twitter Example 2': "https://raw.githubusercontent.com/peteciank/public_files/main/Transformers/Twitter_Example_2.txt"
36
- }
37
- return requests.get(url_mapping.get(selected_option, "")).text
38
-
39
- # Function to extract keywords
40
- def extract_keywords(text):
41
- r = Rake()
42
- r.extract_keywords_from_text(text)
43
- phrases_with_scores = r.get_ranked_phrases_with_scores()
44
- stop_words = set(stopwords.words('english'))
45
- keywords = [(score, phrase) for score, phrase in phrases_with_scores if phrase.lower() not in stop_words]
46
- keywords.sort(key=lambda x: x[0], reverse=True)
47
-
48
- unique_keywords = []
49
- seen_phrases = set()
50
- for score, phrase in keywords:
51
- if phrase not in seen_phrases:
52
- similar_phrases = [seen_phrase for seen_phrase in seen_phrases if fuzz.ratio(phrase, seen_phrase) > 70]
53
- merged_phrase = max([phrase] + similar_phrases, key=len) if similar_phrases else phrase
54
- unique_keywords.append((score, merged_phrase))
55
- seen_phrases.add(phrase)
56
- return unique_keywords[:10]
57
-
58
- # Function to interact with Llama 3 for analysis
59
- def llama3_analysis(text, task):
60
- prompt_mapping = {
61
- "sentiment": f"Analyze the sentiment of the following text: {text}",
62
- "summarization": f"Summarize the following text: {text}"
63
- }
64
-
65
- prompt = prompt_mapping[task]
66
-
67
- try:
68
- response = client.completions.create(
69
- model=repo_id,
70
- prompt=prompt,
71
- max_tokens=500,
72
- temperature=0.5
73
- )
74
- return response.choices[0].text.strip()
75
- except Exception as e:
76
- return f"Error: {str(e)}"
77
 
78
- # Streamlit App UI
79
- st.title("Sentiment Analysis & Summarization with Llama 3")
80
 
81
- # Dropdown menu to select the text source
82
- options = ['None', 'Apprecitation Letter', 'Regret Letter', 'Kindness Tale', 'Lost Melody Tale', 'Twitter Example 1', 'Twitter Example 2']
83
  selected_option = st.selectbox("Select a preset option", options)
84
 
85
- # Fetch the text based on selection
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
86
  jd = fetch_text_content(selected_option)
87
 
88
- # Text area for manual input or displaying fetched content
89
  text = st.text_area('Enter the text to analyze', jd)
90
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
91
  if st.button("Start Analysis"):
92
  with st.spinner("Analyzing Sentiment..."):
93
- sentiment_result = llama3_analysis(text, "sentiment")
94
- with st.expander("Sentiment Analysis - ✅ Completed", expanded=False):
95
- st.write(sentiment_result)
 
 
 
96
 
97
  with st.spinner("Summarizing..."):
98
- summary_result = llama3_analysis(text, "summarization")
99
- with st.expander("Summarization - ✅ Completed", expanded=False):
100
- st.write(summary_result)
 
 
 
101
 
102
  with st.spinner("Extracting Keywords..."):
103
- keywords = extract_keywords(text)
104
- with st.expander("Keywords Extraction - ✅ Completed", expanded=False):
105
- st.write([kw[1] for kw in keywords])
 
 
 
 
1
import os

import requests  # BUG FIX: this revision dropped `import requests`, but
                 # fetch_text_content below still calls requests.get — without
                 # this line the script dies with NameError on any preset fetch.
import streamlit as st
from dotenv import load_dotenv
from openai import OpenAI

# Load environment variables (expects HFSecret in .env or the process env)
load_dotenv()

# Initialize an OpenAI-compatible client pointed at the HuggingFace
# inference endpoint; api_key is None if HFSecret is unset.
client = OpenAI(
    base_url="https://api-inference.huggingface.co/v1",
    api_key=os.environ.get('HFSecret')  # Replace with your HuggingFace token
)

# Define the Llama 3 8B model
repo_id = "meta-llama/Meta-Llama-3-8B-Instruct"

# Title of the App
st.title("Text Analysis with Llama 3: Sentiment, Summarization, and Keyword Extraction")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
20
 
21
# Preset sample texts the user can load; 'None' leaves the input empty.
options = [
    'None',
    'Appreciation Letter',
    'Regret Letter',
    'Kindness Tale',
    'Lost Melody Tale',
    'Twitter Example 1',
    'Twitter Example 2',
]

# Let the user pick one of the presets from a dropdown.
selected_option = st.selectbox("Select a preset option", options)
26
 
27
# Map each preset option to the raw GitHub URL holding its sample text.
url_dict = {
    'Appreciation Letter': "https://raw.githubusercontent.com/peteciank/public_files/main/Transformers/Appreciation_Letter.txt",
    'Regret Letter': "https://raw.githubusercontent.com/peteciank/public_files/main/Transformers/Regret_Letter.txt",
    'Kindness Tale': "https://raw.githubusercontent.com/peteciank/public_files/main/Transformers/Kindness_Tale.txt",
    'Lost Melody Tale': "https://raw.githubusercontent.com/peteciank/public_files/main/Transformers/Lost_Melody_Tale.txt",
    'Twitter Example 1': "https://raw.githubusercontent.com/peteciank/public_files/main/Transformers/Twitter_Example_1.txt",
    'Twitter Example 2': "https://raw.githubusercontent.com/peteciank/public_files/main/Transformers/Twitter_Example_2.txt"
}

# Function to fetch text content
def fetch_text_content(option):
    """Return the sample text for *option*, or "" for unknown options.

    On HTTP failure (non-200 status, timeout, or connection error) the
    placeholder string "Error fetching the text" is returned instead of
    raising, so the Streamlit UI keeps working.
    """
    if option in url_dict:
        # Imported inside the branch: this revision removed the top-level
        # `import requests`, and the no-download path must not require it.
        import requests
        try:
            # Timeout added so a stalled GitHub fetch cannot hang the app.
            response = requests.get(url_dict[option], timeout=10)
        except requests.RequestException:
            return "Error fetching the text"
        return response.text if response.status_code == 200 else "Error fetching the text"
    return ""
43
+
44
# Fetch the selected preset's text ("" when 'None' is selected).
jd = fetch_text_content(selected_option)

# Editable text area pre-filled with the fetched content; the user may
# overwrite it with their own text before starting the analysis.
text = st.text_area('Enter the text to analyze', jd)
49
 
50
# Function to call Llama 3 for analysis
def call_llama_analysis(task, text):
    """Run one completion asking the hosted Llama 3 model to perform *task*.

    Parameters
    ----------
    task : str
        Human-readable task name interpolated into the prompt, e.g.
        "sentiment analysis", "summarization", "keyword extraction".
    text : str
        The input text to analyze.

    Returns
    -------
    str
        The raw completion text from the model.

    Raises
    ------
    Exception
        Any client/API error propagates to the caller (the UI catches it).
    """
    prompt = f"Perform {task} on the following text:\n\n{text}"

    # Call Llama 3 for the task
    response = client.completions.create(
        model=repo_id,
        prompt=prompt,
        max_tokens=3000,
        temperature=0.5
    )

    # BUG FIX: the v1 OpenAI client returns a Completion object, not a dict;
    # subscripting it (response['choices']) raises TypeError. Use attribute
    # access, as the previous revision of this file did.
    return response.choices[0].text
63
+
64
# Run the three analyses sequentially when the user clicks the button.
if st.button("Start Analysis"):
    # (spinner message, task passed to the model, expander title, error label)
    analysis_steps = [
        ("Analyzing Sentiment...", "sentiment analysis",
         "Sentiment Analysis - ✅ Completed", "Sentiment Analysis"),
        ("Summarizing...", "summarization",
         "Summarization - ✅ Completed", "Summarization"),
        ("Extracting Keywords...", "keyword extraction",
         "Keywords Extraction - Completed", "Keyword Extraction"),
    ]
    for spinner_msg, task, expander_title, error_label in analysis_steps:
        with st.spinner(spinner_msg):
            try:
                result = call_llama_analysis(task, text)
                with st.expander(expander_title, expanded=True):
                    st.write(result)
            except Exception as e:
                st.error(f"Error in {error_label}: {str(e)}")