abdullahmubeen10
commited on
Upload 5 files
Browse files- .streamlit/config.toml +3 -0
- Demo.py +310 -0
- Dockerfile +70 -0
- pages/Workflow & Model Overview.py +729 -0
- requirements.txt +7 -0
.streamlit/config.toml
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
# Streamlit theme configuration (picked up automatically from .streamlit/config.toml).
[theme]
# Start from the built-in light theme.
base="light"
# Accent color for interactive widgets (Spark NLP brand blue).
primaryColor="#29B4E8"
|
Demo.py
ADDED
@@ -0,0 +1,310 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import streamlit as st
|
2 |
+
import sparknlp
|
3 |
+
import os
|
4 |
+
import pandas as pd
|
5 |
+
|
6 |
+
from sparknlp.base import *
|
7 |
+
from sparknlp.annotator import *
|
8 |
+
from pyspark.ml import Pipeline
|
9 |
+
from sparknlp.pretrained import PretrainedPipeline
|
10 |
+
from annotated_text import annotated_text
|
11 |
+
from streamlit_tags import st_tags
|
12 |
+
|
13 |
+
# Page configuration: wide layout so the prediction tables and long example
# texts have room; sidebar visibility left to Streamlit's default heuristic.
st.set_page_config(
    layout="wide",
    initial_sidebar_state="auto"
)

# CSS for styling the title banner and the grey "section" info boxes used below.
st.markdown("""
<style>
    .main-title {
        font-size: 36px;
        color: #4A90E2;
        font-weight: bold;
        text-align: center;
    }
    .section {
        background-color: #f9f9f9;
        padding: 10px;
        border-radius: 10px;
        margin-top: 10px;
    }
    .section p, .section ul {
        color: #666666;
    }
</style>
""", unsafe_allow_html=True)
|
39 |
+
|
40 |
+
@st.cache_resource
def init_spark():
    """Start (or reuse) the Spark NLP session.

    Cached with st.cache_resource so the JVM/Spark session is created once
    per server process rather than on every Streamlit rerun.
    """
    return sparknlp.start()
|
43 |
+
|
44 |
+
@st.cache_resource
def create_pipeline(model, task, zeroShotLables=None):
    """Build the Spark NLP pipeline for the selected task.

    Args:
        model: Pretrained model name selected in the sidebar.
            NOTE(review): only the "Question Answering" branch actually uses
            this argument; the other branches load hard-coded model names —
            confirm whether that is intended.
        task: One of "Token Classification", "Zero-Shot Classification",
            "Sequence Classification", "Question Answering".
        zeroShotLables: Candidate labels for the zero-shot classifier.
            Defaults to [''] (the original behavior). A ``None`` sentinel is
            used instead of a list literal to avoid the shared
            mutable-default-argument pitfall.

    Returns:
        A pyspark.ml Pipeline for the task, or None for an unknown task.
    """
    # Replace the sentinel with the original default value. Using None as
    # the default avoids sharing one list object across all calls.
    if zeroShotLables is None:
        zeroShotLables = ['']

    document_assembler = DocumentAssembler() \
        .setInputCol('text') \
        .setOutputCol('document')

    sentence_detector = SentenceDetector() \
        .setInputCols(['document']) \
        .setOutputCol('sentence')

    tokenizer = Tokenizer() \
        .setInputCols(['sentence']) \
        .setOutputCol('token')

    if task == "Token Classification":
        TCclassifier = BertForTokenClassification \
            .pretrained('bert_base_token_classifier_conll03', 'en') \
            .setInputCols(['token', 'sentence']) \
            .setOutputCol('ner') \
            .setCaseSensitive(True) \
            .setMaxSentenceLength(512)

        # Groups IOB token tags into whole entity chunks for display.
        ner_converter = NerConverter() \
            .setInputCols(['sentence', 'token', 'ner']) \
            .setOutputCol('ner_chunk')

        TCpipeline = Pipeline(stages=[document_assembler, sentence_detector, tokenizer, TCclassifier, ner_converter])
        return TCpipeline

    elif task == "Zero-Shot Classification":
        # Zero-shot classifies the whole document, so tokenize straight from
        # 'document' instead of per-sentence.
        ZSCtokenizer = Tokenizer() \
            .setInputCols(['document']) \
            .setOutputCol('token')

        zeroShotClassifier = BertForZeroShotClassification \
            .pretrained('bert_zero_shot_classifier_mnli', 'xx') \
            .setInputCols(['token', 'document']) \
            .setOutputCol('class') \
            .setCaseSensitive(True) \
            .setMaxSentenceLength(512) \
            .setCandidateLabels(zeroShotLables)

        ZSCpipeline = Pipeline(stages=[document_assembler, ZSCtokenizer, zeroShotClassifier])
        return ZSCpipeline

    elif task == "Sequence Classification":
        sequence_classifier = BertForSequenceClassification.pretrained("bert_classifier_cbert", "en") \
            .setInputCols(['sentence', 'token']) \
            .setOutputCol('class')

        SCpipeline = Pipeline(stages=[document_assembler, sentence_detector, tokenizer, sequence_classifier])
        return SCpipeline

    elif task == "Question Answering":
        # QA needs two input columns (question + context), hence the
        # MultiDocumentAssembler instead of the single-column assembler above.
        QAdocument_assembler = MultiDocumentAssembler()\
            .setInputCols(["question", "context"]) \
            .setOutputCols(["document_question", "document_context"])

        spanClassifier = BertForQuestionAnswering.pretrained(model,"en") \
            .setInputCols(["document_question", "document_context"]) \
            .setOutputCol("answer") \
            .setCaseSensitive(True)

        QApipeline = Pipeline(stages=[QAdocument_assembler, spanClassifier])
        return QApipeline
|
109 |
+
|
110 |
+
def fit_data(pipeline, data, task):
    """Materialize `pipeline` and annotate `data` locally.

    Relies on the module-level `spark` session created further down in this
    script (init_spark() runs before fit_data is first called).

    Args:
        pipeline: pyspark.ml Pipeline built by create_pipeline().
        data: Text (or question/context string) to annotate.
        task: Unused here; kept so all call sites share one signature.

    Returns:
        The list of fullAnnotate() result dictionaries.
    """
    # Fitting on an empty DataFrame only materializes the pretrained stages;
    # no actual training takes place.
    empty_df = spark.createDataFrame([['']]).toDF('text')
    pipeline_model = pipeline.fit(empty_df)
    # LightPipeline annotates on the driver, which is much faster than a
    # Spark job for a single input string.
    model = LightPipeline(pipeline_model)
    result = model.fullAnnotate(data)
    return result
|
116 |
+
|
117 |
+
def annotate(data):
    """Render the document with NER chunks highlighted via annotated_text.

    Args:
        data: Dict with keys "Document" (the full text), "NER Chunk" (list of
            chunk strings) and "NER Label" (parallel list of entity labels).
    """
    document, chunks, labels = data["Document"], data["NER Chunk"], data["NER Label"]
    annotated_words = []
    for chunk, label in zip(chunks, labels):
        # str.partition never raises when the chunk is absent, unlike the
        # previous `document.split(chunk, 1)[1]`, which raised IndexError if
        # a chunk did not occur verbatim in the remaining text. An absent
        # chunk is simply skipped and the remaining text kept intact.
        before, match, after = document.partition(chunk)
        if not match:
            continue
        if before:
            annotated_words.append(before)
        annotated_words.append((chunk, label))
        document = after
    # Trailing text after the last chunk.
    if document:
        annotated_words.append(document)
    annotated_text(*annotated_words)
|
129 |
+
|
130 |
+
# Per-task model lists and the human-readable blurbs shown in the sidebar and
# page header. The keys double as the task identifiers compared against
# throughout the script, so they must match those string literals exactly.
tasks_models_descriptions = {
    "Token Classification": {
        "models": ["bert_base_token_classifier_conll03"],
        "description": "The 'bert_base_token_classifier_conll03' model excels in identifying and classifying tokens within text. Ideal for tasks like named entity recognition (NER), it accurately extracts entities such as names, dates, and locations, bringing clarity and structure to unstructured data."
    },
    "Zero-Shot Classification": {
        "models": ["bert_zero_shot_classifier_mnli"],
        "description": "The 'bert_zero_shot_classifier_mnli' model enables classification of text into categories without needing training data for those categories. Perfect for dynamic environments, it instantly categorizes news articles, customer feedback, and social media posts into relevant topics."
    },
    "Sequence Classification": {
        "models": ["bert_classifier_cbert"],
        "description": "The 'bert_classifier_cbert' model specializes in sentiment analysis and document classification. It accurately assesses the mood of customer reviews, classifies emails, and sorts text corpora, understanding the nuances and context of sentences."
    },
    "Question Answering": {
        "models": ["bert_qa_bert_large_uncased_whole_word_masking_finetuned_squad", "question_answering_bert_base_cased_squad2"],
        "description": "The 'bert_qa_bert_large_uncased_whole_word_masking_finetuned_squad' model is designed for answering questions based on provided context. Ideal for chatbots and virtual assistants, it delivers precise answers, improving user interaction and support system efficiency."
    }
}
|
148 |
+
|
149 |
+
# Sidebar content: the chosen task drives both pipeline construction and the
# output rendering further down; the model list is filtered per task.
task = st.sidebar.selectbox("Choose the task", list(tasks_models_descriptions.keys()))
model = st.sidebar.selectbox("Choose the pretrained model", tasks_models_descriptions[task]["models"], help="For more info about the models visit: https://sparknlp.org/models")

# Reference notebook link in sidebar (Colab badge).
link = """
<a href="https://colab.research.google.com/github/JohnSnowLabs/spark-nlp-workshop/blob/master/tutorials/streamlit_notebooks/NER.ipynb">
    <img src="https://colab.research.google.com/assets/colab-badge.svg" style="zoom: 1.3" alt="Open In Colab"/>
</a>
"""
st.sidebar.markdown('Reference notebook:')
st.sidebar.markdown(link, unsafe_allow_html=True)

# Page content: title banner plus the task description in a bordered box.
title, sub_title = (f'BERT for {task}', tasks_models_descriptions[task]["description"])
st.markdown(f'<div class="main-title">{title}</div>', unsafe_allow_html=True)
container = st.container(border=True)
container.write(sub_title)
|
167 |
+
|
168 |
+
# Load examples.
# Per-task demo inputs. The first three tasks map to a list of plain texts;
# "Question Answering" maps to a dict of {question: context} pairs, which is
# why the QA branch below treats it differently.
examples_mapping = {
    "Token Classification": [
        "William Henry Gates III (born October 28, 1955) is an American business magnate, software developer, investor, and philanthropist. He is best known as the co-founder of Microsoft Corporation. During his career at Microsoft, Gates held the positions of chairman, chief executive officer (CEO), president and chief software architect, while also being the largest individual shareholder until May 2014. He is one of the best-known entrepreneurs and pioneers of the microcomputer revolution of the 1970s and 1980s. Born and raised in Seattle, Washington, Gates co-founded Microsoft with childhood friend Paul Allen in 1975, in Albuquerque, New Mexico; it went on to become the world's largest personal computer software company. Gates led the company as chairman and CEO until stepping down as CEO in January 2000, but he remained chairman and became chief software architect. During the late 1990s, Gates had been criticized for his business tactics, which have been considered anti-competitive. This opinion has been upheld by numerous court rulings. In June 2006, Gates announced that he would be transitioning to a part-time role at Microsoft and full-time work at the Bill & Melinda Gates Foundation, the private charitable foundation that he and his wife, Melinda Gates, established in 2000.[9] He gradually transferred his duties to Ray Ozzie and Craig Mundie. He stepped down as chairman of Microsoft in February 2014 and assumed a new post as technology adviser to support the newly appointed CEO Satya Nadella.",
        "The Mona Lisa is a 16th century oil painting created by Leonardo. It's held at the Louvre in Paris.",
        "When Sebastian Thrun started working on self-driving cars at Google in 2007, few people outside of the company took him seriously. “I can tell you very senior CEOs of major American car companies would shake my hand and turn away because I wasn’t worth talking to,” said Thrun, now the co-founder and CEO of online higher education startup Udacity, in an interview with Recode earlier this week.",
        "Facebook is a social networking service launched as TheFacebook on February 4, 2004. It was founded by Mark Zuckerberg with his college roommates and fellow Harvard University students Eduardo Saverin, Andrew McCollum, Dustin Moskovitz and Chris Hughes. The website's membership was initially limited by the founders to Harvard students, but was expanded to other colleges in the Boston area, the Ivy League, and gradually most universities in the United States and Canada.",
        "The history of natural language processing generally started in the 1950s, although work can be found from earlier periods. In 1950, Alan Turing published an article titled 'Computing Machinery and Intelligence' which proposed what is now called the Turing test as a criterion of intelligence",
        "Geoffrey Everest Hinton is an English Canadian cognitive psychologist and computer scientist, most noted for his work on artificial neural networks. Since 2013 he divides his time working for Google and the University of Toronto. In 2017, he cofounded and became the Chief Scientific Advisor of the Vector Institute in Toronto.",
        "When I told John that I wanted to move to Alaska, he warned me that I'd have trouble finding a Starbucks there.",
        "Steven Paul Jobs was an American business magnate, industrial designer, investor, and media proprietor. He was the chairman, chief executive officer (CEO), and co-founder of Apple Inc., the chairman and majority shareholder of Pixar, a member of The Walt Disney Company's board of directors following its acquisition of Pixar, and the founder, chairman, and CEO of NeXT. Jobs is widely recognized as a pioneer of the personal computer revolution of the 1970s and 1980s, along with Apple co-founder Steve Wozniak. Jobs was born in San Francisco, California, and put up for adoption. He was raised in the San Francisco Bay Area. He attended Reed College in 1972 before dropping out that same year, and traveled through India in 1974 seeking enlightenment and studying Zen Buddhism.",
        "Titanic is a 1997 American epic romance and disaster film directed, written, co-produced, and co-edited by James Cameron. Incorporating both historical and fictionalized aspects, it is based on accounts of the sinking of the RMS Titanic, and stars Leonardo DiCaprio and Kate Winslet as members of different social classes who fall in love aboard the ship during its ill-fated maiden voyage.",
        "Other than being the king of the north, John Snow is a an english physician and a leader in the development of anaesthesia and medical hygiene. He is considered for being the first one using data to cure cholera outbreak in 1834."
    ],
    "Zero-Shot Classification" : [
        "In today’s world, staying updated with urgent information is crucial as events can unfold rapidly and require immediate attention.", # Urgent
        "Mobile technology has become indispensable, allowing us to access news, updates, and connect with others no matter where we are.", # Mobile
        "For those who love to travel, the convenience of mobile apps has transformed how we plan and experience trips, providing real-time updates on flights, accommodations, and local attractions.", # Travel
        "The entertainment industry continually offers new movies that captivate audiences with their storytelling and visuals, providing a wide range of genres to suit every taste.", # Movie
        "Music is an integral part of modern life, with streaming platforms making it easy to discover new artists and enjoy favorite tunes anytime, anywhere.", # Music
        "Sports enthusiasts follow games and matches closely, with live updates and detailed statistics available at their fingertips, enhancing the excitement of every game.", # Sport
        "Weather forecasts play a vital role in daily planning, offering accurate and timely information to help us prepare for various weather conditions and adjust our plans accordingly.", # Weather
        "Technology continues to evolve rapidly, driving innovation across all sectors and improving our everyday lives through smarter devices, advanced software, and enhanced connectivity." # Technology
    ],
    "Sequence Classification": [
        "I had a fantastic day at the park with my friends and family, enjoying the beautiful weather and fun activities.", # Positive
        "The movie was a complete waste of time, with a terrible plot and poor acting.", # Negative
        "The meeting was rescheduled to next week due to a conflict in everyone's schedule.", # Neutral
        "I am thrilled with the service I received at the restaurant; the food was delicious and the staff were very friendly.", # Positive
        "The traffic was horrible this morning, causing me to be late for work and miss an important meeting.", # Negative
        "The report was submitted on time and included all the necessary information.", # Neutral
        "I love the new features on my phone; they make it so much easier to stay organized and connected.", # Positive
        "The customer service was disappointing, and I won't be returning to that store.", # Negative
        "The weather forecast predicts mild temperatures for the rest of the week.", # Neutral
        "My vacation was amazing, with stunning views and great experiences at every destination.", # Positive
        "I received a defective product and had a lot of trouble getting it replaced.", # Negative
        "The new policy will be implemented starting next month and applies to all employees.", # Neutral
    ],
    "Question Answering": {
        """What does increased oxygen concentrations in the patient’s lungs displace?""": """Hyperbaric (high-pressure) medicine uses special oxygen chambers to increase the partial pressure of O 2 around the patient and, when needed, the medical staff. Carbon monoxide poisoning, gas gangrene, and decompression sickness (the ’bends’) are sometimes treated using these devices. Increased O 2 concentration in the lungs helps to displace carbon monoxide from the heme group of hemoglobin. Oxygen gas is poisonous to the anaerobic bacteria that cause gas gangrene, so increasing its partial pressure helps kill them. Decompression sickness occurs in divers who decompress too quickly after a dive, resulting in bubbles of inert gas, mostly nitrogen and helium, forming in their blood. Increasing the pressure of O 2 as soon as possible is part of the treatment.""",
        """What category of game is Legend of Zelda: Twilight Princess?""": """The Legend of Zelda: Twilight Princess (Japanese: ゼルダの伝説 トワイライトプリンセス, Hepburn: Zeruda no Densetsu: Towairaito Purinsesu?) is an action-adventure game developed and published by Nintendo for the GameCube and Wii home video game consoles. It is the thirteenth installment in the The Legend of Zelda series. Originally planned for release on the GameCube in November 2005, Twilight Princess was delayed by Nintendo to allow its developers to refine the game, add more content, and port it to the Wii. The Wii version was released alongside the console in North America in November 2006, and in Japan, Europe, and Australia the following month. The GameCube version was released worldwide in December 2006.""",
        """Who is founder of Alibaba Group?""": """Alibaba Group founder Jack Ma has made his first appearance since Chinese regulators cracked down on his business empire. His absence had fuelled speculation over his whereabouts amid increasing official scrutiny of his businesses. The billionaire met 100 rural teachers in China via a video meeting on Wednesday, according to local government media. Alibaba shares surged 5% on Hong Kong's stock exchange on the news.""",
        """For what instrument did Frédéric write primarily for?""": """Frédéric François Chopin (/ˈʃoʊpæn/; French pronunciation: [fʁe.de.ʁik fʁɑ̃.swa ʃɔ.pɛ̃]; 22 February or 1 March 1810 – 17 October 1849), born Fryderyk Franciszek Chopin,[n 1] was a Polish and French (by citizenship and birth of father) composer and a virtuoso pianist of the Romantic era, who wrote primarily for the solo piano. He gained and has maintained renown worldwide as one of the leading musicians of his era, whose "poetic genius was based on a professional technique that was without equal in his generation." Chopin was born in what was then the Duchy of Warsaw, and grew up in Warsaw, which after 1815 became part of Congress Poland. A child prodigy, he completed his musical education and composed his earlier works in Warsaw before leaving Poland at the age of 20, less than a month before the outbreak of the November 1830 Uprising.""",
        """The most populated city in the United States is which city?""": """New York—often called New York City or the City of New York to distinguish it from the State of New York, of which it is a part—is the most populous city in the United States and the center of the New York metropolitan area, the premier gateway for legal immigration to the United States and one of the most populous urban agglomerations in the world. A global power city, New York exerts a significant impact upon commerce, finance, media, art, fashion, research, technology, education, and entertainment, its fast pace defining the term New York minute. Home to the headquarters of the United Nations, New York is an important center for international diplomacy and has been described as the cultural and financial capital of the world."""
    }
}
|
214 |
+
|
215 |
+
# ---- Input selection -------------------------------------------------------
if task == 'Question Answering':
    # QA examples are a {question: context} dict, so selection works on the
    # question keys and the prompt string is rebuilt into the
    # '"""question""" """context:..."""' form expected by the QA pipeline.
    examples = list(examples_mapping[task].keys())
    selected_text = st.selectbox('Select an Example:', examples)
    st.subheader('Try it yourself!')
    custom_input_question = st.text_input('Create a question')
    custom_input_context = st.text_input("Create it's context")

    custom_examples = {}

    st.subheader('Selected Text')

    if custom_input_question and custom_input_context:
        custom_examples[custom_input_question] = custom_input_context
        selected_text = [f'"""{next(iter(custom_examples))}""" """context:{custom_examples[next(iter(custom_examples))]}"""']
        st.markdown(f"**Text:** {custom_input_question}")
        st.markdown(f"**Context:** {custom_input_context}")
    elif selected_text:
        st.markdown(f"**Text:** {selected_text}")
        st.markdown(f"**Context:** {examples_mapping[task][selected_text]}")
        selected_text = [f'"""{selected_text}""" """context:{examples_mapping[task][selected_text]}"""']

else:
    examples = examples_mapping[task]
    selected_text = st.selectbox("Select an example", examples)
    custom_input = st.text_input("Try it with your own Sentence!")

if task == 'Zero-Shot Classification':
    zeroShotLables = ["urgent", "mobile", "travel", "movie", "music", "sport", "weather", "technology"]
    # Assign the widget result back to zeroShotLables so user edits are
    # actually passed to the classifier (previously the st_tags result was
    # stored in an unused variable and the defaults were always used).
    zeroShotLables = st_tags(
        label='Select labels',
        text='Press enter to add more',
        value=zeroShotLables,
        suggestions=[
            "Positive", "Negative", "Neutral",
            "Urgent", "Mobile", "Travel", "Movie", "Music", "Sport", "Weather", "Technology",
            "Happiness", "Sadness", "Anger", "Fear", "Surprise", "Disgust",
            "Informational", "Navigational", "Transactional", "Commercial Investigation",
            "Politics", "Business", "Sports", "Entertainment", "Health", "Science",
            "Product Quality", "Delivery Experience", "Customer Service", "Pricing", "Return Policy",
            "Education", "Finance", "Lifestyle", "Fashion", "Food", "Art", "History",
            "Culture", "Environment", "Real Estate", "Automotive", "Travel", "Fitness", "Career"],
        maxtags = -1)

# ---- Choose the text to analyze -------------------------------------------
# The Question Answering branch never creates the free-text `custom_input`
# widget, so it uses the (possibly rebuilt) selected_text directly and skips
# the "Full example text" preview. This explicit branch replaces a bare
# `try/except` that reached the same state by swallowing a NameError.
if task == 'Question Answering':
    text_to_analyze = selected_text
else:
    text_to_analyze = custom_input if custom_input else selected_text
    st.subheader('Full example text')
    HTML_WRAPPER = """<div class="scroll entities" style="overflow-x: auto; border: 1px solid #e6e9ef; border-radius: 0.25rem; padding: 1rem; margin-bottom: 2.5rem; white-space:pre-wrap">{}</div>"""
    st.markdown(HTML_WRAPPER.format(text_to_analyze), unsafe_allow_html=True)

# ---- Initialize Spark and create pipeline ----------------------------------
spark = init_spark()

if task == 'Zero-Shot Classification':
    pipeline = create_pipeline(model, task, zeroShotLables)
else:
    pipeline = create_pipeline(model, task)

output = fit_data(pipeline, text_to_analyze, task)

# ---- Display results -------------------------------------------------------
st.subheader("Prediction:")

if task == 'Token Classification':
    results = {
        'Document': output[0]['document'][0].result,
        'NER Chunk': [n.result for n in output[0]['ner_chunk']],
        "NER Label": [n.metadata['entity'] for n in output[0]['ner_chunk']]
    }
    # Inline highlighted view plus a tabular breakdown of chunks/labels.
    annotate(results)
    df = pd.DataFrame({'NER Chunk': results['NER Chunk'], 'NER Label': results['NER Label']})
    df.index += 1
    st.dataframe(df)

elif task == 'Zero-Shot Classification':
    st.markdown(f"Document Classified as: **{output[0]['class'][0].result}**")

elif task == 'Sequence Classification':
    results = {
        'Sentence': [n.result for n in output[0]['sentence']],
        'Classification': [n.result for n in output[0]['class']],
    }
    df = pd.DataFrame(results)
    df.index += 1
    st.dataframe(df)

elif task == "Question Answering":
    st.markdown(f"Answer: **{output[0]['answer'][0].result}**")
|
304 |
+
|
305 |
+
|
306 |
+
|
307 |
+
|
308 |
+
|
309 |
+
|
310 |
+
|
Dockerfile
ADDED
@@ -0,0 +1,70 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Download base image ubuntu 18.04
FROM ubuntu:18.04

# Set environment variables for the non-root runtime user.
ENV NB_USER jovyan
ENV NB_UID 1000
ENV HOME /home/${NB_USER}

# Install required packages: build/runtime libraries plus OpenJDK 8, which
# Spark (and therefore Spark NLP) requires.
RUN apt-get update && apt-get install -y \
    tar \
    wget \
    bash \
    rsync \
    gcc \
    libfreetype6-dev \
    libhdf5-serial-dev \
    libpng-dev \
    libzmq3-dev \
    python3 \
    python3-dev \
    python3-pip \
    unzip \
    pkg-config \
    software-properties-common \
    graphviz \
    openjdk-8-jdk \
    ant \
    ca-certificates-java \
    && apt-get clean \
    && update-ca-certificates -f;

# Install Python 3.8 and pip from the deadsnakes PPA (18.04 ships 3.6).
# NOTE(review): the system python3 installed above remains present; all
# project commands below explicitly invoke python3.8 — confirm nothing relies
# on the default `python3`.
RUN add-apt-repository ppa:deadsnakes/ppa \
    && apt-get update \
    && apt-get install -y python3.8 python3-pip \
    && apt-get clean;

# Set up JAVA_HOME so PySpark can locate the JVM.
ENV JAVA_HOME /usr/lib/jvm/java-8-openjdk-amd64/
RUN mkdir -p ${HOME} \
    && echo "export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64/" >> ${HOME}/.bashrc \
    && chown -R ${NB_UID}:${NB_UID} ${HOME}

# Create a new user named "jovyan" with user ID 1000
RUN useradd -m -u ${NB_UID} ${NB_USER}

# Switch to the "jovyan" user so the app does not run as root.
USER ${NB_USER}

# Set home and path variables for the user (pip --user installs land on PATH).
ENV HOME=/home/${NB_USER} \
    PATH=/home/${NB_USER}/.local/bin:$PATH

# Set the working directory to the user's home directory
WORKDIR ${HOME}

# Upgrade pip and install Python dependencies before copying the app code,
# so dependency layers are cached across code-only rebuilds.
RUN python3.8 -m pip install --upgrade pip
COPY requirements.txt /tmp/requirements.txt
RUN python3.8 -m pip install -r /tmp/requirements.txt

# Copy the application code into the container at /home/jovyan
COPY --chown=${NB_USER}:${NB_USER} . ${HOME}

# Expose port for Streamlit
EXPOSE 7860

# Define the entry point for the container
ENTRYPOINT ["streamlit", "run", "Demo.py", "--server.port=7860", "--server.address=0.0.0.0"]
|
pages/Workflow & Model Overview.py
ADDED
@@ -0,0 +1,729 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import streamlit as st

# Custom CSS for better styling: page titles, section cards, links, and the
# benchmark/model-info tables used throughout the tabs below.
st.markdown("""
    <style>
        .main-title {
            font-size: 36px;
            color: #4A90E2;
            font-weight: bold;
            text-align: center;
        }
        .sub-title {
            font-size: 24px;
            color: #4A90E2;
            margin-top: 20px;
        }
        .section {
            background-color: #f9f9f9;
            padding: 15px;
            border-radius: 10px;
            margin-top: 20px;
        }
        .section h2 {
            font-size: 22px;
            color: #4A90E2;
        }
        .section p, .section ul {
            color: #666666;
        }
        .link {
            color: #4A90E2;
            text-decoration: none;
        }
        .benchmark-table {
            width: 100%;
            border-collapse: collapse;
            margin-top: 20px;
        }
        .benchmark-table th, .benchmark-table td {
            border: 1px solid #ddd;
            padding: 8px;
            text-align: left;
        }
        .benchmark-table th {
            background-color: #4A90E2;
            color: white;
        }
        .benchmark-table td {
            background-color: #f2f2f2;
        }
    </style>
""", unsafe_allow_html=True)
# Introduction to BERT Annotators in Spark NLP
st.markdown('<div class="main-title">Introduction to BERT Annotators in Spark NLP</div>', unsafe_allow_html=True)
st.markdown("""
<div class="section">
    <p>Spark NLP provides a range of BERT-based annotators that leverage the power of Bidirectional Encoder Representations from Transformers (BERT) for various natural language processing tasks. These annotators are designed to deliver high performance and scalability in production environments. Below, we provide a detailed overview of four key BERT-based annotators available in Spark NLP:</p>
</div>
""", unsafe_allow_html=True)
st.write("")

# One tab per BERT annotator family covered on this page.
tab1, tab2, tab3, tab4 = st.tabs(["BERT for Token Classification", "BERT for Zero-Shot Classification", "BERT for Sequence Classification", "BERT for Question Answering"])
+
with tab1:
|
66 |
+
st.markdown("""
|
67 |
+
<div class="section">
|
68 |
+
<h2>BERT for Token Classification</h2>
|
69 |
+
<p>The <strong>BertForTokenClassification</strong> annotator is fine-tuned for Named Entity Recognition (NER) tasks. Token classification involves labeling tokens, which are the smallest units of meaning in a text, with tags that represent specific entities. This process is crucial for understanding and extracting valuable information from text data. By identifying entities like names of people, organizations, locations, and more, token classification enables a wide range of applications, including:</p>
|
70 |
+
<ul>
|
71 |
+
<li><strong>Information Extraction:</strong> Automatically pulling out important information from large volumes of text.</li>
|
72 |
+
<li><strong>Document Categorization:</strong> Enhancing the organization and retrieval of documents based on identified entities.</li>
|
73 |
+
<li><strong>Improved Search Engine Relevancy:</strong> Enabling more accurate and context-aware search results.</li>
|
74 |
+
</ul>
|
75 |
+
<p>This annotator is highly effective for applications requiring precise entity recognition, ensuring that the identified entities are accurate and contextually relevant.</p>
|
76 |
+
<table class="benchmark-table">
|
77 |
+
<tr>
|
78 |
+
<th>Entity</th>
|
79 |
+
<th>Label</th>
|
80 |
+
</tr>
|
81 |
+
<tr>
|
82 |
+
<td>Apple</td>
|
83 |
+
<td>ORGANIZATION</td>
|
84 |
+
</tr>
|
85 |
+
<tr>
|
86 |
+
<td>Steve Jobs</td>
|
87 |
+
<td>PERSON</td>
|
88 |
+
</tr>
|
89 |
+
<tr>
|
90 |
+
<td>California</td>
|
91 |
+
<td>LOCATION</td>
|
92 |
+
</tr>
|
93 |
+
</table>
|
94 |
+
</div>
|
95 |
+
""", unsafe_allow_html=True)
|
96 |
+
|
97 |
+
# BERT Token Classification - NER CoNLL
|
98 |
+
st.markdown('<div class="sub-title">BERT Token Classification - NER CoNLL</div>', unsafe_allow_html=True)
|
99 |
+
st.markdown("""
|
100 |
+
<div class="section">
|
101 |
+
<p>The <strong>bert_base_token_classifier_conll03</strong> is a fine-tuned BERT model ready to use for Named Entity Recognition (NER) tasks. This model recognizes four types of entities: location (LOC), organizations (ORG), person (PER), and Miscellaneous (MISC).</p>
|
102 |
+
</div>
|
103 |
+
""", unsafe_allow_html=True)
|
104 |
+
|
105 |
+
# How to Use the Model - Token Classification
|
106 |
+
st.markdown('<div class="sub-title">How to Use the Model</div>', unsafe_allow_html=True)
|
107 |
+
st.code('''
|
108 |
+
from sparknlp.base import *
|
109 |
+
from sparknlp.annotator import *
|
110 |
+
from pyspark.ml import Pipeline
|
111 |
+
from pyspark.sql.functions import col, expr
|
112 |
+
|
113 |
+
document_assembler = DocumentAssembler() \\
|
114 |
+
.setInputCol('text') \\
|
115 |
+
.setOutputCol('document')
|
116 |
+
|
117 |
+
sentence_detector = SentenceDetector() \\
|
118 |
+
.setInputCols(['document']) \\
|
119 |
+
.setOutputCol('sentence')
|
120 |
+
|
121 |
+
tokenizer = Tokenizer() \\
|
122 |
+
.setInputCols(['sentence']) \\
|
123 |
+
.setOutputCol('token')
|
124 |
+
|
125 |
+
tokenClassifier = BertForTokenClassification \\
|
126 |
+
.pretrained('bert_base_token_classifier_conll03', 'en') \\
|
127 |
+
.setInputCols(['token', 'sentence']) \\
|
128 |
+
.setOutputCol('ner') \\
|
129 |
+
.setCaseSensitive(True) \\
|
130 |
+
.setMaxSentenceLength(512)
|
131 |
+
|
132 |
+
ner_converter = NerConverter() \\
|
133 |
+
.setInputCols(['sentence', 'token', 'ner']) \\
|
134 |
+
.setOutputCol('entities')
|
135 |
+
|
136 |
+
pipeline = Pipeline(stages=[
|
137 |
+
document_assembler,
|
138 |
+
sentence_detector,
|
139 |
+
tokenizer,
|
140 |
+
tokenClassifier,
|
141 |
+
ner_converter
|
142 |
+
])
|
143 |
+
|
144 |
+
example = spark.createDataFrame([["""Apple Inc. is planning to open a new headquarters in Cupertino, California. The CEO, Tim Cook, announced this during the company's annual event on March 25th, 2023. Barack Obama, the 44th President of the United States, was born on August 4th, 1961, in Honolulu, Hawaii. He attended Harvard Law School and later became a community organizer in Chicago. Amazon reported a net revenue of $125.6 billion in Q4 of 2022, an increase of 9% compared to the previous year. Jeff Bezos, the founder of Amazon, mentioned that the company's growth in cloud computing has significantly contributed to this rise. Paris, the capital city of France, is renowned for its art, fashion, and culture. Key attractions include the Eiffel Tower, the Louvre Museum, and the Notre-Dame Cathedral. Visitors often enjoy a stroll along the Seine River and dining at local bistros. The study, conducted at the Mayo Clinic in Rochester, Minnesota, examined the effects of a new drug on patients with Type 2 diabetes. Results showed a significant reduction in blood sugar levels over a 12-month period. Serena Williams won her 24th Grand Slam title at the Wimbledon Championships in London, England. She defeated Naomi Osaka in a thrilling final match on July 13th, 2023. Google's latest smartphone, the Pixel 6, was unveiled at an event in New York City. Sundar Pichai, the CEO of Google, highlighted the phone's advanced AI capabilities and improved camera features. The Declaration of Independence was signed on July 4th, 1776, in Philadelphia, Pennsylvania. Thomas Jefferson, Benjamin Franklin, and John Adams were among the key figures who drafted this historic document."""]]).toDF("text")
|
145 |
+
result = pipeline.fit(example).transform(example)
|
146 |
+
|
147 |
+
result.select(
|
148 |
+
expr("explode(entities) as ner_chunk")
|
149 |
+
).select(
|
150 |
+
col("ner_chunk.result").alias("chunk"),
|
151 |
+
col("ner_chunk.metadata.entity").alias("ner_label")
|
152 |
+
).show(truncate=False)
|
153 |
+
''', language='python')
|
154 |
+
|
155 |
+
st.text("""
|
156 |
+
+--------------------+---------+
|
157 |
+
|chunk |ner_label|
|
158 |
+
+--------------------+---------+
|
159 |
+
|Apple Inc. |ORG |
|
160 |
+
|Cupertino |LOC |
|
161 |
+
|California |LOC |
|
162 |
+
|Tim Cook |PER |
|
163 |
+
|Barack Obama |PER |
|
164 |
+
|United States |LOC |
|
165 |
+
|Honolulu |LOC |
|
166 |
+
|Hawaii |LOC |
|
167 |
+
|Harvard Law School |ORG |
|
168 |
+
|Chicago |LOC |
|
169 |
+
|Amazon |ORG |
|
170 |
+
|Jeff Bezos |PER |
|
171 |
+
|Amazon |ORG |
|
172 |
+
|Paris |LOC |
|
173 |
+
|France |LOC |
|
174 |
+
|Eiffel Tower |LOC |
|
175 |
+
|Louvre Museum |LOC |
|
176 |
+
|Notre-Dame Cathedral|LOC |
|
177 |
+
|Seine River |LOC |
|
178 |
+
|Mayo Clinic |ORG |
|
179 |
+
+--------------------+---------+
|
180 |
+
""")
|
181 |
+
|
182 |
+
# Model Information - Token Classification
|
183 |
+
st.markdown('<div class="sub-title">Model Information</div>', unsafe_allow_html=True)
|
184 |
+
st.markdown("""
|
185 |
+
<table class="benchmark-table">
|
186 |
+
<tr>
|
187 |
+
<th>Attribute</th>
|
188 |
+
<th>Description</th>
|
189 |
+
</tr>
|
190 |
+
<tr>
|
191 |
+
<td><strong>Model Name</strong></td>
|
192 |
+
<td>bert_base_token_classifier_conll03</td>
|
193 |
+
</tr>
|
194 |
+
<tr>
|
195 |
+
<td><strong>Compatibility</strong></td>
|
196 |
+
<td>Spark NLP 3.2.0+</td>
|
197 |
+
</tr>
|
198 |
+
<tr>
|
199 |
+
<td><strong>License</strong></td>
|
200 |
+
<td>Open Source</td>
|
201 |
+
</tr>
|
202 |
+
<tr>
|
203 |
+
<td><strong>Edition</strong></td>
|
204 |
+
<td>Official</td>
|
205 |
+
</tr>
|
206 |
+
<tr>
|
207 |
+
<td><strong>Input Labels</strong></td>
|
208 |
+
<td>[token, document]</td>
|
209 |
+
</tr>
|
210 |
+
<tr>
|
211 |
+
<td><strong>Output Labels</strong></td>
|
212 |
+
<td>[ner]</td>
|
213 |
+
</tr>
|
214 |
+
<tr>
|
215 |
+
<td><strong>Language</strong></td>
|
216 |
+
<td>en</td>
|
217 |
+
</tr>
|
218 |
+
<tr>
|
219 |
+
<td><strong>Size</strong></td>
|
220 |
+
<td>404.3 MB</td>
|
221 |
+
</tr>
|
222 |
+
<tr>
|
223 |
+
<td><strong>Case sensitive</strong></td>
|
224 |
+
<td>true</td>
|
225 |
+
</tr>
|
226 |
+
<tr>
|
227 |
+
<td><strong>Max sentence length</strong></td>
|
228 |
+
<td>512</td>
|
229 |
+
</tr>
|
230 |
+
</table>
|
231 |
+
""", unsafe_allow_html=True)
|
232 |
+
|
233 |
+
# References - Token Classification
|
234 |
+
st.markdown('<div class="sub-title">References</div>', unsafe_allow_html=True)
|
235 |
+
st.markdown("""
|
236 |
+
<div class="section">
|
237 |
+
<ul>
|
238 |
+
<li><a class="link" href="https://github.com/google-research/bert" target="_blank" rel="noopener">Google Research BERT</a></li>
|
239 |
+
<li><a class="link" href="https://arxiv.org/abs/1810.04805" target="_blank" rel="noopener">BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding</a></li>
|
240 |
+
<li><a class="link" href="https://huggingface.co/bert-base-uncased" target="_blank" rel="noopener">Hugging Face BERT Models</a></li>
|
241 |
+
</ul>
|
242 |
+
</div>
|
243 |
+
""", unsafe_allow_html=True)
|
244 |
+
|
245 |
+
with tab2:
    # --- Annotator overview -------------------------------------------------
    st.markdown("""
    <div class="section">
        <h2>BERT for Zero-Shot Classification</h2>
        <p>The <strong>BertForZeroShotClassification</strong> annotator is designed to classify text into labels it has not seen during training. This is achieved using natural language inference (NLI) to determine the relationship between input text and potential labels. This capability is essential for applications where predefined categories are either unavailable or frequently change. Zero-shot classification is particularly useful for:</p>
        <ul>
            <li><strong>Dynamic Content Tagging:</strong> Automatically categorizing content without the need for a pre-existing label set.</li>
            <li><strong>Sentiment Analysis:</strong> Analyzing sentiment for new and emerging topics without retraining the model.</li>
        </ul>
        <p>By leveraging this annotator, you can ensure flexibility and adaptability in text classification tasks, making it suitable for ever-changing data environments.</p>
        <table class="benchmark-table">
            <tr>
                <th>Text</th>
                <th>Predicted Category</th>
            </tr>
            <tr>
                <td>"The new iPhone has amazing features"</td>
                <td>Technology</td>
            </tr>
            <tr>
                <td>"The economic growth has been significant this year"</td>
                <td>Finance</td>
            </tr>
        </table>
    </div>
    """, unsafe_allow_html=True)

    # --- Featured pretrained model ------------------------------------------
    st.markdown('<div class="sub-title">BERT Zero-Shot Classification Base - MNLI</div>', unsafe_allow_html=True)
    st.markdown("""
    <div class="section">
        <p>The <strong>bert_zero_shot_classifier_mnli</strong> model is designed for zero-shot text classification, making it suitable for scenarios where predefined categories are not available or frequently change. This model is fine-tuned on the MNLI dataset and leverages natural language inference (NLI) to determine relationships between input text and candidate labels. It allows for dynamic classification without a fixed number of classes, providing flexibility and adaptability for various applications.</p>
    </div>
    """, unsafe_allow_html=True)

    # --- Usage example (shown as a code snippet, not executed here) ---------
    st.markdown('<div class="sub-title">How to Use the Model</div>', unsafe_allow_html=True)
    st.code('''
from sparknlp.base import *
from sparknlp.annotator import *
from pyspark.ml import Pipeline

document_assembler = DocumentAssembler() \\
    .setInputCol('text') \\
    .setOutputCol('document')

tokenizer = Tokenizer() \\
    .setInputCols(['document']) \\
    .setOutputCol('token')

zeroShotClassifier = BertForZeroShotClassification \\
    .pretrained('bert_zero_shot_classifier_mnli', 'xx') \\
    .setInputCols(['token', 'document']) \\
    .setOutputCol('class') \\
    .setCaseSensitive(True) \\
    .setMaxSentenceLength(512) \\
    .setCandidateLabels(["urgent", "mobile", "travel", "movie", "music", "sport", "weather", "technology"])

pipeline = Pipeline(stages=[
    document_assembler,
    tokenizer,
    zeroShotClassifier
])

example = spark.createDataFrame([['In today’s world, staying updated with urgent information is crucial as events can unfold rapidly and require immediate attention.']]).toDF("text")
result = pipeline.fit(example).transform(example)

result.select('document.result', 'class.result').show(truncate=False)
''', language='python')

    # Expected console output of the snippet above.
    st.text("""
+------------------------------------------------------------------------------------------------------------------------------------+--------+
|result                                                                                                                              |result  |
+------------------------------------------------------------------------------------------------------------------------------------+--------+
|[In today’s world, staying updated with urgent information is crucial as events can unfold rapidly and require immediate attention.]|[urgent]|
+------------------------------------------------------------------------------------------------------------------------------------+--------+
""")

    # --- Model metadata -----------------------------------------------------
    st.markdown('<div class="sub-title">Model Information</div>', unsafe_allow_html=True)
    st.markdown("""
    <table class="benchmark-table">
        <tr>
            <th>Attribute</th>
            <th>Description</th>
        </tr>
        <tr>
            <td><strong>Model Name</strong></td>
            <td>bert_zero_shot_classifier_mnli</td>
        </tr>
        <tr>
            <td><strong>Compatibility</strong></td>
            <td>Spark NLP 5.2.4+</td>
        </tr>
        <tr>
            <td><strong>License</strong></td>
            <td>Open Source</td>
        </tr>
        <tr>
            <td><strong>Edition</strong></td>
            <td>Official</td>
        </tr>
        <tr>
            <td><strong>Input Labels</strong></td>
            <td>[token, document]</td>
        </tr>
        <tr>
            <td><strong>Output Labels</strong></td>
            <td>[label]</td>
        </tr>
        <tr>
            <td><strong>Language</strong></td>
            <td>xx</td>
        </tr>
        <tr>
            <td><strong>Size</strong></td>
            <td>409.1 MB</td>
        </tr>
        <tr>
            <td><strong>Case sensitive</strong></td>
            <td>true</td>
        </tr>
    </table>
    """, unsafe_allow_html=True)

    # --- References ---------------------------------------------------------
    st.markdown('<div class="sub-title">References</div>', unsafe_allow_html=True)
    st.markdown("""
    <div class="section">
        <ul>
            <li><a class="link" href="https://github.com/google-research/bert" target="_blank" rel="noopener">Google Research BERT</a></li>
            <li><a class="link" href="https://arxiv.org/abs/1810.04805" target="_blank" rel="noopener">BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding</a></li>
            <li><a class="link" href="https://huggingface.co/bert-base-uncased" target="_blank" rel="noopener">Hugging Face BERT Models</a></li>
        </ul>
    </div>
    """, unsafe_allow_html=True)
with tab3:
    # --- Annotator overview -------------------------------------------------
    st.markdown("""
    <div class="section">
        <h2>BERT for Sequence Classification</h2>
        <p>The <strong>BertForSequenceClassification</strong> annotator is fine-tuned to classify entire sequences of text. This involves understanding the context of the entire sequence, which is crucial for tasks that require a holistic view of the input text. Sequence classification is highly effective for:</p>
        <ul>
            <li><strong>Sentiment Analysis:</strong> Determining the overall sentiment of a given piece of text.</li>
            <li><strong>Spam Detection:</strong> Identifying unsolicited or irrelevant messages.</li>
            <li><strong>Document Classification:</strong> Categorizing documents into predefined categories.</li>
        </ul>
        <p>With its ability to deliver accurate classification results, this annotator is widely used in various text analysis applications.</p>
        <table class="benchmark-table">
            <tr>
                <th>Text</th>
                <th>Predicted Sentiment</th>
            </tr>
            <tr>
                <td>"I love this product, it's fantastic!"</td>
                <td>Positive</td>
            </tr>
            <tr>
                <td>"The service was terrible, I'm very disappointed."</td>
                <td>Negative</td>
            </tr>
        </table>
    </div>
    """, unsafe_allow_html=True)

    # --- Featured pretrained model ------------------------------------------
    st.markdown('<div class="sub-title">English BertForSequenceClassification Cased model (from yonichi)</div>', unsafe_allow_html=True)
    st.markdown("""
    <div class="section">
        <p>The <strong>bert_classifier_cbert</strong> model is a pretrained BertForSequenceClassification model. Adapted from Hugging Face and curated for scalability and production-readiness using Spark NLP, this model is designed for sequence classification tasks such as sentiment analysis. It is capable of classifying text into positive, negative, and neutral sentiments, providing valuable insights for various applications.</p>
    </div>
    """, unsafe_allow_html=True)

    # --- Usage example (shown as a code snippet, not executed here) ---------
    st.markdown('<div class="sub-title">How to Use the Model</div>', unsafe_allow_html=True)
    st.code('''
from sparknlp.base import *
from sparknlp.annotator import *
from pyspark.ml import Pipeline
from pyspark.sql.functions import col, expr

# Document Assembler
document_assembler = DocumentAssembler() \\
    .setInputCol('text') \\
    .setOutputCol('document')

# Sentence Detector
sentence_detector = SentenceDetector() \\
    .setInputCols(['document']) \\
    .setOutputCol('sentence')

# Tokenizer
tokenizer = Tokenizer() \\
    .setInputCols(['sentence']) \\
    .setOutputCol('token')

# Sequence Classifier
sequence_classifier = BertForSequenceClassification.pretrained("bert_classifier_cbert", "en") \\
    .setInputCols(['sentence', 'token']) \\
    .setOutputCol('class')

# Pipeline
pipeline = Pipeline(stages=[
    document_assembler,
    sentence_detector,
    tokenizer,
    sequence_classifier
])

# Create example DataFrame
example = spark.createDataFrame([("Apple Inc. is planning to open a new headquarters in Cupertino, California. The CEO, Tim Cook, announced this during the company's annual event on March 25th, 2023. Barack Obama, the 44th President of the United States, was born on August 4th, 1961, in Honolulu, Hawaii. He attended Harvard Law School and later became a community organizer in Chicago. Amazon reported a net revenue of $125.6 billion in Q4 of 2022, an increase of 9% compared to the previous year. Jeff Bezos, the founder of Amazon, mentioned that the company's growth in cloud computing has significantly contributed to this rise. Paris, the capital city of France, is renowned for its art, fashion, and culture. Key attractions include the Eiffel Tower, the Louvre Museum, and the Notre-Dame Cathedral. Visitors often enjoy a stroll along the Seine River and dining at local bistros. The study, conducted at the Mayo Clinic in Rochester, Minnesota, examined the effects of a new drug on patients with Type 2 diabetes. Results showed a significant reduction in blood sugar levels over a 12-month period. Serena Williams won her 24th Grand Slam title at the Wimbledon Championships in London, England. She defeated Naomi Osaka in a thrilling final match on July 13th, 2023. Google's latest smartphone, the Pixel 6, was unveiled at an event in New York City. Sundar Pichai, the CEO of Google, highlighted the phone's advanced AI capabilities and improved camera features. The Declaration of Independence was signed on July 4th, 1776, in Philadelphia, Pennsylvania. Thomas Jefferson, Benjamin Franklin, and John Adams were among the key figures who drafted this historic document.",)], ["text"])

# Fit and transform the data
model = pipeline.fit(example)
result = model.transform(example)

from pyspark.sql.functions import col

# Show results in a structured format for sentence-based classification
result.select(
    col('sentence.result').alias('sentences'),
    col('class.result').alias('classifications')
).rdd.flatMap(lambda row: list(zip(row['sentences'], row['classifications']))).toDF(['sentence', 'classification']).show(truncate=False)
''', language='python')

    # Expected console output of the snippet above.
    st.text("""
+-------------------------------------------------------------------------------------------------------------------------------------+--------------+
|sentence                                                                                                                             |classification|
+-------------------------------------------------------------------------------------------------------------------------------------+--------------+
|Apple Inc. is planning to open a new headquarters in Cupertino, California.                                                          |Neutral       |
|The CEO, Tim Cook, announced this during the company's annual event on March 25th, 2023.                                             |Dovish        |
|Barack Obama, the 44th President of the United States, was born on August 4th, 1961, in Honolulu, Hawaii.                            |Neutral       |
|He attended Harvard Law School and later became a community organizer in Chicago.                                                    |Neutral       |
|Amazon reported a net revenue of $125.6 billion in Q4 of 2022, an increase of 9% compared to the previous year.                      |Neutral       |
|Jeff Bezos, the founder of Amazon, mentioned that the company's growth in cloud computing has significantly contributed to this rise.|Dovish        |
|Paris, the capital city of France, is renowned for its art, fashion, and culture.                                                    |Neutral       |
|Key attractions include the Eiffel Tower, the Louvre Museum, and the Notre-Dame Cathedral.                                           |Neutral       |
|Visitors often enjoy a stroll along the Seine River and dining at local bistros.                                                     |Neutral       |
|The study, conducted at the Mayo Clinic in Rochester, Minnesota, examined the effects of a new drug on patients with Type 2 diabetes.|Dovish        |
|Results showed a significant reduction in blood sugar levels over a 12-month period.                                                 |Neutral       |
|Serena Williams won her 24th Grand Slam title at the Wimbledon Championships in London, England.                                     |Hawkish       |
|She defeated Naomi Osaka in a thrilling final match on July 13th, 2023.                                                              |Neutral       |
|Google's latest smartphone, the Pixel 6, was unveiled at an event in New York City.                                                  |Dovish        |
|Sundar Pichai, the CEO of Google, highlighted the phone's advanced AI capabilities and improved camera features.                     |Dovish        |
|The Declaration of Independence was signed on July 4th, 1776, in Philadelphia, Pennsylvania.                                         |Hawkish       |
|Thomas Jefferson, Benjamin Franklin, and John Adams were among the key figures who drafted this historic document.                   |Neutral       |
+-------------------------------------------------------------------------------------------------------------------------------------+--------------+
""")

    # --- Model metadata -----------------------------------------------------
    st.markdown('<div class="sub-title">Model Information</div>', unsafe_allow_html=True)
    st.markdown("""
    <table class="benchmark-table">
        <tr>
            <th>Attribute</th>
            <th>Description</th>
        </tr>
        <tr>
            <td><strong>Model Name</strong></td>
            <td>bert_classifier_cbert</td>
        </tr>
        <tr>
            <td><strong>Compatibility</strong></td>
            <td>Spark NLP 4.2.0+</td>
        </tr>
        <tr>
            <td><strong>License</strong></td>
            <td>Open Source</td>
        </tr>
        <tr>
            <td><strong>Edition</strong></td>
            <td>Official</td>
        </tr>
        <tr>
            <td><strong>Input Labels</strong></td>
            <td>[document, token]</td>
        </tr>
        <tr>
            <td><strong>Output Labels</strong></td>
            <td>[class]</td>
        </tr>
        <tr>
            <td><strong>Language</strong></td>
            <td>en</td>
        </tr>
        <tr>
            <td><strong>Size</strong></td>
            <td>412.2 MB</td>
        </tr>
        <tr>
            <td><strong>Case sensitive</strong></td>
            <td>true</td>
        </tr>
        <tr>
            <td><strong>Max sentence length</strong></td>
            <td>256</td>
        </tr>
    </table>
    """, unsafe_allow_html=True)

    # --- References ---------------------------------------------------------
    st.markdown('<div class="sub-title">References</div>', unsafe_allow_html=True)
    st.markdown("""
    <div class="section">
        <ul>
            <li><a class="link" href="https://huggingface.co/yonichi/cbert" target="_blank" rel="noopener">Hugging Face cBERT Model</a></li>
        </ul>
    </div>
    """, unsafe_allow_html=True)
with tab4:
|
558 |
+
st.markdown("""
|
559 |
+
<div class="section">
|
560 |
+
<h2>BERT for Question Answering</h2>
|
561 |
+
<p>The <strong>BertForQuestionAnswering</strong> annotator is fine-tuned to provide answers to questions based on a given context. This involves extracting relevant information from a passage of text in response to a specific query, making it ideal for applications requiring precise information retrieval. Question answering is particularly useful for:</p>
|
562 |
+
<ul>
|
563 |
+
<li><strong>Building Question-Answering Systems:</strong> Creating systems that can automatically answer user queries.</li>
|
564 |
+
<li><strong>Customer Support Bots:</strong> Providing accurate and timely responses to customer inquiries.</li>
|
565 |
+
<li><strong>Information Retrieval:</strong> Extracting specific information from large volumes of text.</li>
|
566 |
+
</ul>
|
567 |
+
<p>By leveraging this annotator, you can enhance the ability to extract and deliver accurate information from text data.</p>
|
568 |
+
<table class="benchmark-table">
|
569 |
+
<tr>
|
570 |
+
<th>Context</th>
|
571 |
+
<th>Question</th>
|
572 |
+
<th>Predicted Answer</th>
|
573 |
+
</tr>
|
574 |
+
<tr>
|
575 |
+
<td>"The Eiffel Tower is one of the most recognizable structures in the world. It was constructed in 1889 as the entrance arch to the 1889 World's Fair held in Paris, France."</td>
|
576 |
+
<td>"When was the Eiffel Tower constructed?"</td>
|
577 |
+
<td>1889</td>
|
578 |
+
</tr>
|
579 |
+
<tr>
|
580 |
+
<td>"The Amazon rainforest, also known as Amazonia, is a vast tropical rainforest in South America. It is home to an incredible diversity of flora and fauna."</td>
|
581 |
+
<td>"What is the Amazon rainforest also known as?"</td>
|
582 |
+
<td>Amazonia</td>
|
583 |
+
</tr>
|
584 |
+
</table>
|
585 |
+
</div>
|
586 |
+
""", unsafe_allow_html=True)
|
587 |
+
|
588 |
+
# English BertForQuestionAnswering Large Uncased Model
|
589 |
+
st.markdown('<div class="sub-title">bert_qa_large_uncased_whole_word_masking_finetuned_squad</div>', unsafe_allow_html=True)
|
590 |
+
st.markdown("""
|
591 |
+
<div class="section">
|
592 |
+
<p>This model is a pretrained BERT model, adapted from Hugging Face, curated to provide scalability and production-readiness using Spark NLP. It is designed to handle question-answering tasks effectively.</p>
|
593 |
+
</div>
|
594 |
+
""", unsafe_allow_html=True)
|
595 |
+
|
596 |
+
# How to Use the Model - Question Answering
|
597 |
+
st.markdown('<div class="sub-title">How to Use the Model</div>', unsafe_allow_html=True)
|
598 |
+
st.code('''
|
599 |
+
from sparknlp.base import *
|
600 |
+
from sparknlp.annotator import *
|
601 |
+
from pyspark.ml import Pipeline
|
602 |
+
from pyspark.sql.functions import col, expr
|
603 |
+
|
604 |
+
# Document Assembler
|
605 |
+
document_assembler = MultiDocumentAssembler()\\
|
606 |
+
.setInputCols(["question", "context"]) \\
|
607 |
+
.setOutputCols(["document_question", "document_context"])
|
608 |
+
|
609 |
+
# BertForQuestionAnswering
|
610 |
+
spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_large_uncased_whole_word_masking_finetuned_squad","en") \\
|
611 |
+
.setInputCols(["document_question", "document_context"]) \\
|
612 |
+
.setOutputCol("answer") \\
|
613 |
+
.setCaseSensitive(True)
|
614 |
+
|
615 |
+
# Pipeline
|
616 |
+
pipeline = Pipeline().setStages([
|
617 |
+
document_assembler,
|
618 |
+
spanClassifier
|
619 |
+
])
|
620 |
+
|
621 |
+
# Create example DataFrame
|
622 |
+
example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
|
623 |
+
|
624 |
+
# Fit and transform the data
|
625 |
+
model = pipeline.fit(example)
|
626 |
+
result = model.transform(example)
|
627 |
+
|
628 |
+
# Show results
|
629 |
+
result.select('document_question.result', 'answer.result').show(truncate=False)
|
630 |
+
''', language='python')
|
631 |
+
|
632 |
+
st.text("""
|
633 |
+
+-----------------+-------+
|
634 |
+
|result |result |
|
635 |
+
+-----------------+-------+
|
636 |
+
|[What's my name?]|[Clara]|
|
637 |
+
+-----------------+-------+
|
638 |
+
""")
|
639 |
+
|
640 |
+
# Model Information - Question Answering
|
641 |
+
st.markdown('<div class="sub-title">Model Information</div>', unsafe_allow_html=True)
|
642 |
+
st.markdown("""
|
643 |
+
<table class="benchmark-table">
|
644 |
+
<tr>
|
645 |
+
<th>Attribute</th>
|
646 |
+
<th>Description</th>
|
647 |
+
</tr>
|
648 |
+
<tr>
|
649 |
+
<td><strong>Model Name</strong></td>
|
650 |
+
<td>bert_qa_large_uncased_whole_word_masking_finetuned_squad</td>
|
651 |
+
</tr>
|
652 |
+
<tr>
|
653 |
+
<td><strong>Compatibility</strong></td>
|
654 |
+
<td>Spark NLP 4.4.0+</td>
|
655 |
+
</tr>
|
656 |
+
<tr>
|
657 |
+
<td><strong>License</strong></td>
|
658 |
+
<td>Open Source</td>
|
659 |
+
</tr>
|
660 |
+
<tr>
|
661 |
+
<td><strong>Edition</strong></td>
|
662 |
+
<td>Official</td>
|
663 |
+
</tr>
|
664 |
+
<tr>
|
665 |
+
<td><strong>Input Labels</strong></td>
|
666 |
+
<td>[document_question, document_context]</td>
|
667 |
+
</tr>
|
668 |
+
<tr>
|
669 |
+
<td><strong>Output Labels</strong></td>
|
670 |
+
<td>[answer]</td>
|
671 |
+
</tr>
|
672 |
+
<tr>
|
673 |
+
<td><strong>Language</strong></td>
|
674 |
+
<td>en</td>
|
675 |
+
</tr>
|
676 |
+
<tr>
|
677 |
+
<td><strong>Size</strong></td>
|
678 |
+
<td>1.3 GB</td>
|
679 |
+
</tr>
|
680 |
+
<tr>
|
681 |
+
<td><strong>Case sensitive</strong></td>
|
682 |
+
<td>false</td>
|
683 |
+
</tr>
|
684 |
+
<tr>
|
685 |
+
<td><strong>Max sentence length</strong></td>
|
686 |
+
<td>512</td>
|
687 |
+
</tr>
|
688 |
+
</table>
|
689 |
+
""", unsafe_allow_html=True)
|
690 |
+
|
691 |
+
# References - Question Answering
|
692 |
+
st.markdown('<div class="sub-title">References</div>', unsafe_allow_html=True)
|
693 |
+
st.markdown("""
|
694 |
+
<div class="section">
|
695 |
+
<ul>
|
696 |
+
<li><a class="link" href="https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad" target="_blank" rel="noopener">BertForQuestionAnswering Model</a></li>
|
697 |
+
<li><a class="link" href="https://arxiv.org/abs/1810.04805" target="_blank" rel="noopener">BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding</a></li>
|
698 |
+
<li><a class="link" href="https://github.com/google-research/bert" target="_blank" rel="noopener">Google Research BERT</a></li>
|
699 |
+
</ul>
|
700 |
+
</div>
|
701 |
+
""", unsafe_allow_html=True)
|
702 |
+
|
703 |
+
st.markdown("""
|
704 |
+
<div class="section">
|
705 |
+
<h2>Conclusion</h2>
|
706 |
+
<p>In this guide, we've explored a range of BERT-based annotators and models available in Spark NLP, each tailored to specific natural language processing tasks. Here's a summary of the four key BERT annotators and their respective models:</p>
|
707 |
+
<ul>
|
708 |
+
<li><strong>BERT for Sequence Classification</strong> - The <code>BertForSequenceClassification</code> annotator, exemplified by the <code>bert_classifier_cbert</code> model, excels in classifying entire sequences of text. This model is particularly useful for tasks like sentiment analysis, spam detection, and document categorization, providing insights into the overall sentiment of a text or its classification into predefined categories.</li>
|
709 |
+
<li><strong>BERT for Token Classification</strong> - The <code>BertForTokenClassification</code> annotator, with the <code>bert_base_token_classifier_conll03</code> model, specializes in Named Entity Recognition (NER). This annotator identifies entities such as people, organizations, locations, and more within a text, making it invaluable for information extraction and document categorization.</li>
|
710 |
+
<li><strong>BERT for Zero-Shot Classification</strong> - The <code>BertForZeroShotClassification</code> annotator, represented by the <code>bert_zero_shot_classifier_mnli</code> model, offers a flexible approach to text classification without requiring a predefined set of categories. It leverages natural language inference (NLI) to classify text into dynamically chosen labels, making it ideal for applications with evolving or unknown categories.</li>
|
711 |
+
<li><strong>BERT for Question Answering</strong> - The <code>BertForQuestionAnswering</code> annotator, demonstrated here with the <code>bert_qa_large_uncased_whole_word_masking_finetuned_squad</code> model, is designed to extract answers from a given context based on a query. This model is highly effective for building question-answering systems and customer support bots, facilitating precise information retrieval from large text corpora.</li>
|
712 |
+
</ul>
|
713 |
+
<p>Each of these models and annotators demonstrates the versatility and power of BERT-based approaches in natural language processing. Whether you need to classify sequences, identify entities, handle zero-shot classification, or answer questions, Spark NLP provides robust tools to enhance your text analysis capabilities. Leveraging these models allows for scalable and production-ready solutions in various applications, from sentiment analysis to dynamic content tagging.</p>
|
714 |
+
</div>
|
715 |
+
""", unsafe_allow_html=True)
|
716 |
+
|
717 |
+
# Community & Support
|
718 |
+
st.markdown('<div class="sub-title">Community & Support</div>', unsafe_allow_html=True)
|
719 |
+
st.markdown("""
|
720 |
+
<div class="section">
|
721 |
+
<ul>
|
722 |
+
<li><a class="link" href="https://sparknlp.org/" target="_blank">Official Website</a>: Documentation and examples</li>
|
723 |
+
<li><a class="link" href="https://join.slack.com/t/spark-nlp/shared_invite/zt-198dipu77-L3UWNe_AJ8xqDk0ivmih5Q" target="_blank">Slack</a>: Live discussion with the community and team</li>
|
724 |
+
<li><a class="link" href="https://github.com/JohnSnowLabs/spark-nlp" target="_blank">GitHub</a>: Bug reports, feature requests, and contributions</li>
|
725 |
+
<li><a class="link" href="https://medium.com/spark-nlp" target="_blank">Medium</a>: Spark NLP articles</li>
|
726 |
+
<li><a class="link" href="https://www.youtube.com/channel/UCmFOjlpYEhxf_wJUDuz6xxQ/videos" target="_blank">YouTube</a>: Video tutorials</li>
|
727 |
+
</ul>
|
728 |
+
</div>
|
729 |
+
""", unsafe_allow_html=True)
|
requirements.txt
ADDED
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
streamlit
|
2 |
+
st-annotated-text
|
3 |
+
streamlit-tags
|
4 |
+
pandas
|
5 |
+
numpy
|
6 |
+
spark-nlp
|
7 |
+
pyspark
|