Upload 6 files
- .streamlit/config.toml +3 -0
- Demo-old.py +312 -0
- Demo.py +248 -0
- Dockerfile +72 -0
- pages/Workflow & Model Overview.py +479 -0
- requirements.txt +7 -0
.streamlit/config.toml
ADDED
@@ -0,0 +1,3 @@
[theme]
base="light"
primaryColor="#29B4E8"
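Streamlit reads this file automatically at startup. For reference, the [theme] table accepts a few more keys beyond the two set here; a sketch with hypothetical color values:

[theme]
base="light"
primaryColor="#29B4E8"
# The values below are illustrative, not part of this upload
backgroundColor="#FFFFFF"
secondaryBackgroundColor="#F0F2F6"
textColor="#262730"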
Demo-old.py
ADDED
@@ -0,0 +1,312 @@
import streamlit as st
import sparknlp
import os
import pandas as pd

from sparknlp.base import *
from sparknlp.annotator import *
from pyspark.ml import Pipeline
from sparknlp.pretrained import PretrainedPipeline
from annotated_text import annotated_text
from streamlit_tags import st_tags

# Page configuration
st.set_page_config(
    layout="wide",
    initial_sidebar_state="auto"
)

# CSS for styling
st.markdown("""
    <style>
        .main-title {
            font-size: 36px;
            color: #4A90E2;
            font-weight: bold;
            text-align: center;
        }
        .section {
            background-color: #f9f9f9;
            padding: 10px;
            border-radius: 10px;
            margin-top: 10px;
        }
        .section p, .section ul {
            color: #666666;
        }
    </style>
""", unsafe_allow_html=True)

@st.cache_resource
def init_spark():
    return sparknlp.start()

@st.cache_resource
def create_pipeline(model, task, zeroShotLabels=['']):
    document_assembler = DocumentAssembler() \
        .setInputCol('text') \
        .setOutputCol('document')

    sentence_detector = SentenceDetectorDLModel.pretrained("sentence_detector_dl", "xx") \
        .setInputCols(["document"]) \
        .setOutputCol("sentence")

    tokenizer = Tokenizer() \
        .setInputCols(['sentence']) \
        .setOutputCol('token')

    if task == "Token Classification":
        TCclassifier = DeBertaForTokenClassification \
            .pretrained("deberta_v3_small_token_classifier_conll03", "en") \
            .setInputCols(["sentence", "token"]) \
            .setOutputCol("ner") \
            .setCaseSensitive(True) \
            .setMaxSentenceLength(512)

        ner_converter = NerConverter() \
            .setInputCols(['sentence', 'token', 'ner']) \
            .setOutputCol('ner_chunk')

        TCpipeline = Pipeline(stages=[document_assembler, sentence_detector, tokenizer, TCclassifier, ner_converter])
        return TCpipeline

    elif task == "Zero-Shot Classification":
        ZSCtokenizer = Tokenizer() \
            .setInputCols(['document']) \
            .setOutputCol('token')

        zeroShotClassifier = DeBertaForZeroShotClassification \
            .pretrained('deberta_base_zero_shot_classifier_mnli_anli_v3', 'en') \
            .setInputCols(['token', 'document']) \
            .setOutputCol('class') \
            .setCaseSensitive(False) \
            .setMaxSentenceLength(512) \
            .setCandidateLabels(zeroShotLabels)

        ZSCpipeline = Pipeline(stages=[document_assembler, ZSCtokenizer, zeroShotClassifier])
        return ZSCpipeline

    elif task == "Sequence Classification":
        SCtokenizer = Tokenizer() \
            .setInputCols(['document']) \
            .setOutputCol('token')

        sequence_classifier = DeBertaForSequenceClassification \
            .pretrained("deberta_v3_base_sequence_classifier_imdb", "en") \
            .setInputCols(["document", "token"]) \
            .setOutputCol("class")

        SCpipeline = Pipeline(stages=[document_assembler, SCtokenizer, sequence_classifier])
        return SCpipeline

    elif task == "Question Answering":
        QAdocument_assembler = MultiDocumentAssembler() \
            .setInputCols(["question", "context"]) \
            .setOutputCols(["document_question", "document_context"])

        spanClassifier = DebertaForQuestionAnswering \
            .pretrained("deberta_v3_xsmall_qa_squad2", "en") \
            .setInputCols(["document_question", "document_context"]) \
            .setOutputCol("answer")

        QApipeline = Pipeline(stages=[QAdocument_assembler, spanClassifier])
        return QApipeline

def fit_data(pipeline, data, task, ques='', cont=''):
    # Classification tasks annotate the raw string through a LightPipeline;
    # Question Answering transforms a (question, context) DataFrame instead.
    if task in ['Token Classification', 'Sequence Classification', 'Zero-Shot Classification']:
        empty_df = spark.createDataFrame([['']]).toDF('text')
        pipeline_model = pipeline.fit(empty_df)
        model = LightPipeline(pipeline_model)
        result = model.fullAnnotate(data)
        return result
    else:
        df = spark.createDataFrame([[ques, cont]]).toDF("question", "context")
        result = pipeline.fit(df).transform(df)
        return result.select('answer.result').collect()

def annotate(data):
    document, chunks, labels = data["Document"], data["NER Chunk"], data["NER Label"]
    annotated_words = []
    for chunk, label in zip(chunks, labels):
        parts = document.split(chunk, 1)
        if parts[0]:
            annotated_words.append(parts[0])
        annotated_words.append((chunk, label))
        document = parts[1]
    if document:
        annotated_words.append(document)
    annotated_text(*annotated_words)

tasks_models_descriptions = {
    "Token Classification": {
        "models": ["deberta_v3_small_token_classifier_conll03"],
        "description": "The 'deberta_v3_small_token_classifier_conll03' model is adept at token classification tasks, including named entity recognition (NER). It identifies and categorizes tokens in text, such as names, dates, and locations, enhancing the extraction of meaningful information from unstructured data."
    },
    "Zero-Shot Classification": {
        "models": ["deberta_base_zero_shot_classifier_mnli_anli_v3"],
        "description": "The 'deberta_base_zero_shot_classifier_mnli_anli_v3' model provides flexible text classification without needing training data for specific categories. It is ideal for dynamic scenarios where text needs to be categorized into topics like urgent issues, technology, or sports without prior labeling."
    },
    "Sequence Classification": {
        "models": ["deberta_v3_base_sequence_classifier_imdb"],
        "description": "The 'deberta_v3_base_sequence_classifier_imdb' model is proficient in sequence classification tasks, such as sentiment analysis and document categorization. It effectively determines the sentiment of reviews, classifies text, and sorts documents based on their content and context."
    },
    "Question Answering": {
        "models": ["deberta_v3_xsmall_qa_squad2"],
        "description": "The 'deberta_v3_xsmall_qa_squad2' model is designed for precise question answering. It excels at extracting answers from a given context, making it suitable for developing advanced QA systems, enhancing customer support, and retrieving specific information from text."
    }
}

# Sidebar content
task = st.sidebar.selectbox("Choose the task", list(tasks_models_descriptions.keys()))
model = st.sidebar.selectbox("Choose the pretrained model", tasks_models_descriptions[task]["models"], help="For more info about the models visit: https://sparknlp.org/models")

# Reference notebook link in sidebar
link = """
<a href="https://github.com/JohnSnowLabs/spark-nlp-workshop/blob/357691d18373d6e8f13b5b1015137a398fd0a45f/Spark_NLP_Udemy_MOOC/Open_Source/17.01.Transformers-based_Embeddings.ipynb#L103">
    <img src="https://colab.research.google.com/assets/colab-badge.svg" style="zoom: 1.3" alt="Open In Colab"/>
</a>
"""
st.sidebar.markdown('Reference notebook:')
st.sidebar.markdown(link, unsafe_allow_html=True)

# Page content
title, sub_title = (f'DeBERTa for {task}', tasks_models_descriptions[task]["description"])
st.markdown(f'<div class="main-title">{title}</div>', unsafe_allow_html=True)
container = st.container(border=True)
container.write(sub_title)

# Load examples
examples_mapping = {
    "Token Classification": [
        "William Henry Gates III (born October 28, 1955) is an American business magnate, software developer, investor, and philanthropist. He is best known as the co-founder of Microsoft Corporation. During his career at Microsoft, Gates held the positions of chairman, chief executive officer (CEO), president and chief software architect, while also being the largest individual shareholder until May 2014. He is one of the best-known entrepreneurs and pioneers of the microcomputer revolution of the 1970s and 1980s. Born and raised in Seattle, Washington, Gates co-founded Microsoft with childhood friend Paul Allen in 1975, in Albuquerque, New Mexico; it went on to become the world's largest personal computer software company. Gates led the company as chairman and CEO until stepping down as CEO in January 2000, but he remained chairman and became chief software architect. During the late 1990s, Gates had been criticized for his business tactics, which have been considered anti-competitive. This opinion has been upheld by numerous court rulings. In June 2006, Gates announced that he would be transitioning to a part-time role at Microsoft and full-time work at the Bill & Melinda Gates Foundation, the private charitable foundation that he and his wife, Melinda Gates, established in 2000. He gradually transferred his duties to Ray Ozzie and Craig Mundie. He stepped down as chairman of Microsoft in February 2014 and assumed a new post as technology adviser to support the newly appointed CEO Satya Nadella.",
        "The Mona Lisa is a 16th century oil painting created by Leonardo. It's held at the Louvre in Paris.",
        "When Sebastian Thrun started working on self-driving cars at Google in 2007, few people outside of the company took him seriously. “I can tell you very senior CEOs of major American car companies would shake my hand and turn away because I wasn’t worth talking to,” said Thrun, now the co-founder and CEO of online higher education startup Udacity, in an interview with Recode earlier this week.",
        "Facebook is a social networking service launched as TheFacebook on February 4, 2004. It was founded by Mark Zuckerberg with his college roommates and fellow Harvard University students Eduardo Saverin, Andrew McCollum, Dustin Moskovitz and Chris Hughes. The website's membership was initially limited by the founders to Harvard students, but was expanded to other colleges in the Boston area, the Ivy League, and gradually most universities in the United States and Canada.",
        "The history of natural language processing generally started in the 1950s, although work can be found from earlier periods. In 1950, Alan Turing published an article titled 'Computing Machinery and Intelligence' which proposed what is now called the Turing test as a criterion of intelligence",
        "Geoffrey Everest Hinton is an English Canadian cognitive psychologist and computer scientist, most noted for his work on artificial neural networks. Since 2013 he divides his time working for Google and the University of Toronto. In 2017, he cofounded and became the Chief Scientific Advisor of the Vector Institute in Toronto.",
        "When I told John that I wanted to move to Alaska, he warned me that I'd have trouble finding a Starbucks there.",
        "Steven Paul Jobs was an American business magnate, industrial designer, investor, and media proprietor. He was the chairman, chief executive officer (CEO), and co-founder of Apple Inc., the chairman and majority shareholder of Pixar, a member of The Walt Disney Company's board of directors following its acquisition of Pixar, and the founder, chairman, and CEO of NeXT. Jobs is widely recognized as a pioneer of the personal computer revolution of the 1970s and 1980s, along with Apple co-founder Steve Wozniak. Jobs was born in San Francisco, California, and put up for adoption. He was raised in the San Francisco Bay Area. He attended Reed College in 1972 before dropping out that same year, and traveled through India in 1974 seeking enlightenment and studying Zen Buddhism.",
        "Titanic is a 1997 American epic romance and disaster film directed, written, co-produced, and co-edited by James Cameron. Incorporating both historical and fictionalized aspects, it is based on accounts of the sinking of the RMS Titanic, and stars Leonardo DiCaprio and Kate Winslet as members of different social classes who fall in love aboard the ship during its ill-fated maiden voyage.",
        "Other than being the king of the north, John Snow is an English physician and a leader in the development of anaesthesia and medical hygiene. He is considered the first to use data to fight a cholera outbreak in 1834."
    ],
    "Zero-Shot Classification": [
        "In today’s world, staying updated with urgent information is crucial as events can unfold rapidly and require immediate attention.",  # Urgent
        "Mobile technology has become indispensable, allowing us to access news, updates, and connect with others no matter where we are.",  # Mobile
        "For those who love to travel, the convenience of mobile apps has transformed how we plan and experience trips, providing real-time updates on flights, accommodations, and local attractions.",  # Travel
        "The entertainment industry continually offers new movies that captivate audiences with their storytelling and visuals, providing a wide range of genres to suit every taste.",  # Movie
        "Music is an integral part of modern life, with streaming platforms making it easy to discover new artists and enjoy favorite tunes anytime, anywhere.",  # Music
        "Sports enthusiasts follow games and matches closely, with live updates and detailed statistics available at their fingertips, enhancing the excitement of every game.",  # Sport
        "Weather forecasts play a vital role in daily planning, offering accurate and timely information to help us prepare for various weather conditions and adjust our plans accordingly.",  # Weather
        "Technology continues to evolve rapidly, driving innovation across all sectors and improving our everyday lives through smarter devices, advanced software, and enhanced connectivity."  # Technology
    ],
    "Sequence Classification": [
        "This movie was absolutely fantastic! The storyline was gripping, the characters were well-developed, and the cinematography was stunning. I was on the edge of my seat the entire time.",
        "A heartwarming and beautiful film. The performances were top-notch, and the direction was flawless. This is easily one of the best movies I've seen this year.",
        "What a delightful surprise! The humor was spot on, and the plot was refreshingly original. The cast did an amazing job bringing the characters to life. Highly recommended!",
        "This was one of the worst movies I’ve ever seen. The plot was predictable, the acting was wooden, and the pacing was painfully slow. I couldn’t wait for it to end.",
        "A complete waste of time. The movie lacked any real substance or direction, and the dialogue was cringe-worthy. I wouldn’t recommend this to anyone.",
        "I had high hopes for this film, but it turned out to be a huge disappointment. The story was disjointed, and the special effects were laughably bad. Don’t bother watching this one.",
        "The movie was okay, but nothing special. It had a few good moments, but overall, it felt pretty average. Not something I would watch again, but it wasn’t terrible either.",
        "An average film with a decent plot. The acting was passable, but it didn't leave much of an impression on me. It's a movie you might watch once and forget about.",
        "This movie was neither good nor bad, just kind of there. It had some interesting ideas, but they weren’t executed very well. It’s a film you could take or leave."
    ],
    "Question Answering": {
        """What does increased oxygen concentrations in the patient’s lungs displace?""": """Hyperbaric (high-pressure) medicine uses special oxygen chambers to increase the partial pressure of O 2 around the patient and, when needed, the medical staff. Carbon monoxide poisoning, gas gangrene, and decompression sickness (the ’bends’) are sometimes treated using these devices. Increased O 2 concentration in the lungs helps to displace carbon monoxide from the heme group of hemoglobin. Oxygen gas is poisonous to the anaerobic bacteria that cause gas gangrene, so increasing its partial pressure helps kill them. Decompression sickness occurs in divers who decompress too quickly after a dive, resulting in bubbles of inert gas, mostly nitrogen and helium, forming in their blood. Increasing the pressure of O 2 as soon as possible is part of the treatment.""",
        """What category of game is Legend of Zelda: Twilight Princess?""": """The Legend of Zelda: Twilight Princess (Japanese: ゼルダの伝説 トワイライトプリンセス, Hepburn: Zeruda no Densetsu: Towairaito Purinsesu?) is an action-adventure game developed and published by Nintendo for the GameCube and Wii home video game consoles. It is the thirteenth installment in the The Legend of Zelda series. Originally planned for release on the GameCube in November 2005, Twilight Princess was delayed by Nintendo to allow its developers to refine the game, add more content, and port it to the Wii. The Wii version was released alongside the console in North America in November 2006, and in Japan, Europe, and Australia the following month. The GameCube version was released worldwide in December 2006.""",
        """Who is the founder of Alibaba Group?""": """Alibaba Group founder Jack Ma has made his first appearance since Chinese regulators cracked down on his business empire. His absence had fuelled speculation over his whereabouts amid increasing official scrutiny of his businesses. The billionaire met 100 rural teachers in China via a video meeting on Wednesday, according to local government media. Alibaba shares surged 5% on Hong Kong's stock exchange on the news.""",
        """For what instrument did Frédéric primarily write?""": """Frédéric François Chopin (/ˈʃoʊpæn/; French pronunciation: [fʁe.de.ʁik fʁɑ̃.swa ʃɔ.pɛ̃]; 22 February or 1 March 1810 – 17 October 1849), born Fryderyk Franciszek Chopin,[n 1] was a Polish and French (by citizenship and birth of father) composer and a virtuoso pianist of the Romantic era, who wrote primarily for the solo piano. He gained and has maintained renown worldwide as one of the leading musicians of his era, whose "poetic genius was based on a professional technique that was without equal in his generation." Chopin was born in what was then the Duchy of Warsaw, and grew up in Warsaw, which after 1815 became part of Congress Poland. A child prodigy, he completed his musical education and composed his earlier works in Warsaw before leaving Poland at the age of 20, less than a month before the outbreak of the November 1830 Uprising.""",
        """The most populated city in the United States is which city?""": """New York—often called New York City or the City of New York to distinguish it from the State of New York, of which it is a part—is the most populous city in the United States and the center of the New York metropolitan area, the premier gateway for legal immigration to the United States and one of the most populous urban agglomerations in the world. A global power city, New York exerts a significant impact upon commerce, finance, media, art, fashion, research, technology, education, and entertainment, its fast pace defining the term New York minute. Home to the headquarters of the United Nations, New York is an important center for international diplomacy and has been described as the cultural and financial capital of the world."""
    }
}

if task == 'Question Answering':
    examples = list(examples_mapping[task].keys())
    selected_text = st.selectbox('Select an Example:', examples)
    st.subheader('Try it yourself!')
    custom_input_question = st.text_input('Create a question')
    custom_input_context = st.text_input("Create its context")

    custom_examples = {}

    st.subheader('Selected Text')

    if custom_input_question and custom_input_context:
        QUESTION = custom_input_question
        CONTEXT = custom_input_context
    elif selected_text:
        QUESTION = selected_text
        CONTEXT = examples_mapping[task][selected_text]

    st.markdown(f"**Question:** {QUESTION}")
    st.markdown(f"**Context:** {CONTEXT}")

else:
    examples = examples_mapping[task]
    selected_text = st.selectbox("Select an example", examples)
    custom_input = st.text_input("Try it with your own sentence!")

    if task == 'Zero-Shot Classification':
        zeroShotLabels = ["urgent", "mobile", "travel", "movie", "music", "sport", "weather", "technology"]
        labels = st_tags(
            label='Select labels',
            text='Press enter to add more',
            value=zeroShotLabels,
            suggestions=[
                "Positive", "Negative", "Neutral",
                "Urgent", "Mobile", "Travel", "Movie", "Music", "Sport", "Weather", "Technology",
                "Happiness", "Sadness", "Anger", "Fear", "Surprise", "Disgust",
                "Informational", "Navigational", "Transactional", "Commercial Investigation",
                "Politics", "Business", "Sports", "Entertainment", "Health", "Science",
                "Product Quality", "Delivery Experience", "Customer Service", "Pricing", "Return Policy",
                "Education", "Finance", "Lifestyle", "Fashion", "Food", "Art", "History",
                "Culture", "Environment", "Real Estate", "Automotive", "Travel", "Fitness", "Career"],
            maxtags=-1)

try:
    # custom_input is only defined outside the Question Answering branch;
    # the NameError fallback keeps the selected example for that task.
    text_to_analyze = custom_input if custom_input else selected_text
    st.subheader('Full example text')
    HTML_WRAPPER = """<div class="scroll entities" style="overflow-x: auto; border: 1px solid #e6e9ef; border-radius: 0.25rem; padding: 1rem; margin-bottom: 2.5rem; white-space:pre-wrap">{}</div>"""
    st.markdown(HTML_WRAPPER.format(text_to_analyze), unsafe_allow_html=True)
except NameError:
    text_to_analyze = selected_text

# Initialize Spark and create pipeline
spark = init_spark()

if task == 'Zero-Shot Classification':
    # Use the user-edited tags as the candidate labels
    pipeline = create_pipeline(model, task, labels)
else:
    pipeline = create_pipeline(model, task)

try:
    # QUESTION/CONTEXT exist only for Question Answering; other tasks fall back
    output = fit_data(pipeline, text_to_analyze, task, QUESTION, CONTEXT)
except NameError:
    output = fit_data(pipeline, text_to_analyze, task)

# Display matched sentence
st.subheader("Prediction:")

if task == 'Token Classification':
    abbreviation_mapping = {'R': 'PER', 'G': 'ORG', 'C': 'LOC', 'SC': 'MISC'}
    results = {
        'Document': output[0]['document'][0].result,
        'NER Chunk': [n.result for n in output[0]['ner_chunk']],
        'NER Label': [abbreviation_mapping.get(n.metadata['entity'], 'UNKNOWN') for n in output[0]['ner_chunk']]
    }
    annotate(results)
    df = pd.DataFrame({'NER Chunk': results['NER Chunk'], 'NER Label': results['NER Label']})
    df.index += 1
    st.dataframe(df)

elif task == 'Zero-Shot Classification':
    st.markdown(f"Document Classified as: **{output[0]['class'][0].result}**")

elif task == 'Sequence Classification':
    st.markdown(f"Classified as: **{output[0]['class'][0].result}**")

elif task == "Question Answering":
    output_text = "".join(output[0][0])
    st.markdown(f"Answer: **{output_text}**")
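The chunk-highlighting logic in annotate() can be exercised without Spark or Streamlit; a minimal pure-Python sketch with made-up chunks and labels (annotated_text would normally render the resulting segments):

# Standalone sketch of the segment-splitting logic used by annotate()
document = "Gates co-founded Microsoft with Paul Allen."
chunks = ["Gates", "Microsoft", "Paul Allen"]   # hypothetical NER output
labels = ["PER", "ORG", "PER"]

segments = []
for chunk, label in zip(chunks, labels):
    before, document = document.split(chunk, 1)
    if before:
        segments.append(before)          # plain text before the entity
    segments.append((chunk, label))      # (entity text, entity label) pair
if document:
    segments.append(document)            # trailing plain text

print(segments)
# [('Gates', 'PER'), ' co-founded ', ('Microsoft', 'ORG'), ' with ', ('Paul Allen', 'PER'), '.']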
Demo.py
ADDED
@@ -0,0 +1,248 @@
import streamlit as st
import sparknlp
import os
import pandas as pd

from sparknlp.base import *
from sparknlp.annotator import *
from pyspark.ml import Pipeline
from sparknlp.pretrained import PretrainedPipeline
from annotated_text import annotated_text
from streamlit_tags import st_tags

# Page configuration
st.set_page_config(
    layout="wide",
    initial_sidebar_state="auto"
)

# CSS for styling
st.markdown("""
    <style>
        .main-title {
            font-size: 36px;
            color: #4A90E2;
            font-weight: bold;
            text-align: center;
        }
        .section {
            background-color: #f9f9f9;
            padding: 10px;
            border-radius: 10px;
            margin-top: 10px;
        }
        .section p, .section ul {
            color: #666666;
        }
    </style>
""", unsafe_allow_html=True)

@st.cache_resource
def init_spark():
    return sparknlp.start()

@st.cache_resource
def create_pipeline(model, task, zeroShotLabels=['']):
    document_assembler = DocumentAssembler() \
        .setInputCol('text') \
        .setOutputCol('document')

    sentence_detector = SentenceDetectorDLModel.pretrained("sentence_detector_dl", "xx") \
        .setInputCols(["document"]) \
        .setOutputCol("sentence")

    tokenizer = Tokenizer() \
        .setInputCols(['sentence']) \
        .setOutputCol('token')

    if task == "Token Classification":
        TCclassifier = DeBertaForTokenClassification \
            .pretrained(model, "en") \
            .setInputCols(["sentence", "token"]) \
            .setOutputCol("ner") \
            .setCaseSensitive(True) \
            .setMaxSentenceLength(512)

        ner_converter = NerConverter() \
            .setInputCols(['sentence', 'token', 'ner']) \
            .setOutputCol('ner_chunk')

        TCpipeline = Pipeline(stages=[document_assembler, sentence_detector, tokenizer, TCclassifier, ner_converter])
        return TCpipeline

    elif task == "Zero-Shot Classification":
        ZSCtokenizer = Tokenizer() \
            .setInputCols(['document']) \
            .setOutputCol('token')

        zeroShotClassifier = DeBertaForZeroShotClassification \
            .pretrained('deberta_base_zero_shot_classifier_mnli_anli_v3', 'en') \
            .setInputCols(['token', 'document']) \
            .setOutputCol('class') \
            .setCaseSensitive(False) \
            .setMaxSentenceLength(512) \
            .setCandidateLabels(zeroShotLabels)

        ZSCpipeline = Pipeline(stages=[document_assembler, ZSCtokenizer, zeroShotClassifier])
        return ZSCpipeline

    elif task == "Sequence Classification":
        SCtokenizer = Tokenizer() \
            .setInputCols(['document']) \
            .setOutputCol('token')

        sequence_classifier = DeBertaForSequenceClassification \
            .pretrained("deberta_v3_base_sequence_classifier_imdb", "en") \
            .setInputCols(["document", "token"]) \
            .setOutputCol("class")

        SCpipeline = Pipeline(stages=[document_assembler, SCtokenizer, sequence_classifier])
        return SCpipeline

def fit_data(pipeline, data, task):
    # All three tasks annotate the raw string in memory through a LightPipeline
    if task in ['Token Classification', 'Sequence Classification', 'Zero-Shot Classification']:
        empty_df = spark.createDataFrame([['']]).toDF('text')
        pipeline_model = pipeline.fit(empty_df)
        model = LightPipeline(pipeline_model)
        result = model.fullAnnotate(data)
        return result

def annotate(data):
    document, chunks, labels = data["Document"], data["NER Chunk"], data["NER Label"]
    annotated_words = []
    for chunk, label in zip(chunks, labels):
        parts = document.split(chunk, 1)
        if parts[0]:
            annotated_words.append(parts[0])
        annotated_words.append((chunk, label))
        document = parts[1]
    if document:
        annotated_words.append(document)
    annotated_text(*annotated_words)

tasks_models_descriptions = {
    "Token Classification": {
        "models": ["deberta_v3_small_token_classifier_conll03", "deberta_v3_large_token_classifier_conll03"],
        "description": "The 'deberta_v3_small_token_classifier_conll03' and 'deberta_v3_large_token_classifier_conll03' models are adept at token classification tasks, including named entity recognition (NER). They identify and categorize tokens in text, such as names, dates, and locations, enhancing the extraction of meaningful information from unstructured data."
    },
    "Zero-Shot Classification": {
        "models": ["deberta_base_zero_shot_classifier_mnli_anli_v3"],
        "description": "The 'deberta_base_zero_shot_classifier_mnli_anli_v3' model provides flexible text classification without needing training data for specific categories. It is ideal for dynamic scenarios where text needs to be categorized into topics like urgent issues, technology, or sports without prior labeling."
    },
    "Sequence Classification": {
        "models": ["deberta_v3_base_sequence_classifier_imdb"],
        "description": "The 'deberta_v3_base_sequence_classifier_imdb' model is proficient in sequence classification tasks, such as sentiment analysis and document categorization. It effectively determines the sentiment of reviews, classifies text, and sorts documents based on their content and context."
    }
}

# Sidebar content
task = st.sidebar.selectbox("Choose the task", list(tasks_models_descriptions.keys()))
model = st.sidebar.selectbox("Choose the pretrained model", tasks_models_descriptions[task]["models"], help="For more info about the models visit: https://sparknlp.org/models")

# Reference notebook link in sidebar
link = """
<a href="https://github.com/JohnSnowLabs/spark-nlp-workshop/blob/357691d18373d6e8f13b5b1015137a398fd0a45f/Spark_NLP_Udemy_MOOC/Open_Source/17.01.Transformers-based_Embeddings.ipynb#L103">
    <img src="https://colab.research.google.com/assets/colab-badge.svg" style="zoom: 1.3" alt="Open In Colab"/>
</a>
"""
st.sidebar.markdown('Reference notebook:')
st.sidebar.markdown(link, unsafe_allow_html=True)

# Page content
title, sub_title = (f'DeBERTa for {task}', tasks_models_descriptions[task]["description"])
st.markdown(f'<div class="main-title">{title}</div>', unsafe_allow_html=True)
container = st.container(border=True)
container.write(sub_title)

# Load examples
examples_mapping = {
    "Token Classification": [
        "William Henry Gates III (born October 28, 1955) is an American business magnate, software developer, investor, and philanthropist. He is best known as the co-founder of Microsoft Corporation. During his career at Microsoft, Gates held the positions of chairman, chief executive officer (CEO), president and chief software architect, while also being the largest individual shareholder until May 2014. He is one of the best-known entrepreneurs and pioneers of the microcomputer revolution of the 1970s and 1980s. Born and raised in Seattle, Washington, Gates co-founded Microsoft with childhood friend Paul Allen in 1975, in Albuquerque, New Mexico; it went on to become the world's largest personal computer software company. Gates led the company as chairman and CEO until stepping down as CEO in January 2000, but he remained chairman and became chief software architect. During the late 1990s, Gates had been criticized for his business tactics, which have been considered anti-competitive. This opinion has been upheld by numerous court rulings. In June 2006, Gates announced that he would be transitioning to a part-time role at Microsoft and full-time work at the Bill & Melinda Gates Foundation, the private charitable foundation that he and his wife, Melinda Gates, established in 2000. He gradually transferred his duties to Ray Ozzie and Craig Mundie. He stepped down as chairman of Microsoft in February 2014 and assumed a new post as technology adviser to support the newly appointed CEO Satya Nadella.",
        "The Mona Lisa is a 16th century oil painting created by Leonardo. It's held at the Louvre in Paris.",
        "When Sebastian Thrun started working on self-driving cars at Google in 2007, few people outside of the company took him seriously. “I can tell you very senior CEOs of major American car companies would shake my hand and turn away because I wasn’t worth talking to,” said Thrun, now the co-founder and CEO of online higher education startup Udacity, in an interview with Recode earlier this week.",
        "Facebook is a social networking service launched as TheFacebook on February 4, 2004. It was founded by Mark Zuckerberg with his college roommates and fellow Harvard University students Eduardo Saverin, Andrew McCollum, Dustin Moskovitz and Chris Hughes. The website's membership was initially limited by the founders to Harvard students, but was expanded to other colleges in the Boston area, the Ivy League, and gradually most universities in the United States and Canada.",
        "The history of natural language processing generally started in the 1950s, although work can be found from earlier periods. In 1950, Alan Turing published an article titled 'Computing Machinery and Intelligence' which proposed what is now called the Turing test as a criterion of intelligence",
        "Geoffrey Everest Hinton is an English Canadian cognitive psychologist and computer scientist, most noted for his work on artificial neural networks. Since 2013 he divides his time working for Google and the University of Toronto. In 2017, he cofounded and became the Chief Scientific Advisor of the Vector Institute in Toronto.",
        "When I told John that I wanted to move to Alaska, he warned me that I'd have trouble finding a Starbucks there.",
        "Steven Paul Jobs was an American business magnate, industrial designer, investor, and media proprietor. He was the chairman, chief executive officer (CEO), and co-founder of Apple Inc., the chairman and majority shareholder of Pixar, a member of The Walt Disney Company's board of directors following its acquisition of Pixar, and the founder, chairman, and CEO of NeXT. Jobs is widely recognized as a pioneer of the personal computer revolution of the 1970s and 1980s, along with Apple co-founder Steve Wozniak. Jobs was born in San Francisco, California, and put up for adoption. He was raised in the San Francisco Bay Area. He attended Reed College in 1972 before dropping out that same year, and traveled through India in 1974 seeking enlightenment and studying Zen Buddhism.",
        "Titanic is a 1997 American epic romance and disaster film directed, written, co-produced, and co-edited by James Cameron. Incorporating both historical and fictionalized aspects, it is based on accounts of the sinking of the RMS Titanic, and stars Leonardo DiCaprio and Kate Winslet as members of different social classes who fall in love aboard the ship during its ill-fated maiden voyage.",
        "Other than being the king of the north, John Snow is an English physician and a leader in the development of anaesthesia and medical hygiene. He is considered the first to use data to fight a cholera outbreak in 1834."
    ],
    "Zero-Shot Classification": [
        "In today’s world, staying updated with urgent information is crucial as events can unfold rapidly and require immediate attention.",  # Urgent
        "Mobile technology has become indispensable, allowing us to access news, updates, and connect with others no matter where we are.",  # Mobile
        "For those who love to travel, the convenience of mobile apps has transformed how we plan and experience trips, providing real-time updates on flights, accommodations, and local attractions.",  # Travel
        "The entertainment industry continually offers new movies that captivate audiences with their storytelling and visuals, providing a wide range of genres to suit every taste.",  # Movie
        "Music is an integral part of modern life, with streaming platforms making it easy to discover new artists and enjoy favorite tunes anytime, anywhere.",  # Music
        "Sports enthusiasts follow games and matches closely, with live updates and detailed statistics available at their fingertips, enhancing the excitement of every game.",  # Sport
        "Weather forecasts play a vital role in daily planning, offering accurate and timely information to help us prepare for various weather conditions and adjust our plans accordingly.",  # Weather
        "Technology continues to evolve rapidly, driving innovation across all sectors and improving our everyday lives through smarter devices, advanced software, and enhanced connectivity."  # Technology
    ],
    "Sequence Classification": [
        "This movie was absolutely fantastic! The storyline was gripping, the characters were well-developed, and the cinematography was stunning. I was on the edge of my seat the entire time.",
        "A heartwarming and beautiful film. The performances were top-notch, and the direction was flawless. This is easily one of the best movies I've seen this year.",
        "What a delightful surprise! The humor was spot on, and the plot was refreshingly original. The cast did an amazing job bringing the characters to life. Highly recommended!",
        "This was one of the worst movies I’ve ever seen. The plot was predictable, the acting was wooden, and the pacing was painfully slow. I couldn’t wait for it to end.",
        "A complete waste of time. The movie lacked any real substance or direction, and the dialogue was cringe-worthy. I wouldn’t recommend this to anyone.",
        "I had high hopes for this film, but it turned out to be a huge disappointment. The story was disjointed, and the special effects were laughably bad. Don’t bother watching this one.",
        "The movie was okay, but nothing special. It had a few good moments, but overall, it felt pretty average. Not something I would watch again, but it wasn’t terrible either.",
        "An average film with a decent plot. The acting was passable, but it didn't leave much of an impression on me. It's a movie you might watch once and forget about.",
        "This movie was neither good nor bad, just kind of there. It had some interesting ideas, but they weren’t executed very well. It’s a film you could take or leave."
    ]
}

examples = examples_mapping[task]
selected_text = st.selectbox("Select an example", examples)
custom_input = st.text_input("Try it with your own sentence!")

if task == 'Zero-Shot Classification':
    zeroShotLabels = ["urgent", "mobile", "travel", "movie", "music", "sport", "weather", "technology"]
    labels = st_tags(
        label='Select labels',
        text='Press enter to add more',
        value=zeroShotLabels,
        suggestions=[
            "Positive", "Negative", "Neutral",
            "Urgent", "Mobile", "Travel", "Movie", "Music", "Sport", "Weather", "Technology",
            "Happiness", "Sadness", "Anger", "Fear", "Surprise", "Disgust",
            "Informational", "Navigational", "Transactional", "Commercial Investigation",
            "Politics", "Business", "Sports", "Entertainment", "Health", "Science",
            "Product Quality", "Delivery Experience", "Customer Service", "Pricing", "Return Policy",
            "Education", "Finance", "Lifestyle", "Fashion", "Food", "Art", "History",
            "Culture", "Environment", "Real Estate", "Automotive", "Travel", "Fitness", "Career"],
        maxtags=-1)

text_to_analyze = custom_input if custom_input else selected_text
st.subheader('Full example text')
HTML_WRAPPER = """<div class="scroll entities" style="overflow-x: auto; border: 1px solid #e6e9ef; border-radius: 0.25rem; padding: 1rem; margin-bottom: 2.5rem; white-space:pre-wrap">{}</div>"""
st.markdown(HTML_WRAPPER.format(text_to_analyze), unsafe_allow_html=True)

# Initialize Spark and create pipeline
spark = init_spark()
if task == 'Zero-Shot Classification':
    # Use the user-edited tags as the candidate labels
    pipeline = create_pipeline(model, task, labels)
else:
    pipeline = create_pipeline(model, task)
output = fit_data(pipeline, text_to_analyze, task)

# Display matched sentence
st.subheader("Prediction:")

if task == 'Token Classification':
    results = {
        'Document': output[0]['document'][0].result,
        'NER Chunk': [n.result for n in output[0]['ner_chunk']],
        'NER Label': [n.metadata['entity'] for n in output[0]['ner_chunk']]
    }
    annotate(results)
    df = pd.DataFrame({'NER Chunk': results['NER Chunk'], 'NER Label': results['NER Label']})
    df.index += 1
    st.dataframe(df)

elif task == 'Zero-Shot Classification':
    st.markdown(f"Document Classified as: **{output[0]['class'][0].result}**")

elif task == 'Sequence Classification':
    st.markdown(f"Classified as: **{output[0]['class'][0].result}**")
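The same sequence-classification pipeline can also be driven outside Streamlit; a minimal sketch, assuming spark-nlp and pyspark are installed (the pretrained model is downloaded on first use):

import sparknlp
from sparknlp.base import DocumentAssembler, LightPipeline
from sparknlp.annotator import Tokenizer, DeBertaForSequenceClassification
from pyspark.ml import Pipeline

spark = sparknlp.start()

document_assembler = DocumentAssembler().setInputCol("text").setOutputCol("document")
tokenizer = Tokenizer().setInputCols(["document"]).setOutputCol("token")
classifier = DeBertaForSequenceClassification.pretrained(
    "deberta_v3_base_sequence_classifier_imdb", "en"
).setInputCols(["document", "token"]).setOutputCol("class")

# Fit on an empty frame, then annotate plain strings in memory
pipeline = Pipeline(stages=[document_assembler, tokenizer, classifier])
empty_df = spark.createDataFrame([['']]).toDF("text")
light = LightPipeline(pipeline.fit(empty_df))

print(light.fullAnnotate("This movie was absolutely fantastic!")[0]["class"][0].result)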
Dockerfile
ADDED
@@ -0,0 +1,72 @@
# Download base image ubuntu 18.04
FROM ubuntu:18.04

# Set environment variables
ENV NB_USER jovyan
ENV NB_UID 1000
ENV HOME /home/${NB_USER}
ENV JAVA_HOME /usr/lib/jvm/java-8-openjdk-amd64/

# Install required packages
RUN apt-get update && apt-get install -y \
    tar \
    wget \
    bash \
    rsync \
    gcc \
    libfreetype6-dev \
    libhdf5-serial-dev \
    libpng-dev \
    libzmq3-dev \
    python3 \
    python3-dev \
    python3-pip \
    unzip \
    pkg-config \
    software-properties-common \
    graphviz \
    openjdk-8-jdk \
    ant \
    ca-certificates-java \
    && apt-get clean \
    && update-ca-certificates -f

# Install Python 3.8 and pip
RUN add-apt-repository ppa:deadsnakes/ppa \
    && apt-get update \
    && apt-get install -y python3.8 python3-pip \
    && apt-get clean

# Set up JAVA_HOME
RUN echo "export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64/" >> /etc/profile \
    && echo "export PATH=\$JAVA_HOME/bin:\$PATH" >> /etc/profile

# Create a new user named "jovyan" with user ID 1000
RUN useradd -m -u ${NB_UID} ${NB_USER}

# Switch to the "jovyan" user
USER ${NB_USER}

# Set home and path variables for the user
ENV HOME=/home/${NB_USER} \
    PATH=/home/${NB_USER}/.local/bin:$PATH

# Set up PySpark to use Python 3.8 for both driver and workers
ENV PYSPARK_PYTHON=/usr/bin/python3.8
ENV PYSPARK_DRIVER_PYTHON=/usr/bin/python3.8

# Set the working directory to the user's home directory
WORKDIR ${HOME}

# Upgrade pip and install Python dependencies
RUN python3.8 -m pip install --upgrade pip
COPY requirements.txt /tmp/requirements.txt
RUN python3.8 -m pip install -r /tmp/requirements.txt

# Copy the application code into the container at /home/jovyan
COPY --chown=${NB_USER}:${NB_USER} . ${HOME}

# Expose port for Streamlit
EXPOSE 7860

# Define the entry point for the container
ENTRYPOINT ["streamlit", "run", "Demo.py", "--server.port=7860", "--server.address=0.0.0.0"]
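The image builds and runs with standard Docker commands; a sketch, with the tag deberta-demo chosen arbitrarily:

docker build -t deberta-demo .
docker run -p 7860:7860 deberta-demo
# The Streamlit app is then reachable at http://localhost:7860 (the port exposed above).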
pages/Workflow & Model Overview.py
ADDED
@@ -0,0 +1,479 @@
1 |
+
import streamlit as st
|
2 |
+
|
3 |
+
# Page configuration
|
4 |
+
st.set_page_config(
|
5 |
+
layout="wide",
|
6 |
+
initial_sidebar_state="auto"
|
7 |
+
)
|
8 |
+
|
9 |
+
# Custom CSS for better styling
|
10 |
+
st.markdown("""
|
11 |
+
<style>
|
12 |
+
.main-title {
|
13 |
+
font-size: 36px;
|
14 |
+
color: #4A90E2;
|
15 |
+
font-weight: bold;
|
16 |
+
text-align: center;
|
17 |
+
}
|
18 |
+
.sub-title {
|
19 |
+
font-size: 24px;
|
20 |
+
color: #4A90E2;
|
21 |
+
margin-top: 20px;
|
22 |
+
}
|
23 |
+
.section {
|
24 |
+
background-color: #f9f9f9;
|
25 |
+
padding: 15px;
|
26 |
+
border-radius: 10px;
|
27 |
+
margin-top: 20px;
|
28 |
+
}
|
29 |
+
.section h2 {
|
30 |
+
font-size: 22px;
|
31 |
+
color: #4A90E2;
|
32 |
+
}
|
33 |
+
.section p, .section ul {
|
34 |
+
color: #666666;
|
35 |
+
}
|
36 |
+
.link {
|
37 |
+
color: #4A90E2;
|
38 |
+
text-decoration: none;
|
39 |
+
}
|
40 |
+
.benchmark-table {
|
41 |
+
width: 100%;
|
42 |
+
border-collapse: collapse;
|
43 |
+
margin-top: 20px;
|
44 |
+
}
|
45 |
+
.benchmark-table th, .benchmark-table td {
|
46 |
+
border: 1px solid #ddd;
|
47 |
+
padding: 8px;
|
48 |
+
text-align: left;
|
49 |
+
}
|
50 |
+
.benchmark-table th {
|
51 |
+
background-color: #4A90E2;
|
52 |
+
color: white;
|
53 |
+
}
|
54 |
+
.benchmark-table td {
|
55 |
+
background-color: #f2f2f2;
|
56 |
+
}
|
57 |
+
</style>
|
58 |
+
""", unsafe_allow_html=True)
|
59 |
+
|
60 |
+
# Title
|
61 |
+
st.markdown('<div class="main-title">Introduction to DeBERTa Annotators in Spark NLP</div>', unsafe_allow_html=True)
|
62 |
+
|
63 |
+
# Subtitle
|
64 |
+
st.markdown("""
|
65 |
+
<div class="section">
|
66 |
+
<p>DeBERTa (Decoding-enhanced BERT with Disentangled Attention) is an advanced language model that builds upon BERT and RoBERTa, incorporating novel techniques such as disentangled attention and enhanced mask decoding. DeBERTa models excel in various NLP tasks, including text classification, token classification, masked language modeling, and question answering. Below, we provide an overview of the DeBERTa annotators for these tasks:</p>
|
67 |
+
</div>
|
68 |
+
""", unsafe_allow_html=True)
|
69 |
+
|
70 |
+
# Tabs for DeBERTa Annotators
|
71 |
+
tab1, tab2, tab3, tab4 = st.tabs(["DeBERTa for Token Classification", "DeBERTa for Sequence Classification", "DeBERTa for Zero Shot Classification", "DeBERTa for Question Answering"])
|
72 |
+
|
73 |
+
# Tab 1: DeBERTa for Token Classification
|
74 |
+
with tab1:
|
75 |
+
st.markdown("""
|
76 |
+
<div class="section">
|
77 |
+
<h2>Token Classification with Spark NLP</h2>
|
78 |
+
<p>The <strong>Token Classification</strong> task is a core component of Natural Language Processing (NLP), focusing on classifying tokens (words or subwords) in a text into predefined categories. This task is fundamental for various applications, such as Named Entity Recognition (NER), Part-of-Speech (POS) tagging, and more.</p>
|
79 |
+
<p>Spark NLP offers a robust suite of tools for token classification, leveraging state-of-the-art models like BERT, RoBERTa, and DeBERTa. These models have been fine-tuned on diverse datasets and are readily available in Spark NLP to cater to a wide range of token classification tasks.</p>
|
80 |
+
<p>Token classification with Spark NLP enables:</p>
|
81 |
+
<ul>
|
82 |
+
<li><strong>Named Entity Recognition (NER):</strong> Recognizing and categorizing entities such as locations (LOC), organizations (ORG), persons (PER), and more.</li>
|
83 |
+
<li><strong>Information Extraction:</strong> Extracting structured data from unstructured text for deeper analysis and processing.</li>
|
84 |
+
<li><strong>Text Categorization:</strong> Enhancing document retrieval, classification, and organization based on identified entities.</li>
|
85 |
+
</ul>
|
86 |
+
<p>Using Spark NLP for token classification tasks offers several advantages:</p>
|
87 |
+
<ul>
|
88 |
+
<li><strong>Scalability:</strong> Spark NLP is designed to scale seamlessly with Apache Spark, making it suitable for processing large volumes of text data efficiently.</li>
|
89 |
+
<li><strong>Flexibility:</strong> A wide array of pre-trained models are available, allowing you to select the model that best fits your specific task, whether it’s for NER, POS tagging, or another classification task.</li>
|
90 |
+
<li><strong>Ease of Use:</strong> Spark NLP integrates smoothly into your existing Spark pipeline, allowing for quick and easy implementation.</li>
|
91 |
+
<li><strong>Customization:</strong> Models can be fine-tuned or adapted to new domains, giving you the flexibility to tailor the solution to your specific needs.</li>
|
92 |
+
</ul>
|
93 |
+
</div>
|
94 |
+
""", unsafe_allow_html=True)
|
95 |
+
|
96 |
+
# General Information about Using Token Classification Models
|
97 |
+
st.markdown('<div class="sub-title">How to Use Token Classification Models in Spark NLP</div>', unsafe_allow_html=True)
|
98 |
+
st.markdown("""
|
99 |
+
<div class="section">
|
100 |
+
<p>To perform token classification in Spark NLP, one powerful model you can use is DeBERTa, which stands for <strong>Decoding-enhanced BERT with Disentangled Attention</strong>. DeBERTa improves upon earlier models like BERT and RoBERTa, offering superior performance in tasks such as Named Entity Recognition (NER). Below is a template for setting up a token classification pipeline in Spark NLP using DeBERTa. This approach is flexible, allowing you to adjust the pipeline and parameters to meet your specific needs while leveraging DeBERTa's advanced capabilities.</p>
|
101 |
+
</div>
|
102 |
+
""", unsafe_allow_html=True)
|
103 |
+
|
104 |
+
st.code('''
|
105 |
+
from sparknlp.base import *
|
106 |
+
from sparknlp.annotator import *
|
107 |
+
from pyspark.ml import Pipeline
|
108 |
+
from pyspark.sql.functions import col, expr
|
109 |
+
|
110 |
+
document_assembler = DocumentAssembler() \\
|
111 |
+
.setInputCol("text") \\
|
112 |
+
.setOutputCol("document")
|
113 |
+
|
114 |
+
tokenizer = Tokenizer() \\
|
115 |
+
.setInputCols(["document"]) \\
|
116 |
+
.setOutputCol("token")
|
117 |
+
|
118 |
+
# Example of loading a token classification model (e.g., BERT, RoBERTa, DeBERTa)
|
119 |
+
tokenClassifier = DeBertaForTokenClassification \\
|
120 |
+
.pretrained("deberta_v3_small_token_classifier_conll03", "en") \\
|
121 |
+
.setInputCols(["document", "token"]) \\
|
122 |
+
.setOutputCol("ner") \\
|
123 |
+
.setCaseSensitive(True) \\
|
124 |
+
.setMaxSentenceLength(512)
|
125 |
+
|
126 |
+
ner_converter = NerConverter() \\
|
127 |
+
.setInputCols(['document', 'token', 'ner']) \\
|
128 |
+
.setOutputCol('entities')
|
129 |
+
|
130 |
+
pipeline = Pipeline(stages=[
|
131 |
+
document_assembler,
|
132 |
+
tokenizer,
|
133 |
+
tokenClassifier,
|
134 |
+
ner_converter
|
135 |
+
])
|
136 |
+
|
137 |
+
data = spark.createDataFrame([["Spark NLP is an exceptional library for NLP tasks."]]).toDF("text")
|
138 |
+
result = pipeline.fit(data).transform(data)
|
139 |
+
|
140 |
+
result.selectExpr("explode(entities) as ner_chunk").select(
|
141 |
+
col("ner_chunk.result").alias("chunk"),
|
142 |
+
col("ner_chunk.metadata.entity").alias("ner_label")
|
143 |
+
).show(truncate=False)
|
144 |
+
''', language='python')
|
145 |
+
|
146 |
+
# Results Example
|
147 |
+
st.text("""
|
148 |
+
+--------------------------+---------+
|
149 |
+
|chunk |ner_label|
|
150 |
+
+--------------------------+---------+
|
151 |
+
|Spark NLP |ORG |
|
152 |
+
+--------------------------+---------+
|
153 |
+
""")

    # Model Info Section
    st.markdown('<div class="sub-title">Choosing the Right Model</div>', unsafe_allow_html=True)
    st.markdown("""
    <div class="section">
        <p>Spark NLP offers a variety of pre-trained models for token classification tasks, including BERT, RoBERTa, DeBERTa, and more. The choice of model can significantly impact the accuracy and performance of your task.</p>
        <p>To explore and choose the most suitable model for your specific needs, visit the <a class="link" href="https://sparknlp.org/models" target="_blank">Spark NLP Models Hub</a>. Here, you can find detailed information about each model, including its size, compatibility, and the specific tasks it excels at.</p>
    </div>
    """, unsafe_allow_html=True)

    st.markdown('<div class="sub-title">References</div>', unsafe_allow_html=True)
    st.markdown("""
    <div class="section">
        <ul>
            <li><a class="link" href="https://sparknlp.org/docs/en/transformers#debertafortokenclassification" target="_blank">DeBertaForTokenClassification in Spark NLP</a></li>
        </ul>
    </div>
    """, unsafe_allow_html=True)

# Tab 2: DeBERTa for Sequence Classification
with tab2:
    st.markdown("""
    <div class="section">
        <h2>Sequence Classification with Spark NLP</h2>
        <p><strong>Sequence Classification</strong> is a critical task in Natural Language Processing (NLP) where entire sequences of text (such as sentences or paragraphs) are classified into predefined categories. This task is essential for applications like sentiment analysis, document classification, and more.</p>
        <p>Spark NLP offers robust tools for sequence classification, utilizing advanced models such as BERT, RoBERTa, and DeBERTa. These models are pre-trained on diverse datasets and are readily available within Spark NLP, enabling you to address a wide range of sequence classification challenges.</p>
        <p>Sequence classification with Spark NLP supports:</p>
        <ul>
            <li><strong>Sentiment Analysis:</strong> Determining the sentiment expressed in a sequence, such as positive, negative, or neutral.</li>
            <li><strong>Document Classification:</strong> Categorizing documents into various classes based on their content.</li>
            <li><strong>Intent Detection:</strong> Identifying the underlying intent behind a sequence, often used in chatbot and virtual assistant applications.</li>
        </ul>
        <p>Leveraging Spark NLP for sequence classification offers several advantages:</p>
        <ul>
            <li><strong>Scalability:</strong> Spark NLP scales effortlessly with Apache Spark, making it well-suited for processing large-scale text data.</li>
            <li><strong>Flexibility:</strong> A broad selection of pre-trained models is available, allowing you to choose the most appropriate model for your specific task.</li>
            <li><strong>Ease of Integration:</strong> Spark NLP integrates smoothly into existing Spark pipelines, facilitating quick and efficient implementation.</li>
            <li><strong>Customizability:</strong> Models can be fine-tuned or adapted to different domains, providing tailored solutions for specific needs.</li>
        </ul>
    </div>
    """, unsafe_allow_html=True)

    # General Information about Using Sequence Classification Models
    st.markdown('<div class="sub-title">How to Use Sequence Classification Models in Spark NLP</div>', unsafe_allow_html=True)
    st.markdown("""
    <div class="section">
        <p>For sequence classification in Spark NLP, one powerful model you can use is DeBERTa, which stands for <strong>Decoding-enhanced BERT with Disentangled Attention</strong>. DeBERTa offers enhanced performance compared to earlier models like BERT and RoBERTa, especially for tasks such as sentiment analysis. Below is a template for setting up a sequence classification pipeline in Spark NLP using DeBERTa. This approach is adaptable, allowing you to adjust the pipeline and parameters to suit your specific requirements while utilizing DeBERTa's advanced features.</p>
    </div>
    """, unsafe_allow_html=True)

    st.code('''
from sparknlp.base import *
from sparknlp.annotator import *
from pyspark.ml import Pipeline

document_assembler = DocumentAssembler() \\
    .setInputCol("text") \\
    .setOutputCol("document")

tokenizer = Tokenizer() \\
    .setInputCols(["document"]) \\
    .setOutputCol("token")

# Example of loading a sequence classification model using DeBERTa
# (this one is fine-tuned on IMDB movie reviews for sentiment)
sequenceClassifier = DeBertaForSequenceClassification \\
    .pretrained("deberta_v3_base_sequence_classifier_imdb", "en") \\
    .setInputCols(["document", "token"]) \\
    .setOutputCol("class") \\
    .setCaseSensitive(True) \\
    .setMaxSentenceLength(512)

pipeline = Pipeline(stages=[
    document_assembler,
    tokenizer,
    sequenceClassifier
])

example = spark.createDataFrame([["I really liked that movie!"]]).toDF("text")
result = pipeline.fit(example).transform(example)

result.select("text", "class.result").show(truncate=False)
''', language='python')

    # Results Example
    st.text("""
    +---------------------------+----------+
    |text                       |result    |
    +---------------------------+----------+
    |I really liked that movie! |[positive]|
    +---------------------------+----------+
    """)

    # Model Info Section
    st.markdown('<div class="sub-title">Choosing the Right Model</div>', unsafe_allow_html=True)
    st.markdown("""
    <div class="section">
        <p>Spark NLP provides a diverse range of pre-trained models for sequence classification tasks, including BERT, RoBERTa, DeBERTa, and more. The model you choose can greatly impact the accuracy and performance of your task.</p>
        <p>To explore and select the model that best fits your specific needs, visit the <a class="link" href="https://sparknlp.org/models" target="_blank">Spark NLP Models Hub</a>. This resource offers detailed information about each model, including its size, compatibility, and the tasks it excels at.</p>
    </div>
    """, unsafe_allow_html=True)

    # References Section
    st.markdown('<div class="sub-title">References</div>', unsafe_allow_html=True)
    st.markdown("""
    <div class="section">
        <ul>
            <li><a class="link" href="https://sparknlp.org/docs/en/transformers#debertaforsequenceclassification" target="_blank">DeBertaForSequenceClassification in Spark NLP</a></li>
            <li><a class="link" href="https://arxiv.org/abs/2006.03654" target="_blank">DeBERTa: Decoding-enhanced BERT with Disentangled Attention (Research Paper)</a></li>
            <li><a class="link" href="https://www.microsoft.com/en-us/research/project/deberta/" target="_blank">Microsoft Research: DeBERTa</a></li>
            <li><a class="link" href="https://github.com/microsoft/DeBERTa" target="_blank">DeBERTa GitHub Repository</a></li>
            <li><a class="link" href="https://paperswithcode.com/task/sequence-classification" target="_blank">Sequence Classification: Research Papers with Code</a></li>
        </ul>
    </div>
    """, unsafe_allow_html=True)

# Tab 3: DeBERTa for Zero-Shot Classification
with tab3:
    st.markdown("""
    <div class="section">
        <h2>Zero-Shot Classification with Spark NLP</h2>
        <p><strong>Zero-Shot Classification</strong> is a technique in Natural Language Processing (NLP) that allows a model to classify text into categories that it has not been explicitly trained on. This approach is particularly useful when you have new, unseen classes or labels that were not part of the training data.</p>
        <p>Spark NLP provides powerful tools for zero-shot classification, leveraging models like DeBERTa. These models are trained to handle a wide range of classification tasks without requiring retraining on specific categories. This enables flexibility and adaptability for various classification needs.</p>
        <p>Zero-shot classification with Spark NLP facilitates:</p>
        <ul>
            <li><strong>Dynamic Categorization:</strong> Classify text into new categories without additional training.</li>
            <li><strong>Adaptability:</strong> Easily adapt to evolving classification needs and emerging topics.</li>
            <li><strong>Cost Efficiency:</strong> Reduce the need for extensive retraining and model updates for new classification tasks.</li>
        </ul>
        <p>Using Spark NLP for zero-shot classification offers several benefits:</p>
        <ul>
            <li><strong>Scalability:</strong> Spark NLP integrates with Apache Spark, making it capable of handling large-scale text data efficiently.</li>
            <li><strong>Flexibility:</strong> The zero-shot classification models can be used for various tasks without the need for task-specific retraining.</li>
            <li><strong>Ease of Implementation:</strong> Seamlessly integrate zero-shot classification into existing Spark pipelines for efficient processing.</li>
            <li><strong>Customizability:</strong> Define custom candidate labels and adapt the model to different classification needs.</li>
        </ul>
    </div>
    """, unsafe_allow_html=True)

    # General Information about Using Zero-Shot Classification Models
    st.markdown('<div class="sub-title">How to Use Zero-Shot Classification Models in Spark NLP</div>', unsafe_allow_html=True)
    st.markdown("""
    <div class="section">
        <p>For zero-shot classification in Spark NLP, one powerful model you can use is DeBERTa, which stands for <strong>Decoding-enhanced BERT with Disentangled Attention</strong>. DeBERTa's zero-shot classification capabilities enable it to classify text into categories without additional training on those specific categories. Below is a template for setting up a zero-shot classification pipeline in Spark NLP using DeBERTa. This approach is flexible, allowing you to adjust the pipeline and parameters to fit your specific needs while leveraging DeBERTa's advanced features.</p>
    </div>
    """, unsafe_allow_html=True)

    st.code('''
from sparknlp.base import *
from sparknlp.annotator import *
from pyspark.ml import Pipeline

document_assembler = DocumentAssembler() \\
    .setInputCol("text") \\
    .setOutputCol("document")

tokenizer = Tokenizer() \\
    .setInputCols(["document"]) \\
    .setOutputCol("token")

# Example of loading a zero-shot classification model using DeBERTa;
# the candidate labels can be any strings, with no retraining required
zeroShotClassifier = DeBertaForZeroShotClassification \\
    .pretrained("deberta_base_zero_shot_classifier_mnli_anli_v3", "en") \\
    .setInputCols(["token", "document"]) \\
    .setOutputCol("class") \\
    .setCaseSensitive(True) \\
    .setMaxSentenceLength(512) \\
    .setCandidateLabels(["urgent", "mobile", "travel", "movie", "music", "sport", "weather", "technology"])

pipeline = Pipeline(stages=[
    document_assembler,
    tokenizer,
    zeroShotClassifier
])

example = spark.createDataFrame([["I have a problem with my iphone that needs to be resolved asap!!"]]).toDF("text")
result = pipeline.fit(example).transform(example)

result.select("text", "class.result").show(truncate=False)
''', language='python')

    # Results Example
    st.text("""
    +-----------------------------------------------------------------+--------+
    |text                                                             |result  |
    +-----------------------------------------------------------------+--------+
    |I have a problem with my iphone that needs to be resolved asap!! |[mobile]|
    +-----------------------------------------------------------------+--------+
    """)

    # Model Info Section
    st.markdown('<div class="sub-title">Choosing the Right Model</div>', unsafe_allow_html=True)
    st.markdown("""
    <div class="section">
        <p>Spark NLP offers a variety of pre-trained models for zero-shot classification, including BERT, RoBERTa, and DeBERTa. These models are capable of handling a wide range of classification tasks without requiring additional training on specific categories.</p>
        <p>To explore and select the most suitable model for your needs, visit the <a class="link" href="https://sparknlp.org/models" target="_blank">Spark NLP Models Hub</a>. Here, you can find detailed information about each model, including its size, compatibility, and the specific tasks it excels at.</p>
    </div>
    """, unsafe_allow_html=True)

    # References Section
    st.markdown('<div class="sub-title">References</div>', unsafe_allow_html=True)
    st.markdown("""
    <div class="section">
        <ul>
            <li><a class="link" href="https://sparknlp.org/docs/en/transformers#debertaforzeroshotclassification" target="_blank">DeBertaForZeroShotClassification in Spark NLP</a></li>
            <li><a class="link" href="https://arxiv.org/abs/1909.00161" target="_blank">Benchmarking Zero-shot Text Classification: Datasets, Evaluation and Entailment Approach (Research Paper)</a></li>
            <li><a class="link" href="https://paperswithcode.com/task/zero-shot-classification" target="_blank">Zero-Shot Classification: Research Papers with Code</a></li>
        </ul>
    </div>
    """, unsafe_allow_html=True)

# Tab 4: DeBERTa for Question Answering
with tab4:
    st.markdown("""
    <div class="section">
        <h2>Question Answering with Spark NLP</h2>
        <p><strong>Question Answering (QA)</strong> is a fundamental NLP task that involves building models capable of understanding and responding to questions based on a given context. This task is essential for applications such as chatbots, virtual assistants, and information retrieval systems.</p>
        <p>Spark NLP provides robust tools for question answering, leveraging advanced models like DeBERTa. These models are trained to accurately identify and extract answers from a provided context, enhancing the effectiveness of QA systems.</p>
        <p>Question answering with Spark NLP enables:</p>
        <ul>
            <li><strong>Automated Information Retrieval:</strong> Extracting relevant information from text to answer user queries.</li>
            <li><strong>Interactive Systems:</strong> Enhancing chatbots and virtual assistants to provide accurate responses to user questions.</li>
            <li><strong>Knowledge Extraction:</strong> Improving the ability to understand and leverage contextual information for various applications.</li>
        </ul>
        <p>Using Spark NLP for question answering offers several advantages:</p>
        <ul>
            <li><strong>Scalability:</strong> Spark NLP integrates with Apache Spark, making it suitable for handling large-scale QA tasks efficiently.</li>
            <li><strong>Flexibility:</strong> The pre-trained models can be easily adapted to various QA scenarios and domains.</li>
            <li><strong>Ease of Integration:</strong> Seamlessly integrate QA models into existing pipelines for efficient question answering.</li>
            <li><strong>Customization:</strong> Fine-tune or adapt models to specific contexts or industries to improve performance.</li>
        </ul>
    </div>
    """, unsafe_allow_html=True)

    # General Information about Using Question Answering Models
    st.markdown('<div class="sub-title">How to Use Question Answering Models in Spark NLP</div>', unsafe_allow_html=True)
    st.markdown("""
    <div class="section">
        <p>For question answering in Spark NLP, you can utilize DeBERTa, which stands for <strong>Decoding-enhanced BERT with Disentangled Attention</strong>. The DeBERTa model for question answering is designed to extract precise answers from a given context in response to user queries. Below is a template for setting up a question answering pipeline in Spark NLP using DeBERTa.</p>
    </div>
    """, unsafe_allow_html=True)

    st.code('''
from sparknlp.base import *
from sparknlp.annotator import *
from pyspark.ml import Pipeline

# Question answering takes two inputs, so a MultiDocumentAssembler is used
documentAssembler = MultiDocumentAssembler() \\
    .setInputCols(["question", "context"]) \\
    .setOutputCols(["document_question", "document_context"])

# Span-extraction model fine-tuned on SQuAD 2.0
spanClassifier = DeBertaForQuestionAnswering \\
    .pretrained("deberta_v3_xsmall_qa_squad2", "en") \\
    .setInputCols(["document_question", "document_context"]) \\
    .setOutputCol("answer") \\
    .setCaseSensitive(True)

pipeline = Pipeline(stages=[
    documentAssembler,
    spanClassifier
])

data = spark.createDataFrame([["What is my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
result = pipeline.fit(data).transform(data)

result.select("question", "context", "answer.result").show(truncate=False)
''', language='python')

    # Results Example
    st.text("""
    +----------------+-----------------------------------------+-------+
    |question        |context                                  |result |
    +----------------+-----------------------------------------+-------+
    |What is my name?|My name is Clara and I live in Berkeley. |[Clara]|
    +----------------+-----------------------------------------+-------+
    """)

    # Model Info Section
    st.markdown('<div class="sub-title">Choosing the Right Model</div>', unsafe_allow_html=True)
    st.markdown("""
    <div class="section">
        <p>Spark NLP offers a range of pre-trained models for question answering tasks, including DeBERTa and other advanced transformers. Selecting the right model can significantly impact the quality of your QA system.</p>
        <p>To explore and select the most appropriate model for your QA needs, visit the <a class="link" href="https://sparknlp.org/models" target="_blank">Spark NLP Models Hub</a>. Here, you can find detailed information about each model, including its capabilities and performance.</p>
    </div>
    """, unsafe_allow_html=True)

    # References Section
    st.markdown('<div class="sub-title">References</div>', unsafe_allow_html=True)
    st.markdown("""
    <div class="section">
        <ul>
            <li><a class="link" href="https://sparknlp.org/docs/en/transformers#debertaforquestionanswering" target="_blank">DeBertaForQuestionAnswering in Spark NLP</a></li>
            <li><a class="link" href="https://arxiv.org/abs/1806.03822" target="_blank">Know What You Don't Know: Unanswerable Questions for SQuAD (SQuAD 2.0 Research Paper)</a></li>
            <li><a class="link" href="https://paperswithcode.com/task/question-answering" target="_blank">Question Answering: Research Papers with Code</a></li>
        </ul>
    </div>
    """, unsafe_allow_html=True)

    st.markdown('<div class="sub-title">Community & Support</div>', unsafe_allow_html=True)

    # Footer
    st.markdown("""
    <div class="section">
        <ul>
            <li><a class="link" href="https://sparknlp.org/" target="_blank">Official Website</a>: Documentation and examples</li>
            <li><a class="link" href="https://join.slack.com/t/spark-nlp/shared_invite/zt-198dipu77-L3UWNe_AJ8xqDk0ivmih5Q" target="_blank">Slack</a>: Live discussion with the community and team</li>
            <li><a class="link" href="https://github.com/JohnSnowLabs/spark-nlp" target="_blank">GitHub</a>: Bug reports, feature requests, and contributions</li>
            <li><a class="link" href="https://medium.com/spark-nlp" target="_blank">Medium</a>: Spark NLP articles</li>
            <li><a class="link" href="https://www.youtube.com/channel/UCmFOjlpYEhxf_wJUDuz6xxQ/videos" target="_blank">YouTube</a>: Video tutorials</li>
        </ul>
    </div>
    """, unsafe_allow_html=True)

    st.markdown('<div class="sub-title">Quick Links</div>', unsafe_allow_html=True)

    st.markdown("""
    <div class="section">
        <ul>
            <li><a class="link" href="https://sparknlp.org/docs/en/quickstart" target="_blank">Getting Started</a></li>
            <li><a class="link" href="https://nlp.johnsnowlabs.com/models" target="_blank">Pretrained Models</a></li>
            <li><a class="link" href="https://github.com/JohnSnowLabs/spark-nlp/tree/master/examples/python/annotation/text/english" target="_blank">Example Notebooks</a></li>
            <li><a class="link" href="https://sparknlp.org/docs/en/install" target="_blank">Installation Guide</a></li>
        </ul>
    </div>
    """, unsafe_allow_html=True)

requirements.txt
ADDED
@@ -0,0 +1,7 @@
streamlit
st-annotated-text
streamlit-tags
pandas
numpy
spark-nlp
pyspark