Update app.py
app.py CHANGED
@@ -1,46 +1,14 @@
 from transformers import TFAutoModelForCausalLM, AutoTokenizer
 import tensorflow as tf
 import gradio as gr
-from transformers import TFAutoModelForSequenceClassification
-from transformers import AutoTokenizer
-import plotly.express as px
-import plotly.io as pio
 
 # configuration params
-pio.templates.default = "plotly_dark"
-
-# setting up the text in the page
 TITLE = "<center><h1>Talk with an AI</h1></center>"
-DESCRIPTION = r"""<center>This application allows you to talk with a machine/robot with state-of-the-art technology!!<br>
-In the back-end is using the Elapt1c/ElapticAI-1a model. One of the best models in text generation and comprehension.<br>
-Language processing is done using RoBERTa for sentiment-analysis and spaCy for named-entity recognition and dependency plotting.<br>
-The AI thinks he is a human, so please treat him as such, else he migh get angry!<br>
-"""
-EXAMPLES = [
-    ["What is your favorite videogame?"],
-    ["What gets you really sad?"],
-    ["How can I make you really angry? "],
-    ["What do you do for work?"],
-    ["What are your hobbies?"],
-    ["What is your favorite food?"],
-]
-ARTICLE = r"""<center>
-Done by dr. Gabriel Lopez<br>
-For more please visit: <a href='https://sites.google.com/view/dr-gabriel-lopez/home'>My Page</a><br>
-For info about the chat-bot model can also see the <a href="https://arxiv.org/abs/1911.00536">ArXiv paper</a><br>
-</center>"""
 
 # Loading necessary NLP models
-# dialog
 checkpoint = "elapt1c/ElapticAI-1a"  # tf
 model_gtp2 = TFAutoModelForCausalLM.from_pretrained(checkpoint)
 tokenizer_gtp2 = AutoTokenizer.from_pretrained(checkpoint)
-# sentiment
-checkpoint = f"cardiffnlp/twitter-roberta-base-emotion"
-model_roberta = TFAutoModelForSequenceClassification.from_pretrained(checkpoint)
-tokenizer_roberta = AutoTokenizer.from_pretrained(checkpoint)
-# NER & Dependency
-nlp = spacy.load("en_core_web_sm")
 
 # text-to-text: chatting function -- GPT2
 def chat_with_bot(user_input, chat_history_and_input=[]):
@@ -55,51 +23,27 @@ def chat_with_bot(user_input, chat_history_and_input=[]):
         [chat_history_and_input, emb_user_input], axis=-1
     )  # other iterations
     chat_history_and_input = model_gtp2.generate(
-        bot_input_ids, max_length=
+        bot_input_ids, max_length=50, pad_token_id=tokenizer_gtp2.eos_token_id
     ).numpy()
-    # print
     bot_response = tokenizer_gtp2.decode(
         chat_history_and_input[:, bot_input_ids.shape[-1] :][0],
         skip_special_tokens=True,
     )
+    # Limit history to the last 500 tokens
+    chat_history_and_input = chat_history_and_input[:, -500:]
     return bot_response, chat_history_and_input
 
-
-# text-to-sentiment
-def text_to_sentiment(text_input):
-    """Sentiment analysis using RoBERTa"""
-    labels = ["anger", "joy", "optimism", "sadness"]
-    encoded_input = tokenizer_roberta(text_input, return_tensors="tf")
-    output = model_roberta(encoded_input)
-    scores = output[0][0].numpy()
-    scores = softmax(scores)
-    return px.histogram(x=labels, y=scores, height=200)
-
-
-# text_to_semantics
-def text_to_semantics(text_input):
-    """NER and Dependency plot using Spacy"""
-    processed_text = nlp(text_input)
-    # Dependency
-    html_dep = displacy.render(
-        processed_text,
-        style="dep",
-        options={"compact": True, "color": "white", "bg": "light-black"},
-        page=False,
-    )
-    html_dep = "" + html_dep + ""
-    # NER
-    pos_tokens = []
-    for token in processed_text:
-        pos_tokens.extend([(token.text, token.pos_), (" ", None)])
-    # html_ner = ("" + html_ner + "")s
-    return pos_tokens, html_dep
-
-
 # gradio interface
 blocks = gr.Blocks()
 with blocks:
-    # physical elements
     session_state = gr.State([])
     gr.Markdown(TITLE)
-    gr.
+    user_input = gr.Textbox(label="User Input")
+    bot_response = gr.Textbox(label="Bot Response")
+    user_input.change(
+        chat_with_bot,
+        inputs=[user_input, session_state],
+        outputs=[bot_response, session_state],
+    )
+
+blocks.launch()
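
The second hunk starts at new line 23, so the top of chat_with_bot (new lines 15-22) is not shown. For orientation, here is a minimal sketch of how the full function plausibly reads after this commit, assuming the standard DialoGPT-style chat loop that the visible names (emb_user_input, bot_input_ids) suggest; only the generate/decode/truncate lines are confirmed by the diff.

def chat_with_bot(user_input, chat_history_and_input=[]):
    # Assumed, not in the diff: encode the new user turn, EOS-terminated
    emb_user_input = tokenizer_gtp2.encode(
        user_input + tokenizer_gtp2.eos_token, return_tensors="tf"
    )
    if len(chat_history_and_input) == 0:
        bot_input_ids = emb_user_input  # first iteration
    else:
        bot_input_ids = tf.concat(
            [chat_history_and_input, emb_user_input], axis=-1
        )  # other iterations
    # Confirmed by the diff: generate, decode only the new tokens, truncate
    chat_history_and_input = model_gtp2.generate(
        bot_input_ids, max_length=50, pad_token_id=tokenizer_gtp2.eos_token_id
    ).numpy()
    bot_response = tokenizer_gtp2.decode(
        chat_history_and_input[:, bot_input_ids.shape[-1] :][0],
        skip_special_tokens=True,
    )
    # Keep only the most recent 500 token ids of history
    chat_history_and_input = chat_history_and_input[:, -500:]
    return bot_response, chat_history_and_input

Two caveats on the new lines. First, max_length=50 counts the prompt as well as the reply, so as soon as the concatenated history plus the new turn passes 50 tokens, generate() has no budget left for a reply (this also makes the [:, -500:] slice a no-op in practice, since generated sequences never exceed 50 tokens; max_new_tokens would cap only the reply, where the installed transformers version supports it). Second, Textbox.change fires on every keystroke; if one response per message is intended, the same wiring works with the submit event, triggered on Enter (an alternative, not part of this commit):

user_input.submit(
    chat_with_bot,
    inputs=[user_input, session_state],
    outputs=[bot_response, session_state],
)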