kovacsvi committed
Commit f7e1e22
Parent(s): b5c8485
added titles to interfaces
- interfaces/cap.py +2 -1
- interfaces/emotion.py +2 -1
- interfaces/emotion9.py +2 -1
- interfaces/illframes.py +2 -1
- interfaces/manifesto.py +2 -1
- interfaces/ner.py +2 -1
- interfaces/ontolisst.py +2 -1
- interfaces/sentiment.py +2 -1
interfaces/cap.py
CHANGED
@@ -101,8 +101,9 @@ def predict_cap(text, language, domain):
     return predict(text, model_id, tokenizer_id)
 
 demo = gr.Interface(
+    title="CAP Babel Demo",
     fn=predict_cap,
     inputs=[gr.Textbox(lines=6, label="Input"),
             gr.Dropdown(languages, label="Language"),
             gr.Dropdown(domains.keys(), label="Domain")],
-    outputs=[gr.Label(num_top_classes=5, label="Output"), gr.Markdown()])
+    outputs=[gr.Label(num_top_classes=5, label="Output"), gr.Markdown()])
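For reference, the assembled interface after this change looks roughly like the sketch below. The prediction logic is stubbed out and the languages/domains values are placeholders (the real lists are defined earlier in interfaces/cap.py); only the gr.Interface call mirrors the committed code.

import gradio as gr

# Placeholder data; the real lists/mappings live earlier in interfaces/cap.py.
languages = ["English", "Hungarian"]
domains = {"media": "media", "parliamentary speech": "parlspeech"}

def predict_cap(text, language, domain):
    # Stub for illustration: the real function selects a model and tokenizer
    # for (language, domain) and returns (label probabilities, markdown info).
    return {"placeholder label": 1.0}, f"Ran stub for {language} / {domain}."

demo = gr.Interface(
    title="CAP Babel Demo",  # keyword added in this commit; rendered above the demo
    fn=predict_cap,
    inputs=[gr.Textbox(lines=6, label="Input"),
            gr.Dropdown(languages, label="Language"),
            gr.Dropdown(domains.keys(), label="Domain")],
    outputs=[gr.Label(num_top_classes=5, label="Output"), gr.Markdown()])

if __name__ == "__main__":
    demo.launch()

The title keyword simply sets the heading Gradio displays at the top of the interface; the rest of the call is unchanged.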
interfaces/emotion.py
CHANGED
@@ -50,8 +50,9 @@ def predict_cap(text, language, domain):
     return predict(text, model_id, tokenizer_id)
 
 demo = gr.Interface(
+    title="Emotion Babel Demo",
     fn=predict_cap,
     inputs=[gr.Textbox(lines=6, label="Input"),
             gr.Dropdown(languages, label="Language"),
             gr.Dropdown(domains.keys(), label="Domain")],
-    outputs=[gr.Label(num_top_classes=5, label="Output"), gr.Markdown()])
+    outputs=[gr.Label(num_top_classes=5, label="Output"), gr.Markdown()])
interfaces/emotion9.py
CHANGED
@@ -50,8 +50,9 @@ def predict_e6(text, language, domain):
     return predict(text, model_id, tokenizer_id)
 
 demo = gr.Interface(
+    title="Emotions (9) Babel Demo",
     fn=predict_e6,
     inputs=[gr.Textbox(lines=6, label="Input"),
             gr.Dropdown(languages, label="Language"),
             gr.Dropdown(domains.keys(), label="Domain")],
-    outputs=[gr.Label(num_top_classes=5, label="Output"), gr.Markdown()])
+    outputs=[gr.Label(num_top_classes=5, label="Output"), gr.Markdown()])
interfaces/illframes.py
CHANGED
@@ -102,8 +102,9 @@ def predict_illframes(text, language, domain):
     return predict(text, model_id, tokenizer_id, label_names)
 
 demo = gr.Interface(
+    title="ILLFRAMES Babel Demo",
     fn=predict_illframes,
     inputs=[gr.Textbox(lines=6, label="Input"),
             gr.Dropdown(languages, label="Language"),
             gr.Dropdown(domains.keys(), label="Domain")],
-    outputs=[gr.Label(num_top_classes=5, label="Output"), gr.Markdown()])
+    outputs=[gr.Label(num_top_classes=5, label="Output"), gr.Markdown()])
interfaces/manifesto.py
CHANGED
@@ -48,7 +48,8 @@ def predict_cap(text, language):
     return predict(text, model_id, tokenizer_id)
 
 demo = gr.Interface(
+    title="Manifesto Babel Demo",
     fn=predict_cap,
     inputs=[gr.Textbox(lines=6, label="Input"),
             gr.Dropdown(languages, label="Language")],
-    outputs=[gr.Label(num_top_classes=5, label="Output"), gr.Markdown()])
+    outputs=[gr.Label(num_top_classes=5, label="Output"), gr.Markdown()])
interfaces/ner.py
CHANGED
@@ -44,7 +44,8 @@ def named_entity_recognition(text, language):
     return output, output_info
 
 demo = gr.Interface(
+    title="NER Babel Demo",
     fn=named_entity_recognition,
     inputs=[gr.Textbox(lines=6, label="Input"),
             gr.Dropdown(languages, label="Language")],
-    outputs=[gr.HighlightedText(label='Output'), gr.Markdown()])
+    outputs=[gr.HighlightedText(label='Output'), gr.Markdown()])
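The NER interface follows the same pattern but returns highlighted spans instead of class probabilities. A minimal sketch under the same caveats (stubbed model call, placeholder language list; only the gr.Interface call mirrors the diff):

import gradio as gr

languages = ["English", "Hungarian"]  # placeholder; the real list is defined earlier in interfaces/ner.py

def named_entity_recognition(text, language):
    # Stub for illustration: the real function runs a token-classification model
    # and returns (list of (span, entity label) tuples, markdown info).
    output = [("Budapest", "LOC"), (" is the capital of ", None), ("Hungary", "LOC"), (".", None)]
    output_info = f"Stub output for the {language} model."
    return output, output_info

demo = gr.Interface(
    title="NER Babel Demo",  # keyword added in this commit
    fn=named_entity_recognition,
    inputs=[gr.Textbox(lines=6, label="Input"),
            gr.Dropdown(languages, label="Language")],
    outputs=[gr.HighlightedText(label='Output'), gr.Markdown()])

if __name__ == "__main__":
    demo.launch()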
interfaces/ontolisst.py
CHANGED
@@ -82,7 +82,8 @@ def predict_cap(text, language):
     return predict(text, model_id, tokenizer_id)
 
 demo = gr.Interface(
+    title="ONTOLISST Babel Demo",
     fn=predict_cap,
     inputs=[gr.Textbox(lines=6, label="Input"),
             gr.Dropdown(languages, label="Language")],
-    outputs=[gr.Label(num_top_classes=3, label="Output"), gr.Markdown()])
+    outputs=[gr.Label(num_top_classes=3, label="Output"), gr.Markdown()])
interfaces/sentiment.py
CHANGED
@@ -59,8 +59,9 @@ def predict_cap(text, language, domain):
     return predict(text, model_id, tokenizer_id)
 
 demo = gr.Interface(
+    title="Sentiment (3) Babel Demo",
     fn=predict_cap,
     inputs=[gr.Textbox(lines=6, label="Input"),
             gr.Dropdown(languages, label="Language"),
             gr.Dropdown(domains.keys(), label="Domain")],
-    outputs=[gr.Label(num_top_classes=3, label="Output"), gr.Markdown()])
+    outputs=[gr.Label(num_top_classes=3, label="Output"), gr.Markdown()])