import gradio as gr
from transformers import pipeline

# Initialize the Hugging Face pipelines
qa_pipeline = pipeline("question-answering", model="deepset/roberta-base-squad2")
classification_pipeline = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
translation_pipeline = pipeline("translation", model="Helsinki-NLP/opus-mt-en-fr")
sentiment_pipeline = pipeline("text-classification", model="distilbert-base-uncased-finetuned-sst-2-english")  # Fine-tuned for SST-2 sentiment analysis
summarization_pipeline = pipeline("summarization", model="facebook/bart-large-cnn")
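# Note: each pipeline downloads its model weights from the Hugging Face Hub on first run and
# caches them locally; the five models above total a few GB, so the first start-up can be slow.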

# Define the inference functions
def answer_question(context, question):
    return qa_pipeline(question=question, context=context)["answer"]

def classify_text(text, labels):
    labels = [label.strip() for label in labels.split(",")]
    results = classification_pipeline(text, candidate_labels=labels)
    return {label: round(float(prob), 4) for label, prob in zip(results["labels"], results["scores"])}

def translate_text(text):
    return translation_pipeline(text)[0]["translation_text"] if text else "No translation available"

def classify_sentiment(text):
    results = sentiment_pipeline(text)
    return ", ".join(f"{result['label']}: {result['score']:.4f}" for result in results)

def summarize_text(text):
    result = summarization_pipeline(text, max_length=60)
    return result[0]["summary_text"] if result else "No summary available"
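# Note: facebook/bart-large-cnn accepts inputs of up to roughly 1024 tokens; much longer
# texts may need to be truncated or chunked before summarization.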

def multi_model_interaction(text):
    summary = summarize_text(text)
    translated_summary = translate_text(summary)
    return {
        "Summary (English)": summary,
        "Summary (French)": translated_summary,
    }
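# Illustrative example of the structure returned above (actual wording depends on the models):
# multi_model_interaction("Long English article ...") ->
#     {"Summary (English)": "short summary ...", "Summary (French)": "résumé court ..."}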

# Build the Gradio Blocks interface
with gr.Blocks() as demo:
    with gr.Tab("Single Models"):
        with gr.Column():
            gr.Markdown("### Question Answering")
            context = gr.Textbox(label="Context")
            question = gr.Textbox(label="Question")
            answer_output = gr.Text(label="Answer")
            gr.Button("Answer").click(answer_question, inputs=[context, question], outputs=answer_output)
        with gr.Column():
            gr.Markdown("### Zero-Shot Classification")
            text_zsc = gr.Textbox(label="Text")
            labels = gr.Textbox(label="Labels (comma separated)")
            classification_result = gr.JSON(label="Classification Results")
            gr.Button("Classify").click(classify_text, inputs=[text_zsc, labels], outputs=classification_result)
        with gr.Column():
            gr.Markdown("### Translation")
            text_to_translate = gr.Textbox(label="Text")
            translated_text = gr.Text(label="Translated Text")
            gr.Button("Translate").click(translate_text, inputs=[text_to_translate], outputs=translated_text)
        with gr.Column():
            gr.Markdown("### Sentiment Analysis")
            text_for_sentiment = gr.Textbox(label="Text for Sentiment Analysis")
            sentiment_result = gr.Text(label="Sentiment")
            gr.Button("Classify Sentiment").click(classify_sentiment, inputs=[text_for_sentiment], outputs=sentiment_result)
        with gr.Column():
            gr.Markdown("### Summarization")
            text_to_summarize = gr.Textbox(label="Text")
            summary = gr.Text(label="Summary")
            gr.Button("Summarize").click(summarize_text, inputs=[text_to_summarize], outputs=summary)
    with gr.Tab("Multi-Model"):
        gr.Markdown("### Multi-Model")
        input_text = gr.Textbox(label="Enter Text for Multi-Model Analysis")
        multi_output = gr.JSON(label="Results")  # JSON output, since multi_model_interaction returns a dict
        gr.Button("Process").click(multi_model_interaction, inputs=[input_text], outputs=multi_output)

# Launch the demo
demo.launch(share=True, debug=True)
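
# Note: share=True creates a temporary public gradio.live link when running locally;
# on Hugging Face Spaces the app is hosted automatically, so the flag is not needed there.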