import gradio as gr
import re
import textstat
from transformers import pipeline
from langdetect import detect

# Load a summarization model
summarizer = pipeline("summarization")


def text_analysis_and_summarization(text):
    # Analyze text: word count, character count, language detection, and readability
    words = re.findall(r'\w+', text.lower())
    # Split on sentence-ending punctuation and drop empty fragments so the
    # count is correct whether or not the text ends with punctuation
    sentences = [s for s in re.split(r'[.!?]+', text) if s.strip()]
    num_sentences = len(sentences)
    num_words = len(words)
    num_chars = len("".join(words))  # character count ignores whitespace and punctuation
    reading_ease = textstat.flesch_reading_ease(text)
    language = detect(text)

    # Summarize text using the transformer model
    summary = summarizer(text, max_length=130, min_length=30, do_sample=False)[0]['summary_text']

    # Format the results
    return {
        "Language": language,
        "Sentences": num_sentences,
        "Words": num_words,
        "Characters": num_chars,
        "Readability (Flesch Reading Ease)": reading_ease,
        "Summary": summary
    }


# Define an interface for text analysis and summarization
text_analysis_and_summarization_interface = gr.Interface(
    fn=text_analysis_and_summarization,
    inputs=gr.Textbox(lines=4, placeholder="Type something here..."),
    outputs=gr.JSON(label="Text Analysis and Summarization")
)

# Launch the interface
if __name__ == "__main__":
    text_analysis_and_summarization_interface.launch()
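
# Illustrative usage sketch (kept commented out so the script still launches the
# Gradio UI by default): the function can also be called directly and returns a
# plain dict. The sample text below is hypothetical and only shows the expected
# shape of the result; the summarizer needs a reasonably long input to produce
# a meaningful summary.
#
# sample = (
#     "Gradio makes it easy to build web demos for machine learning models. "
#     "This script combines simple text statistics with a transformer-based "
#     "summarizer and exposes both through a single JSON output."
# )
# print(text_analysis_and_summarization(sample))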