raptor1 committed on
Commit 0815456 · verified · 1 Parent(s): 6b36801

Update app.py

Files changed (1)
  1. app.py +53 -52
app.py CHANGED
@@ -1,64 +1,65 @@
  import gradio as gr
- from huggingface_hub import InferenceClient
-
  """
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
- """
- client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
-
-
- def respond(
-     message,
-     history: list[tuple[str, str]],
-     system_message,
-     max_tokens,
-     temperature,
-     top_p,
- ):
-     messages = [{"role": "system", "content": system_message}]
-
-     for val in history:
-         if val[0]:
-             messages.append({"role": "user", "content": val[0]})
-         if val[1]:
-             messages.append({"role": "assistant", "content": val[1]})
-
-     messages.append({"role": "user", "content": message})
-
-     response = ""
-
-     for message in client.chat_completion(
-         messages,
-         max_tokens=max_tokens,
-         stream=True,
-         temperature=temperature,
-         top_p=top_p,
-     ):
-         token = message.choices[0].delta.content
-
-         response += token
-         yield response
-
-
- """
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
- """
- demo = gr.ChatInterface(
-     respond,
-     additional_inputs=[
-         gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-         gr.Slider(
-             minimum=0.1,
-             maximum=1.0,
-             value=0.95,
-             step=0.05,
-             label="Top-p (nucleus sampling)",
-         ),
-     ],
- )
-
-
- if __name__ == "__main__":
-     demo.launch()
+ # Importing dependencies (Transformers for the summarization and sentiment pipelines, Gradio for building the UI)
+ #!pip install transformers gradio (needed for Google Colab, not for HF)
+
+ from transformers import pipeline
  import gradio as gr
+
+ # Initializing the summarizer pipeline
+ summarizer_pipeline = pipeline("summarization")
+
+ # Initializing the sentiment pipeline. It is not essential to specify the model, but I have chosen this one because it is designed for sentiment analysis.
+ sentiment_pipeline = pipeline("sentiment-analysis", model="siebert/sentiment-roberta-large-english")
+
+ """
+ Function to take the input text and process it (summarization and sentiment analysis).
+ A limit is placed on the input length, since the summarizer works best when the text to be summarized is longer than its output max_length (token output).
  """
+
+
+ # Function
+
+ def summarize_and_sentiment(text):
+
+     # Checking that the text is between 150 and 500 characters; the lower bound exists because max_length for the output tokens is set to 150
+     if len(text) < 150 or len(text) > 500:
+         return "Error: Text should be between 150 and 500 characters long. Please adjust the length of the text.", ""  # Using "" as the second value so that the function still returns two values when an error occurs, which is what the Gradio UI expects; the outputs would no longer match the expected structure if only one value were returned.
+
+     # Generate the summary using the summarizer pipeline
+     summary = summarizer_pipeline(text, min_length=4, max_length=150)
+     summary_text = summary[0]["summary_text"]  # Only using the generated summary text.
+
+     # Perform sentiment analysis on the summary using the sentiment pipeline
+     sentiment = sentiment_pipeline(summary_text)
+     sentiment_label = sentiment[0]["label"]  # Only using the label (positive or negative), not the score.
+
+     return summary_text, sentiment_label  # Returning two outputs (the summarized text and the sentiment label); the error branch also returns two values so that the UI input/output structure is handled consistently.
+
+
+ # Setting up the Gradio UI
+ with gr.Blocks() as demo:
+     gr.Markdown("# Text Summarizer and Sentiment Analysis Tool")
+     gr.Markdown("Input a text with at least 150 characters to get its summary and sentiment analysis.")
+
+     # Text input box
+     input_text = gr.Textbox(
+         label="Enter your text : ",
+         placeholder="Paste your text here...",
+         lines=5
+     )
+
+     # Output boxes
+     output_summary = gr.Textbox(label="Summary : ", interactive=False)
+     output_sentiment = gr.Textbox(label="Sentiment : ", interactive=False)
+
+     # Button to process the text
+     analyze_button = gr.Button("Start the process!")
+
+     # Connect the button to the function. When clicked, the button maps the input and outputs to and from summarize_and_sentiment.
+     analyze_button.click(
+         fn=summarize_and_sentiment,
+         inputs=input_text,
+         outputs=[output_summary, output_sentiment]  # The order of the outputs must match the order returned by summarize_and_sentiment.
+     )
+
+ # Launch the Gradio app
+ demo.launch()
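Because demo.launch() runs at import time in the new app.py, the easiest way to sanity-check the two pipelines is to exercise them directly in a separate script or REPL rather than importing the module. The sketch below is not part of the commit; it assumes the same setup as the new app.py (the default summarization model and siebert/sentiment-roberta-large-english for sentiment) and uses a made-up sample passage of roughly 220 characters, i.e. within the 150-500 character window the app enforces.

# Minimal sketch (not part of the commit): calling the pipelines the same way app.py does.
from transformers import pipeline

summarizer_pipeline = pipeline("summarization")
sentiment_pipeline = pipeline("sentiment-analysis", model="siebert/sentiment-roberta-large-english")

# Hypothetical sample text, ~220 characters (within the app's 150-500 character limit).
sample = (
    "The new library opened downtown last weekend after two years of construction. "
    "Visitors praised the bright reading rooms, the expanded children's section, and the "
    "free community workshops planned for the coming months."
)

# Summarize first, then run sentiment analysis on the summary, mirroring summarize_and_sentiment.
summary_text = summarizer_pipeline(sample, min_length=4, max_length=150)[0]["summary_text"]
sentiment_label = sentiment_pipeline(summary_text)[0]["label"]

print("Summary:", summary_text)
print("Sentiment:", sentiment_label)

Note that with short inputs the summarizer may warn that max_length exceeds the input length, which is exactly the behavior the 150-character lower bound in the app is meant to mitigate.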