hsuvaskakoty committed on
Commit
481f13d
·
verified ·
1 Parent(s): 6c89cc9

Upload app.py

Browse files
Files changed (1) hide show
  1. app.py +10 -17
app.py CHANGED
@@ -17,25 +17,20 @@ model_dict = {
17
  "Offensive Language Detection": "offensive"
18
  }
19
 
20
- def process_url(url, model_key):
21
- model_name = model_dict[model_key]
22
  processed_text = data_prep.process_data(url)
23
  final_scores = model_predict.predict_text(processed_text, model_name)
24
-
25
- print(f"Final Scores: {final_scores}") # Debug statement
26
-
27
  if model_name == 'outcome':
28
  highest_prob_item = max(final_scores, key=lambda x: x['score'])
29
  highest_prob_label = highest_prob_item['outcome']
30
  highest_prob = highest_prob_item['score']
31
  progress_bars = {item['outcome']: item['score'] for item in final_scores}
32
- else:
33
- highest_prob_item = max(final_scores, key=lambda x: x['score'])
34
- highest_prob_label = highest_prob_item[list(highest_prob_item.keys())[1]]
35
- highest_prob = highest_prob_item['score']
36
- progress_bars = {item[list(item.keys())[1]]: item['score'] for item in final_scores}
37
 
38
- return processed_text, highest_prob_label, highest_prob, progress_bars
 
39
 
40
  title = 'Wikipedia Deletion Discussion Analysis Suite'
41
  desc = """ This demo is about classifying deletion discussions from Wikipedia about Wikipedia articles. Wikipedia community engages in discussions related to an article’s quality, and map potential issues to existing templates, or Wikipedia policies, which cover diverse areas, from low notability of sources to content implausibility or vandalism.
@@ -55,16 +50,14 @@ The input to the classifier is a URL of a Wikipedia deletion discussion page, an
55
  The input to the classifier is a URL of a Wikipedia deletion discussion page, and the output is the predicted label of the discussion, along with the probability of the predicted label, and the probabilities of all the labels.
56
  """
57
 
58
-
59
  url_input = gr.Textbox(label="URL")
60
  model_name_input = gr.Dropdown(label="Choose the Task", choices=list(model_dict.keys()), value=list(model_dict.keys())[0])
61
  outputs = [
62
  gr.Textbox(label="Processed Text"),
63
- gr.Textbox(label="Label with Highest Probability"),
64
- gr.Textbox(label="Probability"),
65
- gr.JSON(label="All Labels and Probabilities"),
66
- #gr.HTML(label="Processed Text")
67
  ]
68
 
69
  demo = gr.Interface(fn=process_url, inputs=[url_input, model_name_input], outputs=outputs, title=title, description=desc)
70
- demo.launch() #share=True)
 
17
  "Offensive Language Detection": "offensive"
18
  }
19
 
20
+ def process_url(url, model_name):
21
+ model_name = model_dict[model_name]
22
  processed_text = data_prep.process_data(url)
23
  final_scores = model_predict.predict_text(processed_text, model_name)
 
 
 
24
  if model_name == 'outcome':
25
  highest_prob_item = max(final_scores, key=lambda x: x['score'])
26
  highest_prob_label = highest_prob_item['outcome']
27
  highest_prob = highest_prob_item['score']
28
  progress_bars = {item['outcome']: item['score'] for item in final_scores}
29
+
30
+ return processed_text, highest_prob_label, highest_prob, progress_bars
 
 
 
31
 
32
+ else:
33
+ return processed_text, "", "", final_scores
34
 
35
  title = 'Wikipedia Deletion Discussion Analysis Suite'
36
  desc = """ This demo is about classifying deletion discussions from Wikipedia about Wikipedia articles. Wikipedia community engages in discussions related to an article’s quality, and map potential issues to existing templates, or Wikipedia policies, which cover diverse areas, from low notability of sources to content implausibility or vandalism.
 
50
  The input to the classifier is a URL of a Wikipedia deletion discussion page, and the output is the predicted label of the discussion, along with the probability of the predicted label, and the probabilities of all the labels.
51
  """
52
 
 
53
  url_input = gr.Textbox(label="URL")
54
  model_name_input = gr.Dropdown(label="Choose the Task", choices=list(model_dict.keys()), value=list(model_dict.keys())[0])
55
  outputs = [
56
  gr.Textbox(label="Processed Text"),
57
+ gr.Textbox(label="Label with Highest Probability"), # This will only be used for the outcome task
58
+ gr.Textbox(label="Probability"), # This will only be used for the outcome task
59
+ gr.JSON(label="All Labels and Probabilities") # This will be used for all tasks
 
60
  ]
61
 
62
  demo = gr.Interface(fn=process_url, inputs=[url_input, model_name_input], outputs=outputs, title=title, description=desc)
63
+ demo.launch() # share=True