szili2011 committed on
Commit
8b7429d
·
verified ·
1 Parent(s): 99e0307

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +13 -13
app.py CHANGED
@@ -3,36 +3,36 @@ from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
3
 
4
  # Initialize the model and tokenizer
5
  try:
6
- print("Initializing model...")
7
  tokenizer = AutoTokenizer.from_pretrained("satvikag/chatbot")
8
  model = AutoModelForCausalLM.from_pretrained("satvikag/chatbot")
9
  chat_pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer)
10
- print("Model loaded successfully.")
11
  except Exception as e:
12
- print(f"Error during model initialization: {e}")
13
  chat_pipeline = None
14
 
15
  def ai_vote(poll_title, choices, num_ais):
16
  if chat_pipeline is None:
17
  return {"Error": "Model not initialized"}, []
18
 
19
- results = {}
 
20
  explanations = []
21
 
22
- for _ in range(num_ais):
 
23
  input_text = f"Poll Title: {poll_title}\nChoices: {', '.join(choices)}\nChoose the best option and explain why."
24
  try:
25
  response = chat_pipeline(input_text, max_length=150, num_return_sequences=1)[0]['generated_text']
 
 
 
 
 
 
 
26
  except Exception as e:
27
  return {"Error": str(e)}, []
28
 
29
- # Extract the chosen option and explanation
30
- chosen_option = response.split("\n")[0].strip()
31
- explanation = "\n".join(response.split("\n")[1:]).strip()
32
-
33
- results[chosen_option] = results.get(chosen_option, 0) + 1
34
- explanations.append((chosen_option, explanation))
35
-
36
  return results, explanations
37
 
38
  def gradio_interface(title, choices, num_ais):
@@ -43,6 +43,7 @@ def gradio_interface(title, choices, num_ais):
43
  except Exception as e:
44
  return {"Error": str(e)}, "An error occurred."
45
 
 
46
  interface = gr.Interface(
47
  fn=gradio_interface,
48
  inputs=[
@@ -57,5 +58,4 @@ interface = gr.Interface(
57
  )
58
 
59
  if __name__ == "__main__":
60
- print("Launching interface...")
61
  interface.launch()
 
3
 
4
# Initialize the chat model. On any failure we log the error and leave
# chat_pipeline as None so the rest of the app can report "not initialized"
# instead of crashing at import time.
try:
    model = AutoModelForCausalLM.from_pretrained("satvikag/chatbot")
    tokenizer = AutoTokenizer.from_pretrained("satvikag/chatbot")
    chat_pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer)
except Exception as e:
    print(f"Error initializing model: {e}")
    chat_pipeline = None
12
 
13
def ai_vote(poll_title, choices, num_ais):
    """Run ``num_ais`` independent model generations and tally their votes.

    Args:
        poll_title: The poll question shown to the model.
        choices: List of option strings the model may pick from.
        num_ais: How many generations (votes) to run.

    Returns:
        A ``(results, explanations)`` tuple where ``results`` maps each
        choice to its vote count and ``explanations`` is a list of
        ``(choice, explanation)`` tuples. On failure, ``results`` is
        ``{"Error": <message>}`` and ``explanations`` is empty.
    """
    if chat_pipeline is None:
        return {"Error": "Model not initialized"}, []

    # Every choice starts at zero so the caller always sees a full tally.
    results = {choice: 0 for choice in choices}
    explanations = []

    # The prompt is identical for every vote, so build it once.
    input_text = (
        f"Poll Title: {poll_title}\nChoices: {', '.join(choices)}\n"
        f"Choose the best option and explain why."
    )

    for _ in range(num_ais):
        try:
            response = chat_pipeline(input_text, max_length=150, num_return_sequences=1)[0]['generated_text']
        except Exception as e:
            return {"Error": str(e)}, []

        # Text-generation pipelines echo the prompt in generated_text, and
        # the prompt lists every choice — matching against the raw response
        # would always "vote" for the first choice. Match only the
        # model's completion.
        completion = response[len(input_text):] if response.startswith(input_text) else response

        for choice in choices:
            if choice.lower() in completion.lower():
                results[choice] += 1
                # Everything after the first line is treated as the rationale.
                explanation = completion.split("\n", 1)[-1].strip()
                explanations.append((choice, explanation))
                break

    return results, explanations
37
 
38
  def gradio_interface(title, choices, num_ais):
 
43
  except Exception as e:
44
  return {"Error": str(e)}, "An error occurred."
45
 
46
+ # Gradio Interface
47
  interface = gr.Interface(
48
  fn=gradio_interface,
49
  inputs=[
 
58
  )
59
 
60
# Start the Gradio server only when executed as a script, not on import.
if __name__ == "__main__":
    interface.launch()