research14 committed on
Commit 4ef034d · Parent: a450a5f
Files changed (1)
  1. app.py +3 -3
app.py CHANGED
@@ -44,19 +44,19 @@ with gr.Blocks() as demo:
         vicuna_S1_chatbot_CHUNK = gr.Chatbot(label="vicuna-7b")
         llama_S1_chatbot_CHUNK = gr.Chatbot(label="llama-7b")
         gpt_S1_chatbot_CHUNK = gr.Chatbot(label="gpt-3.5")
-    clear = gr.ClearButton([msg, vicuna_S1_chatbot_CHUNK])
+    clear = gr.ClearButton([prompt_CHUNK, vicuna_S1_chatbot_CHUNK])
     gr.Markdown("Strategy 2 Instruction")
     with gr.Row():
         vicuna_S2_chatbot_CHUNK = gr.Chatbot(label="vicuna-7b")
         llama_S2_chatbot_CHUNK = gr.Chatbot(label="llama-7b")
         gpt_S2_chatbot_CHUNK = gr.Chatbot(label="gpt-3.5")
-    clear = gr.ClearButton([msg, vicuna_S2_chatbot_CHUNK])
+    clear = gr.ClearButton([prompt_CHUNK, vicuna_S2_chatbot_CHUNK])
     gr.Markdown("Strategy 3 Structured Prompting")
     with gr.Row():
         vicuna_S3_chatbot_CHUNK = gr.Chatbot(label="vicuna-7b")
         llama_S3_chatbot_CHUNK = gr.Chatbot(label="llama-7b")
         gpt_S3_chatbot_CHUNK = gr.Chatbot(label="gpt-3.5")
-    clear = gr.ClearButton([msg, vicuna_S3_chatbot_CHUNK])
+    clear = gr.ClearButton([prompt_CHUNK, vicuna_S3_chatbot_CHUNK])

     def respond(message, chat_history):
         input_ids = tokenizer.encode(message, return_tensors="pt")
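The fix is the same on all three changed lines: each gr.ClearButton is retargeted from msg to prompt_CHUNK, the input component this app actually uses. A minimal sketch of the pattern for one strategy row, assuming prompt_CHUNK is a gr.Textbox defined earlier in the same gr.Blocks context (its definition sits outside this hunk):

import gradio as gr

with gr.Blocks() as demo:
    # Assumed: the shared prompt box. The real app defines prompt_CHUNK
    # earlier in the same Blocks context; label and placement are guesses.
    prompt_CHUNK = gr.Textbox(label="Prompt")

    with gr.Row():
        vicuna_S1_chatbot_CHUNK = gr.Chatbot(label="vicuna-7b")

    # gr.ClearButton empties every component in the list on click, so it
    # has to reference the components that actually exist in this app.
    clear = gr.ClearButton([prompt_CHUNK, vicuna_S1_chatbot_CHUNK])

demo.launch()

Since gr.ClearButton only clears what it is handed, pointing it at the wrong input name means the prompt box would never be cleared alongside the chatbot.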