research14 committed on
Commit
9430486
·
1 Parent(s): f9ca505
Files changed (1) hide show
  1. app.py +1 -6
app.py CHANGED
@@ -1,10 +1,6 @@
1
  import gradio as gr
2
  from transformers import AutoModelForCausalLM, AutoTokenizer
3
 
4
- # Load the models and tokenizers
5
- gpt35_model = AutoModelForCausalLM.from_pretrained("gpt-3.5-turbo-0613")
6
- gpt35_tokenizer = AutoTokenizer.from_pretrained("gpt-3.5-turbo-0613")
7
-
8
  vicuna_model = AutoModelForCausalLM.from_pretrained("lmsys/vicuna-7b-v1.3")
9
  vicuna_tokenizer = AutoTokenizer.from_pretrained("lmsys/vicuna-7b-v1.3")
10
 
@@ -20,11 +16,10 @@ def generate_response(model, tokenizer, prompt):
20
 
21
  # Define the Gradio interface
22
  def chatbot_interface(prompt):
23
- gpt35_response = generate_response(gpt35_model, gpt35_tokenizer, prompt)
24
  vicuna_response = generate_response(vicuna_model, vicuna_tokenizer, prompt)
25
  llama_response = generate_response(llama_model, llama_tokenizer, prompt)
26
 
27
- return {"GPT-3.5": gpt35_response, "Vicuna-7B": vicuna_response, "Llama-7B": llama_response}
28
 
29
  iface = gr.Interface(fn=chatbot_interface,
30
  inputs="text",
 
1
  import gradio as gr
2
  from transformers import AutoModelForCausalLM, AutoTokenizer
3
 
 
 
 
 
4
  vicuna_model = AutoModelForCausalLM.from_pretrained("lmsys/vicuna-7b-v1.3")
5
  vicuna_tokenizer = AutoTokenizer.from_pretrained("lmsys/vicuna-7b-v1.3")
6
 
 
16
 
17
  # Define the Gradio interface
18
  def chatbot_interface(prompt):
 
19
  vicuna_response = generate_response(vicuna_model, vicuna_tokenizer, prompt)
20
  llama_response = generate_response(llama_model, llama_tokenizer, prompt)
21
 
22
+ return {"Vicuna-7B": vicuna_response, "Llama-7B": llama_response}
23
 
24
  iface = gr.Interface(fn=chatbot_interface,
25
  inputs="text",