research14 committed
Commit 615bec2 · 1 Parent(s): 290200f

changed models and prompts

Files changed (1)
  1. app.py +6 -6
app.py CHANGED
@@ -5,12 +5,12 @@ import os
import openai

# Load the Vicuna 7B model and tokenizer
- vicuna_tokenizer = AutoTokenizer.from_pretrained("lmsys/vicuna-7b-v1.3")
- vicuna_model = AutoModelForCausalLM.from_pretrained("lmsys/vicuna-7b-v1.3")
+ vicuna_tokenizer = AutoTokenizer.from_pretrained("lmsys/vicuna-7b-v1.5")
+ vicuna_model = AutoModelForCausalLM.from_pretrained("lmsys/vicuna-7b-v1.5")

# Load the LLaMA 7b model and tokenizer
- llama_tokenizer = AutoTokenizer.from_pretrained("luodian/llama-7b-hf")
- llama_model = AutoModelForCausalLM.from_pretrained("luodian/llama-7b-hf")
+ llama_tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf")
+ llama_model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-chat-hf")

template_single = '''Please output any <{}> in the following sentence one per line without any additional text: "{}"'''

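The first hunk swaps both checkpoints: Vicuna moves from v1.3 to v1.5, and the third-party luodian/llama-7b-hf mirror is replaced by Meta's official Llama-2-7b-chat-hf. The meta-llama repositories are gated on the Hugging Face Hub, so loading them requires accepting the license and authenticating. A minimal sketch of the authenticated load, assuming a recent transformers release and an HF_TOKEN environment variable (both assumptions, not part of this commit):

import os
from transformers import AutoTokenizer, AutoModelForCausalLM

# HF_TOKEN is an assumed variable name; any valid Hub access token works.
hf_token = os.environ.get("HF_TOKEN")

# Gated repo: requires an accepted license and a token (recent transformers versions).
llama_tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf", token=hf_token)
llama_model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-chat-hf", token=hf_token)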
@@ -56,7 +56,7 @@ def gpt_respond(tab_name, message, chat_history, max_convo_length = 10):
    return "", chat_history

def vicuna_respond(tab_name, message, chat_history):
-     formatted_prompt = f'''Output any <{tab_name}> in the following sentence one per line without any additional text: <{message}>'''
+     formatted_prompt = f'''Output any {tab_name} in the following sentence one per line without any additional text: {message}'''
    print('Prompt + Context:')
    print(formatted_prompt)
    input_ids = vicuna_tokenizer.encode(formatted_prompt, return_tensors="pt")
@@ -69,7 +69,7 @@ def vicuna_respond(tab_name, message, chat_history):
    return tab_name, "", chat_history

def llama_respond(tab_name, message, chat_history):
-     formatted_prompt = f'''Output any <{tab_name}> in the following sentence one per line without any additional text: <{message}>'''
+     formatted_prompt = f'''Output any {tab_name} in the following sentence one per line without any additional text: {message}'''
    print('Prompt + Context:')
    print(formatted_prompt)
    input_ids = llama_tokenizer.encode(formatted_prompt, return_tensors="pt")
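The second and third hunks make the same fix in vicuna_respond and llama_respond: the f-string no longer wraps {tab_name} and {message} in literal angle brackets, so the models receive the raw entity type and sentence rather than <...>-delimited text. The diff cuts off at the encode call; for context, a plausible continuation of the truncated body, sketched under the assumption of standard generate/decode usage (max_new_tokens and the prompt-stripping slice are illustrative, not taken from this commit):

# Hypothetical continuation of llama_respond after the encode call above.
output_ids = llama_model.generate(input_ids, max_new_tokens=100)
# Drop the prompt tokens and decode only the newly generated ones.
bot_message = llama_tokenizer.decode(output_ids[0][input_ids.shape[-1]:], skip_special_tokens=True)
chat_history.append((message, bot_message))
return tab_name, "", chat_history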
 