Tonic committed on
Commit b1cb928 · 1 Parent(s): 813bedf

Update app.py

Files changed (1):
  app.py +2 -5
app.py CHANGED
@@ -12,17 +12,14 @@ tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen-7B-Chat", trust_remote_code
 model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen-7B-Chat", device_map="auto", trust_remote_code=True).eval()
 model.generation_config = GenerationConfig.from_pretrained("Qwen/Qwen-7B-Chat", trust_remote_code=True) # Different generation length, top_p and other related super parameters can be specified.
 
-
 def predict(input, history=[]):
-    # tokenize the new input sentence
-    new_user_input_ids = tokenizer.encode(
+    # Check if input is not None and eos_token is not None
     if input is not None and tokenizer.eos_token is not None:
         combined_input = input + tokenizer.eos_token
         # Rest of your code using combined_input
     else:
         # Handle the case where input or tokenizer.eos_token is None
-    print("Input or eos_token is None. Cannot concatenate.")
-    )
+        print("Input or eos_token is None. Cannot concatenate.")
 
     # append the new user input tokens to the chat history
     bot_input_ids = torch.cat([torch.LongTensor(history), new_user_input_ids], dim=-1)
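Note that after this change, predict() no longer defines new_user_input_ids anywhere: the commit removes the tokenizer.encode( call but keeps the torch.cat(...) line that uses its result, so the function would raise a NameError at runtime. Below is a minimal sketch of how the function could be made self-consistent, assuming the intent is to tokenize combined_input with the same Qwen tokenizer; the early return and the final return value are illustrative placeholders, not part of this commit.

import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, GenerationConfig

tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen-7B-Chat", trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen-7B-Chat", device_map="auto", trust_remote_code=True).eval()
model.generation_config = GenerationConfig.from_pretrained("Qwen/Qwen-7B-Chat", trust_remote_code=True)

def predict(input, history=[]):
    # Check if input is not None and eos_token is not None
    if input is not None and tokenizer.eos_token is not None:
        combined_input = input + tokenizer.eos_token
    else:
        # Handle the case where input or tokenizer.eos_token is None
        print("Input or eos_token is None. Cannot concatenate.")
        return history  # illustrative early exit; the commit does not define one

    # Re-introduce the tokenization step the commit removed, so that
    # new_user_input_ids is defined before it is used below
    new_user_input_ids = tokenizer.encode(combined_input, return_tensors="pt")

    # append the new user input tokens to the chat history
    bot_input_ids = torch.cat([torch.LongTensor(history), new_user_input_ids], dim=-1)
    return bot_input_ids  # the rest of the original predict() (generation, decoding) would continue from here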