freddyaboulton committed
Commit 1890c65 · Parent: a682024

Update app.py

Files changed (1): app.py (+1, -2)
app.py CHANGED
@@ -30,7 +30,7 @@ is_gpu_associated = torch.cuda.is_available()
 
 def generate(
     message: str,
-    history_with_input: list[tuple[str, str]],
+    history: list[tuple[str, str]],
     system_prompt=DEFAULT_SYSTEM_PROMPT,
     max_new_tokens=DEFAULT_MAX_NEW_TOKENS,
     temperature=1.0,
@@ -42,7 +42,6 @@ def generate(
     if max_new_tokens > MAX_MAX_NEW_TOKENS:
         raise ValueError
 
-    history = history_with_input[:-1]
     input_token_length = get_input_token_length(message, history, system_prompt)
     if input_token_length > MAX_INPUT_TOKEN_LENGTH:
         response = f'The accumulated input is too long ({input_token_length} > {MAX_INPUT_TOKEN_LENGTH}). Please create a new thread.'
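
The net effect: generate now receives the chat history directly, without the current message appended, so the history_with_input[:-1] slice is no longer needed. Below is a minimal sketch of how the updated signature might be wired up, assuming Gradio's gr.ChatInterface calling convention, where the function receives (message, history) and history already excludes the in-flight message. The DEFAULT_* / MAX_* constants, the get_input_token_length helper, and the model call are placeholders standing in for the real definitions in app.py.

# Minimal sketch, not the actual app.py. Assumes gr.ChatInterface calls
# fn(message, history), with `history` a list of (user, assistant) pairs
# that excludes the current message -- which is why the
# history_with_input[:-1] slice above became unnecessary.
import gradio as gr

DEFAULT_SYSTEM_PROMPT = "You are a helpful assistant."  # placeholder value
DEFAULT_MAX_NEW_TOKENS = 256                            # placeholder value
MAX_MAX_NEW_TOKENS = 1024                               # placeholder value
MAX_INPUT_TOKEN_LENGTH = 4096                           # placeholder value


def get_input_token_length(message, history, system_prompt) -> int:
    # Stand-in for the tokenizer-based helper in the real app.py:
    # here we just count whitespace-separated words.
    text = system_prompt + message + "".join(u + a for u, a in history)
    return len(text.split())


def generate(
    message: str,
    history: list[tuple[str, str]],
    system_prompt=DEFAULT_SYSTEM_PROMPT,
    max_new_tokens=DEFAULT_MAX_NEW_TOKENS,
    temperature=1.0,
):
    if max_new_tokens > MAX_MAX_NEW_TOKENS:
        raise ValueError
    # `history` is used as-is; no slicing needed after this commit.
    input_token_length = get_input_token_length(message, history, system_prompt)
    if input_token_length > MAX_INPUT_TOKEN_LENGTH:
        return (
            f"The accumulated input is too long ({input_token_length} > "
            f"{MAX_INPUT_TOKEN_LENGTH}). Please create a new thread."
        )
    return f"(model reply to: {message})"  # placeholder for the actual model call


demo = gr.ChatInterface(generate)

if __name__ == "__main__":
    demo.launch()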