frankaging committed on
Commit
fcb8864
·
1 Parent(s): 8a3c057

initial commit

Browse files
Files changed (1) hide show
  1. app.py +5 -5
app.py CHANGED
@@ -19,11 +19,6 @@ MAX_MAX_NEW_TOKENS = 2048
19
  DEFAULT_MAX_NEW_TOKENS = 1024
20
  MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
21
 
22
- terminators = [
23
- tokenizer.eos_token_id,
24
- tokenizer.convert_tokens_to_ids("<|eot_id|>")
25
- ]
26
-
27
  system_prompt = "You are a helpful assistant."
28
 
29
  DESCRIPTION = """\
@@ -55,6 +50,11 @@ if torch.cuda.is_available():
55
  tokenizer = AutoTokenizer.from_pretrained(model_id)
56
  tokenizer.use_default_system_prompt = True
57
 
 
 
 
 
 
58
  # position info about the interventions
59
  share_weights = True # whether the prefix and suffix interventions sharing weights.
60
  positions="f1+l1" # the intervening positions of prefix tokens (f[irst]1) and suffix tokens (l[ast]1).
 
19
  DEFAULT_MAX_NEW_TOKENS = 1024
20
  MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
21
 
 
 
 
 
 
22
  system_prompt = "You are a helpful assistant."
23
 
24
  DESCRIPTION = """\
 
50
  tokenizer = AutoTokenizer.from_pretrained(model_id)
51
  tokenizer.use_default_system_prompt = True
52
 
53
+ terminators = [
54
+ tokenizer.eos_token_id,
55
+ tokenizer.convert_tokens_to_ids("<|eot_id|>")
56
+ ]
57
+
58
  # position info about the interventions
59
  share_weights = True # whether the prefix and suffix interventions sharing weights.
60
  positions="f1+l1" # the intervening positions of prefix tokens (f[irst]1) and suffix tokens (l[ast]1).