appvoid committed on
Commit
7008e6a
·
verified ·
1 Parent(s): 7808c60

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -9,17 +9,17 @@ from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStream
9
 
10
  MAX_MAX_NEW_TOKENS = 2048
11
  DEFAULT_MAX_NEW_TOKENS = 1024
12
- MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
13
 
14
  DESCRIPTION = """\
15
- # palmer-003
16
  """
17
 
18
  if not torch.cuda.is_available():
19
  DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>"
20
 
21
  if torch.cuda.is_available():
22
- model_id = "appvoid/palmer-003"
23
  model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", load_in_4bit=False)
24
  tokenizer = AutoTokenizer.from_pretrained(model_id)
25
  tokenizer.use_default_system_prompt = True
 
9
 
10
  MAX_MAX_NEW_TOKENS = 2048
11
  DEFAULT_MAX_NEW_TOKENS = 1024
12
+ MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "2048"))
13
 
14
  DESCRIPTION = """\
15
+ # palmer-004
16
  """
17
 
18
  if not torch.cuda.is_available():
19
  DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>"
20
 
21
  if torch.cuda.is_available():
22
+ model_id = "appvoid/palmer-004"
23
  model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", load_in_4bit=False)
24
  tokenizer = AutoTokenizer.from_pretrained(model_id)
25
  tokenizer.use_default_system_prompt = True