helloerikaaa committed on
Commit
eb331f0
·
verified ·
1 Parent(s): 7790b7d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +0 -4
app.py CHANGED
@@ -11,10 +11,6 @@ MAX_MAX_NEW_TOKENS = 2048
11
  DEFAULT_MAX_NEW_TOKENS = 1024
12
  MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
13
 
14
- if not torch.cuda.is_available():
15
- DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>"
16
-
17
-
18
  if torch.cuda.is_available():
19
  model_id = "meta-llama/Llama-2-13b-chat-hf"
20
  model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", load_in_4bit=True)
 
11
  DEFAULT_MAX_NEW_TOKENS = 1024
12
  MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
13
 
 
 
 
 
14
  if torch.cuda.is_available():
15
  model_id = "meta-llama/Llama-2-13b-chat-hf"
16
  model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", load_in_4bit=True)