Tonic committed on
Commit 6e49c29 · 1 Parent(s): 5d8f4a6

Update app.py

Files changed (1): app.py +5 -1
app.py CHANGED
@@ -5,12 +5,16 @@ import os
 import gradio as gr
 import sentencepiece


+model_id = "01-ai/Yi-34B-200K"

 os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'max_split_size_mb:126'
+device = "cuda" if torch.cuda.is_available() else "cpu"

 # Load the model and tokenizer using transformers
-model = AutoModelForCausalLM.from_pretrained("01-ai/Yi-34B-200K", device_map="auto", torch_dtype="auto", trust_remote_code=True)
 tokenizer = YiTokenizer(vocab_file="./tokenizer.model")
+model = AutoModelForCausalLM.from_pretrained(model_id, trust_remote_code=True)
+model = model.to(torch.bfloat16)
+model = model.to(device)

 def run(message, chat_history, max_new_tokens=4056, temperature=3.5, top_p=0.9, top_k=800):
     prompt = get_prompt(message, chat_history)
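
For context, a rough sketch of how the setup section of app.py reads after this commit. It is not part of the commit itself: it assumes that `import torch`, the transformers import of AutoModelForCausalLM, and the source of YiTokenizer (for example the repository's tokenization_yi module) sit above the hunk shown here, since those lines are outside the diff.

import os

import gradio as gr
import sentencepiece
import torch  # assumed to be imported above the hunk shown in the diff
from transformers import AutoModelForCausalLM  # assumed import, not shown in the diff
from tokenization_yi import YiTokenizer  # assumed source of YiTokenizer

# Cap CUDA allocator split size to reduce memory fragmentation.
os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'max_split_size_mb:126'

model_id = "01-ai/Yi-34B-200K"
device = "cuda" if torch.cuda.is_available() else "cpu"

# Load the tokenizer and model, cast the weights to bfloat16,
# then move the whole model onto a single device.
tokenizer = YiTokenizer(vocab_file="./tokenizer.model")
model = AutoModelForCausalLM.from_pretrained(model_id, trust_remote_code=True)
model = model.to(torch.bfloat16)
model = model.to(device)

Compared with the previous device_map="auto", torch_dtype="auto" call, which lets transformers/accelerate pick dtypes and shard the model across available devices, the new version loads the checkpoint first and then explicitly casts it to bfloat16 and places it on one device.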