schuler committed
Commit c97bd5e · verified · 1 Parent(s): ef24b05

Update app.py

Files changed (1): app.py (+3, -1)
app.py CHANGED

@@ -3,6 +3,7 @@
 
 import os
 from transformers import AutoTokenizer, AutoModelForCausalLM, GenerationConfig, pipeline
+from transformers import LlamaTokenizer
 import streamlit as st
 import torch
 
@@ -17,7 +18,8 @@ st.title("Experimental KPhi3 Model - Currently in Training")
 # Load tokenizer and model
 @st.cache_resource(show_spinner="Loading model...")
 def load_model(repo_name):
-    tokenizer = AutoTokenizer.from_pretrained(repo_name, trust_remote_code=True)
+    # tokenizer = AutoTokenizer.from_pretrained(repo_name, trust_remote_code=True)
+    tokenizer = LlamaTokenizer.from_pretrained(repo_name, trust_remote_code=True)
     generator_conf = GenerationConfig.from_pretrained(repo_name)
     model = AutoModelForCausalLM.from_pretrained(repo_name, trust_remote_code=True, torch_dtype=torch.bfloat16)
     return tokenizer, generator_conf, model
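
For context, a minimal sketch of how the cached loader could feed the Streamlit app downstream; the repo id, prompt widget, and generation settings below are illustrative assumptions, not part of this commit.

import streamlit as st
from transformers import pipeline

# Hypothetical repo id; the Space pins its own checkpoint.
REPO_NAME = "your-org/your-kphi3-checkpoint"

# load_model is the cached function changed in this commit.
tokenizer, generator_conf, model = load_model(REPO_NAME)

# Wrap the loaded model and tokenizer in a text-generation pipeline.
generator = pipeline("text-generation", model=model, tokenizer=tokenizer)

prompt = st.text_input("Prompt", "Once upon a time")
if st.button("Generate"):
    # Forwarding the checkpoint's GenerationConfig to generate() is assumed
    # to be supported by the installed transformers version.
    output = generator(prompt, generation_config=generator_conf, max_new_tokens=64)
    st.write(output[0]["generated_text"])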