rodrisouza committed on
Commit 651cba0 · verified · 1 Parent(s): f9160fd

Update app.py

Files changed (1)
  1. app.py +13 -4
app.py CHANGED
@@ -48,10 +48,18 @@ def load_model(model_name):
         del model
         torch.cuda.empty_cache()
 
+        # Check if the model is in models or quantized_models and load accordingly
+        if model_name in models:
+            model_path = models[model_name]
+        elif model_name in quantized_models:
+            model_path = quantized_models[model_name]
+        else:
+            raise ValueError(f"Model {model_name} not found in either models or quantized_models.")
+
         tokenizer = AutoTokenizer.from_pretrained(
-            models[model_name],
+            model_path,
             padding_side='left',
-            token=hugging_face_token,
+            use_auth_token=hugging_face_token,
             trust_remote_code=True
         )
 
@@ -61,8 +69,8 @@ def load_model(model_name):
         tokenizer.add_special_tokens({'pad_token': tokenizer.eos_token})
 
         model = AutoModelForCausalLM.from_pretrained(
-            models[model_name],
-            token=hugging_face_token,
+            model_path,
+            use_auth_token=hugging_face_token,
             trust_remote_code=True
         )
 
@@ -76,6 +84,7 @@ def load_model(model_name):
         raise e
     return tokenizer, model
 
+
 # Ensure the initial model is loaded
 tokenizer, model = load_model(selected_model)
 
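For context, a minimal sketch of the lookup logic this commit introduces, using hypothetical `models` and `quantized_models` dictionaries (the real app.py defines its own model names and Hub repo ids, and obtains `hugging_face_token` separately):

# Minimal sketch of the model-path resolution added in this commit.
# The dictionary contents below are hypothetical placeholders.
models = {
    "example-model": "org/example-model",            # hypothetical full-precision entry
}
quantized_models = {
    "example-model-gptq": "org/example-model-GPTQ",  # hypothetical quantized entry
}

def resolve_model_path(model_name):
    """Return the Hub repo id for model_name, whichever dict it appears in."""
    if model_name in models:
        return models[model_name]
    elif model_name in quantized_models:
        return quantized_models[model_name]
    else:
        raise ValueError(f"Model {model_name} not found in either models or quantized_models.")

print(resolve_model_path("example-model"))       # -> org/example-model
print(resolve_model_path("example-model-gptq"))  # -> org/example-model-GPTQ

In the updated load_model, the resolved path is then passed to both AutoTokenizer.from_pretrained and AutoModelForCausalLM.from_pretrained together with use_auth_token=hugging_face_token, so full-precision and quantized models share the same loading code path.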