arya-ai-model committed on
Commit
ba41c7f
·
1 Parent(s): 214e325

updated model.py

Browse files
Files changed (1) hide show
  1. model.py +8 -2
model.py CHANGED
@@ -8,7 +8,13 @@ HF_TOKEN = os.getenv("HUGGINGFACE_TOKEN")
8
  # Force CPU mode
9
  device = "cpu" # Change this from "cuda"
10
 
 
11
  tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, token=HF_TOKEN)
 
 
 
 
 
12
  model = AutoModelForCausalLM.from_pretrained(
13
  MODEL_NAME,
14
  token=HF_TOKEN,
@@ -18,6 +24,6 @@ model = AutoModelForCausalLM.from_pretrained(
18
  ).to(device)
19
 
20
  def generate_code(prompt: str, max_tokens: int = 256):
21
- inputs = tokenizer(prompt, return_tensors="pt").to(device)
22
- output = model.generate(**inputs, max_new_tokens=max_tokens)
23
  return tokenizer.decode(output[0], skip_special_tokens=True)
 
8
  # Force CPU mode
9
  device = "cpu" # Change this from "cuda"
10
 
11
+ # Load tokenizer and model
12
  tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, token=HF_TOKEN)
13
+
14
+ # Ensure the tokenizer has a pad token set
15
+ if tokenizer.pad_token is None:
16
+ tokenizer.pad_token = tokenizer.eos_token # Set pad_token to eos_token
17
+
18
  model = AutoModelForCausalLM.from_pretrained(
19
  MODEL_NAME,
20
  token=HF_TOKEN,
 
24
  ).to(device)
25
 
26
  def generate_code(prompt: str, max_tokens: int = 256):
27
+ inputs = tokenizer(prompt, return_tensors="pt", padding=True).to(device) # Enable padding
28
+ output = model.generate(**inputs, max_new_tokens=max_tokens, pad_token_id=tokenizer.pad_token_id) # Explicit pad_token_id
29
  return tokenizer.decode(output[0], skip_special_tokens=True)