import os

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL_NAME = "bigcode/starcoderbase-1b"
HF_TOKEN = os.getenv("HUGGINGFACE_TOKEN")

# Force CPU mode
device = "cpu"  # Change this from "cuda"

tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, token=HF_TOKEN)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_NAME,
    token=HF_TOKEN,
    torch_dtype=torch.float32,  # float16 is poorly supported on CPU; full precision is safer
    trust_remote_code=True,
).to(device)
# Note: device_map="auto" is omitted here. It hands placement to accelerate,
# and calling .to(device) on a dispatched model raises an error. For forced
# CPU mode, load normally and move the model explicitly.


def generate_code(prompt: str, max_tokens: int = 256) -> str:
    """Generate a completion for `prompt` using greedy decoding."""
    inputs = tokenizer(prompt, return_tensors="pt").to(device)
    output = model.generate(
        **inputs,
        max_new_tokens=max_tokens,
        pad_token_id=tokenizer.eos_token_id,  # StarCoder has no pad token; silences the warning
    )
    return tokenizer.decode(output[0], skip_special_tokens=True)
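

# A minimal usage sketch (the prompt below is a hypothetical example; CPU
# generation on a 1B-parameter model may take a minute or more per call):
if __name__ == "__main__":
    completion = generate_code("def fibonacci(n: int) -> int:\n")
    print(completion)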