from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the pre-trained model and tokenizer.
# (StarCoder is a gated model on the Hugging Face Hub; you may need to
# authenticate and accept its license before downloading.)
MODEL_NAME = "bigcode/starcoder"
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME, device_map="auto")


def generate_code(prompt: str, max_tokens: int = 256) -> str:
    """Generate code based on the input prompt."""
    if not prompt.strip():
        return "Error: Empty prompt provided."
    # Move inputs to whatever device the model was dispatched to by
    # device_map="auto", rather than assuming CUDA is available.
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    output = model.generate(
        **inputs,
        max_new_tokens=max_tokens,
        pad_token_id=tokenizer.eos_token_id,  # silence the missing-pad-token warning
    )
    return tokenizer.decode(output[0], skip_special_tokens=True)
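

# A minimal usage sketch (not from the original source): the prompt below is
# purely illustrative; generation runs once and the completion is printed.
if __name__ == "__main__":
    sample_prompt = "def fibonacci(n: int) -> int:"
    print(generate_code(sample_prompt, max_tokens=128))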