import os

from transformers import AutoModelForCausalLM, AutoTokenizer

# Correct model name
MODEL_NAME = "bigcode/starcoder"

# Ensure the token is provided
HF_TOKEN = os.getenv("HUGGINGFACE_TOKEN")
if not HF_TOKEN:
    raise ValueError("Missing Hugging Face token. Set HUGGINGFACE_TOKEN as an environment variable.")

# Load tokenizer and model with authentication
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, token=HF_TOKEN)
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME, token=HF_TOKEN, device_map="auto")


def generate_code(prompt: str, max_tokens: int = 256) -> str:
    """Generates code based on the input prompt."""
    if not prompt.strip():
        return "Error: Empty prompt provided."
    # With device_map="auto" the model may be placed on any available device
    # (or sharded across several), so move the inputs to the device holding
    # the model's first parameters rather than guessing with cuda.is_available().
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    output = model.generate(**inputs, max_new_tokens=max_tokens)
    return tokenizer.decode(output[0], skip_special_tokens=True)
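
# Example usage: a minimal sketch of calling generate_code directly.
# The prompt string below is illustrative (not from the original snippet);
# any short code prefix works as input.
if __name__ == "__main__":
    sample_prompt = "def fibonacci(n: int) -> int:"
    print(generate_code(sample_prompt, max_tokens=64))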