from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the pre-trained tokenizer and model once at import time;
# device_map="auto" places the weights on the available device(s).
MODEL_NAME = "bigcode/starcoder"
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME, device_map="auto")

def generate_code(prompt: str, max_tokens: int = 256) -> str:
    """Generate code based on the input prompt."""
    if not prompt.strip():
        return "Error: Empty prompt provided."

    # Send inputs to whatever device the model was placed on by
    # device_map="auto"; this also covers the CPU-only case without
    # a manual CUDA check.
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    output = model.generate(**inputs, max_new_tokens=max_tokens)
    return tokenizer.decode(output[0], skip_special_tokens=True)
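
# A minimal usage sketch, not part of the original module: running the file
# directly generates a completion for a sample prompt. The prompt below is
# illustrative only; any code prefix works.
if __name__ == "__main__":
    sample_prompt = "def fibonacci(n: int) -> int:"
    print(generate_code(sample_prompt, max_tokens=64))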