from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

# Path to the locally saved quantized model directory
model_path = '/path/to/your/quantized_model_directory'

# Load tokenizer from the same local directory
tokenizer = AutoTokenizer.from_pretrained(model_path)

# Load quantized model
quantized_model = AutoModelForCausalLM.from_pretrained(model_path)

# Check if a GPU is available and move the model to it
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
quantized_model.to(device)

# Example text input
text_input = "How did Tesla perform in Q1 2024?"

# Tokenize input and move tensors to the same device as the model
inputs = tokenizer(text_input, return_tensors="pt").to(device)

# Generate response
outputs = quantized_model.generate(**inputs, max_length=150, do_sample=False)

# Decode generated tokens to a readable string
response = tokenizer.decode(outputs[0], skip_special_tokens=True)

# Print generated response
print(f"Generated response: {response}")