import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from peft import PeftModel

class DeepSeekLoraCPUInference:
    def __init__(self, base_model="deepseek-ai/deepseek-r1", fine_tuned_model="./deepseek_lora_finetuned"):
        # The tokenizer is loaded from the fine-tuned checkpoint directory
        self.tokenizer = AutoTokenizer.from_pretrained(fine_tuned_model)
        # Quantize to 4-bit NF4 only when a GPU is available; on CPU the model
        # is loaded in full precision (bitsandbytes 4-bit quantization needs CUDA)
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        quant_config = BitsAndBytesConfig(
            load_in_4bit=True,
            bnb_4bit_compute_dtype=torch.bfloat16,
            bnb_4bit_quant_type="nf4",
            bnb_4bit_use_double_quant=True,
        )
        self.model = AutoModelForCausalLM.from_pretrained(
            base_model,
            quantization_config=quant_config if self.device == "cuda" else None,
            device_map=self.device,
        )
        # Attach the fine-tuned LoRA adapter on top of the base model.
        # device_map has already placed the weights, so no extra .to() call is
        # needed (and .to() is not supported on 4-bit quantized models).
        self.model = PeftModel.from_pretrained(self.model, fine_tuned_model)
        self.model.eval()

    def generate_text(self, prompt, max_length=200):
        """Generate text on CPU or GPU; max_length includes the prompt tokens."""
        inputs = self.tokenizer(prompt, return_tensors="pt").to(self.device)
        with torch.no_grad():
            output = self.model.generate(
                **inputs,
                max_length=max_length,
                do_sample=True,  # required for temperature/top_p to take effect
                temperature=0.7,
                top_p=0.9,
                repetition_penalty=1.1,
            )
        return self.tokenizer.decode(output[0], skip_special_tokens=True)

if __name__ == "__main__":
    model = DeepSeekLoraCPUInference()
    prompt = "The implications of AI in the next decade are"
    generated_text = model.generate_text(prompt)
    print("\nGenerated Text:\n", generated_text)