Create inference.py
inference.py +81 -0
inference.py
ADDED
@@ -0,0 +1,81 @@
import torch
from transformers import AutoModelForCausalLM, PreTrainedTokenizerFast

# Paths to your fine-tuned model and tokenizer (update these)
MODEL_DIR = "./mixtral_finetuned"  # Directory from your training script
TOKENIZER_JSON = "./mixtral_finetuned/tokenizer.json"  # Custom tokenizer file

# Device setup
device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"Using device: {device}")


class Charm15Inference:
    def __init__(self, model_dir=MODEL_DIR, tokenizer_json=TOKENIZER_JSON):
        """Initialize model and tokenizer for inference."""
        try:
            # Load tokenizer from JSON (assumes your custom BPE or fine-tuned output)
            self.tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_json)
            if self.tokenizer.pad_token is None:
                self.tokenizer.pad_token = self.tokenizer.eos_token

            # Load model with optimizations. device_map="auto" already places
            # the weights, so no extra .to(device) call is needed (moving a
            # dispatched model can raise a RuntimeError).
            self.model = AutoModelForCausalLM.from_pretrained(
                model_dir,
                torch_dtype=torch.bfloat16,  # Match your training dtype
                device_map="auto",           # Auto-distribute across GPU/CPU
                low_cpu_mem_usage=True,      # Reduce RAM usage
            )
            print(f"Loaded model from {model_dir} and tokenizer from {tokenizer_json}")
        except Exception as e:
            print(f"Error loading model/tokenizer: {e}")
            raise

    def generate_response(self, prompt, max_length=2048, temperature=0.7, top_k=50, top_p=0.95):
        """Generate a response from the model."""
        try:
            # Tokenize input and move it to the device the model was placed on
            inputs = self.tokenizer(prompt, return_tensors="pt").to(self.model.device)

            # Generate output with your earlier generation config in mind
            output = self.model.generate(
                **inputs,
                max_length=max_length,   # Prompt + new tokens; aligned with your 2048/4096 configs
                temperature=temperature,
                top_k=top_k,
                top_p=top_p,
                do_sample=True,          # Sampling for variety
                repetition_penalty=1.1,  # From your generation config
                no_repeat_ngram_size=2,  # Prevent repetition
                use_cache=True,          # Speed up inference
                pad_token_id=self.tokenizer.pad_token_id,  # Avoid missing-pad-token warning
            )
            return self.tokenizer.decode(output[0], skip_special_tokens=True)
        except Exception as e:
            print(f"Generation error: {e}")
            return "Sorry, I couldn't generate a response."


if __name__ == "__main__":
    # Initialize inference class
    try:
        infer = Charm15Inference()
    except Exception as e:
        print(f"Initialization failed: {e}")
        raise SystemExit(1)

    # Interactive loop
    print("Chat with Charm 15 (type 'exit' or 'quit' to stop):")
    while True:
        user_input = input("User: ")
        if user_input.lower() in ["exit", "quit"]:
            print("Goodbye!")
            break
        if not user_input.strip():
            print("Charm 15: Please say something!")
            continue

        response = infer.generate_response(user_input)
        print("Charm 15:", response)

    # Cleanup
    del infer.model
    torch.cuda.empty_cache()
    print("Memory cleared.")
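
The class can also be used programmatically rather than through the interactive loop. A minimal sketch, assuming inference.py is importable from the working directory and the fine-tuned weights exist at MODEL_DIR; the prompt and parameter values here are illustrative only:

# Hypothetical programmatic usage of the class defined above
from inference import Charm15Inference

infer = Charm15Inference()
reply = infer.generate_response(
    "Summarize what a mixture-of-experts model is in two sentences.",
    max_length=512,   # shorter cap for a quick answer
    temperature=0.5,  # lower temperature for a more focused reply
)
print(reply)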
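
A bfloat16 Mixtral-class checkpoint needs tens of gigabytes of GPU memory. If the model does not fit, transformers can load it 4-bit quantized through its bitsandbytes integration instead of the bf16 path used above. A hedged sketch, assuming the bitsandbytes package is installed and the same ./mixtral_finetuned path; expect some quality loss versus bf16:

# Optional: 4-bit quantized loading for GPUs that cannot hold bf16 weights
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.bfloat16,  # compute in bf16 for speed/quality
    bnb_4bit_quant_type="nf4",              # normal-float 4-bit quantization
)

model = AutoModelForCausalLM.from_pretrained(
    "./mixtral_finetuned",  # same assumed path as MODEL_DIR above
    quantization_config=bnb_config,
    device_map="auto",
)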