# app/inference.py
from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer
from peft import PeftModel
import torch

# Fine-tuned LoRA adapter directory and the base checkpoint it was trained on
ADAPTER_PATH = "adapter"
BASE_MODEL = "Qwen/Qwen2-0.5B-Instruct"
# Load the tokenizer and base model; device_map="auto" places the weights on
# the available GPU (or CPU), and fp16 halves the memory footprint
tokenizer = AutoTokenizer.from_pretrained(BASE_MODEL, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    BASE_MODEL,
    device_map="auto",
    trust_remote_code=True,
    torch_dtype=torch.float16,
)
# Attach the LoRA adapter on top of the base weights
model = PeftModel.from_pretrained(model, ADAPTER_PATH)
model.eval()
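# Optionally, model.merge_and_unload() (a PEFT API) could fold the LoRA
# weights into the base model for slightly faster inference; they are kept
# separate here so the adapter stays swappable.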
streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
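# The streamer above can be passed as model.generate(..., streamer=streamer)
# to print tokens as they are produced; generate_response below returns the
# complete string instead, so the streamer is left unused here.
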
def generate_response(prompt: str, conversation_history: list | None = None) -> str:
    """
    Generate a response, optionally conditioned on prior conversation turns.

    Args:
        prompt: The current user message.
        conversation_history: List of {"role": "user" | "assistant", "content": "..."} dicts.
    """
    # Build the conversation in ChatML format, starting with the system message
    formatted = "<|im_start|>system\nYou are a helpful AI assistant.<|im_end|>\n"

    # Replay prior turns so the model sees the conversation context
    if conversation_history:
        for msg in conversation_history:
            role = msg.get("role", "")
            content = msg.get("content", "")
            if role == "user":
                formatted += f"<|im_start|>user\n{content}<|im_end|>\n"
            elif role == "assistant":
                formatted += f"<|im_start|>assistant\n{content}<|im_end|>\n"

    # Append the current prompt and open the assistant turn for generation
    formatted += f"<|im_start|>user\n{prompt}<|im_end|>\n<|im_start|>assistant\n"
    # Tokenize the prompt and move the tensors to the model's device
    inputs = tokenizer(formatted, return_tensors="pt").to(model.device)
    with torch.no_grad():
        output = model.generate(
            **inputs,
            max_new_tokens=512,
            temperature=0.7,
            top_p=0.9,
            do_sample=True,
            # Explicit pad id avoids the "pad_token_id not set" warning
            pad_token_id=tokenizer.eos_token_id,
        )
    # Decode only the newly generated tokens. Decoding the full sequence with
    # skip_special_tokens=True would strip the <|im_start|>/<|im_end|> markers,
    # so splitting the decoded text on them would silently return the whole
    # transcript; slicing off the prompt tokens avoids that.
    prompt_len = inputs["input_ids"].shape[1]
    answer = tokenizer.decode(output[0][prompt_len:], skip_special_tokens=True).strip()
    return answer
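
# Alternative prompt construction (a sketch, not used above): Qwen2's tokenizer
# ships a ChatML chat template, so the manual string building in
# generate_response could be replaced with tokenizer.apply_chat_template.
# The helper name below is hypothetical, shown for illustration only.
def build_prompt_with_template(prompt: str, conversation_history: list | None = None) -> str:
    messages = [{"role": "system", "content": "You are a helpful AI assistant."}]
    if conversation_history:
        messages.extend(conversation_history)
    messages.append({"role": "user", "content": prompt})
    # add_generation_prompt=True appends the opening assistant tag so the
    # model continues the text as the assistant
    return tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )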
# Example usage with conversation history
if __name__ == "__main__":
    # Seed a short history so the follow-up question has context to resolve against
    history = [
        {"role": "user", "content": "What is Python?"},
        {"role": "assistant", "content": "Python is a high-level programming language..."},
    ]
    # The follow-up question only makes sense given the conversation context above
    response = generate_response("Can you show me a simple example?", conversation_history=history)
    print("Response:", response)