from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer
from peft import PeftModel
import torch

ADAPTER_PATH = "adapter"
BASE_MODEL = "Qwen/Qwen2-0.5B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(BASE_MODEL, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    BASE_MODEL,
    device_map="auto",
    trust_remote_code=True,
    torch_dtype=torch.float16
)
# Attach the fine-tuned LoRA adapter on top of the frozen base weights
model = PeftModel.from_pretrained(model, ADAPTER_PATH)
model.eval()

# Streams tokens to stdout when passed to model.generate(streamer=...);
# see the streaming example at the bottom of the file.
streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
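
# Optional deployment helper, not part of the original workflow: PEFT can fold the
# LoRA weights into the base model so the checkpoint can be served without the peft
# dependency. merge_and_unload() is a standard PeftModel method; the "merged_model"
# output directory below is only a placeholder.
def export_merged_model(output_dir: str = "merged_model") -> None:
    """Merge the adapter into the base weights and save a standalone copy."""
    merged = model.merge_and_unload()  # merges in place and returns the plain base model
    merged.save_pretrained(output_dir)
    tokenizer.save_pretrained(output_dir)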

def generate_response(prompt: str, conversation_history: list = None) -> str:
    """
    Generate response with optional conversation history
    
    Args:
        prompt: Current user message
        conversation_history: List of {"role": "user/assistant", "content": "..."}
    """
    
    # Build conversation format
    formatted = "<|im_start|>system\nYou are a helpful AI assistant.<|im_end|>\n"
    
    # Add conversation history if provided
    if conversation_history:
        for msg in conversation_history:
            role = msg.get("role", "")
            content = msg.get("content", "")
            
            if role == "user":
                formatted += f"<|im_start|>user\n{content}<|im_end|>\n"
            elif role == "assistant":
                formatted += f"<|im_start|>assistant\n{content}<|im_end|>\n"
    
    # Add current prompt
    formatted += f"<|im_start|>user\n{prompt}<|im_end|>\n<|im_start|>assistant\n"
    
    inputs = tokenizer(formatted, return_tensors="pt").to(model.device)
    
    with torch.no_grad():
        output = model.generate(
            **inputs,
            max_new_tokens=512,
            temperature=0.7,
            top_p=0.9,
            do_sample=True,
            pad_token_id=tokenizer.eos_token_id
        )
    
    # Decode only the newly generated tokens. (With skip_special_tokens=True the
    # <|im_start|>/<|im_end|> markers are stripped from the decode, so splitting the
    # full output on them would return the whole conversation instead of the reply.)
    new_tokens = output[0][inputs["input_ids"].shape[1]:]
    answer = tokenizer.decode(new_tokens, skip_special_tokens=True).strip()
    
    return answer
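
# Alternative formatting path (a sketch, not part of the original script): let the
# tokenizer's built-in chat template build the ChatML prompt instead of hand-written
# strings. Assumes the tokenizer ships a chat template, which the Qwen2 instruct
# tokenizers do; the function name here is ours.
def generate_response_via_template(prompt: str, conversation_history: list = None) -> str:
    """Same behaviour as generate_response(), but using tokenizer.apply_chat_template()."""
    messages = [{"role": "system", "content": "You are a helpful AI assistant."}]
    if conversation_history:
        messages.extend(conversation_history)
    messages.append({"role": "user", "content": prompt})

    # add_generation_prompt=True appends the assistant header so the model replies next
    text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    inputs = tokenizer(text, return_tensors="pt").to(model.device)

    with torch.no_grad():
        output = model.generate(
            **inputs,
            max_new_tokens=512,
            temperature=0.7,
            top_p=0.9,
            do_sample=True,
            pad_token_id=tokenizer.eos_token_id,
        )

    # Decode only the newly generated tokens
    new_tokens = output[0][inputs["input_ids"].shape[1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True).strip()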

# Example usage with conversation history
if __name__ == "__main__":
    # Test with conversation history
    history = [
        {"role": "user", "content": "What is Python?"},
        {"role": "assistant", "content": "Python is a high-level programming language..."},
    ]
    
    # This should now consider the conversation context
    response = generate_response("Can you show me a simple example?", conversation_history=history)
    print("Response:", response)