bwilkie committed on
Commit
3d16b77
·
verified ·
1 Parent(s): 84b2d97

Update myagent.py

Browse files
Files changed (1) hide show
  1. myagent.py +8 -6
myagent.py CHANGED
@@ -63,12 +63,14 @@ class LocalLlamaModel:
63
  def generate(self, prompt: str, max_new_tokens=512, **kwargs):
64
  try:
65
  # Generate answer using the provided prompt - following the recommended pattern
66
- input_ids = self.tokenizer.apply_chat_template(
67
- [{"role": "user", "content": str(prompt)}],
68
- add_generation_prompt=True,
69
- return_tensors="pt",
70
- tokenize=True,
71
- ).to(self.model.device)
 
 
72
 
73
  # Generate output - exactly as in recommended code
74
  output = self.model.generate(
 
63
  def generate(self, prompt: str, max_new_tokens=512, **kwargs):
64
  try:
65
  # Generate answer using the provided prompt - following the recommended pattern
66
+ # input_ids = self.tokenizer.apply_chat_template(
67
+ # [{"role": "user", "content": str(prompt)}],
68
+ # add_generation_prompt=True,
69
+ # return_tensors="pt",
70
+ # tokenize=True,
71
+ # ).to(self.model.device)
72
+
73
+ inputs = self.tokenizer(prompt, return_tensors="pt").to(self.model.device)
74
 
75
  # Generate output - exactly as in recommended code
76
  output = self.model.generate(