Update myagent.py
myagent.py +12 -14
myagent.py
CHANGED
@@ -11,9 +11,18 @@ import torch
 
 # Basic model wrapper for local inference with debug info
 class BasicAgent:
-    def __init__(self, model, tokenizer):
-        self.model, self.tokenizer = model, tokenizer
-        self.device = model.device
+    def __init__(self):
+        print("[INFO] Loading default model...")
+        self.model = AutoModelForCausalLM.from_pretrained(
+            "LiquidAI/LFM2-1.2B",
+            device_map="auto",
+            torch_dtype=torch.bfloat16,
+            trust_remote_code=True,
+        )
+
+        model_id = "LiquidAI/LFM2-1.2B"
+        self.tokenizer = AutoTokenizer.from_pretrained(model_id)
+        self.device = self.model.device if hasattr(self.model, 'device') else 'cpu'
         print(f"Model device: {self.device}")
 
     def _extract_prompt(self, prompt):
@@ -63,17 +72,6 @@
     def __call__(self, prompt, max_new_tokens=512):
         return self.generate(prompt, max_new_tokens)
 
-# Load your model and tokenizer
-def load_model(model_id="LiquidAI/LFM2-1.2B"):
-    print(f"Loading model: {model_id}")
-    model = AutoModelForCausalLM.from_pretrained(
-        model_id,
-        device_map="auto",
-        torch_dtype=torch.bfloat16,
-        trust_remote_code=True,
-    )
-    tokenizer = AutoTokenizer.from_pretrained(model_id)
-    return BasicAgent(model, tokenizer)
 
 # Run minimal test
 if __name__ == "__main__":
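With this change, BasicAgent no longer receives a model and tokenizer from load_model; it loads LiquidAI/LFM2-1.2B itself in __init__. A minimal usage sketch against the new constructor, assuming myagent.py is importable from the working directory and relying only on the __call__ wrapper visible in the second hunk:

# Sketch: drive the refactored BasicAgent through its __call__ wrapper.
# Assumes torch and transformers are installed and the model fits in memory.
from myagent import BasicAgent

agent = BasicAgent()  # downloads/loads LiquidAI/LFM2-1.2B internally
reply = agent("What is the capital of France?", max_new_tokens=64)
print(reply)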
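Removing load_model hard-codes the model id in the constructor (and it now appears twice in __init__: once as a literal, once as the model_id variable used for the tokenizer). A hypothetical variant, not part of this commit, that keeps the id configurable while preserving the zero-argument call:

# Hypothetical alternative (not in this commit): keep model_id configurable
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

class BasicAgent:
    def __init__(self, model_id="LiquidAI/LFM2-1.2B"):
        print(f"[INFO] Loading model: {model_id}")
        self.model = AutoModelForCausalLM.from_pretrained(
            model_id,
            device_map="auto",
            torch_dtype=torch.bfloat16,
            trust_remote_code=True,
        )
        self.tokenizer = AutoTokenizer.from_pretrained(model_id)
        self.device = self.model.device if hasattr(self.model, 'device') else 'cpu'
        print(f"Model device: {self.device}")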