Commit e795394
Parent(s): 568d135
added inference code to README

README.md CHANGED
@@ -53,6 +53,156 @@ Here's the training loss progression:

[training loss plot]

### Inference

Download the model files from the Hugging Face Hub:

```python
from huggingface_hub import snapshot_download

snapshot_download(repo_id="PursuitOfDataScience/Argonne-1.0")
```
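
`snapshot_download` returns the local directory the snapshot was downloaded to; the sketch below (the `model_dir` variable and the `print` are illustrative additions, not part of the original README) captures that path so it can be passed as `--model_dir` to the chat script:

```python
from huggingface_hub import snapshot_download

# snapshot_download returns the path of the local snapshot directory;
# this is the value to pass as --model_dir to minimal_chat.py below.
model_dir = snapshot_download(repo_id="PursuitOfDataScience/Argonne-1.0")
print(model_dir)
```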

Set up `minimal_chat.py` as follows:

```python
import os
import sys
import torch
import json
import argparse
import time
from transformers import AutoTokenizer

def main():
    parser = argparse.ArgumentParser(description="Minimal Argonne chat")
    parser.add_argument("--model_dir", required=True, help="Directory containing model files")
    parser.add_argument("--mp_dir", required=True, help="Directory containing mp_pretrain.py")
    args = parser.parse_args()

    # Print all input arguments
    print(f"Model directory: {args.model_dir}")
    print(f"mp_pretrain directory: {args.mp_dir}")

    # Check that directories exist
    if not os.path.exists(args.model_dir):
        print(f"Error: Model directory {args.model_dir} does not exist")
        sys.exit(1)
    if not os.path.exists(args.mp_dir):
        print(f"Error: mp_pretrain directory {args.mp_dir} does not exist")
        sys.exit(1)

    # Check for required files
    required_files = [
        os.path.join(args.model_dir, "config.json"),
        os.path.join(args.model_dir, "tokenizer.json")
    ]

    for file_path in required_files:
        if not os.path.exists(file_path):
            print(f"Error: Required file {file_path} does not exist")
            sys.exit(1)

    # Check for either pytorch_model.bin or model.safetensors
    weights_file = None
    if os.path.exists(os.path.join(args.model_dir, "pytorch_model.bin")):
        weights_file = os.path.join(args.model_dir, "pytorch_model.bin")
        print(f"Found PyTorch weights at {weights_file}")
    elif os.path.exists(os.path.join(args.model_dir, "model.safetensors")):
        weights_file = os.path.join(args.model_dir, "model.safetensors")
        print(f"Found safetensors weights at {weights_file}")
    else:
        print(f"Error: No model weights found in {args.model_dir}")
        sys.exit(1)

    # Add mp_pretrain directory to Python path
    sys.path.insert(0, args.mp_dir)

    # Import required modules
    try:
        print("Importing modules from mp_pretrain...")
        from mp_pretrain import ArgonneModelParallel, ArgonneConfig, load_bpe_tokenizer
        print("Import successful")
    except ImportError as e:
        print(f"Error importing modules from mp_pretrain.py: {e}")
        sys.exit(1)

    # Load the config
    print("Loading model config...")
    with open(os.path.join(args.model_dir, "config.json"), 'r') as f:
        config_dict = json.load(f)
    config = ArgonneConfig(**config_dict)
    print("Config loaded")

    # Load the tokenizer
    print("Loading tokenizer...")
    tokenizer = AutoTokenizer.from_pretrained(args.model_dir)
    print("Tokenizer loaded")

    # Create the model
    print("Creating model...")
    model = ArgonneModelParallel(config)
    print("Model created")

    # Load weights
    print(f"Loading weights from {weights_file}...")
    if weights_file.endswith(".bin"):
        # Load PyTorch weights
        state_dict = torch.load(weights_file, map_location="cpu")
    else:
        # Load safetensors weights
        from safetensors.torch import load_file
        state_dict = load_file(weights_file)

    # Load state dict
    print("Applying weights to model...")
    model.load_state_dict(state_dict, strict=False)
    print("Weights loaded")

    # Move to GPU if available
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"Moving model to {device}...")
    model = model.to(device)

    # Set devices attribute needed for generate
    model.devices = [device]

    print("Model ready for chat!")

    # Simple chat loop
    print("\n" + "="*50)
    print("Argonne Model Chat - Type 'exit' to quit")
    print("="*50 + "\n")

    while True:
        user_input = input("You: ").strip()
        if user_input.lower() in ["exit", "quit"]:
            print("Goodbye!")
            break

        # Encode input
        input_ids = tokenizer.encode(user_input, return_tensors="pt").to(device)

        # Generate a response
        with torch.no_grad():
            output_ids = model.generate(
                input_ids,
                max_new_tokens=50,
                temperature=0.7,
                top_k=50)[0]

        # Decode output
        response = tokenizer.decode(output_ids, skip_special_tokens=True)
        print(f"Model: {response}")


if __name__ == "__main__":
    main()
```
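
Two details in the script above are worth noting: `sys.path.insert(0, args.mp_dir)` makes `mp_pretrain.py` importable without installing it as a package, and `load_state_dict(..., strict=False)` lets loading proceed even if the checkpoint and the model definition disagree on some parameter names (mismatched keys are skipped instead of raising an error).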

Run it with:

```bash
# --mp_dir must point to the directory that contains mp_pretrain.py, not to the file itself
python minimal_chat.py --model_dir /path/to/model --mp_dir /path/to/mp_pretrain_dir
```
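
For a quick non-interactive check, the same objects can drive a single completion instead of the chat loop. This is a minimal sketch that reuses `model`, `tokenizer`, and `device` exactly as set up in `minimal_chat.py` and assumes the same `generate` signature; the prompt string is only an example:

```python
import torch

# Assumes model, tokenizer, and device were prepared as in minimal_chat.py.
prompt = "Argonne-1.0 is a language model that"
input_ids = tokenizer.encode(prompt, return_tensors="pt").to(device)

with torch.no_grad():
    output_ids = model.generate(
        input_ids,
        max_new_tokens=50,
        temperature=0.7,
        top_k=50)[0]

print(tokenizer.decode(output_ids, skip_special_tokens=True))
```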

### 📝 Example Outputs

Below are generated examples illustrating Argonne-1.0's style and capability when prompted: