Update rp.py
rp.py CHANGED
@@ -1,7 +1,7 @@
 import torch
 from safetensors.torch import load_file, save_file
 
-def reduce_key_size(input_file, output_file, reduction_factor=0.50):
+def reduce_key_size(input_file, output_file, reduction_factor=0.45):
     # Load the model
     model_data = load_file(input_file)
 
@@ -12,18 +12,15 @@ def reduce_key_size(input_file, output_file, reduction_factor=0.50):
         # Calculate the new size
         new_size = int(original_tensor.size(0) * (1 - reduction_factor))
 
-        # Resize the tensor (
-        if new_size > 0:
+        # Resize the tensor (this could vary depending on your requirements)
+        if new_size > 0:  # Ensure new size is positive
             reduced_tensor = original_tensor[:new_size]
-
-            # Convert to FP16 precision (half-precision floating point)
-            fp16_tensor = reduced_tensor.to(torch.float16)
-            model_data[key] = fp16_tensor
+            model_data[key] = reduced_tensor
 
     # Save the modified model
     save_file(model_data, output_file)
 
 # Usage example
-input_file = '
-output_file = '
+input_file = 'merged_model_08.safetensors'  # Replace with your input model file
+output_file = 'merged_model_06.safetensors'  # Desired output file name
 reduce_key_size(input_file, output_file)
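For reference, here is a runnable sketch of the updated script as a whole. The hunks above omit lines 8-11, so the loop over the checkpoint keys and the zero-dimension guard are assumptions; the 0.45 reduction factor, the slicing logic, and the file names come from the new version shown in the diff.

import torch
from safetensors.torch import load_file, save_file

def reduce_key_size(input_file, output_file, reduction_factor=0.45):
    # Load the model
    model_data = load_file(input_file)

    # Iterate over every tensor in the checkpoint
    # (these lines are hidden in the diff, so this loop is an assumption)
    for key in list(model_data.keys()):
        original_tensor = model_data[key]

        # Skip zero-dimensional tensors, which have no size(0) to slice
        # (added guard, not part of the original diff)
        if original_tensor.dim() == 0:
            continue

        # Calculate the new size
        new_size = int(original_tensor.size(0) * (1 - reduction_factor))

        # Resize the tensor (this could vary depending on your requirements)
        if new_size > 0:  # Ensure new size is positive
            reduced_tensor = original_tensor[:new_size]
            model_data[key] = reduced_tensor

    # Save the modified model
    save_file(model_data, output_file)

# Usage example
input_file = 'merged_model_08.safetensors'  # Replace with your input model file
output_file = 'merged_model_06.safetensors'  # Desired output file name
reduce_key_size(input_file, output_file)

Note that this revision keeps each tensor's original dtype: the previous version additionally cast the truncated slice to torch.float16, which would shrink float32 weights further at the cost of precision.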