# Zero Redundancy Optimizer (ZeRO)
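# Per-parameter byte accounting assumed throughout this file (mixed-precision Adam):
#   2 bytes  fp16 parameters        (precision_fac)
#   4 bytes  fp32 parameters        (params_fac, used in the CPU-side estimates)
#   4 bytes  fp32 master parameters (master_params_fac)
#   4 bytes  gradients              (grads_fac)
#   4 bytes  Adam momentum          (momentum_fac)
#   4 bytes  Adam variance          (variance_fac)
# i.e. 2 + 4 + 4 + 4 + 4 = 18 bytes per parameter before any partitioning or offload.
# A usage sketch with illustrative model sizes appears at the bottom of the file.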
def estimate_zero1_model_states_mem_needs(total_params,
                                          num_gpus_per_node=1,
                                          num_nodes=1,
                                          cpu_offload=True,
                                          additional_buffer_factor=1.5,
                                          precision_fac=2,  # 2 bytes per half-precision (fp16) value
                                          params_fac=4):    # 4 bytes per float32 model parameter
    # TODO: check whether params_fac is needed during full fp32 training.
    # Normally, mixed-precision training results in ~1.5x the memory of FP32 training.
    # Currently we assume 2x memory relative to FP32, as DeepSpeed's ZeRO is optimized for FP16 training.

    total_gpus = num_nodes * num_gpus_per_node

    master_params_fac = 4
    variance_fac = 4
    momentum_fac = 4
    grads_fac = 4
    optimizer_fac = variance_fac + momentum_fac  # Adam keeps momentum and variance per parameter

    if cpu_offload:
        # fp16 params + fp16 grads stay on the GPU; optimizer state is offloaded to the CPU
        gpu_mem = (precision_fac * total_params) + (precision_fac * total_params)
        cpu_mem = total_params * max(params_fac * total_gpus,
                                     (master_params_fac + optimizer_fac + grads_fac)) * additional_buffer_factor
    else:
        # fp16 params + fp16 grads, plus the optimizer partition sharded across all GPUs
        gpu_mem = (precision_fac * total_params) + (precision_fac * total_params) + int(
            (precision_fac + optimizer_fac + master_params_fac + precision_fac) * total_params / total_gpus)
        cpu_mem = total_params * params_fac * num_gpus_per_node * additional_buffer_factor

    return int(cpu_mem), int(gpu_mem)

def estimate_zero2_model_states_mem_needs(total_params,
                                          num_gpus_per_node=1,
                                          num_nodes=1,
                                          cpu_offload=True,
                                          additional_buffer_factor=1.5,
                                          precision_fac=2,  # 2 bytes per half-precision (fp16) value
                                          params_fac=4):    # 4 bytes per float32 model parameter
    # TODO: check whether params_fac is needed during full fp32 training.
    # Normally, mixed-precision training results in ~1.5x the memory of FP32 training.
    # Currently we assume 2x memory relative to FP32, as DeepSpeed's ZeRO-2 is optimized for FP16 training.

    total_gpus = num_nodes * num_gpus_per_node

    master_params_fac = 4
    variance_fac = 4
    momentum_fac = 4
    grads_fac = 4
    optimizer_fac = variance_fac + momentum_fac  # Adam keeps momentum and variance per parameter

    if cpu_offload:
        # only the fp16 params stay on the GPU; grads and optimizer state are offloaded to the CPU
        gpu_mem = precision_fac * total_params
        cpu_mem = total_params * max(params_fac * total_gpus,
                                     (master_params_fac + optimizer_fac + grads_fac)) * additional_buffer_factor
    else:
        # fp16 params, plus grads and optimizer state sharded across all GPUs
        gpu_mem = precision_fac * total_params + int(
            (precision_fac + grads_fac + optimizer_fac + master_params_fac + precision_fac) * total_params / total_gpus)
        cpu_mem = total_params * params_fac * num_gpus_per_node * additional_buffer_factor

    return int(cpu_mem), int(gpu_mem)


def estimate_zero3_model_states_mem_needs(total_params,
                                          largest_layer_params,
                                          num_gpus_per_node=1,
                                          num_nodes=1,
                                          cpu_offload=True,
                                          cpu_offload_params=True,
                                          zero_init=True,
                                          additional_buffer_factor=1.5,
                                          precision_fac=2,  # 2 bytes per half-precision (fp16) value
                                          params_fac=4):    # 4 bytes per float32 model parameter
    # TODO: check whether params_fac is needed during full fp32 training.
    # Normally, mixed-precision training results in ~1.5x the memory of FP32 training.
    # Currently we assume 2x memory relative to FP32, as DeepSpeed's ZeRO is optimized for FP16 training.

    total_gpus = num_nodes * num_gpus_per_node
    gpus_factor = 1 / num_nodes

    master_params_fac = 4
    variance_fac = 4
    momentum_fac = 4
    grads_fac = 4
    optimizer_fac = variance_fac + momentum_fac  # Adam keeps momentum and variance per parameter

    # fp16 params + fp16 grads for the largest single layer (2 * precision_fac bytes per parameter)
    largest_layer_memory = (2 * precision_fac) * largest_layer_params

    if cpu_offload:
        if cpu_offload_params:
            # params, grads and optimizer state all live on the CPU; the GPU only needs
            # enough room to materialize the largest layer
            gpu_mem = largest_layer_memory

            if zero_init:
                cpu_mem = total_params * (master_params_fac + grads_fac + optimizer_fac + params_fac) * gpus_factor * additional_buffer_factor
            else:
                cpu_mem = total_params * max(params_fac * num_gpus_per_node,
                                             (master_params_fac + grads_fac + optimizer_fac + params_fac) * gpus_factor) * additional_buffer_factor
        else:
            # the fp16 param partition stays on the GPU; grads and optimizer state are offloaded
            gpu_mem = largest_layer_memory + int(precision_fac * total_params / total_gpus)

            if zero_init:
                cpu_mem = total_params * (master_params_fac + grads_fac + optimizer_fac) * gpus_factor * additional_buffer_factor
            else:
                cpu_mem = total_params * max(params_fac * num_gpus_per_node,
                                             (master_params_fac + grads_fac + optimizer_fac) * gpus_factor) * additional_buffer_factor
    else:
        # 2b fp16 params + 4b master params + 4b grads + 4b momentum + 4b variance = 18 bytes
        # per parameter, sharded across all GPUs (plus the unsharded largest layer)
        gpu_mem = largest_layer_memory + int((master_params_fac + grads_fac + optimizer_fac + precision_fac) * total_params / total_gpus)

        if zero_init:
            cpu_mem = largest_layer_params * params_fac * num_gpus_per_node * additional_buffer_factor
        else:
            cpu_mem = total_params * params_fac * num_gpus_per_node * additional_buffer_factor

    return int(cpu_mem), int(gpu_mem), largest_layer_memory
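

# Minimal usage sketch. The model size (1.5B parameters), largest-layer size (100M
# parameters) and single 8-GPU node are illustrative assumptions, not measurements.
if __name__ == "__main__":
    total_params = int(1.5e9)         # hypothetical model size
    largest_layer_params = int(1e8)   # hypothetical largest single layer

    for stage, estimator in ((1, estimate_zero1_model_states_mem_needs),
                             (2, estimate_zero2_model_states_mem_needs)):
        cpu_mem, gpu_mem = estimator(total_params,
                                     num_gpus_per_node=8,
                                     num_nodes=1,
                                     cpu_offload=True)
        print(f"ZeRO-{stage}: ~{cpu_mem / 2**30:.1f} GiB CPU, ~{gpu_mem / 2**30:.1f} GiB per GPU")

    cpu_mem, gpu_mem, largest_layer_memory = estimate_zero3_model_states_mem_needs(
        total_params,
        largest_layer_params,
        num_gpus_per_node=8,
        num_nodes=1,
        cpu_offload=True,
        cpu_offload_params=True,
        zero_init=True)
    print(f"ZeRO-3: ~{cpu_mem / 2**30:.1f} GiB CPU, ~{gpu_mem / 2**30:.1f} GiB per GPU "
          f"(largest layer: ~{largest_layer_memory / 2**30:.2f} GiB)")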