Wendy
committed on
Upload infer_gpu.py with huggingface_hub
infer_gpu.py +39 -0
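For reference, an upload like this can be scripted with the huggingface_hub client. The snippet below is a minimal sketch of how such a commit might be made, not a record of this one; the repo_id is a placeholder.

# Minimal sketch: push infer_gpu.py to a Hub repo with huggingface_hub.
from huggingface_hub import HfApi

api = HfApi()  # picks up the token saved by `huggingface-cli login`
api.upload_file(
    path_or_fileobj="infer_gpu.py",        # local file to upload
    path_in_repo="infer_gpu.py",           # destination path in the repo
    repo_id="<namespace>/<repo-name>",     # placeholder repo id
    commit_message="Upload infer_gpu.py with huggingface_hub",
)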
infer_gpu.py
ADDED
@@ -0,0 +1,39 @@
import torch
import time
import argparse


def parse_args():
    parser = argparse.ArgumentParser(description='Matrix multiplication')
    parser.add_argument('--gpus', help='number of GPUs to keep busy', required=True, type=int)
    parser.add_argument('--size', help='matrix size (size x size)', required=True, type=int)
    parser.add_argument('--interval', help='sleep interval between iterations, in seconds', required=True, type=float)
    args = parser.parse_args()
    return args


def matrix_multiplication(args):
    a_list, b_list, result = [], [], []
    size = (args.size, args.size)

    # Allocate a pair of random input matrices (and an output buffer) on each GPU.
    for i in range(args.gpus):
        a_list.append(torch.rand(size, device=i))
        b_list.append(torch.rand(size, device=i))
        result.append(torch.rand(size, device=i))

    # Keep every GPU busy. Note that '*' is an element-wise product;
    # torch.matmul would perform a true matrix product.
    while True:
        for i in range(args.gpus):
            result[i] = a_list[i] * b_list[i]
        time.sleep(args.interval)


if __name__ == "__main__":
    # usage: python infer_gpu.py --size 20000 --gpus 2 --interval 0.01
    args = parse_args()
    matrix_multiplication(args)
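Once the file is in the repo, the script is run as in its usage comment, e.g. python infer_gpu.py --size 20000 --gpus 2 --interval 0.01. It then loops indefinitely, keeping each requested GPU busy until interrupted; per-GPU utilization can be checked with a tool such as nvidia-smi.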