python_code | repo_name | file_path
---|---|---|
###############################################################################
# Code from
# https://github.com/pytorch/vision/blob/master/torchvision/datasets/folder.py
# Modified the original code so that it also loads images from the current
# directory as well as the subdirectories
###############################################################################
import torch.utils.data as data
from PIL import Image
import os
import os.path
IMG_EXTENSIONS = [
'.jpg', '.JPG', '.jpeg', '.JPEG', '.pgm', '.PGM',
'.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP', '.tiff',
'.txt', '.json'
]
def is_image_file(filename):
return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
def make_dataset(dir):
images = []
assert os.path.isdir(dir), '%s is not a valid directory' % dir
for root, _, fnames in sorted(os.walk(dir)):
for fname in fnames:
if is_image_file(fname):
path = os.path.join(root, fname)
images.append(path)
return images
def make_grouped_dataset(dir):
images = []
assert os.path.isdir(dir), '%s is not a valid directory' % dir
fnames = sorted(os.walk(dir))
for fname in sorted(fnames):
paths = []
root = fname[0]
for f in sorted(fname[2]):
if is_image_file(f):
paths.append(os.path.join(root, f))
if len(paths) > 0:
images.append(paths)
return images
def check_path_valid(A_paths, B_paths):
assert(len(A_paths) == len(B_paths))
for a, b in zip(A_paths, B_paths):
assert(len(a) == len(b))
def default_loader(path):
return Image.open(path).convert('RGB')
class ImageFolder(data.Dataset):
def __init__(self, root, transform=None, return_paths=False,
loader=default_loader):
imgs = make_dataset(root)
if len(imgs) == 0:
raise(RuntimeError("Found 0 images in: " + root + "\n"
"Supported image extensions are: " +
",".join(IMG_EXTENSIONS)))
self.root = root
self.imgs = imgs
self.transform = transform
self.return_paths = return_paths
self.loader = loader
def __getitem__(self, index):
path = self.imgs[index]
img = self.loader(path)
if self.transform is not None:
img = self.transform(img)
if self.return_paths:
return img, path
else:
return img
def __len__(self):
return len(self.imgs)
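# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file). `demo_dir` is a
# hypothetical folder of frames; note that IMG_EXTENSIONS above also matches
# .txt/.json files, so point this at a folder that only contains images.
if __name__ == '__main__':
    import torchvision.transforms as transforms

    demo_dir = './datasets/demo'
    if os.path.isdir(demo_dir):
        flat_paths = make_dataset(demo_dir)             # every image under demo_dir
        grouped_paths = make_grouped_dataset(demo_dir)  # one list of paths per sub-folder
        print(len(flat_paths), 'images in', len(grouped_paths), 'sequences')
        if flat_paths:
            dataset = ImageFolder(demo_dir, transform=transforms.ToTensor(), return_paths=True)
            img, path = dataset[0]
            print(path, tuple(img.shape))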
| vid2vid-master | data/image_folder.py |
import os
import glob
from skimage import io
import numpy as np
import dlib
import sys
if len(sys.argv) < 2 or (sys.argv[1] != 'train' and sys.argv[1] != 'test'):
raise ValueError('usage: python data/face_landmark_detection.py [train|test]')
phase = sys.argv[1]
dataset_path = 'datasets/face/'
faces_folder_path = os.path.join(dataset_path, phase + '_img/')
predictor_path = os.path.join(dataset_path, 'shape_predictor_68_face_landmarks.dat')
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(predictor_path)
img_paths = sorted(glob.glob(faces_folder_path + '*'))
for i in range(len(img_paths)):
f = img_paths[i]
print("Processing video: {}".format(f))
save_path = os.path.join(dataset_path, phase + '_keypoints', os.path.basename(f))
if not os.path.isdir(save_path):
os.makedirs(save_path)
for img_name in sorted(glob.glob(os.path.join(f, '*.jpg'))):
img = io.imread(img_name)
dets = detector(img, 1)
if len(dets) > 0:
shape = predictor(img, dets[0])
points = np.empty([68, 2], dtype=int)
for b in range(68):
points[b,0] = shape.part(b).x
points[b,1] = shape.part(b).y
save_name = os.path.join(save_path, os.path.basename(img_name)[:-4] + '.txt')
np.savetxt(save_name, points, fmt='%d', delimiter=',')
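# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original script): the saved files are plain
# comma-separated integers, so they can be read back like this. The path in
# the docstring example is hypothetical.
def load_keypoints(txt_path):
    """Read one saved landmark file back as a (68, 2) int array of (x, y) points,
    e.g. load_keypoints('datasets/face/train_keypoints/0001/frame0000.txt')."""
    return np.loadtxt(txt_path, delimiter=',', dtype=int)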
| vid2vid-master | data/face_landmark_detection.py |
import os.path
import torchvision.transforms as transforms
import torch
from PIL import Image
import numpy as np
from data.base_dataset import BaseDataset, get_img_params, get_transform, get_video_params, concat_frame
from data.image_folder import make_grouped_dataset, check_path_valid
from data.keypoint2img import read_keypoints
class PoseDataset(BaseDataset):
def initialize(self, opt):
self.opt = opt
self.root = opt.dataroot
self.dir_dp = os.path.join(opt.dataroot, opt.phase + '_densepose')
self.dir_op = os.path.join(opt.dataroot, opt.phase + '_openpose')
self.dir_img = os.path.join(opt.dataroot, opt.phase + '_img')
self.img_paths = sorted(make_grouped_dataset(self.dir_img))
if not opt.openpose_only:
self.dp_paths = sorted(make_grouped_dataset(self.dir_dp))
check_path_valid(self.dp_paths, self.img_paths)
if not opt.densepose_only:
self.op_paths = sorted(make_grouped_dataset(self.dir_op))
check_path_valid(self.op_paths, self.img_paths)
self.init_frame_idx(self.img_paths)
def __getitem__(self, index):
A, B, _, seq_idx = self.update_frame_idx(self.img_paths, index)
img_paths = self.img_paths[seq_idx]
n_frames_total, start_idx, t_step = get_video_params(self.opt, self.n_frames_total, len(img_paths), self.frame_idx)
img = Image.open(img_paths[start_idx]).convert('RGB')
size = img.size
params = get_img_params(self.opt, size)
frame_range = list(range(n_frames_total)) if (self.opt.isTrain or self.A is None) else [self.opt.n_frames_G-1]
for i in frame_range:
img_path = img_paths[start_idx + i * t_step]
if not self.opt.openpose_only:
dp_path = self.dp_paths[seq_idx][start_idx + i * t_step]
Di = self.get_image(dp_path, size, params, input_type='densepose')
Di[2,:,:] = ((Di[2,:,:] * 0.5 + 0.5) * 255 / 24 - 0.5) / 0.5
if not self.opt.densepose_only:
op_path = self.op_paths[seq_idx][start_idx + i * t_step]
Oi = self.get_image(op_path, size, params, input_type='openpose')
if self.opt.openpose_only:
Ai = Oi
elif self.opt.densepose_only:
Ai = Di
else:
Ai = torch.cat([Di, Oi])
Bi = self.get_image(img_path, size, params, input_type='img')
Ai, Bi = self.crop(Ai), self.crop(Bi) # only crop the central half region to save time
A = concat_frame(A, Ai, n_frames_total)
B = concat_frame(B, Bi, n_frames_total)
if not self.opt.isTrain:
self.A, self.B = A, B
self.frame_idx += 1
change_seq = False if self.opt.isTrain else self.change_seq
return_list = {'A': A, 'B': B, 'inst': 0, 'A_path': img_path, 'change_seq': change_seq}
return return_list
def get_image(self, A_path, size, params, input_type):
if input_type != 'openpose':
A_img = Image.open(A_path).convert('RGB')
else:
random_drop_prob = self.opt.random_drop_prob if self.opt.isTrain else 0
A_img = Image.fromarray(read_keypoints(A_path, size, random_drop_prob, self.opt.remove_face_labels, self.opt.basic_point_only))
if input_type == 'densepose' and self.opt.isTrain:
# randomly remove labels
A_np = np.array(A_img)
part_labels = A_np[:,:,2]
for part_id in range(1, 25):
if (np.random.rand() < self.opt.random_drop_prob):
A_np[(part_labels == part_id), :] = 0
if self.opt.remove_face_labels:
A_np[(part_labels == 23) | (part_labels == 24), :] = 0
A_img = Image.fromarray(A_np)
is_img = input_type == 'img'
method = Image.BICUBIC if is_img else Image.NEAREST
transform_scaleA = get_transform(self.opt, params, method=method)
A_scaled = transform_scaleA(A_img)
return A_scaled
def crop(self, Ai):
w = Ai.size()[2]
base = 32
x_cen = w // 2
bs = int(w * 0.25) // base * base
return Ai[:,:,(x_cen-bs):(x_cen+bs)]
def normalize_pose(self, A_img, target_yc, target_len, first=False):
w, h = A_img.size
A_np = np.array(A_img)
if first:
part_labels = A_np[:,:,2]
part_coords = np.nonzero((part_labels == 1) | (part_labels == 2))
y, x = part_coords[0], part_coords[1]
ys, ye = y.min(), y.max()
min_i, max_i = np.argmin(y), np.argmax(y)
v_min = A_np[y[min_i], x[min_i], 1] / 255
v_max = A_np[y[max_i], x[max_i], 1] / 255
ylen = (ye-ys) / (v_max-v_min)
yc = (0.5-v_min) / (v_max-v_min) * (ye-ys) + ys
ratio = target_len / ylen
offset_y = int(yc - (target_yc / ratio))
offset_x = int(w * (1 - 1/ratio) / 2)
padding = int(max(0, max(-offset_y, int(offset_y + h/ratio) - h)))
padding = int(max(padding, max(-offset_x, int(offset_x + w/ratio) - w)))
offset_y += padding
offset_x += padding
self.offset_y, self.offset_x = offset_y, offset_x
self.ratio, self.padding = ratio, padding
p = self.padding
A_np = np.pad(A_np, ((p,p),(p,p),(0,0)), 'constant', constant_values=0)
A_np = A_np[self.offset_y:int(self.offset_y + h/self.ratio), self.offset_x:int(self.offset_x + w/self.ratio):, :]
A_img = Image.fromarray(A_np)
A_img = A_img.resize((w, h))
return A_img
def __len__(self):
return sum(self.frames_count)
def name(self):
return 'PoseDataset'
"""
DensePose label
0 = Background
1, 2 = Torso
3 = Right Hand
4 = Left Hand
5 = Right Foot
6 = Left Foot
7, 9 = Upper Leg Right
8, 10 = Upper Leg Left
11, 13 = Lower Leg Right
12, 14 = Lower Leg Left
15, 17 = Upper Arm Left
16, 18 = Upper Arm Right
19, 21 = Lower Arm Left
20, 22 = Lower Arm Right
23, 24 = Head
"""
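# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original file): how the label table above is
# used by `remove_face_labels`, i.e. zeroing the head parts (23, 24) in a
# DensePose map whose third channel stores the part index. `label_map` is a
# hypothetical HxWx3 uint8 array.
def remove_head_parts(label_map):
    out = label_map.copy()
    part_labels = out[:, :, 2]
    out[(part_labels == 23) | (part_labels == 24), :] = 0
    return out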
| vid2vid-master | data/pose_dataset.py |
### Copyright (C) 2017 NVIDIA Corporation. All rights reserved.
### Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
import os.path
import random
import torch
from data.base_dataset import BaseDataset, get_img_params, get_transform, get_video_params
from data.image_folder import make_grouped_dataset, check_path_valid
from PIL import Image
import numpy as np
class TemporalDataset(BaseDataset):
def initialize(self, opt):
self.opt = opt
self.root = opt.dataroot
self.dir_A = os.path.join(opt.dataroot, opt.phase + '_A')
self.dir_B = os.path.join(opt.dataroot, opt.phase + '_B')
self.A_is_label = self.opt.label_nc != 0
self.A_paths = sorted(make_grouped_dataset(self.dir_A))
self.B_paths = sorted(make_grouped_dataset(self.dir_B))
check_path_valid(self.A_paths, self.B_paths)
if opt.use_instance:
self.dir_inst = os.path.join(opt.dataroot, opt.phase + '_inst')
self.I_paths = sorted(make_grouped_dataset(self.dir_inst))
check_path_valid(self.A_paths, self.I_paths)
self.n_of_seqs = len(self.A_paths) # number of sequences to train
self.seq_len_max = max([len(A) for A in self.A_paths])
self.n_frames_total = self.opt.n_frames_total # current number of frames to train in a single iteration
def __getitem__(self, index):
tG = self.opt.n_frames_G
A_paths = self.A_paths[index % self.n_of_seqs]
B_paths = self.B_paths[index % self.n_of_seqs]
if self.opt.use_instance:
I_paths = self.I_paths[index % self.n_of_seqs]
# setting parameters
n_frames_total, start_idx, t_step = get_video_params(self.opt, self.n_frames_total, len(A_paths), index)
# setting transformers
B_img = Image.open(B_paths[start_idx]).convert('RGB')
params = get_img_params(self.opt, B_img.size)
transform_scaleB = get_transform(self.opt, params)
transform_scaleA = get_transform(self.opt, params, method=Image.NEAREST, normalize=False) if self.A_is_label else transform_scaleB
# read in images
A = B = inst = 0
for i in range(n_frames_total):
A_path = A_paths[start_idx + i * t_step]
B_path = B_paths[start_idx + i * t_step]
Ai = self.get_image(A_path, transform_scaleA, is_label=self.A_is_label)
Bi = self.get_image(B_path, transform_scaleB)
A = Ai if i == 0 else torch.cat([A, Ai], dim=0)
B = Bi if i == 0 else torch.cat([B, Bi], dim=0)
if self.opt.use_instance:
I_path = I_paths[start_idx + i * t_step]
Ii = self.get_image(I_path, transform_scaleA) * 255.0
inst = Ii if i == 0 else torch.cat([inst, Ii], dim=0)
return_list = {'A': A, 'B': B, 'inst': inst, 'A_path': A_path, 'B_paths': B_path}
return return_list
def get_image(self, A_path, transform_scaleA, is_label=False):
A_img = Image.open(A_path)
A_scaled = transform_scaleA(A_img)
if is_label:
A_scaled *= 255.0
return A_scaled
def __len__(self):
return len(self.A_paths)
def name(self):
return 'TemporalDataset' | vid2vid-master | data/temporal_dataset.py |
### Copyright (C) 2017 NVIDIA Corporation. All rights reserved.
### Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
import os.path
import torch
from data.base_dataset import BaseDataset, get_img_params, get_transform, concat_frame
from data.image_folder import make_grouped_dataset, check_path_valid
from PIL import Image
import numpy as np
class TestDataset(BaseDataset):
def initialize(self, opt):
self.opt = opt
self.root = opt.dataroot
self.dir_A = os.path.join(opt.dataroot, opt.phase + '_A')
self.dir_B = os.path.join(opt.dataroot, opt.phase + '_B')
self.use_real = opt.use_real_img
self.A_is_label = self.opt.label_nc != 0
self.A_paths = sorted(make_grouped_dataset(self.dir_A))
if self.use_real:
self.B_paths = sorted(make_grouped_dataset(self.dir_B))
check_path_valid(self.A_paths, self.B_paths)
if self.opt.use_instance:
self.dir_inst = os.path.join(opt.dataroot, opt.phase + '_inst')
self.I_paths = sorted(make_grouped_dataset(self.dir_inst))
check_path_valid(self.A_paths, self.I_paths)
self.init_frame_idx(self.A_paths)
def __getitem__(self, index):
self.A, self.B, self.I, seq_idx = self.update_frame_idx(self.A_paths, index)
tG = self.opt.n_frames_G
A_img = Image.open(self.A_paths[seq_idx][0]).convert('RGB')
params = get_img_params(self.opt, A_img.size)
transform_scaleB = get_transform(self.opt, params)
transform_scaleA = get_transform(self.opt, params, method=Image.NEAREST, normalize=False) if self.A_is_label else transform_scaleB
frame_range = list(range(tG)) if self.A is None else [tG-1]
for i in frame_range:
A_path = self.A_paths[seq_idx][self.frame_idx + i]
Ai = self.get_image(A_path, transform_scaleA, is_label=self.A_is_label)
self.A = concat_frame(self.A, Ai, tG)
if self.use_real:
B_path = self.B_paths[seq_idx][self.frame_idx + i]
Bi = self.get_image(B_path, transform_scaleB)
self.B = concat_frame(self.B, Bi, tG)
else:
self.B = 0
if self.opt.use_instance:
I_path = self.I_paths[seq_idx][self.frame_idx + i]
Ii = self.get_image(I_path, transform_scaleA) * 255.0
self.I = concat_frame(self.I, Ii, tG)
else:
self.I = 0
self.frame_idx += 1
return_list = {'A': self.A, 'B': self.B, 'inst': self.I, 'A_path': A_path, 'change_seq': self.change_seq}
return return_list
def get_image(self, A_path, transform_scaleA, is_label=False):
A_img = Image.open(A_path)
A_scaled = transform_scaleA(A_img)
if is_label:
A_scaled *= 255.0
return A_scaled
def __len__(self):
return sum(self.frames_count)
def n_of_seqs(self):
return len(self.A_paths)
def name(self):
return 'TestDataset' | vid2vid-master | data/test_dataset.py |
import torch.utils.data
from data.base_data_loader import BaseDataLoader
def CreateDataset(opt):
dataset = None
if opt.dataset_mode == 'temporal':
from data.temporal_dataset import TemporalDataset
dataset = TemporalDataset()
elif opt.dataset_mode == 'face':
from data.face_dataset import FaceDataset
dataset = FaceDataset()
elif opt.dataset_mode == 'pose':
from data.pose_dataset import PoseDataset
dataset = PoseDataset()
elif opt.dataset_mode == 'test':
from data.test_dataset import TestDataset
dataset = TestDataset()
else:
raise ValueError("Dataset [%s] not recognized." % opt.dataset_mode)
print("dataset [%s] was created" % (dataset.name()))
dataset.initialize(opt)
return dataset
class CustomDatasetDataLoader(BaseDataLoader):
def name(self):
return 'CustomDatasetDataLoader'
def initialize(self, opt):
BaseDataLoader.initialize(self, opt)
self.dataset = CreateDataset(opt)
self.dataloader = torch.utils.data.DataLoader(
self.dataset,
batch_size=opt.batchSize,
shuffle=not opt.serial_batches,
num_workers=int(opt.nThreads))
def load_data(self):
return self.dataloader
def __len__(self):
return min(len(self.dataset), self.opt.max_dataset_size)
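# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file). The loader is driven by
# an options object; the attribute names below are the ones read above
# (dataset_mode, batchSize, serial_batches, nThreads, max_dataset_size), but
# how `opt` is built (e.g. by the project's options parser) is an assumption.
#
#   data_loader = CustomDatasetDataLoader()
#   data_loader.initialize(opt)
#   dataset = data_loader.load_data()      # a torch.utils.data.DataLoader
#   for i, data in enumerate(dataset):
#       real_A, real_B = data['A'], data['B']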
| vid2vid-master | data/custom_dataset_data_loader.py |
# SPDX-License-Identifier: Apache-2.0
from typing import List

import numpy as np
from fed_learn.numproto import proto_to_ndarray, ndarray_to_proto
from fed_learn.server.model_aggregator import Aggregator
from fed_learn.model_meta import FLContext
class CustomModelAggregator(Aggregator):
def process(self, accumulator: List[FLContext], fl_ctx: FLContext):
"""Aggregate the contributions from all the submitted FL clients.
For the FLContext type we can use get_model() method to get the model data.
The model data is a protobuf message and its format is defined as below.
// A model consists of multiple tensors
message ModelData {
map<string, NDArray> params = 1;
}
// NDArray data for protobuf
message NDArray {
bytes ndarray = 1;
}
In this aggregation method we are using local number of iterations to weight each
contribution and get a weighted average of that to be our new value.
This function is not thread-safe.
:param accumulator: List of all the contributions in FLContext.
:param fl_ctx: An instance of FLContext.
:return: Return True to indicate that the current model is the best model so far.
"""
# The model data is in model.params as a dict.
model = fl_ctx.get_model()
vars_to_aggregate = [set(item.get_model().params) for item in accumulator]
vars_to_aggregate = set.union(*vars_to_aggregate)
for v_name in vars_to_aggregate:
n_local_iters, np_vars = [], []
for item in accumulator:
data = item.get_model()
if v_name not in data.params:
continue # this item doesn't have the variable from client
# contribution is a protobuf msg
# it has `n_iter` which represents number of local iterations
# used to compute this contribution
acc = item.get_prop('_contribution')
float_n_iter = float(acc.n_iter)  # builtin float; np.float is deprecated
n_local_iters.append(float_n_iter)
# weighted using local iterations
weighted_value = proto_to_ndarray(data.params[v_name]) * float_n_iter
np_vars.append(weighted_value)
if not n_local_iters:
continue # didn't receive this variable from any clients
new_val = np.sum(np_vars, axis=0) / np.sum(n_local_iters)
new_val += proto_to_ndarray(model.params[v_name])
# Update the params in model using CopyFrom because it is a ProtoBuf structure
model.params[v_name].CopyFrom(ndarray_to_proto(new_val))
return False
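# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original file): the same iteration-weighted
# update on plain numpy dicts, without the protobuf/FLContext plumbing.
# `contributions` is a hypothetical list of (n_iter, {var_name: ndarray})
# pairs holding each client's model difference for the round.
def weighted_average_update(global_params, contributions):
    updated = dict(global_params)
    var_names = set()
    for _, params in contributions:
        var_names |= set(params)
    for name in var_names:
        weights, weighted_diffs = [], []
        for n_iter, params in contributions:
            if name in params:
                weights.append(float(n_iter))
                weighted_diffs.append(params[name] * float(n_iter))
        if weights:
            updated[name] = global_params[name] + np.sum(weighted_diffs, axis=0) / np.sum(weights)
    return updated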
| clara-train-examples-master | Tensorflow-Deprecated/FL/adminMMAR/custom/BYO_aggregator.py |
# SPDX-License-Identifier: Apache-2.0
import logging
from fed_learn.client.fed_privacy import PrivacyProtocol
import numpy as np
class MyPrivacyProtocol(PrivacyProtocol):
def __init__(self, percentile=10, gamma=0.01):
self.logger = logging.getLogger('---- my Privacy protocol')
PrivacyProtocol.__init__(self)
def apply(self, model_diff, train_ctx=None):
self.logger.info("----------------")
self.logger.info("-------client applying privacy---------")
self.logger.info("----------------")
"""
:param model_diff: model after a round of local training
:param train_ctx: training context; its total_steps is used to scale the update.
:return: model data to be uploaded to the server
"""
# invariant to local steps
scale = float(train_ctx.total_steps)  # builtin float; np.float is deprecated
delta_w = {name: model_diff[name] / scale for name in model_diff}
return delta_w
class MyRandomProtocol(PrivacyProtocol):
"""
Randomly drop some gradients
"""
def __init__(self, percentage=10, gamma=0.01, non_zero_only=True):
self.logger = logging.getLogger('RandomProtocol')
PrivacyProtocol.__init__(self)
# must be in 0..100: the percentage of gradient entries to randomly zero out
self.percentage = percentage
assert 0 <= percentage <= 100, "select a percentage between 0 and 100!"
def apply(self, model_diff, train_ctx=None):
delta_w = {}
n_removed = 0
n_total = 0
for name in model_diff:
diff = model_diff[name]
n = np.size(diff)
n_total += n
n_select = int(np.floor(n*self.percentage/100))
if n_select > 0:
idx = np.arange(n)
select = np.random.choice(idx, size=n_select, replace=False)
np.put(diff, select, v=0.0) # set the randomly selected gradients to zero
n_removed += n_select
delta_w[name] = diff
self.logger.info(f'Removed {n_removed} of {n_total} ({100*n_removed/n_total:.2f}%) parameters.')
return delta_w
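# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): applying the random
# drop protocol to a toy model difference. train_ctx is unused by
# MyRandomProtocol.apply, so None is passed.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    toy_diff = {'layer1': np.random.randn(4, 4), 'layer2': np.random.randn(10)}
    protocol = MyRandomProtocol(percentage=25)
    filtered = protocol.apply(toy_diff, train_ctx=None)
    for name, diff in filtered.items():
        print(name, 'zeroed elements:', int(np.sum(diff == 0)))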
| clara-train-examples-master | Tensorflow-Deprecated/FL/adminMMAR/custom/BYO_Privacy.py |
# SPDX-License-Identifier: Apache-2.0
from queue import PriorityQueue
from random import randint, uniform
from automl.components.controllers.controller import Controller
from automl.defs import Context, Recommendation, Outcome, SearchResult
from automl.components.handlers.handler import Handler
from automl.defs import Context, ContextKey, Status
import mlflow
import threading
class RandomController(Controller):
def __init__(self, max_rounds=1000):
Controller.__init__(self)
self.current_rec_id = 0
self.space = None
self.ctx = None
self.enum_space = None
self.float_space = None
self.enum_size = 0
self.search_log = dict()
self.score_priority_queue = PriorityQueue()
self.max_rounds = max_rounds
def set_search_space(self, space, ctx):
self.space = space
self.ctx = ctx
self.enum_space = self._get_subspace('.enum')
self.float_space = self._get_subspace('.float')
enum_size = 1
for k in self.enum_space:
enum_size = enum_size * len(self.enum_space[k])
self.enum_size = enum_size
def _get_subspace(self, subspace_key):
return {k: v for k, v in self.space.targets.items() if subspace_key in k}
def _sample_space(self):
# modify this to generate 2 options at once
# returns recommends: a list of recommendations to run at the same time
recommends = list()
for _ in range(self.max_rounds): # generate random samples
values = dict()
for k, v in self.enum_space.items():
# print("in Enum space k,v=",k,v)
target = randint(0,len(v)-1)
values[k] = target
for k, v in self.float_space.items():
target = uniform(v[0].min, v[0].max)
values[k] = target
self._keep_log(values)
sr = SearchResult(self.space, values)
recommend = Recommendation(self.current_rec_id, sr)
recommends.append(recommend)
# TODO: append another recommendation; it will be scheduled automatically
# print(" values", values)
self.current_rec_id = self.current_rec_id + 1
return recommends
def initial_recommendation(self, ctx):
recommends = self._sample_space()
return recommends
def _keep_log(self, values):
self.search_log[self.current_rec_id] = dict()
self.search_log[self.current_rec_id]['recommendation'] = values
self.search_log[self.current_rec_id]['outcome'] = None
def _update_log_with_outcome(self, rec_id, outcome):
self.search_log[rec_id]['outcome'] = outcome
def refine_recommendation(self, outcome: Outcome, ctx: Context):
outcome_score = outcome.score
outcome_rec_id = outcome.recommendation_id
self.score_priority_queue.put((-outcome_score, outcome_rec_id))
self._update_log_with_outcome(outcome_rec_id, outcome)
if self.score_priority_queue.qsize() >= self.max_rounds:
ctx.stop_work(self,"Number of runs reached {}. Requesting stop.".format(self.max_rounds))
return []
recommends = self._sample_space()
return recommends
class MyHandler(Handler):
def __init__(self):
Handler.__init__(self)
self.recs = list()
self.update_lock = threading.Lock()
# self.logger = logging.getLogger(self.__class__.__name__)
def recommendations_available(self, ctx):
recs = ctx.get_prop(ContextKey.RECOMMENDATIONS)
print('recommendations available')
for i, rec in enumerate(recs):
self.recs.append(rec)
# print('recommendation #{}'.format(i))
# rec.result.dump()
# print()
def startup(self, ctx: Context):
print(" __________starting up")
def shutdown(self, ctx: Context):
# print("__________shutdown")
pass
def start_job(self, ctx: Context):
print("start job ")
self.recommendations_available(ctx)
print("______Job __name",ctx.get_prop("_jobName"),"________has______started")
recomds=ctx.get_prop("_recommendations")
pass
def round_ended(self, ctx: Context):
print("_________round_ended")
pass
def end_job(self, ctx: Context):
print("_____________ end_job")
job_name = ctx.get_prop(ContextKey.JOB_NAME)
print("job name {}".format(job_name))
parms = ctx.get_prop(ContextKey.CONCRETE_SEARCH_VALUE)
# mlflow.start_run()
mlflow.set_tracking_uri("/claraDevDay/AutoML/mlruns")
with self.update_lock:
with mlflow.start_run() as run:
for k, v in parms.items():
par = k.split(":")[1]
v=v[0]
print("par=", par, " val=", v)
mlflow.log_param(par, v)
score = ctx.get_prop(ContextKey.SCORE)
print("score =",score)
mlflow.log_metric("Acc", score)
print ("MLFLOW added ")
print("___________________________")
# mlflow.end_run()
return
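# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original file): the core sampling idea of
# RandomController._sample_space on plain dicts, without the AutoML/mlflow
# plumbing. The search-space layout is a simplified assumption: enum entries
# map to a list of choices, float entries map to a (min, max) pair.
def sample_once(enum_space, float_space):
    values = {}
    for key, choices in enum_space.items():
        values[key] = randint(0, len(choices) - 1)   # pick an index into the choices
    for key, (lo, hi) in float_space.items():
        values[key] = uniform(lo, hi)                # pick a value uniformly in [lo, hi]
    return values

# Example: sample_once({'net.enum': ['unet', 'segresnet']}, {'lr.float': (1e-4, 1e-2)})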
| clara-train-examples-master | Tensorflow-Deprecated/AutoML/BYOC/myAutoMLController.py |
# SPDX-License-Identifier: Apache-2.0
import tensorflow as tf
from ai4med.components.losses.loss import Loss
class MyClonedDiceLoss(Loss):
def __init__(self,data_format='channels_first',skip_background=False,squared_pred=False,
jaccard=False,smooth=1e-5,top_smooth=0.0,is_onehot_targets=False
# , label_weights=None #
):
Loss.__init__(self)
self.data_format = data_format
self.skip_background = skip_background
self.squared_pred = squared_pred
self.jaccard = jaccard
self.smooth = smooth
self.top_smooth = top_smooth
self.is_onehot_targets = is_onehot_targets
# self.label_weights = label_weights
def get_loss(self, predictions, targets, build_ctx=None):
return dice_loss(predictions, targets,
data_format=self.data_format,
skip_background=self.skip_background,
squared_pred=self.squared_pred,
jaccard=self.jaccard,
smooth=self.smooth,
top_smooth=self.top_smooth,
is_onehot_targets=self.is_onehot_targets
# , label_weights=self.label_weights
)
def dice_loss(predictions,
targets,
data_format='channels_first',
skip_background=False,
squared_pred=False,
jaccard=False,
smooth=1e-5,
top_smooth=0.0,
is_onehot_targets=False):
is_channels_first = (data_format == 'channels_first')
ch_axis = 1 if is_channels_first else -1
n_channels_pred = predictions.get_shape()[ch_axis].value
n_channels_targ = targets.get_shape()[ch_axis].value
n_len = len(predictions.get_shape())
print('dice_loss targets', targets.get_shape().as_list(),
'predictions', predictions.get_shape().as_list(),
'targets.dtype', targets.dtype,
'predictions.dtype', predictions.dtype)
print('dice_loss is_channels_first:', is_channels_first,
'skip_background:', skip_background,
'is_onehot_targets', is_onehot_targets)
# Sanity checks
if skip_background and n_channels_pred == 1:
raise ValueError("There is only a single channel in the predicted output, and skip_background is True")
if skip_background and n_channels_targ == 1 and is_onehot_targets:
raise ValueError("There is only a single channel in the true output (and it is one-hot encoded), "
"and skip_background is True")
if is_onehot_targets and n_channels_targ != n_channels_pred:
raise ValueError("Number of channels in target {} and pred outputs {} "
"must be equal to use is_onehot_targets == True".format(
n_channels_targ, n_channels_pred))
# End sanity checks
if not is_onehot_targets:
# if not one-hot representation already
# remove singleton (channel) dimension for true labels
targets = tf.cast(tf.squeeze(targets, axis=ch_axis), tf.int32)
targets = tf.one_hot(targets, depth=n_channels_pred, axis=ch_axis,
dtype=tf.float32, name="loss_dice_targets_onehot")
if skip_background:
# if skipping background, removing first channel
targets = targets[:, 1:] if is_channels_first else targets[..., 1:]
predictions = predictions[:, 1:] if is_channels_first else predictions[..., 1:]
# uncomment lines below for exercise #1
# if label_weights is not None:
# label_weights = label_weights[1:]
# reducing only spatial dimensions (not batch nor channels)
reduce_axis = list(range(2, n_len)) if is_channels_first else list(range(1, n_len - 1))
intersection = tf.reduce_sum(targets * predictions, axis=reduce_axis)
# uncomment lines below for exercise #1
# if label_weights is not None: # add wights to labels
# print("========== debug research intersection.shape=", intersection.shape)
# w = tf.constant(label_weights, dtype=tf.float32)
# intersection = tf.multiply(w, intersection)
if squared_pred:
# technically we don't need this square for binary true values
# (but in cases where true is probability/float, we still need to square)
targets = tf.square(targets)
predictions = tf.square(predictions)
y_true_o = tf.reduce_sum(targets, axis=reduce_axis)
y_pred_o = tf.reduce_sum(predictions, axis=reduce_axis)
denominator = y_true_o + y_pred_o
if jaccard:
denominator -= intersection
f = (2.0 * intersection + top_smooth) / (denominator + smooth)
f = tf.reduce_mean(f) # final reduce_mean across batches and channels
return 1 - f
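# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original file): the same Dice formula on two
# binary numpy masks, to make the TF loss above easy to check by hand.
def numpy_dice_loss(pred, target, smooth=1e-5, top_smooth=0.0):
    import numpy as np
    intersection = np.sum(pred * target)
    denominator = np.sum(pred) + np.sum(target)
    return 1.0 - (2.0 * intersection + top_smooth) / (denominator + smooth)

# Identical masks give a loss near 0; disjoint masks give a loss near 1.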
| clara-train-examples-master | Tensorflow-Deprecated/GettingStarted/BYOC/myLoss.py |
# SPDX-License-Identifier: Apache-2.0
import numpy as np
# note the ai4med here
# from ai4med.common.medical_image import MedicalImage
# from ai4med.common.transform_ctx import TransformContext
from ai4med.components.transforms.multi_field_transformer import MultiFieldTransformer
# from ai4med.components.transforms.scale_intensity_range import ScaleIntensityRange
# class MyScaleIntensityRange(ScaleIntensityRange):
# def __init__(self,fields, a_min, b_min, a_max=None, b_max=None, clip=False,a_offset=0,b_offset=None, dtype='float32'):
# if a_offset is not None:
# a_max=a_min+a_offset
# if b_offset is not None:
# b_max=b_min+b_offset
# assert isinstance(a_offset, (int, float)) , "------AEH why is this not working "
# ScaleIntensityRange.__init__(self,fields, a_min, a_max, b_min, b_max, clip,dtype=dtype)
class MyAddRandomConstant(MultiFieldTransformer):
def __init__(self, fields, magnitude, dtype=np.float32):
# fields specifies the names of the image fields in the data dict that you want to add constant to
MultiFieldTransformer.__init__(self, fields)
self.dtype = dtype
self.magnitude = magnitude
def transform(self, transform_ctx):
for field in self.fields:
offset = (np.random.rand() * 2.0 - 1.0) * self.magnitude
# get the MedicalImage using field
img = transform_ctx.get_image(field)
# get_data give us a numpy array of data
result = img.get_data() + offset
# create a new MedicalImage use new_image() method
# which will carry over the properties of the original image
result_img = img.new_image(result, img.get_shape_format())
# set the image back in transform_ctx
transform_ctx.set_image(field, result_img)
return transform_ctx
def is_deterministic(self):
# This is not a deterministic transform.
return False
| clara-train-examples-master | Tensorflow-Deprecated/GettingStarted/BYOC/myTransformation.py |
| clara-train-examples-master | Tensorflow-Deprecated/GettingStarted/BYOC/__init__.py |
# SPDX-License-Identifier: Apache-2.0
import numpy as np
import logging
from ai4med.common.constants import ImageProperty
from ai4med.common.medical_image import MedicalImage
from ai4med.common.shape_format import ShapeFormat
from ai4med.common.transform_ctx import TransformContext
from ai4med.utils.dtype_utils import str_to_dtype
from ai4med.components.transforms.multi_field_transformer import MultiFieldTransformer
class MyNumpyLoader(MultiFieldTransformer):
"""Load Image from Numpy files.
Args:
shape (ShapeFormat): Shape of output image.
dtype : Type for output data.
"""
def __init__(self, fields, shape, dtype="float32"):
MultiFieldTransformer.__init__(self, fields=fields)
self._dtype = str_to_dtype(dtype)
self._shape = ShapeFormat(shape)
self._reader = MyNumpyReader(self._dtype)
def transform(self, transform_ctx: TransformContext):
for field in self.fields:
file_name = transform_ctx[field]
transform_ctx.set_image(field, self._reader.read(file_name, self._shape))
return transform_ctx
class MyNumpyReader(object):
"""Reads Numpy files.
Args:
dtype: Type for data to be loaded.
"""
def __init__(self, dtype=np.float32):
self._logger = logging.getLogger(self.__class__.__name__)
self._dtype = dtype
def read(self, file_name, shape: ShapeFormat):
assert shape, "Please provide a valid shape."
assert file_name, "Please provide a filename."
if isinstance(file_name, (bytes, bytearray)):
file_name = file_name.decode('UTF-8')
print("---------- opening np file ",file_name)
data = np.load(file_name, allow_pickle=True).astype(self._dtype)
assert len(data.shape) == shape.get_number_of_dims(), \
"Dims of loaded data and provided shape don't match."
img = MedicalImage(data, shape)
img.set_property(ImageProperty.ORIGINAL_SHAPE, data.shape)
img.set_property(ImageProperty.FILENAME, file_name)
return img
| clara-train-examples-master | Tensorflow-Deprecated/GettingStarted/BYOC/myNpLoader.py |
# SPDX-License-Identifier: Apache-2.0
import tensorflow as tf
from ai4med.components.models.model import Model
import tensorflow.contrib.slim as slim
class CustomNetwork(Model):
def __init__(self, num_classes,factor=32,
training=False,data_format='channels_first',
final_activation='linear'):
Model.__init__(self)
self.model = None
self.num_classes = num_classes
self.factor = factor
self.training = training
self.data_format = data_format
self.final_activation = final_activation
if data_format == 'channels_first':
self.channel_axis = 1
elif data_format == 'channels_last':
self.channel_axis = -1
def network(self, inputs, training, num_classes, factor, data_format, channel_axis):
# very shallow Unet Network
with tf.variable_scope('CustomNetwork'):
conv1_1 = tf.keras.layers.Conv3D(factor, 3, padding='same', data_format=data_format, activation='relu')(inputs)
conv1_2 = tf.keras.layers.Conv3D(factor * 2, 3, padding='same', data_format=data_format, activation='relu')(conv1_1)
pool1 = tf.keras.layers.MaxPool3D(pool_size=(2, 2, 2), strides=2, data_format=data_format)(conv1_2)
conv2_1 = tf.keras.layers.Conv3D(factor * 2, 3, padding='same', data_format=data_format, activation='relu')(pool1)
conv2_2 = tf.keras.layers.Conv3D(factor * 4, 3, padding='same', data_format=data_format, activation='relu')(conv2_1)
unpool1 = tf.keras.layers.UpSampling3D(size=(2, 2, 2), data_format=data_format)(conv2_2)
unpool1 = tf.keras.layers.Concatenate(axis=channel_axis)([unpool1, conv1_2])
conv7_1 = tf.keras.layers.Conv3D(factor * 2, 3, padding='same', data_format=data_format, activation='relu')(unpool1)
conv7_2 = tf.keras.layers.Conv3D(factor * 2, 3, padding='same', data_format=data_format, activation='relu')(conv7_1)
output = tf.keras.layers.Conv3D(num_classes, 1, padding='same', data_format=data_format)(conv7_2)
if str.lower(self.final_activation) == 'softmax':
output = tf.nn.softmax(output, axis=channel_axis, name='softmax')
elif str.lower(self.final_activation) == 'sigmoid':
output = tf.nn.sigmoid(output, name='sigmoid')
elif str.lower(self.final_activation) == 'linear':
pass
else:
raise ValueError(
'Unsupported final_activation, it must of one (softmax, sigmoid or linear), but provided:' + self.final_activation)
model_vars = tf.trainable_variables()
slim.model_analyzer.analyze_vars(model_vars, print_info=True)
return output
# additional custom loss
def loss(self):
return 0
def get_predictions(self, inputs, training, build_ctx=None):
self.model = self.network(inputs=inputs,training=training,num_classes=self.num_classes
,factor=self.factor,data_format=self.data_format,channel_axis=self.channel_axis)
return self.model
def get_loss(self):
return self.loss()
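# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): building the graph once
# on a TF1-style placeholder. The patch size and channel count below are
# hypothetical (batch, channel, D, H, W in channels_first layout).
if __name__ == '__main__':
    inputs = tf.placeholder(tf.float32, shape=[None, 1, 32, 32, 32])
    net = CustomNetwork(num_classes=2, factor=8, training=True,
                        data_format='channels_first', final_activation='softmax')
    predictions = net.get_predictions(inputs, training=True)
    print(predictions)  # Tensor of shape (?, 2, 32, 32, 32)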
| clara-train-examples-master | Tensorflow-Deprecated/GettingStarted/BYOC/myNetworkArch.py |
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from ai4med.components.metric import Metric
from ai4med.libs.metrics.metric_list import MetricList
class SampleMetricAverage(MetricList):
"""
Generic class for tracking averages of metrics. Expects that the elements in self._list
are scalar values that will be averaged
"""
def __init__(self, name, invalid_value=float('nan'), report_path=None):
MetricList.__init__(self, name,
invalid_value=invalid_value,
report_path=report_path)
def get(self):
if self._list is None or self._list.size == 0:
return 0
return np.mean(self._list)
class SampleComputeAverage(Metric):
def __init__(self, name, field,
invalid_value=float('nan'),
report_path=None,
do_summary=True,
do_print=True,
is_key_metric=False):
m = SampleMetricAverage(name, invalid_value, report_path)
Metric.__init__(self, m,
field=field,
do_summary=do_summary,
do_print=do_print,
is_key_metric=is_key_metric)
| clara-train-examples-master | Tensorflow-Deprecated/GettingStarted/BYOC/myMetric.py |
# Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import statistics
import numpy as np
import torch
import torch.distributed
from monai.engines.workflow import Engine, Events
from monai.handlers.tensorboard_handlers import SummaryWriter
from monai.metrics import compute_meandice
from monai.transforms import rescale_array
from monai.utils import optional_import
from monai.visualize import plot_2d_or_3d_image
nib, _ = optional_import("nibabel")
torchvision, _ = optional_import("torchvision")
make_grid, _ = optional_import("torchvision.utils", name="make_grid")
Image, _ = optional_import("PIL.Image")
ImageDraw, _ = optional_import("PIL.ImageDraw")
class RegionDice:
def __init__(self):
self.data = []
def reset(self):
self.data = []
def update(self, y_pred, y, batched=True):
if not batched:
y_pred = y_pred[None]
y = y[None]
score = compute_meandice(y_pred=y_pred, y=y, include_background=False).mean()
self.data.append(score.item())
def mean(self):
return statistics.mean(self.data)
def stdev(self):
return statistics.stdev(self.data) if len(self.data) > 1 else 0
class DeepgrowStatsHandler:
def __init__(
self,
summary_writer=None,
interval=1,
log_dir="./runs",
tag_name="val_dice",
compute_metric=True,
images=True,
image_interval=1,
max_channels=1,
max_frames=64,
add_scalar=True,
merge_scalar=False,
fold_size=0,
):
self.writer = SummaryWriter(log_dir=log_dir) if summary_writer is None else summary_writer
self.interval = interval
self.tag_name = tag_name
self.compute_metric = compute_metric
self.images = images
self.image_interval = image_interval
self.max_channels = max_channels
self.max_frames = max_frames
self.add_scalar = add_scalar
self.merge_scalar = merge_scalar
self.fold_size = fold_size
self.logger = logging.getLogger(__name__)
if torch.distributed.is_initialized():
self.tag_name = "{}-r{}".format(self.tag_name, torch.distributed.get_rank())
self.plot_data = {}
self.metric_data = {}
def attach(self, engine: Engine) -> None:
engine.add_event_handler(Events.ITERATION_COMPLETED(every=self.interval), self, "iteration")
engine.add_event_handler(Events.EPOCH_COMPLETED(every=1), self, "epoch")
def write_images(self, epoch):
if not self.plot_data or not len(self.plot_data):
return
all_imgs = []
for region in sorted(self.plot_data.keys()):
metric = self.metric_data.get(region)
region_data = self.plot_data[region]
if len(region_data[0].shape) == 3:
ti = Image.new("RGB", region_data[0].shape[1:])
d = ImageDraw.Draw(ti)
t = "region: {}".format(region)
if self.compute_metric:
t = t + "\ndice: {:.4f}".format(metric.mean())
t = t + "\nstdev: {:.4f}".format(metric.stdev())
d.multiline_text((10, 10), t, fill=(255, 255, 0))
ti = rescale_array(np.rollaxis(np.array(ti), 2, 0)[0][np.newaxis])
all_imgs.append(ti)
all_imgs.extend(region_data)
if len(all_imgs[0].shape) == 3:
img_tensor = make_grid(tensor=torch.from_numpy(np.array(all_imgs)), nrow=4, normalize=True, pad_value=2)
self.writer.add_image(tag=f"Deepgrow Regions ({self.tag_name})", img_tensor=img_tensor, global_step=epoch)
if len(all_imgs[0].shape) == 4:
for region in sorted(self.plot_data.keys()):
tags = [f"region_{region}_image", f"region_{region}_label", f"region_{region}_output"]
if torch.distributed.is_initialized():
rank = "r{}-".format(torch.distributed.get_rank())
tags = [rank + tags[0], rank + tags[1], rank + tags[2]]
for i in range(3):
img = self.plot_data[region][i]
img = np.moveaxis(img, -3, -1)
plot_2d_or_3d_image(
img[np.newaxis], epoch, self.writer, 0, self.max_channels, self.max_frames, tags[i]
)
self.logger.info(
"Saved {} Regions {} into Tensorboard at epoch: {}".format(
len(self.plot_data), sorted([*self.plot_data]), epoch
)
)
self.writer.flush()
def write_region_metrics(self, epoch):
metric_sum = 0
means = {}
for region in self.metric_data:
metric = self.metric_data[region].mean()
self.logger.info(
"Epoch[{}] Metrics -- Region: {:0>2d}, {}: {:.4f}".format(epoch, region, self.tag_name, metric)
)
if self.merge_scalar:
means["{:0>2d}".format(region)] = metric
else:
self.writer.add_scalar("{}_{:0>2d}".format(self.tag_name, region), metric, epoch)
metric_sum += metric
if self.merge_scalar:
means["avg"] = metric_sum / len(self.metric_data)
self.writer.add_scalars("{}_region".format(self.tag_name), means, epoch)
elif len(self.metric_data) > 1:
metric_avg = metric_sum / len(self.metric_data)
self.writer.add_scalar("{}_regions_avg".format(self.tag_name), metric_avg, epoch)
self.writer.flush()
def __call__(self, engine: Engine, action) -> None:
total_steps = engine.state.iteration
if total_steps < engine.state.epoch_length:
total_steps = engine.state.epoch_length * (engine.state.epoch - 1) + total_steps
if action == "epoch" and not self.fold_size:
epoch = engine.state.epoch
elif self.fold_size and total_steps % self.fold_size == 0:
epoch = int(total_steps / self.fold_size)
else:
epoch = None
if epoch:
if self.images and epoch % self.image_interval == 0:
self.write_images(epoch)
if self.add_scalar:
self.write_region_metrics(epoch)
if action == "epoch" or epoch:
self.plot_data = {}
self.metric_data = {}
return
device = engine.state.device
batch_data = engine.state.batch
output_data = engine.state.output
for bidx in range(len(batch_data.get("region", []))):
region = batch_data.get("region")[bidx]
region = region.item() if torch.is_tensor(region) else region
if self.images and self.plot_data.get(region) is None:
self.plot_data[region] = [
rescale_array(batch_data["image"][bidx][0].detach().cpu().numpy()[np.newaxis], 0, 1),
rescale_array(batch_data["label"][bidx].detach().cpu().numpy(), 0, 1),
rescale_array(output_data["pred"][bidx].detach().cpu().numpy(), 0, 1),
]
if self.compute_metric:
if self.metric_data.get(region) is None:
self.metric_data[region] = RegionDice()
self.metric_data[region].update(
y_pred=output_data["pred"][bidx].to(device), y=batch_data["label"][bidx].to(device), batched=False
)
class SegmentationSaver:
def __init__(
self,
output_dir: str = "./runs",
save_np=False,
images=True,
):
self.output_dir = output_dir
self.save_np = save_np
self.images = images
os.makedirs(self.output_dir, exist_ok=True)
def attach(self, engine: Engine) -> None:
if not engine.has_event_handler(self, Events.ITERATION_COMPLETED):
engine.add_event_handler(Events.ITERATION_COMPLETED, self)
def __call__(self, engine: Engine):
batch_data = engine.state.batch
output_data = engine.state.output
device = engine.state.device
tag = ""
if torch.distributed.is_initialized():
tag = "r{}-".format(torch.distributed.get_rank())
for bidx in range(len(batch_data.get("image"))):
step = engine.state.iteration
region = batch_data.get("region")[bidx]
region = region.item() if torch.is_tensor(region) else region
image = batch_data["image"][bidx][0].detach().cpu().numpy()[np.newaxis]
label = batch_data["label"][bidx].detach().cpu().numpy()
pred = output_data["pred"][bidx].detach().cpu().numpy()
dice = compute_meandice(
y_pred=output_data["pred"][bidx][None].to(device),
y=batch_data["label"][bidx][None].to(device),
include_background=False,
).mean()
if self.save_np:
np.savez(
os.path.join(
self.output_dir,
"{}img_label_pred_{}_{:0>4d}_{:0>2d}_{:.4f}".format(tag, region, step, bidx, dice),
),
image,
label,
pred,
)
if self.images and len(image.shape) == 3:
img = make_grid(torch.from_numpy(rescale_array(image, 0, 1)[0]))
lab = make_grid(torch.from_numpy(rescale_array(label, 0, 1)[0]))
pos = rescale_array(output_data["image"][bidx][1].detach().cpu().numpy()[np.newaxis], 0, 1)[0]
neg = rescale_array(output_data["image"][bidx][2].detach().cpu().numpy()[np.newaxis], 0, 1)[0]
pre = make_grid(torch.from_numpy(np.array([rescale_array(pred, 0, 1)[0], pos, neg])))
torchvision.utils.save_image(
tensor=[img, lab, pre],
nrow=3,
pad_value=2,
fp=os.path.join(
self.output_dir,
"{}img_label_pred_{}_{:0>4d}_{:0>2d}_{:.4f}.png".format(tag, region, step, bidx, dice),
),
)
if self.images and len(image.shape) == 4:
samples = {"image": image[0], "label": label[0], "pred": pred[0]}
for sample in samples:
img = np.moveaxis(samples[sample], -3, -1)
img = nib.Nifti1Image(img, np.eye(4))
nib.save(
img,
os.path.join(
self.output_dir, "{}{}_{:0>4d}_{:0>2d}_{:.4f}.nii.gz".format(tag, sample, step, bidx, dice)
),
)
| clara-train-examples-master | PyTorch/NoteBooks/AIAA/deepgrow_2D/custom/deepgrow/handler.py |
import argparse
import json
import logging
import os
import sys
from monai.apps.deepgrow.dataset import create_dataset
from monai.data import partition_dataset
def prepare_datalist(args):
dimensions = args.dimensions
dataset_json = os.path.join(args.output, 'dataset.json')
logging.info('Processing dataset...')
with open(os.path.join(args.dataset_json)) as f:
datalist = json.load(f)
datalist = create_dataset(
datalist=datalist[args.datalist_key],
base_dir=args.dataset_root,
output_dir=args.output,
dimension=dimensions,
pixdim=[1.0] * dimensions,
limit=args.limit,
relative_path=args.relative_path
)
with open(dataset_json, 'w') as fp:
json.dump(datalist, fp, indent=2)
dataset_json = os.path.join(args.output, 'dataset.json')
with open(dataset_json) as f:
datalist = json.load(f)
logging.info('+++ Dataset File: {}'.format(dataset_json))
logging.info('+++ Total Records: {}'.format(len(datalist)))
logging.info('')
train_ds, val_ds = partition_dataset(datalist, ratios=[args.split, (1 - args.split)], shuffle=True, seed=args.seed)
dataset_json = os.path.join(args.output, 'dataset_0.json')
with open(dataset_json, 'w') as fp:
json.dump({'training': train_ds, 'validation': val_ds}, fp, indent=2)
logging.info('*** Dataset File: {}'.format(dataset_json))
logging.info('*** Total Records for Training: {}'.format(len(train_ds)))
logging.info('*** Total Records for Validation: {}'.format(len(val_ds)))
assert len(train_ds) > 0, "Train Dataset/Records is EMPTY"
assert len(val_ds) > 0, "Validation Dataset/Records is EMPTY"
def run(args):
for arg in vars(args):
logging.info('USING:: {} = {}'.format(arg, getattr(args, arg)))
logging.info("")
if not os.path.exists(args.output):
logging.info('output path [{}] does not exist. creating it now.'.format(args.output))
os.makedirs(args.output, exist_ok=True)
prepare_datalist(args)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--seed', type=int, default=42, help='Random Seed')
parser.add_argument('-dims', '--dimensions', type=int, default=2, choices=[2, 3], help='Output Dimension')
parser.add_argument('-d', '--dataset_root', default='/workspace/data/52432', help='Dataset Root Folder')
parser.add_argument('-j', '--dataset_json', default='/workspace/data/52432/dataset.json', help='Dataset JSON File')
parser.add_argument('-k', '--datalist_key', default='training', help='Key in Dataset JSON File')
parser.add_argument('-o', '--output', default='/workspace/data/52432/2D', help='Output path to save processed data')
parser.add_argument('-x', '--split', type=float, default=0.9, help='Ratio to split into training and validation')
parser.add_argument('-t', '--limit', type=int, default=0, help='Limit input records to process; 0 = no limit')
parser.add_argument('-r', '--relative_path', action='store_true', default=False, help='use relative path in output')
args = parser.parse_args()
run(args)
if __name__ == "__main__":
logging.basicConfig(
stream=sys.stdout,
level=logging.INFO,
format='[%(asctime)s.%(msecs)03d][%(levelname)5s] - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
main()
| clara-train-examples-master | PyTorch/NoteBooks/AIAA/deepgrow_2D/custom/deepgrow/prepare_dataset.py |
# Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
from monai.apps.deepgrow.interaction import Interaction
from monai.transforms import Compose
class ClickInteraction(Interaction):
"""
Deepgrow Training/Evaluation iteration method with interactions (simulation of clicks) support for image and label.
Args:
transforms: execute additional transformation during every iteration (before train).
Typically, several Tensor based transforms composed by `Compose`.
max_interactions: maximum number of interactions per iteration
train: training or evaluation
key_probability: field name to fill probability for every interaction
"""
def __init__(self, transforms, max_interactions: int, train: bool, key_probability: str = "probability") -> None:
self.transforms = transforms
self.max_interactions = max_interactions
self.train = train
self.key_probability = key_probability
if not isinstance(self.transforms, Compose):
transforms = []
for t in self.transforms:
transforms.append(self.init_external_class(t))
self.transforms = Compose(transforms)
@staticmethod
def init_external_class(config_dict):
class_args = None if config_dict.get("args") is None else dict(config_dict.get("args"))
class_path = config_dict.get("path", config_dict["name"])
module_name, class_name = class_path.rsplit(".", 1)
m = importlib.import_module(module_name)
c = getattr(m, class_name)
return c(**class_args) if class_args else c()
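# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original file): the kind of transform config
# dict that init_external_class expects -- "name" (or "path") pointing at an
# importable class, plus optional "args". The concrete transform chosen below
# is only an illustration of the format.
if __name__ == "__main__":
    example_config = {
        "name": "monai.transforms.Activationsd",
        "args": {"keys": "pred", "sigmoid": True},
    }
    transform = ClickInteraction.init_external_class(example_config)
    print(type(transform).__name__)  # -> Activationsd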
| clara-train-examples-master | PyTorch/NoteBooks/AIAA/deepgrow_2D/custom/deepgrow/interaction.py |
# Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| clara-train-examples-master | PyTorch/NoteBooks/AIAA/deepgrow_2D/custom/deepgrow/__init__.py |
# SPDX-License-Identifier: Apache-2.0
# Configuration file for jupyter-notebook.
#------------------------------------------------------------------------------
# Application(SingletonConfigurable) configuration
#------------------------------------------------------------------------------
## This is an application.
## The date format used by logging formatters for %(asctime)s
#c.Application.log_datefmt = '%Y-%m-%d %H:%M:%S'
## The Logging format template
#c.Application.log_format = '[%(name)s]%(highlevel)s %(message)s'
## Set the log level by value or name.
#c.Application.log_level = 30
#------------------------------------------------------------------------------
# JupyterApp(Application) configuration
#------------------------------------------------------------------------------
## Base class for Jupyter applications
## Answer yes to any prompts.
#c.JupyterApp.answer_yes = False
## Full path of a config file.
#c.JupyterApp.config_file = ''
## Specify a config file to load.
#c.JupyterApp.config_file_name = ''
## Generate default config file.
#c.JupyterApp.generate_config = False
#------------------------------------------------------------------------------
# NotebookApp(JupyterApp) configuration
#------------------------------------------------------------------------------
## Set the Access-Control-Allow-Credentials: true header
#c.NotebookApp.allow_credentials = False
## Set the Access-Control-Allow-Origin header
#
# Use '*' to allow any origin to access your server.
#
# Takes precedence over allow_origin_pat.
#c.NotebookApp.allow_origin = ''
## Use a regular expression for the Access-Control-Allow-Origin header
#
# Requests from an origin matching the expression will get replies with:
#
# Access-Control-Allow-Origin: origin
#
# where `origin` is the origin of the request.
#
# Ignored if allow_origin is set.
#c.NotebookApp.allow_origin_pat = ''
## Allow password to be changed at login for the notebook server.
#
# While logging in with a token, the notebook server UI will give the opportunity
# to the user to enter a new password at the same time that will replace the
# token login mechanism.
#
# This can be set to false to prevent changing password from the UI/API.
#c.NotebookApp.allow_password_change = True
## Allow requests where the Host header doesn't point to a local server
#
# By default, requests get a 403 forbidden response if the 'Host' header shows
# that the browser thinks it's on a non-local domain. Setting this option to
# True disables this check.
#
# This protects against 'DNS rebinding' attacks, where a remote web server
# serves you a page and then changes its DNS to send later requests to a local
# IP, bypassing same-origin checks.
#
# Local IP addresses (such as 127.0.0.1 and ::1) are allowed as local, along
# with hostnames configured in local_hostnames.
#c.NotebookApp.allow_remote_access = False
## Whether to allow the user to run the notebook as root.
#c.NotebookApp.allow_root = False
## Reload the webapp when changes are made to any Python src files.
#c.NotebookApp.autoreload = False
## DEPRECATED use base_url
#c.NotebookApp.base_project_url = '/'
## The base URL for the notebook server.
#
# Leading and trailing slashes can be omitted, and will automatically be added.
c.NotebookApp.base_url = '/notebooks/'
## Specify what command to use to invoke a web browser when opening the notebook.
# If not specified, the default browser will be determined by the `webbrowser`
# standard library module, which allows setting of the BROWSER environment
# variable to override it.
#c.NotebookApp.browser = ''
## The full path to an SSL/TLS certificate file.
#c.NotebookApp.certfile = '/usr/certs/server.crt'
## The full path to a certificate authority certificate for SSL/TLS client
# authentication.
#c.NotebookApp.client_ca = ''
## The config manager class to use
#c.NotebookApp.config_manager_class = 'notebook.services.config.manager.ConfigManager'
## The notebook manager class to use.
#c.NotebookApp.contents_manager_class = 'notebook.services.contents.largefilemanager.LargeFileManager'
## Extra keyword arguments to pass to `set_secure_cookie`. See tornado's
# set_secure_cookie docs for details.
#c.NotebookApp.cookie_options = {}
## The random bytes used to secure cookies. By default this is a new random
# number every time you start the Notebook. Set it to a value in a config file
# to enable logins to persist across server sessions.
#
# Note: Cookie secrets should be kept private, do not share config files with
# cookie_secret stored in plaintext (you can read the value from a file).
#c.NotebookApp.cookie_secret = b''
## The file where the cookie secret is stored.
#c.NotebookApp.cookie_secret_file = ''
## Override URL shown to users.
#
# Replace actual URL, including protocol, address, port and base URL, with the
# given value when displaying URL to the users. Do not change the actual
# connection URL. If authentication token is enabled, the token is added to the
# custom URL automatically.
#
# This option is intended to be used when the URL to display to the user cannot
# be determined reliably by the Jupyter notebook server (proxified or
# containerized setups for example).
#c.NotebookApp.custom_display_url = ''
## The default URL to redirect to from `/`
#c.NotebookApp.default_url = '/tree'
## Disable cross-site-request-forgery protection
#
# Jupyter notebook 4.3.1 introduces protection from cross-site request
# forgeries, requiring API requests to either:
#
# - originate from pages served by this server (validated with XSRF cookie and
# token), or - authenticate with a token
#
# Some anonymous compute resources still desire the ability to run code,
# completely without authentication. These services can disable all
# authentication and security checks, with the full knowledge of what that
# implies.
#c.NotebookApp.disable_check_xsrf = False
## Whether to enable MathJax for typesetting math/TeX
#
# MathJax is the javascript library Jupyter uses to render math/LaTeX. It is
# very large, so you may want to disable it if you have a slow internet
# connection, or for offline use of the notebook.
#
# When disabled, equations etc. will appear as their untransformed TeX source.
#c.NotebookApp.enable_mathjax = True
## extra paths to look for Javascript notebook extensions
#c.NotebookApp.extra_nbextensions_path = []
## handlers that should be loaded at higher priority than the default services
#c.NotebookApp.extra_services = []
## Extra paths to search for serving static files.
#
# This allows adding javascript/css to be available from the notebook server
# machine, or overriding individual files in the IPython
#c.NotebookApp.extra_static_paths = []
## Extra paths to search for serving jinja templates.
#
# Can be used to override templates from notebook.templates.
#c.NotebookApp.extra_template_paths = []
##
#c.NotebookApp.file_to_run = ''
## Extra keyword arguments to pass to `get_secure_cookie`. See tornado's
# get_secure_cookie docs for details.
#c.NotebookApp.get_secure_cookie_kwargs = {}
## Deprecated: Use minified JS file or not, mainly used during dev to avoid JS
# recompilation
#c.NotebookApp.ignore_minified_js = False
## (bytes/sec) Maximum rate at which stream output can be sent on iopub before
# it is limited.
#c.NotebookApp.iopub_data_rate_limit = 1000000
## (msgs/sec) Maximum rate at which messages can be sent on iopub before they are
# limited.
#c.NotebookApp.iopub_msg_rate_limit = 1000
## The IP address the notebook server will listen on.
#c.NotebookApp.ip = 'localhost'
## Supply extra arguments that will be passed to Jinja environment.
#c.NotebookApp.jinja_environment_options = {}
## Extra variables to supply to jinja templates when rendering.
#c.NotebookApp.jinja_template_vars = {}
## The kernel manager class to use.
#c.NotebookApp.kernel_manager_class = 'notebook.services.kernels.kernelmanager.MappingKernelManager'
## The kernel spec manager class to use. Should be a subclass of
# `jupyter_client.kernelspec.KernelSpecManager`.
#
# The Api of KernelSpecManager is provisional and might change without warning
# between this version of Jupyter and the next stable one.
#c.NotebookApp.kernel_spec_manager_class = 'jupyter_client.kernelspec.KernelSpecManager'
## The full path to a private key file for usage with SSL/TLS.
#c.NotebookApp.keyfile = '/usr/certs/server.key'
## Hostnames to allow as local when allow_remote_access is False.
#
# Local IP addresses (such as 127.0.0.1 and ::1) are automatically accepted as
# local as well.
#c.NotebookApp.local_hostnames = ['localhost']
## The login handler class to use.
#c.NotebookApp.login_handler_class = 'notebook.auth.login.LoginHandler'
## The logout handler class to use.
#c.NotebookApp.logout_handler_class = 'notebook.auth.logout.LogoutHandler'
## The MathJax.js configuration file that is to be used.
#c.NotebookApp.mathjax_config = 'TeX-AMS-MML_HTMLorMML-full,Safe'
## A custom url for MathJax.js. Should be in the form of a case-sensitive url to
# MathJax, for example: /static/components/MathJax/MathJax.js
#c.NotebookApp.mathjax_url = ''
## Sets the maximum allowed size of the client request body, specified in the
# Content-Length request header field. If the size in a request exceeds the
# configured value, a malformed HTTP message is returned to the client.
#
# Note: max_body_size is applied even in streaming mode.
#c.NotebookApp.max_body_size = 536870912
## Gets or sets the maximum amount of memory, in bytes, that is allocated for use
# by the buffer manager.
#c.NotebookApp.max_buffer_size = 536870912
## Gets or sets a lower bound on the open file handles process resource limit.
# This may need to be increased if you run into an OSError: [Errno 24] Too many
# open files. This is not applicable when running on Windows.
#c.NotebookApp.min_open_files_limit = 0
## Dict of Python modules to load as notebook server extensions. Entry values can
# be used to enable and disable the loading of the extensions. The extensions
# will be loaded in alphabetical order.
#c.NotebookApp.nbserver_extensions = {}
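# Example (editor's sketch, commented out): extensions are keyed by importable
# module name and switched on/off with a boolean; 'jupyterlab' is shown here on
# the assumption that the jupyterlab package is installed in this image.
# c.NotebookApp.nbserver_extensions = {'jupyterlab': True}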
## The directory to use for notebooks and kernels.
#c.NotebookApp.notebook_dir = ''
## Whether to open in a browser after starting. The specific browser used is
# platform dependent and determined by the python standard library `webbrowser`
# module, unless it is overridden using the --browser (NotebookApp.browser)
# configuration option.
#c.NotebookApp.open_browser = True
## Hashed password to use for web authentication.
#
# To generate, type in a python/IPython shell:
#
# from notebook.auth import passwd; passwd()
#
# The string should be of the form type:salt:hashed-password.
#c.NotebookApp.password = ''
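# Example (editor's sketch, commented out): paste the string returned by the
# passwd() call shown above. The value below is only a placeholder illustrating
# the type:salt:hashed-password format and will not work as-is.
# c.NotebookApp.password = 'sha1:SALT:HASHED-PASSWORD'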
## Forces users to use a password for the Notebook server. This is useful in a
# multi user environment, for instance when everybody in the LAN can access each
# other's machine through ssh.
#
# In such a case, serving the notebook server on localhost is not secure since
# any user can connect to the notebook server via ssh.
#c.NotebookApp.password_required = False
## The port the notebook server will listen on (env: JUPYTER_PORT).
#c.NotebookApp.port = 8888
## The number of additional ports to try if the specified port is not available
# (env: JUPYTER_PORT_RETRIES).
#c.NotebookApp.port_retries = 50
## DISABLED: use %pylab or %matplotlib in the notebook to enable matplotlib.
#c.NotebookApp.pylab = 'disabled'
## If True, display a button in the dashboard to quit (shutdown the notebook
# server).
#c.NotebookApp.quit_button = True
## (sec) Time window used to check the message and data rate limits.
#c.NotebookApp.rate_limit_window = 3
## Reraise exceptions encountered loading server extensions?
#c.NotebookApp.reraise_server_extension_failures = False
## DEPRECATED use the nbserver_extensions dict instead
#c.NotebookApp.server_extensions = []
## The session manager class to use.
#c.NotebookApp.session_manager_class = 'notebook.services.sessions.sessionmanager.SessionManager'
## Shut down the server after N seconds with no kernels or terminals running and
# no activity. This can be used together with culling idle kernels
# (MappingKernelManager.cull_idle_timeout) to shutdown the notebook server when
# it's not in use. This is not precisely timed: it may shut down up to a minute
# later. 0 (the default) disables this automatic shutdown.
#c.NotebookApp.shutdown_no_activity_timeout = 0
## The UNIX socket the notebook server will listen on.
#c.NotebookApp.sock = ''
## The permissions mode for UNIX socket creation (default: 0600).
#c.NotebookApp.sock_mode = '0600'
## Supply SSL options for the tornado HTTPServer. See the tornado docs for
# details.
#c.NotebookApp.ssl_options = {}
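# Example (editor's sketch, commented out): a minimal TLS setup reusing the
# certificate/key paths shown for certfile/keyfile earlier in this file; tornado
# accepts a dict of standard ssl certificate options here.
# c.NotebookApp.ssl_options = {'certfile': '/usr/certs/server.crt',
#                              'keyfile': '/usr/certs/server.key'}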
## Supply overrides for terminado. Currently only supports "shell_command". On
# Unix, if "shell_command" is not provided, a non-login shell is launched by
# default when the notebook server is connected to a terminal, a login shell
# otherwise.
c.NotebookApp.terminado_settings = {"shell_command": ["/bin/bash"]}
## Set to False to disable terminals.
#
# This does *not* make the notebook server more secure by itself. Anything the
# user can do in a terminal, they can also do in a notebook.
#
# Terminals may also be automatically disabled if the terminado package is not
# available.
#c.NotebookApp.terminals_enabled = True
## Token used for authenticating first-time connections to the server.
#
# The token can be read from the file referenced by JUPYTER_TOKEN_FILE or set
# directly with the JUPYTER_TOKEN environment variable.
#
# When no password is enabled, the default is to generate a new, random token.
#
# Setting to an empty string disables authentication altogether, which is NOT
# RECOMMENDED.
#c.NotebookApp.token = '<generated>'
## Supply overrides for the tornado.web.Application that the Jupyter notebook
# uses.
#c.NotebookApp.tornado_settings = {}
## Whether or not to trust the X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded-
# For headers sent by the upstream reverse proxy. Necessary if the proxy handles
# SSL.
#c.NotebookApp.trust_xheaders = False
## Disable launching browser by redirect file
#
# For notebook versions > 5.7.2, a security measure was added that prevents the
# authentication token used to launch the browser from being visible. This
# feature makes it difficult for other users on a multi-user system to run code
# in your Jupyter session as you.
#
# However, in some environments (like Windows Subsystem for Linux (WSL) and
# Chromebooks), launching a browser via a redirect file can lead to the browser
# failing to load. This is because of the difference in file structures/paths
# between the runtime and the browser.
#
# Setting this option to False disables this behavior, allowing the browser to
# launch using a URL and a visible token (as before).
#c.NotebookApp.use_redirect_file = True
## DEPRECATED, use tornado_settings
#c.NotebookApp.webapp_settings = {}
## Specify where to open the notebook on startup. This is the `new` argument
# passed to the standard library method `webbrowser.open`. The behaviour is not
# guaranteed, but depends on browser support. Valid values are:
#
# - 2 opens a new tab,
# - 1 opens a new window,
# - 0 opens in an existing window.
#
# See the `webbrowser.open` documentation for details.
#c.NotebookApp.webbrowser_open_new = 2
## Set the tornado compression options for websocket connections.
#
# This value will be returned from
# :meth:`WebSocketHandler.get_compression_options`. None (default) will disable
# compression. A dict (even an empty one) will enable compression.
#
# See the tornado docs for WebSocketHandler.get_compression_options for details.
#c.NotebookApp.websocket_compression_options = None
## The base URL for websockets, if it differs from the HTTP server (hint: it
# almost certainly doesn't).
#
# Should be in the form of an HTTP origin: ws[s]://hostname[:port]
#c.NotebookApp.websocket_url = ''
#------------------------------------------------------------------------------
# LabApp(NotebookApp) configuration
#------------------------------------------------------------------------------
## The app directory to launch JupyterLab from.
#c.LabApp.app_dir = '/opt/conda/share/jupyter/lab'
## Whether to start the app in core mode. In this mode, JupyterLab will run using
# the JavaScript assets that are within the installed JupyterLab Python package.
# In core mode, third party extensions are disabled. The `--dev-mode` flag is an
# alias to this to be used when the Python package itself is installed in
# development mode (`pip install -e .`).
#c.LabApp.core_mode = False
## The default URL to redirect to from `/`
#c.LabApp.default_url = '/lab'
## Whether to start the app in dev mode. Uses the unpublished local JavaScript
# packages in the `dev_mode` folder. In this case JupyterLab will show a red
# stripe at the top of the page. It can only be used if JupyterLab is installed
# as `pip install -e .`.
#c.LabApp.dev_mode = False
## Whether to expose the global app instance to browser via window.jupyterlab
#c.LabApp.expose_app_in_browser = False
## The override url for static lab assets, typically a CDN.
#c.LabApp.override_static_url = ''
## The override url for static lab theme assets, typically a CDN.
#c.LabApp.override_theme_url = ''
## The directory for user settings.
#c.LabApp.user_settings_dir = '/root/.jupyter/lab/user-settings'
## Whether to serve the app in watch mode
#c.LabApp.watch = False
## The directory for workspaces
#c.LabApp.workspaces_dir = '/root/.jupyter/lab/workspaces'
#------------------------------------------------------------------------------
# ConnectionFileMixin(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## Mixin for configurable classes that work with connection files
## JSON file in which to store connection info [default: kernel-<pid>.json]
#
# This file will contain the IP, ports, and authentication key needed to connect
# clients to this kernel. By default, this file will be created in the security
# dir of the current profile, but can be specified by absolute path.
#c.ConnectionFileMixin.connection_file = ''
## set the control (ROUTER) port [default: random]
#c.ConnectionFileMixin.control_port = 0
## set the heartbeat port [default: random]
#c.ConnectionFileMixin.hb_port = 0
## set the iopub (PUB) port [default: random]
#c.ConnectionFileMixin.iopub_port = 0
## Set the kernel's IP address [default localhost]. If the IP address is
# something other than localhost, then Consoles on other machines will be able
# to connect to the Kernel, so be careful!
#c.ConnectionFileMixin.ip = ''
## set the shell (ROUTER) port [default: random]
#c.ConnectionFileMixin.shell_port = 0
## set the stdin (ROUTER) port [default: random]
#c.ConnectionFileMixin.stdin_port = 0
##
#c.ConnectionFileMixin.transport = 'tcp'
#------------------------------------------------------------------------------
# KernelManager(ConnectionFileMixin) configuration
#------------------------------------------------------------------------------
## Manages a single kernel in a subprocess on this host.
#
# This version starts kernels with Popen.
## Should we autorestart the kernel if it dies.
#c.KernelManager.autorestart = True
## DEPRECATED: Use kernel_name instead.
#
# The Popen Command to launch the kernel. Override this if you have a custom
# kernel. If kernel_cmd is specified in a configuration file, Jupyter does not
# pass any arguments to the kernel, because it cannot make any assumptions about
# the arguments that the kernel understands. In particular, this means that the
# kernel does not receive the option --debug if it is given on the Jupyter command
# line.
#c.KernelManager.kernel_cmd = []
## Time to wait for a kernel to terminate before killing it, in seconds.
#c.KernelManager.shutdown_wait_time = 5.0
#------------------------------------------------------------------------------
# Session(Configurable) configuration
#------------------------------------------------------------------------------
## Object for handling serialization and sending of messages.
#
# The Session object handles building messages and sending them with ZMQ sockets
# or ZMQStream objects. Objects can communicate with each other over the
# network via Session objects, and only need to work with the dict-based IPython
# message spec. The Session will handle serialization/deserialization, security,
# and metadata.
#
# Sessions support configurable serialization via packer/unpacker traits, and
# signing with HMAC digests via the key/keyfile traits.
#
# Parameters
# ----------
#
# debug : bool
# whether to trigger extra debugging statements
# packer/unpacker : str : 'json', 'pickle' or import_string
# importstrings for methods to serialize message parts. If just
# 'json' or 'pickle', predefined JSON and pickle packers will be used.
# Otherwise, the entire importstring must be used.
#
# The functions must accept at least valid JSON input, and output *bytes*.
#
# For example, to use msgpack:
# packer = 'msgpack.packb', unpacker='msgpack.unpackb'
# pack/unpack : callables
# You can also set the pack/unpack callables for serialization directly.
# session : bytes
# the ID of this Session object. The default is to generate a new UUID.
# username : unicode
# username added to message headers. The default is to ask the OS.
# key : bytes
# The key used to initialize an HMAC signature. If unset, messages
# will not be signed or checked.
# keyfile : filepath
# The file containing a key. If this is set, `key` will be initialized
# to the contents of the file.
## Threshold (in bytes) beyond which an object's buffer should be extracted to
# avoid pickling.
#c.Session.buffer_threshold = 1024
## Whether to check PID to protect against calls after fork.
#
# This check can be disabled if fork-safety is handled elsewhere.
#c.Session.check_pid = True
## Threshold (in bytes) beyond which a buffer should be sent without copying.
#c.Session.copy_threshold = 65536
## Debug output in the Session
#c.Session.debug = False
## The maximum number of digests to remember.
#
# The digest history will be culled when it exceeds this value.
#c.Session.digest_history_size = 65536
## The maximum number of items for a container to be introspected for custom
# serialization. Containers larger than this are pickled outright.
#c.Session.item_threshold = 64
## execution key, for signing messages.
#c.Session.key = b''
## path to file containing execution key.
#c.Session.keyfile = ''
## Metadata dictionary, which serves as the default top-level metadata dict for
# each message.
#c.Session.metadata = {}
## The name of the packer for serializing messages. Should be one of 'json',
# 'pickle', or an import name for a custom callable serializer.
#c.Session.packer = 'json'
## The UUID identifying this session.
#c.Session.session = ''
## The digest scheme used to construct the message signatures. Must have the form
# 'hmac-HASH'.
#c.Session.signature_scheme = 'hmac-sha256'
## The name of the unpacker for unserializing messages. Only used with custom
# functions for `packer`.
#c.Session.unpacker = 'json'
## Username for the Session. Default is your system username.
#c.Session.username = 'username'
#------------------------------------------------------------------------------
# MultiKernelManager(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## A class for managing multiple kernels.
## The name of the default kernel to start
#c.MultiKernelManager.default_kernel_name = 'python3'
## The kernel manager class. This is configurable to allow subclassing of the
# KernelManager for customized behavior.
#c.MultiKernelManager.kernel_manager_class = 'jupyter_client.ioloop.IOLoopKernelManager'
## Share a single zmq.Context to talk to all my kernels
#c.MultiKernelManager.shared_context = True
#------------------------------------------------------------------------------
# MappingKernelManager(MultiKernelManager) configuration
#------------------------------------------------------------------------------
## A KernelManager that handles notebook mapping and HTTP error handling
## White list of allowed kernel message types. When the list is empty, all
# message types are allowed.
#c.MappingKernelManager.allowed_message_types = []
## Whether messages from kernels whose frontends have disconnected should be
# buffered in-memory. When True (default), messages are buffered and replayed on
# reconnect, avoiding lost messages due to interrupted connectivity. Disable if
# long-running kernels will produce too much output while no frontends are
# connected.
#c.MappingKernelManager.buffer_offline_messages = True
## Whether to consider culling kernels which are busy. Only effective if
# cull_idle_timeout > 0.
#c.MappingKernelManager.cull_busy = False
## Whether to consider culling kernels which have one or more connections. Only
# effective if cull_idle_timeout > 0.
#c.MappingKernelManager.cull_connected = False
## Timeout (in seconds) after which a kernel is considered idle and ready to be
# culled. Values of 0 or lower disable culling. Very short timeouts may result
# in kernels being culled for users with poor network connections.
#c.MappingKernelManager.cull_idle_timeout = 0
## The interval (in seconds) on which to check for idle kernels exceeding the
# cull timeout value.
#c.MappingKernelManager.cull_interval = 300
## Timeout for giving up on a kernel (in seconds). On starting and restarting
# kernels, we check whether the kernel is running and responsive by sending
# kernel_info_requests. This sets the timeout in seconds for how long the kernel
# can take before being presumed dead. This affects the MappingKernelManager
# (which handles kernel restarts) and the ZMQChannelsHandler (which handles the
# startup).
#c.MappingKernelManager.kernel_info_timeout = 60
##
#c.MappingKernelManager.root_dir = ''
#------------------------------------------------------------------------------
# KernelSpecManager(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## If there is no Python kernelspec registered and the IPython kernel is
# available, ensure it is added to the spec list.
#c.KernelSpecManager.ensure_native_kernel = True
## The kernel spec class. This is configurable to allow subclassing of the
# KernelSpecManager for customized behavior.
#c.KernelSpecManager.kernel_spec_class = 'jupyter_client.kernelspec.KernelSpec'
## Whitelist of allowed kernel names.
#
# By default, all installed kernels are allowed.
#c.KernelSpecManager.whitelist = set()
#------------------------------------------------------------------------------
# ContentsManager(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## Base class for serving files and directories.
#
# This serves any text or binary file, as well as directories, with special
# handling for JSON notebook documents.
#
# Most APIs take a path argument, which is always an API-style unicode path, and
# always refers to a directory.
#
# - unicode, not url-escaped
# - '/'-separated
# - leading and trailing '/' will be stripped
# - if unspecified, path defaults to '',
# indicating the root path.
## Allow access to hidden files
#c.ContentsManager.allow_hidden = False
##
#c.ContentsManager.checkpoints = None
##
#c.ContentsManager.checkpoints_class = 'notebook.services.contents.checkpoints.Checkpoints'
##
#c.ContentsManager.checkpoints_kwargs = {}
## handler class to use when serving raw file requests.
#
# Default is a fallback that talks to the ContentsManager API, which may be
# inefficient, especially for large files.
#
# Local files-based ContentsManagers can use a StaticFileHandler subclass, which
# will be much more efficient.
#
# Access to these files should be Authenticated.
#c.ContentsManager.files_handler_class = 'notebook.files.handlers.FilesHandler'
## Extra parameters to pass to files_handler_class.
#
# For example, StaticFileHandlers generally expect a `path` argument specifying
# the root directory from which to serve files.
#c.ContentsManager.files_handler_params = {}
## Glob patterns to hide in file and directory listings.
#c.ContentsManager.hide_globs = ['__pycache__', '*.pyc', '*.pyo', '.DS_Store', '*.so', '*.dylib', '*~']
## Python callable or importstring thereof
#
# To be called on a contents model prior to save.
#
# This can be used to process the structure, such as removing notebook outputs
# or other side effects that should not be saved.
#
# It will be called as (all arguments passed by keyword)::
#
# hook(path=path, model=model, contents_manager=self)
#
# - model: the model to be saved. Includes file contents.
# Modifying this dict will affect the file that is stored.
# - path: the API path of the save destination
# - contents_manager: this ContentsManager instance
#c.ContentsManager.pre_save_hook = None
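# Example (editor's sketch, commented out): a pre-save hook that strips code-cell
# outputs so that only clean notebooks are written to disk. The call convention
# follows the description above; the function name is our own choice.
#
# def scrub_output_pre_save(model, path, contents_manager, **kwargs):
#     """Remove outputs and execution counts before a notebook is saved."""
#     if model['type'] != 'notebook':
#         return
#     for cell in model['content']['cells']:
#         if cell['cell_type'] == 'code':
#             cell['outputs'] = []
#             cell['execution_count'] = None
#
# c.ContentsManager.pre_save_hook = scrub_output_pre_save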
##
#c.ContentsManager.root_dir = '/'
## The base name used when creating untitled directories.
#c.ContentsManager.untitled_directory = 'Untitled Folder'
## The base name used when creating untitled files.
#c.ContentsManager.untitled_file = 'untitled'
## The base name used when creating untitled notebooks.
#c.ContentsManager.untitled_notebook = 'Untitled'
#------------------------------------------------------------------------------
# FileManagerMixin(Configurable) configuration
#------------------------------------------------------------------------------
## Mixin for ContentsAPI classes that interact with the filesystem.
#
# Provides facilities for reading, writing, and copying both notebooks and
# generic files.
#
# Shared by FileContentsManager and FileCheckpoints.
#
# Note
# ----
# Classes using this mixin must provide the following attributes:
#
# root_dir : unicode
#     A directory against which API-style paths are to be resolved.
#
# log : logging.Logger
## By default, notebooks are first saved to a temporary file on disk and, if the
# write succeeds, that temporary file replaces the old one. This procedure, called
# 'atomic_writing', causes problems on file systems without operation order
# enforcement (like some networked file systems). If set to False, the new
# notebook is written directly over the old one, which can fail (e.g. on a full
# filesystem or when a quota is exceeded).
#c.FileManagerMixin.use_atomic_writing = True
#------------------------------------------------------------------------------
# FileContentsManager(FileManagerMixin,ContentsManager) configuration
#------------------------------------------------------------------------------
## If True (default), deleting files will send them to the platform's
# trash/recycle bin, where they can be recovered. If False, deleting files
# really deletes them.
#c.FileContentsManager.delete_to_trash = True
## Python callable or importstring thereof
#
# to be called on the path of a file just saved.
#
# This can be used to process the file on disk, such as converting the notebook
# to a script or HTML via nbconvert.
#
# It will be called as (all arguments passed by keyword)::
#
# hook(os_path=os_path, model=model, contents_manager=instance)
#
# - path: the filesystem path to the file just written
# - model: the model representing the file
# - contents_manager: this ContentsManager instance
#c.FileContentsManager.post_save_hook = None
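# Example (editor's sketch, commented out): a post-save hook that also exports the
# notebook as a plain .py script by shelling out to `jupyter nbconvert`. The call
# convention follows the description above; the function name is our own choice.
#
# from subprocess import check_call
#
# def export_script_post_save(model, os_path, contents_manager, **kwargs):
#     """After each notebook save, write a sibling .py script."""
#     if model['type'] != 'notebook':
#         return
#     check_call(['jupyter', 'nbconvert', '--to', 'script', os_path])
#
# c.FileContentsManager.post_save_hook = export_script_post_save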
##
#c.FileContentsManager.root_dir = ''
## DEPRECATED, use post_save_hook. Will be removed in Notebook 5.0
#c.FileContentsManager.save_script = False
#------------------------------------------------------------------------------
# NotebookNotary(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## A class for computing and verifying notebook signatures.
## The hashing algorithm used to sign notebooks.
#c.NotebookNotary.algorithm = 'sha256'
## The sqlite file in which to store notebook signatures. By default, this will
# be in your Jupyter data directory. You can set it to ':memory:' to disable
# sqlite writing to the filesystem.
#c.NotebookNotary.db_file = ''
## The secret key with which notebooks are signed.
#c.NotebookNotary.secret = b''
## The file where the secret key is stored.
#c.NotebookNotary.secret_file = ''
## A callable returning the storage backend for notebook signatures. The default
# uses an SQLite database.
#c.NotebookNotary.store_factory = traitlets.Undefined
#------------------------------------------------------------------------------
# GatewayKernelManager(MappingKernelManager) configuration
#------------------------------------------------------------------------------
## Kernel manager that supports remote kernels hosted by Jupyter Kernel or
# Enterprise Gateway.
#------------------------------------------------------------------------------
# GatewayKernelSpecManager(KernelSpecManager) configuration
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# GatewayClient(SingletonConfigurable) configuration
#------------------------------------------------------------------------------
## This class manages the configuration. It's its own singleton class so that we
# can share these values across all objects. It also contains some helper methods
# to build request arguments out of the various config options.
## The authorization token used in the HTTP headers. (JUPYTER_GATEWAY_AUTH_TOKEN
# env var)
#c.GatewayClient.auth_token = None
## The filename of CA certificates or None to use defaults.
# (JUPYTER_GATEWAY_CA_CERTS env var)
#c.GatewayClient.ca_certs = None
## The filename for client SSL certificate, if any. (JUPYTER_GATEWAY_CLIENT_CERT
# env var)
#c.GatewayClient.client_cert = None
## The filename for client SSL key, if any. (JUPYTER_GATEWAY_CLIENT_KEY env var)
#c.GatewayClient.client_key = None
## The time allowed for HTTP connection establishment with the Gateway server.
# (JUPYTER_GATEWAY_CONNECT_TIMEOUT env var)
#c.GatewayClient.connect_timeout = 40.0
## A comma-separated list of environment variable names that will be included,
# along with their values, in the kernel startup request. The corresponding
# `env_whitelist` configuration value must also be set on the Gateway server -
# since that configuration value indicates which environmental values to make
# available to the kernel. (JUPYTER_GATEWAY_ENV_WHITELIST env var)
#c.GatewayClient.env_whitelist = ''
## Additional HTTP headers to pass on the request. This value will be converted
# to a dict. (JUPYTER_GATEWAY_HEADERS env var)
#c.GatewayClient.headers = '{}'
## The password for HTTP authentication. (JUPYTER_GATEWAY_HTTP_PWD env var)
#c.GatewayClient.http_pwd = None
## The username for HTTP authentication. (JUPYTER_GATEWAY_HTTP_USER env var)
#c.GatewayClient.http_user = None
## The gateway API endpoint for accessing kernel resources
# (JUPYTER_GATEWAY_KERNELS_ENDPOINT env var)
#c.GatewayClient.kernels_endpoint = '/api/kernels'
## The gateway API endpoint for accessing kernelspecs
# (JUPYTER_GATEWAY_KERNELSPECS_ENDPOINT env var)
#c.GatewayClient.kernelspecs_endpoint = '/api/kernelspecs'
## The gateway endpoint for accessing kernelspecs resources
# (JUPYTER_GATEWAY_KERNELSPECS_RESOURCE_ENDPOINT env var)
#c.GatewayClient.kernelspecs_resource_endpoint = '/kernelspecs'
## The time allowed for HTTP request completion. (JUPYTER_GATEWAY_REQUEST_TIMEOUT
# env var)
#c.GatewayClient.request_timeout = 40.0
## The url of the Kernel or Enterprise Gateway server where kernel specifications
# are defined and kernel management takes place. If defined, this Notebook
# server acts as a proxy for all kernel management and kernel specification
# retrieval. (JUPYTER_GATEWAY_URL env var)
#c.GatewayClient.url = None
## For HTTPS requests, determines if server's certificate should be validated or
# not. (JUPYTER_GATEWAY_VALIDATE_CERT env var)
#c.GatewayClient.validate_cert = True
## The websocket url of the Kernel or Enterprise Gateway server. If not
# provided, this value will correspond to the value of the Gateway url with 'ws'
# in place of 'http'. (JUPYTER_GATEWAY_WS_URL env var)
#c.GatewayClient.ws_url = None
#------------------------------------------------------------------------------
# TerminalManager(LoggingConfigurable,NamedTermManager) configuration
#------------------------------------------------------------------------------
##
## Timeout (in seconds) after which an inactive terminal is considered ready to be
# culled. Values of 0 or lower disable culling.
#c.TerminalManager.cull_inactive_timeout = 0
## The interval (in seconds) on which to check for terminals exceeding the
# inactive timeout value.
#c.TerminalManager.cull_interval = 300
| clara-train-examples-master | PyTorch/NoteBooks/AIAA/OHIF-Orthanc/config/jupyter_notebook_config.py |
# Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import statistics
import numpy as np
import torch
import torch.distributed
from monai.engines.workflow import Engine, Events
from monai.handlers.tensorboard_handlers import SummaryWriter
from monai.metrics import compute_meandice
from monai.transforms import rescale_array
from monai.utils import optional_import
from monai.visualize import plot_2d_or_3d_image
nib, _ = optional_import("nibabel")
torchvision, _ = optional_import("torchvision")
make_grid, _ = optional_import("torchvision.utils", name="make_grid")
Image, _ = optional_import("PIL.Image")
ImageDraw, _ = optional_import("PIL.ImageDraw")
class RegionDice:
def __init__(self):
self.data = []
def reset(self):
self.data = []
def update(self, y_pred, y, batched=True):
if not batched:
y_pred = y_pred[None]
y = y[None]
score = compute_meandice(y_pred=y_pred, y=y, include_background=False).mean()
self.data.append(score.item())
def mean(self):
return statistics.mean(self.data)
def stdev(self):
return statistics.stdev(self.data) if len(self.data) > 1 else 0
class DeepgrowStatsHandler:
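    """Logs Deepgrow per-region Dice statistics and sample image/label/prediction grids to TensorBoard."""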
def __init__(
self,
summary_writer=None,
interval=1,
log_dir="./runs",
tag_name="val_dice",
compute_metric=True,
images=True,
image_interval=1,
max_channels=1,
max_frames=64,
add_scalar=True,
merge_scalar=False,
fold_size=0,
):
self.writer = SummaryWriter(log_dir=log_dir) if summary_writer is None else summary_writer
self.interval = interval
self.tag_name = tag_name
self.compute_metric = compute_metric
self.images = images
self.image_interval = image_interval
self.max_channels = max_channels
self.max_frames = max_frames
self.add_scalar = add_scalar
self.merge_scalar = merge_scalar
self.fold_size = fold_size
self.logger = logging.getLogger(__name__)
if torch.distributed.is_initialized():
self.tag_name = "{}-r{}".format(self.tag_name, torch.distributed.get_rank())
self.plot_data = {}
self.metric_data = {}
def attach(self, engine: Engine) -> None:
engine.add_event_handler(Events.ITERATION_COMPLETED(every=self.interval), self, "iteration")
engine.add_event_handler(Events.EPOCH_COMPLETED(every=1), self, "epoch")
def write_images(self, epoch):
if not self.plot_data or not len(self.plot_data):
return
all_imgs = []
for region in sorted(self.plot_data.keys()):
metric = self.metric_data.get(region)
region_data = self.plot_data[region]
if len(region_data[0].shape) == 3:
ti = Image.new("RGB", region_data[0].shape[1:])
d = ImageDraw.Draw(ti)
t = "region: {}".format(region)
if self.compute_metric:
t = t + "\ndice: {:.4f}".format(metric.mean())
t = t + "\nstdev: {:.4f}".format(metric.stdev())
d.multiline_text((10, 10), t, fill=(255, 255, 0))
ti = rescale_array(np.rollaxis(np.array(ti), 2, 0)[0][np.newaxis])
all_imgs.append(ti)
all_imgs.extend(region_data)
if len(all_imgs[0].shape) == 3:
img_tensor = make_grid(tensor=torch.from_numpy(np.array(all_imgs)), nrow=4, normalize=True, pad_value=2)
self.writer.add_image(tag=f"Deepgrow Regions ({self.tag_name})", img_tensor=img_tensor, global_step=epoch)
if len(all_imgs[0].shape) == 4:
for region in sorted(self.plot_data.keys()):
tags = [f"region_{region}_image", f"region_{region}_label", f"region_{region}_output"]
if torch.distributed.is_initialized():
rank = "r{}-".format(torch.distributed.get_rank())
tags = [rank + tags[0], rank + tags[1], rank + tags[2]]
for i in range(3):
img = self.plot_data[region][i]
img = np.moveaxis(img, -3, -1)
plot_2d_or_3d_image(
img[np.newaxis], epoch, self.writer, 0, self.max_channels, self.max_frames, tags[i]
)
self.logger.info(
"Saved {} Regions {} into Tensorboard at epoch: {}".format(
len(self.plot_data), sorted([*self.plot_data]), epoch
)
)
self.writer.flush()
def write_region_metrics(self, epoch):
metric_sum = 0
means = {}
for region in self.metric_data:
metric = self.metric_data[region].mean()
self.logger.info(
"Epoch[{}] Metrics -- Region: {:0>2d}, {}: {:.4f}".format(epoch, region, self.tag_name, metric)
)
if self.merge_scalar:
means["{:0>2d}".format(region)] = metric
else:
self.writer.add_scalar("{}_{:0>2d}".format(self.tag_name, region), metric, epoch)
metric_sum += metric
if self.merge_scalar:
means["avg"] = metric_sum / len(self.metric_data)
self.writer.add_scalars("{}_region".format(self.tag_name), means, epoch)
elif len(self.metric_data) > 1:
metric_avg = metric_sum / len(self.metric_data)
self.writer.add_scalar("{}_regions_avg".format(self.tag_name), metric_avg, epoch)
self.writer.flush()
def __call__(self, engine: Engine, action) -> None:
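        # called for both "iteration" and "epoch" events: on epoch (or fold) boundaries it
        # flushes the buffered images/metrics, otherwise it accumulates per-region statistics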
total_steps = engine.state.iteration
if total_steps < engine.state.epoch_length:
total_steps = engine.state.epoch_length * (engine.state.epoch - 1) + total_steps
if action == "epoch" and not self.fold_size:
epoch = engine.state.epoch
elif self.fold_size and total_steps % self.fold_size == 0:
epoch = int(total_steps / self.fold_size)
else:
epoch = None
if epoch:
if self.images and epoch % self.image_interval == 0:
self.write_images(epoch)
if self.add_scalar:
self.write_region_metrics(epoch)
if action == "epoch" or epoch:
self.plot_data = {}
self.metric_data = {}
return
device = engine.state.device
batch_data = engine.state.batch
output_data = engine.state.output
for bidx in range(len(batch_data.get("region", []))):
region = batch_data.get("region")[bidx]
region = region.item() if torch.is_tensor(region) else region
if self.images and self.plot_data.get(region) is None:
self.plot_data[region] = [
rescale_array(batch_data["image"][bidx][0].detach().cpu().numpy()[np.newaxis], 0, 1),
rescale_array(batch_data["label"][bidx].detach().cpu().numpy(), 0, 1),
rescale_array(output_data["pred"][bidx].detach().cpu().numpy(), 0, 1),
]
if self.compute_metric:
if self.metric_data.get(region) is None:
self.metric_data[region] = RegionDice()
self.metric_data[region].update(
y_pred=output_data["pred"][bidx].to(device), y=batch_data["label"][bidx].to(device), batched=False
)
class SegmentationSaver:
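    """Saves per-iteration image/label/prediction samples (PNG, NIfTI or .npz) with the Dice score embedded in the file name."""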
def __init__(
self,
output_dir: str = "./runs",
save_np=False,
images=True,
):
self.output_dir = output_dir
self.save_np = save_np
self.images = images
os.makedirs(self.output_dir, exist_ok=True)
def attach(self, engine: Engine) -> None:
if not engine.has_event_handler(self, Events.ITERATION_COMPLETED):
engine.add_event_handler(Events.ITERATION_COMPLETED, self)
def __call__(self, engine: Engine):
batch_data = engine.state.batch
output_data = engine.state.output
device = engine.state.device
tag = ""
if torch.distributed.is_initialized():
tag = "r{}-".format(torch.distributed.get_rank())
for bidx in range(len(batch_data.get("image"))):
step = engine.state.iteration
region = batch_data.get("region")[bidx]
region = region.item() if torch.is_tensor(region) else region
image = batch_data["image"][bidx][0].detach().cpu().numpy()[np.newaxis]
label = batch_data["label"][bidx].detach().cpu().numpy()
pred = output_data["pred"][bidx].detach().cpu().numpy()
dice = compute_meandice(
y_pred=output_data["pred"][bidx][None].to(device),
y=batch_data["label"][bidx][None].to(device),
include_background=False,
).mean()
if self.save_np:
np.savez(
os.path.join(
self.output_dir,
"{}img_label_pred_{}_{:0>4d}_{:0>2d}_{:.4f}".format(tag, region, step, bidx, dice),
),
image,
label,
pred,
)
if self.images and len(image.shape) == 3:
img = make_grid(torch.from_numpy(rescale_array(image, 0, 1)[0]))
lab = make_grid(torch.from_numpy(rescale_array(label, 0, 1)[0]))
pos = rescale_array(output_data["image"][bidx][1].detach().cpu().numpy()[np.newaxis], 0, 1)[0]
neg = rescale_array(output_data["image"][bidx][2].detach().cpu().numpy()[np.newaxis], 0, 1)[0]
pre = make_grid(torch.from_numpy(np.array([rescale_array(pred, 0, 1)[0], pos, neg])))
torchvision.utils.save_image(
tensor=[img, lab, pre],
nrow=3,
pad_value=2,
fp=os.path.join(
self.output_dir,
"{}img_label_pred_{}_{:0>4d}_{:0>2d}_{:.4f}.png".format(tag, region, step, bidx, dice),
),
)
if self.images and len(image.shape) == 4:
samples = {"image": image[0], "label": label[0], "pred": pred[0]}
for sample in samples:
img = np.moveaxis(samples[sample], -3, -1)
img = nib.Nifti1Image(img, np.eye(4))
nib.save(
img,
os.path.join(
self.output_dir, "{}{}_{:0>4d}_{:0>2d}_{:.4f}.nii.gz".format(tag, sample, step, bidx, dice)
),
)
| clara-train-examples-master | PyTorch/NoteBooks/AIAA/deepgrow_3D/custom/deepgrow/handler.py |
import argparse
import json
import logging
import os
import sys
from monai.apps.deepgrow.dataset import create_dataset
from monai.data import partition_dataset
def prepare_datalist(args):
dimensions = args.dimensions
dataset_json = os.path.join(args.output, 'dataset.json')
logging.info('Processing dataset...')
with open(os.path.join(args.dataset_json)) as f:
datalist = json.load(f)
datalist = create_dataset(
datalist=datalist[args.datalist_key],
base_dir=args.dataset_root,
output_dir=args.output,
dimension=dimensions,
pixdim=[1.0] * dimensions,
limit=args.limit,
relative_path=args.relative_path
)
with open(dataset_json, 'w') as fp:
json.dump(datalist, fp, indent=2)
dataset_json = os.path.join(args.output, 'dataset.json')
with open(dataset_json) as f:
datalist = json.load(f)
logging.info('+++ Dataset File: {}'.format(dataset_json))
logging.info('+++ Total Records: {}'.format(len(datalist)))
logging.info('')
train_ds, val_ds = partition_dataset(datalist, ratios=[args.split, (1 - args.split)], shuffle=True, seed=args.seed)
dataset_json = os.path.join(args.output, 'dataset_0.json')
with open(dataset_json, 'w') as fp:
json.dump({'training': train_ds, 'validation': val_ds}, fp, indent=2)
logging.info('*** Dataset File: {}'.format(dataset_json))
logging.info('*** Total Records for Training: {}'.format(len(train_ds)))
logging.info('*** Total Records for Validation: {}'.format(len(val_ds)))
assert len(train_ds) > 0, "Train Dataset/Records is EMPTY"
assert len(val_ds) > 0, "Validation Dataset/Records is EMPTY"
def run(args):
for arg in vars(args):
logging.info('USING:: {} = {}'.format(arg, getattr(args, arg)))
logging.info("")
if not os.path.exists(args.output):
logging.info('output path [{}] does not exist. creating it now.'.format(args.output))
os.makedirs(args.output, exist_ok=True)
prepare_datalist(args)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--seed', type=int, default=42, help='Random Seed')
parser.add_argument('-dims', '--dimensions', type=int, default=3, choices=[2, 3], help='Output Dimension')
parser.add_argument('-d', '--dataset_root', default='/workspace/data/52432', help='Dataset Root Folder')
parser.add_argument('-j', '--dataset_json', default='/workspace/data/52432/dataset.json', help='Dataset JSON File')
parser.add_argument('-k', '--datalist_key', default='training', help='Key in Dataset JSON File')
parser.add_argument('-o', '--output', default='/workspace/data/52432/3D', help='Output path to save processed data')
parser.add_argument('-x', '--split', type=float, default=0.9, help='Ratio to split into training and validation')
parser.add_argument('-t', '--limit', type=int, default=0, help='Limit input records to process; 0 = no limit')
parser.add_argument('-r', '--relative_path', action='store_true', default=False, help='use relative path in output')
args = parser.parse_args()
run(args)
if __name__ == "__main__":
logging.basicConfig(
stream=sys.stdout,
level=logging.INFO,
format='[%(asctime)s.%(msecs)03d][%(levelname)5s] - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
main()
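# Example invocation (editor's sketch; the paths simply follow the defaults declared above):
#   python prepare_dataset.py --dimensions 3 --dataset_root /workspace/data/52432 \
#       --dataset_json /workspace/data/52432/dataset.json --output /workspace/data/52432/3D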
| clara-train-examples-master | PyTorch/NoteBooks/AIAA/deepgrow_3D/custom/deepgrow/prepare_dataset.py |
# Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
from monai.apps.deepgrow.interaction import Interaction
from monai.transforms import Compose
class ClickInteraction(Interaction):
"""
Deepgrow Training/Evaluation iteration method with interactions (simulation of clicks) support for image and label.
Args:
transforms: execute additional transformation during every iteration (before train).
Typically, several Tensor based transforms composed by `Compose`.
max_interactions: maximum number of interactions per iteration
train: training or evaluation
key_probability: field name to fill probability for every interaction
"""
def __init__(self, transforms, max_interactions: int, train: bool, key_probability: str = "probability") -> None:
self.transforms = transforms
self.max_interactions = max_interactions
self.train = train
self.key_probability = key_probability
if not isinstance(self.transforms, Compose):
transforms = []
for t in self.transforms:
transforms.append(self.init_external_class(t))
self.transforms = Compose(transforms)
@staticmethod
def init_external_class(config_dict):
class_args = None if config_dict.get("args") is None else dict(config_dict.get("args"))
class_path = config_dict.get("path", config_dict["name"])
module_name, class_name = class_path.rsplit(".", 1)
m = importlib.import_module(module_name)
c = getattr(m, class_name)
return c(**class_args) if class_args else c()
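# Example config entry consumed by init_external_class (editor's sketch; any MONAI
# dictionary transform and its constructor args could appear here):
#   {"name": "monai.transforms.AddChanneld", "args": {"keys": ["image", "label"]}}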
| clara-train-examples-master | PyTorch/NoteBooks/AIAA/deepgrow_3D/custom/deepgrow/interaction.py |
# Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| clara-train-examples-master | PyTorch/NoteBooks/AIAA/deepgrow_3D/custom/deepgrow/__init__.py |
import json
import xml.etree.ElementTree as ET
import copy
import numpy as np
from skimage.measure import points_in_poly
np.random.seed(0)
class Polygon(object):
"""
Polygon represented as [N, 2] array of vertices
"""
def __init__(self, name, vertices):
"""
Initialize the polygon.
Arguments:
name: string, name of the polygon
vertices: [N, 2] 2D numpy array of int
"""
self._name = name
self._vertices = vertices
def __str__(self):
return self._name
def inside(self, coord):
"""
Determine if a given coordinate is inside the polygon or not.
Arguments:
coord: 2 element tuple of int, e.g. (x, y)
Returns:
bool, if the coord is inside the polygon.
"""
return points_in_poly([coord], self._vertices)[0]
def vertices(self):
return np.array(self._vertices)
class Annotation(object):
"""
Annotation about the regions within WSI in terms of vertices of polygons.
"""
def __init__(self):
self._json_path = ''
self._polygons_positive = []
self._polygons_negative = []
def __str__(self):
return self._json_path
def from_json(self, json_path):
"""
Initialize the annotation from a json file.
Arguments:
json_path: string, path to the json annotation.
"""
self._json_path = json_path
with open(json_path) as f:
annotations_json = json.load(f)
for annotation in annotations_json['positive']:
name = annotation['name']
vertices = np.array(annotation['vertices'])
polygon = Polygon(name, vertices)
self._polygons_positive.append(polygon)
for annotation in annotations_json['negative']:
name = annotation['name']
vertices = np.array(annotation['vertices'])
polygon = Polygon(name, vertices)
self._polygons_negative.append(polygon)
def inside_polygons(self, coord, is_positive):
"""
Determine if a given coordinate is inside the positive/negative
polygons of the annotation.
Arguments:
coord: 2 element tuple of int, e.g. (x, y)
is_positive: bool, inside positive or negative polygons.
Returns:
bool, if the coord is inside the positive/negative polygons of the
annotation.
"""
if is_positive:
polygons = copy.deepcopy(self._polygons_positive)
else:
polygons = copy.deepcopy(self._polygons_negative)
for polygon in polygons:
if polygon.inside(coord):
return True
return False
def polygon_vertices(self, is_positive):
"""
Return the polygon represented as [N, 2] array of vertices
Arguments:
is_positive: bool, return positive or negative polygons.
Returns:
[N, 2] 2D array of int
"""
if is_positive:
return list(map(lambda x: x.vertices(), self._polygons_positive))
else:
return list(map(lambda x: x.vertices(), self._polygons_negative))
class Formatter(object):
"""
Format converter e.g. CAMELYON16 to internal json
"""
    @staticmethod
    def camelyon16xml2json(inxml, outjson):
"""
Convert an annotation of camelyon16 xml format into a json format.
Arguments:
inxml: string, path to the input camelyon16 xml format
outjson: string, path to the output json format
"""
root = ET.parse(inxml).getroot()
annotations_tumor = \
root.findall('./Annotations/Annotation[@PartOfGroup="Tumor"]')
annotations_0 = \
root.findall('./Annotations/Annotation[@PartOfGroup="_0"]')
annotations_1 = \
root.findall('./Annotations/Annotation[@PartOfGroup="_1"]')
annotations_2 = \
root.findall('./Annotations/Annotation[@PartOfGroup="_2"]')
annotations_positive = \
annotations_tumor + annotations_0 + annotations_1
annotations_negative = annotations_2
json_dict = {}
json_dict['positive'] = []
json_dict['negative'] = []
for annotation in annotations_positive:
X = list(map(lambda x: float(x.get('X')),
annotation.findall('./Coordinates/Coordinate')))
Y = list(map(lambda x: float(x.get('Y')),
annotation.findall('./Coordinates/Coordinate')))
vertices = np.round([X, Y]).astype(int).transpose().tolist()
name = annotation.attrib['Name']
json_dict['positive'].append({'name': name, 'vertices': vertices})
for annotation in annotations_negative:
X = list(map(lambda x: float(x.get('X')),
annotation.findall('./Coordinates/Coordinate')))
Y = list(map(lambda x: float(x.get('Y')),
annotation.findall('./Coordinates/Coordinate')))
vertices = np.round([X, Y]).astype(int).transpose().tolist()
name = annotation.attrib['Name']
json_dict['negative'].append({'name': name, 'vertices': vertices})
with open(outjson, 'w') as f:
json.dump(json_dict, f, indent=1)
    @staticmethod
    def vertices2json(outjson, positive_vertices=[], negative_vertices=[]):
json_dict = {}
json_dict['positive'] = []
json_dict['negative'] = []
for i in range(len(positive_vertices)):
name = 'Annotation {}'.format(i)
vertices = positive_vertices[i].astype(int).tolist()
json_dict['positive'].append({'name': name, 'vertices': vertices})
for i in range(len(negative_vertices)):
name = 'Annotation {}'.format(i)
vertices = negative_vertices[i].astype(int).tolist()
json_dict['negative'].append({'name': name, 'vertices': vertices})
with open(outjson, 'w') as f:
json.dump(json_dict, f, indent=1)
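# Example usage (editor's sketch; file names and the (x, y) coordinate are hypothetical):
#   Formatter.camelyon16xml2json('tumor_001.xml', 'tumor_001.json')
#   anno = Annotation()
#   anno.from_json('tumor_001.json')
#   is_tumor = anno.inside_polygons((35000, 48000), is_positive=True)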
| clara-train-examples-master | PyTorch/NoteBooks/DomainExamples/DigitalPathology/MMAR_DP/custom/annotation.py |
import torch
from torchvision import models
class TorchModelFullyConv(torch.nn.Module):
"""Customize TorchVision models to replace fully connected layer by convolutional layer."""
def __init__(self, model_name: str = "resnet18", n_classes: int = 1, pretrained: bool = False):
super().__init__()
model = getattr(models, model_name)(pretrained=pretrained)
# remove last fully connected (FC) layer and adaptive avg pooling
self.features = torch.nn.Sequential(*list(model.children())[:-2])
# add 7x7 avg pooling (in place of adaptive avg pooling)
self.pool = torch.nn.AvgPool2d(7, stride=1)
# add 1x1 conv (it behaves like a FC layer)
self.fc = torch.nn.Conv2d(model.fc.in_features, n_classes, kernel_size=(1, 1))
def forward(self, x):
x = self.features(x)
# apply 7x7 avg pooling
x = self.pool(x)
        # apply the last 1x1 conv layer that acts like a fully connected layer
        x = self.fc(x)
        # squeeze out the class-channel dimension (size 1 when n_classes == 1)
x = x.squeeze(1)
return x
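# Example usage (editor's sketch; batch and patch sizes are hypothetical):
#   model = TorchModelFullyConv("resnet18", n_classes=1)
#   logits = model(torch.rand(8, 3, 224, 224))  # gives shape [8, 1, 1]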
| clara-train-examples-master | PyTorch/NoteBooks/DomainExamples/DigitalPathology/MMAR_DP/custom/models.py |
import os
import sys
from argparse import ArgumentParser
import logging
import json
import time
import numpy as np
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from annotation import Annotation
ROOT_DATA_PATH="/claraDevDay/Data/DP_CAMELYON16/"
ANO_PATH = ROOT_DATA_PATH+"/jsons/"
TRAIN_NORMAL_LOC_PATH = ROOT_DATA_PATH+"/coords/normal_train.txt"
TRAIN_TUMOR_LOC_PATH = ROOT_DATA_PATH+"/coords/tumor_train.txt"
VALID_NORMAL_LOC_PATH = ROOT_DATA_PATH+"/coords/normal_valid.txt"
VALID_TUMOR_LOC_PATH = ROOT_DATA_PATH+"/coords/tumor_valid.txt"
TRAIN_NORMAL_OUT_PATH = ROOT_DATA_PATH+"/LocLabel/normal_train.txt"
TRAIN_TUMOR_OUT_PATH = ROOT_DATA_PATH+"/LocLabel/tumor_train.txt"
VALID_NORMAL_OUT_PATH = ROOT_DATA_PATH+"/LocLabel/normal_valid.txt"
VALID_TUMOR_OUT_PATH = ROOT_DATA_PATH+"/LocLabel/tumor_valid.txt"
IMG_SIZE = 768
SUB_SIZE = 256
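# each 768x768 region is split into a 3x3 grid of 256x256 patches, producing one label per grid cell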
def loclabel_gen(ano_path, loc_path, out_path):
    # use splitext instead of str.strip('.json'), which would also strip leading/trailing '.', 'j', 's', 'o', 'n' characters
    pids = [os.path.splitext(x)[0] for x in os.listdir(ano_path)]
annotations = {}
for pid in pids:
pid_json_path = os.path.join(ano_path, pid + '.json')
anno = Annotation()
anno.from_json(pid_json_path)
annotations[pid] = anno
coords = []
infile = open(loc_path)
for i, line in enumerate(infile):
pid, x_center, y_center = line.strip('\n').split(',')
coords.append((pid, x_center, y_center))
infile.close()
num_sample = len(coords)
print(f"{out_path} Total sample: {num_sample}")
outfile = open(out_path,'w')
for index in range(num_sample):
pid, x_center, y_center = coords[index]
x_center = int(x_center)
y_center = int(y_center)
x_top_left = int(x_center - IMG_SIZE / 2)
y_top_left = int(y_center - IMG_SIZE / 2)
label=[]
for x_idx in range(3):
for y_idx in range(3):
# (x, y) is the center of each patch
x = x_top_left + int((x_idx + 0.5) * SUB_SIZE)
y = y_top_left + int((y_idx + 0.5) * SUB_SIZE)
# get label information according to annotation
if annotations[pid].inside_polygons((x, y), True):
label.append(1)
else:
label.append(0)
# write output
outfile.write(f"{pid.lower()}, {x_center}, {y_center}, {str(label)[1:-1]}\n")
outfile.close()
def main():
loclabel_gen(ANO_PATH, TRAIN_TUMOR_LOC_PATH, TRAIN_TUMOR_OUT_PATH)
loclabel_gen(ANO_PATH, TRAIN_NORMAL_LOC_PATH, TRAIN_NORMAL_OUT_PATH)
loclabel_gen(ANO_PATH, VALID_TUMOR_LOC_PATH, VALID_TUMOR_OUT_PATH)
loclabel_gen(ANO_PATH, VALID_NORMAL_LOC_PATH, VALID_NORMAL_OUT_PATH)
return
if __name__ == "__main__":
main()
| clara-train-examples-master | PyTorch/NoteBooks/DomainExamples/DigitalPathology/MMAR_DP/custom/create_list.py |
import argparse
import logging
import numpy as np
from monai.data.image_reader import WSIReader
from skimage.color import rgb2hsv
from skimage.filters import threshold_otsu
def create_masks(args):
logging.basicConfig(level=logging.INFO)
reader = WSIReader(reader_lib="cuCIM")
img_obj = reader.read(args.wsi_path)
img_rgb, _ = reader.get_data(img_obj, level=args.level)
img_hsv = rgb2hsv(img_rgb.transpose(1, 2, 0))
background_R = img_rgb[0] > threshold_otsu(img_rgb[0])
background_G = img_rgb[1] > threshold_otsu(img_rgb[1])
background_B = img_rgb[2] > threshold_otsu(img_rgb[2])
tissue_RGB = np.logical_not(background_R & background_G & background_B)
tissue_S = img_hsv[..., 1] > threshold_otsu(img_hsv[..., 1])
min_R = img_rgb[0] > args.RGB_min
min_G = img_rgb[1] > args.RGB_min
min_B = img_rgb[2] > args.RGB_min
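    # tissue = saturated in HSV (Otsu), not background in all three RGB channels, and above the RGB_min floor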
tissue_mask = tissue_S & tissue_RGB & min_R & min_G & min_B
np.save(args.npy_path, tissue_mask)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Create tissue masks of WSI and save it in numpy array.")
parser.add_argument(
"wsi_path",
default=None,
metavar="WSI_PATH",
type=str,
help="Path to the directory containing WSI files for testing.",
)
parser.add_argument(
"npy_path",
default=None,
metavar="NPY_PATH",
type=str,
help="Path to the output directory for numpy tissue masks.",
)
parser.add_argument(
"--level",
default=6,
type=int,
help="at which WSI level" " to obtain the mask, default 6",
)
parser.add_argument("--RGB_min", default=50, type=int, help="min value for RGB" " channel, default 50")
args = parser.parse_args()
create_masks(args)
| clara-train-examples-master | PyTorch/NoteBooks/DomainExamples/DigitalPathology/MMAR_DP/custom/tissue_mask.py |
import math
import os
import numpy as np
from monai.data import Dataset, SmartCacheDataset
from skimage.transform import resize
from image_reader import WSIReader
class PatchWSIDataset(Dataset):
"""
Load whole slide images and associated class labels and create patches
"""
def __init__(self, data, region_size, grid_size, patch_size, image_reader_name="CuImage", transform=None):
if type(region_size) == int:
self.region_size = (region_size, region_size)
else:
self.region_size = region_size
if type(grid_size) == int:
self.grid_size = (grid_size, grid_size)
else:
self.grid_size = grid_size
self.sub_region_size = (self.region_size[0] / self.grid_size[0], self.region_size[1] / self.grid_size[1])
self.patch_size = patch_size
self.transform = transform
self.image_base_path = data[0]["image"]
self.samples = self.load_samples(data[0]["label"])
self.image_path_list = {x[0] for x in self.samples}
self.num_samples = len(self.samples)
self.image_reader_name = image_reader_name
self.image_reader = WSIReader(image_reader_name)
self.cu_image_dict = {}
self._fetch_cu_images()
def _fetch_cu_images(self):
for image_path in self.image_path_list:
self.cu_image_dict[image_path] = self.image_reader.read(image_path)
def process_label_row(self, row):
row = row.strip("\n").split(",")
# create full image path
image_name = row[0] + ".tif"
image_path = os.path.join(self.image_base_path, image_name)
# change center locations to upper left location
location = (int(row[1]) - self.region_size[0] // 2, int(row[2]) - self.region_size[1] // 2)
# convert labels to float32 and add empty HxW channel to label
labels = tuple(int(lbl) for lbl in row[3:])
labels = np.array(labels, dtype=np.float32)[:, np.newaxis, np.newaxis]
return image_path, location, labels
def load_samples(self, loc_path):
with open(loc_path) as label_file:
rows = [self.process_label_row(row) for row in label_file.readlines()]
return rows
def __len__(self):
return self.num_samples
def __getitem__(self, index):
image_path, location, labels = self.samples[index]
if self.image_reader_name == 'openslide':
img_obj = self.image_reader.read(image_path)
else:
img_obj = self.cu_image_dict[image_path]
images = self.image_reader.get_data(
img_obj=img_obj,
location=location,
size=self.region_size,
grid_shape=self.grid_size,
patch_size=self.patch_size,
)
samples = [{"image": images[i], "label": labels[i]} for i in range(labels.shape[0])]
if self.transform:
samples = self.transform(samples)
return samples
class SmartCachePatchWSIDataset(SmartCacheDataset):
"""
Add SmartCache functionality to PatchWSIDataset
"""
def __init__(
self,
data,
region_size,
grid_size,
patch_size,
transform,
replace_rate,
cache_num,
cache_rate=1.0,
num_init_workers=None,
num_replace_workers=0,
image_reader_name="CuImage",
):
extractor = PatchWSIDataset(data, region_size, grid_size, patch_size, image_reader_name)
super().__init__(
data=extractor,
transform=transform,
replace_rate=replace_rate,
cache_num=cache_num,
cache_rate=cache_rate,
num_init_workers=num_init_workers,
num_replace_workers=num_replace_workers,
)
class SlidingWindowWSIDataset(Dataset):
"""
Load image patches in a sliding window manner with foreground mask
Parameters include image and mask paths, and patch_size
Output will be at same level as the foreground mask
"""
def __init__(self, data, patch_size, image_reader_name="CuImage", transform=None):
        if isinstance(patch_size, int):
            self.patch_size = (patch_size, patch_size)
        else:
            self.patch_size = patch_size
self.image_reader = WSIReader(image_reader_name)
self.down_ratio = int(np.ceil(self.patch_size[0] / 32) - 6)
self.transform = transform
self.coords = []
self.info = {}
for wsi_sample in data:
image_name, img, num_idx, x_idx, y_idx, level, ratio, mask_dims, image_dims = self._preprocess(wsi_sample)
self.info[image_name] = {
"img": img,
"mask_dims": mask_dims,
"image_dims": image_dims,
"num_idx": num_idx,
"level": level,
"ratio": ratio,
"counter": 0,
}
coords = zip([image_name] * num_idx, x_idx, y_idx)
self.coords.extend(coords)
self.total_n_patches = len(self.coords)
def _preprocess(self, sample):
image_path = sample["image"]
mask_path = sample["label"]
image_name = os.path.splitext(os.path.basename(image_path))[0]
img = self.image_reader.read(image_path)
msk = np.load(mask_path)
dim_y_img, dim_x_img, _ = img.shape
dim_x_msk, dim_y_msk = msk.shape
ratio_x = dim_x_img / dim_x_msk
ratio_y = dim_y_img / dim_y_msk
level_x = math.log2(ratio_x)
if ratio_x != ratio_y:
raise Exception(
"{}: Image/Mask dimension does not match ,"
" dim_x_img / dim_x_msk : {} / {},"
" dim_y_img / dim_y_msk : {} / {}".format(image_name, dim_x_img, dim_x_msk, dim_y_img, dim_y_msk)
)
else:
if not level_x.is_integer():
raise Exception(
"{}: Mask not at regular level (ratio not power of 2),"
" image / mask ratio: {},".format(image_name, ratio_x)
)
else:
ratio = ratio_x
level = level_x
print("{}: Mask at level {}, with ratio {}".format(image_name, int(level), int(ratio)))
print("Downsample ratio {}".format(self.down_ratio))
msk_down = resize(msk, (int(dim_x_msk / self.down_ratio), int(dim_y_msk / self.down_ratio)))
# get all indices for tissue region from the foreground mask
x_idx, y_idx = np.where(msk_down)
# output same size as the foreground mask
# attention: not original wsi image size
self.x_idx = x_idx * self.down_ratio
self.y_idx = y_idx * self.down_ratio
num_idx = len(x_idx)
return image_name, img, num_idx, x_idx, y_idx, level, ratio, (dim_x_msk, dim_y_msk), (dim_x_img, dim_y_img)
def _load_sample(self, index):
"""
Load patch for sliding window inference on WSI
Read ROI with patch_size at patch_loc into a dictionary of {'image': array, "name": str}.
"""
name, x_msk, y_msk = self.coords[index]
ratio = self.info[name]["ratio"]
# convert to image space
x_img = int((x_msk + 0.5) * ratio - self.patch_size[0] / 2)
y_img = int((y_msk + 0.5) * ratio - self.patch_size[1] / 2)
location = (x_img, y_img)
image = self.image_reader.get_data(img_obj=self.info[name]["img"], location=location, size=self.patch_size)
sample = {"image": image, "name": name, "location": (x_msk, y_msk), "ratio": ratio}
return sample
def __len__(self):
return self.total_n_patches
def __getitem__(self, index):
sample = self._load_sample(index)
if self.transform:
sample = self.transform(sample)
return sample
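# Hedged usage sketch (not part of the original module). The WSI directory, the location list file,
# the mask path and the reader name below are placeholders; they only illustrate how the two
# dataset classes are constructed for training and sliding-window inference.
if __name__ == "__main__":
    demo_data = [{"image": "/data/Camelyon16/training", "label": "/data/lists/tumor_train.txt"}]
    train_ds = PatchWSIDataset(
        data=demo_data, region_size=768, grid_size=3, patch_size=224, image_reader_name="openslide"
    )
    print("patch samples per region:", len(train_ds[0]))  # 3x3 grid -> 9 samples

    infer_data = [{"image": "/data/Camelyon16/testing/test_001.tif", "label": "/data/masks/test_001_tissue.npy"}]
    infer_ds = SlidingWindowWSIDataset(data=infer_data, patch_size=224, image_reader_name="openslide")
    print("number of foreground patches:", len(infer_ds))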
| clara-train-examples-master | PyTorch/NoteBooks/DomainExamples/DigitalPathology/MMAR_DP/custom/datasets.py |
import logging
import os
from typing import TYPE_CHECKING, Optional
import numpy as np
import matplotlib.pyplot as plt
from skimage import filters
from monai.utils import exact_version, optional_import
Events, _ = optional_import("ignite.engine", "0.4.2", exact_version, "Events")
if TYPE_CHECKING:
from ignite.engine import Engine
else:
Engine, _ = optional_import("ignite.engine", "0.4.2", exact_version, "Engine")
class ProbMapSaver:
"""
Event handler triggered on completing every iteration to save the probability map
"""
def __init__(
self,
output_dir: str = "./",
        filename: Optional[str] = None,
name: Optional[str] = None,
) -> None:
"""
Args:
output_dir: output Numpy and CSV file directory.
overwrite: whether to overwriting existing CSV file content. If we are not overwriting,
then we check if the results have been previously saved, and load them to the prediction_dict.
name: identifier of logging.logger to use, defaulting to `engine.logger`.
"""
self.logger = logging.getLogger(name)
self._name = name
self.output_dir = output_dir
self.probs_maps = {}
self.levels = {}
self.case_names = []
def attach(self, engine: Engine) -> None:
"""
Args:
engine: Ignite Engine, it can be a trainer, validator or evaluator.
"""
for name, info in engine.data_loader.dataset.info.items():
self.case_names.append(name)
self.probs_maps[name] = np.zeros(info['mask_dims'])
self.levels[name] = info['level']
if self._name is None:
self.logger = engine.logger
if not engine.has_event_handler(self, Events.ITERATION_COMPLETED):
engine.add_event_handler(Events.ITERATION_COMPLETED, self)
if not engine.has_event_handler(self.finalize, Events.COMPLETED):
engine.add_event_handler(Events.COMPLETED, lambda engine: self.finalize())
def __call__(self, engine: Engine) -> None:
"""
This method assumes self.batch_transform will extract metadata from the input batch.
Args:
engine: Ignite Engine, it can be a trainer, validator or evaluator.
"""
names = engine.state.batch['name']
locs = engine.state.batch['location']
preds = engine.state.output['pred']
for i, name in enumerate(names):
self.probs_maps[name][locs[0][i], locs[1][i]] = preds[i]
def finalize(self):
for name in self.case_names:
file_path = os.path.join(self.output_dir, name)
np.save(file_path + '.npy', self.probs_maps[name])
plt.imshow(np.transpose(self.probs_maps[name]))
plt.savefig(file_path + '.png')
self.nms(self.probs_maps[name], file_path, level=self.levels[name])
def nms(self, probs_map, file_path, level, sigma=0.0, prob_thred=0.5, radius=24):
if sigma > 0:
probs_map = filters.gaussian(probs_map, sigma=sigma)
x_shape, y_shape = probs_map.shape
resolution = pow(2, level)
with open(file_path + '.csv', 'w') as outfile:
while np.max(probs_map) > prob_thred:
prob_max = probs_map.max()
max_idx = np.where(probs_map == prob_max)
x_mask, y_mask = max_idx[0][0], max_idx[1][0]
x_wsi = int((x_mask + 0.5) * resolution)
y_wsi = int((y_mask + 0.5) * resolution)
outfile.write('{:0.5f},{},{}'.format(prob_max, x_wsi, y_wsi) + '\n')
x_min = x_mask - radius if x_mask - radius > 0 else 0
x_max = x_mask + radius if x_mask + radius <= x_shape else x_shape
y_min = y_mask - radius if y_mask - radius > 0 else 0
y_max = y_mask + radius if y_mask + radius <= y_shape else y_shape
for x in range(x_min, x_max):
for y in range(y_min, y_max):
probs_map[x, y] = 0
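# Hedged smoke test (not part of the original module): exercise `finalize` directly with a random
# probability map instead of attaching the handler to an Ignite engine. The case name, level and
# output directory below are placeholders.
if __name__ == "__main__":
    saver = ProbMapSaver(output_dir="/tmp")
    saver.case_names.append("demo_case")
    saver.probs_maps["demo_case"] = np.random.rand(64, 64)
    saver.levels["demo_case"] = 6
    saver.finalize()  # writes demo_case.npy, demo_case.png and demo_case.csv under /tmp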
| clara-train-examples-master | PyTorch/NoteBooks/DomainExamples/DigitalPathology/MMAR_DP/custom/handlers.py |
import argparse
import json
import os
import random
TRAIN_NORMAL_FILE = "normal_train.txt"
TRAIN_TUMOR_FILE = "tumor_train.txt"
VALID_NORMAL_FILE = "normal_valid.txt"
VALID_TUMOR_FILE = "tumor_valid.txt"
WSI_IMAGE_FOLDER = "WSI/"
def read_file(file_path):
coords = []
infile = open(file_path)
for i, line in enumerate(infile):
info = line.strip('\n').split(',')
coords.append(info)
infile.close()
return coords
def main():
parser = argparse.ArgumentParser(description="prostate seg json generate")
parser.add_argument("--json_temp",
action="store",
required=True,
help="full path of .json template file")
parser.add_argument("--list_folder",
action="store",
help="path to list folder root")
parser.add_argument("--json_out",
action="store",
required=True,
help="full path of .json output file")
args = parser.parse_args()
json_template = args.json_temp
list_folder = args.list_folder
json_out = args.json_out
    train_normal_path = os.path.join(list_folder, TRAIN_NORMAL_FILE)
    train_tumor_path = os.path.join(list_folder, TRAIN_TUMOR_FILE)
    valid_normal_path = os.path.join(list_folder, VALID_NORMAL_FILE)
    valid_tumor_path = os.path.join(list_folder, VALID_TUMOR_FILE)
train_normal_label = read_file(train_normal_path)
train_tumor_label = read_file(train_tumor_path)
valid_normal_label = read_file(valid_normal_path)
valid_tumor_label = read_file(valid_tumor_path)
with open(json_template) as f:
json_data = json.load(f)
    def append_items(label_rows, split):
        # build one datalist entry per "name,x,y,label..." row and append it to the given split
        for info in label_rows:
            new_item = {
                "image": WSI_IMAGE_FOLDER + info[0] + ".tif",
                "location": [int(info[2]), int(info[1])],
                "label": [int(v) for v in info[3:]],
            }
            json_data[split].append(new_item)

    append_items(train_normal_label, "training")
    append_items(train_tumor_label, "training")
    append_items(valid_normal_label, "validation")
    append_items(valid_tumor_label, "validation")

    random.shuffle(json_data["training"])
    random.shuffle(json_data["validation"])
with open(json_out, 'w') as f:
json.dump(json_data, f, indent=4)
return
if __name__ == "__main__":
main()
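# Hedged usage note (not part of the original script); the template, list folder and output paths
# below are placeholders:
#   python generate_json.py --json_temp config/dataset_template.json \
#       --list_folder /data/lists/ --json_out config/dataset_0.json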
| clara-train-examples-master | PyTorch/NoteBooks/DomainExamples/DigitalPathology/MMAR_DP/custom/generate_json.py |
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 20 14:09:32 2016
@author: Babak Ehteshami Bejnordi
Modified by Ziyue Xu and Behrooz Hashemian
Evaluation code for the Camelyon16 challenge on cancer metastases detecion
"""
import openslide
import numpy as np
import matplotlib.pyplot as plt
from scipy import ndimage as nd
from skimage import measure
import os
import sys
def computeEvaluationMask(maskDIR, resolution, level):
"""Computes the evaluation mask.
Args:
maskDIR: the directory of the ground truth mask
resolution: Pixel resolution of the image at level 0
level: The level at which the evaluation mask is made
Returns:
evaluation_mask
"""
slide = openslide.open_slide(maskDIR)
dims = slide.level_dimensions[level]
pixelarray = np.zeros(dims[0]*dims[1], dtype='uint')
pixelarray = np.array(slide.read_region((0,0), level, dims))
neg = 255 - pixelarray[:,:,0]*255
#plt.subplot(1,2,1)
#plt.imshow(np.transpose(neg))
distance = nd.distance_transform_edt(neg)
#plt.subplot(1,2,2)
#plt.imshow(np.transpose(distance))
#plt.show()
Threshold = 75/(resolution * pow(2, level) * 2) # 75µm is the equivalent size of 5 tumor cells
binary = distance < Threshold
filled_image = nd.morphology.binary_fill_holes(binary)
evaluation_mask = measure.label(filled_image, connectivity = 2)
#plt.imshow(np.transpose(evaluation_mask))
#plt.show()
return evaluation_mask
def computeITCList(evaluation_mask, resolution, level):
"""Compute the list of labels containing Isolated Tumor Cells (ITC)
Description:
A region is considered ITC if its longest diameter is below 200µm.
As we expanded the annotations by 75µm, the major axis of the object
should be less than 275µm to be considered as ITC (Each pixel is
0.243µm*0.243µm in level 0). Therefore the major axis of the object
in level 5 should be less than 275/(2^5*0.243) = 35.36 pixels.
Args:
evaluation_mask: The evaluation mask
resolution: Pixel resolution of the image at level 0
level: The level at which the evaluation mask was made
Returns:
Isolated_Tumor_Cells: list of labels containing Isolated Tumor Cells
"""
max_label = np.amax(evaluation_mask)
properties = measure.regionprops(evaluation_mask)
Isolated_Tumor_Cells = []
threshold = 275/(resolution * pow(2, level))
for i in range(0, max_label):
if properties[i].major_axis_length < threshold:
Isolated_Tumor_Cells.append(i+1)
return Isolated_Tumor_Cells
def readCSVContent(csvDIR):
"""Reads the data inside CSV file
Args:
csvDIR: The directory including all the .csv files containing the results.
Note that the CSV files should have the same name as the original image
Returns:
Probs: list of the Probabilities of the detected lesions
Xcorr: list of X-coordinates of the lesions
Ycorr: list of Y-coordinates of the lesions
"""
Xcorr, Ycorr, Probs = ([] for i in range(3))
csv_lines = open(csvDIR,"r").readlines()
for i in range(len(csv_lines)):
line = csv_lines[i]
elems = line.rstrip().split(',')
Probs.append(float(elems[0]))
Xcorr.append(int(elems[1]))
Ycorr.append(int(elems[2]))
return Probs, Xcorr, Ycorr
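# Illustrative CSV layout consumed by readCSVContent (values are made up): one detection per line,
# no header, formatted as "probability,X,Y" in level-0 pixel coordinates, e.g.
#   0.98231,142768,85342
#   0.87342,63712,112480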
def compute_FP_TP_Probs(Ycorr, Xcorr, Probs, is_tumor, evaluation_mask, Isolated_Tumor_Cells, level):
"""Generates true positive and false positive stats for the analyzed image
Args:
Probs: list of the Probabilities of the detected lesions
Xcorr: list of X-coordinates of the lesions
Ycorr: list of Y-coordinates of the lesions
        is_tumor: A boolean variable which is one when the case contains tumor
evaluation_mask: The evaluation mask
Isolated_Tumor_Cells: list of labels containing Isolated Tumor Cells
level: The level at which the evaluation mask was made
Returns:
FP_probs: A list containing the probabilities of the false positive detections
TP_probs: A list containing the probabilities of the True positive detections
        NumberOfTumors: Number of Tumors in the image (excluding Isolated Tumor Cells)
detection_summary: A python dictionary object with keys that are the labels
of the lesions that should be detected (non-ITC tumors) and values
that contain detection details [confidence score, X-coordinate, Y-coordinate].
Lesions that are missed by the algorithm have an empty value.
FP_summary: A python dictionary object with keys that represent the
false positive finding number and values that contain detection
details [confidence score, X-coordinate, Y-coordinate].
"""
max_label = np.amax(evaluation_mask)
FP_probs = []
TP_probs = np.zeros((max_label,), dtype=np.float32)
detection_summary = {}
FP_summary = {}
for i in range(1,max_label+1):
if i not in Isolated_Tumor_Cells:
label = 'Label ' + str(i)
detection_summary[label] = []
FP_counter = 0
if (is_tumor):
for i in range(0,len(Xcorr)):
HittedLabel = evaluation_mask[int(Ycorr[i]/pow(2, level)), int(Xcorr[i]/pow(2, level))]
if HittedLabel == 0:
FP_probs.append(Probs[i])
key = 'FP ' + str(FP_counter)
FP_summary[key] = [Probs[i], Xcorr[i], Ycorr[i]]
FP_counter+=1
elif HittedLabel not in Isolated_Tumor_Cells:
if (Probs[i]>TP_probs[HittedLabel-1]):
label = 'Label ' + str(HittedLabel)
detection_summary[label] = [Probs[i], Xcorr[i], Ycorr[i]]
TP_probs[HittedLabel-1] = Probs[i]
else:
for i in range(0,len(Xcorr)):
FP_probs.append(Probs[i])
key = 'FP ' + str(FP_counter)
FP_summary[key] = [Probs[i], Xcorr[i], Ycorr[i]]
FP_counter+=1
    num_of_tumors = max_label - len(Isolated_Tumor_Cells)
return FP_probs, TP_probs, num_of_tumors, detection_summary, FP_summary
def computeFROC(FROC_data):
"""Generates the data required for plotting the FROC curve
Args:
FROC_data: Contains the list of TPs, FPs, number of tumors in each image
Returns:
total_FPs: A list containing the average number of false positives
per image for different thresholds
        total_sensitivity: A list containing overall sensitivity of the system
for different thresholds
"""
unlisted_FPs = [item for sublist in FROC_data[1] for item in sublist]
unlisted_TPs = [item for sublist in FROC_data[2] for item in sublist]
total_FPs, total_TPs = [], []
all_probs = sorted(set(unlisted_FPs + unlisted_TPs))
for Thresh in all_probs[1:]:
total_FPs.append((np.asarray(unlisted_FPs) >= Thresh).sum())
total_TPs.append((np.asarray(unlisted_TPs) >= Thresh).sum())
total_FPs.append(0)
total_TPs.append(0)
total_FPs = np.asarray(total_FPs)/float(len(FROC_data[0]))
total_sensitivity = np.asarray(total_TPs)/float(sum(FROC_data[3]))
return total_FPs, total_sensitivity
def plotFROC(total_FPs, total_sensitivity):
"""Plots the FROC curve
Args:
total_FPs: A list containing the average number of false positives
per image for different thresholds
        total_sensitivity: A list containing overall sensitivity of the system
for different thresholds
Returns:
-
"""
fig = plt.figure()
plt.xlabel('Average Number of False Positives', fontsize=12)
plt.ylabel('Metastasis detection sensitivity', fontsize=12)
fig.suptitle('Free response receiver operating characteristic curve', fontsize=12)
plt.plot(total_FPs, total_sensitivity, '-', color='#000000')
plt.show()
if __name__ == "__main__":
mask_folder = sys.argv[1]
result_folder = sys.argv[2]
result_file_list = []
result_file_list += [each for each in os.listdir(result_folder) if each.endswith('.csv')]
EVALUATION_MASK_LEVEL = 5 # Image level at which the evaluation is done
L0_RESOLUTION = 0.243 # pixel resolution at level 0
    FROC_data = np.zeros((4, len(result_file_list)), dtype=object)
    FP_summary = np.zeros((2, len(result_file_list)), dtype=object)
    detection_summary = np.zeros((2, len(result_file_list)), dtype=object)
ground_truth_test = []
ground_truth_test += [each[0:8] for each in os.listdir(mask_folder) if each.endswith('.tif')]
ground_truth_test = set(ground_truth_test)
caseNum = 0
for case in result_file_list:
print('Evaluating Performance on image:', case[0:-4])
sys.stdout.flush()
csvDIR = os.path.join(result_folder, case)
Probs, Xcorr, Ycorr = readCSVContent(csvDIR)
is_tumor = case[0:-4] in ground_truth_test
if (is_tumor):
maskDIR = os.path.join(mask_folder, case[0:-4]) + '.tif'
evaluation_mask = computeEvaluationMask(maskDIR, L0_RESOLUTION, EVALUATION_MASK_LEVEL)
ITC_labels = computeITCList(evaluation_mask, L0_RESOLUTION, EVALUATION_MASK_LEVEL)
else:
evaluation_mask = 0
ITC_labels = []
FROC_data[0][caseNum] = case
FP_summary[0][caseNum] = case
detection_summary[0][caseNum] = case
FROC_data[1][caseNum], FROC_data[2][caseNum], FROC_data[3][caseNum], detection_summary[1][caseNum], FP_summary[1][caseNum] = compute_FP_TP_Probs(Ycorr, Xcorr, Probs, is_tumor, evaluation_mask, ITC_labels, EVALUATION_MASK_LEVEL)
caseNum += 1
# Compute FROC curve
total_FPs, total_sensitivity = computeFROC(FROC_data)
# plot FROC curve
# plotFROC(total_FPs, total_sensitivity)
eval_threshold = [.25, .5, 1, 2, 4, 8]
eval_TPs = np.interp(eval_threshold, total_FPs[::-1], total_sensitivity[::-1])
#for i in range(len(eval_threshold)):
# print('Avg FP = ', str(eval_threshold[i]))
# print('Sensitivity = ', str(eval_TPs[i]))
    print('Avg Sensitivity = ', np.mean(eval_TPs))
| clara-train-examples-master | PyTorch/NoteBooks/DomainExamples/DigitalPathology/MMAR_DP/custom/froc.py |
from typing import List, Optional, Sequence, Tuple, Type, Union
import numpy as np
from monai.data.image_reader import ImageReader
from monai.data.utils import is_supported_format
from monai.utils import ensure_tuple, optional_import
cuimage, has_cci = optional_import("cuimage")
openslide, has_osl = optional_import("openslide")
class WSIReader(ImageReader):
"""
Read whole slide imaging
"""
    def __init__(self, wsi_reader_name: str = "CuImage"):
        super().__init__()
        self.wsi_reader_name = wsi_reader_name.lower()
        # accept both the "CuImage" default and the older "CuClaraImage" name for the cuCIM-based reader
        if self.wsi_reader_name in ("cuimage", "cuclaraimage"):
            self.wsi_reader = cuimage.CuImage
            print('> CuImage is being used.')
        elif self.wsi_reader_name == "openslide":
            self.wsi_reader = openslide.OpenSlide
            print('> OpenSlide is being used.')
        else:
            raise ValueError('`wsi_reader_name` should be "CuImage", "CuClaraImage" or "OpenSlide"')
def verify_suffix(self, filename: Union[Sequence[str], str]) -> bool:
"""
Verify whether the specified file or files format is supported by WSI reader.
Args:
filename: file name or a list of file names to read.
if a list of files, verify all the suffixes.
"""
return is_supported_format(filename, ["tif", "tiff"])
def read(self, data: Union[Sequence[str], str, np.ndarray], **kwargs):
"""
Read image data from specified file or files.
Note that the returned object is CuImage or list of CuImage objects.
Args:
data: file name or a list of file names to read.
"""
img_: List = []
filenames: Sequence[str] = ensure_tuple(data)
for name in filenames:
img = self.wsi_reader(name)
img_.append(img)
return img_ if len(filenames) > 1 else img_[0]
def get_data(
self,
img_obj,
location: Tuple = (0, 0),
size: Optional[Tuple] = None,
level: int = 0,
dtype: Type = np.uint8,
grid_shape: Union[Tuple[int, int], int] = (1, 1),
patch_size: Optional[Union[Tuple[int, int], int]] = None,
):
"""
Extract regions as numpy array from WSI image and return them.
Args:
            img_obj: a wsi_reader object loaded from a file, or list of CuImage objects
location: (x_min, y_min) tuple giving the top left pixel in the level 0 reference frame,
or list of tuples (default=(0, 0))
size: (width, height) tuple giving the region size, or list of tuples (default=(wsi_width, wsi_height))
This is the size of image at the given level (`level`)
level: the level number, or list of level numbers (default=0)
"""
if size is None:
if location == (0, 0):
# the maximum size is set to WxH
size = (img_obj.shape[1] // (2 ** level), img_obj.shape[0] // (2 ** level))
print(f"Size is set to maximum size at level={level}: {size}")
else:
print("Size need to be provided!")
return
region = self._extract_region(img_obj, location=location, size=size, level=level, dtype=dtype)
patches = self._extract_patches(region, patch_size=patch_size, grid_shape=grid_shape, dtype=dtype)
return patches
def _extract_region(
self,
img_obj,
location: Tuple = (0, 0),
size: Optional[Tuple] = None,
level: int = 0,
dtype: Type = np.uint8,
):
region = img_obj.read_region(location=location, size=size, level=level)
if self.wsi_reader_name == "openslide":
region = region.convert("RGB")
# convert to numpy
region = np.asarray(region, dtype=dtype)
        # cuClaraImage/OpenSlide: (H x W x C) -> torch image: (C x H x W)
region = region.transpose((2, 0, 1))
return region
def _extract_patches(
self,
        region: np.ndarray,
grid_shape: Union[Tuple[int, int], int] = (1, 1),
patch_size: Optional[Union[Tuple[int, int], int]] = None,
dtype: Type = np.uint8,
):
        if patch_size is None and grid_shape == (1, 1):
            return region
        if isinstance(grid_shape, int):
            grid_shape = (grid_shape, grid_shape)
        n_patches = np.prod(grid_shape)
        region_size = region.shape[1:]
        if patch_size is None:
            patch_size = (region_size[0] // grid_shape[0], region_size[1] // grid_shape[1])
        elif isinstance(patch_size, int):
            patch_size = (patch_size, patch_size)
# split the region into patches on the grid and center crop them to patch size
flat_patch_grid = np.zeros((n_patches, 3, patch_size[0], patch_size[1]), dtype=dtype)
start_points = [
np.round(region_size[i] * (0.5 + np.arange(grid_shape[i])) / grid_shape[i] - patch_size[i] / 2).astype(int)
for i in range(2)
]
idx = 0
for y_start in start_points[1]:
for x_start in start_points[0]:
x_end = x_start + patch_size[0]
y_end = y_start + patch_size[1]
flat_patch_grid[idx] = region[:, x_start:x_end, y_start:y_end]
idx += 1
return flat_patch_grid
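# Hedged usage sketch (not part of the original module). The WSI path below is a placeholder; the
# call extracts a 3x3 grid of 224x224 patches from a 768x768 region at level 0.
if __name__ == "__main__":
    reader = WSIReader("OpenSlide")
    wsi = reader.read("/data/Camelyon16/training/tumor/tumor_001.tif")
    patches = reader.get_data(wsi, location=(10000, 20000), size=(768, 768), grid_shape=(3, 3), patch_size=224)
    print(patches.shape)  # expected: (9, 3, 224, 224)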
| clara-train-examples-master | PyTorch/NoteBooks/DomainExamples/DigitalPathology/MMAR_DP/custom/image_reader.py |
# SPDX-License-Identifier: Apache-2.0
import json
import logging
import os
import torch
import torch.distributed as dist
from monai.data import (
CacheDataset,
DataLoader,
load_decathlon_datalist,
partition_dataset,
)
from monai.engines import SupervisedEvaluator, SupervisedTrainer
from monai.handlers import (
CheckpointSaver,
LrScheduleHandler,
MeanDice,
StatsHandler,
TensorBoardStatsHandler,
ValidationHandler,
)
from monai.inferers import SimpleInferer, SlidingWindowInferer
from monai.losses import DiceLoss
from monai.networks.layers import Norm
from monai.networks.nets import UNet
from monai.transforms import (
Activationsd,
Spacingd,
AsDiscreted,
Compose,
CropForegroundd,
EnsureChannelFirstd,
LoadImaged,
RandCropByPosNegLabeld,
RandShiftIntensityd,
ScaleIntensityRanged,
ToTensord,
)
from torch.nn.parallel import DistributedDataParallel
class TrainConfiger:
"""
This class is used to config the necessary components of train and evaluate engines
for MONAI trainer.
Please check the implementation of `SupervisedEvaluator` and `SupervisedTrainer`
from `monai.engines` and determine which components can be used.
Args:
config_root: root folder path of config files.
wf_config_file_name: json file name of the workflow config file.
"""
def __init__(
self,
config_root: str,
wf_config_file_name: str,
local_rank: int = 0,
):
with open(os.path.join(config_root, wf_config_file_name)) as file:
wf_config = json.load(file)
self.wf_config = wf_config
self.max_epochs = wf_config["max_epochs"]
self.learning_rate = wf_config["learning_rate"]
self.data_list_file_path = wf_config["data_list_file_path"]
self.val_interval = wf_config["val_interval"]
self.ckpt_dir = wf_config["ckpt_dir"]
self.save_interval = wf_config["save_interval"]
self.amp = wf_config["amp"]
self.use_gpu = wf_config["use_gpu"]
self.multi_gpu = wf_config["multi_gpu"]
self.local_rank = local_rank
def set_device(self):
if self.multi_gpu:
# initialize distributed training
dist.init_process_group(backend="nccl", init_method="env://")
device = torch.device(f"cuda:{self.local_rank}")
torch.cuda.set_device(device)
else:
device = torch.device("cuda" if self.use_gpu else "cpu")
self.device = device
def configure(self):
self.set_device()
network = UNet(
dimensions=3,
in_channels=1,
out_channels=2,
channels=(16, 32, 64, 128, 256),
strides=(2, 2, 2, 2),
num_res_units=2,
norm=Norm.BATCH,
).to(self.device)
if self.multi_gpu:
network = DistributedDataParallel(
module=network,
device_ids=[self.device],
find_unused_parameters=False,
)
train_transforms = Compose(
[
LoadImaged(keys=("image", "label")),
EnsureChannelFirstd(keys=("image", "label")),
Spacingd(keys=("image", "label"),
pixdim=[1.0, 1.0, 1.0],
mode=["bilinear", "nearest"]
),
ScaleIntensityRanged(
keys="image",
a_min=-57,
a_max=164,
b_min=0.0,
b_max=1.0,
clip=True,
),
CropForegroundd(keys=("image", "label"), source_key="image"),
RandCropByPosNegLabeld(
keys=("image", "label"),
label_key="label",
spatial_size=(96, 96, 96),
pos=1,
neg=1,
num_samples=4,
image_key="image",
image_threshold=0,
),
RandShiftIntensityd(keys="image", offsets=0.1, prob=0.5),
ToTensord(keys=("image", "label")),
]
)
train_datalist = load_decathlon_datalist(
self.data_list_file_path, True, "training"
)
if self.multi_gpu:
train_datalist = partition_dataset(
data=train_datalist,
shuffle=True,
num_partitions=dist.get_world_size(),
even_divisible=True,
)[dist.get_rank()]
train_ds = CacheDataset(
data=train_datalist,
transform=train_transforms,
cache_num=32,
cache_rate=1.0,
num_workers=4,
)
train_data_loader = DataLoader(
train_ds,
batch_size=2,
shuffle=True,
num_workers=4,
)
val_transforms = Compose(
[
LoadImaged(keys=("image", "label")),
EnsureChannelFirstd(keys=("image", "label")),
ScaleIntensityRanged(
keys="image",
a_min=-57,
a_max=164,
b_min=0.0,
b_max=1.0,
clip=True,
),
CropForegroundd(keys=("image", "label"), source_key="image"),
ToTensord(keys=("image", "label")),
]
)
val_datalist = load_decathlon_datalist(
self.data_list_file_path, True, "validation"
)
val_ds = CacheDataset(val_datalist, val_transforms, 9, 0.0, 4)
val_data_loader = DataLoader(
val_ds,
batch_size=1,
shuffle=False,
num_workers=4,
)
post_transform = Compose(
[
Activationsd(keys="pred", softmax=True),
AsDiscreted(
keys=["pred", "label"],
argmax=[True, False],
to_onehot=True,
n_classes=2,
),
]
)
# metric
key_val_metric = {
"val_mean_dice": MeanDice(
include_background=False,
output_transform=lambda x: (x["pred"], x["label"]),
device=self.device,
)
}
val_handlers = [
StatsHandler(output_transform=lambda x: None),
CheckpointSaver(
save_dir=self.ckpt_dir,
save_dict={"model": network},
save_key_metric=True,
),
TensorBoardStatsHandler(
log_dir=self.ckpt_dir, output_transform=lambda x: None
),
]
self.eval_engine = SupervisedEvaluator(
device=self.device,
val_data_loader=val_data_loader,
network=network,
inferer=SlidingWindowInferer(
roi_size=[160, 160, 160],
sw_batch_size=4,
overlap=0.5,
),
post_transform=post_transform,
key_val_metric=key_val_metric,
val_handlers=val_handlers,
amp=self.amp,
)
optimizer = torch.optim.Adam(network.parameters(), self.learning_rate)
loss_function = DiceLoss(to_onehot_y=True, softmax=True)
lr_scheduler = torch.optim.lr_scheduler.StepLR(
optimizer, step_size=5000, gamma=0.1
)
train_handlers = [
LrScheduleHandler(lr_scheduler=lr_scheduler, print_lr=True),
ValidationHandler(
validator=self.eval_engine, interval=self.val_interval, epoch_level=True
),
StatsHandler(tag_name="train_loss", output_transform=lambda x: x["loss"]),
TensorBoardStatsHandler(
log_dir=self.ckpt_dir,
tag_name="train_loss",
output_transform=lambda x: x["loss"],
),
]
self.train_engine = SupervisedTrainer(
device=self.device,
max_epochs=self.max_epochs,
train_data_loader=train_data_loader,
network=network,
optimizer=optimizer,
loss_function=loss_function,
inferer=SimpleInferer(),
post_transform=post_transform,
key_train_metric=None,
train_handlers=train_handlers,
amp=self.amp,
)
if self.local_rank > 0:
self.train_engine.logger.setLevel(logging.WARNING)
self.eval_engine.logger.setLevel(logging.WARNING)
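# Hedged usage sketch (not part of the original module): the config folder and file name are
# placeholders for an MMAR-style workflow config containing the keys read in __init__, and a
# single-GPU, non-federated setup is assumed.
if __name__ == "__main__":
    configer = TrainConfiger(config_root="config", wf_config_file_name="config_train.json", local_rank=0)
    configer.configure()
    configer.train_engine.run()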
| clara-train-examples-master | PyTorch/NoteBooks/FL/adminMMAR_BYOT_monai/custom/train_configer.py |
# SPDX-License-Identifier: Apache-2.0
import logging
import torch.distributed as dist
from nvflare.apis.event_type import EventType
from nvflare.apis.fl_constant import FLConstants, ShareableKey
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable
from nvflare.apis.trainer import Trainer
from nvflare.common.signal import Signal
from nvflare.utils.fed_utils import generate_failure
from train_configer import TrainConfiger
from utils import (
IterAggregateHandler,
MONAIModelManager,
TrainContext,
get_lr_values,
set_engine_state,
)
class MONAITrainer(Trainer):
"""
This class implements a MONAI based trainer that can be used for Federated Learning.
Args:
aggregation_epochs: the number of training epochs for a round.
This parameter only works when `aggregation_iters` is 0. Defaults to 1.
aggregation_iters: the number of training iterations for a round.
If the value is larger than 0, the trainer will use iteration based aggregation
rather than epoch based aggregation. Defaults to 0.
"""
def __init__(self, aggregation_epochs: int = 1, aggregation_iters: int = 0):
super().__init__()
self.aggregation_epochs = aggregation_epochs
self.aggregation_iters = aggregation_iters
self.model_manager = MONAIModelManager()
self.logger = logging.getLogger(self.__class__.__name__)
self.logger.info("===========-------------------in init")
def _initialize_trainer(self, fl_ctx: FLContext):
"""
The trainer's initialization function. At the beginning of a FL experiment,
the train and evaluate engines, as well as train context and FL context
should be initialized.
"""
# Initialize train and evaluation engines.
self.logger.info("===========-------------------in _initialize_trainer")
config_root = fl_ctx.get_prop(FLConstants.TRAIN_ROOT)
fl_args = fl_ctx.get_prop(FLConstants.ARGS)
conf = TrainConfiger(
config_root=config_root,
wf_config_file_name=fl_args.train_config,
local_rank=fl_args.local_rank,
)
conf.configure()
self.train_engine = conf.train_engine
self.eval_engine = conf.eval_engine
self.multi_gpu = conf.multi_gpu
# for iterations based aggregation, the train engine should attach
# the following handler.
if self.aggregation_iters > 0:
IterAggregateHandler(interval=self.aggregation_iters).attach(
self.train_engine
)
# Instantiate a train context class. This instance is used to
# save training related information such as current epochs, iterations
# and the learning rate.
self.train_ctx = TrainContext()
self.train_ctx.initial_learning_rate = get_lr_values(
self.train_engine.optimizer
)
# Initialize the FL context.
fl_ctx.set_prop(FLConstants.MY_RANK, self.train_engine.state.rank)
fl_ctx.set_prop(FLConstants.MODEL_NETWORK, self.train_engine.network)
fl_ctx.set_prop(FLConstants.MULTI_GPU, self.multi_gpu)
fl_ctx.set_prop(FLConstants.DEVICE, self.train_engine.state.device)
def handle_event(self, event_type: str, fl_ctx: FLContext):
"""
This function is an extended function from the super class.
It is used to perform the handler process based on the
event_type. At the start point of a FL experiment, necessary
components should be initialized. At the end of the experiment,
the running engines should be terminated.
Args:
event_type: the type of event that will be fired. In MONAITrainer,
only `START_RUN` and `END_RUN` need to be handled.
fl_ctx: an `FLContext` object.
"""
if event_type == EventType.START_RUN:
self._initialize_trainer(fl_ctx)
elif event_type == EventType.END_RUN:
try:
self.train_engine.terminate()
self.eval_engine.terminate()
except BaseException as e:
self.logger.info(f"exception in closing fitter {e}")
def train(
self, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal
) -> Shareable:
"""
This function is an extended function from the super class.
As a supervised learning based trainer, the train function will run
evaluate and train engines based on model weights from `shareable`.
        After finishing training, a new `Shareable` object will be submitted
        to the server for aggregation.
Args:
            shareable: the `Shareable` object received from the server.
            fl_ctx: the `FLContext` object received from the server.
abort_signal: if triggered, the training will be aborted.
Returns:
a new `Shareable` object to be submitted to server for aggregation.
"""
# check abort signal
self.logger.info(f"MonaiTrainer abort signal: {abort_signal.triggered}")
if abort_signal.triggered:
self.finalize(fl_ctx)
shareable = generate_failure(fl_ctx=fl_ctx, reason="abort signal triggered")
return shareable
        # retrieve the model weights from the shareable
if self.train_engine.state.rank == 0:
model_weights = shareable[ShareableKey.MODEL_WEIGHTS]
            # load the received model weights into the network (saved in fl_ctx)
self.model_manager.assign_current_model(model_weights, fl_ctx)
        # for multi-gpu training, only the rank 0 process receives the model weights.
# Thus, it should be broadcasted to all processes.
if self.multi_gpu:
net = fl_ctx.get_prop(FLConstants.MODEL_NETWORK)
for _, v in net.state_dict().items():
dist.broadcast(v, src=0)
# set engine state parameters, like number of training epochs/iterations.
self.train_engine = set_engine_state(
self.train_engine, self.aggregation_epochs, self.aggregation_iters
)
# get current epoch and iteration when a round starts
self.train_ctx.epoch_of_start_time = self.train_engine.state.epoch
self.train_ctx.iter_of_start_time = self.train_engine.state.iteration
# execute validation at the beginning of every round
self.eval_engine.run(self.train_engine.state.epoch + 1)
self.train_ctx.fl_init_validation_metric = self.eval_engine.state.metrics.get(
self.eval_engine.state.key_metric_name, -1
)
# record iteration and epoch data before training
starting_iters = self.train_engine.state.iteration
starting_epochs = self.train_engine.state.epoch
self.train_engine.run()
# calculate current iteration and epoch data after training
self.train_ctx.current_iters = (
self.train_engine.state.iteration - starting_iters
)
self.train_ctx.current_executed_epochs = (
self.train_engine.state.epoch - starting_epochs
)
# create a new `Shareable` object
if self.train_engine.state.rank == 0:
self.train_ctx.set_context(self.train_engine, self.eval_engine)
shareable = self.model_manager.generate_shareable(
self.train_ctx,
fl_ctx,
)
# update train context into FL context.
fl_ctx.set_prop(FLConstants.TRAIN_CONTEXT, self.train_ctx)
return shareable
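# Hedged note (not part of the original module): in a federated run the NVFlare client constructs
# this trainer from the MMAR config; the standalone construction below only exercises the constructor.
if __name__ == "__main__":
    trainer = MONAITrainer(aggregation_epochs=1, aggregation_iters=0)
    print(trainer.aggregation_epochs, trainer.aggregation_iters)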
| clara-train-examples-master | PyTorch/NoteBooks/FL/adminMMAR_BYOT_monai/custom/monai_trainer.py |
# SPDX-License-Identifier: Apache-2.0
import logging
import math
from typing import Dict
import numpy as np
import torch
from nvflare.apis.fl_constant import FLConstants, ShareableKey, ShareableValue
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable
from ignite.engine import Engine, Events
from torch.optim import Optimizer
class TrainContext:
"""
Train Context class contains training related parameters/variables,
such as learning rate, number of gpus and current training iterations.
"""
def __init__(self):
self.initial_learning_rate = 0
self.current_learning_rate = 0
self.current_iters = 0
self.current_executed_epochs = 0
self.fl_init_validation_metric = 0
self.epoch_of_start_time = 0
self.iter_of_start_time = 0
def set_context(self, train_engine: Engine, eval_engine: Engine):
"""
This function is usually called after train engine has finished running.
The variables that updated here will add to the shareable object and then
submit to server. You can add other variables in this function if they are
needed to be shared.
"""
self.current_learning_rate = get_lr_values(train_engine.optimizer)
class MONAIModelManager:
def __init__(self):
self.logger = logging.getLogger("ModelShareableManager")
def assign_current_model(
self, model_weights: Dict[str, np.ndarray], fl_ctx: FLContext
):
"""
This function is used to load provided weights for the network saved
in FL context.
"""
net = fl_ctx.get_prop(FLConstants.MODEL_NETWORK)
if fl_ctx.get_prop(FLConstants.MULTI_GPU):
net = net.module
local_var_dict = net.state_dict()
model_keys = model_weights.keys()
for var_name in local_var_dict:
if var_name in model_keys:
weights = model_weights[var_name]
try:
local_var_dict[var_name] = torch.as_tensor(weights)
except Exception as e:
raise ValueError(
"Convert weight from {} failed with error: {}".format(
var_name, str(e)
)
)
net.load_state_dict(local_var_dict)
def extract_model(self, fl_ctx: FLContext) -> Dict[str, np.ndarray]:
"""
        This function is used to extract weights of the network saved in FL
context.
The extracted weights will be converted into a numpy array based dict.
"""
net = fl_ctx.get_prop(FLConstants.MODEL_NETWORK)
if fl_ctx.get_prop(FLConstants.MULTI_GPU):
net = net.module
local_state_dict = net.state_dict()
local_model_dict = {}
for var_name in local_state_dict:
try:
local_model_dict[var_name] = local_state_dict[var_name].cpu().numpy()
except Exception as e:
raise ValueError(
"Convert weight from {} failed with error: {}".format(
var_name, str(e)
)
)
return local_model_dict
def generate_shareable(self, train_ctx: TrainContext, fl_ctx: FLContext):
"""
This function is used to generate a shareable instance according to
the train context and FL context.
A Shareable instance can not only contain model weights, but also
some additional information that clients want to share. These information
should be added into ShareableKey.META.
"""
        # add the initial metric into the metadata. You can also add other parameters.
meta_data = {}
meta_data[FLConstants.INITIAL_METRICS] = train_ctx.fl_init_validation_metric
meta_data[FLConstants.CURRENT_LEARNING_RATE] = train_ctx.current_learning_rate
shareable = Shareable()
shareable[ShareableKey.TYPE] = ShareableValue.TYPE_WEIGHT_DIFF
shareable[ShareableKey.DATA_TYPE] = ShareableValue.DATA_TYPE_UNENCRYPTED
shareable[ShareableKey.MODEL_WEIGHTS] = self.extract_model(fl_ctx)
shareable[ShareableKey.META] = meta_data
return shareable
class IterAggregateHandler:
"""
This class implements an event handler for iteration based aggregation.
"""
def __init__(self, interval: int):
self.interval = interval
def attach(self, engine: Engine):
engine.add_event_handler(Events.ITERATION_COMPLETED(every=self.interval), self)
def __call__(self, engine: Engine):
engine.terminate()
# save current iteration for next round
engine.state.dataloader_iter = engine._dataloader_iter
if engine.state.iteration % engine.state.epoch_length == 0:
# if current iteration is end of 1 epoch, manually trigger epoch completed event
engine._fire_event(Events.EPOCH_COMPLETED)
def get_lr_values(optimizer: Optimizer):
"""
This function is used to get the learning rates of the optimizer.
"""
return [group["lr"] for group in optimizer.state_dict()["param_groups"]]
def set_engine_state(engine: Engine, aggregation_epochs: int, aggregation_iters: int):
"""
This function is used to set the engine's state parameters according to
the aggregation ways (iteration based or epoch based).
Args:
engine: the engine that to be processed.
aggregation_epochs: the number of epochs before aggregation.
This parameter only works when `aggregation_iters` is 0.
aggregation_iters: the number of iterations before aggregation.
If the value is larger than 0, the engine will use iteration based aggregation
rather than epoch based aggregation.
"""
if aggregation_iters > 0:
next_aggr_iter = engine.state.iteration + aggregation_iters
engine.state.max_epochs = math.ceil(next_aggr_iter / engine.state.epoch_length)
previous_iter = engine.state.iteration % engine.state.epoch_length
if engine.state.iteration > 0 and previous_iter != 0:
# init to continue from previous epoch
engine.state.epoch -= 1
if hasattr(engine.state, "dataloader_iter"):
# initialize to continue from previous iteration
engine._init_iter.append(previous_iter)
engine._dataloader_iter = engine.state.dataloader_iter
else:
engine.state.max_epochs = engine.state.epoch + aggregation_epochs
return engine
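# Hedged self-check (not part of the original module): get_lr_values on a toy optimizer.
if __name__ == "__main__":
    net = torch.nn.Linear(4, 2)
    optimizer = torch.optim.Adam(net.parameters(), lr=1e-3)
    print(get_lr_values(optimizer))  # -> [0.001]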
| clara-train-examples-master | PyTorch/NoteBooks/FL/adminMMAR_BYOT_monai/custom/utils.py |
from typing import Tuple
from nvflare.apis.aggregator import Aggregator
from nvflare.apis.fl_constant import ShareableKey, ShareableValue
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable
"""
This example shows a aggregator that just does simple averaging of submitted
weight diffs from clients.
The accept method is called every time a client's submission is received;
The aggregate method is called at the end of the round after all submissions are received.
"""
class MyJustInTimeAggregator(Aggregator):
def __init__(self):
"""Perform simple averaging aggregation
"""
super().__init__()
self.total = dict()
self.counts = dict()
def handle_event(self, event_type: str, fl_ctx: FLContext):
return True
def reset_stats(self):
self.total = dict()
self.counts = dict()
def accept(self, shareable: Shareable, fl_ctx: FLContext) -> Tuple[bool, bool]:
"""Store shareable and update aggregator's internal state
This method is called to accept a client's submitted training result (weight diff)
Args:
shareable: information from client. It contains weight diff
fl_ctx: context provided by workflow. You can get name of the submitting client
from this context, among other things.
Returns:
The first boolean indicates if this shareable is accepted.
The second boolean indicates if aggregate can be called.
"""
assert (
shareable.get(ShareableKey.TYPE, None) == ShareableValue.TYPE_WEIGHT_DIFF
), f"{self._name} support weight difference type shareable only"
assert (
shareable.get(ShareableKey.DATA_TYPE, None) == ShareableValue.DATA_TYPE_UNENCRYPTED
), f"{self._name} support clear datatype shareable only"
aggr_data = shareable[ShareableKey.MODEL_WEIGHTS]
for k, v in aggr_data.items():
current_total = self.total.get(k, None)
if current_total is None:
self.total[k] = v
self.counts[k] = 1
else:
self.total[k] = current_total + v
self.counts[k] = self.counts[k] + 1
return True, False
def aggregate(self, fl_ctx: FLContext) -> Shareable:
"""Called when workflow determines to generate shareable to send back to clients
Args:
fl_ctx (FLContext): context provided by workflow
Returns:
Shareable: the weighted mean of accepted shareables from clients
"""
aggregated_dict = dict()
for k, v in self.total.items():
aggregated_dict[k] = v / self.counts[k]
self.reset_stats()
shareable = Shareable()
shareable[ShareableKey.TYPE] = ShareableValue.TYPE_WEIGHT_DIFF
shareable[ShareableKey.DATA_TYPE] = ShareableValue.DATA_TYPE_UNENCRYPTED
shareable[ShareableKey.MODEL_WEIGHTS] = aggregated_dict
return shareable
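# Hedged smoke test (not part of the original example): feed two fake weight diffs through
# accept() and verify aggregate() returns their element-wise mean. Assumes the nvflare imports
# above resolve; fl_ctx is unused by this aggregator, so None is passed.
if __name__ == "__main__":
    import numpy as np

    aggregator = MyJustInTimeAggregator()
    for offset in (0.0, 2.0):
        submission = Shareable()
        submission[ShareableKey.TYPE] = ShareableValue.TYPE_WEIGHT_DIFF
        submission[ShareableKey.DATA_TYPE] = ShareableValue.DATA_TYPE_UNENCRYPTED
        submission[ShareableKey.MODEL_WEIGHTS] = {"layer.weight": np.ones(3) + offset}
        aggregator.accept(submission, fl_ctx=None)
    result = aggregator.aggregate(fl_ctx=None)
    print(result[ShareableKey.MODEL_WEIGHTS]["layer.weight"])  # -> [2. 2. 2.]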
| clara-train-examples-master | PyTorch/NoteBooks/FL/adminMMAR_privacy/custom/BYO_Aggregator.py |
# SPDX-License-Identifier: Apache-2.0
from queue import PriorityQueue
from random import randint, uniform
from automl.components.controllers.controller import Controller
from automl.defs import Context, Recommendation, Outcome, SearchResult
from automl.components.handlers.handler import Handler
from automl.defs import Context, ContextKey, Status
import mlflow
import threading
class RandomController(Controller):
def __init__(self, max_rounds=1000):
Controller.__init__(self)
self.current_rec_id = 0
self.space = None
self.ctx = None
self.enum_space = None
self.float_space = None
self.enum_size = 0
self.search_log = dict()
self.score_priority_queue = PriorityQueue()
self.max_rounds = max_rounds
def set_search_space(self, space, ctx):
self.space = space
self.ctx = ctx
self.enum_space = self._get_subspace('.enum')
self.float_space = self._get_subspace('.float')
enum_size = 1
for k in self.enum_space:
enum_size = enum_size * len(self.enum_space[k])
self.enum_size = enum_size
def _get_subspace(self, subspace_key):
return {k: v for k, v in self.space.targets.items() if subspace_key in k}
def _sample_space(self):
# modify this to generate 2 options at once
# returns recommends: list of recommendation to run at the same time
recommends = list()
for _ in range(self.max_rounds): # generate random samples
values = dict()
for k, v in self.enum_space.items():
# print("in Enum space k,v=",k,v)
target = randint(0,len(v)-1)
values[k] = target
for k, v in self.float_space.items():
target = uniform(v[0].min, v[0].max)
values[k] = target
self._keep_log(values)
sr = SearchResult(self.space, values)
recommend = Recommendation(self.current_rec_id, sr)
recommends.append(recommend)
            # TODO: append another recommendation here; it will be scheduled automatically
# print(" values", values)
self.current_rec_id = self.current_rec_id + 1
return recommends
def initial_recommendation(self, ctx):
recommends = self._sample_space()
return recommends
def _keep_log(self, values):
self.search_log[self.current_rec_id] = dict()
self.search_log[self.current_rec_id]['recommendation'] = values
self.search_log[self.current_rec_id]['outcome'] = None
def _update_log_with_outcome(self, rec_id, outcome):
self.search_log[rec_id]['outcome'] = outcome
def refine_recommendation(self, outcome: Outcome, ctx: Context):
outcome_score = outcome.score
outcome_rec_id = outcome.recommendation_id
self.score_priority_queue.put((-outcome_score, outcome_rec_id))
self._update_log_with_outcome(outcome_rec_id, outcome)
if self.score_priority_queue.qsize() >= self.max_rounds:
ctx.stop_work(self,"Number of runs reached {}. Requesting stop.".format(self.max_rounds))
return []
recommends = self._sample_space()
return recommends
###########################################################################################################
class MyHandler(Handler):
def __init__(self):
Handler.__init__(self)
self.recs = list()
self.update_lock = threading.Lock()
# self.logger = logging.getLogger(self.__class__.__name__)
def recommendations_available(self, ctx):
recs = ctx.get_prop(ContextKey.RECOMMENDATIONS)
print('recommendations available')
for i, rec in enumerate(recs):
self.recs.append(rec)
# print('recommendation #{}'.format(i))
# rec.result.dump()
# print()
def startup(self, ctx: Context):
print(" __________starting up")
def shutdown(self, ctx: Context):
# print("__________shutdown")
pass
def start_job(self, ctx: Context):
print("start job ")
self.recommendations_available(ctx)
print("______Job __name",ctx.get_prop("_jobName"),"________has______started")
recomds=ctx.get_prop("_recommendations")
pass
def round_ended(self, ctx: Context):
print("_________round_ended")
pass
def end_job(self, ctx: Context):
print("_____________ end_job")
job_name = ctx.get_prop(ContextKey.JOB_NAME)
print("job name {}".format(job_name))
parms = ctx.get_prop(ContextKey.CONCRETE_SEARCH_VALUE)
# mlflow.start_run()
mlflow.set_tracking_uri("/claraDevDay/AutoML/mlruns")
with self.update_lock:
with mlflow.start_run() as run:
for k, v in parms.items():
par = k.split(":")[1]
v=v[0]
print("par=", par, " val=", v)
mlflow.log_param(par, v)
score = ctx.get_prop(ContextKey.SCORE)
print("score =",score)
mlflow.log_metric("Acc", score)
print ("MLFLOW added ")
print("___________________________")
# mlflow.end_run()
return
###########################################################################################################
class MyHandler2(Handler):
def __init__(self):
Handler.__init__(self)
pass
def recommendations_available(self, ctx):
pass
def startup(self, ctx: Context):
pass
def shutdown(self, ctx: Context):
pass
def start_job(self, ctx: Context):
pass
def round_ended(self, ctx: Context):
pass
def end_job(self, ctx: Context):
pass
###########################################################################################################
class RandomController2(Controller):
def __init__(self, max_rounds=1000):
Controller.__init__(self)
def set_search_space(self, space, ctx):
pass
def initial_recommendation(self, ctx):
pass
def refine_recommendation(self, outcome: Outcome, ctx: Context):
pass
| clara-train-examples-master | PyTorch/NoteBooks/AutoML/BYOC/myAutoMLController.py |
# SPDX-License-Identifier: Apache-2.0
c.NotebookApp.terminado_settings = {"shell_command": ["/bin/bash"]}
| clara-train-examples-master | PyTorch/NoteBooks/scripts/jupyter_notebook_config.py |
# Coded copied and simplified as an example from https://docs.monai.io/en/latest/_modules/monai/losses/dice.html#DiceLoss
import warnings
from typing import Callable, Optional, Union
import torch
from torch.nn.modules.loss import _Loss
from monai.networks import one_hot
from monai.utils import LossReduction, Weight
class MyDiceLoss(_Loss):
"""
Compute average Dice loss between two tensors. It can support both multi-classes and multi-labels tasks.
Input logits `input` (BNHW[D] where N is number of classes) is compared with ground truth `target` (BNHW[D]).
Axis N of `input` is expected to have logit predictions for each class rather than being image channels,
while the same axis of `target` can be 1 or N (one-hot format). The `smooth_nr` and `smooth_dr` parameters are
values added to the intersection and union components of the inter-over-union calculation to smooth results
respectively, these values should be small. The `include_background` class attribute can be set to False for
an instance of DiceLoss to exclude the first category (channel index 0) which is by convention assumed to be
background. If the non-background segmentations are small compared to the total image size they can get
overwhelmed by the signal from the background so excluding it in such cases helps convergence.
    Milletari, F. et. al. (2016) V-Net: Fully Convolutional Neural Networks for Volumetric Medical Image Segmentation, 3DV, 2016.
"""
def __init__(self,include_background: bool = True,to_onehot_y: bool = False,sigmoid: bool = False,softmax: bool = False,
other_act: Optional[Callable] = None, squared_pred: bool = False, jaccard: bool = False,
reduction: Union[LossReduction, str] = LossReduction.MEAN,smooth_nr: float = 1e-5,smooth_dr: float = 1e-5,batch: bool = False,
# label_weights: Optional[Union[Sequence[float], float, int, torch.Tensor]] = None
) -> None:
print(f" ####################-------------------- Triggering your own Loss code ")
print(f" --------------------#################### You can change this as you see fit")
"""
Args:
include_background: if False channel index 0 (background category) is excluded from the calculation.
to_onehot_y: whether to convert `y` into the one-hot format. Defaults to False.
sigmoid: if True, apply a sigmoid function to the prediction.
softmax: if True, apply a softmax function to the prediction.
other_act: if don't want to use `sigmoid` or `softmax`, use other callable function to execute
other activation layers, Defaults to ``None``. for example:
`other_act = torch.tanh`.
squared_pred: use squared versions of targets and predictions in the denominator or not.
jaccard: compute Jaccard Index (soft IoU) instead of dice or not.
reduction: {``"none"``, ``"mean"``, ``"sum"``}
Specifies the reduction to apply to the output. Defaults to ``"mean"``.
- ``"none"``: no reduction will be applied.
- ``"mean"``: the sum of the output will be divided by the number of elements in the output.
- ``"sum"``: the output will be summed.
smooth_nr: a small constant added to the numerator to avoid zero.
smooth_dr: a small constant added to the denominator to avoid nan.
batch: whether to sum the intersection and union areas over the batch dimension before the dividing.
Defaults to False, a Dice loss value is computed independently from each item in the batch
before any `reduction`.
Raises:
TypeError: When ``other_act`` is not an ``Optional[Callable]``.
ValueError: When more than 1 of [``sigmoid=True``, ``softmax=True``, ``other_act is not None``].
Incompatible values.
"""
super().__init__(reduction=LossReduction(reduction).value)
if other_act is not None and not callable(other_act):
raise TypeError(f"other_act must be None or callable but is {type(other_act).__name__}.")
if int(sigmoid) + int(softmax) + int(other_act is not None) > 1:
raise ValueError("Incompatible values: more than 1 of [sigmoid=True, softmax=True, other_act is not None].")
self.include_background = include_background
self.to_onehot_y = to_onehot_y
self.sigmoid = sigmoid
self.softmax = softmax
self.other_act = other_act
self.squared_pred = squared_pred
self.jaccard = jaccard
self.smooth_nr = float(smooth_nr)
self.smooth_dr = float(smooth_dr)
self.batch = batch
# uncomment lines below to enable label weights
# self.label_weights=label_weights
# if self.label_weights is not None:
# self.label_weights=[x / sum(self.label_weights) for x in self.label_weights]
# print ("====== AEH applying label weights {} refactored as {}".format(label_weights,self.label_weights))
def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
"""
Args:
input: the shape should be BNH[WD].
target: the shape should be BNH[WD].
Raises:
ValueError: When ``self.reduction`` is not one of ["mean", "sum", "none"].
"""
if self.sigmoid:
input = torch.sigmoid(input)
n_pred_ch = input.shape[1]
if self.softmax:
if n_pred_ch == 1:
warnings.warn("single channel prediction, `softmax=True` ignored.")
else:
input = torch.softmax(input, 1)
if self.other_act is not None:
input = self.other_act(input)
if self.to_onehot_y:
if n_pred_ch == 1:
warnings.warn("single channel prediction, `to_onehot_y=True` ignored.")
else:
target = one_hot(target, num_classes=n_pred_ch)
if not self.include_background:
if n_pred_ch == 1:
warnings.warn("single channel prediction, `include_background=False` ignored.")
else:
# if skipping background, removing first channel
target = target[:, 1:]
input = input[:, 1:]
if target.shape != input.shape:
raise AssertionError(f"ground truth has differing shape ({target.shape}) from input ({input.shape})")
# reducing only spatial dimensions (not batch nor channels)
reduce_axis = list(range(2, len(input.shape)))
if self.batch:
# reducing spatial dimensions and batch
reduce_axis = [0] + reduce_axis
intersection = torch.sum(target * input, dim=reduce_axis)
        ### uncomment lines below to enable label weights
# if self.label_weights is not None: # add wights to labels
# bs=intersection.shape[0]
# w = torch.tensor(self.label_weights, dtype=torch.float32,device=torch.device('cuda:0'))
# w= w.repeat(bs, 1) ## change size to [BS, Num of classes ]
# intersection = w* intersection
if self.squared_pred:
target = torch.pow(target, 2)
input = torch.pow(input, 2)
ground_o = torch.sum(target, dim=reduce_axis)
pred_o = torch.sum(input, dim=reduce_axis)
denominator = ground_o + pred_o
if self.jaccard:
denominator = 2.0 * (denominator - intersection)
f: torch.Tensor = 1.0 - (2.0 * intersection + self.smooth_nr) / (denominator + self.smooth_dr)
if self.reduction == LossReduction.MEAN.value:
f = torch.mean(f) # the batch and channel average
elif self.reduction == LossReduction.SUM.value:
f = torch.sum(f) # sum over the batch and channel dims
elif self.reduction == LossReduction.NONE.value:
pass # returns [N, n_classes] losses
else:
raise ValueError(f'Unsupported reduction: {self.reduction}, available options are ["mean", "sum", "none"].')
return f
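# Hedged sanity check (not part of the original file): compute the Dice loss on random logits
# against random integer labels; the tensor shapes are arbitrary examples.
if __name__ == "__main__":
    pred = torch.rand(2, 2, 16, 16, 16)                  # B x C x H x W x D logits for 2 classes
    label = torch.randint(0, 2, (2, 1, 16, 16, 16))      # single-channel integer labels
    loss_fn = MyDiceLoss(to_onehot_y=True, softmax=True)
    print(loss_fn(pred, label))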
| clara-train-examples-master | PyTorch/NoteBooks/GettingStarted/custom/myLoss.py |
# SPDX-License-Identifier: Apache-2.0
from monai.transforms import Transform,MapTransform,Randomizable
from monai.config import KeysCollection
from typing import Optional, Any, Mapping, Hashable
import numpy as np
import monai
class RandAdditiveNoise(Randomizable, Transform):
def __init__(self, prob: float = 0.5, max_add: float = 1.0) -> None:
self.prob = np.clip(prob, 0.0, 1.0)
self.max_add = max_add
self._noise = 0
def randomize(self, data: np.ndarray) -> None:
self._noise = 0
if self.R.random() < self.prob:
noise_array = self.R.rand(*data.shape[1:])[None]
self._noise = (noise_array * self.max_add).astype(data.dtype)
def add_noise(self, img: np.ndarray) -> np.ndarray:
return img + self._noise
def __call__(self, img: np.ndarray) -> np.ndarray:
self.randomize(img)
return self.add_noise(img)
################################################################################################################
class MyRandAdditiveNoised(Randomizable, MapTransform):
def __init__(self, keys: KeysCollection, prob: float = 0.5, max_add: float = 1.0
) -> None:
super(Randomizable, self).__init__(keys)
self.transform = RandAdditiveNoise(prob, max_add)
def set_random_state(self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None
) -> "RandAdditiveNoised":
self.transform.set_random_state(seed, state)
super().set_random_state(seed, state)
return self
def randomize(self, data: Optional[Any] = None) -> None:
self.transform.randomize(data)
def __call__(self, data: Mapping[Hashable, np.ndarray]
) -> Mapping[Hashable, np.ndarray]:
self.randomize(data[monai.utils.first(self.keys)])
d = dict(data)
for key in self.keys:
d[key] = self.transform.add_noise(d[key])
return d
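# Illustrative usage sketch (added; not part of the original example). It only uses the two
# classes defined above plus numpy, which is already imported, and assumes MONAI's
# Randomizable/MapTransform behave as documented.
if __name__ == "__main__":
    noiser = MyRandAdditiveNoised(keys=["image"], prob=1.0, max_add=0.5)
    noiser.set_random_state(seed=0)
    sample = {"image": np.zeros((1, 4, 4)), "label": np.zeros((1, 4, 4))}
    out = noiser(sample)
    # noise is added to the "image" key only; "label" is left untouched
    print(out["image"].max(), out["label"].max())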
| clara-train-examples-master | PyTorch/NoteBooks/GettingStarted/custom/myTransformation.py |
clara-train-examples-master | PyTorch/NoteBooks/GettingStarted/custom/__init__.py |
|
# copied from https://docs.monai.io/en/latest/_modules/monai/networks/nets/basic_unet.html#BasicUNet
# Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Sequence, Union
import torch
import torch.nn as nn
from monai.networks.blocks import Convolution, UpSample
from monai.networks.layers.factories import Conv, Pool
from monai.utils import ensure_tuple_rep
__all__ = ["BasicUNet", "BasicUnet", "Basicunet"]
class TwoConv(nn.Sequential):
"""two convolutions."""
def __init__(
self,
dim: int,
in_chns: int,
out_chns: int,
act: Union[str, tuple],
norm: Union[str, tuple],
dropout: Union[float, tuple] = 0.0,
):
"""
Args:
dim: number of spatial dimensions.
in_chns: number of input channels.
out_chns: number of output channels.
act: activation type and arguments.
norm: feature normalization type and arguments.
dropout: dropout ratio. Defaults to no dropout.
"""
super().__init__()
conv_0 = Convolution(dim, in_chns, out_chns, act=act, norm=norm, dropout=dropout, padding=1)
conv_1 = Convolution(dim, out_chns, out_chns, act=act, norm=norm, dropout=dropout, padding=1)
self.add_module("conv_0", conv_0)
self.add_module("conv_1", conv_1)
class Down(nn.Sequential):
"""maxpooling downsampling and two convolutions."""
def __init__(
self,
dim: int,
in_chns: int,
out_chns: int,
act: Union[str, tuple],
norm: Union[str, tuple],
dropout: Union[float, tuple] = 0.0,
):
"""
Args:
dim: number of spatial dimensions.
in_chns: number of input channels.
out_chns: number of output channels.
act: activation type and arguments.
norm: feature normalization type and arguments.
dropout: dropout ratio. Defaults to no dropout.
"""
super().__init__()
max_pooling = Pool["MAX", dim](kernel_size=2)
convs = TwoConv(dim, in_chns, out_chns, act, norm, dropout)
self.add_module("max_pooling", max_pooling)
self.add_module("convs", convs)
class UpCat(nn.Module):
"""upsampling, concatenation with the encoder feature map, two convolutions"""
def __init__(
self,
dim: int,
in_chns: int,
cat_chns: int,
out_chns: int,
act: Union[str, tuple],
norm: Union[str, tuple],
dropout: Union[float, tuple] = 0.0,
upsample: str = "deconv",
halves: bool = True,
):
"""
Args:
dim: number of spatial dimensions.
in_chns: number of input channels to be upsampled.
cat_chns: number of channels from the decoder.
out_chns: number of output channels.
act: activation type and arguments.
norm: feature normalization type and arguments.
dropout: dropout ratio. Defaults to no dropout.
upsample: upsampling mode, available options are
``"deconv"``, ``"pixelshuffle"``, ``"nontrainable"``.
halves: whether to halve the number of channels during upsampling.
"""
super().__init__()
up_chns = in_chns // 2 if halves else in_chns
self.upsample = UpSample(dim, in_chns, up_chns, 2, mode=upsample)
self.convs = TwoConv(dim, cat_chns + up_chns, out_chns, act, norm, dropout)
def forward(self, x: torch.Tensor, x_e: torch.Tensor):
"""
Args:
x: features to be upsampled.
x_e: features from the encoder.
"""
x_0 = self.upsample(x)
# handling spatial shapes due to the 2x maxpooling with odd edge lengths.
dimensions = len(x.shape) - 2
sp = [0] * (dimensions * 2)
for i in range(dimensions):
if x_e.shape[-i - 1] != x_0.shape[-i - 1]:
sp[i * 2 + 1] = 1
x_0 = torch.nn.functional.pad(x_0, sp, "replicate")
x = self.convs(torch.cat([x_e, x_0], dim=1)) # input channels: (cat_chns + up_chns)
return x
class MyBasicUNet(nn.Module):
def __init__(
self,
dimensions: int = 3,
in_channels: int = 1,
out_channels: int = 2,
features: Sequence[int] = (32, 32, 64, 128, 256, 32),
act: Union[str, tuple] = ("LeakyReLU", {"negative_slope": 0.1, "inplace": True}),
norm: Union[str, tuple] = ("instance", {"affine": True}),
dropout: Union[float, tuple] = 0.0,
upsample: str = "deconv",
):
print(f" ####################-------------------- Triggering your own Arch code ")
print(f" --------------------#################### You can change this as you see fit")
"""
A UNet implementation with 1D/2D/3D supports.
Based on:
Falk et al. "U-Net – Deep Learning for Cell Counting, Detection, and
Morphometry". Nature Methods 16, 67–70 (2019), DOI:
http://dx.doi.org/10.1038/s41592-018-0261-2
Args:
dimensions: number of spatial dimensions. Defaults to 3 for spatial 3D inputs.
in_channels: number of input channels. Defaults to 1.
out_channels: number of output channels. Defaults to 2.
features: six integers as numbers of features.
Defaults to ``(32, 32, 64, 128, 256, 32)``,
- the first five values correspond to the five-level encoder feature sizes.
- the last value corresponds to the feature size after the last upsampling.
act: activation type and arguments. Defaults to LeakyReLU.
norm: feature normalization type and arguments. Defaults to instance norm.
dropout: dropout ratio. Defaults to no dropout.
upsample: upsampling mode, available options are
``"deconv"``, ``"pixelshuffle"``, ``"nontrainable"``.
Examples::
            # for spatial 2D
            >>> net = MyBasicUNet(dimensions=2, features=(64, 128, 256, 512, 1024, 128))
            # for spatial 2D, with group norm
            >>> net = MyBasicUNet(dimensions=2, features=(64, 128, 256, 512, 1024, 128), norm=("group", {"num_groups": 4}))
            # for spatial 3D
            >>> net = MyBasicUNet(dimensions=3, features=(32, 32, 64, 128, 256, 32))
See Also
- :py:class:`monai.networks.nets.DynUNet`
- :py:class:`monai.networks.nets.UNet`
"""
super().__init__()
fea = ensure_tuple_rep(features, 6)
print(f"BasicUNet features: {fea}.")
        self.conv_0 = TwoConv(dimensions, in_channels, fea[0], act, norm, dropout)
self.down_1 = Down(dimensions, fea[0], fea[1], act, norm, dropout)
self.down_2 = Down(dimensions, fea[1], fea[2], act, norm, dropout)
self.down_3 = Down(dimensions, fea[2], fea[3], act, norm, dropout)
self.down_4 = Down(dimensions, fea[3], fea[4], act, norm, dropout)
self.upcat_4 = UpCat(dimensions, fea[4], fea[3], fea[3], act, norm, dropout, upsample)
self.upcat_3 = UpCat(dimensions, fea[3], fea[2], fea[2], act, norm, dropout, upsample)
self.upcat_2 = UpCat(dimensions, fea[2], fea[1], fea[1], act, norm, dropout, upsample)
self.upcat_1 = UpCat(dimensions, fea[1], fea[0], fea[5], act, norm, dropout, upsample, halves=False)
self.final_conv = Conv["conv", dimensions](fea[5], out_channels, kernel_size=1)
def forward(self, x: torch.Tensor):
"""
Args:
x: input should have spatially N dimensions
``(Batch, in_channels, dim_0[, dim_1, ..., dim_N])``, N is defined by `dimensions`.
It is recommended to have ``dim_n % 16 == 0`` to ensure all maxpooling inputs have
even edge lengths.
Returns:
A torch Tensor of "raw" predictions in shape
``(Batch, out_channels, dim_0[, dim_1, ..., dim_N])``.
"""
x0 = self.conv_0(x)
x1 = self.down_1(x0)
x2 = self.down_2(x1)
x3 = self.down_3(x2)
x4 = self.down_4(x3)
u4 = self.upcat_4(x4, x3)
u3 = self.upcat_3(u4, x2)
u2 = self.upcat_2(u3, x1)
u1 = self.upcat_1(u2, x0)
logits = self.final_conv(u1)
return logits
# BasicUnet = Basicunet = BasicUNet
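# Illustrative smoke test (added; not part of the original file). It exercises the network
# exactly as defined above and assumes the MONAI blocks imported at the top are available.
if __name__ == "__main__":
    net = MyBasicUNet(dimensions=3, in_channels=1, out_channels=2)
    x = torch.rand(1, 1, 32, 32, 32)  # edge lengths divisible by 16, as recommended above
    print(net(x).shape)               # expected: torch.Size([1, 2, 32, 32, 32])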
| clara-train-examples-master | PyTorch/NoteBooks/GettingStarted/custom/myNetworkArch.py |
import argparse
from pathlib import Path
import logging
import os
import glob
import fileIO
import numpy as np
from time import sleep
import json
#from src import create_csv_db
LOGGING_LEVEL = logging.INFO
# LOGGING_LEVEL=logging.DEBUG
# logging.basicConfig(level=LOGGING_LEVEL)
logging.basicConfig(level=LOGGING_LEVEL,
format='[%(asctime)s.%(msecs)03d][%(levelname)5s](%(name)s:%(funcName)s) - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
SERIES_INST_UID_APPEND_TO_SEG_FNAME=6
def runOScmd(cmd):
logging.info("cmd to run cmd= {}".format(cmd))
os.system(cmd)
def convertDcm2nii(source, outputDir):
runOScmd("medl-dataconvert --src_ext .dcm --dst_ext .nii.gz --output " + outputDir + " --dir " + source)
def covertAllSeg2Nifti(df, outputDir,flipAxis):
lbKeyDic = {}
for ds in df:
# print(ds)
seg_Series_IUID=ds['SeriesInstanceUID']
PatientID = ds['PatientID']
filePath = ds['file_location']
RefDicomIUID = ds['ReferencedSeriesSequence0_SeriesInstanceUID']
RefDicomIUID = RefDicomIUID.replace("'", "")
print("ref2 fetch=", filePath)
# create folder for each patient
# patOutputDir = outputDir +"/"+PatientID
# os.makedirs(patOutputDir, exist_ok=True)
fileNameFormat = RefDicomIUID + "_" + seg_Series_IUID[-SERIES_INST_UID_APPEND_TO_SEG_FNAME:] + "_seg.nii.gz"
fileNameFormat = PatientID + "_seg_"+seg_Series_IUID[-SERIES_INST_UID_APPEND_TO_SEG_FNAME:]+".nii.gz"
outputFilePath = outputDir + "/" + fileNameFormat.replace("'","") # remove some ' that are in the seg file name
covertSeg2Nii(filePath, outputFilePath, lbKeyDic,tmp_path=outputDir+"/../tmp/",flipAxis=flipAxis)
# moved to fileIO use import
# def covertSeg2Nii(filePath, outputFilePath, lbKeyDic,tmp_path,flipAxis):
# print("Converting Seg from Location =", filePath)
# # tmp_path = outputFilePath+"/../tmp/"
# os.makedirs(tmp_path, exist_ok=True)
# runOScmd("rm " + tmp_path + "/*")
# sleep(1)
# runOScmd("segimage2itkimage -t nii --outputDirectory " + tmp_path + " --inputDICOM " + filePath)
# metaFile= tmp_path + "meta.json"
# file2IdDic=readSegMetaJson(metaFile,lbKeyDic)
# mergeMultipleNiftis(tmp_path, outputFilePath, file2IdDic,flipAxis)
#
#
# def mergeMultipleNiftis(tmp_path, outputFilePath, file2IdDic,flipAxis):
# # simple merge by file name doesn't work since labels are continues numbers and can be for different labels
# # Need to read the json to know what label is what number
# allNP = None
# for fileName in file2IdDic:
# segment_number = file2IdDic[fileName]
# print(fileName, '->', segment_number)
# # files = glob.glob(tmp_path+"/*.nii.gz")
# # for segment_number,f in enumerate(files):
# fPath = tmp_path + "/" + fileName
# print("Merging nii files =", fPath)
# imgNp, hdr = fileIO.openNifti(fPath)
# if allNP is None:
# allNP = np.zeros_like(imgNp, dtype=np.uint8)
# allNP[imgNp > 0] = segment_number
#
# if "rot" in flipAxis or flipAxis=="all":
# allNP = np.swapaxes(allNP , 0,1) # rotate on axial
# if "lr" in flipAxis or flipAxis=="all":
# allNP = np.flip(allNP, axis=0) #lr
# if "ap" in flipAxis or flipAxis=="all":
# allNP = np.flip(allNP, axis=1) # ap
# if "si" in flipAxis or flipAxis=="all":
# allNP = np.flip(allNP, axis=2) # si
# print("writing mergeged file as", outputFilePath)
#
# # normal file name
# fileIO.writeNifti(allNP, outputFilePath[:outputFilePath.find("_seg_") + 4] + ".nii.gz", hdr.get_base_affine())
# # file name with Ser UID appended
# fileIO.writeNifti(allNP, outputFilePath, hdr.get_base_affine())
# # file after removing 1 slice
# fileIO.writeNifti(allNP[:,:,:-1], outputFilePath.replace("_seg","_seg_ShortZ"), hdr.get_base_affine())
# def readSegMetaJson(filePath, lbKeyDic):
# # this function will add to lbkeyDic the is inputed
# # will return dic of ids and what label to use for it
# ## {2.nii:1, 1.nii:2, 3.nii:4}
# with open(filePath) as f:
# data = json.load(f)
# segmentAttributes = data['segmentAttributes']
# file2IdDic = {}
# for s in segmentAttributes:
# s0 = s[0]
# id = str(s0['labelID'])
# lbName = s0['SegmentLabel']
# lbDesc = s0['SegmentDescription']
# fName = id + ".nii.gz"
# print("id=", id, " lb Name=", lbName, " lb desc", lbDesc, " file=", fName)
# if lbDesc not in lbKeyDic.keys():
# lbKeyDic[lbDesc] = id
# # key are now already there but could have another id
# file2IdDic[ id + ".nii.gz"] = lbKeyDic[lbDesc]
# return file2IdDic
# adjusted from src to return a list
def extract_dcm_metadata_to_csv(folder: Path, n_jobs, filter_slice=True, filter_series=True):
from joblib import Parallel, delayed
import pandas as pd
from src.filters import keep_slice, small_series
from src.create_csv_db import dcm_file_to_flat_dict, merge_series
folder = folder.expanduser().resolve()
files = folder.rglob("*.dcm")
with Parallel(n_jobs=n_jobs) as parallel:
list_of_metadata_dict = parallel(delayed(dcm_file_to_flat_dict)(file) for file in files)
if filter_slice:
indexer = parallel(delayed(keep_slice)(slice_) for slice_ in list_of_metadata_dict)
list_of_metadata_dict = [x for x, y in zip(list_of_metadata_dict, indexer) if y]
metadatas_group_by_series_acq_number = merge_series(list_of_metadata_dict)
final_list_of_mdatas = []
for unique_series, series_slices in metadatas_group_by_series_acq_number.items():
if filter_series and small_series(series_slices):
continue
else:
final_list_of_mdatas.extend(series_slices)
df = pd.DataFrame.from_records(final_list_of_mdatas)
df.to_csv(folder / "metadatas.csv", index=False)
return final_list_of_mdatas
def main():
parser = argparse.ArgumentParser()
parser.add_argument("source", help="the root folder where to recursively search and analyse dicom files")
parser.add_argument("--jobs", "-j", help="Number of workers to use", default=4, type=int)
parser.add_argument("--filter_small_series", help="filter series with less than 25 slices in it", action="store_true")
parser.add_argument("--filter_slices", help="keep only CT,MR,AC PT,RTSTRUC and SEG, original acquisition only", default=True,action="store_true")
parser.add_argument("--outputDir", "-o", help="Output Directory for converted files ", default="./output", type=str)
parser.add_argument("--dicomConvert", "-d", help="Convert dicom to nii ", default=False,action="store_true")
parser.add_argument("--flipAxis", "-f", help="Flips axes [ap,lr,si,rot]", default="",type=str,choices=['ap','lr','si','rot',"all",'rotsi','rotsilr','apsi'])
args = parser.parse_args()
print(args)
print("args.filter_slices" + str(args.filter_slices))
if args.dicomConvert:
convertDcm2nii(args.source,args.outputDir)
else:
#final_list_of_mdatas = create_csv_db.extract_dcm_metadata_to_csv(Path(args.source), args.jobs, args.filter_slices, args.filter_small_series)
final_list_of_mdatas = extract_dcm_metadata_to_csv(Path(args.source), args.jobs, args.filter_slices, args.filter_small_series)
covertAllSeg2Nifti(final_list_of_mdatas, args.outputDir,args.flipAxis)
if __name__ == '__main__':
main()
print("---------------Done ")
| clara-train-examples-master | PyTorch/NoteBooks/Data/TCIA/convert2nii.py |
from os import path
import nibabel as nib
import numpy as np
###################################################################################
# nifti files
###################################################################################
def openNifti(fname,type=None):
'''
:param fname: file path to open
:return: imgNp: numpy
hdr: info
'''
assert path.isfile(fname), "file doesn't exist "+fname
img = nib.load(fname)
imgNp = img.get_fdata()
if type is not None:
imgNp=imgNp.astype(type)
# imgNp.shape
hdr = img.header
return imgNp, hdr
def writeNifti(dataNp, fname,affineNp):
'''
:param dataNp: np to write
:param fname: file name to write to
:param affineNp: affine transformation to use. usually from hdr.get_base_affine()
:return:
'''
# data = np.ones((32, 32, 15, 100), dtype=np.int16)
    if dataNp.dtype == np.bool_:  # or dataNp.dtype == np.uint8:
img = nib.Nifti1Image(dataNp.astype(np.uint8), affineNp)
else:
img = nib.Nifti1Image(dataNp, affineNp)
img.to_filename(fname)
# nib.save(img, fname)
###################################################################################
# json for datalist
###################################################################################
import json
import os
class DataJson():
def __init__(self, dataRoot="",file2Load=None):
self._jsonData = {}
self._rootPath = dataRoot
self._jsonData['rootPath'] = self._rootPath
if file2Load is not None:
self._load(file2Load)
else:
self._jsonData['training'] = []
def appendDataPt(self, img, gt,checkFileExist=True):
if checkFileExist:
f=self._rootPath+img
assert os.path.isfile(f) , "files "+f+" doesn't exist"
f = self._rootPath + gt
assert os.path.isfile(f), "files " + f + " doesn't exist"
self._jsonData['training'].append({
"image": img,
"label": gt
})
print("adding img",img," gt ",gt)
def write2file(self, filename):
with open(filename, 'w') as outfile:
json.dump(self._jsonData, outfile)
print("written file to disk with {} items in Training".format(self.getNumItem()))
# print("and {} items in validation".format(self.getNumItem("validation")))
def getJson(self):
return self._jsonData
def getNumItem(self,dictItem="training"):
return len(self._jsonData[dictItem])
def getItemAt(self,index=0,key='image',dictItem="training"):
return self._jsonData["rootPath"]+self._jsonData[dictItem][index][key]
def _load(self,filePath):
with open(filePath, 'r') as f:
self._jsonData = json.load(f)
def _print4debug(self,dictItem="training"):
root=self._jsonData["rootPath"]
print(root)
for itm in self._jsonData[dictItem]:
# print(root+itm["image"])
print(root+itm["label"])
print(os.path.basename(root+itm["label"]))
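# Minimal usage sketch for DataJson (added; the file names below are placeholders):
#   dj = DataJson(dataRoot="/data/root/")
#   dj.appendDataPt("imagesTr/case0.nii.gz", "labelsTr/case0.nii.gz", checkFileExist=False)
#   dj.write2file("dataset.json")
#   print(dj.getNumItem(), dj.getItemAt(0, key="label"))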
| clara-train-examples-master | PyTorch/NoteBooks/Data/TCIA/fileIO.py |
DICOM_TAGS_TO_KEEP = ['ReferencedSeriesSequence0_SeriesInstanceUID','AccessionNumber', 'AcquisitionDate',
'AcquisitionDateTime', 'AcquisitionNumber', 'AcquisitionTime',
'AttenuationCorrectionMethod', 'BodyPartExamined',
'ContentDate', 'ContentTime', 'ContrastBolusAgent',
'ContrastBolusIngredientConcentration',
'ContrastBolusRoute', 'ContrastBolusStartTime', 'ContrastBolusStopTime', 'ContrastBolusTotalDose',
'ContrastBolusVolume', 'ContrastFlowDuration', 'ContrastFlowRate', 'ConversionType',
'ConvolutionKernel', 'Columns',
'CorrectedImage', 'CountsSource',
'DeadTimeFactor', 'DecayCorrection', 'DecayFactor',
'DoseCalibrationFactor', 'EnergyWeightingFactor',
'EnergyWindowRangeSequence0_EnergyWindowLowerLimit',
'EnergyWindowRangeSequence0_EnergyWindowUpperLimit',
'FrameOfReferenceUID',
'FrameReferenceTime', 'FrameTime',
'ImageOrientationPatient',
'ImagePositionPatient', 'ImageType', 'InstanceCreationDate', 'InstanceCreationTime',
'InstanceCreatorUID',
'InstanceNumber',
'LargestImagePixelValue',
'Laterality', 'Manufacturer',
'ManufacturerModelName', 'ModalitiesInStudy', 'Modality',
'NumberOfSlices',
'NumberOfStudyRelatedInstances',
'PatientAge',
'PatientBirthDate',
'PatientID', 'PatientName', 'PatientOrientation',
'PatientPosition', 'PatientSex',
'PatientSize',
'PatientWeight', 'PhotometricInterpretation',
'PixelAspectRatio',
'PixelPaddingValue', 'PixelRepresentation', 'PixelSpacing', 'PlanarConfiguration',
'PositionReferenceIndicator',
'RadiopharmaceuticalInformationSequence0_RadionuclideHalfLife',
'RadiopharmaceuticalInformationSequence0_RadionuclidePositronFraction',
'RadiopharmaceuticalInformationSequence0_RadionuclideTotalDose',
'RadiopharmaceuticalInformationSequence0_Radiopharmaceutical',
'RadiopharmaceuticalInformationSequence0_RadiopharmaceuticalStartDateTime',
'RadiopharmaceuticalInformationSequence0_RadiopharmaceuticalStartTime',
'RadiopharmaceuticalInformationSequence0_RadiopharmaceuticalStopDateTime',
'RadiopharmaceuticalInformationSequence0_RadiopharmaceuticalStopTime',
'RadiopharmaceuticalInformationSequence0_RadiopharmaceuticalVolume', 'RandomsCorrectionMethod',
'ReasonForStudy',
'ReconstructionDiameter', 'ReconstructionMethod', 'ReconstructionTargetCenterPatient',
'ReferencedImageSequence0_ReferencedSOPClassUID',
'ReferencedImageSequence0_ReferencedSOPInstanceUID',
'ReferencedStudySequence0_ReferencedSOPClassUID',
'ReferencedStudySequence0_ReferencedSOPInstanceUID', 'ReferringPhysicianName',
'RelatedSeriesSequence0_SeriesInstanceUID',
'RelatedSeriesSequence0_StudyInstanceUID', 'ReprojectionMethod',
'RescaleIntercept', 'RescaleSlope',
'RescaleType',
'Rows', 'SOPClassUID', 'SOPInstanceUID',
'ScatterCorrectionMethod', 'ScatterFractionFactor',
'SeriesDate',
'SeriesDescription',
'SeriesInstanceUID', 'SeriesNumber', 'SeriesTime', 'SeriesType',
'SliceLocation',
'SliceSensitivityFactor', 'SmokingStatus',
'SpacingBetweenSlices',
'StudyDate',
'StudyDescription',
'StudyID', 'StudyInstanceUID', 'StudyPriorityID', 'StudyStatusID', 'StudyTime', 'file_location']
| clara-train-examples-master | PyTorch/NoteBooks/Data/TCIA/src/dicom_keys.py |
"""List of function to pre-filter dicom slices or series
"""
import re
from src.dicom_keys import DICOM_TAGS_TO_KEEP
ImageType = "ImageType"
CorrectedImage = "CorrectedImage"
Modality = "Modality"
SeriesDescription = "SeriesDescription"
assert ImageType in DICOM_TAGS_TO_KEEP
assert CorrectedImage in DICOM_TAGS_TO_KEEP
assert Modality in DICOM_TAGS_TO_KEEP
assert SeriesDescription in DICOM_TAGS_TO_KEEP
def original_image(metas):
# specify modality in case dicom rt or seg are not original
return ("ORIGINAL" in metas[ImageType]) and (metas[Modality] in ["CT", "PT", "MR"])
def attn_corrected(metas):
if metas[Modality] != "PT":
return True
else:
conds = []
no_ac_strings = ["noac", "nac", "noattn"]
if CorrectedImage in metas:
conds.append("ATTN" in metas[CorrectedImage])
conds.extend(
pattern not in re.sub(r'[^A-Za-z0-9]+', '', metas[SeriesDescription]).lower() for pattern in no_ac_strings)
return all(conds)
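# For example (added note): a PT series described as "WB NAC" is rejected because, after
# stripping punctuation and lower-casing, "nac" is found in "wbnac", so one of the
# conditions above is False and all(conds) fails.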
def is_ct_rtstruct_seg_mr_pt(metas):
return metas[Modality] in ["RTSTRUCT", "CT", "PT", "SEG", "MR"]
def keep_slice(metas):
# return True
return metas[Modality] in ["SEG"] ## AEH need only seg
for predicate in (is_ct_rtstruct_seg_mr_pt, original_image, attn_corrected):
if predicate(metas):
continue
else:
return False
return True
def small_series(list_of_slices):
return len(list_of_slices) < 25
| clara-train-examples-master | PyTorch/NoteBooks/Data/TCIA/src/filters.py |
# Import libraries
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
# Global parameters
colors = ['b','r','g','m','y','c']
styles = ['o','s','v','^','D',">"]
def plot_single_perf(bm, df, xaxis, unique_labels):
    fig = plt.figure(1, figsize=(5, 5))
fig.suptitle(bm)
ax = fig.gca()
ax.set_xlabel(xaxis)
ax.set_ylabel('GPU Time (sec)')
ax.set_xscale('log')
ax.set_xticks(list(df[xaxis]))
ax.get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
marker_handles = []
num_style = len(df["Distribution"].unique())
# Iterate over labels and label indices
for lindex, lbl in enumerate(unique_labels):
tmpdf = df.loc[df['Label'] == lbl]
x = tmpdf[xaxis]
perf = tmpdf["GPU Time (sec)"]
# Get style & type index
sid = lindex % num_style
tid = int(lindex / num_style)
if not tid:
ax.plot(x, perf, color=colors[sid])
ax.scatter(x, perf, color=colors[sid], marker=styles[sid])
# Add legend
marker_handles.append(ax.plot([], [], c=colors[sid], marker=styles[sid], \
label=lbl)[0])
else:
ax.plot(x, perf, color=colors[sid], linestyle="--")
ax.scatter(x, perf, color=colors[sid], marker=styles[sid], facecolors='none')
# Add legend
marker_handles.append(ax.plot([], [], c=colors[sid], marker=styles[sid], \
mfc='none', linestyle="--", label=lbl)[0])
leg = plt.legend(handles = marker_handles, loc="upper left", ncol=2, frameon=False)
plt.savefig(bm + '.eps')
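# Illustrative call of plot_single_perf (added). The column names match the code above;
# the rows themselves are made up:
#   df = pd.DataFrame({"Elements": [1e6, 2e6, 4e6],
#                      "GPU Time (sec)": [0.10, 0.21, 0.45],
#                      "Label": ["uniform-int32"] * 3,
#                      "Distribution": ["uniform"] * 3})
#   plot_single_perf("insert", df, "Elements", df["Label"].unique())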
def plot_dual_perf(bm, df, xaxis, unique_labels):
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(15, 5))
fig.suptitle(bm)
marker_handles = []
lax = [ax1, ax2, ax3]
for item in lax:
item.set_xlabel(xaxis)
item.set_ylabel("GPU Time (sec)")
num_style = len(df["Distribution"].unique())
# Iterate over labels and label indices
for lindex, lbl in enumerate(unique_labels):
tmpdf = df.loc[df['Label'] == lbl]
x = tmpdf[xaxis]
perf = tmpdf["GPU Time (sec)"]
# Get style & type index
sid = lindex % num_style
tid = int(lindex / num_style)
# INT32
if not tid:
lax[sid].plot(x, perf, color=colors[sid])
lax[sid].scatter(x, perf, color=colors[sid], marker=styles[sid])
# Add legend
marker_handles.append(lax[sid].plot([], [], c=colors[sid], marker=styles[sid], \
label=lbl)[0])
# INT64
else:
lax[sid].plot(x, perf, color=colors[sid], linestyle="--")
lax[sid].scatter(x, perf, color=colors[sid], marker=styles[sid], facecolors='none')
# Add legend
marker_handles.append(lax[sid].plot([], [], c=colors[sid], marker=styles[sid], \
mfc='none', linestyle="--", label=lbl)[0])
leg = plt.legend(handles = marker_handles, loc="upper left", ncol=2, frameon=False)
plt.savefig(bm + '.eps') | cuCollections-dev | benchmarks/analysis/notebooks/Utils.py |
#!/usr/bin/python
###############################################################################
#
# SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
###############################################################################
import time
from datetime import datetime, timedelta
from ansible.errors import AnsibleConnectionFailure
from ansible.plugins.action import ActionBase
DOCUMENTATION = r'''
---
module: raw_reboot
short_description: Module issuing a raw style reboot and wait to come online
version_added: "1.1.0"
description: Module for raw reboots
options:
reboot_timeout:
description: Maximum number of seconds to wait for a reboot
'''
EXAMPLES = r'''
- name: raw reboot
raw_reboot:
reboot_timeout: 1200
'''
class TimeoutException(Exception):
pass
class ActionModule(ActionBase):
def run(self, **kwargs):
result = super(ActionModule, self).run(kwargs)
result['failed'] = True
result['rebooted'] = False
reboot_timeout = int(self._task.args.get('reboot_timeout', 600))
end_time = datetime.utcnow() + timedelta(seconds=reboot_timeout)
# Now reboot and then wait
self._low_level_execute_command("/sbin/reboot", sudoable=True)
# Sleep just in case the reboot takes a few seconds
time.sleep(30)
while datetime.utcnow() < end_time:
try:
self._low_level_execute_command("/usr/bin/whoami", sudoable=True)
result['failed'] = False
result['rebooted'] = True
return result
except Exception as e:
# a connection failure is fine here, we are waiting for it to reboot anyway
# reset it and move on
if isinstance(e, AnsibleConnectionFailure):
try:
self._connection.reset()
except AnsibleConnectionFailure:
pass
time.sleep(60)
raise TimeoutException("Timed out waiting for the host to reboot timeout seconds {timeout}".format(timeout=reboot_timeout))
| ansible-collection-dpu-ops-main | plugins/action/raw_reboot.py |
#!/usr/bin/python
###############################################################################
#
# SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
###############################################################################
import time
from datetime import datetime, timedelta
from ansible.errors import AnsibleConnectionFailure
from ansible.plugins.action import ActionBase
DOCUMENTATION = r'''
---
module: raw_upgrade
short_description: Module issuing a raw style upgrade of firmwares
version_added: "1.1.0"
description: Module for raw upgrades
options:
retries:
description: Maximum number of retries
delay:
description: Number of seconds to wait between retries
'''
EXAMPLES = r'''
- name: raw upgrade
raw_upgrade:
retries: 100
delay: 60
'''
class FailedActivationException(Exception):
pass
class UnfinishedActivationException(Exception):
pass
ACTIVATE_LINE = "busctl set-property xyz.openbmc_project.Software.BMC.Updater /xyz/openbmc_project/software/{} xyz.openbmc_project.Software.Activation RequestedActivation s xyz.openbmc_project.Software.Activation.RequestedActivations.Active"
VERIFY_LINE = "busctl get-property xyz.openbmc_project.Software.BMC.Updater /xyz/openbmc_project/software/{} xyz.openbmc_project.Software.Activation Activation"
class ActionModule(ActionBase):
def run(self, **kwargs):
result = super(ActionModule, self).run(kwargs)
failed = False
active = False
retries = int(self._task.args.get('retries', 100))
delay = int(self._task.args.get('delay', 60))
current_try = 0
image_lines = self._low_level_execute_command("ls --color=none -t /tmp/images/")['stdout_lines']
if len(image_lines) > 1:
raise FailedActivationException("More than one file is present in /tmp/images")
image_name = image_lines[0]
self._low_level_execute_command(ACTIVATE_LINE.format(image_name))
while current_try < retries:
verify_out = self._low_level_execute_command(VERIFY_LINE.format(image_name))['stdout']
if "Activation.Activations.Active" in verify_out:
active = True
break
if "Activation.Activations.Failed" in verify_out:
failed = True
break
current_try += 1
time.sleep(delay)
if failed:
raise FailedActivationException("Activation of firmware has failed")
if not active:
raise UnfinishedActivationException("Activation of firmware timed out and stayed in Activating state")
result['active'] = active
return result
| ansible-collection-dpu-ops-main | plugins/action/raw_upgrade.py |
# SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
###############################################################################
r'''
`get_rshim` filter implementation
'''
def get_rshim(bf2_devices, rshim):
"returns list of rshim devices"
return [f for f in bf2_devices if f['rshim'] == rshim]
class FilterModule:
"""Ansible filter `get_rshim`"""
def filters(self):
'return dict pointing at function'
return {'get_rshim': get_rshim,}
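# Example Jinja usage inside a playbook (added for illustration; assumes bf2_devices
# was gathered by the bf2_facts module elsewhere in this collection):
#   {{ bf2_devices | get_rshim('/dev/rshim0') }}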
| ansible-collection-dpu-ops-main | plugins/filter/rshim_filter.py |
#!/usr/bin/python
###############################################################################
#
# SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
###############################################################################
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
import sys
DOCUMENTATION = r'''
---
module: bf2_facts
short_description: Module for generating bf2 facts
version_added: "1.1.0"
description: MModule for generating bf2 facts
'''
EXAMPLES = r'''
- name: gather bf2 facts
bf2_facts:
'''
RETURN = r'''
ansible_facts:
description: Facts to add to ansible_facts.
returned: always
type: dict
contains:
'''
UNDEFINED = 'UNDEFINED'
import shlex
import subprocess
from ansible.module_utils.basic import AnsibleModule
# singleton, cache of mlxconfig, key is pci/mst dev, val is dict()
nvconfig_cache = dict()
lspci_cache = dict()
class CommandError(Exception):
"""
helper class for handling stderr failures
"""
def __init__(self, stderr):
self.stderr = stderr
def __str__(self):
return self.stderr
def execute(cmd):
"""
Executes a command, will raise an error if stderr is not clean
"""
if type(cmd) == str:
cmd = shlex.split(cmd)
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
try:
stdout, stderr = proc.communicate(input=None, timeout=15)
if proc.returncode != 0:
# if stderr:
raise CommandError(stderr)
except subprocess.TimeoutExpired:
proc.kill()
stdout, stderr = proc.communicate()
return stdout.decode('utf-8')
def get_lines(cmd):
return execute(cmd).rstrip().split('\n')
def get_first_result(results, key):
for r in results:
if key in r:
return r
return None
# return next(filter(lambda r: key in r, results))
def has_query_privhost():
lines = get_lines('mlxprivhost -h')
return get_first_result(lines, 'query') is not None
def get_rshim_output(rshim_path):
# File IO in the Popen call is unhappy w/ the special rshim files, so this call command is used
subprocess.call("echo 'DISPLAY_LEVEL 1' > {}/misc".format(rshim_path), shell=True)
lines = get_lines("cat {}/misc".format(rshim_path))
# add in the rshim slot for later use
lines.append("RSHIM_SLOT {}".format(rshim_path))
dev_name_line = get_first_result(lines, 'DEV_NAME')
full_dev_name = shlex.split(dev_name_line)[1]
return full_dev_name, lines
def get_mst_and_pci():
# get all the lines with BlueField2 since those are the cards
# Note that the -v flag will have 2 devices per card
# the second device will be in the form of device.1
# We discard the device.1's to not have duplicate devices
lines = get_lines('mst status -v')
# FIXME BlueField (1), BlueField3 ?
bf_lines = [l for l in lines if 'BlueField' in l]
# grab only the pcie device name
mst_and_pci = [tuple(l.split()[1:3]) for l in bf_lines]
# discard the devices with a period in the name
return [l for l in mst_and_pci if '.' not in l[0]]
def _parse_mlxconfig(lines):
"""
Input: lines of `mlxconfig -d .. q` output
Output: dict
"""
# in_hdr = True
ret = dict()
for l in lines:
# if in_hdr:
# if l.startswith('Configurations'):
# in_hdr = False
# continue
# if not l:
# continue
ary = re.split(r'\s+', l)
# print(repr(ary), file=sys.stderr)
# (x, hdr, val) = re.split(r'\s+', l)
if len(ary) >= 3 and ary[0] == '':
ret[ary[1]] = ary[2]
return ret
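# For reference (added comment): an mlxconfig line such as
#   "         INTERNAL_CPU_MODEL                 EMBEDDED_CPU(1)"
# splits on whitespace into ['', 'INTERNAL_CPU_MODEL', 'EMBEDDED_CPU(1)'] and is stored as
# {'INTERNAL_CPU_MODEL': 'EMBEDDED_CPU(1)'}.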
def get_mlxconfig(mst):
global nvconfig_cache
if mst in nvconfig_cache:
return nvconfig_cache[mst]
lines = get_lines("mlxconfig -d {} q".format(mst))
ret = _parse_mlxconfig(lines)
# needed for PRIS and ROY adapters:
if 'PCI_DOWNSTREAM_PORT_OWNER' in ret:
k = 'PCI_DOWNSTREAM_PORT_OWNER[4]'
lines = get_lines("mlxconfig -d {} q {}".format(mst, k))
r2 = _parse_mlxconfig(lines)
ret[k] = r2[k]
nvconfig_cache[mst] = ret
return(ret)
def get_mode(mst):
nvcfg = get_mlxconfig(mst)
# print(f"(get_mode: {nvcfg['INTERNAL_CPU_MODEL']})", file=sys.stderr)
# TODO what about NIC_MODE vs SNIC_MODE vs SEPARATED_MODE ?
v = nvcfg.get('INTERNAL_CPU_MODEL', None)
if v is not None:
return 'embedded' if v == 'EMBEDDED_CPU(1)' else 'separated'
else:
return UNDEFINED
def get_vpd(pci):
if pci in lspci_cache:
return lspci_cache[pci]
lines = get_lines("lspci -vvs {}".format(pci))
    rx = re.compile(r'^\s+\[(\w\w)\]\s[^:]+:\s(.*?)\s*$')
ret = dict()
for l in lines:
m = rx.search(l)
if m is None:
continue
ret[m.group(1)] = m.group(2)
lspci_cache[pci] = ret
return ret
def get_serial_number(pci):
# lines = get_lines("lspci -vvs {}".format(pci))
# line = get_first_result(lines, 'Serial number')
# if line is None:
# return UNDEFINED
# return line.split(":")[-1].strip()
vpd = get_vpd(pci)
return vpd.get('SN', UNDEFINED)
def get_part_number(pci):
vpd = get_vpd(pci)
return vpd.get('PN', UNDEFINED)
# lines = get_lines("lspci -vvs {}".format(pci))
# line = get_first_result(lines, 'Part number')
# if line is None:
# return UNDEFINED
# return line.split(":")[-1].strip()
def get_rshims_from_fs():
# the case of no rshims should return an empty list, not a list of 1 empty item
rshims = get_lines('find /dev -maxdepth 1 -name "rshim*"')
if len(rshims) == 1 and not rshims[0]:
return []
return rshims
def get_rshim_from_pci(rshim_outs, pci):
if not rshim_outs:
return None
# Split on the dot of the pci as the key in the rshim_outs
# has a different dot version (62:00.0 vs 62:00.2)
rshim_key = pci.split('.')[0]
# There may not be rshim's on the host for a given card, so not finding
# a result just means it is not found
key = get_first_result(rshim_outs.keys(), rshim_key)
if key is None:
return []
return rshim_outs.get(key)
def get_mac_from_rshim_output(rshim_out):
line = get_first_result(rshim_out, 'PEER_MAC')
return shlex.split(line)[1]
def get_rshim_slot_from_rshim_output(rshim_out):
line = get_first_result(rshim_out, 'RSHIM_SLOT')
return shlex.split(line)[1]
def get_restriction_level(mst):
lines = get_lines("mlxprivhost -d {} q".format(mst))
line = get_first_result(lines, 'level')
return line.split(":")[1].strip().lower()
def get_versions(mst):
lines = get_lines("mlxfwmanager -d {}".format(mst))
versions = {}
for line in lines:
for phrase in ['FW', 'PXE', 'UEFI', 'UNKNOWN_ROM']:
if phrase in line:
# Some of the UEFI Virtio have 3 words before the version so this
# takes that into consideration
split = shlex.split(line)
if (split[1] == 'Virtio'):
key = "{} {} {}".format(split[0], split[1], split[2])
versions[key] = split[3]
else:
versions[split[0]] = split[1]
return versions
def run_module():
ansible_facts = {'bf2_devices': []}
warnings = []
module = AnsibleModule(
argument_spec={},
supports_check_mode=True
)
try:
try:
execute('mst start')
except FileNotFoundError:
# if mst is not installed on the machine, popen will throw this exception,
# so it can be handled gracefully
module.exit_json(ansible_facts=ansible_facts,
warnings="could not find the mst command, ensure that mlnx-ofed-all is installed")
# validate if mlxprivhost can be used for query mode. some versions do not have the query flag
can_query_privhost = has_query_privhost()
rshims = get_rshims_from_fs()
# rshim output will contain a key to the pcie device name with info inside it
rshim_outs = {}
# get all the rshim's on a single machine
for rshim_path in rshims:
full_dev_name, lines = get_rshim_output(rshim_path)
rshim_outs[full_dev_name] = lines
for mst, pci in get_mst_and_pci():
rshim_out = get_rshim_from_pci(rshim_outs, pci)
permission = get_restriction_level(mst) if can_query_privhost else UNDEFINED
if permission == 'privileged':
# many items only work in privileged mode
ownership = get_mode(mst)
versions = get_versions(mst)
else:
ownership = UNDEFINED
versions = UNDEFINED
ansible_facts['bf2_devices'].append({
'mst': mst,
'pci': pci,
'ownership': ownership,
'permission': permission,
'serial_number': get_serial_number(pci),
'part_number': get_part_number(pci),
# Sort this out once the mac is not all 00's
# 'mac': get_mac_from_rshim_output(rshim_out) if rshim_out else UNDEFINED,
'rshim': get_rshim_slot_from_rshim_output(rshim_out) if rshim_out else UNDEFINED,
'versions': versions,
'nvconfig': nvconfig_cache.get(mst, {})
})
module.exit_json(ansible_facts=ansible_facts, warnings="")
except Exception as e:
module.fail_json(msg='An unhandled error occured', exception=e)
def main():
run_module()
if __name__ == '__main__':
main()
| ansible-collection-dpu-ops-main | plugins/modules/bf2_facts.py |
#!/usr/bin/python
###############################################################################
#
# SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
###############################################################################
import bf2_facts
import shlex
import unittest
from unittest.mock import patch
def generate_rshim_output(mock_execute, pci, rshim, mac='00:00:00:00:00:00'):
example = """DISPLAY_LEVEL 1 (0:basic, 1:advanced, 2:log)
BOOT_MODE 1 (0:rshim, 1:emmc, 2:emmc-boot-swap)
BOOT_TIMEOUT 100 (seconds)
DROP_MODE 0 (0:normal, 1:drop)
SW_RESET 0 (1: reset)
DEV_NAME pcie-0000:{}.2
DEV_INFO BlueField-2(Rev 1)
BOOT_RESET_SKIP 0 (1: skip)
PEER_MAC {} (rw)
PXE_ID 0x00000000 (rw)
VLAN_ID 0 0 (rw)
""".format(pci, mac)
mock_execute.return_value = example
return bf2_facts.get_rshim_output(rshim)
class Test(unittest.TestCase):
@patch('bf2_facts.execute')
def test_has_query_privhost_new_version(self, mock_execute):
example = """usage: mlxprivhost [-h] [-v] --device DEVICE [--disable_rshim] [--disable_tracer] [--disable_counter_rd] [--disable_port_owner] {r,restrict,p,privilege,q,query}
restrict or privilege host
Note: New configurations takes effect immediately.
Note: privileged host - host has all supported privileges.
restricted host - host is not allowed to modify global
per port/parameters or access other hosts parametersis.
optional arguments:
-h, --help show this help message and exit
-v, --version show program's version number and exit
Options:
--device DEVICE, -d DEVICE
Device to work with.
--disable_rshim When TRUE, the host does not have an RSHIM function
to access the embedded CPU registers
--disable_tracer When TRUE, the host will not be allowed to own the Tracer
--disable_counter_rd When TRUE, the host will not be allowed to read Physical port counters
--disable_port_owner When TRUE, the host will not be allowed to be Port Owner
Commands:
{r,restrict,p,privilege,q,query}
restrict: Set host 1 (ARM) privileged, host 0 (x86_64) restricted.
privilege: Set host 1 (ARM) privileged, host 0 (x86_64) privileged
(back to default).
query: Query current host configuration.
"""
mock_execute.return_value = example
self.assertTrue(bf2_facts.has_query_privhost())
@patch('bf2_facts.execute')
def test_has_query_privhost_old_version(self, mock_execute):
example = """usage: mlxprivhost [-h] [-v] --device DEVICE [--disable_rshim] [--disable_tracer] [--disable_counter_rd] [--disable_port_owner] {r,restrict,p,privilege}
restrict or privilege host
Note: New configurations takes effect immediately.
Note: privileged host - host has all supported privileges.
restricted host - host is not allowed to modify global
per port/parameters or access other hosts parametersis.
optional arguments:
-h, --help show this help message and exit
-v, --version show program's version number and exit
Options:
--device DEVICE, -d DEVICE
Device to work with.
--disable_rshim When TRUE, the host does not have an RSHIM function
to access the embedded CPU registers
--disable_tracer When TRUE, the host will not be allowed to own the Tracer
--disable_counter_rd When TRUE, the host will not be allowed to read Physical port counters
--disable_port_owner When TRUE, the host will not be allowed to be Port Owner
Commands:
{r,restrict,p,privilege}
restrict: Set host 1 (ARM) privileged, host 0 (x86_64) restricted.
privilege: Set host 1 (ARM) privileged, host 0 (x86_64) privileged
(back to default).
"""
mock_execute.return_value = example
self.assertFalse(bf2_facts.has_query_privhost())
@patch('bf2_facts.execute')
@patch('subprocess.call')
def test_get_rshim_output(self, call, mock_execute):
example = """DISPLAY_LEVEL 1 (0:basic, 1:advanced, 2:log)
BOOT_MODE 1 (0:rshim, 1:emmc, 2:emmc-boot-swap)
BOOT_TIMEOUT 100 (seconds)
DROP_MODE 0 (0:normal, 1:drop)
SW_RESET 0 (1: reset)
DEV_NAME pcie-0000:e2:00.2
DEV_INFO BlueField-2(Rev 1)
BOOT_RESET_SKIP 0 (1: skip)
PEER_MAC 00:00:00:00:00:00 (rw)
PXE_ID 0x00000000 (rw)
VLAN_ID 0 0 (rw)
"""
mock_execute.return_value = example
actual_rshim_slot = '/dev/rshim100'
key, val = generate_rshim_output(mock_execute, 'e2:00', actual_rshim_slot)
self.assertEqual(key, 'pcie-0000:e2:00.2')
self.assertEqual(len(val), 12)
rshim_slot = shlex.split([l for l in val if 'RSHIM_SLOT' in l][0])[1]
self.assertEqual(rshim_slot, actual_rshim_slot)
@patch('bf2_facts.execute')
def test_get_mst_and_pci(self, mock_execute):
example = """MST modules:
------------
MST PCI module is not loaded
MST PCI configuration module loaded
PCI devices:
------------
DEVICE_TYPE MST PCI RDMA NET NUMA
BlueField2(rev:1) /dev/mst/mt41686_pciconf0.1 e2:00.1 mlx5_1 net-ens7f1 1
BlueField2(rev:1) /dev/mst/mt41686_pciconf0 e2:00.0 mlx5_0 net-ens7f0 1
"""
mock_execute.return_value = example
mst_and_pci = bf2_facts.get_mst_and_pci()
self.assertEqual(len(mst_and_pci), 1)
self.assertEqual(mst_and_pci[0][0], '/dev/mst/mt41686_pciconf0')
self.assertEqual(mst_and_pci[0][1], 'e2:00.0')
@patch('bf2_facts.execute')
def test_get_mode(self, mock_execute):
example = """
Device #1:
----------
Device type: BlueField2
Name: MBF2M516A-EEEO_Ax
Description: BlueField-2 E-Series SmartNIC 100GbE/EDR VPI Dual-Port QSFP56; PCIe Gen4 x16; Crypto Enabled; 16GB on-board DDR; 1GbE OOB management; FHHL
Device: /dev/mst/mt41686_pciconf0
Configurations: Next Boot
MEMIC_BAR_SIZE 0
MEMIC_SIZE_LIMIT _256KB(1)
HOST_CHAINING_MODE DISABLED(0)
HOST_CHAINING_CACHE_DISABLE False(0)
HOST_CHAINING_DESCRIPTORS Array[0..7]
HOST_CHAINING_TOTAL_BUFFER_SIZE Array[0..7]
INTERNAL_CPU_MODEL EMBEDDED_CPU(1)
_INTERNAL_CPU_MODEL SEPARATED_HOST(0)
FLEX_PARSER_PROFILE_ENABLE 0
PROG_PARSE_GRAPH False(0)
FLEX_IPV4_OVER_VXLAN_PORT 0
ROCE_NEXT_PROTOCOL 254
ESWITCH_HAIRPIN_DESCRIPTORS Array[0..7]
ESWITCH_HAIRPIN_TOT_BUFFER_SIZE Array[0..7]
PF_BAR2_SIZE 0
NON_PREFETCHABLE_PF_BAR False(0)
VF_VPD_ENABLE False(0)
PER_PF_NUM_SF False(0)
LINK_TYPE_P1 ETH(2)
LINK_TYPE_P2 ETH(2)
"""
mock_execute.return_value = example
mode = bf2_facts.get_mode('/dev/mst/mt41686_pciconf0')
self.assertEqual(mode, 'embedded')
example = """
Device #1:
----------
Device type: BlueField2
Name: MBF2M516A-EEEO_Ax
Description: BlueField-2 E-Series SmartNIC 100GbE/EDR VPI Dual-Port QSFP56; PCIe Gen4 x16; Crypto Enabled; 16GB on-board DDR; 1GbE OOB management; FHHL
Device: /dev/mst/mt41686_pciconf0.1
Configurations: Next Boot
MEMIC_BAR_SIZE 0
INTERNAL_CPU_MODEL SEPARATED_HOST(0)
"""
mock_execute.return_value = example
mode = bf2_facts.get_mode('/dev/mst/mt41686_pciconf0.1')
self.assertEqual(mode, 'separated')
@patch('bf2_facts.execute')
def test_get_part_and_serial_number(self, mock_execute):
example = """e2:00.0 Ethernet controller: Mellanox Technologies MT42822 BlueField-2 integrated ConnectX-6 Dx network controller (rev 01)
Subsystem: Mellanox Technologies MT42822 BlueField-2 integrated ConnectX-6 Dx network controller
Physical Slot: 7-1
Control: I/O- Mem+ BusMaster+ SpecCycle- MemWINV- VGASnoop- ParErr+ Stepping- SERR+ FastB2B- DisINTx+
Status: Cap+ 66MHz- UDF- FastB2B- ParErr- DEVSEL=fast >TAbort- <TAbort- <MAbort- >SERR- <PERR- INTx-
Latency: 0
Interrupt: pin A routed to IRQ 229
NUMA node: 1
Region 0: Memory at 47ea2000000 (64-bit, prefetchable) [size=32M]
Region 2: Memory at 47ea1000000 (64-bit, prefetchable) [size=8M]
Expansion ROM at <ignored> [disabled]
Capabilities: [48] Vital Product Data
Product Name: BlueField-2 DPU 100GbE/EDR/HDR100 VPI Dual-Port QSFP56, Crypto Enabled, 16GB on-board DDR, 1GbE OOB management, Tall Bracket
Read-only fields:
[PN] Part number: MBF2M516A-EEEOT
[EC] Engineering changes: A4
[V2] Vendor specific: MBF2M516A-EEEOT
[SN] Serial number: MT2050X00614
[V3] Vendor specific: 9c20a1608d3feb118000043f72ff4c16
[VA] Vendor specific: MLX:MN=MLNX:CSKU=V2:UUID=V3:PCI=V0:MODL=BF2M516A
[V0] Vendor specific: PCIeGen4 x16
[RV] Reserved: checksum good, 1 byte(s) reserved
End
"""
mock_execute.return_value = example
bf2_facts.lspci_cache = dict() # need to clean it up
serial_number = bf2_facts.get_serial_number('e2:00.0')
self.assertEqual('MT2050X00614', serial_number)
part_number = bf2_facts.get_part_number('e2:00.0')
self.assertEqual('MBF2M516A-EEEOT', part_number)
@patch('bf2_facts.execute')
def test_no_vpd(self, mock_execute):
example = """e2:00.0 Ethernet controller: Mellanox Technologies MT42822 BlueField-2 integrated ConnectX-6 Dx network controller (rev 01)
Subsystem: Mellanox Technologies MT42822 BlueField-2 integrated ConnectX-6 Dx network controller
Physical Slot: 7-1
Control: I/O- Mem+ BusMaster+ SpecCycle- MemWINV- VGASnoop- ParErr+ Stepping- SERR+ FastB2B- DisINTx+
Status: Cap+ 66MHz- UDF- FastB2B- ParErr- DEVSEL=fast >TAbort- <TAbort- <MAbort- >SERR- <PERR- INTx-
Latency: 0
Interrupt: pin A routed to IRQ 229
NUMA node: 1
Region 0: Memory at 47ea2000000 (64-bit, prefetchable) [size=32M]
Region 2: Memory at 47ea1000000 (64-bit, prefetchable) [size=8M]
Expansion ROM at <ignored> [disabled]
Capabilities: [48] Vital Product Data
End
"""
mock_execute.return_value = example
bf2_facts.lspci_cache = dict() # need to clean it up
serial_number = bf2_facts.get_serial_number('e2:00.0')
self.assertEqual('UNDEFINED', serial_number)
part_number = bf2_facts.get_part_number('e2:00.0')
self.assertEqual('UNDEFINED', part_number)
@patch('bf2_facts.execute')
def test_get_rshims_from_fs(self, mock_execute):
example = """/dev/rshim0
/dev/rshim1
/dev/rshim100
"""
mock_execute.return_value = example
rshims = bf2_facts.get_rshims_from_fs()
self.assertEqual(3, len(rshims))
@patch('bf2_facts.execute')
@patch('subprocess.call')
def test_get_rshim_from_pci(self, call, mock_execute):
rshim_outs = {}
pci_1 = 'aa:00'
pci_2 = 'bb:00'
for k,v in [(pci_1, '/dev/rshim1'), (pci_2, '/dev/rshim2')]:
name, lines = generate_rshim_output(mock_execute, k, v)
rshim_outs[name] = lines
rshim_out = bf2_facts.get_rshim_from_pci(rshim_outs, pci_1)
pci = [l for l in rshim_out if 'DEV_NAME' in l][0]
self.assertTrue(pci_1 in pci)
# empty case
self.assertIsNone(bf2_facts.get_rshim_from_pci([], pci_1))
@patch('bf2_facts.execute')
@patch('subprocess.call')
def test_get_mac_from_rshim_output(self, call, mock_execute):
# first get some rshim_out data populated
rshim_outs = {}
pci = 'aa:00'
mac = '01:01:01:01:01:01'
name, lines = generate_rshim_output(mock_execute, pci, '/dev/rshim0', mac=mac)
rshim_outs[name] = lines
rshim_out = bf2_facts.get_rshim_from_pci(rshim_outs, pci)
out_mac = bf2_facts.get_mac_from_rshim_output(rshim_out)
self.assertEqual(mac, out_mac)
@patch('bf2_facts.execute')
@patch('subprocess.call')
def test_get_rshim_slot_from_rshim_output(self, call, mock_execute):
# first get some rshim_out data populated
rshim_outs = {}
pci = 'aa:00'
rshim_slot = '/dev/rshim100'
name, lines = generate_rshim_output(mock_execute, pci, rshim_slot)
rshim_outs[name] = lines
rshim_out = bf2_facts.get_rshim_from_pci(rshim_outs, pci)
out_rshim_slot = bf2_facts.get_rshim_slot_from_rshim_output(rshim_out)
self.assertEqual(rshim_slot, out_rshim_slot)
@patch('bf2_facts.execute')
def test_get_restriction_level(self, mock_execute):
example = """Current device configurations:
------------------------------
level : PRIVILEGED
Port functions status:
-----------------------
disable_rshim : FALSE
disable_tracer : FALSE
disable_port_owner : FALSE
disable_counter_rd : FALSE
"""
mock_execute.return_value = example
level = bf2_facts.get_restriction_level('/dev/mst/mt41686_pciconf0')
self.assertEqual(level, 'privileged')
@patch('bf2_facts.execute')
def test_get_versions(self, mock_execute):
example = """Querying Mellanox devices firmware ...
Device #1:
----------
Device Type: BlueField2
Part Number: MBF2M516A-EEEO_Ax
Description: BlueField-2 E-Series SmartNIC 100GbE/EDR VPI Dual-Port QSFP56; PCIe Gen4 x16; Crypto Enabled; 16GB on-board DDR; 1GbE OOB management; FHHL
PSID: MT_0000000559
PCI Device Name: /dev/mst/mt41686_pciconf0
Base MAC: 043f72a45a9c
Versions: Current Available
FW 24.29.2008 N/A
PXE 3.6.0205 N/A
UEFI 14.22.0019 N/A
UNKNOWN_ROM 22.1.0011 N/A
UEFI Virtio x 1.2.3.4
Status: No matching image found
"""
mock_execute.return_value = example
versions = bf2_facts.get_versions('/dev/mst/mt41686_pciconf0')
self.assertEqual(versions['FW'], '24.29.2008')
self.assertEqual(versions['PXE'], '3.6.0205')
self.assertEqual(versions['UEFI'], '14.22.0019')
self.assertEqual(versions['UNKNOWN_ROM'], '22.1.0011')
self.assertEqual(versions['UEFI Virtio x'], '1.2.3.4')
if __name__ == '__main__':
unittest.main()
| ansible-collection-dpu-ops-main | plugins/modules/bf2_facts_test.py |
#
# SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
# Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
from setuptools import setup, find_packages
import pathlib
here = pathlib.Path(__file__).parent.resolve()
long_description = (here / 'README.md').read_text(encoding='utf-8')
setup(
name='lddl',
version='0.1.0',
description=
'Language Datasets and Data Loaders for NVIDIA Deep Learning Examples',
long_description=long_description,
long_description_content_type='text/markdown',
url='github.com/NVIDIA/DeepLearningExamples/tools/lddl',
author='Shang Wang',
author_email='[email protected]',
classifiers=[
'Development Status :: 3 - Alpha',
'Programming Language :: Python :: 3 :: Only',
],
packages=find_packages(),
python_requires='>=3.6',
install_requires=[
'dask[complete]==2021.7.1',
'distributed==2021.7.1',
'dask-mpi==2021.11.0',
'bokeh==2.4.3',
'pyarrow>=4.0.1',
'mpi4py==3.1.3',
'transformers==4.16.2',
'wikiextractor==3.0.6',
'news-please @ git+https://github.com/fhamborg/news-please.git@3b7d9fdfeb148ef73f393bb2f2557e6bd878a09f',
'cchardet==2.1.7',
'awscli>=1.22.55',
'wikiextractor @ git+https://github.com/attardi/[email protected]',
'gdown==4.5.3',
],
entry_points={
'console_scripts': [
'download_wikipedia=lddl.download.wikipedia:console_script',
'download_books=lddl.download.books:console_script',
'download_common_crawl=lddl.download.common_crawl:console_script',
'download_open_webtext=lddl.download.openwebtext:console_script',
'preprocess_bert_pretrain=lddl.dask.bert.pretrain:console_script',
'preprocess_bart_pretrain=lddl.dask.bart.pretrain:console_script',
'balance_dask_output=lddl.dask.load_balance:console_script',
'generate_num_samples_cache=lddl.dask.load_balance:generate_num_samples_cache',
],
},
)
| LDDL-main | setup.py |
#
# SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
# Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
import argparse
import os
import random
import numpy as np
import matplotlib.pyplot as plt
from lddl.utils import expand_outdir_and_mkdir, get_all_files_paths_under
def collect_data(args):
npzs = [
fp for fp in get_all_files_paths_under(args.in_dir)
if 'lens_' in fp and os.path.splitext(fp)[1] == '.npz'
]
min_lens, max_lens = {}, {}
seq_len_hist, padded_zero_hist = None, None
for npz in npzs:
rank = int(os.path.splitext(os.path.basename(npz))[0].split('_')[1])
with np.load(npz) as data:
min_lens[rank] = data['min_lens']
max_lens[rank] = data['max_lens']
if seq_len_hist is None:
seq_len_hist = data['seq_len_hist']
else:
seq_len_hist += data['seq_len_hist']
if padded_zero_hist is None:
padded_zero_hist = data['padded_zero_hist']
else:
padded_zero_hist += data['padded_zero_hist']
assert max_lens[rank].shape == min_lens[rank].shape
return min_lens, max_lens, seq_len_hist, padded_zero_hist
def plot_rank_diff(args, min_lens, max_lens):
""" Make sure the diff between min seq lens and max seq lens is smaller than
the bin size.
  min_lens and max_lens are dicts that map each rank number to the np.array of
  min and max seq lens over all training iterations.
"""
rank_arrays = []
diffs = []
ranks = list(sorted(min_lens.keys()))
for rank in ranks:
diffs.append(max_lens[rank] - min_lens[rank])
rank_arrays.append(np.full(min_lens[rank].shape, rank, dtype=np.uint16))
rank_arrays = np.concatenate(rank_arrays)
diffs = np.concatenate(diffs)
plt.scatter(rank_arrays, diffs, s=0.1)
plt.xlabel('rank')
plt.xticks(ranks)
plt.ylabel('diff')
plt.yticks(np.arange(0, diffs.max() + 1, 1))
plt.title('rank vs. diff')
plt.grid()
plt.savefig(os.path.join(args.out_dir, 'rank_dist.png'))
plt.close()
def plot_min_max_lens(args, min_lens, max_lens):
""" Make sure the min and max seq lens are limited by the bin size.
"""
ranks = list(sorted(min_lens.keys()))
for rank in ranks:
plt.scatter(min_lens[rank], max_lens[rank], s=0.1)
plt.xlabel('min_lens')
plt.xticks(np.arange(0, min_lens[rank].max() + args.bin_size,
args.bin_size))
plt.ylabel('max_lens')
plt.yticks(np.arange(0, max_lens[rank].max() + args.bin_size,
args.bin_size))
plt.title('min_lens vs. max_lens')
plt.grid()
plt.savefig(os.path.join(args.out_dir, 'min_max_lens_{}.png'.format(rank)))
plt.close()
def plot_global_diff(args, min_lens, max_lens):
""" Make sure that each rank chooses the same bin in each iteration.
"""
ranks = list(sorted(min_lens.keys()))
global_min_lens = np.stack([min_lens[rank] for rank in ranks], axis=-1)
global_max_lens = np.stack([max_lens[rank] for rank in ranks], axis=-1)
diffs = global_max_lens.max(axis=-1) - global_min_lens.min(axis=-1)
plt.scatter(np.full(diffs.shape, 0, dtype=np.uint8), diffs, s=0.1)
plt.xticks([0])
plt.ylabel('diff')
plt.yticks(np.arange(0, diffs.max() + 1, 1))
plt.title('global diff')
plt.grid()
plt.savefig(os.path.join(args.out_dir, 'global_diff.png'))
plt.close()
def plot_seq_len_hist(args, seq_len_hist):
hist = []
xticks = []
for start in range(1, seq_len_hist.shape[0], args.seq_len_hist_bin):
n = 0
    # Clamp to the histogram length so the last (possibly partial) bin does
    # not index past the end of seq_len_hist.
    for seq_len in range(start,
                         min(start + args.seq_len_hist_bin,
                             seq_len_hist.shape[0])):
      n += seq_len_hist[seq_len]
hist.append(n)
xticks.append('{}-{}'.format(start, start + args.seq_len_hist_bin - 1))
plt.figure(figsize=(20, 5))
plt.bar(xticks, hist)
plt.xlabel('seq_lens')
plt.ylabel('# Samples')
plt.title('Sequence Length Histogram')
plt.grid()
plt.savefig(os.path.join(args.out_dir, 'seq_len_hist.png'))
plt.close()
def plot_padded_zero_hist(args, padded_zero_hist):
plt.bar(np.arange(0, len(padded_zero_hist)), padded_zero_hist)
plt.xlabel('# zeros in a sequence')
plt.ylabel('# samples')
plt.title('# zeros in a sequence vs. # samples')
plt.grid()
plt.savefig(os.path.join(args.out_dir, 'padded_zero_hist.png'))
plt.close()
def hist_sum(hist):
s = 0
for v in range(hist.shape[0]):
s += v * hist[v]
return s
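# Illustrative example (not part of the original script): hist_sum treats the
# array index as the value and the array entry as its count, i.e. it returns
# sum(v * hist[v]). For hist = np.array([0, 2, 0, 1]) -- two sequences of
# length 1 and one of length 3 -- hist_sum(hist) == 1 * 2 + 3 * 1 == 5.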
def calculate_padded_zero_ratio(padded_zero_hist, seq_len_hist):
num_zeros = hist_sum(padded_zero_hist)
num_tokens = hist_sum(seq_len_hist)
print('padded_zeros : tokens = {} : {} = {} : 1'.format(
num_zeros, num_tokens, num_zeros / num_tokens))
def main(args):
args.out_dir = expand_outdir_and_mkdir(args.out_dir)
min_lens, max_lens, seq_len_hist, padded_zero_hist = collect_data(args)
plot_rank_diff(args, min_lens, max_lens)
plot_min_max_lens(args, min_lens, max_lens)
plot_global_diff(args, min_lens, max_lens)
plot_seq_len_hist(args, seq_len_hist)
plot_padded_zero_hist(args, padded_zero_hist)
calculate_padded_zero_ratio(padded_zero_hist, seq_len_hist)
def attach_args(parser=argparse.ArgumentParser()):
parser.add_argument('--in-dir', type=str, required=True)
parser.add_argument('--out-dir', type=str, default="./fig")
parser.add_argument('--bin-size', type=int, default=32)
parser.add_argument('--seq-len-hist-bin', type=int, default=32)
return parser
if __name__ == "__main__":
main(attach_args().parse_args())
| LDDL-main | benchmarks/make_training_seqlen_plots.py |
#
# SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
# Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
import argparse
import logging
import numpy as np
import os
import random
import time
import torch
from transformers import BertTokenizerFast
from lddl.torch import get_bert_pretrain_data_loader
from lddl.torch.utils import barrier, get_rank
from lddl.utils import mkdir
def get_batch_seq_lens(attention_mask):
return attention_mask.sum(dim=1).int()
class AverageMeter:
"""
Computes and stores the average and current value
"""
def __init__(self, warmup=0, keep=False):
self.reset()
self.warmup = warmup
self.keep = keep
def reset(self):
self.val = 0
self.avg = 0
self.max = float('-inf')
self.min = float('inf')
self.sum = 0
self.count = 0
self.iters = 0
self.vals = []
def update(self, val, n=1):
self.iters += 1
self.val = val
if self.iters > self.warmup:
self.sum += val * n
self.max = max(val, self.max)
self.min = min(val, self.min)
self.count += n
self.avg = self.sum / self.count
if self.keep:
self.vals.append(val)
class Histogram:
"""
Computes and stores the histogram of values.
"""
def __init__(self):
self.hist = np.zeros((1,), dtype=np.uint64)
def update(self, val, n=1):
if val >= self.hist.shape[0]:
new_hist = np.zeros((val + 1,), dtype=np.uint64)
new_hist[:self.hist.shape[0]] = self.hist[:]
self.hist = new_hist
self.hist[val] += n
def update_with_tensor(self, t):
for v in t.flatten().tolist():
self.update(v)
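# Illustrative sketch (not part of the original benchmark): the histogram grows
# on demand, so only the largest observed value determines its length, e.g.
#   h = Histogram()
#   h.update(2); h.update(2); h.update(5)
#   # h.hist is now array([0, 0, 2, 0, 0, 1], dtype=uint64)
# and update_with_tensor(torch.tensor([2, 5])) applies update() elementwise.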
def main(args):
torch.cuda.set_device(args.local_rank)
world_size = int(os.getenv('WORLD_SIZE', 1))
if world_size > 1:
torch.distributed.init_process_group(
backend='nccl',
init_method='env://',
)
if get_rank() == 0 and args.seq_len_dir is not None:
mkdir(args.seq_len_dir)
loader = get_bert_pretrain_data_loader(
args.path,
local_rank=args.local_rank,
shuffle_buffer_size=args.shuffle_buffer_size,
shuffle_buffer_warmup_factor=args.shuffle_buffer_warmup_factor,
vocab_file=args.vocab_file,
data_loader_kwargs={
'batch_size': args.batch_size,
'num_workers': args.workers,
'prefetch_factor': args.prefetch
},
mlm_probability=args.mlm_probability,
base_seed=args.seed,
log_dir=args.log_dir,
log_level=getattr(logging, args.log_level),
return_raw_samples=args.debug,
start_epoch=args.start_epoch,
sequence_length_alignment=args.sequence_length_alignment,
ignore_index=args.ignore_index,
)
if os.path.isfile(args.vocab_file):
test_tokenizer = BertTokenizerFast(args.vocab_file)
else:
test_tokenizer = BertTokenizerFast.from_pretrained(args.vocab_file)
meter = AverageMeter(warmup=args.warmup)
lens_shape = (args.epochs, min(len(loader), args.iters_per_epoch))
min_lens, max_lens, batch_sizes, padded_lens = (
np.zeros(lens_shape, dtype=np.uint16),
np.zeros(lens_shape, dtype=np.uint16),
np.zeros(lens_shape, dtype=np.uint16),
np.zeros(lens_shape, dtype=np.uint16),
)
seq_len_hist = Histogram()
padded_zero_hist = Histogram()
for epoch in range(args.start_epoch, args.start_epoch + args.epochs):
barrier()
epoch_timer_start = time.time()
batch_timer_start = time.time()
total_samples = 0
for i, data in enumerate(loader):
if i >= args.iters_per_epoch:
break
if not args.debug:
(input_ids, token_type_ids, attention_mask, labels,
next_sentence_labels) = (
data['input_ids'],
data['token_type_ids'],
data['attention_mask'],
data['labels'],
data['next_sentence_labels'],
)
batch_timer_stop = time.time()
elapsed = batch_timer_stop - batch_timer_start
meter.update(elapsed)
if args.debug:
current_samples = len(data[0]) * world_size
else:
current_samples = input_ids.size(0) * world_size
assert input_ids.size() == token_type_ids.size()
assert input_ids.size() == attention_mask.size()
assert input_ids.size() == labels.size()
assert next_sentence_labels.dim() == 1
assert input_ids.size(0) == next_sentence_labels.size(0)
seq_lens = get_batch_seq_lens(attention_mask)
seq_len_hist.update_with_tensor(seq_lens)
(
min_lens[epoch - args.start_epoch, i],
max_lens[epoch - args.start_epoch, i],
) = seq_lens.min(), seq_lens.max()
batch_sizes[epoch - args.start_epoch, i] = input_ids.size(0)
padded_lens[epoch - args.start_epoch, i] = input_ids.size(1)
padded_zero_hist.update_with_tensor(input_ids.size(1) - seq_lens)
total_samples += current_samples
current_throughput = current_samples / elapsed
if (i + 1) % args.log_freq == 0 and get_rank() == 0:
avg_throughput = total_samples / meter.sum
print('avg_throughput={}, avg_latency={} ms, '
'min_latency={} ms, max_latency={} ms, '
'current_throughput={}, current_latency={} ms'.format(
avg_throughput,
meter.avg * 1000,
meter.min * 1000,
meter.max * 1000,
current_throughput,
elapsed * 1000,
))
if args.debug:
print('len(data[0])={}'.format(len(data[0])))
print('sample=({} <SEP> {} - {})'.format(
data[0][0],
data[1][0],
data[2][0],
))
else:
print("Min length={} Max length={} Diff={}".format(
min_lens[epoch - args.start_epoch, i],
max_lens[epoch - args.start_epoch, i],
max_lens[epoch - args.start_epoch, i] -
min_lens[epoch - args.start_epoch, i],
))
print('input_ids.size()={}'.format(input_ids.size()))
print('input_ids[0]={}'.format(input_ids[0]))
print('convert_ids_to_tokens(input_ids[0])={}'.format(
test_tokenizer.convert_ids_to_tokens(input_ids[0].tolist())))
print('token_type_ids[0]={}'.format(token_type_ids[0]))
print('attention_mask[0]={}'.format(attention_mask[0]))
print('labels[0]={}'.format(labels[0]))
print('next_sentence_labels[0]={}'.format(next_sentence_labels[0]))
mask = labels[0] != args.ignore_index
input_ids[0, mask] = labels[0, mask]
print('original sequence={}'.format(
test_tokenizer.convert_ids_to_tokens(input_ids[0].tolist())))
barrier()
batch_timer_start = time.time()
epoch_timer_stop = time.time()
epoch_elapsed = epoch_timer_stop - epoch_timer_start
if args.local_rank == 0:
avg_throughput = total_samples / meter.sum
print('epoch={}, epoch_elapsed={}, avg_throughput={}, '
'total_samples={}'.format(
epoch,
epoch_elapsed,
avg_throughput,
total_samples,
))
assert meter.iters == min(len(loader), args.iters_per_epoch)
meter.reset()
if args.seq_len_dir is not None:
# Save the sequence lengths to file
np.savez_compressed(
os.path.join(args.seq_len_dir, 'lens_{}.npz'.format(get_rank())),
min_lens=min_lens,
max_lens=max_lens,
batch_sizes=batch_sizes,
padded_lens=padded_lens,
seq_len_hist=seq_len_hist.hist,
padded_zero_hist=padded_zero_hist.hist,
)
def attach_args(parser=argparse.ArgumentParser()):
parser.add_argument('--path', type=str, required=True)
parser.add_argument('--batch-size', type=int, default=64)
parser.add_argument('--workers', type=int, default=4)
parser.add_argument('--warmup', type=int, default=0)
parser.add_argument('--epochs', type=int, default=2)
parser.add_argument('--iters-per-epoch', type=int, default=float('inf'))
parser.add_argument('--prefetch', type=int, default=2)
parser.add_argument(
'--local_rank',
type=int,
default=os.getenv('LOCAL_RANK', 0),
)
parser.add_argument('--mlm-probability', type=float, default=0.15)
parser.add_argument('--shuffle-buffer-size', type=int, default=16384)
parser.add_argument('--shuffle-buffer-warmup-factor', type=int, default=16)
parser.add_argument('--vocab-file', type=str, required=True)
parser.add_argument('--seed', type=int, default=127)
parser.add_argument('--start-epoch', type=int, default=0)
parser.add_argument('--debug', action='store_true', default=False)
parser.add_argument('--log-freq', type=int, default=1000)
parser.add_argument('--log-dir', type=str, default=None)
parser.add_argument(
'--log-level',
type=str,
choices=['CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG'],
default='WARNING',
)
parser.add_argument('--seq-len-dir', type=str, default=None)
parser.add_argument('--sequence-length-alignment', type=int, default=8)
parser.add_argument('--ignore-index', type=int, default=-1)
return parser
if __name__ == '__main__':
main(attach_args().parse_args())
| LDDL-main | benchmarks/torch_train.py |
#
# SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
# Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
import argparse
import logging
import numpy as np
import os
import time
from transformers import BertTokenizerFast
import paddle.distributed as dist
from lddl.paddle import get_bert_pretrain_data_loader
from lddl.paddle.utils import barrier, get_rank, get_world_size
from lddl.utils import mkdir
def get_batch_seq_lens(attention_mask):
return attention_mask.sum(axis=1)
class AverageMeter:
"""
Computes and stores the average and current value
"""
def __init__(self, warmup=0, keep=False):
self.reset()
self.warmup = warmup
self.keep = keep
def reset(self):
self.val = 0
self.avg = 0
self.max = float('-inf')
self.min = float('inf')
self.sum = 0
self.count = 0
self.iters = 0
self.vals = []
def update(self, val, n=1):
self.iters += 1
self.val = val
if self.iters > self.warmup:
self.sum += val * n
self.max = max(val, self.max)
self.min = min(val, self.min)
self.count += n
self.avg = self.sum / self.count
if self.keep:
self.vals.append(val)
class Histogram:
"""
Computes and stores the histogram of values.
"""
def __init__(self):
self.hist = np.zeros((1,), dtype=np.uint64)
def update(self, val, n=1):
if val >= self.hist.shape[0]:
new_hist = np.zeros((val + 1,), dtype=np.uint64)
new_hist[:self.hist.shape[0]] = self.hist[:]
self.hist = new_hist
self.hist[val] += n
def update_with_tensor(self, t):
for v in t.flatten().tolist():
self.update(v)
def main(args):
dist.init_parallel_env()
world_size = get_world_size()
if get_rank() == 0 and args.seq_len_dir is not None:
mkdir(args.seq_len_dir)
loader = get_bert_pretrain_data_loader(
args.path,
shuffle_buffer_size=args.shuffle_buffer_size,
shuffle_buffer_warmup_factor=args.shuffle_buffer_warmup_factor,
vocab_file=args.vocab_file,
data_loader_kwargs={
'batch_size': args.batch_size,
'num_workers': args.workers,
'prefetch_factor': args.prefetch
},
mlm_probability=args.mlm_probability,
base_seed=args.seed,
log_dir=args.log_dir,
log_level=getattr(logging, args.log_level),
return_raw_samples=args.debug,
start_epoch=args.start_epoch,
sequence_length_alignment=args.sequence_length_alignment,
ignore_index=args.ignore_index,
)
if os.path.isfile(args.vocab_file):
test_tokenizer = BertTokenizerFast(args.vocab_file)
else:
test_tokenizer = BertTokenizerFast.from_pretrained(args.vocab_file)
meter = AverageMeter(warmup=args.warmup)
lens_shape = (args.epochs, min(len(loader), args.iters_per_epoch))
min_lens, max_lens, batch_sizes, padded_lens = (
np.zeros(lens_shape, dtype=np.uint16),
np.zeros(lens_shape, dtype=np.uint16),
np.zeros(lens_shape, dtype=np.uint16),
np.zeros(lens_shape, dtype=np.uint16),
)
seq_len_hist = Histogram()
padded_zero_hist = Histogram()
step = 0
for epoch in range(args.start_epoch, args.start_epoch + args.epochs):
barrier()
epoch_timer_start = time.time()
batch_timer_start = time.time()
total_samples = 0
for i, data in enumerate(loader):
step += 1
if not args.debug:
(input_ids, token_type_ids, attention_mask, masked_lm_labels,
next_sentence_labels) = (
data['input_ids'],
data['token_type_ids'],
data['attention_mask'],
data['masked_lm_labels'],
data['next_sentence_labels'],
)
batch_timer_stop = time.time()
elapsed = batch_timer_stop - batch_timer_start
meter.update(elapsed)
if args.debug:
current_samples = len(data[0]) * world_size
else:
current_samples = input_ids.shape[0] * world_size
# mask shape: [batch, 1, 1, seq_len] -> [batch, seq_len]
assert attention_mask.dim() == 4
attention_mask = attention_mask.squeeze(axis=[1, 2])
assert input_ids.shape == token_type_ids.shape
assert input_ids.shape == attention_mask.shape
assert input_ids.shape == masked_lm_labels.shape
      # next_sentence_labels shape: [batch, 1]
assert next_sentence_labels.dim() == 2
assert next_sentence_labels.shape[1] == 1
assert input_ids.shape[0] == next_sentence_labels.shape[0]
seq_lens = get_batch_seq_lens(attention_mask)
seq_len_hist.update_with_tensor(seq_lens)
(
min_lens[epoch - args.start_epoch, i],
max_lens[epoch - args.start_epoch, i],
) = seq_lens.min(), seq_lens.max()
batch_sizes[epoch - args.start_epoch, i] = input_ids.shape[0]
padded_lens[epoch - args.start_epoch, i] = input_ids.shape[1]
padded_zero_hist.update_with_tensor(input_ids.shape[1] - seq_lens)
total_samples += current_samples
current_throughput = current_samples / elapsed
if (i + 1) % args.log_freq == 0 and get_rank() == 0:
avg_throughput = total_samples / meter.sum
print('avg_throughput={}, avg_latency={} ms, '
'min_latency={} ms, max_latency={} ms, '
'current_throughput={}, current_latency={} ms'.format(
avg_throughput,
meter.avg * 1000,
meter.min * 1000,
meter.max * 1000,
current_throughput,
elapsed * 1000,
))
if args.debug:
print('len(data[0])={}'.format(len(data[0])))
print('sample=({} <SEP> {} - {})'.format(
data[0][0],
data[1][0],
data[2][0],
))
else:
print("Min length={} Max length={} Diff={}".format(
min_lens[epoch - args.start_epoch, i],
max_lens[epoch - args.start_epoch, i],
max_lens[epoch - args.start_epoch, i] -
min_lens[epoch - args.start_epoch, i],
))
print('input_ids.shape={}'.format(input_ids.shape))
print('input_ids[0]={}'.format(input_ids[0]))
print('convert_ids_to_tokens(input_ids[0])={}'.format(
test_tokenizer.convert_ids_to_tokens(input_ids[0].tolist())))
print('token_type_ids[0]={}'.format(token_type_ids[0]))
print('attention_mask[0]={}'.format(attention_mask[0]))
print('masked_lm_labels[0]={}'.format(masked_lm_labels[0]))
print('next_sentence_labels[0]={}'.format(next_sentence_labels[0]))
mask = masked_lm_labels[0] != args.ignore_index
print(f"mask: {mask}")
          # Use a different loop variable here so the enumerate() index `i` of
          # the outer data-loader loop is not shadowed.
          for pos in range(0, mask.shape[0]):
            if mask[pos]:
              input_ids[0, pos] = masked_lm_labels[0, pos]
print('original sequence={}'.format(
test_tokenizer.convert_ids_to_tokens(input_ids[0].tolist())))
barrier()
batch_timer_start = time.time()
if step >= args.iters_per_epoch:
break
epoch_timer_stop = time.time()
epoch_elapsed = epoch_timer_stop - epoch_timer_start
if get_rank() == 0:
avg_throughput = total_samples / meter.sum
print('epoch={}, epoch_elapsed={}, avg_throughput={}, '
'total_samples={}'.format(
epoch,
epoch_elapsed,
avg_throughput,
total_samples,
))
assert meter.iters == min(len(loader), args.iters_per_epoch)
meter.reset()
if args.seq_len_dir is not None:
# Save the sequence lengths to file
np.savez_compressed(
os.path.join(args.seq_len_dir, 'lens_{}.npz'.format(get_rank())),
min_lens=min_lens,
max_lens=max_lens,
batch_sizes=batch_sizes,
padded_lens=padded_lens,
seq_len_hist=seq_len_hist.hist,
padded_zero_hist=padded_zero_hist.hist,
)
def attach_args(parser=argparse.ArgumentParser()):
parser.add_argument('--path', type=str, required=True)
parser.add_argument('--batch-size', type=int, default=64)
parser.add_argument('--workers', type=int, default=4)
parser.add_argument('--warmup', type=int, default=0)
parser.add_argument('--epochs', type=int, default=2)
parser.add_argument('--iters-per-epoch', type=int, default=float('inf'))
parser.add_argument('--prefetch', type=int, default=2)
parser.add_argument('--mlm-probability', type=float, default=0.15)
parser.add_argument('--shuffle-buffer-size', type=int, default=16384)
parser.add_argument('--shuffle-buffer-warmup-factor', type=int, default=16)
parser.add_argument('--vocab-file', type=str, required=True)
parser.add_argument('--seed', type=int, default=127)
parser.add_argument('--start-epoch', type=int, default=0)
parser.add_argument('--debug', action='store_true', default=False)
parser.add_argument('--log-freq', type=int, default=1000)
parser.add_argument('--log-dir', type=str, default=None)
parser.add_argument(
'--log-level',
type=str,
choices=['CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG'],
default='WARNING',
)
parser.add_argument('--seq-len-dir', type=str, default=None)
parser.add_argument('--sequence-length-alignment', type=int, default=8)
parser.add_argument('--ignore-index', type=int, default=-1)
return parser
if __name__ == '__main__':
main(attach_args().parse_args())
| LDDL-main | benchmarks/paddle_train.py |
LDDL-main | lddl/__init__.py |
|
#
# SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
# Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
class File:
def __init__(self, path, num_samples):
self.path = path
self.num_samples = num_samples
def __repr__(self):
return 'File(path={}, num_samples={})'.format(self.path, self.num_samples)
| LDDL-main | lddl/types.py |
#
# SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
# Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
import random
def _swap_rng_state(new_state):
old_state = random.getstate()
random.setstate(new_state)
return old_state
def randrange(stop, rng_state=None):
orig_rng_state = _swap_rng_state(rng_state)
n = random.randrange(stop)
return n, _swap_rng_state(orig_rng_state)
def shuffle(x, rng_state=None):
orig_rng_state = _swap_rng_state(rng_state)
random.shuffle(x)
return _swap_rng_state(orig_rng_state)
def sample(population, k, rng_state=None):
orig_rng_state = _swap_rng_state(rng_state)
s = random.sample(population, k)
return s, _swap_rng_state(orig_rng_state)
def choices(population, weights=None, cum_weights=None, k=1, rng_state=None):
orig_rng_state = _swap_rng_state(rng_state)
c = random.choices(population, weights=weights, cum_weights=cum_weights, k=k)
return c, _swap_rng_state(orig_rng_state)
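# Minimal usage sketch (illustrative only, not part of the original module):
# these wrappers thread an explicit RNG state through each call and restore the
# module-level random state on exit, so callers stay reproducible without
# touching global seeding.
if __name__ == '__main__':
  # Seed a private state; random.Random(...).getstate() is compatible with the
  # random.setstate() call inside _swap_rng_state().
  rng_state = random.Random(1234).getstate()
  n, rng_state = randrange(10, rng_state=rng_state)
  items = list(range(5))
  rng_state = shuffle(items, rng_state=rng_state)
  picks, rng_state = sample(items, 2, rng_state=rng_state)
  winners, rng_state = choices(items, k=3, rng_state=rng_state)
  print(n, items, picks, winners)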
| LDDL-main | lddl/random.py |
#
# SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
# Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
import os
import io
import numpy as np
import pathlib
import pyarrow.parquet as pq
def mkdir(d):
pathlib.Path(d).mkdir(parents=True, exist_ok=True)
def expand_outdir_and_mkdir(outdir):
outdir = os.path.abspath(os.path.expanduser(outdir))
mkdir(outdir)
return outdir
def get_all_files_paths_under(root):
return (
os.path.join(r, f) for r, subdirs, files in os.walk(root) for f in files)
def get_all_parquets_under(path):
return sorted([
p for p in get_all_files_paths_under(path)
if '.parquet' in os.path.splitext(p)[1]
])
def get_all_bin_ids(file_paths):
def is_binned_parquet(p):
return '_' in os.path.splitext(p)[1]
def get_bin_id(p):
return int(os.path.splitext(p)[1].split('_')[-1])
bin_ids = list(
sorted(set((get_bin_id(p) for p in file_paths if is_binned_parquet(p)))))
for a, e in zip(bin_ids, range(len(bin_ids))):
if a != e:
raise ValueError('bin id must be contiguous integers starting from 0!')
return bin_ids
def get_file_paths_for_bin_id(file_paths, bin_id):
return [
p for p in file_paths
if '.parquet_{}'.format(bin_id) == os.path.splitext(p)[1]
]
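# Illustrative example (not part of the original module): binned shards carry
# the bin id in their file extension, e.g. 'shard-0.parquet_1'. Given
#   paths = ['out/shard-0.parquet_0', 'out/shard-0.parquet_1',
#            'out/shard-1.parquet_0', 'out/shard-1.parquet_1']
# get_all_bin_ids(paths) returns [0, 1], and
# get_file_paths_for_bin_id(paths, 1) returns the two '.parquet_1' paths.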
def get_num_samples_of_parquet(path):
return len(pq.read_table(path))
def attach_bool_arg(parser, flag_name, default=False, help_str=None):
attr_name = flag_name.replace('-', '_')
parser.add_argument(
'--{}'.format(flag_name),
dest=attr_name,
action='store_true',
help=flag_name.replace('-', ' ') if help_str is None else help_str,
)
parser.add_argument(
'--no-{}'.format(flag_name),
dest=attr_name,
action='store_false',
help=flag_name.replace('-', ' ') if help_str is None else help_str,
)
parser.set_defaults(**{attr_name: default})
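# Illustrative example (not part of the original module):
#   attach_bool_arg(parser, 'keep-orig', default=False)
# adds a --keep-orig / --no-keep-orig flag pair that both write to
# args.keep_orig, with the default set to False.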
def serialize_np_array(a):
memfile = io.BytesIO()
np.save(memfile, a)
memfile.seek(0)
return memfile.read()
def deserialize_np_array(b):
memfile = io.BytesIO()
memfile.write(b)
memfile.seek(0)
return np.load(memfile)
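# Minimal round-trip sketch (illustrative only, not part of the original
# module): serialize_np_array() wraps np.save() around an in-memory buffer, so
# deserialize_np_array() recovers the exact array, including dtype and shape.
if __name__ == '__main__':
  a = np.arange(6, dtype=np.float32).reshape(2, 3)
  b = deserialize_np_array(serialize_np_array(a))
  assert b.dtype == a.dtype and np.array_equal(a, b)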
| LDDL-main | lddl/utils.py |
#
# SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
# Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
import dask.bag as db
import nltk
import os
import random
def _filter_empty_strs(bag_strs):
return bag_strs.map(lambda s: s.strip()).filter(lambda s: len(s) > 0)
def _find_files_under(path, extensions={'.txt'}):
all_files = []
for current_dir, sub_dirs, file_names in os.walk(path):
for file_name in file_names:
if os.path.splitext(file_name)[1] in extensions:
all_files.append(os.path.join(current_dir, file_name))
return list(sorted(all_files))
def _total_bytes_of(files):
return sum(map(os.path.getsize, files))
def estimate_block_size(paths, num_blocks):
total_bytes = 0
for p in paths:
if p is None:
continue
total_bytes += _total_bytes_of(_find_files_under(p))
print('total_bytes = {}, num_blocks = {}'.format(total_bytes, num_blocks))
block_size = round(total_bytes / num_blocks)
print('block_size = {} bytes'.format(block_size))
return block_size
def _read_bag_of_text(
path,
blocksize=None,
sample_ratio=1.0,
sample_seed=12345,
):
input_files = _find_files_under(path)
bag_strs = db.read_text(input_files, blocksize=blocksize)
bag_strs = _filter_empty_strs(bag_strs)
if sample_ratio < 1.0:
bag_strs = bag_strs.random_sample(sample_ratio, random_state=sample_seed)
return bag_strs
def read_wikipedia(
path,
lang='en',
blocksize=None,
sample_ratio=1.0,
sample_seed=12345,
):
return _read_bag_of_text(
os.path.join(path, lang),
blocksize=blocksize,
sample_ratio=sample_ratio,
sample_seed=sample_seed,
)
def read_books(
path,
blocksize=None,
sample_ratio=1.0,
sample_seed=12345,
):
return _read_bag_of_text(
path,
blocksize=blocksize,
sample_ratio=sample_ratio,
sample_seed=sample_seed,
)
def read_common_crawl(
path,
blocksize=None,
sample_ratio=1.0,
sample_seed=12345,
):
return _read_bag_of_text(
path,
blocksize=blocksize,
sample_ratio=sample_ratio,
sample_seed=sample_seed,
)
def read_open_webtext(
path,
blocksize=None,
sample_ratio=1.0,
sample_seed=12345,
):
return _read_bag_of_text(
path,
blocksize=blocksize,
sample_ratio=sample_ratio,
sample_seed=sample_seed,
)
def split_id_text(raw_text):
# The first token is the document id.
i = 0
while i < len(raw_text) and not raw_text[i].isspace():
i += 1
return raw_text[:i], raw_text[i + 1:]
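# Minimal usage sketch (illustrative only, not part of the original module):
# each raw text record starts with a document id token followed by the body.
if __name__ == '__main__':
  doc_id, text = split_id_text('doc-42 First sentence of the article.')
  assert doc_id == 'doc-42'
  assert text == 'First sentence of the article.'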
| LDDL-main | lddl/dask/readers.py |
LDDL-main | lddl/dask/__init__.py |
|
#
# SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
# Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
import argparse
import json
import numpy as np
import os
import pyarrow as pa
import pyarrow.parquet as pq
import time
from mpi4py import MPI
from lddl.types import File
from lddl.utils import (get_all_files_paths_under, expand_outdir_and_mkdir,
get_all_parquets_under, get_all_bin_ids,
get_file_paths_for_bin_id, get_num_samples_of_parquet,
attach_bool_arg)
class Shard:
def __init__(self, idx, input_files, outdir, keep_orig=True, postfix=''):
self.idx = idx
self._input_files = input_files
self._outdir = outdir
self._keep_orig = keep_orig
self._postfix = postfix
self._output_file = None
@property
def num_samples(self):
n = 0
if self._input_files is not None:
for input_file in self._input_files:
n += input_file.num_samples
if self._output_file is not None:
n += self._output_file.num_samples
return n
def __repr__(self):
return ('Shard(idx={}, input_files={}, outdir={}, keep_orig={}, '
'postfix={}, output_file={})'.format(
self.idx,
self._input_files,
self._outdir,
self._keep_orig,
self._postfix,
self._output_file,
))
def _read_table(self, path):
table = pq.read_table(path)
if not self._keep_orig: # Only keep the read table in memory.
os.remove(path)
return table
def _read_table_from_file(self, f):
table = self._read_table(f.path)
assert f.num_samples == len(table)
return table
def _store(self, num_samples, table=None):
if table is not None:
assert num_samples == len(table)
if self._output_file is None:
self._output_file = File(
os.path.join(
self._outdir,
'shard-{}.parquet{}'.format(self.idx, self._postfix),
),
0,
)
else:
if table is not None:
table = pa.concat_tables([
self._read_table_from_file(self._output_file),
table,
])
self._output_file.num_samples += num_samples
if table is not None:
assert self._output_file.num_samples == len(table)
pq.write_table(table, self._output_file.path)
def _load(self, num_samples, return_table=False):
if return_table:
tables = []
while num_samples > 0:
if len(self._input_files) > 0:
load_file = self._input_files.pop()
else:
load_file = self._output_file
self._output_file = None
load_num_samples = min(load_file.num_samples, num_samples)
if return_table:
load_table = self._read_table_from_file(load_file)
tables.append(load_table.slice(length=load_num_samples))
if load_num_samples < load_file.num_samples:
self._store(
load_file.num_samples - load_num_samples,
table=load_table.slice(
offset=load_num_samples) if return_table else None,
)
num_samples -= load_num_samples
if return_table:
return pa.concat_tables(tables)
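  # Note: `larger_shard` plays the role of `self` here; the method is invoked
  # as larger_shard.balance(smaller_shard, idx) in _balance() below.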
def balance(larger_shard, smaller_shard, idx):
assert larger_shard.num_samples > smaller_shard.num_samples
num_samples_to_transfer = (
larger_shard.num_samples -
(larger_shard.num_samples + smaller_shard.num_samples) // 2)
smaller_shard._store(
num_samples_to_transfer,
table=larger_shard._load(
num_samples_to_transfer,
return_table=(idx % get_world_size() == get_rank()),
),
)
def flush(self, idx):
if idx % get_world_size() == get_rank():
input_tables = []
num_samples_to_flush = 0
while len(self._input_files) > 0:
input_file = self._input_files.pop()
num_samples_to_flush += input_file.num_samples
if idx % get_world_size() == get_rank():
input_tables.append(self._read_table_from_file(input_file))
if num_samples_to_flush > 0:
self._store(
num_samples_to_flush,
table=(pa.concat_tables(input_tables) if
(idx % get_world_size() == get_rank()) else None),
)
class Progress:
def __init__(self, shards):
num_shards = len(shards)
total_num_samples = sum((s.num_samples for s in shards))
base_num_samples_per_shard = total_num_samples // num_shards
self._targets = {
base_num_samples_per_shard: num_shards - total_num_samples % num_shards,
base_num_samples_per_shard + 1: total_num_samples % num_shards,
}
self._ready_shards = []
def __repr__(self):
s = [
'Progress(',
' Remaining:',
]
s += [
' {} shards with {} samples per shard'.format(v, k)
for k, v in self._targets.items()
]
s += [
' Ready:',
' {} shards'.format(len(self._ready_shards)),
')',
]
return '\n'.join(s)
def completed(self):
return sum(self._targets.values()) == 0
def report(self, shards):
smaller_shards, larger_shards = [], []
for shard in shards:
if shard.num_samples in self._targets:
self._targets[shard.num_samples] -= 1
self._ready_shards.append(shard)
if self._targets[shard.num_samples] == 0:
del self._targets[shard.num_samples]
else:
if shard.num_samples < min(self._targets.keys()):
smaller_shards.append(shard)
else:
larger_shards.append(shard)
return smaller_shards, larger_shards
@property
def ready_shards(self):
return self._ready_shards
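# Worked example (illustrative only, not part of the original module): with 3
# shards holding 10 samples in total, Progress.__init__ computes
# base = 10 // 3 = 3 and sets _targets = {3: 2, 4: 1}, i.e. two shards must end
# up with 3 samples and one shard with 4, the most even split possible.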
def get_world_size():
return MPI.COMM_WORLD.Get_size()
def get_rank():
return MPI.COMM_WORLD.Get_rank()
def barrier():
return MPI.COMM_WORLD.barrier()
def allreduce(array, op=MPI.SUM):
MPI.COMM_WORLD.Allreduce(MPI.IN_PLACE, array, op=op)
def _build_files(file_paths):
# Get the number of samples for each file in a collectively distributed
# approach.
all_files_num_samples = np.zeros((len(file_paths),), dtype=np.uint64)
for file_idx in range(get_rank(), len(file_paths), get_world_size()):
all_files_num_samples[file_idx] = get_num_samples_of_parquet(
file_paths[file_idx])
allreduce(all_files_num_samples)
return sorted(
[
File(path, num_samples) for (path, num_samples) in zip(
file_paths,
all_files_num_samples.tolist(),
)
],
key=lambda f: f.num_samples,
)
def _build_shards(files, num_shards, outdir, keep_orig=True, postfix=''):
return [
Shard(
idx,
files[idx::num_shards] if idx < len(files) else None,
outdir,
keep_orig=keep_orig,
postfix=postfix,
) for idx in range(num_shards)
]
def _calculate_mean_std_num_samples(shards):
num_samples = [shard.num_samples for shard in shards]
if len(num_samples) > 0:
return np.mean(num_samples), np.std(num_samples)
else:
return np.NAN, np.NAN
def attach_args(parser=argparse.ArgumentParser("""
LDDL Load Balancer for the parquet shards generated by the LDDL Preprocessor
Let P be the set of parquet shards generated by the LDDL Preprocessor. For any
two parquet shards a and b in P, the LDDL load balancer makes sure that the
numbers of samples in a and b differ by at most 1. In other words, the LDDL
load balancer "balances" the number of samples among the parquet shards.
MPI is used to scale the LDDL load balancer to multi-processes and multi-nodes.
MPI can be accessed in various ways. For example, we can access MPI via mpirun:
$ mpirun -c <number of processes per node> --oversubscribe --allow-run-as-root \\
balance_dask_output ...
We can also access MPI via SLURM in a HPC cluster:
$ srun -l --mpi=pmix --ntasks-per-node=<number of processes per node> \\
balance_dask_output ...
""")):
parser.add_argument(
'--indir',
type=str,
required=True,
help='The path to the directory that contains the parquet shards '
'generated by the LDDL Preprocessor.',
)
parser.add_argument(
'--outdir',
type=str,
default=None,
help="The path where the balanced parquet shards will be stored. If "
"unspecified, the balanced parquet shards will be stored in the "
"directory of '--indir'.",
)
parser.add_argument(
'--num-shards',
type=int,
required=True,
help='The total number of shards that should be balanced into.',
)
parser.add_argument(
'--bin-ids',
type=int,
nargs='*',
default=None,
help='The bin IDs to perform load balance on (if binning is enabled). If '
'unspecified, load balance will be performed on all bins.',
)
attach_bool_arg(
parser,
'keep-orig',
default=False,
help_str="If '--keep-orig' is specified, the original unbalanced parquet "
"shards are kept. By default, those original unbalanced parquet shards "
"are deleted after the balanced shards are generated.",
)
return parser
def _balance(file_paths, num_shards, outdir, keep_orig=True, postfix=''):
files = _build_files(file_paths)
shards = _build_shards(
files,
num_shards,
outdir,
keep_orig=keep_orig,
postfix=postfix,
)
if get_rank() == 0:
print('Balancing the following {} files into {} shards:'.format(
len(files), num_shards))
print('SUM(files.num_samples) = {}, SUM(shards.num_samples) = {}'.format(
sum((f.num_samples for f in files)),
sum((s.num_samples for s in shards)),
))
progress = Progress(shards)
if get_rank() == 0:
print('Begin with {}'.format(progress))
iteration = 0
while not progress.completed():
smaller_shards, larger_shards = progress.report(shards)
if get_rank() == 0:
print('iteration {}, {}, left {}, right {}'.format(
iteration,
progress,
_calculate_mean_std_num_samples(smaller_shards),
_calculate_mean_std_num_samples(larger_shards),
))
smaller_shards = list(
sorted(smaller_shards, key=lambda shard: shard.num_samples))
larger_shards = list(
sorted(
larger_shards,
key=lambda shard: shard.num_samples,
reverse=True,
))
num_pairs = min(len(smaller_shards), len(larger_shards))
for i, (smaller_shard, larger_shard) in enumerate(
zip(smaller_shards[:num_pairs], larger_shards[:num_pairs])):
larger_shard.balance(smaller_shard, i)
barrier()
shards = smaller_shards + larger_shards
iteration += 1
[shard.flush(i) for i, shard in enumerate(progress.ready_shards)]
if get_rank() == 0:
print('Done!')
return progress.ready_shards
def _store_num_samples_per_shard(shards, outdir):
num_samples_per_shard = {
os.path.basename(shard._output_file.path): shard._output_file.num_samples
for shard in shards
}
with open(os.path.join(outdir, '.num_samples.json'), 'w') as f:
json.dump(num_samples_per_shard, f)
def main(args):
if args.outdir is None:
args.outdir = args.indir
else:
args.outdir = expand_outdir_and_mkdir(args.outdir)
file_paths = get_all_parquets_under(args.indir)
if args.bin_ids is None:
bin_ids = get_all_bin_ids(file_paths)
if len(bin_ids) > 0:
args.bin_ids = bin_ids
ready_shards = []
if args.bin_ids is None:
if get_rank() == 0:
print('Load balancing for unbinned files ...')
ready_shards.extend(
_balance(file_paths,
args.num_shards,
args.outdir,
keep_orig=args.keep_orig))
else:
if get_rank() == 0:
print('Load balancing for bin_ids = {} ...'.format(args.bin_ids))
for bin_id in args.bin_ids:
if get_rank() == 0:
print('Balancing bin_id = {} ...'.format(bin_id))
file_paths_current_bin = get_file_paths_for_bin_id(file_paths, bin_id)
ready_shards.extend(
_balance(
file_paths_current_bin,
args.num_shards,
args.outdir,
keep_orig=args.keep_orig,
postfix='_{}'.format(bin_id),
))
if get_rank() == 0:
_store_num_samples_per_shard(ready_shards, args.outdir)
def console_script():
tic = time.perf_counter()
main(attach_args().parse_args())
if get_rank() == 0:
print('Load balancing took {} s!'.format(time.perf_counter() - tic))
def generate_num_samples_cache():
parser = argparse.ArgumentParser(
'Generate .num_samples.json for the balanced parquets.')
parser.add_argument(
'--indir',
type=str,
default=None,
help='path to the dir that contains the balanced shards',
)
args = parser.parse_args()
file_paths = get_all_parquets_under(args.indir)
# Get the number of samples for each file in a collectively distributed
# approach.
all_files_num_samples = np.zeros((len(file_paths),), dtype=np.uint64)
for file_idx in range(get_rank(), len(file_paths), get_world_size()):
all_files_num_samples[file_idx] = get_num_samples_of_parquet(
file_paths[file_idx])
allreduce(all_files_num_samples)
all_files_num_samples = all_files_num_samples.tolist()
with open(os.path.join(args.indir, '.num_samples.json'), 'w') as nsf:
json.dump(
{
os.path.basename(file_paths[file_idx]):
all_files_num_samples[file_idx]
for file_idx in range(len(file_paths))
},
nsf,
)
| LDDL-main | lddl/dask/load_balance.py |
#
# SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
# Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
import argparse
import dask
import dask.bag as db
import dask.distributed
import functools
import nltk
import os
import pyarrow as pa
import time
from lddl.dask.readers import (read_open_webtext, read_wikipedia, read_books,
read_common_crawl, estimate_block_size)
from lddl.utils import expand_outdir_and_mkdir
from lddl.download.utils import parse_str_of_num_bytes
def _get_sequences(wikipedia_path=None,
books_path=None,
common_crawl_path=None,
open_webtext_path=None,
wikipedia_lang='en',
target_seq_length=128,
short_seq_prob=0.1,
blocksize=None,
num_blocks=None):
if num_blocks is not None:
if blocksize is not None:
raise ValueError('Only one of num_blocks or blocksize needs to be set!')
blocksize = estimate_block_size(
(wikipedia_path, books_path, common_crawl_path, open_webtext_path),
num_blocks,
)
bags = []
if wikipedia_path is not None:
bags.append(
read_wikipedia(
wikipedia_path,
lang=wikipedia_lang,
blocksize=blocksize,
))
if books_path is not None:
bags.append(read_books(
books_path,
blocksize=blocksize,
))
if common_crawl_path is not None:
bags.append(read_common_crawl(
common_crawl_path,
blocksize=blocksize,
))
if open_webtext_path is not None:
bags.append(read_open_webtext(
open_webtext_path,
blocksize=blocksize,
))
def _segment(article):
return filter(
None,
map(lambda s: s.strip(), nltk.tokenize.sent_tokenize(article)),
)
def _aggregate_sentences(sentences):
    # Cut sentences into chunks whose token counts are close to
    # target_seq_length. `results` is a list in the format of:
# [
# {
# 'sentences': [sent1, sent2],
# 'num_tokens': [num_tokens1, num_tokens2],
# },
# {
# 'sentences': [sent1, sent2, sent3],
# 'num_tokens': [num_tokens1, num_tokens2, num_tokens3],
# },
# {
# 'sentences': [sent1],
# 'num_tokens': [num_tokens1],
# },
# ...
# ]
results = []
# Excluding [CLS], [SEP], [SEP]
target_length = target_seq_length - 3
chunk = ""
num_tokens = 0
for sentence in sentences:
chunk += " " + sentence
num_tokens += len(list(sentence.split()))
if num_tokens >= target_length:
results.append({
'sentences': chunk,
'num_tokens': num_tokens,
'target_length': target_length,
})
chunk = ""
num_tokens = 0
if num_tokens > 0:
results.append({
'sentences': chunk,
'num_tokens': num_tokens,
'target_length': target_length,
})
return results
def _generate_sequences(article):
return _aggregate_sentences(_segment(article))
return db.concat(bags).map(_generate_sequences).flatten()
def save(pairs, path, output_format='parquet'):
if output_format == 'parquet':
pairs.to_dataframe(meta={
'sentences': str,
}).to_parquet(
path,
engine='pyarrow',
write_index=False,
schema={
'sentences': pa.string(),
},
)
elif output_format == 'txt':
pairs = pairs.map(lambda p: '{}'.format(p['sentences'],)).to_textfiles(
os.path.join(path, '*.txt'))
else:
raise ValueError('Format {} not supported!'.format(output_format))
def main(args):
if args.schedule == 'mpi':
from dask_mpi import initialize
initialize()
client = dask.distributed.Client()
else:
client = dask.distributed.Client(
n_workers=args.local_n_workers,
threads_per_worker=args.local_threads_per_worker,
)
nltk.download('punkt')
tic = time.perf_counter()
sequences = _get_sequences(
wikipedia_path=args.wikipedia,
books_path=args.books,
common_crawl_path=args.common_crawl,
open_webtext_path=args.open_webtext,
wikipedia_lang=args.wikipedia_lang,
target_seq_length=args.target_seq_length,
short_seq_prob=args.short_seq_prob,
blocksize=args.block_size,
num_blocks=args.num_blocks,
)
args.sink = expand_outdir_and_mkdir(args.sink)
save(sequences, args.sink, output_format=args.output_format)
print('Running the dask pipeline took {} s'.format(time.perf_counter() - tic))
def attach_args(
parser=argparse.ArgumentParser('BART pretrain dataset dask pipeline')):
parser.add_argument(
'--schedule',
type=str,
default='mpi',
choices=['mpi', 'local'],
help='how the dask pipeline is scheduled',
)
parser.add_argument(
'--local-n-workers',
type=int,
default=os.cpu_count(),
help='number of worker processes for the local cluster; '
'only used when --schedule=local',
)
parser.add_argument(
'--local-threads-per-worker',
type=int,
default=1,
help='number of Python user-level threads per worker process for the '
'local cluster; only used when --schedule=local',
)
parser.add_argument(
'--wikipedia',
type=str,
default=None,
help='path to the Wikipedia corpus',
)
parser.add_argument(
'--books',
type=str,
default=None,
help='path to the Toronto books corpus',
)
parser.add_argument(
'--common-crawl',
type=str,
default=None,
help='path to the Common Crawl news corpus',
)
parser.add_argument(
'--open-webtext',
type=str,
default=None,
help='path to the Open WebText Corpus',
)
parser.add_argument(
'--sink',
type=str,
default=None,
required=True,
help='path to the dir to store output files',
)
parser.add_argument(
'--output-format',
type=str,
default='parquet',
choices=['parquet', 'txt'],
help='output file format',
)
parser.add_argument(
'--wikipedia-lang',
type=str,
default='en',
choices=['en', 'zh'],
help='wikipedia language type',
)
parser.add_argument(
'--target-seq-length',
type=int,
default=128,
help='target sequence length',
)
parser.add_argument(
'--short-seq-prob',
type=float,
default=0.1,
help='probability to use sequences shorter than --target-seq-length',
)
parser.add_argument(
'--block-size',
type=functools.partial(parse_str_of_num_bytes, return_str=False),
default=None,
metavar='n[KMG]',
help='The size of each output parquet/txt shard. Since Dask cannot '
'guarantee perfect load balance, this value is only used as an estimate. '
'Only one of --block-size and --num-blocks needs to be set, since one '
'value can be derived from the other. Default: {}'.format(None),
)
parser.add_argument(
'--num-blocks',
type=int,
default=None,
help='The total number of the output parquet/txt shards. Since Dask '
'cannot guarantee perfect load balance, this value is only used as an '
'estimate. Only one of --block-size or --num-blocks needs to be set, '
'since one value can be derived from the other. Default: {}'.format(None),
)
return parser
def console_script():
main(attach_args().parse_args())
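# Illustrative invocation only: the console-script name and flag values below
# are assumptions (the actual entry-point name depends on how this package
# registers its console scripts); adjust the paths to your own data.
#   mpirun -c <procs per node> --oversubscribe --allow-run-as-root \
#     <bart-pretrain-console-script> \
#       --schedule mpi \
#       --wikipedia /path/to/wikipedia/source \
#       --sink /path/to/output \
#       --output-format parquet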
| LDDL-main | lddl/dask/bart/pretrain.py |
| LDDL-main | lddl/dask/bart/__init__.py |
#
# SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
# Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
from dask.highlevelgraph import HighLevelGraph
# to_dataframe
from dask.base import tokenize
from dask.bag.core import reify
import itertools
# to_parquet
import tlz as toolz
from fsspec.core import get_fs_token_paths
from dask.base import compute_as_if_collection
from dask.delayed import Delayed
from dask.utils import apply
from dask.dataframe.core import Scalar
from dask.dataframe.io.parquet.core import get_engine
from dask.dataframe.io.parquet.arrow import (_index_in_schema,
                                             _write_partitioned,
                                             _append_row_groups)
from fsspec.implementations.local import LocalFileSystem
from fsspec.utils import stringify_path
import pyarrow.parquet as pq
import warnings
try:
import snappy
snappy.compress
except (ImportError, AttributeError):
snappy = None
NONE_LABEL = "__null_dask_index__"
# to_textfiles
import io
import uuid
from dask.bytes import open_files
from dask.utils import ensure_unicode, ensure_bytes, system_encoding
from contextlib import ExitStack
#
# dataframes
#
def _to_dataframe_binned(seq, columns, dtypes, bin_size, nbins):
import pandas as pd
seq = reify(seq)
if not isinstance(seq, list):
seq = list(seq)
seqs = [[] for _ in range(nbins)]
for i, iseq in enumerate(seq):
seq_len = iseq['num_tokens']
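    # Bin assignment for this sequence: with bin_size tokens per bin, sequences
    # of length 1..bin_size fall into bin 0, bin_size+1..2*bin_size into bin 1,
    # and so on; anything longer than bin_size * nbins is clipped into the
    # last bin.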
bin_id = (seq_len - 1) // bin_size
bin_id = nbins - 1 if bin_id > nbins - 1 else bin_id
seqs[bin_id].append(iseq)
dfl = list(
map(
lambda l: pd.DataFrame(
l,
columns=list(columns),
).astype(dtypes, copy=False),
seqs,
))
dfs = pd.concat(dfl, keys=list(map(str, list(range(nbins)))))
# Add a bin_id column
dfs['bin_id'] = list(
itertools.chain.from_iterable(
[[i] * len(bingrp) for i, bingrp in enumerate(seqs)]))
return dfs
def to_dataframe_binned(self, bin_size, nbins, meta=None, columns=None):
import pandas as pd
import dask.dataframe as dd
if meta is None:
head = self.take(1, warn=False)
if len(head) == 0:
raise ValueError("`dask.bag.Bag.to_dataframe` failed to "
"properly infer metadata, please pass in "
"metadata via the `meta` keyword")
meta_nobin = pd.DataFrame(list(head), columns=columns)
elif columns is not None:
raise ValueError("Can't specify both `meta` and `columns`")
else:
meta_nobin = dd.utils.make_meta(meta, parent_meta=pd.DataFrame())
# Serializing the columns and dtypes is much smaller than serializing
# the empty frame
cols = list(meta_nobin.columns)
dtypes = meta_nobin.dtypes.to_dict()
name = "to_dataframe-binned-" + tokenize(self, cols, dtypes)
dsk = self.__dask_optimize__(self.dask, self.__dask_keys__())
for i in range(self.npartitions):
dsk[(name, i)] = (_to_dataframe_binned, (self.name, i), cols, dtypes,
bin_size, nbins)
# Update the meta
meta['bin_id'] = int
meta = dd.utils.make_meta(meta, parent_meta=pd.DataFrame())
divisions = [None] * (self.npartitions + 1)
return dd.DataFrame(dsk, name, meta, divisions)
#
# parquet files
#
def to_parquet_binned(
df,
path,
nbins,
engine="auto",
compression="default",
write_index=True,
append=False,
overwrite=False,
ignore_divisions=False,
partition_on=None,
storage_options=None,
custom_metadata=None,
write_metadata_file=True,
compute=True,
compute_kwargs=None,
schema=None,
**kwargs,
):
compute_kwargs = compute_kwargs or {}
if compression == "default":
if snappy is not None:
compression = "snappy"
else:
compression = None
partition_on = partition_on or []
if isinstance(partition_on, str):
partition_on = [partition_on]
if set(partition_on) - set(df.columns):
raise ValueError("Partitioning on non-existent column. "
"partition_on=%s ."
"columns=%s" % (str(partition_on), str(list(df.columns))))
if isinstance(engine, str):
engine = get_engine(engine)
if hasattr(path, "name"):
path = stringify_path(path)
fs, _, _ = get_fs_token_paths(path,
mode="wb",
storage_options=storage_options)
# Trim any protocol information from the path before forwarding
path = fs._strip_protocol(path)
if overwrite:
if isinstance(fs, LocalFileSystem):
working_dir = fs.expand_path(".")[0]
if path.rstrip("/") == working_dir.rstrip("/"):
raise ValueError(
"Cannot clear the contents of the current working directory!")
if append:
raise ValueError("Cannot use both `overwrite=True` and `append=True`!")
if fs.exists(path) and fs.isdir(path):
# Only remove path contents if
# (1) The path exists
# (2) The path is a directory
# (3) The path is not the current working directory
fs.rm(path, recursive=True)
# Save divisions and corresponding index name. This is necessary,
# because we may be resetting the index to write the file
division_info = {"divisions": df.divisions, "name": df.index.name}
if division_info["name"] is None:
# As of 0.24.2, pandas will rename an index with name=None
# when df.reset_index() is called. The default name is "index",
# but dask will always change the name to the NONE_LABEL constant
if NONE_LABEL not in df.columns:
division_info["name"] = NONE_LABEL
elif write_index:
raise ValueError(
"Index must have a name if __null_dask_index__ is a column.")
else:
warnings.warn("If read back by Dask, column named __null_dask_index__ "
"will be set to the index (and renamed to None).")
  # There are some "reserved" names that may be used as the default column
# name after resetting the index. However, we don't want to treat it as
# a "special" name if the string is already used as a "real" column name.
reserved_names = []
for name in ["index", "level_0"]:
if name not in df.columns:
reserved_names.append(name)
# If write_index==True (default), reset the index and record the
# name of the original index in `index_cols` (we will set the name
# to the NONE_LABEL constant if it is originally `None`).
# `fastparquet` will use `index_cols` to specify the index column(s)
# in the metadata. `pyarrow` will revert the `reset_index` call
# below if `index_cols` is populated (because pyarrow will want to handle
# index preservation itself). For both engines, the column index
# will be written to "pandas metadata" if write_index=True
index_cols = []
if write_index:
real_cols = set(df.columns)
none_index = list(df._meta.index.names) == [None]
df = df.reset_index()
if none_index:
df.columns = [
c if c not in reserved_names else NONE_LABEL for c in df.columns
]
index_cols = [c for c in set(df.columns) - real_cols]
else:
# Not writing index - might as well drop it
df = df.reset_index(drop=True)
_to_parquet_kwargs = {
"engine",
"compression",
"write_index",
"append",
"ignore_divisions",
"partition_on",
"storage_options",
"write_metadata_file",
"compute",
}
kwargs_pass = {k: v for k, v in kwargs.items() if k not in _to_parquet_kwargs}
# Engine-specific initialization steps to write the dataset.
# Possibly create parquet metadata, and load existing stuff if appending
meta, schema, i_offset = engine.initialize_write(
df,
fs,
path,
append=append,
ignore_divisions=ignore_divisions,
partition_on=partition_on,
division_info=division_info,
index_cols=index_cols,
schema=schema,
**kwargs_pass,
)
# Use i_offset and df.npartitions to define file-name list
filenames = [
"part.%i.parquet" % (i + i_offset) for i in range(df.npartitions)
]
# Construct IO graph
dsk = {}
name = "to-parquet-binned" + tokenize(
df,
fs,
path,
append,
ignore_divisions,
partition_on,
division_info,
index_cols,
schema,
)
part_tasks = []
kwargs_pass["fmd"] = meta
kwargs_pass["compression"] = compression
kwargs_pass["index_cols"] = index_cols
kwargs_pass["schema"] = schema
if custom_metadata:
if b"pandas" in custom_metadata.keys():
raise ValueError(
"User-defined key/value metadata (custom_metadata) can not "
"contain a b'pandas' key. This key is reserved by Pandas, "
"and overwriting the corresponding value can render the "
"entire dataset unreadable.")
kwargs_pass["custom_metadata"] = custom_metadata
# Override write_partition to write binned parquet files
engine.write_partition = write_partition_binned
for d, filename in enumerate(filenames):
dsk[(name, d)] = (
apply,
engine.write_partition,
[
engine,
(df._name, d),
path,
fs,
filename,
partition_on,
write_metadata_file,
nbins,
],
toolz.merge(kwargs_pass, {"head": True}) if d == 0 else kwargs_pass,
)
part_tasks.append((name, d))
final_name = "metadata-" + name
# Collect metadata and write _metadata
if write_metadata_file:
dsk[(final_name, 0)] = (
apply,
engine.write_metadata,
[
part_tasks,
meta,
fs,
path,
],
{
"append": append,
"compression": compression
},
)
else:
dsk[(final_name, 0)] = (lambda x: None, part_tasks)
graph = HighLevelGraph.from_collections(final_name, dsk, dependencies=[df])
out = Delayed(name, graph)
if compute:
return compute_as_if_collection(Scalar, graph, [(final_name, 0)],
**compute_kwargs)
else:
return Scalar(graph, final_name, "")
def write_partition_binned(
cls,
df,
path,
fs,
filename,
partition_on,
return_metadata,
nbins,
fmd=None,
compression=None,
index_cols=None,
schema=None,
head=False,
custom_metadata=None,
**kwargs,
):
_meta = None
preserve_index = False
if _index_in_schema(index_cols, schema):
df.set_index(index_cols, inplace=True)
preserve_index = True
else:
index_cols = []
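  # One output file is written per bin: rows are selected by their 'bin_id'
  # column and each bin's table goes to '<filename>_<bin_id>', so every parquet
  # file only holds sequences from a single sequence-length bin.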
for ibin in range(nbins):
dff = df[df.bin_id == ibin]
filename_b = "%s_%d" % (filename, ibin)
t = cls._pandas_to_arrow_table(
dff,
preserve_index=preserve_index,
schema=schema,
)
if custom_metadata:
_md = t.schema.metadata
_md.update(custom_metadata)
t = t.replace_schema_metadata(metadata=_md)
if partition_on:
md_list = _write_partitioned(
t,
path,
filename_b,
partition_on,
fs,
index_cols=index_cols,
compression=compression,
**kwargs,
)
if md_list:
_meta = md_list[0]
for i in range(1, len(md_list)):
_append_row_groups(_meta, md_list[i])
else:
md_list = []
with fs.open(fs.sep.join([path, filename_b]), "wb") as fil:
pq.write_table(
t,
fil,
compression=compression,
metadata_collector=md_list,
**kwargs,
)
if md_list:
_meta = md_list[0]
_meta.set_file_path(filename)
# Return the schema needed to write the metadata
if return_metadata:
d = {"meta": _meta}
if head:
# Only return schema if this is the "head" partition
d["schema"] = t.schema
return [d]
else:
return []
#
# text files
#
class file_namer(object):
def __init__(self, bin_size, nbins, prefix=""):
self.__bin_size = bin_size
self.__nbins = nbins
self.__prefix = prefix
def name_function(self, i):
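    # to_textfiles_binned (below) opens nbins files per bag partition, so file
    # index i maps to partition i // nbins and bin i % nbins, producing names
    # like '0_0', '0_1', ..., '1_0', ...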
num = i // self.__nbins
bin_val = i % self.__nbins
return '%s%d_%d' % (self.__prefix, num, bin_val)
def _to_textfiles_chunk_binned(data, lazy_files, last_endline, bin_size):
nbins = len(lazy_files)
with ExitStack() as stack:
fs = [stack.enter_context(lazy_file) for lazy_file in lazy_files]
if isinstance(fs[0], io.TextIOWrapper):
endline = "\n"
ensure = ensure_unicode
else:
endline = b"\n"
ensure = ensure_bytes
starteds = [False] * nbins
for d in data:
      # Assuming the last whitespace-separated field contains the number of
      # tokens.
seq_len = int(d.split()[-1])
bin_id = (seq_len - 1) // bin_size
bin_id = nbins - 1 if bin_id > nbins - 1 else bin_id
if starteds[bin_id]:
fs[bin_id].write(endline)
else:
starteds[bin_id] = True
fs[bin_id].write(ensure(d))
if last_endline:
for f in fs:
f.write(endline)
def to_textfiles_binned(b,
path,
bin_size=64,
nbins=8,
compression="infer",
encoding=system_encoding,
compute=True,
storage_options=None,
last_endline=False,
**kwargs):
mode = "wb" if encoding is None else "wt"
files = open_files(path,
compression=compression,
mode=mode,
encoding=encoding,
name_function=file_namer(bin_size, nbins).name_function,
num=b.npartitions * nbins,
**(storage_options or {}))
name = "to-textfiles-binned-" + uuid.uuid4().hex
dsk = {(name, i): (_to_textfiles_chunk_binned, (b.name, i),
files[k:k + nbins], last_endline, bin_size)
for i, k in enumerate(range(0, len(files), nbins))}
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[b])
out = type(b)(graph, name, b.npartitions)
if compute:
out.compute(**kwargs)
return [f.path for f in files]
else:
return out.to_delayed()
| LDDL-main | lddl/dask/bert/binning.py |
#
# SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
# Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
import argparse
import dask
import dask.bag as db
import dask.distributed
import functools
import json
import nltk
import numpy as np
import os
import pyarrow as pa
import random
import time
import transformers
from collections import deque, namedtuple
from ..readers import (read_wikipedia, read_books, read_common_crawl,
split_id_text, estimate_block_size)
from lddl.utils import (expand_outdir_and_mkdir, attach_bool_arg,
serialize_np_array, deserialize_np_array)
from lddl.download.utils import parse_str_of_num_bytes
from .binning import to_textfiles_binned, to_dataframe_binned, to_parquet_binned
class Sentence:
def __init__(self, tokens):
self._tokens = tokens
def __repr__(self):
return 'Sentence(_tokens={})'.format(self._tokens)
def __len__(self):
return len(self._tokens)
class Document:
def __init__(self, doc_id, sentences):
self._id = doc_id
self._sentences = sentences
def __repr__(self):
return 'Document(_id={}, _sentences={})'.format(self._id, self._sentences)
def __len__(self):
return len(self._sentences)
def __getitem__(self, idx):
return self._sentences[idx]
def _get_documents(bag_texts, tokenizer, max_length=512):
def _tokenize(s):
return tokenizer.tokenize(s, max_length=max_length, truncation=True)
def _to_document(raw_text):
doc_id, text = split_id_text(raw_text)
sentence_strs = filter(
None,
map(lambda s: s.strip(), nltk.tokenize.sent_tokenize(text)),
)
sentences = (Sentence(tuple(tokens))
for tokens in (
_tokenize(sentence_str) for sentence_str in sentence_strs)
if len(tokens) > 0)
document = Document(doc_id, tuple(sentences))
return document
return bag_texts.map(_to_document).filter(lambda d: len(d._sentences) > 0)
def _shuffle_bag_texts(bag_texts):
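  # Shuffle by attaching a uniform random key to every text, converting the bag
  # to a dask DataFrame, shuffling on that key across partitions, and mapping
  # the result back to a bag of plain texts.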
return bag_texts.map(lambda text: {
'text': text,
'on': random.random(),
}).to_dataframe(meta={
'text': str,
'on': float,
}).shuffle(
'on',
ignore_index=True,
).sample(frac=1.0).to_bag().map(lambda t: t[0])
def _cut(lcut, tokens, rcut):
if random.random() > 0.5:
rcut.appendleft(tokens.pop())
else:
lcut.append(tokens.popleft())
def _is_following_subword(word):
  # True iff `word` looks like a WordPiece continuation token (e.g. '##ing'),
  # i.e. '##' followed by at least one alphabetic character.
  return word[:2] == '##' and len(word) > 2 and word[2:].isalpha()
def _adjust(lcut, tokens, rcut):
inclusive = (random.random() > 0.5)
while len(tokens) > 0 and _is_following_subword(tokens[0]):
if inclusive:
if len(lcut) == 0:
break
tokens.appendleft(lcut.pop())
else:
lcut.append(tokens.popleft())
inclusive = (random.random() > 0.5)
while len(rcut) > 0 and _is_following_subword(rcut[0]):
if inclusive:
tokens.append(rcut.popleft())
else:
if len(tokens) == 0:
break
rcut.appendleft(tokens.pop())
def _truncate(tokens_A, tokens_B, max_length):
tokens_A, tokens_B = deque(tokens_A), deque(tokens_B)
lcut_A, rcut_A = deque([]), deque([])
lcut_B, rcut_B = deque([]), deque([])
# Truncate each sequence into 3 pieces: lcut, tokens, rcut
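  # _cut randomly drops one token from either end of the longer sequence until
  # the pair fits within max_length; _adjust then moves WordPiece continuation
  # tokens ('##...') across the cut points so that the kept middle piece does
  # not start or end in the middle of a word.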
while len(tokens_A) + len(tokens_B) > max_length:
if len(tokens_A) > len(tokens_B):
_cut(lcut_A, tokens_A, rcut_A)
else:
_cut(lcut_B, tokens_B, rcut_B)
_adjust(lcut_A, tokens_A, rcut_A)
_adjust(lcut_B, tokens_B, rcut_B)
return tuple(tokens_A), tuple(tokens_B)
def _truncate_seq_pair(tokens_a, tokens_b, max_num_tokens):
"""Truncates a pair of sequences to a maximum sequence length."""
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_num_tokens:
break
trunc_tokens = tokens_a if len(tokens_a) > len(tokens_b) else tokens_b
assert len(trunc_tokens) >= 1
# We want to sometimes truncate from the front and sometimes from the
# back to add more randomness and avoid biases.
if random.random() < 0.5:
del trunc_tokens[0]
else:
trunc_tokens.pop()
MaskedLmInstance = namedtuple("MaskedLmInstance", ["index", "label"])
def create_masked_lm_predictions(tokens_a, tokens_b, masked_lm_ratio,
vocab_words):
"""Creates the predictions for the masked LM objective."""
num_tokens_a, num_tokens_b = len(tokens_a), len(tokens_b)
tokens = ['[CLS]'] + tokens_a + ['[SEP]'] + tokens_b + ['[SEP]']
cand_indexes = []
for (i, token) in enumerate(tokens):
if token == "[CLS]" or token == "[SEP]":
continue
cand_indexes.append(i)
random.shuffle(cand_indexes)
output_tokens = list(tokens)
num_to_predict = max(1, int(round(len(tokens) * masked_lm_ratio)))
masked_lms = []
covered_indexes = set()
for index in cand_indexes:
if len(masked_lms) >= num_to_predict:
break
if index in covered_indexes:
continue
covered_indexes.add(index)
masked_token = None
# 80% of the time, replace with [MASK]
if random.random() < 0.8:
masked_token = "[MASK]"
else:
# 10% of the time, keep original
if random.random() < 0.5:
masked_token = tokens[index]
# 10% of the time, replace with random word
else:
masked_token = vocab_words[random.randint(0, len(vocab_words) - 1)]
output_tokens[index] = masked_token
masked_lms.append(MaskedLmInstance(index=index, label=tokens[index]))
masked_lms = sorted(masked_lms, key=lambda x: x.index)
masked_lm_positions = []
masked_lm_labels = []
for p in masked_lms:
masked_lm_positions.append(p.index)
masked_lm_labels.append(p.label)
return (
output_tokens[1:1 + num_tokens_a],
output_tokens[2 + num_tokens_a:2 + num_tokens_a + num_tokens_b],
masked_lm_positions,
masked_lm_labels,
)
def create_pairs_from_document(
all_documents,
document_index,
max_seq_length=128,
short_seq_prob=0.1,
masking=False,
masked_lm_ratio=0.15,
vocab_words=None,
):
"""Create a pair for a single document."""
document = all_documents[document_index]
# Account for [CLS], [SEP], [SEP]
max_num_tokens = max_seq_length - 3
# We *usually* want to fill up the entire sequence since we are padding
# to `max_seq_length` anyways, so short sequences are generally wasted
# computation. However, we *sometimes*
# (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter
# sequences to minimize the mismatch between pre-training and fine-tuning.
# The `target_seq_length` is just a rough target however, whereas
# `max_seq_length` is a hard limit.
target_seq_length = max_num_tokens
if random.random() < short_seq_prob:
target_seq_length = random.randint(2, max_num_tokens)
# We DON'T just concatenate all of the tokens from a document into a long
# sequence and choose an arbitrary split point because this would make the
# next sentence prediction task too easy. Instead, we split the input into
# segments "A" and "B" based on the actual "sentences" provided by the user
# input.
instances = []
current_chunk = []
current_length = 0
i = 0
while i < len(document):
segment = document[i]
current_chunk.append(segment)
current_length += len(segment)
if i == len(document) - 1 or current_length >= target_seq_length:
if current_chunk:
# `a_end` is how many segments from `current_chunk` go into the `A`
# (first) sentence.
a_end = 1
if len(current_chunk) >= 2:
a_end = random.randint(1, len(current_chunk) - 1)
tokens_a = []
for j in range(a_end):
tokens_a.extend(current_chunk[j]._tokens)
tokens_b = []
# Random next
is_random_next = False
if len(current_chunk) == 1 or random.random() < 0.5:
is_random_next = True
target_b_length = target_seq_length - len(tokens_a)
# This should rarely go for more than one iteration for large
# corpora. However, just to be careful, we try to make sure that
# the random document is not the same as the document
# we're processing.
for _ in range(10):
random_document_index = random.randint(0, len(all_documents) - 1)
if random_document_index != document_index:
break
          # If the picked random document happens to be the current document,
          # the pair is not labeled as random-next.
if random_document_index == document_index:
is_random_next = False
random_document = all_documents[random_document_index]
random_start = random.randint(0, len(random_document) - 1)
for j in range(random_start, len(random_document)):
tokens_b.extend(random_document[j]._tokens)
if len(tokens_b) >= target_b_length:
break
# We didn't actually use these segments so we "put them back" so
# they don't go to waste.
num_unused_segments = len(current_chunk) - a_end
i -= num_unused_segments
# Actual next
else:
is_random_next = False
for j in range(a_end, len(current_chunk)):
tokens_b.extend(current_chunk[j]._tokens)
_truncate_seq_pair(tokens_a, tokens_b, max_num_tokens)
assert len(tokens_a) >= 1
assert len(tokens_b) >= 1
if masking:
(
tokens_a,
tokens_b,
masked_lm_positions,
masked_lm_labels,
) = create_masked_lm_predictions(
tokens_a,
tokens_b,
masked_lm_ratio,
vocab_words,
)
masked_lm_positions = serialize_np_array(
np.asarray(masked_lm_positions, dtype=np.uint16))
instance = {
'A': ' '.join(tokens_a),
'B': ' '.join(tokens_b),
'is_random_next': is_random_next,
'num_tokens': len(tokens_a) + len(tokens_b) + 3,
}
if masking:
instance.update({
'masked_lm_positions': masked_lm_positions,
'masked_lm_labels': ' '.join(masked_lm_labels),
})
instances.append(instance)
current_chunk = []
current_length = 0
i += 1
return instances
def _get_pairs(
wikipedia_path=None,
books_path=None,
common_crawl_path=None,
wikipedia_lang='en',
target_seq_length=128,
short_seq_prob=0.1,
blocksize=None,
num_blocks=None,
duplicate_factor=5,
sample_ratio=0.9,
seed=12345,
tokenizer=None,
masking=False,
masked_lm_ratio=0.15,
):
vocab_words = tuple(tokenizer.vocab.keys())
def _to_partition_pairs(partition_documents):
partition_documents = tuple(partition_documents)
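    # The documents of a partition are materialized once and then passed over
    # duplicate_factor times; create_pairs_from_document is stochastic, so each
    # pass can produce a different set of pairs from the same document.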
partition_pairs = []
for _ in range(duplicate_factor):
for document_index in range(len(partition_documents)):
partition_pairs.extend(
create_pairs_from_document(
partition_documents,
document_index,
max_seq_length=target_seq_length,
short_seq_prob=short_seq_prob,
masking=masking,
masked_lm_ratio=masked_lm_ratio,
vocab_words=vocab_words,
))
random.shuffle(partition_pairs)
return partition_pairs
if num_blocks is not None:
if blocksize is not None:
raise ValueError('Only one of num_blocks or blocksize needs to be set!')
blocksize = estimate_block_size(
(wikipedia_path, books_path, common_crawl_path),
num_blocks,
)
bags = []
if wikipedia_path is not None:
bags.append(
read_wikipedia(
wikipedia_path,
lang=wikipedia_lang,
blocksize=blocksize,
sample_ratio=sample_ratio,
sample_seed=seed,
))
if books_path is not None:
bags.append(
read_books(
books_path,
blocksize=blocksize,
sample_ratio=sample_ratio,
sample_seed=seed,
))
if common_crawl_path is not None:
bags.append(
read_common_crawl(
common_crawl_path,
blocksize=blocksize,
sample_ratio=sample_ratio,
sample_seed=seed,
))
bag_texts = db.concat(bags)
bag_texts = _shuffle_bag_texts(bag_texts)
bag_documents = _get_documents(bag_texts, tokenizer)
return bag_documents.map_partitions(_to_partition_pairs)
def _save_parquet(
pairs,
path,
bin_size=None,
target_seq_length=128,
masking=False,
):
base_meta = {
'A': str,
'B': str,
'is_random_next': bool,
'num_tokens': int,
}
base_schema = {
'A': pa.string(),
'B': pa.string(),
'is_random_next': pa.bool_(),
'num_tokens': pa.uint16(),
}
if masking:
base_meta.update({
'masked_lm_positions': bytes,
'masked_lm_labels': str,
})
base_schema.update({
'masked_lm_positions': pa.binary(),
'masked_lm_labels': pa.string()
})
if bin_size is None:
pairs.to_dataframe(meta=base_meta).to_parquet(
path,
engine='pyarrow',
write_index=False,
schema=base_schema,
)
else:
    nbins = target_seq_length // bin_size
    dfs = to_dataframe_binned(
        pairs,
        meta=base_meta,
        bin_size=bin_size,
        nbins=nbins,
    )
to_parquet_binned(
dfs,
path,
nbins,
engine='pyarrow',
write_index=False,
schema={
**base_schema,
'bin_id': pa.int64(),
},
)
def _save_txt(
pairs,
path,
bin_size=None,
target_seq_length=128,
masking=False,
):
if masking:
pairs = pairs.map(
lambda p: 'is_random_next: {} - [CLS] {} [SEP] {} [SEP] '
'- masked_lm_positions: {} - masked_lm_labels: {} - {}'.format(
p['is_random_next'],
p['A'],
p['B'],
deserialize_np_array(p['masked_lm_positions']),
p['masked_lm_labels'],
p['num_tokens'],
))
else:
pairs = pairs.map(
lambda p: 'is_random_next: {} - [CLS] {} [SEP] {} [SEP] - {}'.format(
p['is_random_next'],
p['A'],
p['B'],
p['num_tokens'],
))
if bin_size is None:
db.core.to_textfiles(pairs, os.path.join(path, '*.txt'))
else:
nbins = target_seq_length // bin_size
to_textfiles_binned(pairs, os.path.join(path, '*.txt'), bin_size, nbins)
def _save(
pairs,
path,
output_format='parquet',
bin_size=None,
target_seq_length=128,
masking=False,
):
if output_format == 'parquet':
_save_parquet(
pairs,
path,
bin_size=bin_size,
target_seq_length=target_seq_length,
masking=masking,
)
elif output_format == 'txt':
_save_txt(
pairs,
path,
bin_size=bin_size,
target_seq_length=target_seq_length,
masking=masking,
)
else:
raise ValueError('Format {} not supported!'.format(output_format))
def main(args):
dask.config.set({"distributed.comm.timeouts.connect": 60})
if args.bin_size is not None:
if args.bin_size > args.target_seq_length:
raise ValueError("Please provide a bin size that is <= target-seq-length")
if args.target_seq_length % args.bin_size != 0:
raise ValueError("Please provide a bin size that can divide the target "
"sequence length.")
if args.schedule == 'mpi':
from dask_mpi import initialize
initialize(local_directory='/tmp/dask-worker-space', nanny=False)
client = dask.distributed.Client()
else:
client = dask.distributed.Client(
n_workers=args.local_n_workers,
threads_per_worker=args.local_threads_per_worker,
)
nltk.download('punkt')
if os.path.isfile(args.vocab_file):
tokenizer = transformers.BertTokenizerFast(args.vocab_file)
else:
tokenizer = transformers.BertTokenizerFast.from_pretrained(args.vocab_file)
tic = time.perf_counter()
pairs = _get_pairs(
wikipedia_path=args.wikipedia,
books_path=args.books,
common_crawl_path=args.common_crawl,
wikipedia_lang=args.wikipedia_lang,
target_seq_length=args.target_seq_length,
short_seq_prob=args.short_seq_prob,
blocksize=args.block_size,
num_blocks=args.num_blocks,
duplicate_factor=args.duplicate_factor,
sample_ratio=args.sample_ratio,
seed=args.seed,
tokenizer=tokenizer,
masking=args.masking,
masked_lm_ratio=args.masked_lm_ratio,
)
args.sink = expand_outdir_and_mkdir(args.sink)
_save(
pairs,
args.sink,
output_format=args.output_format,
bin_size=args.bin_size,
target_seq_length=args.target_seq_length,
masking=args.masking,
)
print('Running the dask pipeline took {} s'.format(time.perf_counter() - tic))
def attach_args(parser=argparse.ArgumentParser("""
LDDL Preprocessor for the BERT Pretraining Task
The LDDL preprocessor takes the text shards under 'source' subdirectories from
datasets, and preprocesses them into parquet files under the directory specified
by --sink. These parquet files are the input to the LDDL Load Balancer.
MPI is used to scale the LDDL preprocessor to multi-processes and multi-nodes.
MPI can be accessed in various ways. For example, we can access MPI via mpirun:
$ mpirun -c <number of processes per node> --oversubscribe --allow-run-as-root \\
preprocess_bert_pretrain ...
We can also access MPI via SLURM in an HPC cluster:
$ srun -l --mpi=pmix --ntasks-per-node=<number of processes per node> \\
preprocess_bert_pretrain ...
If you want to use jemalloc as the memory allocator, set the value of the
LD_PRELOAD environment variable to the path that points to libjemalloc.so. In
mpirun, we can set the '-x LD_PRELOAD=<path to libjemalloc.so>' flag. In SLURM,
we can set the '--export=ALL,LD_PRELOAD=<path to libjemalloc.so>' flag.
Since the LDDL preprocessor needs some data as input, at least one of
'--wikipedia', '--books' and '--common-crawl' needs to be set. For each dataset
that is fetched by a LDDL downloader, a 'source' subdirectory is expected to be
generated by the LDDL downloader. The path to the 'source' subdirectory should
be used as the value to each of the '--wikipedia', '--books' and
'--common-crawl' flags.
LDDL supports sequence binning. Given a bin size, the input sequences can be
categorized into several bins. For example, if --target-seq-length is set to 128
and --bin-size (which specifies the stride of the sequence length for each bin)
is set to 32, then we have 4 bins:
- sequences that has 1 to 32 tokens;
- sequences that has 33 to 64 tokens;
- sequences that has 65 to 96 tokens;
- sequences that has 97 to 128 tokens.
Each parquet file that the LDDL preprocessor generates only has sequences that
belong to one bin. During one training iteration, for all ranks, the input
mini-batch of data returned by the LDDL data loader only contains sequences that
belong to one bin, therefore, saving:
- wasted computation during the forward and backward passes on the padding
tokens which need to be appended to sequences shorter than the longest
sequence in a mini-batch;
- idle waiting time for the rank that uses a batch of sequences shorter than the
longest sequence among all batches of all ranks.
The --bin-size flag needs to be set in order to enable sequence binning. Note
that, although a very small bin size would reduce the runtime as much as
possible, at the same time, it could lead to noticeable difference in the
convergence. A good bin size should be determined empirically by trading off
runtime with convergence impact.
""")):
parser.add_argument(
'--schedule',
type=str,
default='mpi',
choices=['mpi', 'local'],
help='Which scheduler is used to scale this LDDL pipeline. MPI should '
'always be used and will be used by default. The local scheduler can only'
      ' support a single node and is for debugging purposes only. Default: mpi',
)
defaults = {
'--local-n-workers': os.cpu_count(),
'--local-threads-per-worker': 1,
'--wikipedia': None,
'--books': None,
'--common-crawl': None,
'--sink': None,
'--output-format': 'parquet',
'--wikipedia-lang': 'en',
'--target-seq-length': 128,
'--short-seq-prob': 0.1,
'--block-size': None,
'--num-blocks': None,
'--bin-size': None,
'--sample-ratio': 0.9,
'--seed': 12345,
'--duplicate-factor': 5,
'--vocab-file': 'bert-large-uncased',
'--masked-lm-ratio': 0.15,
}
parser.add_argument(
'--local-n-workers',
type=int,
default=defaults['--local-n-workers'],
help='The number of worker processes for the local scheduler; only used '
'when --schedule=local . Default: {}'.format(
defaults['--local-n-workers']),
)
parser.add_argument(
'--local-threads-per-worker',
type=int,
default=defaults['--local-threads-per-worker'],
help='The number of Python user-level threads per worker process for the '
'local scheduler; only used when --schedule=local . Default: {}'.format(
defaults['--local-threads-per-worker']),
)
parser.add_argument(
'--wikipedia',
type=str,
default=defaults['--wikipedia'],
help="The path to the 'source' subdirectory for the Wikipedia corpus. "
"Default: {}".format(defaults['--wikipedia']),
)
parser.add_argument(
'--books',
type=str,
default=defaults['--books'],
help="The path to the 'source' subdirectory for the Toronto books corpus."
" Default: {}".format(defaults['--books']),
)
parser.add_argument(
'--common-crawl',
type=str,
default=defaults['--common-crawl'],
help="The path to the 'source' subdirectory for the Common Crawl news "
"corpus. Default: {}".format(defaults['--common-crawl']),
)
parser.add_argument(
'--sink',
type=str,
default=defaults['--sink'],
required=True,
help='The path to the directory that stores the output (parquet or txt) '
'files. Default: {}'.format(defaults['--sink']),
)
parser.add_argument(
'--output-format',
type=str,
default=defaults['--output-format'],
choices=['parquet', 'txt'],
help='The format of the output files. parquet should always be used and '
      'will be used by default. txt is for debugging purposes only. Default: '
'{}'.format(defaults['--output-format']),
)
parser.add_argument(
'--wikipedia-lang',
type=str,
default=defaults['--wikipedia-lang'],
choices=['en', 'zh'],
      help='The language type for the Wikipedia corpus. Currently, only en is '
'supported. Default: {}'.format(defaults['--wikipedia-lang']),
)
parser.add_argument(
'--target-seq-length',
type=int,
default=defaults['--target-seq-length'],
help="The targeted, maximum number of tokens for the "
"'[CLS] A [SEP] B [SEP]' pair input sequences to the BERT Pretraining "
"task. In the original BERT Pretraining task, Phase 1 requires "
"--target-seq-length=128 whereas Phase 2 requires --target-seq-length=512"
" . However, you can also be creative and set --target-seq-length to "
"other positive integers greater than 3. Default: {}".format(
defaults['--target-seq-length']),
)
parser.add_argument(
'--short-seq-prob',
type=float,
default=defaults['--short-seq-prob'],
help="If all samples are long sequences, BERT would overfit to only long "
"sequences. Therefore, you need to introduce shorter sequences sometimes."
" This flag specifies the probability of a random variable X with the "
"Bernoulli distribution (i.e., X in {{0, 1}} and "
"Pr(X = 1) = p = 1 - Pr(X = 0)), such that the value of X is drawn for "
"every document/article and, when X = 1, the value of the targeted, "
"maximum number of tokens for the '[CLS] A [SEP] B [SEP]' pair input "
"sequences is a random integer following the uniform distribution "
"between 2 and the value specified by --target-seq-length minus 3 (to "
"exclude the '[CLS]' and 'SEP' tokens). Default: {}".format(
defaults['--short-seq-prob']),
)
parser.add_argument(
'--block-size',
type=functools.partial(parse_str_of_num_bytes, return_str=False),
default=defaults['--block-size'],
metavar='n[KMG]',
help='The size of each output parquet/txt shard. Since Dask cannot '
'guarantee perfect load balance, this value is only used as an estimate. '
'Only one of --block-size and --num-blocks needs to be set, since one '
'value can be derived from the other. Default: {}'.format(
defaults['--block-size']),
)
parser.add_argument(
'--num-blocks',
type=int,
default=defaults['--num-blocks'],
help='The total number of the output parquet/txt shards. Since Dask '
'cannot guarantee perfect load balance, this value is only used as an '
'estimate. Only one of --block-size or --num-blocks needs to be set, '
'since one value can be derived from the other. Default: {}'.format(
defaults['--num-blocks']),
)
parser.add_argument(
'--bin-size',
type=int,
default=defaults['--bin-size'],
help='If this flag is set, sequence binning is enabled. This flag '
'specifies the stride of the sequence length for each bin. For example, '
'if --bin-size is 64, the first bin contains sequences with 1 to 64 '
'tokens, the second bin contains sequences with 65 to 128 tokens, and so '
'on. The bin size has to be an integer that can divide the value of '
'--target-seq-length. Default: {}'.format(defaults['--bin-size']),
)
parser.add_argument(
'--sample-ratio',
type=float,
default=defaults['--sample-ratio'],
      help='Not all articles/documents have to be included in the pretraining'
' dataset. This flag specifies the ratio of how many articles/documents '
'are sampled from each corpus (i.e., --wikipedia, --books and '
'--common_crawl). Default: {}'.format(defaults['--sample-ratio']),
)
parser.add_argument(
'--seed',
type=int,
default=defaults['--seed'],
help='The seed value for article/document sampling (i.e., '
'--sample-ratio). Note that, the other part of this Dask pipeline is '
'non-deterministic. Default: {}'.format(defaults['--seed']),
)
parser.add_argument(
'--duplicate-factor',
type=int,
default=defaults['--duplicate-factor'],
help="There is stochasticity when creating the '[CLS] A [SEP] B [SEP]' "
"pair input sequences for each article/document, specifically from "
"determining (1) the targeted, maximum number of tokens for each "
"article/document, (2) which sentences are used as B, (3) how the "
"sequence is truncated in case it is longer than the targeted, maximum "
"number of tokens. Therefore, even the same article/document could lead "
"to a different set of input sequences at different times. The "
"--duplicate-factor flag specifies how many times the preprocessor "
"repeats to create the input pairs from the same article/document. "
"Default: {}".format(defaults['--duplicate-factor']),
)
parser.add_argument(
'--vocab-file',
type=str,
default=defaults['--vocab-file'],
help='Either the path to a vocab file, or the model id of a pretrained '
'model hosted inside a model repo on huggingface.co. '
'Default: {}'.format(defaults['--vocab-file']),
)
attach_bool_arg(
parser,
'masking',
default=False,
help_str='LDDL supports both static and dynamic masking. Static masking '
'means that the masking operation is applied by the preprocessor, thus, '
'which and how tokens are masked is fixed during training. Dynamic '
'masking refers to delaying the masking operation to the data loader, '
'therefore, the same input sequence could be masked differently the next '
'time it is returned by the data loader during a training iteration. In '
'order to enable static masking, this flag needs to be set. This flag is'
' not set by default.',
)
parser.add_argument(
'--masked-lm-ratio',
type=float,
default=defaults['--masked-lm-ratio'],
help='The ratio of the number of tokens to be masked when static masking '
'is enabled (i.e., when --masking is set). Default: {}'.format(
defaults['--masked-lm-ratio']),
)
return parser
def console_script():
main(attach_args().parse_args())
| LDDL-main | lddl/dask/bert/pretrain.py |
| LDDL-main | lddl/dask/bert/__init__.py |
#
# SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
# Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
import logging
import os
import pathlib
from .utils import (get_local_rank, get_node_rank)
def _get_logger_name(node_rank, local_rank=None, worker_rank=None):
if local_rank is None and worker_rank is None:
return 'node-{}'.format(node_rank)
elif worker_rank is None:
return 'node-{}_local-{}'.format(node_rank, local_rank)
else:
return 'node-{}_local-{}_worker-{}'.format(node_rank, local_rank,
worker_rank)
class DummyLogger:
def debug(self, msg, *args, **kwargs):
pass
def info(self, msg, *args, **kwargs):
pass
def warning(self, msg, *args, **kwargs):
pass
def error(self, msg, *args, **kwargs):
pass
def critical(self, msg, *args, **kwargs):
pass
def log(self, msg, *args, **kwargs):
pass
def exception(self, msg, *args, **kwargs):
pass
class DatasetLogger:
def __init__(
self,
log_dir=None,
log_level=logging.INFO,
):
self._log_dir = log_dir
self._node_rank = get_node_rank()
self._local_rank = get_local_rank()
self._worker_rank = None
self._log_level = log_level
if log_dir is not None:
pathlib.Path(log_dir).mkdir(parents=True, exist_ok=True)
# Create node level logger.
if self._local_rank == 0:
self._create_logger(_get_logger_name(self._node_rank))
# Create local_rank level logger.
self._create_logger(
_get_logger_name(self._node_rank, local_rank=self._local_rank))
def _create_logger(self, name):
logger = logging.getLogger(name)
fmt = logging.Formatter(
'LDDL - %(asctime)s - %(filename)s:%(lineno)d:%(funcName)s - %(name)s '
'- %(levelname)s : %(message)s')
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(fmt)
logger.addHandler(stream_handler)
if self._log_dir is not None:
path = os.path.join(self._log_dir, '{}.txt'.format(name))
file_handler = logging.FileHandler(path)
file_handler.setFormatter(fmt)
logger.addHandler(file_handler)
logger.setLevel(self._log_level)
return logger
def init_for_worker(self, worker_rank):
if self._worker_rank is None:
self._worker_rank = worker_rank
self._create_logger(
_get_logger_name(
self._node_rank,
local_rank=self._local_rank,
worker_rank=worker_rank,
))
def to(self, which):
assert which in {'node', 'rank', 'worker'}
if which == 'node':
if (self._local_rank == 0 and
(self._worker_rank is None or self._worker_rank == 0)):
return logging.getLogger(_get_logger_name(self._node_rank))
else:
return DummyLogger()
elif which == 'rank':
if self._worker_rank is None or self._worker_rank == 0:
return logging.getLogger(
_get_logger_name(self._node_rank, local_rank=self._local_rank))
else:
return DummyLogger()
else: # which == 'worker'
return logging.getLogger(
_get_logger_name(
self._node_rank,
local_rank=self._local_rank,
worker_rank=self._worker_rank,
))
| LDDL-main | lddl/paddle/log.py |
#
# SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
# Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
import json
import numpy as np
import os
import pyarrow.parquet as pq
import random
import paddle
from paddle.io import IterableDataset, get_worker_info
from paddle.fluid.framework import in_dygraph_mode
from lddl.types import File
from lddl.utils import get_num_samples_of_parquet
from lddl.random import randrange, shuffle, sample
from .utils import (get_rank, get_local_rank, get_world_size,
get_nproc_per_node, get_num_nodes, get_node_rank,
all_reduce_in_static_mode)
class ShuffleBuffer:
def __init__(
self,
files,
max_num_samples_to_yield,
decode_record_batch,
size,
warmup_factor,
logger,
rng_state,
):
num_samples_wasted = (sum(
(f.num_samples for f in files)) - max_num_samples_to_yield)
assert 0 <= num_samples_wasted <= len(files)
self._files = files
self._max_num_samples_to_yield = max_num_samples_to_yield
self._decode_record_batch = decode_record_batch
self._size = size
self._warmup_factor = warmup_factor
self._logger = logger
self._rng_state = rng_state
@property
def num_samples(self):
return sum((f.num_samples for f in self._files))
def _randrange(self, stop):
n, self._rng_state = randrange(stop, rng_state=self._rng_state)
return n
def _shuffle(self, x):
self._rng_state = shuffle(x, rng_state=self._rng_state)
def __iter__(self):
buffer = []
num_samples_to_yield = min(
self._max_num_samples_to_yield,
sum((f.num_samples for f in self._files)),
)
remaining_num_samples = num_samples_to_yield
for f in self._files:
self._logger.to('worker').info('Reading {}'.format(f.path))
for b in pq.read_table(f.path).to_batches():
for isample in self._decode_record_batch(b):
if remaining_num_samples <= 0:
return
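          # Warm-up: the eviction threshold grows by warmup_factor for every
          # sample yielded so far, up to the full buffer size; once the buffer
          # is "full" under that threshold, a random buffered sample is swapped
          # out for the incoming one, giving an approximate streaming shuffle.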
if (len(buffer) >= min(
self._size, (num_samples_to_yield - remaining_num_samples + 1) *
self._warmup_factor)):
replace_idx = self._randrange(len(buffer))
yield buffer[replace_idx]
buffer[replace_idx] = isample
remaining_num_samples -= 1
else:
buffer.append(isample)
self._shuffle(buffer)
for isample in buffer:
if remaining_num_samples <= 0:
return
yield isample
remaining_num_samples -= 1
class ParquetDataset(IterableDataset):
def __init__(
self,
file_paths,
transform=lambda x: x,
shuffle_buffer_size=16384,
shuffle_buffer_warmup_factor=16,
base_seed=12345,
logger=None,
start_epoch=0,
):
super().__init__()
self._transform = transform
    self._local_rank = get_local_rank()
self._shuffle_buffer_size = shuffle_buffer_size
self._shuffle_buffer_warmup_factor = shuffle_buffer_warmup_factor
self._base_seed = base_seed
self._rank = get_rank()
self._world_size = get_world_size()
self._nproc_per_node = get_nproc_per_node()
self._num_nodes = get_num_nodes()
self._node_rank = get_node_rank()
self._epoch = start_epoch - 1
self._logger = logger
assert len(file_paths) % self._num_nodes == 0
assert len(file_paths) % self._world_size == 0
self._files = self._get_files(file_paths)
max_num_samples_per_file = max((f.num_samples for f in self._files))
min_num_samples_per_file = min((f.num_samples for f in self._files))
assert min_num_samples_per_file in {
max_num_samples_per_file - 1,
max_num_samples_per_file,
}
self._num_samples_per_file = min_num_samples_per_file
total_num_samples = sum((f.num_samples for f in self._files))
num_samples_lost = (total_num_samples -
self._num_samples_per_file * len(self._files))
self._logger.to('node').warning('lost {}/{}={}% samples in total'.format(
num_samples_lost,
total_num_samples,
num_samples_lost / total_num_samples * 100,
))
self._world_rng_state = None
self._worker_rng_state = None
def _get_files(self, file_paths):
if in_dygraph_mode():
all_files_num_samples = paddle.zeros((len(file_paths),), dtype='int64')
else:
all_files_num_samples = np.zeros((len(file_paths),), dtype=np.int64)
# Figure out how many samples in each file.
num_samples_cache = {} # Map dirname to the dict of {basename: num_samples}
for idx in range(self._rank, len(file_paths), self._world_size):
fp = file_paths[idx]
dn = os.path.dirname(fp)
bn = os.path.basename(fp)
# Load the num_samples cache file if it exists.
if dn not in num_samples_cache:
nsfp = os.path.join(dn, '.num_samples.json')
try:
with open(nsfp, 'r') as nsf:
num_samples_cache[dn] = json.load(nsf)
except Exception as e:
self._logger.to('rank').warning('failed to load {}: {}'.format(
nsfp, e))
# Mark that the num_samples cache file doesn't exist for this
# directory.
num_samples_cache[dn] = None
if num_samples_cache[dn] is not None and bn in num_samples_cache[dn]:
all_files_num_samples[idx] = num_samples_cache[dn][bn]
else:
# Find out num_samples by loading the parquet table.
all_files_num_samples[idx] = get_num_samples_of_parquet(fp)
if self._world_size > 1:
      # Sync. across all ranks.
if in_dygraph_mode():
paddle.distributed.all_reduce(
all_files_num_samples,
op=paddle.distributed.ReduceOp.SUM,
)
else:
all_files_num_samples = all_reduce_in_static_mode(
all_files_num_samples, paddle.distributed.ReduceOp.SUM)
all_files_num_samples = all_files_num_samples.tolist()
return [File(fp, ns) for fp, ns in zip(file_paths, all_files_num_samples)]
def __len__(self):
""" This function only returns how many samples per rank will be yielded
by this dataset.
Note that, len(dataloader), where dataloader is a PaddlePaddle DataLoader
wrapping this dataset, does NOT return the accurate number of batches. This
is because, when (num_samples_per_file * num_files_per_worker) is not
divisible by batch_size, each worker is going to generate a partial batch
at the very end.
    However, the PaddlePaddle DataLoader's __len__ only divides the number
    returned from this function by batch_size, which can be smaller than the
    actual number of batches by at most (num_workers - 1).
    The PaddlePaddle DataLoader's __len__ therefore needs to be patched (see
    lddl.paddle.dataloader.DataLoader) for the number of batches to be
    reported correctly.
"""
return self._num_samples_per_file * len(self._files) // self._world_size
@property
def num_samples_per_file(self):
return self._num_samples_per_file
@property
def num_files_per_rank(self):
return len(self._files) // self._world_size
def _decode_record_batch(self, b):
raise NotImplementedError('ParquetDataset is an abstract/interface class!')
def _world_identical_sample(self, population, k, counts=None):
s, self._world_rng_state = sample(
population,
k,
rng_state=self._world_rng_state,
)
return s
def _init_worker(self):
worker_info = get_worker_info()
if worker_info is None:
num_workers_per_rank = 1
worker_rank = 0
else:
num_workers_per_rank = worker_info.num_workers
worker_rank = worker_info.id
assert (len(self._files) % (self._world_size * num_workers_per_rank) == 0)
self._logger.init_for_worker(worker_rank)
return worker_rank, num_workers_per_rank
def _init_rng_states(self, worker_rank, num_workers_per_rank):
orig_rng_state = random.getstate()
random.seed(self._base_seed + self._epoch)
self._world_rng_state = random.getstate()
random.seed(self._base_seed +
(self._epoch * self._world_size + self._rank) *
num_workers_per_rank + worker_rank)
self._worker_rng_state = random.getstate()
random.setstate(orig_rng_state)
def __iter__(self):
self._epoch += 1
worker_rank, num_workers_per_rank = self._init_worker()
self._init_rng_states(worker_rank, num_workers_per_rank)
files = self._world_identical_sample(self._files, k=len(self._files))
self._logger.to('node').warning('epoch = {}'.format(self._epoch))
self._logger.to('worker').info(
'\n'.join(['files('] + [' {}'.format(f) for f in files] + [')']))
rank_files = files[self._rank::self._world_size]
worker_files = rank_files[worker_rank::num_workers_per_rank]
self._logger.to('worker').info(
'\n'.join(['worker_files('] + [' {}'.format(f) for f in worker_files] +
[')']))
sb = ShuffleBuffer(
worker_files,
self._num_samples_per_file * len(worker_files),
lambda b: self._decode_record_batch(b),
self._shuffle_buffer_size,
self._shuffle_buffer_warmup_factor,
self._logger,
self._worker_rng_state,
)
for isample in iter(sb):
yield self._transform(isample)
| LDDL-main | lddl/paddle/datasets.py |
from .bert import get_bert_pretrain_data_loader
| LDDL-main | lddl/paddle/__init__.py |
#
# SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
# Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
import os
import paddle
from paddle.fluid.framework import in_dygraph_mode
from paddle.distributed.fleet.base.private_helper_function import wait_server_ready
def get_rank():
return int(os.getenv("PADDLE_TRAINER_ID", "0"))
def get_local_rank():
return int(os.getenv('PADDLE_RANK_IN_NODE', '0'))
def get_world_size():
return int(os.getenv('PADDLE_TRAINERS_NUM', '1'))
def barrier():
if get_world_size() > 1:
paddle.distributed.barrier()
def get_endpoints():
endpoints = os.getenv('PADDLE_TRAINER_ENDPOINTS')
return endpoints.split(",")
def get_current_endpoint():
return os.getenv("PADDLE_CURRENT_ENDPOINT")
def get_other_endpoints():
other_endpoints = get_endpoints()[:]
current_endpoint = get_current_endpoint()
other_endpoints.remove(current_endpoint)
return other_endpoints
def get_num_nodes():
# paddle_local_size = int(os.getenv('PADDLE_LOCAL_SIZE', '-1'))
endpoints = get_endpoints()[:]
ips = set()
for endpoint in endpoints:
ip = endpoint.split(":")[0]
ips.add(ip)
return len(ips)
def get_nproc_per_node():
return get_world_size() // get_num_nodes()
def get_node_rank():
""" This assume the training processes are launched via
paddle.distributed.launch.py. Therefore, the ordering scheme of
rank -> (node_rank, local_rank) mapping is:
0 -> (0, 0)
1 -> (0, 1)
...
nproc_per_node -> (1, 0)
nproc_per_node+1 -> (1, 1)
...
"""
nproc_per_node = get_nproc_per_node()
node_rank = get_rank() // nproc_per_node
return node_rank
def all_reduce_in_static_mode(local_tensor, reduce_op):
assert not in_dygraph_mode(), "this function can only be used in static mode"
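  # A throwaway static program is built here: a NCCL unique id is generated and
  # exchanged across the trainer endpoints (c_gen_nccl_id), each rank joins the
  # communicator (c_comm_init), and a single all_reduce over the fed tensor is
  # run, returning the reduced values as a numpy array.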
rank = get_rank()
local_rank = get_local_rank()
nranks = get_world_size()
current_endpoint = get_current_endpoint()
other_endpoints = get_other_endpoints()
device = paddle.set_device("gpu")
if rank == 0:
wait_server_ready(other_endpoints)
startup_program = paddle.static.Program()
main_program = paddle.static.Program()
exe = paddle.static.Executor(device)
block = startup_program.global_block()
nccl_id_var = block.create_var(
name=paddle.fluid.unique_name.generate('nccl_id'),
persistable=True,
type=paddle.fluid.core.VarDesc.VarType.RAW,
)
block.append_op(
type='c_gen_nccl_id',
inputs={},
outputs={'Out': nccl_id_var},
attrs={
'rank': rank,
'endpoint': current_endpoint,
'other_endpoints': other_endpoints,
},
)
block.append_op(
type='c_comm_init',
inputs={'X': nccl_id_var},
outputs={},
attrs={
'nranks': nranks,
'rank': rank,
'ring_id': 0
},
)
with paddle.static.program_guard(main_program, startup_program):
data = paddle.static.data(name='local_value', shape=[-1], dtype='int64')
paddle.distributed.all_reduce(data, op=reduce_op)
exe.run(startup_program)
results = exe.run(main_program,
feed={'local_value': local_tensor},
fetch_list=[data.name])
return results[0]
| LDDL-main | lddl/paddle/utils.py |
#
# SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
# Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
import random
import paddle
from lddl.random import choices
from .datasets import ParquetDataset
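# Binned interleaves batches from multiple per-bin dataloaders (one per
# sequence-length bin): at every iteration, a bin is sampled with probability
# proportional to the number of samples remaining in that bin, and the next
# batch is drawn from that bin's dataloader.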
class Binned:
def __init__(self, dataloaders, base_seed=12345, start_epoch=0, logger=None):
self._dataloaders = dataloaders
self._base_seed = base_seed
self._epoch = start_epoch - 1
self._logger = logger
self._world_rng_state = None
def _init_rng_states(self):
orig_rng_state = random.getstate()
random.seed(self._base_seed + self._epoch)
self._world_rng_state = random.getstate()
random.setstate(orig_rng_state)
def _init_iter(self):
self._init_rng_states()
num_samples_remaining = [len(dl.dataset) for dl in self._dataloaders]
dataiters = [iter(dl) for dl in self._dataloaders]
return num_samples_remaining, dataiters
def __len__(self):
return sum((len(dl) for dl in self._dataloaders))
def _get_batch_size(self, batch):
raise NotImplementedError('Binned is an abstract class!')
def _choices(self, population, weights=None, cum_weights=None, k=1):
c, self._world_rng_state = choices(
population,
weights=weights,
cum_weights=cum_weights,
k=k,
rng_state=self._world_rng_state,
)
return c
def __iter__(self):
self._epoch += 1
num_samples_remaining, dataiters = self._init_iter()
for i in range(len(self)):
bin_id = self._choices(
list(range(len(dataiters))),
weights=num_samples_remaining,
k=1,
)[0]
self._logger.to('rank').info('{}-th iteration selects bin_id = {}'.format(
i, bin_id))
assert num_samples_remaining[bin_id] > 0
batch = next(dataiters[bin_id])
num_samples_remaining[bin_id] -= self._get_batch_size(batch)
yield batch
assert sum((nsr for nsr in num_samples_remaining)) == 0
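# Patch paddle.io.DataLoader.__len__ so that, for a ParquetDataset, the batch
# count accounts for the partial batch each worker may emit at the end of its
# own subset of files.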
class DataLoader(paddle.io.DataLoader):
def __len__(self):
if isinstance(self.dataset, ParquetDataset):
num_workers_per_rank = max(self.num_workers, 1)
num_files_per_worker = self.dataset.num_files_per_rank // num_workers_per_rank
num_samples_per_worker = self.dataset.num_samples_per_file * num_files_per_worker
num_batches_per_worker = (
(num_samples_per_worker - 1) // self.batch_size + 1)
return num_batches_per_worker * num_workers_per_rank
else:
      return super().__len__()
| LDDL-main | lddl/paddle/dataloader.py |
#
# SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
# Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
import logging
import os
import paddle
import transformers
from lddl.utils import (get_all_parquets_under, get_all_bin_ids,
get_file_paths_for_bin_id, deserialize_np_array)
from .dataloader import Binned, DataLoader
from .datasets import ParquetDataset
from .log import DatasetLogger
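# Decode a pyarrow record batch into per-sample tuples of
# (A, B, is_random_next[, masked_lm_positions, masked_lm_labels]).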
def _decode_record_batch(b):
b = b.to_pydict()
if 'masked_lm_positions' in b:
assert 'masked_lm_labels' in b
columns = tuple((b[k] for k in (
'A',
'B',
'is_random_next',
'masked_lm_positions',
'masked_lm_labels',
) if k in b))
for sample in zip(*columns):
yield sample
class BertPretrainDataset(ParquetDataset):
def _decode_record_batch(self, b):
return _decode_record_batch(b)
class BertPretrainBinned(Binned):
def _get_batch_size(self, batch):
if isinstance(batch['input_ids'], paddle.Tensor):
return batch['input_ids'].shape[0]
else:
return batch['input_ids'].shape()[0]
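# Convert a batch of raw string samples into the padded paddle.Tensor inputs
# expected by BERT pretraining: input_ids, token_type_ids, attention_mask,
# next_sentence_labels, plus masked_lm_labels (static masking) or
# special_tokens_mask (dynamic masking).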
def _to_encoded_inputs(
batch,
tokenizer,
sequence_length_alignment=8,
ignore_index=-1,
):
batch_size = len(batch)
As, Bs, are_random_next = [], [], []
static_masking = (len(batch[0]) > 3)
if static_masking:
assert len(batch[0]) == 5
all_masked_lm_positions, all_masked_lm_labels = [], []
# Unpack each field.
for sample in batch:
As.append(tuple(sample[0].split()))
Bs.append(tuple(sample[1].split()))
are_random_next.append(sample[2])
if static_masking:
all_masked_lm_positions.append(
paddle.to_tensor(deserialize_np_array(sample[3]).astype(int)))
all_masked_lm_labels.append(sample[4].split())
# Figure out the sequence length of this batch.
batch_seq_len = max(
(len(tokens_A) + len(tokens_B) + 3 for tokens_A, tokens_B in zip(As, Bs)))
  # Align batch_seq_len to a multiple of sequence_length_alignment, because
  # Tensor Cores perform better when the sequence dimension is aligned this way.
batch_seq_len = (((batch_seq_len - 1) // sequence_length_alignment + 1) *
sequence_length_alignment)
# Allocate the input paddle.Tensor's.
input_ids = paddle.zeros((batch_size, batch_seq_len), dtype='int64')
token_type_ids = paddle.zeros_like(input_ids)
attention_mask = paddle.zeros_like(input_ids)
if static_masking:
labels = paddle.full_like(input_ids, ignore_index)
else:
special_tokens_mask = paddle.zeros_like(input_ids)
# Fill in the input paddle.Tensor's.
for sample_idx in range(batch_size):
tokens_A, tokens_B = As[sample_idx], Bs[sample_idx]
# Prepare the input token IDs.
tokens = ('[CLS]',) + tokens_A + ('[SEP]',) + tokens_B + ('[SEP]',)
input_ids[sample_idx, :len(tokens)] = paddle.to_tensor(
tokenizer.convert_tokens_to_ids(tokens),
dtype='int64',
)
# Prepare the token type ids (segment ids).
start_idx = len(tokens_A) + 2
end_idx = len(tokens_A) + len(tokens_B) + 3
token_type_ids[sample_idx, start_idx:end_idx] = 1
# Prepare the attention mask (input mask).
attention_mask[sample_idx, :end_idx] = 1
if static_masking:
# Prepare the MLM labels.
labels[sample_idx,
all_masked_lm_positions[sample_idx]] = paddle.to_tensor(
tokenizer.convert_tokens_to_ids(
all_masked_lm_labels[sample_idx]),
dtype='int64',
)
else:
# Prepare special_tokens_mask (for DataCollatorForLanguageModeling)
special_tokens_mask[sample_idx, 0] = 1
special_tokens_mask[sample_idx, len(tokens_A) + 1] = 1
special_tokens_mask[sample_idx, len(tokens_A) + len(tokens_B) + 2:] = 1
# reshape to [batch, 1, 1, seq_len]
attention_mask = attention_mask.unsqueeze(axis=[1, 2])
next_sentence_labels = paddle.to_tensor(are_random_next,
dtype='int64').unsqueeze(axis=-1)
# Compose output dict.
encoded_inputs = {
'input_ids': input_ids,
'token_type_ids': token_type_ids,
'attention_mask': attention_mask,
'next_sentence_labels': next_sentence_labels,
}
if static_masking:
encoded_inputs['masked_lm_labels'] = labels
else:
encoded_inputs['special_tokens_mask'] = special_tokens_mask
return encoded_inputs
def _mask_tokens(
inputs,
special_tokens_mask=None,
tokenizer=None,
mlm_probability=0.15,
ignore_index=-1,
):
"""
Prepare masked tokens inputs/labels for masked language modeling: 80% MASK,
10% random, 10% original.
"""
labels = inputs.clone()
# We sample a few tokens in each sequence for MLM training (with probability
# `mlm_probability`)
probability_matrix = paddle.full(labels.shape, mlm_probability)
if special_tokens_mask is None:
special_tokens_mask = [
tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True)
for val in labels.tolist()
]
special_tokens_mask = paddle.to_tensor(special_tokens_mask, dtype='bool')
else:
special_tokens_mask = special_tokens_mask.astype('bool')
# probability_matrix.masked_fill_(special_tokens_mask, value=0.0)
def _masked_fill(x, mask, value):
y = paddle.full(x.shape, value, x.dtype)
return paddle.where(mask, y, x)
probability_matrix = _masked_fill(probability_matrix,
special_tokens_mask,
value=0.0)
masked_indices = paddle.bernoulli(probability_matrix).astype('bool')
# We only compute loss on masked tokens
labels[~masked_indices] = ignore_index
# 80% of the time, we replace masked input tokens with tokenizer.mask_token
# ([MASK])
indices_replaced = (paddle.bernoulli(paddle.full(labels.shape, 0.8)).astype('bool') &
masked_indices)
inputs[indices_replaced] = tokenizer.convert_tokens_to_ids(
tokenizer.mask_token)
# 10% of the time, we replace masked input tokens with random word
indices_random = (paddle.bernoulli(paddle.full(labels.shape, 0.5)).astype('bool') &
masked_indices & ~indices_replaced)
random_words = paddle.randint(high=len(tokenizer), shape=labels.shape, dtype='int64')
inputs[indices_random] = random_words[indices_random]
# The rest of the time (10% of the time) we keep the masked input tokens
# unchanged
return inputs, labels
def get_bert_pretrain_data_loader(
path,
shuffle_buffer_size=16384,
shuffle_buffer_warmup_factor=16,
tokenizer_class=transformers.BertTokenizerFast,
vocab_file=None,
tokenizer_kwargs={},
data_loader_class=DataLoader,
data_loader_kwargs={},
mlm_probability=0.15,
base_seed=12345,
log_dir=None,
log_level=logging.INFO,
return_raw_samples=False,
start_epoch=0,
sequence_length_alignment=8,
ignore_index=-1,
):
"""Gets a PaddlePaddle DataLoader for the BERT pretraining task.
  The LDDL DataLoader can be used in the same way as a normal PaddlePaddle
  DataLoader.
The LDDL DataLoader streams samples from disk into memory, and uses a shuffle
buffer to perform shuffling: at each iteration, a random sample from the
shuffle buffer is popped, and a new sample is pushed into the shuffle buffer
at this vacant location.
Args:
path: A string of the path pointing to the directory that contains the
pretraining dataset in the format of balanced parquet shards.
shuffle_buffer_size: The size of the shuffle buffer.
shuffle_buffer_warmup_factor: At the beginning, the shuffle buffer is empty.
      Therefore, in order to fill the shuffle buffer, at each iteration, more
      samples need to be pushed into the shuffle buffer than are popped out of
      it. This factor indicates how many samples are pushed into the shuffle
      buffer per sample popped out, until the shuffle buffer is full.
tokenizer_class: The HuggingFace tokenizer class for BERT pretraining.
vocab_file: The path to a vocab file, or the name of a pretrained model
registered on huggingface.co (e.g., 'bert-large-uncased') of which the
vocab file is downloaded.
tokenizer_kwargs: The arguments to the tokenizer class.
data_loader_class: The class of the DataLoader.
data_loader_kwargs: The arguments to the DataLoader class.
mlm_probability: The probability for masking tokens in the masked language
modeling task (in BERT pretraining).
base_seed: A base seed value on which other seeds used in the DataLoader are
based.
log_dir: The path to a directory to store the logs from the LDDL DataLoader.
log_level: The logging verbose level.
return_raw_samples: If True, returns the raw string pairs instead of token
indices.
start_epoch: The epoch number to start from. An epoch is defined as going
through every sample in a dataset once.
sequence_length_alignment: To get the input tensors of token indices, each
sequence in a batch will only be padded to the longest sequence in this
batch. However, certain hardware features might prefer the shapes of the
input tensors to meet certain conditions. For example, it's better for the
Tensor Core on NVIDIA GPUs if the dimensions of the input tensors are
divisible by 8. Therefore, this argument is an alignment factor such that
the sequences in a batch will be padded to the first sequence length
larger than the longest sequence in this batch and also divisible by this
alignment factor.
ignore_index: The label value for the unmasked tokens in the language
modeling task (in BERT pretraining).
Returns:
A PaddlePaddle DataLoader that, in each iteration, yield:
- If return_raw_samples is False, a dict of 5 key-value pairs which are the
necessary input for BERT pretraining:
{
'input_ids': a paddle.Tensor of size [batch_size, sequence_length],
'token_type_ids': a paddle.Tensor of size [batch_size, sequence_length],
'attention_mask': a paddle.Tensor of size [batch_size, sequence_length],
'masked_lm_labels': a paddle.Tensor of size [batch_size, sequence_length],
'next_sentence_labels': a paddle.Tensor of size [batch_size],
}
- If return_raw_samples is True, a list of the following lists:
[
strings of the first sequences in the sequence pairs,
strings of the second sequences in the sequence pairs,
bools that indicate whether the second sequences are the next sequences
for the first sequences,
numpy.ndarrays of positions of the masked tokens for the masked language
modeling task (only exists if static masking is enabled),
      strings of space-separated labels of the masked tokens for the masked
language modeling task (only exists if static masking is enabled),
]
Examples:
train_dataloader = lddl.paddle.get_bert_pretrain_data_loader(
input_dir,
vocab_file=vocab_file,
data_loader_kwargs={
'batch_size': batch_size,
'num_workers': num_workers,
'pin_memory': True,
},
log_level=logging.WARNING,
start_epoch=start_epoch,
)
for epoch in range(start_epoch, start_epoch + epochs):
for i, batch in enumerate(train_dataloader):
prediction_scores, seq_relationship_score = model(
input_ids=batch['input_ids'],
token_type_ids=batch['token_type_ids'],
attention_mask=batch['attention_mask'],
)
loss = criterion(
prediction_scores,
seq_relationship_score,
batch['masked_lm_labels'],
batch['next_sentence_labels'],
)
...
"""
assert isinstance(path, str)
assert isinstance(shuffle_buffer_size, int) and shuffle_buffer_size > 0
assert (isinstance(shuffle_buffer_warmup_factor, int) and
shuffle_buffer_warmup_factor > 0)
assert tokenizer_class in {
transformers.BertTokenizerFast, transformers.BertTokenizer
}
assert isinstance(vocab_file, str)
assert isinstance(tokenizer_kwargs, dict)
assert data_loader_class in {DataLoader}
assert isinstance(data_loader_kwargs, dict)
assert isinstance(mlm_probability, (int, float)) and 0 <= mlm_probability <= 1
assert isinstance(base_seed, int)
assert log_dir is None or isinstance(log_dir, str)
assert log_level in {
logging.NOTSET, logging.DEBUG, logging.INFO, logging.WARNING,
logging.ERROR, logging.CRITICAL
}
assert isinstance(return_raw_samples, bool)
assert isinstance(start_epoch, int)
if os.path.isfile(vocab_file):
tokenizer = tokenizer_class(vocab_file, **tokenizer_kwargs)
else:
tokenizer = tokenizer_class.from_pretrained(vocab_file, **tokenizer_kwargs)
def _batch_preprocess(batch):
with paddle.no_grad():
encoded_inputs = _to_encoded_inputs(
batch,
tokenizer,
sequence_length_alignment=sequence_length_alignment,
ignore_index=ignore_index,
)
if 'special_tokens_mask' in encoded_inputs: # Dynamic masking.
special_tokens_mask = encoded_inputs.pop('special_tokens_mask', None)
(encoded_inputs['input_ids'], encoded_inputs['masked_lm_labels']) = _mask_tokens(
encoded_inputs['input_ids'],
special_tokens_mask=special_tokens_mask,
tokenizer=tokenizer,
mlm_probability=mlm_probability,
ignore_index=ignore_index,
)
return encoded_inputs
logger = DatasetLogger(
log_dir=log_dir,
log_level=log_level,
)
dataset_kwargs = {
'shuffle_buffer_size': shuffle_buffer_size,
'shuffle_buffer_warmup_factor': shuffle_buffer_warmup_factor,
'base_seed': base_seed,
'logger': logger,
'start_epoch': start_epoch,
}
extra_collate = data_loader_kwargs.get('collate_fn', lambda x: x)
if not return_raw_samples:
data_loader_kwargs['collate_fn'] = lambda batch: extra_collate(
_batch_preprocess(batch))
# data_loader_kwargs['persistent_workers'] = True
# Find all the parquet file paths and figure out whether it is binned or
# un-binned.
all_file_paths = get_all_parquets_under(path)
bin_ids = get_all_bin_ids(all_file_paths)
if len(bin_ids) > 0:
data_loader = BertPretrainBinned(
[
data_loader_class(
BertPretrainDataset(
get_file_paths_for_bin_id(all_file_paths, bin_id),
**dataset_kwargs,
),
**data_loader_kwargs,
) for bin_id in bin_ids
],
base_seed=base_seed,
start_epoch=start_epoch,
logger=logger,
)
else: # un-binned
data_loader = data_loader_class(
BertPretrainDataset(all_file_paths, **dataset_kwargs),
**data_loader_kwargs,
)
return data_loader
| LDDL-main | lddl/paddle/bert.py |
#
# SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
# Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
import logging
import os
import pathlib
def _get_logger_name(node_rank, local_rank=None, worker_rank=None):
if local_rank is None and worker_rank is None:
return 'node-{}'.format(node_rank)
elif worker_rank is None:
return 'node-{}_local-{}'.format(node_rank, local_rank)
else:
return 'node-{}_local-{}_worker-{}'.format(node_rank, local_rank,
worker_rank)
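# A no-op logger returned by DatasetLogger.to() when the calling process is not
# responsible for logging at the requested (node/rank/worker) level.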
class DummyLogger:
def debug(self, msg, *args, **kwargs):
pass
def info(self, msg, *args, **kwargs):
pass
def warning(self, msg, *args, **kwargs):
pass
def error(self, msg, *args, **kwargs):
pass
def critical(self, msg, *args, **kwargs):
pass
def log(self, msg, *args, **kwargs):
pass
def exception(self, msg, *args, **kwargs):
pass
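# DatasetLogger maintains a hierarchy of loggers (per node, per local rank, and
# per dataloader worker) that write to stderr and, if log_dir is given, to
# per-logger text files under log_dir.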
class DatasetLogger:
def __init__(
self,
log_dir=None,
node_rank=0,
local_rank=0,
log_level=logging.INFO,
):
self._log_dir = log_dir
self._node_rank = node_rank
self._local_rank = local_rank
self._worker_rank = None
self._log_level = log_level
if log_dir is not None:
pathlib.Path(log_dir).mkdir(parents=True, exist_ok=True)
# Create node level logger.
if local_rank == 0:
self._create_logger(_get_logger_name(node_rank))
# Create local_rank level logger.
self._create_logger(_get_logger_name(node_rank, local_rank=local_rank))
def _create_logger(self, name):
logger = logging.getLogger(name)
fmt = logging.Formatter(
'LDDL - %(asctime)s - %(filename)s:%(lineno)d:%(funcName)s - %(name)s '
'- %(levelname)s : %(message)s')
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(fmt)
logger.addHandler(stream_handler)
if self._log_dir is not None:
path = os.path.join(self._log_dir, '{}.txt'.format(name))
file_handler = logging.FileHandler(path)
file_handler.setFormatter(fmt)
logger.addHandler(file_handler)
logger.setLevel(self._log_level)
return logger
def init_for_worker(self, worker_rank):
if self._worker_rank is None:
self._worker_rank = worker_rank
self._create_logger(
_get_logger_name(
self._node_rank,
local_rank=self._local_rank,
worker_rank=worker_rank,
))
def to(self, which):
assert which in {'node', 'rank', 'worker'}
if which == 'node':
if (self._local_rank == 0 and
(self._worker_rank is None or self._worker_rank == 0)):
return logging.getLogger(_get_logger_name(self._node_rank))
else:
return DummyLogger()
elif which == 'rank':
if self._worker_rank is None or self._worker_rank == 0:
return logging.getLogger(
_get_logger_name(self._node_rank, local_rank=self._local_rank))
else:
return DummyLogger()
else: # which == 'worker'
return logging.getLogger(
_get_logger_name(
self._node_rank,
local_rank=self._local_rank,
worker_rank=self._worker_rank,
))
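# A minimal usage sketch (the log directory and message below are hypothetical,
# not part of LDDL):
#   logger = DatasetLogger(log_dir='/tmp/lddl_logs', node_rank=0, local_rank=0)
#   logger.init_for_worker(worker_rank)  # called inside each dataloader worker
#   logger.to('worker').info('reading shard ...')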
| LDDL-main | lddl/torch/log.py |
#
# SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
# Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
import itertools
import json
import logging
import numpy as np
import os
import pathlib
import pyarrow.parquet as pq
import random
import torch
import warnings
from torch.utils.data import IterableDataset
from torch.utils.data import get_worker_info
from lddl.types import File
from lddl.utils import get_num_samples_of_parquet
from lddl.random import randrange, shuffle, sample
from .utils import (get_rank, get_world_size, get_nproc_per_node, get_num_nodes,
get_node_rank)
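# ShuffleBuffer streams samples out of a list of parquet files and shuffles
# them with a fixed-size buffer: once the buffer has warmed up, each incoming
# sample replaces (and yields) a randomly chosen buffered sample, and whatever
# remains in the buffer is shuffled and drained at the end.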
class ShuffleBuffer:
def __init__(
self,
files,
max_num_samples_to_yield,
decode_record_batch,
size,
warmup_factor,
logger,
rng_state,
):
num_samples_wasted = (sum(
(f.num_samples for f in files)) - max_num_samples_to_yield)
assert 0 <= num_samples_wasted <= len(files)
self._files = files
self._max_num_samples_to_yield = max_num_samples_to_yield
self._decode_record_batch = decode_record_batch
self._size = size
self._warmup_factor = warmup_factor
self._logger = logger
self._rng_state = rng_state
@property
def num_samples(self):
return sum((f.num_samples for f in self._files))
def _randrange(self, stop):
n, self._rng_state = randrange(stop, rng_state=self._rng_state)
return n
def _shuffle(self, x):
self._rng_state = shuffle(x, rng_state=self._rng_state)
def __iter__(self):
buffer = []
num_samples_to_yield = min(
self._max_num_samples_to_yield,
sum((f.num_samples for f in self._files)),
)
remaining_num_samples = num_samples_to_yield
for f in self._files:
self._logger.to('worker').info('Reading {}'.format(f.path))
for b in pq.read_table(f.path).to_batches():
for sample in self._decode_record_batch(b):
if remaining_num_samples <= 0:
return
if (len(buffer) >= min(
self._size, (num_samples_to_yield - remaining_num_samples + 1) *
self._warmup_factor)):
replace_idx = self._randrange(len(buffer))
yield buffer[replace_idx]
buffer[replace_idx] = sample
remaining_num_samples -= 1
else:
buffer.append(sample)
self._shuffle(buffer)
for sample in buffer:
if remaining_num_samples <= 0:
return
yield sample
remaining_num_samples -= 1
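# ParquetDataset is an IterableDataset that splits a set of balanced parquet
# shards across ranks and dataloader workers and streams their samples through
# a ShuffleBuffer. Each epoch, the file-to-worker assignment is reshuffled with
# a world-identical RNG so that every rank still reads a disjoint set of files.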
class ParquetDataset(IterableDataset):
def __init__(
self,
file_paths,
transform=lambda x: x,
local_rank=0,
shuffle_buffer_size=16384,
shuffle_buffer_warmup_factor=16,
base_seed=12345,
logger=None,
start_epoch=0,
):
super().__init__()
self._transform = transform
self._local_rank = local_rank
self._shuffle_buffer_size = shuffle_buffer_size
self._shuffle_buffer_warmup_factor = shuffle_buffer_warmup_factor
self._base_seed = base_seed
self._rank = get_rank()
self._world_size = get_world_size()
self._nproc_per_node = get_nproc_per_node(local_rank)
self._num_nodes = get_num_nodes(nproc_per_node=self._nproc_per_node)
self._node_rank = get_node_rank(nproc_per_node=self._nproc_per_node)
self._epoch = start_epoch - 1
self._logger = logger
assert len(file_paths) % self._num_nodes == 0
assert len(file_paths) % self._world_size == 0
self._files = self._get_files(file_paths)
max_num_samples_per_file = max((f.num_samples for f in self._files))
min_num_samples_per_file = min((f.num_samples for f in self._files))
assert min_num_samples_per_file + 1 == max_num_samples_per_file
self._num_samples_per_file = min_num_samples_per_file
total_num_samples = sum((f.num_samples for f in self._files))
num_samples_lost = (total_num_samples -
self._num_samples_per_file * len(self._files))
self._logger.to('node').warning('lost {}/{}={}% samples in total'.format(
num_samples_lost,
total_num_samples,
num_samples_lost / total_num_samples * 100,
))
self._world_rng_state = None
self._worker_rng_state = None
def _get_files(self, file_paths):
all_files_num_samples = torch.zeros((len(file_paths),), dtype=torch.long)
if self._world_size > 1 and torch.distributed.get_backend() == 'nccl':
all_files_num_samples = all_files_num_samples.to('cuda')
# Figure out how many samples in each file.
num_samples_cache = {} # Map dirname to the dict of {basename: num_samples}
for idx in range(self._rank, len(file_paths), self._world_size):
fp = file_paths[idx]
dn = os.path.dirname(fp)
bn = os.path.basename(fp)
# Load the num_samples cache file if it exists.
if dn not in num_samples_cache:
nsfp = os.path.join(dn, '.num_samples.json')
try:
with open(nsfp, 'r') as nsf:
num_samples_cache[dn] = json.load(nsf)
except Exception as e:
self._logger.to('rank').warning('failed to load {}: {}'.format(
nsfp, e))
# Mark that the num_samples cache file doesn't exist for this
# directory.
num_samples_cache[dn] = None
if num_samples_cache[dn] is not None and bn in num_samples_cache[dn]:
all_files_num_samples[idx] = num_samples_cache[dn][bn]
else:
# Find out num_samples by loading the parquet table.
all_files_num_samples[idx] = get_num_samples_of_parquet(fp)
if self._world_size > 1:
# Sync. accross all ranks.
torch.distributed.all_reduce(
all_files_num_samples,
op=torch.distributed.ReduceOp.SUM,
)
all_files_num_samples = all_files_num_samples.tolist()
return [File(fp, ns) for fp, ns in zip(file_paths, all_files_num_samples)]
def __len__(self):
""" This function only returns how many samples per rank will be yielded
by this dataset.
Note that, len(dataloader), where dataloader is a PyTorch DataLoader
wrapping this dataset, does NOT return the accurate number of batches. This
is because, when (num_samples_per_file * num_files_per_worker) is not
divisible by batch_size, each worker is going to generate a partial batch
at the very end.
    However, PyTorch DataLoader's __len__ only divides the number returned from
    this function by batch_size, which would be smaller than the actual number
    of batches by at most (num_workers - 1).
    We need to patch PyTorch DataLoader's __len__ (see DataLoader in
    dataloader.py) for len(dataloader) to behave correctly.
"""
return self._num_samples_per_file * len(self._files) // self._world_size
@property
def num_samples_per_file(self):
return self._num_samples_per_file
@property
def num_files_per_rank(self):
return len(self._files) // self._world_size
def _decode_record_batch(self, b):
raise NotImplementedError('ParquetDataset is an abstract/interface class!')
def _world_identical_sample(self, population, k, counts=None):
s, self._world_rng_state = sample(
population,
k,
rng_state=self._world_rng_state,
)
return s
def _init_worker(self):
worker_info = get_worker_info()
if worker_info is None:
num_workers_per_rank = 1
worker_rank = 0
else:
num_workers_per_rank = worker_info.num_workers
worker_rank = worker_info.id
assert (len(self._files) % (self._world_size * num_workers_per_rank) == 0)
self._logger.init_for_worker(worker_rank)
return worker_rank, num_workers_per_rank
def _init_rng_states(self, worker_rank, num_workers_per_rank):
orig_rng_state = random.getstate()
random.seed(self._base_seed + self._epoch)
self._world_rng_state = random.getstate()
random.seed(self._base_seed +
(self._epoch * self._world_size + self._rank) *
num_workers_per_rank + worker_rank)
self._worker_rng_state = random.getstate()
random.setstate(orig_rng_state)
def __iter__(self):
self._epoch += 1
worker_rank, num_workers_per_rank = self._init_worker()
self._init_rng_states(worker_rank, num_workers_per_rank)
files = self._world_identical_sample(self._files, k=len(self._files))
self._logger.to('node').warning('epoch = {}'.format(self._epoch))
self._logger.to('worker').info(
'\n'.join(['files('] + [' {}'.format(f) for f in files] + [')']))
rank_files = files[self._rank::self._world_size]
worker_files = rank_files[worker_rank::num_workers_per_rank]
self._logger.to('worker').info(
'\n'.join(['worker_files('] + [' {}'.format(f) for f in worker_files] +
[')']))
sb = ShuffleBuffer(
worker_files,
self._num_samples_per_file * len(worker_files),
lambda b: self._decode_record_batch(b),
self._shuffle_buffer_size,
self._shuffle_buffer_warmup_factor,
self._logger,
self._worker_rng_state,
)
for sample in iter(sb):
yield self._transform(sample)
| LDDL-main | lddl/torch/datasets.py |
from .bert import get_bert_pretrain_data_loader
| LDDL-main | lddl/torch/__init__.py |
#
# SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
# Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
import torch
def barrier():
if torch.distributed.is_available() and torch.distributed.is_initialized():
torch.distributed.barrier()
def get_rank():
if torch.distributed.is_available() and torch.distributed.is_initialized():
rank = torch.distributed.get_rank()
else:
rank = 0
return rank
def get_world_size():
if torch.distributed.is_available() and torch.distributed.is_initialized():
world_size = torch.distributed.get_world_size()
else:
world_size = 1
return world_size
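# The number of processes per node is not directly exposed by
# torch.distributed, so it is recovered as (max local_rank across all ranks)
# plus 1 via an all_reduce.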
def get_nproc_per_node(local_rank):
if torch.distributed.is_available() and torch.distributed.is_initialized():
max_local_rank = torch.tensor(
local_rank,
device='cuda' if torch.distributed.get_backend() == 'nccl' else 'cpu',
)
torch.distributed.all_reduce(
max_local_rank,
op=torch.distributed.ReduceOp.MAX,
)
nproc_per_node = max_local_rank.item() + 1
else:
nproc_per_node = 1
return nproc_per_node
def get_num_nodes(local_rank=None, nproc_per_node=None):
if torch.distributed.is_available() and torch.distributed.is_initialized():
if nproc_per_node is None:
assert local_rank is not None
nproc_per_node = get_nproc_per_node(local_rank)
num_nodes = get_world_size() // nproc_per_node
else:
num_nodes = 1
return num_nodes
def get_node_rank(local_rank=None, nproc_per_node=None):
""" This assume the training processes are launched via
torch.distributed.launch.py. Therefore, the ordering scheme of
rank -> (node_rank, local_rank) mapping is:
0 -> (0, 0)
1 -> (0, 1)
...
nproc_per_node -> (1, 0)
nproc_per_node+1 -> (1, 1)
...
"""
if torch.distributed.is_available() and torch.distributed.is_initialized():
if nproc_per_node is None:
assert local_rank is not None
nproc_per_node = get_nproc_per_node(local_rank)
node_rank = get_rank() // nproc_per_node
else:
node_rank = 0
return node_rank
| LDDL-main | lddl/torch/utils.py |
#
# SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
# Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
import random
import torch
from lddl.random import choices
from .datasets import ParquetDataset
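# Binned interleaves batches from multiple per-bin dataloaders (one per
# sequence-length bin), sampling a bin at every iteration with probability
# proportional to the number of samples it still has left.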
class Binned:
def __init__(self, dataloaders, base_seed=12345, start_epoch=0, logger=None):
self._dataloaders = dataloaders
self._base_seed = base_seed
self._epoch = start_epoch - 1
self._logger = logger
self._world_rng_state = None
def _init_rng_states(self):
orig_rng_state = random.getstate()
random.seed(self._base_seed + self._epoch)
self._world_rng_state = random.getstate()
random.setstate(orig_rng_state)
def _init_iter(self):
self._init_rng_states()
num_samples_remaining = [len(dl.dataset) for dl in self._dataloaders]
dataiters = [iter(dl) for dl in self._dataloaders]
return num_samples_remaining, dataiters
def __len__(self):
return sum((len(dl) for dl in self._dataloaders))
def _get_batch_size(self, batch):
raise NotImplementedError('Binned is an abstract class!')
def _choices(self, population, weights=None, cum_weights=None, k=1):
c, self._world_rng_state = choices(
population,
weights=weights,
cum_weights=cum_weights,
k=k,
rng_state=self._world_rng_state,
)
return c
def __iter__(self):
self._epoch += 1
num_samples_remaining, dataiters = self._init_iter()
for i in range(len(self)):
bin_id = self._choices(
list(range(len(dataiters))),
weights=num_samples_remaining,
k=1,
)[0]
self._logger.to('rank').info('{}-th iteration selects bin_id = {}'.format(
i, bin_id))
assert num_samples_remaining[bin_id] > 0
batch = next(dataiters[bin_id])
num_samples_remaining[bin_id] -= self._get_batch_size(batch)
yield batch
assert sum((nsr for nsr in num_samples_remaining)) == 0
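# Patch torch.utils.data.DataLoader.__len__ so that, for a ParquetDataset, the
# batch count accounts for the partial batch each worker may emit at the end of
# its own subset of files.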
class DataLoader(torch.utils.data.DataLoader):
def __len__(self):
if isinstance(self.dataset, ParquetDataset):
num_workers_per_rank = max(self.num_workers, 1)
num_files_per_worker = self.dataset.num_files_per_rank // num_workers_per_rank
num_samples_per_worker = self.dataset.num_samples_per_file * num_files_per_worker
num_batches_per_worker = (
(num_samples_per_worker - 1) // self.batch_size + 1)
return num_batches_per_worker * num_workers_per_rank
else:
      return super().__len__()
| LDDL-main | lddl/torch/dataloader.py |
#
# SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
# Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
import logging
import numpy as np
import os
import random
import torch
import transformers
from typing import List, Optional, Tuple, Union
from collections import deque
from lddl.utils import (get_all_parquets_under, get_all_bin_ids,
get_file_paths_for_bin_id, deserialize_np_array)
from .dataloader import Binned, DataLoader
from .datasets import ParquetDataset
from .log import DatasetLogger
from .utils import get_node_rank, get_nproc_per_node
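# Decode a pyarrow record batch into per-sample tuples of
# (A, B, is_random_next[, masked_lm_positions, masked_lm_labels]).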
def _decode_record_batch(b):
b = b.to_pydict()
if 'masked_lm_positions' in b:
assert 'masked_lm_labels' in b
columns = tuple((b[k] for k in (
'A',
'B',
'is_random_next',
'masked_lm_positions',
'masked_lm_labels',
) if k in b))
for sample in zip(*columns):
yield sample
class BertPretrainDataset(ParquetDataset):
def _decode_record_batch(self, b):
return _decode_record_batch(b)
class BertPretrainBinned(Binned):
def _get_batch_size(self, batch):
return batch['input_ids'].size(0)
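# Convert a batch of raw string samples into the padded torch.Tensor inputs
# expected by BERT pretraining: input_ids, token_type_ids, attention_mask,
# next_sentence_labels, plus labels (static masking) or special_tokens_mask
# (dynamic masking).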
def _to_encoded_inputs(
batch,
tokenizer,
sequence_length_alignment=8,
ignore_index=-1,
):
batch_size = len(batch)
As, Bs, are_random_next = [], [], []
static_masking = (len(batch[0]) > 3)
if static_masking:
assert len(batch[0]) == 5
all_masked_lm_positions, all_masked_lm_labels = [], []
# Unpack each field.
for sample in batch:
As.append(tuple(sample[0].split()))
Bs.append(tuple(sample[1].split()))
are_random_next.append(sample[2])
if static_masking:
all_masked_lm_positions.append(
torch.from_numpy(deserialize_np_array(sample[3]).astype(int)))
all_masked_lm_labels.append(sample[4].split())
# Figure out the sequence length of this batch.
batch_seq_len = max(
(len(tokens_A) + len(tokens_B) + 3 for tokens_A, tokens_B in zip(As, Bs)))
  # Align batch_seq_len to a multiple of sequence_length_alignment, because
  # Tensor Cores perform better when the sequence dimension is aligned this way.
batch_seq_len = (((batch_seq_len - 1) // sequence_length_alignment + 1) *
sequence_length_alignment)
# Allocate the input torch.Tensor's.
input_ids = torch.zeros(batch_size, batch_seq_len, dtype=torch.long)
token_type_ids = torch.zeros_like(input_ids)
attention_mask = torch.zeros_like(input_ids)
if static_masking:
labels = torch.full_like(input_ids, ignore_index)
else:
special_tokens_mask = torch.zeros_like(input_ids)
# Fill in the input torch.Tensor's.
for sample_idx in range(batch_size):
tokens_A, tokens_B = As[sample_idx], Bs[sample_idx]
# Prepare the input token IDs.
tokens = ('[CLS]',) + tokens_A + ('[SEP]',) + tokens_B + ('[SEP]',)
input_ids[sample_idx, :len(tokens)] = torch.as_tensor(
tokenizer.convert_tokens_to_ids(tokens),
dtype=torch.long,
)
# Prepare the token type ids (segment ids).
start_idx = len(tokens_A) + 2
end_idx = len(tokens_A) + len(tokens_B) + 3
token_type_ids[sample_idx, start_idx:end_idx] = 1
# Prepare the attention mask (input mask).
attention_mask[sample_idx, :end_idx] = 1
if static_masking:
# Prepare the MLM labels.
labels[sample_idx, all_masked_lm_positions[sample_idx]] = torch.as_tensor(
tokenizer.convert_tokens_to_ids(all_masked_lm_labels[sample_idx]),
dtype=torch.long,
)
else:
# Prepare special_tokens_mask (for DataCollatorForLanguageModeling)
special_tokens_mask[sample_idx, 0] = 1
special_tokens_mask[sample_idx, len(tokens_A) + 1] = 1
special_tokens_mask[sample_idx, len(tokens_A) + len(tokens_B) + 2:] = 1
# Compose output dict.
encoded_inputs = {
'input_ids':
input_ids,
'token_type_ids':
token_type_ids,
'attention_mask':
attention_mask,
'next_sentence_labels':
torch.as_tensor(
are_random_next,
dtype=torch.long,
),
}
if static_masking:
encoded_inputs['labels'] = labels
else:
encoded_inputs['special_tokens_mask'] = special_tokens_mask
return encoded_inputs
def _mask_tokens(
inputs,
special_tokens_mask=None,
tokenizer=None,
mlm_probability=0.15,
ignore_index=-1,
):
"""
Prepare masked tokens inputs/labels for masked language modeling: 80% MASK,
10% random, 10% original.
"""
labels = inputs.clone()
# We sample a few tokens in each sequence for MLM training (with probability
# `mlm_probability`)
probability_matrix = torch.full(labels.shape, mlm_probability)
if special_tokens_mask is None:
special_tokens_mask = [
tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True)
for val in labels.tolist()
]
special_tokens_mask = torch.tensor(special_tokens_mask, dtype=torch.bool)
else:
special_tokens_mask = special_tokens_mask.bool()
probability_matrix.masked_fill_(special_tokens_mask, value=0.0)
masked_indices = torch.bernoulli(probability_matrix).bool()
# We only compute loss on masked tokens
labels[~masked_indices] = ignore_index
# 80% of the time, we replace masked input tokens with tokenizer.mask_token
# ([MASK])
indices_replaced = (torch.bernoulli(torch.full(labels.shape, 0.8)).bool() &
masked_indices)
inputs[indices_replaced] = tokenizer.convert_tokens_to_ids(
tokenizer.mask_token)
# 10% of the time, we replace masked input tokens with random word
indices_random = (torch.bernoulli(torch.full(labels.shape, 0.5)).bool() &
masked_indices & ~indices_replaced)
random_words = torch.randint(len(tokenizer), labels.shape, dtype=torch.long)
inputs[indices_random] = random_words[indices_random]
# The rest of the time (10% of the time) we keep the masked input tokens
# unchanged
return inputs, labels
def get_bert_pretrain_data_loader(
path,
local_rank=0,
shuffle_buffer_size=16384,
shuffle_buffer_warmup_factor=16,
tokenizer_class=transformers.BertTokenizerFast,
vocab_file=None,
tokenizer_kwargs={},
data_loader_class=DataLoader,
data_loader_kwargs={},
mlm_probability=0.15,
base_seed=12345,
log_dir=None,
log_level=logging.INFO,
return_raw_samples=False,
start_epoch=0,
sequence_length_alignment=8,
ignore_index=-1,
):
"""Gets a PyTorch DataLoader for the BERT pretraining task.
The LDDL DataLoader can be used in the same way as a normal PyTorch
DataLoader. The 'persistent_workers' attribute will always be enabled.
The LDDL DataLoader streams samples from disk into memory, and uses a shuffle
buffer to perform shuffling: at each iteration, a random sample from the
shuffle buffer is popped, and a new sample is pushed into the shuffle buffer
at this vacant location.
Args:
path: A string of the path pointing to the directory that contains the
pretraining dataset in the format of balanced parquet shards.
local_rank: The local rank ID (on this node) of the current pretraining
process.
shuffle_buffer_size: The size of the shuffle buffer.
shuffle_buffer_warmup_factor: At the beginning, the shuffle buffer is empty.
      Therefore, in order to fill the shuffle buffer, at each iteration, more
      samples need to be pushed into the shuffle buffer than are popped out of
      it. This factor indicates how many samples are pushed into the shuffle
      buffer per sample popped out, until the shuffle buffer is full.
tokenizer_class: The HuggingFace tokenizer class for BERT pretraining.
vocab_file: The path to a vocab file, or the name of a pretrained model
registered on huggingface.co (e.g., 'bert-large-uncased') of which the
vocab file is downloaded.
tokenizer_kwargs: The arguments to the tokenizer class.
data_loader_class: The class of the DataLoader.
data_loader_kwargs: The arguments to the DataLoader class.
mlm_probability: The probability for masking tokens in the masked language
modeling task (in BERT pretraining).
base_seed: A base seed value on which other seeds used in the DataLoader are
based.
log_dir: The path to a directory to store the logs from the LDDL DataLoader.
log_level: The logging verbose level.
return_raw_samples: If True, returns the raw string pairs instead of token
indices.
start_epoch: The epoch number to start from. An epoch is defined as going
through every sample in a dataset once.
sequence_length_alignment: To get the input tensors of token indices, each
sequence in a batch will only be padded to the longest sequence in this
batch. However, certain hardware features might prefer the shapes of the
input tensors to meet certain conditions. For example, it's better for the
Tensor Core on NVIDIA GPUs if the dimensions of the input tensors are
divisible by 8. Therefore, this argument is an alignment factor such that
the sequences in a batch will be padded to the first sequence length
larger than the longest sequence in this batch and also divisible by this
alignment factor.
ignore_index: The label value for the unmasked tokens in the language
modeling task (in BERT pretraining).
Returns:
A PyTorch DataLoader that, in each iteration, yield:
- If return_raw_samples is False, a dict of 5 key-value pairs which are the
necessary input for BERT pretraining:
{
'input_ids': a torch.Tensor of size [batch_size, sequence_length],
'token_type_ids': a torch.Tensor of size [batch_size, sequence_length],
'attention_mask': a torch.Tensor of size [batch_size, sequence_length],
'labels': a torch.Tensor of size [batch_size, sequence_length],
'next_sentence_labels': a torch.Tensor of size [batch_size],
}
- If return_raw_samples is True, a list of the following lists:
[
strings of the first sequences in the sequence pairs,
strings of the second sequences in the sequence pairs,
bools that indicate whether the second sequences are the next sequences
for the first sequences,
numpy.ndarrays of positions of the masked tokens for the masked language
modeling task (only exists if static masking is enabled),
      strings of space-separated labels of the masked tokens for the masked
language modeling task (only exists if static masking is enabled),
]
Examples:
train_dataloader = lddl.torch.get_bert_pretrain_data_loader(
input_dir,
local_rank=local_rank,
vocab_file=vocab_file,
data_loader_kwargs={
'batch_size': batch_size,
'num_workers': num_workers,
'pin_memory': True,
},
log_level=logging.WARNING,
start_epoch=start_epoch,
)
for epoch in range(start_epoch, start_epoch + epochs):
for i, batch in enumerate(train_dataloader):
prediction_scores, seq_relationship_score = model(
input_ids=batch['input_ids'].to(device),
token_type_ids=batch['token_type_ids'].to(device),
attention_mask=batch['attention_mask'].to(device),
)
loss = criterion(
prediction_scores,
seq_relationship_score,
batch['labels'].to(device),
batch['next_sentence_labels'].to(device),
)
...
"""
assert isinstance(path, str)
assert isinstance(local_rank, int) and local_rank >= 0
assert isinstance(shuffle_buffer_size, int) and shuffle_buffer_size > 0
assert (isinstance(shuffle_buffer_warmup_factor, int) and
shuffle_buffer_warmup_factor > 0)
assert tokenizer_class in {
transformers.BertTokenizerFast, transformers.BertTokenizer
}
assert isinstance(vocab_file, str)
assert isinstance(tokenizer_kwargs, dict)
assert data_loader_class in {DataLoader}
assert isinstance(data_loader_kwargs, dict)
assert isinstance(mlm_probability, (int, float)) and 0 <= mlm_probability <= 1
assert isinstance(base_seed, int)
assert log_dir is None or isinstance(log_dir, str)
assert log_level in {
logging.NOTSET, logging.DEBUG, logging.INFO, logging.WARNING,
logging.ERROR, logging.CRITICAL
}
assert isinstance(return_raw_samples, bool)
assert isinstance(start_epoch, int)
if os.path.isfile(vocab_file):
tokenizer = tokenizer_class(vocab_file, **tokenizer_kwargs)
else:
tokenizer = tokenizer_class.from_pretrained(vocab_file, **tokenizer_kwargs)
def _batch_preprocess(batch):
with torch.no_grad():
encoded_inputs = _to_encoded_inputs(
batch,
tokenizer,
sequence_length_alignment=sequence_length_alignment,
ignore_index=ignore_index,
)
if 'special_tokens_mask' in encoded_inputs: # Dynamic masking.
special_tokens_mask = encoded_inputs.pop('special_tokens_mask', None)
(encoded_inputs['input_ids'], encoded_inputs['labels']) = _mask_tokens(
encoded_inputs['input_ids'],
special_tokens_mask=special_tokens_mask,
tokenizer=tokenizer,
mlm_probability=mlm_probability,
ignore_index=ignore_index,
)
return encoded_inputs
logger = DatasetLogger(
log_dir=log_dir,
node_rank=get_node_rank(nproc_per_node=get_nproc_per_node(local_rank)),
local_rank=local_rank,
log_level=log_level,
)
dataset_kwargs = {
'local_rank': local_rank,
'shuffle_buffer_size': shuffle_buffer_size,
'shuffle_buffer_warmup_factor': shuffle_buffer_warmup_factor,
'base_seed': base_seed,
'logger': logger,
'start_epoch': start_epoch,
}
extra_collate = data_loader_kwargs.get('collate_fn', lambda x: x)
if not return_raw_samples:
data_loader_kwargs['collate_fn'] = lambda batch: extra_collate(
_batch_preprocess(batch))
data_loader_kwargs['persistent_workers'] = True
# Find all the parquet file paths and figure out whether it is binned or
# un-binned.
all_file_paths = get_all_parquets_under(path)
bin_ids = get_all_bin_ids(all_file_paths)
if len(bin_ids) > 0:
data_loader = BertPretrainBinned(
[
data_loader_class(
BertPretrainDataset(
get_file_paths_for_bin_id(all_file_paths, bin_id),
**dataset_kwargs,
),
**data_loader_kwargs,
) for bin_id in bin_ids
],
base_seed=base_seed,
start_epoch=start_epoch,
logger=logger,
)
else: # un-binned
data_loader = data_loader_class(
BertPretrainDataset(all_file_paths, **dataset_kwargs),
**data_loader_kwargs,
)
return data_loader
| LDDL-main | lddl/torch/bert.py |
#
# SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
# Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
import argparse
import functools
import multiprocessing
import os
import subprocess
import tqdm
from .utils import download, parse_str_of_num_bytes
from lddl.utils import (expand_outdir_and_mkdir, mkdir,
get_all_files_paths_under, attach_bool_arg)
def _get_url():
return 'https://the-eye.eu/public/AI/pile_preliminary_components/books1.tar.gz'
def attach_args(parser=argparse.ArgumentParser("""
Books Downloader performs the following steps:
- Step 1: Download the compressed bookscorpus from {} into the directory
specified by the --outdir flag.
- Step 2: Unzip the compressed bookscorpus into raw text files of individual
books.
- Step 3: Shard the books into text shards in the 'source' subdirectory under
the directory specified by the --outdir flag. The text shards under the
`source` subdirectory can then be used as the input to the LDDL preprocessor.
All steps are executed by default. Each step, before it starts, expects the
previous steps to have already finished. You can turn Step 1 off with
--no-download, turn Step 2 off with --no-unzip, and turn Step 3 off with
--no-shard.
Examples:
# Download the compressed bookscorpus into books/books1.tar.gz :
$ download_books --no-unzip --no-shard
$ tree books/ # tree can be installed via `sudo apt install tree`.
books/
└── books1.tar.gz
# Unzip books/books1.tar.gz into individual books:
$ download_books --no-download --no-shard
$ tree books/
books/
├── books1
│ ├── 2020-08-27-epub_urls.txt
│ └── epubtxt
│ ├── 1000-lines-magic-sequence.epub.txt
│ ├── 1000-yards-john-milton-1.epub.txt
│ ...
│ └── zorana-confessions-of-a-small-town-super-villain.epub.txt
├── books1.tar.gz
├── tar.err
└── tar.out
# Shard the books into text shards under books/source which can be read by
# the LDDL preprocessor as input.
$ download_books --no-download --no-unzip
$ tree books/
books/
├── books1
│ ├── 2020-08-27-epub_urls.txt
│ └── epubtxt
│ ├── 1000-lines-magic-sequence.epub.txt
│ ├── 1000-yards-john-milton-1.epub.txt
│ ...
│ └── zorana-confessions-of-a-small-town-super-villain.epub.txt
├── books1.tar.gz
├── source
│ ├── 0.txt
│ ...
│ └── 9.txt
├── tar.err
└── tar.out
# books/source is the input to the LDDL preprocessor.
# Or, we could do all 3 steps together:
$ download_books --outdir books/
""".format(_get_url()))):
parser.add_argument(
'--outdir',
type=str,
default=None,
required=True,
help='Path to the output directory. This directory will be created if not'
' already existed.',
)
defaults = {
'--download-chunk-size': 16 * 1024 * 1024,
'--num-shards': 10,
'--shard-num-processes': os.cpu_count(),
}
attach_bool_arg(
parser,
'download',
default=True,
help_str='--download is set by default. To skip Step 1, explicitly set '
'--no-download.',
)
attach_bool_arg(
parser,
'unzip',
default=True,
help_str='--unzip is set by default. To skip Step 2, explicitly set '
'--no-unzip.',
)
attach_bool_arg(
parser,
'shard',
default=True,
help_str='--shard is set by default. To skip Step 3, explicitly set '
'--no-shard.',
)
parser.add_argument(
'--download-chunk-size',
type=functools.partial(parse_str_of_num_bytes, return_str=False),
default=defaults['--download-chunk-size'],
metavar="n[KMG]",
help='The downloading will be performed in a streaming way by looping '
'over the following steps: (i) transfer a small chunk of data over the '
'network into the host memory, (ii) write this chunk onto disk. This flag'
' indicates the chunk size. Default: {}'.format(
defaults['--download-chunk-size']),
)
parser.add_argument(
'--num-shards',
type=int,
default=defaults['--num-shards'],
help='The number of text shards into which the books are aggregated. '
'Default: {}'.format(defaults['--num-shards']),
)
parser.add_argument(
'--shard-num-processes',
type=int,
default=defaults['--shard-num-processes'],
help='The number of processes used to shard all books. '
'Default: {}'.format(defaults['--shard-num-processes']),
)
return parser
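# Each shard is written as a single text file in which every book occupies one
# line: the book's file name (without extension) followed by all of its
# non-empty lines joined with spaces.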
def _shard_book(shard):
shard_path, books = shard
with open(shard_path, 'w', newline='\n') as shard_file:
one_line_books = []
for book in books:
with open(book, 'r', encoding='utf-8-sig', newline='\n') as book_file:
book_lines = (bl.strip() for bl in book_file)
book_lines = [bl for bl in book_lines if len(bl) > 0]
# The first token is the name of the book.
book_name = os.path.splitext(os.path.basename(book))[0]
one_line_books.append(' '.join([book_name] + book_lines))
shard_file.write('\n'.join(one_line_books))
def _shard_books(books_dir, shards_dir, num_shards, num_processes):
book_paths = [
f for f in get_all_files_paths_under(books_dir)
if os.path.splitext(f)[1] == '.txt'
]
shards = [(
os.path.join(shards_dir, '{}.txt'.format(shard_idx)),
book_paths[shard_idx::num_shards],
) for shard_idx in range(num_shards)]
with multiprocessing.Pool(num_processes) as p:
list(tqdm.tqdm(p.imap(_shard_book, shards), total=len(shards)))
def main(args):
args.outdir = expand_outdir_and_mkdir(args.outdir)
target_path = os.path.join(args.outdir, 'books1.tar.gz')
if args.download:
download(
_get_url(),
target_path,
chunk_size=args.download_chunk_size,
)
if args.unzip:
print('Unzipping {} ...'.format(target_path))
out_path = os.path.join(args.outdir, 'tar.out')
err_path = os.path.join(args.outdir, 'tar.err')
try:
subprocess.run(
['tar', '-xvzf', target_path, '-C', args.outdir],
check=True,
stdout=open(out_path, 'w'),
stderr=open(err_path, 'w'),
)
except subprocess.CalledProcessError as e:
print(e, 'Please check {} and {}'.format(out_path, err_path))
raise
if args.shard:
books_dir = os.path.join(args.outdir, 'books1', 'epubtxt')
print('Sharding {} ...'.format(books_dir))
dask_source_path = os.path.join(args.outdir, 'source')
mkdir(dask_source_path)
_shard_books(
books_dir,
dask_source_path,
args.num_shards,
args.shard_num_processes,
)
print('Dask source prepared at {} !'.format(dask_source_path))
def console_script():
main(attach_args().parse_args())
| LDDL-main | lddl/download/books.py |
#
# SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
# Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
import argparse
import datetime
import functools
import logging
import multiprocessing
import os
import socket
import threading
import time
import tqdm
from newsplease.crawler import commoncrawl_crawler
from lddl.utils import (expand_outdir_and_mkdir, mkdir,
get_all_files_paths_under, attach_bool_arg)
def attach_args(parser=argparse.ArgumentParser("""
Common Crawl Downloader performs the following steps:
- Step 1: Download commoncrawl.org's news archive using newsplease
(https://github.com/fhamborg/news-please) and extract the raw text of the
articles to the directory specified by the --outdir flag.
- Step 2: Prepare and aggregate the raw text into text shards in the 'source'
subdirectory under the directory specified by the --outdir flag. The text
shards under the 'source' subdirectory can then be used as the input to the
LDDL preprocessor.
All steps are executed by default. Each step, before it starts, expects the
previous steps to have already finished. You can turn Step 1 off with
--no-newsplease, and turn Step 2 off with --no-shard.
Examples:
# Download the Common Crawl news archive from .warc files released in Oct. 2021
# and extract the news articles that are published from Jan. 3rd, 2000 to Mar.
# 1st, 2010 to common_crawl/txt/ :
$ download_common_crawl \
--outdir common_crawl/ \
--no-shard \
--warc-files-start-date 2021-10-01 \
--warc-files-end-date 2021-11-01 \
--start-date 2000-01-03 \
--end-date 2010-03-01
$ tree -L 1 common_crawl/ # tree can be installed via `sudo apt install tree`
common_crawl/
├── txt
└── warc
# Shard the news articles into text shards under common_crawl/source which can
# be read by the LDDL preprocessor as input:
$ download_common_crawl --outdir common_crawl/ --no-newsplease
$ tree -L 1 common_crawl/
common_crawl/
├── source
├── txt
└── warc
# common_crawl/source is the input to the LDDL preprocessor.
# Or, we could do all 2 steps together:
$ download_common_crawl \
--outdir common_crawl/ \
--warc-files-start-date 2021-10-01 \
--warc-files-end-date 2021-11-01 \
--start-date 2000-01-03 \
--end-date 2010-03-01
""")):
parser.add_argument(
'--outdir',
type=str,
default=None,
required=True,
      help='Path to the output directory. This directory will be created if it'
      ' does not already exist.',
)
defaults = {
'--prefix': socket.gethostname(),
'--number-of-extraction-processes': os.cpu_count(),
'--valid-hosts': [],
'--start-date': None,
'--start-date-format': '%Y-%m-%d',
'--end-date': None,
'--end-date-format': '%Y-%m-%d',
'--warc-files-start-date': None,
'--warc-files-start-date-format': '%Y-%m-%d',
'--warc-files-end-date': None,
'--warc-files-end-date-format': '%Y-%m-%d',
'--articles-per-write': 1024,
'--langs': ['en'],
'--num-shards': 8,
'--number-of-sharding-processes': os.cpu_count(),
}
parser.add_argument(
'--prefix',
type=str,
default=defaults['--prefix'],
help='A prefix string that is included in the article ID and output file '
      'name of the raw text. This is useful when you need to distribute Step 1 '
'to many nodes, then merge the downloaded raw text onto a single node to '
'perform Step 2. Default: {}'.format(defaults['--prefix']),
)
parser.add_argument(
'--number-of-extraction-processes',
type=int,
default=defaults['--number-of-extraction-processes'],
help='The number of processes used for raw text extraction by newsplease.'
' Default: {}'.format(defaults['--number-of-extraction-processes']),
)
parser.add_argument(
'--valid-hosts',
type=str,
nargs='*',
default=defaults['--valid-hosts'],
help='Only news articles from the hosts in this list are kept. '
'Default: {} (any host is OK); example: [\'elrancaguino.cl\']'.format(
defaults['--valid-hosts']),
)
parser.add_argument(
'--start-date',
type=str,
default=defaults['--start-date'],
help='Only news articles published after this start date are kept. '
'Default: {} (any date is OK as start date)'.format(
defaults['--start-date']),
)
parser.add_argument(
'--start-date-format',
type=str,
default=defaults['--start-date-format'],
help='The datetime format of the start date specified by --start-date. '
'Please refer to '
'https://docs.python.org/3/library/datetime.html#strftime-and-strptime-format-codes'
' for details of the datetime format string. Default: %%Y-%%m-%%d',
)
parser.add_argument(
'--end-date',
type=str,
default=defaults['--end-date'],
help='Only news articles published before this end date are kept. '
'Default: {} (any date is OK as end date)'.format(defaults['--end-date']),
)
parser.add_argument(
'--end-date-format',
type=str,
default=defaults['--end-date-format'],
help='The datetime format of the end date specified by --end-date. Please'
' refer to '
'https://docs.python.org/3/library/datetime.html#strftime-and-strptime-format-codes'
' for details of the datetime format string. Default: %%Y-%%m-%%d',
)
parser.add_argument(
'--warc-files-start-date',
type=str,
default=defaults['--warc-files-start-date'],
help='Only .warc files published after this start date are downloaded. '
'Therefore, you can use this flag to control how much data you want to '
      'download. Default: {} (the date when Common Crawl was founded)'.format(
defaults['--warc-files-start-date']),
)
parser.add_argument(
'--warc-files-start-date-format',
type=str,
default=defaults['--warc-files-start-date-format'],
help='The datetime format of the start date specified by '
      '--warc-files-start-date. Please refer to '
'https://docs.python.org/3/library/datetime.html#strftime-and-strptime-format-codes'
' for details of the datetime format string. Default: %%Y-%%m-%%d',
)
parser.add_argument(
'--warc-files-end-date',
type=str,
default=defaults['--warc-files-end-date'],
help='Only .warc files published before this end date are downloaded. '
'Therefore, you can use this flag to control how much data you want to '
'download. Default: {} (today)'.format(defaults['--warc-files-end-date']),
)
parser.add_argument(
'--warc-files-end-date-format',
type=str,
default=defaults['--warc-files-end-date-format'],
help='The datetime format of the end date specified by '
      '--warc-files-end-date. Please refer to '
'https://docs.python.org/3/library/datetime.html#strftime-and-strptime-format-codes'
' for details of the datetime format string. Default: %%Y-%%m-%%d',
)
attach_bool_arg(
parser,
'strict-date',
default=True,
help_str='If date filtering is strict and newsplease could not detect '
'the published date of an article, the article will be discarded. '
'--strict-date is set by default. To turn this off, explicitly set '
'--no-strict-date.',
)
attach_bool_arg(
parser,
'reuse-previously-downloaded-files',
default=True,
help_str='If reusing previously downloaded files, the script checks '
'whether a file has been downloaded already and uses that file instead of'
' downloading again. Note that there is no check whether the file has '
'been downloaded completely or is valid! '
'--reuse-previously-downloaded-files is set by default. To turn this off,'
' explicitly set --no-reuse-previously-downloaded-files.',
)
attach_bool_arg(
parser,
'continue-after-error',
default=True,
help_str='If this flag is set, downloading will continue even after '
'newsplease encounters an error. --continue-after-error is set by '
'default. To turn this off, explicitly set --no-continue-after-error.',
)
attach_bool_arg(
parser,
'show-download-progress',
default=False,
help_str='If this flag is set, show the progress of downloading the WARC '
'files. --show-download-progress is NOT set by default.',
)
attach_bool_arg(
parser,
'delete-warc-after-extraction',
default=True,
help_str='If this flag is set, the WARC file will be deleted after all '
'articles have been extracted from it. --delete-warc-after-extraction is'
' set by default. To turn this off, explicitly set '
'--no-delete-warc-after-extraction.',
)
attach_bool_arg(
parser,
'continue-process',
default=True,
help_str='If this flag is set, newsplease will continue extraction from '
'the latest fully downloaded but not fully extracted WARC files and then '
      'crawl new WARC files. This assumes that the filter criteria have not '
'been changed since the previous run! --continue-process is set by '
'default. To turn this off, explicitly set --no-continue-process.',
)
parser.add_argument(
'--articles-per-write',
type=int,
default=defaults['--articles-per-write'],
      help='The articles will be extracted in a streaming way by looping over '
      'the following steps: (i) download and extract a small number of '
      'articles, (ii) write this small number of articles to disk. This flag '
      'indicates how many articles are cached in memory before a flushing '
      'write. Default: {}'.
format(defaults['--articles-per-write']),
)
parser.add_argument(
'--langs',
default=defaults['--langs'],
nargs='+',
choices=['en'],
help='Only news articles written in the languages in this list are kept. '
'Default: {}'.format(defaults['--langs']),
)
attach_bool_arg(
parser,
'newsplease',
default=True,
help_str='--newsplease is set by default. To skip Step 1, explicitly set'
' --no-newsplease.',
)
attach_bool_arg(
parser,
'shard',
default=True,
help_str='--shard is set by default. To skip Step 2, explicitly set '
'--no-shard.',
)
parser.add_argument(
'--num-shards',
type=int,
default=defaults['--num-shards'],
help='The number of text shards into which news articles are aggregated. '
'Default: {}'.format(defaults['--num-shards']),
)
parser.add_argument(
'--number-of-sharding-processes',
type=int,
default=defaults['--number-of-sharding-processes'],
help='The number of processes used to shard all news articles. '
'Default: {}'.format(defaults['--number-of-sharding-processes']),
)
return parser
class ThreadLocal(threading.local):
def __init__(self):
self.out_file = None
self.articles = []
self.articles_count = 0
self.warcs_count = 0
_thread_local = ThreadLocal()
def _flatten(s):
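  """Collapse a (possibly multi-line) article body into a single line."""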
return ' '.join((l for l in s.splitlines()))
def _flush_articles(
out_file,
articles,
txt_dir,
warcs_count,
prefix=socket.gethostname(),
):
if out_file is None:
out_file = open(
os.path.join(
txt_dir,
'{}-{}-{}-{}-{}.txt'.format(
prefix,
os.getpid(),
threading.get_ident(),
warcs_count,
time.time_ns(),
),
),
'w',
)
print('{} opened for writing!'.format(out_file.name))
else:
out_file.write('\n')
out_file.write('\n'.join(articles))
articles.clear()
return out_file
def _on_valid_article_extracted(
article,
articles_per_write=None,
langs=None,
txt_dir=None,
prefix=socket.gethostname(),
):
if article.language in langs and article.maintext is not None:
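    # Prefix each article with a unique ID (prefix, process ID, thread ID,
    # running count, timestamp) so that one output line holds exactly one
    # article.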
_thread_local.articles.append('{}-{}-{}-{}-{}'.format(
prefix,
os.getpid(),
threading.get_ident(),
_thread_local.articles_count,
time.time_ns(),
) + ' ' + _flatten(article.maintext))
_thread_local.articles_count += 1
if len(_thread_local.articles) > articles_per_write:
_thread_local.out_file = _flush_articles(
_thread_local.out_file,
_thread_local.articles,
txt_dir,
_thread_local.warcs_count,
prefix=prefix,
)
def _on_warc_completed(
warc_path,
counter_article_passed,
counter_article_discarded,
counter_article_error,
counter_article_total,
counter_warc_processed,
txt_dir=None,
prefix=socket.gethostname(),
):
if len(_thread_local.articles) > 0:
_thread_local.out_file = _flush_articles(
_thread_local.out_file,
_thread_local.articles,
txt_dir,
_thread_local.warcs_count,
prefix=prefix,
)
if _thread_local.out_file is not None:
print('Closing {} !'.format(_thread_local.out_file.name))
_thread_local.out_file.close()
_thread_local.out_file = None
_thread_local.warcs_count += 1
def _aggregate_news(shard):
shard_path, news_paths = shard
with open(shard_path, 'w', newline='\n') as shard_file:
for i, news_path in enumerate(news_paths):
if i > 0:
shard_file.write('\n')
with open(news_path, 'r', newline='\n') as news_file:
shard_file.write(news_file.read())
def _shard_news(txt_dir, source_dir, num_shards, num_processes):
news_paths = [
f for f in get_all_files_paths_under(txt_dir)
if os.path.splitext(f)[1] == '.txt'
]
shards = [(
os.path.join(source_dir, '{}.txt'.format(shard_idx)),
news_paths[shard_idx::num_shards],
) for shard_idx in range(num_shards)]
with multiprocessing.Pool(num_processes) as p:
list(tqdm.tqdm(p.imap(_aggregate_news, shards), total=len(shards)))
def main(args):
if args.start_date is not None:
args.start_date = datetime.datetime.strptime(
args.start_date,
args.start_date_format,
)
if args.end_date is not None:
args.end_date = datetime.datetime.strptime(
args.end_date,
args.end_date_format,
)
if args.warc_files_start_date is not None:
args.warc_files_start_date = datetime.datetime.strptime(
args.warc_files_start_date,
args.warc_files_start_date_format,
)
if args.warc_files_end_date is not None:
args.warc_files_end_date = datetime.datetime.strptime(
args.warc_files_end_date,
args.warc_files_end_date_format,
)
args.outdir = expand_outdir_and_mkdir(args.outdir)
txt_dir = os.path.join(args.outdir, 'txt')
if args.newsplease:
mkdir(txt_dir)
commoncrawl_crawler.crawl_from_commoncrawl(
functools.partial(
_on_valid_article_extracted,
articles_per_write=args.articles_per_write,
langs=set(args.langs),
txt_dir=txt_dir,
prefix=args.prefix,
),
callback_on_warc_completed=functools.partial(
_on_warc_completed,
txt_dir=txt_dir,
prefix=args.prefix,
),
valid_hosts=args.valid_hosts,
start_date=args.start_date,
end_date=args.end_date,
warc_files_start_date=args.warc_files_start_date,
warc_files_end_date=args.warc_files_end_date,
strict_date=args.strict_date,
reuse_previously_downloaded_files=args.
reuse_previously_downloaded_files,
local_download_dir_warc=os.path.join(args.outdir, 'warc'),
continue_after_error=args.continue_after_error,
show_download_progress=args.show_download_progress,
number_of_extraction_processes=args.number_of_extraction_processes,
log_level=logging.WARNING,
delete_warc_after_extraction=args.delete_warc_after_extraction,
continue_process=args.continue_process,
fetch_images=False,
)
if args.shard:
source_dir = os.path.join(args.outdir, 'source')
mkdir(source_dir)
_shard_news(
txt_dir,
source_dir,
args.num_shards,
args.number_of_sharding_processes,
)
print('Dask source prepared at {} !'.format(source_dir))
def console_script():
main(attach_args().parse_args())
| LDDL-main | lddl/download/common_crawl.py |
LDDL-main | lddl/download/__init__.py |
|
#
# SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
# Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
import argparse
import functools
import multiprocessing
import os
import shutil
from glob import glob
import subprocess
import tqdm
import gdown
from lddl.utils import (
expand_outdir_and_mkdir,
mkdir,
get_all_files_paths_under,
attach_bool_arg,
)
def attach_args(parser=argparse.ArgumentParser("""
OpenWebTextCorpus Downloader performs the following steps:
- Step 1: Download OpenWebTextCorpus
(https://skylion007.github.io/OpenWebTextCorpus/)
  from the provided Google Drive URL and extract the raw text of the articles to
the directory specified by the --outdir flag.
- Step 2: Prepare and aggregate the raw text into text shards in the 'source'
subdirectory under the directory specified by the --outdir flag. The text
shards under the 'source' subdirectory can then be used as the input to the
LDDL preprocessor.
All steps are executed by default. Each step, before it starts, expects the
previous steps to have already finished. You can turn the download off with
--no-download, the extraction off with --no-unzip, and the sharding off with
--no-shard.
""")):
parser.add_argument(
'--outdir',
type=str,
default=None,
required=True,
help='path to the output dir',
)
attach_bool_arg(
parser,
'download',
default=True,
help_str='--download is set by default. To skip download, explicitly set '
'--no-download.',
)
attach_bool_arg(
parser,
'unzip',
default=True,
help_str='--unzip is set by default. To skip unzip, explicitly set '
'--no-unzip.',
)
attach_bool_arg(
parser,
'shard',
default=True,
help_str='--shard is set by default. To skip shard, explicitly set '
'--no-shard.',
)
parser.add_argument(
'--num-shards',
type=int,
default=32,
help='number of shards',
)
parser.add_argument(
'--shard-num-processes',
type=int,
default=os.cpu_count(),
help='num of processes used to shard OpenWebTextCorpus',
)
parser.add_argument(
'--url',
type=str,
default='https://drive.google.com/uc?id=1EA5V0oetDCOke7afsktL_JDQ-ETtNOvx',
help='the google drive url of OpenWebTextCorpus',
)
return parser
def _shard_pages(shard):
shard_path, pages = shard
with open(shard_path, 'w', newline='\n') as shard_file:
one_line_pages = []
for page in pages:
text_paths = [
f for f in get_all_files_paths_under(page)
if os.path.splitext(f)[1] == '.txt'
]
page_lines = []
for text in text_paths:
with open(text, 'r', encoding='utf-8-sig', newline='\n') as page_file:
sub_page_lines = (pg.strip() for pg in page_file)
sub_page_lines = [pg for pg in sub_page_lines if len(pg) > 0]
page_lines.extend(sub_page_lines)
# The first token is the name of the page.
page_name = os.path.splitext(os.path.basename(page))[0]
one_line_pages.append(' '.join([page_name] + page_lines))
shard_file.write('\n'.join(one_line_pages))
def unzip_subset(subset, text_dir):
try:
subdir_name = subset.split('.xz')[0].split('/')[-1]
tmpdir_name = os.path.join('/tmp', subdir_name)
subdir_name = os.path.join(text_dir, subdir_name)
mkdir(subdir_name)
mkdir(tmpdir_name)
out_path = os.path.join(tmpdir_name, 'tar.out')
err_path = os.path.join(tmpdir_name, 'tar.err')
subprocess.run(
['tar', '-xvf', subset, '-C', subdir_name],
check=True,
stdout=open(out_path, 'w'),
stderr=open(err_path, 'w'),
)
shutil.rmtree(tmpdir_name)
except subprocess.CalledProcessError as e:
print(e, 'Please check {} and {}'.format(out_path, err_path))
raise
def unzip_merge_txt(openweb_dir, text_dir, num_processes):
subset_paths = [
f for f in get_all_files_paths_under(openweb_dir)
if os.path.splitext(f)[1] == '.xz'
]
  with multiprocessing.Pool(num_processes) as p:
    # Use imap (instead of map) so that the tqdm progress bar can update as
    # individual subsets finish, consistent with the other sharding helpers.
    list(
        tqdm.tqdm(p.imap(functools.partial(unzip_subset, text_dir=text_dir),
                         subset_paths),
                  total=len(subset_paths)))
def _shard_openwebs(text_dir, shards_dir, num_shards, num_processes):
dir_paths = [d for d in glob(text_dir + '/*')]
shards = [(
os.path.join(shards_dir, '{}.txt'.format(shard_idx)),
dir_paths[shard_idx::num_shards],
) for shard_idx in range(num_shards)]
with multiprocessing.Pool(num_processes) as p:
list(tqdm.tqdm(p.imap(_shard_pages, shards), total=len(shards)))
def main(args):
args.outdir = expand_outdir_and_mkdir(args.outdir)
target_path = os.path.join(args.outdir, 'openwebtext.tar.xz')
if args.download:
gdown.download(args.url, target_path, quiet=False)
if args.unzip:
print('Unzipping {} ...'.format(target_path))
out_path = os.path.join(args.outdir, 'tar.out')
err_path = os.path.join(args.outdir, 'tar.err')
try:
subprocess.run(
['tar', '-xvf', target_path, '-C', args.outdir],
check=True,
stdout=open(out_path, 'w'),
stderr=open(err_path, 'w'),
)
except subprocess.CalledProcessError as e:
print(e, 'Please check {} and {}'.format(out_path, err_path))
raise
openweb_dir = os.path.join(args.outdir, 'openwebtext')
text_dir = os.path.join(args.outdir, 'txt')
mkdir(text_dir)
unzip_merge_txt(openweb_dir, text_dir, args.shard_num_processes)
if args.shard:
text_dir = os.path.join(args.outdir, 'txt')
print('Sharding {} ...'.format(text_dir))
dask_source_path = os.path.join(args.outdir, 'source')
mkdir(dask_source_path)
_shard_openwebs(
text_dir,
dask_source_path,
args.num_shards,
args.shard_num_processes,
)
print('Dask source prepared at {} !'.format(dask_source_path))
def console_script():
main(attach_args().parse_args())
| LDDL-main | lddl/download/openwebtext.py |
#
# SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
# Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
import os
import requests
import tqdm
def download(url, path, chunk_size=16 * 1024 * 1024):
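  """Stream the file at `url` to `path` on disk, `chunk_size` bytes at a time,
  showing a tqdm progress bar based on the Content-Length header (0 is assumed
  if the server does not report it)."""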
with requests.get(url, stream=True) as r:
r.raise_for_status()
total_size = int(r.headers.get('content-length', 0))
progress_bar = tqdm.tqdm(total=total_size, unit='Bytes', unit_scale=True)
with open(path, 'wb') as f:
for chunk in r.iter_content(chunk_size=chunk_size):
progress_bar.update(len(chunk))
f.write(chunk)
progress_bar.close()
def parse_str_of_num_bytes(s, return_str=False):
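  """Parse a human-readable size such as '16M' into a number of bytes, e.g.
  '16M' -> 16 * 1024**2 = 16777216 and '512k' -> 524288. The last character is
  always treated as the unit suffix, so a bare number without a K/M/G suffix
  is not supported. If return_str is True, the original 'n[KMG]' string is
  returned unchanged instead of the integer.
  """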
try:
power = 'kmg'.find(s[-1].lower()) + 1
size = float(s[:-1]) * 1024**power
except ValueError:
raise ValueError('Invalid size: {}'.format(s))
if return_str:
return s
else:
return int(size)
| LDDL-main | lddl/download/utils.py |
#
# SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
# Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
import argparse
import functools
import multiprocessing
import os
import subprocess
import tqdm
import xml.etree.ElementTree as ET
from .utils import download, parse_str_of_num_bytes
from lddl.utils import (expand_outdir_and_mkdir, mkdir,
get_all_files_paths_under, attach_bool_arg)
def _get_url(lang):
assert lang in {'en', 'zh'}
return ('https://dumps.wikimedia.org/{lang}wiki/latest'
'/{lang}wiki-latest-pages-articles.xml.bz2'.format(lang=lang))
def _get_download_target_filename(lang):
return 'wikicorpus-{}.xml.bz2'.format(lang)
def _prepare_one_shard(shard):
source_shard_path, extract_shard_path = shard
articles = []
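  # wikiextractor wraps each article in <doc id="..."> ... </doc> blocks;
  # gather the lines of each block (dropping the title line), and join them
  # into a single line per article, prefixed with the article ID.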
with open(extract_shard_path, 'r', newline='\n') as extract_shard_file:
article_open = None
article_lines = []
for line in extract_shard_file:
if '<doc id=' in line:
article_open = line
elif '</doc>' in line:
article_id = 'wiki-' + ET.fromstring(article_open + line).attrib['id']
article_open = None
# article_lines[0] is the title
if len(article_lines) > 1:
# The first token is the article id.
articles.append(' '.join([article_id] + article_lines[1:]))
article_lines = []
else:
if article_open:
line = line.strip()
if len(line) > 0:
            article_lines.append(line)
if len(articles) > 0:
print('{} -> {}'.format(extract_shard_path, source_shard_path))
with open(source_shard_path, 'w', newline='\n') as source_shard_file:
source_shard_file.write('\n'.join(articles))
def _prepare_dask_source(extract_path, dask_source_path, num_processes):
extracted_shards_paths = [
p for p in get_all_files_paths_under(extract_path) if 'wiki_' in p
]
shards = [(os.path.join(dask_source_path, '{}.txt'.format(i)), esp)
for i, esp in enumerate(extracted_shards_paths)]
with multiprocessing.Pool(num_processes) as p:
list(tqdm.tqdm(p.imap(_prepare_one_shard, shards), total=len(shards)))
def _download_and_extract(
lang='en',
to_download=True,
to_extract=True,
to_prepare_source=True,
download_chunk_size=16 * 1024 * 1024,
extract_shard_size='128M',
outdir=None,
num_processes=os.cpu_count(),
):
if lang not in {'en', 'zh'}:
raise ValueError('Language {} not supported!'.format(lang))
url = _get_url(lang)
target_filename = _get_download_target_filename(lang)
target_path = os.path.join(outdir, target_filename)
if to_download:
download(url, target_path, chunk_size=download_chunk_size)
extract_path = os.path.join(outdir, 'extracted', lang)
if to_extract:
mkdir(extract_path)
print('Extracting {} ...'.format(target_path))
subprocess.run(
[
'python',
'-m',
'wikiextractor.WikiExtractor',
target_path,
'--output',
extract_path,
'--bytes',
extract_shard_size,
'--processes',
str(num_processes),
],
check=True,
stdout=open(os.path.join(extract_path, 'WikiExtractor.out'), 'w'),
stderr=open(os.path.join(extract_path, 'WikiExtractor.err'), 'w'),
)
if to_prepare_source:
print('Preparing dask source from {} ...'.format(extract_path))
dask_source_path = os.path.join(outdir, 'source', lang)
mkdir(dask_source_path)
_prepare_dask_source(extract_path, dask_source_path, num_processes)
print('Dask source prepared at {} !'.format(dask_source_path))
def attach_args(parser=argparse.ArgumentParser("""
Wikipedia Downloader performs the following steps:
- Step 1: Download the Wikipedia dumps from {} into the directory specified by
the --outdir flag.
- Step 2: Extract the raw text from the Wikipedia dumps which are originally in
the XML format.
- Step 3: Prepare and aggregate the raw text into text shards in the 'source'
subdirectory under the directory specified by the --outdir flag. The text
shards under the 'source' subdirectory can then be used as the input to the
LDDL preprocessor.
All steps are executed by default. Each step, before it starts, expects the
previous steps to have already finished. You can turn Step 1 off with
--no-download, turn Step 2 off with --no-extract, and turn Step 3 off with
--no-prepare-source.
Examples:
# Download the English Wikipedia dumps into wikipedia/wikicorpus-en.xml.bz2 :
$ download_wikipedia --outdir wikipedia/ --no-extract --no-prepare-source
$ tree wikipedia/ # tree can be installed via `sudo apt install tree`.
wikipedia/
└── wikicorpus-en.xml.bz2
# Extract the raw text from the English Wikipedia dumps:
$ download_wikipedia --outdir wikipedia/ --no-download --no-prepare-source
$ tree wikipedia/
wikipedia/
├── extracted
│ └── en
│ ├── AA
│ │ ├── wiki_00
│ │ ├── wiki_01
│ │ ...
│ │ └── wiki_30
│ ├── WikiExtractor.err
│ └── WikiExtractor.out
└── wikicorpus-en.xml.bz2
# Prepare and aggregate the raw text into text shards under wikipedia/source
# which can be read by the LDDL preprocessor as input:
$ download_wikipedia --outdir wikipedia/ --no-download --no-extract
$ tree wikipedia/
wikipedia/
├── extracted
│ └── en
│ ├── AA
│ │ ├── wiki_00
│ │ ├── wiki_01
│ │ ...
│ │ └── wiki_30
│ ├── WikiExtractor.err
│ └── WikiExtractor.out
├── source
│ └── en
│ ├── 0.txt
│ ├── 1.txt
│ ...
│ └── 30.txt
└── wikicorpus-en.xml.bz2
# wikipedia/source/ is the input to the LDDL preprocessor.
# Or, we could do all 3 steps together:
$ download_wikipedia --outdir wikipedia/
""".format(_get_url('en')))):
parser.add_argument(
'--outdir',
type=str,
default=None,
required=True,
      help='Path to the output directory. This directory will be created if it'
      ' does not already exist.',
)
defaults = {
'--langs': ['en'],
'--download-chunk-size': 16 * 1024 * 1024,
'--extract-shard-size': '512M',
'--num-processes': os.cpu_count(),
}
parser.add_argument(
'--langs',
default=defaults['--langs'],
nargs='+',
choices=['en', 'zh'],
help='Language of the wikipedia dumps to download. Default: {}'.format(
defaults['--langs']),
)
attach_bool_arg(
parser,
'download',
default=True,
help_str='--download is set by default. To skip Step 1, explicitly set '
'--no-download.',
)
attach_bool_arg(
parser,
'extract',
default=True,
help_str='--extract is set by default. To skip Step 2, explicitly set '
'--no-extract.')
attach_bool_arg(
parser,
'prepare-source',
default=True,
help_str='--prepare-source is set by default. To skip Step 3, explicitly '
'set --no-prepare-source.',
)
parser.add_argument(
'--download-chunk-size',
type=functools.partial(parse_str_of_num_bytes, return_str=False),
default=defaults['--download-chunk-size'],
metavar="n[KMG]",
help='The downloading will be performed in a streaming way by looping '
'over the following steps: (i) transfer a small chunk of data over the '
'network into the host memory, (ii) write this chunk onto disk. This flag'
' indicates the chunk size. Default: {}'.format(
defaults['--download-chunk-size']),
)
parser.add_argument(
'--extract-shard-size',
type=functools.partial(parse_str_of_num_bytes, return_str=True),
default=defaults['--extract-shard-size'],
metavar="n[KMG]",
help='The size of each text shard. Default: {}'.format(
defaults['--extract-shard-size']),
)
parser.add_argument(
'--num-processes',
type=int,
      default=defaults['--num-processes'],
help='Num of processes to use. Default: {}'.format(
defaults['--num-processes']),
)
return parser
def main(args):
args.outdir = expand_outdir_and_mkdir(args.outdir)
for lang in args.langs:
_download_and_extract(
lang=lang,
to_download=args.download,
to_extract=args.extract,
to_prepare_source=args.prepare_source,
download_chunk_size=args.download_chunk_size,
extract_shard_size=args.extract_shard_size,
outdir=args.outdir,
num_processes=args.num_processes,
)
def console_script():
main(attach_args().parse_args())
| LDDL-main | lddl/download/wikipedia.py |
#
# SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
# Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
import logging
import os
import pathlib
def _get_logger_name(node_rank, local_rank=None, worker_rank=None):
if local_rank is None and worker_rank is None:
return 'node-{}'.format(node_rank)
elif worker_rank is None:
return 'node-{}_local-{}'.format(node_rank, local_rank)
else:
return 'node-{}_local-{}_worker-{}'.format(node_rank, local_rank,
worker_rank)
class DummyLogger:
def debug(self, msg, *args, **kwargs):
pass
def info(self, msg, *args, **kwargs):
pass
def warning(self, msg, *args, **kwargs):
pass
def error(self, msg, *args, **kwargs):
pass
def critical(self, msg, *args, **kwargs):
pass
def log(self, msg, *args, **kwargs):
pass
def exception(self, msg, *args, **kwargs):
pass
class DatasetLogger:
def __init__(
self,
log_dir=None,
node_rank=0,
local_rank=0,
log_level=logging.INFO,
):
self._log_dir = log_dir
self._node_rank = node_rank
self._local_rank = local_rank
self._worker_rank = None
self._log_level = log_level
if log_dir is not None:
pathlib.Path(log_dir).mkdir(parents=True, exist_ok=True)
# Create node level logger.
if local_rank == 0:
self._create_logger(_get_logger_name(node_rank))
# Create local_rank level logger.
self._create_logger(_get_logger_name(node_rank, local_rank=local_rank))
def _create_logger(self, name):
logger = logging.getLogger(name)
fmt = logging.Formatter(
'LDDL - %(asctime)s - %(filename)s:%(lineno)d:%(funcName)s - %(name)s '
'- %(levelname)s : %(message)s')
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(fmt)
logger.addHandler(stream_handler)
if self._log_dir is not None:
path = os.path.join(self._log_dir, '{}.txt'.format(name))
file_handler = logging.FileHandler(path)
file_handler.setFormatter(fmt)
logger.addHandler(file_handler)
logger.setLevel(self._log_level)
return logger
def init_for_worker(self, worker_rank):
if self._worker_rank is None:
self._worker_rank = worker_rank
self._create_logger(
_get_logger_name(
self._node_rank,
local_rank=self._local_rank,
worker_rank=worker_rank,
))
def to(self, which):
assert which in {'node', 'rank', 'worker'}
if which == 'node':
if (self._local_rank == 0 and
(self._worker_rank is None or self._worker_rank == 0)):
return logging.getLogger(_get_logger_name(self._node_rank))
else:
return DummyLogger()
elif which == 'rank':
if self._worker_rank is None or self._worker_rank == 0:
return logging.getLogger(
_get_logger_name(self._node_rank, local_rank=self._local_rank))
else:
return DummyLogger()
else: # which == 'worker'
return logging.getLogger(
_get_logger_name(
self._node_rank,
local_rank=self._local_rank,
worker_rank=self._worker_rank,
))
| LDDL-main | lddl/torch_mp/log.py |
#
# SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
# Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
import itertools
import json
import logging
import numpy as np
import os
import pathlib
import pyarrow.parquet as pq
import random
import torch
import warnings
from torch.utils.data import IterableDataset
from torch.utils.data import get_worker_info
from lddl.types import File
from lddl.utils import get_num_samples_of_parquet
from lddl.random import randrange, shuffle, sample
from .utils import (
get_rank,
get_world_size,
get_nproc_per_node,
get_num_nodes,
get_node_rank,
get_dp_size,
)
class ShuffleBuffer:
def __init__(self, files, max_num_samples_to_yield, decode_record_batch, size,
warmup_factor, logger, rng_state, samples_seen):
num_samples_wasted = (sum(
(f.num_samples for f in files)) - max_num_samples_to_yield)
assert 0 <= num_samples_wasted <= len(files)
self._files = files
self._max_num_samples_to_yield = max_num_samples_to_yield
self._decode_record_batch = decode_record_batch
self._size = size
self._warmup_factor = warmup_factor
self._logger = logger
self._rng_state = rng_state
self.samples_seen = samples_seen
@property
def num_samples(self):
return sum((f.num_samples for f in self._files))
def _randrange(self, stop):
n, self._rng_state = randrange(stop, rng_state=self._rng_state)
return n
def _shuffle(self, x):
self._rng_state = shuffle(x, rng_state=self._rng_state)
def __iter__(self):
buffer = []
num_samples_to_yield = min(
self._max_num_samples_to_yield,
sum((f.num_samples for f in self._files)) - self.samples_seen,
)
remaining_num_samples = num_samples_to_yield
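    # Reservoir-style shuffling: grow the buffer up to a limit that ramps up
    # with the warmup factor; once the buffer is "full", each incoming sample
    # replaces a randomly chosen buffered sample, which is yielded instead.
    # Whatever remains in the buffer at the end is shuffled and drained.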
for f in self._files:
self._logger.to('worker').info('Reading {}'.format(f.path))
if self.samples_seen > 0:
len_par = f.num_samples
# Skip entire parquet if possible
if len_par < self.samples_seen:
self.samples_seen -= len_par
continue
pq_table = pq.read_table(f.path)
if self.samples_seen > 0:
pq_table = pq_table.slice(self.samples_seen)
self.samples_seen = 0
for b in pq_table.to_batches():
for sample in self._decode_record_batch(b):
if remaining_num_samples <= 0:
return
if (len(buffer)
>= min(self._size,
(num_samples_to_yield - remaining_num_samples + 1) *
self._warmup_factor)):
replace_idx = self._randrange(len(buffer))
yield buffer[replace_idx]
buffer[replace_idx] = sample
remaining_num_samples -= 1
else:
buffer.append(sample)
self._shuffle(buffer)
for sample in buffer:
if remaining_num_samples <= 0:
return
yield sample
remaining_num_samples -= 1
class ParquetDataset(IterableDataset):
def __init__(
self,
file_paths,
samples_seen=0,
transform=lambda x: x,
local_rank=0,
dp_rank=0,
shuffle_buffer_size=16384,
shuffle_buffer_warmup_factor=16,
base_seed=12345,
logger=None,
start_epoch=0,
):
super().__init__()
self._transform = transform
self._local_rank = local_rank
self.dp_rank = dp_rank
self._shuffle_buffer_size = shuffle_buffer_size
self._shuffle_buffer_warmup_factor = shuffle_buffer_warmup_factor
self._base_seed = base_seed
self._rank = get_rank()
self._world_size = get_world_size()
self._nproc_per_node = get_nproc_per_node(local_rank)
self._num_dp_groups = get_dp_size(dp_rank)
self._num_nodes = get_num_nodes(nproc_per_node=self._nproc_per_node)
self._node_rank = get_node_rank(nproc_per_node=self._nproc_per_node)
self._epoch = start_epoch - 1
self._logger = logger
self.samples_seen = samples_seen
assert len(file_paths) % self._num_nodes == 0
assert len(file_paths) % self._world_size == 0
self._files = self._get_files(file_paths)
max_num_samples_per_file = max((f.num_samples for f in self._files))
min_num_samples_per_file = min((f.num_samples for f in self._files))
    assert max_num_samples_per_file - min_num_samples_per_file <= 1
self._num_samples_per_file = min_num_samples_per_file
total_num_samples = sum((f.num_samples for f in self._files))
num_samples_lost = (total_num_samples -
self._num_samples_per_file * len(self._files))
self._logger.to('node').warning('lost {}/{}={}% samples in total'.format(
num_samples_lost,
total_num_samples,
num_samples_lost / total_num_samples * 100,
))
self._world_rng_state = None
self._worker_rng_state = None
def _get_files(self, file_paths):
all_files_num_samples = torch.zeros((len(file_paths),), dtype=torch.long)
if self._world_size > 1 and torch.distributed.get_backend() == 'nccl':
all_files_num_samples = all_files_num_samples.to('cuda')
# Figure out how many samples in each file.
num_samples_cache = {} # Map dirname to the dict of {basename: num_samples}
for idx in range(self._rank, len(file_paths), self._world_size):
fp = file_paths[idx]
dn = os.path.dirname(fp)
bn = os.path.basename(fp)
# Load the num_samples cache file if it exists.
if dn not in num_samples_cache:
nsfp = os.path.join(dn, '.num_samples.json')
try:
with open(nsfp, 'r') as nsf:
num_samples_cache[dn] = json.load(nsf)
except Exception as e:
self._logger.to('rank').warning('failed to load {}: {}'.format(
nsfp, e))
# Mark that the num_samples cache file doesn't exist for this
# directory.
num_samples_cache[dn] = None
if num_samples_cache[dn] is not None and bn in num_samples_cache[dn]:
all_files_num_samples[idx] = num_samples_cache[dn][bn]
else:
# Find out num_samples by loading the parquet table.
all_files_num_samples[idx] = get_num_samples_of_parquet(fp)
if self._world_size > 1:
# Sync. accross all ranks.
torch.distributed.all_reduce(
all_files_num_samples,
op=torch.distributed.ReduceOp.SUM,
)
all_files_num_samples = all_files_num_samples.tolist()
return [File(fp, ns) for fp, ns in zip(file_paths, all_files_num_samples)]
def __len__(self):
""" This function only returns how many samples per rank will be yielded
by this dataset.
Note that, len(dataloader), where dataloader is a PyTorch DataLoader
wrapping this dataset, does NOT return the accurate number of batches. This
is because, when (num_samples_per_file * num_files_per_worker) is not
divisible by batch_size, each worker is going to generate a partial batch
at the very end.
However, PyTorch DataLoader's __len__ only divide the number returned from
this function by batch_size, which would be smaller than the actual number
of batches by at most (num_workers - 1).
    PyTorch DataLoader's __len__ therefore needs to be patched (see the
    DataLoader subclass in dataloader.py) for len(dataloader) to behave
    correctly.
"""
return (self._num_samples_per_file * len(self._files) //
self._num_dp_groups) - self.samples_seen
@property
def num_samples_per_file(self):
return self._num_samples_per_file
@property
def num_files_per_rank(self):
return len(self._files) // self._num_dp_groups
def _decode_record_batch(self, b):
raise NotImplementedError('ParquetDataset is an abstract/interface class!')
def _world_identical_sample(self, population, k, counts=None):
s, self._world_rng_state = sample(
population,
k,
rng_state=self._world_rng_state,
)
return s
def _init_worker(self):
worker_info = get_worker_info()
if worker_info is None:
num_workers_per_rank = 1
worker_rank = 0
else:
num_workers_per_rank = worker_info.num_workers
worker_rank = worker_info.id
assert (len(self._files) % (self._world_size * num_workers_per_rank) == 0)
self._logger.init_for_worker(worker_rank)
return worker_rank, num_workers_per_rank
def _init_rng_states(self, worker_rank, num_workers_per_rank):
orig_rng_state = random.getstate()
random.seed(self._base_seed + self._epoch)
self._world_rng_state = random.getstate()
worker_seed_num = self._base_seed + (
self._epoch * self._world_size +
self.dp_rank) * num_workers_per_rank + worker_rank
random.seed(worker_seed_num)
self._worker_rng_state = random.getstate()
random.setstate(orig_rng_state)
def __iter__(self):
self._epoch += 1
worker_rank, num_workers_per_rank = self._init_worker()
self._init_rng_states(worker_rank, num_workers_per_rank)
files = self._world_identical_sample(self._files, k=len(self._files))
self._logger.to('node').warning('epoch = {}'.format(self._epoch))
rank_files = files[self.dp_rank::self._num_dp_groups]
worker_files = rank_files[worker_rank::num_workers_per_rank]
self.sb = ShuffleBuffer(worker_files,
self._num_samples_per_file * len(worker_files),
lambda b: self._decode_record_batch(b),
self._shuffle_buffer_size,
self._shuffle_buffer_warmup_factor, self._logger,
self._worker_rng_state, self.samples_seen)
for sample in iter(self.sb):
sample = self._transform(sample)
yield sample
self.samples_seen = 0
| LDDL-main | lddl/torch_mp/datasets.py |
from .bert import get_bert_pretrain_data_loader
| LDDL-main | lddl/torch_mp/__init__.py |
#
# SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
# Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
import torch
def barrier():
if torch.distributed.is_available() and torch.distributed.is_initialized():
torch.distributed.barrier()
def get_dp_size(dp_rank):
"""
This helper function will return how many data parallel groups we have in our
system.
"""
if torch.distributed.is_available() and torch.distributed.is_initialized():
max_dp_rank = torch.tensor(
dp_rank,
device='cuda' if torch.distributed.get_backend() == 'nccl' else 'cpu',
)
torch.distributed.all_reduce(
max_dp_rank,
op=torch.distributed.ReduceOp.MAX,
)
dp_size = max_dp_rank.item() + 1
else:
dp_size = 1
return dp_size
def get_rank():
if torch.distributed.is_available() and torch.distributed.is_initialized():
rank = torch.distributed.get_rank()
else:
rank = 0
return rank
def get_world_size():
if torch.distributed.is_available() and torch.distributed.is_initialized():
world_size = torch.distributed.get_world_size()
else:
world_size = 1
return world_size
def get_nproc_per_node(local_rank):
if torch.distributed.is_available() and torch.distributed.is_initialized():
max_local_rank = torch.tensor(
local_rank,
device='cuda' if torch.distributed.get_backend() == 'nccl' else 'cpu',
)
torch.distributed.all_reduce(
max_local_rank,
op=torch.distributed.ReduceOp.MAX,
)
nproc_per_node = max_local_rank.item() + 1
else:
nproc_per_node = 1
return nproc_per_node
def get_num_nodes(local_rank=None, nproc_per_node=None):
if torch.distributed.is_available() and torch.distributed.is_initialized():
if nproc_per_node is None:
assert local_rank is not None
nproc_per_node = get_nproc_per_node(local_rank)
num_nodes = get_world_size() // nproc_per_node
else:
num_nodes = 1
return num_nodes
def get_node_rank(local_rank=None, nproc_per_node=None):
""" This assume the training processes are launched via
torch.distributed.launch.py. Therefore, the ordering scheme of
rank -> (node_rank, local_rank) mapping is:
0 -> (0, 0)
1 -> (0, 1)
...
nproc_per_node -> (1, 0)
nproc_per_node+1 -> (1, 1)
...
"""
if torch.distributed.is_available() and torch.distributed.is_initialized():
if nproc_per_node is None:
assert local_rank is not None
nproc_per_node = get_nproc_per_node(local_rank)
node_rank = get_rank() // nproc_per_node
else:
node_rank = 0
return node_rank
| LDDL-main | lddl/torch_mp/utils.py |
#
# SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
# Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
import random
import torch
from lddl.random import choices
from .datasets import ParquetDataset
from .utils import get_rank
class Binned:
def __init__(self,
dataloaders,
base_seed=12345,
start_epoch=0,
global_batch_size=64,
logger=None):
self._dataloaders = dataloaders
self._base_seed = base_seed
self._epoch = start_epoch - 1
self._logger = logger
self._world_rng_state = None
self.current_iteration = 0
self.global_batch_size = global_batch_size
self.bin_id = None
self.global_batch = []
def _init_rng_states(self):
orig_rng_state = random.getstate()
random.seed(self._base_seed + self._epoch)
self._world_rng_state = random.getstate()
random.setstate(orig_rng_state)
def _init_iter(self):
self._init_rng_states()
num_samples_remaining = [len(dl.dataset) for dl in self._dataloaders]
dataiters = [iter(dl) for dl in self._dataloaders]
return num_samples_remaining, dataiters
def __len__(self):
return sum((len(dl) for dl in self._dataloaders))
def _get_batch_size(self, batch):
raise NotImplementedError('Binned is an abstract class!')
def _choices(self, population, weights=None, cum_weights=None, k=1):
c, self._world_rng_state = choices(
population,
weights=weights,
cum_weights=cum_weights,
k=k,
rng_state=self._world_rng_state,
)
return c
def get_samples_seen_datasets(self, samples_seen, batch_size):
num_samples_remaining, dataiters = self._init_iter()
# Skip epochs that have already been seen
self._epoch = samples_seen // sum(num_samples_remaining)
samples_seen = samples_seen % sum(num_samples_remaining)
self._init_rng_states()
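    # Replay the bin-selection sequence for the samples that were already
    # consumed, so that each bin knows how many of its own samples to skip on
    # resume.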
if samples_seen > 0:
bins_samples_seen = [0] * len(self._dataloaders)
while samples_seen > 0:
bin_id = self._choices(
list(range(len(self._dataloaders))),
weights=num_samples_remaining,
k=1,
)[0]
num_samples_remaining[bin_id] -= self.global_batch_size
bins_samples_seen[bin_id] += self.global_batch_size
samples_seen -= self.global_batch_size
return bins_samples_seen, self._epoch
def set_next(self):
    # At the end of the epoch, set global_batch to None to let the iterator
    # know that we are done.
if max(self.num_samples_remaining) <= self.global_batch_size:
self.global_batch = None
else:
if self.global_batch == []:
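        # Pick the next bin with probability proportional to the number of
        # samples remaining in each bin.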
self.bin_id = self._choices(
list(range(len(self.dataiters))),
weights=self.num_samples_remaining,
k=1,
)[0]
self.global_batch = next(self.dataiters[self.bin_id])
self.num_samples_remaining[self.bin_id] -= self.global_batch_size
self.current_iteration += 1
def get_seqlen(self):
return self.global_batch[0]['text'].shape[1]
def __next__(self):
if self.global_batch is None:
      raise StopIteration
else:
sample = self.global_batch.pop()
self.set_next()
return sample
def __iter__(self):
self._epoch += 1
self.num_samples_remaining, self.dataiters = self._init_iter()
self.set_next()
return self
class DataLoader(torch.utils.data.DataLoader):
def __len__(self):
if isinstance(self.dataset, ParquetDataset):
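      # Each worker may emit a final partial batch, so count batches per
      # worker and sum them, instead of dividing the total number of samples
      # by the batch size.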
num_workers_per_rank = max(self.num_workers, 1)
num_files_per_worker = self.dataset.num_files_per_rank // num_workers_per_rank
num_samples_per_worker = self.dataset.num_samples_per_file * num_files_per_worker
num_batches_per_worker = (
(num_samples_per_worker - 1) // self.batch_size + 1)
return num_batches_per_worker * num_workers_per_rank
else:
      return super().__len__() | LDDL-main | lddl/torch_mp/dataloader.py |
#
# SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
# Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
import logging
import numpy as np
import os
import random
import torch
import transformers
from typing import List, Optional, Tuple, Union
from collections import deque
from lddl.utils import (get_all_parquets_under, get_all_bin_ids,
get_file_paths_for_bin_id, deserialize_np_array)
from .dataloader import Binned, DataLoader
from .datasets import ParquetDataset
from .log import DatasetLogger
from .utils import get_node_rank, get_nproc_per_node
def _decode_record_batch(b):
b = b.to_pydict()
if 'masked_lm_positions' in b:
assert 'masked_lm_labels' in b
columns = tuple((b[k] for k in (
'A',
'B',
'is_random_next',
'masked_lm_positions',
'masked_lm_labels',
) if k in b))
for sample in zip(*columns):
yield sample
class BertPretrainDataset(ParquetDataset):
def _decode_record_batch(self, b):
return _decode_record_batch(b)
class BertPretrainBinned(Binned):
def _get_batch_size(self, batch):
return batch['text'].size(0)
def _to_encoded_inputs(
batch,
tokenizer,
micro_batch_size,
sequence_length_alignment=8,
ignore_index=-1,
):
#batch_size = len(batch)
As, Bs, are_random_next = [], [], []
static_masking = (len(batch[0]) > 3)
if static_masking:
assert len(batch[0]) == 5
all_masked_lm_positions, all_masked_lm_labels = [], []
# Unpack each field.
for sample in batch:
As.append(tuple(sample[0].split()))
Bs.append(tuple(sample[1].split()))
are_random_next.append(sample[2])
if static_masking:
all_masked_lm_positions.append(
torch.from_numpy(deserialize_np_array(sample[3]).astype(int)))
all_masked_lm_labels.append(sample[4].split())
# Figure out the sequence length of this batch.
batch_seq_len = max(
(len(tokens_A) + len(tokens_B) + 3 for tokens_A, tokens_B in zip(As, Bs)))
  # Align batch_seq_len to a multiple of sequence_length_alignment, since
  # Tensor Cores perform best when tensor dimensions are multiples of 8.
batch_seq_len = (((batch_seq_len - 1) // sequence_length_alignment + 1) *
sequence_length_alignment)
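  # For example, with sequence_length_alignment=8, a longest combined length of
  # 61 is padded up to 64 (((61 - 1) // 8 + 1) * 8 == 64), while 64 stays 64.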
global_batch = []
global_batch_size = len(batch)
for i in range(0, global_batch_size, micro_batch_size):
# Allocate the input torch.Tensor's.
input_ids = torch.zeros(micro_batch_size, batch_seq_len, dtype=torch.long)
token_type_ids = torch.zeros_like(input_ids)
attention_mask = torch.zeros_like(input_ids)
if static_masking:
labels = torch.full_like(input_ids, ignore_index)
loss_mask = torch.zeros(micro_batch_size, batch_seq_len, dtype=torch.long)
mb_masked_lm_positions = all_masked_lm_positions[i:(i + micro_batch_size)]
for j, indices in enumerate(mb_masked_lm_positions):
loss_mask[j].scatter_(0, indices, 1.)
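        # e.g. if indices is tensor([2, 7]), positions 2 and 7 of loss_mask[j]
        # are set to 1 and every other position stays 0.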
else:
special_tokens_mask = torch.zeros_like(input_ids)
# Fill in the input torch.Tensor's.
for sample_idx in range(i, (i + micro_batch_size)):
local_sample_index = sample_idx % micro_batch_size
tokens_A, tokens_B = As[sample_idx], Bs[sample_idx]
# Prepare the input token IDs.
tokens = ('[CLS]',) + tokens_A + ('[SEP]',) + tokens_B + ('[SEP]',)
input_ids[local_sample_index, :len(tokens)] = torch.as_tensor(
tokenizer.convert_tokens_to_ids(tokens),
dtype=torch.long,
)
# Prepare the token type ids (segment ids).
start_idx = len(tokens_A) + 2
end_idx = len(tokens_A) + len(tokens_B) + 3
token_type_ids[local_sample_index, start_idx:end_idx] = 1
# Prepare the attention mask (input mask).
attention_mask[local_sample_index, :end_idx] = 1
if static_masking:
# Prepare the MLM labels.
labels[local_sample_index,
all_masked_lm_positions[sample_idx]] = torch.as_tensor(
tokenizer.convert_tokens_to_ids(
all_masked_lm_labels[sample_idx]),
dtype=torch.long,
)
else:
        # Note: dynamic masking is not supported for model parallelism.
        # Prepare special_tokens_mask (for DataCollatorForLanguageModeling).
        # Index with local_sample_index since special_tokens_mask is sized per
        # micro-batch.
        special_tokens_mask[local_sample_index, 0] = 1
        special_tokens_mask[local_sample_index, len(tokens_A) + 1] = 1
        special_tokens_mask[local_sample_index,
                            len(tokens_A) + len(tokens_B) + 2:] = 1
# Compose output dict.
encoded_inputs = {
'text':
input_ids,
'types':
token_type_ids,
'padding_mask':
attention_mask,
'is_random':
torch.as_tensor(
are_random_next[i:(i + micro_batch_size)],
dtype=torch.long,
),
}
if static_masking:
encoded_inputs['labels'] = labels
encoded_inputs['loss_mask'] = loss_mask
else:
encoded_inputs['special_tokens_mask'] = special_tokens_mask
global_batch.append(encoded_inputs)
return global_batch
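# The list returned above holds ceil(global_batch_size / micro_batch_size)
# micro-batch dicts. With static masking each dict carries 'text', 'types',
# 'padding_mask', 'is_random', 'labels' and 'loss_mask'; otherwise
# 'special_tokens_mask' is included instead of 'labels'/'loss_mask'.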
def _mask_tokens(
inputs,
special_tokens_mask=None,
tokenizer=None,
mlm_probability=0.15,
ignore_index=-1,
):
"""
Prepare masked tokens inputs/labels for masked language modeling: 80% MASK,
10% random, 10% original.
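  Rough worked example (assuming mlm_probability=0.15): in a 128-token
  sequence about 19 tokens are selected; of those, roughly 15 become [MASK],
  about 2 are replaced with random vocabulary tokens, and about 2 are left
  unchanged.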
"""
labels = inputs.clone()
# We sample a few tokens in each sequence for MLM training (with probability
# `mlm_probability`)
probability_matrix = torch.full(labels.shape, mlm_probability)
if special_tokens_mask is None:
special_tokens_mask = [
tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True)
for val in labels.tolist()
]
special_tokens_mask = torch.tensor(special_tokens_mask, dtype=torch.bool)
else:
special_tokens_mask = special_tokens_mask.bool()
probability_matrix.masked_fill_(special_tokens_mask, value=0.0)
masked_indices = torch.bernoulli(probability_matrix).bool()
# We only compute loss on masked tokens
labels[~masked_indices] = ignore_index
# 80% of the time, we replace masked input tokens with tokenizer.mask_token
# ([MASK])
indices_replaced = (torch.bernoulli(torch.full(labels.shape, 0.8)).bool() &
masked_indices)
inputs[indices_replaced] = tokenizer.convert_tokens_to_ids(
tokenizer.mask_token)
# 10% of the time, we replace masked input tokens with random word
indices_random = (torch.bernoulli(torch.full(labels.shape, 0.5)).bool() &
masked_indices & ~indices_replaced)
random_words = torch.randint(len(tokenizer), labels.shape, dtype=torch.long)
inputs[indices_random] = random_words[indices_random]
# The rest of the time (10% of the time) we keep the masked input tokens
# unchanged
return inputs, labels
"""
This data loader differs from the one in lddl.torch.bert in that it is built
from the data-parallel rank instead of local_rank. This lets every dataloader
in the same data-parallel group be seeded identically, so all dataloaders in
that group produce the same samples.
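As a hypothetical illustration (not prescribed by this module): with 8 GPUs
split into tensor-parallel groups of size 2, global ranks 0 and 1 could share
dp_rank 0, ranks 2 and 3 dp_rank 1, and so on; both members of a group then
draw identical batches because their seeds derive from the same dp_rank.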
"""
def get_bert_pretrain_data_loader(
path,
local_rank=0,
dp_rank=0,
shuffle_buffer_size=16384,
shuffle_buffer_warmup_factor=16,
tokenizer_class=transformers.BertTokenizerFast,
vocab_file=None,
tokenizer_kwargs={},
data_loader_class=DataLoader,
data_loader_kwargs={},
mlm_probability=0.15,
base_seed=12345,
log_dir=None,
log_level=logging.INFO,
return_raw_samples=False,
start_epoch=0,
sequence_length_alignment=8,
ignore_index=-1,
samples_seen=0,
micro_batch_size=64,
):
"""Gets a PyTorch DataLoader for the BERT pretraining task.
The LDDL DataLoader can be used in the same way as a normal PyTorch
DataLoader. The 'persistent_workers' attribute will always be enabled.
The LDDL DataLoader streams samples from disk into memory, and uses a shuffle
buffer to perform shuffling: at each iteration, a random sample from the
shuffle buffer is popped, and a new sample is pushed into the shuffle buffer
at this vacant location.
Args:
path: A string of the path pointing to the directory that contains the
pretraining dataset in the format of balanced parquet shards.
    local_rank: The local rank ID (on this node) of the current pretraining
      process.
    dp_rank: The data-parallel rank of the current pretraining process. All
      processes in the same data-parallel group should pass the same value so
      that they draw identical samples.
shuffle_buffer_size: The size of the shuffle buffer.
shuffle_buffer_warmup_factor: At the beginning, the shuffle buffer is empty.
Therefore, in order to fill the shuffle buffer, at each iteration, more
samples need to be pushed into the shuffle buffer than being popped out
of. This factor indicates how many samples is pushed into the shuffle
buffer per 1 sample being popped out of the shuffle buffer, until the
shuffle buffer is full.
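      For example, with a warmup factor of 16, roughly 16 samples are pushed
      into the shuffle buffer for every sample popped, until the buffer holds
      shuffle_buffer_size samples.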
tokenizer_class: The HuggingFace tokenizer class for BERT pretraining.
vocab_file: The path to a vocab file, or the name of a pretrained model
registered on huggingface.co (e.g., 'bert-large-uncased') of which the
vocab file is downloaded.
tokenizer_kwargs: The arguments to the tokenizer class.
data_loader_class: The class of the DataLoader.
data_loader_kwargs: The arguments to the DataLoader class.
mlm_probability: The probability for masking tokens in the masked language
modeling task (in BERT pretraining).
base_seed: A base seed value on which other seeds used in the DataLoader are
based.
log_dir: The path to a directory to store the logs from the LDDL DataLoader.
log_level: The logging verbose level.
return_raw_samples: If True, returns the raw string pairs instead of token
indices.
start_epoch: The epoch number to start from. An epoch is defined as going
through every sample in a dataset once.
sequence_length_alignment: To get the input tensors of token indices, each
sequence in a batch will only be padded to the longest sequence in this
batch. However, certain hardware features might prefer the shapes of the
input tensors to meet certain conditions. For example, it's better for the
Tensor Core on NVIDIA GPUs if the dimensions of the input tensors are
divisible by 8. Therefore, this argument is an alignment factor such that
the sequences in a batch will be padded to the first sequence length
larger than the longest sequence in this batch and also divisible by this
alignment factor.
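      For example, with an alignment factor of 8, a batch whose longest
      sequence has 61 tokens is padded to length 64.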
    ignore_index: The label value for the unmasked tokens in the language
      modeling task (in BERT pretraining).
    samples_seen: The number of samples already consumed before this run; used
      to fast-forward the dataloader when resuming training.
    micro_batch_size: The number of samples per micro-batch. Each global batch
      of size data_loader_kwargs['batch_size'] is split into micro-batches of
      this size.
Returns:
    A PyTorch DataLoader that, in each iteration, yields:
    - If return_raw_samples is False, a list of micro-batch dicts whose
      key-value pairs are the necessary inputs for BERT pretraining (keys shown
      here for static masking):
      {
        'text': a torch.Tensor of size [micro_batch_size, sequence_length],
        'types': a torch.Tensor of size [micro_batch_size, sequence_length],
        'padding_mask': a torch.Tensor of size [micro_batch_size, sequence_length],
        'is_random': a torch.Tensor of size [micro_batch_size],
        'labels': a torch.Tensor of size [micro_batch_size, sequence_length],
        'loss_mask': a torch.Tensor of size [micro_batch_size, sequence_length],
      }
- If return_raw_samples is True, a list of the following lists:
[
strings of the first sequences in the sequence pairs,
strings of the second sequences in the sequence pairs,
bools that indicate whether the second sequences are the next sequences
for the first sequences,
numpy.ndarrays of positions of the masked tokens for the masked language
modeling task (only exists if static masking is enabled),
        strings of space-separated labels of the masked tokens for the masked
language modeling task (only exists if static masking is enabled),
]
Examples:
    train_dataloader = lddl.torch_mp.get_bert_pretrain_data_loader(
        input_dir,
        local_rank=local_rank,
        dp_rank=dp_rank,
        vocab_file=vocab_file,
        data_loader_kwargs={
            'batch_size': batch_size,
            'num_workers': num_workers,
            'pin_memory': True,
        },
        log_level=logging.WARNING,
        start_epoch=start_epoch,
    )

    for epoch in range(start_epoch, start_epoch + epochs):
      for i, global_batch in enumerate(train_dataloader):
        for micro_batch in global_batch:
          prediction_scores, seq_relationship_score = model(
              input_ids=micro_batch['text'].to(device),
              token_type_ids=micro_batch['types'].to(device),
              attention_mask=micro_batch['padding_mask'].to(device),
          )
          loss = criterion(
              prediction_scores,
              seq_relationship_score,
              micro_batch['labels'].to(device),
              micro_batch['is_random'].to(device),
          )
          ...
"""
assert isinstance(path, str)
assert isinstance(local_rank, int) and local_rank >= 0
assert isinstance(dp_rank, int) and dp_rank >= 0
assert isinstance(shuffle_buffer_size, int) and shuffle_buffer_size > 0
assert (isinstance(shuffle_buffer_warmup_factor, int) and
shuffle_buffer_warmup_factor > 0)
assert tokenizer_class in {
transformers.BertTokenizerFast, transformers.BertTokenizer
}
assert isinstance(vocab_file, str)
assert isinstance(tokenizer_kwargs, dict)
assert data_loader_class in {DataLoader}
assert isinstance(data_loader_kwargs, dict)
assert isinstance(mlm_probability, (int, float)) and 0 <= mlm_probability <= 1
assert isinstance(base_seed, int)
assert log_dir is None or isinstance(log_dir, str)
assert log_level in {
logging.NOTSET, logging.DEBUG, logging.INFO, logging.WARNING,
logging.ERROR, logging.CRITICAL
}
assert isinstance(return_raw_samples, bool)
assert isinstance(start_epoch, int)
if os.path.isfile(vocab_file):
tokenizer = tokenizer_class(vocab_file, **tokenizer_kwargs)
else:
tokenizer = tokenizer_class.from_pretrained(vocab_file, **tokenizer_kwargs)
def _batch_preprocess(batch, micro_batch_size):
with torch.no_grad():
encoded_inputs = _to_encoded_inputs(
batch,
tokenizer,
micro_batch_size,
sequence_length_alignment=sequence_length_alignment,
ignore_index=ignore_index,
)
      # Dynamic masking: _to_encoded_inputs returns a list of micro-batches,
      # so masking is applied per micro-batch (note: not supported for model
      # parallelism).
      for mb in encoded_inputs:
        if 'special_tokens_mask' in mb:
          special_tokens_mask = mb.pop('special_tokens_mask', None)
          (mb['text'], mb['labels']) = _mask_tokens(
              mb['text'],
              special_tokens_mask=special_tokens_mask,
              tokenizer=tokenizer,
              mlm_probability=mlm_probability,
              ignore_index=ignore_index,
          )
return encoded_inputs
logger = DatasetLogger(
log_dir=log_dir,
node_rank=get_node_rank(nproc_per_node=get_nproc_per_node(local_rank)),
local_rank=local_rank,
log_level=log_level,
)
dataset_kwargs = {
'local_rank': local_rank,
'dp_rank': dp_rank,
'shuffle_buffer_size': shuffle_buffer_size,
'shuffle_buffer_warmup_factor': shuffle_buffer_warmup_factor,
'base_seed': base_seed,
'logger': logger,
'start_epoch': start_epoch,
}
extra_collate = data_loader_kwargs.get('collate_fn', lambda x: x)
global_batch_size = data_loader_kwargs['batch_size']
if not return_raw_samples:
data_loader_kwargs['collate_fn'] = lambda batch: extra_collate(
_batch_preprocess(batch, micro_batch_size=micro_batch_size))
data_loader_kwargs['persistent_workers'] = True
# Find all the parquet file paths and figure out whether it is binned or
# un-binned.
all_file_paths = get_all_parquets_under(path)
bin_ids = get_all_bin_ids(all_file_paths)
if len(bin_ids) > 0:
if samples_seen > 0:
# temporary dataloader to find how many samples are in each bin
tmp_dl = BertPretrainBinned([
data_loader_class(
BertPretrainDataset(
get_file_paths_for_bin_id(all_file_paths, bin_id),
**dataset_kwargs,
),
**data_loader_kwargs,
) for bin_id in bin_ids
],
base_seed=base_seed,
logger=logger,
global_batch_size=global_batch_size)
bins_samples_seen, start_epoch = tmp_dl.get_samples_seen_datasets(
samples_seen, global_batch_size)
del tmp_dl
data_loader = BertPretrainBinned([
data_loader_class(
BertPretrainDataset(
get_file_paths_for_bin_id(all_file_paths, bin_id),
samples_seen=bins_samples_seen[i],
**dataset_kwargs,
),
**data_loader_kwargs,
) for i, bin_id in enumerate(bin_ids)
],
base_seed=base_seed,
start_epoch=start_epoch,
logger=logger,
global_batch_size=global_batch_size)
else:
data_loader = BertPretrainBinned([
data_loader_class(
BertPretrainDataset(
get_file_paths_for_bin_id(all_file_paths, bin_id),
**dataset_kwargs,
),
**data_loader_kwargs,
) for bin_id in bin_ids
],
base_seed=base_seed,
start_epoch=start_epoch,
logger=logger,
global_batch_size=global_batch_size)
else: # un-binned
data_loader = data_loader_class(
BertPretrainDataset(all_file_paths, **dataset_kwargs),
**data_loader_kwargs,
)
return data_loader | LDDL-main | lddl/torch_mp/bert.py |
#!/usr/bin/env python
"""
A script to lint and test the ProxyFS JSON RPC client library code.
"""
from __future__ import print_function, unicode_literals
from threading import Timer
import os
import argparse
import functools
import logging
import platform
import contextlib
import subprocess
import shutil
import sys
import tempfile
import time
COLORS = {"bright red": '1;31', "bright green": '1;32'}
@contextlib.contextmanager
def return_to_wd():
curdir = os.getcwd()
try:
yield
finally:
os.chdir(curdir)
@contextlib.contextmanager
def self_cleaning_tempdir(*args, **kwargs):
our_tempdir = tempfile.mkdtemp(*args, **kwargs)
try:
yield our_tempdir
finally:
shutil.rmtree(our_tempdir, ignore_errors=True)
def proxyfs_binary_path(binary):
try:
gopath = os.environ["GOPATH"]
except KeyError:
color_print("$GOPATH must be set", 'bright red')
os.exit(1)
return os.path.join(gopath, "bin", binary)
def color_print(content, color=None):
print("\x1b[{color}m{content}\x1b[0m".format(content=content,
color=COLORS[color]))
def proxyfs_package_path(package):
try:
gopath = os.environ["GOPATH"]
except KeyError:
color_print("$GOPATH must be set", 'bright red')
os.exit(1)
return os.path.join(gopath, "src/github.com/swiftstack/ProxyFS", package)
def report(task, success=False):
printer = color_print if sys.stdout.isatty() else lambda *a, **kw: print(*a)
if success:
printer("{} {}".format(task, "succeeded!"), color="bright green")
else:
printer("{} {}".format(task, "failed!"), color="bright red")
def build_jrpcclient(options):
failures = 0
full_lib_path = os.path.dirname(os.path.abspath(__file__))
print("Building Proxyfs RPC client library")
make_success = not(bool(subprocess.call((['make', 'clean']))))
failures += not make_success
make_success = not(bool(subprocess.call((['make', 'all']))))
failures += not make_success
if not options.no_install:
if 'Ubuntu' == platform.linux_distribution()[0]:
install_cmd = ['make', 'install']
if not options.deb_builder:
install_cmd.insert(0, 'sudo')
install_cmd.insert(1, '-E')
make_success = not(bool(subprocess.call(install_cmd)))
failures += not make_success
if 'CentOS Linux' == platform.linux_distribution()[0]:
install_cmd = ['make', 'installcentos']
if not options.deb_builder:
install_cmd.insert(0, 'sudo')
install_cmd.insert(1, '-E')
make_success = not(bool(subprocess.call(install_cmd)))
failures += not make_success
report("build_jrpcclient()", not failures)
return failures
def wait_for_proxyfsd(address, port, interval=0.5, max_iterations=60):
# We're importing requests here to allow build process to work without
# requests.
import requests
current_iteration = 0
is_proxyfs_up = False
while not is_proxyfs_up and current_iteration < max_iterations:
time.sleep(interval)
try:
r = requests.get('http://{}:{}'.format(address, port), timeout=3)
if r.status_code == 200:
is_proxyfs_up = True
except Exception:
pass
current_iteration += 1
return is_proxyfs_up
def test_jrpcclient():
private_ip_addr = "127.0.0.1"
ramswift_port = 4592 # arbitrary
jsonrpc_port = 12347 # 12347 instead of 12345 so that test can run if proxyfsd is already running
jsonrpc_fastport = 32347 # 32347 instead of 32345 so that test can run if proxyfsd is already running
http_port = 15347 # 15347 instead of 15346 so that test can run if proxyfsd is already running
color_printer = color_print if sys.stdout.isatty() else lambda *a, **kw: print(*a)
with self_cleaning_tempdir() as our_tempdir, open(os.devnull) as dev_null:
ramswift = subprocess.Popen(
[proxyfs_binary_path("ramswift"),
"saioramswift0.conf",
"Peer0.PrivateIPAddr={}".format(private_ip_addr),
"SwiftClient.NoAuthTCPPort={}".format(ramswift_port)],
stdout=dev_null, stderr=dev_null,
cwd=proxyfs_package_path("ramswift")
)
proxyfsd = subprocess.Popen(
[proxyfs_binary_path("proxyfsd"),
"saioproxyfsd0.conf",
"Logging.LogFilePath={}/{}".format(our_tempdir, "proxyfsd_jrpcclient.log"),
"Peer0.PrivateIPAddr={}".format(private_ip_addr),
"SwiftClient.NoAuthTCPPort={}".format(ramswift_port),
"JSONRPCServer.TCPPort={}".format(jsonrpc_port),
"JSONRPCServer.FastTCPPort={}".format(jsonrpc_fastport),
"HTTPServer.TCPPort={}".format(http_port)],
stdout=dev_null, stderr=dev_null,
cwd=proxyfs_package_path("proxyfsd")
)
# Make sure proxyfsd hasn't exited before we start the tests
proxyfsd.poll()
if proxyfsd.returncode:
color_printer("Before starting test, nonzero exit status returned from proxyfsd daemon: {}".format(proxyfsd.returncode), color="bright red")
report("jrpcclient tests", not proxyfsd.returncode)
# Print out proxyfsd's stdout since it exited unexpectedly
proxyfsd_logfile = "{}/{}".format(our_tempdir, "proxyfsd_jrpcclient.log")
logfile = open(proxyfsd_logfile, 'r')
print(logfile.read())
logfile.close()
# Clean up
ramswift.terminate()
return proxyfsd.returncode
rpc_config_string = "{}:{}/{}".format(private_ip_addr,
jsonrpc_port,
jsonrpc_fastport)
        # Wait a moment for proxyfsd to get set "Up()".
        # wait_for_proxyfsd(...) returns a boolean, but we'll let the rest of
        # this script manage everything, just as it has been done until now,
        # and specifically handle the case where ProxyFS isn't up.
wait_for_proxyfsd(private_ip_addr, http_port)
jrpcclient_tests = subprocess.Popen(
[os.path.join(".", "test"),
"-r", rpc_config_string],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
cwd=os.getcwd()
)
# Put a time limit on the tests, in case they hang
def kill_proc(p):
color_printer("jrpcclient tests timed out!", color="bright red")
p.kill()
timeout_sec = 200
timer = Timer(timeout_sec, kill_proc, [jrpcclient_tests])
try:
timer.start()
if not options.verbose_jrpcclient:
# This line gets all jrpcclient stdout at once, waits till it's over
jrpcclient_test_stdout, _ = jrpcclient_tests.communicate()
# Emit test stdout only if there was a failure
if jrpcclient_tests.returncode:
print(jrpcclient_test_stdout)
else:
# I'm not confident in this code yet; deadlock may be possible.
# Get all jrpcclient stdout line by line.
# Doesn't continue until the test is done.
# (if thread is still running, it won't return)
while True:
line = jrpcclient_tests.stdout.readline()
print(line, end="")
if (line == '' and jrpcclient_tests.poll() != None):
break
finally:
timer.cancel()
proxyfsd.terminate()
time.sleep(0.5) # wait a moment for proxyfsd to get set "Down()"
ramswift.terminate()
report("jrpcclient tests", not jrpcclient_tests.returncode)
return jrpcclient_tests.returncode
def main(options):
    failures = 0
#color_print(go_version[:-1], "bright green")
if not options.quiet:
logging.basicConfig(format="%(message)s", level=logging.INFO)
failures = build_jrpcclient(options)
if not options.just_build_libs and not options.deb_builder:
failures += test_jrpcclient()
return failures
if __name__ == "__main__":
arg_parser = argparse.ArgumentParser(description=__doc__)
arg_parser.add_argument('--cover', '-cover',
action='store_const', const='-cover',
help="include coverage statistics in test output")
libs_group = arg_parser.add_mutually_exclusive_group()
libs_group.add_argument('--just-build-libs', action='store_true',
help="only build C libraries")
arg_parser.add_argument('--verbose-jrpcclient', action='store_true',
help="EXPERIMENTAL, DO NOT USE! "
"emit jrpcclient test stdout even if no failures")
arg_parser.add_argument('--no-install', action='store_true',
help="When building C libraries, do not attempt "
"to install resulting objects")
arg_parser.add_argument('--deb-builder', action='store_true',
help="Modify commands to run inside "
"swift-deb-builder")
arg_parser.add_argument('--quiet', '-q', action='store_true',
help="suppress printing of what commands are being run")
options = arg_parser.parse_args()
exit(main(options))
| proxyfs-jrpc-client-master | regression_test.py |
import sys, struct
headersize = 16
sizeof_u32 = 4
ucode = open(sys.argv[1]).read()[headersize:]
assert struct.calcsize("I") == sizeof_u32
fmt = "I" * (len(ucode) / sizeof_u32)
ints = struct.unpack(fmt, ucode)
print len(ucode), "bytes"
print "sig =", hex(sum(ints) & 0xffffffff)
| nvgpu-master | scripts/nvgpu_ucode/ucodesignature.py |
#! /usr/bin/env python
"""
Snapshot a project into another project and perform the necessary repo actions
to provide a commit message that can be used to trace back to the exact point
in the source repository.
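
Example invocation (paths below are hypothetical):
    python snapshot.py --verbose /path/to/source/project /path/to/destination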
"""
#todo:
# Support svn
# Allow renaming of the source dir in the destination path
# Check if a new snapshot is necessary?
#
import sys
#check the version number so that there is a good error message when argparse is not available.
#This checks for exactly 2.7 which is bad, but it is a python 2 script and argparse was introduced
#in 2.7 which is also the last version of python 2. If this script is updated for python 3 this
#will need to change, but for now it is not safe to allow 3.x to run this.
if sys.version_info[:2] != (2, 7):
    print "Error: snapshot requires python 2.7; detected version is %d.%d." % (sys.version_info[0], sys.version_info[1])
sys.exit(1)
import subprocess, argparse, re, doctest, os, datetime, traceback
def parse_cmdline(description):
parser = argparse.ArgumentParser(usage="snapshot.py [options] source destination", description=description)
parser.add_argument("-n", "--no-comit", action="store_false", dest="create_commit", default=True,
help="Do not perform a commit or create a commit message.")
parser.add_argument("-v", "--verbose", action="store_true", dest="verbose_mode", default=False,
help="Enable verbose mode.")
parser.add_argument("-d", "--debug", action="store_true", dest="debug_mode", default=False,
help="Enable debugging output.")
parser.add_argument("--no-validate-repo", action="store_true", dest="no_validate_repo", default=False,
help="Reduce the validation that the source and destination repos are clean to a warning.")
parser.add_argument("--source-repo", choices=["git","none"], default="",
help="Type of repository of the source, use none to skip all repository operations.")
parser.add_argument("--dest-repo", choices=["git","none"], default="",
help="Type of repository of the destination, use none to skip all repository operations.")
parser.add_argument("source", help="Source project to snapshot from.")
parser.add_argument("destination", help="Destination to snapshot too.")
options = parser.parse_args()
options = validate_options(options)
return options
#end parseCmdline
def validate_options(options):
apparent_source_repo_type="none"
apparent_dest_repo_type="none"
#prevent user from accidentally giving us a path that rsync will treat differently than expected.
options.source = options.source.rstrip(os.sep)
options.destination = options.destination.rstrip(os.sep)
options.source = os.path.abspath(options.source)
options.destination = os.path.abspath(options.destination)
if os.path.exists(options.source):
apparent_source_repo_type, source_root = deterimine_repo_type(options.source)
else:
raise RuntimeError("Could not find source directory of %s." % options.source)
options.source_root = source_root
if not os.path.exists(options.destination):
print "Could not find destination directory of %s so it will be created." % options.destination
os.makedirs(options.destination)
apparent_dest_repo_type, dest_root = deterimine_repo_type(options.destination)
options.dest_root = dest_root
#error on svn repo types for now
if apparent_source_repo_type == "svn" or apparent_dest_repo_type == "svn":
raise RuntimeError("SVN repositories are not supported at this time.")
if options.source_repo == "":
#source repo type is not specified to just using the apparent type.
options.source_repo = apparent_source_repo_type
else:
if options.source_repo != "none" and options.source_repo != apparent_source_repo_type:
raise RuntimeError("Specified source repository type of %s conflicts with determined type of %s" % \
(options.source_repo, apparent_source_repo_type))
if options.dest_repo == "":
#destination repo type is not specified to just using the apparent type.
options.dest_repo = apparent_dest_repo_type
else:
if options.dest_repo != "none" and options.dest_repo != apparent_dest_repo_type:
raise RuntimeError("Specified destination repository type of %s conflicts with determined type of %s" % \
(options.dest_repo, apparent_dest_repo_type))
return options
#end validate_options
def run_cmd(cmd, options, working_dir="."):
cmd_str = " ".join(cmd)
if options.verbose_mode:
print "Running command '%s' in dir %s." % (cmd_str, working_dir)
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=working_dir)
proc_stdout, proc_stderr = proc.communicate()
ret_val = proc.wait()
if options.debug_mode:
print "==== %s stdout start ====" % cmd_str
print proc_stdout
print "==== %s stdout end ====" % cmd_str
print "==== %s stderr ====" % cmd_str
print proc_stderr
print "==== %s stderr ====" % cmd_str
if ret_val != 0:
raise RuntimeError("Command '%s' failed with error code %d. Error message:%s%s%sstdout:%s" % \
(cmd_str, ret_val, os.linesep, proc_stderr, os.linesep, proc_stdout))
return proc_stdout, proc_stderr
#end run_cmd
def deterimine_repo_type(location):
apparent_repo_type = "none"
while location != "":
if os.path.exists(os.path.join(location, ".git")):
apparent_repo_type = "git"
break
elif os.path.exists(os.path.join(location, ".svn")):
apparent_repo_type = "svn"
break
else:
location = location[:location.rfind(os.sep)]
return apparent_repo_type, location
#end deterimine_repo_type
def rsync(source, dest, options):
rsync_cmd = ["rsync", "-ar", "--delete"]
if options.debug_mode:
rsync_cmd.append("-v")
if options.source_repo == "git":
rsync_cmd.append("--exclude=.git")
rsync_cmd.append(options.source)
rsync_cmd.append(options.destination)
run_cmd(rsync_cmd, options)
#end rsync
def create_commit_message(commit_id, commit_log, project_name, project_location):
eol = os.linesep
message = "Snapshot of %s from commit %s" % (project_name, commit_id)
message += eol * 2
message += "From repository at %s" % project_location
message += eol * 2
message += "At commit:" + eol
message += commit_log
return message
#end create_commit_message
def find_git_commit_information(options):
r"""
>>> class fake_options:
... source="."
... verbose_mode=False
... debug_mode=False
>>> myoptions = fake_options()
>>> find_git_commit_information(myoptions)[2:]
('sems', 'software.sandia.gov:/git/sems')
"""
git_log_cmd = ["git", "log", "-1"]
output, error = run_cmd(git_log_cmd, options, options.source)
commit_match = re.match("commit ([0-9a-fA-F]+)", output)
commit_id = commit_match.group(1)
commit_log = output
git_remote_cmd = ["git", "remote", "-v"]
output, error = run_cmd(git_remote_cmd, options, options.source)
remote_match = re.search("origin\s([^ ]*/([^ ]+))", output, re.MULTILINE)
if not remote_match:
raise RuntimeError("Could not find origin of repo at %s. Consider using none for source repo type." % (options.source))
source_location = remote_match.group(1)
source_name = remote_match.group(2).strip()
if source_name[-1] == "/":
source_name = source_name[:-1]
return commit_id, commit_log, source_name, source_location
#end find_git_commit_information
def do_git_commit(message, options):
if options.verbose_mode:
print "Commiting to destination repository."
git_add_cmd = ["git", "add", "-A"]
run_cmd(git_add_cmd, options, options.destination)
git_commit_cmd = ["git", "commit", "-m%s" % message]
run_cmd(git_commit_cmd, options, options.destination)
git_log_cmd = ["git", "log", "--format=%h", "-1"]
commit_sha1, error = run_cmd(git_log_cmd, options, options.destination)
print "Commit %s was made to %s." % (commit_sha1.strip(), options.dest_root)
#end do_git_commit
def verify_git_repo_clean(location, options):
git_status_cmd = ["git", "status", "--porcelain"]
output, error = run_cmd(git_status_cmd, options, location)
if output != "":
if options.no_validate_repo == False:
raise RuntimeError("%s is not clean.%sPlease commit or stash all changes before running snapshot."
% (location, os.linesep))
else:
print "WARNING: %s is not clean. Proceeding anyway." % location
print "WARNING: This could lead to differences in the source and destination."
print "WARNING: It could also lead to extra files being included in the snapshot commit."
#end verify_git_repo_clean
def main(options):
if options.verbose_mode:
print "Snapshotting %s to %s." % (options.source, options.destination)
if options.source_repo == "git":
verify_git_repo_clean(options.source, options)
commit_id, commit_log, repo_name, repo_location = find_git_commit_information(options)
elif options.source_repo == "none":
commit_id = "N/A"
commit_log = "Unknown commit from %s snapshotted at: %s" % (options.source, datetime.datetime.now())
repo_name = options.source
repo_location = options.source
commit_message = create_commit_message(commit_id, commit_log, repo_name, repo_location) + os.linesep*2
if options.dest_repo == "git":
verify_git_repo_clean(options.destination, options)
rsync(options.source, options.destination, options)
if options.dest_repo == "git":
do_git_commit(commit_message, options)
elif options.dest_repo == "none":
file_name = "snapshot_message.txt"
message_file = open(file_name, "w")
message_file.write(commit_message)
message_file.close()
cwd = os.getcwd()
print "No commit done by request. Please use file at:"
print "%s%sif you wish to commit this to a repo later." % (cwd+"/"+file_name, os.linesep)
#end main
if (__name__ == "__main__"):
if ("--test" in sys.argv):
doctest.testmod()
sys.exit(0)
try:
options = parse_cmdline(__doc__)
main(options)
except RuntimeError, e:
print "Error occured:", e
if "--debug" in sys.argv:
traceback.print_exc()
sys.exit(1)
else:
sys.exit(0)
| kokkos-master | config/snapshot.py |
import ipyleaflet as ipl
class BaseMap:
def __init__(self):
self._m = ipl.Map(layers=[], crs=ipl.projections.Simple)
| energy-sdk-l2rpn-master | nvgridui/nvapp/mapcomponent.py |
energy-sdk-l2rpn-master | nvgridui/nvapp/__init__.py |
|
import os, sys
import importlib
# RELOAD_PATHS = ["/home/pavel/work/repos/L2RPN/individual/pavel/topo-sim/"]
def should_reload(m, RELOAD_PATHS):
if m is None:
return False
try:
for rp in RELOAD_PATHS:
if "__file__" in dir(m) and rp in m.__file__:
return True
except:
pass
return False
def reload(mod, RELOAD_PATHS):
# prepare list of modules that should be reloaded (by removing them)
dellst = []
for mname, m in sys.modules.items():
if should_reload(m, RELOAD_PATHS): # and mod != m:
dellst += [mname]
for mname in dellst:
if mname in sys.modules:
del sys.modules[mname]
# now reload
# importlib.reload(mod)
    mod = importlib.import_module(mod.__name__)
return mod
def load_template(filename):
with open(os.path.join(os.path.dirname(__file__), filename)) as f:
return f.read()
def reload_module(modname: str):
if modname not in sys.modules:
if "." in modname:
a, b = modname.split(".")
importlib.import_module(a)
mod = importlib.import_module(modname)
else:
mod = sys.modules[modname]
RELOAD_PATHS = [os.path.dirname(mod.__file__) + "/"]
try:
mod = reload(mod, RELOAD_PATHS)
except:
mod = importlib.import_module(modname)
return mod
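# Example usage (the module name below is hypothetical):
#   mapmod = reload_module("nvapp.mapcomponent")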
| energy-sdk-l2rpn-master | nvgridui/nvapp/common.py |
import json
import ipyvuetify as v
from .appLoader import AppLoader
from .common import load_template
# update the CSS a bit
# cdnstr = """
# <script>
# var html2canvas=require(["/nvgrid/assets/html2canvas.js"], function (h2c){
# html2canvas = h2c;
# });
# h2c=require(["https://github.com/niklasvh/html2canvas/releases/download/v1.3.2/html2canvas.js"], function (a) { h2c=a;})
# </script>
# """
# get_ipython().run_cell_magic("HTML", "", cdnstr)
get_ipython().run_cell_magic(
"HTML",
"",
"<style>\n.jp-Cell {\n margin:unset;\n padding: unset;\n}\n.jp-Cell:not(.jp-mod-noOutputs) .jp-Cell-outputWrapper{\n margin:unset;\n}\n.jp-Notebook {\n margin:unset;\n padding: unset;\n}\n.p-Widget {\n width: 100%;\n}\n</style>",
)
# get_ipython().run_cell_magic("matplotlib", "", "widget")
# load the app configuration
with open("app.json") as f:
j = json.load(f)
theapp = AppLoader(j)
# with theapp.app_output:
display(theapp)
| energy-sdk-l2rpn-master | nvgridui/nvapp/app.py |