code | docstring | func_name | language | repo | path | url | license |
---|---|---|---|---|---|---|---|
def evaluate_and_print_results(prefix, data_iterator, model,
args, timers, forward_step_func, verbose=False, step=None, summary_writer=None):
"""Helper function to evaluate and dump results on screen."""
lm_loss, gpt_loss, bert_loss, sent_loss, multi_loss = evaluate(data_iterator, model, args, timers, verbose=verbose,
forward_step_func=forward_step_func)
lm_ppl = math.exp(min(20, lm_loss))
report_evaluate_metrics(summary_writer, prefix, lm_loss, lm_ppl, gpt_loss, bert_loss, sent_loss, multi_loss, step)
return lm_loss
|
Helper function to evaluate and dump results on screen.
|
evaluate_and_print_results
|
python
|
THUDM/GLM
|
pretrain_glm.py
|
https://github.com/THUDM/GLM/blob/master/pretrain_glm.py
|
MIT
|
def get_train_val_test_data(args, tokenizer):
"""Load the data on rank zero and boradcast number of tokens to all GPUS."""
(train_data, val_data, test_data) = (None, None, None)
# Data loader only on rank 0 of each model parallel group.
if mpu.get_model_parallel_rank() == 0:
data_config = configure_data()
if args.block_lm:
data_set_type = "Block"
elif args.transformer_xl:
data_set_type = "GPT-XL"
else:
data_set_type = "GPT2"
data_config.set_defaults(data_set_type=data_set_type, transpose=False)
train_data, val_data, test_data = data_config.apply(args, tokenizer)
data_counts = torch.cuda.LongTensor([int(args.do_train), int(args.do_valid), int(args.do_test)])
else:
data_counts = torch.cuda.LongTensor([0, 0, 0])
# Broadcast num tokens.
torch.distributed.broadcast(data_counts,
mpu.get_model_parallel_src_rank(),
group=mpu.get_model_parallel_group())
args.do_train = data_counts[0].item()
args.do_valid = data_counts[1].item()
args.do_test = data_counts[2].item()
return train_data, val_data, test_data
|
Load the data on rank zero and broadcast the number of tokens to all GPUs.
|
get_train_val_test_data
|
python
|
THUDM/GLM
|
pretrain_glm.py
|
https://github.com/THUDM/GLM/blob/master/pretrain_glm.py
|
MIT
|
def print_params_min_max_norm(optimizer, iteration):
"""Print min, max, and norm of all parameters."""
index = 0
rank = torch.distributed.get_rank()
string = 'iteration, rank, index, model-parallel, min, max, norm\n'
optimizer_ = optimizer
if isinstance(optimizer, FP16_Optimizer):
optimizer_ = optimizer.optimizer
for param_group in optimizer_.param_groups:
for param in param_group['params']:
index += 1
min_ = param.data.min()
max_ = param.data.max()
norm = param.data.norm()
string += '{:7d}, {:4d}, {:4d}, {:2d}, '.format(
iteration, rank, index, int(param.model_parallel))
string += '{:.6E}, {:.6E}, {:.6E}\n'.format(min_, max_, norm)
print(string, flush=True)
|
Print min, max, and norm of all parameters.
|
print_params_min_max_norm
|
python
|
THUDM/GLM
|
utils.py
|
https://github.com/THUDM/GLM/blob/master/utils.py
|
MIT
|
def load_weights(src, dst, dst2src=False):
"""
Loads weights from src to dst via in place copy.
src is a huggingface gpt2model, while dst is one of our models.
dst2src=True loads parameters from our models into huggingface's.
^dst2src is still untested
"""
conv_layer = 'Conv1D' in str(type(src))
for n, p in src.named_parameters():
if dst2src:
data = dst._parameters[n].data
load = p.data
else:
data = p.data
load = dst._parameters[n].data
if conv_layer and 'weight' in n:
data = data.t().contiguous()
load.copy_(data)
|
Loads weights from src to dst via in place copy.
src is a huggingface gpt2model, while dst is one of our models.
dst2src=True loads parameters from our models into huggingface's.
^dst2src is still untested
|
load_weights
|
python
|
THUDM/GLM
|
utils.py
|
https://github.com/THUDM/GLM/blob/master/utils.py
|
MIT
|
def move_weights(our, oai, dst2src=False):
"""
Loads weights from `oai` to `our` via in place copy.
`oai` is a huggingface gpt2model, while `our` is one of our models.
dst2src=True loads parameters from our models into huggingface's.
^dst2src=True is still untested
"""
# while isinstance(our, (torchDDP, model.distributed.DistributedDataParallel, FP16_Module)):
# our=our.module
transformer_model = oai.transformer
load_weights(transformer_model.ln_f, our.transformer.final_layernorm, dst2src)
load_weights(transformer_model.wte, our.word_embeddings, dst2src)
load_weights(transformer_model.wpe, our.position_embeddings, dst2src)
for our_layer, oai_layer in zip(our.transformer.layers, oai.transformer.h):
load_transformer_layer(our_layer, oai_layer, dst2src)
|
Loads weights from `oai` to `our` via in place copy.
`oai` is a huggingface gpt2model, while `our` is one of our models.
dst2src=True loads parameters from our models into huggingface's.
^dst2src=True is still untested
|
move_weights
|
python
|
THUDM/GLM
|
utils.py
|
https://github.com/THUDM/GLM/blob/master/utils.py
|
MIT
|
def split_ds(ds, split=None, shuffle=True, save_splits=None, load_splits=None):
"""
Split a dataset into subsets given proportions of how
much to allocate per split. If a split is 0% returns None for that split.
Purpose: Useful for creating train/val/test splits
Arguments:
ds (Dataset or array-like): Data to be split.
split (1D array-like): proportions to split `ds`. `sum(splits) != 0`
shuffle (boolean): Randomly split dataset. Default: True
save_splits: save split indices to file
load_splits: load split indices from file
"""
if split is None:
split = [.8, .2, .0]
split_sum = sum(split)
if split_sum == 0:
raise Exception('Split cannot sum to 0.')
split = np.array(split)
split /= split_sum
ds_len = len(ds)
inds = np.arange(ds_len)
if shuffle:
rng = np.random.RandomState(1234)
rng.shuffle(inds)
if load_splits is not None:
inds = np.load(load_splits)
assert len(inds) == ds_len
print_rank_0(f"Load split indices from {load_splits}")
elif save_splits is not None:
if torch.distributed.get_rank() == 0:
np.save(save_splits, inds)
print(f"Save split indices to {save_splits}")
start_idx = 0
residual_idx = 0
rtn_ds = [None] * len(split)
for i, f in enumerate(split):
if f != 0:
proportion = ds_len * split[i]
residual_idx += proportion % 1
split_ = int(int(proportion) + residual_idx)
split_inds = inds[start_idx:start_idx + max(split_, 1)]
rtn_ds[i] = SplitDataset(ds, split_inds)
start_idx += split_
residual_idx %= 1
return rtn_ds
|
Split a dataset into subsets given proportions of how
much to allocate per split. If a split is 0% returns None for that split.
Purpose: Useful for creating train/val/test splits
Arguments:
ds (Dataset or array-like): Data to be split.
split (1D array-like): proportions to split `ds`. `sum(splits) != 0`
shuffle (boolean): Randomly split dataset. Default: True
save_splits: save split indices to file
load_splits: load split indices from file
|
split_ds
|
python
|
THUDM/GLM
|
data_utils/datasets.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/datasets.py
|
MIT
|
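For reference, a minimal standalone sketch of the proportional-split logic above, including the residual-rounding trick; it uses only numpy, and the function name and defaults here are illustrative rather than the repo's API.

```python
import numpy as np

def proportional_split_indices(n, proportions, seed=1234):
    """Split range(n) into index groups sized by `proportions`,
    carrying the fractional remainder forward as split_ds does."""
    props = np.array(proportions, dtype=float)
    props /= props.sum()
    inds = np.arange(n)
    np.random.RandomState(seed).shuffle(inds)
    out, start, residual = [], 0, 0.0
    for f in props:
        if f == 0:
            out.append(None)          # a 0% split yields None
            continue
        exact = n * f
        residual += exact % 1
        size = int(int(exact) + residual)
        out.append(inds[start:start + max(size, 1)])
        start += size
        residual %= 1
    return out

train, val, test = proportional_split_indices(10, [.8, .2, .0])
print(len(train), len(val), test)  # 8 2 None
```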
def __getitem__(self, index):
"""process+tokenize string and return string,label,and stringlen"""
x = self.X[index]
if self.tokenizer is not None:
x = self.tokenizer.EncodeAsIds(x, self.preprocess_fn)
elif self.preprocess_fn is not None:
x = self.preprocess_fn(x)
y = self.Y[index]
if isinstance(y, str):
if self.tokenizer is not None:
y = self.tokenizer.EncodeAsIds(y, self.preprocess_fn)
elif self.preprocess_fn is not None:
y = self.preprocess_fn(y)
return {'text': x, 'length': len(x), 'label': y}
|
process and tokenize a string and return the string, label, and string length
|
__getitem__
|
python
|
THUDM/GLM
|
data_utils/datasets.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/datasets.py
|
MIT
|
def write(self, writer_gen=None, path=None, skip_header=False):
"""
given a generator of metrics for each of the data points X_i,
write the metrics, text, and labels to a csv file
"""
if path is None:
path = self.path + '.results'
print('generating csv at ' + path)
with open(path, 'w') as csvfile:
c = csv.writer(csvfile, delimiter=self.delim)
if writer_gen is not None:
# if first item of generator is a header of what the metrics mean then write header to csv file
if not skip_header:
header = (self.label_key,) + tuple(next(writer_gen)) + (self.text_key,)
c.writerow(header)
for i, row in enumerate(writer_gen):
row = (self.Y[i],) + tuple(row) + (self.X[i],)
c.writerow(row)
else:
c.writerow([self.label_key, self.text_key])
for row in zip(self.Y, self.X):
c.writerow(row)
|
given a generator of metrics for each of the data points X_i,
write the metrics, text, and labels to a csv file
|
write
|
python
|
THUDM/GLM
|
data_utils/datasets.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/datasets.py
|
MIT
|
def __getitem__(self, index):
"""gets the index'th string from the dataset"""
x = self.X[index]
if self.tokenizer is not None:
x = self.tokenizer.EncodeAsIds(x, self.preprocess_fn)
elif self.preprocess_fn is not None:
x = self.preprocess_fn(x)
y = self.Y[index]
if isinstance(y, str):
if self.tokenizer is not None:
y = self.tokenizer.EncodeAsIds(y, self.preprocess_fn)
elif self.preprocess_fn is not None:
y = self.preprocess_fn(y)
return {'text': x, 'length': len(x), 'label': y}
|
gets the index'th string from the dataset
|
__getitem__
|
python
|
THUDM/GLM
|
data_utils/datasets.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/datasets.py
|
MIT
|
def write(self, writer_gen=None, path=None, skip_header=False):
"""
given a generator of metrics for each of the data points X_i,
write the metrics, text, and labels to a json file
"""
if path is None:
path = self.path + '.results'
jsons = []
if writer_gen is not None:
# if the first item of the generator is a header naming the metrics, use it for the json keys
def gen_helper():
keys = {}
keys[0] = self.label_key
if not skip_header:
for idx, k in enumerate(tuple(next(writer_gen))):
keys[idx + 1] = k
for i, row in enumerate(writer_gen):
if i == 0 and skip_header:
for idx, _ in enumerate(row):
keys[idx + 1] = 'metric_%d' % (idx,)
j = {}
for idx, v in enumerate((self.Y[i],) + tuple(row)):
k = keys[idx]
j[k] = v
yield j
else:
def gen_helper():
for y in self.Y:
j = {}
j[self.label_key] = y
yield j
def out_stream():
for i, j in enumerate(gen_helper()):
j[self.text_key] = self.X[i]
yield j
self.save_json_stream(path, out_stream())
|
given a generator of metrics for each of the data points X_i,
write the metrics, text, and labels to a json file
|
write
|
python
|
THUDM/GLM
|
data_utils/datasets.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/datasets.py
|
MIT
|
def __init__(self, ds, tokenizer,
max_seq_len=1024,
sample_across_doc=True,
non_sentence_start=0.0, filter_english=False, **kwargs):
"""
sentence_start: the stripped article must start with a complete sentence
"""
self.ds = ds
self.ds_len = len(self.ds)
self.num_samples = 1000 * self.ds_len
self.max_seq_len = max_seq_len
self.tokenizer = tokenizer
self.sample_across_doc = sample_across_doc
self.non_sentence_start = non_sentence_start
self.filter_english = filter_english
self.weighting, self.total_len = None, None
self.is_lazy = False
if self.filter_english:
import fasttext
self.model = fasttext.load_model('/mnt/lid.176.bin')
print_rank_0("Load language detection model")
if hasattr(self.ds, 'is_lazy') and self.ds.is_lazy:
self.is_lazy = True
self.init_weighting()
|
sentence_start: the stripped article must start with a complete sentence
|
__init__
|
python
|
THUDM/GLM
|
data_utils/datasets.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/datasets.py
|
MIT
|
def __init__(self, ds, tokenizer,
max_seq_len=1024,
num_samples=None,
weighted=True,
sample_across_doc=True,
random_across_doc_sampling=True,
sentence_start=False, **kwargs):
"""
sentence_start: the stripped article must start with a complete sentence
"""
self.ds = ds
self.ds_len = len(self.ds)
self.num_samples = num_samples
if num_samples is None:
self.num_samples = 1000 * self.ds_len
self.max_seq_len = max_seq_len
self.tokenizer = tokenizer
self.weighted = weighted
self.sample_across_doc = sample_across_doc
self.random_across_doc_sampling = random_across_doc_sampling
self.sentence_start = sentence_start
self.weighting, self.total_len = None, None
self.is_lazy = False
if hasattr(self.ds, 'is_lazy') and self.ds.is_lazy:
self.is_lazy = True
self.init_weighting()
|
sentence_start: the stripped article must start with a complete sentence
|
__init__
|
python
|
THUDM/GLM
|
data_utils/datasets.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/datasets.py
|
MIT
|
def sentence_tokenize(self, sent, sentence_num=0, beginning=False, ending=False):
"""tokenize sentence and get token types"""
tokens = self.tokenizer.EncodeAsIds(sent).tokenization
str_type = 'str' + str(sentence_num)
token_types = [self.tokenizer.get_type(str_type).Id] * len(tokens)
return tokens, token_types
|
tokenize sentence and get token types
|
sentence_tokenize
|
python
|
THUDM/GLM
|
data_utils/datasets.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/datasets.py
|
MIT
|
def get_doc(self, idx):
"""gets text of document corresponding to idx"""
rtn = self.ds[idx]
if isinstance(rtn, dict):
rtn = rtn['text']
return rtn
|
gets text of document corresponding to idx
|
get_doc
|
python
|
THUDM/GLM
|
data_utils/datasets.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/datasets.py
|
MIT
|
def create_random_sentencepair(self, target_seq_length, rng, np_rng):
"""
fetches a random sentencepair corresponding to rng state similar to
https://github.com/google-research/bert/blob/master/create_pretraining_data.py#L248-L294
"""
is_random_next = None
curr_strs = []
curr_str_types = []
curr_len = 0
while curr_len < 1:
curr_len = 0
doc_a = None
while doc_a is None:
if self.weighted:
# doc_a_idx = np_rng.choice(self.ds_len, p=self.weighting)
doc_a_idx = self.get_weighted_samples(np_rng)
else:
doc_a_idx = rng.randint(0, self.ds_len - 1)
doc_a = self.sentence_split(self.get_doc(doc_a_idx))
if not doc_a:
doc_a = None
random_start_a = rng.randint(0, len(doc_a) - 1)
while random_start_a < len(doc_a):
sentence = doc_a[random_start_a]
sentence, sentence_types = self.sentence_tokenize(sentence, 0, random_start_a == 0,
random_start_a == len(doc_a))
curr_strs.append(sentence)
curr_str_types.append(sentence_types)
curr_len += len(sentence)
if random_start_a == len(doc_a) - 1 or curr_len >= target_seq_length:
break
random_start_a = (random_start_a + 1)
if curr_strs:
num_a = 1
if len(curr_strs) >= 2:
num_a = rng.randint(0, len(curr_strs))
tokens_a = []
token_types_a = []
for j in range(num_a):
tokens_a.extend(curr_strs[j])
token_types_a.extend(curr_str_types[j])
tokens_b = []
token_types_b = []
is_random_next = False
if len(curr_strs) == 1 or rng.random() < 0.5:
is_random_next = True
target_b_length = target_seq_length - len(tokens_a)
b_len = 0
while b_len < 1:
doc_b = None
while doc_b is None:
doc_b_idx = rng.randint(0, self.ds_len - 2)
doc_b_idx += int(doc_b_idx >= doc_a_idx)
doc_b = self.sentence_split(self.get_doc(doc_b_idx))
if not doc_b:
doc_b = None
random_start_b = rng.randint(0, len(doc_b) - 1)
while random_start_b < len(doc_b):
sentence_b = doc_b[random_start_b]
new_b_tokens, new_b_types = self.sentence_tokenize(sentence_b, 1, random_start_b == 0,
random_start_b == len(doc_b))
b_len += len(new_b_tokens)
tokens_b.extend(new_b_tokens)
token_types_b.extend(new_b_types)
if len(tokens_b) >= target_b_length:
break
random_start_b = (random_start_b + 1)
else:
is_random_next = False
for j in range(num_a, len(curr_strs)):
tokens_b.extend(curr_strs[j])
token_types_b.extend(curr_str_types[j])
return (tokens_a, token_types_a), (tokens_b, token_types_b), is_random_next
|
fetches a random sentencepair corresponding to rng state similar to
https://github.com/google-research/bert/blob/master/create_pretraining_data.py#L248-L294
|
create_random_sentencepair
|
python
|
THUDM/GLM
|
data_utils/datasets.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/datasets.py
|
MIT
|
def truncate_seq_pair(self, a, b, max_seq_len, rng):
"""
Truncate sequence pair according to original BERT implementation:
https://github.com/google-research/bert/blob/master/create_pretraining_data.py#L391
"""
tokens_a, token_types_a = a
tokens_b, token_types_b = b
max_num_tokens = max_seq_len - 3
while True:
len_a = len(tokens_a)
len_b = len(tokens_b)
total_length = len_a + len_b
if total_length <= max_num_tokens:
break
if len(tokens_a) > len(tokens_b):
trunc_tokens = tokens_a
trunc_types = token_types_a
else:
trunc_tokens = tokens_b
trunc_types = token_types_b
assert len(trunc_tokens) >= 1
if rng.random() < 0.5:
trunc_tokens.pop(0)
trunc_types.pop(0)
else:
trunc_tokens.pop()
trunc_types.pop()
return (tokens_a, token_types_a), (tokens_b, token_types_b)
|
Truncate sequence pair according to original BERT implementation:
https://github.com/google-research/bert/blob/master/create_pretraining_data.py#L391
|
truncate_seq_pair
|
python
|
THUDM/GLM
|
data_utils/datasets.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/datasets.py
|
MIT
|
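A small self-contained sketch of the same truncation rule, separated from the repo's tuple-based signature; names here are illustrative.

```python
import random

def truncate_pair(tokens_a, tokens_b, max_num_tokens, rng):
    """Trim the longer of the two lists, randomly dropping from the front
    or the back, until the pair fits (the same rule as truncate_seq_pair)."""
    while len(tokens_a) + len(tokens_b) > max_num_tokens:
        trunc = tokens_a if len(tokens_a) > len(tokens_b) else tokens_b
        if rng.random() < 0.5:
            trunc.pop(0)   # drop from the front
        else:
            trunc.pop()    # drop from the back
    return tokens_a, tokens_b

rng = random.Random(0)
print(truncate_pair(list(range(10)), list(range(6)), 8, rng))
```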
def mask_token(self, idx, tokens, types, vocab_words, rng):
"""
helper function to mask `idx` token from `tokens` according to
section 3.3.1 of https://arxiv.org/pdf/1810.04805.pdf
"""
label = tokens[idx]
if rng.random() < 0.8:
new_label = self.tokenizer.get_command('MASK').Id
else:
if rng.random() < 0.5:
new_label = label
else:
new_label = rng.choice(vocab_words)
tokens[idx] = new_label
return label
|
helper function to mask `idx` token from `tokens` according to
section 3.3.1 of https://arxiv.org/pdf/1810.04805.pdf
|
mask_token
|
python
|
THUDM/GLM
|
data_utils/datasets.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/datasets.py
|
MIT
|
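A hedged standalone sketch of the 80/10/10 masking decision above; MASK_ID and the helper name are placeholders, not the tokenizer's real command-token ids.

```python
import random

MASK_ID = 103  # illustrative id standing in for the [MASK] command token

def mask_one(tokens, idx, vocab_ids, rng):
    """Replace tokens[idx] with MASK_ID 80% of the time, keep it 10% of the
    time, and swap in a random vocab id 10% of the time (BERT section 3.3.1)."""
    label = tokens[idx]
    if rng.random() < 0.8:
        tokens[idx] = MASK_ID
    elif rng.random() < 0.5:
        pass                          # keep the original token
    else:
        tokens[idx] = rng.choice(vocab_ids)
    return label                      # the prediction target

rng = random.Random(42)
toks = [5, 17, 9]
print(mask_one(toks, 1, list(range(200)), rng), toks)
```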
def pad_seq(self, seq):
"""helper function to pad sequence pair"""
num_pad = max(0, self.max_seq_len - len(seq))
pad_mask = [0] * len(seq) + [1] * num_pad
seq += [self.tokenizer.get_command('pad').Id] * num_pad
return seq, pad_mask
|
helper function to pad sequence pair
|
pad_seq
|
python
|
THUDM/GLM
|
data_utils/datasets.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/datasets.py
|
MIT
|
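A tiny self-contained version of the padding-plus-mask idea, with a placeholder pad id instead of the tokenizer's 'pad' command token.

```python
PAD_ID = 0  # illustrative pad token id

def pad_to(seq, max_len, pad_id=PAD_ID):
    """Right-pad `seq` to `max_len` and return (padded, pad_mask), where the
    mask is 0 over real tokens and 1 over padding, as in pad_seq."""
    num_pad = max(0, max_len - len(seq))
    return seq + [pad_id] * num_pad, [0] * len(seq) + [1] * num_pad

print(pad_to([7, 8, 9], 6))  # ([7, 8, 9, 0, 0, 0], [0, 0, 0, 1, 1, 1])
```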
def create_masked_lm_predictions(self, a, b, mask_lm_prob, max_preds_per_seq, vocab_words, rng):
"""
Mask sequence pair for BERT training according to:
https://github.com/google-research/bert/blob/master/create_pretraining_data.py#L338
"""
tokens_a, token_types_a = a
tokens_b, token_types_b = b
tokens = [self.tokenizer.get_command('ENC').Id] + tokens_a + [
self.tokenizer.get_command('sep').Id] + tokens_b + [self.tokenizer.get_command('sep').Id]
token_types = [token_types_a[0]] + token_types_a + [token_types_a[0]] + token_types_b + [token_types_b[0]]
len_a = len(tokens_a)
len_b = len(tokens_b)
cand_indices = [idx + 1 for idx in range(len_a)] + [idx + 2 + len_a for idx in range(len_b)]
rng.shuffle(cand_indices)
output_tokens, pad_mask = self.pad_seq(list(tokens))
output_types, _ = self.pad_seq(list(token_types))
num_to_predict = min(max_preds_per_seq, max(1, int(round(len(tokens) * mask_lm_prob))))
mask = [0] * len(output_tokens)
mask_labels = [-1] * len(output_tokens)
for idx in sorted(cand_indices[:num_to_predict]):
mask[idx] = 1
label = self.mask_token(idx, output_tokens, output_types, vocab_words, rng)
mask_labels[idx] = label
return (output_tokens, output_types), mask, mask_labels, pad_mask
|
Mask sequence pair for BERT training according to:
https://github.com/google-research/bert/blob/master/create_pretraining_data.py#L338
|
create_masked_lm_predictions
|
python
|
THUDM/GLM
|
data_utils/datasets.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/datasets.py
|
MIT
|
def filename_to_url(filename, cache_dir=None):
"""
Return the url and etag (which may be ``None``) stored for `filename`.
Raise ``EnvironmentError`` if `filename` or its stored metadata do not exist.
"""
if cache_dir is None:
cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
cache_path = os.path.join(cache_dir, filename)
if not os.path.exists(cache_path):
raise EnvironmentError("file {} not found".format(cache_path))
meta_path = cache_path + '.json'
if not os.path.exists(meta_path):
raise EnvironmentError("file {} not found".format(meta_path))
with open(meta_path, encoding="utf-8") as meta_file:
metadata = json.load(meta_file)
url = metadata['url']
etag = metadata['etag']
return url, etag
|
Return the url and etag (which may be ``None``) stored for `filename`.
Raise ``EnvironmentError`` if `filename` or its stored metadata do not exist.
|
filename_to_url
|
python
|
THUDM/GLM
|
data_utils/file_utils.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/file_utils.py
|
MIT
|
def cached_path(url_or_filename, cache_dir=None):
"""
Given something that might be a URL (or might be a local path),
determine which. If it's a URL, download the file and cache it, and
return the path to the cached file. If it's already a local path,
make sure the file exists and then return the path.
"""
if cache_dir is None:
cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
if sys.version_info[0] == 3 and isinstance(url_or_filename, Path):
url_or_filename = str(url_or_filename)
if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
parsed = urlparse(url_or_filename)
if parsed.scheme in ('http', 'https', 's3'):
# URL, so get it from the cache (downloading if necessary)
return get_from_cache(url_or_filename, cache_dir)
elif os.path.exists(url_or_filename):
# File, and it exists.
return url_or_filename
elif parsed.scheme == '':
# File, but it doesn't exist.
raise EnvironmentError("file {} not found".format(url_or_filename))
else:
# Something unknown
raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
|
Given something that might be a URL (or might be a local path),
determine which. If it's a URL, download the file and cache it, and
return the path to the cached file. If it's already a local path,
make sure the file exists and then return the path.
|
cached_path
|
python
|
THUDM/GLM
|
data_utils/file_utils.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/file_utils.py
|
MIT
|
def split_s3_path(url):
"""Split a full s3 path into the bucket name and path."""
parsed = urlparse(url)
if not parsed.netloc or not parsed.path:
raise ValueError("bad s3 path {}".format(url))
bucket_name = parsed.netloc
s3_path = parsed.path
# Remove '/' at beginning of path.
if s3_path.startswith("/"):
s3_path = s3_path[1:]
return bucket_name, s3_path
|
Split a full s3 path into the bucket name and path.
|
split_s3_path
|
python
|
THUDM/GLM
|
data_utils/file_utils.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/file_utils.py
|
MIT
|
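The same bucket/key split can be exercised directly with urllib; this sketch mirrors the function above and is only illustrative.

```python
from urllib.parse import urlparse

def split_s3(url):
    """Split s3://bucket/key into (bucket, key), mirroring split_s3_path."""
    parsed = urlparse(url)
    if not parsed.netloc or not parsed.path:
        raise ValueError("bad s3 path {}".format(url))
    s3_path = parsed.path
    if s3_path.startswith("/"):
        s3_path = s3_path[1:]   # drop the leading slash
    return parsed.netloc, s3_path

print(split_s3("s3://my-bucket/models/vocab.json"))
# ('my-bucket', 'models/vocab.json')
```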
def s3_request(func):
"""
Wrapper function for s3 requests in order to create more helpful error
messages.
"""
@wraps(func)
def wrapper(url, *args, **kwargs):
try:
return func(url, *args, **kwargs)
except ClientError as exc:
if int(exc.response["Error"]["Code"]) == 404:
raise EnvironmentError("file {} not found".format(url))
else:
raise
return wrapper
|
Wrapper function for s3 requests in order to create more helpful error
messages.
|
s3_request
|
python
|
THUDM/GLM
|
data_utils/file_utils.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/file_utils.py
|
MIT
|
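To show the decorator pattern without pulling in boto3, this sketch substitutes a stand-in exception for botocore's ClientError; everything here is illustrative, not the library's API.

```python
from functools import wraps

class FakeClientError(Exception):
    """Stand-in for botocore's ClientError so the sketch runs without boto3."""
    def __init__(self, code):
        self.response = {"Error": {"Code": str(code)}}

def friendly_404(func):
    """In the spirit of s3_request: turn a 404 into a clearer error message."""
    @wraps(func)
    def wrapper(url, *args, **kwargs):
        try:
            return func(url, *args, **kwargs)
        except FakeClientError as exc:
            if int(exc.response["Error"]["Code"]) == 404:
                raise EnvironmentError("file {} not found".format(url))
            raise
    return wrapper

@friendly_404
def fetch(url):
    raise FakeClientError(404)   # simulate a missing S3 object

try:
    fetch("s3://bucket/missing.txt")
except EnvironmentError as e:
    print(e)  # file s3://bucket/missing.txt not found
```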
def get_from_cache(url, cache_dir=None):
"""
Given a URL, look for the corresponding dataset in the local cache.
If it's not there, download it. Then return the path to the cached file.
"""
if cache_dir is None:
cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
# Get eTag to add to filename, if it exists.
if url.startswith("s3://"):
etag = s3_etag(url)
else:
response = requests.head(url, allow_redirects=True)
if response.status_code != 200:
raise IOError("HEAD request failed for url {} with status code {}"
.format(url, response.status_code))
etag = response.headers.get("ETag")
filename = url_to_filename(url, etag)
# get cache path to put the file
cache_path = os.path.join(cache_dir, filename)
if not os.path.exists(cache_path):
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with tempfile.NamedTemporaryFile() as temp_file:
logger.info("%s not found in cache, downloading to %s", url, temp_file.name)
# GET file object
if url.startswith("s3://"):
s3_get(url, temp_file)
else:
http_get(url, temp_file)
# we are copying the file before closing it, so flush to avoid truncation
temp_file.flush()
# shutil.copyfileobj() starts at the current position, so go to the start
temp_file.seek(0)
logger.info("copying %s to cache at %s", temp_file.name, cache_path)
with open(cache_path, 'wb') as cache_file:
shutil.copyfileobj(temp_file, cache_file)
logger.info("creating metadata file for %s", cache_path)
meta = {'url': url, 'etag': etag}
meta_path = cache_path + '.json'
with open(meta_path, 'w', encoding="utf-8") as meta_file:
json.dump(meta, meta_file)
logger.info("removing temp file %s", temp_file.name)
return cache_path
|
Given a URL, look for the corresponding dataset in the local cache.
If it's not there, download it. Then return the path to the cached file.
|
get_from_cache
|
python
|
THUDM/GLM
|
data_utils/file_utils.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/file_utils.py
|
MIT
|
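The key design choice above is writing to a temporary file and only copying it into the cache once the download finishes, so an interrupted transfer never leaves a truncated cache entry. A network-free sketch of that pattern (names are illustrative):

```python
import os
import shutil
import tempfile

def atomic_cache_write(produce, cache_path):
    """Write into a temp file first, then copy it into the cache, the same
    pattern get_from_cache uses for HTTP/S3 downloads."""
    with tempfile.NamedTemporaryFile() as temp_file:
        produce(temp_file)          # in the real code: http_get or s3_get
        temp_file.flush()           # copying before closing, so flush first
        temp_file.seek(0)           # copyfileobj starts at the current position
        with open(cache_path, "wb") as cache_file:
            shutil.copyfileobj(temp_file, cache_file)

cache = os.path.join(tempfile.gettempdir(), "demo.cache")
atomic_cache_write(lambda f: f.write(b"hello"), cache)
print(open(cache, "rb").read())  # b'hello'
```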
def read_set_from_file(filename):
'''
Extract a de-duped collection (set) of text from a file.
Expected file format is one item per line.
'''
collection = set()
with open(filename, 'r', encoding='utf-8') as file_:
for line in file_:
collection.add(line.rstrip())
return collection
|
Extract a de-duped collection (set) of text from a file.
Expected file format is one item per line.
|
read_set_from_file
|
python
|
THUDM/GLM
|
data_utils/file_utils.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/file_utils.py
|
MIT
|
def exists_lazy(path, data_type='data'):
"""
Check if we've already made a lazy version of this file for the `data_type` field.
"""
if not os.path.exists(get_lazy_path(path)):
return False
contents = os.listdir(get_lazy_path(path))
if data_type not in contents:
return False
if data_type + '.len.pkl' not in contents:
return False
return True
|
Check if we've already made a lazy version of this file for the `data_type` field.
|
exists_lazy
|
python
|
THUDM/GLM
|
data_utils/lazy_loader.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/lazy_loader.py
|
MIT
|
def SetTokenizer(self, tokenizer):
"""
logic to set and remove (set to None) tokenizer.
combines preprocessing/tokenization into one callable.
"""
if tokenizer is None:
if not hasattr(self, '_tokenizer'):
self._tokenizer = tokenizer
else:
self._tokenizer = tokenizer
self.map_fn = ProcessorTokenizer(tokenizer, self.process_fn)
|
logic to set and remove (set to None) tokenizer.
combines preprocessing/tokenization into one callable.
|
SetTokenizer
|
python
|
THUDM/GLM
|
data_utils/lazy_loader.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/lazy_loader.py
|
MIT
|
def __getitem__(self, index):
"""
read file and splice strings based on string ending array `self.ends`
"""
if not isinstance(index, slice):
if index == 0:
start = 0
else:
start = self.ends[index - 1]
end = self.ends[index]
rtn = self.file_read(start, end)
if self.map_fn is not None:
rtn = self.map_fn(rtn)
else:
# if slice, fetch strings with 1 diskread and then splice in memory
chr_lens = self.ends[index]
if index.start == 0 or index.start is None:
start = 0
else:
start = self.ends[index.start - 1]
stop = chr_lens[-1]
strings = self.file_read(start, stop)
rtn = split_strings(strings, start, chr_lens)
if self.map_fn is not None:
rtn = [self.map_fn(s) for s in rtn]
return rtn
|
read file and splice strings based on string ending array `self.ends`
|
__getitem__
|
python
|
THUDM/GLM
|
data_utils/lazy_loader.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/lazy_loader.py
|
MIT
|
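A minimal sketch of indexing by cumulative end offsets, which is what the lazy __getitem__ does against the on-disk file; here the "file" is just an in-memory string and the names are illustrative.

```python
def item_from_ends(flat, ends, index):
    """Recover item `index` from the flat concatenation `flat` using the
    cumulative end offsets `ends`, as the lazy loader's __getitem__ does."""
    start = 0 if index == 0 else ends[index - 1]
    return flat[start:ends[index]]

docs = ["alpha", "beta", "gamma"]
flat = "".join(docs)
ends, total = [], 0
for d in docs:
    total += len(d)
    ends.append(total)         # ends == [5, 9, 14]
print(item_from_ends(flat, ends, 1))  # beta
```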
def _batch(self, batch):
"""extracts samples only pertaining to this worker's batch"""
start = self.rank*self.batch_size//self.world_size
end = (self.rank+1)*self.batch_size//self.world_size
return batch[start:end]
|
extracts samples only pertaining to this worker's batch
|
_batch
|
python
|
THUDM/GLM
|
data_utils/samplers.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/samplers.py
|
MIT
|
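The start/end arithmetic above hands each rank a contiguous, non-overlapping slice of the global batch. A quick standalone illustration (the function name is made up):

```python
def rank_slice(batch, rank, world_size):
    """Give each worker its contiguous share of a global batch, matching the
    start/end arithmetic in _batch."""
    bs = len(batch)
    start = rank * bs // world_size
    end = (rank + 1) * bs // world_size
    return batch[start:end]

global_batch = list(range(8))
print([rank_slice(global_batch, r, 4) for r in range(4)])
# [[0, 1], [2, 3], [4, 5], [6, 7]]
```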
def data_iterator(self, _iter, wrap_around=False):
"""iterates through data and handles wrap around"""
for i, idx in enumerate(_iter):
if i < self.wrap_around%self.batch_size:
continue
if wrap_around:
self.wrap_around += 1
self.wrap_around %= self.batch_size
yield idx
|
iterates through data and handles wrap around
|
data_iterator
|
python
|
THUDM/GLM
|
data_utils/samplers.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/samplers.py
|
MIT
|
def _batch(self, batch):
"""extracts samples only pertaining to this worker's batch"""
start = self.rank*self.batch_size//self.world_size
end = (self.rank+1)*self.batch_size//self.world_size
return batch[start:end]
|
extracts samples only pertaining to this worker's batch
|
_batch
|
python
|
THUDM/GLM
|
data_utils/samplers.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/samplers.py
|
MIT
|
def make_tokenizer(tokenizer_type, corpus, model_path=None, vocab_size=None, model_type=None, pad_token=0,
character_coverage=1.0, command_tokens=None, type_tokens=None, fix_command_token=False, **kwargs):
"""
Helper function to instantiate a tokenizer given common combinations of options.
"""
tokenizer_class = tokenizer_type
if isinstance(tokenizer_class, str):
tokenizer_class = eval(tokenizer_class)
if tokenizer_class is BertWordPieceTokenizer:
return BertWordPieceTokenizer(model_type, **kwargs)
elif tokenizer_class is GPT2BPETokenizer:
if model_type is None:
model_type = 'gpt2'
return GPT2BPETokenizer(model_type, **kwargs)
elif tokenizer_class is ChineseSPTokenizer:
return ChineseSPTokenizer(fix_command_token=fix_command_token, **kwargs)
text_tokenizer = tokenizer_class(corpus=corpus, vocab_size=vocab_size, model_path=model_path, model_type=model_type,
pad_token=pad_token, character_coverage=character_coverage)
return Tokenizer(text_tokenizer, command_tokens, type_tokens)
|
Helper function to instantiate a tokenizer given common combinations of options.
|
make_tokenizer
|
python
|
THUDM/GLM
|
data_utils/tokenization.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/tokenization.py
|
MIT
|
def EncodeAsIds(self, text, process_fn=None):
"""
encode text using text tokenizer and shift Id values for command tokens
"""
processed_text = text
if process_fn is not None:
processed_text = process_fn(processed_text)
def split_on_token(tok_extended: CommandToken, text):
result = []
tok = tok_extended.token
split_text = text.split(tok)
for i, sub_text in enumerate(split_text):
# CommandToken can control whitespace stripping around them.
# We use them for GPT2 and Roberta to have different behavior depending on the special token
# Cf. https://github.com/huggingface/transformers/pull/2778
# and https://github.com/huggingface/transformers/issues/3788
# Strip white spaces on the right
if tok_extended.rstrip and i > 0:
# A bit counter-intuitive but we strip the left of the string
# since tok_extended.rstrip means the special token is eating all white spaces on its right
sub_text = sub_text.lstrip()
# Strip white spaces on the left
if tok_extended.lstrip and i < len(split_text) - 1:
sub_text = sub_text.rstrip() # Opposite here
if i == 0 and not sub_text:
result.append(tok)
elif i == len(split_text) - 1:
if sub_text:
result.append(sub_text)
else:
pass
else:
if sub_text:
result.append(sub_text)
result.append(tok)
return result
def split_on_tokens(tok_list, text):
if not text.strip():
return []
if not tok_list:
return self.text_tokenizer.encode(text)
tokenized_text = []
text_list = [text]
for tok in tok_list:
tokenized_text = []
for sub_text in text_list:
if sub_text not in self._command_token_tokens:
tokenized_text.extend(split_on_token(tok, sub_text))
else:
tokenized_text.append(sub_text)
text_list = tokenized_text
return list(
itertools.chain.from_iterable(
(
self._encode(token) if token not in self._command_token_tokens else [
self.command_token_map[token].Id] for token in tokenized_text
)
)
)
no_split_tokens = self._command_tokens
Ids = split_on_tokens(no_split_tokens, processed_text)
tokenization = Tokenization(Ids, processed_text, text)
tokenization.set_command_tokens(self._command_tokens)
return tokenization
|
encode text using text tokenizer and shift Id values for command tokens
|
EncodeAsIds
|
python
|
THUDM/GLM
|
data_utils/tokenization.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/tokenization.py
|
MIT
|
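A simplified, self-contained sketch of the split-on-special-tokens step that EncodeAsIds performs before handing text to the sub-tokenizer; it ignores the lstrip/rstrip handling and the token strings are made up.

```python
def split_on_specials(text, specials):
    """Cut `text` into plain-text spans and special-token markers so the
    markers can later be mapped to fixed ids instead of being sub-tokenized."""
    pieces = [text]
    for tok in specials:
        next_pieces = []
        for piece in pieces:
            if piece in specials:
                next_pieces.append(piece)   # already a marker, leave it alone
                continue
            parts = piece.split(tok)
            for i, part in enumerate(parts):
                if part:
                    next_pieces.append(part)
                if i < len(parts) - 1:
                    next_pieces.append(tok)
        pieces = next_pieces
    return pieces

print(split_on_specials("Hello [MASK] world [sep] bye", ["[MASK]", "[sep]"]))
# ['Hello ', '[MASK]', ' world ', '[sep]', ' bye']
```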
def EncodeAsTokens(self, text, process_fn=None):
"""
encode text as tokens using text tokenizer
"""
tokenization = self.text_tokenizer.EncodeAsTokens(text, process_fn=process_fn)
tokenization.set_command_tokens(self._command_tokens)
return tokenization
|
encode text as tokens using text tokenizer
|
EncodeAsTokens
|
python
|
THUDM/GLM
|
data_utils/tokenization.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/tokenization.py
|
MIT
|
def IdToToken(self, Id, type_token=False):
"""convert Id to token accounting for command and type tokens"""
if isinstance(Id, (TypeToken, CommandToken)):
return Id.token
if type_token:
return self.type_id_map[Id].token
if Id < self.num_command_tokens:
return self.command_id_map[Id].token
return self.text_tokenizer.IdToToken(Id - self.num_command_tokens)
|
convert Id to token accounting for command and type tokens
|
IdToToken
|
python
|
THUDM/GLM
|
data_utils/tokenization.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/tokenization.py
|
MIT
|
def TokenToId(self, token, type_token=False):
"""convert token to Id accounting for command and type tokens"""
if isinstance(token, (TypeToken, CommandToken)):
return token.Id
if type_token:
return self.type_token_map[token].Id
if token in self.command_token_map:
return self.command_token_map[token].Id
return self.text_tokenizer.TokenToId(token) + self.num_command_tokens
|
convert token to Id accounting for command and type tokens
|
TokenToId
|
python
|
THUDM/GLM
|
data_utils/tokenization.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/tokenization.py
|
MIT
|
def DecodeIds(self, Ids, type_token=False):
"""
convert Ids to tokens accounting for command and type tokens, tokens
are joined and returned as a string.
"""
if type_token:
return ' '.join(Id.token if isinstance(Id, TypeToken) else self.type_id_map[Id].token for Id in Ids)
rtn_strs = []
current_str = []
if isinstance(Ids, Tokenization):
Ids = Ids.tokenization
for Id in Ids:
if isinstance(Id, CommandToken):
rtn_strs.append(self.text_tokenizer.DecodeIds(current_str))
current_str = []
rtn_strs.append(Id.token)
elif Id < self.num_command_tokens:
rtn_strs.append(self.text_tokenizer.DecodeIds(current_str))
current_str = []
rtn_strs.append(self.command_id_map[Id].token)
else:
current_str.append(Id - self.num_command_tokens)
if current_str != []:
rtn_strs.append(self.text_tokenizer.DecodeIds(current_str))
return ' '.join(rtn_strs)
|
convert Ids to tokens accounting for command and type tokens, tokens
are joined and returned as a string.
|
DecodeIds
|
python
|
THUDM/GLM
|
data_utils/tokenization.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/tokenization.py
|
MIT
|
def DecodeTokens(self, Tokens, type_token=False):
"""
convert tokens to a string accounting for command and type tokens.
"""
if type_token:
return ' '.join(t.token if isinstance(t, TypeToken) else t for t in Tokens)
rtn_strs = []
current_str = []
if isinstance(Tokens, Tokenization):
Tokens = Tokens.tokenization
for t in Tokens:
if isinstance(t, CommandToken):
rtn_strs.append(self.text_tokenizer.DecodeTokens(current_str))
current_str = []
rtn_strs.append(t.token)
elif t in self.command_token_map:
rtn_strs.append(self.text_tokenizer.DecodeTokens(current_str))
current_str = []
rtn_strs.append(t)
else:
current_str.append(t)
if current_str != []:
rtn_strs.append(self.text_tokenizer.DecodeTokens(current_str))
return ' '.join(rtn_strs)
|
convert tokens to a string accounting for command and type tokens.
|
DecodeTokens
|
python
|
THUDM/GLM
|
data_utils/tokenization.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/tokenization.py
|
MIT
|
def DecodeIds(self, Ids):
"""converts ascii ids to tokens before joining them into text"""
if isinstance(Ids, Tokenization):
Ids = Ids.tokenization
return ''.join([self.IdToToken(tok) for tok in Ids])
|
converts ascii ids to tokens before joining them into text
|
DecodeIds
|
python
|
THUDM/GLM
|
data_utils/tokenization.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/tokenization.py
|
MIT
|
def DecodeTokens(self, Tokens):
"""just concatenates ascii tokens into text"""
if isinstance(Tokens, Tokenization):
Tokens = Tokens.tokenization
return ''.join(Tokens)
|
just concatenates ascii tokens into text
|
DecodeTokens
|
python
|
THUDM/GLM
|
data_utils/tokenization.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/tokenization.py
|
MIT
|
def get_corpus_freq(dataset, filepath, filetype='tsv'):
"""
Take corpus, split it into sentences, and extract word frequencies.
Write frequencies to `filepath` as a tsv. Only write the first
MAX_SENTENCEPIECE_SENTENCES most common words to the file.
"""
nltk.download('punkt', download_dir="./nltk")
if filetype == 'tsv':
delimiter = '\t'
else:
delimiter = ','
print("compute corpus frequency\n", flush=True)
total_sentence_count = 0
maxlen = 0
freqs = {}
for entry in dataset:
if isinstance(entry, dict):
entry = entry['text']
lines = entry.strip().split('\n')
for line in lines:
sentences = nltk_tokenize.sent_tokenize(line)
total_sentence_count += len(sentences)
for sentence in sentences:
maxlen = max(len(line), maxlen)
for word in sentence.split():
if word not in freqs:
freqs[word] = 0
freqs[word] += 1
print("length of freqs before truncating " + str(len(freqs)), flush=True)
print("file path for freq " + str(filepath), flush=True)
freqs_sorted = {}
counter = 0
for word, count in sorted(freqs.items(), key=lambda x: x[1], reverse=True):
if counter >= MAX_SENTENCEPIECE_SENTENCES:
break
counter += 1
freqs_sorted[word] = count
print("length of freqs after trancating " + str(len(freqs_sorted)), flush=True)
with open(filepath, 'w') as f:
writer = csv.writer(f, delimiter=delimiter)
for k, v in freqs_sorted.items():
writer.writerow([str(k), str(v)])
return total_sentence_count, maxlen
|
Take corpus, split it into sentences, and extract word frequencies.
Write frequencies to `filepath` as a tsv. Only write the first
MAX_SENTENCEPIECE_SENTENCES most common words to the file.
|
get_corpus_freq
|
python
|
THUDM/GLM
|
data_utils/tokenization.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/tokenization.py
|
MIT
|
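A compact sketch of the same frequency-counting idea using collections.Counter; it skips the NLTK sentence splitting and the MAX_SENTENCEPIECE_SENTENCES cap, and the function name is illustrative.

```python
import csv
import os
import tempfile
from collections import Counter

def word_freq_tsv(docs, filepath, top_k=10):
    """Count whitespace-delimited word frequencies across `docs` and write the
    top_k most common to a tsv (a simplified take on get_corpus_freq)."""
    freqs = Counter()
    for doc in docs:
        for line in doc.strip().split("\n"):
            freqs.update(line.split())
    with open(filepath, "w", newline="") as f:
        writer = csv.writer(f, delimiter="\t")
        for word, count in freqs.most_common(top_k):
            writer.writerow([word, count])
    return len(freqs)

out_path = os.path.join(tempfile.gettempdir(), "freqs.tsv")
print(word_freq_tsv(["the cat sat\nthe cat ran", "a dog ran"], out_path))  # 6 distinct words
```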
def load_spm_model(self):
"""load sentencepiece model and parse vocab"""
if not os.path.exists(self.spm_model) and not self.spm_model.endswith('.model'):
self.spm_model = self.spm_model + '.model'
self.sp = spm.SentencePieceProcessor()
self.sp.Load(self.spm_model)
self.vocab_size = self.num_text_tokens = len(self.sp)
self._tokens = [self.IdToToken(t) for t in range(self.vocab_size)]
self._vocab = {t: i for i, t in enumerate(self._tokens)}
|
load sentencepiece model and parse vocab
|
load_spm_model
|
python
|
THUDM/GLM
|
data_utils/tokenization.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/tokenization.py
|
MIT
|
def Train(self, corpus, num_text_tokens):
"""train sentencepiece model on corpus using word frequencies"""
self.num_text_tokens = num_text_tokens
use_model_path = self.spm_model
random_hash = str(random.randint(0, 2147483647))
if use_model_path is None:
use_model_path = random_hash
if use_model_path.endswith('.model'):
use_model_path = use_model_path[:use_model_path.rfind('.model')]
input_path = use_model_path + '.tsv.' + random_hash
line_count, maxlenline = get_corpus_freq(corpus, input_path)
line_count = min(line_count, MAX_SENTENCEPIECE_SENTENCES)
print('line count used as input_sentence_size ', line_count, flush=True)
print('training sentencepiece model', flush=True)
train_string = '--input={file_path} --model_prefix={model_prefix} --vocab_size={vocab_size}' \
+ ' --model_type={model_type} --character_coverage={character_coverage} ' \
+ '--input_sentence_size={input_sentence_size} ' \
+ '--input_format=tsv'
train_string = train_string.format(file_path=input_path, model_prefix=use_model_path,
vocab_size=num_text_tokens,
model_type=self.model_type, character_coverage=self.character_coverage,
input_sentence_size=int(line_count)) # , #)#,
print("calling spm.SentencePieceTrainer.Train(%s)" % (train_string), flush=True)
spm.SentencePieceTrainer.Train(train_string)
os.remove(input_path)
self.spm_model = use_model_path + '.model'
print('sentencepiece model written to ' + self.spm_model, flush=True)
|
train sentencepiece model on corpus using word frequencies
|
Train
|
python
|
THUDM/GLM
|
data_utils/tokenization.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/tokenization.py
|
MIT
|
def DecodeTokens(self, Tokens):
"""converts sentencepiece tokens to a text string"""
if isinstance(Tokens, Tokenization):
Tokens = Tokens.tokenization
return self.sp.DecodeTokens(Tokens)
|
converts sentencepiece tokens to a text string
|
DecodeTokens
|
python
|
THUDM/GLM
|
data_utils/tokenization.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/tokenization.py
|
MIT
|
def DecodeIds(self, Ids, type_token=False):
"""converts ids to wordpiece tokens and joins them as a text string"""
if type_token:
return ' '.join(Id.token if isinstance(Id, TypeToken) else self.type_id_map[Id].token for Id in Ids)
if isinstance(Ids, Tokenization):
Ids = Ids.tokenization
Tokens = []
for Id in Ids:
if Id in self.command_id_map:
Tokens.append(self.command_id_map[Id].token)
elif Id in self.text_tokenizer.ids_to_tokens:
Tokens.append(self.text_tokenizer.ids_to_tokens[Id])
new_tokens = []
for token in Tokens:
if token.startswith('##') and len(new_tokens) > 0:
new_tokens[-1] += token[2:]
else:
new_tokens.append(token)
return ' '.join(new_tokens)
|
converts ids to wordpiece tokens and joins them as a text string
|
DecodeIds
|
python
|
THUDM/GLM
|
data_utils/tokenization.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/tokenization.py
|
MIT
|
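The '##' merging at the end of DecodeIds can be shown on its own; a small illustrative sketch:

```python
def merge_wordpieces(pieces):
    """Glue '##' continuation pieces back onto the previous token, the same
    post-processing DecodeIds applies before joining with spaces."""
    merged = []
    for piece in pieces:
        if piece.startswith("##") and merged:
            merged[-1] += piece[2:]
        else:
            merged.append(piece)
    return " ".join(merged)

print(merge_wordpieces(["un", "##aff", "##able", "person"]))  # unaffable person
```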
def DecodeTokens(self, Tokens, type_token=False):
"""converts wordpiece tokens to a text string"""
if type_token:
return ' '.join(t.token if isinstance(t, TypeToken) else t for t in Tokens)
if isinstance(Tokens, Tokenization):
Tokens = Tokens.tokenization
return ' '.join(Tokens)
|
converts wordpiece tokens to a text string
|
DecodeTokens
|
python
|
THUDM/GLM
|
data_utils/tokenization.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/tokenization.py
|
MIT
|
def get_pairs(word):
"""Return set of symbol pairs in a word.
Word is represented as tuple of symbols (symbols being variable-length strings).
"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs
|
Return set of symbol pairs in a word.
Word is represented as tuple of symbols (symbols being variable-length strings).
|
get_pairs
|
python
|
THUDM/GLM
|
data_utils/tokenization_gpt2.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/tokenization_gpt2.py
|
MIT
|
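A quick usage illustration of the symbol-pair idea: the helper below mirrors get_pairs, and the "merge" shown afterwards is just one hand-picked BPE step for the example, not the real merge-rank lookup.

```python
def get_symbol_pairs(word):
    # mirrors get_pairs above: adjacent symbol pairs in a tuple of symbols
    return {(a, b) for a, b in zip(word, word[1:])}

word = ("l", "o", "w", "e", "r")
print(sorted(get_symbol_pairs(word)))
# [('e', 'r'), ('l', 'o'), ('o', 'w'), ('w', 'e')]

# one illustrative BPE step: merge the chosen pair ('e', 'r') into 'er'
merged = ("l", "o", "w", "er")
print(sorted(get_symbol_pairs(merged)))
# [('l', 'o'), ('o', 'w'), ('w', 'er')]
```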
def from_pretrained(cls, pretrained_model_name_or_path, cache_dir=None, *inputs, **kwargs):
"""
Instantiate a PreTrainedBertModel from a pre-trained model file.
Download and cache the pre-trained model file if needed.
"""
if pretrained_model_name_or_path in PRETRAINED_VOCAB_ARCHIVE_MAP:
vocab_file = PRETRAINED_VOCAB_ARCHIVE_MAP[pretrained_model_name_or_path]
merges_file = PRETRAINED_MERGES_ARCHIVE_MAP[pretrained_model_name_or_path]
special_tokens_file = None
else:
vocab_file = os.path.join(pretrained_model_name_or_path, VOCAB_NAME)
merges_file = os.path.join(pretrained_model_name_or_path, MERGES_NAME)
special_tokens_file = os.path.join(pretrained_model_name_or_path, SPECIAL_TOKENS_NAME)
if not os.path.exists(special_tokens_file):
special_tokens_file = None
else:
logger.info("loading special tokens file {}".format(special_tokens_file))
# redirect to the cache, if necessary
# try:
# resolved_vocab_file = cached_path(vocab_file, cache_dir=cache_dir)
# resolved_merges_file = cached_path(merges_file, cache_dir=cache_dir)
# except EnvironmentError:
# logger.error(
# "Model name '{}' was not found in model name list ({}). "
# "We assumed '{}' was a path or url but couldn't find files {} and {} "
# "at this path or url.".format(
# pretrained_model_name_or_path,
# ', '.join(PRETRAINED_VOCAB_ARCHIVE_MAP.keys()),
# pretrained_model_name_or_path,
# vocab_file, merges_file))
# return None
# if resolved_vocab_file == vocab_file and resolved_merges_file == merges_file:
# logger.info("loading vocabulary file {}".format(vocab_file))
# logger.info("loading merges file {}".format(merges_file))
# else:
# logger.info("loading vocabulary file {} from cache at {}".format(
# vocab_file, resolved_vocab_file))
# logger.info("loading merges file {} from cache at {}".format(
# merges_file, resolved_merges_file))
resolved_vocab_file = vocab_file
resolved_merges_file = merges_file
logger.info("loading vocabulary file {}".format(vocab_file))
logger.info("loading merges file {}".format(merges_file))
if pretrained_model_name_or_path in PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP:
# if we're using a pretrained model, ensure the tokenizer won't index sequences longer
# than the number of positional embeddings
max_len = PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP[pretrained_model_name_or_path]
kwargs['max_len'] = min(kwargs.get('max_len', int(1e12)), max_len)
# Instantiate tokenizer.
if special_tokens_file and 'special_tokens' not in kwargs:
special_tokens = open(special_tokens_file, encoding='utf-8').read().split('\n')[:-1]
else:
special_tokens = kwargs.pop('special_tokens', [])
tokenizer = cls(resolved_vocab_file, resolved_merges_file, special_tokens=special_tokens, *inputs, **kwargs)
return tokenizer
|
Instantiate a PreTrainedBertModel from a pre-trained model file.
Download and cache the pre-trained model file if needed.
|
from_pretrained
|
python
|
THUDM/GLM
|
data_utils/tokenization_gpt2.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/tokenization_gpt2.py
|
MIT
|
def set_special_tokens(self, special_tokens):
""" Add a list of additional tokens to the encoder.
The additional tokens are indexed starting from the last index of the
current vocabulary in the order of the `special_tokens` list.
"""
if not special_tokens:
self.special_tokens = {}
self.special_tokens_decoder = {}
return
self.special_tokens = dict((tok, len(self.encoder) + i) for i, tok in enumerate(special_tokens))
self.special_tokens_decoder = {v:k for k, v in self.special_tokens.items()}
logger.info("Special tokens {}".format(self.special_tokens))
|
Add a list of additional tokens to the encoder.
The additional tokens are indexed starting from the last index of the
current vocabulary in the order of the `special_tokens` list.
|
set_special_tokens
|
python
|
THUDM/GLM
|
data_utils/tokenization_gpt2.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/tokenization_gpt2.py
|
MIT
|
def convert_tokens_to_ids(self, tokens):
""" Converts a sequence of tokens into ids using the vocab. """
ids = []
if isinstance(tokens, str) or (sys.version_info[0] == 2 and isinstance(tokens, unicode)):
if tokens in self.special_tokens:
return self.special_tokens[tokens]
else:
return self.encoder.get(tokens, 0)
for token in tokens:
if token in self.special_tokens:
ids.append(self.special_tokens[token])
else:
ids.append(self.encoder.get(token, 0))
if len(ids) > self.max_len:
logger.warning(
"Token indices sequence length is longer than the specified maximum "
" sequence length for this OpenAI GPT model ({} > {}). Running this"
" sequence through the model will result in indexing errors".format(len(ids), self.max_len)
)
return ids
|
Converts a sequence of tokens into ids using the vocab.
|
convert_tokens_to_ids
|
python
|
THUDM/GLM
|
data_utils/tokenization_gpt2.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/tokenization_gpt2.py
|
MIT
|
def convert_ids_to_tokens(self, ids, skip_special_tokens=False):
"""Converts a sequence of ids in BPE tokens using the vocab."""
tokens = []
for i in ids:
if i in self.special_tokens_decoder:
if not skip_special_tokens:
tokens.append(self.special_tokens_decoder[i])
else:
tokens.append(self.decoder[i])
return tokens
|
Converts a sequence of ids in BPE tokens using the vocab.
|
convert_ids_to_tokens
|
python
|
THUDM/GLM
|
data_utils/tokenization_gpt2.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/tokenization_gpt2.py
|
MIT
|
def save_vocabulary(self, vocab_path):
"""Save the tokenizer vocabulary and merge files to a directory."""
if not os.path.isdir(vocab_path):
logger.error("Vocabulary path ({}) should be a directory".format(vocab_path))
return
vocab_file = os.path.join(vocab_path, VOCAB_NAME)
merge_file = os.path.join(vocab_path, MERGES_NAME)
special_tokens_file = os.path.join(vocab_path, SPECIAL_TOKENS_NAME)
with open(vocab_file, 'w', encoding='utf-8') as f:
f.write(json.dumps(self.encoder, ensure_ascii=False))
index = 0
with open(merge_file, "w", encoding="utf-8") as writer:
writer.write(u'#version: 0.2\n')
for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
if index != token_index:
logger.warning("Saving vocabulary to {}: BPE merge indices are not consecutive."
" Please check that the tokenizer is not corrupted!".format(merge_file))
index = token_index
writer.write(' '.join(bpe_tokens) + u'\n')
index += 1
index = len(self.encoder)
with open(special_tokens_file, 'w', encoding='utf-8') as writer:
for token, token_index in sorted(self.special_tokens.items(), key=lambda kv: kv[1]):
if index != token_index:
logger.warning("Saving special tokens vocabulary to {}: BPE indices are not consecutive."
" Please check that the tokenizer is not corrupted!".format(special_tokens_file))
index = token_index
writer.write(token + u'\n')
index += 1
return vocab_file, merge_file, special_tokens_file
|
Save the tokenizer vocabulary and merge files to a directory.
|
save_vocabulary
|
python
|
THUDM/GLM
|
data_utils/tokenization_gpt2.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/tokenization_gpt2.py
|
MIT
|
def whitespace_tokenize(text):
"""Runs basic whitespace cleaning and splitting on a piece of text."""
text = text.strip()
if not text:
return []
tokens = text.split()
return tokens
|
Runs basic whitespace cleaning and splitting on a piece of text.
|
whitespace_tokenize
|
python
|
THUDM/GLM
|
data_utils/wordpiece.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/wordpiece.py
|
MIT
|
def __init__(self, vocab_file, do_lower_case=True, max_len=None, do_basic_tokenize=True,
never_split=("[UNK]", "[SEP]", "[PAD]", "[CLS]", "[MASK]")):
"""Constructs a BertTokenizer.
Args:
vocab_file: Path to a one-wordpiece-per-line vocabulary file
do_lower_case: Whether to lower case the input
Only has an effect when do_wordpiece_only=False
do_basic_tokenize: Whether to do basic tokenization before wordpiece.
max_len: An artificial maximum length to truncate tokenized sequences to;
Effective maximum length is always the minimum of this
value (if specified) and the underlying BERT model's
sequence length.
never_split: List of tokens which will never be split during tokenization.
Only has an effect when do_wordpiece_only=False
"""
if not os.path.isfile(vocab_file):
raise ValueError(
"Can't find a vocabulary file at path '{}'. To load the vocabulary from a Google pretrained "
"model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file))
self.vocab = load_vocab(vocab_file)
self.ids_to_tokens = collections.OrderedDict(
[(ids, tok) for tok, ids in self.vocab.items()])
self.do_basic_tokenize = do_basic_tokenize
if do_basic_tokenize:
self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case,
never_split=never_split)
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)
self.max_len = max_len if max_len is not None else int(1e12)
|
Constructs a BertTokenizer.
Args:
vocab_file: Path to a one-wordpiece-per-line vocabulary file
do_lower_case: Whether to lower case the input
Only has an effect when do_wordpiece_only=False
do_basic_tokenize: Whether to do basic tokenization before wordpiece.
max_len: An artificial maximum length to truncate tokenized sequences to;
Effective maximum length is always the minimum of this
value (if specified) and the underlying BERT model's
sequence length.
never_split: List of tokens which will never be split during tokenization.
Only has an effect when do_wordpiece_only=False
|
__init__
|
python
|
THUDM/GLM
|
data_utils/wordpiece.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/wordpiece.py
|
MIT
|
def convert_tokens_to_ids(self, tokens):
"""Converts a sequence of tokens into ids using the vocab."""
ids = []
for token in tokens:
ids.append(self.vocab[token])
if len(ids) > self.max_len:
logger.warning(
"Token indices sequence length is longer than the specified maximum "
" sequence length for this BERT model ({} > {}). Running this"
" sequence through BERT will result in indexing errors".format(len(ids), self.max_len)
)
return ids
|
Converts a sequence of tokens into ids using the vocab.
|
convert_tokens_to_ids
|
python
|
THUDM/GLM
|
data_utils/wordpiece.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/wordpiece.py
|
MIT
|
def convert_ids_to_tokens(self, ids):
"""Converts a sequence of ids in wordpiece tokens using the vocab."""
tokens = []
for i in ids:
tokens.append(self.ids_to_tokens[i])
return tokens
|
Converts a sequence of ids in wordpiece tokens using the vocab.
|
convert_ids_to_tokens
|
python
|
THUDM/GLM
|
data_utils/wordpiece.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/wordpiece.py
|
MIT
|
def from_pretrained(cls, pretrained_model_name_or_path, cache_dir=None, *inputs, **kwargs):
"""
Instantiate a PreTrainedBertModel from a pre-trained model file.
Download and cache the pre-trained model file if needed.
"""
if pretrained_model_name_or_path in PRETRAINED_VOCAB_ARCHIVE_MAP:
vocab_file = PRETRAINED_VOCAB_ARCHIVE_MAP[pretrained_model_name_or_path]
else:
vocab_file = pretrained_model_name_or_path
if os.path.isdir(vocab_file):
vocab_file = os.path.join(vocab_file, VOCAB_NAME)
# redirect to the cache, if necessary
try:
resolved_vocab_file = cached_path(vocab_file, cache_dir=cache_dir)
except EnvironmentError:
logger.error(
"Model name '{}' was not found in model name list ({}). "
"We assumed '{}' was a path or url but couldn't find any file "
"associated to this path or url.".format(
pretrained_model_name_or_path,
', '.join(PRETRAINED_VOCAB_ARCHIVE_MAP.keys()),
vocab_file))
return None
if resolved_vocab_file == vocab_file:
logger.info("loading vocabulary file {}".format(vocab_file))
else:
logger.info("loading vocabulary file {} from cache at {}".format(
vocab_file, resolved_vocab_file))
if pretrained_model_name_or_path in PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP:
# if we're using a pretrained model, ensure the tokenizer won't index sequences longer
# than the number of positional embeddings
max_len = PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP[pretrained_model_name_or_path]
kwargs['max_len'] = min(kwargs.get('max_len', int(1e12)), max_len)
# Instantiate tokenizer.
tokenizer = cls(resolved_vocab_file, *inputs, **kwargs)
return tokenizer
|
Instantiate a PreTrainedBertModel from a pre-trained model file.
Download and cache the pre-trained model file if needed.
|
from_pretrained
|
python
|
THUDM/GLM
|
data_utils/wordpiece.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/wordpiece.py
|
MIT
|
def __init__(self,
do_lower_case=True,
never_split=("[UNK]", "[SEP]", "[PAD]", "[CLS]", "[MASK]")):
"""Constructs a BasicTokenizer.
Args:
do_lower_case: Whether to lower case the input.
"""
self.do_lower_case = do_lower_case
self.never_split = never_split
|
Constructs a BasicTokenizer.
Args:
do_lower_case: Whether to lower case the input.
|
__init__
|
python
|
THUDM/GLM
|
data_utils/wordpiece.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/wordpiece.py
|
MIT
|
def _run_strip_accents(self, text):
"""Strips accents from a piece of text."""
text = unicodedata.normalize("NFD", text)
output = []
for char in text:
cat = unicodedata.category(char)
if cat == "Mn":
continue
output.append(char)
return "".join(output)
|
Strips accents from a piece of text.
|
_run_strip_accents
|
python
|
THUDM/GLM
|
data_utils/wordpiece.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/wordpiece.py
|
MIT
|
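The accent-stripping approach (NFD decomposition, then dropping combining marks) is easy to try in isolation; a short sketch:

```python
import unicodedata

def strip_accents(text):
    """Decompose to NFD and drop combining marks (category 'Mn'), the same
    approach as _run_strip_accents."""
    return "".join(c for c in unicodedata.normalize("NFD", text)
                   if unicodedata.category(c) != "Mn")

print(strip_accents("café naïve"))  # cafe naive
```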
def _run_split_on_punc(self, text):
"""Splits punctuation on a piece of text."""
if text in self.never_split:
return [text]
chars = list(text)
i = 0
start_new_word = True
output = []
while i < len(chars):
char = chars[i]
if _is_punctuation(char):
output.append([char])
start_new_word = True
else:
if start_new_word:
output.append([])
start_new_word = False
output[-1].append(char)
i += 1
return ["".join(x) for x in output]
|
Splits punctuation on a piece of text.
|
_run_split_on_punc
|
python
|
THUDM/GLM
|
data_utils/wordpiece.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/wordpiece.py
|
MIT
|
def _tokenize_chinese_chars(self, text):
"""Adds whitespace around any CJK character."""
output = []
for char in text:
cp = ord(char)
if self._is_chinese_char(cp):
output.append(" ")
output.append(char)
output.append(" ")
else:
output.append(char)
return "".join(output)
|
Adds whitespace around any CJK character.
|
_tokenize_chinese_chars
|
python
|
THUDM/GLM
|
data_utils/wordpiece.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/wordpiece.py
|
MIT
|
def _is_chinese_char(self, cp):
"""Checks whether CP is the codepoint of a CJK character."""
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
        # like all of the other languages.
if ((cp >= 0x4E00 and cp <= 0x9FFF) or #
(cp >= 0x3400 and cp <= 0x4DBF) or #
(cp >= 0x20000 and cp <= 0x2A6DF) or #
(cp >= 0x2A700 and cp <= 0x2B73F) or #
(cp >= 0x2B740 and cp <= 0x2B81F) or #
(cp >= 0x2B820 and cp <= 0x2CEAF) or
(cp >= 0xF900 and cp <= 0xFAFF) or #
(cp >= 0x2F800 and cp <= 0x2FA1F)): #
return True
return False
|
Checks whether CP is the codepoint of a CJK character.
|
_is_chinese_char
|
python
|
THUDM/GLM
|
data_utils/wordpiece.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/wordpiece.py
|
MIT
|
def _clean_text(self, text):
"""Performs invalid character removal and whitespace cleanup on text."""
output = []
for char in text:
cp = ord(char)
if cp == 0 or cp == 0xfffd or _is_control(char):
continue
if _is_whitespace(char):
output.append(" ")
else:
output.append(char)
return "".join(output)
|
Performs invalid character removal and whitespace cleanup on text.
|
_clean_text
|
python
|
THUDM/GLM
|
data_utils/wordpiece.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/wordpiece.py
|
MIT
|
def tokenize(self, text):
"""Tokenizes a piece of text into its word pieces.
This uses a greedy longest-match-first algorithm to perform tokenization
using the given vocabulary.
For example:
input = "unaffable"
output = ["un", "##aff", "##able"]
Args:
text: A single token or whitespace separated tokens. This should have
already been passed through `BasicTokenizer`.
Returns:
A list of wordpiece tokens.
"""
output_tokens = []
for token in whitespace_tokenize(text):
chars = list(token)
if len(chars) > self.max_input_chars_per_word:
output_tokens.append(self.unk_token)
continue
is_bad = False
start = 0
sub_tokens = []
while start < len(chars):
end = len(chars)
cur_substr = None
while start < end:
substr = "".join(chars[start:end])
if start > 0:
substr = "##" + substr
if substr in self.vocab:
cur_substr = substr
break
end -= 1
if cur_substr is None:
is_bad = True
break
sub_tokens.append(cur_substr)
start = end
if is_bad:
output_tokens.append(self.unk_token)
else:
output_tokens.extend(sub_tokens)
return output_tokens
|
Tokenizes a piece of text into its word pieces.
This uses a greedy longest-match-first algorithm to perform tokenization
using the given vocabulary.
For example:
input = "unaffable"
output = ["un", "##aff", "##able"]
Args:
text: A single token or whitespace separated tokens. This should have
already been passed through `BasicTokenizer`.
Returns:
A list of wordpiece tokens.
|
tokenize
|
python
|
THUDM/GLM
|
data_utils/wordpiece.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/wordpiece.py
|
MIT
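The greedy longest-match-first loop in the tokenize method above is easiest to follow on a toy vocabulary. The following standalone sketch re-implements just the per-word matching logic (a simplified illustration, not the repository class):

def greedy_wordpiece(word, vocab, unk_token="[UNK]"):
    # Try the longest prefix first; continuation pieces are prefixed with "##".
    pieces, start = [], 0
    while start < len(word):
        end, match = len(word), None
        while start < end:
            piece = word[start:end] if start == 0 else "##" + word[start:end]
            if piece in vocab:
                match = piece
                break
            end -= 1
        if match is None:            # no piece matches -> the whole word becomes UNK
            return [unk_token]
        pieces.append(match)
        start = end
    return pieces

toy_vocab = {"un", "##aff", "##able"}
print(greedy_wordpiece("unaffable", toy_vocab))  # -> ['un', '##aff', '##able']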
|
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
    # \t, \n, and \r are technically control characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
|
Checks whether `chars` is a whitespace character.
|
_is_whitespace
|
python
|
THUDM/GLM
|
data_utils/wordpiece.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/wordpiece.py
|
MIT
|
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat.startswith("C"):
return True
return False
|
Checks whether `chars` is a control character.
|
_is_control
|
python
|
THUDM/GLM
|
data_utils/wordpiece.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/wordpiece.py
|
MIT
|
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
(cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
|
Checks whether `chars` is a punctuation character.
|
_is_punctuation
|
python
|
THUDM/GLM
|
data_utils/wordpiece.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/wordpiece.py
|
MIT
|
def get_dataset(name, tokenizer, pre_tokenize, data_parallel_rank, loader_scatter=None, no_lazy_loader=False,
half_lazy_loader=False):
"""gets dataset object based on keyword args and file at `path`"""
global_rank = torch.distributed.get_rank()
if not supported_corpus(name):
raise NotImplementedError('dataset %s is not supported' % name)
dataset = corpora.NAMED_CORPORA[name]
path = dataset.PATH
if issubclass(dataset, corpora.PromptReader):
if not (exists_lazy(path, data_type='prompt') and exists_lazy(path, data_type='text')) and not (
loader_scatter is not None and exists_scatter(path, data_type='prompt',
scatter_num=loader_scatter) and exists_scatter(path,
data_type='text',
scatter_num=loader_scatter)):
# create cached version of dataset for lazy loading if it doesn't exist
if global_rank == 0:
print(f"Creating lazy loader for dataset {name}")
prompt_writer = LazyWriter(path, data_type='prompt', is_array=pre_tokenize)
text_writer = LazyWriter(path, data_type='text', is_array=pre_tokenize)
writers = {'prompt': prompt_writer, 'text': text_writer}
reader = dataset(writers=writers, tokenizer=tokenizer, tokenize=pre_tokenize)
reader.process()
prompt_writer.close()
text_writer.close()
else:
while not os.path.exists(LazyWriter.get_len_path(path, data_type='prompt')):
time.sleep(1)
map_fn = (lambda x: x.tolist()) if pre_tokenize else None
if loader_scatter is not None:
if not (exists_scatter(path, data_type='prompt', scatter_num=loader_scatter) and exists_scatter(path,
data_type='text',
scatter_num=loader_scatter)):
if global_rank == 0:
print(f"Creating scatter loader for dataset {name}")
prompts = LazyLoader(path, data_type='prompt', map_fn=map_fn, mem_map=True,
is_array=pre_tokenize)
texts = LazyLoader(path, data_type='text', map_fn=map_fn, mem_map=True,
is_array=pre_tokenize)
indices = list(range(len(texts)))
random.shuffle(indices)
segment_length = (len(indices) - 1) // loader_scatter + 1
for i in range(loader_scatter):
scatter_path = get_scatter_path(path, scatter_rank=i)
prompt_writer = LazyWriter(scatter_path, data_type='prompt', is_array=pre_tokenize)
text_writer = LazyWriter(scatter_path, data_type='text', is_array=pre_tokenize)
for idx in indices[i * segment_length: (i + 1) * segment_length]:
prompt_writer.write(prompts[idx])
text_writer.write(texts[idx])
prompt_writer.close()
text_writer.close()
else:
while not (
exists_scatter(path, data_type='prompt', scatter_num=loader_scatter) and exists_scatter(
path, data_type='text', scatter_num=loader_scatter)):
time.sleep(1)
scatter_path = get_scatter_path(path, scatter_rank=data_parallel_rank % loader_scatter)
print(f"Rank {global_rank} is using scatter from {scatter_path}")
prompts = LazyLoader(scatter_path, data_type='prompt', map_fn=map_fn, mem_map=True,
is_array=pre_tokenize, load_memory=no_lazy_loader, half_load=half_lazy_loader)
texts = LazyLoader(scatter_path, data_type='text', map_fn=map_fn, mem_map=True,
is_array=pre_tokenize, load_memory=no_lazy_loader, half_load=half_lazy_loader)
else:
prompts = LazyLoader(path, data_type='prompt', map_fn=map_fn, mem_map=True,
is_array=pre_tokenize, load_memory=no_lazy_loader, half_load=half_lazy_loader)
texts = LazyLoader(path, data_type='text', map_fn=map_fn, mem_map=True,
is_array=pre_tokenize, load_memory=no_lazy_loader, half_load=half_lazy_loader)
text = corpora.PromptDataset(prompt_loader=prompts, text_loader=texts, tokenizer=tokenizer,
to_tokenize=not pre_tokenize)
if loader_scatter is None:
if global_rank == 0:
print(f"Create dataset {name} with {len(text)} documents")
for i in range(10):
rand_id = i if i < 5 else random.randrange(len(text))
sample_tokens = text[rand_id]['tokens'][:1024]
print(sample_tokens)
print(tokenizer.DecodeIds(sample_tokens).encode('utf-8'))
else:
for scatter_id in range(loader_scatter):
if data_parallel_rank % loader_scatter == scatter_id and data_parallel_rank // loader_scatter == 0:
print(f"Create dataset {name} at scatter {scatter_id} with {len(text)} documents")
for i in range(10):
sample_tokens = text[i]['tokens'][:1024]
print(sample_tokens)
print(tokenizer.DecodeIds(sample_tokens))
torch.distributed.barrier()
return text
elif issubclass(dataset, corpora.KeyReader):
if not (exists_lazy(path, data_type='text') and exists_lazy(path, data_type='mask')):
# create cached version of dataset for lazy loading if it doesn't exist
if global_rank == 0:
text_writer = LazyWriter(path, data_type='text', is_array=pre_tokenize)
mask_writer = LazyWriter(path, data_type='mask', is_array=True)
writers = {'mask': mask_writer, 'text': text_writer}
dataset(writers=writers, tokenizer=tokenizer, tokenize=pre_tokenize)
mask_writer.close()
text_writer.close()
else:
while not os.path.exists(LazyWriter.get_len_path(path, data_type='mask')):
time.sleep(1)
map_fn = (lambda x: x.tolist()) if pre_tokenize else None
masks = LazyLoader(path, data_type='mask', map_fn=map_fn, mem_map=True, is_array=True)
texts = LazyLoader(path, data_type='text', map_fn=map_fn, mem_map=True, is_array=pre_tokenize)
text = corpora.KeyDataset(mask_loader=masks, text_loader=texts, tokenizer=tokenizer,
to_tokenize=not pre_tokenize)
return text
|
gets dataset object based on keyword args and file at `path`
|
get_dataset
|
python
|
THUDM/GLM
|
data_utils/__init__.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/__init__.py
|
MIT
|
def make_dataset(path, seq_length, mem_length, shuffle=True, split=None, tokenizer=None,
sample_one_document=False, pre_tokenize=False, ds_type='', save_splits=None, load_splits=None,
save_test_data=None, no_lazy_loader=False, loader_scatter=None, data_parallel_rank=None,
filter_english=False, non_sentence_start=0.0, half_lazy_loader=False, **kwargs):
"""function to create datasets+tokenizers for common options"""
if split is None:
split = [1.]
# get one or multiple datasets and concatenate
if isinstance(path, str):
ds = get_dataset(path, tokenizer=tokenizer, pre_tokenize=pre_tokenize, no_lazy_loader=no_lazy_loader,
loader_scatter=loader_scatter, data_parallel_rank=data_parallel_rank,
half_lazy_loader=half_lazy_loader)
else:
ds = [get_dataset(p, tokenizer=tokenizer, pre_tokenize=pre_tokenize, no_lazy_loader=no_lazy_loader,
loader_scatter=loader_scatter, data_parallel_rank=data_parallel_rank,
half_lazy_loader=half_lazy_loader) for p in path]
ds = ConcatDataset(ds)
# Split dataset into train/val/test (and wrap bert dataset)
def wrap_dataset(dataset):
if ds_type.lower() == 'bert':
presplit_sentences = kwargs['presplit_sentences'] if 'presplit_sentences' in kwargs else False
dataset = BertSentencepairDataset(dataset, max_seq_len=seq_length, presplit_sentences=presplit_sentences)
elif ds_type.lower() == 'gpt-xl':
assert pre_tokenize
dataset = XLDataset(dataset, tokenizer, max_seq_len=seq_length, mem_len=mem_length,
sample_across_doc=not sample_one_document)
elif ds_type.lower() == 'gpt2':
dataset = GPT2Dataset(dataset, tokenizer, max_seq_len=seq_length, sample_across_doc=not sample_one_document)
elif ds_type.lower() == 'block':
dataset = BlockDataset(dataset, tokenizer, max_seq_len=seq_length,
sample_across_doc=not sample_one_document, filter_english=filter_english,
non_sentence_start=non_sentence_start)
return dataset
if should_split(split):
ds = split_ds(ds, split, shuffle=shuffle, save_splits=save_splits, load_splits=load_splits)
if save_test_data is not None and torch.distributed.get_rank() == 0:
test_ds = ds[-1]
with open(save_test_data, "w", encoding='utf-8') as output:
for data in test_ds:
text = data['tokens']
text = tokenizer.DecodeIds(text)
output.write(text)
output.write("\n")
print(f"Write test data to {save_test_data}")
ds = [wrap_dataset(d) if d is not None else None for d in ds]
else:
ds = wrap_dataset(ds)
return ds
|
function to create datasets+tokenizers for common options
|
make_dataset
|
python
|
THUDM/GLM
|
data_utils/__init__.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/__init__.py
|
MIT
|
def conversion_helper(val, conversion):
"""Apply conversion to val. Recursively apply conversion if `val` is a nested tuple/list structure."""
if not isinstance(val, (tuple, list)):
return conversion(val)
rtn = [conversion_helper(v, conversion) for v in val]
if isinstance(val, tuple):
rtn = tuple(rtn)
return rtn
|
Apply conversion to val. Recursively apply conversion if `val` is a nested tuple/list structure.
|
conversion_helper
|
python
|
THUDM/GLM
|
fp16/fp16.py
|
https://github.com/THUDM/GLM/blob/master/fp16/fp16.py
|
MIT
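Because conversion_helper is fully shown above, a tiny usage example makes the recursion obvious: tuples stay tuples, lists stay lists, and the conversion is applied to every leaf.

nested = (1.0, [2.0, (3.0, 4.0)])
halved = conversion_helper(nested, lambda v: v / 2)
print(halved)   # -> (0.5, [1.0, (1.5, 2.0)])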
|
def clip_master_grads(self, max_norm, norm_type=2):
"""
Clips fp32 master gradients via ``torch.nn.utils.clip_grad_norm``.
Args:
max_norm (float or int): max norm of the gradients
norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for
infinity norm.
Returns:
Total norm of the current fp32 gradients (viewed as a single vector).
.. warning::
Returns -1 if the most recently computed fp16 gradients overflowed (that is, if ``self.overflow`` is ``True``).
"""
if not self.overflow:
fp32_params = []
for param_group in self.optimizer.param_groups:
for param in param_group['params']:
fp32_params.append(param)
return self.clip_grad_norm(fp32_params, max_norm, norm_type)
else:
return -1
|
Clips fp32 master gradients via ``torch.nn.utils.clip_grad_norm``.
Args:
max_norm (float or int): max norm of the gradients
norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for
infinity norm.
Returns:
Total norm of the current fp32 gradients (viewed as a single vector).
.. warning::
Returns -1 if the most recently computed fp16 gradients overflowed (that is, if ``self.overflow`` is ``True``).
|
clip_master_grads
|
python
|
THUDM/GLM
|
fp16/fp16.py
|
https://github.com/THUDM/GLM/blob/master/fp16/fp16.py
|
MIT
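A hedged usage sketch of clip_master_grads inside the usual FP16_Optimizer loop; `loss` and `optimizer` (an FP16_Optimizer wrapping a standard torch optimizer) are assumed, in the same spirit as the examples in the backward and step docstrings further below.

optimizer.zero_grad()
optimizer.backward(loss)                                # scaled fp16 backward
grad_norm = optimizer.clip_master_grads(max_norm=1.0)   # clips fp32 master grads; returns -1 on overflow
optimizer.step()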
|
def state_dict(self):
"""
Returns a dict containing the current state of this :class:`FP16_Optimizer` instance.
This dict contains attributes of :class:`FP16_Optimizer`, as well as the state_dict
of the contained Pytorch optimizer.
Example::
checkpoint = {}
checkpoint['model'] = model.state_dict()
checkpoint['optimizer'] = optimizer.state_dict()
torch.save(checkpoint, "saved.pth")
"""
state_dict = {}
state_dict['loss_scaler'] = self.loss_scaler
state_dict['dynamic_loss_scale'] = self.dynamic_loss_scale
state_dict['overflow'] = self.overflow
state_dict['first_closure_call_this_step'] = self.first_closure_call_this_step
state_dict['optimizer_state_dict'] = self.optimizer.state_dict()
state_dict['fp32_from_fp16'] = self.fp32_from_fp16_groups
return state_dict
|
Returns a dict containing the current state of this :class:`FP16_Optimizer` instance.
This dict contains attributes of :class:`FP16_Optimizer`, as well as the state_dict
of the contained Pytorch optimizer.
Example::
checkpoint = {}
checkpoint['model'] = model.state_dict()
checkpoint['optimizer'] = optimizer.state_dict()
torch.save(checkpoint, "saved.pth")
|
state_dict
|
python
|
THUDM/GLM
|
fp16/fp16.py
|
https://github.com/THUDM/GLM/blob/master/fp16/fp16.py
|
MIT
|
def load_state_dict(self, state_dict):
"""
Loads a state_dict created by an earlier call to state_dict().
If ``fp16_optimizer_instance`` was constructed from some ``init_optimizer``,
whose parameters in turn came from ``model``, it is expected that the user
will call ``model.load_state_dict()`` before
``fp16_optimizer_instance.load_state_dict()`` is called.
Example::
model = torch.nn.Linear(D_in, D_out).cuda().half()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
optimizer = FP16_Optimizer(optimizer, static_loss_scale = 128.0)
...
checkpoint = torch.load("saved.pth")
model.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
"""
# I think it should actually be ok to reload the optimizer before the model.
self.loss_scaler = state_dict['loss_scaler']
self.dynamic_loss_scale = state_dict['dynamic_loss_scale']
self.overflow = state_dict['overflow']
self.first_closure_call_this_step = state_dict['first_closure_call_this_step']
self.optimizer.load_state_dict(state_dict['optimizer_state_dict'])
# At this point, the optimizer's references to the model's fp32 parameters are up to date.
# The optimizer's hyperparameters and internal buffers are also up to date.
# However, the fp32 master copies of the model's fp16 params stored by the optimizer are still
# out of date. There are two options.
# 1: Refresh the master params from the model's fp16 params.
# This requires less storage but incurs precision loss.
# 2: Save and restore the fp32 master copies separately.
# We choose option 2.
#
# Pytorch Optimizer.load_state_dict casts saved buffers (e.g. momentum) to the type and device
# of their associated parameters, because it's possible those buffers might not exist yet in
# the current optimizer instance. In our case, as long as the current FP16_Optimizer has been
# constructed in the same way as the one whose state_dict we are loading, the same master params
# are guaranteed to exist, so we can just copy_() from the saved master params.
for current_group, saved_group in zip(self.fp32_from_fp16_groups, state_dict['fp32_from_fp16']):
for current, saved in zip(current_group, saved_group):
current.data.copy_(saved.data)
|
Loads a state_dict created by an earlier call to state_dict().
If ``fp16_optimizer_instance`` was constructed from some ``init_optimizer``,
whose parameters in turn came from ``model``, it is expected that the user
will call ``model.load_state_dict()`` before
``fp16_optimizer_instance.load_state_dict()`` is called.
Example::
model = torch.nn.Linear(D_in, D_out).cuda().half()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
optimizer = FP16_Optimizer(optimizer, static_loss_scale = 128.0)
...
checkpoint = torch.load("saved.pth")
model.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
|
load_state_dict
|
python
|
THUDM/GLM
|
fp16/fp16.py
|
https://github.com/THUDM/GLM/blob/master/fp16/fp16.py
|
MIT
|
def step(self, closure=None): # could add clip option.
"""
If no closure is supplied, :attr:`step` should be called after
``fp16_optimizer_obj.backward(loss)``.
:attr:`step` updates the fp32 master copy of parameters using the optimizer supplied to
:class:`FP16_Optimizer`'s constructor, then copies the updated fp32 params into the fp16 params
originally referenced by :class:`FP16_Optimizer`'s constructor, so the user may immediately run
another forward pass using their model.
If a closure is supplied, :attr:`step` may be called without a prior call to
:attr:`backward(loss)`.
This control flow is identical to `ordinary Pytorch optimizer use`_ with closures.
However, the user should take care that any ``loss.backward()`` call within the closure
has been replaced by ``fp16_optimizer_obj.backward(loss)``.
Args:
closure (optional): Closure that will be supplied to the underlying optimizer originally passed to :class:`FP16_Optimizer`'s constructor. closure should call :attr:`zero_grad()` on the :class:`FP16_Optimizer` object, compute the loss, call :attr:`backward(loss)`, and return the loss.
Example with closure::
# optimizer is assumed to be an FP16_Optimizer object, previously constructed from an
# existing pytorch optimizer.
for input, target in dataset:
def closure():
optimizer.zero_grad()
output = model(input)
loss = loss_fn(output, target)
# loss.backward() becomes:
optimizer.backward(loss)
return loss
optimizer.step(closure)
.. warning::
Currently, calling :attr:`step` with a closure is not compatible with dynamic loss scaling.
.. _`ordinary Pytorch optimizer use`:
http://pytorch.org/docs/master/optim.html#optimizer-step-closure
"""
scale = self.loss_scaler.loss_scale
self._update_scale(self.overflow)
if self.overflow:
self.maybe_print("OVERFLOW! Skipping step. Attempted loss scale: {}, reducing to {}"
.format(scale, self.loss_scale))
return
if closure is not None:
retval = self._step_with_closure(closure)
else:
retval = self.optimizer.step()
self._master_params_to_model_params()
return retval
|
If no closure is supplied, :attr:`step` should be called after
``fp16_optimizer_obj.backward(loss)``.
:attr:`step` updates the fp32 master copy of parameters using the optimizer supplied to
:class:`FP16_Optimizer`'s constructor, then copies the updated fp32 params into the fp16 params
originally referenced by :class:`FP16_Optimizer`'s constructor, so the user may immediately run
another forward pass using their model.
If a closure is supplied, :attr:`step` may be called without a prior call to
:attr:`backward(loss)`.
This control flow is identical to `ordinary Pytorch optimizer use`_ with closures.
However, the user should take care that any ``loss.backward()`` call within the closure
has been replaced by ``fp16_optimizer_obj.backward(loss)``.
Args:
closure (optional): Closure that will be supplied to the underlying optimizer originally passed to :class:`FP16_Optimizer`'s constructor. closure should call :attr:`zero_grad()` on the :class:`FP16_Optimizer` object, compute the loss, call :attr:`backward(loss)`, and return the loss.
Example with closure::
# optimizer is assumed to be an FP16_Optimizer object, previously constructed from an
# existing pytorch optimizer.
for input, target in dataset:
def closure():
optimizer.zero_grad()
output = model(input)
loss = loss_fn(output, target)
# loss.backward() becomes:
optimizer.backward(loss)
return loss
optimizer.step(closure)
.. warning::
Currently, calling :attr:`step` with a closure is not compatible with dynamic loss scaling.
.. _`ordinary Pytorch optimizer use`:
http://pytorch.org/docs/master/optim.html#optimizer-step-closure
|
step
|
python
|
THUDM/GLM
|
fp16/fp16.py
|
https://github.com/THUDM/GLM/blob/master/fp16/fp16.py
|
MIT
|
def backward(self, loss, update_master_grads=True, retain_graph=False):
"""
:attr:`backward` performs the following conceptual steps:
1. fp32_loss = loss.float() (see first Note below)
2. scaled_loss = fp32_loss*loss_scale
3. scaled_loss.backward(), which accumulates scaled gradients into the ``.grad`` attributes of the model's leaves (which may be fp16, fp32, or a mixture, depending how your model was defined).
4. fp16 grads are then copied to the master params' ``.grad`` attributes (see second Note), which are guaranteed to be fp32.
5. Finally, master grads are divided by loss_scale.
In this way, after :attr:`backward`, the master params have fresh gradients,
and :attr:`step` may be called.
.. note::
:attr:`backward` internally converts the loss to fp32 before applying the loss scale.
This provides some additional safety against overflow if the user has supplied an
fp16 loss value.
However, for maximum overflow safety, the user should
compute the loss criterion (MSE, cross entropy, etc) in fp32 before supplying it to
:attr:`backward`.
.. warning::
The gradients found in a model's leaves after the call to
:attr:`backward` should not be regarded as valid in general,
because it's possible
they have been scaled (and in the case of dynamic loss scaling,
the scale factor may change over time).
If the user wants to inspect gradients after a call to :attr:`backward`,
only the master gradients should be regarded as valid. These can be retrieved via
:attr:`inspect_master_grad_data()`.
Args:
loss: The loss output by the user's model. loss may be either float or half (but see first Note above).
update_master_grads (bool, optional, default=True): Option to copy fp16 grads to fp32 grads on this call. By setting this to False, the user can delay the copy, which is useful to eliminate redundant fp16->fp32 grad copies if :attr:`backward` is being called on multiple losses in one iteration. If set to False, the user becomes responsible for calling :attr:`update_master_grads` before calling :attr:`step`.
retain_graph (bool, optional, default=False): Forwards the usual ``retain_graph=True`` option to the internal call to ``loss.backward``. If ``retain_graph`` is being used to accumulate gradient values from multiple backward passes before calling ``optimizer.step``, passing ``update_master_grads=False`` is also recommended (see Example below).
Example::
# Ordinary operation:
optimizer.backward(loss)
# Naive operation with multiple losses (technically valid, but less efficient):
# fp32 grads will be correct after the second call, but
# the first call incurs an unnecessary fp16->fp32 grad copy.
optimizer.backward(loss1)
optimizer.backward(loss2)
# More efficient way to handle multiple losses:
# The fp16->fp32 grad copy is delayed until fp16 grads from all
# losses have been accumulated.
optimizer.backward(loss1, update_master_grads=False)
optimizer.backward(loss2, update_master_grads=False)
optimizer.update_master_grads()
"""
        # To consider: try multiple backward passes using retain_graph=True to find
# a loss scale that works. After you find a loss scale that works, do a final dummy
# backward pass with retain_graph=False to tear down the graph. Doing this would avoid
# discarding the iteration, but probably wouldn't improve overall efficiency.
self.loss_scaler.backward(loss.float(), retain_graph=retain_graph)
if update_master_grads:
self.update_master_grads()
|
:attr:`backward` performs the following conceptual steps:
1. fp32_loss = loss.float() (see first Note below)
2. scaled_loss = fp32_loss*loss_scale
3. scaled_loss.backward(), which accumulates scaled gradients into the ``.grad`` attributes of the model's leaves (which may be fp16, fp32, or a mixture, depending how your model was defined).
4. fp16 grads are then copied to the master params' ``.grad`` attributes (see second Note), which are guaranteed to be fp32.
5. Finally, master grads are divided by loss_scale.
In this way, after :attr:`backward`, the master params have fresh gradients,
and :attr:`step` may be called.
.. note::
:attr:`backward` internally converts the loss to fp32 before applying the loss scale.
This provides some additional safety against overflow if the user has supplied an
fp16 loss value.
However, for maximum overflow safety, the user should
compute the loss criterion (MSE, cross entropy, etc) in fp32 before supplying it to
:attr:`backward`.
.. warning::
The gradients found in a model's leaves after the call to
:attr:`backward` should not be regarded as valid in general,
because it's possible
they have been scaled (and in the case of dynamic loss scaling,
the scale factor may change over time).
If the user wants to inspect gradients after a call to :attr:`backward`,
only the master gradients should be regarded as valid. These can be retrieved via
:attr:`inspect_master_grad_data()`.
Args:
loss: The loss output by the user's model. loss may be either float or half (but see first Note above).
update_master_grads (bool, optional, default=True): Option to copy fp16 grads to fp32 grads on this call. By setting this to False, the user can delay the copy, which is useful to eliminate redundant fp16->fp32 grad copies if :attr:`backward` is being called on multiple losses in one iteration. If set to False, the user becomes responsible for calling :attr:`update_master_grads` before calling :attr:`step`.
retain_graph (bool, optional, default=False): Forwards the usual ``retain_graph=True`` option to the internal call to ``loss.backward``. If ``retain_graph`` is being used to accumulate gradient values from multiple backward passes before calling ``optimizer.step``, passing ``update_master_grads=False`` is also recommended (see Example below).
Example::
# Ordinary operation:
optimizer.backward(loss)
# Naive operation with multiple losses (technically valid, but less efficient):
# fp32 grads will be correct after the second call, but
# the first call incurs an unnecessary fp16->fp32 grad copy.
optimizer.backward(loss1)
optimizer.backward(loss2)
# More efficient way to handle multiple losses:
# The fp16->fp32 grad copy is delayed until fp16 grads from all
# losses have been accumulated.
optimizer.backward(loss1, update_master_grads=False)
optimizer.backward(loss2, update_master_grads=False)
optimizer.update_master_grads()
|
backward
|
python
|
THUDM/GLM
|
fp16/fp16.py
|
https://github.com/THUDM/GLM/blob/master/fp16/fp16.py
|
MIT
|
def update_master_grads(self):
"""
Copy the ``.grad`` attribute from stored references to fp16 parameters to
the ``.grad`` attribute of the fp32 master parameters that are directly
updated by the optimizer. :attr:`update_master_grads` only needs to be called if
``fp16_optimizer_obj.backward`` was called with ``update_master_grads=False``.
"""
if self.dynamic_loss_scale:
self._check_overflow()
if self.overflow: return
self._model_grads_to_master_grads()
self._downscale_master()
|
Copy the ``.grad`` attribute from stored references to fp16 parameters to
the ``.grad`` attribute of the fp32 master parameters that are directly
updated by the optimizer. :attr:`update_master_grads` only needs to be called if
``fp16_optimizer_obj.backward`` was called with ``update_master_grads=False``.
|
update_master_grads
|
python
|
THUDM/GLM
|
fp16/fp16.py
|
https://github.com/THUDM/GLM/blob/master/fp16/fp16.py
|
MIT
|
def inspect_master_grad_data(self):
"""
When running with :class:`FP16_Optimizer`,
``.grad`` attributes of a model's fp16 leaves should not be
regarded as truthful, because they might be scaled.
After a call to :attr:`fp16_optimizer_obj.backward(loss)`, if no overflow was encountered,
the fp32 master params' ``.grad``
attributes will contain valid gradients properly divided by the loss scale. However,
because :class:`FP16_Optimizer` flattens some parameters, accessing them may be
nonintuitive. :attr:`inspect_master_grad_data`
allows those gradients to be viewed with shapes corresponding to their associated model leaves.
Returns:
List of lists (one list for each parameter group). The list for each parameter group
is a list of the ``.grad.data`` attributes of the fp32 master params belonging to that group.
"""
if self.overflow:
print("Warning: calling FP16_Optimizer.inspect_master_grad_data while in an overflow state. "
"Gradients are currently invalid (may be inf, nan, or stale). Returning None.")
return None
else:
# The optimizer owns only references to master params.
master_grads_data = []
for param_group in self.optimizer.param_groups:
master_grads_this_group = []
for param in param_group['params']:
if param.grad is not None:
master_grads_this_group.append(param.grad.data)
else:
master_grads_this_group.append(None)
master_grads_data.append(master_grads_this_group)
return master_grads_data
|
When running with :class:`FP16_Optimizer`,
``.grad`` attributes of a model's fp16 leaves should not be
regarded as truthful, because they might be scaled.
After a call to :attr:`fp16_optimizer_obj.backward(loss)`, if no overflow was encountered,
the fp32 master params' ``.grad``
attributes will contain valid gradients properly divided by the loss scale. However,
because :class:`FP16_Optimizer` flattens some parameters, accessing them may be
nonintuitive. :attr:`inspect_master_grad_data`
allows those gradients to be viewed with shapes corresponding to their associated model leaves.
Returns:
List of lists (one list for each parameter group). The list for each parameter group
is a list of the ``.grad.data`` attributes of the fp32 master params belonging to that group.
|
inspect_master_grad_data
|
python
|
THUDM/GLM
|
fp16/fp16.py
|
https://github.com/THUDM/GLM/blob/master/fp16/fp16.py
|
MIT
|
def BN_convert_float(module):
"""
Utility function for network_to_half().
Retained for legacy purposes.
"""
if isinstance(module, torch.nn.modules.batchnorm._BatchNorm) and module.affine is True:
module.float()
for child in module.children():
BN_convert_float(child)
return module
|
Utility function for network_to_half().
Retained for legacy purposes.
|
BN_convert_float
|
python
|
THUDM/GLM
|
fp16/fp16util.py
|
https://github.com/THUDM/GLM/blob/master/fp16/fp16util.py
|
MIT
|
def convert_module(module, dtype):
"""
Converts a module's immediate parameters and buffers to dtype.
"""
for param in module.parameters(recurse=False):
if param is not None:
if param.data.dtype.is_floating_point:
param.data = param.data.to(dtype=dtype)
if param._grad is not None and param._grad.data.dtype.is_floating_point:
param._grad.data = param._grad.data.to(dtype=dtype)
for buf in module.buffers(recurse=False):
if buf is not None and buf.data.dtype.is_floating_point:
buf.data = buf.data.to(dtype=dtype)
|
Converts a module's immediate parameters and buffers to dtype.
|
convert_module
|
python
|
THUDM/GLM
|
fp16/fp16util.py
|
https://github.com/THUDM/GLM/blob/master/fp16/fp16util.py
|
MIT
|
def convert_network(network, dtype):
"""
Converts a network's parameters and buffers to dtype.
"""
for module in network.modules():
if isinstance(module, torch.nn.modules.batchnorm._BatchNorm) and module.affine is True:
continue
convert_module(module, dtype)
return network
|
Converts a network's parameters and buffers to dtype.
|
convert_network
|
python
|
THUDM/GLM
|
fp16/fp16util.py
|
https://github.com/THUDM/GLM/blob/master/fp16/fp16util.py
|
MIT
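Assuming convert_network (and convert_module above) can be imported from fp16/fp16util.py, the following sketch shows the intended effect: floating-point parameters are cast to fp16, while affine BatchNorm layers are skipped and keep their fp32 parameters.

import torch
import torch.nn as nn

model = nn.Sequential(nn.Linear(16, 32), nn.BatchNorm1d(32), nn.ReLU())
model = convert_network(model, dtype=torch.half)
print(model[0].weight.dtype, model[1].weight.dtype)   # torch.float16 torch.float32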
|
def prep_param_lists(model, flat_master=False):
"""
Creates a list of FP32 master parameters for a given model, as in
`Training Neural Networks with Mixed Precision: Real Examples`_.
Args:
model (torch.nn.Module): Existing Pytorch model
flat_master (bool, optional, default=False): Flatten the master parameters into a single tensor, as a performance optimization.
Returns:
        A tuple (``model_params``, ``master_params``). ``model_params`` is a list of the model's parameters for later use with :func:`model_grads_to_master_grads` and :func:`master_params_to_model_params`. ``master_params`` is a list of FP32 master parameters. If ``flat_master=True``, ``master_params`` will be a list with one element.
Example::
model_params, master_params = prep_param_lists(model)
.. warning::
Currently, if ``flat_master=True``, all the model's parameters must be the same type. If the model has parameters of different types, use ``flat_master=False``, or use :class:`FP16_Optimizer`.
.. _`Training Neural Networks with Mixed Precision: Real Examples`:
http://on-demand.gputechconf.com/gtc/2018/video/S81012/
"""
model_params = [param for param in model.parameters() if param.requires_grad]
if flat_master:
# Give the user some more useful error messages
try:
# flatten_dense_tensors returns a contiguous flat array.
# http://pytorch.org/docs/master/_modules/torch/_utils.html
master_params = _flatten_dense_tensors([param.data for param in model_params]).float()
except:
print("Error in prep_param_lists: model may contain a mixture of parameters "
"of different types. Use flat_master=False, or use F16_Optimizer.")
raise
master_params = torch.nn.Parameter(master_params)
master_params.requires_grad = True
# master_params.register_hook(backwards_debug_hook)
if master_params.grad is None:
master_params.grad = master_params.new(*master_params.size())
return model_params, [master_params]
else:
master_params = [param.clone().float().detach() for param in model_params]
for param in master_params:
param.requires_grad = True
return model_params, master_params
|
Creates a list of FP32 master parameters for a given model, as in
`Training Neural Networks with Mixed Precision: Real Examples`_.
Args:
model (torch.nn.Module): Existing Pytorch model
flat_master (bool, optional, default=False): Flatten the master parameters into a single tensor, as a performance optimization.
Returns:
    A tuple (``model_params``, ``master_params``). ``model_params`` is a list of the model's parameters for later use with :func:`model_grads_to_master_grads` and :func:`master_params_to_model_params`. ``master_params`` is a list of FP32 master parameters. If ``flat_master=True``, ``master_params`` will be a list with one element.
Example::
model_params, master_params = prep_param_lists(model)
.. warning::
Currently, if ``flat_master=True``, all the model's parameters must be the same type. If the model has parameters of different types, use ``flat_master=False``, or use :class:`FP16_Optimizer`.
.. _`Training Neural Networks with Mixed Precision: Real Examples`:
http://on-demand.gputechconf.com/gtc/2018/video/S81012/
|
prep_param_lists
|
python
|
THUDM/GLM
|
fp16/fp16util.py
|
https://github.com/THUDM/GLM/blob/master/fp16/fp16util.py
|
MIT
|
def model_grads_to_master_grads(model_params, master_params, flat_master=False):
"""
Copy model gradients to master gradients.
Args:
model_params: List of model parameters created by :func:`prep_param_lists`.
master_params: List of FP32 master parameters created by :func:`prep_param_lists`. If ``master_params`` was created with ``flat_master=True``, ``flat_master=True`` should also be supplied to :func:`model_grads_to_master_grads`.
"""
if flat_master:
# The flattening may incur one more deep copy than is necessary.
master_params[0].grad.data.copy_(
_flatten_dense_tensors([p.grad.data for p in model_params]))
else:
for model, master in zip(model_params, master_params):
if model.grad is not None:
if master.grad is None:
master.grad = Variable(master.data.new(*master.data.size()))
master.grad.data.copy_(model.grad.data)
else:
master.grad = None
|
Copy model gradients to master gradients.
Args:
model_params: List of model parameters created by :func:`prep_param_lists`.
master_params: List of FP32 master parameters created by :func:`prep_param_lists`. If ``master_params`` was created with ``flat_master=True``, ``flat_master=True`` should also be supplied to :func:`model_grads_to_master_grads`.
|
model_grads_to_master_grads
|
python
|
THUDM/GLM
|
fp16/fp16util.py
|
https://github.com/THUDM/GLM/blob/master/fp16/fp16util.py
|
MIT
|
def master_params_to_model_params(model_params, master_params, flat_master=False):
"""
Copy master parameters to model parameters.
Args:
model_params: List of model parameters created by :func:`prep_param_lists`.
master_params: List of FP32 master parameters created by :func:`prep_param_lists`. If ``master_params`` was created with ``flat_master=True``, ``flat_master=True`` should also be supplied to :func:`master_params_to_model_params`.
"""
if flat_master:
for model, master in zip(model_params,
_unflatten_dense_tensors(master_params[0].data, model_params)):
model.data.copy_(master)
else:
for model, master in zip(model_params, master_params):
model.data.copy_(master.data)
|
Copy master parameters to model parameters.
Args:
model_params: List of model parameters created by :func:`prep_param_lists`.
master_params: List of FP32 master parameters created by :func:`prep_param_lists`. If ``master_params`` was created with ``flat_master=True``, ``flat_master=True`` should also be supplied to :func:`master_params_to_model_params`.
|
master_params_to_model_params
|
python
|
THUDM/GLM
|
fp16/fp16util.py
|
https://github.com/THUDM/GLM/blob/master/fp16/fp16util.py
|
MIT
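The three helpers above (prep_param_lists, model_grads_to_master_grads, master_params_to_model_params) combine into the classic manual mixed-precision step. A hedged sketch, assuming a CUDA device is available and the helpers are imported from fp16/fp16util.py; loss scaling is omitted for brevity:

import torch
import torch.nn as nn

model = nn.Linear(32, 32).cuda().half()                          # fp16 model
model_params, master_params = prep_param_lists(model)            # fp32 master copies
optimizer = torch.optim.SGD(master_params, lr=1e-3)               # optimizer owns the fp32 copies

x = torch.randn(8, 32, device='cuda', dtype=torch.half)
loss = model(x).float().sum()
loss.backward()                                                   # grads land on the fp16 params
model_grads_to_master_grads(model_params, master_params)          # copy grads into the fp32 masters
optimizer.step()                                                  # update in fp32
master_params_to_model_params(model_params, master_params)        # copy results back to the fp16 model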
|
def scaled_init_method(mean, std, num_layers):
"""Init method based on N(0, sigma/sqrt(2*num_layers)."""
std = std / math.sqrt(2.0 * num_layers)
def init_(tensor):
return torch.nn.init.normal_(tensor, mean=mean, std=std)
return init_
|
Init method based on N(0, sigma/sqrt(2*num_layers)).
|
scaled_init_method
|
python
|
THUDM/GLM
|
model/modeling_bert.py
|
https://github.com/THUDM/GLM/blob/master/model/modeling_bert.py
|
MIT
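Since scaled_init_method simply returns a closure over torch.nn.init.normal_, a quick check of the scaled standard deviation:

import torch

init_fn = scaled_init_method(mean=0.0, std=0.02, num_layers=24)
w = torch.empty(1024, 1024)
init_fn(w)                        # in-place normal init with std = 0.02 / sqrt(2 * 24)
print(round(w.std().item(), 4))   # roughly 0.0029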
|
def load_tf_weights_in_bert(model, tf_checkpoint_path):
""" Load tf checkpoints in a pytorch model
"""
try:
import re
import numpy as np
import tensorflow as tf
except ImportError:
print("Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions.")
raise
tf_path = os.path.abspath(tf_checkpoint_path)
print("Converting TensorFlow checkpoint from {}".format(tf_path))
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
arrays = []
for name, shape in init_vars:
print("Loading TF weight {} with shape {}".format(name, shape))
array = tf.train.load_variable(tf_path, name)
names.append(name)
arrays.append(array)
for name, array in zip(names, arrays):
name = name.split('/')
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v
# which are not required for using pretrained model
if any(n in ["adam_v", "adam_m"] for n in name):
print("Skipping {}".format("/".join(name)))
continue
pointer = model
for m_name in name:
if re.fullmatch(r'[A-Za-z]+_\d+', m_name):
l = re.split(r'_(\d+)', m_name)
else:
l = [m_name]
if l[0] == 'kernel' or l[0] == 'gamma':
pointer = getattr(pointer, 'weight')
elif l[0] == 'output_bias' or l[0] == 'beta':
pointer = getattr(pointer, 'bias')
elif l[0] == 'output_weights':
pointer = getattr(pointer, 'weight')
else:
pointer = getattr(pointer, l[0])
if len(l) >= 2:
num = int(l[1])
pointer = pointer[num]
if m_name[-11:] == '_embeddings':
pointer = getattr(pointer, 'weight')
elif m_name == 'kernel':
array = np.transpose(array)
try:
assert pointer.shape == array.shape
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
print("Initialize PyTorch weight {}".format(name))
pointer.data = torch.from_numpy(array)
return model
|
Load tf checkpoints in a pytorch model
|
load_tf_weights_in_bert
|
python
|
THUDM/GLM
|
model/modeling_bert.py
|
https://github.com/THUDM/GLM/blob/master/model/modeling_bert.py
|
MIT
|
def __init__(self,
vocab_size_or_config_json_file,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02,
deep_init=False,
fp32_layernorm=False,
fp32_embedding=False,
fp32_tokentypes=False,
layernorm_epsilon=1e-12):
"""Constructs BertConfig.
Args:
vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `BertModel`.
hidden_size: Size of the encoder layers and the pooler layer.
num_hidden_layers: Number of hidden layers in the Transformer encoder.
num_attention_heads: Number of attention heads for each attention layer in
the Transformer encoder.
intermediate_size: The size of the "intermediate" (i.e., feed-forward)
layer in the Transformer encoder.
hidden_act: The non-linear activation function (function or string) in the
encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
            hidden_dropout_prob: The dropout probability for all fully connected
layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob: The dropout ratio for the attention
probabilities.
max_position_embeddings: The maximum sequence length that this model might
ever be used with. Typically set this to something large just in case
(e.g., 512 or 1024 or 2048).
type_vocab_size: The vocabulary size of the `token_type_ids` passed into
`BertModel`.
            initializer_range: The stdev of the truncated_normal_initializer for
initializing all weight matrices.
"""
if isinstance(vocab_size_or_config_json_file, str):
with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
json_config = json.loads(reader.read())
for key, value in json_config.items():
self.__dict__[key] = value
elif isinstance(vocab_size_or_config_json_file, int):
self.vocab_size = vocab_size_or_config_json_file
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.deep_init = deep_init
self.fp32_layernorm = fp32_layernorm
self.fp32_embedding = fp32_embedding
self.layernorm_epsilon = layernorm_epsilon
self.fp32_tokentypes = fp32_tokentypes
else:
raise ValueError("First argument must be either a vocabulary size (int)"
"or the path to a pretrained model config file (str)")
|
Constructs BertConfig.
Args:
vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `BertModel`.
hidden_size: Size of the encoder layers and the pooler layer.
num_hidden_layers: Number of hidden layers in the Transformer encoder.
num_attention_heads: Number of attention heads for each attention layer in
the Transformer encoder.
intermediate_size: The size of the "intermediate" (i.e., feed-forward)
layer in the Transformer encoder.
hidden_act: The non-linear activation function (function or string) in the
encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
    hidden_dropout_prob: The dropout probability for all fully connected
layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob: The dropout ratio for the attention
probabilities.
max_position_embeddings: The maximum sequence length that this model might
ever be used with. Typically set this to something large just in case
(e.g., 512 or 1024 or 2048).
type_vocab_size: The vocabulary size of the `token_type_ids` passed into
`BertModel`.
    initializer_range: The stdev of the truncated_normal_initializer for
initializing all weight matrices.
|
__init__
|
python
|
THUDM/GLM
|
model/modeling_bert.py
|
https://github.com/THUDM/GLM/blob/master/model/modeling_bert.py
|
MIT
|
def from_dict(cls, json_object):
"""Constructs a `BertConfig` from a Python dictionary of parameters."""
config = BertConfig(vocab_size_or_config_json_file=-1)
for key, value in json_object.items():
config.__dict__[key] = value
return config
|
Constructs a `BertConfig` from a Python dictionary of parameters.
|
from_dict
|
python
|
THUDM/GLM
|
model/modeling_bert.py
|
https://github.com/THUDM/GLM/blob/master/model/modeling_bert.py
|
MIT
|
def from_json_file(cls, json_file):
"""Constructs a `BertConfig` from a json file of parameters."""
with open(json_file, "r", encoding='utf-8') as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
|
Constructs a `BertConfig` from a json file of parameters.
|
from_json_file
|
python
|
THUDM/GLM
|
model/modeling_bert.py
|
https://github.com/THUDM/GLM/blob/master/model/modeling_bert.py
|
MIT
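Taken together, the __init__, from_dict and from_json_file records above give three equivalent ways to build a config. A short sketch, assuming BertConfig is importable from model/modeling_bert.py; the hyperparameter values are illustrative only:

config = BertConfig(vocab_size_or_config_json_file=30522, hidden_size=768,
                    num_hidden_layers=12, num_attention_heads=12)
same = BertConfig.from_dict({'vocab_size': 30522, 'hidden_size': 768,
                             'num_hidden_layers': 12, 'num_attention_heads': 12})
# from_json_file(path) reads the same dictionary from a bert_config.json on disk.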
|
def __init__(self, hidden_size, eps=1e-12):
"""Construct a layernorm module in the TF style (epsilon inside the square root).
"""
super(BertLayerNorm, self).__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
|
Construct a layernorm module in the TF style (epsilon inside the square root).
|
__init__
|
python
|
THUDM/GLM
|
model/modeling_bert.py
|
https://github.com/THUDM/GLM/blob/master/model/modeling_bert.py
|
MIT
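The record above only shows the constructor; "TF style" refers to where epsilon sits in the normalization. A hedged sketch of what that forward pass conventionally computes (the module's actual forward is not shown in this record):

import torch

def tf_style_layernorm(x, weight, bias, eps=1e-12):
    mean = x.mean(-1, keepdim=True)
    var = (x - mean).pow(2).mean(-1, keepdim=True)
    x_hat = (x - mean) / torch.sqrt(var + eps)   # epsilon added inside the square root
    return weight * x_hat + bias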
|
def from_pretrained(cls, pretrained_model_name, state_dict=None, cache_dir=None,
fp32_layernorm=False, fp32_embedding=False, layernorm_epsilon=1e-12,
fp32_tokentypes=False, *inputs, **kwargs):
"""
Instantiate a PreTrainedBertModel from a pre-trained model file or a pytorch state dict.
Download and cache the pre-trained model file if needed.
Params:
pretrained_model_name: either:
- a str with the name of a pre-trained model to load selected in the list of:
. `bert-base-uncased`
. `bert-large-uncased`
. `bert-base-cased`
. `bert-large-cased`
. `bert-base-multilingual-uncased`
. `bert-base-multilingual-cased`
. `bert-base-chinese`
- a path or url to a pretrained model archive containing:
. `bert_config.json` a configuration file for the model
. `pytorch_model.bin` a PyTorch dump of a BertForPreTraining instance
cache_dir: an optional path to a folder in which the pre-trained models will be cached.
            state_dict: an optional state dictionary (collections.OrderedDict object) to use instead of Google pre-trained models
*inputs, **kwargs: additional input for the specific Bert class
(ex: num_labels for BertForSequenceClassification)
"""
if pretrained_model_name in PRETRAINED_MODEL_ARCHIVE_MAP:
archive_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name]
else:
archive_file = pretrained_model_name
# redirect to the cache, if necessary
try:
resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir)
except FileNotFoundError:
logger.error(
"Model name '{}' was not found in model name list ({}). "
"We assumed '{}' was a path or url but couldn't find any file "
"associated to this path or url.".format(
pretrained_model_name,
', '.join(PRETRAINED_MODEL_ARCHIVE_MAP.keys()),
archive_file))
return None
if resolved_archive_file == archive_file:
logger.info("loading archive file {}".format(archive_file))
else:
logger.info("loading archive file {} from cache at {}".format(
archive_file, resolved_archive_file))
tempdir = None
if os.path.isdir(resolved_archive_file):
serialization_dir = resolved_archive_file
else:
# Extract archive to temp dir
tempdir = tempfile.mkdtemp()
logger.info("extracting archive file {} to temp dir {}".format(
resolved_archive_file, tempdir))
with tarfile.open(resolved_archive_file, 'r:gz') as archive:
def is_within_directory(directory, target):
abs_directory = os.path.abspath(directory)
abs_target = os.path.abspath(target)
prefix = os.path.commonprefix([abs_directory, abs_target])
return prefix == abs_directory
def safe_extract(tar, path=".", members=None, *, numeric_owner=False):
for member in tar.getmembers():
member_path = os.path.join(path, member.name)
if not is_within_directory(path, member_path):
raise Exception("Attempted Path Traversal in Tar File")
tar.extractall(path, members, numeric_owner=numeric_owner)
safe_extract(archive, tempdir)
serialization_dir = tempdir
# Load config
config_file = os.path.join(serialization_dir, CONFIG_NAME)
config = BertConfig.from_json_file(config_file)
config.fp32_layernorm = fp32_layernorm
config.fp32_embedding = fp32_embedding
config.layernorm_epsilon = layernorm_epsilon
config.fp32_tokentypes = fp32_tokentypes
logger.info("Model config {}".format(config))
# Instantiate model.
model = cls(config, *inputs, **kwargs)
if state_dict is None:
weights_path = os.path.join(serialization_dir, WEIGHTS_NAME)
state_dict = torch.load(weights_path)
old_keys = []
new_keys = []
for key in state_dict.keys():
new_key = None
if 'gamma' in key:
new_key = key.replace('gamma', 'weight')
if 'beta' in key:
new_key = key.replace('beta', 'bias')
if new_key:
old_keys.append(key)
new_keys.append(new_key)
for old_key, new_key in zip(old_keys, new_keys):
state_dict[new_key] = state_dict.pop(old_key)
missing_keys = []
unexpected_keys = []
error_msgs = []
# copy state_dict so _load_from_state_dict can modify it
metadata = getattr(state_dict, '_metadata', None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
def load(module, prefix=''):
local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
module._load_from_state_dict(
state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + '.')
load(model, prefix='' if hasattr(model, 'bert') else 'bert.')
if len(missing_keys) > 0:
print("Weights of {} not initialized from pretrained model: {}".format(
model.__class__.__name__, missing_keys))
if len(unexpected_keys) > 0:
print("Weights from pretrained model not used in {}: {}".format(
model.__class__.__name__, unexpected_keys))
if tempdir:
# Clean up temp dir
shutil.rmtree(tempdir)
return model
|
Instantiate a PreTrainedBertModel from a pre-trained model file or a pytorch state dict.
Download and cache the pre-trained model file if needed.
Params:
pretrained_model_name: either:
- a str with the name of a pre-trained model to load selected in the list of:
. `bert-base-uncased`
. `bert-large-uncased`
. `bert-base-cased`
. `bert-large-cased`
. `bert-base-multilingual-uncased`
. `bert-base-multilingual-cased`
. `bert-base-chinese`
- a path or url to a pretrained model archive containing:
. `bert_config.json` a configuration file for the model
. `pytorch_model.bin` a PyTorch dump of a BertForPreTraining instance
cache_dir: an optional path to a folder in which the pre-trained models will be cached.
        state_dict: an optional state dictionary (collections.OrderedDict object) to use instead of Google pre-trained models
*inputs, **kwargs: additional input for the specific Bert class
(ex: num_labels for BertForSequenceClassification)
|
from_pretrained
|
python
|
THUDM/GLM
|
model/modeling_bert.py
|
https://github.com/THUDM/GLM/blob/master/model/modeling_bert.py
|
MIT
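A hedged usage sketch for the from_pretrained classmethod above; the BertModel class name and the 'bert-base-uncased' shortcut are assumptions here, while the fp32 keyword arguments are the ones exposed in the signature shown.

from model.modeling_bert import BertModel   # assumed class in this file

model = BertModel.from_pretrained('bert-base-uncased',
                                  cache_dir='/tmp/bert_cache',
                                  fp32_layernorm=True,
                                  fp32_embedding=True)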
|
def init_method_normal(std=0.02):
"""Init method based on normal distribution.
This is only used for embeddings. The transformer has its
own initializer.
"""
def init_(tensor):
return torch.nn.init.normal_(tensor, mean=0.0, std=std)
return init_
|
Init method based on normal distribution.
This is only used for embeddings. The transformer has its
own initializer.
|
init_method_normal
|
python
|
THUDM/GLM
|
model/modeling_glm.py
|
https://github.com/THUDM/GLM/blob/master/model/modeling_glm.py
|
MIT
|
def _check_data_types(keys, data, target_dtype):
"""Check that all the keys have the same target data type."""
for key in keys:
assert data[key].dtype == target_dtype, '{} has data type {} which '\
'is different than {}'.format(key, data[key].dtype, target_dtype)
|
Check that all the keys have the same target data type.
|
_check_data_types
|
python
|
THUDM/GLM
|
mpu/data.py
|
https://github.com/THUDM/GLM/blob/master/mpu/data.py
|
MIT
|
def _build_key_size_numel_dictionaries(keys, data):
"""Build the size on rank 0 and broadcast."""
max_dim = _MAX_DATA_DIM
sizes = [0 for _ in range(max_dim) for _ in keys]
# Pack the sizes on rank zero.
if get_model_parallel_rank() == 0:
offset = 0
for key in keys:
assert data[key].dim() < max_dim, 'you should increase MAX_DATA_DIM'
size = data[key].size()
for i, s in enumerate(size):
sizes[i + offset] = s
offset += max_dim
# Move to GPU and broadcast.
sizes_cuda = torch.cuda.LongTensor(sizes)
torch.distributed.broadcast(sizes_cuda, get_model_parallel_src_rank(),
group=get_model_parallel_group())
# Move back to cpu and unpack.
sizes_cpu = sizes_cuda.cpu()
key_size = {}
key_numel = {}
total_numel = 0
offset = 0
for key in keys:
i = 0
size = []
numel = 1
while sizes_cpu[offset + i] > 0:
this_size = sizes_cpu[offset + i]
size.append(this_size)
numel *= this_size
i += 1
key_size[key] = size
key_numel[key] = numel
total_numel += numel
offset += max_dim
return key_size, key_numel, total_numel
|
Build the size on rank 0 and broadcast.
|
_build_key_size_numel_dictionaries
|
python
|
THUDM/GLM
|
mpu/data.py
|
https://github.com/THUDM/GLM/blob/master/mpu/data.py
|
MIT
|
def broadcast_data(keys, data, datatype):
"""Broadcast data from rank zero of each model parallel group to the
members of the same model parallel group.
Arguments:
        keys: list of keys in the data dictionary to be broadcast
data: data dictionary of string keys and cpu tensor values.
datatype: torch data type of all tensors in data associated
with keys.
"""
# Build (key, size) and (key, number of elements) dictionaries along
# with the total number of elements on all ranks.
key_size, key_numel, total_numel = _build_key_size_numel_dictionaries(keys,
data)
# Pack on rank zero.
if get_model_parallel_rank() == 0:
# Check that all keys have the same data type.
_check_data_types(keys, data, datatype)
# Flatten the data associated with the keys
flatten_data = torch.cat(
[data[key].contiguous().view(-1) for key in keys], dim=0).cuda()
else:
flatten_data = torch.empty(total_numel,
device=torch.cuda.current_device(),
dtype=datatype)
    # Broadcast
torch.distributed.broadcast(flatten_data, get_model_parallel_src_rank(),
group=get_model_parallel_group())
# Unpack
output = {}
offset = 0
for key in keys:
size = key_size[key]
numel = key_numel[key]
output[key] = flatten_data.narrow(0, offset, numel).view(size)
offset += numel
return output
|
Broadcast data from rank zero of each model parallel group to the
members of the same model parallel group.
Arguments:
    keys: list of keys in the data dictionary to be broadcast
data: data dictionary of string keys and cpu tensor values.
datatype: torch data type of all tensors in data associated
with keys.
|
broadcast_data
|
python
|
THUDM/GLM
|
mpu/data.py
|
https://github.com/THUDM/GLM/blob/master/mpu/data.py
|
MIT
|
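A hedged sketch of how the helper is used in a training step: only the source rank of each model parallel group needs to load a real batch, while the remaining ranks pass None and receive the tensors from the broadcast. This assumes torch.distributed and the mpu groups are already initialized and that the mpu package re-exports broadcast_data:

# Hypothetical sketch; requires an initialized distributed run (e.g. via torchrun)
# and an initialized model parallel state (see initialize_model_parallel below).
import torch
import mpu

keys = ['text', 'loss_mask']
datatype = torch.int64

if mpu.get_model_parallel_rank() == 0:
    # The source rank of each model parallel group loads the real batch.
    data = {'text': torch.randint(0, 30000, (8, 512), dtype=torch.int64),
            'loss_mask': torch.ones(8, 512, dtype=torch.int64)}
else:
    data = None  # other ranks receive the tensors via the broadcast

batch = mpu.broadcast_data(keys, data, datatype)
tokens, loss_mask = batch['text'], batch['loss_mask'].float()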
def clip_grad_norm(parameters, max_norm, norm_type=2):
"""Clips gradient norm of an iterable of parameters.
This is adapted from torch.nn.utils.clip_grad.clip_grad_norm_ and
added functionality to handle model parallel parameters. Note that
the gradients are modified in place.
Arguments:
parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a
single Tensor that will have gradients normalized
max_norm (float or int): max norm of the gradients
norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for
infinity norm.
Returns:
Total norm of the parameters (viewed as a single vector).
"""
if isinstance(parameters, torch.Tensor):
parameters = [parameters]
parameters = list(filter(lambda p: p.grad is not None, parameters))
max_norm = float(max_norm)
norm_type = float(norm_type)
if norm_type == inf:
total_norm = max(p.grad.data.abs().max() for p in parameters)
total_norm_cuda = torch.cuda.FloatTensor([float(total_norm)])
# Take max across all GPUs.
torch.distributed.all_reduce(total_norm_cuda,
op=torch.distributed.ReduceOp.MAX,
group=get_model_parallel_group())
total_norm = total_norm_cuda[0].item()
else:
total_norm = 0
for p in parameters:
if p.model_parallel or (get_model_parallel_rank() == 0):
param_norm = p.grad.data.norm(norm_type)
total_norm += param_norm.item() ** norm_type
# Sum across all model parallel GPUs.
total_norm_cuda = torch.cuda.FloatTensor([float(total_norm)])
torch.distributed.all_reduce(total_norm_cuda,
op=torch.distributed.ReduceOp.SUM,
group=get_model_parallel_group())
total_norm = total_norm_cuda[0].item() ** (1. / norm_type)
clip_coef = max_norm / (total_norm + 1e-6)
if clip_coef < 1:
for p in parameters:
p.grad.data.mul_(clip_coef)
return total_norm
|
Clips gradient norm of an iterable of parameters.
This is adapted from torch.nn.utils.clip_grad.clip_grad_norm_ and
added functionality to handle model parallel parameters. Note that
the gradients are modified in place.
Arguments:
parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a
single Tensor that will have gradients normalized
max_norm (float or int): max norm of the gradients
norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for
infinity norm.
Returns:
Total norm of the parameters (viewed as a single vector).
|
clip_grad_norm
|
python
|
THUDM/GLM
|
mpu/grads.py
|
https://github.com/THUDM/GLM/blob/master/mpu/grads.py
|
MIT
|
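A schematic fragment showing where this clipping sits in a training step; the call mirrors torch.nn.utils.clip_grad_norm_, but the mpu version must be used whenever parameters are split across model parallel ranks so the norm is reduced over the whole group. Everything referenced below is assumed context, not code from the repository:

# Hypothetical fragment; assumes an initialized distributed/model-parallel run,
# a `model` built from mpu layers (so parameters carry .model_parallel), an
# `optimizer`, and an already computed scalar `loss`. mpu is assumed to
# re-export clip_grad_norm from mpu/grads.py.
import mpu

loss.backward()
grad_norm = mpu.clip_grad_norm(model.parameters(), max_norm=1.0)  # group-wide norm
optimizer.step()
optimizer.zero_grad()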
def initialize_model_parallel(model_parallel_size_):
"""
Initialize model data parallel groups.
Arguments:
model_parallel_size: number of GPUs used to parallelize model.
Let's say we have a total of 8 GPUs denoted by g0 ... g7 and we
use 2 GPUs to parallelize the model. The present function will
    create 4 model parallel groups and 2 data parallel groups as:
4 model parallel groups:
[g0, g1], [g2, g3], [g4, g5], [g6, g7]
2 data parallel groups:
[g0, g2, g4, g6], [g1, g3, g5, g7]
Note that for efficiency, the caller should make sure adjacent ranks
are on the same DGX box. For example if we are using 2 DGX-1 boxes
with a total of 16 GPUs, rank 0 to 7 belong to the first box and
ranks 8 to 15 belong to the second box.
"""
if torch.distributed.get_rank() == 0:
print('> initializing model parallel with size {}'.format(
model_parallel_size_))
# Get world size and rank. Ensure some consistencies.
assert torch.distributed.is_initialized()
world_size = torch.distributed.get_world_size()
model_parallel_size = min(model_parallel_size_, world_size)
ensure_divisibility(world_size, model_parallel_size)
rank = torch.distributed.get_rank()
# Build the data parallel groups.
global _DATA_PARALLEL_GROUP
assert _DATA_PARALLEL_GROUP is None, \
'data parallel group is already initialized'
for i in range(model_parallel_size):
ranks = range(i, world_size, model_parallel_size)
group = torch.distributed.new_group(ranks)
if i == (rank % model_parallel_size):
_DATA_PARALLEL_GROUP = group
# Build the model parallel groups.
global _MODEL_PARALLEL_GROUP
assert _MODEL_PARALLEL_GROUP is None, \
'model parallel group is already initialized'
for i in range(world_size // model_parallel_size):
ranks = range(i * model_parallel_size,
(i + 1) * model_parallel_size)
group = torch.distributed.new_group(ranks)
if i == (rank // model_parallel_size):
_MODEL_PARALLEL_GROUP = group
|
Initialize model data parallel groups.
Arguments:
model_parallel_size: number of GPUs used to parallelize model.
Let's say we have a total of 8 GPUs denoted by g0 ... g7 and we
use 2 GPUs to parallelize the model. The present function will
    create 4 model parallel groups and 2 data parallel groups as:
4 model parallel groups:
[g0, g1], [g2, g3], [g4, g5], [g6, g7]
2 data parallel groups:
[g0, g2, g4, g6], [g1, g3, g5, g7]
Note that for efficiency, the caller should make sure adjacent ranks
are on the same DGX box. For example if we are using 2 DGX-1 boxes
with a total of 16 GPUs, rank 0 to 7 belong to the first box and
ranks 8 to 15 belong to the second box.
|
initialize_model_parallel
|
python
|
THUDM/GLM
|
mpu/initialize.py
|
https://github.com/THUDM/GLM/blob/master/mpu/initialize.py
|
MIT
|
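The group layout in the docstring follows from two index formulas: ranks that share the same rank % model_parallel_size form a data parallel group, and each contiguous block of model_parallel_size ranks forms a model parallel group. A purely arithmetic check of the 8-GPU example, needing no GPUs or torch.distributed:

# Reproduces the grouping from the docstring without creating process groups.
world_size, model_parallel_size = 8, 2

data_parallel_groups = [list(range(i, world_size, model_parallel_size))
                        for i in range(model_parallel_size)]
model_parallel_groups = [list(range(i * model_parallel_size,
                                    (i + 1) * model_parallel_size))
                         for i in range(world_size // model_parallel_size)]

print(data_parallel_groups)   # [[0, 2, 4, 6], [1, 3, 5, 7]]
print(model_parallel_groups)  # [[0, 1], [2, 3], [4, 5], [6, 7]]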
def model_parallel_is_initialized():
"""Check if model and data parallel groups are initialized."""
if _MODEL_PARALLEL_GROUP is None or _DATA_PARALLEL_GROUP is None:
return False
return True
|
Check if model and data parallel groups are initialized.
|
model_parallel_is_initialized
|
python
|
THUDM/GLM
|
mpu/initialize.py
|
https://github.com/THUDM/GLM/blob/master/mpu/initialize.py
|
MIT
|
def get_model_parallel_group():
"""Get the model parallel group the caller rank belongs to."""
assert _MODEL_PARALLEL_GROUP is not None, \
'model parallel group is not initialized'
return _MODEL_PARALLEL_GROUP
|
Get the model parallel group the caller rank belongs to.
|
get_model_parallel_group
|
python
|
THUDM/GLM
|
mpu/initialize.py
|
https://github.com/THUDM/GLM/blob/master/mpu/initialize.py
|
MIT
|
def get_data_parallel_group():
"""Get the data parallel group the caller rank belongs to."""
assert _DATA_PARALLEL_GROUP is not None, \
'data parallel group is not initialized'
return _DATA_PARALLEL_GROUP
|
Get the data parallel group the caller rank belongs to.
|
get_data_parallel_group
|
python
|
THUDM/GLM
|
mpu/initialize.py
|
https://github.com/THUDM/GLM/blob/master/mpu/initialize.py
|
MIT
|
def get_model_parallel_src_rank():
"""Calculate the global rank corresponding to a local rank zeor
in the model parallel group."""
global_rank = torch.distributed.get_rank()
local_world_size = get_model_parallel_world_size()
return (global_rank // local_world_size) * local_world_size
|
Calculate the global rank corresponding to a local rank zero
in the model parallel group.
|
get_model_parallel_src_rank
|
python
|
THUDM/GLM
|
mpu/initialize.py
|
https://github.com/THUDM/GLM/blob/master/mpu/initialize.py
|
MIT
|
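The source rank is simply the first rank of the contiguous model parallel block the caller belongs to: integer division finds the block, multiplication recovers its first member. A quick arithmetic check with a model parallel world size of 2:

# Pure arithmetic; mirrors the formula in get_model_parallel_src_rank.
local_world_size = 2
for global_rank in range(8):
    src = (global_rank // local_world_size) * local_world_size
    print(global_rank, '->', src)  # 0->0, 1->0, 2->2, 3->2, 4->4, 5->4, 6->6, 7->6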