Dataset schema (one row per source file):
    repo              string    length 1 to 99
    file              string    length 13 to 215
    code              string    length 12 to 59.2M
    file_length       int64     12 to 59.2M
    avg_line_length   float64   3.82 to 1.48M
    max_line_length   int64     12 to 2.51M
    extension_type    string    1 distinct value (py)
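The rows below can be explored programmatically. A minimal sketch, assuming the table has been exported locally as a Parquet file (the filename code_files.parquet is a placeholder, not part of the dump):

import pandas as pd

# Hypothetical local copy of the table described by the schema above.
df = pd.read_parquet('code_files.parquet')

# Keep Python files from one repository and sort them by size.
blp_py = df[(df['repo'] == 'blp') & (df['extension_type'] == 'py')]
blp_py = blp_py.sort_values('file_length', ascending=False)
print(blp_py[['file', 'file_length', 'avg_line_length', 'max_line_length']])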
repo: blp
file: blp-master/retrieval.py
import os
import os.path as osp
from collections import defaultdict

import torch
import torch.nn.functional as F
from tqdm import tqdm
from transformers import BertTokenizer
from logging import Logger
from sacred import Experiment
from sacred.observers import MongoObserver
from sacred.run import Run
import json
import pytrec_eval
import numpy as np
import scipy.stats
import nltk

from data import DROPPED
from data import GloVeTokenizer
import utils

OUT_PATH = 'output/'
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

ex = Experiment()
ex.logger = utils.get_logger()
# Set up database logs
uri = os.environ.get('DB_URI')
database = os.environ.get('DB_NAME')
if all([uri, database]):
    ex.observers.append(MongoObserver(uri, database))


def remove_stopwords(text):
    tokens = nltk.word_tokenize(text)
    text = ' '.join([t for t in tokens if t.lower() not in DROPPED])
    return text


@ex.config
def config():
    dim = 128
    model = 'bert-dkrl'
    rel_model = 'transe'
    max_len = 64
    emb_batch_size = 512
    checkpoint = 'output/model-348.pt'
    run_file = 'data/DBpedia-Entity/runs/v2/bm25f-ca_v2.run'
    queries_file = 'data/DBpedia-Entity/collection/v2/queries-v2_stopped.txt'
    descriptions_file = 'data/DBpedia-Entity/runs/v2/' \
                        'bm25f-ca_v2-descriptions.txt'
    qrels_file = 'data/DBpedia-Entity/collection/v2/qrels-v2.txt'
    folds_file = 'data/DBpedia-Entity/collection/v2/folds/all_queries.json'


@ex.capture
def embed_entities(dim, model, rel_model, max_len, emb_batch_size,
                   checkpoint, run_file, descriptions_file, drop_stopwords,
                   _log: Logger):
    def encode_batch(batch):
        tokenized_data = tokenizer.batch_encode_plus(batch,
                                                     max_length=max_len,
                                                     pad_to_max_length=True,
                                                     return_token_type_ids=False,
                                                     return_tensors='pt')
        tokens = tokenized_data['input_ids'].to(device)
        masks = tokenized_data['attention_mask'].float().to(device)
        return encoder.encode(tokens.to(device), masks.to(device))

    if model.startswith('bert') or model == 'blp':
        tokenizer = BertTokenizer.from_pretrained('bert-base-cased')
    else:
        tokenizer = GloVeTokenizer('data/glove/glove.6B.300d-maps.pt')

    encoder = utils.get_model(model, dim, rel_model,
                              encoder_name='bert-base-cased',
                              loss_fn='margin', num_entities=0,
                              num_relations=1, regularizer=0.0).to(device)
    encoder = torch.nn.DataParallel(encoder)
    state_dict = torch.load(checkpoint, map_location=device)
    # We don't need relation embeddings for this task
    state_dict.pop('module.rel_emb.weight', None)
    encoder.load_state_dict(state_dict, strict=False)
    encoder = encoder.module
    for param in encoder.parameters():
        param.requires_grad = False

    # Encode entity descriptions
    run_file_name = osp.splitext(osp.basename(run_file))[0]
    get_entity_embeddings = True
    qent_checkpoint = osp.join(osp.dirname(checkpoint),
                               f'{run_file_name}-qent-{osp.basename(checkpoint)}')
    if osp.exists(qent_checkpoint):
        _log.info(f'Loading entity embeddings from {qent_checkpoint}')
        ent_embeddings = torch.load(qent_checkpoint, map_location=device)
        get_entity_embeddings = False
    else:
        ent_embeddings = []

    entity2idx = dict()
    descriptions_batch = []
    progress = tqdm(desc='Encoding entity descriptions',
                    disable=not get_entity_embeddings)
    with open(descriptions_file) as f:
        for i, line in enumerate(f):
            values = line.strip().split('\t')
            entity = values[0]
            entity2idx[entity] = i

            if get_entity_embeddings:
                text = ' '.join(values[1:])
                if drop_stopwords:
                    text = remove_stopwords(text)
                descriptions_batch.append(text)

                if len(descriptions_batch) == emb_batch_size:
                    embedding = encode_batch(descriptions_batch)
                    ent_embeddings.append(embedding)
                    descriptions_batch = []
                    progress.update(emb_batch_size)

    if get_entity_embeddings:
        if len(descriptions_batch) > 0:
            embedding = encode_batch(descriptions_batch)
            ent_embeddings.append(embedding)

        ent_embeddings = torch.cat(ent_embeddings)
        torch.save(ent_embeddings, qent_checkpoint)
        _log.info(f'Saved entity embeddings to {qent_checkpoint}')

    progress.close()

    return ent_embeddings, entity2idx, encoder, tokenizer


def rerank_on_fold(fold, qrels, baseline_run, id2query, tokenizer, encoder,
                   entity2idx, ent_embeddings, alpha, drop_stopwords):
    train_run = dict()
    qrel_run = dict()
    for query_id in fold:
        results = baseline_run[query_id]

        # Encode query
        query = id2query[query_id]
        if drop_stopwords:
            query = remove_stopwords(query)
        query_tokens = tokenizer.encode(query, return_tensors='pt',
                                        max_length=64)
        query_embedding = encoder.encode(query_tokens.to(device),
                                         text_mask=None)

        # Get embeddings of entities to rerank for this query
        ent_ids_to_rerank = []
        original_scores = []
        selected_results = []
        missing_results = []
        missing_scores = []
        for entity, orig_score in results.items():
            if entity in entity2idx:
                ent_ids_to_rerank.append(entity2idx[entity])
                original_scores.append(orig_score)
                selected_results.append(entity)
            else:
                missing_results.append(entity)
                missing_scores.append(orig_score)

        candidate_embeddings = ent_embeddings[ent_ids_to_rerank]
        candidate_embeddings = F.normalize(candidate_embeddings, dim=-1)
        query_embedding = F.normalize(query_embedding, dim=-1)

        # Compute relevance
        scores = candidate_embeddings @ query_embedding.t()
        scores = scores.flatten().cpu().tolist() + [0] * len(missing_scores)

        results_scores = zip(selected_results + missing_results,
                             scores,
                             original_scores + missing_scores)
        results_scores = [[result, alpha * s1 + (1 - alpha) * s2]
                          for result, s1, s2 in results_scores]

        train_run[query_id] = {r: s for r, s in results_scores}
        qrel_run[query_id] = qrels[query_id]

    evaluator = pytrec_eval.RelevanceEvaluator(qrel_run, {'ndcg_cut_100'})
    train_results = evaluator.evaluate(train_run)
    mean = np.mean([res['ndcg_cut_100'] for res in train_results.values()])

    return mean, train_run


@ex.automain
def rerank(model, rel_model, run_file, queries_file, qrels_file, folds_file,
           _run: Run, _log: Logger):
    drop_stopwords = model in {'bert-bow', 'bert-dkrl',
                               'glove-bow', 'glove-dkrl'}

    ent_embeddings, entity2idx, encoder, tokenizer = embed_entities(
        drop_stopwords=drop_stopwords)

    # Read queries
    id2query = dict()
    with open(queries_file) as f:
        for line in f:
            values = line.strip().split('\t')
            query_id = values[0]
            query = ' '.join(values[1:])
            id2query[query_id] = query

    # Read baseline and ground truth rankings
    baseline_run = defaultdict(dict)
    qrels = defaultdict(dict)
    for query_dict, file in ((baseline_run, run_file), (qrels, qrels_file)):
        with open(file) as f:
            for line in f:
                values = line.strip().split()
                if len(values) >= 6:
                    query_id, q0, entity, rank, score, *_ = values
                    score = float(score)
                else:
                    query_id, q0, entity, score = values
                    score = int(score)

                query_dict[query_id][entity] = score

    # Read query folds
    with open(folds_file) as f:
        folds = json.load(f)

    # Keep only query type of interest
    new_baseline_run = {}
    new_qrels = {}
    for f in folds.values():
        relevant_queries = f['testing']
        for query_id in relevant_queries:
            new_baseline_run.update({query_id: baseline_run[query_id]})
            new_qrels.update({query_id: qrels[query_id]})
    baseline_run = new_baseline_run
    qrels = new_qrels

    # Choose best reranking on training set
    alpha_choices = np.linspace(0, 1, 20)
    test_run = dict()
    for i, (idx, fold) in enumerate(folds.items()):
        train_queries = fold['training']
        best_result = 0.0
        best_alpha = alpha_choices[0]
        for alpha in alpha_choices:
            result, _ = rerank_on_fold(train_queries, qrels, baseline_run,
                                       id2query, tokenizer, encoder,
                                       entity2idx, ent_embeddings, alpha,
                                       drop_stopwords)
            if result > best_result:
                best_result = result
                best_alpha = alpha

        _log.info(f'[Fold {i + 1}/{len(folds)}]'
                  f' Best training result: {best_result:.3f}'
                  f' with alpha={best_alpha:.3}')

        test_queries = fold['testing']
        fold_mean, fold_run = rerank_on_fold(test_queries, qrels,
                                             baseline_run, id2query,
                                             tokenizer, encoder, entity2idx,
                                             ent_embeddings, best_alpha,
                                             drop_stopwords)
        _log.info(f'Test fold result: {fold_mean:.3f}')
        test_run.update(fold_run)

    _log.info(f'Finished hyperparameter search')
    _log.info(f'Saving run file')
    output_run_path = osp.join(OUT_PATH, f'{_run._id}.run')
    with open(output_run_path, 'w') as f:
        for query, results in test_run.items():
            ranking = sorted(results.items(), key=lambda x: x[1],
                             reverse=True)
            for i, (entity, score) in enumerate(ranking):
                f.write(f'{query} Q0 {entity} {i + 1} {score} '
                        f'{model}-{rel_model}\n')

    metrics = {'ndcg_cut_10', 'ndcg_cut_100'}
    evaluator = pytrec_eval.RelevanceEvaluator(qrels, metrics)
    baseline_results = evaluator.evaluate(baseline_run)
    # This shouldn't be necessary, but there seems to be a bug that requires
    # to instantiate the evaluator again, otherwise only one metric is obtained
    # See https://github.com/cvangysel/pytrec_eval/issues/22
    evaluator = pytrec_eval.RelevanceEvaluator(qrels, metrics)
    test_results = evaluator.evaluate(test_run)

    for metric in metrics:
        baseline_mean = np.mean(
            [res[metric] for res in baseline_results.values()])
        test_mean = np.mean([res[metric] for res in test_results.values()])
        _log.info(f'Metric: {metric}')
        _log.info(f'Baseline result: {baseline_mean:.3f}')
        _log.info(f'Test result: {test_mean:.3f}')

        first_scores = [baseline_results[query_id][metric]
                        for query_id in baseline_results]
        second_scores = [test_results[query_id][metric]
                         for query_id in baseline_results]
        _log.info(scipy.stats.ttest_rel(first_scores, second_scores))
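The reranking rule in rerank_on_fold above is a convex combination of the dense cosine similarity and the baseline BM25F score. A minimal standalone sketch with toy tensors (not part of retrieval.py; the scores and dimensions are made up):

import torch
import torch.nn.functional as F

# Toy stand-ins for one query: unit-norm entity/query embeddings and BM25F scores.
query_emb = F.normalize(torch.randn(1, 128), dim=-1)
cand_embs = F.normalize(torch.randn(5, 128), dim=-1)
bm25_scores = torch.tensor([12.3, 11.7, 9.8, 9.1, 8.4])

alpha = 0.5  # in retrieval.py this weight is chosen on the training folds
dense_scores = (cand_embs @ query_emb.t()).flatten()
combined = alpha * dense_scores + (1 - alpha) * bm25_scores
reranked = torch.argsort(combined, descending=True)
print(reranked.tolist())  # candidate indices in their new order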
file_length: 11,801   avg_line_length: 37.194175   max_line_length: 82   extension_type: py

repo: blp
file: blp-master/models.py
import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers import BertModel


class LinkPrediction(nn.Module):
    """A general link prediction model with a lookup table for relation
    embeddings."""
    def __init__(self, dim, rel_model, loss_fn, num_relations, regularizer):
        super().__init__()
        self.dim = dim
        self.normalize_embs = False
        self.regularizer = regularizer

        if rel_model == 'transe':
            self.score_fn = transe_score
            self.normalize_embs = True
        elif rel_model == 'distmult':
            self.score_fn = distmult_score
        elif rel_model == 'complex':
            self.score_fn = complex_score
        elif rel_model == 'simple':
            self.score_fn = simple_score
        else:
            raise ValueError(f'Unknown relational model {rel_model}.')

        self.rel_emb = nn.Embedding(num_relations, self.dim)
        nn.init.xavier_uniform_(self.rel_emb.weight.data)

        if loss_fn == 'margin':
            self.loss_fn = margin_loss
        elif loss_fn == 'nll':
            self.loss_fn = nll_loss
        else:
            raise ValueError(f'Unknown loss function {loss_fn}')

    def encode(self, *args, **kwargs):
        ent_emb = self._encode_entity(*args, **kwargs)
        if self.normalize_embs:
            ent_emb = F.normalize(ent_emb, dim=-1)

        return ent_emb

    def _encode_entity(self, *args, **kwargs):
        raise NotImplementedError

    def forward(self, *args, **kwargs):
        raise NotImplementedError

    def compute_loss(self, ent_embs, rels, neg_idx):
        batch_size = ent_embs.shape[0]

        # Scores for positive samples
        rels = self.rel_emb(rels)
        heads, tails = torch.chunk(ent_embs, chunks=2, dim=1)
        pos_scores = self.score_fn(heads, tails, rels)

        if self.regularizer > 0:
            reg_loss = self.regularizer * l2_regularization(heads, tails, rels)
        else:
            reg_loss = 0

        # Scores for negative samples
        neg_embs = ent_embs.view(batch_size * 2, -1)[neg_idx]
        heads, tails = torch.chunk(neg_embs, chunks=2, dim=2)
        neg_scores = self.score_fn(heads.squeeze(), tails.squeeze(), rels)

        model_loss = self.loss_fn(pos_scores, neg_scores)
        return model_loss + reg_loss


class InductiveLinkPrediction(LinkPrediction):
    """Description-based Link Prediction (DLP)."""
    def _encode_entity(self, text_tok, text_mask):
        raise NotImplementedError

    def forward(self, text_tok, text_mask, rels=None, neg_idx=None):
        batch_size, _, num_text_tokens = text_tok.shape

        # Encode text into an entity representation from its description
        ent_embs = self.encode(text_tok.view(-1, num_text_tokens),
                               text_mask.view(-1, num_text_tokens))

        if rels is None and neg_idx is None:
            # Forward is being used to compute entity embeddings only
            out = ent_embs
        else:
            # Forward is being used to compute link prediction loss
            ent_embs = ent_embs.view(batch_size, 2, -1)
            out = self.compute_loss(ent_embs, rels, neg_idx)

        return out


class BertEmbeddingsLP(InductiveLinkPrediction):
    """BERT for Link Prediction (BLP)."""
    def __init__(self, dim, rel_model, loss_fn, num_relations, encoder_name,
                 regularizer):
        super().__init__(dim, rel_model, loss_fn, num_relations, regularizer)
        self.encoder = BertModel.from_pretrained(encoder_name,
                                                 output_attentions=False,
                                                 output_hidden_states=False)
        hidden_size = self.encoder.config.hidden_size
        self.enc_linear = nn.Linear(hidden_size, self.dim, bias=False)

    def _encode_entity(self, text_tok, text_mask):
        # Extract BERT representation of [CLS] token
        embs = self.encoder(text_tok, text_mask)[0][:, 0]
        embs = self.enc_linear(embs)
        return embs


class WordEmbeddingsLP(InductiveLinkPrediction):
    """Description encoder with pretrained embeddings, obtained from BERT
    or a specified tensor file.
    """
    def __init__(self, rel_model, loss_fn, num_relations, regularizer,
                 dim=None, encoder_name=None, embeddings=None):
        if not encoder_name and not embeddings:
            raise ValueError('Must provide one of encoder_name or embeddings')

        if encoder_name is not None:
            encoder = BertModel.from_pretrained(encoder_name)
            embeddings = encoder.embeddings.word_embeddings
        else:
            emb_tensor = torch.load(embeddings)
            num_embeddings, embedding_dim = emb_tensor.shape
            embeddings = nn.Embedding(num_embeddings, embedding_dim)
            embeddings.weight.data = emb_tensor

        if dim is None:
            dim = embeddings.embedding_dim

        super().__init__(dim, rel_model, loss_fn, num_relations, regularizer)

        self.embeddings = embeddings

    def _encode_entity(self, text_tok, text_mask):
        raise NotImplementedError


class BOW(WordEmbeddingsLP):
    """Bag-of-words (BOW) description encoder, with BERT low-level
    embeddings.
    """
    def _encode_entity(self, text_tok, text_mask=None):
        if text_mask is None:
            text_mask = torch.ones_like(text_tok, dtype=torch.float)
        # Extract average of word embeddings
        embs = self.embeddings(text_tok)
        lengths = torch.sum(text_mask, dim=-1, keepdim=True)
        embs = torch.sum(text_mask.unsqueeze(dim=-1) * embs, dim=1)
        embs = embs / lengths
        return embs


class DKRL(WordEmbeddingsLP):
    """Description-Embodied Knowledge Representation Learning (DKRL) with CNN
    encoder, after
    Zuo, Yukun, et al. "Representation learning of knowledge graphs with
    entity attributes and multimedia descriptions."
    """
    def __init__(self, dim, rel_model, loss_fn, num_relations, regularizer,
                 encoder_name=None, embeddings=None):
        super().__init__(rel_model, loss_fn, num_relations, regularizer,
                         dim, encoder_name, embeddings)

        emb_dim = self.embeddings.embedding_dim
        self.conv1 = nn.Conv1d(emb_dim, self.dim, kernel_size=2)
        self.conv2 = nn.Conv1d(self.dim, self.dim, kernel_size=2)

    def _encode_entity(self, text_tok, text_mask):
        if text_mask is None:
            text_mask = torch.ones_like(text_tok, dtype=torch.float)
        # Extract word embeddings and mask padding
        embs = self.embeddings(text_tok) * text_mask.unsqueeze(dim=-1)

        # Reshape to (N, C, L)
        embs = embs.transpose(1, 2)
        text_mask = text_mask.unsqueeze(1)

        # Pass through CNN, adding padding for valid convolutions
        # and masking outputs due to padding
        embs = F.pad(embs, [0, 1])
        embs = self.conv1(embs)
        embs = embs * text_mask
        if embs.shape[2] >= 4:
            kernel_size = 4
        elif embs.shape[2] == 1:
            kernel_size = 1
        else:
            kernel_size = 2
        embs = F.max_pool1d(embs, kernel_size=kernel_size)
        text_mask = F.max_pool1d(text_mask, kernel_size=kernel_size)
        embs = torch.tanh(embs)
        embs = F.pad(embs, [0, 1])
        embs = self.conv2(embs)
        lengths = torch.sum(text_mask, dim=-1)
        embs = torch.sum(embs * text_mask, dim=-1) / lengths
        embs = torch.tanh(embs)

        return embs


class TransductiveLinkPrediction(LinkPrediction):
    def __init__(self, dim, rel_model, loss_fn, num_entities, num_relations,
                 regularizer):
        super().__init__(dim, rel_model, loss_fn, num_relations, regularizer)
        self.ent_emb = nn.Embedding(num_entities, dim)
        nn.init.xavier_uniform_(self.ent_emb.weight.data)

    def _encode_entity(self, entities):
        return self.ent_emb(entities)

    def forward(self, pos_pairs, rels, neg_idx):
        embs = self.encode(pos_pairs)
        return self.compute_loss(embs, rels, neg_idx)


def transe_score(heads, tails, rels):
    return -torch.norm(heads + rels - tails, dim=-1, p=1)


def distmult_score(heads, tails, rels):
    return torch.sum(heads * rels * tails, dim=-1)


def complex_score(heads, tails, rels):
    heads_re, heads_im = torch.chunk(heads, chunks=2, dim=-1)
    tails_re, tails_im = torch.chunk(tails, chunks=2, dim=-1)
    rels_re, rels_im = torch.chunk(rels, chunks=2, dim=-1)

    return torch.sum(rels_re * heads_re * tails_re +
                     rels_re * heads_im * tails_im +
                     rels_im * heads_re * tails_im -
                     rels_im * heads_im * tails_re,
                     dim=-1)


def simple_score(heads, tails, rels):
    heads_h, heads_t = torch.chunk(heads, chunks=2, dim=-1)
    tails_h, tails_t = torch.chunk(tails, chunks=2, dim=-1)
    rel_a, rel_b = torch.chunk(rels, chunks=2, dim=-1)

    return torch.sum(heads_h * rel_a * tails_t +
                     tails_h * rel_b * heads_t, dim=-1) / 2


def margin_loss(pos_scores, neg_scores):
    loss = 1 - pos_scores + neg_scores
    loss[loss < 0] = 0
    return loss.mean()


def nll_loss(pos_scores, neg_scores):
    return (F.softplus(-pos_scores).mean() + F.softplus(neg_scores).mean()) / 2


def l2_regularization(heads, tails, rels):
    reg_loss = 0.0
    for tensor in (heads, tails, rels):
        reg_loss += torch.mean(tensor ** 2)

    return reg_loss / 3.0
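The scoring functions at the end of models.py all act on equally shaped head, relation, and tail tensors. A small sketch of the TransE rule on toy embeddings, mirroring transe_score above (the toy batch is made up):

import torch

# Toy batch of head/relation/tail embeddings (batch_size=2, dim=4).
heads = torch.randn(2, 4)
rels = torch.randn(2, 4)
tails = heads + rels  # perfect TransE triples: head + relation == tail

# Same rule as transe_score in models.py: negative L1 distance.
scores = -torch.norm(heads + rels - tails, dim=-1, p=1)
print(scores)  # ~0 for a perfect triple, more negative for worse ones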
file_length: 9,514   avg_line_length: 34.636704   max_line_length: 79   extension_type: py

repo: blp
file: blp-master/train.py
import os import os.path as osp import networkx as nx import torch from torch.optim import Adam from torch.utils.data import DataLoader from sacred.run import Run from logging import Logger from sacred import Experiment from sacred.observers import MongoObserver from transformers import BertTokenizer, get_linear_schedule_with_warmup from collections import defaultdict import numpy as np from sklearn.linear_model import LogisticRegression from sklearn.metrics import accuracy_score, balanced_accuracy_score import joblib from data import CATEGORY_IDS from data import GraphDataset, TextGraphDataset, GloVeTokenizer import models import utils OUT_PATH = 'output/' device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') ex = Experiment() ex.logger = utils.get_logger() # Set up database logs uri = os.environ.get('DB_URI') database = os.environ.get('DB_NAME') if all([uri, database]): ex.observers.append(MongoObserver(uri, database)) @ex.config def config(): dataset = 'umls' inductive = True dim = 128 model = 'blp' rel_model = 'transe' loss_fn = 'margin' encoder_name = 'bert-base-cased' regularizer = 0 max_len = 32 num_negatives = 64 lr = 2e-5 use_scheduler = True batch_size = 64 emb_batch_size = 512 eval_batch_size = 64 max_epochs = 40 checkpoint = None use_cached_text = False @ex.capture @torch.no_grad() def eval_link_prediction(model, triples_loader, text_dataset, entities, epoch, emb_batch_size, _run: Run, _log: Logger, prefix='', max_num_batches=None, filtering_graph=None, new_entities=None, return_embeddings=False): compute_filtered = filtering_graph is not None mrr_by_position = torch.zeros(3, dtype=torch.float).to(device) mrr_pos_counts = torch.zeros_like(mrr_by_position) rel_categories = triples_loader.dataset.rel_categories.to(device) mrr_by_category = torch.zeros([2, 4], dtype=torch.float).to(device) mrr_cat_count = torch.zeros([1, 4], dtype=torch.float).to(device) hit_positions = [1, 3, 10] k_values = torch.tensor([hit_positions], device=device) hits_at_k = {pos: 0.0 for pos in hit_positions} mrr = 0.0 mrr_filt = 0.0 hits_at_k_filt = {pos: 0.0 for pos in hit_positions} if device != torch.device('cpu'): model = model.module if isinstance(model, models.InductiveLinkPrediction): num_entities = entities.shape[0] if compute_filtered: max_ent_id = max(filtering_graph.nodes) else: max_ent_id = entities.max() ent2idx = utils.make_ent2idx(entities, max_ent_id) else: # Transductive models have a lookup table of embeddings num_entities = model.ent_emb.num_embeddings ent2idx = torch.arange(num_entities) entities = ent2idx # Create embedding lookup table for evaluation ent_emb = torch.zeros((num_entities, model.dim), dtype=torch.float, device=device) idx = 0 num_iters = np.ceil(num_entities / emb_batch_size) iters_count = 0 while idx < num_entities: # Get a batch of entity IDs and encode them batch_ents = entities[idx:idx + emb_batch_size] if isinstance(model, models.InductiveLinkPrediction): # Encode with entity descriptions data = text_dataset.get_entity_description(batch_ents) text_tok, text_mask, text_len = data batch_emb = model(text_tok.unsqueeze(1).to(device), text_mask.unsqueeze(1).to(device)) else: # Encode from lookup table batch_emb = model(batch_ents) ent_emb[idx:idx + batch_ents.shape[0]] = batch_emb iters_count += 1 if iters_count % np.ceil(0.2 * num_iters) == 0: _log.info(f'[{idx + batch_ents.shape[0]:,}/{num_entities:,}]') idx += emb_batch_size ent_emb = ent_emb.unsqueeze(0) num_predictions = 0 _log.info('Computing metrics on set of triples') total = len(triples_loader) 
if max_num_batches is None else max_num_batches for i, triples in enumerate(triples_loader): if max_num_batches is not None and i == max_num_batches: break heads, tails, rels = torch.chunk(triples, chunks=3, dim=1) # Map entity IDs to positions in ent_emb heads = ent2idx[heads].to(device) tails = ent2idx[tails].to(device) assert heads.min() >= 0 assert tails.min() >= 0 # Embed triple head_embs = ent_emb.squeeze()[heads] tail_embs = ent_emb.squeeze()[tails] rel_embs = model.rel_emb(rels.to(device)) # Score all possible heads and tails heads_predictions = model.score_fn(ent_emb, tail_embs, rel_embs) tails_predictions = model.score_fn(head_embs, ent_emb, rel_embs) pred_ents = torch.cat((heads_predictions, tails_predictions)) true_ents = torch.cat((heads, tails)) num_predictions += pred_ents.shape[0] reciprocals, hits = utils.get_metrics(pred_ents, true_ents, k_values) mrr += reciprocals.sum().item() hits_sum = hits.sum(dim=0) for j, k in enumerate(hit_positions): hits_at_k[k] += hits_sum[j].item() if compute_filtered: filters = utils.get_triple_filters(triples, filtering_graph, num_entities, ent2idx) heads_filter, tails_filter = filters # Filter entities by assigning them the lowest score in the batch filter_mask = torch.cat((heads_filter, tails_filter)).to(device) pred_ents[filter_mask] = pred_ents.min() - 1.0 reciprocals, hits = utils.get_metrics(pred_ents, true_ents, k_values) mrr_filt += reciprocals.sum().item() hits_sum = hits.sum(dim=0) for j, k in enumerate(hit_positions): hits_at_k_filt[k] += hits_sum[j].item() reciprocals = reciprocals.squeeze() if new_entities is not None: by_position = utils.split_by_new_position(triples, reciprocals, new_entities) batch_mrr_by_position, batch_mrr_pos_counts = by_position mrr_by_position += batch_mrr_by_position mrr_pos_counts += batch_mrr_pos_counts if triples_loader.dataset.has_rel_categories: by_category = utils.split_by_category(triples, reciprocals, rel_categories) batch_mrr_by_cat, batch_mrr_cat_count = by_category mrr_by_category += batch_mrr_by_cat mrr_cat_count += batch_mrr_cat_count if (i + 1) % int(0.2 * total) == 0: _log.info(f'[{i + 1:,}/{total:,}]') _log.info(f'The total number of predictions is {num_predictions:,}') for hits_dict in (hits_at_k, hits_at_k_filt): for k in hits_dict: hits_dict[k] /= num_predictions mrr = mrr / num_predictions mrr_filt = mrr_filt / num_predictions log_str = f'{prefix} mrr: {mrr:.4f} ' _run.log_scalar(f'{prefix}_mrr', mrr, epoch) for k, value in hits_at_k.items(): log_str += f'hits@{k}: {value:.4f} ' _run.log_scalar(f'{prefix}_hits@{k}', value, epoch) if compute_filtered: log_str += f'mrr_filt: {mrr_filt:.4f} ' _run.log_scalar(f'{prefix}_mrr_filt', mrr_filt, epoch) for k, value in hits_at_k_filt.items(): log_str += f'hits@{k}_filt: {value:.4f} ' _run.log_scalar(f'{prefix}_hits@{k}_filt', value, epoch) _log.info(log_str) if new_entities is not None and compute_filtered: mrr_pos_counts[mrr_pos_counts < 1.0] = 1.0 mrr_by_position = mrr_by_position / mrr_pos_counts log_str = '' for i, t in enumerate((f'{prefix}_mrr_filt_both_new', f'{prefix}_mrr_filt_head_new', f'{prefix}_mrr_filt_tail_new')): value = mrr_by_position[i].item() log_str += f'{t}: {value:.4f} ' _run.log_scalar(t, value, epoch) _log.info(log_str) if compute_filtered and triples_loader.dataset.has_rel_categories: mrr_cat_count[mrr_cat_count < 1.0] = 1.0 mrr_by_category = mrr_by_category / mrr_cat_count for i, case in enumerate(['pred_head', 'pred_tail']): log_str = f'{case} ' for cat, cat_id in CATEGORY_IDS.items(): log_str += f'{cat}_mrr: 
{mrr_by_category[i, cat_id]:.4f} ' _log.info(log_str) if return_embeddings: out = (mrr, ent_emb) else: out = (mrr, None) return out @ex.command def link_prediction(dataset, inductive, dim, model, rel_model, loss_fn, encoder_name, regularizer, max_len, num_negatives, lr, use_scheduler, batch_size, emb_batch_size, eval_batch_size, max_epochs, checkpoint, use_cached_text, _run: Run, _log: Logger): drop_stopwords = model in {'bert-bow', 'bert-dkrl', 'glove-bow', 'glove-dkrl'} prefix = 'ind-' if inductive and model != 'transductive' else '' triples_file = f'data/{dataset}/{prefix}train.tsv' if device != torch.device('cpu'): num_devices = torch.cuda.device_count() if batch_size % num_devices != 0: raise ValueError(f'Batch size ({batch_size}) must be a multiple of' f' the number of CUDA devices ({num_devices})') _log.info(f'CUDA devices used: {num_devices}') else: num_devices = 1 _log.info('Training on CPU') if model == 'transductive': train_data = GraphDataset(triples_file, num_negatives, write_maps_file=True, num_devices=num_devices) else: if model.startswith('bert') or model == 'blp': tokenizer = BertTokenizer.from_pretrained(encoder_name) else: tokenizer = GloVeTokenizer('data/glove/glove.6B.300d-maps.pt') train_data = TextGraphDataset(triples_file, num_negatives, max_len, tokenizer, drop_stopwords, write_maps_file=True, use_cached_text=use_cached_text, num_devices=num_devices) train_loader = DataLoader(train_data, batch_size, shuffle=True, collate_fn=train_data.collate_fn, num_workers=0, drop_last=True) train_eval_loader = DataLoader(train_data, eval_batch_size) valid_data = GraphDataset(f'data/{dataset}/{prefix}dev.tsv') valid_loader = DataLoader(valid_data, eval_batch_size) test_data = GraphDataset(f'data/{dataset}/{prefix}test.tsv') test_loader = DataLoader(test_data, eval_batch_size) # Build graph with all triples to compute filtered metrics if dataset != 'Wikidata5M': graph = nx.MultiDiGraph() all_triples = torch.cat((train_data.triples, valid_data.triples, test_data.triples)) graph.add_weighted_edges_from(all_triples.tolist()) train_ent = set(train_data.entities.tolist()) train_val_ent = set(valid_data.entities.tolist()).union(train_ent) train_val_test_ent = set(test_data.entities.tolist()).union(train_val_ent) val_new_ents = train_val_ent.difference(train_ent) test_new_ents = train_val_test_ent.difference(train_val_ent) else: graph = None train_ent = set(train_data.entities.tolist()) train_val_ent = set(valid_data.entities.tolist()) train_val_test_ent = set(test_data.entities.tolist()) val_new_ents = test_new_ents = None _run.log_scalar('num_train_entities', len(train_ent)) train_ent = torch.tensor(list(train_ent)) train_val_ent = torch.tensor(list(train_val_ent)) train_val_test_ent = torch.tensor(list(train_val_test_ent)) model = utils.get_model(model, dim, rel_model, loss_fn, len(train_val_test_ent), train_data.num_rels, encoder_name, regularizer) if checkpoint is not None: model.load_state_dict(torch.load(checkpoint, map_location='cpu')) if device != torch.device('cpu'): model = torch.nn.DataParallel(model).to(device) optimizer = Adam(model.parameters(), lr=lr) total_steps = len(train_loader) * max_epochs if use_scheduler: warmup = int(0.2 * total_steps) scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup, num_training_steps=total_steps) best_valid_mrr = 0.0 checkpoint_file = osp.join(OUT_PATH, f'model-{_run._id}.pt') for epoch in range(1, max_epochs + 1): train_loss = 0 for step, data in enumerate(train_loader): loss = model(*data).mean() 
optimizer.zero_grad() loss.backward() optimizer.step() if use_scheduler: scheduler.step() train_loss += loss.item() if step % int(0.05 * len(train_loader)) == 0: _log.info(f'Epoch {epoch}/{max_epochs} ' f'[{step}/{len(train_loader)}]: {loss.item():.6f}') _run.log_scalar('batch_loss', loss.item()) _run.log_scalar('train_loss', train_loss / len(train_loader), epoch) if dataset != 'Wikidata5M': _log.info('Evaluating on sample of training set') eval_link_prediction(model, train_eval_loader, train_data, train_ent, epoch, emb_batch_size, prefix='train', max_num_batches=len(valid_loader)) _log.info('Evaluating on validation set') val_mrr, _ = eval_link_prediction(model, valid_loader, train_data, train_val_ent, epoch, emb_batch_size, prefix='valid') # Keep checkpoint of best performing model (based on raw MRR) if val_mrr > best_valid_mrr: best_valid_mrr = val_mrr torch.save(model.state_dict(), checkpoint_file) # Evaluate with best performing checkpoint if max_epochs > 0: model.load_state_dict(torch.load(checkpoint_file)) if dataset == 'Wikidata5M': graph = nx.MultiDiGraph() graph.add_weighted_edges_from(valid_data.triples.tolist()) _log.info('Evaluating on validation set (with filtering)') eval_link_prediction(model, valid_loader, train_data, train_val_ent, max_epochs + 1, emb_batch_size, prefix='valid', filtering_graph=graph, new_entities=val_new_ents) if dataset == 'Wikidata5M': graph = nx.MultiDiGraph() graph.add_weighted_edges_from(test_data.triples.tolist()) _log.info('Evaluating on test set') _, ent_emb = eval_link_prediction(model, test_loader, train_data, train_val_test_ent, max_epochs + 1, emb_batch_size, prefix='test', filtering_graph=graph, new_entities=test_new_ents, return_embeddings=True) # Save final entity embeddings obtained with trained encoder torch.save(ent_emb, osp.join(OUT_PATH, f'ent_emb-{_run._id}.pt')) torch.save(train_val_test_ent, osp.join(OUT_PATH, f'ents-{_run._id}.pt')) @ex.command def node_classification(dataset, checkpoint, _run: Run, _log: Logger): ent_emb = torch.load(f'output/ent_emb-{checkpoint}.pt', map_location='cpu') if isinstance(ent_emb, tuple): ent_emb = ent_emb[0] ent_emb = ent_emb.squeeze().numpy() num_embs, emb_dim = ent_emb.shape _log.info(f'Loaded {num_embs} embeddings with dim={emb_dim}') emb_ids = torch.load(f'output/ents-{checkpoint}.pt', map_location='cpu') ent2idx = utils.make_ent2idx(emb_ids, max_ent_id=emb_ids.max()).numpy() maps = torch.load(f'data/{dataset}/maps.pt') ent_ids = maps['ent_ids'] class2label = defaultdict(lambda: len(class2label)) splits = ['train', 'dev', 'test'] split_2data = dict() for split in splits: with open(f'data/{dataset}/{split}-ents-class.txt') as f: idx = [] labels = [] for line in f: entity, ent_class = line.strip().split() entity_id = ent_ids[entity] entity_idx = ent2idx[entity_id] idx.append(entity_idx) labels.append(class2label[ent_class]) x = ent_emb[idx] y = np.array(labels) split_2data[split] = (x, y) x_train, y_train = split_2data['train'] x_dev, y_dev = split_2data['dev'] x_test, y_test = split_2data['test'] best_dev_metric = 0.0 best_c = 0 for k in range(-4, 2): c = 10 ** -k model = LogisticRegression(C=c, multi_class='multinomial', max_iter=1000) model.fit(x_train, y_train) dev_preds = model.predict(x_dev) dev_acc = accuracy_score(y_dev, dev_preds) _log.info(f'{c:.3f} - {dev_acc:.3f}') if dev_acc > best_dev_metric: best_dev_metric = dev_acc best_c = c _log.info(f'Best regularization coefficient: {best_c:.4f}') model = LogisticRegression(C=best_c, multi_class='multinomial', max_iter=1000) x_train_all = 
np.concatenate((x_train, x_dev)) y_train_all = np.concatenate((y_train, y_dev)) model.fit(x_train_all, y_train_all) for metric_fn in (accuracy_score, balanced_accuracy_score): train_preds = model.predict(x_train_all) train_metric = metric_fn(y_train_all, train_preds) test_preds = model.predict(x_test) test_metric = metric_fn(y_test, test_preds) _log.info(f'Train {metric_fn.__name__}: {train_metric:.3f}') _log.info(f'Test {metric_fn.__name__}: {test_metric:.3f}') id_to_class = {v: k for k, v in class2label.items()} joblib.dump({'model': model, 'id_to_class': id_to_class}, osp.join('output', f'classifier-{checkpoint}.joblib')) ex.run_commandline()
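train.py pairs Adam with a linear warmup schedule over 20% of the total steps and calls scheduler.step() once per batch. A minimal sketch of that wiring, assuming a toy linear model in place of the link prediction encoder:

import torch
from torch.optim import Adam
from transformers import get_linear_schedule_with_warmup

# Toy model standing in for the encoder trained in train.py.
model = torch.nn.Linear(10, 1)
optimizer = Adam(model.parameters(), lr=2e-5)

steps_per_epoch, max_epochs = 10, 4
total_steps = steps_per_epoch * max_epochs
scheduler = get_linear_schedule_with_warmup(
    optimizer,
    num_warmup_steps=int(0.2 * total_steps),  # same 20% warmup fraction
    num_training_steps=total_steps)

for step in range(total_steps):
    loss = model(torch.randn(8, 10)).mean()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    scheduler.step()  # train.py advances the schedule once per batch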
file_length: 18,945   avg_line_length: 38.063918   max_line_length: 83   extension_type: py

repo: blp
file: blp-master/data/utils.py
import sys from tqdm import tqdm from argparse import ArgumentParser import networkx as nx import random import os.path as osp from collections import Counter, defaultdict import torch import rdflib def parse_triples(triples_file): """Read a file containing triples, with head, relation, and tail separated by space. Returns list of lists.""" triples = [] rel_counts = Counter() file = open(triples_file) for line in file: head, rel, tail = line.split() triples.append([head, tail, rel]) rel_counts[rel] += 1 return triples, rel_counts def read_entity_types(entity2type_file): type2entities = defaultdict(set) with open(entity2type_file) as f: for line in f: entity, label = line.strip().split() type2entities[label].add(entity) return dict(type2entities) def get_safely_removed_edges(graph, node, rel_counts, min_edges_left=100): """Get counts of edge removed by type, after safely removing a given node. Safely removing a node entails checking that no nodes are left disconnected, and not removing edge types with count less than a given amount. """ neighbors = set(nx.all_neighbors(graph, node)) removed_rel_counts = Counter() removed_edges = [] for m in neighbors: # Check if m has more than 2 neighbors (node, and potentially itself) # before continuing m_neighborhood = set(nx.all_neighbors(graph, m)) if len(m_neighborhood) > 2: # Check edges in both directions between node and m pair = [node, m] for i in range(2): edge_dict = graph.get_edge_data(*pair) if edge_dict is not None: # Check that removing the edges between node and m # does not leave less than min_edges_left edges = edge_dict.values() for edge in edges: rel = edge['weight'] edges_left = rel_counts[rel] - removed_rel_counts[rel] if edges_left >= min_edges_left: removed_rel_counts[rel] += 1 head, tail = pair removed_edges.append((head, tail, rel)) else: return None # Don't count self-loops twice if node == m: break pair = list(reversed(pair)) else: return None return removed_edges, removed_rel_counts def drop_entities(triples_file, train_size=0.8, valid_size=0.1, test_size=0.1, seed=0, types_file=None): """Drop entities from a graph, to create training, validation and test splits. Entities are dropped so that no disconnected nodes are left in the training graph. Dropped entities are distributed between disjoint validation and test sets. 
""" splits_sum = train_size + valid_size + test_size if splits_sum < 0 or splits_sum > 1: raise ValueError('Sum of split sizes must be between greater than 0' ' and less than or equal to 1.') use_types = types_file is not None if use_types: type2entities = read_entity_types(types_file) types = list(type2entities.keys()) random.seed(seed) graph = nx.MultiDiGraph() triples, rel_counts = parse_triples(triples_file) graph.add_weighted_edges_from(triples) original_num_edges = graph.number_of_edges() original_num_nodes = graph.number_of_nodes() print(f'Loaded graph with {graph.number_of_nodes():,} entities ' f'and {graph.number_of_edges():,} edges') dropped_entities = [] dropped_edges = dict() num_to_drop = int(original_num_nodes * (1 - train_size)) num_val = int(original_num_nodes * valid_size) num_test = int(original_num_nodes * test_size) print(f'Removing {num_to_drop:,} entities...') progress = tqdm(total=num_to_drop, file=sys.stdout) while len(dropped_entities) < num_to_drop: if use_types: # Sample an entity with probability proportional to its type count # (minus 1 to keep at least one entity of any type) weights = [len(type2entities[t]) - 1 for t in types] rand_type = random.choices(types, weights, k=1)[0] rand_ent = random.choice(list(type2entities[rand_type])) else: # Sample an entity uniformly at random rand_ent = random.choice(list(graph.nodes)) removed_tuple = get_safely_removed_edges(graph, rand_ent, rel_counts) if removed_tuple is not None: removed_edges, removed_counts = removed_tuple dropped_edges[rand_ent] = removed_edges graph.remove_node(rand_ent) dropped_entities.append(rand_ent) rel_counts.subtract(removed_counts) if use_types: type2entities[rand_type].remove(rand_ent) progress.update(1) progress.close() # Are there indeed no disconnected nodes? assert len(list(nx.isolates(graph))) == 0 # Did we keep track of removed edges correctly? num_removed_edges = sum(map(len, dropped_edges.values())) assert num_removed_edges + graph.number_of_edges() == original_num_edges # Test entities MUST come from first slice! 
This guarantees that # validation entities don't have edges with them (because nodes were # removed in sequence) test_ents = set(dropped_entities[:num_test]) val_ents = set(dropped_entities[num_test:num_test + num_val]) train_ents = set(graph.nodes()) # Check that entity sets are disjoint assert len(train_ents.intersection(val_ents)) == 0 assert len(train_ents.intersection(test_ents)) == 0 assert len(val_ents.intersection(test_ents)) == 0 # Check that validation graph does not contain test entities val_graph = nx.MultiDiGraph() val_edges = [] for entity in val_ents: val_edges += dropped_edges[entity] val_graph.add_weighted_edges_from(val_edges) assert len(set(val_graph.nodes()).intersection(test_ents)) == 0 names = ('train', 'dev', 'test') dirname = osp.dirname(triples_file) prefix = 'ind-' for entity_set, set_name in zip((train_ents, val_ents, test_ents), names): # Save file with entities for set with open(osp.join(dirname, f'{set_name}-ents.txt'), 'w') as file: file.writelines('\n'.join(entity_set)) if set_name == 'train': # Triples for train split are saved later continue # Save file with triples for entities in set with open(osp.join(dirname, f'{prefix}{set_name}.tsv'), 'w') as file: for entity in entity_set: triples = dropped_edges[entity] for head, tail, rel in triples: file.write(f'{head}\t{rel}\t{tail}\n') with open(osp.join(dirname, f'{prefix}train.tsv'), 'w') as train_file: for head, tail, rel in graph.edges(data=True): train_file.write(f'{head}\t{rel["weight"]}\t{tail}\n') print(f'Dropped {len(val_ents):,} entities for validation' f' and {len(test_ents):,} for test.') print(f'{graph.number_of_nodes():,} entities are left for training.') print(f'Saved output files to {dirname}/') def load_embeddings(embs_file): """Read a file containing a word followed by its embedding, as float values separated by whitespace. Args: embs_file: str, path to file Returns: tensor of shape (vocabulary, embedding_dimension), type torch.float dict, mapping words (str) to id (int). """ filename, _ = osp.splitext(embs_file) word2idx = {} word_embeddings = [] progress = tqdm() with open(embs_file) as file: for i, line in enumerate(file): word, *embedding = line.split(' ') word2idx[word] = i word_embeddings.append([float(e) for e in embedding]) progress.update(1) progress.close() word_embeddings = torch.tensor(word_embeddings) # Add embedding for out-of-vocabulary words unk_emb = torch.mean(word_embeddings, dim=0, keepdim=True) word_embeddings = torch.cat((word_embeddings, unk_emb)) word2idx['[UNK]'] = len(word2idx) torch.save(word_embeddings, f'{filename}.pt') torch.save(word2idx, f'{filename}-maps.pt') def categorize_relations(triples_file): """Given a set of triples, assign a category to a relation from the following: 1 to 1 1 to many many to 1 many to many Results are saved back to disk. 
Args: triples_file: str, file containing triples of the form head relation tail """ graph = nx.MultiDiGraph() triples, rel_counts = parse_triples(triples_file) graph.add_weighted_edges_from(triples) rel2heads_count = defaultdict(list) rel2tails_count = defaultdict(list) for entity in graph.nodes: rel2heads_entity_count = Counter() # Fix entity as tail, and check all heads in_edges = graph.in_edges(entity, data=True) for u, v, edge in in_edges: rel2heads_entity_count[edge['weight']] += 1 for rel, counts in rel2heads_entity_count.items(): rel2heads_count[rel].append(counts) rel2tails_entity_count = Counter() # Fix entity as head, and check all tails out_edges = graph.out_edges(entity, data=True) for u, v, edge in out_edges: rel2tails_entity_count[edge['weight']] += 1 for rel, counts in rel2tails_entity_count.items(): rel2tails_count[rel].append(counts) rel2category = dict() for rel in rel2heads_count: head_counts = rel2heads_count[rel] tail_counts = rel2tails_count[rel] head_avg = sum(head_counts)/len(head_counts) tail_avg = sum(tail_counts)/len(tail_counts) head_category = '1' if head_avg < 1.5 else 'many' tail_category = '1' if tail_avg < 1.5 else 'many' rel2category[rel] = f'{head_category}-to-{tail_category}' print('Relation category statistics:') cat_counts = Counter(rel2category.values()) for category, count in cat_counts.items(): proportion = 100 * count/len(rel2category) print(f'{category:13} {count:3} {proportion:4.1f}%') dirname = osp.dirname(triples_file) output_path = osp.join(dirname, 'relations-cat.txt') with open(output_path, 'w') as f: for relation, category in rel2category.items(): f.write(f'{relation}\t{category}\n') print(f'Saved relation categories to {output_path}') def get_ranking_descriptions(run_file, dbpedia_file, redirects_file): # Read run file and get unique set of entities print('Reading unique entities from run file...') entities = set() with open(run_file) as f: for line in f: values = line.strip().split() entities.add(values[2]) basename = osp.splitext(osp.basename(run_file))[0] output_file = osp.join(osp.dirname(run_file), basename + '-descriptions.txt') missing_file = osp.join(osp.dirname(run_file), basename + '-missing.txt') dbpedia_ns = 'http://dbpedia.org/resource/' dbpedia_prefix = 'dbpedia:' print('Reading redirects...') redir2entities = defaultdict(set) with open(redirects_file) as f: for line in f: values = line.strip().split() norm_uri = values[0].replace(dbpedia_ns, dbpedia_prefix, 1) redirect = values[2] if norm_uri in entities: redir2entities[redirect].add(norm_uri) # Iterate over DBpedia dump and keep required descriptions print('Retrieving descriptions of entities...') read_entities = set() progress = tqdm(file=sys.stdout) with open(dbpedia_file) as f, open(output_file, 'w') as out: for line in f: g = rdflib.Graph().parse(data=line, format='n3') for (page, rel, description) in g: norm_uri = f'<{page.replace(dbpedia_ns, dbpedia_prefix, 1)}>' if norm_uri in entities and norm_uri not in read_entities: read_entities.add(norm_uri) out.write(f'{norm_uri}\t{description.value}\n') page_n3_format = page.n3() if page_n3_format in redir2entities: for entity in redir2entities[page_n3_format]: if entity not in read_entities: read_entities.add(entity) out.write(f'{entity}\t{description.value}\n') if len(read_entities) == len(entities): break progress.update() progress.close() with open(missing_file, 'w') as f: for entity in entities.difference(read_entities): f.write(f'{entity}\n') print(f'Retrieved {len(read_entities):,} descriptions, out of' f' 
{len(entities):,} entities.') print(f'Descriptions saved in {output_file}') print(f'Entities with missing descriptions saved in {missing_file}') if __name__ == '__main__': parser = ArgumentParser() parser.add_argument('command', choices=['drop_entities', 'load_embs', 'categorize', 'get_ranking_descriptions']) parser.add_argument('--file', help='Input file') parser.add_argument('--dbp_file', help='DBpedia ttl file with rdf:comment' ' field for entities') parser.add_argument('--redirects_file', help='File redirecting entities') parser.add_argument('--types_file', help='Tab-separated file of entities' ' and their type', default=None) parser.add_argument('--train_size', help='Fraction of entities used for' ' training.', default=0.8, type=float) parser.add_argument('--seed', help='Random seed', default=0) args = parser.parse_args() if args.command == 'drop_entities': drop_entities(args.file, train_size=args.train_size, seed=args.seed, types_file=args.types_file) elif args.command == 'load_embs': load_embeddings(args.file) elif args.command == 'categorize': categorize_relations(args.file) elif args.command == 'get_ranking_descriptions': if args.file is None or args.dbp_file is None: raise ValueError('--file and --dbp_file must be provided to' ' get_ranking_descriptions') get_ranking_descriptions(args.file, args.dbp_file, args.redirects_file)
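categorize_relations above labels each side of a relation as '1' or 'many' by comparing the average head/tail count against 1.5. A simplified standalone re-implementation of that heuristic on made-up triples (not the code used in data/utils.py):

from collections import defaultdict

# Toy (head, relation, tail) triples to illustrate the 1.5-average heuristic.
triples = [
    ('a', 'bornIn', 'x'), ('b', 'bornIn', 'x'), ('c', 'bornIn', 'y'),
    ('a', 'hasSpouse', 'd'),
]

heads_per_tail = defaultdict(lambda: defaultdict(int))
tails_per_head = defaultdict(lambda: defaultdict(int))
for h, r, t in triples:
    heads_per_tail[r][t] += 1
    tails_per_head[r][h] += 1

for rel in heads_per_tail:
    head_avg = sum(heads_per_tail[rel].values()) / len(heads_per_tail[rel])
    tail_avg = sum(tails_per_head[rel].values()) / len(tails_per_head[rel])
    head_cat = '1' if head_avg < 1.5 else 'many'
    tail_cat = '1' if tail_avg < 1.5 else 'many'
    print(rel, f'{head_cat}-to-{tail_cat}')  # bornIn -> many-to-1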
file_length: 14,869   avg_line_length: 36.455919   max_line_length: 79   extension_type: py

repo: models
file: models-master/official/core/base_task.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Defines the base task abstraction."""
import abc
import functools
from typing import Optional

from absl import logging
import tensorflow as tf

from official.core import config_definitions
from official.modeling import optimization
from official.modeling import performance
from official.modeling.privacy import configs
from official.modeling.privacy import ops

OptimizationConfig = optimization.OptimizationConfig
RuntimeConfig = config_definitions.RuntimeConfig
DifferentialPrivacyConfig = configs.DifferentialPrivacyConfig


class Task(tf.Module, metaclass=abc.ABCMeta):
  """A single-replica view of training procedure.

  Tasks provide artifacts for training/validation procedures, including
  loading/iterating over Datasets, training/validation steps, calculating the
  loss and customized metrics with reduction.
  """

  # Special keys in train/validate step returned logs.
  loss = "loss"

  def __init__(self,
               params,
               logging_dir: Optional[str] = None,
               name: Optional[str] = None):
    """Task initialization.

    Args:
      params: the task configuration instance, which can be any of dataclass,
        ConfigDict, namedtuple, etc.
      logging_dir: a string pointing to where the model, summaries etc. will
        be saved. You can also write additional stuff in this directory.
      name: the task name.
    """
    super().__init__(name=name)
    self._task_config = params
    self._logging_dir = (
        logging_dir or ""
    )  # Empty directory hints current working dir.

  @property
  def task_config(self):
    return self._task_config

  @property
  def logging_dir(self) -> str:
    return self._logging_dir

  @classmethod
  def create_optimizer(cls,
                       optimizer_config: OptimizationConfig,
                       runtime_config: Optional[RuntimeConfig] = None,
                       dp_config: Optional[DifferentialPrivacyConfig] = None):
    """Creates a TF optimizer from configurations.

    Args:
      optimizer_config: the parameters of the Optimization settings.
      runtime_config: the parameters of the runtime.
      dp_config: the parameter of differential privacy.

    Returns:
      A tf.optimizers.Optimizer object.
    """
    gradient_transformers = None
    if dp_config is not None:
      logging.info("Adding differential privacy transform with config %s.",
                   dp_config.as_dict())
      noise_stddev = dp_config.clipping_norm * dp_config.noise_multiplier
      gradient_transformers = [
          functools.partial(
              ops.clip_l2_norm, l2_norm_clip=dp_config.clipping_norm),
          functools.partial(
              ops.add_noise, noise_stddev=noise_stddev)
      ]

    opt_factory = optimization.OptimizerFactory(optimizer_config)
    optimizer = opt_factory.build_optimizer(
        opt_factory.build_learning_rate(),
        gradient_transformers=gradient_transformers
    )
    # Configuring optimizer when loss_scale is set in runtime config. This
    # helps avoiding overflow/underflow for float16 computations.
    if runtime_config:
      optimizer = performance.configure_optimizer(
          optimizer,
          use_float16=runtime_config.mixed_precision_dtype == "float16",
          loss_scale=runtime_config.loss_scale)

    return optimizer

  def initialize(self, model: tf.keras.Model):
    """[Optional] A callback function used as CheckpointManager's init_fn.

    This function will be called when no checkpoint is found for the model.
    If there is a checkpoint, the checkpoint will be loaded and this function
    will not be called. You can use this callback function to load a
    pretrained checkpoint, saved under a directory other than the model_dir.

    Args:
      model: The keras.Model built or used by this task.
    """
    ckpt_dir_or_file = self.task_config.init_checkpoint
    logging.info("Trying to load pretrained checkpoint from %s",
                 ckpt_dir_or_file)
    if ckpt_dir_or_file and tf.io.gfile.isdir(ckpt_dir_or_file):
      ckpt_dir_or_file = tf.train.latest_checkpoint(ckpt_dir_or_file)
    if not ckpt_dir_or_file:
      logging.info("No checkpoint file found from %s. Will not load.",
                   ckpt_dir_or_file)
      return

    if hasattr(model, "checkpoint_items"):
      checkpoint_items = model.checkpoint_items
    else:
      checkpoint_items = dict(model=model)
    ckpt = tf.train.Checkpoint(**checkpoint_items)
    status = ckpt.read(ckpt_dir_or_file)
    status.expect_partial().assert_existing_objects_matched()
    logging.info("Finished loading pretrained checkpoint from %s",
                 ckpt_dir_or_file)

  def build_model(self) -> tf.keras.Model:
    """[Optional] Creates model architecture.

    Returns:
      A model instance.
    """  # pytype: disable=bad-return-type  # typed-keras

  @abc.abstractmethod
  def build_inputs(self,
                   params,
                   input_context: Optional[tf.distribute.InputContext] = None):
    """Returns a dataset or a nested structure of dataset functions.

    Dataset functions define per-host datasets with the per-replica batch size.
    With distributed training, this method runs on remote hosts.

    Args:
      params: hyperparams to create input pipelines, which can be any of
        dataclass, ConfigDict, namedtuple, etc.
      input_context: optional distribution input pipeline context.

    Returns:
      A nested structure of per-replica input functions.
    """

  def build_losses(self, labels, model_outputs, aux_losses=None) -> tf.Tensor:
    """Standard interface to compute losses.

    Args:
      labels: optional label tensors.
      model_outputs: a nested structure of output tensors.
      aux_losses: auxiliary loss tensors, i.e. `losses` in keras.Model.

    Returns:
      The total loss tensor.
    """
    del model_outputs, labels

    if aux_losses is None:
      losses = [tf.constant(0.0, dtype=tf.float32)]
    else:
      losses = aux_losses
    total_loss = tf.add_n(losses)
    return total_loss

  def build_metrics(self, training: bool = True):
    """Gets streaming metrics for training/validation."""
    del training
    return []

  def process_metrics(self, metrics, labels, model_outputs, **kwargs):
    """Process and update metrics.

    Called when using custom training loop API.

    Args:
      metrics: a nested structure of metrics objects. The return of function
        self.build_metrics.
      labels: a tensor or a nested structure of tensors.
      model_outputs: a tensor or a nested structure of tensors. For example,
        output of the keras model built by self.build_model.
      **kwargs: other args.
    """
    for metric in metrics:
      metric.update_state(labels, model_outputs)

  def process_compiled_metrics(self, compiled_metrics, labels, model_outputs):
    """Process and update compiled_metrics.

    Called when using the compile/fit API.

    Args:
      compiled_metrics: the compiled metrics (model.compiled_metrics).
      labels: a tensor or a nested structure of tensors.
      model_outputs: a tensor or a nested structure of tensors. For example,
        output of the keras model built by self.build_model.
    """
    compiled_metrics.update_state(labels, model_outputs)

  def train_step(self,
                 inputs,
                 model: tf.keras.Model,
                 optimizer: tf.keras.optimizers.Optimizer,
                 metrics=None):
    """Does forward and backward.

    With distribution strategies, this method runs on devices.

    Args:
      inputs: a dictionary of input tensors.
      model: the model, forward pass definition.
      optimizer: the optimizer for this training step.
      metrics: a nested structure of metrics objects.

    Returns:
      A dictionary of logs.
    """
    if isinstance(inputs, tuple) and len(inputs) == 2:
      features, labels = inputs
    else:
      features, labels = inputs, inputs
    with tf.GradientTape() as tape:
      outputs = model(features, training=True)
      # Computes per-replica loss.
      if model.compiled_loss:
        loss = model.compiled_loss(
            labels, outputs, regularization_losses=model.losses)
        loss += self.build_losses(
            labels=labels, model_outputs=outputs, aux_losses=None)
      else:
        loss = self.build_losses(
            labels=labels, model_outputs=outputs, aux_losses=model.losses)
      # Scales loss as the default gradients allreduce performs sum inside the
      # optimizer.
      scaled_loss = loss / tf.distribute.get_strategy().num_replicas_in_sync

      # For mixed precision, when a LossScaleOptimizer is used, the loss is
      # scaled to avoid numeric underflow.
      if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
        scaled_loss = optimizer.get_scaled_loss(scaled_loss)

    tvars = model.trainable_variables
    grads = tape.gradient(scaled_loss, tvars)

    if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
      grads = optimizer.get_unscaled_gradients(grads)
    optimizer.apply_gradients(list(zip(grads, tvars)))
    logs = {self.loss: loss}
    if metrics:
      self.process_metrics(metrics, labels, outputs)
    if model.compiled_metrics:
      self.process_compiled_metrics(model.compiled_metrics, labels, outputs)
      logs.update({m.name: m.result() for m in metrics or []})
      logs.update({m.name: m.result() for m in model.metrics})
    return logs

  def validation_step(self, inputs, model: tf.keras.Model, metrics=None):
    """Validation step.

    With distribution strategies, this method runs on devices.

    Args:
      inputs: a dictionary of input tensors.
      model: the keras.Model.
      metrics: a nested structure of metrics objects.

    Returns:
      A dictionary of logs.
    """
    if isinstance(inputs, tuple) and len(inputs) == 2:
      features, labels = inputs
    else:
      features, labels = inputs, inputs
    outputs = self.inference_step(features, model)
    loss = self.build_losses(
        labels=labels, model_outputs=outputs, aux_losses=model.losses)
    logs = {self.loss: loss}
    if metrics:
      self.process_metrics(metrics, labels, outputs)
    if model.compiled_metrics:
      self.process_compiled_metrics(model.compiled_metrics, labels, outputs)
      logs.update({m.name: m.result() for m in metrics or []})
      logs.update({m.name: m.result() for m in model.metrics})
    return logs

  def inference_step(self, inputs, model: tf.keras.Model):
    """Performs the forward step.

    With distribution strategies, this method runs on devices.

    Args:
      inputs: a dictionary of input tensors.
      model: the keras.Model.

    Returns:
      Model outputs.
    """
    return model(inputs, training=False)

  def aggregate_logs(self, state, step_logs):
    """Optional aggregation over logs returned from a validation step.

    Given step_logs from a validation step, this function aggregates the logs
    after each eval_step() (see eval_reduce() function in
    official/core/base_trainer.py). It runs on CPU and can be used to
    aggregate metrics during validation, when there are too many metrics that
    cannot fit into TPU memory. Note that this may increase latency due to
    data transfer between TPU and CPU. Also, the step output from a
    validation step may be a tuple with elements from replicas, and a
    concatenation of the elements is needed in such case.

    Args:
      state: The current state of training, for example, it can be a sequence
        of metrics.
      step_logs: Logs from a validation step. Can be a dictionary.
    """
    pass

  def reduce_aggregated_logs(self,
                             aggregated_logs,
                             global_step: Optional[tf.Tensor] = None):
    """Optional reduce of aggregated logs over validation steps.

    This function reduces aggregated logs at the end of validation, and can be
    used to compute the final metrics. It runs on CPU and in each eval_end()
    in base trainer (see eval_end() function in
    official/core/base_trainer.py).

    Args:
      aggregated_logs: Aggregated logs over multiple validation steps.
      global_step: An optional variable of global step.

    Returns:
      A dictionary of reduced results.
    """
    return {}
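A task is used by subclassing Task and overriding at least build_inputs, typically together with build_model and build_losses. A minimal sketch, assuming the tf-models-official package layout; ToyTask and its random regression data are hypothetical, not part of the library:

import tensorflow as tf
from official.core import base_task


class ToyTask(base_task.Task):
  """Hypothetical task: fits one dense layer to random regression data."""

  def build_model(self) -> tf.keras.Model:
    return tf.keras.Sequential([tf.keras.layers.Dense(1)])

  def build_inputs(self, params, input_context=None):
    # Synthetic data standing in for a real input pipeline.
    x = tf.random.normal([256, 4])
    y = tf.reduce_sum(x, axis=-1, keepdims=True)
    return tf.data.Dataset.from_tensor_slices((x, y)).batch(32)

  def build_losses(self, labels, model_outputs, aux_losses=None) -> tf.Tensor:
    loss = tf.reduce_mean(tf.square(labels - model_outputs))
    if aux_losses:
      loss += tf.add_n(aux_losses)
    return loss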
file_length: 12,958   avg_line_length: 34.897507   max_line_length: 80   extension_type: py

repo: models
file: models-master/official/core/savedmodel_checkpoint_manager_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import time
from typing import Iterable

import tensorflow as tf

from official.core import savedmodel_checkpoint_manager


def _models_exist(checkpoint_path: str, models: Iterable[str]) -> bool:
  for model_name in models:
    if not tf.io.gfile.isdir(
        os.path.join(
            savedmodel_checkpoint_manager.make_saved_modules_directory_name(
                checkpoint_path), model_name)):
      return False
  return True


class _ModelForTest(tf.keras.Model):

  def __init__(self, hidden_size: int = 8):
    super().__init__()
    self.dense = tf.keras.layers.Dense(hidden_size)

  @tf.function(input_signature=[tf.TensorSpec([None, 16])])
  def call(self, inputs):
    return self.dense(inputs)

  @property
  def saved_model_signatures(self):
    # Build SavedModel signatures.
    return dict(serving_default=self.call)


class CheckpointManagerTest(tf.test.TestCase):

  def _create_manager(self, max_to_keep: int = 1) -> tf.train.CheckpointManager:
    """Sets up SavedModelCheckpointManager object.

    Args:
      max_to_keep: max number of savedmodels to keep.

    Returns:
      created savedmodel manager.
    """
    models = {
        'model_1': _ModelForTest(12),
        'model_2': _ModelForTest(14),
    }
    checkpoint = tf.train.Checkpoint()
    manager = savedmodel_checkpoint_manager.SavedModelCheckpointManager(
        checkpoint=checkpoint,
        directory=self.get_temp_dir(),
        max_to_keep=max_to_keep,
        modules_to_export=models)
    return manager

  def test_max_to_keep(self):
    manager = self._create_manager()
    models = manager.modules_to_export
    first_path = manager.save()
    second_path = manager.save()

    savedmodel = savedmodel_checkpoint_manager.make_saved_modules_directory_name(
        manager.latest_checkpoint)
    self.assertEqual(savedmodel, manager.latest_savedmodel)
    self.assertTrue(_models_exist(second_path, models.keys()))
    self.assertFalse(_models_exist(first_path, models.keys()))

  def test_returns_none_after_timeout(self):
    manager = self._create_manager()
    start = time.time()
    ret = manager.wait_for_new_savedmodel(
        None, timeout=1.0, seconds_to_sleep=0.5)
    end = time.time()
    self.assertIsNone(ret)
    # We've waited 0.5 second.
    self.assertGreater(end, start + 0.5)
    # The timeout kicked in.
    self.assertLess(end, start + 0.6)

  def test_saved_model_iterator(self):
    manager = self._create_manager(max_to_keep=2)
    self.assertIsNotNone(manager.save(checkpoint_number=1))
    self.assertIsNotNone(manager.save(checkpoint_number=2))
    self.assertIsNotNone(manager.save(checkpoint_number=3))

    # Savedmodels are in time order.
    expected_savedmodels = manager.savedmodels
    # Order not guaranteed.
    existing_savedmodels = manager.get_existing_savedmodels()
    savedmodels = list(manager.savedmodels_iterator(timeout=3.0))
    self.assertEqual(savedmodels, expected_savedmodels)
    self.assertEqual(set(savedmodels), set(existing_savedmodels))

  def test_saved_model_iterator_timeout_fn(self):
    manager = self._create_manager()
    timeout_fn_calls = [0]

    def timeout_fn():
      timeout_fn_calls[0] += 1
      return timeout_fn_calls[0] > 3

    results = list(
        manager.savedmodels_iterator(timeout=0.1, timeout_fn=timeout_fn))
    self.assertEqual([], results)
    self.assertEqual(4, timeout_fn_calls[0])


if __name__ == '__main__':
  tf.test.main()
4,045
31.111111
81
py
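As a companion to the test above (not part of the repository file), here is a hedged usage sketch of SavedModelCheckpointManager: each save() writes a regular checkpoint plus one SavedModel per entry in modules_to_export. The toy module mirrors the test's _ModelForTest; the directory argument and the 'toy' module name are placeholders.

import os

import tensorflow as tf

from official.core import savedmodel_checkpoint_manager


class _ToyModel(tf.keras.Model):

  def __init__(self):
    super().__init__()
    self.dense = tf.keras.layers.Dense(4)

  @tf.function(input_signature=[tf.TensorSpec([None, 16])])
  def call(self, inputs):
    return self.dense(inputs)

  @property
  def saved_model_signatures(self):
    return dict(serving_default=self.call)


def export_two_snapshots(directory: str):
  manager = savedmodel_checkpoint_manager.SavedModelCheckpointManager(
      checkpoint=tf.train.Checkpoint(),
      directory=directory,
      max_to_keep=2,
      modules_to_export={'toy': _ToyModel()})
  manager.save(checkpoint_number=1)
  manager.save(checkpoint_number=2)
  # Each returned entry is a per-checkpoint saved-modules directory holding
  # one SavedModel subdirectory per exported module (here: 'toy').
  return [os.path.join(p, 'toy') for p in manager.get_existing_savedmodels()]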
models
models-master/official/core/train_utils_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Tests for official.core.train_utils."""
import json
import os
import pprint

import numpy as np
import tensorflow as tf

from official.core import exp_factory
from official.core import test_utils
from official.core import train_utils
from official.modeling import hyperparams


@exp_factory.register_config_factory('foo')
def foo():
  """Multitask experiment for test."""
  experiment_config = hyperparams.Config(
      default_params={
          'runtime': {
              'tpu': 'fake',
          },
          'task': {
              'model': {
                  'model_id': 'bar',
              },
          },
          'trainer': {
              'train_steps': -1,
              'validation_steps': -1,
          },
      })
  return experiment_config


class TrainUtilsTest(tf.test.TestCase):

  def test_get_leaf_nested_dict(self):
    d = {'a': {'i': {'x': 5}}}
    self.assertEqual(train_utils.get_leaf_nested_dict(d, ['a', 'i', 'x']), 5)

  def test_get_leaf_nested_dict_not_leaf(self):
    with self.assertRaisesRegex(KeyError, 'The value extracted with keys.*'):
      d = {'a': {'i': {'x': 5}}}
      train_utils.get_leaf_nested_dict(d, ['a', 'i'])

  def test_get_leaf_nested_dict_path_not_exist_missing_key(self):
    with self.assertRaisesRegex(KeyError, 'Path not exist while traversing .*'):
      d = {'a': {'i': {'x': 5}}}
      train_utils.get_leaf_nested_dict(d, ['a', 'i', 'y'])

  def test_get_leaf_nested_dict_path_not_exist_out_of_range(self):
    with self.assertRaisesRegex(KeyError, 'Path not exist while traversing .*'):
      d = {'a': {'i': {'x': 5}}}
      train_utils.get_leaf_nested_dict(d, ['a', 'i', 'z'])

  def test_get_leaf_nested_dict_path_not_exist_meets_leaf(self):
    with self.assertRaisesRegex(KeyError, 'Path not exist while traversing .*'):
      d = {'a': {'i': 5}}
      train_utils.get_leaf_nested_dict(d, ['a', 'i', 'z'])

  def test_cast_leaf_nested_dict(self):
    d = {'a': {'i': {'x': '123'}}, 'b': 456.5}
    d = train_utils.cast_leaf_nested_dict(d, int)
    self.assertEqual(d['a']['i']['x'], 123)
    self.assertEqual(d['b'], 456)

  def test_write_model_params_keras_model(self):
    inputs = np.zeros([2, 3])
    model = test_utils.FakeKerasModel()
    model(inputs)  # Must do forward pass to build the model.

    filepath = os.path.join(self.create_tempdir(), 'model_params.txt')
    train_utils.write_model_params(model, filepath)
    actual = tf.io.gfile.GFile(filepath, 'r').read().splitlines()

    expected = [
        'fake_keras_model/dense/kernel:0 [3, 4]',
        'fake_keras_model/dense/bias:0 [4]',
        'fake_keras_model/dense_1/kernel:0 [4, 4]',
        'fake_keras_model/dense_1/bias:0 [4]',
        '',
        'Total params: 36',
    ]
    self.assertEqual(actual, expected)

  def test_write_model_params_module(self):
    inputs = np.zeros([2, 3], dtype=np.float32)
    model = test_utils.FakeModule(3, name='fake_module')
    model(inputs)  # Must do forward pass to build the model.

    filepath = os.path.join(self.create_tempdir(), 'model_params.txt')
    train_utils.write_model_params(model, filepath)
    actual = tf.io.gfile.GFile(filepath, 'r').read().splitlines()

    expected = [
        'fake_module/dense/b:0 [4]',
        'fake_module/dense/w:0 [3, 4]',
        'fake_module/dense_1/b:0 [4]',
        'fake_module/dense_1/w:0 [4, 4]',
        '',
        'Total params: 36',
    ]
    self.assertEqual(actual, expected)

  def test_construct_experiment_from_flags(self):
    options = train_utils.ParseConfigOptions(
        experiment='foo',
        config_file=[],
        tpu='bar',
        tf_data_service='',
        params_override='task.model.model_id=new,'
        'trainer.train_steps=10,'
        'trainer.validation_steps=11')
    builder = train_utils.ExperimentParser(options)
    params_from_obj = builder.parse()

    params_from_func = train_utils.parse_configuration(options)

    pp = pprint.PrettyPrinter()
    self.assertEqual(
        pp.pformat(params_from_obj.as_dict()),
        pp.pformat(params_from_func.as_dict()))

    self.assertEqual(params_from_obj.runtime.tpu, 'bar')
    self.assertEqual(params_from_obj.task.model.model_id, 'new')
    self.assertEqual(params_from_obj.trainer.train_steps, 10)
    self.assertEqual(params_from_obj.trainer.validation_steps, 11)


class BestCheckpointExporterTest(tf.test.TestCase):

  def test_maybe_export(self):
    model_dir = self.create_tempdir().full_path
    best_ckpt_path = os.path.join(model_dir, 'best_ckpt-1')
    metric_name = 'test_metric|metric_1'
    exporter = train_utils.BestCheckpointExporter(
        model_dir, metric_name, 'higher')

    v = tf.Variable(1.0)
    checkpoint = tf.train.Checkpoint(v=v)
    ret = exporter.maybe_export_checkpoint(
        checkpoint, {'test_metric': {'metric_1': 5.0}}, 100)
    with self.subTest(name='Successful first save.'):
      self.assertEqual(ret, True)
      v_2 = tf.Variable(2.0)
      checkpoint_2 = tf.train.Checkpoint(v=v_2)
      checkpoint_2.restore(best_ckpt_path)
      self.assertEqual(v_2.numpy(), 1.0)

    v = tf.Variable(3.0)
    checkpoint = tf.train.Checkpoint(v=v)
    ret = exporter.maybe_export_checkpoint(
        checkpoint, {'test_metric': {'metric_1': 6.0}}, 200)
    with self.subTest(name='Successful better metric save.'):
      self.assertEqual(ret, True)
      v_2 = tf.Variable(2.0)
      checkpoint_2 = tf.train.Checkpoint(v=v_2)
      checkpoint_2.restore(best_ckpt_path)
      self.assertEqual(v_2.numpy(), 3.0)

    v = tf.Variable(5.0)
    checkpoint = tf.train.Checkpoint(v=v)
    ret = exporter.maybe_export_checkpoint(
        checkpoint, {'test_metric': {'metric_1': 1.0}}, 300)
    with self.subTest(name='Worse metric no save.'):
      self.assertEqual(ret, False)
      v_2 = tf.Variable(2.0)
      checkpoint_2 = tf.train.Checkpoint(v=v_2)
      checkpoint_2.restore(best_ckpt_path)
      self.assertEqual(v_2.numpy(), 3.0)

  def test_export_best_eval_metric(self):
    model_dir = self.create_tempdir().full_path
    metric_name = 'test_metric|metric_1'
    exporter = train_utils.BestCheckpointExporter(model_dir, metric_name,
                                                  'higher')
    exporter.export_best_eval_metric({'test_metric': {'metric_1': 5.0}}, 100)

    with tf.io.gfile.GFile(os.path.join(model_dir, 'info.json'),
                           'rb') as reader:
      metric = json.loads(reader.read())
    self.assertAllEqual(
        metric,
        {'test_metric': {'metric_1': 5.0}, 'best_ckpt_global_step': 100.0})

  def test_export_best_eval_metric_skips_non_scalar_values(self):
    model_dir = self.create_tempdir().full_path
    metric_name = 'test_metric|metric_1'
    exporter = train_utils.BestCheckpointExporter(model_dir, metric_name,
                                                  'higher')
    image = tf.zeros(shape=[16, 8, 1])
    eval_logs = {'test_metric': {'metric_1': 5.0, 'image': image}}
    exporter.export_best_eval_metric(eval_logs, 100)

    with tf.io.gfile.GFile(os.path.join(model_dir, 'info.json'),
                           'rb') as reader:
      metric = json.loads(reader.read())
    self.assertAllEqual(
        metric,
        {'test_metric': {'metric_1': 5.0}, 'best_ckpt_global_step': 100.0})


if __name__ == '__main__':
  tf.test.main()
7,893
35.546296
80
py
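A compact usage sketch (illustrative, not from the repository) of the nested-dict helpers exercised above, together with the 'outer|inner' metric-name convention that BestCheckpointExporter splits on; all values are made up.

from official.core import train_utils

eval_logs = {'test_metric': {'metric_1': '0.83'}, 'steps_per_second': '12.5'}

# Walk ['test_metric', 'metric_1'] down to the leaf value.
metric_keys = 'test_metric|metric_1'.split('|')
best_metric = train_utils.get_leaf_nested_dict(eval_logs, metric_keys)
assert best_metric == '0.83'

# Cast every leaf in place (mirrors test_cast_leaf_nested_dict above).
eval_logs = train_utils.cast_leaf_nested_dict(eval_logs, float)
assert eval_logs['test_metric']['metric_1'] == 0.83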
models
models-master/official/core/base_trainer.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Standard Trainer implementation. The base trainer implements the Orbit `StandardTrainable` and `StandardEvaluable` interfaces. Trainers inside this project should be interchangable and independent on model architectures and tasks. """ import functools from typing import Union, Optional from absl import logging import gin import orbit import tensorflow as tf from official.core import base_task from official.core import config_definitions from official.modeling import optimization ExperimentConfig = config_definitions.ExperimentConfig TrainerConfig = config_definitions.TrainerConfig class _AsyncTrainer(orbit.StandardTrainer, orbit.StandardEvaluator): """Trainer class for both sync and async Strategy.""" def init_async(self): """Initializes the Async Trainer base class.""" assert isinstance(self._strategy, tf.distribute.Strategy) self._is_async = isinstance( self._strategy, tf.distribute.experimental.ParameterServerStrategy) self._coordinator = None if self._is_async: self._coordinator = ( tf.distribute.experimental.coordinator.ClusterCoordinator( self._strategy)) def coordinator_for_async( self, ) -> tf.distribute.experimental.coordinator.ClusterCoordinator: if not self._coordinator: raise ValueError( "Coordinator uninitialized for async run. Call init_async() first." ) return self._coordinator def join(self): """Join all async steps. Only useful in aysnc training.""" if getattr(self, "_is_async", False): self.coordinator_for_async().join() def create_train_loop_fn(self): """Creates a eval loop from the given step function and options.""" train_loop_fn = super().create_train_loop_fn() if getattr(self, "_is_async", False): def _async_loop_fn(iterator, num_steps): self.coordinator_for_async().schedule( train_loop_fn, args=(iterator, num_steps) ) return _async_loop_fn else: return train_loop_fn def create_eval_loop_fn(self, has_state: bool): """Creates a training loop from the given step function and options.""" eval_loop_fn = super().create_eval_loop_fn(has_state) if getattr(self, "_is_async", False): if has_state: raise ValueError( "Stateful eval loop is not supported in async training.") def _async_loop_fn(iterator, num_steps, state=None, reduce_fn=None): assert state is None assert reduce_fn is None self.coordinator_for_async().schedule( eval_loop_fn, args=(iterator, num_steps) ) return _async_loop_fn else: return eval_loop_fn def distribute_dataset(self, dataset_or_fn, *args, **kwargs): """A utility function to help create a `tf.distribute.DistributedDataset`. Args: dataset_or_fn: A instance of `tf.data.Dataset`, or a "dataset function" returning a `tf.data.Dataset`. If it is a function, it may optionally have an argument named `input_context` which will be passed a `tf.distribute.InputContext` instance. *args: Any positional arguments to pass through to `dataset_or_fn`. **kwargs: Any keyword arguments to pass through to `dataset_or_fn`. Returns: A distributed Dataset. 
""" if getattr(self, "_is_async", False): per_worker_dataset_fn = functools.partial( orbit.utils.make_distributed_dataset, self._strategy, dataset_or_fn, *args, **kwargs) per_worker_dataset_fn = tf.function(per_worker_dataset_fn) return self.coordinator_for_async().create_per_worker_dataset( per_worker_dataset_fn ) else: return orbit.utils.make_distributed_dataset(self._strategy, dataset_or_fn, *args, **kwargs) def get_runtime_options(config: ExperimentConfig): """Get tf.distribute.RunOptions from config.""" xla_options = {} if config.runtime.tpu_enable_xla_dynamic_padder is not None: xla_options["enable_xla_dynamic_padder"] = ( config.runtime.tpu_enable_xla_dynamic_padder) return tf.distribute.RunOptions( experimental_xla_options=tf.tpu.XLAOptions(**xla_options)) @gin.configurable class Trainer(_AsyncTrainer): """Implements the common trainer shared for TensorFlow models.""" # pylint: disable=super-init-not-called def __init__( self, config: ExperimentConfig, task: base_task.Task, model: tf.keras.Model, optimizer: tf.optimizers.Optimizer, train: bool = True, evaluate: bool = True, train_dataset: Optional[Union[tf.data.Dataset, tf.distribute.DistributedDataset]] = None, validation_dataset: Optional[Union[ tf.data.Dataset, tf.distribute.DistributedDataset]] = None, checkpoint_exporter=None): """Initialize common trainer for TensorFlow models. Args: config: An `ExperimentConfig` instance specifying experiment config. task: A base_task.Task instance. model: The model instance, e.g. a tf.keras.Model instance. optimizer: tf.optimizers.Optimizer instance. train: bool, whether or not this trainer will be used for training. default to True. evaluate: bool, whether or not this trainer will be used for evaluation. default to True. train_dataset: a dataset object created for training. With tf.distribute, it needs to be a `DistributedDataset`. validation_dataset: a dataset object created for evaluation. With tf.distribute, it needs to be a `DistributedDataset`. The evaluator will create a dataset iterator for each eval round, so the dataset does not need to repeat. checkpoint_exporter: an object that has the `maybe_export_checkpoint` interface. """ # Gets the current distribution strategy. If not inside any strategy scope, # it gets a single-replica no-op strategy. self._strategy = tf.distribute.get_strategy() self._validate_params( config, check_train_data=train_dataset is None, check_validation_data=validation_dataset is None) self._config = config self._task = task self._model = model self._optimizer = optimizer self._checkpoint_exporter = checkpoint_exporter self._recovery = None # Runtime options are only applied to train_step. # We use default for eval_step. self._runtime_options = get_runtime_options(config) # Creates a shadow copy of the weights to store weights moving average. if isinstance(self._optimizer, optimization.ExponentialMovingAverage ) and not self._optimizer.has_shadow_copy: self._optimizer.shadow_copy(self._model) # global_step increases by 1 after each training iteration. # We should have global_step.numpy() == self.optimizer.iterations.numpy() # when there is only 1 optimizer. 
self._global_step = orbit.utils.create_global_step() if hasattr(self.model, "checkpoint_items"): checkpoint_items = self.model.checkpoint_items else: checkpoint_items = {} self._checkpoint = tf.train.Checkpoint( global_step=self.global_step, model=self.model, optimizer=self.optimizer, **checkpoint_items) self._train_loss = tf.keras.metrics.Mean("training_loss", dtype=tf.float32) self._validation_loss = tf.keras.metrics.Mean( "validation_loss", dtype=tf.float32) model_metrics = model.metrics if hasattr(model, "metrics") else [] self.init_async() if train: self._train_metrics = self.task.build_metrics( training=True) + model_metrics train_dataset = train_dataset or self.distribute_dataset( self.task.build_inputs, self.config.task.train_data) orbit.StandardTrainer.__init__( self, train_dataset, options=orbit.StandardTrainerOptions( use_tf_while_loop=config.trainer.train_tf_while_loop, use_tf_function=config.trainer.train_tf_function, use_tpu_summary_optimization=config.trainer.allow_tpu_summary)) if evaluate: self._validation_metrics = self.task.build_metrics( training=False) + model_metrics validation_dataset = validation_dataset or self.distribute_dataset( self.task.build_inputs, self.config.task.validation_data) orbit.StandardEvaluator.__init__( self, validation_dataset, options=orbit.StandardEvaluatorOptions( use_tf_function=config.trainer.eval_tf_function, use_tf_while_loop=config.trainer.eval_tf_while_loop)) def _validate_params(self, config, check_train_data=True, check_validation_data=True): r"""Validates if the configuration object passed to the Trainer. The experiment configuration should be structured as: \trainer \task \train_data \validation_data Args: config: a namedtuple, dataclass, ConfigDict, etc. check_train_data: whether to check task.train_data field. check_validation_data: whether to check task.validation_data field. """ if not hasattr(config, "trainer"): raise AttributeError("The trainer requires the configuration contains an" " attribute `trainer`.") if not hasattr(config, "task"): raise AttributeError("The trainer requires the configuration contains an" " attribute `task`.") if check_train_data and not hasattr(config.task, "train_data"): raise AttributeError("The trainer requires the configuration contains an" " attribute `task.train_data`.") if check_validation_data and not hasattr(config.task, "validation_data"): raise AttributeError("The trainer requires the configuration contains an" " attribute `task.validation_data`.") @property def strategy(self): return self._strategy @property def config(self): return self._config @property def task(self): return self._task @property def model(self): return self._model @property def optimizer(self): if hasattr(self, "_optimizer"): return self._optimizer else: return None @property def global_step(self): return self._global_step @property def train_loss(self): """Accesses the training loss metric object.""" return self._train_loss @property def validation_loss(self): """Accesses the validation loss metric object.""" return self._validation_loss @property def train_metrics(self): """Accesses all training metric objects.""" return self._train_metrics @property def validation_metrics(self): """Accesses all validation metric metric objects.""" return self._validation_metrics def initialize(self): """A callback function. This function will be called when no checkpoint found for the model. If there is a checkpoint, the checkpoint will be loaded and this function will not be called. 
Tasks may use this callback function to load a pretrained checkpoint, saved under a directory other than the model_dir. """ self.task.initialize(self.model) @property def checkpoint(self): """Accesses the training checkpoint.""" return self._checkpoint @property def checkpoint_exporter(self): """Accesses the checkpoint exporter.""" return self._checkpoint_exporter def train_loop_end(self): """See base class.""" self.join() logs = {} for metric in self.train_metrics + [self.train_loss]: logs[metric.name] = metric.result() metric.reset_states() if callable(self.optimizer.learning_rate): # Maybe a self-implemented optimizer does not have `optimizer.iterations`. # So just to be safe here. if hasattr(self.optimizer, "iterations"): logs["learning_rate"] = self.optimizer.learning_rate( self.optimizer.iterations) else: logs["learning_rate"] = self.optimizer.learning_rate(self.global_step) else: logs["learning_rate"] = self.optimizer.learning_rate return logs def next_train_inputs(self, iterator): """Fetches the next inputs for the model during train. This method consumes the input iterator and returns the next inputs for the model. This method provides a way to control how to fetch the next model input, and what data to send to the model. This function runs in eager mode. Args: iterator: Dataset iterator to generate the next inputs from. Returns: The inputs to the model. """ return next(iterator) def train_step(self, iterator): """See base class.""" def step_fn(inputs): if self.config.runtime.enable_xla and (self.config.runtime.num_gpus > 0): task_train_step = tf.function(self.task.train_step, jit_compile=True) else: task_train_step = self.task.train_step logs = task_train_step( inputs, model=self.model, optimizer=self.optimizer, metrics=self.train_metrics) self._train_loss.update_state(logs[self.task.loss]) self.global_step.assign_add(1) inputs = self.next_train_inputs(iterator) self.strategy.run(step_fn, args=(inputs,), options=self._runtime_options) def eval_begin(self): """Sets up metrics.""" for metric in self.validation_metrics + [self.validation_loss]: metric.reset_states() # Swaps weights to test on weights moving average. if self.optimizer and isinstance(self.optimizer, optimization.ExponentialMovingAverage): self.optimizer.swap_weights() def next_eval_inputs(self, iterator): """Fetches the next inputs for the model during eval. This method consumes the input iterator and returns the next inputs for the model and an additional logs dict. The output dict remains in the host (not sent to GPUs/TPUs) and is merged with the model outputs which will be processed later in `aggregate_logs`. This is useful for sending extra logs downstream that are not compatible with the accelerators. This function runs in eager mode. Args: iterator: Dataset iterator to generate the next inputs from. Returns: The inputs to the model, and an additional logs dictionnary. The logs are not passed to the model, instead they are merged with model output logs. 
""" passthrough_logs = dict() return next(iterator), passthrough_logs def eval_step(self, iterator): """See base class.""" def step_fn(inputs): logs = self.task.validation_step( inputs, model=self.model, metrics=self.validation_metrics) if self.task.loss in logs: self._validation_loss.update_state(logs[self.task.loss]) return logs inputs, passthrough_logs = self.next_eval_inputs(iterator) distributed_outputs = self.strategy.run(step_fn, args=(inputs,)) logs = tf.nest.map_structure( self.strategy.experimental_local_results, distributed_outputs ) if set(logs.keys()) & set(passthrough_logs.keys()): logging.warning( ( "Conflict between the pasthrough log keys and the returned model" " log keys. Found %r keys in the passthrough logs and %r keys in" " the model logs. Model log keys takes precedence." ), logs.keys(), passthrough_logs.keys(), ) return passthrough_logs | logs def eval_end(self, aggregated_logs=None): """Processes evaluation results.""" self.join() logs = {} for metric in self.validation_metrics: logs[metric.name] = metric.result() if self.validation_loss.count.numpy() != 0: logs[self.validation_loss.name] = self.validation_loss.result() else: # `self.validation_loss` metric was not updated, because the validation # loss was not returned from the task's `validation_step` method. logging.info("The task did not report validation loss.") if aggregated_logs: metrics = self.task.reduce_aggregated_logs( aggregated_logs, global_step=self.global_step) logs.update(metrics) if self._checkpoint_exporter: self._checkpoint_exporter.maybe_export_checkpoint( self.checkpoint, logs, self.global_step.numpy()) metric_name = self.config.trainer.best_checkpoint_eval_metric logs["best_" + metric_name] = self._checkpoint_exporter.best_ckpt_logs[metric_name] # Swaps back weights after testing when EMA is used. # This happens after best checkpoint export so that average weights used for # eval are exported instead of regular weights. if self.optimizer and isinstance(self.optimizer, optimization.ExponentialMovingAverage): self.optimizer.swap_weights() return logs def eval_reduce(self, state=None, step_outputs=None): return self.task.aggregate_logs(state, step_outputs)
17,783
35.073022
80
py
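The sketch below is not part of the file above; it shows one hedged way to assemble the Trainer constructor documented there from a registered experiment. The experiment name is a placeholder, task_factory/exp_factory usage is an assumption about the surrounding official.core APIs, and error handling is omitted.

import tensorflow as tf

from official.core import base_trainer
from official.core import exp_factory
from official.core import task_factory


def build_trainer(experiment_name: str) -> base_trainer.Trainer:
  params = exp_factory.get_exp_config(experiment_name)  # registered name
  strategy = tf.distribute.get_strategy()
  with strategy.scope():  # Trainer must be created inside the strategy scope.
    task = task_factory.get_task(params.task)
    model = task.build_model()
    optimizer = task.create_optimizer(params.trainer.optimizer_config,
                                      params.runtime)
    return base_trainer.Trainer(
        params, task, model=model, optimizer=optimizer,
        train=True, evaluate=True)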
models
models-master/official/core/train_utils.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Training utils.""" import dataclasses import inspect import json import os import pprint from typing import Any, Callable, Dict, List, Optional, Union from absl import logging import gin import numpy as np import orbit import tensorflow as tf # pylint: disable=g-direct-tensorflow-import from tensorflow.python.framework import ops from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2_as_graph # pylint: enable=g-direct-tensorflow-import from official.core import base_task from official.core import base_trainer from official.core import config_definitions from official.core import exp_factory from official.modeling import hyperparams BEST_CHECKPOINT_NAME = 'best_ckpt' def get_leaf_nested_dict(d: Dict[str, Any], keys: List[str]) -> Dict[str, Any]: """Get leaf from a dictionary with arbitrary depth with a list of keys. Args: d: The dictionary to extract value from. keys: The list of keys to extract values recursively. Returns: The value of the leaf. Raises: KeyError: If the value of keys extracted is a dictionary. """ leaf = d for k in keys: if not isinstance(leaf, dict) or k not in leaf: raise KeyError( 'Path not exist while traversing the dictionary: d with keys' ': %s.' % keys) leaf = leaf[k] if isinstance(leaf, dict): raise KeyError('The value extracted with keys: %s is not a leaf of the ' 'dictionary: %s.' % (keys, d)) return leaf def cast_leaf_nested_dict(d: Dict[str, Any], cast_fn: Callable[[Any], Any]) -> Dict[str, Any]: """Cast the leaves of a dictionary with arbitrary depth in place. Args: d: The dictionary to extract value from. cast_fn: The casting function. Returns: A dictionray with the same structure as d. """ for key, value in d.items(): if isinstance(value, dict): d[key] = cast_leaf_nested_dict(value, cast_fn) else: d[key] = cast_fn(value) return d def _filter_leaf_nested_dict( d: Dict[str, Any], predicate: Callable[[Any], bool] ) -> Dict[str, Any]: """Filters the leaves of a dictionary with arbitrary depth in place. Args: d: The dictionary to extract value from. predicate: A function that will be called on every leave item. When the function returns True the leave will be kept. Otherwise the leave will be dropped. Returns: A new dictionray with filtered result. 
""" result = {} for key, value in d.items(): if isinstance(value, dict): result[key] = _filter_leaf_nested_dict(value, predicate) elif predicate(value): result[key] = value return result def maybe_create_best_ckpt_exporter(params: config_definitions.ExperimentConfig, data_dir: str) -> Any: """Maybe create a BestCheckpointExporter object, according to the config.""" export_subdir = params.trainer.best_checkpoint_export_subdir metric_name = params.trainer.best_checkpoint_eval_metric metric_comp = params.trainer.best_checkpoint_metric_comp if data_dir and export_subdir and metric_name: best_ckpt_dir = os.path.join(data_dir, export_subdir) best_ckpt_exporter = BestCheckpointExporter(best_ckpt_dir, metric_name, metric_comp) logging.info( 'Created the best checkpoint exporter. ' 'data_dir: %s, export_subdir: %s, metric_name: %s', data_dir, export_subdir, metric_name) else: best_ckpt_exporter = None return best_ckpt_exporter class BestCheckpointExporter: """Keeps track of the best result, and saves its checkpoint. Orbit will support an API for checkpoint exporter. This class will be used together with orbit once this functionality is ready. """ def __init__(self, export_dir: str, metric_name: str, metric_comp: str): """Initialization. Args: export_dir: The directory that will contain exported checkpoints. metric_name: Indicates which metric to look at, when determining which result is better. If eval_logs being passed to maybe_export_checkpoint is a nested dictionary, use `|` as a seperator for different layers. metric_comp: Indicates how to compare results. Either `lower` or `higher`. """ self._export_dir = export_dir self._metric_name = metric_name.split('|') self._metric_comp = metric_comp if self._metric_comp not in ('lower', 'higher'): raise ValueError('best checkpoint metric comp must be one of ' 'higher, lower. 
Got: {}'.format(self._metric_comp)) tf.io.gfile.makedirs(os.path.dirname(self.best_ckpt_logs_path)) self._best_ckpt_logs = self._maybe_load_best_eval_metric() self._checkpoint_manager = None def _get_checkpoint_manager(self, checkpoint): """Gets an existing checkpoint manager or creates a new one.""" if self._checkpoint_manager is None or (self._checkpoint_manager.checkpoint != checkpoint): logging.info('Creates a new checkpoint manager.') self._checkpoint_manager = tf.train.CheckpointManager( checkpoint, directory=self._export_dir, max_to_keep=1, checkpoint_name=BEST_CHECKPOINT_NAME) return self._checkpoint_manager def maybe_export_checkpoint( self, checkpoint, eval_logs, global_step, write_logs=True) -> bool: """Compare eval_logs with past eval_logs and export checkpoint if better.""" logging.info('[BestCheckpointExporter] received eval_logs: %s, at step: %d', eval_logs, global_step) if self._best_ckpt_logs is None or self._new_metric_is_better( self._best_ckpt_logs, eval_logs): self._best_ckpt_logs = eval_logs if write_logs: self.export_best_eval_metric(self._best_ckpt_logs, global_step) self._get_checkpoint_manager(checkpoint).save() return True return False def _maybe_load_best_eval_metric(self): if not tf.io.gfile.exists(self.best_ckpt_logs_path): return None with tf.io.gfile.GFile(self.best_ckpt_logs_path, 'r') as reader: return json.loads(reader.read()) def _new_metric_is_better(self, old_logs, new_logs): """Check if the metric in new_logs is better than the metric in old_logs.""" old_value = float( orbit.utils.get_value( get_leaf_nested_dict(old_logs, self._metric_name))) new_value = float( orbit.utils.get_value( get_leaf_nested_dict(new_logs, self._metric_name))) logging.info('[BestCheckpointExporter] comparing results. old: %f, new: %f', old_value, new_value) if self._metric_comp == 'higher': if new_value > old_value: logging.info('[BestCheckpointExporter] ' 'the new number is better since it is higher.') return True else: # self._metric_comp == 'lower': if new_value < old_value: logging.info('[BestCheckpointExporter] ' 'the new number is better since it is lower.') return True return False def export_best_eval_metric(self, eval_logs, global_step): """Export evaluation results of the best checkpoint into a json file.""" # eval_log_ext may contains non-scalar tensors, such as image data when # `allow_image_summary` is True. Here we only keep scalar tensors. eval_logs_ext = _filter_leaf_nested_dict( eval_logs, lambda x: tf.rank(x) <= 1 ) eval_logs_ext['best_ckpt_global_step'] = global_step eval_logs_ext = cast_leaf_nested_dict( eval_logs_ext, lambda x: float(orbit.utils.get_value(x))) # Saving json file is very fast. 
with tf.io.gfile.GFile(self.best_ckpt_logs_path, 'w') as writer: writer.write(json.dumps(eval_logs_ext, indent=4) + '\n') @property def best_ckpt_logs(self): return self._best_ckpt_logs @property def best_ckpt_logs_path(self): return os.path.join(self._export_dir, 'info.json') @property def best_ckpt_path(self): """Returns the best ckpt path or None if there is no ckpt yet.""" return tf.train.latest_checkpoint(self._export_dir) def create_optimizer(task: base_task.Task, params: config_definitions.ExperimentConfig ) -> tf.keras.optimizers.Optimizer: """A create optimizer util to be backward compatability with new args.""" if 'dp_config' in inspect.signature(task.create_optimizer).parameters: dp_config = None if hasattr(params.task, 'differential_privacy_config'): dp_config = params.task.differential_privacy_config optimizer = task.create_optimizer( params.trainer.optimizer_config, params.runtime, dp_config=dp_config) else: if hasattr(params.task, 'differential_privacy_config' ) and params.task.differential_privacy_config is not None: raise ValueError('Differential privacy config is specified but ' 'task.create_optimizer api does not accept it.') optimizer = task.create_optimizer( params.trainer.optimizer_config, params.runtime) return optimizer @gin.configurable def create_trainer(params: config_definitions.ExperimentConfig, task: base_task.Task, train: bool, evaluate: bool, checkpoint_exporter: Optional[BestCheckpointExporter] = None, trainer_cls=base_trainer.Trainer) -> base_trainer.Trainer: """Create trainer.""" logging.info('Running default trainer.') model = task.build_model() optimizer = create_optimizer(task, params) return trainer_cls( params, task, model=model, optimizer=optimizer, train=train, evaluate=evaluate, checkpoint_exporter=checkpoint_exporter) @dataclasses.dataclass class ParseConfigOptions: """Use this dataclass instead of FLAGS to customize parse_configuration().""" experiment: str config_file: List[str] tpu: str = '' tf_data_service: str = '' params_override: str = '' def __contains__(self, name): return name in dataclasses.asdict(self) class ExperimentParser: """Constructs the Experiment config from Flags or equivalent object. Most of the cases, users only need to call the `parse()` function: ``` builder = ExperimentParser(FLAGS) params = builder.parse() ``` The advanced users can modify the flow by calling the parse_*() functions separately. """ def __init__(self, flags_obj): self._flags_obj = flags_obj def parse(self): """Overrall process of constructing Experiment config.""" params = self.base_experiment() params = self.parse_config_file(params) params = self.parse_runtime(params) params = self.parse_data_service(params) params = self.parse_params_override(params) return params def base_experiment(self): """Get the base experiment config from --experiment field.""" if self._flags_obj.experiment is None: raise ValueError('The flag --experiment must be specified.') return exp_factory.get_exp_config(self._flags_obj.experiment) def parse_config_file(self, params): """Override the configs of params from the config_file.""" for config_file in self._flags_obj.config_file or []: params = hyperparams.override_params_dict( params, config_file, is_strict=True) return params def parse_runtime(self, params): """Override the runtime configs of params from flags.""" # Override the TPU address and tf.data service address. 
params.override({ 'runtime': { 'tpu': self._flags_obj.tpu, }, }) return params def parse_data_service(self, params): """Override the data service configs of params from flags.""" if ('tf_data_service' in self._flags_obj and self._flags_obj.tf_data_service and isinstance(params.task, config_definitions.TaskConfig)): params.override({ 'task': { 'train_data': { 'tf_data_service_address': self._flags_obj.tf_data_service, }, 'validation_data': { 'tf_data_service_address': self._flags_obj.tf_data_service, } } }) return params def parse_params_override(self, params): # Get the second level of override from `--params_override`. # `--params_override` is typically used as a further override over the # template. For example, one may define a particular template for training # ResNet50 on ImageNet in a config file and pass it via `--config_file`, # then define different learning rates and pass it via `--params_override`. if self._flags_obj.params_override: params = hyperparams.override_params_dict( params, self._flags_obj.params_override, is_strict=True) return params def parse_configuration(flags_obj, lock_return=True, print_return=True): """Parses ExperimentConfig from flags.""" params = ExperimentParser(flags_obj).parse() params.validate() if lock_return: params.lock() if print_return: pp = pprint.PrettyPrinter() logging.info('Final experiment parameters:\n%s', pp.pformat(params.as_dict())) return params def serialize_config(params: config_definitions.ExperimentConfig, model_dir: str): """Serializes and saves the experiment config.""" if model_dir is None: raise ValueError('model_dir must be specified, but got None') params_save_path = os.path.join(model_dir, 'params.yaml') logging.info('Saving experiment configuration to %s', params_save_path) tf.io.gfile.makedirs(model_dir) hyperparams.save_params_dict_to_yaml(params, params_save_path) def save_gin_config(filename_suffix: str, model_dir: str): """Serializes and saves the experiment config.""" gin_save_path = os.path.join( model_dir, 'operative_config.{}.gin'.format(filename_suffix)) logging.info('Saving gin configurations to %s', gin_save_path) tf.io.gfile.makedirs(model_dir) with tf.io.gfile.GFile(gin_save_path, 'w') as f: f.write(gin.operative_config_str()) def read_global_step_from_checkpoint(ckpt_file_path): """Read global step from checkpoint, or get global step from its filename.""" global_step = tf.Variable(-1, dtype=tf.int64) ckpt = tf.train.Checkpoint(global_step=global_step) try: ckpt.restore(ckpt_file_path).expect_partial() global_step_maybe_restored = global_step.numpy() except tf.errors.InvalidArgumentError: global_step_maybe_restored = -1 if global_step_maybe_restored == -1: raise ValueError('global_step not found in checkpoint {}. 
' 'If you want to run finetune eval jobs, you need to ' 'make sure that your pretrain model writes ' 'global_step in its checkpoints.'.format(ckpt_file_path)) global_step_restored = global_step.numpy() logging.info('get global_step %d from checkpoint %s', global_step_restored, ckpt_file_path) return global_step_restored def write_json_summary(log_dir, global_step, eval_metrics): """Dump evaluation metrics to json file.""" serializable_dict = {} for name, value in eval_metrics.items(): if hasattr(value, 'numpy'): serializable_dict[name] = str(value.numpy()) else: serializable_dict[name] = str(value) output_json = os.path.join(log_dir, 'metrics-{}.json'.format(global_step)) logging.info('Evaluation results at pretrain step %d: %s', global_step, serializable_dict) with tf.io.gfile.GFile(output_json, 'w') as writer: writer.write(json.dumps(serializable_dict, indent=4) + '\n') def write_summary(summary_writer, global_step, eval_metrics): """Write evaluation metrics to TF summary.""" numeric_dict = {} for name, value in eval_metrics.items(): numeric_dict[name] = float(orbit.utils.get_value(value)) with summary_writer.as_default(): for name, value in numeric_dict.items(): tf.summary.scalar(name, value, step=global_step) summary_writer.flush() def remove_ckpts(model_dir): """Remove model checkpoints, so we can restart.""" ckpts = os.path.join(model_dir, 'ckpt-*') logging.info('removing checkpoint files %s', ckpts) for file_to_remove in tf.io.gfile.glob(ckpts): tf.io.gfile.rmtree(file_to_remove) file_to_remove = os.path.join(model_dir, 'checkpoint') if tf.io.gfile.exists(file_to_remove): tf.io.gfile.remove(file_to_remove) def write_model_params(model: Union[tf.Module, tf.keras.Model], output_path: str) -> None: """Writes the model parameters and shapes to a file. Args: model: A model instance. output_path: Output file path. """ with tf.io.gfile.GFile(output_path, 'w') as f: total_params = 0 for var in model.variables: shape = tf.shape(var) total_params += tf.math.reduce_prod(shape).numpy() f.write(f'{var.name} {shape.numpy().tolist()}\n') f.write(f'\nTotal params: {total_params}\n') def try_count_params( model: Union[tf.Module, tf.keras.Model], trainable_only: bool = False): """Count the number of parameters if model is possible. Args: model: Try to count the number of params in this model. trainable_only: Whether to calculate trainable params only. This flag is not used when the model has `count_params` attribute. Returns: The number of parameters or None. """ if hasattr(model, 'count_params'): try: return model.count_params() except ValueError: logging.info('Number of trainable params unknown, because the build() ' 'methods in keras layers were not called. This is probably ' 'because the model was not feed any input, e.g., the max ' 'train step already reached before this run.') return None else: total_params = 0 variables = model.trainable_variables if trainable_only else model.variables for var in variables: shape = tf.shape(var) total_params += tf.math.reduce_prod(shape).numpy() return total_params def try_count_flops(model: Union[tf.Module, tf.keras.Model], inputs_kwargs: Optional[Dict[str, Any]] = None, output_path: Optional[str] = None): """Counts and returns model FLOPs. Args: model: A model instance. inputs_kwargs: An optional dictionary of argument pairs specifying inputs' shape specifications to getting corresponding concrete function. output_path: A file path to write the profiling results to. Returns: The model's FLOPs. 
""" if hasattr(model, 'inputs'): try: # Get input shape and set batch size to 1. if model.inputs: inputs = [ tf.TensorSpec([1] + input.shape[1:], input.dtype) for input in model.inputs ] concrete_func = tf.function(model).get_concrete_function(inputs) # If model.inputs is invalid, try to use the input to get concrete # function for model.call (subclass model). else: concrete_func = tf.function(model.call).get_concrete_function( **inputs_kwargs) frozen_func, _ = convert_variables_to_constants_v2_as_graph(concrete_func) # Calculate FLOPs. run_meta = tf.compat.v1.RunMetadata() opts = tf.compat.v1.profiler.ProfileOptionBuilder.float_operation() if output_path is not None: opts['output'] = f'file:outfile={output_path}' else: opts['output'] = 'none' flops = tf.compat.v1.profiler.profile( graph=frozen_func.graph, run_meta=run_meta, options=opts) return flops.total_float_ops except Exception as e: # pylint: disable=broad-except logging.info( 'Failed to count model FLOPs with error %s, because the build() ' 'methods in keras layers were not called. This is probably because ' 'the model was not feed any input, e.g., the max train step already ' 'reached before this run.', e) return None return None @ops.RegisterStatistics('Einsum', 'flops') def _einsum_flops(graph, node): """Calculates the compute resources needed for Einsum.""" assert len(node.input) == 2 x_shape = tf.compat.v1.graph_util.tensor_shape_from_node_def_name( graph, node.input[0]) y_shape = tf.compat.v1.graph_util.tensor_shape_from_node_def_name( graph, node.input[1]) x_shape.assert_is_fully_defined() y_shape.assert_is_fully_defined() x_shape = x_shape.as_list() y_shape = y_shape.as_list() equation = str(node.attr['equation']) equation = ( equation.replace('s:', '') .replace('"', '') .replace(' ', '') .replace('\n', '') ) x_str = equation.split(',')[0] y_r_str = equation.split(',')[1] y_str = y_r_str.split('->')[0] r_str = y_r_str.split('->')[1] shape_dic = {} contracted = set() for indice in x_str + y_str: if indice in x_str: indice_dim = x_shape[x_str.find(indice)] elif indice in y_str: indice_dim = y_shape[y_str.find(indice)] else: raise ValueError('indice {} not found in inputs'.format(indice)) shape_dic[indice] = indice_dim if indice not in r_str: contracted.add(indice) madds = np.prod([shape_dic[indice] for indice in r_str]) * ( np.prod([shape_dic[indice] for indice in contracted])) flops = 2 * madds return ops.OpStats('flops', flops)
22,106
35.181669
103
py
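A small, hedged example (not from the repository) of the BestCheckpointExporter defined above; the export directory, metric path, and values are placeholders.

import tensorflow as tf

from official.core import train_utils

exporter = train_utils.BestCheckpointExporter(
    export_dir='/tmp/model_dir/best_ckpt',  # placeholder path
    metric_name='validation|accuracy',      # '|' separates nesting levels
    metric_comp='higher')

step = tf.Variable(100, dtype=tf.int64)
checkpoint = tf.train.Checkpoint(step=step)
eval_logs = {'validation': {'accuracy': 0.91}}

# Saves the checkpoint and writes info.json only if 0.91 beats the best
# value recorded so far for validation/accuracy.
was_exported = exporter.maybe_export_checkpoint(checkpoint, eval_logs, 100)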
models
models-master/official/core/actions.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Provides TFM orbit actions and associated helper functions/classes."""
import os
from typing import List

from absl import logging
import gin
import orbit
import tensorflow as tf

from official.core import base_trainer
from official.core import config_definitions
from official.modeling import optimization


class PruningAction:
  """Train action to update pruning-related information.

  This action updates pruning steps at the end of the training loop, and logs
  pruning metrics to TensorBoard.

  This action must be used when training a pruned model to avoid pruning
  errors.
  """

  def __init__(
      self,
      export_dir: str,
      model: tf.keras.Model,
      optimizer: tf.keras.optimizers.Optimizer,
  ):
    """Initializes the instance.

    Args:
      export_dir: `str` for the export directory of the pruning summaries.
      model: `tf.keras.Model` model instance used for training. This will be
        used to assign a pruning step to each prunable weight.
      optimizer: `tf.keras.optimizers.Optimizer` optimizer instance used for
        training. This will be used to find the current training steps.
    """
    # TODO(b/221490190): Avoid local import when the bug is fixed.
    import tensorflow_model_optimization as tfmot  # pylint: disable=g-import-not-at-top
    self._optimizer = optimizer
    self.update_pruning_step = tfmot.sparsity.keras.UpdatePruningStep()
    self.update_pruning_step.set_model(model)
    self.update_pruning_step.on_train_begin()

    self.pruning_summaries = tfmot.sparsity.keras.PruningSummaries(
        log_dir=export_dir)
    model.optimizer = optimizer
    self.pruning_summaries.set_model(model)

  def __call__(self, output: orbit.runner.Output):
    """Update pruning step and log pruning summaries.

    Args:
      output: The train output.
    """
    self.update_pruning_step.on_epoch_end(batch=None)
    self.pruning_summaries.on_epoch_begin(epoch=None)


class EMACheckpointing:
  """Eval action to save checkpoint with average weights when EMA is used.

  This action swaps the weights of the model with the average weights, then
  it saves the checkpoint under export_dir/ema_checkpoints. Checkpointing is
  expensive for large models, so doing this in eval is more efficient than in
  training.
  """

  def __init__(self, export_dir: str, optimizer: tf.keras.optimizers.Optimizer,
               checkpoint: tf.train.Checkpoint, max_to_keep: int = 1):
    """Initializes the instance.

    Args:
      export_dir: `str` for the export directory of the EMA average weights.
      optimizer: `tf.keras.optimizers.Optimizer` optimizer instance used for
        training. This will be used to swap the model weights with the average
        weights.
      checkpoint: `tf.train.Checkpoint` instance.
      max_to_keep: `int` for max checkpoints to keep in ema_checkpoints subdir.
    """
    if not isinstance(optimizer, optimization.ExponentialMovingAverage):
      raise ValueError('Optimizer has to be an instance of '
                       'optimization.ExponentialMovingAverage for '
                       'EMACheckpointing action')

    export_dir = os.path.join(export_dir, 'ema_checkpoints')
    tf.io.gfile.makedirs(os.path.dirname(export_dir))
    self._optimizer = optimizer
    self._checkpoint = checkpoint
    self._checkpoint_manager = tf.train.CheckpointManager(
        checkpoint,
        directory=export_dir,
        max_to_keep=max_to_keep,
        checkpoint_name='average_weights')

  def __call__(self, output: orbit.runner.Output):
    """Swaps model weights, and saves the checkpoint.

    Args:
      output: The train or eval output.
    """
    self._optimizer.swap_weights()
    self._checkpoint_manager.save(checkpoint_number=self._optimizer.iterations)
    self._optimizer.swap_weights()


class RecoveryAction:
  """Train action to recover from loss blowup.

  Checks the loss value against the given threshold. If applicable, recovers
  the model by reading the checkpoint on disk.
  """

  def __init__(self, checkpoint_manager: tf.train.CheckpointManager):
    self.checkpoint_manager = checkpoint_manager

  def __call__(self, _):
    """Recovers the training by triggering checkpoint restoration."""
    # Loads the previous good checkpoint.
    checkpoint_path = self.checkpoint_manager.restore_or_initialize()
    logging.warning('Recovering the model from checkpoint: %s.',
                    checkpoint_path)


class RecoveryCondition:
  """Recovery Condition."""

  def __init__(self,
               global_step: tf.Variable,
               loss_upper_bound: float,
               recovery_begin_steps: int = 0,
               recovery_max_trials: int = 3):
    self.recover_counter = 0
    self.recovery_begin_steps = recovery_begin_steps
    self.recovery_max_trials = recovery_max_trials
    self.loss_upper_bound = loss_upper_bound
    self.global_step = global_step

  def __call__(self, outputs: orbit.runner.Output):
    loss_value = outputs['training_loss']
    if tf.math.is_nan(loss_value):
      self.recover_counter += 1
      if self.recover_counter > self.recovery_max_trials:
        raise RuntimeError(
            'The loss value is NaN after training loop and it happens %d times.'
            % self.recover_counter)
      return True
    if (self.global_step >= self.recovery_begin_steps and
        loss_value > self.loss_upper_bound):
      self.recover_counter += 1
      if self.recover_counter > self.recovery_max_trials:
        raise RuntimeError(
            f'The loss value is {loss_value}, which is larger than the bound {self.loss_upper_bound}, happens {self.recover_counter} times.'
        )
      return True
    return False


@gin.configurable
def get_eval_actions(params: config_definitions.ExperimentConfig,
                     trainer: base_trainer.Trainer,
                     model_dir: str) -> List[orbit.Action]:
  """Gets eval actions for TFM trainer."""
  eval_actions = []
  # Adds ema checkpointing action to save the average weights under
  # ema_checkpoints subdir.
  if isinstance(trainer.optimizer, optimization.ExponentialMovingAverage):
    eval_actions.append(
        EMACheckpointing(
            export_dir=model_dir,
            optimizer=trainer.optimizer,
            checkpoint=trainer.checkpoint,
            max_to_keep=params.trainer.max_to_keep))

  return eval_actions


@gin.configurable
def get_train_actions(
    params: config_definitions.ExperimentConfig, trainer: base_trainer.Trainer,
    model_dir: str,
    checkpoint_manager: tf.train.CheckpointManager) -> List[orbit.Action]:
  """Gets train actions for TFM trainer."""
  train_actions = []
  # Adds pruning callback actions.
  if hasattr(params.task, 'pruning') and params.task.pruning:
    train_actions.append(
        PruningAction(
            export_dir=model_dir,
            model=trainer.model,
            optimizer=trainer.optimizer))

  if params.trainer.recovery_max_trials >= 0:
    recovery_condition = RecoveryCondition(
        global_step=trainer.global_step,
        loss_upper_bound=params.trainer.loss_upper_bound,
        recovery_begin_steps=params.trainer.recovery_begin_steps,
        recovery_max_trials=params.trainer.recovery_max_trials,
    )
    recover_action = orbit.actions.ConditionalAction(
        condition=recovery_condition,
        action=RecoveryAction(checkpoint_manager),
    )
    train_actions.append(recover_action)

  if (
      params.trainer.preemption_on_demand_checkpoint
      and trainer.strategy.cluster_resolver
  ):
    on_demand_checkpoint_action = orbit.actions.SaveCheckpointIfPreempted(
        trainer.strategy.cluster_resolver,
        checkpoint_manager,
        trainer.global_step,
        keep_running_after_save=True,
    )
    train_actions.append(on_demand_checkpoint_action)

  return train_actions
8,466
34.725738
140
py
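To illustrate how the recovery pieces above compose into an Orbit action (mirroring get_train_actions, but as a standalone sketch rather than repository code); the loss bound and step values are placeholders.

import orbit
import tensorflow as tf

from official.core import actions


def make_recovery_action(checkpoint_manager: tf.train.CheckpointManager,
                         global_step: tf.Variable) -> orbit.Action:
  # Trigger recovery once training passes step 1000 and the loss exceeds 100,
  # restoring the last good checkpoint via RecoveryAction.
  condition = actions.RecoveryCondition(
      global_step=global_step,
      loss_upper_bound=100.0,
      recovery_begin_steps=1000,
      recovery_max_trials=3)
  return orbit.actions.ConditionalAction(
      condition=condition,
      action=actions.RecoveryAction(checkpoint_manager))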
models
models-master/official/core/export_base_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Tests for official.core.export_base."""
import os
from typing import Any, Dict, Mapping, Text

import tensorflow as tf

from official.core import export_base


class TestModule(export_base.ExportModule):

  @tf.function
  def serve(self, inputs: tf.Tensor) -> Mapping[Text, tf.Tensor]:
    x = inputs if self.preprocessor is None else self.preprocessor(
        inputs=inputs)
    x = self.inference_step(x)
    x = self.postprocessor(x) if self.postprocessor else x
    return {'outputs': x}

  def get_inference_signatures(
      self, function_keys: Dict[Text, Text]) -> Mapping[Text, Any]:
    input_signature = tf.TensorSpec(shape=[None, None], dtype=tf.float32)
    return {'foo': self.serve.get_concrete_function(input_signature)}


class ExportBaseTest(tf.test.TestCase):

  def test_export_module(self):
    tmp_dir = self.get_temp_dir()
    model = tf.keras.layers.Dense(2)
    inputs = tf.ones([2, 4], tf.float32)
    expected_output = model(inputs, training=False)
    module = TestModule(params=None, model=model)
    ckpt_path = tf.train.Checkpoint(model=model).save(
        os.path.join(tmp_dir, 'ckpt'))
    export_dir = export_base.export(
        module, ['foo'],
        export_savedmodel_dir=tmp_dir,
        checkpoint_path=ckpt_path,
        timestamped=True)
    self.assertTrue(os.path.exists(os.path.join(export_dir, 'saved_model.pb')))
    self.assertTrue(
        os.path.exists(
            os.path.join(export_dir, 'variables', 'variables.index')))
    self.assertTrue(
        os.path.exists(
            os.path.join(export_dir, 'variables',
                         'variables.data-00000-of-00001')))

    imported = tf.saved_model.load(export_dir)
    output = imported.signatures['foo'](inputs)
    self.assertAllClose(output['outputs'].numpy(), expected_output.numpy())

  def test_custom_inference_step(self):
    tmp_dir = self.get_temp_dir()
    model = tf.keras.layers.Dense(2)
    inputs = tf.ones([2, 4], tf.float32)

    def _inference_step(inputs, model):
      return tf.nn.softmax(model(inputs, training=False))

    module = TestModule(
        params=None, model=model, inference_step=_inference_step)
    expected_output = _inference_step(inputs, model)
    ckpt_path = tf.train.Checkpoint(model=model).save(
        os.path.join(tmp_dir, 'ckpt'))
    export_dir = export_base.export(
        module, ['foo'],
        export_savedmodel_dir=tmp_dir,
        checkpoint_path=ckpt_path,
        timestamped=False)
    imported = tf.saved_model.load(export_dir)
    output = imported.signatures['foo'](inputs)
    self.assertAllClose(output['outputs'].numpy(), expected_output.numpy())

  def test_processors(self):
    model = tf.Module()
    inputs = tf.zeros((), tf.float32)

    def _inference_step(inputs, model):
      del model
      return inputs + 1.0

    def _preprocessor(inputs):
      print(inputs)
      return inputs + 0.1

    module = TestModule(
        params=None,
        model=model,
        inference_step=_inference_step,
        preprocessor=_preprocessor)
    output = module.serve(inputs)
    self.assertAllClose(output['outputs'].numpy(), 1.1)

    class _PostProcessor(tf.Module):

      def __call__(self, inputs):
        return inputs + 0.01

    module = TestModule(
        params=None,
        model=model,
        inference_step=_inference_step,
        preprocessor=_preprocessor,
        postprocessor=_PostProcessor())
    output = module.serve(inputs)
    self.assertAllClose(output['outputs'].numpy(), 1.11)

  def test_get_timestamped_export_dir(self):
    export_dir = self.get_temp_dir()
    timed_dir = export_base.get_timestamped_export_dir(
        export_dir_base=export_dir)
    self.assertFalse(tf.io.gfile.exists(timed_dir))
    self.assertIn(export_dir, str(timed_dir))


if __name__ == '__main__':
  tf.test.main()
4,426
32.037313
79
py
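Consumer-side counterpart to the export flow tested above (illustrative only, not repository code): loading the exported SavedModel and calling the 'foo' signature; the path is a placeholder.

import tensorflow as tf

imported = tf.saved_model.load('/tmp/export_dir')  # placeholder path
serving_fn = imported.signatures['foo']
outputs = serving_fn(tf.ones([2, 4], tf.float32))['outputs']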
models
models-master/official/core/train_lib.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """TFM common training driver library.""" # pytype: disable=attribute-error import os import tempfile from typing import Any, List, Mapping, Optional, Tuple # Import libraries from absl import logging import orbit import tensorflow as tf from official.core import actions from official.core import base_task from official.core import base_trainer from official.core import config_definitions from official.core import train_utils maybe_create_best_ckpt_exporter = train_utils.maybe_create_best_ckpt_exporter class OrbitExperimentRunner: """Runs experiment with Orbit training loop. The default experiment runner for model garden experiments. User can customize the experiment pipeline by subclassing this class and replacing components or functions. For example, an experiment runner with customized checkpoint manager: ```python class MyExpRunnerWithExporter(OrbitExperimentRunner): def _maybe_build_checkpoint_manager(sefl): # Replaces the default CheckpointManger with a customized one. return MyCheckpointManager(*args) # In user code, instead of the orginal # `OrbitExperimentRunner(..).run(mode)`, now user can do: MyExpRunnerWithExporter(**needed_kwargs).run(mode) ``` Similar override can be done to other components. """ def __init__( self, distribution_strategy: tf.distribute.Strategy, task: base_task.Task, mode: str, params: config_definitions.ExperimentConfig, model_dir: str, run_post_eval: bool = False, save_summary: bool = True, train_actions: Optional[List[orbit.Action]] = None, eval_actions: Optional[List[orbit.Action]] = None, trainer: Optional[base_trainer.Trainer] = None, controller_cls=orbit.Controller, summary_manager: Optional[orbit.utils.SummaryManager] = None, eval_summary_manager: Optional[orbit.utils.SummaryManager] = None, enable_async_checkpointing: bool = False, ): """Constructor. Args: distribution_strategy: A distribution strategy. task: A Task instance. mode: A 'str', specifying the mode. Can be 'train', 'eval', 'train_and_eval' or 'continuous_eval'. params: ExperimentConfig instance. model_dir: A 'str', a path to store model checkpoints and summaries. run_post_eval: Whether to run post eval once after training, metrics logs are returned. save_summary: Whether to save train and validation summary. train_actions: Optional list of Orbit train actions. eval_actions: Optional list of Orbit eval actions. trainer: the base_trainer.Trainer instance. It should be created within the strategy.scope(). controller_cls: The controller class to manage the train and eval process. Must be a orbit.Controller subclass. summary_manager: Instance of the summary manager to override default summary manager. eval_summary_manager: Instance of the eval summary manager to override default eval summary manager. enable_async_checkpointing: Optional boolean indicating whether to enable async checkpoint saving. 
""" self.strategy = distribution_strategy or tf.distribute.get_strategy() self._params = params self._model_dir = model_dir self._mode = mode self._run_post_eval = run_post_eval self._trainer = trainer or self._build_trainer( task, train='train' in mode, evaluate=('eval' in mode) or run_post_eval) assert self.trainer is not None self._checkpoint_manager = self._maybe_build_checkpoint_manager() self._summary_manager = summary_manager self._eval_summary_manager = eval_summary_manager self._controller = self._build_controller( trainer=self.trainer if 'train' in mode else None, evaluator=self.trainer, save_summary=save_summary, train_actions=train_actions, eval_actions=eval_actions, controller_cls=controller_cls, enable_async_checkpointing=enable_async_checkpointing) @property def params(self) -> config_definitions.ExperimentConfig: """The whole experiment parameters object.""" return self._params @property def model_dir(self) -> str: """Path to the model folder, which stores checkpoints, params, log, etc.""" return self._model_dir @property def trainer(self) -> base_trainer.Trainer: """The underlying Orbit Trainer object.""" return self._trainer @property def checkpoint_manager(self) -> Optional[tf.train.CheckpointManager]: """The CheckpointManager that stores the checkpoints in a train job.""" return self._checkpoint_manager @property def controller(self) -> orbit.Controller: """The Orbit controller object.""" return self._controller def _build_trainer(self, task: base_task.Task, train: bool, evaluate: bool) -> base_trainer.Trainer: """Create trainer.""" with self.strategy.scope(): trainer = train_utils.create_trainer( self.params, task, train=train, evaluate=evaluate, checkpoint_exporter=self._build_best_checkpoint_exporter()) return trainer def _build_best_checkpoint_exporter(self): return maybe_create_best_ckpt_exporter(self.params, self.model_dir) def _maybe_build_checkpoint_manager( self) -> Optional[tf.train.CheckpointManager]: """Maybe create a CheckpointManager.""" assert self.trainer is not None if self.trainer.checkpoint: if self.model_dir is None: raise ValueError('model_dir must be specified, but got None') if (not self.strategy) or self.strategy.extended.should_checkpoint: ckpt_path = self.model_dir max_to_keep = self.params.trainer.max_to_keep else: # In multi worker training we need every worker to save checkpoint, # because variables can trigger synchronization on read and # synchronization needs all workers to participate. To avoid workers # overriding each other we save to a temporary directory on non-chief # workers. ckpt_path = tempfile.mkdtemp() max_to_keep = 1 checkpoint_manager = tf.train.CheckpointManager( self.trainer.checkpoint, directory=ckpt_path, max_to_keep=max_to_keep, step_counter=self.trainer.global_step, checkpoint_interval=self.params.trainer.checkpoint_interval, init_fn=self.trainer.initialize) else: checkpoint_manager = None return checkpoint_manager def _build_controller( self, trainer, evaluator, save_summary: bool = True, train_actions: Optional[List[orbit.Action]] = None, eval_actions: Optional[List[orbit.Action]] = None, controller_cls=orbit.Controller, enable_async_checkpointing: bool = False, ) -> orbit.Controller: """Builds a Orbit controler.""" train_actions = [] if not train_actions else train_actions if trainer: checkpoint_manager = self.checkpoint_manager assert checkpoint_manager, 'Checkpoint manager required but undefined.' 
train_actions += actions.get_train_actions( self.params, trainer, self.model_dir, checkpoint_manager=checkpoint_manager, ) eval_actions = [] if not eval_actions else eval_actions if evaluator: eval_actions += actions.get_eval_actions(self.params, evaluator, self.model_dir) if save_summary: eval_summary_dir = os.path.join( self.model_dir, self.params.trainer.validation_summary_subdir ) else: eval_summary_dir = None controller = controller_cls( strategy=self.strategy, trainer=trainer, evaluator=evaluator, global_step=self.trainer.global_step, steps_per_loop=self.params.trainer.steps_per_loop, checkpoint_manager=self.checkpoint_manager, enable_async_checkpointing=enable_async_checkpointing, summary_dir=os.path.join(self.model_dir, 'train') if (save_summary) else None, eval_summary_dir=eval_summary_dir, summary_interval=self.params.trainer.summary_interval if (save_summary) else None, train_actions=train_actions, eval_actions=eval_actions, summary_manager=self._summary_manager if hasattr(self, '_summary_manager') else None, eval_summary_manager=self._eval_summary_manager if hasattr(self, '_eval_summary_manager') else None, ) return controller def run(self) -> Tuple[tf.keras.Model, Mapping[str, Any]]: """Run experiments by mode. Returns: A 2-tuple of (model, eval_logs). model: `tf.keras.Model` instance. eval_logs: returns eval metrics logs when run_post_eval is set to True, otherwise, returns {}. """ mode = self._mode params = self.params logging.info('Starts to execute mode: %s', mode) with self.strategy.scope(): if mode == 'train' or mode == 'train_and_post_eval': self.controller.train(steps=params.trainer.train_steps) elif mode == 'train_and_eval': self.controller.train_and_evaluate( train_steps=params.trainer.train_steps, eval_steps=params.trainer.validation_steps, eval_interval=params.trainer.validation_interval) elif mode == 'eval': self.controller.evaluate(steps=params.trainer.validation_steps) elif mode == 'continuous_eval': def timeout_fn(): if self.trainer.global_step.numpy() >= params.trainer.train_steps: return True return False self.controller.evaluate_continuously( steps=params.trainer.validation_steps, timeout=params.trainer.continuous_eval_timeout, timeout_fn=timeout_fn) else: raise NotImplementedError('The mode is not implemented: %s' % mode) num_params = train_utils.try_count_params(self.trainer.model) if num_params is not None: logging.info('Number of trainable params in model: %f Millions.', num_params / 10.**6) flops = train_utils.try_count_flops(self.trainer.model) if flops is not None: logging.info('FLOPs (multi-adds) in model: %f Billions.', flops / 10.**9 / 2) if self._run_post_eval or mode == 'train_and_post_eval': with self.strategy.scope(): return self.trainer.model, self.controller.evaluate( steps=params.trainer.validation_steps) else: return self.trainer.model, {} def run_experiment( distribution_strategy: tf.distribute.Strategy, task: base_task.Task, mode: str, params: config_definitions.ExperimentConfig, model_dir: str, run_post_eval: bool = False, save_summary: bool = True, train_actions: Optional[List[orbit.Action]] = None, eval_actions: Optional[List[orbit.Action]] = None, trainer: Optional[base_trainer.Trainer] = None, controller_cls=orbit.Controller, summary_manager: Optional[orbit.utils.SummaryManager] = None, eval_summary_manager: Optional[orbit.utils.SummaryManager] = None, enable_async_checkpointing: bool = False, ) -> Tuple[tf.keras.Model, Mapping[str, Any]]: """Runs train/eval configured by the experiment params. 
Args: distribution_strategy: A distribution distribution_strategy. task: A Task instance. mode: A 'str', specifying the mode. Can be 'train', 'eval', 'train_and_eval' or 'continuous_eval'. params: ExperimentConfig instance. model_dir: A 'str', a path to store model checkpoints and summaries. run_post_eval: Whether to run post eval once after training, metrics logs are returned. save_summary: Whether to save train and validation summary. train_actions: Optional list of Orbit train actions. eval_actions: Optional list of Orbit eval actions. trainer: the base_trainer.Trainer instance. It should be created within the strategy.scope(). controller_cls: The controller class to manage the train and eval process. Must be a orbit.Controller subclass. summary_manager: Instance of the summary manager to override default summary manager. eval_summary_manager: Instance of the eval summary manager to override default eval summary manager. enable_async_checkpointing: Optional boolean indicating whether to enable async checkpoint saving. Returns: A 2-tuple of (model, eval_logs). model: `tf.keras.Model` instance. eval_logs: returns eval metrics logs when run_post_eval is set to True, otherwise, returns {}. """ runner = OrbitExperimentRunner( distribution_strategy=distribution_strategy, task=task, mode=mode, params=params, model_dir=model_dir, run_post_eval=run_post_eval, save_summary=save_summary, train_actions=train_actions, eval_actions=eval_actions, trainer=trainer, controller_cls=controller_cls, summary_manager=summary_manager, eval_summary_manager=eval_summary_manager, enable_async_checkpointing=enable_async_checkpointing, ) return runner.run()
13,911
36.297587
80
py
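A minimal sketch of driving the train_lib.py record above end to end. It assumes an experiment already registered with the Model Garden factories (exp_factory, task_factory), which are not part of the record; 'my_experiment' and the model directory are hypothetical placeholders.

import tensorflow as tf
from official.core import exp_factory
from official.core import task_factory
from official.core import train_lib

params = exp_factory.get_exp_config('my_experiment')  # hypothetical experiment
model_dir = '/tmp/my_experiment'                      # hypothetical directory
task = task_factory.get_task(params.task, logging_dir=model_dir)

# Train and evaluate under a distribution strategy; eval_logs is populated
# because run_post_eval=True.
model, eval_logs = train_lib.run_experiment(
    distribution_strategy=tf.distribute.MirroredStrategy(),
    task=task,
    mode='train_and_eval',
    params=params,
    model_dir=model_dir,
    run_post_eval=True)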
models
models-master/official/core/actions_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for TFM actions.""" import os from absl.testing import parameterized import numpy as np import orbit import tensorflow as tf from tensorflow.python.distribute import combinations from tensorflow.python.distribute import strategy_combinations from official.core import actions from official.modeling import optimization class TestModel(tf.keras.Model): def __init__(self): super().__init__() self.value = tf.Variable(0.0) self.dense = tf.keras.layers.Dense(2) _ = self.dense(tf.zeros((2, 2), tf.float32)) def call(self, x, training=None): return self.value + x class ActionsTest(tf.test.TestCase, parameterized.TestCase): @combinations.generate( combinations.combine( distribution=[ strategy_combinations.cloud_tpu_strategy, strategy_combinations.one_device_strategy, ],)) def test_ema_checkpointing(self, distribution): with distribution.scope(): directory = self.create_tempdir() model = TestModel() optimizer = tf.keras.optimizers.SGD() optimizer = optimization.ExponentialMovingAverage( optimizer, trainable_weights_only=False) # Creats average weights for the model variables. Average weights are # initialized to zero. optimizer.shadow_copy(model) checkpoint = tf.train.Checkpoint(model=model) # Changes model.value to 3, average value is still 0. model.value.assign(3) # Checks model.value is 3 self.assertEqual(model(0.), 3) ema_action = actions.EMACheckpointing(directory, optimizer, checkpoint) ema_action({}) self.assertNotEmpty( tf.io.gfile.glob(os.path.join(directory, 'ema_checkpoints'))) checkpoint.read( tf.train.latest_checkpoint( os.path.join(directory, 'ema_checkpoints'))) # Checks model.value is 0 after swapping. self.assertEqual(model(0.), 0) # Raises an error for a normal optimizer. 
with self.assertRaisesRegex(ValueError, 'Optimizer has to be instance of.*'): _ = actions.EMACheckpointing(directory, tf.keras.optimizers.SGD(), checkpoint) @combinations.generate( combinations.combine( distribution=[ strategy_combinations.default_strategy, strategy_combinations.cloud_tpu_strategy, strategy_combinations.one_device_strategy_gpu, ],)) def test_recovery_condition(self, distribution): with distribution.scope(): global_step = orbit.utils.create_global_step() recover_condition = actions.RecoveryCondition( global_step, loss_upper_bound=0.5, recovery_max_trials=2) outputs = {'training_loss': 0.6} self.assertTrue(recover_condition(outputs)) self.assertTrue(recover_condition(outputs)) with self.assertRaises(RuntimeError): recover_condition(outputs) global_step = orbit.utils.create_global_step() recover_condition = actions.RecoveryCondition( global_step, loss_upper_bound=0.5, recovery_max_trials=2) outputs = {'training_loss': tf.constant([np.nan], tf.float32)} self.assertTrue(recover_condition(outputs)) self.assertTrue(recover_condition(outputs)) with self.assertRaises(RuntimeError): recover_condition(outputs) @combinations.generate( combinations.combine( distribution=[ strategy_combinations.one_device_strategy_gpu, strategy_combinations.one_device_strategy, ],)) def test_pruning(self, distribution): with distribution.scope(): directory = self.get_temp_dir() model = TestModel() optimizer = tf.keras.optimizers.SGD() pruning = actions.PruningAction(directory, model, optimizer) pruning({}) if __name__ == '__main__': tf.test.main()
4,515
33.212121
77
py
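A short sketch, based only on test_recovery_condition in the record above, of how a RecoveryCondition flags a diverging training loss; the loss value and print statement are illustrative.

import orbit
import tensorflow as tf
from official.core import actions

global_step = orbit.utils.create_global_step()
recover_condition = actions.RecoveryCondition(
    global_step, loss_upper_bound=0.5, recovery_max_trials=2)

# Returns True while the loss exceeds the bound (or is NaN); after
# recovery_max_trials such calls it raises RuntimeError instead.
outputs = {'training_loss': tf.constant(0.7)}
if recover_condition(outputs):
  print('training loss above bound, checkpoint recovery requested')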
models
models-master/official/core/export_base.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Base class for model export.""" import abc import functools import time from typing import Any, Callable, Dict, Mapping, List, Optional, Text, Union from absl import logging import tensorflow as tf MAX_DIRECTORY_CREATION_ATTEMPTS = 10 class ExportModule(tf.Module, metaclass=abc.ABCMeta): """Base Export Module.""" def __init__(self, params, model: Union[tf.Module, tf.keras.Model], inference_step: Optional[Callable[..., Any]] = None, *, preprocessor: Optional[Callable[..., Any]] = None, postprocessor: Optional[Callable[..., Any]] = None): """Instantiates an ExportModel. Examples: `inference_step` must be a function that has `model` as an kwarg or the second positional argument. ``` def _inference_step(inputs, model=None): return model(inputs, training=False) module = ExportModule(params, model, inference_step=_inference_step) ``` `preprocessor` and `postprocessor` could be either functions or `tf.Module`. The usages of preprocessor and postprocessor are managed by the implementation of `serve()` method. Args: params: A dataclass for parameters to the module. model: A model instance which contains weights and forward computation. inference_step: An optional callable to forward-pass the model. If not specified, it creates a parital function with `model` as an required kwarg. preprocessor: An optional callable to preprocess the inputs. postprocessor: An optional callable to postprocess the model outputs. """ super().__init__(name=None) self.model = model self.params = params if inference_step is not None: self.inference_step = functools.partial(inference_step, model=self.model) else: if issubclass(type(model), tf.keras.Model): # Default to self.model.call instead of self.model.__call__ to avoid # keras tracing logic designed for training. # Since most of Model Garden's call doesn't not have training kwargs # or the default is False, we don't pass anything here. # Please pass custom inference step if your model has training=True as # default. self.inference_step = self.model.call else: self.inference_step = functools.partial( self.model.__call__, training=False) self.preprocessor = preprocessor self.postprocessor = postprocessor @abc.abstractmethod def serve(self) -> Mapping[Text, tf.Tensor]: """The bare inference function which should run on all devices. Expecting tensors are passed in through keyword arguments. Returns a dictionary of tensors, when the keys will be used inside the SignatureDef. """ @abc.abstractmethod def get_inference_signatures( self, function_keys: Dict[Text, Text]) -> Mapping[Text, Any]: """Get defined function signatures.""" def export(export_module: ExportModule, function_keys: Union[List[Text], Dict[Text, Text]], export_savedmodel_dir: Text, checkpoint_path: Optional[Text] = None, timestamped: bool = True, save_options: Optional[tf.saved_model.SaveOptions] = None, checkpoint: Optional[tf.train.Checkpoint] = None) -> Text: """Exports to SavedModel format. 
Args: export_module: a ExportModule with the keras Model and serving tf.functions. function_keys: a list of string keys to retrieve pre-defined serving signatures. The signaute keys will be set with defaults. If a dictionary is provided, the values will be used as signature keys. export_savedmodel_dir: Output saved model directory. checkpoint_path: Object-based checkpoint path or directory. timestamped: Whether to export the savedmodel to a timestamped directory. save_options: `SaveOptions` for `tf.saved_model.save`. checkpoint: An optional tf.train.Checkpoint. If provided, the export module will use it to read the weights. Returns: The savedmodel directory path. """ ckpt_dir_or_file = checkpoint_path if ckpt_dir_or_file is not None and tf.io.gfile.isdir(ckpt_dir_or_file): ckpt_dir_or_file = tf.train.latest_checkpoint(ckpt_dir_or_file) if ckpt_dir_or_file: if checkpoint is None: checkpoint = tf.train.Checkpoint(model=export_module.model) checkpoint.read( ckpt_dir_or_file).assert_existing_objects_matched().expect_partial() if isinstance(function_keys, list): if len(function_keys) == 1: function_keys = { function_keys[0]: tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY } else: raise ValueError( 'If the function_keys is a list, it must contain a single element. %s' % function_keys) signatures = export_module.get_inference_signatures(function_keys) if timestamped: export_dir = get_timestamped_export_dir(export_savedmodel_dir).decode( 'utf-8') else: export_dir = export_savedmodel_dir tf.saved_model.save( export_module, export_dir, signatures=signatures, options=save_options) return export_dir def get_timestamped_export_dir(export_dir_base): """Builds a path to a new subdirectory within the base directory. Args: export_dir_base: A string containing a directory to write the exported graph and checkpoints. Returns: The full path of the new subdirectory (which is not actually created yet). Raises: RuntimeError: if repeated attempts fail to obtain a unique timestamped directory name. """ attempts = 0 while attempts < MAX_DIRECTORY_CREATION_ATTEMPTS: timestamp = int(time.time()) result_dir = tf.io.gfile.join( tf.compat.as_bytes(export_dir_base), tf.compat.as_bytes(str(timestamp))) if not tf.io.gfile.exists(result_dir): # Collisions are still possible (though extremely unlikely): this # directory is not actually created yet, but it will be almost # instantly on return from this function. return result_dir time.sleep(1) attempts += 1 logging.warning('Directory %s already exists; retrying (attempt %s/%s)', str(result_dir), attempts, MAX_DIRECTORY_CREATION_ATTEMPTS) raise RuntimeError('Failed to obtain a unique export directory name after ' f'{MAX_DIRECTORY_CREATION_ATTEMPTS} attempts.')
7,025
37.393443
80
py
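A minimal sketch of the subclassing pattern implied by export_base.py above: implement serve() and get_inference_signatures(), then call export(). MyExportModule, the Keras model, the input shape, and the output directory are assumptions for illustration, not part of the library.

import tensorflow as tf
from official.core import export_base


class MyExportModule(export_base.ExportModule):
  """Hypothetical export module wrapping a single-tensor model."""

  @tf.function
  def serve(self, inputs):
    # inference_step defaults to model.call for Keras models.
    return {'outputs': self.inference_step(inputs)}

  def get_inference_signatures(self, function_keys):
    input_spec = tf.TensorSpec(shape=[None, 4], dtype=tf.float32, name='inputs')
    return {
        signature_key: self.serve.get_concrete_function(input_spec)
        for signature_key in function_keys.values()
    }


model = tf.keras.Sequential([tf.keras.layers.Dense(2, input_shape=(4,))])
module = MyExportModule(params=None, model=model)
saved_dir = export_base.export(
    module,
    function_keys=['serving_default'],
    export_savedmodel_dir='/tmp/my_export',  # hypothetical output directory
    checkpoint_path=None)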
models
models-master/official/core/base_trainer_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for tensorflow_models.core.trainers.trainer.""" # pylint: disable=g-direct-tensorflow-import import gc import multiprocessing import os import sys from absl.testing import parameterized import orbit import portpicker import tensorflow as tf from tensorflow.python.distribute import combinations from tensorflow.python.distribute import strategy_combinations from official.core import base_trainer as trainer_lib from official.core import config_definitions as cfg from official.core import train_lib from official.utils.testing import mock_task TPU_TEST = 'test_tpu' in sys.argv[0] GPU_TEST = 'test_gpu' in sys.argv[0] def all_strategy_combinations(): return combinations.combine( distribution=[ strategy_combinations.default_strategy, strategy_combinations.cloud_tpu_strategy, strategy_combinations.one_device_strategy_gpu, ],) def create_in_process_cluster(num_workers, num_ps): """Creates and starts local servers and returns the cluster_resolver.""" worker_ports = [portpicker.pick_unused_port() for _ in range(num_workers)] ps_ports = [portpicker.pick_unused_port() for _ in range(num_ps)] cluster_dict = {} cluster_dict['worker'] = ['localhost:%s' % port for port in worker_ports] if num_ps > 0: cluster_dict['ps'] = ['localhost:%s' % port for port in ps_ports] cluster_spec = tf.train.ClusterSpec(cluster_dict) # Workers need some inter_ops threads to work properly. 
worker_config = tf.compat.v1.ConfigProto() if multiprocessing.cpu_count() < num_workers + 1: worker_config.inter_op_parallelism_threads = num_workers + 1 for i in range(num_workers): tf.distribute.Server( cluster_spec, job_name='worker', task_index=i, config=worker_config, protocol='grpc') for i in range(num_ps): tf.distribute.Server( cluster_spec, job_name='ps', task_index=i, protocol='grpc') cluster_resolver = tf.distribute.cluster_resolver.SimpleClusterResolver( cluster_spec, rpc_layer='grpc') return cluster_resolver def dataset_fn(input_context=None): del input_context def dummy_data(_): return tf.zeros((1, 1), dtype=tf.float32) dataset = tf.data.Dataset.range(1) dataset = dataset.repeat() dataset = dataset.map( dummy_data, num_parallel_calls=tf.data.experimental.AUTOTUNE) return dataset class MockAsyncTrainer(trainer_lib._AsyncTrainer): """Mock AsyncTrainer to test the _AsyncTrainer class.""" def __init__(self): self._strategy = tf.distribute.get_strategy() self.init_async() self.global_step = tf.Variable( 0, dtype=tf.int64, name='global_step', trainable=False, aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA) self.eval_global_step = tf.Variable( 0, dtype=tf.int64, name='eval_global_step', trainable=False, aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA) train_dataset = self.distribute_dataset(dataset_fn) orbit.StandardTrainer.__init__( self, train_dataset, options=orbit.StandardTrainerOptions()) validation_dataset = self.distribute_dataset(dataset_fn) orbit.StandardEvaluator.__init__( self, validation_dataset, options=orbit.StandardEvaluatorOptions(use_tf_while_loop=True)) def train_loop_begin(self): self.global_step.assign(0) def train_step(self, iterator): def replica_step(_): self.global_step.assign_add(1) self._strategy.run(replica_step, args=(next(iterator),)) def train_loop_end(self): self.join() return self.global_step.numpy() def eval_begin(self): self.eval_global_step.assign(0) def eval_step(self, iterator): def replica_step(_): self.eval_global_step.assign_add(1) self._strategy.run(replica_step, args=(next(iterator),)) def eval_end(self): self.join() return self.eval_global_step.numpy() class TrainerTest(tf.test.TestCase, parameterized.TestCase): def setUp(self): super().setUp() self._config = cfg.ExperimentConfig( trainer=cfg.TrainerConfig( optimizer_config=cfg.OptimizationConfig({ 'optimizer': { 'type': 'sgd' }, 'learning_rate': { 'type': 'constant' } }))) def tearDown(self): gc.collect() # This will only contain uncollectable garbage, i.e. reference cycles # involving objects with __del__ defined. 
self.assertEmpty(gc.garbage) super().tearDown() def create_test_trainer(self, config, model_dir=None, task=None): task = task or mock_task.MockTask(config.task, logging_dir=model_dir) ckpt_exporter = train_lib.maybe_create_best_ckpt_exporter(config, model_dir) trainer = trainer_lib.Trainer( config, task, model=task.build_model(), optimizer=task.create_optimizer(config.trainer.optimizer_config, config.runtime), checkpoint_exporter=ckpt_exporter) return trainer @combinations.generate(all_strategy_combinations()) def test_trainer_train(self, distribution): with distribution.scope(): trainer = self.create_test_trainer(self._config) logs = trainer.train(tf.convert_to_tensor(5, dtype=tf.int32)) self.assertIn('training_loss', logs) self.assertIn('learning_rate', logs) @combinations.generate(all_strategy_combinations()) def test_trainer_passing_datasets(self, distribution): with distribution.scope(): task = mock_task.MockTask(self._config) train_dataset = orbit.utils.make_distributed_dataset( distribution, task.build_inputs, self._config.task.train_data) validation_dataset = orbit.utils.make_distributed_dataset( distribution, task.build_inputs, self._config.task.validation_data) self._config.task.train_data = None self._config.task.validation_data = None trainer = trainer_lib.Trainer( self._config, task, model=task.build_model(), optimizer=task.create_optimizer(self._config.trainer.optimizer_config, self._config.runtime), train_dataset=train_dataset, validation_dataset=validation_dataset) logs = trainer.train(tf.convert_to_tensor(5, dtype=tf.int32)) self.assertIn('training_loss', logs) self.assertIn('learning_rate', logs) logs = trainer.evaluate(tf.convert_to_tensor(5, dtype=tf.int32)) self.assertIn('validation_loss', logs) def test_base_async_trainer(self): if TPU_TEST or GPU_TEST: self.skipTest('Aysnc training is not available on GPU/GPU.') num_workers = 3 num_ps = 2 cluster_resolver = create_in_process_cluster(num_workers, num_ps) distribution = tf.distribute.experimental.ParameterServerStrategy( cluster_resolver) with distribution.scope(): trainer = MockAsyncTrainer() trainer.init_async() self.assertIsInstance( trainer._coordinator, tf.distribute.experimental.coordinator.ClusterCoordinator) self.assertEqual(trainer.train(tf.constant(10)), 10) self.assertEqual(trainer.evaluate(tf.constant(11)), 11) def test_async_trainer_train(self): if TPU_TEST or GPU_TEST: self.skipTest('Aysnc training is not available on GPU/TPU.') num_workers = 3 num_ps = 2 cluster_resolver = create_in_process_cluster(num_workers, num_ps) distribution = tf.distribute.experimental.ParameterServerStrategy( cluster_resolver) with distribution.scope(): config = cfg.ExperimentConfig(**self._config.as_dict()) config.trainer.eval_tf_while_loop = True trainer = self.create_test_trainer(config) logs = trainer.train(tf.convert_to_tensor(5, dtype=tf.int32)) self.assertIn('training_loss', logs) self.assertIn('learning_rate', logs) def test_async_trainer_validate(self): if TPU_TEST or GPU_TEST: self.skipTest('Aysnc training is not available on GPU/GPU.') num_workers = 3 num_ps = 2 cluster_resolver = create_in_process_cluster(num_workers, num_ps) distribution = tf.distribute.experimental.ParameterServerStrategy( cluster_resolver) with distribution.scope(): config = cfg.ExperimentConfig(**self._config.as_dict()) config.trainer.eval_tf_while_loop = True trainer = self.create_test_trainer(config) logs = trainer.evaluate(tf.convert_to_tensor(5, dtype=tf.int32)) self.assertIn('acc', logs) self.assertIn('validation_loss', logs) 
@combinations.generate(all_strategy_combinations()) def test_trainer_validate(self, distribution): with distribution.scope(): trainer = self.create_test_trainer(self._config) logs = trainer.evaluate(tf.convert_to_tensor(5, dtype=tf.int32)) self.assertEqual(logs['counter'], 5. * distribution.num_replicas_in_sync) self.assertIn('validation_loss', logs) @combinations.generate(all_strategy_combinations()) def test_trainer_validate_without_loss(self, distribution): class MockTaskWithoutValidationLoss(mock_task.MockTask): def validation_step(self, inputs, model, metrics=None): # Disable validation loss. logs = super().validation_step(inputs, model) del logs[self.loss] return logs with distribution.scope(): task = MockTaskWithoutValidationLoss() trainer = self.create_test_trainer(self._config, task=task) logs = trainer.evaluate(tf.convert_to_tensor(5, dtype=tf.int32)) self.assertEqual(logs['counter'], 5. * distribution.num_replicas_in_sync) self.assertNotIn('validation_loss', logs) @combinations.generate( combinations.combine( mixed_precision_dtype=['float32', 'bfloat16', 'float16'], loss_scale=[None, 'dynamic', 128, 256], )) def test_configure_optimizer(self, mixed_precision_dtype, loss_scale): config = cfg.ExperimentConfig( runtime=cfg.RuntimeConfig( mixed_precision_dtype=mixed_precision_dtype, loss_scale=loss_scale), trainer=cfg.TrainerConfig( optimizer_config=cfg.OptimizationConfig({ 'optimizer': { 'type': 'sgd' }, 'learning_rate': { 'type': 'constant' }, }))) trainer = self.create_test_trainer(config) if mixed_precision_dtype == 'float16': self.assertIsInstance(trainer.optimizer, tf.keras.mixed_precision.LossScaleOptimizer) if loss_scale in (None, 'dynamic'): self.assertTrue(trainer.optimizer.dynamic) else: self.assertFalse(trainer.optimizer.dynamic) self.assertEqual(trainer.optimizer.initial_scale, loss_scale) else: self.assertIsInstance( trainer.optimizer, (tf.keras.optimizers.SGD, tf.keras.optimizers.legacy.SGD)) metrics = trainer.train(tf.convert_to_tensor(5, dtype=tf.int32)) self.assertIn('training_loss', metrics) def test_export_best_ckpt(self): config = cfg.ExperimentConfig( trainer=cfg.TrainerConfig( best_checkpoint_export_subdir='best_ckpt', best_checkpoint_eval_metric='acc', optimizer_config=cfg.OptimizationConfig({ 'optimizer': { 'type': 'sgd' }, 'learning_rate': { 'type': 'constant' } }))) model_dir = self.get_temp_dir() trainer = self.create_test_trainer(config, model_dir=model_dir) trainer.train(tf.convert_to_tensor(1, dtype=tf.int32)) trainer.evaluate(tf.convert_to_tensor(1, dtype=tf.int32)) self.assertTrue( tf.io.gfile.exists(os.path.join(model_dir, 'best_ckpt', 'info.json'))) def test_model_with_compiled_loss(self): task = mock_task.MockTask() model = task.build_model() model.compile(loss=tf.keras.losses.CategoricalCrossentropy()) trainer = trainer_lib.Trainer( self._config, task, model=model, optimizer=task.create_optimizer(self._config.trainer.optimizer_config)) logs = trainer.train(tf.convert_to_tensor(5, dtype=tf.int32)) self.assertIn('training_loss', logs) if __name__ == '__main__': tf.test.main()
13,011
34.747253
80
py
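A sketch mirroring create_test_trainer in the record above: building a Trainer directly from a task and an ExperimentConfig. MockTask comes from the testing utilities the record already imports; the step count is illustrative.

import tensorflow as tf
from official.core import base_trainer as trainer_lib
from official.core import config_definitions as cfg
from official.utils.testing import mock_task

config = cfg.ExperimentConfig(
    trainer=cfg.TrainerConfig(
        optimizer_config=cfg.OptimizationConfig({
            'optimizer': {'type': 'sgd'},
            'learning_rate': {'type': 'constant'},
        })))
task = mock_task.MockTask(config.task)
trainer = trainer_lib.Trainer(
    config, task,
    model=task.build_model(),
    optimizer=task.create_optimizer(config.trainer.optimizer_config,
                                    config.runtime))
logs = trainer.train(tf.convert_to_tensor(5, dtype=tf.int32))  # 5 steps
# logs contains 'training_loss' and 'learning_rate', as the tests above check.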
models
models-master/official/core/test_utils.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Utils for testing."""

import tensorflow as tf


class FakeKerasModel(tf.keras.Model):
  """Fake keras model for testing."""

  def __init__(self):
    super().__init__()
    self.dense = tf.keras.layers.Dense(4, activation=tf.nn.relu)
    self.dense2 = tf.keras.layers.Dense(4, activation=tf.nn.relu)

  def call(self, inputs):  # pytype: disable=signature-mismatch  # overriding-parameter-count-checks
    return self.dense2(self.dense(inputs))


class _Dense(tf.Module):
  """A dense layer."""

  def __init__(self, input_dim, output_size, name=None):
    super().__init__(name=name)
    with self.name_scope:
      self.w = tf.Variable(
          tf.random.normal([input_dim, output_size]), name='w')
      self.b = tf.Variable(tf.zeros([output_size]), name='b')

  @tf.Module.with_name_scope
  def __call__(self, x):
    y = tf.matmul(x, self.w) + self.b
    return tf.nn.relu(y)


class FakeModule(tf.Module):
  """Fake model using tf.Module for testing."""

  def __init__(self, input_size, name=None):
    super().__init__(name=name)
    with self.name_scope:
      self.dense = _Dense(input_size, 4, name='dense')
      self.dense2 = _Dense(4, 4, name='dense_1')

  @tf.Module.with_name_scope
  def __call__(self, x):
    return self.dense2(self.dense(x))
1,877
30.3
100
py
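An illustrative (hypothetical) unit test using the fakes above; both fakes map a batch of inputs to 4 output features, so the assertions below follow directly from the layer sizes in the record.

import tensorflow as tf
from official.core import test_utils


class FakeModelTest(tf.test.TestCase):

  def test_fakes_produce_expected_shapes(self):
    inputs = tf.ones((3, 8), dtype=tf.float32)
    keras_fake = test_utils.FakeKerasModel()
    module_fake = test_utils.FakeModule(input_size=8)
    # Both fakes end in a 4-unit layer, so the batch of 3 yields shape (3, 4).
    self.assertEqual(keras_fake(inputs).shape, (3, 4))
    self.assertEqual(module_fake(inputs).shape, (3, 4))


if __name__ == '__main__':
  tf.test.main()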
models
models-master/official/nlp/optimization.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Legacy functions and classes related to optimization."""

from absl import logging
import gin
import tensorflow as tf

from official.modeling.optimization import lamb
from official.modeling.optimization import legacy_adamw

AdamWeightDecay = legacy_adamw.AdamWeightDecay
LAMB = lamb.LAMB


class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
  """Applies a warmup schedule on a given learning rate decay schedule."""

  def __init__(self,
               initial_learning_rate,
               decay_schedule_fn,
               warmup_steps,
               power=1.0,
               name=None):
    super(WarmUp, self).__init__()
    self.initial_learning_rate = initial_learning_rate
    self.warmup_steps = warmup_steps
    self.power = power
    self.decay_schedule_fn = decay_schedule_fn
    self.name = name

  def __call__(self, step):
    with tf.name_scope(self.name or 'WarmUp') as name:
      # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
      # learning rate will be `global_step/num_warmup_steps * init_lr`.
      global_step_float = tf.cast(step, tf.float32)
      warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
      warmup_percent_done = global_step_float / warmup_steps_float
      warmup_learning_rate = (
          self.initial_learning_rate *
          tf.math.pow(warmup_percent_done, self.power))
      return tf.cond(
          global_step_float < warmup_steps_float,
          lambda: warmup_learning_rate,
          lambda: self.decay_schedule_fn(step),
          name=name)

  def get_config(self):
    return {
        'initial_learning_rate': self.initial_learning_rate,
        'decay_schedule_fn': self.decay_schedule_fn,
        'warmup_steps': self.warmup_steps,
        'power': self.power,
        'name': self.name
    }


@gin.configurable
def create_optimizer(init_lr,
                     num_train_steps,
                     num_warmup_steps,
                     end_lr=0.0,
                     optimizer_type='adamw',
                     beta_1=0.9,
                     poly_power=1.0):
  """Creates an optimizer with learning rate schedule."""
  # Implements linear decay of the learning rate.
  lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
      initial_learning_rate=init_lr,
      decay_steps=num_train_steps,
      end_learning_rate=end_lr,
      power=poly_power)
  if num_warmup_steps:
    lr_schedule = WarmUp(
        initial_learning_rate=init_lr,
        decay_schedule_fn=lr_schedule,
        warmup_steps=num_warmup_steps)
  if optimizer_type == 'adamw':
    logging.info('using Adamw optimizer')
    optimizer = AdamWeightDecay(
        learning_rate=lr_schedule,
        weight_decay_rate=0.01,
        beta_1=beta_1,
        beta_2=0.999,
        epsilon=1e-6,
        exclude_from_weight_decay=['LayerNorm', 'layer_norm', 'bias'])
  elif optimizer_type == 'lamb':
    logging.info('using Lamb optimizer')
    optimizer = LAMB(
        learning_rate=lr_schedule,
        weight_decay_rate=0.01,
        beta_1=beta_1,
        beta_2=0.999,
        epsilon=1e-6,
        exclude_from_weight_decay=['LayerNorm', 'layer_norm', 'bias'],
    )
  else:
    raise ValueError('Unsupported optimizer type: ', optimizer_type)

  return optimizer
3,878
33.026316
78
py
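A small usage sketch for the legacy create_optimizer helper above: linear decay of the learning rate to zero with a polynomial warmup over the first 10% of steps. The step counts and learning rate are illustrative.

from official.nlp import optimization

num_train_steps = 10000
optimizer = optimization.create_optimizer(
    init_lr=5e-5,
    num_train_steps=num_train_steps,
    num_warmup_steps=int(0.1 * num_train_steps),
    optimizer_type='adamw')
# The returned AdamWeightDecay excludes LayerNorm and bias variables from
# weight decay, as configured in the helper above.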
models
models-master/official/nlp/tools/export_tfhub_lib_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests export_tfhub_lib.""" import os import tempfile from absl.testing import parameterized import numpy as np import tensorflow as tf from tensorflow import estimator as tf_estimator import tensorflow_hub as hub import tensorflow_text as text from sentencepiece import SentencePieceTrainer from official.legacy.bert import configs from official.modeling import tf_utils from official.nlp.configs import encoders from official.nlp.modeling import layers from official.nlp.modeling import models from official.nlp.tools import export_tfhub_lib def _get_bert_config_or_encoder_config(use_bert_config, hidden_size, num_hidden_layers, encoder_type="albert", vocab_size=100): """Generates config args for export_tfhub_lib._create_model(). Args: use_bert_config: bool. If True, returns legacy BertConfig. hidden_size: int. num_hidden_layers: int. encoder_type: str. Can be ['albert', 'bert', 'bert_v2']. If use_bert_config == True, then model_type is not used. vocab_size: int. Returns: bert_config, encoder_config. Only one is not None. If `use_bert_config` == True, the first config is valid. Otherwise `bert_config` == None. """ if use_bert_config: bert_config = configs.BertConfig( vocab_size=vocab_size, hidden_size=hidden_size, intermediate_size=32, max_position_embeddings=128, num_attention_heads=2, num_hidden_layers=num_hidden_layers) encoder_config = None else: bert_config = None if encoder_type == "albert": encoder_config = encoders.EncoderConfig( type="albert", albert=encoders.AlbertEncoderConfig( vocab_size=vocab_size, embedding_width=16, hidden_size=hidden_size, intermediate_size=32, max_position_embeddings=128, num_attention_heads=2, num_layers=num_hidden_layers, dropout_rate=0.1)) else: # encoder_type can be 'bert' or 'bert_v2'. model_config = encoders.BertEncoderConfig( vocab_size=vocab_size, embedding_size=16, hidden_size=hidden_size, intermediate_size=32, max_position_embeddings=128, num_attention_heads=2, num_layers=num_hidden_layers, dropout_rate=0.1) kwargs = {"type": encoder_type, encoder_type: model_config} encoder_config = encoders.EncoderConfig(**kwargs) return bert_config, encoder_config def _get_vocab_or_sp_model_dummy(temp_dir, use_sp_model): """Returns tokenizer asset args for export_tfhub_lib.export_model().""" dummy_file = os.path.join(temp_dir, "dummy_file.txt") with tf.io.gfile.GFile(dummy_file, "w") as f: f.write("dummy content") if use_sp_model: vocab_file, sp_model_file = None, dummy_file else: vocab_file, sp_model_file = dummy_file, None return vocab_file, sp_model_file def _read_asset(asset: tf.saved_model.Asset): return tf.io.gfile.GFile(asset.asset_path.numpy()).read() def _find_lambda_layers(layer): """Returns list of all Lambda layers in a Keras model.""" if isinstance(layer, tf.keras.layers.Lambda): return [layer] elif hasattr(layer, "layers"): # It's nested, like a Model. 
result = [] for l in layer.layers: result += _find_lambda_layers(l) return result else: return [] class ExportModelTest(tf.test.TestCase, parameterized.TestCase): """Tests exporting a Transformer Encoder model as a SavedModel. This covers export from an Encoder checkpoint to a SavedModel without the .mlm subobject. This is no longer preferred, but still useful for models like Electra that are trained without the MLM task. The export code is generic. This test focuses on two main cases (the most important ones in practice when this was written in 2020): - BERT built from a legacy BertConfig, for use with BertTokenizer. - ALBERT built from an EncoderConfig (as a representative of all other choices beyond BERT, for use with SentencepieceTokenizer (the one alternative to BertTokenizer). """ @parameterized.named_parameters( ("Bert_Legacy", True, None), ("Albert", False, "albert"), ("BertEncoder", False, "bert"), ("BertEncoderV2", False, "bert_v2")) def test_export_model(self, use_bert, encoder_type): # Create the encoder and export it. hidden_size = 16 num_hidden_layers = 1 bert_config, encoder_config = _get_bert_config_or_encoder_config( use_bert, hidden_size=hidden_size, num_hidden_layers=num_hidden_layers, encoder_type=encoder_type) bert_model, encoder = export_tfhub_lib._create_model( bert_config=bert_config, encoder_config=encoder_config, with_mlm=False) self.assertEmpty( _find_lambda_layers(bert_model), "Lambda layers are non-portable since they serialize Python bytecode.") model_checkpoint_dir = os.path.join(self.get_temp_dir(), "checkpoint") checkpoint = tf.train.Checkpoint(encoder=encoder) checkpoint.save(os.path.join(model_checkpoint_dir, "test")) model_checkpoint_path = tf.train.latest_checkpoint(model_checkpoint_dir) vocab_file, sp_model_file = _get_vocab_or_sp_model_dummy( self.get_temp_dir(), use_sp_model=not use_bert) export_path = os.path.join(self.get_temp_dir(), "hub") export_tfhub_lib.export_model( export_path=export_path, bert_config=bert_config, encoder_config=encoder_config, model_checkpoint_path=model_checkpoint_path, with_mlm=False, vocab_file=vocab_file, sp_model_file=sp_model_file, do_lower_case=True) # Restore the exported model. hub_layer = hub.KerasLayer(export_path, trainable=True) # Check legacy tokenization data. if use_bert: self.assertTrue(hub_layer.resolved_object.do_lower_case.numpy()) self.assertEqual("dummy content", _read_asset(hub_layer.resolved_object.vocab_file)) self.assertFalse(hasattr(hub_layer.resolved_object, "sp_model_file")) else: self.assertFalse(hasattr(hub_layer.resolved_object, "do_lower_case")) self.assertFalse(hasattr(hub_layer.resolved_object, "vocab_file")) self.assertEqual("dummy content", _read_asset(hub_layer.resolved_object.sp_model_file)) # Check restored weights. self.assertEqual( len(bert_model.trainable_weights), len(hub_layer.trainable_weights)) for source_weight, hub_weight in zip(bert_model.trainable_weights, hub_layer.trainable_weights): self.assertAllClose(source_weight.numpy(), hub_weight.numpy()) # Check computation. 
seq_length = 10 dummy_ids = np.zeros((2, seq_length), dtype=np.int32) input_dict = dict( input_word_ids=dummy_ids, input_mask=dummy_ids, input_type_ids=dummy_ids) hub_output = hub_layer(input_dict) source_output = bert_model(input_dict) encoder_output = encoder(input_dict) self.assertEqual(hub_output["pooled_output"].shape, (2, hidden_size)) self.assertEqual(hub_output["sequence_output"].shape, (2, seq_length, hidden_size)) self.assertLen(hub_output["encoder_outputs"], num_hidden_layers) for key in ("pooled_output", "sequence_output", "encoder_outputs"): self.assertAllClose(source_output[key], hub_output[key]) self.assertAllClose(source_output[key], encoder_output[key]) # The "default" output of BERT as a text representation is pooled_output. self.assertAllClose(hub_output["pooled_output"], hub_output["default"]) # Test that training=True makes a difference (activates dropout). def _dropout_mean_stddev(training, num_runs=20): input_ids = np.array([[14, 12, 42, 95, 99]], np.int32) input_dict = dict( input_word_ids=input_ids, input_mask=np.ones_like(input_ids), input_type_ids=np.zeros_like(input_ids)) outputs = np.concatenate([ hub_layer(input_dict, training=training)["pooled_output"] for _ in range(num_runs) ]) return np.mean(np.std(outputs, axis=0)) self.assertLess(_dropout_mean_stddev(training=False), 1e-6) self.assertGreater(_dropout_mean_stddev(training=True), 1e-3) # Test propagation of seq_length in shape inference. input_word_ids = tf.keras.layers.Input(shape=(seq_length,), dtype=tf.int32) input_mask = tf.keras.layers.Input(shape=(seq_length,), dtype=tf.int32) input_type_ids = tf.keras.layers.Input(shape=(seq_length,), dtype=tf.int32) input_dict = dict( input_word_ids=input_word_ids, input_mask=input_mask, input_type_ids=input_type_ids) output_dict = hub_layer(input_dict) pooled_output = output_dict["pooled_output"] sequence_output = output_dict["sequence_output"] encoder_outputs = output_dict["encoder_outputs"] self.assertEqual(pooled_output.shape.as_list(), [None, hidden_size]) self.assertEqual(sequence_output.shape.as_list(), [None, seq_length, hidden_size]) self.assertLen(encoder_outputs, num_hidden_layers) class ExportModelWithMLMTest(tf.test.TestCase, parameterized.TestCase): """Tests exporting a Transformer Encoder model as a SavedModel. This covers export from a Pretrainer checkpoint to a SavedModel including the .mlm subobject, which is the preferred way since 2020. The export code is generic. This test focuses on two main cases (the most important ones in practice when this was written in 2020): - BERT built from a legacy BertConfig, for use with BertTokenizer. - ALBERT built from an EncoderConfig (as a representative of all other choices beyond BERT, for use with SentencepieceTokenizer (the one alternative to BertTokenizer). """ def test_copy_pooler_dense_to_encoder(self): encoder_config = encoders.EncoderConfig( type="bert", bert=encoders.BertEncoderConfig( hidden_size=24, intermediate_size=48, num_layers=2)) cls_heads = [ layers.ClassificationHead( inner_dim=24, num_classes=2, name="next_sentence") ] encoder = encoders.build_encoder(encoder_config) pretrainer = models.BertPretrainerV2( encoder_network=encoder, classification_heads=cls_heads, mlm_activation=tf_utils.get_activation( encoder_config.get().hidden_activation)) # Makes sure the pretrainer variables are created. 
_ = pretrainer(pretrainer.inputs) checkpoint = tf.train.Checkpoint(**pretrainer.checkpoint_items) model_checkpoint_dir = os.path.join(self.get_temp_dir(), "checkpoint") checkpoint.save(os.path.join(model_checkpoint_dir, "test")) vocab_file, sp_model_file = _get_vocab_or_sp_model_dummy( self.get_temp_dir(), use_sp_model=True) export_path = os.path.join(self.get_temp_dir(), "hub") export_tfhub_lib.export_model( export_path=export_path, encoder_config=encoder_config, model_checkpoint_path=tf.train.latest_checkpoint(model_checkpoint_dir), with_mlm=True, copy_pooler_dense_to_encoder=True, vocab_file=vocab_file, sp_model_file=sp_model_file, do_lower_case=True) # Restores a hub KerasLayer. hub_layer = hub.KerasLayer(export_path, trainable=True) dummy_ids = np.zeros((2, 10), dtype=np.int32) input_dict = dict( input_word_ids=dummy_ids, input_mask=dummy_ids, input_type_ids=dummy_ids) hub_pooled_output = hub_layer(input_dict)["pooled_output"] encoder_outputs = encoder(input_dict) # Verify that hub_layer's pooled_output is the same as the output of next # sentence prediction's dense layer. pretrained_pooled_output = cls_heads[0].dense( (encoder_outputs["sequence_output"][:, 0, :])) self.assertAllClose(hub_pooled_output, pretrained_pooled_output) # But the pooled_output between encoder and hub_layer are not the same. encoder_pooled_output = encoder_outputs["pooled_output"] self.assertNotAllClose(hub_pooled_output, encoder_pooled_output) @parameterized.named_parameters( ("Bert", True), ("Albert", False), ) def test_export_model_with_mlm(self, use_bert): # Create the encoder and export it. hidden_size = 16 num_hidden_layers = 2 bert_config, encoder_config = _get_bert_config_or_encoder_config( use_bert, hidden_size, num_hidden_layers) bert_model, pretrainer = export_tfhub_lib._create_model( bert_config=bert_config, encoder_config=encoder_config, with_mlm=True) self.assertEmpty( _find_lambda_layers(bert_model), "Lambda layers are non-portable since they serialize Python bytecode.") bert_model_with_mlm = bert_model.mlm model_checkpoint_dir = os.path.join(self.get_temp_dir(), "checkpoint") checkpoint = tf.train.Checkpoint(**pretrainer.checkpoint_items) checkpoint.save(os.path.join(model_checkpoint_dir, "test")) model_checkpoint_path = tf.train.latest_checkpoint(model_checkpoint_dir) vocab_file, sp_model_file = _get_vocab_or_sp_model_dummy( self.get_temp_dir(), use_sp_model=not use_bert) export_path = os.path.join(self.get_temp_dir(), "hub") export_tfhub_lib.export_model( export_path=export_path, bert_config=bert_config, encoder_config=encoder_config, model_checkpoint_path=model_checkpoint_path, with_mlm=True, vocab_file=vocab_file, sp_model_file=sp_model_file, do_lower_case=True) # Restore the exported model. hub_layer = hub.KerasLayer(export_path, trainable=True) # Check legacy tokenization data. if use_bert: self.assertTrue(hub_layer.resolved_object.do_lower_case.numpy()) self.assertEqual("dummy content", _read_asset(hub_layer.resolved_object.vocab_file)) self.assertFalse(hasattr(hub_layer.resolved_object, "sp_model_file")) else: self.assertFalse(hasattr(hub_layer.resolved_object, "do_lower_case")) self.assertFalse(hasattr(hub_layer.resolved_object, "vocab_file")) self.assertEqual("dummy content", _read_asset(hub_layer.resolved_object.sp_model_file)) # Check restored weights. # Note that we set `_auto_track_sub_layers` to False when exporting the # SavedModel, so hub_layer has the same number of weights as bert_model; # otherwise, hub_layer will have extra weights from its `mlm` subobject. 
self.assertEqual( len(bert_model.trainable_weights), len(hub_layer.trainable_weights)) for source_weight, hub_weight in zip(bert_model.trainable_weights, hub_layer.trainable_weights): self.assertAllClose(source_weight, hub_weight) # Check computation. seq_length = 10 dummy_ids = np.zeros((2, seq_length), dtype=np.int32) input_dict = dict( input_word_ids=dummy_ids, input_mask=dummy_ids, input_type_ids=dummy_ids) hub_outputs_dict = hub_layer(input_dict) source_outputs_dict = bert_model(input_dict) encoder_outputs_dict = pretrainer.encoder_network( [dummy_ids, dummy_ids, dummy_ids]) self.assertEqual(hub_outputs_dict["pooled_output"].shape, (2, hidden_size)) self.assertEqual(hub_outputs_dict["sequence_output"].shape, (2, seq_length, hidden_size)) for output_key in ("pooled_output", "sequence_output", "encoder_outputs"): self.assertAllClose(source_outputs_dict[output_key], hub_outputs_dict[output_key]) self.assertAllClose(source_outputs_dict[output_key], encoder_outputs_dict[output_key]) # The "default" output of BERT as a text representation is pooled_output. self.assertAllClose(hub_outputs_dict["pooled_output"], hub_outputs_dict["default"]) # Test that training=True makes a difference (activates dropout). def _dropout_mean_stddev(training, num_runs=20): input_ids = np.array([[14, 12, 42, 95, 99]], np.int32) input_dict = dict( input_word_ids=input_ids, input_mask=np.ones_like(input_ids), input_type_ids=np.zeros_like(input_ids)) outputs = np.concatenate([ hub_layer(input_dict, training=training)["pooled_output"] for _ in range(num_runs) ]) return np.mean(np.std(outputs, axis=0)) self.assertLess(_dropout_mean_stddev(training=False), 1e-6) self.assertGreater(_dropout_mean_stddev(training=True), 1e-3) # Checks sub-object `mlm`. self.assertTrue(hasattr(hub_layer.resolved_object, "mlm")) self.assertLen(hub_layer.resolved_object.mlm.trainable_variables, len(bert_model_with_mlm.trainable_weights)) self.assertLen(hub_layer.resolved_object.mlm.trainable_variables, len(pretrainer.trainable_weights)) for source_weight, hub_weight, pretrainer_weight in zip( bert_model_with_mlm.trainable_weights, hub_layer.resolved_object.mlm.trainable_variables, pretrainer.trainable_weights): self.assertAllClose(source_weight, hub_weight) self.assertAllClose(source_weight, pretrainer_weight) max_predictions_per_seq = 4 mlm_positions = np.zeros((2, max_predictions_per_seq), dtype=np.int32) input_dict = dict( input_word_ids=dummy_ids, input_mask=dummy_ids, input_type_ids=dummy_ids, masked_lm_positions=mlm_positions) hub_mlm_outputs_dict = hub_layer.resolved_object.mlm(input_dict) source_mlm_outputs_dict = bert_model_with_mlm(input_dict) for output_key in ("pooled_output", "sequence_output", "mlm_logits", "encoder_outputs"): self.assertAllClose(hub_mlm_outputs_dict[output_key], source_mlm_outputs_dict[output_key]) pretrainer_mlm_logits_output = pretrainer(input_dict)["mlm_logits"] self.assertAllClose(hub_mlm_outputs_dict["mlm_logits"], pretrainer_mlm_logits_output) # Test that training=True makes a difference (activates dropout). 
def _dropout_mean_stddev_mlm(training, num_runs=20): input_ids = np.array([[14, 12, 42, 95, 99]], np.int32) mlm_position_ids = np.array([[1, 2, 3, 4]], np.int32) input_dict = dict( input_word_ids=input_ids, input_mask=np.ones_like(input_ids), input_type_ids=np.zeros_like(input_ids), masked_lm_positions=mlm_position_ids) outputs = np.concatenate([ hub_layer.resolved_object.mlm(input_dict, training=training)["pooled_output"] for _ in range(num_runs) ]) return np.mean(np.std(outputs, axis=0)) self.assertLess(_dropout_mean_stddev_mlm(training=False), 1e-6) self.assertGreater(_dropout_mean_stddev_mlm(training=True), 1e-3) # Test propagation of seq_length in shape inference. input_word_ids = tf.keras.layers.Input(shape=(seq_length,), dtype=tf.int32) input_mask = tf.keras.layers.Input(shape=(seq_length,), dtype=tf.int32) input_type_ids = tf.keras.layers.Input(shape=(seq_length,), dtype=tf.int32) input_dict = dict( input_word_ids=input_word_ids, input_mask=input_mask, input_type_ids=input_type_ids) hub_outputs_dict = hub_layer(input_dict) self.assertEqual(hub_outputs_dict["pooled_output"].shape.as_list(), [None, hidden_size]) self.assertEqual(hub_outputs_dict["sequence_output"].shape.as_list(), [None, seq_length, hidden_size]) _STRING_NOT_TO_LEAK = "private_path_component_" class ExportPreprocessingTest(tf.test.TestCase, parameterized.TestCase): def _make_vocab_file(self, vocab, filename="vocab.txt", add_mask_token=False): """Creates wordpiece vocab file with given words plus special tokens. The tokens of the resulting model are, in this order: [PAD], [UNK], [CLS], [SEP], [MASK]*, ...vocab... *=if requested by args. This function also accepts wordpieces that start with the ## continuation marker, but avoiding those makes this function interchangeable with _make_sp_model_file(), up to the extra dimension returned by BertTokenizer. Args: vocab: a list of strings with the words or wordpieces to put into the model's vocabulary. Do not include special tokens here. filename: Optionally, a filename (relative to the temporary directory created by this function). add_mask_token: an optional bool, whether to include a [MASK] token. Returns: The absolute filename of the created vocab file. """ full_vocab = ["[PAD]", "[UNK]", "[CLS]", "[SEP]" ] + ["[MASK]"] * add_mask_token + vocab path = os.path.join( tempfile.mkdtemp( dir=self.get_temp_dir(), # New subdir each time. prefix=_STRING_NOT_TO_LEAK), filename) with tf.io.gfile.GFile(path, "w") as f: f.write("\n".join(full_vocab + [""])) return path def _make_sp_model_file(self, vocab, prefix="spm", add_mask_token=False): """Creates Sentencepiece word model with given words plus special tokens. The tokens of the resulting model are, in this order: <pad>, <unk>, [CLS], [SEP], [MASK]*, ...vocab..., <s>, </s> *=if requested by args. The words in the input vocab are plain text, without the whitespace marker. That makes this function interchangeable with _make_vocab_file(). Args: vocab: a list of strings with the words to put into the model's vocabulary. Do not include special tokens here. prefix: an optional string, to change the filename prefix for the model (relative to the temporary directory created by this function). add_mask_token: an optional bool, whether to include a [MASK] token. Returns: The absolute filename of the created Sentencepiece model file. """ model_prefix = os.path.join( tempfile.mkdtemp(dir=self.get_temp_dir()), # New subdir each time. 
prefix) input_file = model_prefix + "_train_input.txt" # Create input text for training the sp model from the tokens provided. # Repeat tokens, the earlier the more, because they are sorted by frequency. input_text = [] for i, token in enumerate(vocab): input_text.append(" ".join([token] * (len(vocab) - i))) with tf.io.gfile.GFile(input_file, "w") as f: f.write("\n".join(input_text + [""])) control_symbols = "[CLS],[SEP]" full_vocab_size = len(vocab) + 6 # <pad>, <unk>, [CLS], [SEP], <s>, </s>. if add_mask_token: control_symbols += ",[MASK]" full_vocab_size += 1 flags = dict( model_prefix=model_prefix, model_type="word", input=input_file, pad_id=0, unk_id=1, control_symbols=control_symbols, vocab_size=full_vocab_size, bos_id=full_vocab_size - 2, eos_id=full_vocab_size - 1) SentencePieceTrainer.Train(" ".join( ["--{}={}".format(k, v) for k, v in flags.items()])) return model_prefix + ".model" def _do_export(self, vocab, do_lower_case, default_seq_length=128, tokenize_with_offsets=True, use_sp_model=False, experimental_disable_assert=False, add_mask_token=False): """Runs SavedModel export and returns the export_path.""" export_path = tempfile.mkdtemp(dir=self.get_temp_dir()) vocab_file = sp_model_file = None if use_sp_model: sp_model_file = self._make_sp_model_file( vocab, add_mask_token=add_mask_token) else: vocab_file = self._make_vocab_file(vocab, add_mask_token=add_mask_token) export_tfhub_lib.export_preprocessing( export_path, vocab_file=vocab_file, sp_model_file=sp_model_file, do_lower_case=do_lower_case, tokenize_with_offsets=tokenize_with_offsets, default_seq_length=default_seq_length, experimental_disable_assert=experimental_disable_assert) # Invalidate the original filename to verify loading from the SavedModel. tf.io.gfile.remove(sp_model_file or vocab_file) return export_path def test_no_leaks(self): """Tests not leaking the path to the original vocab file.""" path = self._do_export(["d", "ef", "abc", "xy"], do_lower_case=True, use_sp_model=False) with tf.io.gfile.GFile(os.path.join(path, "saved_model.pb"), "rb") as f: self.assertFalse( # pylint: disable=g-generic-assert _STRING_NOT_TO_LEAK.encode("ascii") in f.read()) @parameterized.named_parameters(("Bert", False), ("Sentencepiece", True)) def test_exported_callables(self, use_sp_model): preprocess = tf.saved_model.load( self._do_export( ["d", "ef", "abc", "xy"], do_lower_case=True, # TODO(b/181866850): drop this. tokenize_with_offsets=not use_sp_model, # TODO(b/175369555): drop this. experimental_disable_assert=True, use_sp_model=use_sp_model)) def fold_dim(rt): """Removes the word/subword distinction of BertTokenizer.""" return rt if use_sp_model else rt.merge_dims(1, 2) # .tokenize() inputs = tf.constant(["abc d ef", "ABC D EF d"]) token_ids = preprocess.tokenize(inputs) self.assertAllEqual( fold_dim(token_ids), tf.ragged.constant([[6, 4, 5], [6, 4, 5, 4]])) special_tokens_dict = { k: v.numpy().item() # Expecting eager Tensor, converting to Python. for k, v in preprocess.tokenize.get_special_tokens_dict().items() } self.assertDictEqual( special_tokens_dict, dict( padding_id=0, start_of_sequence_id=2, end_of_segment_id=3, vocab_size=4 + 6 if use_sp_model else 4 + 4)) # .tokenize_with_offsets() if use_sp_model: # TODO(b/181866850): Enable tokenize_with_offsets when it works and test. 
self.assertFalse(hasattr(preprocess, "tokenize_with_offsets")) else: token_ids, start_offsets, limit_offsets = ( preprocess.tokenize_with_offsets(inputs)) self.assertAllEqual( fold_dim(token_ids), tf.ragged.constant([[6, 4, 5], [6, 4, 5, 4]])) self.assertAllEqual( fold_dim(start_offsets), tf.ragged.constant([[0, 4, 6], [0, 4, 6, 9]])) self.assertAllEqual( fold_dim(limit_offsets), tf.ragged.constant([[3, 5, 8], [3, 5, 8, 10]])) self.assertIs(preprocess.tokenize.get_special_tokens_dict, preprocess.tokenize_with_offsets.get_special_tokens_dict) # Root callable. bert_inputs = preprocess(inputs) self.assertAllEqual(bert_inputs["input_word_ids"].shape.as_list(), [2, 128]) self.assertAllEqual( bert_inputs["input_word_ids"][:, :10], tf.constant([[2, 6, 4, 5, 3, 0, 0, 0, 0, 0], [2, 6, 4, 5, 4, 3, 0, 0, 0, 0]])) self.assertAllEqual(bert_inputs["input_mask"].shape.as_list(), [2, 128]) self.assertAllEqual( bert_inputs["input_mask"][:, :10], tf.constant([[1, 1, 1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 0, 0, 0, 0]])) self.assertAllEqual(bert_inputs["input_type_ids"].shape.as_list(), [2, 128]) self.assertAllEqual( bert_inputs["input_type_ids"][:, :10], tf.constant([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])) # .bert_pack_inputs() inputs_2 = tf.constant(["d xy", "xy abc"]) token_ids_2 = preprocess.tokenize(inputs_2) bert_inputs = preprocess.bert_pack_inputs([token_ids, token_ids_2], seq_length=256) self.assertAllEqual(bert_inputs["input_word_ids"].shape.as_list(), [2, 256]) self.assertAllEqual( bert_inputs["input_word_ids"][:, :10], tf.constant([[2, 6, 4, 5, 3, 4, 7, 3, 0, 0], [2, 6, 4, 5, 4, 3, 7, 6, 3, 0]])) self.assertAllEqual(bert_inputs["input_mask"].shape.as_list(), [2, 256]) self.assertAllEqual( bert_inputs["input_mask"][:, :10], tf.constant([[1, 1, 1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 0]])) self.assertAllEqual(bert_inputs["input_type_ids"].shape.as_list(), [2, 256]) self.assertAllEqual( bert_inputs["input_type_ids"][:, :10], tf.constant([[0, 0, 0, 0, 0, 1, 1, 1, 0, 0], [0, 0, 0, 0, 0, 0, 1, 1, 1, 0]])) # For BertTokenizer only: repeat relevant parts for do_lower_case=False, # default_seq_length=10, experimental_disable_assert=False, # tokenize_with_offsets=False, and without folding the word/subword dimension. def test_cased_length10(self): preprocess = tf.saved_model.load( self._do_export(["d", "##ef", "abc", "ABC"], do_lower_case=False, default_seq_length=10, tokenize_with_offsets=False, use_sp_model=False, experimental_disable_assert=False)) inputs = tf.constant(["abc def", "ABC DEF"]) token_ids = preprocess.tokenize(inputs) self.assertAllEqual(token_ids, tf.ragged.constant([[[6], [4, 5]], [[7], [1]]])) self.assertFalse(hasattr(preprocess, "tokenize_with_offsets")) bert_inputs = preprocess(inputs) self.assertAllEqual( bert_inputs["input_word_ids"], tf.constant([[2, 6, 4, 5, 3, 0, 0, 0, 0, 0], [2, 7, 1, 3, 0, 0, 0, 0, 0, 0]])) self.assertAllEqual( bert_inputs["input_mask"], tf.constant([[1, 1, 1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 1, 0, 0, 0, 0, 0, 0]])) self.assertAllEqual( bert_inputs["input_type_ids"], tf.constant([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])) inputs_2 = tf.constant(["d ABC", "ABC abc"]) token_ids_2 = preprocess.tokenize(inputs_2) bert_inputs = preprocess.bert_pack_inputs([token_ids, token_ids_2]) # Test default seq_length=10. 
self.assertAllEqual( bert_inputs["input_word_ids"], tf.constant([[2, 6, 4, 5, 3, 4, 7, 3, 0, 0], [2, 7, 1, 3, 7, 6, 3, 0, 0, 0]])) self.assertAllEqual( bert_inputs["input_mask"], tf.constant([[1, 1, 1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 0, 0, 0]])) self.assertAllEqual( bert_inputs["input_type_ids"], tf.constant([[0, 0, 0, 0, 0, 1, 1, 1, 0, 0], [0, 0, 0, 0, 1, 1, 1, 0, 0, 0]])) # XLA requires fixed shapes for tensors found in graph mode. # Statically known shapes in Python are a particularly firm way to # guarantee that, and they are generally more convenient to work with. # We test that the exported SavedModel plays well with TF's shape # inference when applied to fully or partially known input shapes. @parameterized.named_parameters(("Bert", False), ("Sentencepiece", True)) def test_shapes(self, use_sp_model): preprocess = tf.saved_model.load( self._do_export( ["abc", "def"], do_lower_case=True, # TODO(b/181866850): drop this. tokenize_with_offsets=not use_sp_model, # TODO(b/175369555): drop this. experimental_disable_assert=True, use_sp_model=use_sp_model)) def expected_bert_input_shapes(batch_size, seq_length): return dict( input_word_ids=[batch_size, seq_length], input_mask=[batch_size, seq_length], input_type_ids=[batch_size, seq_length]) for batch_size in [7, None]: if use_sp_model: token_out_shape = [batch_size, None] # No word/subword distinction. else: token_out_shape = [batch_size, None, None] self.assertEqual( _result_shapes_in_tf_function(preprocess.tokenize, tf.TensorSpec([batch_size], tf.string)), token_out_shape, "with batch_size=%s" % batch_size) # TODO(b/181866850): Enable tokenize_with_offsets when it works and test. if use_sp_model: self.assertFalse(hasattr(preprocess, "tokenize_with_offsets")) else: self.assertEqual( _result_shapes_in_tf_function( preprocess.tokenize_with_offsets, tf.TensorSpec([batch_size], tf.string)), [token_out_shape] * 3, "with batch_size=%s" % batch_size) self.assertEqual( _result_shapes_in_tf_function( preprocess.bert_pack_inputs, [tf.RaggedTensorSpec([batch_size, None, None], tf.int32)] * 2, seq_length=256), expected_bert_input_shapes(batch_size, 256), "with batch_size=%s" % batch_size) self.assertEqual( _result_shapes_in_tf_function(preprocess, tf.TensorSpec([batch_size], tf.string)), expected_bert_input_shapes(batch_size, 128), "with batch_size=%s" % batch_size) @parameterized.named_parameters(("Bert", False), ("Sentencepiece", True)) def test_reexport(self, use_sp_model): """Test that preprocess keeps working after another save/load cycle.""" path1 = self._do_export( ["d", "ef", "abc", "xy"], do_lower_case=True, default_seq_length=10, tokenize_with_offsets=False, experimental_disable_assert=True, # TODO(b/175369555): drop this. use_sp_model=use_sp_model) path2 = path1.rstrip("/") + ".2" model1 = tf.saved_model.load(path1) tf.saved_model.save(model1, path2) # Delete the first SavedModel to test that the sceond one loads by itself. # https://github.com/tensorflow/tensorflow/issues/46456 reports such a # failure case for BertTokenizer. 
tf.io.gfile.rmtree(path1) model2 = tf.saved_model.load(path2) inputs = tf.constant(["abc d ef", "ABC D EF d"]) bert_inputs = model2(inputs) self.assertAllEqual( bert_inputs["input_word_ids"], tf.constant([[2, 6, 4, 5, 3, 0, 0, 0, 0, 0], [2, 6, 4, 5, 4, 3, 0, 0, 0, 0]])) self.assertAllEqual( bert_inputs["input_mask"], tf.constant([[1, 1, 1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 0, 0, 0, 0]])) self.assertAllEqual( bert_inputs["input_type_ids"], tf.constant([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])) @parameterized.named_parameters(("Bert", True), ("Albert", False)) def test_preprocessing_for_mlm(self, use_bert): """Combines both SavedModel types and TF.text helpers for MLM.""" # Create the preprocessing SavedModel with a [MASK] token. non_special_tokens = [ "hello", "world", "nice", "movie", "great", "actors", "quick", "fox", "lazy", "dog" ] preprocess = tf.saved_model.load( self._do_export( non_special_tokens, do_lower_case=True, tokenize_with_offsets=use_bert, # TODO(b/181866850): drop this. experimental_disable_assert=True, # TODO(b/175369555): drop this. add_mask_token=True, use_sp_model=not use_bert)) vocab_size = len(non_special_tokens) + (5 if use_bert else 7) # Create the encoder SavedModel with an .mlm subobject. hidden_size = 16 num_hidden_layers = 2 bert_config, encoder_config = _get_bert_config_or_encoder_config( use_bert_config=use_bert, hidden_size=hidden_size, num_hidden_layers=num_hidden_layers, vocab_size=vocab_size) _, pretrainer = export_tfhub_lib._create_model( bert_config=bert_config, encoder_config=encoder_config, with_mlm=True) model_checkpoint_dir = os.path.join(self.get_temp_dir(), "checkpoint") checkpoint = tf.train.Checkpoint(**pretrainer.checkpoint_items) checkpoint.save(os.path.join(model_checkpoint_dir, "test")) model_checkpoint_path = tf.train.latest_checkpoint(model_checkpoint_dir) vocab_file, sp_model_file = _get_vocab_or_sp_model_dummy( # Not used below. self.get_temp_dir(), use_sp_model=not use_bert) encoder_export_path = os.path.join(self.get_temp_dir(), "encoder_export") export_tfhub_lib.export_model( export_path=encoder_export_path, bert_config=bert_config, encoder_config=encoder_config, model_checkpoint_path=model_checkpoint_path, with_mlm=True, vocab_file=vocab_file, sp_model_file=sp_model_file, do_lower_case=True) encoder = tf.saved_model.load(encoder_export_path) # Get special tokens from the vocab (and vocab size). special_tokens_dict = preprocess.tokenize.get_special_tokens_dict() self.assertEqual(int(special_tokens_dict["vocab_size"]), vocab_size) padding_id = int(special_tokens_dict["padding_id"]) self.assertEqual(padding_id, 0) start_of_sequence_id = int(special_tokens_dict["start_of_sequence_id"]) self.assertEqual(start_of_sequence_id, 2) end_of_segment_id = int(special_tokens_dict["end_of_segment_id"]) self.assertEqual(end_of_segment_id, 3) mask_id = int(special_tokens_dict["mask_id"]) self.assertEqual(mask_id, 4) # A batch of 3 segment pairs. raw_segments = [ tf.constant(["hello", "nice movie", "quick fox"]), tf.constant(["world", "great actors", "lazy dog"]) ] batch_size = 3 # Misc hyperparameters. seq_length = 10 max_selections_per_seq = 2 # Tokenize inputs. tokenized_segments = [preprocess.tokenize(s) for s in raw_segments] # Trim inputs to eventually fit seq_lentgh. num_special_tokens = len(raw_segments) + 1 trimmed_segments = text.WaterfallTrimmer( seq_length - num_special_tokens).trim(tokenized_segments) # Combine input segments into one input sequence. 
input_ids, segment_ids = text.combine_segments( trimmed_segments, start_of_sequence_id=start_of_sequence_id, end_of_segment_id=end_of_segment_id) # Apply random masking controlled by policy objects. (masked_input_ids, masked_lm_positions, masked_ids) = text.mask_language_model( input_ids=input_ids, item_selector=text.RandomItemSelector( max_selections_per_seq, selection_rate=0.5, # Adjusted for the short test examples. unselectable_ids=[start_of_sequence_id, end_of_segment_id]), mask_values_chooser=text.MaskValuesChooser( vocab_size=vocab_size, mask_token=mask_id, # Always put [MASK] to have a predictable result. mask_token_rate=1.0, random_token_rate=0.0)) # Pad to fixed-length Transformer encoder inputs. input_word_ids, _ = text.pad_model_inputs( masked_input_ids, seq_length, pad_value=padding_id) input_type_ids, input_mask = text.pad_model_inputs( segment_ids, seq_length, pad_value=0) masked_lm_positions, _ = text.pad_model_inputs( masked_lm_positions, max_selections_per_seq, pad_value=0) masked_lm_positions = tf.cast(masked_lm_positions, tf.int32) num_predictions = int(tf.shape(masked_lm_positions)[1]) # Test transformer inputs. self.assertEqual(num_predictions, max_selections_per_seq) expected_word_ids = np.array([ # [CLS] hello [SEP] world [SEP] [2, 5, 3, 6, 3, 0, 0, 0, 0, 0], # [CLS] nice movie [SEP] great actors [SEP] [2, 7, 8, 3, 9, 10, 3, 0, 0, 0], # [CLS] brown fox [SEP] lazy dog [SEP] [2, 11, 12, 3, 13, 14, 3, 0, 0, 0] ]) for i in range(batch_size): for j in range(num_predictions): k = int(masked_lm_positions[i, j]) if k != 0: expected_word_ids[i, k] = 4 # [MASK] self.assertAllEqual(input_word_ids, expected_word_ids) # Call the MLM head of the Transformer encoder. mlm_inputs = dict( input_word_ids=input_word_ids, input_mask=input_mask, input_type_ids=input_type_ids, masked_lm_positions=masked_lm_positions, ) mlm_outputs = encoder.mlm(mlm_inputs) self.assertEqual(mlm_outputs["pooled_output"].shape, (batch_size, hidden_size)) self.assertEqual(mlm_outputs["sequence_output"].shape, (batch_size, seq_length, hidden_size)) self.assertEqual(mlm_outputs["mlm_logits"].shape, (batch_size, num_predictions, vocab_size)) self.assertLen(mlm_outputs["encoder_outputs"], num_hidden_layers) # A real trainer would now compute the loss of mlm_logits # trying to predict the masked_ids. del masked_ids # Unused. @parameterized.named_parameters(("Bert", False), ("Sentencepiece", True)) def test_special_tokens_in_estimator(self, use_sp_model): """Tests getting special tokens without an Eager init context.""" preprocess_export_path = self._do_export(["d", "ef", "abc", "xy"], do_lower_case=True, use_sp_model=use_sp_model, tokenize_with_offsets=False) def _get_special_tokens_dict(obj): """Returns special tokens of restored tokenizer as Python values.""" if tf.executing_eagerly(): special_tokens_numpy = { k: v.numpy() for k, v in obj.get_special_tokens_dict() } else: with tf.Graph().as_default(): # This code expects `get_special_tokens_dict()` to be a tf.function # with no dependencies (bound args) from the context it was loaded in, # and boldly assumes that it can just be called in a dfferent context. special_tokens_tensors = obj.get_special_tokens_dict() with tf.compat.v1.Session() as sess: special_tokens_numpy = sess.run(special_tokens_tensors) return { k: v.item() # Numpy to Python. for k, v in special_tokens_numpy.items() } def input_fn(): self.assertFalse(tf.executing_eagerly()) # Build a preprocessing Model. 
sentences = tf.keras.layers.Input(shape=[], dtype=tf.string) preprocess = tf.saved_model.load(preprocess_export_path) tokenize = hub.KerasLayer(preprocess.tokenize) special_tokens_dict = _get_special_tokens_dict(tokenize.resolved_object) for k, v in special_tokens_dict.items(): self.assertIsInstance(v, int, "Unexpected type for {}".format(k)) tokens = tokenize(sentences) packed_inputs = layers.BertPackInputs( 4, special_tokens_dict=special_tokens_dict)( tokens) preprocessing = tf.keras.Model(sentences, packed_inputs) # Map the dataset. ds = tf.data.Dataset.from_tensors( (tf.constant(["abc", "D EF"]), tf.constant([0, 1]))) ds = ds.map(lambda features, labels: (preprocessing(features), labels)) return ds def model_fn(features, labels, mode): del labels # Unused. return tf_estimator.EstimatorSpec( mode=mode, predictions=features["input_word_ids"]) estimator = tf_estimator.Estimator(model_fn=model_fn) outputs = list(estimator.predict(input_fn)) self.assertAllEqual(outputs, np.array([[2, 6, 3, 0], [2, 4, 5, 3]])) # TODO(b/175369555): Remove that code and its test. @parameterized.named_parameters(("Bert", False), ("Sentencepiece", True)) def test_check_no_assert(self, use_sp_model): """Tests the self-check during export without assertions.""" preprocess_export_path = self._do_export(["d", "ef", "abc", "xy"], do_lower_case=True, use_sp_model=use_sp_model, tokenize_with_offsets=False, experimental_disable_assert=False) with self.assertRaisesRegex(AssertionError, r"failed to suppress \d+ Assert ops"): export_tfhub_lib._check_no_assert(preprocess_export_path) def _result_shapes_in_tf_function(fn, *args, **kwargs): """Returns shapes (as lists) observed on the result of `fn`. Args: fn: A callable. *args: TensorSpecs for Tensor-valued arguments and actual values for Python-valued arguments to fn. **kwargs: Same for keyword arguments. Returns: The nest of partial tensor shapes (as lists) that is statically known inside tf.function(fn)(*args, **kwargs) for the nest of its results. """ # Use a captured mutable container for a side outout from the wrapper. uninitialized = "uninitialized!" result_shapes_container = [uninitialized] assert result_shapes_container[0] is uninitialized @tf.function def shape_reporting_wrapper(*args, **kwargs): result = fn(*args, **kwargs) result_shapes_container[0] = tf.nest.map_structure( lambda x: x.shape.as_list(), result) return result shape_reporting_wrapper.get_concrete_function(*args, **kwargs) assert result_shapes_container[0] is not uninitialized return result_shapes_container[0] if __name__ == "__main__": tf.test.main()
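

# --- Hedged usage sketch (added for illustration; not part of the original
# test file). It shows how the `_result_shapes_in_tf_function` helper defined
# above can be applied to an arbitrary callable to inspect the shapes that
# tf.function's shape inference can prove statically. `toy_projection` is a
# made-up callable that exists only for this example.
def _example_result_shapes_for_toy_projection():
  """Returns the statically inferred result shape of a toy dense projection."""
  def toy_projection(x):
    # [batch, 4] @ [4, 2] -> [batch, 2]; the batch dimension stays unknown.
    return tf.matmul(x, tf.ones([4, 2]))

  # Expected value: [None, 2].
  return _result_shapes_in_tf_function(
      toy_projection, tf.TensorSpec([None, 4], tf.float32))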
46347
41.875116
80
py
models
models-master/official/nlp/tools/export_tfhub_lib.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Library of components of export_tfhub.py. See docstring there for more.""" import contextlib import hashlib import os import tempfile from typing import Optional, Text, Tuple # Import libraries from absl import logging import tensorflow as tf # pylint: disable=g-direct-tensorflow-import TODO(b/175369555): Remove these. from tensorflow.core.protobuf import saved_model_pb2 from tensorflow.python.ops import control_flow_assert # pylint: enable=g-direct-tensorflow-import from official.legacy.bert import configs from official.modeling import tf_utils from official.nlp.configs import encoders from official.nlp.modeling import layers from official.nlp.modeling import models from official.nlp.modeling import networks def get_bert_encoder(bert_config): """Returns a BertEncoder with dict outputs.""" bert_encoder = networks.BertEncoder( vocab_size=bert_config.vocab_size, hidden_size=bert_config.hidden_size, num_layers=bert_config.num_hidden_layers, num_attention_heads=bert_config.num_attention_heads, intermediate_size=bert_config.intermediate_size, activation=tf_utils.get_activation(bert_config.hidden_act), dropout_rate=bert_config.hidden_dropout_prob, attention_dropout_rate=bert_config.attention_probs_dropout_prob, max_sequence_length=bert_config.max_position_embeddings, type_vocab_size=bert_config.type_vocab_size, initializer=tf.keras.initializers.TruncatedNormal( stddev=bert_config.initializer_range), embedding_width=bert_config.embedding_size, dict_outputs=True) return bert_encoder def get_do_lower_case(do_lower_case, vocab_file=None, sp_model_file=None): """Returns do_lower_case, replacing None by a guess from vocab file name.""" if do_lower_case is not None: return do_lower_case elif vocab_file: do_lower_case = "uncased" in vocab_file logging.info("Using do_lower_case=%s based on name of vocab_file=%s", do_lower_case, vocab_file) return do_lower_case elif sp_model_file: do_lower_case = True # All public ALBERTs (as of Oct 2020) do it. logging.info("Defaulting to do_lower_case=%s for Sentencepiece tokenizer", do_lower_case) return do_lower_case else: raise ValueError("Must set vocab_file or sp_model_file.") def _create_model( *, bert_config: Optional[configs.BertConfig] = None, encoder_config: Optional[encoders.EncoderConfig] = None, with_mlm: bool, ) -> Tuple[tf.keras.Model, tf.keras.Model]: """Creates the model to export and the model to restore the checkpoint. Args: bert_config: A legacy `BertConfig` to create a `BertEncoder` object. Exactly one of encoder_config and bert_config must be set. encoder_config: An `EncoderConfig` to create an encoder of the configured type (`BertEncoder` or other). with_mlm: A bool to control the second component of the result. If True, will create a `BertPretrainerV2` object; otherwise, will create a `BertEncoder` object. 
Returns: A Tuple of (1) a Keras model that will be exported, (2) a `BertPretrainerV2` object or `BertEncoder` object depending on the value of `with_mlm` argument, which contains the first model and will be used for restoring weights from the checkpoint. """ if (bert_config is not None) == (encoder_config is not None): raise ValueError("Exactly one of `bert_config` and `encoder_config` " "can be specified, but got %s and %s" % (bert_config, encoder_config)) if bert_config is not None: encoder = get_bert_encoder(bert_config) else: encoder = encoders.build_encoder(encoder_config) # Convert from list of named inputs to dict of inputs keyed by name. # Only the latter accepts a dict of inputs after restoring from SavedModel. if isinstance(encoder.inputs, list) or isinstance(encoder.inputs, tuple): encoder_inputs_dict = {x.name: x for x in encoder.inputs} else: # encoder.inputs by default is dict for BertEncoderV2. encoder_inputs_dict = encoder.inputs encoder_output_dict = encoder(encoder_inputs_dict) # For interchangeability with other text representations, # add "default" as an alias for BERT's whole-input reptesentations. encoder_output_dict["default"] = encoder_output_dict["pooled_output"] core_model = tf.keras.Model( inputs=encoder_inputs_dict, outputs=encoder_output_dict) if with_mlm: if bert_config is not None: hidden_act = bert_config.hidden_act else: assert encoder_config is not None hidden_act = encoder_config.get().hidden_activation pretrainer = models.BertPretrainerV2( encoder_network=encoder, mlm_activation=tf_utils.get_activation(hidden_act)) if isinstance(pretrainer.inputs, dict): pretrainer_inputs_dict = pretrainer.inputs else: pretrainer_inputs_dict = {x.name: x for x in pretrainer.inputs} pretrainer_output_dict = pretrainer(pretrainer_inputs_dict) mlm_model = tf.keras.Model( inputs=pretrainer_inputs_dict, outputs=pretrainer_output_dict) # Set `_auto_track_sub_layers` to False, so that the additional weights # from `mlm` sub-object will not be included in the core model. # TODO(b/169210253): Use a public API when available. core_model._auto_track_sub_layers = False # pylint: disable=protected-access core_model.mlm = mlm_model return core_model, pretrainer else: return core_model, encoder def export_model(export_path: Text, *, bert_config: Optional[configs.BertConfig] = None, encoder_config: Optional[encoders.EncoderConfig] = None, model_checkpoint_path: Text, with_mlm: bool, copy_pooler_dense_to_encoder: bool = False, vocab_file: Optional[Text] = None, sp_model_file: Optional[Text] = None, do_lower_case: Optional[bool] = None) -> None: """Exports an Encoder as SavedModel after restoring pre-trained weights. The exported SavedModel implements a superset of the Encoder API for Text embeddings with Transformer Encoders described at https://www.tensorflow.org/hub/common_saved_model_apis/text. In particular, the exported SavedModel can be used in the following way: ``` # Calls default interface (encoder only). encoder = hub.load(...) encoder_inputs = dict( input_word_ids=..., # Shape [batch, seq_length], dtype=int32 input_mask=..., # Shape [batch, seq_length], dtype=int32 input_type_ids=..., # Shape [batch, seq_length], dtype=int32 ) encoder_outputs = encoder(encoder_inputs) assert encoder_outputs.keys() == { "pooled_output", # Shape [batch_size, width], dtype=float32 "default", # Alias for "pooled_output" (aligns with other models). "sequence_output" # Shape [batch_size, seq_length, width], dtype=float32 "encoder_outputs", # List of Tensors with outputs of all transformer layers. 
} ``` If `with_mlm` is True, the exported SavedModel can also be called in the following way: ``` # Calls expanded interface that includes logits of the Masked Language Model. mlm_inputs = dict( input_word_ids=..., # Shape [batch, seq_length], dtype=int32 input_mask=..., # Shape [batch, seq_length], dtype=int32 input_type_ids=..., # Shape [batch, seq_length], dtype=int32 masked_lm_positions=..., # Shape [batch, num_predictions], dtype=int32 ) mlm_outputs = encoder.mlm(mlm_inputs) assert mlm_outputs.keys() == { "pooled_output", # Shape [batch, width], dtype=float32 "sequence_output", # Shape [batch, seq_length, width], dtype=float32 "encoder_outputs", # List of Tensors with outputs of all transformer layers. "mlm_logits" # Shape [batch, num_predictions, vocab_size], dtype=float32 } ``` Args: export_path: The SavedModel output directory. bert_config: An optional `configs.BertConfig` object. Note: exactly one of `bert_config` and following `encoder_config` must be specified. encoder_config: An optional `encoders.EncoderConfig` object. model_checkpoint_path: The path to the checkpoint. with_mlm: Whether to export the additional mlm sub-object. copy_pooler_dense_to_encoder: Whether to copy the pooler's dense layer used in the next sentence prediction task to the encoder. vocab_file: The path to the wordpiece vocab file, or None. sp_model_file: The path to the sentencepiece model file, or None. Exactly one of vocab_file and sp_model_file must be set. do_lower_case: Whether to lower-case text before tokenization. """ if with_mlm: core_model, pretrainer = _create_model( bert_config=bert_config, encoder_config=encoder_config, with_mlm=with_mlm) encoder = pretrainer.encoder_network # It supports both the new pretrainer checkpoint produced by TF-NLP and # the checkpoint converted from TF1 (original BERT, SmallBERTs). checkpoint_items = pretrainer.checkpoint_items checkpoint = tf.train.Checkpoint(**checkpoint_items) else: core_model, encoder = _create_model( bert_config=bert_config, encoder_config=encoder_config, with_mlm=with_mlm) checkpoint = tf.train.Checkpoint( model=encoder, # Legacy checkpoints. encoder=encoder) checkpoint.restore(model_checkpoint_path).assert_existing_objects_matched() if copy_pooler_dense_to_encoder: logging.info("Copy pooler's dense layer to the encoder.") pooler_checkpoint = tf.train.Checkpoint( **{"next_sentence.pooler_dense": encoder.pooler_layer}) pooler_checkpoint.restore( model_checkpoint_path).assert_existing_objects_matched() # Before SavedModels for preprocessing appeared in Oct 2020, the encoders # provided this information to let users do preprocessing themselves. # We keep doing that for now. It helps users to upgrade incrementally. # Moreover, it offers an escape hatch for advanced users who want the # full vocab, not the high-level operations from the preprocessing model. if vocab_file: core_model.vocab_file = tf.saved_model.Asset(vocab_file) if do_lower_case is None: raise ValueError("Must pass do_lower_case if passing vocab_file.") core_model.do_lower_case = tf.Variable(do_lower_case, trainable=False) elif sp_model_file: # This was used by ALBERT, with implied values of do_lower_case=True # and strip_diacritics=True. core_model.sp_model_file = tf.saved_model.Asset(sp_model_file) else: raise ValueError("Must set vocab_file or sp_model_file") core_model.save(export_path, include_optimizer=False, save_format="tf") class BertPackInputsSavedModelWrapper(tf.train.Checkpoint): """Wraps a BertPackInputs layer for export to SavedModel. 
The wrapper object is suitable for use with `tf.saved_model.save()` and `.load()`. The wrapper object is callable with inputs and outputs like the BertPackInputs layer, but differs from saving an unwrapped Keras object: - The inputs can be a list of 1 or 2 RaggedTensors of dtype int32 and ragged rank 1 or 2. (In Keras, saving to a tf.function in a SavedModel would fix the number of RaggedTensors and their ragged rank.) - The call accepts an optional keyword argument `seq_length=` to override the layer's .seq_length hyperparameter. (In Keras, a hyperparameter could not be changed after saving to a tf.function in a SavedModel.) """ def __init__(self, bert_pack_inputs: layers.BertPackInputs): super().__init__() # Preserve the layer's configured seq_length as a default but make it # overridable. Having this dynamically determined default argument # requires self.__call__ to be defined in this indirect way. default_seq_length = bert_pack_inputs.seq_length @tf.function(autograph=False) def call(inputs, seq_length=default_seq_length): return layers.BertPackInputs.bert_pack_inputs( inputs, seq_length=seq_length, start_of_sequence_id=bert_pack_inputs.start_of_sequence_id, end_of_segment_id=bert_pack_inputs.end_of_segment_id, padding_id=bert_pack_inputs.padding_id) self.__call__ = call for ragged_rank in range(1, 3): for num_segments in range(1, 3): _ = self.__call__.get_concrete_function([ tf.RaggedTensorSpec([None] * (ragged_rank + 1), dtype=tf.int32) for _ in range(num_segments) ], seq_length=tf.TensorSpec( [], tf.int32)) def create_preprocessing(*, vocab_file: Optional[str] = None, sp_model_file: Optional[str] = None, do_lower_case: bool, tokenize_with_offsets: bool, default_seq_length: int) -> tf.keras.Model: """Returns a preprocessing Model for given tokenization parameters. This function builds a Keras Model with attached subobjects suitable for saving to a SavedModel. The resulting SavedModel implements the Preprocessor API for Text embeddings with Transformer Encoders described at https://www.tensorflow.org/hub/common_saved_model_apis/text. Args: vocab_file: The path to the wordpiece vocab file, or None. sp_model_file: The path to the sentencepiece model file, or None. Exactly one of vocab_file and sp_model_file must be set. This determines the type of tokenzer that is used. do_lower_case: Whether to do lower case. tokenize_with_offsets: Whether to include the .tokenize_with_offsets subobject. default_seq_length: The sequence length of preprocessing results from root callable. This is also the default sequence length for the bert_pack_inputs subobject. Returns: A tf.keras.Model object with several attached subobjects, suitable for saving as a preprocessing SavedModel. """ # Select tokenizer. if bool(vocab_file) == bool(sp_model_file): raise ValueError("Must set exactly one of vocab_file, sp_model_file") if vocab_file: tokenize = layers.BertTokenizer( vocab_file=vocab_file, lower_case=do_lower_case, tokenize_with_offsets=tokenize_with_offsets) else: tokenize = layers.SentencepieceTokenizer( model_file_path=sp_model_file, lower_case=do_lower_case, strip_diacritics=True, # Strip diacritics to follow ALBERT model. tokenize_with_offsets=tokenize_with_offsets) # The root object of the preprocessing model can be called to do # one-shot preprocessing for users with single-sentence inputs. 
sentences = tf.keras.layers.Input(shape=(), dtype=tf.string, name="sentences") if tokenize_with_offsets: tokens, start_offsets, limit_offsets = tokenize(sentences) else: tokens = tokenize(sentences) pack = layers.BertPackInputs( seq_length=default_seq_length, special_tokens_dict=tokenize.get_special_tokens_dict()) model_inputs = pack(tokens) preprocessing = tf.keras.Model(sentences, model_inputs) # Individual steps of preprocessing are made available as named subobjects # to enable more general preprocessing. For saving, they need to be Models # in their own right. preprocessing.tokenize = tf.keras.Model(sentences, tokens) # Provide an equivalent to tokenize.get_special_tokens_dict(). preprocessing.tokenize.get_special_tokens_dict = tf.train.Checkpoint() preprocessing.tokenize.get_special_tokens_dict.__call__ = tf.function( lambda: tokenize.get_special_tokens_dict(), # pylint: disable=[unnecessary-lambda] input_signature=[]) if tokenize_with_offsets: preprocessing.tokenize_with_offsets = tf.keras.Model( sentences, [tokens, start_offsets, limit_offsets]) preprocessing.tokenize_with_offsets.get_special_tokens_dict = ( preprocessing.tokenize.get_special_tokens_dict) # Conceptually, this should be # preprocessing.bert_pack_inputs = tf.keras.Model(tokens, model_inputs) # but technicalities require us to use a wrapper (see comments there). # In particular, seq_length can be overridden when calling this. preprocessing.bert_pack_inputs = BertPackInputsSavedModelWrapper(pack) return preprocessing def _move_to_tmpdir(file_path: Optional[Text], tmpdir: Text) -> Optional[Text]: """Returns new path with same basename and hash of original path.""" if file_path is None: return None olddir, filename = os.path.split(file_path) hasher = hashlib.sha1() hasher.update(olddir.encode("utf-8")) target_dir = os.path.join(tmpdir, hasher.hexdigest()) target_file = os.path.join(target_dir, filename) tf.io.gfile.mkdir(target_dir) tf.io.gfile.copy(file_path, target_file) return target_file def export_preprocessing(export_path: Text, *, vocab_file: Optional[Text] = None, sp_model_file: Optional[Text] = None, do_lower_case: bool, tokenize_with_offsets: bool, default_seq_length: int, experimental_disable_assert: bool = False) -> None: """Exports preprocessing to a SavedModel for TF Hub.""" with tempfile.TemporaryDirectory() as tmpdir: # TODO(b/175369555): Remove experimental_disable_assert and its use. with _maybe_disable_assert(experimental_disable_assert): preprocessing = create_preprocessing( vocab_file=_move_to_tmpdir(vocab_file, tmpdir), sp_model_file=_move_to_tmpdir(sp_model_file, tmpdir), do_lower_case=do_lower_case, tokenize_with_offsets=tokenize_with_offsets, default_seq_length=default_seq_length) preprocessing.save(export_path, include_optimizer=False, save_format="tf") if experimental_disable_assert: _check_no_assert(export_path) # It helps the unit test to prevent stray copies of the vocab file. if tf.io.gfile.exists(tmpdir): raise IOError("Failed to clean up TemporaryDirectory") # TODO(b/175369555): Remove all workarounds for this bug of TensorFlow 2.4 # when this bug is no longer a concern for publishing new models. # TensorFlow 2.4 has a placement issue with Assert ops in tf.functions called # from Dataset.map() on a TPU worker. They end up on the TPU coordinator, # and invoking them from the TPU worker is either inefficient (when possible) # or impossible (notably when using "headless" TPU workers on Cloud that do not # have a channel to the coordinator). The bug has been fixed in time for TF 2.5. 
# To work around this, the following code avoids Assert ops in the exported # SavedModels. It monkey-patches calls to tf.Assert from inside TensorFlow and # replaces them by a no-op while building the exported model. This is fragile, # so _check_no_assert() validates the result. The resulting model should be fine # to read on future versions of TF, even if this workaround at export time # may break eventually. (Failing unit tests will tell.) def _dont_assert(condition, data, summarize=None, name="Assert"): """The no-op version of tf.Assert installed by _maybe_disable_assert.""" del condition, data, summarize # Unused. if tf.executing_eagerly(): return with tf.name_scope(name): return tf.no_op(name="dont_assert") @contextlib.contextmanager def _maybe_disable_assert(disable_assert): """Scoped monkey patch of control_flow_assert.Assert to a no-op.""" if not disable_assert: yield return original_assert = control_flow_assert.Assert control_flow_assert.Assert = _dont_assert yield control_flow_assert.Assert = original_assert def _check_no_assert(saved_model_path): """Raises AssertionError if SavedModel contains Assert ops.""" saved_model_filename = os.path.join(saved_model_path, "saved_model.pb") with tf.io.gfile.GFile(saved_model_filename, "rb") as f: saved_model = saved_model_pb2.SavedModel.FromString(f.read()) assert_nodes = [] graph_def = saved_model.meta_graphs[0].graph_def assert_nodes += [ "node '{}' in global graph".format(n.name) for n in graph_def.node if n.op == "Assert" ] for fdef in graph_def.library.function: assert_nodes += [ "node '{}' in function '{}'".format(n.name, fdef.signature.name) for n in fdef.node_def if n.op == "Assert" ] if assert_nodes: raise AssertionError( "Internal tool error: " "failed to suppress {} Assert ops in SavedModel:\n{}".format( len(assert_nodes), "\n".join(assert_nodes[:10])))
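

# --- Hedged usage sketch (added for illustration; not part of the original
# library). It shows how the two exporters above are commonly combined to
# produce a matching encoder/preprocessing SavedModel pair for TF Hub. All
# output paths below are placeholders, and the wrapper function itself is
# made up for this example.
def _example_export_encoder_and_preprocessing(bert_config: configs.BertConfig,
                                              model_checkpoint_path: str,
                                              vocab_file: str):
  """Exports an encoder-only SavedModel plus its preprocessing SavedModel."""
  export_model(
      "/tmp/bert_encoder",  # Placeholder output directory.
      bert_config=bert_config,
      model_checkpoint_path=model_checkpoint_path,
      with_mlm=False,  # Encoder-only export; set True to attach the .mlm head.
      vocab_file=vocab_file,
      do_lower_case=True)
  export_preprocessing(
      "/tmp/bert_preprocess",  # Placeholder output directory.
      vocab_file=vocab_file,
      do_lower_case=True,
      tokenize_with_offsets=True,
      default_seq_length=128)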
21391
42.303644
89
py
models
models-master/official/nlp/tools/tf2_bert_encoder_checkpoint_converter.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """A converter from a V1 BERT encoder checkpoint to a V2 encoder checkpoint. The conversion will yield an object-oriented checkpoint that can be used to restore a BertEncoder or BertPretrainerV2 object (see the `converted_model` FLAG below). """ import os from absl import app from absl import flags import tensorflow as tf from official.legacy.bert import configs from official.modeling import tf_utils from official.nlp.modeling import models from official.nlp.modeling import networks from official.nlp.tools import tf1_bert_checkpoint_converter_lib FLAGS = flags.FLAGS flags.DEFINE_string("bert_config_file", None, "Bert configuration file to define core bert layers.") flags.DEFINE_string( "checkpoint_to_convert", None, "Initial checkpoint from a pretrained BERT model core (that is, only the " "BertModel, with no task heads.)") flags.DEFINE_string("converted_checkpoint_path", None, "Name for the created object-based V2 checkpoint.") flags.DEFINE_string("checkpoint_model_name", "encoder", "The name of the model when saving the checkpoint, i.e., " "the checkpoint will be saved using: " "tf.train.Checkpoint(FLAGS.checkpoint_model_name=model).") flags.DEFINE_enum( "converted_model", "encoder", ["encoder", "pretrainer"], "Whether to convert the checkpoint to a `BertEncoder` model or a " "`BertPretrainerV2` model (with mlm but without classification heads).") def _create_bert_model(cfg): """Creates a BERT keras core model from BERT configuration. Args: cfg: A `BertConfig` to create the core model. Returns: A BertEncoder network. """ bert_encoder = networks.BertEncoder( vocab_size=cfg.vocab_size, hidden_size=cfg.hidden_size, num_layers=cfg.num_hidden_layers, num_attention_heads=cfg.num_attention_heads, intermediate_size=cfg.intermediate_size, activation=tf_utils.get_activation(cfg.hidden_act), dropout_rate=cfg.hidden_dropout_prob, attention_dropout_rate=cfg.attention_probs_dropout_prob, max_sequence_length=cfg.max_position_embeddings, type_vocab_size=cfg.type_vocab_size, initializer=tf.keras.initializers.TruncatedNormal( stddev=cfg.initializer_range), embedding_width=cfg.embedding_size) return bert_encoder def _create_bert_pretrainer_model(cfg): """Creates a BERT keras core model from BERT configuration. Args: cfg: A `BertConfig` to create the core model. Returns: A BertPretrainerV2 model. """ bert_encoder = _create_bert_model(cfg) pretrainer = models.BertPretrainerV2( encoder_network=bert_encoder, mlm_activation=tf_utils.get_activation(cfg.hidden_act), mlm_initializer=tf.keras.initializers.TruncatedNormal( stddev=cfg.initializer_range)) # Makes sure the pretrainer variables are created. 
_ = pretrainer(pretrainer.inputs) return pretrainer def convert_checkpoint(bert_config, output_path, v1_checkpoint, checkpoint_model_name="model", converted_model="encoder"): """Converts a V1 checkpoint into an OO V2 checkpoint.""" output_dir, _ = os.path.split(output_path) tf.io.gfile.makedirs(output_dir) # Create a temporary V1 name-converted checkpoint in the output directory. temporary_checkpoint_dir = os.path.join(output_dir, "temp_v1") temporary_checkpoint = os.path.join(temporary_checkpoint_dir, "ckpt") tf1_bert_checkpoint_converter_lib.convert( checkpoint_from_path=v1_checkpoint, checkpoint_to_path=temporary_checkpoint, num_heads=bert_config.num_attention_heads, name_replacements=( tf1_bert_checkpoint_converter_lib.BERT_V2_NAME_REPLACEMENTS), permutations=tf1_bert_checkpoint_converter_lib.BERT_V2_PERMUTATIONS, exclude_patterns=["adam", "Adam"]) if converted_model == "encoder": model = _create_bert_model(bert_config) elif converted_model == "pretrainer": model = _create_bert_pretrainer_model(bert_config) else: raise ValueError("Unsupported converted_model: %s" % converted_model) # Create a V2 checkpoint from the temporary checkpoint. tf1_bert_checkpoint_converter_lib.create_v2_checkpoint( model, temporary_checkpoint, output_path, checkpoint_model_name) # Clean up the temporary checkpoint, if it exists. try: tf.io.gfile.rmtree(temporary_checkpoint_dir) except tf.errors.OpError: # If it doesn't exist, we don't need to clean it up; continue. pass def main(argv): if len(argv) > 1: raise app.UsageError("Too many command-line arguments.") output_path = FLAGS.converted_checkpoint_path v1_checkpoint = FLAGS.checkpoint_to_convert checkpoint_model_name = FLAGS.checkpoint_model_name converted_model = FLAGS.converted_model bert_config = configs.BertConfig.from_json_file(FLAGS.bert_config_file) convert_checkpoint( bert_config=bert_config, output_path=output_path, v1_checkpoint=v1_checkpoint, checkpoint_model_name=checkpoint_model_name, converted_model=converted_model) if __name__ == "__main__": app.run(main)
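

# --- Hedged usage sketch (added for illustration; not part of the original
# tool). It shows how to call the converter programmatically instead of via
# the absl flags above. All file paths are placeholders.
def _example_convert_v1_bert_checkpoint(bert_config_file: str,
                                        v1_checkpoint: str,
                                        output_path: str):
  """Converts a TF1 BERT checkpoint into an object-based V2 checkpoint."""
  bert_config = configs.BertConfig.from_json_file(bert_config_file)
  convert_checkpoint(
      bert_config=bert_config,
      output_path=output_path,
      v1_checkpoint=v1_checkpoint,
      checkpoint_model_name="encoder",
      converted_model="pretrainer")  # Or "encoder" for the encoder alone.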
5826
35.192547
78
py
models
models-master/official/nlp/tools/tf2_albert_encoder_checkpoint_converter.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """A converter from a tf1 ALBERT encoder checkpoint to a tf2 encoder checkpoint. The conversion will yield an object-oriented checkpoint that can be used to restore an AlbertEncoder object. """ import os from absl import app from absl import flags import tensorflow as tf from official.legacy.albert import configs from official.modeling import tf_utils from official.nlp.modeling import models from official.nlp.modeling import networks from official.nlp.tools import tf1_bert_checkpoint_converter_lib FLAGS = flags.FLAGS flags.DEFINE_string("albert_config_file", None, "Albert configuration file to define core bert layers.") flags.DEFINE_string( "checkpoint_to_convert", None, "Initial checkpoint from a pretrained BERT model core (that is, only the " "BertModel, with no task heads.)") flags.DEFINE_string("converted_checkpoint_path", None, "Name for the created object-based V2 checkpoint.") flags.DEFINE_string("checkpoint_model_name", "encoder", "The name of the model when saving the checkpoint, i.e., " "the checkpoint will be saved using: " "tf.train.Checkpoint(FLAGS.checkpoint_model_name=model).") flags.DEFINE_enum( "converted_model", "encoder", ["encoder", "pretrainer"], "Whether to convert the checkpoint to a `AlbertEncoder` model or a " "`BertPretrainerV2` model (with mlm but without classification heads).") ALBERT_NAME_REPLACEMENTS = ( ("bert/encoder/", ""), ("bert/", ""), ("embeddings/word_embeddings", "word_embeddings/embeddings"), ("embeddings/position_embeddings", "position_embedding/embeddings"), ("embeddings/token_type_embeddings", "type_embeddings/embeddings"), ("embeddings/LayerNorm", "embeddings/layer_norm"), ("embedding_hidden_mapping_in", "embedding_projection"), ("group_0/inner_group_0/", ""), ("attention_1/self", "self_attention"), ("attention_1/output/dense", "self_attention/attention_output"), ("transformer/LayerNorm/", "transformer/self_attention_layer_norm/"), ("ffn_1/intermediate/dense", "intermediate"), ("ffn_1/intermediate/output/dense", "output"), ("transformer/LayerNorm_1/", "transformer/output_layer_norm/"), ("pooler/dense", "pooler_transform"), ("cls/predictions", "bert/cls/predictions"), ("cls/predictions/output_bias", "cls/predictions/output_bias/bias"), ("cls/seq_relationship/output_bias", "predictions/transform/logits/bias"), ("cls/seq_relationship/output_weights", "predictions/transform/logits/kernel"), ) def _create_albert_model(cfg): """Creates an ALBERT keras core model from BERT configuration. Args: cfg: A `AlbertConfig` to create the core model. Returns: A keras model. 
""" albert_encoder = networks.AlbertEncoder( vocab_size=cfg.vocab_size, hidden_size=cfg.hidden_size, embedding_width=cfg.embedding_size, num_layers=cfg.num_hidden_layers, num_attention_heads=cfg.num_attention_heads, intermediate_size=cfg.intermediate_size, activation=tf_utils.get_activation(cfg.hidden_act), dropout_rate=cfg.hidden_dropout_prob, attention_dropout_rate=cfg.attention_probs_dropout_prob, max_sequence_length=cfg.max_position_embeddings, type_vocab_size=cfg.type_vocab_size, initializer=tf.keras.initializers.TruncatedNormal( stddev=cfg.initializer_range)) return albert_encoder def _create_pretrainer_model(cfg): """Creates a pretrainer with AlbertEncoder from ALBERT configuration. Args: cfg: A `BertConfig` to create the core model. Returns: A BertPretrainerV2 model. """ albert_encoder = _create_albert_model(cfg) pretrainer = models.BertPretrainerV2( encoder_network=albert_encoder, mlm_activation=tf_utils.get_activation(cfg.hidden_act), mlm_initializer=tf.keras.initializers.TruncatedNormal( stddev=cfg.initializer_range)) # Makes sure masked_lm layer's variables in pretrainer are created. _ = pretrainer(pretrainer.inputs) return pretrainer def convert_checkpoint(bert_config, output_path, v1_checkpoint, checkpoint_model_name, converted_model="encoder"): """Converts a V1 checkpoint into an OO V2 checkpoint.""" output_dir, _ = os.path.split(output_path) # Create a temporary V1 name-converted checkpoint in the output directory. temporary_checkpoint_dir = os.path.join(output_dir, "temp_v1") temporary_checkpoint = os.path.join(temporary_checkpoint_dir, "ckpt") tf1_bert_checkpoint_converter_lib.convert( checkpoint_from_path=v1_checkpoint, checkpoint_to_path=temporary_checkpoint, num_heads=bert_config.num_attention_heads, name_replacements=ALBERT_NAME_REPLACEMENTS, permutations=tf1_bert_checkpoint_converter_lib.BERT_V2_PERMUTATIONS, exclude_patterns=["adam", "Adam"]) # Create a V2 checkpoint from the temporary checkpoint. if converted_model == "encoder": model = _create_albert_model(bert_config) elif converted_model == "pretrainer": model = _create_pretrainer_model(bert_config) else: raise ValueError("Unsupported converted_model: %s" % converted_model) tf1_bert_checkpoint_converter_lib.create_v2_checkpoint( model, temporary_checkpoint, output_path, checkpoint_model_name) # Clean up the temporary checkpoint, if it exists. try: tf.io.gfile.rmtree(temporary_checkpoint_dir) except tf.errors.OpError: # If it doesn't exist, we don't need to clean it up; continue. pass def main(_): output_path = FLAGS.converted_checkpoint_path v1_checkpoint = FLAGS.checkpoint_to_convert checkpoint_model_name = FLAGS.checkpoint_model_name converted_model = FLAGS.converted_model albert_config = configs.AlbertConfig.from_json_file(FLAGS.albert_config_file) convert_checkpoint(albert_config, output_path, v1_checkpoint, checkpoint_model_name, converted_model=converted_model) if __name__ == "__main__": app.run(main)
6676
38.046784
80
py
models
models-master/official/nlp/finetuning/binary_helper.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """The helper for finetuning binaries.""" import json import math import sys from typing import Any, Dict, List, Optional from absl import logging import tensorflow as tf from official.core import config_definitions as cfg from official.modeling import hyperparams from official.nlp.configs import encoders from official.nlp.data import question_answering_dataloader from official.nlp.data import sentence_prediction_dataloader from official.nlp.data import tagging_dataloader from official.nlp.tasks import question_answering from official.nlp.tasks import sentence_prediction from official.nlp.tasks import tagging def override_trainer_cfg(trainer_cfg: cfg.TrainerConfig, learning_rate: float, num_epoch: int, global_batch_size: int, warmup_ratio: float, training_data_size: int, eval_data_size: int, num_eval_per_epoch: int, best_checkpoint_export_subdir: str, best_checkpoint_eval_metric: str, best_checkpoint_metric_comp: str): """Overrides a `cfg.TrainerConfig` object.""" steps_per_epoch = training_data_size // global_batch_size train_steps = steps_per_epoch * num_epoch # TODO(b/165081095): always set to -1 after the bug is resolved. if eval_data_size: eval_steps = int(math.ceil(eval_data_size / global_batch_size)) else: eval_steps = -1 # exhaust the validation data. warmp_steps = int(train_steps * warmup_ratio) validation_interval = steps_per_epoch // num_eval_per_epoch trainer_cfg.override({ 'optimizer_config': { 'learning_rate': { 'type': 'polynomial', 'polynomial': { 'decay_steps': train_steps, 'initial_learning_rate': learning_rate, 'end_learning_rate': 0, } }, 'optimizer': { 'type': 'adamw', }, 'warmup': { 'polynomial': { 'warmup_steps': warmp_steps, }, 'type': 'polynomial', }, }, 'train_steps': train_steps, 'validation_interval': validation_interval, 'validation_steps': eval_steps, 'best_checkpoint_export_subdir': best_checkpoint_export_subdir, 'best_checkpoint_eval_metric': best_checkpoint_eval_metric, 'best_checkpoint_metric_comp': best_checkpoint_metric_comp, }) def load_model_config_file(model_config_file: str) -> Dict[str, Any]: """Loads bert config json file or `encoders.EncoderConfig` in yaml file.""" if not model_config_file: # model_config_file may be empty when using tf.hub. return {} try: encoder_config = encoders.EncoderConfig() encoder_config = hyperparams.override_params_dict( encoder_config, model_config_file, is_strict=True) logging.info('Load encoder_config yaml file from %s.', model_config_file) return encoder_config.as_dict() except KeyError: pass logging.info('Load bert config json file from %s', model_config_file) with tf.io.gfile.GFile(model_config_file, 'r') as reader: text = reader.read() config = json.loads(text) def get_value(key1, key2): if key1 in config and key2 in config: raise ValueError('Unexpected that both %s and %s are in config.' 
% (key1, key2)) return config[key1] if key1 in config else config[key2] def get_value_or_none(key): return config[key] if key in config else None # Support both legacy bert_config attributes and the new config attributes. return { 'bert': { 'attention_dropout_rate': get_value('attention_dropout_rate', 'attention_probs_dropout_prob'), 'dropout_rate': get_value('dropout_rate', 'hidden_dropout_prob'), 'hidden_activation': get_value('hidden_activation', 'hidden_act'), 'hidden_size': config['hidden_size'], 'embedding_size': get_value_or_none('embedding_size'), 'initializer_range': config['initializer_range'], 'intermediate_size': config['intermediate_size'], 'max_position_embeddings': config['max_position_embeddings'], 'num_attention_heads': config['num_attention_heads'], 'num_layers': get_value('num_layers', 'num_hidden_layers'), 'type_vocab_size': config['type_vocab_size'], 'vocab_size': config['vocab_size'], } } def override_sentence_prediction_task_config( task_cfg: sentence_prediction.SentencePredictionConfig, model_config_file: str, init_checkpoint: str, hub_module_url: str, global_batch_size: int, train_input_path: str, validation_input_path: str, seq_length: int, num_classes: int, metric_type: Optional[str] = 'accuracy', label_type: Optional[str] = 'int'): """Overrides a `SentencePredictionConfig` object.""" task_cfg.override({ 'init_checkpoint': init_checkpoint, 'metric_type': metric_type, 'model': { 'num_classes': num_classes, 'encoder': load_model_config_file(model_config_file), }, 'hub_module_url': hub_module_url, 'train_data': { 'drop_remainder': True, 'global_batch_size': global_batch_size, 'input_path': train_input_path, 'is_training': True, 'seq_length': seq_length, 'label_type': label_type, }, 'validation_data': { 'drop_remainder': False, 'global_batch_size': global_batch_size, 'input_path': validation_input_path, 'is_training': False, 'seq_length': seq_length, 'label_type': label_type, } }) def override_qa_task_config( task_cfg: question_answering.QuestionAnsweringConfig, model_config_file: str, init_checkpoint: str, hub_module_url: str, global_batch_size: int, train_input_path: str, validation_input_path: str, seq_length: int, tokenization: str, vocab_file: str, do_lower_case: bool, version_2_with_negative: bool): """Overrides a `QuestionAnsweringConfig` object.""" task_cfg.override({ 'init_checkpoint': init_checkpoint, 'model': { 'encoder': load_model_config_file(model_config_file), }, 'hub_module_url': hub_module_url, 'train_data': { 'drop_remainder': True, 'global_batch_size': global_batch_size, 'input_path': train_input_path, 'is_training': True, 'seq_length': seq_length, }, 'validation_data': { 'do_lower_case': do_lower_case, 'drop_remainder': False, 'global_batch_size': global_batch_size, 'input_path': validation_input_path, 'is_training': False, 'seq_length': seq_length, 'tokenization': tokenization, 'version_2_with_negative': version_2_with_negative, 'vocab_file': vocab_file, } }) def override_tagging_task_config(task_cfg: tagging.TaggingConfig, model_config_file: str, init_checkpoint: str, hub_module_url: str, global_batch_size: int, train_input_path: str, validation_input_path: str, seq_length: int, class_names: List[str]): """Overrides a `TaggingConfig` object.""" task_cfg.override({ 'init_checkpoint': init_checkpoint, 'model': { 'encoder': load_model_config_file(model_config_file), }, 'hub_module_url': hub_module_url, 'train_data': { 'drop_remainder': True, 'global_batch_size': global_batch_size, 'input_path': train_input_path, 'is_training': True, 'seq_length': 
seq_length, }, 'validation_data': { 'drop_remainder': False, 'global_batch_size': global_batch_size, 'input_path': validation_input_path, 'is_training': False, 'seq_length': seq_length, }, 'class_names': class_names, }) def write_glue_classification(task, model, input_file, output_file, predict_batch_size, seq_length, class_names, label_type='int', min_float_value=None, max_float_value=None): """Makes classification predictions for glue and writes to output file. Args: task: `Task` instance. model: `keras.Model` instance. input_file: Input test data file path. output_file: Output test data file path. predict_batch_size: Batch size for prediction. seq_length: Input sequence length. class_names: List of string class names. label_type: String denoting label type ('int', 'float'), defaults to 'int'. min_float_value: If set, predictions will be min-clipped to this value (only for regression when `label_type` is set to 'float'). Defaults to `None` (no clipping). max_float_value: If set, predictions will be max-clipped to this value (only for regression when `label_type` is set to 'float'). Defaults to `None` (no clipping). """ if label_type not in ('int', 'float'): raise ValueError('Unsupported `label_type`. Given: %s, expected `int` or ' '`float`.' % label_type) data_config = sentence_prediction_dataloader.SentencePredictionDataConfig( input_path=input_file, global_batch_size=predict_batch_size, is_training=False, seq_length=seq_length, label_type=label_type, drop_remainder=False, include_example_id=True) predictions = sentence_prediction.predict(task, data_config, model) if label_type == 'float': min_float_value = (-sys.float_info.max if min_float_value is None else min_float_value) max_float_value = ( sys.float_info.max if max_float_value is None else max_float_value) # Clip predictions to range [min_float_value, max_float_value]. predictions = [ min(max(prediction, min_float_value), max_float_value) for prediction in predictions ] with tf.io.gfile.GFile(output_file, 'w') as writer: writer.write('index\tprediction\n') for index, prediction in enumerate(predictions): if label_type == 'float': # Regression. writer.write('%d\t%.3f\n' % (index, prediction)) else: # Classification. writer.write('%d\t%s\n' % (index, class_names[prediction])) def write_superglue_classification(task, model, input_file, output_file, predict_batch_size, seq_length, class_names, label_type='int'): """Makes classification predictions for superglue and writes to output file. Args: task: `Task` instance. model: `keras.Model` instance. input_file: Input test data file path. output_file: Output test data file path. predict_batch_size: Batch size for prediction. seq_length: Input sequence length. class_names: List of string class names. label_type: String denoting label type ('int', 'float'), defaults to 'int'. """ if label_type not in 'int': raise ValueError('Unsupported `label_type`. Given: %s, expected `int` or ' '`float`.' % label_type) data_config = sentence_prediction_dataloader.SentencePredictionDataConfig( input_path=input_file, global_batch_size=predict_batch_size, is_training=False, seq_length=seq_length, label_type=label_type, drop_remainder=False, include_example_id=True) predictions = sentence_prediction.predict(task, data_config, model) with tf.io.gfile.GFile(output_file, 'w') as writer: for index, prediction in enumerate(predictions): if label_type == 'int': # Classification. 
writer.write('{"idx": %d, "label": %s}\n' % (index, class_names[prediction])) def write_xtreme_classification(task, model, input_file, output_file, predict_batch_size, seq_length, class_names, translated_input_file=None, test_time_aug_wgt=0.3): """Makes classification predictions for xtreme and writes to output file.""" data_config = sentence_prediction_dataloader.SentencePredictionDataConfig( input_path=input_file, seq_length=seq_length, is_training=False, label_type='int', global_batch_size=predict_batch_size, drop_remainder=False, include_example_id=True) if translated_input_file is not None: data_config_aug = ( sentence_prediction_dataloader.SentencePredictionDataConfig( input_path=translated_input_file, seq_length=seq_length, is_training=False, label_type='int', global_batch_size=predict_batch_size, drop_remainder=False, include_example_id=True)) else: data_config_aug = None predictions = sentence_prediction.predict(task, data_config, model, data_config_aug, test_time_aug_wgt) with tf.io.gfile.GFile(output_file, 'w') as writer: for prediction in predictions: writer.write('%s\n' % class_names[prediction]) def write_question_answering(task, model, input_file, output_file, predict_batch_size, seq_length, tokenization, vocab_file, do_lower_case, version_2_with_negative=False): """Makes question answering predictions and writes to output file.""" data_config = question_answering_dataloader.QADataConfig( do_lower_case=do_lower_case, doc_stride=128, drop_remainder=False, global_batch_size=predict_batch_size, input_path=input_file, is_training=False, query_length=64, seq_length=seq_length, tokenization=tokenization, version_2_with_negative=version_2_with_negative, vocab_file=vocab_file) all_predictions, _, _ = question_answering.predict(task, data_config, model) with tf.io.gfile.GFile(output_file, 'w') as writer: writer.write(json.dumps(all_predictions, indent=4) + '\n') def write_tagging(task, model, input_file, output_file, predict_batch_size, seq_length): """Makes tagging predictions and writes to output file.""" data_config = tagging_dataloader.TaggingDataConfig( input_path=input_file, is_training=False, seq_length=seq_length, global_batch_size=predict_batch_size, drop_remainder=False, include_sentence_id=True) results = tagging.predict(task, data_config, model) class_names = task.task_config.class_names last_sentence_id = -1 with tf.io.gfile.GFile(output_file, 'w') as writer: for sentence_id, _, predict_ids in results: token_labels = [class_names[x] for x in predict_ids] assert sentence_id == last_sentence_id or ( sentence_id == last_sentence_id + 1) if sentence_id != last_sentence_id and last_sentence_id != -1: writer.write('\n') writer.write('\n'.join(token_labels)) writer.write('\n') last_sentence_id = sentence_id
16,761
36.58296
80
py
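The override and write helpers in the record above are typically chained: override a SentencePredictionConfig, build the task and model, then write test-set predictions. A minimal sketch, assuming those helpers are already imported into scope; all file paths, batch sizes, and class names below are placeholders, not values from the dump:

from official.nlp.tasks import sentence_prediction

task_cfg = sentence_prediction.SentencePredictionConfig()
override_sentence_prediction_task_config(   # helper defined in the file above
    task_cfg,
    model_config_file='bert_config.json',      # placeholder path
    init_checkpoint='bert_model.ckpt',         # placeholder path
    hub_module_url='',
    global_batch_size=32,
    train_input_path='train.tf_record',        # placeholder path
    validation_input_path='eval.tf_record',    # placeholder path
    seq_length=128,
    num_classes=2)

task = sentence_prediction.SentencePredictionTask(task_cfg)
model = task.build_model()
write_glue_classification(                     # helper defined in the file above
    task, model,
    input_file='test.tf_record',               # placeholder path
    output_file='predictions.tsv',             # placeholder path
    predict_batch_size=32,
    seq_length=128,
    class_names=['entailment', 'not_entailment'])  # placeholder labels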
models
models-master/official/nlp/serving/export_savedmodel_util.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Common library to export a SavedModel from the export module.""" from typing import Dict, List, Optional, Union, Any import tensorflow as tf from official.core import export_base get_timestamped_export_dir = export_base.get_timestamped_export_dir def export(export_module: export_base.ExportModule, function_keys: Union[List[str], Dict[str, str]], export_savedmodel_dir: str, checkpoint_path: Optional[str] = None, timestamped: bool = True, module_key: Optional[str] = None, checkpoint_kwargs: Optional[Dict[str, Any]] = None) -> str: """Exports to SavedModel format. Args: export_module: a ExportModule with the keras Model and serving tf.functions. function_keys: a list of string keys to retrieve pre-defined serving signatures. The signaute keys will be set with defaults. If a dictionary is provided, the values will be used as signature keys. export_savedmodel_dir: Output saved model directory. checkpoint_path: Object-based checkpoint path or directory. timestamped: Whether to export the savedmodel to a timestamped directory. module_key: Optional string to identify a checkpoint object to load for the model in the export module. checkpoint_kwargs: Optional dict used as keyword args to create the checkpoint object. Not used if module_key is present. Returns: The savedmodel directory path. """ save_options = tf.saved_model.SaveOptions(function_aliases={ 'tpu_candidate': export_module.serve, }) if module_key: kwargs = {module_key: export_module.model} checkpoint = tf.train.Checkpoint(**kwargs) elif checkpoint_kwargs: checkpoint = tf.train.Checkpoint(**checkpoint_kwargs) else: checkpoint = None return export_base.export( export_module, function_keys, export_savedmodel_dir, checkpoint_path, timestamped, save_options, checkpoint=checkpoint)
2,583
37
80
py
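A minimal sketch of driving export() from the file above with a small ExportModule subclass; ToyExportModule, the toy Keras model, the signature key, and the output directory are all illustrative assumptions rather than part of the library:

import tensorflow as tf
from official.core import export_base
from official.nlp.serving import export_savedmodel_util

class ToyExportModule(export_base.ExportModule):   # hypothetical subclass
  @tf.function
  def serve(self, inputs):
    # Delegates to the default inference step (the wrapped Keras model).
    return {'outputs': self.inference_step(inputs)}

  def get_inference_signatures(self, function_keys):
    return {
        function_keys['serve']: self.serve.get_concrete_function(
            tf.TensorSpec(shape=[None, 4], dtype=tf.float32, name='inputs'))
    }

module = ToyExportModule(
    params=None, model=tf.keras.Sequential([tf.keras.layers.Dense(2)]))
saved_dir = export_savedmodel_util.export(
    module,
    function_keys={'serve': 'serving_default'},
    export_savedmodel_dir='/tmp/toy_savedmodel',   # placeholder output path
    timestamped=False)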
models
models-master/official/nlp/serving/serving_modules.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Serving export modules for TF Model Garden NLP models.""" # pylint:disable=missing-class-docstring import dataclasses from typing import Dict, List, Optional, Text import tensorflow as tf import tensorflow_text as tf_text from official.core import export_base from official.modeling.hyperparams import base_config from official.nlp.data import sentence_prediction_dataloader def features_to_int32(features: Dict[str, tf.Tensor]) -> Dict[str, tf.Tensor]: """Converts tf.int64 features to tf.int32, keep other features the same. tf.Example only supports tf.int64, but the TPU only supports tf.int32. Args: features: Input tensor dictionary. Returns: Features with tf.int64 converted to tf.int32. """ converted_features = {} for name, tensor in features.items(): if tensor.dtype == tf.int64: converted_features[name] = tf.cast(tensor, tf.int32) else: converted_features[name] = tensor return converted_features class SentencePrediction(export_base.ExportModule): """The export module for the sentence prediction task.""" @dataclasses.dataclass class Params(base_config.Config): inputs_only: bool = True parse_sequence_length: Optional[int] = None use_v2_feature_names: bool = True # For text input processing. text_fields: Optional[List[str]] = None # Either specify these values for preprocessing by Python code... tokenization: str = "WordPiece" # WordPiece or SentencePiece # Text vocab file if tokenization is WordPiece, or sentencepiece.ModelProto # file if tokenization is SentencePiece. vocab_file: str = "" lower_case: bool = True # ...or load preprocessing from a SavedModel at this location. preprocessing_hub_module_url: str = "" def __init__(self, params, model: tf.keras.Model, inference_step=None): super().__init__(params, model, inference_step) if params.use_v2_feature_names: self.input_word_ids_field = "input_word_ids" self.input_type_ids_field = "input_type_ids" else: self.input_word_ids_field = "input_ids" self.input_type_ids_field = "segment_ids" if params.text_fields: self._text_processor = sentence_prediction_dataloader.TextProcessor( seq_length=params.parse_sequence_length, vocab_file=params.vocab_file, tokenization=params.tokenization, lower_case=params.lower_case, preprocessing_hub_module_url=params.preprocessing_hub_module_url) def _serve_tokenized_input(self, input_word_ids, input_mask=None, input_type_ids=None) -> tf.Tensor: if input_type_ids is None: # Requires CLS token is the first token of inputs. input_type_ids = tf.zeros_like(input_word_ids) if input_mask is None: # The mask has 1 for real tokens and 0 for padding tokens. 
input_mask = tf.where( tf.equal(input_word_ids, 0), tf.zeros_like(input_word_ids), tf.ones_like(input_word_ids)) inputs = dict( input_word_ids=input_word_ids, input_mask=input_mask, input_type_ids=input_type_ids) return self.inference_step(inputs) @tf.function def serve(self, input_word_ids, input_mask=None, input_type_ids=None) -> Dict[str, tf.Tensor]: return dict( outputs=self._serve_tokenized_input(input_word_ids, input_mask, input_type_ids)) @tf.function def serve_probability(self, input_word_ids, input_mask=None, input_type_ids=None) -> Dict[str, tf.Tensor]: return dict( outputs=tf.nn.softmax( self._serve_tokenized_input(input_word_ids, input_mask, input_type_ids))) @tf.function def serve_examples(self, inputs) -> Dict[str, tf.Tensor]: sequence_length = self.params.parse_sequence_length inputs_only = self.params.inputs_only name_to_features = { self.input_word_ids_field: tf.io.FixedLenFeature([sequence_length], tf.int64), } if not inputs_only: name_to_features.update({ "input_mask": tf.io.FixedLenFeature([sequence_length], tf.int64), self.input_type_ids_field: tf.io.FixedLenFeature([sequence_length], tf.int64) }) features = tf.io.parse_example(inputs, name_to_features) features = features_to_int32(features) return self.serve( features[self.input_word_ids_field], input_mask=None if inputs_only else features["input_mask"], input_type_ids=None if inputs_only else features[self.input_type_ids_field]) @tf.function def serve_text_examples(self, inputs) -> Dict[str, tf.Tensor]: name_to_features = {} for text_field in self.params.text_fields: name_to_features[text_field] = tf.io.FixedLenFeature([], tf.string) features = tf.io.parse_example(inputs, name_to_features) segments = [features[x] for x in self.params.text_fields] model_inputs = self._text_processor(segments) if self.params.inputs_only: return self.serve(input_word_ids=model_inputs["input_word_ids"]) return self.serve(**model_inputs) def get_inference_signatures(self, function_keys: Dict[Text, Text]): signatures = {} valid_keys = ("serve", "serve_examples", "serve_text_examples") for func_key, signature_key in function_keys.items(): if func_key not in valid_keys: raise ValueError("Invalid function key for the module: %s with key %s. 
" "Valid keys are: %s" % (self.__class__, func_key, valid_keys)) if func_key == "serve": if self.params.inputs_only: signatures[signature_key] = self.serve.get_concrete_function( input_word_ids=tf.TensorSpec( shape=[None, None], dtype=tf.int32, name="input_word_ids")) else: signatures[signature_key] = self.serve.get_concrete_function( input_word_ids=tf.TensorSpec( shape=[None, None], dtype=tf.int32, name="input_word_ids"), input_mask=tf.TensorSpec( shape=[None, None], dtype=tf.int32, name="input_mask"), input_type_ids=tf.TensorSpec( shape=[None, None], dtype=tf.int32, name="input_type_ids")) if func_key == "serve_examples": signatures[signature_key] = self.serve_examples.get_concrete_function( tf.TensorSpec(shape=[None], dtype=tf.string, name="examples")) if func_key == "serve_text_examples": signatures[ signature_key] = self.serve_text_examples.get_concrete_function( tf.TensorSpec(shape=[None], dtype=tf.string, name="examples")) return signatures class MaskedLM(export_base.ExportModule): """The export module for the Bert Pretrain (MaskedLM) task.""" def __init__(self, params, model: tf.keras.Model, inference_step=None): super().__init__(params, model, inference_step) if params.use_v2_feature_names: self.input_word_ids_field = "input_word_ids" self.input_type_ids_field = "input_type_ids" else: self.input_word_ids_field = "input_ids" self.input_type_ids_field = "segment_ids" @dataclasses.dataclass class Params(base_config.Config): cls_head_name: str = "next_sentence" use_v2_feature_names: bool = True parse_sequence_length: Optional[int] = None max_predictions_per_seq: Optional[int] = None @tf.function def serve(self, input_word_ids, input_mask, input_type_ids, masked_lm_positions) -> Dict[str, tf.Tensor]: inputs = dict( input_word_ids=input_word_ids, input_mask=input_mask, input_type_ids=input_type_ids, masked_lm_positions=masked_lm_positions) outputs = self.inference_step(inputs) return dict(classification=outputs[self.params.cls_head_name]) @tf.function def serve_examples(self, inputs) -> Dict[str, tf.Tensor]: sequence_length = self.params.parse_sequence_length max_predictions_per_seq = self.params.max_predictions_per_seq name_to_features = { self.input_word_ids_field: tf.io.FixedLenFeature([sequence_length], tf.int64), "input_mask": tf.io.FixedLenFeature([sequence_length], tf.int64), self.input_type_ids_field: tf.io.FixedLenFeature([sequence_length], tf.int64), "masked_lm_positions": tf.io.FixedLenFeature([max_predictions_per_seq], tf.int64) } features = tf.io.parse_example(inputs, name_to_features) features = features_to_int32(features) return self.serve( input_word_ids=features[self.input_word_ids_field], input_mask=features["input_mask"], input_type_ids=features[self.input_word_ids_field], masked_lm_positions=features["masked_lm_positions"]) def get_inference_signatures(self, function_keys: Dict[Text, Text]): signatures = {} valid_keys = ("serve", "serve_examples") for func_key, signature_key in function_keys.items(): if func_key not in valid_keys: raise ValueError("Invalid function key for the module: %s with key %s. 
" "Valid keys are: %s" % (self.__class__, func_key, valid_keys)) if func_key == "serve": signatures[signature_key] = self.serve.get_concrete_function( input_word_ids=tf.TensorSpec( shape=[None, None], dtype=tf.int32, name="input_word_ids"), input_mask=tf.TensorSpec( shape=[None, None], dtype=tf.int32, name="input_mask"), input_type_ids=tf.TensorSpec( shape=[None, None], dtype=tf.int32, name="input_type_ids"), masked_lm_positions=tf.TensorSpec( shape=[None, None], dtype=tf.int32, name="masked_lm_positions")) if func_key == "serve_examples": signatures[signature_key] = self.serve_examples.get_concrete_function( tf.TensorSpec(shape=[None], dtype=tf.string, name="examples")) return signatures class QuestionAnswering(export_base.ExportModule): """The export module for the question answering task.""" @dataclasses.dataclass class Params(base_config.Config): parse_sequence_length: Optional[int] = None use_v2_feature_names: bool = True def __init__(self, params, model: tf.keras.Model, inference_step=None): super().__init__(params, model, inference_step) if params.use_v2_feature_names: self.input_word_ids_field = "input_word_ids" self.input_type_ids_field = "input_type_ids" else: self.input_word_ids_field = "input_ids" self.input_type_ids_field = "segment_ids" @tf.function def serve(self, input_word_ids, input_mask=None, input_type_ids=None) -> Dict[str, tf.Tensor]: if input_mask is None: # The mask has 1 for real tokens and 0 for padding tokens. input_mask = tf.where( tf.equal(input_word_ids, 0), tf.zeros_like(input_word_ids), tf.ones_like(input_word_ids)) inputs = dict( input_word_ids=input_word_ids, input_mask=input_mask, input_type_ids=input_type_ids) outputs = self.inference_step(inputs) return dict(start_logits=outputs[0], end_logits=outputs[1]) @tf.function def serve_examples(self, inputs) -> Dict[str, tf.Tensor]: sequence_length = self.params.parse_sequence_length name_to_features = { self.input_word_ids_field: tf.io.FixedLenFeature([sequence_length], tf.int64), "input_mask": tf.io.FixedLenFeature([sequence_length], tf.int64), self.input_type_ids_field: tf.io.FixedLenFeature([sequence_length], tf.int64) } features = tf.io.parse_example(inputs, name_to_features) features = features_to_int32(features) return self.serve( input_word_ids=features[self.input_word_ids_field], input_mask=features["input_mask"], input_type_ids=features[self.input_type_ids_field]) def get_inference_signatures(self, function_keys: Dict[Text, Text]): signatures = {} valid_keys = ("serve", "serve_examples") for func_key, signature_key in function_keys.items(): if func_key not in valid_keys: raise ValueError("Invalid function key for the module: %s with key %s. 
" "Valid keys are: %s" % (self.__class__, func_key, valid_keys)) if func_key == "serve": signatures[signature_key] = self.serve.get_concrete_function( input_word_ids=tf.TensorSpec( shape=[None, None], dtype=tf.int32, name="input_word_ids"), input_mask=tf.TensorSpec( shape=[None, None], dtype=tf.int32, name="input_mask"), input_type_ids=tf.TensorSpec( shape=[None, None], dtype=tf.int32, name="input_type_ids")) if func_key == "serve_examples": signatures[signature_key] = self.serve_examples.get_concrete_function( tf.TensorSpec(shape=[None], dtype=tf.string, name="examples")) return signatures class Tagging(export_base.ExportModule): """The export module for the tagging task.""" @dataclasses.dataclass class Params(base_config.Config): parse_sequence_length: Optional[int] = None use_v2_feature_names: bool = True output_encoder_outputs: bool = False def __init__(self, params, model: tf.keras.Model, inference_step=None): super().__init__(params, model, inference_step) if params.use_v2_feature_names: self.input_word_ids_field = "input_word_ids" self.input_type_ids_field = "input_type_ids" else: self.input_word_ids_field = "input_ids" self.input_type_ids_field = "segment_ids" @tf.function def serve(self, input_word_ids, input_mask, input_type_ids) -> Dict[str, tf.Tensor]: inputs = dict( input_word_ids=input_word_ids, input_mask=input_mask, input_type_ids=input_type_ids) outputs = self.inference_step(inputs) if self.params.output_encoder_outputs: return dict( logits=outputs["logits"], encoder_outputs=outputs["encoder_outputs"]) else: return dict(logits=outputs["logits"]) @tf.function def serve_examples(self, inputs) -> Dict[str, tf.Tensor]: sequence_length = self.params.parse_sequence_length name_to_features = { self.input_word_ids_field: tf.io.FixedLenFeature([sequence_length], tf.int64), "input_mask": tf.io.FixedLenFeature([sequence_length], tf.int64), self.input_type_ids_field: tf.io.FixedLenFeature([sequence_length], tf.int64) } features = tf.io.parse_example(inputs, name_to_features) features = features_to_int32(features) return self.serve( input_word_ids=features[self.input_word_ids_field], input_mask=features["input_mask"], input_type_ids=features[self.input_type_ids_field]) def get_inference_signatures(self, function_keys: Dict[Text, Text]): signatures = {} valid_keys = ("serve", "serve_examples") for func_key, signature_key in function_keys.items(): if func_key not in valid_keys: raise ValueError("Invalid function key for the module: %s with key %s. " "Valid keys are: %s" % (self.__class__, func_key, valid_keys)) if func_key == "serve": signatures[signature_key] = self.serve.get_concrete_function( input_word_ids=tf.TensorSpec( shape=[None, None], dtype=tf.int32, name=self.input_word_ids_field), input_mask=tf.TensorSpec( shape=[None, None], dtype=tf.int32, name="input_mask"), input_type_ids=tf.TensorSpec( shape=[None, None], dtype=tf.int32, name=self.input_type_ids_field)) if func_key == "serve_examples": signatures[signature_key] = self.serve_examples.get_concrete_function( tf.TensorSpec(shape=[None], dtype=tf.string, name="examples")) return signatures class Translation(export_base.ExportModule): """The export module for the translation task.""" @dataclasses.dataclass class Params(base_config.Config): sentencepiece_model_path: str = "" # Needs to be specified if padded_decode is True/on TPUs. 
batch_size: Optional[int] = None def __init__(self, params, model: tf.keras.Model, inference_step=None): super().__init__(params, model, inference_step) self._sp_tokenizer = tf_text.SentencepieceTokenizer( model=tf.io.gfile.GFile(params.sentencepiece_model_path, "rb").read(), add_eos=True) try: empty_str_tokenized = self._sp_tokenizer.tokenize("").numpy() except tf.errors.InternalError: raise ValueError( "EOS token not in tokenizer vocab." "Please make sure the tokenizer generates a single token for an " "empty string.") self._eos_id = empty_str_tokenized.item() self._batch_size = params.batch_size @tf.function def serve(self, inputs) -> Dict[str, tf.Tensor]: return self.inference_step(inputs) @tf.function def serve_text(self, text: tf.Tensor) -> Dict[str, tf.Tensor]: tokenized = self._sp_tokenizer.tokenize(text).to_tensor(0) return self._sp_tokenizer.detokenize( self.serve({"inputs": tokenized})["outputs"]) def get_inference_signatures(self, function_keys: Dict[Text, Text]): signatures = {} valid_keys = ("serve_text") for func_key, signature_key in function_keys.items(): if func_key not in valid_keys: raise ValueError("Invalid function key for the module: %s with key %s. " "Valid keys are: %s" % (self.__class__, func_key, valid_keys)) if func_key == "serve_text": signatures[signature_key] = self.serve_text.get_concrete_function( tf.TensorSpec(shape=[self._batch_size], dtype=tf.string, name="text")) return signatures
18,889
39.976139
80
py
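A minimal sketch of wrapping a classifier with the SentencePrediction export module from the file above and requesting its serving signatures; the shrunken encoder size and the signature key names are placeholder choices:

from official.nlp.configs import encoders
from official.nlp.modeling import models
from official.nlp.serving import serving_modules

# Small encoder + classifier purely for illustration.
encoder = encoders.build_encoder(encoders.EncoderConfig(
    bert=encoders.BertEncoderConfig(hidden_size=128, num_layers=2,
                                    num_attention_heads=2,
                                    intermediate_size=256)))
classifier = models.BertClassifier(network=encoder, num_classes=2)

params = serving_modules.SentencePrediction.Params(
    inputs_only=True, parse_sequence_length=128)
export_module = serving_modules.SentencePrediction(params=params,
                                                   model=classifier)
signatures = export_module.get_inference_signatures(
    {'serve': 'serving_default', 'serve_examples': 'serving_examples'})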
models
models-master/official/nlp/configs/encoders.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Transformer Encoders. Includes configurations and factory methods. """ import dataclasses from typing import Optional, Sequence import gin import tensorflow as tf from official.modeling import hyperparams from official.modeling import tf_utils from official.nlp.modeling import layers from official.nlp.modeling import networks from official.projects.bigbird import encoder as bigbird_encoder @dataclasses.dataclass class BertEncoderConfig(hyperparams.Config): """BERT encoder configuration.""" vocab_size: int = 30522 hidden_size: int = 768 num_layers: int = 12 num_attention_heads: int = 12 hidden_activation: str = "gelu" intermediate_size: int = 3072 dropout_rate: float = 0.1 attention_dropout_rate: float = 0.1 max_position_embeddings: int = 512 type_vocab_size: int = 2 initializer_range: float = 0.02 embedding_size: Optional[int] = None output_range: Optional[int] = None return_all_encoder_outputs: bool = False return_attention_scores: bool = False # Pre/Post-LN Transformer norm_first: bool = False @dataclasses.dataclass class MobileBertEncoderConfig(hyperparams.Config): """MobileBERT encoder configuration. Attributes: word_vocab_size: number of words in the vocabulary. word_embed_size: word embedding size. type_vocab_size: number of word types. max_sequence_length: maximum length of input sequence. num_blocks: number of transformer block in the encoder model. hidden_size: the hidden size for the transformer block. num_attention_heads: number of attention heads in the transformer block. intermediate_size: the size of the "intermediate" (a.k.a., feed forward) layer. hidden_activation: the non-linear activation function to apply to the output of the intermediate/feed-forward layer. hidden_dropout_prob: dropout probability for the hidden layers. attention_probs_dropout_prob: dropout probability of the attention probabilities. intra_bottleneck_size: the size of bottleneck. initializer_range: The stddev of the truncated_normal_initializer for initializing all weight matrices. use_bottleneck_attention: Use attention inputs from the bottleneck transformation. If true, the following `key_query_shared_bottleneck` will be ignored. key_query_shared_bottleneck: whether to share linear transformation for keys and queries. num_feedforward_networks: number of stacked feed-forward networks. normalization_type: the type of normalization_type, only 'no_norm' and 'layer_norm' are supported. 'no_norm' represents the element-wise linear transformation for the student model, as suggested by the original MobileBERT paper. 'layer_norm' is used for the teacher model. classifier_activation: if using the tanh activation for the final representation of the [CLS] token in fine-tuning. 
""" word_vocab_size: int = 30522 word_embed_size: int = 128 type_vocab_size: int = 2 max_sequence_length: int = 512 num_blocks: int = 24 hidden_size: int = 512 num_attention_heads: int = 4 intermediate_size: int = 4096 hidden_activation: str = "gelu" hidden_dropout_prob: float = 0.1 attention_probs_dropout_prob: float = 0.1 intra_bottleneck_size: int = 1024 initializer_range: float = 0.02 use_bottleneck_attention: bool = False key_query_shared_bottleneck: bool = False num_feedforward_networks: int = 1 normalization_type: str = "layer_norm" classifier_activation: bool = True input_mask_dtype: str = "int32" @dataclasses.dataclass class AlbertEncoderConfig(hyperparams.Config): """ALBERT encoder configuration.""" vocab_size: int = 30000 embedding_width: int = 128 hidden_size: int = 768 num_layers: int = 12 num_attention_heads: int = 12 hidden_activation: str = "gelu" intermediate_size: int = 3072 dropout_rate: float = 0.0 attention_dropout_rate: float = 0.0 max_position_embeddings: int = 512 type_vocab_size: int = 2 initializer_range: float = 0.02 @dataclasses.dataclass class BigBirdEncoderConfig(hyperparams.Config): """BigBird encoder configuration.""" vocab_size: int = 50358 hidden_size: int = 768 num_layers: int = 12 num_attention_heads: int = 12 hidden_activation: str = "gelu" intermediate_size: int = 3072 dropout_rate: float = 0.1 attention_dropout_rate: float = 0.1 # Pre/Post-LN Transformer norm_first: bool = False max_position_embeddings: int = 4096 num_rand_blocks: int = 3 block_size: int = 64 type_vocab_size: int = 16 initializer_range: float = 0.02 embedding_width: Optional[int] = None use_gradient_checkpointing: bool = False @dataclasses.dataclass class KernelEncoderConfig(hyperparams.Config): """Linear encoder configuration.""" vocab_size: int = 30522 hidden_size: int = 768 num_layers: int = 12 num_attention_heads: int = 12 hidden_activation: str = "gelu" intermediate_size: int = 3072 dropout_rate: float = 0.1 attention_dropout_rate: float = 0.1 # Pre/Post-LN Transformer norm_first: bool = False max_position_embeddings: int = 512 type_vocab_size: int = 2 initializer_range: float = 0.02 embedding_size: Optional[int] = None feature_transform: str = "exp" num_random_features: int = 256 redraw: bool = False is_short_seq: bool = False begin_kernel: int = 0 scale: Optional[float] = None @dataclasses.dataclass class ReuseEncoderConfig(hyperparams.Config): """Reuse encoder configuration.""" vocab_size: int = 30522 hidden_size: int = 768 num_layers: int = 12 num_attention_heads: int = 12 hidden_activation: str = "gelu" intermediate_size: int = 3072 dropout_rate: float = 0.1 attention_dropout_rate: float = 0.1 max_position_embeddings: int = 512 type_vocab_size: int = 2 initializer_range: float = 0.02 embedding_size: Optional[int] = None output_range: Optional[int] = None return_all_encoder_outputs: bool = False # Pre/Post-LN Transformer norm_first: bool = False # Reuse transformer reuse_attention: int = -1 use_relative_pe: bool = False pe_max_seq_length: int = 512 max_reuse_layer_idx: int = 6 @dataclasses.dataclass class XLNetEncoderConfig(hyperparams.Config): """XLNet encoder configuration.""" vocab_size: int = 32000 num_layers: int = 24 hidden_size: int = 1024 num_attention_heads: int = 16 head_size: int = 64 inner_size: int = 4096 inner_activation: str = "gelu" dropout_rate: float = 0.1 attention_dropout_rate: float = 0.1 attention_type: str = "bi" bi_data: bool = False tie_attention_biases: bool = False memory_length: int = 0 same_length: bool = False clamp_length: int = -1 reuse_length: 
int = 0 use_cls_mask: bool = False embedding_width: int = 1024 initializer_range: float = 0.02 two_stream: bool = False @dataclasses.dataclass class QueryBertConfig(hyperparams.Config): """Query BERT encoder configuration.""" vocab_size: int = 30522 hidden_size: int = 768 num_layers: int = 12 num_attention_heads: int = 12 hidden_activation: str = "gelu" intermediate_size: int = 3072 dropout_rate: float = 0.1 attention_dropout_rate: float = 0.1 max_position_embeddings: int = 512 type_vocab_size: int = 2 initializer_range: float = 0.02 embedding_size: Optional[int] = None output_range: Optional[int] = None return_all_encoder_outputs: bool = False return_attention_scores: bool = False # Pre/Post-LN Transformer norm_first: bool = False @dataclasses.dataclass class FNetEncoderConfig(hyperparams.Config): """FNet encoder configuration.""" vocab_size: int = 30522 hidden_size: int = 768 num_layers: int = 12 num_attention_heads: int = 12 inner_activation: str = "gelu" inner_dim: int = 3072 output_dropout: float = 0.1 attention_dropout: float = 0.1 max_sequence_length: int = 512 type_vocab_size: int = 2 initializer_range: float = 0.02 embedding_width: Optional[int] = None output_range: Optional[int] = None norm_first: bool = False use_fft: bool = False attention_layers: Sequence[int] = () @dataclasses.dataclass class SparseMixerEncoderConfig(hyperparams.Config): """SparseMixer encoder configuration.""" vocab_size: int = 30522 hidden_size: int = 768 num_layers: int = 14 moe_layers: Sequence[int] = (5, 6, 7, 8) attention_layers: Sequence[int] = (10, 11, 12, 13) num_experts: int = 16 train_capacity_factor: float = 1. eval_capacity_factor: float = 1. examples_per_group: float = 1. use_fft: bool = False num_attention_heads: int = 8 max_sequence_length: int = 512 type_vocab_size: int = 2 inner_dim: int = 3072 inner_activation: str = "gelu" output_dropout: float = 0.1 attention_dropout: float = 0.1 initializer_range: float = 0.02 output_range: Optional[int] = None embedding_width: Optional[int] = None norm_first: bool = False @dataclasses.dataclass class EncoderConfig(hyperparams.OneOfConfig): """Encoder configuration.""" type: Optional[str] = "bert" albert: AlbertEncoderConfig = dataclasses.field( default_factory=AlbertEncoderConfig ) bert: BertEncoderConfig = dataclasses.field(default_factory=BertEncoderConfig) bert_v2: BertEncoderConfig = dataclasses.field( default_factory=BertEncoderConfig ) bigbird: BigBirdEncoderConfig = dataclasses.field( default_factory=BigBirdEncoderConfig ) kernel: KernelEncoderConfig = dataclasses.field( default_factory=KernelEncoderConfig ) mobilebert: MobileBertEncoderConfig = dataclasses.field( default_factory=MobileBertEncoderConfig ) reuse: ReuseEncoderConfig = dataclasses.field( default_factory=ReuseEncoderConfig ) xlnet: XLNetEncoderConfig = dataclasses.field( default_factory=XLNetEncoderConfig ) query_bert: QueryBertConfig = dataclasses.field( default_factory=QueryBertConfig ) fnet: FNetEncoderConfig = dataclasses.field(default_factory=FNetEncoderConfig) sparse_mixer: SparseMixerEncoderConfig = dataclasses.field( default_factory=SparseMixerEncoderConfig ) # If `any` is used, the encoder building relies on any.BUILDER. any: hyperparams.Config = dataclasses.field( default_factory=hyperparams.Config ) @gin.configurable def build_encoder(config: EncoderConfig, embedding_layer: Optional[tf.keras.layers.Layer] = None, encoder_cls=None, bypass_config: bool = False): """Instantiate a Transformer encoder network from EncoderConfig. 
Args: config: the one-of encoder config, which provides encoder parameters of a chosen encoder. embedding_layer: an external embedding layer passed to the encoder. encoder_cls: an external encoder cls not included in the supported encoders, usually used by gin.configurable. bypass_config: whether to ignore config instance to create the object with `encoder_cls`. Returns: An encoder instance. """ if bypass_config: return encoder_cls() encoder_type = config.type encoder_cfg = config.get() if encoder_cls and encoder_cls.__name__ == "EncoderScaffold": embedding_cfg = dict( vocab_size=encoder_cfg.vocab_size, type_vocab_size=encoder_cfg.type_vocab_size, hidden_size=encoder_cfg.hidden_size, max_seq_length=encoder_cfg.max_position_embeddings, initializer=tf.keras.initializers.TruncatedNormal( stddev=encoder_cfg.initializer_range), dropout_rate=encoder_cfg.dropout_rate, ) hidden_cfg = dict( num_attention_heads=encoder_cfg.num_attention_heads, intermediate_size=encoder_cfg.intermediate_size, intermediate_activation=tf_utils.get_activation( encoder_cfg.hidden_activation), dropout_rate=encoder_cfg.dropout_rate, attention_dropout_rate=encoder_cfg.attention_dropout_rate, kernel_initializer=tf.keras.initializers.TruncatedNormal( stddev=encoder_cfg.initializer_range), ) kwargs = dict( embedding_cfg=embedding_cfg, hidden_cfg=hidden_cfg, num_hidden_instances=encoder_cfg.num_layers, pooled_output_dim=encoder_cfg.hidden_size, pooler_layer_initializer=tf.keras.initializers.TruncatedNormal( stddev=encoder_cfg.initializer_range), return_all_layer_outputs=encoder_cfg.return_all_encoder_outputs, dict_outputs=True) return encoder_cls(**kwargs) if encoder_type == "any": encoder = encoder_cfg.BUILDER(encoder_cfg) if not isinstance(encoder, (tf.Module, tf.keras.Model, tf.keras.layers.Layer)): raise ValueError("The BUILDER returns an unexpected instance. The " "`build_encoder` should returns a tf.Module, " "tf.keras.Model or tf.keras.layers.Layer. 
However, " f"we get {encoder.__class__}") return encoder if encoder_type == "mobilebert": return networks.MobileBERTEncoder( word_vocab_size=encoder_cfg.word_vocab_size, word_embed_size=encoder_cfg.word_embed_size, type_vocab_size=encoder_cfg.type_vocab_size, max_sequence_length=encoder_cfg.max_sequence_length, num_blocks=encoder_cfg.num_blocks, hidden_size=encoder_cfg.hidden_size, num_attention_heads=encoder_cfg.num_attention_heads, intermediate_size=encoder_cfg.intermediate_size, intermediate_act_fn=encoder_cfg.hidden_activation, hidden_dropout_prob=encoder_cfg.hidden_dropout_prob, attention_probs_dropout_prob=encoder_cfg.attention_probs_dropout_prob, intra_bottleneck_size=encoder_cfg.intra_bottleneck_size, initializer_range=encoder_cfg.initializer_range, use_bottleneck_attention=encoder_cfg.use_bottleneck_attention, key_query_shared_bottleneck=encoder_cfg.key_query_shared_bottleneck, num_feedforward_networks=encoder_cfg.num_feedforward_networks, normalization_type=encoder_cfg.normalization_type, classifier_activation=encoder_cfg.classifier_activation, input_mask_dtype=encoder_cfg.input_mask_dtype) if encoder_type == "albert": return networks.AlbertEncoder( vocab_size=encoder_cfg.vocab_size, embedding_width=encoder_cfg.embedding_width, hidden_size=encoder_cfg.hidden_size, num_layers=encoder_cfg.num_layers, num_attention_heads=encoder_cfg.num_attention_heads, max_sequence_length=encoder_cfg.max_position_embeddings, type_vocab_size=encoder_cfg.type_vocab_size, intermediate_size=encoder_cfg.intermediate_size, activation=tf_utils.get_activation(encoder_cfg.hidden_activation), dropout_rate=encoder_cfg.dropout_rate, attention_dropout_rate=encoder_cfg.attention_dropout_rate, initializer=tf.keras.initializers.TruncatedNormal( stddev=encoder_cfg.initializer_range), dict_outputs=True) if encoder_type == "bigbird": # TODO(frederickliu): Support use_gradient_checkpointing and update # experiments to use the EncoderScaffold only. 
if encoder_cfg.use_gradient_checkpointing: return bigbird_encoder.BigBirdEncoder( vocab_size=encoder_cfg.vocab_size, hidden_size=encoder_cfg.hidden_size, num_layers=encoder_cfg.num_layers, num_attention_heads=encoder_cfg.num_attention_heads, intermediate_size=encoder_cfg.intermediate_size, activation=tf_utils.get_activation(encoder_cfg.hidden_activation), dropout_rate=encoder_cfg.dropout_rate, attention_dropout_rate=encoder_cfg.attention_dropout_rate, num_rand_blocks=encoder_cfg.num_rand_blocks, block_size=encoder_cfg.block_size, max_position_embeddings=encoder_cfg.max_position_embeddings, type_vocab_size=encoder_cfg.type_vocab_size, initializer=tf.keras.initializers.TruncatedNormal( stddev=encoder_cfg.initializer_range), embedding_width=encoder_cfg.embedding_width, use_gradient_checkpointing=encoder_cfg.use_gradient_checkpointing) embedding_cfg = dict( vocab_size=encoder_cfg.vocab_size, type_vocab_size=encoder_cfg.type_vocab_size, hidden_size=encoder_cfg.hidden_size, max_seq_length=encoder_cfg.max_position_embeddings, initializer=tf.keras.initializers.TruncatedNormal( stddev=encoder_cfg.initializer_range), dropout_rate=encoder_cfg.dropout_rate) attention_cfg = dict( num_heads=encoder_cfg.num_attention_heads, key_dim=int(encoder_cfg.hidden_size // encoder_cfg.num_attention_heads), kernel_initializer=tf.keras.initializers.TruncatedNormal( stddev=encoder_cfg.initializer_range), max_rand_mask_length=encoder_cfg.max_position_embeddings, num_rand_blocks=encoder_cfg.num_rand_blocks, from_block_size=encoder_cfg.block_size, to_block_size=encoder_cfg.block_size, ) hidden_cfg = dict( num_attention_heads=encoder_cfg.num_attention_heads, intermediate_size=encoder_cfg.intermediate_size, intermediate_activation=tf_utils.get_activation( encoder_cfg.hidden_activation), dropout_rate=encoder_cfg.dropout_rate, attention_dropout_rate=encoder_cfg.attention_dropout_rate, norm_first=encoder_cfg.norm_first, kernel_initializer=tf.keras.initializers.TruncatedNormal( stddev=encoder_cfg.initializer_range), attention_cls=layers.BigBirdAttention, attention_cfg=attention_cfg) kwargs = dict( embedding_cfg=embedding_cfg, hidden_cls=layers.TransformerScaffold, hidden_cfg=hidden_cfg, num_hidden_instances=encoder_cfg.num_layers, mask_cls=layers.BigBirdMasks, mask_cfg=dict(block_size=encoder_cfg.block_size), pooled_output_dim=encoder_cfg.hidden_size, pooler_layer_initializer=tf.keras.initializers.TruncatedNormal( stddev=encoder_cfg.initializer_range), return_all_layer_outputs=False, dict_outputs=True, layer_idx_as_attention_seed=True) return networks.EncoderScaffold(**kwargs) if encoder_type == "kernel": embedding_cfg = dict( vocab_size=encoder_cfg.vocab_size, type_vocab_size=encoder_cfg.type_vocab_size, hidden_size=encoder_cfg.hidden_size, max_seq_length=encoder_cfg.max_position_embeddings, initializer=tf.keras.initializers.TruncatedNormal( stddev=encoder_cfg.initializer_range), dropout_rate=encoder_cfg.dropout_rate) attention_cfg = dict( num_heads=encoder_cfg.num_attention_heads, key_dim=int(encoder_cfg.hidden_size // encoder_cfg.num_attention_heads), kernel_initializer=tf.keras.initializers.TruncatedNormal( stddev=encoder_cfg.initializer_range), feature_transform=encoder_cfg.feature_transform, num_random_features=encoder_cfg.num_random_features, redraw=encoder_cfg.redraw, is_short_seq=encoder_cfg.is_short_seq, begin_kernel=encoder_cfg.begin_kernel, scale=encoder_cfg.scale, ) hidden_cfg = dict( num_attention_heads=encoder_cfg.num_attention_heads, intermediate_size=encoder_cfg.intermediate_size, 
intermediate_activation=tf_utils.get_activation( encoder_cfg.hidden_activation), dropout_rate=encoder_cfg.dropout_rate, attention_dropout_rate=encoder_cfg.attention_dropout_rate, norm_first=encoder_cfg.norm_first, kernel_initializer=tf.keras.initializers.TruncatedNormal( stddev=encoder_cfg.initializer_range), attention_cls=layers.KernelAttention, attention_cfg=attention_cfg) kwargs = dict( embedding_cfg=embedding_cfg, hidden_cls=layers.TransformerScaffold, hidden_cfg=hidden_cfg, num_hidden_instances=encoder_cfg.num_layers, mask_cls=layers.KernelMask, pooled_output_dim=encoder_cfg.hidden_size, pooler_layer_initializer=tf.keras.initializers.TruncatedNormal( stddev=encoder_cfg.initializer_range), return_all_layer_outputs=False, dict_outputs=True, layer_idx_as_attention_seed=True) return networks.EncoderScaffold(**kwargs) if encoder_type == "xlnet": return networks.XLNetBase( vocab_size=encoder_cfg.vocab_size, num_layers=encoder_cfg.num_layers, hidden_size=encoder_cfg.hidden_size, num_attention_heads=encoder_cfg.num_attention_heads, head_size=encoder_cfg.head_size, inner_size=encoder_cfg.inner_size, dropout_rate=encoder_cfg.dropout_rate, attention_dropout_rate=encoder_cfg.attention_dropout_rate, attention_type=encoder_cfg.attention_type, bi_data=encoder_cfg.bi_data, two_stream=encoder_cfg.two_stream, tie_attention_biases=encoder_cfg.tie_attention_biases, memory_length=encoder_cfg.memory_length, clamp_length=encoder_cfg.clamp_length, reuse_length=encoder_cfg.reuse_length, inner_activation=encoder_cfg.inner_activation, use_cls_mask=encoder_cfg.use_cls_mask, embedding_width=encoder_cfg.embedding_width, initializer=tf.keras.initializers.RandomNormal( stddev=encoder_cfg.initializer_range)) if encoder_type == "reuse": embedding_cfg = dict( vocab_size=encoder_cfg.vocab_size, type_vocab_size=encoder_cfg.type_vocab_size, hidden_size=encoder_cfg.hidden_size, max_seq_length=encoder_cfg.max_position_embeddings, initializer=tf.keras.initializers.TruncatedNormal( stddev=encoder_cfg.initializer_range), dropout_rate=encoder_cfg.dropout_rate) hidden_cfg = dict( num_attention_heads=encoder_cfg.num_attention_heads, inner_dim=encoder_cfg.intermediate_size, inner_activation=tf_utils.get_activation( encoder_cfg.hidden_activation), output_dropout=encoder_cfg.dropout_rate, attention_dropout=encoder_cfg.attention_dropout_rate, norm_first=encoder_cfg.norm_first, kernel_initializer=tf.keras.initializers.TruncatedNormal( stddev=encoder_cfg.initializer_range), reuse_attention=encoder_cfg.reuse_attention, use_relative_pe=encoder_cfg.use_relative_pe, pe_max_seq_length=encoder_cfg.pe_max_seq_length, max_reuse_layer_idx=encoder_cfg.max_reuse_layer_idx) kwargs = dict( embedding_cfg=embedding_cfg, hidden_cls=layers.ReuseTransformer, hidden_cfg=hidden_cfg, num_hidden_instances=encoder_cfg.num_layers, pooled_output_dim=encoder_cfg.hidden_size, pooler_layer_initializer=tf.keras.initializers.TruncatedNormal( stddev=encoder_cfg.initializer_range), return_all_layer_outputs=False, dict_outputs=True, feed_layer_idx=True, recursive=True) return networks.EncoderScaffold(**kwargs) if encoder_type == "query_bert": embedding_layer = layers.FactorizedEmbedding( vocab_size=encoder_cfg.vocab_size, embedding_width=encoder_cfg.embedding_size, output_dim=encoder_cfg.hidden_size, initializer=tf.keras.initializers.TruncatedNormal( stddev=encoder_cfg.initializer_range), name="word_embeddings") return networks.BertEncoderV2( vocab_size=encoder_cfg.vocab_size, hidden_size=encoder_cfg.hidden_size, num_layers=encoder_cfg.num_layers, 
num_attention_heads=encoder_cfg.num_attention_heads, intermediate_size=encoder_cfg.intermediate_size, activation=tf_utils.get_activation(encoder_cfg.hidden_activation), dropout_rate=encoder_cfg.dropout_rate, attention_dropout_rate=encoder_cfg.attention_dropout_rate, max_sequence_length=encoder_cfg.max_position_embeddings, type_vocab_size=encoder_cfg.type_vocab_size, initializer=tf.keras.initializers.TruncatedNormal( stddev=encoder_cfg.initializer_range), output_range=encoder_cfg.output_range, embedding_layer=embedding_layer, return_all_encoder_outputs=encoder_cfg.return_all_encoder_outputs, return_attention_scores=encoder_cfg.return_attention_scores, dict_outputs=True, norm_first=encoder_cfg.norm_first) if encoder_type == "fnet": return networks.FNet( vocab_size=encoder_cfg.vocab_size, hidden_size=encoder_cfg.hidden_size, num_layers=encoder_cfg.num_layers, num_attention_heads=encoder_cfg.num_attention_heads, inner_dim=encoder_cfg.inner_dim, inner_activation=tf_utils.get_activation(encoder_cfg.inner_activation), output_dropout=encoder_cfg.output_dropout, attention_dropout=encoder_cfg.attention_dropout, max_sequence_length=encoder_cfg.max_sequence_length, type_vocab_size=encoder_cfg.type_vocab_size, initializer=tf.keras.initializers.TruncatedNormal( stddev=encoder_cfg.initializer_range), output_range=encoder_cfg.output_range, embedding_width=encoder_cfg.embedding_width, embedding_layer=embedding_layer, norm_first=encoder_cfg.norm_first, use_fft=encoder_cfg.use_fft, attention_layers=encoder_cfg.attention_layers) if encoder_type == "sparse_mixer": return networks.SparseMixer( vocab_size=encoder_cfg.vocab_size, hidden_size=encoder_cfg.hidden_size, num_layers=encoder_cfg.num_layers, moe_layers=encoder_cfg.moe_layers, attention_layers=encoder_cfg.attention_layers, num_experts=encoder_cfg.num_experts, train_capacity_factor=encoder_cfg.train_capacity_factor, eval_capacity_factor=encoder_cfg.eval_capacity_factor, examples_per_group=encoder_cfg.examples_per_group, use_fft=encoder_cfg.use_fft, num_attention_heads=encoder_cfg.num_attention_heads, max_sequence_length=encoder_cfg.max_sequence_length, type_vocab_size=encoder_cfg.type_vocab_size, inner_dim=encoder_cfg.inner_dim, inner_activation=tf_utils.get_activation(encoder_cfg.inner_activation), output_dropout=encoder_cfg.output_dropout, attention_dropout=encoder_cfg.attention_dropout, initializer=tf.keras.initializers.TruncatedNormal( stddev=encoder_cfg.initializer_range), output_range=encoder_cfg.output_range, embedding_width=encoder_cfg.embedding_width, norm_first=encoder_cfg.norm_first, embedding_layer=embedding_layer) bert_encoder_cls = networks.BertEncoder if encoder_type == "bert_v2": bert_encoder_cls = networks.BertEncoderV2 # Uses the default BERTEncoder configuration schema to create the encoder. # If it does not match, please add a switch branch by the encoder type. 
return bert_encoder_cls( vocab_size=encoder_cfg.vocab_size, hidden_size=encoder_cfg.hidden_size, num_layers=encoder_cfg.num_layers, num_attention_heads=encoder_cfg.num_attention_heads, intermediate_size=encoder_cfg.intermediate_size, activation=tf_utils.get_activation(encoder_cfg.hidden_activation), dropout_rate=encoder_cfg.dropout_rate, attention_dropout_rate=encoder_cfg.attention_dropout_rate, max_sequence_length=encoder_cfg.max_position_embeddings, type_vocab_size=encoder_cfg.type_vocab_size, initializer=tf.keras.initializers.TruncatedNormal( stddev=encoder_cfg.initializer_range), output_range=encoder_cfg.output_range, embedding_width=encoder_cfg.embedding_size, embedding_layer=embedding_layer, return_all_encoder_outputs=encoder_cfg.return_all_encoder_outputs, return_attention_scores=encoder_cfg.return_attention_scores, dict_outputs=True, norm_first=encoder_cfg.norm_first)
28,534
39.190141
80
py
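A minimal sketch of the one-of pattern in the file above: pick an encoder type, shrink it for illustration, and build it with build_encoder; the toy batch and sizes below are assumptions:

import tensorflow as tf
from official.nlp.configs import encoders

config = encoders.EncoderConfig(
    type='bert',
    bert=encoders.BertEncoderConfig(hidden_size=64, num_layers=2,
                                    num_attention_heads=2,
                                    intermediate_size=128))
encoder = encoders.build_encoder(config)
outputs = encoder(dict(
    input_word_ids=tf.ones((1, 16), dtype=tf.int32),
    input_mask=tf.ones((1, 16), dtype=tf.int32),
    input_type_ids=tf.zeros((1, 16), dtype=tf.int32)))
print(outputs['pooled_output'].shape)   # (1, 64)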
models
models-master/official/nlp/modeling/__init__.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """NLP Modeling Library. This library provides a set of Keras primitives (`tf.keras.Layer` and `tf.keras.Model`) that can be assembled into transformer-based models. They are flexible, validated, interoperable, and both TF1 and TF2 compatible. """ from official.nlp.modeling import layers from official.nlp.modeling import losses from official.nlp.modeling import models from official.nlp.modeling import networks from official.nlp.modeling import ops
1,062
39.884615
77
py
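A tiny sketch, under assumed toy sizes, of the single namespace that the package __init__ above exposes:

from official.nlp import modeling

encoder = modeling.networks.BertEncoder(vocab_size=100, num_layers=2)
head = modeling.layers.ClassificationHead(inner_dim=64, num_classes=3)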
models
models-master/official/nlp/modeling/networks/classification.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Classification and regression network.""" # pylint: disable=g-classes-have-attributes import collections import tensorflow as tf from tensorflow.python.util import deprecation @tf.keras.utils.register_keras_serializable(package='Text') class Classification(tf.keras.Model): """Classification network head for BERT modeling. This network implements a simple classifier head based on a dense layer. If num_classes is one, it can be considered as a regression problem. *Note* that the network is constructed by [Keras Functional API](https://keras.io/guides/functional_api/). Args: input_width: The innermost dimension of the input tensor to this network. num_classes: The number of classes that this network should classify to. If equal to 1, a regression problem is assumed. activation: The activation, if any, for the dense layer in this network. initializer: The initializer for the dense layer in this network. Defaults to a Glorot uniform initializer. output: The output style for this network. Can be either `logits` or `predictions`. """ @deprecation.deprecated(None, 'Classification as a network is deprecated. ' 'Please use the layers.ClassificationHead instead.') def __init__(self, input_width, num_classes, initializer='glorot_uniform', output='logits', **kwargs): cls_output = tf.keras.layers.Input( shape=(input_width,), name='cls_output', dtype=tf.float32) logits = tf.keras.layers.Dense( num_classes, activation=None, kernel_initializer=initializer, name='predictions/transform/logits')( cls_output) if output == 'logits': output_tensors = logits elif output == 'predictions': policy = tf.keras.mixed_precision.global_policy() if policy.name == 'mixed_bfloat16': # b/158514794: bf16 is not stable with post-softmax cross-entropy. policy = tf.float32 output_tensors = tf.keras.layers.Activation( tf.nn.log_softmax, dtype=policy)( logits) else: raise ValueError( ('Unknown `output` value "%s". `output` can be either "logits" or ' '"predictions"') % output) super().__init__( inputs=[cls_output], outputs=output_tensors, **kwargs) # b/164516224 # Once we've created the network using the Functional API, we call # super().__init__ as though we were invoking the Functional API Model # constructor, resulting in this object having all the properties of a model # created using the Functional API. Once super().__init__ is called, we # can assign attributes to `self` - note that all `self` assignments are # below this line. config_dict = { 'input_width': input_width, 'num_classes': num_classes, 'initializer': initializer, 'output': output, } # We are storing the config dict as a namedtuple here to ensure checkpoint # compatibility with an earlier version of this model which did not track # the config dict attribute. TF does not track immutable attrs which # do not contain Trackables, so by creating a config namedtuple instead of # a dict we avoid tracking it. 
config_cls = collections.namedtuple('Config', config_dict.keys()) self._config = config_cls(**config_dict) self.logits = logits def get_config(self): return dict(self._config._asdict()) @classmethod def from_config(cls, config, custom_objects=None): return cls(**config)
4,222
38.101852
80
py
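A minimal sketch of the deprecated Classification head above acting on a toy batch of pooled [CLS] vectors; the input width and batch are made up:

import numpy as np
from official.nlp.modeling.networks import classification

head = classification.Classification(
    input_width=16, num_classes=3, output='predictions')
log_probs = head(np.zeros((2, 16), dtype=np.float32))  # log-softmax outputs
print(log_probs.shape)   # (2, 3)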
models
models-master/official/nlp/modeling/networks/packed_sequence_embedding.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """An embedding network supporting packed sequences and position ids.""" # pylint: disable=g-classes-have-attributes import collections import tensorflow as tf from official.modeling import tf_utils from official.nlp.modeling import layers @tf.keras.utils.register_keras_serializable(package='Text') class PackedSequenceEmbedding(tf.keras.Model): """An embedding network supporting packed sequences and position ids. This network implements an embedding layer similar to the one described in "BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding" (https://arxiv.org/abs/1810.04805). On top of it, it supports to (1) pack multiple sequences into one sequence and (2) allow additional "position_ids" as input. Args: vocab_size: The size of the token vocabulary. type_vocab_size: The size of the type vocabulary. embedding_width: Width of token embeddings. hidden_size: The output size for this encoder. max_seq_length: The maximum sequence length for this encoder. initializer: The initializer for the embedding portion of this encoder. dropout_rate: The dropout rate to apply before the encoding layers. pack_multiple_sequences: If `True`, we can feed multiple sequences into one sequence for training and inference (they don't impact each other). use_position_id: Whether to expect `position_ids` as an input to the network. If False, the `position_ids` will be inferred: (1) when pack_multiple_sequences is False, we assume the position ids are `0, 1, 2, ..., seq_length - 1`; (2) when `pack_multiple_sequences` is `True`, there may be multiple sub sequences, and for each sub sequence, its position ids start from 0, 1, 2, ... 
""" def __init__(self, vocab_size, type_vocab_size, embedding_width, hidden_size, max_seq_length, initializer, dropout_rate, use_position_id=False, pack_multiple_sequences=False, **kwargs): initializer = tf.keras.initializers.get(initializer) if embedding_width is None: embedding_width = hidden_size config_dict = { 'vocab_size': vocab_size, 'type_vocab_size': type_vocab_size, 'embedding_width': embedding_width, 'hidden_size': hidden_size, 'max_seq_length': max_seq_length, 'initializer': tf.keras.initializers.serialize(initializer), 'dropout_rate': dropout_rate, 'use_position_id': use_position_id, 'pack_multiple_sequences': pack_multiple_sequences, } word_ids = tf.keras.layers.Input( shape=(None,), dtype=tf.int32, name='input_word_ids') mask = tf.keras.layers.Input( shape=(None,), dtype=tf.int32, name='input_mask') type_ids = tf.keras.layers.Input( shape=(None,), dtype=tf.int32, name='input_type_ids') inputs = [word_ids, mask, type_ids] if use_position_id: position_ids = tf.keras.layers.Input( shape=(None,), dtype=tf.int32, name='position_ids') inputs.append(position_ids) else: position_ids = None if pack_multiple_sequences: sub_seq_mask = PackedSequenceMask()(word_ids) else: sub_seq_mask = None embedding_layer = layers.OnDeviceEmbedding( vocab_size=vocab_size, embedding_width=embedding_width, initializer=tf_utils.clone_initializer(initializer), name='word_embeddings') word_embeddings = embedding_layer(word_ids) # Always uses dynamic slicing for simplicity. position_embedding_layer = PositionEmbeddingWithSubSeqMask( initializer=tf_utils.clone_initializer(initializer), use_dynamic_slicing=True, max_sequence_length=max_seq_length, name='position_embedding') position_embeddings = position_embedding_layer( word_embeddings, position_ids, sub_seq_mask) type_embeddings = ( layers.OnDeviceEmbedding( vocab_size=type_vocab_size, embedding_width=embedding_width, initializer=tf_utils.clone_initializer(initializer), use_one_hot=True, name='type_embeddings')(type_ids)) embeddings = tf.keras.layers.Add()( [word_embeddings, position_embeddings, type_embeddings]) embeddings = tf.keras.layers.LayerNormalization( name='embeddings/layer_norm', axis=-1, epsilon=1e-12, dtype=tf.float32)( embeddings) embeddings = tf.keras.layers.Dropout( rate=dropout_rate, dtype=tf.float32)( embeddings) if embedding_width != hidden_size: embeddings = tf.keras.layers.EinsumDense( '...x,xy->...y', output_shape=hidden_size, bias_axes=None, kernel_initializer=tf_utils.clone_initializer(initializer), name='embedding_projection')( embeddings) attention_mask = layers.SelfAttentionMask()(embeddings, mask) if sub_seq_mask is not None: attention_mask = tf.keras.layers.Lambda( lambda x: x[0] * tf.cast(x[1], x[0].dtype))( [attention_mask, sub_seq_mask]) outputs = [embeddings, attention_mask] super().__init__( inputs=inputs, outputs=outputs, **kwargs) # TF does not track immutable attrs which do not contain Trackables, # so by creating a config namedtuple instead of a dict we avoid tracking it. 
config_cls = collections.namedtuple('Config', config_dict.keys()) self._config = config_cls(**config_dict) self._embedding_layer = embedding_layer self._position_embedding_layer = position_embedding_layer def get_embedding_table(self): return self._embedding_layer.embeddings def get_config(self): return dict(self._config._asdict()) @classmethod def from_config(cls, config, custom_objects=None): return cls(**config) @tf.keras.utils.register_keras_serializable(package='Text') class PackedSequenceMask(tf.keras.layers.Layer): """A layer to create a mask to indicate multiple sub sequences.""" def call(self, input_ids): """Implements call() for the layer. Args: input_ids: int32 Tensor of shape [batch_size, seq_length]. Returns: boolean Tensor of shape [batch_size, seq_length, seq_length]. [x, y, z] is True if for x'th instance in a batch, y'th token and z'th token are from the same sub sequence. """ # Suppose # - the first token in the parent sequence is [CLS]. # - every sequence starts from [CLS]. # - every sequence only contains one [CLS]. seq_start_token = input_ids[:, 0:1] seq_start_loc = tf.cast(tf.equal(input_ids, seq_start_token), tf.int32) # Set different ids for different sub sequences. seq_ids = tf.expand_dims(tf.cumsum(seq_start_loc, -1), -1) return tf.equal(seq_ids, tf.transpose(seq_ids, [0, 2, 1])) @tf.keras.utils.register_keras_serializable(package='Text') class PositionEmbeddingWithSubSeqMask(tf.keras.layers.Layer): """Creates a positional embedding with sub-sequence masking. This layer creates a positional embedding as described in "BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding" (https://arxiv.org/abs/1810.04805). On top of it, it supports `position_ids` and `sub_sequence_mask` tensors. This layer can be set up to either create a statically shaped slice or a dynamically shaped slice. If `use_dynamic_slicing` is True, the input tensor can have a dynamic 1st dimension, while if `use_dynamic_slicing` is False the input size must be fixed. Args: initializer: The initializer to use for the embedding weights. Defaults to "glorot_uniform". use_dynamic_slicing: Whether to use the dynamic slicing path. max_sequence_length: The maximum size of the dynamic sequence. Only applicable if `use_dynamic_slicing` is True. """ def __init__(self, initializer='glorot_uniform', use_dynamic_slicing=False, max_sequence_length=None, **kwargs): # We need to have a default dtype of float32, since the inputs (which Keras # usually uses to infer the dtype) will always be int32. if 'dtype' not in kwargs: kwargs['dtype'] = 'float32' super().__init__(**kwargs) if use_dynamic_slicing and max_sequence_length is None: raise ValueError( 'If `use_dynamic_slicing` is True, `max_sequence_length` must be set.' 
) self._max_sequence_length = max_sequence_length self._initializer = tf.keras.initializers.get(initializer) self._use_dynamic_slicing = use_dynamic_slicing def get_config(self): config = { 'max_sequence_length': self._max_sequence_length, 'initializer': tf.keras.initializers.serialize(self._initializer), 'use_dynamic_slicing': self._use_dynamic_slicing, } base_config = super().get_config() return dict(list(base_config.items()) + list(config.items())) def build(self, input_shape): """Implements build() for the layer.""" dimension_list = input_shape.as_list() if len(dimension_list) != 3: raise ValueError('PositionEmbedding expects a 3-dimensional input tensor ' 'of shape [batch, sequence, width]') seq_length = dimension_list[1] width = dimension_list[2] # If we are not using dynamic slicing, we must assume that the sequence # length is fixed and max_sequence_length should not be specified. if not self._use_dynamic_slicing: if seq_length is None: raise ValueError( 'PositionEmbedding must have `use_dynamic_slicing` set ' 'to True (and max_sequence_length set) when the ' 'sequence (1st) dimension of the input is None.') if self._max_sequence_length is not None: raise ValueError( 'When `use_dynamic_slicing` is False, max_sequence_length should ' 'not be specified and we ought to use seq_length to get the ' 'variable shape.') if self._max_sequence_length is not None: weight_sequence_length = self._max_sequence_length else: weight_sequence_length = seq_length self._position_embeddings = self.add_weight( 'embeddings', shape=[weight_sequence_length, width], initializer=self._initializer) super().build(input_shape) def call(self, inputs, position_ids=None, sub_sequence_mask=None): """Implements call() for the layer. When `position_ids` is specified, it will return the position embeddings corresponding to this `position_ids`; otherwise, `position_ids` will be inferred in the following way: (1) When `sub_sequence_mask` is None, we assume the position ids are 0, 1, 2, ..., seq_length - 1. (2) When `sub_sequence_mask` is specified, there may be multiple sub sequences, and for each sub sequence, its position ids start from 0, 1, 2, ... Args: inputs: Word embeddings in shape [batch, seq_length, embedding_dim]. position_ids: An optional int32 tensor in shape [batch, seq_length]. sub_sequence_mask: An optional bool tensor in shape [batch, seq_length, seq_length]. [x, y, z] is True if for x'th instance in a batch, y'th token and z'th token are from the same sub sequence. Returns: The position embeddings in shape [batch, seq_length, embedding_dim]. """ input_shape = tf_utils.get_shape_list(inputs, expected_rank=3) if self._use_dynamic_slicing: position_embeddings = self._position_embeddings[:input_shape[1], :] else: position_embeddings = self._position_embeddings if position_ids is not None: return tf.gather(position_embeddings, position_ids) if sub_sequence_mask is None: return tf.broadcast_to(position_embeddings, input_shape) else: sub_sequence_mask = tf.cast(sub_sequence_mask, tf.int32) # For each sub sequence, its position ids start from 0, 1, 2, ... position_ids = tf.linalg.diag_part(tf.cumsum(sub_sequence_mask, -1)) - 1 return tf.gather(position_embeddings, position_ids)
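The sub-sequence bookkeeping above (PackedSequenceMask plus the per-sub-sequence position ids computed in PositionEmbeddingWithSubSeqMask) can be traced with a few plain TensorFlow ops. The snippet below is an illustrative sketch, not part of the library; the token id 101 standing in for [CLS] and the toy batch are assumptions made only for this example.

import tensorflow as tf

# One row packing three sub sequences; 101 plays the role of [CLS] here.
input_ids = tf.constant([[101, 7, 8, 101, 9, 101, 4, 5]])
seq_start = tf.cast(tf.equal(input_ids, input_ids[:, 0:1]), tf.int32)
seq_ids = tf.expand_dims(tf.cumsum(seq_start, -1), -1)        # sub-sequence ids
sub_seq_mask = tf.equal(seq_ids, tf.transpose(seq_ids, [0, 2, 1]))
# Position ids restart from 0 at every sub-sequence boundary.
position_ids = tf.linalg.diag_part(
    tf.cumsum(tf.cast(sub_seq_mask, tf.int32), -1)) - 1
print(position_ids.numpy())  # [[0 1 2 0 1 0 1 2]]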
12,800
39.381703
80
py
models
models-master/official/nlp/modeling/networks/funnel_transformer_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for transformer-based bert encoder network.""" from absl.testing import parameterized import numpy as np import tensorflow as tf from official.nlp.modeling.networks import funnel_transformer class SingleLayerModel(tf.keras.Model): def __init__(self, layer): super().__init__() self.layer = layer def call(self, inputs): return self.layer(inputs) class FunnelTransformerEncoderTest(parameterized.TestCase, tf.test.TestCase): def tearDown(self): super(FunnelTransformerEncoderTest, self).tearDown() tf.keras.mixed_precision.set_global_policy("float32") @parameterized.named_parameters( ("mix_truncated_avg_rezero", "mixed_float16", tf.float16, "truncated_avg", "ReZeroTransformer"), ("float32_truncated_avg_rezero", "float32", tf.float32, "truncated_avg", "ReZeroTransformer"), ("mix_truncated_avg", "mixed_float16", tf.float16, "truncated_avg", "TransformerEncoderBlock"), ("float32_truncated_avg", "float32", tf.float32, "truncated_avg", "TransformerEncoderBlock"), ("mix_max", "mixed_float16", tf.float16, "max", "TransformerEncoderBlock"), ("float32_max", "float32", tf.float32, "max", "TransformerEncoderBlock"), ("mix_avg", "mixed_float16", tf.float16, "avg", "TransformerEncoderBlock"), ("float32_avg", "float32", tf.float32, "avg", "TransformerEncoderBlock")) def test_network_creation(self, policy, pooled_dtype, pool_type, transformer_cls): tf.keras.mixed_precision.set_global_policy(policy) hidden_size = 32 sequence_length = 21 pool_stride = 2 num_layers = 3 # Create a small FunnelTransformerEncoder for testing. test_network = funnel_transformer.FunnelTransformerEncoder( vocab_size=100, hidden_size=hidden_size, num_attention_heads=2, num_layers=num_layers, pool_stride=pool_stride, pool_type=pool_type, max_sequence_length=sequence_length, unpool_length=0, transformer_cls=transformer_cls) # Create the inputs (note that the first dimension is implicit). word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) dict_outputs = test_network([word_ids, mask, type_ids]) data = dict_outputs["sequence_output"] pooled = dict_outputs["pooled_output"] self.assertIsInstance(test_network.transformer_layers, list) self.assertLen(test_network.transformer_layers, num_layers) self.assertIsInstance(test_network.pooler_layer, tf.keras.layers.Dense) # Stride=2 compresses sequence length to half the size at each layer. # For pool_type = max or avg, # this configuration gives each layer of seq length: 21->11->6->3. # For pool_type = truncated_avg, # seq length: 21->10->5->2. 
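    # Equivalently: with stride s a length l maps to ceil(l / s) for "max" and
    # "avg" pooling, but to floor(l / s) for "truncated_avg", which is why the
    # expected lengths asserted below depend on pool_type.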
if pool_type in ["max", "avg"]: expected_data_shape = [None, 3, hidden_size] else: expected_data_shape = [None, 2, hidden_size] expected_pooled_shape = [None, hidden_size] self.assertAllEqual(expected_data_shape, data.shape.as_list()) self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list()) # The default output dtype is float32. # If float_dtype is set to float16, the data output is float32 (from a layer # norm) and pool output should be float16. self.assertAllEqual(tf.float32, data.dtype) self.assertAllEqual(pooled_dtype, pooled.dtype) @parameterized.named_parameters( ("append_dense_inputs", True), ("dense_inputs_at_sequence_begin", False), ) def test_network_creation_dense(self, append_dense_inputs): tf.keras.mixed_precision.set_global_policy("mixed_float16") pool_type = "avg" hidden_size = 32 sequence_length = 21 dense_sequence_length = 3 pool_stride = 2 num_layers = 3 # Create a small FunnelTransformerEncoder for testing. test_network = funnel_transformer.FunnelTransformerEncoder( vocab_size=100, hidden_size=hidden_size, num_attention_heads=2, num_layers=num_layers, pool_stride=pool_stride, pool_type=pool_type, max_sequence_length=sequence_length + dense_sequence_length, unpool_length=0, transformer_cls="TransformerEncoderBlock", append_dense_inputs=append_dense_inputs) # Create the inputs (note that the first dimension is implicit). word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) dense_inputs = tf.keras.Input( shape=(dense_sequence_length, hidden_size), dtype=tf.float32) dense_mask = tf.keras.Input(shape=(dense_sequence_length,), dtype=tf.int32) dense_type_ids = tf.keras.Input( shape=(dense_sequence_length,), dtype=tf.int32) dict_outputs = test_network( [word_ids, mask, type_ids, dense_inputs, dense_mask, dense_type_ids]) data = dict_outputs["sequence_output"] pooled = dict_outputs["pooled_output"] self.assertIsInstance(test_network.transformer_layers, list) self.assertLen(test_network.transformer_layers, num_layers) self.assertIsInstance(test_network.pooler_layer, tf.keras.layers.Dense) # Stride=2 compresses sequence length to half the size at each layer. # For pool_type = max or avg, # this configuration gives each layer of seq length: 24->12->6->3. 
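    # (The starting length of 24 is sequence_length=21 word tokens plus the
    # dense_sequence_length=3 dense embeddings appended in this test.)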
expected_data_shape = [None, 3, hidden_size] expected_pooled_shape = [None, hidden_size] self.assertAllEqual(expected_data_shape, data.shape.as_list()) self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list()) @parameterized.named_parameters( ("frac_pool_rezero", "ReZeroTransformer"), ("frac_pool_vanilla", "TransformerEncoderBlock"), ) def test_fractional_pooling(self, transformer_cls): hidden_size = 16 sequence_length = 32 pool_strides = [1.33333, 3, 2, 1] num_layers = 4 pool_type = "truncated_avg" test_network = funnel_transformer.FunnelTransformerEncoder( vocab_size=100, hidden_size=hidden_size, num_attention_heads=2, num_layers=num_layers, pool_stride=pool_strides, pool_type=pool_type, max_sequence_length=sequence_length, unpool_length=0, transformer_cls=transformer_cls) word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) dict_outputs = test_network([word_ids, mask, type_ids]) data = dict_outputs["sequence_output"] expected_data_shape = [None, 4, hidden_size] self.assertAllEqual(expected_data_shape, data.shape.as_list()) def test_invalid_stride_and_num_layers(self): hidden_size = 32 num_layers = 3 pool_stride = [2, 2] unpool_length = 1 with self.assertRaisesRegex(ValueError, "pool_stride and num_layers are not equal"): _ = funnel_transformer.FunnelTransformerEncoder( vocab_size=100, hidden_size=hidden_size, num_attention_heads=2, num_layers=num_layers, pool_stride=pool_stride, unpool_length=unpool_length) @parameterized.named_parameters( ("no_stride_no_unpool", 1, 0), ("stride_list_with_unpool", [2, 3, 4], 1), ("large_stride_with_unpool", 3, 1), ("large_stride_with_large_unpool", 5, 10), ("no_stride_with_unpool", 1, 1), ) def test_all_encoder_outputs_network_creation(self, pool_stride, unpool_length): hidden_size = 32 sequence_length = 21 num_layers = 3 # Create a small FunnelTransformerEncoder for testing. test_network = funnel_transformer.FunnelTransformerEncoder( vocab_size=100, hidden_size=hidden_size, num_attention_heads=2, num_layers=num_layers, pool_stride=pool_stride, unpool_length=unpool_length) # Create the inputs (note that the first dimension is implicit). word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) dict_outputs = test_network([word_ids, mask, type_ids]) all_encoder_outputs = dict_outputs["encoder_outputs"] pooled = dict_outputs["pooled_output"] expected_data_shape = [None, sequence_length, hidden_size] expected_pooled_shape = [None, hidden_size] self.assertLen(all_encoder_outputs, num_layers) if isinstance(pool_stride, int): pool_stride = [pool_stride] * num_layers for layer_pool_stride, data in zip(pool_stride, all_encoder_outputs): expected_data_shape[1] = unpool_length + ( expected_data_shape[1] + layer_pool_stride - 1 - unpool_length) // layer_pool_stride self.assertAllEqual(expected_data_shape, data.shape.as_list()) self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list()) # The default output dtype is float32. 
self.assertAllEqual(tf.float32, all_encoder_outputs[-1].dtype) self.assertAllEqual(tf.float32, pooled.dtype) @parameterized.named_parameters( ("all_sequence", None, 3, 0), ("output_range", 1, 1, 0), ("all_sequence_with_unpool", None, 4, 1), ("output_range_with_unpool", 1, 1, 1), ("output_range_with_large_unpool", 1, 1, 2), ) def test_network_invocation(self, output_range, out_seq_len, unpool_length): hidden_size = 32 sequence_length = 21 vocab_size = 57 num_types = 7 pool_stride = 2 # Create a small FunnelTransformerEncoder for testing. test_network = funnel_transformer.FunnelTransformerEncoder( vocab_size=vocab_size, hidden_size=hidden_size, num_attention_heads=2, num_layers=3, type_vocab_size=num_types, pool_stride=pool_stride, unpool_length=unpool_length) # Create the inputs (note that the first dimension is implicit). word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) dict_outputs = test_network([word_ids, mask, type_ids], output_range=output_range) data = dict_outputs["sequence_output"] pooled = dict_outputs["pooled_output"] # Create a model based off of this network: model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled]) # Invoke the model. We can't validate the output data here (the model is too # complex) but this will catch structural runtime errors. batch_size = 3 word_id_data = np.random.randint( vocab_size, size=(batch_size, sequence_length)) mask_data = np.random.randint(2, size=(batch_size, sequence_length)) type_id_data = np.random.randint( num_types, size=(batch_size, sequence_length)) outputs = model.predict([word_id_data, mask_data, type_id_data]) self.assertEqual(outputs[0].shape[1], out_seq_len) # output_range # Creates a FunnelTransformerEncoder with max_sequence_length != # sequence_length max_sequence_length = 128 test_network = funnel_transformer.FunnelTransformerEncoder( vocab_size=vocab_size, hidden_size=hidden_size, max_sequence_length=max_sequence_length, num_attention_heads=2, num_layers=3, type_vocab_size=num_types, pool_stride=pool_stride) dict_outputs = test_network([word_ids, mask, type_ids]) data = dict_outputs["sequence_output"] pooled = dict_outputs["pooled_output"] model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled]) outputs = model.predict([word_id_data, mask_data, type_id_data]) self.assertEqual(outputs[0].shape[1], 3) # Creates a FunnelTransformerEncoder with embedding_width != hidden_size test_network = funnel_transformer.FunnelTransformerEncoder( vocab_size=vocab_size, hidden_size=hidden_size, max_sequence_length=max_sequence_length, num_attention_heads=2, num_layers=3, type_vocab_size=num_types, embedding_width=16, pool_stride=pool_stride) dict_outputs = test_network([word_ids, mask, type_ids]) data = dict_outputs["sequence_output"] pooled = dict_outputs["pooled_output"] model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled]) outputs = model.predict([word_id_data, mask_data, type_id_data]) self.assertEqual(outputs[0].shape[-1], hidden_size) self.assertTrue(hasattr(test_network, "_embedding_projection")) def test_embeddings_as_inputs(self): hidden_size = 32 sequence_length = 21 # Create a small BertEncoder for testing. test_network = funnel_transformer.FunnelTransformerEncoder( vocab_size=100, hidden_size=hidden_size, num_attention_heads=2, num_layers=3, pool_stride=2, ) # Create the inputs (note that the first dimension is implicit). 
word_ids = tf.keras.Input(shape=(sequence_length), dtype=tf.int32) mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) test_network.build( dict(input_word_ids=word_ids, input_mask=mask, input_type_ids=type_ids) ) embeddings = test_network.get_embedding_layer()(word_ids) # Calls with the embeddings. dict_outputs = test_network( dict( input_word_embeddings=embeddings, input_mask=mask, input_type_ids=type_ids, ) ) all_encoder_outputs = dict_outputs["encoder_outputs"] pooled = dict_outputs["pooled_output"] expected_pooled_shape = [None, hidden_size] self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list()) # The default output dtype is float32. self.assertAllEqual(tf.float32, all_encoder_outputs[-1].dtype) self.assertAllEqual(tf.float32, pooled.dtype) def test_serialize_deserialize(self): # Create a network object that sets all of its config options. kwargs = dict( vocab_size=100, hidden_size=32, num_layers=3, num_attention_heads=2, max_sequence_length=21, type_vocab_size=12, inner_dim=1223, inner_activation="relu", output_dropout=0.05, attention_dropout=0.22, initializer="glorot_uniform", output_range=-1, embedding_width=16, embedding_layer=None, norm_first=False, pool_type="max", pool_stride=2, unpool_length=0, transformer_cls="TransformerEncoderBlock") network = funnel_transformer.FunnelTransformerEncoder(**kwargs) expected_config = dict(kwargs) expected_config["inner_activation"] = tf.keras.activations.serialize( tf.keras.activations.get(expected_config["inner_activation"])) expected_config["initializer"] = tf.keras.initializers.serialize( tf.keras.initializers.get(expected_config["initializer"])) self.assertEqual(network.get_config(), expected_config) # Create another network object from the first object's config. new_network = funnel_transformer.FunnelTransformerEncoder.from_config( network.get_config()) # If the serialization was successful, the new config should match the old. self.assertAllEqual(network.get_config(), new_network.get_config()) # Tests model saving/loading. model_path = self.get_temp_dir() + "/model" network_wrapper = SingleLayerModel(network) # One forward-path to ensure input_shape. batch_size = 3 sequence_length = 21 vocab_size = 100 num_types = 12 word_id_data = np.random.randint( vocab_size, size=(batch_size, sequence_length)) mask_data = np.random.randint(2, size=(batch_size, sequence_length)) type_id_data = np.random.randint( num_types, size=(batch_size, sequence_length)) _ = network_wrapper.predict([word_id_data, mask_data, type_id_data]) network_wrapper.save(model_path) _ = tf.keras.models.load_model(model_path) if __name__ == "__main__": tf.test.main()
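The shrinking sequence lengths asserted throughout these tests follow directly from the stride arithmetic. The helper below is a small standalone sketch (it is not part of the library and makes no TensorFlow calls); it only reproduces the ceil/floor bookkeeping implied by the expected shapes above.

import math

def pooled_lengths(seq_length, strides, pool_type="avg", unpool_length=0):
  """Per-layer sequence lengths implied by the funnel encoder tests above."""
  lengths = []
  length = seq_length
  for stride in strides:
    pooled = length - unpool_length
    if pool_type in ("max", "avg"):
      pooled = math.ceil(pooled / stride)   # strided pooling keeps the remainder
    else:  # "truncated_avg"
      pooled = int(pooled / stride)         # truncated/fractional pooling drops it
    length = unpool_length + pooled
    lengths.append(length)
  return lengths

print(pooled_lengths(21, [2, 2, 2], "avg"))                     # [11, 6, 3]
print(pooled_lengths(21, [2, 2, 2], "truncated_avg"))           # [10, 5, 2]
print(pooled_lengths(32, [1.33333, 3, 2, 1], "truncated_avg"))  # [24, 8, 4, 4]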
17,167
39.682464
80
py
models
models-master/official/nlp/modeling/networks/packed_sequence_embedding_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for official.nlp.modeling.networks.packed_sequence_embedding.""" # Import libraries from absl.testing import parameterized import numpy as np import tensorflow as tf from official.nlp.modeling.networks import packed_sequence_embedding class PackedSequenceEmbeddingTest(tf.test.TestCase, parameterized.TestCase): def tearDown(self): super(PackedSequenceEmbeddingTest, self).tearDown() tf.keras.mixed_precision.set_global_policy('float32') @parameterized.parameters([ (True, True, True), (False, False, True), (False, True, False), (True, False, False), ]) def test_network_creation(self, use_position_id, pack_multiple_sequences, use_float16): """Validate that the Keras object can be created.""" if use_float16: tf.keras.mixed_precision.set_global_policy('mixed_float16') seq_length = 16 vocab_size = 100 max_position_embeddings = 32 type_vocab_size = 2 embedding_width = 16 hidden_size = 32 embedding_cfg = dict( vocab_size=vocab_size, type_vocab_size=2, embedding_width=embedding_width, hidden_size=hidden_size, max_seq_length=max_position_embeddings, initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02), dropout_rate=0.1, use_position_id=use_position_id, pack_multiple_sequences=pack_multiple_sequences, ) test_object = packed_sequence_embedding.PackedSequenceEmbedding( **embedding_cfg) input_word_ids = tf.keras.Input(shape=(seq_length,), dtype=tf.int32) input_mask = tf.keras.Input(shape=(seq_length,), dtype=tf.int32) input_type_ids = tf.keras.Input(shape=(seq_length,), dtype=tf.int32) network_inputs = { 'input_word_ids': input_word_ids, 'input_mask': input_mask, 'input_type_ids': input_type_ids, } if use_position_id: network_inputs['position_ids'] = tf.keras.Input( shape=(seq_length,), dtype=tf.int32) embedding, mask = test_object(network_inputs) # Create a model based off of this network: model = tf.keras.Model(network_inputs, [embedding, mask]) # Invoke the model. We can't validate the output data here (the model is too # complex) but this will catch structural runtime errors. batch_size = 3 word_id_data = np.random.randint(vocab_size, size=(batch_size, seq_length)) mask_data = np.random.randint(2, size=(batch_size, seq_length)) type_id_data = np.random.randint( type_vocab_size, size=(batch_size, seq_length)) feed_input = { 'input_word_ids': word_id_data, 'input_mask': mask_data, 'input_type_ids': type_id_data, } if use_position_id: feed_input['position_ids'] = np.random.randint( seq_length, size=(batch_size, seq_length)) embeddings, attention_mask = model.predict(feed_input) expected_embeddings_shape = [3, seq_length, hidden_size] expected_attention_mask_shape = [3, seq_length, seq_length] self.assertAllEqual(expected_embeddings_shape, embeddings.shape) self.assertAllEqual(expected_attention_mask_shape, attention_mask.shape) def test_serialize_deserialize(self): tf.keras.mixed_precision.set_global_policy('mixed_float16') # Create a network object that sets all of its config options. 
embedding_cfg = dict( vocab_size=100, type_vocab_size=2, embedding_width=64, hidden_size=64, max_seq_length=32, initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02), dropout_rate=0.1, use_position_id=True, pack_multiple_sequences=False, ) network = packed_sequence_embedding.PackedSequenceEmbedding(**embedding_cfg) expected_config = dict(embedding_cfg) expected_config['initializer'] = tf.keras.initializers.serialize( tf.keras.initializers.get(expected_config['initializer'])) self.assertEqual(network.get_config(), expected_config) # Create another network object from the first object's config. new_network = packed_sequence_embedding.PackedSequenceEmbedding.from_config( network.get_config()) # Validate that the config can be forced to JSON. _ = new_network.to_json() # If the serialization was successful, the new config should match the old. self.assertAllEqual(network.get_config(), new_network.get_config()) if __name__ == '__main__': tf.test.main()
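For intuition on what the packed-sequence machinery buys: when the sub-sequence mask from PackedSequenceMask is multiplied into the self-attention mask (as PackedSequenceEmbedding does), attention is confined to each packed sub sequence. The NumPy sketch below is illustrative only; the two length-2 sub sequences and the all-ones input mask are assumptions for the example.

import numpy as np

input_mask = np.ones((1, 4), dtype=np.int32)          # no padding in this toy row
# Attention mask derived from the input mask alone: attend anywhere.
attention_mask = np.broadcast_to(
    input_mask[:, None, :], (1, 4, 4)).astype(np.int32)
# Two packed sub sequences of length 2 each.
seq_ids = np.array([[1, 1, 2, 2]])
sub_seq_mask = (seq_ids[:, :, None] == seq_ids[:, None, :]).astype(np.int32)
combined = attention_mask * sub_seq_mask              # block-diagonal result
print(combined[0])
# [[1 1 0 0]
#  [1 1 0 0]
#  [0 0 1 1]
#  [0 0 1 1]]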
5,066
36.533333
80
py
models
models-master/official/nlp/modeling/networks/xlnet_base_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for Keras based XLNet model.""" from absl.testing import parameterized import numpy as np import tensorflow as tf from tensorflow.python.distribute import combinations from official.nlp.modeling.networks import xlnet_base class RelativePositionEncodingTest(tf.test.TestCase): def test_positional_embedding(self): """A low-dimensional example is tested. With len(pos_seq)=2 and d_model=4: pos_seq = [[1.], [0.]] inv_freq = [1., 0.01] pos_seq x inv_freq = [[1, 0.01], [0., 0.]] pos_emb = [[sin(1.), sin(0.01), cos(1.), cos(0.01)], [sin(0.), sin(0.), cos(0.), cos(0.)]] = [[0.84147096, 0.00999983, 0.54030228, 0.99994999], [0., 0., 1., 1.]] """ target = np.array([[[0.84147096, 0.00999983, 0.54030228, 0.99994999], [0., 0., 1., 1.]]]) hidden_size = 4 pos_seq = tf.range(1, -1, -1.0) # [1., 0.] encoding_layer = xlnet_base.RelativePositionEncoding( hidden_size=hidden_size) encoding = encoding_layer(pos_seq, batch_size=None).numpy().astype(float) self.assertAllClose(encoding, target) class ComputePositionEncodingTest(tf.test.TestCase, parameterized.TestCase): @combinations.generate(combinations.combine( attention_type=["uni", "bi"], bi_data=[False, True], )) def test_compute_position_encoding_smoke(self, attention_type, bi_data): hidden_size = 4 batch_size = 4 total_length = 8 seq_length = 4 position_encoding_layer = xlnet_base.RelativePositionEncoding( hidden_size=hidden_size) encoding = xlnet_base._compute_positional_encoding( attention_type=attention_type, position_encoding_layer=position_encoding_layer, hidden_size=hidden_size, batch_size=batch_size, total_length=total_length, seq_length=seq_length, clamp_length=2, bi_data=bi_data, dtype=tf.float32) self.assertEqual(encoding.shape[0], batch_size) self.assertEqual(encoding.shape[2], hidden_size) class CausalAttentionMaskTests(tf.test.TestCase): def test_casual_attention_mask_with_no_memory(self): seq_length, memory_length = 3, 0 causal_attention_mask = xlnet_base._create_causal_attention_mask( seq_length=seq_length, memory_length=memory_length) expected_output = np.array([[1, 0, 0], [1, 1, 0], [1, 1, 1]]) self.assertAllClose(causal_attention_mask, expected_output) def test_casual_attention_mask_with_memory(self): seq_length, memory_length = 3, 2 causal_attention_mask = xlnet_base._create_causal_attention_mask( seq_length=seq_length, memory_length=memory_length) expected_output = np.array([[1, 1, 1, 0, 0], [1, 1, 1, 1, 0], [1, 1, 1, 1, 1]]) self.assertAllClose(causal_attention_mask, expected_output) def test_causal_attention_mask_with_same_length(self): seq_length, memory_length = 3, 2 causal_attention_mask = xlnet_base._create_causal_attention_mask( seq_length=seq_length, memory_length=memory_length, same_length=True) expected_output = np.array([[1, 1, 1, 0, 0], [0, 1, 1, 1, 0], [0, 0, 1, 1, 1]]) self.assertAllClose(causal_attention_mask, expected_output) class MaskComputationTests(tf.test.TestCase, parameterized.TestCase): 
@combinations.generate(combinations.combine( use_input_mask=[False, True], use_permutation_mask=[False, True], attention_type=["uni", "bi"], memory_length=[0, 4], )) def test_compute_attention_mask_smoke(self, use_input_mask, use_permutation_mask, attention_type, memory_length): """Tests coverage and functionality for different configurations.""" batch_size = 2 seq_length = 8 if use_input_mask: input_mask = tf.zeros(shape=(batch_size, seq_length)) else: input_mask = None if use_permutation_mask: permutation_mask = tf.zeros(shape=(batch_size, seq_length, seq_length)) else: permutation_mask = None _, content_mask = xlnet_base._compute_attention_mask( input_mask=input_mask, permutation_mask=permutation_mask, attention_type=attention_type, seq_length=seq_length, memory_length=memory_length, batch_size=batch_size, dtype=tf.float32) expected_mask_shape = (batch_size, 1, seq_length, seq_length + memory_length) if use_input_mask or use_permutation_mask: self.assertEqual(content_mask.shape, expected_mask_shape) def test_no_input_masks(self): query_mask, content_mask = xlnet_base._compute_attention_mask( input_mask=None, permutation_mask=None, attention_type="uni", seq_length=8, memory_length=2, batch_size=2, dtype=tf.float32) self.assertIsNone(query_mask) self.assertIsNone(content_mask) def test_input_mask_no_permutation(self): """Tests if an input mask is provided but not permutation. In the case that only one of input mask or permutation mask is provided and the attention type is bidirectional, the query mask should be a broadcasted version of the provided mask. Content mask should be a broadcasted version of the query mask, where the diagonal is 0s. """ seq_length = 4 batch_size = 1 memory_length = 0 input_mask = np.array([[1, 1, 0, 0]]) permutation_mask = None expected_query_mask = input_mask[None, None, :, :] expected_content_mask = np.array([[[ [1, 1, 0, 0], [1, 1, 0, 0], [1, 1, 1, 0], [1, 1, 0, 1]]]]) query_mask, content_mask = xlnet_base._compute_attention_mask( input_mask=input_mask, permutation_mask=permutation_mask, attention_type="bi", seq_length=seq_length, memory_length=memory_length, batch_size=batch_size, dtype=tf.float32) self.assertAllClose(query_mask, expected_query_mask) self.assertAllClose(content_mask, expected_content_mask) def test_permutation_mask_no_input_mask(self): """Tests if a permutation mask is provided but not input.""" seq_length = 2 batch_size = 1 memory_length = 0 input_mask = None permutation_mask = np.array([ [[1, 0], [1, 0]], ]) expected_query_mask = permutation_mask[:, None, :, :] expected_content_mask = np.array([[[ [1, 0], [1, 1]]]]) query_mask, content_mask = xlnet_base._compute_attention_mask( input_mask=input_mask, permutation_mask=permutation_mask, attention_type="bi", seq_length=seq_length, memory_length=memory_length, batch_size=batch_size, dtype=tf.float32) self.assertAllClose(query_mask, expected_query_mask) self.assertAllClose(content_mask, expected_content_mask) def test_permutation_and_input_mask(self): """Tests if both an input and permutation mask are provided.""" seq_length = 4 batch_size = 1 memory_length = 0 input_mask = np.array([[1, 1, 0, 0]]) permutation_mask = np.array([[ [0, 1, 1, 1], [1, 0, 1, 1], [1, 1, 0, 1], [1, 1, 1, 0], ]]) expected_query_mask = np.array([[[ [0, 1, 0, 0], [1, 0, 0, 0], [1, 1, 0, 0], [1, 1, 0, 0]]]]) expected_content_mask = np.array([[[ [1, 1, 0, 0], [1, 1, 0, 0], [1, 1, 1, 0], [1, 1, 0, 1]]]]) query_mask, content_mask = xlnet_base._compute_attention_mask( input_mask=input_mask, permutation_mask=permutation_mask, 
attention_type="bi", seq_length=seq_length, memory_length=memory_length, batch_size=batch_size, dtype=tf.float32) self.assertAllClose(query_mask, expected_query_mask) self.assertAllClose(content_mask, expected_content_mask) def test_permutation_input_uni_mask(self): """Tests if an input, permutation and causal mask are provided.""" seq_length = 4 batch_size = 1 memory_length = 0 input_mask = np.array([[1, 1, 1, 0]]) permutation_mask = np.array([[ [0, 1, 1, 1], [1, 0, 1, 1], [1, 1, 0, 1], [1, 1, 1, 0], ]]) expected_query_mask = np.array([[[ [0, 0, 0, 0], [1, 0, 0, 0], [1, 1, 0, 0], [1, 1, 1, 0]]]]) expected_content_mask = np.array([[[ [1, 0, 0, 0], [1, 1, 0, 0], [1, 1, 1, 0], [1, 1, 1, 1]]]]) query_mask, content_mask = xlnet_base._compute_attention_mask( input_mask=input_mask, permutation_mask=permutation_mask, attention_type="uni", seq_length=seq_length, memory_length=memory_length, batch_size=batch_size, dtype=tf.float32) self.assertAllClose(query_mask, expected_query_mask) self.assertAllClose(content_mask, expected_content_mask) class SegmentMatrixTests(tf.test.TestCase): def test_no_segment_ids(self): segment_matrix = xlnet_base._compute_segment_matrix( segment_ids=None, memory_length=2, batch_size=1, use_cls_mask=False) self.assertIsNone(segment_matrix) def test_basic(self): batch_size = 1 memory_length = 0 segment_ids = np.array([ [1, 1, 2, 1] ]) expected_segment_matrix = np.array([[ [False, False, True, False], [False, False, True, False], [True, True, False, True], [False, False, True, False] ]]) segment_matrix = xlnet_base._compute_segment_matrix( segment_ids=segment_ids, memory_length=memory_length, batch_size=batch_size, use_cls_mask=False) self.assertAllClose(segment_matrix, expected_segment_matrix) def test_basic_with_memory(self): batch_size = 1 memory_length = 1 segment_ids = np.array([ [1, 1, 2, 1] ]) expected_segment_matrix = np.array([[ [True, False, False, True, False], [True, False, False, True, False], [True, True, True, False, True], [True, False, False, True, False] ]]).astype(int) segment_matrix = tf.cast(xlnet_base._compute_segment_matrix( segment_ids=segment_ids, memory_length=memory_length, batch_size=batch_size, use_cls_mask=False), dtype=tf.uint8) self.assertAllClose(segment_matrix, expected_segment_matrix) def dont_test_basic_with_class_mask(self): # TODO(allencwang) - this test should pass but illustrates the legacy issue # of using class mask. Enable once addressed. 
batch_size = 1 memory_length = 0 segment_ids = np.array([ [1, 1, 2, 1] ]) expected_segment_matrix = np.array([[ [False, False, True, False], [False, False, True, False], [True, True, False, True], [False, False, True, False] ]]).astype(int) segment_matrix = tf.cast(xlnet_base._compute_segment_matrix( segment_ids=segment_ids, memory_length=memory_length, batch_size=batch_size, use_cls_mask=True), dtype=tf.uint8) self.assertAllClose(segment_matrix, expected_segment_matrix) class XLNetModelTests(tf.test.TestCase): def _generate_data(self, batch_size, seq_length, num_predictions=None): """Generates sample XLNet data for testing.""" sequence_shape = (batch_size, seq_length) if num_predictions is not None: target_mapping = tf.random.uniform( shape=(batch_size, num_predictions, seq_length)) return { "input_ids": np.random.randint(10, size=sequence_shape, dtype="int32"), "segment_ids": np.random.randint(2, size=sequence_shape, dtype="int32"), "input_mask": np.random.randint(2, size=sequence_shape).astype("float32"), "permutation_mask": np.random.randint( 2, size=(batch_size, seq_length, seq_length)).astype("float32"), "target_mapping": target_mapping, "masked_tokens": tf.random.uniform(shape=sequence_shape), } def test_xlnet_model(self): batch_size = 2 seq_length = 8 num_predictions = 2 hidden_size = 4 xlnet_model = xlnet_base.XLNetBase( vocab_size=32000, num_layers=2, hidden_size=hidden_size, num_attention_heads=2, head_size=2, inner_size=2, dropout_rate=0., attention_dropout_rate=0., attention_type="bi", bi_data=True, initializer=tf.keras.initializers.RandomNormal(stddev=0.1), two_stream=False, tie_attention_biases=True, reuse_length=0, inner_activation="relu") input_data = self._generate_data(batch_size=batch_size, seq_length=seq_length, num_predictions=num_predictions) model_output = xlnet_model(**input_data) self.assertEqual(model_output[0].shape, (batch_size, seq_length, hidden_size)) def test_get_config(self): xlnet_model = xlnet_base.XLNetBase( vocab_size=32000, num_layers=12, hidden_size=36, num_attention_heads=12, head_size=12, inner_size=12, dropout_rate=0., attention_dropout_rate=0., attention_type="bi", bi_data=True, initializer=tf.keras.initializers.RandomNormal(stddev=0.1), two_stream=False, tie_attention_biases=True, memory_length=0, reuse_length=0, inner_activation="relu") config = xlnet_model.get_config() new_xlnet = xlnet_base.XLNetBase.from_config(config) self.assertEqual(config, new_xlnet.get_config()) if __name__ == "__main__": tf.random.set_seed(0) tf.test.main()
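The worked example in RelativePositionEncodingTest.test_positional_embedding can be reproduced outside TensorFlow. The NumPy snippet below is an illustrative sketch of that same arithmetic (inverse frequencies 1/10000^(2i/d), sines concatenated with cosines); it is not library code.

import numpy as np

hidden_size = 4
pos_seq = np.array([1.0, 0.0])
inv_freq = 1.0 / (10000.0 ** (np.arange(0, hidden_size, 2) / hidden_size))  # [1., 0.01]
sinusoid = np.outer(pos_seq, inv_freq)                 # pos_seq x inv_freq
pos_emb = np.concatenate([np.sin(sinusoid), np.cos(sinusoid)], axis=-1)
print(pos_emb)  # matches the target in the test docstring, up to float precision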
14,778
31.696903
80
py
models
models-master/official/nlp/modeling/networks/bert_encoder_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for transformer-based bert encoder network.""" # Import libraries from absl.testing import parameterized import numpy as np import tensorflow as tf from official.nlp.modeling.networks import bert_encoder class BertEncoderTest(tf.test.TestCase, parameterized.TestCase): def tearDown(self): super(BertEncoderTest, self).tearDown() tf.keras.mixed_precision.set_global_policy("float32") @parameterized.named_parameters( ("encoder_v2", bert_encoder.BertEncoderV2), ("encoder_v1", bert_encoder.BertEncoder), ) def test_dict_outputs_network_creation(self, encoder_cls): hidden_size = 32 sequence_length = 21 # Create a small BertEncoder for testing. if encoder_cls is bert_encoder.BertEncoderV2: kwargs = {} else: kwargs = dict(dict_outputs=True) test_network = encoder_cls( vocab_size=100, hidden_size=hidden_size, num_attention_heads=2, num_layers=3, **kwargs) # Create the inputs (note that the first dimension is implicit). word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) dict_outputs = test_network( dict(input_word_ids=word_ids, input_mask=mask, input_type_ids=type_ids)) data = dict_outputs["sequence_output"] pooled = dict_outputs["pooled_output"] self.assertIsInstance(test_network.transformer_layers, list) self.assertLen(test_network.transformer_layers, 3) self.assertIsInstance(test_network.pooler_layer, tf.keras.layers.Dense) expected_data_shape = [None, sequence_length, hidden_size] expected_pooled_shape = [None, hidden_size] self.assertAllEqual(expected_data_shape, data.shape.as_list()) self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list()) # The default output dtype is float32. self.assertAllEqual(tf.float32, data.dtype) self.assertAllEqual(tf.float32, pooled.dtype) @parameterized.named_parameters( ("encoder_v2", bert_encoder.BertEncoderV2), ("encoder_v1", bert_encoder.BertEncoder), ) def test_dict_outputs_all_encoder_outputs_network_creation(self, encoder_cls): hidden_size = 32 sequence_length = 21 # Create a small BertEncoder for testing. test_network = encoder_cls( vocab_size=100, hidden_size=hidden_size, num_attention_heads=2, num_layers=3, dict_outputs=True) # Create the inputs (note that the first dimension is implicit). 
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) dict_outputs = test_network( dict(input_word_ids=word_ids, input_mask=mask, input_type_ids=type_ids)) all_encoder_outputs = dict_outputs["encoder_outputs"] pooled = dict_outputs["pooled_output"] expected_data_shape = [None, sequence_length, hidden_size] expected_pooled_shape = [None, hidden_size] self.assertLen(all_encoder_outputs, 3) for data in all_encoder_outputs: self.assertAllEqual(expected_data_shape, data.shape.as_list()) self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list()) # The default output dtype is float32. self.assertAllEqual(tf.float32, all_encoder_outputs[-1].dtype) self.assertAllEqual(tf.float32, pooled.dtype) @parameterized.named_parameters( ("encoder_v2", bert_encoder.BertEncoderV2), ("encoder_v1", bert_encoder.BertEncoder), ) def test_dict_outputs_network_creation_return_attention_scores( self, encoder_cls): hidden_size = 32 sequence_length = 21 num_attention_heads = 5 num_layers = 3 # Create a small BertEncoder for testing. test_network = encoder_cls( vocab_size=100, hidden_size=hidden_size, num_attention_heads=num_attention_heads, num_layers=num_layers, return_attention_scores=True, dict_outputs=True) # Create the inputs (note that the first dimension is implicit). word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) dict_outputs = test_network( dict(input_word_ids=word_ids, input_mask=mask, input_type_ids=type_ids)) all_attention_outputs = dict_outputs["attention_scores"] expected_data_shape = [ None, num_attention_heads, sequence_length, sequence_length ] self.assertLen(all_attention_outputs, num_layers) for data in all_attention_outputs: self.assertAllEqual(expected_data_shape, data.shape.as_list()) # The default output dtype is float32. self.assertAllEqual(tf.float32, all_attention_outputs[-1].dtype) @parameterized.named_parameters( ("encoder_v2", bert_encoder.BertEncoderV2), ("encoder_v1", bert_encoder.BertEncoder), ) def test_dict_outputs_network_creation_with_float16_dtype(self, encoder_cls): hidden_size = 32 sequence_length = 21 tf.keras.mixed_precision.set_global_policy("mixed_float16") # Create a small BertEncoder for testing. test_network = encoder_cls( vocab_size=100, hidden_size=hidden_size, num_attention_heads=2, num_layers=3, dict_outputs=True) # Create the inputs (note that the first dimension is implicit). word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) dict_outputs = test_network( dict(input_word_ids=word_ids, input_mask=mask, input_type_ids=type_ids)) data = dict_outputs["sequence_output"] pooled = dict_outputs["pooled_output"] expected_data_shape = [None, sequence_length, hidden_size] expected_pooled_shape = [None, hidden_size] self.assertAllEqual(expected_data_shape, data.shape.as_list()) self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list()) # If float_dtype is set to float16, the data output is float32 (from a layer # norm) and pool output should be float16. 
self.assertAllEqual(tf.float32, data.dtype) self.assertAllEqual(tf.float16, pooled.dtype) @parameterized.named_parameters( ("all_sequence_encoder_v1", bert_encoder.BertEncoder, None, 21), ("output_range_encoder_v1", bert_encoder.BertEncoder, 1, 1), ("all_sequence_encoder_v2", bert_encoder.BertEncoderV2, None, 21), ("output_range_encoder_v2", bert_encoder.BertEncoderV2, 1, 1), ) def test_dict_outputs_network_invocation( self, encoder_cls, output_range, out_seq_len): hidden_size = 32 sequence_length = 21 vocab_size = 57 num_types = 7 # Create a small BertEncoder for testing. test_network = encoder_cls( vocab_size=vocab_size, hidden_size=hidden_size, num_attention_heads=2, num_layers=3, type_vocab_size=num_types, output_range=output_range, dict_outputs=True) # Create the inputs (note that the first dimension is implicit). word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) dict_outputs = test_network( dict(input_word_ids=word_ids, input_mask=mask, input_type_ids=type_ids)) data = dict_outputs["sequence_output"] pooled = dict_outputs["pooled_output"] # Create a model based off of this network: model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled]) # Invoke the model. We can't validate the output data here (the model is too # complex) but this will catch structural runtime errors. batch_size = 3 word_id_data = np.random.randint( vocab_size, size=(batch_size, sequence_length)) mask_data = np.random.randint(2, size=(batch_size, sequence_length)) type_id_data = np.random.randint( num_types, size=(batch_size, sequence_length)) outputs = model.predict([word_id_data, mask_data, type_id_data]) self.assertEqual(outputs[0].shape[1], out_seq_len) # Creates a BertEncoder with max_sequence_length != sequence_length max_sequence_length = 128 test_network = encoder_cls( vocab_size=vocab_size, hidden_size=hidden_size, max_sequence_length=max_sequence_length, num_attention_heads=2, num_layers=3, type_vocab_size=num_types, dict_outputs=True) dict_outputs = test_network( dict(input_word_ids=word_ids, input_mask=mask, input_type_ids=type_ids)) data = dict_outputs["sequence_output"] pooled = dict_outputs["pooled_output"] model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled]) outputs = model.predict([word_id_data, mask_data, type_id_data]) self.assertEqual(outputs[0].shape[1], sequence_length) # Creates a BertEncoder with embedding_width != hidden_size test_network = encoder_cls( vocab_size=vocab_size, hidden_size=hidden_size, max_sequence_length=max_sequence_length, num_attention_heads=2, num_layers=3, type_vocab_size=num_types, embedding_width=16, dict_outputs=True) dict_outputs = test_network( dict(input_word_ids=word_ids, input_mask=mask, input_type_ids=type_ids)) data = dict_outputs["sequence_output"] pooled = dict_outputs["pooled_output"] model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled]) outputs = model.predict([word_id_data, mask_data, type_id_data]) self.assertEqual(outputs[0].shape[-1], hidden_size) self.assertTrue(hasattr(test_network, "_embedding_projection")) def test_embeddings_as_inputs(self): hidden_size = 32 sequence_length = 21 # Create a small BertEncoder for testing. test_network = bert_encoder.BertEncoderV2( vocab_size=100, hidden_size=hidden_size, num_attention_heads=2, num_layers=3) # Create the inputs (note that the first dimension is implicit). 
word_ids = tf.keras.Input(shape=(sequence_length), dtype=tf.int32) mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) test_network.build( dict(input_word_ids=word_ids, input_mask=mask, input_type_ids=type_ids)) embeddings = test_network.get_embedding_layer()(word_ids) # Calls with the embeddings. dict_outputs = test_network( dict( input_word_embeddings=embeddings, input_mask=mask, input_type_ids=type_ids)) all_encoder_outputs = dict_outputs["encoder_outputs"] pooled = dict_outputs["pooled_output"] expected_data_shape = [None, sequence_length, hidden_size] expected_pooled_shape = [None, hidden_size] self.assertLen(all_encoder_outputs, 3) for data in all_encoder_outputs: self.assertAllEqual(expected_data_shape, data.shape.as_list()) self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list()) # The default output dtype is float32. self.assertAllEqual(tf.float32, all_encoder_outputs[-1].dtype) self.assertAllEqual(tf.float32, pooled.dtype) def test_serialize_deserialize(self): # Create a network object that sets all of its config options. kwargs = dict( vocab_size=100, hidden_size=32, num_layers=3, num_attention_heads=2, max_sequence_length=21, type_vocab_size=12, inner_dim=1223, inner_activation="relu", output_dropout=0.05, attention_dropout=0.22, initializer="glorot_uniform", output_range=-1, embedding_width=16, embedding_layer=None, norm_first=False) network = bert_encoder.BertEncoder(**kwargs) # Validate that the config can be forced to JSON. _ = network.to_json() # Tests model saving/loading. model_path = self.get_temp_dir() + "/model" network.save(model_path) _ = tf.keras.models.load_model(model_path) def test_network_creation(self): hidden_size = 32 sequence_length = 21 # Create a small BertEncoder for testing. test_network = bert_encoder.BertEncoder( vocab_size=100, hidden_size=hidden_size, num_attention_heads=2, num_layers=3) # Create the inputs (note that the first dimension is implicit). word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) data, pooled = test_network([word_ids, mask, type_ids]) self.assertIsInstance(test_network.transformer_layers, list) self.assertLen(test_network.transformer_layers, 3) self.assertIsInstance(test_network.pooler_layer, tf.keras.layers.Dense) expected_data_shape = [None, sequence_length, hidden_size] expected_pooled_shape = [None, hidden_size] self.assertAllEqual(expected_data_shape, data.shape.as_list()) self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list()) # The default output dtype is float32. self.assertAllEqual(tf.float32, data.dtype) self.assertAllEqual(tf.float32, pooled.dtype) test_network_dict = bert_encoder.BertEncoder( vocab_size=100, hidden_size=hidden_size, num_attention_heads=2, num_layers=3, dict_outputs=True) # Create the inputs (note that the first dimension is implicit). 
inputs = dict( input_word_ids=word_ids, input_mask=mask, input_type_ids=type_ids) _ = test_network_dict(inputs) test_network_dict.set_weights(test_network.get_weights()) batch_size = 2 vocab_size = 100 num_types = 2 word_id_data = np.random.randint( vocab_size, size=(batch_size, sequence_length)) mask_data = np.random.randint(2, size=(batch_size, sequence_length)) type_id_data = np.random.randint( num_types, size=(batch_size, sequence_length)) list_outputs = test_network([word_id_data, mask_data, type_id_data]) dict_outputs = test_network_dict( dict( input_word_ids=word_id_data, input_mask=mask_data, input_type_ids=type_id_data)) self.assertAllEqual(list_outputs[0], dict_outputs["sequence_output"]) self.assertAllEqual(list_outputs[1], dict_outputs["pooled_output"]) def test_all_encoder_outputs_network_creation(self): hidden_size = 32 sequence_length = 21 # Create a small BertEncoder for testing. test_network = bert_encoder.BertEncoder( vocab_size=100, hidden_size=hidden_size, num_attention_heads=2, num_layers=3, return_all_encoder_outputs=True) # Create the inputs (note that the first dimension is implicit). word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) all_encoder_outputs, pooled = test_network([word_ids, mask, type_ids]) expected_data_shape = [None, sequence_length, hidden_size] expected_pooled_shape = [None, hidden_size] self.assertLen(all_encoder_outputs, 3) for data in all_encoder_outputs: self.assertAllEqual(expected_data_shape, data.shape.as_list()) self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list()) # The default output dtype is float32. self.assertAllEqual(tf.float32, all_encoder_outputs[-1].dtype) self.assertAllEqual(tf.float32, pooled.dtype) def test_attention_scores_output_network_creation(self): hidden_size = 32 sequence_length = 21 num_attention_heads = 5 num_layers = 3 # Create a small BertEncoder for testing. test_network = bert_encoder.BertEncoder( vocab_size=100, hidden_size=hidden_size, num_attention_heads=num_attention_heads, num_layers=num_layers, return_attention_scores=True) # Create the inputs (note that the first dimension is implicit). word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) _, _, all_attention_outputs = test_network([word_ids, mask, type_ids]) expected_data_shape = [ None, num_attention_heads, sequence_length, sequence_length ] self.assertLen(all_attention_outputs, num_layers) for data in all_attention_outputs: self.assertAllEqual(expected_data_shape, data.shape.as_list()) # The default output dtype is float32. self.assertAllEqual(tf.float32, all_attention_outputs[-1].dtype) def test_network_creation_with_float16_dtype(self): hidden_size = 32 sequence_length = 21 tf.keras.mixed_precision.set_global_policy("mixed_float16") # Create a small BertEncoder for testing. test_network = bert_encoder.BertEncoder( vocab_size=100, hidden_size=hidden_size, num_attention_heads=2, num_layers=3) # Create the inputs (note that the first dimension is implicit). 
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) data, pooled = test_network([word_ids, mask, type_ids]) expected_data_shape = [None, sequence_length, hidden_size] expected_pooled_shape = [None, hidden_size] self.assertAllEqual(expected_data_shape, data.shape.as_list()) self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list()) # If float_dtype is set to float16, the data output is float32 (from a layer # norm) and pool output should be float16. self.assertAllEqual(tf.float32, data.dtype) self.assertAllEqual(tf.float16, pooled.dtype) @parameterized.named_parameters( ("all_sequence", None, 21), ("output_range", 1, 1), ) def test_network_invocation(self, output_range, out_seq_len): hidden_size = 32 sequence_length = 21 vocab_size = 57 num_types = 7 # Create a small BertEncoder for testing. test_network = bert_encoder.BertEncoder( vocab_size=vocab_size, hidden_size=hidden_size, num_attention_heads=2, num_layers=3, type_vocab_size=num_types, output_range=output_range) # Create the inputs (note that the first dimension is implicit). word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) data, pooled = test_network([word_ids, mask, type_ids]) # Create a model based off of this network: model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled]) # Invoke the model. We can't validate the output data here (the model is too # complex) but this will catch structural runtime errors. batch_size = 3 word_id_data = np.random.randint( vocab_size, size=(batch_size, sequence_length)) mask_data = np.random.randint(2, size=(batch_size, sequence_length)) type_id_data = np.random.randint( num_types, size=(batch_size, sequence_length)) outputs = model.predict([word_id_data, mask_data, type_id_data]) self.assertEqual(outputs[0].shape[1], out_seq_len) # Creates a BertEncoder with max_sequence_length != sequence_length max_sequence_length = 128 test_network = bert_encoder.BertEncoder( vocab_size=vocab_size, hidden_size=hidden_size, max_sequence_length=max_sequence_length, num_attention_heads=2, num_layers=3, type_vocab_size=num_types) data, pooled = test_network([word_ids, mask, type_ids]) model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled]) outputs = model.predict([word_id_data, mask_data, type_id_data]) self.assertEqual(outputs[0].shape[1], sequence_length) # Creates a BertEncoder with embedding_width != hidden_size test_network = bert_encoder.BertEncoder( vocab_size=vocab_size, hidden_size=hidden_size, max_sequence_length=max_sequence_length, num_attention_heads=2, num_layers=3, type_vocab_size=num_types, embedding_width=16) data, pooled = test_network([word_ids, mask, type_ids]) model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled]) outputs = model.predict([word_id_data, mask_data, type_id_data]) self.assertEqual(outputs[0].shape[-1], hidden_size) self.assertTrue(hasattr(test_network, "_embedding_projection")) class BertEncoderV2CompatibilityTest(tf.test.TestCase): def tearDown(self): super().tearDown() tf.keras.mixed_precision.set_global_policy("float32") def test_weights_forward_compatible(self): batch_size = 3 hidden_size = 32 sequence_length = 21 vocab_size = 57 num_types = 7 kwargs = dict( vocab_size=vocab_size, hidden_size=hidden_size, num_attention_heads=2, num_layers=3, 
type_vocab_size=num_types) word_id_data = np.random.randint( vocab_size, size=(batch_size, sequence_length)) mask_data = np.random.randint(2, size=(batch_size, sequence_length)) type_id_data = np.random.randint( num_types, size=(batch_size, sequence_length)) data = dict( input_word_ids=word_id_data, input_mask=mask_data, input_type_ids=type_id_data) # Create small BertEncoders for testing. new_net = bert_encoder.BertEncoderV2(**kwargs) _ = new_net(data) kwargs["dict_outputs"] = True old_net = bert_encoder.BertEncoder(**kwargs) _ = old_net(data) new_net._embedding_layer.set_weights(old_net._embedding_layer.get_weights()) new_net._position_embedding_layer.set_weights( old_net._position_embedding_layer.get_weights()) new_net._type_embedding_layer.set_weights( old_net._type_embedding_layer.get_weights()) new_net._embedding_norm_layer.set_weights( old_net._embedding_norm_layer.get_weights()) # embedding_dropout has no weights. if hasattr(old_net, "_embedding_projection"): new_net._embedding_projection.set_weights( old_net._embedding_projection.get_weights()) # attention_mask_layer has no weights. new_net._pooler_layer.set_weights(old_net._pooler_layer.get_weights()) for otl, ntl in zip(old_net._transformer_layers, new_net._transformer_layers): ntl.set_weights(otl.get_weights()) def check_output_close(data, net1, net2): output1 = net1(data) output2 = net2(data) for key in output1: self.assertAllClose(output1[key], output2[key]) check_output_close(data, old_net, new_net) def test_checkpoint_forward_compatible(self): batch_size = 3 hidden_size = 32 sequence_length = 21 vocab_size = 57 num_types = 7 kwargs = dict( vocab_size=vocab_size, hidden_size=hidden_size, num_attention_heads=2, num_layers=3, type_vocab_size=num_types) word_id_data = np.random.randint( vocab_size, size=(batch_size, sequence_length)) mask_data = np.random.randint(2, size=(batch_size, sequence_length)) type_id_data = np.random.randint( num_types, size=(batch_size, sequence_length)) data = dict( input_word_ids=word_id_data, input_mask=mask_data, input_type_ids=type_id_data) kwargs["dict_outputs"] = True old_net = bert_encoder.BertEncoder(**kwargs) old_net_outputs = old_net(data) ckpt = tf.train.Checkpoint(net=old_net) path = ckpt.save(self.get_temp_dir()) del kwargs["dict_outputs"] new_net = bert_encoder.BertEncoderV2(**kwargs) new_ckpt = tf.train.Checkpoint(net=new_net) status = new_ckpt.restore(path) status.assert_existing_objects_matched() # assert_consumed will fail because the old model has redundant nodes. 
new_net_outputs = new_net(data) self.assertAllEqual(old_net_outputs.keys(), new_net_outputs.keys()) for key in old_net_outputs: self.assertAllClose(old_net_outputs[key], new_net_outputs[key]) def test_keras_model_checkpoint_forward_compatible(self): batch_size = 3 hidden_size = 32 sequence_length = 21 vocab_size = 57 num_types = 7 kwargs = dict( vocab_size=vocab_size, hidden_size=hidden_size, num_attention_heads=2, num_layers=3, type_vocab_size=num_types, output_range=None) word_id_data = np.random.randint( vocab_size, size=(batch_size, sequence_length)) mask_data = np.random.randint(2, size=(batch_size, sequence_length)) type_id_data = np.random.randint( num_types, size=(batch_size, sequence_length)) data = dict( input_word_ids=word_id_data, input_mask=mask_data, input_type_ids=type_id_data) kwargs["dict_outputs"] = True old_net = bert_encoder.BertEncoder(**kwargs) inputs = old_net.inputs outputs = old_net(inputs) old_model = tf.keras.Model(inputs=inputs, outputs=outputs) old_model_outputs = old_model(data) ckpt = tf.train.Checkpoint(net=old_model) path = ckpt.save(self.get_temp_dir()) del kwargs["dict_outputs"] new_net = bert_encoder.BertEncoderV2(**kwargs) inputs = new_net.inputs outputs = new_net(inputs) new_model = tf.keras.Model(inputs=inputs, outputs=outputs) new_ckpt = tf.train.Checkpoint(net=new_model) status = new_ckpt.restore(path) status.assert_existing_objects_matched() new_model_outputs = new_model(data) self.assertAllEqual(old_model_outputs.keys(), new_model_outputs.keys()) for key in old_model_outputs: self.assertAllClose(old_model_outputs[key], new_model_outputs[key]) if __name__ == "__main__": tf.test.main()
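# --- Editor's note: hedged usage sketch, not part of the original test file.
# It mirrors the dict-input path exercised by the compatibility tests above,
# using only constructor arguments and input keys that appear in those tests.
# The concrete sizes are illustrative assumptions.
import numpy as np
import tensorflow as tf

from official.nlp.modeling.networks import bert_encoder


def _example_bert_encoder_v2_invocation():
  encoder = bert_encoder.BertEncoderV2(
      vocab_size=57,
      hidden_size=32,
      num_attention_heads=2,
      num_layers=3,
      type_vocab_size=7)
  data = dict(
      input_word_ids=np.random.randint(57, size=(3, 21)),
      input_mask=np.random.randint(2, size=(3, 21)),
      input_type_ids=np.random.randint(7, size=(3, 21)))
  outputs = encoder(data)
  # BertEncoderV2 returns a dict of outputs; the tests above check that its
  # keys and values match those of BertEncoder(dict_outputs=True).
  return {key: value.shape for key, value in outputs.items()}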
26,920
38.358187
80
py
models
models-master/official/nlp/modeling/networks/funnel_transformer.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Funnel Transformer network.""" # pylint: disable=g-classes-have-attributes import math from typing import Any, Callable, Optional, Sequence, Union from absl import logging import numpy as np import tensorflow as tf from official.modeling import tf_utils from official.nlp.modeling import layers _Initializer = Union[str, tf.keras.initializers.Initializer] _Activation = Union[str, Callable[..., Any]] _MAX = 'max' _AVG = 'avg' _TRUNCATED_AVG = 'truncated_avg' _transformer_cls2str = { layers.TransformerEncoderBlock: 'TransformerEncoderBlock', layers.ReZeroTransformer: 'ReZeroTransformer' } _str2transformer_cls = { 'TransformerEncoderBlock': layers.TransformerEncoderBlock, 'ReZeroTransformer': layers.ReZeroTransformer } _approx_gelu = lambda x: tf.keras.activations.gelu(x, approximate=True) def _get_policy_dtype(): try: return tf.keras.mixed_precision.global_policy().compute_dtype or tf.float32 except AttributeError: # tf1 has no attribute 'global_policy' return tf.float32 def _pool_and_concat(mask, unpool_length: int, strides: Union[Sequence[int], int], axes: Union[Sequence[int], int]): """Pools the mask along a given axis with stride. It also skips first unpool_length elements. Args: mask: Tensor to be pooled. unpool_length: Leading elements to be skipped. strides: Strides for the given axes. axes: Axes to pool the Tensor. Returns: Pooled and concatenated Tensor. """ # Wraps the axes as a list. if isinstance(axes, int): axes = [axes] if isinstance(strides, int): strides = [strides] * len(axes) else: if len(strides) != len(axes): raise ValueError('The lengths of strides and axes need to match.') # Bypass no pooling cases. if np.all(np.array(strides) == 1): return mask for axis, stride in zip(axes, strides): # Skips first `unpool_length` tokens. unpool_tensor_shape = [slice(None)] * axis + [slice(None, unpool_length)] unpool_tensor = mask[unpool_tensor_shape] # Pools the second half. pool_tensor_shape = [slice(None)] * axis + [ slice(unpool_length, None, stride) ] pool_tensor = mask[pool_tensor_shape] mask = tf.concat((unpool_tensor, pool_tensor), axis=axis) return mask def _create_fractional_pool_transform(sl: int, pool_factor: float): """Create pooling transform for fractional pooling factor.""" assert pool_factor > 1.0, '`pool_factor` should be > 1.0.' psl = int(sl / pool_factor) gcd_ = math.gcd(sl, psl) # It is expected chunk_sl and chunk_psl are small integers. # The transform is built by tiling a [chunk_sl, chunk_psl] submatrix # gcd_ times. The submatrix sums to chunk_psl. chunk_sl = sl // gcd_ chunk_psl = psl // gcd_ num_one_entries = chunk_psl - 1 num_frac_entries = chunk_sl - (chunk_psl - 1) # The transform is of shape [sl, psl]. 
transform = np.zeros((sl, psl)) for i in range(sl // chunk_sl): row_start = chunk_sl * i col_start = chunk_psl * i for idx in range(num_one_entries): transform[row_start + idx][col_start + idx] = 1.0 for idx in range(num_frac_entries): transform[row_start + num_one_entries + idx][ col_start + num_one_entries ] = (1.0 / num_frac_entries) return tf.constant(transform, dtype=_get_policy_dtype()) def _create_truncated_avg_transforms( seq_length: int, pool_strides: Sequence[int] ): """Computes pooling transforms. The pooling_transform is of shape [seq_length, seq_length//pool_stride] and pooling_transform[i,j] = 1.0/pool_stride if i//pool_stride == j 0.0 otherwise. It's in essense average pooling but truncate the final window if it seq_length % pool_stride != 0. For seq_length==6 and pool_stride==2, it is [[ 0.5, 0.0, 0.0 ], [ 0.5, 0.0, 0.0 ], [ 0.0, 0.5, 0.0 ], [ 0.0, 0.5, 0.0 ], [ 0.0, 0.0, 0.5 ], [ 0.0, 0.0, 0.5 ]] Args: seq_length: int, sequence length. pool_strides: Sequence of pooling strides for each layer. Returns: pooling_transforms: Sequence of pooling transforms (Tensors) for each layer. """ pooling_transforms = [] for pool_stride in pool_strides: if pool_stride == 1: pooling_transforms.append(None) else: pooled_seq_length = int(seq_length / pool_stride) if (1.0 * pool_stride).is_integer(): pfac, sl, psl = pool_stride, seq_length, pooled_seq_length transform = [ [1.0 if (i // pfac) == j else 0.0 for j in range(psl)] for i in range(sl) ] transform = ( tf.constant(transform, dtype=_get_policy_dtype()) / pool_stride ) else: transform = _create_fractional_pool_transform(seq_length, pool_stride) pooling_transforms.append(transform) seq_length = pooled_seq_length return pooling_transforms def _create_truncated_avg_masks(input_mask: tf.Tensor, pool_strides: Sequence[int], transforms: Sequence[tf.Tensor]): """Computes attention masks. For [1,1,1,0,0] Args: input_mask: Tensor of shape [batch_size, seq_length]. pool_strides: Sequence of pooling strides for each layer. transforms: Sequence of off-diagonal matrices filling with 0.0 and 1/pool_stride. Returns: attention_masks: Sequence of attention masks for each layer. """ def create_2d_mask(from_length, mask): return tf.einsum('F,BT->BFT', tf.ones([from_length], dtype=mask.dtype), mask) attention_masks = [] seq_length = tf.shape(input_mask)[-1] layer_mask = tf.cast(input_mask, dtype=_get_policy_dtype()) for pool_stride, transform in zip(pool_strides, transforms): if pool_stride == 1: attention_masks.append(create_2d_mask(seq_length, layer_mask)) else: pooled_seq_length = tf.cast( tf.cast(seq_length, tf.float32) / tf.cast(pool_stride, tf.float32), tf.int32, ) attention_masks.append(create_2d_mask(pooled_seq_length, layer_mask)) layer_mask = tf.cast( tf.einsum('BF,FT->BT', layer_mask, transform) > 0.0, dtype=layer_mask.dtype, ) seq_length = pooled_seq_length del seq_length return attention_masks @tf.keras.utils.register_keras_serializable(package='Text') class FunnelTransformerEncoder(tf.keras.layers.Layer): """Funnel Transformer-based encoder network. Funnel Transformer Implementation of https://arxiv.org/abs/2006.03236. This implementation utilizes the base framework with Bert (https://arxiv.org/abs/1810.04805). Its output is compatible with `BertEncoder`. Args: vocab_size: The size of the token vocabulary. hidden_size: The size of the transformer hidden layers. num_layers: The number of transformer layers. num_attention_heads: The number of attention heads for each transformer. 
The hidden size must be divisible by the number of attention heads. max_sequence_length: The maximum sequence length that this encoder can consume. If None, max_sequence_length uses the value from sequence length. This determines the variable shape for positional embeddings. type_vocab_size: The number of types that the 'type_ids' input can take. inner_dim: The output dimension of the first Dense layer in a two-layer feedforward network for each transformer. inner_activation: The activation for the first Dense layer in a two-layer feedforward network for each transformer. output_dropout: Dropout probability for the post-attention and output dropout. attention_dropout: The dropout rate to use for the attention layers within the transformer layers. pool_type: Pooling type. Choose from ['max', 'avg', 'truncated_avg']. pool_stride: An int or a list of ints. Pooling stride(s) to compress the sequence length. If set to int, each layer will have the same stride size. If set to list, the number of elements needs to match num_layers. unpool_length: Leading n tokens to be skipped from pooling. initializer: The initialzer to use for all weights in this encoder. output_range: The sequence output range, [0, output_range), by slicing the target sequence of the last transformer layer. `None` means the entire target sequence will attend to the source sequence, which yields the full output. embedding_width: The width of the word embeddings. If the embedding width is not equal to hidden size, embedding parameters will be factorized into two matrices in the shape of ['vocab_size', 'embedding_width'] and ['embedding_width', 'hidden_size'] ('embedding_width' is usually much smaller than 'hidden_size'). embedding_layer: An optional Layer instance which will be called to generate embeddings for the input word IDs. norm_first: Whether to normalize inputs to attention and intermediate dense layers. If set False, output of attention and intermediate dense layers is normalized. This does not apply to ReZero. transformer_cls: str or a keras Layer. This is the base TransformerBlock the funnel encoder relies on. share_rezero: bool. Whether to share ReZero alpha between the attention layer and the ffn layer. This option is specific to ReZero. with_dense_inputs: Whether to accept dense embeddings as the input. """ def __init__( self, vocab_size: int, hidden_size: int = 768, num_layers: int = 12, num_attention_heads: int = 12, max_sequence_length: int = 512, type_vocab_size: int = 16, inner_dim: int = 3072, inner_activation: _Activation = _approx_gelu, output_dropout: float = 0.1, attention_dropout: float = 0.1, pool_type: str = _MAX, pool_stride: Union[int, Sequence[Union[int, float]]] = 2, unpool_length: int = 0, initializer: _Initializer = tf.keras.initializers.TruncatedNormal( stddev=0.02 ), output_range: Optional[int] = None, embedding_width: Optional[int] = None, embedding_layer: Optional[tf.keras.layers.Layer] = None, norm_first: bool = False, transformer_cls: Union[ str, tf.keras.layers.Layer ] = layers.TransformerEncoderBlock, share_rezero: bool = False, append_dense_inputs: bool = False, **kwargs ): super().__init__(**kwargs) if output_range is not None: logging.warning('`output_range` is available as an argument for `call()`.' 
'The `output_range` as __init__ argument is deprecated.') activation = tf.keras.activations.get(inner_activation) initializer = tf.keras.initializers.get(initializer) if embedding_width is None: embedding_width = hidden_size if embedding_layer is None: self._embedding_layer = layers.OnDeviceEmbedding( vocab_size=vocab_size, embedding_width=embedding_width, initializer=tf_utils.clone_initializer(initializer), name='word_embeddings') else: self._embedding_layer = embedding_layer self._position_embedding_layer = layers.PositionEmbedding( initializer=tf_utils.clone_initializer(initializer), max_length=max_sequence_length, name='position_embedding') self._type_embedding_layer = layers.OnDeviceEmbedding( vocab_size=type_vocab_size, embedding_width=embedding_width, initializer=tf_utils.clone_initializer(initializer), use_one_hot=True, name='type_embeddings') self._embedding_norm_layer = tf.keras.layers.LayerNormalization( name='embeddings/layer_norm', axis=-1, epsilon=1e-12, dtype=tf.float32) self._embedding_dropout = tf.keras.layers.Dropout( rate=output_dropout, name='embedding_dropout') # We project the 'embedding' output to 'hidden_size' if it is not already # 'hidden_size'. self._embedding_projection = None if embedding_width != hidden_size: self._embedding_projection = tf.keras.layers.EinsumDense( '...x,xy->...y', output_shape=hidden_size, bias_axes='y', kernel_initializer=tf_utils.clone_initializer(initializer), name='embedding_projection') self._transformer_layers = [] self._attention_mask_layer = layers.SelfAttentionMask( name='self_attention_mask') # Will raise an error if the string is not supported. if isinstance(transformer_cls, str): transformer_cls = _str2transformer_cls[transformer_cls] self._num_layers = num_layers for i in range(num_layers): layer = transformer_cls( num_attention_heads=num_attention_heads, intermediate_size=inner_dim, inner_dim=inner_dim, intermediate_activation=inner_activation, inner_activation=inner_activation, output_dropout=output_dropout, attention_dropout=attention_dropout, norm_first=norm_first, kernel_initializer=tf_utils.clone_initializer(initializer), share_rezero=share_rezero, name='transformer/layer_%d' % i) self._transformer_layers.append(layer) self._pooler_layer = tf.keras.layers.Dense( units=hidden_size, activation='tanh', kernel_initializer=tf_utils.clone_initializer(initializer), name='pooler_transform') if isinstance(pool_stride, int): # TODO(b/197133196): Pooling layer can be shared. pool_strides = [pool_stride] * num_layers else: if len(pool_stride) != num_layers: raise ValueError('Lengths of pool_stride and num_layers are not equal.') pool_strides = pool_stride is_fractional_pooling = False in [ (1.0 * pool_stride).is_integer() for pool_stride in pool_strides ] if is_fractional_pooling and pool_type in [_MAX, _AVG]: raise ValueError( 'Fractional pooling is only supported for' ' `pool_type`=`truncated_average`' ) # TODO(crickwu): explore tf.keras.layers.serialize method. if pool_type == _MAX: pool_cls = tf.keras.layers.MaxPooling1D elif pool_type == _AVG: pool_cls = tf.keras.layers.AveragePooling1D elif pool_type == _TRUNCATED_AVG: # TODO(b/203665205): unpool_length should be implemented. 
if unpool_length != 0: raise ValueError('unpool_length is not supported by truncated_avg now.') else: raise ValueError('pool_type not supported.') if pool_type in (_MAX, _AVG): self._att_input_pool_layers = [] for layer_pool_stride in pool_strides: att_input_pool_layer = pool_cls( pool_size=layer_pool_stride, strides=layer_pool_stride, padding='same', name='att_input_pool_layer') self._att_input_pool_layers.append(att_input_pool_layer) self._max_sequence_length = max_sequence_length self._pool_strides = pool_strides # This is a list here. self._unpool_length = unpool_length self._pool_type = pool_type self._append_dense_inputs = append_dense_inputs self._config = { 'vocab_size': vocab_size, 'hidden_size': hidden_size, 'num_layers': num_layers, 'num_attention_heads': num_attention_heads, 'max_sequence_length': max_sequence_length, 'type_vocab_size': type_vocab_size, 'inner_dim': inner_dim, 'inner_activation': tf.keras.activations.serialize(activation), 'output_dropout': output_dropout, 'attention_dropout': attention_dropout, 'initializer': tf.keras.initializers.serialize(initializer), 'output_range': output_range, 'embedding_width': embedding_width, 'embedding_layer': embedding_layer, 'norm_first': norm_first, 'pool_type': pool_type, 'pool_stride': pool_stride, 'unpool_length': unpool_length, 'transformer_cls': _transformer_cls2str.get( transformer_cls, str(transformer_cls) ), } self.inputs = dict( input_word_ids=tf.keras.Input(shape=(None,), dtype=tf.int32), input_mask=tf.keras.Input(shape=(None,), dtype=tf.int32), input_type_ids=tf.keras.Input(shape=(None,), dtype=tf.int32)) def call(self, inputs, output_range: Optional[tf.Tensor] = None): # inputs are [word_ids, mask, type_ids] word_embeddings = None if isinstance(inputs, (list, tuple)): logging.warning('List inputs to %s are discouraged.', self.__class__) if len(inputs) == 3: word_ids, mask, type_ids = inputs dense_inputs = None dense_mask = None dense_type_ids = None elif len(inputs) == 6: word_ids, mask, type_ids, dense_inputs, dense_mask, dense_type_ids = ( inputs ) else: raise ValueError( 'Unexpected inputs to %s with length at %d.' % (self.__class__, len(inputs)) ) elif isinstance(inputs, dict): word_ids = inputs.get('input_word_ids') mask = inputs.get('input_mask') type_ids = inputs.get('input_type_ids') word_embeddings = inputs.get('input_word_embeddings', None) dense_inputs = inputs.get('dense_inputs', None) dense_mask = inputs.get('dense_mask', None) dense_type_ids = inputs.get('dense_type_ids', None) else: raise ValueError('Unexpected inputs type to %s.' % self.__class__) if word_embeddings is None: word_embeddings = self._embedding_layer(word_ids) if dense_inputs is not None: # Allow concatenation of the dense embeddings at sequence end if requested # and `unpool_length`` is set as zero if self._append_dense_inputs: if self._unpool_length != 0: raise ValueError( 'unpool_length is not supported by append_dense_inputs now.' ) word_embeddings = tf.concat([word_embeddings, dense_inputs], axis=1) type_ids = tf.concat([type_ids, dense_type_ids], axis=1) mask = tf.concat([mask, dense_mask], axis=1) else: # Concat the dense embeddings at sequence begin so unpool_len can # control embedding not being pooled. 
word_embeddings = tf.concat([dense_inputs, word_embeddings], axis=1) type_ids = tf.concat([dense_type_ids, type_ids], axis=1) mask = tf.concat([dense_mask, mask], axis=1) # absolute position embeddings position_embeddings = self._position_embedding_layer(word_embeddings) type_embeddings = self._type_embedding_layer(type_ids) embeddings = tf.keras.layers.add( [word_embeddings, position_embeddings, type_embeddings]) embeddings = self._embedding_norm_layer(embeddings) embeddings = self._embedding_dropout(embeddings) if self._embedding_projection is not None: embeddings = self._embedding_projection(embeddings) attention_mask = self._attention_mask_layer(embeddings, mask) encoder_outputs = [] x = embeddings # TODO(b/195972228): attention_mask can be co-generated with pooling. if self._pool_type in (_MAX, _AVG): attention_mask = _pool_and_concat( attention_mask, unpool_length=self._unpool_length, strides=self._pool_strides[0], axes=[1]) for i, layer in enumerate(self._transformer_layers): # Bypass no pooling cases. if self._pool_strides[i] == 1: x = layer([x, x, attention_mask]) else: # Pools layer for compressing the query length. pooled_inputs = self._att_input_pool_layers[i]( x[:, self._unpool_length:, :]) query_inputs = tf.concat( values=(tf.cast( x[:, :self._unpool_length, :], dtype=pooled_inputs.dtype), pooled_inputs), axis=1) x = layer([query_inputs, x, attention_mask], output_range=output_range if i == self._num_layers - 1 else None) # Pools the corresponding attention_mask. if i < len(self._transformer_layers) - 1: attention_mask = _pool_and_concat( attention_mask, unpool_length=self._unpool_length, strides=[self._pool_strides[i + 1], self._pool_strides[i]], axes=[1, 2]) encoder_outputs.append(x) elif self._pool_type == _TRUNCATED_AVG: # Compute the attention masks and pooling transforms. # Note we do not compute this in __init__ due to inference converter issue # b/215659399. pooling_transforms = _create_truncated_avg_transforms( self._max_sequence_length, self._pool_strides) attention_masks = _create_truncated_avg_masks(mask, self._pool_strides, pooling_transforms) for i, layer in enumerate(self._transformer_layers): attention_mask = attention_masks[i] transformer_output_range = None if i == self._num_layers - 1: transformer_output_range = output_range # Bypass no pooling cases. if self._pool_strides[i] == 1: x = layer([x, x, attention_mask], output_range=transformer_output_range) else: pooled_inputs = tf.einsum( 'BFD,FT->BTD', tf.cast(x[:, self._unpool_length:, :], _get_policy_dtype() ), # extra casting for faster mixed computation. 
pooling_transforms[i]) query_inputs = tf.concat( values=(tf.cast( x[:, :self._unpool_length, :], dtype=pooled_inputs.dtype), pooled_inputs), axis=1) x = layer([query_inputs, x, attention_mask], output_range=transformer_output_range) encoder_outputs.append(x) last_encoder_output = encoder_outputs[-1] first_token_tensor = last_encoder_output[:, 0, :] pooled_output = self._pooler_layer(first_token_tensor) return dict( word_embeddings=word_embeddings, embedding_output=embeddings, sequence_output=encoder_outputs[-1], pooled_output=pooled_output, encoder_outputs=encoder_outputs) def get_embedding_table(self): return self._embedding_layer.embeddings def get_embedding_layer(self): return self._embedding_layer def get_config(self): return dict(self._config) @property def transformer_layers(self): """List of Transformer layers in the encoder.""" return self._transformer_layers @property def pooler_layer(self): """The pooler dense layer after the transformer layers.""" return self._pooler_layer @classmethod def from_config(cls, config, custom_objects=None): if 'embedding_layer' in config and config['embedding_layer'] is not None: warn_string = ( 'You are reloading a model that was saved with a ' 'potentially-shared embedding layer object. If you contine to ' 'train this model, the embedding layer will no longer be shared. ' 'To work around this, load the model outside of the Keras API.') print('WARNING: ' + warn_string) logging.warn(warn_string) return cls(**config)
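# --- Editor's note: hedged usage sketch, not part of the original module.
# It shows how FunnelTransformerEncoder compresses the query length: with
# pool_stride=2, unpool_length=0 and three layers, the per-block query length
# goes 16 -> 8 -> 4 -> 2, so `sequence_output` has length 2. Constructor
# arguments and input keys come from the class definition above; the concrete
# sizes are illustrative assumptions. numpy and tensorflow are already
# imported at the top of this module.
def _example_funnel_encoder_invocation():
  encoder = FunnelTransformerEncoder(
      vocab_size=100,
      hidden_size=32,
      num_layers=3,
      num_attention_heads=2,
      max_sequence_length=16,
      pool_type='avg',
      pool_stride=2)
  inputs = dict(
      input_word_ids=np.random.randint(100, size=(2, 16)),
      input_mask=np.ones((2, 16), dtype=np.int32),
      input_type_ids=np.zeros((2, 16), dtype=np.int32))
  outputs = encoder(inputs)
  # sequence_output: [batch, pooled_length, hidden] == [2, 2, 32];
  # pooled_output: [batch, hidden] == [2, 32].
  return outputs['sequence_output'].shape, outputs['pooled_output'].shape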
23,970
37.170382
80
py
models
models-master/official/nlp/modeling/networks/albert_encoder_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for ALBERT transformer-based text encoder network.""" from absl.testing import parameterized import numpy as np import tensorflow as tf from official.nlp.modeling.networks import albert_encoder class AlbertEncoderTest(tf.test.TestCase, parameterized.TestCase): def tearDown(self): super(AlbertEncoderTest, self).tearDown() tf.keras.mixed_precision.set_global_policy("float32") @parameterized.named_parameters( dict(testcase_name="default", expected_dtype=tf.float32), dict(testcase_name="with_float16_dtype", expected_dtype=tf.float16), ) def test_network_creation(self, expected_dtype): hidden_size = 32 sequence_length = 21 kwargs = dict( vocab_size=100, hidden_size=hidden_size, num_attention_heads=2, num_layers=3) if expected_dtype == tf.float16: tf.keras.mixed_precision.set_global_policy("mixed_float16") # Create a small TransformerEncoder for testing. test_network = albert_encoder.AlbertEncoder(**kwargs) # Create the inputs (note that the first dimension is implicit). word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) data, pooled = test_network([word_ids, mask, type_ids]) expected_data_shape = [None, sequence_length, hidden_size] expected_pooled_shape = [None, hidden_size] self.assertAllEqual(expected_data_shape, data.shape.as_list()) self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list()) # If float_dtype is set to float16, the data output is float32 (from a layer # norm) and pool output should be float16. self.assertEqual(tf.float32, data.dtype) self.assertEqual(expected_dtype, pooled.dtype) # ALBERT has additonal 'embedding_hidden_mapping_in' weights and # it shares transformer weights. self.assertNotEmpty( [x for x in test_network.weights if "embedding_projection/" in x.name]) self.assertNotEmpty( [x for x in test_network.weights if "transformer/" in x.name]) self.assertEmpty( [x for x in test_network.weights if "transformer/layer" in x.name]) def test_network_invocation(self): hidden_size = 32 sequence_length = 21 vocab_size = 57 num_types = 7 num_layers = 3 # Create a small TransformerEncoder for testing. test_network = albert_encoder.AlbertEncoder( vocab_size=vocab_size, embedding_width=8, hidden_size=hidden_size, num_attention_heads=2, num_layers=num_layers, type_vocab_size=num_types) # Create the inputs (note that the first dimension is implicit). word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) data, pooled = test_network([word_ids, mask, type_ids]) # Create a model based off of this network: model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled]) # Invoke the model. 
We can't validate the output data here (the model is too # complex) but this will catch structural runtime errors. batch_size = 3 word_id_data = np.random.randint( vocab_size, size=(batch_size, sequence_length)) mask_data = np.random.randint(2, size=(batch_size, sequence_length)) type_id_data = np.random.randint( num_types, size=(batch_size, sequence_length)) list_outputs = model.predict([word_id_data, mask_data, type_id_data]) # Creates a TransformerEncoder with max_sequence_length != sequence_length max_sequence_length = 128 test_network = albert_encoder.AlbertEncoder( vocab_size=vocab_size, embedding_width=8, hidden_size=hidden_size, max_sequence_length=max_sequence_length, num_attention_heads=2, num_layers=num_layers, type_vocab_size=num_types) model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled]) _ = model.predict([word_id_data, mask_data, type_id_data]) # Tests dictionary outputs. test_network_dict = albert_encoder.AlbertEncoder( vocab_size=vocab_size, embedding_width=8, hidden_size=hidden_size, max_sequence_length=max_sequence_length, num_attention_heads=2, num_layers=num_layers, type_vocab_size=num_types, dict_outputs=True) _ = test_network_dict([word_ids, mask, type_ids]) test_network_dict.set_weights(test_network.get_weights()) list_outputs = test_network([word_id_data, mask_data, type_id_data]) dict_outputs = test_network_dict( dict( input_word_ids=word_id_data, input_mask=mask_data, input_type_ids=type_id_data)) self.assertAllEqual(list_outputs[0], dict_outputs["sequence_output"]) self.assertAllEqual(list_outputs[1], dict_outputs["pooled_output"]) self.assertLen(dict_outputs["pooled_output"], num_layers) def test_serialize_deserialize(self): tf.keras.mixed_precision.set_global_policy("mixed_float16") # Create a network object that sets all of its config options. kwargs = dict( vocab_size=100, embedding_width=8, hidden_size=32, num_layers=3, num_attention_heads=2, max_sequence_length=21, type_vocab_size=12, intermediate_size=1223, activation="relu", dropout_rate=0.05, attention_dropout_rate=0.22, initializer="glorot_uniform") network = albert_encoder.AlbertEncoder(**kwargs) expected_config = dict(kwargs) expected_config["activation"] = tf.keras.activations.serialize( tf.keras.activations.get(expected_config["activation"])) expected_config["initializer"] = tf.keras.initializers.serialize( tf.keras.initializers.get(expected_config["initializer"])) self.assertEqual(network.get_config(), expected_config) # Create another network object from the first object's config. new_network = ( albert_encoder.AlbertEncoder.from_config( network.get_config())) # Validate that the config can be forced to JSON. _ = new_network.to_json() # If the serialization was successful, the new config should match the old. self.assertAllEqual(network.get_config(), new_network.get_config()) if __name__ == "__main__": tf.test.main()
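# --- Editor's note: hedged usage sketch, not part of the original test file.
# A condensed version of the invocation test above: list inputs give
# (sequence_output, pooled_output); with dict_outputs=True, dict inputs give
# the same tensors under named keys. Sizes are illustrative assumptions.
import numpy as np

from official.nlp.modeling.networks import albert_encoder


def _example_albert_encoder_invocation():
  encoder = albert_encoder.AlbertEncoder(
      vocab_size=57,
      embedding_width=8,
      hidden_size=32,
      num_attention_heads=2,
      num_layers=3,
      type_vocab_size=7)
  word_ids = np.random.randint(57, size=(3, 21))
  mask = np.random.randint(2, size=(3, 21))
  type_ids = np.random.randint(7, size=(3, 21))
  sequence_output, pooled_output = encoder([word_ids, mask, type_ids])
  # sequence_output: [3, 21, 32]; pooled_output: [3, 32].
  return sequence_output.shape, pooled_output.shape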
7,107
38.270718
80
py
models
models-master/official/nlp/modeling/networks/span_labeling.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Span labeling network.""" # pylint: disable=g-classes-have-attributes import collections import tensorflow as tf from official.modeling import tf_utils def _apply_paragraph_mask(logits, paragraph_mask): """Applies a position mask to calculated logits.""" masked_logits = logits * (paragraph_mask) - 1e30 * (1 - paragraph_mask) return tf.nn.log_softmax(masked_logits, -1), masked_logits @tf.keras.utils.register_keras_serializable(package='Text') class SpanLabeling(tf.keras.Model): """Span labeling network head for BERT modeling. This network implements a simple single-span labeler based on a dense layer. *Note* that the network is constructed by [Keras Functional API](https://keras.io/guides/functional_api/). Args: input_width: The innermost dimension of the input tensor to this network. activation: The activation, if any, for the dense layer in this network. initializer: The initializer for the dense layer in this network. Defaults to a Glorot uniform initializer. output: The output style for this network. Can be either `logits` or `predictions`. """ def __init__(self, input_width, activation=None, initializer='glorot_uniform', output='logits', **kwargs): sequence_data = tf.keras.layers.Input( shape=(None, input_width), name='sequence_data', dtype=tf.float32) intermediate_logits = tf.keras.layers.Dense( 2, # This layer predicts start location and end location. activation=activation, kernel_initializer=initializer, name='predictions/transform/logits')( sequence_data) start_logits, end_logits = self._split_output_tensor(intermediate_logits) start_predictions = tf.keras.layers.Activation(tf.nn.log_softmax)( start_logits) end_predictions = tf.keras.layers.Activation(tf.nn.log_softmax)(end_logits) if output == 'logits': output_tensors = [start_logits, end_logits] elif output == 'predictions': output_tensors = [start_predictions, end_predictions] else: raise ValueError( ('Unknown `output` value "%s". `output` can be either "logits" or ' '"predictions"') % output) # b/164516224 # Once we've created the network using the Functional API, we call # super().__init__ as though we were invoking the Functional API Model # constructor, resulting in this object having all the properties of a model # created using the Functional API. Once super().__init__ is called, we # can assign attributes to `self` - note that all `self` assignments are # below this line. super().__init__( inputs=[sequence_data], outputs=output_tensors, **kwargs) config_dict = { 'input_width': input_width, 'activation': activation, 'initializer': initializer, 'output': output, } # We are storing the config dict as a namedtuple here to ensure checkpoint # compatibility with an earlier version of this model which did not track # the config dict attribute. TF does not track immutable attrs which # do not contain Trackables, so by creating a config namedtuple instead of # a dict we avoid tracking it. 
config_cls = collections.namedtuple('Config', config_dict.keys()) self._config = config_cls(**config_dict) self.start_logits = start_logits self.end_logits = end_logits def _split_output_tensor(self, tensor): transposed_tensor = tf.transpose(tensor, [2, 0, 1]) return tf.unstack(transposed_tensor) def get_config(self): return dict(self._config._asdict()) @classmethod def from_config(cls, config, custom_objects=None): return cls(**config) class XLNetSpanLabeling(tf.keras.layers.Layer): """Span labeling network head for XLNet on SQuAD2.0. This networks implements a span-labeler based on dense layers and question possibility classification. This is the complex version seen in the original XLNet implementation. This applies a dense layer to the input sequence data to predict the start positions, and then uses either the true start positions (if training) or beam search to predict the end positions. **Note: `compute_with_beam_search` will not work with the Functional API (https://www.tensorflow.org/guide/keras/functional). Args: input_width: The innermost dimension of the input tensor to this network. start_n_top: Beam size for span start. end_n_top: Beam size for span end. activation: The activation, if any, for the dense layer in this network. dropout_rate: The dropout rate used for answer classification. initializer: The initializer for the dense layer in this network. Defaults to a Glorot uniform initializer. """ def __init__(self, input_width, start_n_top=5, end_n_top=5, activation='tanh', dropout_rate=0., initializer='glorot_uniform', **kwargs): super().__init__(**kwargs) self._config = { 'input_width': input_width, 'activation': activation, 'initializer': initializer, 'start_n_top': start_n_top, 'end_n_top': end_n_top, 'dropout_rate': dropout_rate, } if start_n_top <= 1: raise ValueError('`start_n_top` must be greater than 1.') self._start_n_top = start_n_top self._end_n_top = end_n_top self.start_logits_dense = tf.keras.layers.Dense( units=1, kernel_initializer=tf_utils.clone_initializer(initializer), name='predictions/transform/start_logits') self.end_logits_inner_dense = tf.keras.layers.Dense( units=input_width, kernel_initializer=tf_utils.clone_initializer(initializer), activation=activation, name='predictions/transform/end_logits/inner') self.end_logits_layer_norm = tf.keras.layers.LayerNormalization( axis=-1, epsilon=1e-12, name='predictions/transform/end_logits/layernorm') self.end_logits_output_dense = tf.keras.layers.Dense( units=1, kernel_initializer=tf_utils.clone_initializer(initializer), name='predictions/transform/end_logits/output') self.answer_logits_inner = tf.keras.layers.Dense( units=input_width, kernel_initializer=tf_utils.clone_initializer(initializer), activation=activation, name='predictions/transform/answer_logits/inner') self.answer_logits_dropout = tf.keras.layers.Dropout(rate=dropout_rate) self.answer_logits_output = tf.keras.layers.Dense( units=1, kernel_initializer=tf_utils.clone_initializer(initializer), use_bias=False, name='predictions/transform/answer_logits/output') def end_logits(self, inputs): """Computes the end logits. Input shapes into the inner, layer norm, output layers should match. During training, inputs shape should be [batch_size, seq_length, input_width]. During inference, input shapes should be [batch_size, seq_length, start_n_top, input_width]. Args: inputs: The input for end logits. Returns: Calculated end logits. 
""" if len(tf.shape(inputs)) == 3: # inputs: [B, S, H] -> [B, S, 1, H] inputs = tf.expand_dims(inputs, axis=2) end_logits = self.end_logits_inner_dense(inputs) end_logits = self.end_logits_layer_norm(end_logits) end_logits = self.end_logits_output_dense(end_logits) end_logits = tf.squeeze(end_logits) return end_logits def call(self, sequence_data, class_index, paragraph_mask=None, start_positions=None, training=False): """Implements call(). Einsum glossary: - b: the batch size. - l: the sequence length. - h: the hidden size, or input width. - k: the start/end top n. Args: sequence_data: The input sequence data of shape `(batch_size, seq_length, input_width)`. class_index: The class indices of the inputs of shape `(batch_size,)`. paragraph_mask: Invalid position mask such as query and special symbols (e.g. PAD, SEP, CLS) of shape `(batch_size,)`. start_positions: The start positions of each example of shape `(batch_size,)`. training: Whether or not this is the training phase. Returns: A dictionary with the keys `start_predictions`, `end_predictions`, `start_logits`, `end_logits`. If inference, then `start_top_predictions`, `start_top_index`, `end_top_predictions`, `end_top_index` are also included. """ paragraph_mask = tf.cast(paragraph_mask, dtype=sequence_data.dtype) class_index = tf.reshape(class_index, [-1]) seq_length = tf.shape(sequence_data)[1] start_logits = self.start_logits_dense(sequence_data) start_logits = tf.squeeze(start_logits, -1) start_predictions, masked_start_logits = _apply_paragraph_mask( start_logits, paragraph_mask) compute_with_beam_search = not training or start_positions is None if compute_with_beam_search: # Compute end logits using beam search. start_top_predictions, start_top_index = tf.nn.top_k( start_predictions, k=self._start_n_top) start_index = tf.one_hot( start_top_index, depth=seq_length, axis=-1, dtype=tf.float32) # start_index: [batch_size, end_n_top, seq_length] start_features = tf.einsum('blh,bkl->bkh', sequence_data, start_index) start_features = tf.tile(start_features[:, None, :, :], [1, seq_length, 1, 1]) # start_features: [batch_size, seq_length, end_n_top, input_width] end_input = tf.tile(sequence_data[:, :, None], [1, 1, self._start_n_top, 1]) end_input = tf.concat([end_input, start_features], axis=-1) # end_input: [batch_size, seq_length, end_n_top, 2*input_width] paragraph_mask = paragraph_mask[:, None, :] end_logits = self.end_logits(end_input) # Note: this will fail if start_n_top is not >= 1. 
end_logits = tf.transpose(end_logits, [0, 2, 1]) else: start_positions = tf.reshape(start_positions, [-1]) start_index = tf.one_hot( start_positions, depth=seq_length, axis=-1, dtype=tf.float32) # start_index: [batch_size, seq_length] start_features = tf.einsum('blh,bl->bh', sequence_data, start_index) start_features = tf.tile(start_features[:, None, :], [1, seq_length, 1]) # start_features: [batch_size, seq_length, input_width] end_input = tf.concat([sequence_data, start_features], axis=-1) # end_input: [batch_size, seq_length, 2*input_width] end_logits = self.end_logits(end_input) end_predictions, masked_end_logits = _apply_paragraph_mask( end_logits, paragraph_mask) output_dict = dict( start_predictions=start_predictions, end_predictions=end_predictions, start_logits=masked_start_logits, end_logits=masked_end_logits) if not training: end_top_predictions, end_top_index = tf.nn.top_k( end_predictions, k=self._end_n_top) end_top_predictions = tf.reshape( end_top_predictions, [-1, self._start_n_top * self._end_n_top]) end_top_index = tf.reshape( end_top_index, [-1, self._start_n_top * self._end_n_top]) output_dict['start_top_predictions'] = start_top_predictions output_dict['start_top_index'] = start_top_index output_dict['end_top_predictions'] = end_top_predictions output_dict['end_top_index'] = end_top_index # get the representation of CLS class_index = tf.one_hot(class_index, seq_length, axis=-1, dtype=tf.float32) class_feature = tf.einsum('blh,bl->bh', sequence_data, class_index) # get the representation of START start_p = tf.nn.softmax(masked_start_logits, axis=-1) start_feature = tf.einsum('blh,bl->bh', sequence_data, start_p) answer_feature = tf.concat([start_feature, class_feature], -1) answer_feature = self.answer_logits_inner(answer_feature) answer_feature = self.answer_logits_dropout(answer_feature) class_logits = self.answer_logits_output(answer_feature) class_logits = tf.squeeze(class_logits, -1) output_dict['class_logits'] = class_logits return output_dict def get_config(self): return self._config @classmethod def from_config(cls, config, custom_objects=None): return cls(**config)
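# --- Editor's note: hedged usage sketch, not part of the original module.
# Minimal use of the simple SpanLabeling head defined above: it consumes
# encoder sequence output of shape [batch, seq_length, input_width] and emits
# per-position start/end logits (log-softmax scores when output='predictions').
# Sizes are illustrative assumptions; numpy is imported here because this
# module does not import it above.
import numpy as np


def _example_span_labeling():
  head = SpanLabeling(input_width=16, output='logits')
  sequence_data = np.random.uniform(size=(2, 10, 16)).astype(np.float32)
  start_logits, end_logits = head(sequence_data)
  # Both tensors have shape [batch, seq_length] == [2, 10].
  return start_logits.shape, end_logits.shape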
13,195
37.697947
80
py
models
models-master/official/nlp/modeling/networks/encoder_scaffold_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for EncoderScaffold network.""" from absl.testing import parameterized import numpy as np import tensorflow as tf from official.modeling import activations from official.nlp.modeling import layers from official.nlp.modeling.networks import encoder_scaffold # Test class that wraps a standard transformer layer. If this layer is called # at any point, the list passed to the config object will be filled with a # boolean 'True'. We register this class as a Keras serializable so we can # test serialization below. @tf.keras.utils.register_keras_serializable(package="TestOnly") class ValidatedTransformerLayer(layers.Transformer): def __init__(self, call_list, call_class=None, **kwargs): super(ValidatedTransformerLayer, self).__init__(**kwargs) self.list = call_list self.call_class = call_class def call(self, inputs): self.list.append(True) return super(ValidatedTransformerLayer, self).call(inputs) def get_config(self): config = super(ValidatedTransformerLayer, self).get_config() config["call_list"] = self.list config["call_class"] = tf.keras.utils.get_registered_name(self.call_class) return config # Test class that wraps a standard self attention mask layer. # If this layer is called at any point, the list passed to the config # object will be filled with a # boolean 'True'. We register this class as a Keras serializable so we can # test serialization below. 
@tf.keras.utils.register_keras_serializable(package="TestOnly") class ValidatedMaskLayer(layers.SelfAttentionMask): def __init__(self, call_list, call_class=None, **kwargs): super(ValidatedMaskLayer, self).__init__(**kwargs) self.list = call_list self.call_class = call_class def call(self, inputs, mask): self.list.append(True) return super(ValidatedMaskLayer, self).call(inputs, mask) def get_config(self): config = super(ValidatedMaskLayer, self).get_config() config["call_list"] = self.list config["call_class"] = tf.keras.utils.get_registered_name(self.call_class) return config @tf.keras.utils.register_keras_serializable(package="TestLayerOnly") class TestLayer(tf.keras.layers.Layer): pass class EncoderScaffoldLayerClassTest(tf.test.TestCase, parameterized.TestCase): def tearDown(self): super(EncoderScaffoldLayerClassTest, self).tearDown() tf.keras.mixed_precision.set_global_policy("float32") @parameterized.named_parameters( dict(testcase_name="only_final_output", return_all_layer_outputs=False), dict(testcase_name="all_layer_outputs", return_all_layer_outputs=True)) def test_network_creation(self, return_all_layer_outputs): hidden_size = 32 sequence_length = 21 num_hidden_instances = 3 embedding_cfg = { "vocab_size": 100, "type_vocab_size": 16, "hidden_size": hidden_size, "seq_length": sequence_length, "max_seq_length": sequence_length, "initializer": tf.keras.initializers.TruncatedNormal(stddev=0.02), "dropout_rate": 0.1, } call_list = [] hidden_cfg = { "num_attention_heads": 2, "intermediate_size": 3072, "intermediate_activation": activations.gelu, "dropout_rate": 0.1, "attention_dropout_rate": 0.1, "kernel_initializer": tf.keras.initializers.TruncatedNormal(stddev=0.02), "call_list": call_list } mask_call_list = [] mask_cfg = { "call_list": mask_call_list } # Create a small EncoderScaffold for testing. test_network = encoder_scaffold.EncoderScaffold( num_hidden_instances=num_hidden_instances, pooled_output_dim=hidden_size, pooler_layer_initializer=tf.keras.initializers.TruncatedNormal( stddev=0.02), hidden_cls=ValidatedTransformerLayer, hidden_cfg=hidden_cfg, mask_cls=ValidatedMaskLayer, mask_cfg=mask_cfg, embedding_cfg=embedding_cfg, layer_norm_before_pooling=True, return_all_layer_outputs=return_all_layer_outputs) # Create the inputs (note that the first dimension is implicit). word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) output_data, pooled = test_network([word_ids, mask, type_ids]) if return_all_layer_outputs: self.assertIsInstance(output_data, list) self.assertLen(output_data, num_hidden_instances) data = output_data[-1] else: data = output_data self.assertIsInstance(test_network.hidden_layers, list) self.assertLen(test_network.hidden_layers, num_hidden_instances) self.assertIsInstance(test_network.pooler_layer, tf.keras.layers.Dense) expected_data_shape = [None, sequence_length, hidden_size] expected_pooled_shape = [None, hidden_size] self.assertAllEqual(expected_data_shape, data.shape.as_list()) self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list()) # The default output dtype is float32. self.assertAllEqual(tf.float32, data.dtype) self.assertAllEqual(tf.float32, pooled.dtype) # If call_list[0] exists and is True, the passed layer class was # instantiated from the given config properly. 
self.assertNotEmpty(call_list) self.assertTrue(call_list[0], "The passed layer class wasn't instantiated.") self.assertTrue(hasattr(test_network, "_output_layer_norm")) def test_network_creation_with_float16_dtype(self): tf.keras.mixed_precision.set_global_policy("mixed_float16") hidden_size = 32 sequence_length = 21 embedding_cfg = { "vocab_size": 100, "type_vocab_size": 16, "hidden_size": hidden_size, "seq_length": sequence_length, "max_seq_length": sequence_length, "initializer": tf.keras.initializers.TruncatedNormal(stddev=0.02), "dropout_rate": 0.1, } hidden_cfg = { "num_attention_heads": 2, "intermediate_size": 3072, "intermediate_activation": activations.gelu, "dropout_rate": 0.1, "attention_dropout_rate": 0.1, "kernel_initializer": tf.keras.initializers.TruncatedNormal(stddev=0.02), } # Create a small EncoderScaffold for testing. test_network = encoder_scaffold.EncoderScaffold( num_hidden_instances=3, pooled_output_dim=hidden_size, pooler_layer_initializer=tf.keras.initializers.TruncatedNormal( stddev=0.02), hidden_cfg=hidden_cfg, embedding_cfg=embedding_cfg) # Create the inputs (note that the first dimension is implicit). word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) data, pooled = test_network([word_ids, mask, type_ids]) expected_data_shape = [None, sequence_length, hidden_size] expected_pooled_shape = [None, hidden_size] self.assertAllEqual(expected_data_shape, data.shape.as_list()) self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list()) # If float_dtype is set to float16, the data output is float32 (from a layer # norm) and pool output should be float16. self.assertAllEqual(tf.float32, data.dtype) self.assertAllEqual(tf.float16, pooled.dtype) def test_network_invocation(self): hidden_size = 32 sequence_length = 21 vocab_size = 57 num_types = 7 embedding_cfg = { "vocab_size": vocab_size, "type_vocab_size": num_types, "hidden_size": hidden_size, "seq_length": sequence_length, "max_seq_length": sequence_length, "initializer": tf.keras.initializers.TruncatedNormal(stddev=0.02), "dropout_rate": 0.1, } hidden_cfg = { "num_attention_heads": 2, "intermediate_size": 3072, "intermediate_activation": activations.gelu, "dropout_rate": 0.1, "attention_dropout_rate": 0.1, "kernel_initializer": tf.keras.initializers.TruncatedNormal(stddev=0.02), } # Create a small EncoderScaffold for testing. test_network = encoder_scaffold.EncoderScaffold( num_hidden_instances=3, pooled_output_dim=hidden_size, pooler_layer_initializer=tf.keras.initializers.TruncatedNormal( stddev=0.02), hidden_cfg=hidden_cfg, embedding_cfg=embedding_cfg, dict_outputs=True) # Create the inputs (note that the first dimension is implicit). word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) outputs = test_network([word_ids, mask, type_ids]) # Create a model based off of this network: model = tf.keras.Model([word_ids, mask, type_ids], outputs) # Invoke the model. We can't validate the output data here (the model is too # complex) but this will catch structural runtime errors. 
batch_size = 3 word_id_data = np.random.randint( vocab_size, size=(batch_size, sequence_length)) mask_data = np.random.randint(2, size=(batch_size, sequence_length)) type_id_data = np.random.randint( num_types, size=(batch_size, sequence_length)) preds = model.predict([word_id_data, mask_data, type_id_data]) self.assertEqual(preds["pooled_output"].shape, (3, hidden_size)) # Creates a EncoderScaffold with max_sequence_length != sequence_length num_types = 7 embedding_cfg = { "vocab_size": vocab_size, "type_vocab_size": num_types, "hidden_size": hidden_size, "seq_length": sequence_length, "max_seq_length": sequence_length * 2, "initializer": tf.keras.initializers.TruncatedNormal(stddev=0.02), "dropout_rate": 0.1, } hidden_cfg = { "num_attention_heads": 2, "intermediate_size": 3072, "intermediate_activation": activations.gelu, "dropout_rate": 0.1, "attention_dropout_rate": 0.1, "kernel_initializer": tf.keras.initializers.TruncatedNormal(stddev=0.02), } # Create a small EncoderScaffold for testing. test_network = encoder_scaffold.EncoderScaffold( num_hidden_instances=3, pooled_output_dim=hidden_size, pooler_layer_initializer=tf.keras.initializers.TruncatedNormal( stddev=0.02), hidden_cfg=hidden_cfg, embedding_cfg=embedding_cfg) outputs = test_network([word_ids, mask, type_ids]) model = tf.keras.Model([word_ids, mask, type_ids], outputs) _ = model.predict([word_id_data, mask_data, type_id_data]) def test_serialize_deserialize(self): # Create a network object that sets all of its config options. hidden_size = 32 sequence_length = 21 embedding_cfg = { "vocab_size": 100, "type_vocab_size": 16, "hidden_size": hidden_size, "seq_length": sequence_length, "max_seq_length": sequence_length, "initializer": tf.keras.initializers.TruncatedNormal(stddev=0.02), "dropout_rate": 0.1, } hidden_cfg = { "num_attention_heads": 2, "intermediate_size": 3072, "intermediate_activation": activations.gelu, "dropout_rate": 0.1, "attention_dropout_rate": 0.1, "kernel_initializer": tf.keras.initializers.TruncatedNormal(stddev=0.02), } # Create a small EncoderScaffold for testing. network = encoder_scaffold.EncoderScaffold( num_hidden_instances=3, pooled_output_dim=hidden_size, pooler_layer_initializer=tf.keras.initializers.TruncatedNormal( stddev=0.02), hidden_cfg=hidden_cfg, embedding_cfg=embedding_cfg) # Create another network object from the first object's config. new_network = encoder_scaffold.EncoderScaffold.from_config( network.get_config()) # Validate that the config can be forced to JSON. _ = new_network.to_json() # If the serialization was successful, the new config should match the old. 
self.assertAllEqual(network.get_config(), new_network.get_config()) class Embeddings(tf.keras.Model): def __init__(self, vocab_size, hidden_size): super().__init__() self.inputs = [ tf.keras.layers.Input( shape=(None,), dtype=tf.int32, name="input_word_ids"), tf.keras.layers.Input(shape=(None,), dtype=tf.int32, name="input_mask") ] self.attention_mask = layers.SelfAttentionMask() self.embedding_layer = layers.OnDeviceEmbedding( vocab_size=vocab_size, embedding_width=hidden_size, initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02), name="word_embeddings") def call(self, inputs): word_ids, mask = inputs word_embeddings = self.embedding_layer(word_ids) return word_embeddings, self.attention_mask([word_embeddings, mask]) class EncoderScaffoldEmbeddingNetworkTest(tf.test.TestCase): def test_network_invocation(self): hidden_size = 32 sequence_length = 21 vocab_size = 57 # Build an embedding network to swap in for the default network. This one # will have 2 inputs (mask and word_ids) instead of 3, and won't use # positional embeddings. network = Embeddings(vocab_size, hidden_size) hidden_cfg = { "num_attention_heads": 2, "intermediate_size": 3072, "intermediate_activation": activations.gelu, "dropout_rate": 0.1, "attention_dropout_rate": 0.1, "kernel_initializer": tf.keras.initializers.TruncatedNormal(stddev=0.02), } # Create a small EncoderScaffold for testing. test_network = encoder_scaffold.EncoderScaffold( num_hidden_instances=3, pooled_output_dim=hidden_size, pooler_layer_initializer=tf.keras.initializers.TruncatedNormal( stddev=0.02), hidden_cfg=hidden_cfg, embedding_cls=network) # Create the inputs (note that the first dimension is implicit). word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) data, pooled = test_network([word_ids, mask]) # Create a model based off of this network: model = tf.keras.Model([word_ids, mask], [data, pooled]) # Invoke the model. We can't validate the output data here (the model is too # complex) but this will catch structural runtime errors. batch_size = 3 word_id_data = np.random.randint( vocab_size, size=(batch_size, sequence_length)) mask_data = np.random.randint(2, size=(batch_size, sequence_length)) _ = model.predict([word_id_data, mask_data]) def test_serialize_deserialize(self): hidden_size = 32 sequence_length = 21 vocab_size = 57 # Build an embedding network to swap in for the default network. This one # will have 2 inputs (mask and word_ids) instead of 3, and won't use # positional embeddings. word_ids = tf.keras.layers.Input( shape=(sequence_length,), dtype=tf.int32, name="input_word_ids") mask = tf.keras.layers.Input( shape=(sequence_length,), dtype=tf.int32, name="input_mask") embedding_layer = layers.OnDeviceEmbedding( vocab_size=vocab_size, embedding_width=hidden_size, initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02), name="word_embeddings") word_embeddings = embedding_layer(word_ids) attention_mask = layers.SelfAttentionMask()([word_embeddings, mask]) network = tf.keras.Model([word_ids, mask], [word_embeddings, attention_mask]) hidden_cfg = { "num_attention_heads": 2, "intermediate_size": 3072, "intermediate_activation": activations.gelu, "dropout_rate": 0.1, "attention_dropout_rate": 0.1, "kernel_initializer": tf.keras.initializers.TruncatedNormal(stddev=0.02), } # Create a small EncoderScaffold for testing. 
test_network = encoder_scaffold.EncoderScaffold( num_hidden_instances=3, pooled_output_dim=hidden_size, pooler_layer_initializer=tf.keras.initializers.TruncatedNormal( stddev=0.02), hidden_cfg=hidden_cfg, embedding_cls=network, embedding_data=embedding_layer.embeddings) # Create another network object from the first object's config. new_network = encoder_scaffold.EncoderScaffold.from_config( test_network.get_config()) # Validate that the config can be forced to JSON. _ = new_network.to_json() # If the serialization was successful, the new config should match the old. self.assertAllEqual(test_network.get_config(), new_network.get_config()) # Create a model based off of the old and new networks: word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) data, pooled = new_network([word_ids, mask]) new_model = tf.keras.Model([word_ids, mask], [data, pooled]) data, pooled = test_network([word_ids, mask]) model = tf.keras.Model([word_ids, mask], [data, pooled]) # Copy the weights between models. new_model.set_weights(model.get_weights()) # Invoke the models. batch_size = 3 word_id_data = np.random.randint( vocab_size, size=(batch_size, sequence_length)) mask_data = np.random.randint(2, size=(batch_size, sequence_length)) data, cls = model.predict([word_id_data, mask_data]) new_data, new_cls = new_model.predict([word_id_data, mask_data]) # The output should be equal. self.assertAllEqual(data, new_data) self.assertAllEqual(cls, new_cls) # We should not be able to get a reference to the embedding data. with self.assertRaisesRegex(RuntimeError, ".*does not have a reference.*"): new_network.get_embedding_table() class EncoderScaffoldHiddenInstanceTest( tf.test.TestCase, parameterized.TestCase): def test_network_invocation(self): hidden_size = 32 sequence_length = 21 vocab_size = 57 num_types = 7 embedding_cfg = { "vocab_size": vocab_size, "type_vocab_size": num_types, "hidden_size": hidden_size, "seq_length": sequence_length, "max_seq_length": sequence_length, "initializer": tf.keras.initializers.TruncatedNormal(stddev=0.02), "dropout_rate": 0.1, } call_list = [] hidden_cfg = { "num_attention_heads": 2, "intermediate_size": 3072, "intermediate_activation": activations.gelu, "dropout_rate": 0.1, "attention_dropout_rate": 0.1, "kernel_initializer": tf.keras.initializers.TruncatedNormal(stddev=0.02), "call_list": call_list } mask_call_list = [] mask_cfg = { "call_list": mask_call_list } # Create a small EncoderScaffold for testing. This time, we pass an already- # instantiated layer object. xformer = ValidatedTransformerLayer(**hidden_cfg) xmask = ValidatedMaskLayer(**mask_cfg) test_network = encoder_scaffold.EncoderScaffold( num_hidden_instances=3, pooled_output_dim=hidden_size, pooler_layer_initializer=tf.keras.initializers.TruncatedNormal( stddev=0.02), hidden_cls=xformer, mask_cls=xmask, embedding_cfg=embedding_cfg) # Create the inputs (note that the first dimension is implicit). word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) data, pooled = test_network([word_ids, mask, type_ids]) # Create a model based off of this network: model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled]) # Invoke the model. We can't validate the output data here (the model is too # complex) but this will catch structural runtime errors. 
batch_size = 3 word_id_data = np.random.randint( vocab_size, size=(batch_size, sequence_length)) mask_data = np.random.randint(2, size=(batch_size, sequence_length)) type_id_data = np.random.randint( num_types, size=(batch_size, sequence_length)) _ = model.predict([word_id_data, mask_data, type_id_data]) # If call_list[0] exists and is True, the passed layer class was # called as part of the graph creation. self.assertNotEmpty(call_list) self.assertTrue(call_list[0], "The passed layer class wasn't instantiated.") def test_hidden_cls_list(self): hidden_size = 32 sequence_length = 10 vocab_size = 57 embedding_network = Embeddings(vocab_size, hidden_size) call_list = [] hidden_cfg = { "num_attention_heads": 2, "intermediate_size": 3072, "intermediate_activation": activations.gelu, "dropout_rate": 0.1, "attention_dropout_rate": 0.1, "kernel_initializer": tf.keras.initializers.TruncatedNormal(stddev=0.02), "call_list": call_list } mask_call_list = [] mask_cfg = { "call_list": mask_call_list } # Create a small EncoderScaffold for testing. This time, we pass an already- # instantiated layer object. xformer = ValidatedTransformerLayer(**hidden_cfg) xmask = ValidatedMaskLayer(**mask_cfg) test_network_a = encoder_scaffold.EncoderScaffold( num_hidden_instances=3, pooled_output_dim=hidden_size, pooler_layer_initializer=tf.keras.initializers.TruncatedNormal( stddev=0.02), hidden_cls=xformer, mask_cls=xmask, embedding_cls=embedding_network) # Create a network b with same embedding and hidden layers as network a. test_network_b = encoder_scaffold.EncoderScaffold( num_hidden_instances=3, pooled_output_dim=hidden_size, pooler_layer_initializer=tf.keras.initializers.TruncatedNormal( stddev=0.02), mask_cls=xmask, embedding_cls=test_network_a.embedding_network, hidden_cls=test_network_a.hidden_layers) # Create a network c with same embedding but fewer hidden layers compared to # network a and b. hidden_layers = test_network_a.hidden_layers hidden_layers.pop() test_network_c = encoder_scaffold.EncoderScaffold( num_hidden_instances=2, pooled_output_dim=hidden_size, pooler_layer_initializer=tf.keras.initializers.TruncatedNormal( stddev=0.02), mask_cls=xmask, embedding_cls=test_network_a.embedding_network, hidden_cls=hidden_layers) # Create the inputs (note that the first dimension is implicit). word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) # Create model based off of network a: data_a, pooled_a = test_network_a([word_ids, mask]) model_a = tf.keras.Model([word_ids, mask], [data_a, pooled_a]) # Create model based off of network b: data_b, pooled_b = test_network_b([word_ids, mask]) model_b = tf.keras.Model([word_ids, mask], [data_b, pooled_b]) # Create model based off of network b: data_c, pooled_c = test_network_c([word_ids, mask]) model_c = tf.keras.Model([word_ids, mask], [data_c, pooled_c]) batch_size = 3 word_id_data = np.random.randint( vocab_size, size=(batch_size, sequence_length)) mask_data = np.random.randint(2, size=(batch_size, sequence_length)) output_a, _ = model_a.predict([word_id_data, mask_data]) output_b, _ = model_b.predict([word_id_data, mask_data]) output_c, _ = model_c.predict([word_id_data, mask_data]) # Outputs from model a and b should be the same since they share the same # embedding and hidden layers. self.assertAllEqual(output_a, output_b) # Outputs from model a and c shouldn't be the same since they share the same # embedding layer but different number of hidden layers. 
self.assertNotAllEqual(output_a, output_c) @parameterized.parameters(True, False) def test_serialize_deserialize(self, use_hidden_cls_instance): hidden_size = 32 sequence_length = 21 vocab_size = 57 num_types = 7 embedding_cfg = { "vocab_size": vocab_size, "type_vocab_size": num_types, "hidden_size": hidden_size, "seq_length": sequence_length, "max_seq_length": sequence_length, "initializer": tf.keras.initializers.TruncatedNormal(stddev=0.02), "dropout_rate": 0.1, } call_list = [] hidden_cfg = { "num_attention_heads": 2, "intermediate_size": 3072, "intermediate_activation": activations.gelu, "dropout_rate": 0.1, "attention_dropout_rate": 0.1, "kernel_initializer": tf.keras.initializers.TruncatedNormal(stddev=0.02), "call_list": call_list, "call_class": TestLayer } mask_call_list = [] mask_cfg = {"call_list": mask_call_list, "call_class": TestLayer} # Create a small EncoderScaffold for testing. This time, we pass an already- # instantiated layer object. kwargs = dict( num_hidden_instances=3, pooled_output_dim=hidden_size, pooler_layer_initializer=tf.keras.initializers.TruncatedNormal( stddev=0.02), embedding_cfg=embedding_cfg) if use_hidden_cls_instance: xformer = ValidatedTransformerLayer(**hidden_cfg) xmask = ValidatedMaskLayer(**mask_cfg) test_network = encoder_scaffold.EncoderScaffold( hidden_cls=xformer, mask_cls=xmask, **kwargs) else: test_network = encoder_scaffold.EncoderScaffold( hidden_cls=ValidatedTransformerLayer, hidden_cfg=hidden_cfg, mask_cls=ValidatedMaskLayer, mask_cfg=mask_cfg, **kwargs) # Create another network object from the first object's config. new_network = encoder_scaffold.EncoderScaffold.from_config( test_network.get_config()) # Validate that the config can be forced to JSON. _ = new_network.to_json() # If the serialization was successful, the new config should match the old. self.assertAllEqual(test_network.get_config(), new_network.get_config()) # Create a model based off of the old and new networks: word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) data, pooled = new_network([word_ids, mask, type_ids]) new_model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled]) data, pooled = test_network([word_ids, mask, type_ids]) model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled]) # Copy the weights between models. new_model.set_weights(model.get_weights()) # Invoke the models. batch_size = 3 word_id_data = np.random.randint( vocab_size, size=(batch_size, sequence_length)) mask_data = np.random.randint(2, size=(batch_size, sequence_length)) type_id_data = np.random.randint( num_types, size=(batch_size, sequence_length)) data, cls = model.predict([word_id_data, mask_data, type_id_data]) new_data, new_cls = new_model.predict( [word_id_data, mask_data, type_id_data]) # The output should be equal. self.assertAllEqual(data, new_data) self.assertAllEqual(cls, new_cls) if __name__ == "__main__": tf.test.main()
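# ----------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the test file above): a minimal,
# hedged example of the custom-embedding injection point those tests exercise.
# Any Keras model whose outputs are (embeddings, attention_mask) can be passed
# as `embedding_cls`; the scaffold then skips its default embedding stack.
# The small sizes below are arbitrary values for a quick smoke test.
import tensorflow as tf

from official.nlp.modeling import layers
from official.nlp.modeling.networks import encoder_scaffold

hidden_size, seq_len, vocab = 32, 21, 57

word_ids = tf.keras.layers.Input(
    shape=(seq_len,), dtype=tf.int32, name="input_word_ids")
mask = tf.keras.layers.Input(
    shape=(seq_len,), dtype=tf.int32, name="input_mask")
embeddings = layers.OnDeviceEmbedding(
    vocab_size=vocab,
    embedding_width=hidden_size,
    initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02),
    name="word_embeddings")(word_ids)
attention_mask = layers.SelfAttentionMask()([embeddings, mask])
embedding_network = tf.keras.Model([word_ids, mask],
                                   [embeddings, attention_mask])

scaffold = encoder_scaffold.EncoderScaffold(
    num_hidden_instances=2,
    pooled_output_dim=hidden_size,
    embedding_cls=embedding_network,
    hidden_cfg=dict(
        num_attention_heads=2,
        intermediate_size=128,
        intermediate_activation=tf.keras.activations.gelu,
        dropout_rate=0.1,
        attention_dropout_rate=0.1,
        kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02)))

# Two inputs only (word ids and mask), exactly as in the tests above.
sequence_output, pooled_output = scaffold(
    [tf.zeros((2, seq_len), tf.int32), tf.ones((2, seq_len), tf.int32)])
# sequence_output: (2, 21, 32), pooled_output: (2, 32)
# ----------------------------------------------------------------------------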
file_length: 29,056
avg_line_length: 35.503769
max_line_length: 80
extension_type: py

repo: models
file: models-master/official/nlp/modeling/networks/bert_dense_encoder_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for transformer-based bert encoder network with dense features as inputs.""" # Import libraries from absl.testing import parameterized import numpy as np import tensorflow as tf from official.nlp.modeling.networks import bert_encoder class BertEncoderV2Test(tf.test.TestCase, parameterized.TestCase): def tearDown(self): super(BertEncoderV2Test, self).tearDown() tf.keras.mixed_precision.set_global_policy("float32") def test_dict_outputs_network_creation(self): hidden_size = 32 sequence_length = 21 dense_sequence_length = 20 # Create a small dense BertEncoderV2 for testing. kwargs = {} test_network = bert_encoder.BertEncoderV2( vocab_size=100, hidden_size=hidden_size, num_attention_heads=2, num_layers=3, with_dense_inputs=True, **kwargs) # Create the inputs (note that the first dimension is implicit). word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) dense_inputs = tf.keras.Input( shape=(dense_sequence_length, hidden_size), dtype=tf.float32) dense_mask = tf.keras.Input(shape=(dense_sequence_length,), dtype=tf.int32) dense_type_ids = tf.keras.Input( shape=(dense_sequence_length,), dtype=tf.int32) dict_outputs = test_network( dict( input_word_ids=word_ids, input_mask=mask, input_type_ids=type_ids, dense_inputs=dense_inputs, dense_mask=dense_mask, dense_type_ids=dense_type_ids)) data = dict_outputs["sequence_output"] pooled = dict_outputs["pooled_output"] self.assertIsInstance(test_network.transformer_layers, list) self.assertLen(test_network.transformer_layers, 3) self.assertIsInstance(test_network.pooler_layer, tf.keras.layers.Dense) expected_data_shape = [ None, sequence_length + dense_sequence_length, hidden_size ] expected_pooled_shape = [None, hidden_size] self.assertAllEqual(expected_data_shape, data.shape.as_list()) self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list()) # The default output dtype is float32. self.assertAllEqual(tf.float32, data.dtype) self.assertAllEqual(tf.float32, pooled.dtype) def test_dict_outputs_all_encoder_outputs_network_creation(self): hidden_size = 32 sequence_length = 21 dense_sequence_length = 20 # Create a small BertEncoder for testing. test_network = bert_encoder.BertEncoderV2( vocab_size=100, hidden_size=hidden_size, num_attention_heads=2, num_layers=3, dict_outputs=True, with_dense_inputs=True) # Create the inputs (note that the first dimension is implicit). 
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) dense_inputs = tf.keras.Input( shape=(dense_sequence_length, hidden_size), dtype=tf.float32) dense_mask = tf.keras.Input(shape=(dense_sequence_length,), dtype=tf.int32) dense_type_ids = tf.keras.Input( shape=(dense_sequence_length,), dtype=tf.int32) dict_outputs = test_network( dict( input_word_ids=word_ids, input_mask=mask, input_type_ids=type_ids, dense_inputs=dense_inputs, dense_mask=dense_mask, dense_type_ids=dense_type_ids)) all_encoder_outputs = dict_outputs["encoder_outputs"] pooled = dict_outputs["pooled_output"] expected_data_shape = [ None, sequence_length + dense_sequence_length, hidden_size ] expected_pooled_shape = [None, hidden_size] self.assertLen(all_encoder_outputs, 3) for data in all_encoder_outputs: self.assertAllEqual(expected_data_shape, data.shape.as_list()) self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list()) # The default output dtype is float32. self.assertAllEqual(tf.float32, all_encoder_outputs[-1].dtype) self.assertAllEqual(tf.float32, pooled.dtype) def test_dict_outputs_network_creation_with_float16_dtype(self): hidden_size = 32 sequence_length = 21 dense_sequence_length = 20 tf.keras.mixed_precision.set_global_policy("mixed_float16") # Create a small BertEncoder for testing. test_network = bert_encoder.BertEncoderV2( vocab_size=100, hidden_size=hidden_size, num_attention_heads=2, num_layers=3, dict_outputs=True, with_dense_inputs=True) # Create the inputs (note that the first dimension is implicit). word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) dense_inputs = tf.keras.Input( shape=(dense_sequence_length, hidden_size), dtype=tf.float32) dense_mask = tf.keras.Input(shape=(dense_sequence_length,), dtype=tf.int32) dense_type_ids = tf.keras.Input( shape=(dense_sequence_length,), dtype=tf.int32) dict_outputs = test_network( dict( input_word_ids=word_ids, input_mask=mask, input_type_ids=type_ids, dense_inputs=dense_inputs, dense_mask=dense_mask, dense_type_ids=dense_type_ids)) data = dict_outputs["sequence_output"] pooled = dict_outputs["pooled_output"] expected_data_shape = [ None, sequence_length + dense_sequence_length, hidden_size ] expected_pooled_shape = [None, hidden_size] self.assertAllEqual(expected_data_shape, data.shape.as_list()) self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list()) # If float_dtype is set to float16, the data output is float32 (from a layer # norm) and pool output should be float16. self.assertAllEqual(tf.float32, data.dtype) self.assertAllEqual(tf.float16, pooled.dtype) @parameterized.named_parameters( ("all_sequence_encoder_v2", bert_encoder.BertEncoderV2, None, 41), ("output_range_encoder_v2", bert_encoder.BertEncoderV2, 1, 1), ) def test_dict_outputs_network_invocation( self, encoder_cls, output_range, out_seq_len): hidden_size = 32 sequence_length = 21 dense_sequence_length = 20 vocab_size = 57 num_types = 7 # Create a small BertEncoder for testing. test_network = encoder_cls( vocab_size=vocab_size, hidden_size=hidden_size, num_attention_heads=2, num_layers=3, type_vocab_size=num_types, dict_outputs=True, with_dense_inputs=True, output_range=output_range) # Create the inputs (note that the first dimension is implicit). 
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) dense_inputs = tf.keras.Input( shape=(dense_sequence_length, hidden_size), dtype=tf.float32) dense_mask = tf.keras.Input(shape=(dense_sequence_length,), dtype=tf.int32) dense_type_ids = tf.keras.Input( shape=(dense_sequence_length,), dtype=tf.int32) dict_outputs = test_network( dict( input_word_ids=word_ids, input_mask=mask, input_type_ids=type_ids, dense_inputs=dense_inputs, dense_mask=dense_mask, dense_type_ids=dense_type_ids)) data = dict_outputs["sequence_output"] pooled = dict_outputs["pooled_output"] # Create a model based off of this network: model = tf.keras.Model( [word_ids, mask, type_ids, dense_inputs, dense_mask, dense_type_ids], [data, pooled]) # Invoke the model. We can't validate the output data here (the model is too # complex) but this will catch structural runtime errors. batch_size = 3 word_id_data = np.random.randint( vocab_size, size=(batch_size, sequence_length)) mask_data = np.random.randint(2, size=(batch_size, sequence_length)) type_id_data = np.random.randint( num_types, size=(batch_size, sequence_length)) dense_input_data = np.random.rand(batch_size, dense_sequence_length, hidden_size) dense_mask_data = np.random.randint( 2, size=(batch_size, dense_sequence_length)) dense_type_ids_data = np.random.randint( num_types, size=(batch_size, dense_sequence_length)) outputs = model.predict([ word_id_data, mask_data, type_id_data, dense_input_data, dense_mask_data, dense_type_ids_data ]) self.assertEqual(outputs[0].shape[1], out_seq_len) # Creates a BertEncoder with max_sequence_length != sequence_length max_sequence_length = 128 test_network = encoder_cls( vocab_size=vocab_size, hidden_size=hidden_size, max_sequence_length=max_sequence_length, num_attention_heads=2, num_layers=3, type_vocab_size=num_types, dict_outputs=True) dict_outputs = test_network( dict( input_word_ids=word_ids, input_mask=mask, input_type_ids=type_ids, dense_inputs=dense_inputs, dense_mask=dense_mask, dense_type_ids=dense_type_ids)) data = dict_outputs["sequence_output"] pooled = dict_outputs["pooled_output"] model = tf.keras.Model( [word_ids, mask, type_ids, dense_inputs, dense_mask, dense_type_ids], [data, pooled]) outputs = model.predict([ word_id_data, mask_data, type_id_data, dense_input_data, dense_mask_data, dense_type_ids_data ]) self.assertEqual(outputs[0].shape[1], sequence_length + dense_sequence_length) # Creates a BertEncoder with embedding_width != hidden_size embedding_width = 16 test_network = bert_encoder.BertEncoderV2( vocab_size=vocab_size, hidden_size=hidden_size, max_sequence_length=max_sequence_length, num_attention_heads=2, num_layers=3, type_vocab_size=num_types, embedding_width=embedding_width, dict_outputs=True) dense_inputs = tf.keras.Input( shape=(dense_sequence_length, embedding_width), dtype=tf.float32) dense_input_data = np.zeros( (batch_size, dense_sequence_length, embedding_width), dtype=float) dict_outputs = test_network( dict( input_word_ids=word_ids, input_mask=mask, input_type_ids=type_ids, dense_inputs=dense_inputs, dense_mask=dense_mask, dense_type_ids=dense_type_ids)) data = dict_outputs["sequence_output"] pooled = dict_outputs["pooled_output"] model = tf.keras.Model( [word_ids, mask, type_ids, dense_inputs, dense_mask, dense_type_ids], [data, pooled]) outputs = model.predict([ word_id_data, mask_data, type_id_data, dense_input_data, dense_mask_data, 
dense_type_ids_data ]) self.assertEqual(outputs[0].shape[-1], hidden_size) self.assertTrue(hasattr(test_network, "_embedding_projection")) def test_embeddings_as_inputs(self): hidden_size = 32 sequence_length = 21 dense_sequence_length = 20 # Create a small BertEncoder for testing. test_network = bert_encoder.BertEncoderV2( vocab_size=100, hidden_size=hidden_size, num_attention_heads=2, num_layers=3, with_dense_inputs=True) # Create the inputs (note that the first dimension is implicit). word_ids = tf.keras.Input(shape=(sequence_length), dtype=tf.int32) mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) dense_inputs = tf.keras.Input( shape=(dense_sequence_length, hidden_size), dtype=tf.float32) dense_mask = tf.keras.Input(shape=(dense_sequence_length,), dtype=tf.int32) dense_type_ids = tf.keras.Input( shape=(dense_sequence_length,), dtype=tf.int32) test_network.build( dict( input_word_ids=word_ids, input_mask=mask, input_type_ids=type_ids, dense_inputs=dense_inputs, dense_mask=dense_mask, dense_type_ids=dense_type_ids)) embeddings = test_network.get_embedding_layer()(word_ids) # Calls with the embeddings. dict_outputs = test_network( dict( input_word_embeddings=embeddings, input_mask=mask, input_type_ids=type_ids, dense_inputs=dense_inputs, dense_mask=dense_mask, dense_type_ids=dense_type_ids)) all_encoder_outputs = dict_outputs["encoder_outputs"] pooled = dict_outputs["pooled_output"] expected_data_shape = [ None, sequence_length + dense_sequence_length, hidden_size ] expected_pooled_shape = [None, hidden_size] self.assertLen(all_encoder_outputs, 3) for data in all_encoder_outputs: self.assertAllEqual(expected_data_shape, data.shape.as_list()) self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list()) # The default output dtype is float32. self.assertAllEqual(tf.float32, all_encoder_outputs[-1].dtype) self.assertAllEqual(tf.float32, pooled.dtype) if __name__ == "__main__": tf.test.main()
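# ----------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the test file above): the dense-
# input path checked by these tests, in isolation. With `with_dense_inputs=True`
# the encoder consumes token features plus pre-computed dense embeddings and
# concatenates them along the sequence axis, so the sequence output length is
# sequence_length + dense_sequence_length. Sizes are arbitrary test values.
import tensorflow as tf

from official.nlp.modeling.networks import bert_encoder

hidden_size, seq_len, dense_len, vocab = 32, 21, 20, 100
encoder = bert_encoder.BertEncoderV2(
    vocab_size=vocab,
    hidden_size=hidden_size,
    num_attention_heads=2,
    num_layers=3,
    with_dense_inputs=True)

batch = 3
outputs = encoder(dict(
    input_word_ids=tf.random.uniform(
        (batch, seq_len), maxval=vocab, dtype=tf.int32),
    input_mask=tf.ones((batch, seq_len), tf.int32),
    input_type_ids=tf.zeros((batch, seq_len), tf.int32),
    dense_inputs=tf.random.uniform((batch, dense_len, hidden_size)),
    dense_mask=tf.ones((batch, dense_len), tf.int32),
    dense_type_ids=tf.zeros((batch, dense_len), tf.int32)))

print(outputs["sequence_output"].shape)  # (3, 41, 32)
print(outputs["pooled_output"].shape)    # (3, 32)
# ----------------------------------------------------------------------------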
file_length: 14,229
avg_line_length: 37.150134
max_line_length: 85
extension_type: py

repo: models
file: models-master/official/nlp/modeling/networks/encoder_scaffold.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Transformer-based text encoder network.""" # pylint: disable=g-classes-have-attributes import copy import inspect from absl import logging import gin import tensorflow as tf from official.modeling import tf_utils from official.nlp.modeling import layers @tf.keras.utils.register_keras_serializable(package='Text') @gin.configurable class EncoderScaffold(tf.keras.Model): """Bi-directional Transformer-based encoder network scaffold. This network allows users to flexibly implement an encoder similar to the one described in "BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding" (https://arxiv.org/abs/1810.04805). In this network, users can choose to provide a custom embedding subnetwork (which will replace the standard embedding logic) and/or a custom hidden layer class (which will replace the Transformer instantiation in the encoder). For each of these custom injection points, users can pass either a class or a class instance. If a class is passed, that class will be instantiated using the `embedding_cfg` or `hidden_cfg` argument, respectively; if an instance is passed, that instance will be invoked. (In the case of hidden_cls, the instance will be invoked 'num_hidden_instances' times. If the hidden_cls is not overridden, a default transformer layer will be instantiated. *Note* that the network is constructed by [Keras Functional API](https://keras.io/guides/functional_api/). Args: pooled_output_dim: The dimension of pooled output. pooler_layer_initializer: The initializer for the classification layer. embedding_cls: The class or instance to use to embed the input data. This class or instance defines the inputs to this encoder and outputs (1) embeddings tensor with shape `(batch_size, seq_length, hidden_size)` and (2) attention masking with tensor `(batch_size, seq_length, seq_length)`. If `embedding_cls` is not set, a default embedding network (from the original BERT paper) will be created. embedding_cfg: A dict of kwargs to pass to the embedding_cls, if it needs to be instantiated. If `embedding_cls` is not set, a config dict must be passed to `embedding_cfg` with the following values: `vocab_size`: The size of the token vocabulary. `type_vocab_size`: The size of the type vocabulary. `hidden_size`: The hidden size for this encoder. `max_seq_length`: The maximum sequence length for this encoder. `seq_length`: The sequence length for this encoder. `initializer`: The initializer for the embedding portion of this encoder. `dropout_rate`: The dropout rate to apply before the encoding layers. embedding_data: A reference to the embedding weights that will be used to train the masked language model, if necessary. This is optional, and only needed if (1) you are overriding `embedding_cls` and (2) are doing standard pretraining. num_hidden_instances: The number of times to instantiate and/or invoke the hidden_cls. 
hidden_cls: Three types of input are supported: (1) class (2) instance (3) list of classes or instances, to encode the input data. If `hidden_cls` is not set, a KerasBERT transformer layer will be used as the encoder class. If `hidden_cls` is a list of classes or instances, these classes (instances) are sequentially instantiated (invoked) on top of embedding layer. Mixing classes and instances in the list is allowed. hidden_cfg: A dict of kwargs to pass to the hidden_cls, if it needs to be instantiated. If hidden_cls is not set, a config dict must be passed to `hidden_cfg` with the following values: `num_attention_heads`: The number of attention heads. The hidden size must be divisible by `num_attention_heads`. `intermediate_size`: The intermediate size of the transformer. `intermediate_activation`: The activation to apply in the transfomer. `dropout_rate`: The overall dropout rate for the transformer layers. `attention_dropout_rate`: The dropout rate for the attention layers. `kernel_initializer`: The initializer for the transformer layers. mask_cls: The class to generate masks passed into hidden_cls() from inputs and 2D mask indicating positions we can attend to. It is the caller's job to make sure the output of the mask_layer can be used by hidden_layer. A mask_cls is usually mapped to a hidden_cls. mask_cfg: A dict of kwargs pass to mask_cls. layer_norm_before_pooling: Whether to add a layer norm before the pooling layer. You probably want to turn this on if you set `norm_first=True` in transformer layers. return_all_layer_outputs: Whether to output sequence embedding outputs of all encoder transformer layers. dict_outputs: Whether to use a dictionary as the model outputs. layer_idx_as_attention_seed: Whether to include layer_idx in attention_cfg in hidden_cfg. feed_layer_idx: whether the scaffold should feed layer index to hidden_cls. recursive: whether to pass the second return of the hidden layer as the last element among the inputs. None will be passed as the initial state. 
""" def __init__(self, pooled_output_dim, pooler_layer_initializer=tf.keras.initializers.TruncatedNormal( stddev=0.02), embedding_cls=None, embedding_cfg=None, embedding_data=None, num_hidden_instances=1, hidden_cls=layers.Transformer, hidden_cfg=None, mask_cls=layers.SelfAttentionMask, mask_cfg=None, layer_norm_before_pooling=False, return_all_layer_outputs=False, dict_outputs=False, layer_idx_as_attention_seed=False, feed_layer_idx=False, recursive=False, **kwargs): if embedding_cls: if inspect.isclass(embedding_cls): embedding_network = embedding_cls( **embedding_cfg) if embedding_cfg else embedding_cls() else: embedding_network = embedding_cls inputs = embedding_network.inputs embeddings, attention_mask = embedding_network(inputs) embedding_layer = None position_embedding_layer = None type_embedding_layer = None embedding_norm_layer = None else: embedding_network = None seq_length = embedding_cfg.get('seq_length', None) word_ids = tf.keras.layers.Input( shape=(seq_length,), dtype=tf.int32, name='input_word_ids') mask = tf.keras.layers.Input( shape=(seq_length,), dtype=tf.int32, name='input_mask') type_ids = tf.keras.layers.Input( shape=(seq_length,), dtype=tf.int32, name='input_type_ids') inputs = [word_ids, mask, type_ids] embedding_layer = layers.OnDeviceEmbedding( vocab_size=embedding_cfg['vocab_size'], embedding_width=embedding_cfg['hidden_size'], initializer=tf_utils.clone_initializer(embedding_cfg['initializer']), name='word_embeddings') word_embeddings = embedding_layer(word_ids) # Always uses dynamic slicing for simplicity. position_embedding_layer = layers.PositionEmbedding( initializer=tf_utils.clone_initializer(embedding_cfg['initializer']), max_length=embedding_cfg['max_seq_length'], name='position_embedding') position_embeddings = position_embedding_layer(word_embeddings) type_embedding_layer = layers.OnDeviceEmbedding( vocab_size=embedding_cfg['type_vocab_size'], embedding_width=embedding_cfg['hidden_size'], initializer=tf_utils.clone_initializer(embedding_cfg['initializer']), use_one_hot=True, name='type_embeddings') type_embeddings = type_embedding_layer(type_ids) embeddings = tf.keras.layers.Add()( [word_embeddings, position_embeddings, type_embeddings]) embedding_norm_layer = tf.keras.layers.LayerNormalization( name='embeddings/layer_norm', axis=-1, epsilon=1e-12, dtype=tf.float32) embeddings = embedding_norm_layer(embeddings) embeddings = ( tf.keras.layers.Dropout( rate=embedding_cfg['dropout_rate'])(embeddings)) mask_cfg = {} if mask_cfg is None else mask_cfg if inspect.isclass(mask_cls): mask_layer = mask_cls(**mask_cfg) else: mask_layer = mask_cls attention_mask = mask_layer(embeddings, mask) data = embeddings layer_output_data = [] hidden_layers = [] hidden_cfg = hidden_cfg if hidden_cfg else {} if isinstance(hidden_cls, list) and len(hidden_cls) != num_hidden_instances: raise RuntimeError( ('When input hidden_cls to EncoderScaffold %s is a list, it must ' 'contain classes or instances with size specified by ' 'num_hidden_instances, got %d vs %d.') % self.name, len(hidden_cls), num_hidden_instances) # Consider supporting customized init states. 
recursive_states = None for i in range(num_hidden_instances): if isinstance(hidden_cls, list): cur_hidden_cls = hidden_cls[i] else: cur_hidden_cls = hidden_cls if inspect.isclass(cur_hidden_cls): if hidden_cfg and 'attention_cfg' in hidden_cfg and ( layer_idx_as_attention_seed): hidden_cfg = copy.deepcopy(hidden_cfg) hidden_cfg['attention_cfg']['seed'] = i if feed_layer_idx: hidden_cfg['layer_idx'] = i layer = cur_hidden_cls(**hidden_cfg) else: layer = cur_hidden_cls if recursive: data, recursive_states = layer([data, attention_mask, recursive_states]) else: data = layer([data, attention_mask]) layer_output_data.append(data) hidden_layers.append(layer) if layer_norm_before_pooling: # Normalize the final output. output_layer_norm = tf.keras.layers.LayerNormalization( name='final_layer_norm', axis=-1, epsilon=1e-12) layer_output_data[-1] = output_layer_norm(layer_output_data[-1]) last_layer_output = layer_output_data[-1] # Applying a tf.slice op (through subscript notation) to a Keras tensor # like this will create a SliceOpLambda layer. This is better than a Lambda # layer with Python code, because that is fundamentally less portable. first_token_tensor = last_layer_output[:, 0, :] pooler_layer_initializer = tf.keras.initializers.get( pooler_layer_initializer) pooler_layer = tf.keras.layers.Dense( units=pooled_output_dim, activation='tanh', kernel_initializer=pooler_layer_initializer, name='cls_transform') cls_output = pooler_layer(first_token_tensor) if dict_outputs: outputs = dict( sequence_output=layer_output_data[-1], pooled_output=cls_output, encoder_outputs=layer_output_data, ) elif return_all_layer_outputs: outputs = [layer_output_data, cls_output] else: outputs = [layer_output_data[-1], cls_output] # b/164516224 # Once we've created the network using the Functional API, we call # super().__init__ as though we were invoking the Functional API Model # constructor, resulting in this object having all the properties of a model # created using the Functional API. Once super().__init__ is called, we # can assign attributes to `self` - note that all `self` assignments are # below this line. 
super().__init__( inputs=inputs, outputs=outputs, **kwargs) self._hidden_cls = hidden_cls self._hidden_cfg = hidden_cfg self._mask_cls = mask_cls self._mask_cfg = mask_cfg self._num_hidden_instances = num_hidden_instances self._pooled_output_dim = pooled_output_dim self._pooler_layer_initializer = pooler_layer_initializer self._embedding_cls = embedding_cls self._embedding_cfg = embedding_cfg self._embedding_data = embedding_data self._layer_norm_before_pooling = layer_norm_before_pooling self._return_all_layer_outputs = return_all_layer_outputs self._dict_outputs = dict_outputs self._kwargs = kwargs self._embedding_layer = embedding_layer self._embedding_network = embedding_network self._position_embedding_layer = position_embedding_layer self._type_embedding_layer = type_embedding_layer self._embedding_norm_layer = embedding_norm_layer self._hidden_layers = hidden_layers if self._layer_norm_before_pooling: self._output_layer_norm = output_layer_norm self._pooler_layer = pooler_layer self._layer_idx_as_attention_seed = layer_idx_as_attention_seed logging.info('EncoderScaffold configs: %s', self.get_config()) def get_config(self): config_dict = { 'num_hidden_instances': self._num_hidden_instances, 'pooled_output_dim': self._pooled_output_dim, 'pooler_layer_initializer': tf.keras.initializers.serialize( self._pooler_layer_initializer), 'embedding_cls': self._embedding_network, 'embedding_cfg': self._embedding_cfg, 'layer_norm_before_pooling': self._layer_norm_before_pooling, 'return_all_layer_outputs': self._return_all_layer_outputs, 'dict_outputs': self._dict_outputs, 'layer_idx_as_attention_seed': self._layer_idx_as_attention_seed } cfgs = { 'hidden_cfg': self._hidden_cfg, 'mask_cfg': self._mask_cfg } for cfg_name, cfg in cfgs.items(): if cfg: config_dict[cfg_name] = {} for k, v in cfg.items(): # `self._hidden_cfg` may contain `class`, e.g., when `hidden_cfg` is # `TransformerScaffold`, `attention_cls` argument can be a `class`. if inspect.isclass(v): config_dict[cfg_name][k] = tf.keras.utils.get_registered_name(v) else: config_dict[cfg_name][k] = v clss = { 'hidden_cls': self._hidden_cls, 'mask_cls': self._mask_cls } for cls_name, cls in clss.items(): if inspect.isclass(cls): key = '{}_string'.format(cls_name) config_dict[key] = tf.keras.utils.get_registered_name(cls) else: config_dict[cls_name] = cls config_dict.update(self._kwargs) return config_dict @classmethod def from_config(cls, config, custom_objects=None): cls_names = ['hidden_cls', 'mask_cls'] for cls_name in cls_names: cls_string = '{}_string'.format(cls_name) if cls_string in config: config[cls_name] = tf.keras.utils.get_registered_object( config[cls_string], custom_objects=custom_objects) del config[cls_string] return cls(**config) def get_embedding_table(self): if self._embedding_network is None: # In this case, we don't have a custom embedding network and can return # the standard embedding data. return self._embedding_layer.embeddings if self._embedding_data is None: raise RuntimeError(('The EncoderScaffold %s does not have a reference ' 'to the embedding data. This is required when you ' 'pass a custom embedding network to the scaffold. ' 'It is also possible that you are trying to get ' 'embedding data from an embedding scaffold with a ' 'custom embedding network where the scaffold has ' 'been serialized and deserialized. 
Unfortunately, ' 'accessing custom embedding references after ' 'serialization is not yet supported.') % self.name) else: return self._embedding_data @property def embedding_network(self): if self._embedding_network is None: raise RuntimeError( ('The EncoderScaffold %s does not have a reference ' 'to the embedding network. This is required when you ' 'pass a custom embedding network to the scaffold.') % self.name) return self._embedding_network @property def hidden_layers(self): """List of hidden layers in the encoder.""" return self._hidden_layers @property def pooler_layer(self): """The pooler dense layer after the transformer layers.""" return self._pooler_layer
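# ----------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the module above): the plain,
# config-driven construction described in the EncoderScaffold docstring, i.e.
# no custom `embedding_cls`/`hidden_cls`; everything is built from
# `embedding_cfg` and `hidden_cfg`. Treat the small sizes as arbitrary.
import tensorflow as tf

from official.nlp.modeling.networks import encoder_scaffold

embedding_cfg = dict(
    vocab_size=100,
    type_vocab_size=2,
    hidden_size=64,
    seq_length=16,
    max_seq_length=16,
    initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02),
    dropout_rate=0.1)
hidden_cfg = dict(
    num_attention_heads=4,
    intermediate_size=256,
    intermediate_activation=tf.keras.activations.gelu,
    dropout_rate=0.1,
    attention_dropout_rate=0.1,
    kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02))

net = encoder_scaffold.EncoderScaffold(
    num_hidden_instances=3,
    pooled_output_dim=64,
    embedding_cfg=embedding_cfg,
    hidden_cfg=hidden_cfg)

# Default inputs are [word_ids, mask, type_ids]; outputs are
# [sequence_output, pooled_output].
word_ids = tf.zeros((2, 16), tf.int32)
sequence_output, pooled_output = net(
    [word_ids, tf.ones((2, 16), tf.int32), word_ids])

# The config fully describes the scaffold; the unit tests additionally assert
# that `EncoderScaffold.from_config(net.get_config())` rebuilds an equivalent
# network.
config = net.get_config()
# ----------------------------------------------------------------------------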
file_length: 17,143
avg_line_length: 42.183879
max_line_length: 80
extension_type: py

repo: models
file: models-master/official/nlp/modeling/networks/sparse_mixer.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Sparse Mixer encoder network. Based on ["Sparse Mixers: Combining MoE and Mixing to build a more efficient BERT"](https://arxiv.org/abs/2205.12399). """ # pylint: disable=g-classes-have-attributes from typing import Any, Callable, Optional, Sequence, Union from absl import logging import tensorflow as tf from official.modeling import tf_utils from official.nlp.modeling import layers _Activation = Union[str, Callable[..., Any]] _Initializer = Union[str, tf.keras.initializers.Initializer] _approx_gelu = lambda x: tf.keras.activations.gelu(x, approximate=True) class SparseMixer(tf.keras.layers.Layer): """Sparse Mixer encoder network. Based on ["Sparse Mixers: Combining MoE and Mixing to build a more efficient BERT"](https://arxiv.org/abs/2205.12399). Sparse Mixer is an efficient encoder network that replaces typical Transformer encoder blocks with a combination of linear mixing and sparsely activated Mixture-of-Experts (MoE) sublayers. This implementation defaults to the canonical Sparse Mixer Base model. To use the "Fast Sparse Mixer" configuration, set `*_capacity_factor`=0.5. This yields a sparser and faster variant of the canonical Sparse Mixer model, in which each expert processes roughly 50% less tokens. Notes: - The underlying MoeLayer uses the Keras add_loss() and add_metric() APIs to propagate auxiliary MoE losses and metrics. Any model using this network, should collect these losses and, if desired, metrics. - The input length is fixed to 'max_sequence_length' to accomodate the mixing mechanisms. Args: vocab_size: The size of the token vocabulary. hidden_size: The size of the transformer hidden layers. num_layers: The number of transformer layers. moe_layers: Specifies which layers, if any, should be sparsely activated Mixture-of-Experts (MoE) layers. The remaining [0, num_layers) setminus moe_layers will use the vanilla MLP sublayers. Defaults to placing MoE layers in the middle of the model. attention_layers: Specifies which layers, if any, should be attention layers in the encoder. The remaining [0, num_layers) setminus attention_layers will use the specified `mixing_mechanism`. If using attention layers, a good rule of thumb is to place them in the final few layers. num_experts: Number of experts. Experts are themselves MLP modules, with the same `inner_dim` and `inner_activation` as the vanilla MLP sublayers. train_capacity_factor: Scaling factor to increase the expert token capacity during training. See layers.MoeLayer for further details. The "Fast Sparse Mixer" increases model sparsity (and speed) by using a capacity factor of 0.5. eval_capacity_factor: As above, but used during evaluation. max_group_size: The total number of tokens on each device is subdivided into groups of this size. Router computations are then performed on a per-group basis. See layers.MoeLayer for further details. mixing_mechanism: Type of mixing mechanism used in place of self-attention layers. 
Defaults to 'Linear' mixing. use_fft: Only used for spectral mixing mechanisms. Determines whether to use Fast Fourier Transform (True) or the Discrete Fourier Transform (DFT) matrix (False; default) to compute the Fourier Transform. See layers.FourierTransformLayer or layers.HartleyTransformLayer for advice. num_attention_heads: The number of attention heads for each transformer. The hidden size must be divisible by the number of attention heads. max_sequence_length: The only sequence length that this encoder can consume. This determines the variable shape for positional embeddings and the size of the mixing matrices. type_vocab_size: The number of types that the 'type_ids' input can take. inner_dim: The output dimension of the first Dense layer in a two-layer feedforward network for each transformer. inner_activation: The activation for the first Dense layer in a two-layer feedforward network for each transformer. output_dropout: Dropout probability for the post-attention and output dropout. attention_dropout: The dropout rate to use for the attention layers within the transformer layers. initializer: The initializer to use for all weights in this encoder. output_range: The sequence output range, [0, output_range), by slicing the target sequence of the last transformer layer. `None` means the entire target sequence will attend to the source sequence, which yields the full output. embedding_width: The width of the word embeddings. If the embedding width is not equal to hidden size, embedding parameters will be factorized into two matrices in the shape of ['vocab_size', 'embedding_width'] and ['embedding_width', 'hidden_size'] ('embedding_width' is usually much smaller than 'hidden_size'). embedding_layer: An optional Layer instance which will be called to generate embeddings for the input word IDs. norm_first: Whether to normalize inputs to attention and intermediate dense layers. If set False, output of attention and intermediate dense layers is normalized. with_dense_inputs: Whether to accept dense embeddings as the input. export_metrics: Whether to export metrics using Keras add_metric API. 
""" def __init__( self, vocab_size: int, hidden_size: int = 512, num_layers: int = 14, moe_layers: Sequence[int] = (5, 6, 7, 8), attention_layers: Sequence[int] = (10, 11, 12, 13), num_experts: int = 16, train_capacity_factor: float = 1., eval_capacity_factor: float = 1., examples_per_group: float = 1., mixing_mechanism: layers.MixingMechanism = layers.MixingMechanism.LINEAR, use_fft: bool = False, num_attention_heads: int = 8, max_sequence_length: int = 512, type_vocab_size: int = 16, inner_dim: int = 2048, inner_activation: _Activation = _approx_gelu, output_dropout: float = 0.1, attention_dropout: float = 0.1, initializer: _Initializer = tf.keras.initializers.TruncatedNormal( stddev=0.02), output_range: Optional[int] = None, embedding_width: Optional[int] = None, embedding_layer: Optional[tf.keras.layers.Layer] = None, norm_first: bool = False, with_dense_inputs: bool = False, export_metrics: bool = True, **kwargs): super().__init__(**kwargs) activation = tf.keras.activations.get(inner_activation) initializer = tf.keras.initializers.get(initializer) if embedding_width is None: embedding_width = hidden_size self._config = { 'vocab_size': vocab_size, 'hidden_size': hidden_size, 'num_layers': num_layers, 'moe_layers': moe_layers, 'num_experts': num_experts, 'train_capacity_factor': train_capacity_factor, 'eval_capacity_factor': eval_capacity_factor, 'examples_per_group': examples_per_group, 'mixing_mechanism': mixing_mechanism, 'use_fft': use_fft, 'attention_layers': attention_layers, 'num_attention_heads': num_attention_heads, 'max_sequence_length': max_sequence_length, 'type_vocab_size': type_vocab_size, 'inner_dim': inner_dim, 'inner_activation': tf.keras.activations.serialize(activation), 'output_dropout': output_dropout, 'attention_dropout': attention_dropout, 'initializer': tf.keras.initializers.serialize(initializer), 'output_range': output_range, 'embedding_width': embedding_width, 'embedding_layer': embedding_layer, 'norm_first': norm_first, 'with_dense_inputs': with_dense_inputs, 'export_metrics': export_metrics, } if embedding_layer is None: self._embedding_layer = layers.OnDeviceEmbedding( vocab_size=vocab_size, embedding_width=embedding_width, initializer=tf_utils.clone_initializer(initializer), name='word_embeddings') else: self._embedding_layer = embedding_layer self._position_embedding_layer = layers.PositionEmbedding( initializer=tf_utils.clone_initializer(initializer), max_length=max_sequence_length, name='position_embedding') self._type_embedding_layer = layers.OnDeviceEmbedding( vocab_size=type_vocab_size, embedding_width=embedding_width, initializer=tf_utils.clone_initializer(initializer), use_one_hot=True, name='type_embeddings') self._embedding_norm_layer = tf.keras.layers.LayerNormalization( name='embeddings/layer_norm', axis=-1, epsilon=1e-12, dtype=tf.float32) self._embedding_dropout = tf.keras.layers.Dropout( rate=output_dropout, name='embedding_dropout') # We project the 'embedding' output to 'hidden_size' if it is not already # 'hidden_size'. 
self._embedding_projection = None if embedding_width != hidden_size: self._embedding_projection = tf.keras.layers.EinsumDense( '...x,xy->...y', output_shape=hidden_size, bias_axes='y', kernel_initializer=tf_utils.clone_initializer(initializer), name='embedding_projection') self._transformer_layers = [] for layer in range(num_layers): if layer in attention_layers: mixing_layer = layers.MultiHeadAttention( num_heads=num_attention_heads, key_dim=int(hidden_size // num_attention_heads), dropout=attention_dropout, use_bias=True, kernel_initializer=tf_utils.clone_initializer(initializer), name='self_attention', ) else: mixing_layer = self._init_mixing_sublayer(layer) if layer in moe_layers: feedforward_layer = layers.MoeLayer( experts=layers.FeedForwardExperts( num_experts=num_experts, d_ff=inner_dim, output_dropout=output_dropout, activation=inner_activation, kernel_initializer=tf_utils.clone_initializer(initializer), name='experts'), router=layers.ExpertsChooseMaskedRouter( num_experts=num_experts, kernel_initializer=tf_utils.clone_initializer(initializer), export_metrics=export_metrics, name='router'), train_capacity_factor=train_capacity_factor, eval_capacity_factor=eval_capacity_factor, examples_per_group=examples_per_group, name='moe') else: feedforward_layer = None # Fallback to default (dense) MLP class block = layers.TransformerScaffold( num_attention_heads=num_attention_heads, inner_dim=inner_dim, inner_activation=inner_activation, attention_cls=mixing_layer, feedforward_cls=feedforward_layer, output_dropout=output_dropout, attention_dropout=attention_dropout, norm_first=norm_first, output_range=output_range if layer == num_layers - 1 else None, kernel_initializer=tf_utils.clone_initializer(initializer), name='transformer/layer_%d' % layer) self._transformer_layers.append(block) self._attention_mask_layer = layers.SelfAttentionMask( name='self_attention_mask') self._pooler_layer = tf.keras.layers.Dense( units=hidden_size, activation='tanh', kernel_initializer=tf_utils.clone_initializer(initializer), name='pooler_transform') if with_dense_inputs: self.inputs = dict( # The total length of token ids and dense inputs still has to be # max_sequence_length. It is checked in call(). input_word_ids=tf.keras.Input(shape=(None,), dtype=tf.int32), input_mask=tf.keras.Input(shape=(None,), dtype=tf.int32), input_type_ids=tf.keras.Input(shape=(None,), dtype=tf.int32), dense_inputs=tf.keras.Input( shape=(None, embedding_width), dtype=tf.float32), dense_mask=tf.keras.Input(shape=(None,), dtype=tf.int32), dense_type_ids=tf.keras.Input(shape=(None,), dtype=tf.int32), ) else: self.inputs = dict( input_word_ids=tf.keras.Input( shape=(max_sequence_length,), dtype=tf.int32), input_mask=tf.keras.Input( shape=(max_sequence_length,), dtype=tf.int32), input_type_ids=tf.keras.Input( shape=(max_sequence_length,), dtype=tf.int32)) self._max_sequence_length = max_sequence_length def call(self, inputs): word_embeddings = None if isinstance(inputs, dict): word_ids = inputs.get('input_word_ids') mask = inputs.get('input_mask') type_ids = inputs.get('input_type_ids') word_embeddings = inputs.get('input_word_embeddings', None) dense_inputs = inputs.get('dense_inputs', None) dense_mask = inputs.get('dense_mask', None) dense_type_ids = inputs.get('dense_type_ids', None) else: raise ValueError('Unexpected inputs type (%s) to %s.' % (type(inputs), self.__class__)) if word_embeddings is None: word_embeddings = self._embedding_layer(word_ids) if dense_inputs is not None: # Concat the dense embeddings at sequence end. 
word_embeddings = tf.concat([word_embeddings, dense_inputs], axis=1) type_ids = tf.concat([type_ids, dense_type_ids], axis=1) mask = tf.concat([mask, dense_mask], axis=1) # SparseMixer: Sequence length must be the same as `max_sequence_length`. word_embeddings = tf.ensure_shape(word_embeddings, [None, self._max_sequence_length, None]) # Absolute position embeddings. position_embeddings = self._position_embedding_layer(word_embeddings) type_embeddings = self._type_embedding_layer(type_ids) embeddings = word_embeddings + position_embeddings + type_embeddings embeddings = self._embedding_norm_layer(embeddings) embeddings = self._embedding_dropout(embeddings) if self._embedding_projection is not None: embeddings = self._embedding_projection(embeddings) attention_mask = self._attention_mask_layer(embeddings, mask) encoder_outputs = [] x = embeddings for layer in self._transformer_layers: x = layer([x, attention_mask]) encoder_outputs.append(x) last_encoder_output = encoder_outputs[-1] first_token_tensor = last_encoder_output[:, 0, :] pooled_output = self._pooler_layer(first_token_tensor) output = dict( sequence_output=encoder_outputs[-1], pooled_output=pooled_output, encoder_outputs=encoder_outputs) return output def get_embedding_table(self): return self._embedding_layer.embeddings def get_embedding_layer(self): return self._embedding_layer def get_config(self): return dict(self._config) @property def transformer_layers(self): """List of Transformer layers in the encoder.""" return self._transformer_layers @property def pooler_layer(self): """The pooler dense layer after the transformer layers.""" return self._pooler_layer @classmethod def from_config(cls, config, custom_objects=None): if 'embedding_layer' in config and config['embedding_layer'] is not None: warn_string = ( 'You are reloading a model that was saved with a ' 'potentially-shared embedding layer object. If you contine to ' 'train this model, the embedding layer will no longer be shared. ' 'To work around this, load the model outside of the Keras API.') print('WARNING: ' + warn_string) logging.warn(warn_string) return cls(**config) def _init_mixing_sublayer(self, layer: int): """Initializes config-dependent mixing sublayer.""" if self._config['mixing_mechanism'] == layers.MixingMechanism.FOURIER: mixing_sublayer = layers.FourierTransformLayer( use_fft=self._config['use_fft'], name='fourier_transform') elif self._config['mixing_mechanism'] == layers.MixingMechanism.HARTLEY: mixing_sublayer = layers.HartleyTransformLayer( use_fft=self._config['use_fft'], name='hartley_transform') elif self._config['mixing_mechanism'] == layers.MixingMechanism.LINEAR: mixing_sublayer = layers.LinearTransformLayer( kernel_initializer=tf_utils.clone_initializer( self._config['initializer']), name='linear_transform') else: raise ValueError('Unsupported mixing mechanism: %s' % self._config['mixing_mechanism']) return mixing_sublayer
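# ----------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the module above): a deliberately
# tiny SparseMixer configuration for a smoke test. The library defaults
# correspond to the canonical Sparse Mixer Base; here the layer count, expert
# count and widths are shrunk, while keeping the documented constraint that
# the input length must equal `max_sequence_length`.
import tensorflow as tf

from official.nlp.modeling.networks import sparse_mixer

encoder = sparse_mixer.SparseMixer(
    vocab_size=100,
    hidden_size=64,
    num_layers=4,
    moe_layers=(1, 2),        # these layers use sparsely activated MoE MLPs
    attention_layers=(3,),    # the final layer uses self-attention
    num_experts=4,
    inner_dim=128,
    max_sequence_length=16)

batch, seq_len = 2, 16        # seq_len must match max_sequence_length
outputs = encoder(dict(
    input_word_ids=tf.random.uniform(
        (batch, seq_len), maxval=100, dtype=tf.int32),
    input_mask=tf.ones((batch, seq_len), tf.int32),
    input_type_ids=tf.zeros((batch, seq_len), tf.int32)))

print(outputs["sequence_output"].shape)  # (2, 16, 64)
print(outputs["pooled_output"].shape)    # (2, 64)
# ----------------------------------------------------------------------------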
file_length: 17,450
avg_line_length: 41.87715
max_line_length: 80
extension_type: py

repo: models
file: models-master/official/nlp/modeling/networks/xlnet_base.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Keras-based XLNet Model.""" from absl import logging import tensorflow as tf from official.modeling import tf_utils from official.nlp.modeling import layers from official.nlp.modeling.layers import transformer_xl _SEG_ID_CLS = 2 def _create_causal_attention_mask( seq_length, memory_length, dtype=tf.float32, same_length=False): """Creates a causal attention mask with a single-sided context. When applying the attention mask in `MultiHeadRelativeAttention`, the attention scores are of shape `[(batch dimensions), S, S + M]`, where: - S = sequence length. - M = memory length. In a simple case where S = 2, M = 1, here is a simple illustration of the `attention_scores` matrix, where `a` represents an attention function: token_0 [[a(token_0, mem_0) a(token_0, token_0) a(token_0, token_1)], token_1 [a(token_1, mem_0) a(token_1, token_0) a(token_1, token_1)]] mem_0 token_0 token_1 For uni-directional attention, we want to mask out values in the attention scores that represent a(token_i, token_j) where j > i. We can achieve this by concatenating 0s (representing memory positions) with a strictly upper triangular matrix of 1s. We then flip the matrix values in order to match the representation where real values are 1s. Args: seq_length: int, The length of each sequence. memory_length: int, The length of memory blocks. dtype: dtype of the mask. same_length: bool, whether to use the same attention length for each token. Returns: A unidirectional attention mask of shape `[seq_length, seq_length + memory_length]`. E.g.: [[1. 1. 1. 0. 0. 0.] [1. 1. 1. 1. 0. 0.] [1. 1. 1. 1. 1. 0.] [1. 1. 1. 1. 1. 1.]] """ ones_matrix = tf.ones([seq_length, seq_length], dtype=dtype) upper_triangular = tf.linalg.band_part(ones_matrix, 0, -1) diagonal = tf.linalg.band_part(ones_matrix, 0, 0) padding = tf.zeros([seq_length, memory_length], dtype=dtype) causal_attention_mask = tf.concat( [padding, upper_triangular - diagonal], 1) if same_length: lower_triangular = tf.linalg.band_part(ones_matrix, -1, 0) strictly_lower_triangular = lower_triangular - diagonal causal_attention_mask = tf.concat( [causal_attention_mask[:, :seq_length] + strictly_lower_triangular, causal_attention_mask[:, seq_length:]], 1) return 1 - causal_attention_mask def _combine_masks(mask1, mask2, dtype, how="and"): """Combines two masks. Use "and" if trying to combine two existing masks. Use "or" if trying to flip a few positions to "real". Args: mask1: tf.Tensor, input mask 1 mask2: tf.Tensor, input mask 2 dtype: tf.dtype how: Which logical operation should run. Returns: The combined input masks. """ if how == "and": operator = tf.math.logical_and else: operator = tf.math.logical_or return tf.cast(operator( tf.cast(mask1, tf.bool), tf.cast(mask2, tf.bool)), dtype=dtype) def _compute_attention_mask( input_mask, permutation_mask, attention_type, seq_length, memory_length, batch_size, dtype=tf.float32): """Combines all input attention masks for XLNet. 
In XLNet modeling, `0` represents tokens that can be attended, and `1` represents tokens that cannot be attended. For XLNet pre-training and fine tuning, there are a few masks used: - Causal attention mask: If the attention type is unidirectional, then all tokens after the current position cannot be attended to. - Input mask: when generating data, padding is added to a max sequence length to make all sequences the same length. This masks out real tokens (`0`) from padding tokens (`1`). - Permutation mask: during XLNet pretraining, the input sequence is factorized into a factorization sequence `z`. During partial prediction, `z` is split at a cutting point `c` (an index of the factorization sequence) and prediction is only applied to all tokens after `c`. Therefore, tokens at factorization positions `i` > `c` can be attended to and tokens at factorization positions `i` <= `c` cannot be attended to. This function broadcasts and combines all attention masks to produce the query attention mask and the content attention mask. Args: input_mask: Tensor, the input mask related to padding. Input shape: `(B, S)`. permutation_mask: Tensor, the permutation mask used in partial prediction. Input shape: `(B, S, S)`. attention_type: str, the attention type. Can be "uni" (directional) or "bi" (directional). seq_length: int, the length of each sequence. memory_length: int the length of memory blocks. batch_size: int, the batch size. dtype: The dtype of the masks. Returns: attention_mask, content_attention_mask: The position and context-based attention masks and content attention masks, respectively. """ attention_mask = None # `1` values mean do not attend to this position. if attention_type == "uni": causal_attention_mask = _create_causal_attention_mask( seq_length=seq_length, memory_length=memory_length, dtype=dtype) causal_attention_mask = causal_attention_mask[None, None, :, :] # `causal_attention_mask`: [1, 1, S, S + M] # input_mask: [B, S] # permutation_mask: [B, S, S] if input_mask is not None and permutation_mask is not None: data_mask = _combine_masks(input_mask[:, None, :], permutation_mask, dtype) elif input_mask is not None and permutation_mask is None: data_mask = input_mask[:, None, :] elif input_mask is None and permutation_mask is not None: data_mask = permutation_mask else: data_mask = None # data_mask: [B, S, S] or [B, 1, S] if data_mask is not None: # All positions within state can be attended to. state_mask = tf.ones([batch_size, tf.shape(data_mask)[1], memory_length], dtype=dtype) # state_mask: [B, 1, M] or [B, S, M] data_mask = tf.concat([state_mask, data_mask], 2) # data_mask: [B, 1, S + M] or [B, S, S + M] if attention_type == "uni": attention_mask = _combine_masks(causal_attention_mask, data_mask[:, None, :, :], dtype=dtype) else: attention_mask = data_mask[:, None, :, :] if attention_mask is not None: # Construct the content attention mask. # This ensures that the mask allows the model to attend to positions in # content positions (e.g. the content diagonal). non_target_mask = tf.concat( [tf.zeros([seq_length, memory_length], dtype=dtype), tf.eye(seq_length, dtype=dtype)], axis=-1) content_attention_mask = _combine_masks( attention_mask, non_target_mask, how="or", dtype=dtype) else: content_attention_mask = None return attention_mask, content_attention_mask def _compute_segment_matrix( segment_ids, memory_length, batch_size, use_cls_mask): """Computes the segment embedding matrix. XLNet introduced segment-based attention for attention calculations. 
This extends the idea of relative encodings in Transformer XL by considering whether or not two positions are within the same segment, rather than which segments they come from. This function generates a segment matrix by broadcasting provided segment IDs in two different dimensions and checking where values are equal. This output matrix shows `True` whenever two tokens are NOT in the same segment and `False` whenever they are. Args: segment_ids: A Tensor of size `[B, S]` that represents which segment each token belongs to. memory_length: int, the length of memory blocks. batch_size: int, the batch size. use_cls_mask: bool, whether or not to introduce cls mask in input sequences. Returns: A boolean Tensor of size `[B, S, S + M]`, where `True` means that two tokens are NOT in the same segment, and `False` means they are in the same segment. """ if segment_ids is None: return None memory_padding = tf.zeros([batch_size, memory_length], dtype=segment_ids.dtype) padded_segment_ids = tf.concat([memory_padding, segment_ids], 1) # segment_ids: [B, S] # padded_segment_ids: [B, S + M] if use_cls_mask: # `1` indicates not in the same segment. # Target result: [B, S, S + M] # segment_ids: [B, S] # padded_segment_ids: [B, S + M] broadcasted_segment_class_indices = ( tf.equal(segment_ids, tf.constant([_SEG_ID_CLS]))[:, :, None]) broadcasted_padded_class_indices = ( tf.equal( padded_segment_ids, tf.constant([_SEG_ID_CLS]))[:, None, :]) class_index_matrix = tf.logical_or(broadcasted_segment_class_indices, broadcasted_padded_class_indices) segment_matrix = tf.equal(segment_ids[:, :, None], padded_segment_ids[:, None, :]) segment_matrix = tf.logical_or(class_index_matrix, segment_matrix) else: # TODO(allencwang) - address this legacy mismatch from `use_cls_mask`. segment_matrix = tf.logical_not( tf.equal(segment_ids[:, :, None], padded_segment_ids[:, None, :])) return segment_matrix def _compute_positional_encoding( attention_type, position_encoding_layer, hidden_size, batch_size, total_length, seq_length, clamp_length, bi_data, dtype=tf.float32): """Computes the relative position encoding. Args: attention_type: str, the attention type. Can be "uni" (directional) or "bi" (directional). position_encoding_layer: An instance of `RelativePositionEncoding`. hidden_size: int, the hidden size. batch_size: int, the batch size. total_length: int, the sequence length added to the memory length. seq_length: int, the length of each sequence. clamp_length: int, clamp all relative distances larger than clamp_length. -1 means no clamping. bi_data: bool, whether to use bidirectional input pipeline. Usually set to True during pretraining and False during finetuning. dtype: the dtype of the encoding. Returns: A Tensor, representing the position encoding. 
""" freq_seq = tf.range(0, hidden_size, 2.0) if dtype is not None and dtype != tf.float32: freq_seq = tf.cast(freq_seq, dtype=dtype) if attention_type == "bi": beg, end = total_length, -seq_length elif attention_type == "uni": beg, end = total_length, -1 else: raise ValueError("Unknown `attention_type` {}.".format(attention_type)) if bi_data: forward_position_sequence = tf.range(beg, end, -1.0) backward_position_sequence = tf.range(-beg, -end, 1.0) if dtype is not None and dtype != tf.float32: forward_position_sequence = tf.cast(forward_position_sequence, dtype=dtype) backward_position_sequence = tf.cast(backward_position_sequence, dtype=dtype) if clamp_length > 0: forward_position_sequence = tf.clip_by_value( forward_position_sequence, -clamp_length, clamp_length) backward_position_sequence = tf.clip_by_value( backward_position_sequence, -clamp_length, clamp_length) if batch_size is not None: forward_positional_encoding = position_encoding_layer( forward_position_sequence, batch_size // 2) backward_positional_encoding = position_encoding_layer( backward_position_sequence, batch_size // 2) else: forward_positional_encoding = position_encoding_layer( forward_position_sequence, None) backward_positional_encoding = position_encoding_layer( backward_position_sequence, None) relative_position_encoding = tf.concat( [forward_positional_encoding, backward_positional_encoding], axis=0) else: forward_position_sequence = tf.range(beg, end, -1.0) if dtype is not None and dtype != tf.float32: forward_position_sequence = tf.cast( forward_position_sequence, dtype=dtype) if clamp_length > 0: forward_position_sequence = tf.clip_by_value( forward_position_sequence, -clamp_length, clamp_length) relative_position_encoding = position_encoding_layer( forward_position_sequence, batch_size) return relative_position_encoding class RelativePositionEncoding(tf.keras.layers.Layer): """Creates a relative positional encoding. This layer creates a relative positional encoding as described in "Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context" (https://arxiv.org/abs/1901.02860). Rather than an absolute position embedding as in Transformer, this formulation represents position as the relative distance between tokens using sinusoidal positional embeddings. Note: This layer is currently experimental. Attributes: hidden_size: The dimensionality of the input embeddings. """ def __init__(self, hidden_size, **kwargs): super().__init__(**kwargs) self._hidden_size = hidden_size self._inv_freq = 1.0 / (10000.0**( tf.range(0, self._hidden_size, 2.0) / self._hidden_size)) def call(self, pos_seq, batch_size=None): """Implements call() for the layer. Args: pos_seq: A 1-D `Tensor` batch_size: The optionally provided batch size that tiles the relative positional encoding. Returns: The relative positional encoding of shape: [batch_size, len(pos_seq), hidden_size] if batch_size is provided, else [1, len(pos_seq), hidden_size]. """ sinusoid_input = tf.einsum("i,d->id", pos_seq, self._inv_freq) relative_position_encoding = tf.concat([tf.sin(sinusoid_input), tf.cos(sinusoid_input)], -1) relative_position_encoding = relative_position_encoding[None, :, :] if batch_size is not None: relative_position_encoding = tf.tile(relative_position_encoding, [batch_size, 1, 1]) return relative_position_encoding @tf.keras.utils.register_keras_serializable(package="Text") class XLNetBase(tf.keras.layers.Layer): """Base XLNet model. Attributes: vocab_size: int, the number of tokens in vocabulary. num_layers: int, the number of layers. 
hidden_size: int, the hidden size. num_attention_heads: int, the number of attention heads. head_size: int, the dimension size of each attention head. inner_size: int, the hidden size in feed-forward layers. dropout_rate: float, dropout rate. attention_dropout_rate: float, dropout rate on attention probabilities. attention_type: str, "uni" or "bi". bi_data: bool, whether to use bidirectional input pipeline. Usually set to True during pretraining and False during finetuning. initializer: A tf initializer. two_stream: bool, whether or not to use `TwoStreamRelativeAttention` used in the XLNet pretrainer. If `False`, then it will use `MultiHeadRelativeAttention` as in Transformer XL. tie_attention_biases: bool, whether or not to tie the biases together. Usually set to `True`. Used for backwards compatibility. memory_length: int, the number of tokens to cache. same_length: bool, whether to use the same attention length for each token. clamp_length: int, clamp all relative distances larger than clamp_length. -1 means no clamping. reuse_length: int, the number of tokens in the currect batch to be cached and reused in the future. inner_activation: str, "relu" or "gelu". use_cls_mask: bool, whether or not cls mask is included in the input sequences. embedding_width: The width of the word embeddings. If the embedding width is not equal to hidden size, embedding parameters will be factorized into two matrices in the shape of ["vocab_size", "embedding_width"] and ["embedding_width", "hidden_size"] ("embedding_width" is usually much smaller than "hidden_size"). embedding_layer: The word embedding layer. `None` means we will create a new embedding layer. Otherwise, we will reuse the given embedding layer. This parameter is originally added for ELECTRA model which needs to tie the generator embeddings with the discriminator embeddings. 
""" def __init__(self, vocab_size, num_layers, hidden_size, num_attention_heads, head_size, inner_size, dropout_rate, attention_dropout_rate, attention_type, bi_data, initializer, two_stream=False, tie_attention_biases=True, memory_length=None, clamp_length=-1, reuse_length=None, inner_activation="relu", use_cls_mask=False, embedding_width=None, **kwargs): super().__init__(**kwargs) self._vocab_size = vocab_size self._initializer = initializer self._attention_type = attention_type self._num_layers = num_layers self._hidden_size = hidden_size self._num_attention_heads = num_attention_heads self._head_size = head_size self._inner_size = inner_size self._inner_activation = inner_activation self._dropout_rate = dropout_rate self._attention_dropout_rate = attention_dropout_rate self._tie_attention_biases = tie_attention_biases self._two_stream = two_stream self._memory_length = memory_length self._reuse_length = reuse_length self._bi_data = bi_data self._clamp_length = clamp_length self._use_cls_mask = use_cls_mask self._segment_embedding = None self._mask_embedding = None self._embedding_width = embedding_width if embedding_width is None: embedding_width = hidden_size self._embedding_layer = layers.OnDeviceEmbedding( vocab_size=self._vocab_size, embedding_width=embedding_width, initializer=tf_utils.clone_initializer(self._initializer), dtype=tf.float32, name="word_embedding") self._dropout = tf.keras.layers.Dropout(rate=self._dropout_rate) self.embedding_dropout = tf.keras.layers.Dropout(rate=self._dropout_rate) self.position_encoding = RelativePositionEncoding(self._hidden_size) self._transformer_xl = transformer_xl.TransformerXL( vocab_size=vocab_size, num_layers=num_layers, hidden_size=hidden_size, num_attention_heads=num_attention_heads, head_size=head_size, inner_size=inner_size, dropout_rate=dropout_rate, attention_dropout_rate=attention_dropout_rate, initializer=initializer, two_stream=two_stream, tie_attention_biases=tie_attention_biases, memory_length=memory_length, reuse_length=reuse_length, inner_activation=inner_activation, name="transformer_xl") def get_config(self): config = { "vocab_size": self._vocab_size, "num_layers": self._num_layers, "hidden_size": self._hidden_size, "num_attention_heads": self._num_attention_heads, "head_size": self._head_size, "inner_size": self._inner_size, "dropout_rate": self._dropout_rate, "attention_dropout_rate": self._attention_dropout_rate, "attention_type": self._attention_type, "bi_data": self._bi_data, "initializer": self._initializer, "two_stream": self._two_stream, "tie_attention_biases": self._tie_attention_biases, "memory_length": self._memory_length, "clamp_length": self._clamp_length, "reuse_length": self._reuse_length, "inner_activation": self._inner_activation, "use_cls_mask": self._use_cls_mask, "embedding_width": self._embedding_width, } base_config = super().get_config() return dict(list(base_config.items()) + list(config.items())) def get_embedding_lookup_table(self): """Returns the embedding layer weights.""" return self._embedding_layer.embeddings def __call__(self, input_ids, segment_ids=None, input_mask=None, state=None, permutation_mask=None, target_mapping=None, masked_tokens=None, **kwargs): # Uses dict to feed inputs into call() in order to keep state as a python # list. 
inputs = { "input_ids": input_ids, "segment_ids": segment_ids, "input_mask": input_mask, "state": state, "permutation_mask": permutation_mask, "target_mapping": target_mapping, "masked_tokens": masked_tokens } return super().__call__(inputs, **kwargs) def call(self, inputs): """Implements call() for the layer.""" input_ids = inputs["input_ids"] segment_ids = inputs["segment_ids"] input_mask = inputs["input_mask"] state = inputs["state"] permutation_mask = inputs["permutation_mask"] target_mapping = inputs["target_mapping"] masked_tokens = inputs["masked_tokens"] batch_size = tf.shape(input_ids)[0] seq_length = tf.shape(input_ids)[1] if state is not None: memory_length = tf.shape(state[0])[1] else: memory_length = 0 total_length = memory_length + seq_length if self._two_stream and masked_tokens is None: raise ValueError("`masked_tokens` must be provided in order to " "initialize the query stream in " "`TwoStreamRelativeAttention`.") if masked_tokens is not None and not self._two_stream: logging.warning("`masked_tokens` is provided but `two_stream` is not " "enabled. Please enable `two_stream` to enable two " "stream attention.") if input_mask is not None: dtype = input_mask.dtype elif permutation_mask is not None: dtype = permutation_mask.dtype else: dtype = tf.int32 query_attention_mask, content_attention_mask = _compute_attention_mask( input_mask=input_mask, permutation_mask=permutation_mask, attention_type=self._attention_type, seq_length=seq_length, memory_length=memory_length, batch_size=batch_size, dtype=dtype) relative_position_encoding = _compute_positional_encoding( attention_type=self._attention_type, position_encoding_layer=self.position_encoding, hidden_size=self._hidden_size, batch_size=batch_size, total_length=total_length, seq_length=seq_length, clamp_length=self._clamp_length, bi_data=self._bi_data, dtype=tf.float32) relative_position_encoding = self.embedding_dropout( relative_position_encoding) if segment_ids is None: segment_embedding = None segment_matrix = None else: if self._segment_embedding is None: self._segment_embedding = self.add_weight( "seg_embed", shape=[self._num_layers, 2, self._num_attention_heads, self._head_size], dtype=tf.float32, initializer=tf_utils.clone_initializer(self._initializer)) segment_embedding = self._segment_embedding segment_matrix = _compute_segment_matrix( segment_ids=segment_ids, memory_length=memory_length, batch_size=batch_size, use_cls_mask=self._use_cls_mask) word_embeddings = self._embedding_layer(input_ids) content_stream = self._dropout(word_embeddings) if self._two_stream: if self._mask_embedding is None: self._mask_embedding = self.add_weight( "mask_emb/mask_emb", shape=[1, 1, self._hidden_size], dtype=tf.float32) if target_mapping is None: masked_tokens = masked_tokens[:, :, None] masked_token_embedding = ( masked_tokens * self._mask_embedding + (1 - masked_tokens) * word_embeddings) else: masked_token_embedding = tf.tile( self._mask_embedding, [batch_size, tf.shape(target_mapping)[1], 1]) query_stream = self._dropout(masked_token_embedding) else: query_stream = None return self._transformer_xl( content_stream=content_stream, query_stream=query_stream, target_mapping=target_mapping, state=state, relative_position_encoding=relative_position_encoding, segment_matrix=segment_matrix, segment_embedding=segment_embedding, content_attention_mask=content_attention_mask, query_attention_mask=query_attention_mask)
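The attend/ignore convention above (`0` = may be attended, `1` = may not) is easy to invert once memory blocks are involved. The snippet below is an illustrative sketch in plain TensorFlow, not a call into the module's private helpers (`_create_causal_attention_mask` and `_combine_masks` are assumed to be defined earlier in this file); it also exercises the `RelativePositionEncoding` layer defined above.

import tensorflow as tf

from official.nlp.modeling.networks.xlnet_base import RelativePositionEncoding

# Illustrative causal mask in the convention documented above:
# 0 = position may be attended, 1 = position may not be attended.
seq_length, memory_length = 4, 2

# Strict upper triangle marks future positions of the current chunk.
future = 1.0 - tf.linalg.band_part(tf.ones([seq_length, seq_length]), -1, 0)

# Cached memory tokens all lie in the past, so their columns stay zero.
memory_cols = tf.zeros([seq_length, memory_length])
causal_with_memory = tf.concat([memory_cols, future], axis=1)  # [S, S + M]

# Relative positions S + M down to 0 (descending), mirroring the "uni" branch
# of `_compute_positional_encoding` above.
pos_seq = tf.range(float(seq_length + memory_length), -1.0, -1.0)
encoding = RelativePositionEncoding(hidden_size=8)(pos_seq, batch_size=2)
print(causal_with_memory.shape, encoding.shape)  # (4, 6) (2, 7, 8)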
25,857
35.368495
80
py
models
models-master/official/nlp/modeling/networks/sparse_mixer_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for Sparse Mixer encoder network.""" from typing import Sequence from absl.testing import parameterized import tensorflow as tf from official.nlp.modeling import layers from official.nlp.modeling.networks import sparse_mixer class SparseMixerTest(parameterized.TestCase, tf.test.TestCase): def tearDown(self): super().tearDown() tf.keras.mixed_precision.set_global_policy("float32") @parameterized.named_parameters( dict( testcase_name="sparse_mixer", mixing_mechanism=layers.MixingMechanism.LINEAR, moe_layers=(1,), attention_layers=(2,)), dict( testcase_name="fnet", mixing_mechanism=layers.MixingMechanism.FOURIER, moe_layers=(), attention_layers=()), dict( testcase_name="sparse_hnet", mixing_mechanism=layers.MixingMechanism.HARTLEY, moe_layers=(0, 1, 2), attention_layers=(1, 2)), dict( testcase_name="sparse_bert", mixing_mechanism=layers.MixingMechanism.LINEAR, moe_layers=(0, 1, 2), # All layers use MoE attention_layers=(0, 1, 2)), # All layers use attention ) def test_network(self, mixing_mechanism: layers.MixingMechanism, attention_layers: Sequence[int], moe_layers: Sequence[int]): num_layers = 3 hidden_size = 16 sequence_length = 32 test_network = sparse_mixer.SparseMixer( vocab_size=100, hidden_size=hidden_size, num_attention_heads=2, max_sequence_length=sequence_length, num_layers=num_layers, moe_layers=moe_layers, num_experts=8, mixing_mechanism=mixing_mechanism, attention_layers=attention_layers) batch_size = 4 word_ids = tf.keras.Input( shape=(sequence_length,), batch_size=batch_size, dtype=tf.int32) mask = tf.keras.Input( shape=(sequence_length,), batch_size=batch_size, dtype=tf.int32) type_ids = tf.keras.Input( shape=(sequence_length,), batch_size=batch_size, dtype=tf.int32) dict_outputs = test_network( dict(input_word_ids=word_ids, input_mask=mask, input_type_ids=type_ids)) data = dict_outputs["sequence_output"] pooled = dict_outputs["pooled_output"] self.assertIsInstance(test_network.transformer_layers, list) self.assertLen(test_network.transformer_layers, 3) self.assertIsInstance(test_network.pooler_layer, tf.keras.layers.Dense) expected_data_shape = [batch_size, sequence_length, hidden_size] expected_pooled_shape = [batch_size, hidden_size] self.assertAllEqual(expected_data_shape, data.shape.as_list()) self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list()) # The default output dtype is float32. 
self.assertAllEqual(tf.float32, data.dtype) self.assertAllEqual(tf.float32, pooled.dtype) def test_embeddings_as_inputs(self): hidden_size = 32 sequence_length = 8 test_network = sparse_mixer.SparseMixer( vocab_size=100, hidden_size=hidden_size, num_attention_heads=2, max_sequence_length=sequence_length, num_layers=3, moe_layers=(1,), num_experts=4, attention_layers=(2,)) batch_size = 2 word_ids = tf.keras.Input( shape=(sequence_length), batch_size=batch_size, dtype=tf.int32) mask = tf.keras.Input( shape=(sequence_length,), batch_size=batch_size, dtype=tf.int32) type_ids = tf.keras.Input( shape=(sequence_length,), batch_size=batch_size, dtype=tf.int32) test_network.build( dict(input_word_ids=word_ids, input_mask=mask, input_type_ids=type_ids)) embeddings = test_network.get_embedding_layer()(word_ids) # Calls with the embeddings. dict_outputs = test_network( dict( input_word_embeddings=embeddings, input_mask=mask, input_type_ids=type_ids)) all_encoder_outputs = dict_outputs["encoder_outputs"] pooled = dict_outputs["pooled_output"] expected_data_shape = [batch_size, sequence_length, hidden_size] expected_pooled_shape = [batch_size, hidden_size] self.assertLen(all_encoder_outputs, 3) for data in all_encoder_outputs: self.assertAllEqual(expected_data_shape, data.shape.as_list()) self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list()) # The default output dtype is float32. self.assertAllEqual(tf.float32, all_encoder_outputs[-1].dtype) self.assertAllEqual(tf.float32, pooled.dtype) if __name__ == "__main__": tf.test.main()
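The parameterized cases above only build the Keras graph; the same constructor arguments can also be exercised eagerly on concrete tensors. A minimal sketch mirroring the "sparse_mixer" test case, using the import paths from the test:

import tensorflow as tf

from official.nlp.modeling import layers
from official.nlp.modeling.networks import sparse_mixer

encoder = sparse_mixer.SparseMixer(
    vocab_size=100,
    hidden_size=16,
    num_attention_heads=2,
    max_sequence_length=32,
    num_layers=3,
    moe_layers=(1,),        # only layer 1 uses mixture-of-experts
    num_experts=8,
    mixing_mechanism=layers.MixingMechanism.LINEAR,
    attention_layers=(2,))  # only layer 2 uses self-attention

batch_size, seq_len = 4, 32
word_ids = tf.random.uniform([batch_size, seq_len], maxval=100, dtype=tf.int32)
mask = tf.ones([batch_size, seq_len], dtype=tf.int32)
type_ids = tf.zeros([batch_size, seq_len], dtype=tf.int32)

outputs = encoder(
    dict(input_word_ids=word_ids, input_mask=mask, input_type_ids=type_ids))
print(outputs["sequence_output"].shape)  # (4, 32, 16)
print(outputs["pooled_output"].shape)    # (4, 16)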
5,192
35.0625
80
py
models
models-master/official/nlp/modeling/networks/fnet_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for FNet encoder network.""" from typing import Sequence from absl.testing import parameterized import tensorflow as tf from official.nlp.modeling import layers from official.nlp.modeling.networks import fnet class FNetTest(parameterized.TestCase, tf.test.TestCase): def tearDown(self): super(FNetTest, self).tearDown() tf.keras.mixed_precision.set_global_policy("float32") @parameterized.named_parameters( ("fnet", layers.MixingMechanism.FOURIER, ()), ("fnet_hybrid", layers.MixingMechanism.FOURIER, (1, 2)), ("hnet", layers.MixingMechanism.HARTLEY, ()), ("hnet_hybrid", layers.MixingMechanism.HARTLEY, (1, 2)), ("linear", layers.MixingMechanism.LINEAR, ()), ("linear_hybrid", layers.MixingMechanism.LINEAR, (0,)), ("bert", layers.MixingMechanism.FOURIER, (0, 1, 2)), ) def test_network(self, mixing_mechanism: layers.MixingMechanism, attention_layers: Sequence[int]): num_layers = 3 hidden_size = 32 sequence_length = 21 test_network = fnet.FNet( vocab_size=100, hidden_size=hidden_size, num_attention_heads=2, max_sequence_length=sequence_length, num_layers=num_layers, mixing_mechanism=mixing_mechanism, attention_layers=attention_layers) # Create the inputs (note that the first dimension is implicit). word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) dict_outputs = test_network( dict(input_word_ids=word_ids, input_mask=mask, input_type_ids=type_ids)) data = dict_outputs["sequence_output"] pooled = dict_outputs["pooled_output"] self.assertIsInstance(test_network.transformer_layers, list) self.assertLen(test_network.transformer_layers, 3) self.assertIsInstance(test_network.pooler_layer, tf.keras.layers.Dense) expected_data_shape = [None, sequence_length, hidden_size] expected_pooled_shape = [None, hidden_size] self.assertAllEqual(expected_data_shape, data.shape.as_list()) self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list()) # The default output dtype is float32. self.assertAllEqual(tf.float32, data.dtype) self.assertAllEqual(tf.float32, pooled.dtype) def test_embeddings_as_inputs(self): hidden_size = 32 sequence_length = 21 test_network = fnet.FNet( vocab_size=100, hidden_size=hidden_size, num_attention_heads=2, max_sequence_length=sequence_length, num_layers=3) # Create the inputs (note that the first dimension is implicit). word_ids = tf.keras.Input(shape=(sequence_length), dtype=tf.int32) mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) test_network.build( dict(input_word_ids=word_ids, input_mask=mask, input_type_ids=type_ids)) embeddings = test_network.get_embedding_layer()(word_ids) # Calls with the embeddings. 
dict_outputs = test_network( dict( input_word_embeddings=embeddings, input_mask=mask, input_type_ids=type_ids)) all_encoder_outputs = dict_outputs["encoder_outputs"] pooled = dict_outputs["pooled_output"] expected_data_shape = [None, sequence_length, hidden_size] expected_pooled_shape = [None, hidden_size] self.assertLen(all_encoder_outputs, 3) for data in all_encoder_outputs: self.assertAllEqual(expected_data_shape, data.shape.as_list()) self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list()) # The default output dtype is float32. self.assertAllEqual(tf.float32, all_encoder_outputs[-1].dtype) self.assertAllEqual(tf.float32, pooled.dtype) if __name__ == "__main__": tf.test.main()
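As the parameterized cases show, individual FNet layers can fall back to standard self-attention. Below is a minimal eager sketch of the "fnet_hybrid" configuration exercised by the test above; note that the input length must equal `max_sequence_length`.

import tensorflow as tf

from official.nlp.modeling import layers
from official.nlp.modeling.networks import fnet

# Fourier mixing everywhere except layers 1 and 2, which use self-attention.
encoder = fnet.FNet(
    vocab_size=100,
    hidden_size=32,
    num_attention_heads=2,
    max_sequence_length=21,
    num_layers=3,
    mixing_mechanism=layers.MixingMechanism.FOURIER,
    attention_layers=(1, 2))

# FNet fixes the sequence length; shorter inputs must be padded to 21 here.
word_ids = tf.random.uniform([2, 21], maxval=100, dtype=tf.int32)
outputs = encoder(
    dict(input_word_ids=word_ids,
         input_mask=tf.ones([2, 21], dtype=tf.int32),
         input_type_ids=tf.zeros([2, 21], dtype=tf.int32)))
print(outputs["pooled_output"].shape)  # (2, 32)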
4,543
36.866667
80
py
models
models-master/official/nlp/modeling/networks/__init__.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Networks are combinations of `tf.keras` layers (and possibly other networks).

They are `tf.keras` models that would not be trained alone. They encapsulate
common network structures, such as a transformer encoder, into easily handled
objects with a standardized configuration.
"""
from official.nlp.modeling.networks.albert_encoder import AlbertEncoder
from official.nlp.modeling.networks.bert_encoder import BertEncoder
from official.nlp.modeling.networks.bert_encoder import BertEncoderV2
from official.nlp.modeling.networks.classification import Classification
from official.nlp.modeling.networks.encoder_scaffold import EncoderScaffold
from official.nlp.modeling.networks.fnet import FNet
from official.nlp.modeling.networks.funnel_transformer import FunnelTransformerEncoder
from official.nlp.modeling.networks.mobile_bert_encoder import MobileBERTEncoder
from official.nlp.modeling.networks.packed_sequence_embedding import PackedSequenceEmbedding
from official.nlp.modeling.networks.span_labeling import SpanLabeling
from official.nlp.modeling.networks.span_labeling import XLNetSpanLabeling
from official.nlp.modeling.networks.sparse_mixer import SparseMixer
from official.nlp.modeling.networks.xlnet_base import XLNetBase
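Because every encoder class is re-exported here, downstream code typically imports the `networks` namespace rather than the individual modules. A small sketch, assuming the usual dict outputs (`sequence_output`, `pooled_output`) of the encoders:

import tensorflow as tf

from official.nlp.modeling import networks

encoder = networks.BertEncoderV2(vocab_size=30522, num_layers=2)
word_ids = tf.constant([[101, 2023, 2003, 102]], dtype=tf.int32)
outputs = encoder(
    dict(input_word_ids=word_ids,
         input_mask=tf.ones_like(word_ids),
         input_type_ids=tf.zeros_like(word_ids)))
print(outputs["pooled_output"].shape)  # (1, 768) with the default hidden size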
1,839
53.117647
92
py
models
models-master/official/nlp/modeling/networks/fnet.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """FNet encoder network. Based on ["FNet: Mixing Tokens with Fourier Transforms"] (https://aclanthology.org/2022.naacl-main.319/). """ # pylint: disable=g-classes-have-attributes from typing import Any, Callable, Optional, Sequence, Union from absl import logging import tensorflow as tf from official.modeling import tf_utils from official.nlp.modeling import layers _Activation = Union[str, Callable[..., Any]] _Initializer = Union[str, tf.keras.initializers.Initializer] _approx_gelu = lambda x: tf.keras.activations.gelu(x, approximate=True) class FNet(tf.keras.layers.Layer): """FNet encoder network. Based on ["FNet: Mixing Tokens with Fourier Transforms"] (https://aclanthology.org/2022.naacl-main.319/). FNet is an efficient Transformer-like encoder network that replaces self-attention sublayers with Fourier sublayers. This implementation defaults to the canonical FNet Base model, but the network also supports more general mixing models (e.g. 'Linear', 'HNet') and hybrid models (e.g. 'FNet-Hybrid') models that use both mixing and self-attention layers. The input length is fixed to 'max_sequence_length'. Args: vocab_size: The size of the token vocabulary. hidden_size: The size of the transformer hidden layers. num_layers: The number of transformer layers. mixing_mechanism: Type of mixing mechanism used in place of self-attention layers. Defaults to FNet ('Fourier') mixing. use_fft: Only used for spectral mixing mechanisms. Determines whether to use Fast Fourier Transform (True) or the Discrete Fourier Transform (DFT) matrix (False; default) to compute the Fourier Transform. See layers.FourierTransformLayer or layers.HartleyTransformLayer for advice. attention_layers: Specifies which layers, if any, should be attention layers in the encoder. The remaining [0, num_layers) setminus attention_layers will use the specified `mixing_mechanism`. If using attention layers, a good rule of thumb is to place them in the final few layers. num_attention_heads: The number of attention heads for each transformer. The hidden size must be divisible by the number of attention heads. max_sequence_length: The only sequence length that this encoder can consume. This determines the variable shape for positional embeddings and the size of the mixing matrices. type_vocab_size: The number of types that the 'type_ids' input can take. inner_dim: The output dimension of the first Dense layer in a two-layer feedforward network for each transformer. inner_activation: The activation for the first Dense layer in a two-layer feedforward network for each transformer. output_dropout: Dropout probability for the post-attention and output dropout. attention_dropout: The dropout rate to use for the attention layers within the transformer layers. initializer: The initializer to use for all weights in this encoder. output_range: The sequence output range, [0, output_range), by slicing the target sequence of the last transformer layer. 
`None` means the entire target sequence will attend to the source sequence, which yields the full output. embedding_width: The width of the word embeddings. If the embedding width is not equal to hidden size, embedding parameters will be factorized into two matrices in the shape of ['vocab_size', 'embedding_width'] and ['embedding_width', 'hidden_size'] ('embedding_width' is usually much smaller than 'hidden_size'). embedding_layer: An optional Layer instance which will be called to generate embeddings for the input word IDs. norm_first: Whether to normalize inputs to attention and intermediate dense layers. If set False, output of attention and intermediate dense layers is normalized. with_dense_inputs: Whether to accept dense embeddings as the input. """ def __init__( self, vocab_size: int, hidden_size: int = 768, num_layers: int = 12, mixing_mechanism: layers.MixingMechanism = layers.MixingMechanism.FOURIER, use_fft: bool = False, attention_layers: Sequence[int] = (), num_attention_heads: int = 12, max_sequence_length: int = 512, type_vocab_size: int = 16, inner_dim: int = 3072, inner_activation: _Activation = _approx_gelu, output_dropout: float = 0.1, attention_dropout: float = 0.1, initializer: _Initializer = tf.keras.initializers.TruncatedNormal( stddev=0.02), output_range: Optional[int] = None, embedding_width: Optional[int] = None, embedding_layer: Optional[tf.keras.layers.Layer] = None, norm_first: bool = False, with_dense_inputs: bool = False, **kwargs): super().__init__(**kwargs) activation = tf.keras.activations.get(inner_activation) initializer = tf.keras.initializers.get(initializer) if embedding_width is None: embedding_width = hidden_size self._config = { 'vocab_size': vocab_size, 'hidden_size': hidden_size, 'num_layers': num_layers, 'mixing_mechanism': mixing_mechanism, 'use_fft': use_fft, 'attention_layers': attention_layers, 'num_attention_heads': num_attention_heads, 'max_sequence_length': max_sequence_length, 'type_vocab_size': type_vocab_size, 'inner_dim': inner_dim, 'inner_activation': tf.keras.activations.serialize(activation), 'output_dropout': output_dropout, 'attention_dropout': attention_dropout, 'initializer': tf.keras.initializers.serialize(initializer), 'output_range': output_range, 'embedding_width': embedding_width, 'embedding_layer': embedding_layer, 'norm_first': norm_first, 'with_dense_inputs': with_dense_inputs, } if embedding_layer is None: self._embedding_layer = layers.OnDeviceEmbedding( vocab_size=vocab_size, embedding_width=embedding_width, initializer=tf_utils.clone_initializer(initializer), name='word_embeddings') else: self._embedding_layer = embedding_layer self._position_embedding_layer = layers.PositionEmbedding( initializer=tf_utils.clone_initializer(initializer), max_length=max_sequence_length, name='position_embedding') self._type_embedding_layer = layers.OnDeviceEmbedding( vocab_size=type_vocab_size, embedding_width=embedding_width, initializer=tf_utils.clone_initializer(initializer), use_one_hot=True, name='type_embeddings') self._embedding_norm_layer = tf.keras.layers.LayerNormalization( name='embeddings/layer_norm', axis=-1, epsilon=1e-12, dtype=tf.float32) self._embedding_dropout = tf.keras.layers.Dropout( rate=output_dropout, name='embedding_dropout') # We project the 'embedding' output to 'hidden_size' if it is not already # 'hidden_size'. 
self._embedding_projection = None if embedding_width != hidden_size: self._embedding_projection = tf.keras.layers.EinsumDense( '...x,xy->...y', output_shape=hidden_size, bias_axes='y', kernel_initializer=tf_utils.clone_initializer(initializer), name='embedding_projection') self._transformer_layers = [] for layer in range(num_layers): if layer in attention_layers: mixing_layer = layers.MultiHeadAttention( num_heads=num_attention_heads, key_dim=int(hidden_size // num_attention_heads), dropout=attention_dropout, use_bias=True, kernel_initializer=tf_utils.clone_initializer(initializer), name='self_attention', ) else: mixing_layer = self._init_mixing_sublayer(layer) block = layers.TransformerScaffold( num_attention_heads=num_attention_heads, inner_dim=inner_dim, inner_activation=inner_activation, attention_cls=mixing_layer, feedforward_cls=None, # Fallback to default FeedForward class output_dropout=output_dropout, attention_dropout=attention_dropout, norm_first=norm_first, output_range=output_range if layer == num_layers - 1 else None, kernel_initializer=tf_utils.clone_initializer(initializer), name='transformer/layer_%d' % layer) self._transformer_layers.append(block) self._attention_mask_layer = layers.SelfAttentionMask( name='self_attention_mask') self._pooler_layer = tf.keras.layers.Dense( units=hidden_size, activation='tanh', kernel_initializer=tf_utils.clone_initializer(initializer), name='pooler_transform') if with_dense_inputs: self.inputs = dict( # The total length of token ids and dense inputs still has to be # max_sequence_length. It is checked in call(). input_word_ids=tf.keras.Input(shape=(None,), dtype=tf.int32), input_mask=tf.keras.Input(shape=(None,), dtype=tf.int32), input_type_ids=tf.keras.Input(shape=(None,), dtype=tf.int32), dense_inputs=tf.keras.Input( shape=(None, embedding_width), dtype=tf.float32), dense_mask=tf.keras.Input(shape=(None,), dtype=tf.int32), dense_type_ids=tf.keras.Input(shape=(None,), dtype=tf.int32), ) else: self.inputs = dict( input_word_ids=tf.keras.Input( shape=(max_sequence_length,), dtype=tf.int32), input_mask=tf.keras.Input( shape=(max_sequence_length,), dtype=tf.int32), input_type_ids=tf.keras.Input( shape=(max_sequence_length,), dtype=tf.int32)) self._max_sequence_length = max_sequence_length def call(self, inputs): word_embeddings = None if isinstance(inputs, dict): word_ids = inputs.get('input_word_ids') mask = inputs.get('input_mask') type_ids = inputs.get('input_type_ids') word_embeddings = inputs.get('input_word_embeddings', None) dense_inputs = inputs.get('dense_inputs', None) dense_mask = inputs.get('dense_mask', None) dense_type_ids = inputs.get('dense_type_ids', None) else: raise ValueError('Unexpected inputs type (%s) to %s.' % (type(inputs), self.__class__)) if word_embeddings is None: word_embeddings = self._embedding_layer(word_ids) if dense_inputs is not None: # Concat the dense embeddings at sequence end. word_embeddings = tf.concat([word_embeddings, dense_inputs], axis=1) type_ids = tf.concat([type_ids, dense_type_ids], axis=1) mask = tf.concat([mask, dense_mask], axis=1) # FNet: Sequence length must be the same as `max_sequence_length`. word_embeddings = tf.ensure_shape(word_embeddings, [None, self._max_sequence_length, None]) # Absolute position embeddings. 
position_embeddings = self._position_embedding_layer(word_embeddings) type_embeddings = self._type_embedding_layer(type_ids) embeddings = word_embeddings + position_embeddings + type_embeddings embeddings = self._embedding_norm_layer(embeddings) embeddings = self._embedding_dropout(embeddings) if self._embedding_projection is not None: embeddings = self._embedding_projection(embeddings) attention_mask = self._attention_mask_layer(embeddings, mask) encoder_outputs = [] x = embeddings for layer in self._transformer_layers: x = layer([x, attention_mask]) encoder_outputs.append(x) last_encoder_output = encoder_outputs[-1] first_token_tensor = last_encoder_output[:, 0, :] pooled_output = self._pooler_layer(first_token_tensor) output = dict( sequence_output=encoder_outputs[-1], pooled_output=pooled_output, encoder_outputs=encoder_outputs) return output def get_embedding_table(self): return self._embedding_layer.embeddings def get_embedding_layer(self): return self._embedding_layer def get_config(self): return dict(self._config) @property def transformer_layers(self): """List of Transformer layers in the encoder.""" return self._transformer_layers @property def pooler_layer(self): """The pooler dense layer after the transformer layers.""" return self._pooler_layer @classmethod def from_config(cls, config, custom_objects=None): if 'embedding_layer' in config and config['embedding_layer'] is not None: warn_string = ( 'You are reloading a model that was saved with a ' 'potentially-shared embedding layer object. If you contine to ' 'train this model, the embedding layer will no longer be shared. ' 'To work around this, load the model outside of the Keras API.') print('WARNING: ' + warn_string) logging.warn(warn_string) return cls(**config) def _init_mixing_sublayer(self, layer: int): """Initializes config-dependent mixing sublayer.""" if self._config['mixing_mechanism'] == layers.MixingMechanism.FOURIER: mixing_sublayer = layers.FourierTransformLayer( use_fft=self._config['use_fft'], name='fourier_transform') elif self._config['mixing_mechanism'] == layers.MixingMechanism.HARTLEY: mixing_sublayer = layers.HartleyTransformLayer( use_fft=self._config['use_fft'], name='hartley_transform') elif self._config['mixing_mechanism'] == layers.MixingMechanism.LINEAR: mixing_sublayer = layers.LinearTransformLayer( kernel_initializer=tf_utils.clone_initializer( self._config['initializer']), name='linear_transform') else: raise ValueError('Unsupported mixing mechanism: %s' % self._config['mixing_mechanism']) return mixing_sublayer
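When `with_dense_inputs=True`, the `call` above concatenates token embeddings and dense embeddings along the sequence axis, and their combined length must still equal `max_sequence_length` (enforced by `tf.ensure_shape`). A hedged sketch of that path; the shapes below are assumptions chosen to satisfy the 12 + 4 == 16 constraint:

import tensorflow as tf

from official.nlp.modeling.networks import fnet

encoder = fnet.FNet(
    vocab_size=100,
    hidden_size=32,
    num_attention_heads=2,
    max_sequence_length=16,
    num_layers=2,
    with_dense_inputs=True)

num_tokens, num_dense = 12, 4  # 12 + 4 == max_sequence_length
inputs = dict(
    input_word_ids=tf.random.uniform([2, num_tokens], maxval=100, dtype=tf.int32),
    input_mask=tf.ones([2, num_tokens], dtype=tf.int32),
    input_type_ids=tf.zeros([2, num_tokens], dtype=tf.int32),
    # embedding_width defaults to hidden_size, so dense inputs are 32-wide.
    dense_inputs=tf.random.uniform([2, num_dense, 32]),
    dense_mask=tf.ones([2, num_dense], dtype=tf.int32),
    dense_type_ids=tf.zeros([2, num_dense], dtype=tf.int32))

outputs = encoder(inputs)
print(outputs["sequence_output"].shape)  # (2, 16, 32)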
14,576
40.411932
80
py
models
models-master/official/nlp/modeling/networks/mobile_bert_encoder_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from absl.testing import parameterized import numpy as np import tensorflow as tf from official.nlp.modeling import models from official.nlp.modeling.networks import mobile_bert_encoder def generate_fake_input(batch_size=1, seq_len=5, vocab_size=10000, seed=0): """Generate consistent fake integer input sequences.""" np.random.seed(seed) fake_input = [] for _ in range(batch_size): fake_input.append([]) for _ in range(seq_len): fake_input[-1].append(np.random.randint(0, vocab_size)) fake_input = np.asarray(fake_input) return fake_input class MobileBertEncoderTest(parameterized.TestCase, tf.test.TestCase): @parameterized.named_parameters( ('default_setting', 'relu', True, 'no_norm', False), ('gelu', 'gelu', True, 'no_norm', False), ('kq_not_shared', 'relu', False, 'no_norm', False), ('layer_norm', 'relu', True, 'layer_norm', False), ('use_pooler', 'relu', True, 'no_norm', True), ('with_pooler_layer', 'relu', True, 'layer_norm', False)) def test_mobilebert_encoder(self, act_fn, kq_shared_bottleneck, normalization_type, use_pooler): hidden_size = 32 sequence_length = 16 num_blocks = 3 test_network = mobile_bert_encoder.MobileBERTEncoder( word_vocab_size=100, hidden_size=hidden_size, num_blocks=num_blocks, intermediate_act_fn=act_fn, key_query_shared_bottleneck=kq_shared_bottleneck, normalization_type=normalization_type, classifier_activation=use_pooler) word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) outputs = test_network([word_ids, mask, type_ids]) layer_output, pooler_output = outputs['sequence_output'], outputs[ 'pooled_output'] self.assertIsInstance(test_network.transformer_layers, list) self.assertLen(test_network.transformer_layers, num_blocks) layer_output_shape = [None, sequence_length, hidden_size] self.assertAllEqual(layer_output.shape.as_list(), layer_output_shape) pooler_output_shape = [None, hidden_size] self.assertAllEqual(pooler_output.shape.as_list(), pooler_output_shape) self.assertAllEqual(tf.float32, layer_output.dtype) def test_mobilebert_encoder_return_all_layer_output(self): hidden_size = 32 sequence_length = 16 num_blocks = 3 test_network = mobile_bert_encoder.MobileBERTEncoder( word_vocab_size=100, hidden_size=hidden_size, num_blocks=num_blocks) word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) outputs = test_network([word_ids, mask, type_ids]) all_layer_output = outputs['encoder_outputs'] self.assertIsInstance(all_layer_output, list) self.assertLen(all_layer_output, num_blocks + 1) @parameterized.parameters('int32', 'float32') def test_mobilebert_encoder_invocation(self, input_mask_dtype): vocab_size = 100 hidden_size = 32 sequence_length = 16 num_blocks = 3 test_network = 
mobile_bert_encoder.MobileBERTEncoder( word_vocab_size=vocab_size, hidden_size=hidden_size, num_blocks=num_blocks, input_mask_dtype=input_mask_dtype) word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) mask = tf.keras.Input(shape=(sequence_length,), dtype=input_mask_dtype) type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) outputs = test_network([word_ids, mask, type_ids]) model = tf.keras.Model([word_ids, mask, type_ids], outputs) input_seq = generate_fake_input( batch_size=1, seq_len=sequence_length, vocab_size=vocab_size) input_mask = generate_fake_input( batch_size=1, seq_len=sequence_length, vocab_size=2) token_type = generate_fake_input( batch_size=1, seq_len=sequence_length, vocab_size=2) outputs = model.predict([input_seq, input_mask, token_type]) sequence_output_shape = [1, sequence_length, hidden_size] self.assertAllEqual(outputs['sequence_output'].shape, sequence_output_shape) pooled_output_shape = [1, hidden_size] self.assertAllEqual(outputs['pooled_output'].shape, pooled_output_shape) def test_mobilebert_encoder_invocation_with_attention_score(self): vocab_size = 100 hidden_size = 32 sequence_length = 16 num_blocks = 3 test_network = mobile_bert_encoder.MobileBERTEncoder( word_vocab_size=vocab_size, hidden_size=hidden_size, num_blocks=num_blocks) word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) outputs = test_network([word_ids, mask, type_ids]) model = tf.keras.Model([word_ids, mask, type_ids], outputs) input_seq = generate_fake_input( batch_size=1, seq_len=sequence_length, vocab_size=vocab_size) input_mask = generate_fake_input( batch_size=1, seq_len=sequence_length, vocab_size=2) token_type = generate_fake_input( batch_size=1, seq_len=sequence_length, vocab_size=2) outputs = model.predict([input_seq, input_mask, token_type]) self.assertLen(outputs['attention_scores'], num_blocks) @parameterized.named_parameters( ('sequence_classification', models.BertClassifier, [None, 5]), ('token_classification', models.BertTokenClassifier, [None, 16, 5])) def test_mobilebert_encoder_for_downstream_task(self, task, prediction_shape): hidden_size = 32 sequence_length = 16 mobilebert_encoder = mobile_bert_encoder.MobileBERTEncoder( word_vocab_size=100, hidden_size=hidden_size) num_classes = 5 classifier = task(network=mobilebert_encoder, num_classes=num_classes) word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) prediction = classifier([word_ids, mask, type_ids]) if task == models.BertTokenClassifier: prediction = prediction['logits'] self.assertAllEqual(prediction.shape.as_list(), prediction_shape) if __name__ == '__main__': tf.test.main()
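Beyond the Keras-functional graphs built in these tests, the encoder can be called eagerly, and its output dict also exposes the per-block attention scores checked above. A minimal sketch reusing the test's configuration:

import numpy as np
import tensorflow as tf

from official.nlp.modeling.networks import mobile_bert_encoder

encoder = mobile_bert_encoder.MobileBERTEncoder(
    word_vocab_size=100, hidden_size=32, num_blocks=3)

seq_len = 16
word_ids = tf.constant(np.random.randint(0, 100, size=(1, seq_len)), dtype=tf.int32)
mask = tf.ones([1, seq_len], dtype=tf.int32)
type_ids = tf.zeros([1, seq_len], dtype=tf.int32)

outputs = encoder([word_ids, mask, type_ids])
print(outputs["pooled_output"].shape)    # (1, 32)
print(len(outputs["attention_scores"]))  # 3, one per transformer block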
7,105
40.8
80
py
models
models-master/official/nlp/modeling/networks/albert_encoder.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ALBERT (https://arxiv.org/abs/1810.04805) text encoder network.""" # pylint: disable=g-classes-have-attributes import collections import tensorflow as tf from official.modeling import activations from official.modeling import tf_utils from official.nlp.modeling import layers @tf.keras.utils.register_keras_serializable(package='Text') class AlbertEncoder(tf.keras.Model): """ALBERT (https://arxiv.org/abs/1810.04805) text encoder network. This network implements the encoder described in the paper "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations" (https://arxiv.org/abs/1909.11942). Compared with BERT (https://arxiv.org/abs/1810.04805), ALBERT refactorizes embedding parameters into two smaller matrices and shares parameters across layers. The default values for this object are taken from the ALBERT-Base implementation described in the paper. *Note* that the network is constructed by Keras Functional API. Args: vocab_size: The size of the token vocabulary. embedding_width: The width of the word embeddings. If the embedding width is not equal to hidden size, embedding parameters will be factorized into two matrices in the shape of `(vocab_size, embedding_width)` and `(embedding_width, hidden_size)`, where `embedding_width` is usually much smaller than `hidden_size`. hidden_size: The size of the transformer hidden layers. num_layers: The number of transformer layers. num_attention_heads: The number of attention heads for each transformer. The hidden size must be divisible by the number of attention heads. max_sequence_length: The maximum sequence length that this encoder can consume. If None, max_sequence_length uses the value from sequence length. This determines the variable shape for positional embeddings. type_vocab_size: The number of types that the 'type_ids' input can take. intermediate_size: The intermediate size for the transformer layers. activation: The activation to use for the transformer layers. dropout_rate: The dropout rate to use for the transformer layers. attention_dropout_rate: The dropout rate to use for the attention layers within the transformer layers. initializer: The initialzer to use for all weights in this encoder. dict_outputs: Whether to use a dictionary as the model outputs. 
""" def __init__(self, vocab_size, embedding_width=128, hidden_size=768, num_layers=12, num_attention_heads=12, max_sequence_length=512, type_vocab_size=16, intermediate_size=3072, activation=activations.gelu, dropout_rate=0.1, attention_dropout_rate=0.1, initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02), dict_outputs=False, **kwargs): activation = tf.keras.activations.get(activation) initializer = tf.keras.initializers.get(initializer) word_ids = tf.keras.layers.Input( shape=(None,), dtype=tf.int32, name='input_word_ids') mask = tf.keras.layers.Input( shape=(None,), dtype=tf.int32, name='input_mask') type_ids = tf.keras.layers.Input( shape=(None,), dtype=tf.int32, name='input_type_ids') if embedding_width is None: embedding_width = hidden_size embedding_layer = layers.OnDeviceEmbedding( vocab_size=vocab_size, embedding_width=embedding_width, initializer=tf_utils.clone_initializer(initializer), name='word_embeddings') word_embeddings = embedding_layer(word_ids) # Always uses dynamic slicing for simplicity. position_embedding_layer = layers.PositionEmbedding( initializer=tf_utils.clone_initializer(initializer), max_length=max_sequence_length, name='position_embedding') position_embeddings = position_embedding_layer(word_embeddings) type_embeddings = ( layers.OnDeviceEmbedding( vocab_size=type_vocab_size, embedding_width=embedding_width, initializer=tf_utils.clone_initializer(initializer), use_one_hot=True, name='type_embeddings')(type_ids)) embeddings = tf.keras.layers.Add()( [word_embeddings, position_embeddings, type_embeddings]) embeddings = ( tf.keras.layers.LayerNormalization( name='embeddings/layer_norm', axis=-1, epsilon=1e-12, dtype=tf.float32)(embeddings)) embeddings = (tf.keras.layers.Dropout(rate=dropout_rate)(embeddings)) # We project the 'embedding' output to 'hidden_size' if it is not already # 'hidden_size'. if embedding_width != hidden_size: embeddings = tf.keras.layers.EinsumDense( '...x,xy->...y', output_shape=hidden_size, bias_axes='y', kernel_initializer=tf_utils.clone_initializer(initializer), name='embedding_projection')( embeddings) data = embeddings attention_mask = layers.SelfAttentionMask()(data, mask) shared_layer = layers.TransformerEncoderBlock( num_attention_heads=num_attention_heads, inner_dim=intermediate_size, inner_activation=activation, output_dropout=dropout_rate, attention_dropout=attention_dropout_rate, kernel_initializer=tf_utils.clone_initializer(initializer), name='transformer') encoder_outputs = [] for _ in range(num_layers): data = shared_layer([data, attention_mask]) encoder_outputs.append(data) # Applying a tf.slice op (through subscript notation) to a Keras tensor # like this will create a SliceOpLambda layer. This is better than a Lambda # layer with Python code, because that is fundamentally less portable. first_token_tensor = data[:, 0, :] cls_output = tf.keras.layers.Dense( units=hidden_size, activation='tanh', kernel_initializer=tf_utils.clone_initializer(initializer), name='pooler_transform')( first_token_tensor) if dict_outputs: outputs = dict( sequence_output=data, encoder_outputs=encoder_outputs, pooled_output=cls_output, ) else: outputs = [data, cls_output] # b/164516224 # Once we've created the network using the Functional API, we call # super().__init__ as though we were invoking the Functional API Model # constructor, resulting in this object having all the properties of a model # created using the Functional API. 
Once super().__init__ is called, we # can assign attributes to `self` - note that all `self` assignments are # below this line. super().__init__( inputs=[word_ids, mask, type_ids], outputs=outputs, **kwargs) config_dict = { 'vocab_size': vocab_size, 'embedding_width': embedding_width, 'hidden_size': hidden_size, 'num_layers': num_layers, 'num_attention_heads': num_attention_heads, 'max_sequence_length': max_sequence_length, 'type_vocab_size': type_vocab_size, 'intermediate_size': intermediate_size, 'activation': tf.keras.activations.serialize(activation), 'dropout_rate': dropout_rate, 'attention_dropout_rate': attention_dropout_rate, 'initializer': tf.keras.initializers.serialize(initializer), } # We are storing the config dict as a namedtuple here to ensure checkpoint # compatibility with an earlier version of this model which did not track # the config dict attribute. TF does not track immutable attrs which # do not contain Trackables, so by creating a config namedtuple instead of # a dict we avoid tracking it. config_cls = collections.namedtuple('Config', config_dict.keys()) self._config = config_cls(**config_dict) self._embedding_layer = embedding_layer self._position_embedding_layer = position_embedding_layer def get_embedding_table(self): return self._embedding_layer.embeddings def get_config(self): return dict(self._config._asdict()) @classmethod def from_config(cls, config): return cls(**config)
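Because a single `TransformerEncoderBlock` (`shared_layer`) is applied in a loop, depth adds computation but no parameters. A small sketch illustrating this; the toy sizes are arbitrary:

from official.nlp.modeling.networks.albert_encoder import AlbertEncoder

def build(num_layers):
  return AlbertEncoder(
      vocab_size=100,
      embedding_width=8,
      hidden_size=16,
      num_attention_heads=2,
      num_layers=num_layers,
      dict_outputs=True)

shallow, deep = build(2), build(6)
# Cross-layer parameter sharing: the variable count is identical at any depth.
print(len(shallow.trainable_variables) == len(deep.trainable_variables))  # True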
8,877
40.877358
80
py
models
models-master/official/nlp/modeling/networks/mobile_bert_encoder.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """MobileBERT text encoder network.""" import gin import tensorflow as tf from official.nlp.modeling import layers @gin.configurable class MobileBERTEncoder(tf.keras.Model): """A Keras functional API implementation for MobileBERT encoder.""" def __init__(self, word_vocab_size=30522, word_embed_size=128, type_vocab_size=2, max_sequence_length=512, num_blocks=24, hidden_size=512, num_attention_heads=4, intermediate_size=512, intermediate_act_fn='relu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, intra_bottleneck_size=128, initializer_range=0.02, use_bottleneck_attention=False, key_query_shared_bottleneck=True, num_feedforward_networks=4, normalization_type='no_norm', classifier_activation=False, input_mask_dtype='int32', **kwargs): """Class initialization. Args: word_vocab_size: Number of words in the vocabulary. word_embed_size: Word embedding size. type_vocab_size: Number of word types. max_sequence_length: Maximum length of input sequence. num_blocks: Number of transformer block in the encoder model. hidden_size: Hidden size for the transformer block. num_attention_heads: Number of attention heads in the transformer block. intermediate_size: The size of the "intermediate" (a.k.a., feed forward) layer. intermediate_act_fn: The non-linear activation function to apply to the output of the intermediate/feed-forward layer. hidden_dropout_prob: Dropout probability for the hidden layers. attention_probs_dropout_prob: Dropout probability of the attention probabilities. intra_bottleneck_size: Size of bottleneck. initializer_range: The stddev of the `truncated_normal_initializer` for initializing all weight matrices. use_bottleneck_attention: Use attention inputs from the bottleneck transformation. If true, the following `key_query_shared_bottleneck` will be ignored. key_query_shared_bottleneck: Whether to share linear transformation for keys and queries. num_feedforward_networks: Number of stacked feed-forward networks. normalization_type: The type of normalization_type, only `no_norm` and `layer_norm` are supported. `no_norm` represents the element-wise linear transformation for the student model, as suggested by the original MobileBERT paper. `layer_norm` is used for the teacher model. classifier_activation: If using the tanh activation for the final representation of the `[CLS]` token in fine-tuning. input_mask_dtype: The dtype of `input_mask` tensor, which is one of the input tensors of this encoder. Defaults to `int32`. If you want to use `tf.lite` quantization, which does not support `Cast` op, please set this argument to `tf.float32` and feed `input_mask` tensor with values in `float32` to avoid `tf.cast` in the computation. **kwargs: Other keyworded and arguments. 
""" self._self_setattr_tracking = False initializer = tf.keras.initializers.TruncatedNormal( stddev=initializer_range) # layer instantiation self.embedding_layer = layers.MobileBertEmbedding( word_vocab_size=word_vocab_size, word_embed_size=word_embed_size, type_vocab_size=type_vocab_size, output_embed_size=hidden_size, max_sequence_length=max_sequence_length, normalization_type=normalization_type, initializer=initializer, dropout_rate=hidden_dropout_prob) self._transformer_layers = [] for layer_idx in range(num_blocks): transformer = layers.MobileBertTransformer( hidden_size=hidden_size, num_attention_heads=num_attention_heads, intermediate_size=intermediate_size, intermediate_act_fn=intermediate_act_fn, hidden_dropout_prob=hidden_dropout_prob, attention_probs_dropout_prob=attention_probs_dropout_prob, intra_bottleneck_size=intra_bottleneck_size, use_bottleneck_attention=use_bottleneck_attention, key_query_shared_bottleneck=key_query_shared_bottleneck, num_feedforward_networks=num_feedforward_networks, normalization_type=normalization_type, initializer=initializer, name=f'transformer_layer_{layer_idx}') self._transformer_layers.append(transformer) # input tensor input_ids = tf.keras.layers.Input( shape=(None,), dtype=tf.int32, name='input_word_ids') input_mask = tf.keras.layers.Input( shape=(None,), dtype=input_mask_dtype, name='input_mask') type_ids = tf.keras.layers.Input( shape=(None,), dtype=tf.int32, name='input_type_ids') self.inputs = [input_ids, input_mask, type_ids] # The dtype of `attention_mask` will the same as the dtype of `input_mask`. attention_mask = layers.SelfAttentionMask()(input_mask, input_mask) # build the computation graph all_layer_outputs = [] all_attention_scores = [] embedding_output = self.embedding_layer(input_ids, type_ids) all_layer_outputs.append(embedding_output) prev_output = embedding_output for layer_idx in range(num_blocks): layer_output, attention_score = self._transformer_layers[layer_idx]( prev_output, attention_mask, return_attention_scores=True) all_layer_outputs.append(layer_output) all_attention_scores.append(attention_score) prev_output = layer_output first_token = tf.squeeze(prev_output[:, 0:1, :], axis=1) if classifier_activation: self._pooler_layer = tf.keras.layers.EinsumDense( 'ab,bc->ac', output_shape=hidden_size, activation=tf.tanh, bias_axes='c', kernel_initializer=initializer, name='pooler') first_token = self._pooler_layer(first_token) else: self._pooler_layer = None outputs = dict( sequence_output=prev_output, pooled_output=first_token, encoder_outputs=all_layer_outputs, attention_scores=all_attention_scores) super().__init__( inputs=self.inputs, outputs=outputs, **kwargs) def get_embedding_table(self): return self.embedding_layer.word_embedding.embeddings def get_embedding_layer(self): return self.embedding_layer.word_embedding @property def transformer_layers(self): """List of Transformer layers in the encoder.""" return self._transformer_layers @property def pooler_layer(self): """The pooler dense layer after the transformer layers.""" return self._pooler_layer
7,533
39.945652
80
py
models
models-master/official/nlp/modeling/networks/bert_encoder.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Transformer-based BERT encoder network.""" # pylint: disable=g-classes-have-attributes from typing import Any, Callable, Optional, Union from absl import logging import tensorflow as tf from official.modeling import tf_utils from official.nlp.modeling import layers _Initializer = Union[str, tf.keras.initializers.Initializer] _Activation = Union[str, Callable[..., Any]] _approx_gelu = lambda x: tf.keras.activations.gelu(x, approximate=True) class BertEncoderV2(tf.keras.layers.Layer): """Bi-directional Transformer-based encoder network. This network implements a bi-directional Transformer-based encoder as described in "BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding" (https://arxiv.org/abs/1810.04805). It includes the embedding lookups and transformer layers, but not the masked language model or classification task networks. The default values for this object are taken from the BERT-Base implementation in "BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding". Args: vocab_size: The size of the token vocabulary. hidden_size: The size of the transformer hidden layers. num_layers: The number of transformer layers. num_attention_heads: The number of attention heads for each transformer. The hidden size must be divisible by the number of attention heads. max_sequence_length: The maximum sequence length that this encoder can consume. This determines the variable shape for positional embeddings. type_vocab_size: The number of types that the 'type_ids' input can take. inner_dim: The output dimension of the first Dense layer in a two-layer feedforward network for each transformer. inner_activation: The activation for the first Dense layer in a two-layer feedforward network for each transformer. output_dropout: Dropout probability for the post-attention and output dropout. attention_dropout: The dropout rate to use for the attention layers within the transformer layers. initializer: The initialzer to use for all weights in this encoder. output_range: The sequence output range, [0, output_range), by slicing the target sequence of the last transformer layer. `None` means the entire target sequence will attend to the source sequence, which yields the full output. embedding_width: The width of the word embeddings. If the embedding width is not equal to hidden size, embedding parameters will be factorized into two matrices in the shape of ['vocab_size', 'embedding_width'] and ['embedding_width', 'hidden_size'] ('embedding_width' is usually much smaller than 'hidden_size'). embedding_layer: An optional Layer instance which will be called to generate embeddings for the input word IDs. norm_first: Whether to normalize inputs to attention and intermediate dense layers. If set False, output of attention and intermediate dense layers is normalized. with_dense_inputs: Whether to accept dense embeddings as the input. 
return_attention_scores: Whether to add an additional output containing the attention scores of all transformer layers. This will be a list of length `num_layers`, and each element will be in the shape [batch_size, num_attention_heads, seq_dim, seq_dim]. """ def __init__( self, vocab_size: int, hidden_size: int = 768, num_layers: int = 12, num_attention_heads: int = 12, max_sequence_length: int = 512, type_vocab_size: int = 16, inner_dim: int = 3072, inner_activation: _Activation = _approx_gelu, output_dropout: float = 0.1, attention_dropout: float = 0.1, initializer: _Initializer = tf.keras.initializers.TruncatedNormal( stddev=0.02), output_range: Optional[int] = None, embedding_width: Optional[int] = None, embedding_layer: Optional[tf.keras.layers.Layer] = None, norm_first: bool = False, with_dense_inputs: bool = False, return_attention_scores: bool = False, **kwargs): # Pops kwargs that are used in V1 implementation. if 'dict_outputs' in kwargs: kwargs.pop('dict_outputs') if 'return_all_encoder_outputs' in kwargs: kwargs.pop('return_all_encoder_outputs') if 'intermediate_size' in kwargs: inner_dim = kwargs.pop('intermediate_size') if 'activation' in kwargs: inner_activation = kwargs.pop('activation') if 'dropout_rate' in kwargs: output_dropout = kwargs.pop('dropout_rate') if 'attention_dropout_rate' in kwargs: attention_dropout = kwargs.pop('attention_dropout_rate') super().__init__(**kwargs) self._output_range = output_range activation = tf.keras.activations.get(inner_activation) initializer = tf.keras.initializers.get(initializer) if embedding_width is None: embedding_width = hidden_size if embedding_layer is None: self._embedding_layer = layers.OnDeviceEmbedding( vocab_size=vocab_size, embedding_width=embedding_width, initializer=tf_utils.clone_initializer(initializer), name='word_embeddings') else: self._embedding_layer = embedding_layer self._position_embedding_layer = layers.PositionEmbedding( initializer=tf_utils.clone_initializer(initializer), max_length=max_sequence_length, name='position_embedding') self._type_embedding_layer = layers.OnDeviceEmbedding( vocab_size=type_vocab_size, embedding_width=embedding_width, initializer=tf_utils.clone_initializer(initializer), use_one_hot=True, name='type_embeddings') self._embedding_norm_layer = tf.keras.layers.LayerNormalization( name='embeddings/layer_norm', axis=-1, epsilon=1e-12, dtype=tf.float32) self._embedding_dropout = tf.keras.layers.Dropout( rate=output_dropout, name='embedding_dropout') # We project the 'embedding' output to 'hidden_size' if it is not already # 'hidden_size'. 
self._embedding_projection = None if embedding_width != hidden_size: self._embedding_projection = tf.keras.layers.EinsumDense( '...x,xy->...y', output_shape=hidden_size, bias_axes='y', kernel_initializer=tf_utils.clone_initializer(initializer), name='embedding_projection') self._transformer_layers = [] self._attention_mask_layer = layers.SelfAttentionMask( name='self_attention_mask') self._num_layers = num_layers for i in range(num_layers): layer = layers.TransformerEncoderBlock( num_attention_heads=num_attention_heads, inner_dim=inner_dim, inner_activation=inner_activation, output_dropout=output_dropout, attention_dropout=attention_dropout, norm_first=norm_first, return_attention_scores=return_attention_scores, kernel_initializer=tf_utils.clone_initializer(initializer), name='transformer/layer_%d' % i) self._transformer_layers.append(layer) self._pooler_layer = tf.keras.layers.Dense( units=hidden_size, activation='tanh', kernel_initializer=tf_utils.clone_initializer(initializer), name='pooler_transform') self._config = { 'vocab_size': vocab_size, 'hidden_size': hidden_size, 'num_layers': num_layers, 'num_attention_heads': num_attention_heads, 'max_sequence_length': max_sequence_length, 'type_vocab_size': type_vocab_size, 'inner_dim': inner_dim, 'inner_activation': tf_utils.serialize_activation( activation, use_legacy_format=True ), 'output_dropout': output_dropout, 'attention_dropout': attention_dropout, 'initializer': tf_utils.serialize_initializer( initializer, use_legacy_format=True ), 'output_range': output_range, 'embedding_width': embedding_width, 'embedding_layer': embedding_layer, 'norm_first': norm_first, 'with_dense_inputs': with_dense_inputs, 'return_attention_scores': return_attention_scores, } if with_dense_inputs: self.inputs = dict( input_word_ids=tf.keras.Input(shape=(None,), dtype=tf.int32), input_mask=tf.keras.Input(shape=(None,), dtype=tf.int32), input_type_ids=tf.keras.Input(shape=(None,), dtype=tf.int32), dense_inputs=tf.keras.Input( shape=(None, embedding_width), dtype=tf.float32), dense_mask=tf.keras.Input(shape=(None,), dtype=tf.int32), dense_type_ids=tf.keras.Input(shape=(None,), dtype=tf.int32), ) else: self.inputs = dict( input_word_ids=tf.keras.Input(shape=(None,), dtype=tf.int32), input_mask=tf.keras.Input(shape=(None,), dtype=tf.int32), input_type_ids=tf.keras.Input(shape=(None,), dtype=tf.int32)) def call(self, inputs): word_embeddings = None if isinstance(inputs, dict): word_ids = inputs.get('input_word_ids') mask = inputs.get('input_mask') type_ids = inputs.get('input_type_ids') word_embeddings = inputs.get('input_word_embeddings', None) dense_inputs = inputs.get('dense_inputs', None) dense_mask = inputs.get('dense_mask', None) dense_type_ids = inputs.get('dense_type_ids', None) else: raise ValueError('Unexpected inputs type to %s.' 
% self.__class__) if word_embeddings is None: word_embeddings = self._embedding_layer(word_ids) if dense_inputs is not None: mask = tf.concat([mask, dense_mask], axis=1) embeddings = self._get_embeddings(word_ids, type_ids, word_embeddings, dense_inputs, dense_type_ids) embeddings = self._embedding_norm_layer(embeddings) embeddings = self._embedding_dropout(embeddings) if self._embedding_projection is not None: embeddings = self._embedding_projection(embeddings) attention_mask = self._attention_mask_layer(embeddings, mask) encoder_outputs = [] attention_outputs = [] x = embeddings for i, layer in enumerate(self._transformer_layers): transformer_output_range = None if i == self._num_layers - 1: transformer_output_range = self._output_range x = layer([x, attention_mask], output_range=transformer_output_range) if self._config['return_attention_scores']: x, attention_scores = x attention_outputs.append(attention_scores) encoder_outputs.append(x) last_encoder_output = encoder_outputs[-1] first_token_tensor = last_encoder_output[:, 0, :] pooled_output = self._pooler_layer(first_token_tensor) output = dict( sequence_output=encoder_outputs[-1], pooled_output=pooled_output, encoder_outputs=encoder_outputs) if self._config['return_attention_scores']: output['attention_scores'] = attention_outputs return output def get_embedding_table(self): return self._embedding_layer.embeddings def get_embedding_layer(self): return self._embedding_layer def get_config(self): return dict(self._config) @property def transformer_layers(self): """List of Transformer layers in the encoder.""" return self._transformer_layers @property def pooler_layer(self): """The pooler dense layer after the transformer layers.""" return self._pooler_layer @classmethod def from_config(cls, config, custom_objects=None): if 'embedding_layer' in config and config['embedding_layer'] is not None: warn_string = ( 'You are reloading a model that was saved with a ' 'potentially-shared embedding layer object. If you contine to ' 'train this model, the embedding layer will no longer be shared. ' 'To work around this, load the model outside of the Keras API.') print('WARNING: ' + warn_string) logging.warn(warn_string) return cls(**config) def _get_embeddings(self, word_ids: tf.Tensor, type_ids: tf.Tensor, word_embeddings: Optional[tf.Tensor], dense_inputs: Optional[tf.Tensor], dense_type_ids: Optional[tf.Tensor]) -> tf.Tensor: if word_embeddings is None: word_embeddings = self._embedding_layer(word_ids) if dense_inputs is not None: # Concat the dense embeddings at sequence end. word_embeddings = tf.concat([word_embeddings, dense_inputs], axis=1) type_ids = tf.concat([type_ids, dense_type_ids], axis=1) type_embeddings = self._type_embedding_layer(type_ids) # absolute position embeddings. position_embeddings = self._position_embedding_layer(word_embeddings) return word_embeddings + position_embeddings + type_embeddings @tf.keras.utils.register_keras_serializable(package='Text') class BertEncoder(tf.keras.Model): """Bi-directional Transformer-based encoder network. This network implements a bi-directional Transformer-based encoder as described in "BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding" (https://arxiv.org/abs/1810.04805). It includes the embedding lookups and transformer layers, but not the masked language model or classification task networks. The default values for this object are taken from the BERT-Base implementation in "BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding". 
*Note* that the network is constructed by [Keras Functional API](https://keras.io/guides/functional_api/). Args: vocab_size: The size of the token vocabulary. hidden_size: The size of the transformer hidden layers. num_layers: The number of transformer layers. num_attention_heads: The number of attention heads for each transformer. The hidden size must be divisible by the number of attention heads. max_sequence_length: The maximum sequence length that this encoder can consume. If None, max_sequence_length uses the value from sequence length. This determines the variable shape for positional embeddings. type_vocab_size: The number of types that the 'type_ids' input can take. inner_dim: The output dimension of the first Dense layer in a two-layer feedforward network for each transformer. inner_activation: The activation for the first Dense layer in a two-layer feedforward network for each transformer. output_dropout: Dropout probability for the post-attention and output dropout. attention_dropout: The dropout rate to use for the attention layers within the transformer layers. initializer: The initialzer to use for all weights in this encoder. output_range: The sequence output range, [0, output_range), by slicing the target sequence of the last transformer layer. `None` means the entire target sequence will attend to the source sequence, which yields the full output. embedding_width: The width of the word embeddings. If the embedding width is not equal to hidden size, embedding parameters will be factorized into two matrices in the shape of ['vocab_size', 'embedding_width'] and ['embedding_width', 'hidden_size'] ('embedding_width' is usually much smaller than 'hidden_size'). embedding_layer: An optional Layer instance which will be called to generate embeddings for the input word IDs. norm_first: Whether to normalize inputs to attention and intermediate dense layers. If set False, output of attention and intermediate dense layers is normalized. dict_outputs: Whether to use a dictionary as the model outputs. return_all_encoder_outputs: Whether to output sequence embedding outputs of all encoder transformer layers. Note: when the following `dict_outputs` argument is True, all encoder outputs are always returned in the dict, keyed by `encoder_outputs`. return_attention_scores: Whether to add an additional output containing the attention scores of all transformer layers. This will be a list of length `num_layers`, and each element will be in the shape [batch_size, num_attention_heads, seq_dim, seq_dim]. """ def __init__( self, vocab_size, hidden_size=768, num_layers=12, num_attention_heads=12, max_sequence_length=512, type_vocab_size=16, inner_dim=3072, inner_activation=lambda x: tf.keras.activations.gelu(x, approximate=True), output_dropout=0.1, attention_dropout=0.1, initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02), output_range=None, embedding_width=None, embedding_layer=None, norm_first=False, dict_outputs=False, return_all_encoder_outputs=False, return_attention_scores: bool = False, **kwargs): if 'sequence_length' in kwargs: kwargs.pop('sequence_length') logging.warning('`sequence_length` is a deprecated argument to ' '`BertEncoder`, which has no effect for a while. Please ' 'remove `sequence_length` argument.') # Handles backward compatible kwargs. 
if 'intermediate_size' in kwargs: inner_dim = kwargs.pop('intermediate_size') if 'activation' in kwargs: inner_activation = kwargs.pop('activation') if 'dropout_rate' in kwargs: output_dropout = kwargs.pop('dropout_rate') if 'attention_dropout_rate' in kwargs: attention_dropout = kwargs.pop('attention_dropout_rate') activation = tf.keras.activations.get(inner_activation) initializer = tf.keras.initializers.get(initializer) word_ids = tf.keras.layers.Input( shape=(None,), dtype=tf.int32, name='input_word_ids') mask = tf.keras.layers.Input( shape=(None,), dtype=tf.int32, name='input_mask') type_ids = tf.keras.layers.Input( shape=(None,), dtype=tf.int32, name='input_type_ids') if embedding_width is None: embedding_width = hidden_size if embedding_layer is None: embedding_layer_inst = layers.OnDeviceEmbedding( vocab_size=vocab_size, embedding_width=embedding_width, initializer=tf_utils.clone_initializer(initializer), name='word_embeddings') else: embedding_layer_inst = embedding_layer word_embeddings = embedding_layer_inst(word_ids) # Always uses dynamic slicing for simplicity. position_embedding_layer = layers.PositionEmbedding( initializer=tf_utils.clone_initializer(initializer), max_length=max_sequence_length, name='position_embedding') position_embeddings = position_embedding_layer(word_embeddings) type_embedding_layer = layers.OnDeviceEmbedding( vocab_size=type_vocab_size, embedding_width=embedding_width, initializer=tf_utils.clone_initializer(initializer), use_one_hot=True, name='type_embeddings') type_embeddings = type_embedding_layer(type_ids) embeddings = tf.keras.layers.Add()( [word_embeddings, position_embeddings, type_embeddings]) embedding_norm_layer = tf.keras.layers.LayerNormalization( name='embeddings/layer_norm', axis=-1, epsilon=1e-12, dtype=tf.float32) embeddings = embedding_norm_layer(embeddings) embeddings = (tf.keras.layers.Dropout(rate=output_dropout)(embeddings)) # We project the 'embedding' output to 'hidden_size' if it is not already # 'hidden_size'. if embedding_width != hidden_size: embedding_projection = tf.keras.layers.EinsumDense( '...x,xy->...y', output_shape=hidden_size, bias_axes='y', kernel_initializer=tf_utils.clone_initializer(initializer), name='embedding_projection') embeddings = embedding_projection(embeddings) else: embedding_projection = None transformer_layers = [] data = embeddings attention_mask = layers.SelfAttentionMask()(data, mask) encoder_outputs = [] attention_outputs = [] for i in range(num_layers): transformer_output_range = None if i == num_layers - 1: transformer_output_range = output_range layer = layers.TransformerEncoderBlock( num_attention_heads=num_attention_heads, inner_dim=inner_dim, inner_activation=inner_activation, output_dropout=output_dropout, attention_dropout=attention_dropout, norm_first=norm_first, return_attention_scores=return_attention_scores, kernel_initializer=tf_utils.clone_initializer(initializer), name='transformer/layer_%d' % i) transformer_layers.append(layer) data = layer([data, attention_mask], output_range=transformer_output_range) if return_attention_scores: data, attention_scores = data attention_outputs.append(attention_scores) encoder_outputs.append(data) last_encoder_output = encoder_outputs[-1] # Applying a tf.slice op (through subscript notation) to a Keras tensor # like this will create a SliceOpLambda layer. This is better than a Lambda # layer with Python code, because that is fundamentally less portable. 
first_token_tensor = last_encoder_output[:, 0, :] pooler_layer = tf.keras.layers.Dense( units=hidden_size, activation='tanh', kernel_initializer=tf_utils.clone_initializer(initializer), name='pooler_transform') cls_output = pooler_layer(first_token_tensor) outputs = dict( sequence_output=encoder_outputs[-1], pooled_output=cls_output, encoder_outputs=encoder_outputs, ) if return_attention_scores: outputs['attention_scores'] = attention_outputs if dict_outputs: super().__init__( inputs=[word_ids, mask, type_ids], outputs=outputs, **kwargs) else: cls_output = outputs['pooled_output'] if return_all_encoder_outputs: encoder_outputs = outputs['encoder_outputs'] outputs = [encoder_outputs, cls_output] else: sequence_output = outputs['sequence_output'] outputs = [sequence_output, cls_output] if return_attention_scores: outputs.append(attention_outputs) super().__init__( # pylint: disable=bad-super-call inputs=[word_ids, mask, type_ids], outputs=outputs, **kwargs) self._pooler_layer = pooler_layer self._transformer_layers = transformer_layers self._embedding_norm_layer = embedding_norm_layer self._embedding_layer = embedding_layer_inst self._position_embedding_layer = position_embedding_layer self._type_embedding_layer = type_embedding_layer if embedding_projection is not None: self._embedding_projection = embedding_projection config_dict = { 'vocab_size': vocab_size, 'hidden_size': hidden_size, 'num_layers': num_layers, 'num_attention_heads': num_attention_heads, 'max_sequence_length': max_sequence_length, 'type_vocab_size': type_vocab_size, 'inner_dim': inner_dim, 'inner_activation': tf_utils.serialize_activation( activation, use_legacy_format=True ), 'output_dropout': output_dropout, 'attention_dropout': attention_dropout, 'initializer': tf_utils.serialize_initializer( initializer, use_legacy_format=True ), 'output_range': output_range, 'embedding_width': embedding_width, 'embedding_layer': embedding_layer, 'norm_first': norm_first, 'dict_outputs': dict_outputs, 'return_attention_scores': return_attention_scores, } # pylint: disable=protected-access self._setattr_tracking = False self._config = config_dict self._setattr_tracking = True # pylint: enable=protected-access def get_embedding_table(self): return self._embedding_layer.embeddings def get_embedding_layer(self): return self._embedding_layer def get_config(self): return self._config @property def transformer_layers(self): """List of Transformer layers in the encoder.""" return self._transformer_layers @property def pooler_layer(self): """The pooler dense layer after the transformer layers.""" return self._pooler_layer @classmethod def from_config(cls, config, custom_objects=None): if 'embedding_layer' in config and config['embedding_layer'] is not None: warn_string = ( 'You are reloading a model that was saved with a ' 'potentially-shared embedding layer object. If you contine to ' 'train this model, the embedding layer will no longer be shared. ' 'To work around this, load the model outside of the Keras API.') print('WARNING: ' + warn_string) logging.warn(warn_string) return cls(**config)
25,604
39.837321
80
py
models
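The `BertEncoderV2` docstring above describes the constructor arguments and the dictionary of outputs returned by `call`. As a hedged illustration (not part of the original file), the sketch below builds a toy-sized encoder and runs one batch through it; every size is an illustrative placeholder, and only the `official.nlp.modeling` imports already used elsewhere in this repo are assumed.

# Hedged usage sketch for BertEncoderV2; toy sizes, not the BERT-Base defaults.
import tensorflow as tf

from official.nlp.modeling import networks

encoder = networks.BertEncoderV2(
    vocab_size=100,          # illustrative toy vocabulary
    hidden_size=32,          # must be divisible by num_attention_heads
    num_layers=2,
    num_attention_heads=2,
    max_sequence_length=16)

batch_size, seq_len = 2, 8
inputs = dict(
    input_word_ids=tf.random.uniform(
        (batch_size, seq_len), maxval=100, dtype=tf.int32),
    input_mask=tf.ones((batch_size, seq_len), dtype=tf.int32),
    input_type_ids=tf.zeros((batch_size, seq_len), dtype=tf.int32))

outputs = encoder(inputs)
# Per the docstring: a dict with 'sequence_output' [batch, seq, hidden],
# 'pooled_output' [batch, hidden], and per-layer 'encoder_outputs'.
print(outputs['sequence_output'].shape, outputs['pooled_output'].shape)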
models-master/official/nlp/modeling/networks/classification_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for classification network.""" from absl.testing import parameterized import numpy as np import tensorflow as tf from official.nlp.modeling.networks import classification class ClassificationTest(tf.test.TestCase, parameterized.TestCase): @parameterized.parameters(1, 10) def test_network_creation(self, num_classes): """Validate that the Keras object can be created.""" input_width = 512 test_object = classification.Classification( input_width=input_width, num_classes=num_classes) # Create a 2-dimensional input (the first dimension is implicit). cls_data = tf.keras.Input(shape=(input_width,), dtype=tf.float32) output = test_object(cls_data) # Validate that the outputs are of the expected shape. expected_output_shape = [None, num_classes] self.assertEqual(expected_output_shape, output.shape.as_list()) @parameterized.parameters(1, 10) def test_network_invocation(self, num_classes): """Validate that the Keras object can be invoked.""" input_width = 512 test_object = classification.Classification( input_width=input_width, num_classes=num_classes, output='predictions') # Create a 2-dimensional input (the first dimension is implicit). cls_data = tf.keras.Input(shape=(input_width,), dtype=tf.float32) output = test_object(cls_data) # Invoke the network as part of a Model. model = tf.keras.Model(cls_data, output) input_data = 10 * np.random.random_sample((3, input_width)) _ = model.predict(input_data) def test_network_invocation_with_internal_logits(self): """Validate that the logit outputs are correct.""" input_width = 512 num_classes = 10 test_object = classification.Classification( input_width=input_width, num_classes=num_classes, output='predictions') # Create a 2-dimensional input (the first dimension is implicit). cls_data = tf.keras.Input(shape=(input_width,), dtype=tf.float32) output = test_object(cls_data) model = tf.keras.Model(cls_data, output) logits_model = tf.keras.Model(test_object.inputs, test_object.logits) batch_size = 3 input_data = 10 * np.random.random_sample((batch_size, input_width)) outputs = model.predict(input_data) logits = logits_model.predict(input_data) # Ensure that the tensor shapes are correct. expected_output_shape = (batch_size, num_classes) self.assertEqual(expected_output_shape, outputs.shape) self.assertEqual(expected_output_shape, logits.shape) # Ensure that the logits, when softmaxed, create the outputs. 
input_tensor = tf.keras.Input(expected_output_shape[1:]) output_tensor = tf.keras.layers.Activation(tf.nn.log_softmax)(input_tensor) softmax_model = tf.keras.Model(input_tensor, output_tensor) calculated_softmax = softmax_model.predict(logits) self.assertAllClose(outputs, calculated_softmax) @parameterized.parameters(1, 10) def test_network_invocation_with_internal_and_external_logits( self, num_classes): """Validate that the logit outputs are correct.""" input_width = 512 test_object = classification.Classification( input_width=input_width, num_classes=num_classes, output='logits') # Create a 2-dimensional input (the first dimension is implicit). cls_data = tf.keras.Input(shape=(input_width,), dtype=tf.float32) output = test_object(cls_data) model = tf.keras.Model(cls_data, output) logits_model = tf.keras.Model(test_object.inputs, test_object.logits) batch_size = 3 input_data = 10 * np.random.random_sample((batch_size, input_width)) outputs = model.predict(input_data) logits = logits_model.predict(input_data) # Ensure that the tensor shapes are correct. expected_output_shape = (batch_size, num_classes) self.assertEqual(expected_output_shape, outputs.shape) self.assertEqual(expected_output_shape, logits.shape) self.assertAllClose(outputs, logits) def test_network_invocation_with_logit_output(self): """Validate that the logit outputs are correct.""" input_width = 512 num_classes = 10 test_object = classification.Classification( input_width=input_width, num_classes=num_classes, output='predictions') logit_object = classification.Classification( input_width=input_width, num_classes=num_classes, output='logits') logit_object.set_weights(test_object.get_weights()) # Create a 2-dimensional input (the first dimension is implicit). cls_data = tf.keras.Input(shape=(input_width,), dtype=tf.float32) output = test_object(cls_data) logit_output = logit_object(cls_data) model = tf.keras.Model(cls_data, output) logits_model = tf.keras.Model(cls_data, logit_output) batch_size = 3 input_data = 10 * np.random.random_sample((batch_size, input_width)) outputs = model.predict(input_data) logits = logits_model.predict(input_data) # Ensure that the tensor shapes are correct. expected_output_shape = (batch_size, num_classes) self.assertEqual(expected_output_shape, outputs.shape) self.assertEqual(expected_output_shape, logits.shape) # Ensure that the logits, when softmaxed, create the outputs. input_tensor = tf.keras.Input(expected_output_shape[1:]) output_tensor = tf.keras.layers.Activation(tf.nn.log_softmax)(input_tensor) softmax_model = tf.keras.Model(input_tensor, output_tensor) calculated_softmax = softmax_model.predict(logits) self.assertAllClose(outputs, calculated_softmax) def test_serialize_deserialize(self): # Create a network object that sets all of its config options. network = classification.Classification( input_width=128, num_classes=10, initializer='zeros', output='predictions') # Create another network object from the first object's config. new_network = classification.Classification.from_config( network.get_config()) # Validate that the config can be forced to JSON. _ = new_network.to_json() # If the serialization was successful, the new config should match the old. self.assertAllEqual(network.get_config(), new_network.get_config()) def test_unknown_output_type_fails(self): with self.assertRaisesRegex(ValueError, 'Unknown `output` value "bad".*'): _ = classification.Classification( input_width=128, num_classes=10, output='bad') if __name__ == '__main__': tf.test.main()
7,061
39.586207
79
py
models
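One detail worth making explicit from the tests above: with `output='predictions'`, the `Classification` head returns log-probabilities (the log-softmax of its logits), which is exactly what the `log_softmax` comparison in the tests verifies. A hedged sketch with illustrative sizes:

# Hedged sketch: 'predictions' from networks.Classification are log-probabilities.
import tensorflow as tf

from official.nlp.modeling import networks

head = networks.Classification(input_width=16, num_classes=3,
                               output='predictions')
cls_features = tf.random.normal((2, 16))   # [batch, input_width]
log_probs = head(cls_features)             # [batch, num_classes]
probs = tf.exp(log_probs)                  # each row sums to ~1.0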
models-master/official/nlp/modeling/networks/span_labeling_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for span_labeling network.""" import numpy as np import tensorflow as tf from official.nlp.modeling.networks import span_labeling class SpanLabelingTest(tf.test.TestCase): def test_network_creation(self): """Validate that the Keras object can be created.""" sequence_length = 15 input_width = 512 test_network = span_labeling.SpanLabeling( input_width=input_width, output='predictions') # Create a 3-dimensional input (the first dimension is implicit). sequence_data = tf.keras.Input( shape=(sequence_length, input_width), dtype=tf.float32) start_outputs, end_outputs = test_network(sequence_data) # Validate that the outputs are of the expected shape. expected_output_shape = [None, sequence_length] self.assertEqual(expected_output_shape, start_outputs.shape.as_list()) self.assertEqual(expected_output_shape, end_outputs.shape.as_list()) def test_network_invocation(self): """Validate that the Keras object can be invoked.""" sequence_length = 15 input_width = 512 test_network = span_labeling.SpanLabeling(input_width=input_width) # Create a 3-dimensional input (the first dimension is implicit). sequence_data = tf.keras.Input( shape=(sequence_length, input_width), dtype=tf.float32) outputs = test_network(sequence_data) model = tf.keras.Model(sequence_data, outputs) # Invoke the network as part of a Model. batch_size = 3 input_data = 10 * np.random.random_sample( (batch_size, sequence_length, input_width)) start_outputs, end_outputs = model.predict(input_data) # Validate that the outputs are of the expected shape. expected_output_shape = (batch_size, sequence_length) self.assertEqual(expected_output_shape, start_outputs.shape) self.assertEqual(expected_output_shape, end_outputs.shape) def test_network_invocation_with_internal_logit_output(self): """Validate that the logit outputs are correct.""" sequence_length = 15 input_width = 512 test_network = span_labeling.SpanLabeling( input_width=input_width, output='predictions') # Create a 3-dimensional input (the first dimension is implicit). sequence_data = tf.keras.Input( shape=(sequence_length, input_width), dtype=tf.float32) output = test_network(sequence_data) model = tf.keras.Model(sequence_data, output) logit_model = tf.keras.Model( test_network.inputs, [test_network.start_logits, test_network.end_logits]) batch_size = 3 input_data = 10 * np.random.random_sample( (batch_size, sequence_length, input_width)) start_outputs, end_outputs = model.predict(input_data) start_logits, end_logits = logit_model.predict(input_data) # Ensure that the tensor shapes are correct. expected_output_shape = (batch_size, sequence_length) self.assertEqual(expected_output_shape, start_outputs.shape) self.assertEqual(expected_output_shape, end_outputs.shape) self.assertEqual(expected_output_shape, start_logits.shape) self.assertEqual(expected_output_shape, end_logits.shape) # Ensure that the logits, when softmaxed, create the outputs. 
input_tensor = tf.keras.Input(expected_output_shape[1:]) output_tensor = tf.keras.layers.Activation(tf.nn.log_softmax)(input_tensor) softmax_model = tf.keras.Model(input_tensor, output_tensor) start_softmax = softmax_model.predict(start_logits) self.assertAllClose(start_outputs, start_softmax) end_softmax = softmax_model.predict(end_logits) self.assertAllClose(end_outputs, end_softmax) def test_network_invocation_with_external_logit_output(self): """Validate that the logit outputs are correct.""" sequence_length = 15 input_width = 512 test_network = span_labeling.SpanLabeling( input_width=input_width, output='predictions') logit_network = span_labeling.SpanLabeling( input_width=input_width, output='logits') logit_network.set_weights(test_network.get_weights()) # Create a 3-dimensional input (the first dimension is implicit). sequence_data = tf.keras.Input( shape=(sequence_length, input_width), dtype=tf.float32) output = test_network(sequence_data) logit_output = logit_network(sequence_data) model = tf.keras.Model(sequence_data, output) logit_model = tf.keras.Model(sequence_data, logit_output) batch_size = 3 input_data = 10 * np.random.random_sample( (batch_size, sequence_length, input_width)) start_outputs, end_outputs = model.predict(input_data) start_logits, end_logits = logit_model.predict(input_data) # Ensure that the tensor shapes are correct. expected_output_shape = (batch_size, sequence_length) self.assertEqual(expected_output_shape, start_outputs.shape) self.assertEqual(expected_output_shape, end_outputs.shape) self.assertEqual(expected_output_shape, start_logits.shape) self.assertEqual(expected_output_shape, end_logits.shape) # Ensure that the logits, when softmaxed, create the outputs. input_tensor = tf.keras.Input(expected_output_shape[1:]) output_tensor = tf.keras.layers.Activation(tf.nn.log_softmax)(input_tensor) softmax_model = tf.keras.Model(input_tensor, output_tensor) start_softmax = softmax_model.predict(start_logits) self.assertAllClose(start_outputs, start_softmax) end_softmax = softmax_model.predict(end_logits) self.assertAllClose(end_outputs, end_softmax) def test_serialize_deserialize(self): # Create a network object that sets all of its config options. network = span_labeling.SpanLabeling( input_width=128, activation='relu', initializer='zeros', output='predictions') # Create another network object from the first object's config. new_network = span_labeling.SpanLabeling.from_config(network.get_config()) # Validate that the config can be forced to JSON. _ = new_network.to_json() # If the serialization was successful, the new config should match the old. 
self.assertAllEqual(network.get_config(), new_network.get_config()) def test_unknown_output_type_fails(self): with self.assertRaisesRegex(ValueError, 'Unknown `output` value "bad".*'): _ = span_labeling.SpanLabeling(input_width=10, output='bad') class XLNetSpanLabelingTest(tf.test.TestCase): def test_basic_invocation_train(self): batch_size = 2 seq_length = 8 hidden_size = 4 sequence_data = np.random.uniform( size=(batch_size, seq_length, hidden_size)).astype('float32') paragraph_mask = np.random.uniform( size=(batch_size, seq_length)).astype('float32') class_index = np.random.uniform(size=(batch_size)).astype('uint8') start_positions = np.zeros(shape=(batch_size)).astype('uint8') layer = span_labeling.XLNetSpanLabeling( input_width=hidden_size, start_n_top=2, end_n_top=2, activation='tanh', dropout_rate=0., initializer='glorot_uniform') output = layer(sequence_data=sequence_data, class_index=class_index, paragraph_mask=paragraph_mask, start_positions=start_positions, training=True) expected_keys = { 'start_logits', 'end_logits', 'class_logits', 'start_predictions', 'end_predictions', } self.assertSetEqual(expected_keys, set(output.keys())) def test_basic_invocation_beam_search(self): batch_size = 2 seq_length = 8 hidden_size = 4 top_n = 5 sequence_data = np.random.uniform( size=(batch_size, seq_length, hidden_size)).astype('float32') paragraph_mask = np.random.uniform( size=(batch_size, seq_length)).astype('float32') class_index = np.random.uniform(size=(batch_size)).astype('uint8') layer = span_labeling.XLNetSpanLabeling( input_width=hidden_size, start_n_top=top_n, end_n_top=top_n, activation='tanh', dropout_rate=0., initializer='glorot_uniform') output = layer(sequence_data=sequence_data, class_index=class_index, paragraph_mask=paragraph_mask, training=False) expected_keys = { 'start_top_predictions', 'end_top_predictions', 'class_logits', 'start_top_index', 'end_top_index', 'start_logits', 'end_logits', 'start_predictions', 'end_predictions' } self.assertSetEqual(expected_keys, set(output.keys())) def test_subclass_invocation(self): """Tests basic invocation of this layer wrapped in a subclass.""" seq_length = 8 hidden_size = 4 batch_size = 2 sequence_data = tf.keras.Input(shape=(seq_length, hidden_size), dtype=tf.float32) class_index = tf.keras.Input(shape=(), dtype=tf.uint8) paragraph_mask = tf.keras.Input(shape=(seq_length), dtype=tf.float32) start_positions = tf.keras.Input(shape=(), dtype=tf.int32) layer = span_labeling.XLNetSpanLabeling( input_width=hidden_size, start_n_top=5, end_n_top=5, activation='tanh', dropout_rate=0., initializer='glorot_uniform') output = layer(sequence_data=sequence_data, class_index=class_index, paragraph_mask=paragraph_mask, start_positions=start_positions) model = tf.keras.Model( inputs={ 'sequence_data': sequence_data, 'class_index': class_index, 'paragraph_mask': paragraph_mask, 'start_positions': start_positions, }, outputs=output) sequence_data = tf.random.uniform( shape=(batch_size, seq_length, hidden_size), dtype=tf.float32) paragraph_mask = tf.random.uniform( shape=(batch_size, seq_length), dtype=tf.float32) class_index = tf.ones(shape=(batch_size,), dtype=tf.uint8) start_positions = tf.random.uniform( shape=(batch_size,), maxval=5, dtype=tf.int32) inputs = dict(sequence_data=sequence_data, paragraph_mask=paragraph_mask, class_index=class_index, start_positions=start_positions) output = model(inputs) self.assertIsInstance(output, dict) # Test `call` without training flag. 
output = model(inputs, training=False) self.assertIsInstance(output, dict) # Test `call` with training flag. # Note: this fails due to incompatibility with the functional API. with self.assertRaisesRegex(AssertionError, 'Could not compute output KerasTensor'): model(inputs, training=True) def test_serialize_deserialize(self): # Create a network object that sets all of its config options. network = span_labeling.XLNetSpanLabeling( input_width=128, start_n_top=5, end_n_top=1, activation='tanh', dropout_rate=0.34, initializer='zeros') # Create another network object from the first object's config. new_network = span_labeling.XLNetSpanLabeling.from_config( network.get_config()) # If the serialization was successful, the new config should match the old. self.assertAllEqual(network.get_config(), new_network.get_config()) if __name__ == '__main__': tf.test.main()
11,974
38.391447
79
py
models
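The tests above exercise `SpanLabeling` on random features. As a hedged sketch (the encoder that would normally produce `sequence_data` is omitted and all sizes are illustrative), the head can also be used on its own:

# Hedged sketch: SpanLabeling turns [batch, seq_len, width] features into
# per-token start/end scores. Illustrative sizes only.
import tensorflow as tf

from official.nlp.modeling.networks import span_labeling

head = span_labeling.SpanLabeling(input_width=16, output='predictions')
sequence_data = tf.random.normal((2, 8, 16))    # stand-in for encoder output
start_scores, end_scores = head(sequence_data)  # each [batch, seq_len]
best_start = tf.argmax(start_scores, axis=-1)   # predicted span start index
best_end = tf.argmax(end_scores, axis=-1)       # predicted span end index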
models-master/official/nlp/modeling/models/bert_pretrainer.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """BERT Pre-training model.""" # pylint: disable=g-classes-have-attributes import collections import copy from typing import List, Optional from absl import logging import gin import tensorflow as tf from official.modeling import tf_utils from official.nlp.modeling import layers from official.nlp.modeling import networks @tf.keras.utils.register_keras_serializable(package='Text') class BertPretrainer(tf.keras.Model): """BERT pretraining model. [Note] Please use the new `BertPretrainerV2` for your projects. The BertPretrainer allows a user to pass in a transformer stack, and instantiates the masked language model and classification networks that are used to create the training objectives. *Note* that the model is constructed by [Keras Functional API](https://keras.io/guides/functional_api/). Args: network: A transformer network. This network should output a sequence output and a classification output. num_classes: Number of classes to predict from the classification network. num_token_predictions: Number of tokens to predict from the masked LM. embedding_table: Embedding table of a network. If None, the "network.get_embedding_table()" is used. activation: The activation (if any) to use in the masked LM network. If None, no activation will be used. initializer: The initializer (if any) to use in the masked LM and classification networks. Defaults to a Glorot uniform initializer. output: The output style for this network. Can be either `logits` or `predictions`. """ def __init__(self, network, num_classes, num_token_predictions, embedding_table=None, activation=None, initializer='glorot_uniform', output='logits', **kwargs): # We want to use the inputs of the passed network as the inputs to this # Model. To do this, we need to keep a copy of the network inputs for use # when we construct the Model object at the end of init. (We keep a copy # because we'll be adding another tensor to the copy later.) network_inputs = network.inputs inputs = copy.copy(network_inputs) # Because we have a copy of inputs to create this Model object, we can # invoke the Network object with its own input tensors to start the Model. # Note that, because of how deferred construction happens, we can't use # the copy of the list here - by the time the network is invoked, the list # object contains the additional input added below. sequence_output, cls_output = network(network_inputs) # The encoder network may get outputs from all layers. if isinstance(sequence_output, list): sequence_output = sequence_output[-1] if isinstance(cls_output, list): cls_output = cls_output[-1] sequence_output_length = sequence_output.shape.as_list()[1] if sequence_output_length is not None and (sequence_output_length < num_token_predictions): raise ValueError( "The passed network's output length is %s, which is less than the " 'requested num_token_predictions %s.' 
% (sequence_output_length, num_token_predictions)) masked_lm_positions = tf.keras.layers.Input( shape=(num_token_predictions,), name='masked_lm_positions', dtype=tf.int32) inputs.append(masked_lm_positions) if embedding_table is None: embedding_table = network.get_embedding_table() masked_lm = layers.MaskedLM( embedding_table=embedding_table, activation=activation, initializer=tf_utils.clone_initializer(initializer), output=output, name='cls/predictions') lm_outputs = masked_lm( sequence_output, masked_positions=masked_lm_positions) classification = networks.Classification( input_width=cls_output.shape[-1], num_classes=num_classes, initializer=tf_utils.clone_initializer(initializer), output=output, name='classification') sentence_outputs = classification(cls_output) super(BertPretrainer, self).__init__( inputs=inputs, outputs=dict(masked_lm=lm_outputs, classification=sentence_outputs), **kwargs) # b/164516224 # Once we've created the network using the Functional API, we call # super().__init__ as though we were invoking the Functional API Model # constructor, resulting in this object having all the properties of a model # created using the Functional API. Once super().__init__ is called, we # can assign attributes to `self` - note that all `self` assignments are # below this line. config_dict = { 'network': network, 'num_classes': num_classes, 'num_token_predictions': num_token_predictions, 'activation': activation, 'initializer': initializer, 'output': output, } # We are storing the config dict as a namedtuple here to ensure checkpoint # compatibility with an earlier version of this model which did not track # the config dict attribute. TF does not track immutable attrs which # do not contain Trackables, so by creating a config namedtuple instead of # a dict we avoid tracking it. config_cls = collections.namedtuple('Config', config_dict.keys()) self._config = config_cls(**config_dict) self.encoder = network self.classification = classification self.masked_lm = masked_lm def get_config(self): return dict(self._config._asdict()) @classmethod def from_config(cls, config, custom_objects=None): return cls(**config) @tf.keras.utils.register_keras_serializable(package='Text') @gin.configurable class BertPretrainerV2(tf.keras.Model): """BERT pretraining model V2. Adds the masked language model head and optional classification heads upon the transformer encoder. Args: encoder_network: A transformer network. This network should output a sequence output and a classification output. mlm_activation: The activation (if any) to use in the masked LM network. If None, no activation will be used. mlm_initializer: The initializer (if any) to use in the masked LM. Default to a Glorot uniform initializer. classification_heads: A list of optional head layers to transform on encoder sequence outputs. customized_masked_lm: A customized masked_lm layer. If None, will create a standard layer from `layers.MaskedLM`; if not None, will use the specified masked_lm layer. Above arguments `mlm_activation` and `mlm_initializer` will be ignored. name: The name of the model. Inputs: Inputs defined by the encoder network, plus `masked_lm_positions` as a dictionary. Outputs: A dictionary of `lm_output`, classification head outputs keyed by head names, and also outputs from `encoder_network`, keyed by `sequence_output` and `encoder_outputs` (if any). 
""" def __init__( self, encoder_network: tf.keras.Model, mlm_activation=None, mlm_initializer='glorot_uniform', classification_heads: Optional[List[tf.keras.layers.Layer]] = None, customized_masked_lm: Optional[tf.keras.layers.Layer] = None, name: str = 'bert', **kwargs): super().__init__(self, name=name, **kwargs) self._config = { 'encoder_network': encoder_network, 'mlm_initializer': mlm_initializer, 'mlm_activation': mlm_activation, 'classification_heads': classification_heads, 'name': name, } self.encoder_network = encoder_network # Makes sure the weights are built. _ = self.encoder_network(self.encoder_network.inputs) inputs = copy.copy(self.encoder_network.inputs) self.classification_heads = classification_heads or [] if len(set([cls.name for cls in self.classification_heads])) != len( self.classification_heads): raise ValueError('Classification heads should have unique names.') self.masked_lm = customized_masked_lm or layers.MaskedLM( embedding_table=self.encoder_network.get_embedding_table(), activation=mlm_activation, initializer=mlm_initializer, name='cls/predictions') masked_lm_positions = tf.keras.layers.Input( shape=(None,), name='masked_lm_positions', dtype=tf.int32) if isinstance(inputs, dict): inputs['masked_lm_positions'] = masked_lm_positions else: inputs.append(masked_lm_positions) self.inputs = inputs def call(self, inputs): # pytype: disable=signature-mismatch # overriding-parameter-count-checks if isinstance(inputs, list): logging.warning('List inputs to BertPretrainer are discouraged.') inputs = dict([ (ref.name, tensor) for ref, tensor in zip(self.inputs, inputs) ]) outputs = dict() encoder_network_outputs = self.encoder_network(inputs) if isinstance(encoder_network_outputs, list): outputs['pooled_output'] = encoder_network_outputs[1] # When `encoder_network` was instantiated with return_all_encoder_outputs # set to True, `encoder_network_outputs[0]` is a list containing # all transformer layers' output. if isinstance(encoder_network_outputs[0], list): outputs['encoder_outputs'] = encoder_network_outputs[0] outputs['sequence_output'] = encoder_network_outputs[0][-1] else: outputs['sequence_output'] = encoder_network_outputs[0] elif isinstance(encoder_network_outputs, dict): outputs = encoder_network_outputs else: raise ValueError('encoder_network\'s output should be either a list ' 'or a dict, but got %s' % encoder_network_outputs) sequence_output = outputs['sequence_output'] # Inference may not have masked_lm_positions and mlm_logits is not needed. if 'masked_lm_positions' in inputs: masked_lm_positions = inputs['masked_lm_positions'] outputs['mlm_logits'] = self.masked_lm( sequence_output, masked_positions=masked_lm_positions) for cls_head in self.classification_heads: cls_outputs = cls_head(sequence_output) if isinstance(cls_outputs, dict): outputs.update(cls_outputs) else: outputs[cls_head.name] = cls_outputs return outputs @property def checkpoint_items(self): """Returns a dictionary of items to be additionally checkpointed.""" items = dict(encoder=self.encoder_network, masked_lm=self.masked_lm) for head in self.classification_heads: for key, item in head.checkpoint_items.items(): items['.'.join([head.name, key])] = item return items def get_config(self): return self._config @classmethod def from_config(cls, config, custom_objects=None): return cls(**config)
11,480
39.712766
100
py
models
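The `BertPretrainerV2` docstring above lists the expected inputs (the encoder inputs plus `masked_lm_positions`) and the keys of the output dictionary. Below is a hedged end-to-end sketch with toy sizes, assuming only the imports already used in this repo.

# Hedged sketch: BertPretrainerV2 wrapped around a toy BertEncoderV2.
import tensorflow as tf

from official.nlp.modeling import networks
from official.nlp.modeling.models import bert_pretrainer

encoder = networks.BertEncoderV2(
    vocab_size=100, hidden_size=32, num_layers=2, num_attention_heads=2,
    max_sequence_length=16)
pretrainer = bert_pretrainer.BertPretrainerV2(encoder_network=encoder)

batch_size, seq_len, num_predictions = 2, 8, 3
inputs = dict(
    input_word_ids=tf.random.uniform(
        (batch_size, seq_len), maxval=100, dtype=tf.int32),
    input_mask=tf.ones((batch_size, seq_len), dtype=tf.int32),
    input_type_ids=tf.zeros((batch_size, seq_len), dtype=tf.int32),
    # Positions of the masked tokens the MLM head should predict.
    masked_lm_positions=tf.random.uniform(
        (batch_size, num_predictions), maxval=seq_len, dtype=tf.int32))

outputs = pretrainer(inputs)
# 'mlm_logits' is [batch, num_predictions, vocab_size]; 'sequence_output',
# 'pooled_output' and 'encoder_outputs' come straight from the encoder.
print(outputs['mlm_logits'].shape)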
models-master/official/nlp/modeling/models/bert_pretrainer_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for BERT pretrainer model.""" import itertools from absl.testing import parameterized import tensorflow as tf from official.nlp.modeling import layers from official.nlp.modeling import networks from official.nlp.modeling.models import bert_pretrainer class BertPretrainerTest(tf.test.TestCase, parameterized.TestCase): def test_bert_pretrainer(self): """Validate that the Keras object can be created.""" # Build a transformer network to use within the BERT trainer. vocab_size = 100 sequence_length = 512 test_network = networks.BertEncoder( vocab_size=vocab_size, num_layers=2, max_sequence_length=sequence_length) # Create a BERT trainer with the created network. num_classes = 3 num_token_predictions = 2 bert_trainer_model = bert_pretrainer.BertPretrainer( test_network, num_classes=num_classes, num_token_predictions=num_token_predictions) # Create a set of 2-dimensional inputs (the first dimension is implicit). word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) masked_lm_positions = tf.keras.Input( shape=(num_token_predictions,), dtype=tf.int32) # Invoke the trainer model on the inputs. This causes the layer to be built. outputs = bert_trainer_model( [word_ids, mask, type_ids, masked_lm_positions]) # Validate that the outputs are of the expected shape. expected_lm_shape = [None, num_token_predictions, vocab_size] expected_classification_shape = [None, num_classes] self.assertAllEqual(expected_lm_shape, outputs['masked_lm'].shape.as_list()) self.assertAllEqual(expected_classification_shape, outputs['classification'].shape.as_list()) def test_bert_trainer_tensor_call(self): """Validate that the Keras object can be invoked.""" # Build a transformer network to use within the BERT trainer. test_network = networks.BertEncoder(vocab_size=100, num_layers=2) # Create a BERT trainer with the created network. bert_trainer_model = bert_pretrainer.BertPretrainer( test_network, num_classes=2, num_token_predictions=2) # Create a set of 2-dimensional data tensors to feed into the model. word_ids = tf.constant([[1, 1], [2, 2]], dtype=tf.int32) mask = tf.constant([[1, 1], [1, 0]], dtype=tf.int32) type_ids = tf.constant([[1, 1], [2, 2]], dtype=tf.int32) lm_mask = tf.constant([[1, 1], [1, 0]], dtype=tf.int32) # Invoke the trainer model on the tensors. In Eager mode, this does the # actual calculation. (We can't validate the outputs, since the network is # too complex: this simply ensures we're not hitting runtime errors.) _ = bert_trainer_model([word_ids, mask, type_ids, lm_mask]) def test_serialize_deserialize(self): """Validate that the BERT trainer can be serialized and deserialized.""" # Build a transformer network to use within the BERT trainer. (Here, we use # a short sequence_length for convenience.) 
test_network = networks.BertEncoder( vocab_size=100, num_layers=2, max_sequence_length=5) # Create a BERT trainer with the created network. (Note that all the args # are different, so we can catch any serialization mismatches.) bert_trainer_model = bert_pretrainer.BertPretrainer( test_network, num_classes=4, num_token_predictions=3) # Create another BERT trainer via serialization and deserialization. config = bert_trainer_model.get_config() new_bert_trainer_model = bert_pretrainer.BertPretrainer.from_config(config) # Validate that the config can be forced to JSON. _ = new_bert_trainer_model.to_json() # If the serialization was successful, the new config should match the old. self.assertAllEqual(bert_trainer_model.get_config(), new_bert_trainer_model.get_config()) class BertPretrainerV2Test(tf.test.TestCase, parameterized.TestCase): @parameterized.parameters(itertools.product( (False, True), (False, True), (False, True), (False, True), )) def test_bert_pretrainerv2(self, dict_outputs, return_all_encoder_outputs, use_customized_masked_lm, has_masked_lm_positions): """Validate that the Keras object can be created.""" # Build a transformer network to use within the BERT trainer. del dict_outputs, return_all_encoder_outputs vocab_size = 100 sequence_length = 512 hidden_size = 48 num_layers = 2 test_network = networks.BertEncoderV2( vocab_size=vocab_size, num_layers=num_layers, hidden_size=hidden_size, max_sequence_length=sequence_length) _ = test_network(test_network.inputs) # Create a BERT trainer with the created network. if use_customized_masked_lm: customized_masked_lm = layers.MaskedLM( embedding_table=test_network.get_embedding_table()) else: customized_masked_lm = None bert_trainer_model = bert_pretrainer.BertPretrainerV2( encoder_network=test_network, customized_masked_lm=customized_masked_lm) num_token_predictions = 20 # Create a set of 2-dimensional inputs (the first dimension is implicit). inputs = dict( input_word_ids=tf.keras.Input(shape=(sequence_length,), dtype=tf.int32), input_mask=tf.keras.Input(shape=(sequence_length,), dtype=tf.int32), input_type_ids=tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)) if has_masked_lm_positions: inputs['masked_lm_positions'] = tf.keras.Input( shape=(num_token_predictions,), dtype=tf.int32) # Invoke the trainer model on the inputs. This causes the layer to be built. outputs = bert_trainer_model(inputs) has_encoder_outputs = True # dict_outputs or return_all_encoder_outputs expected_keys = ['sequence_output', 'pooled_output'] if has_encoder_outputs: expected_keys.append('encoder_outputs') if has_masked_lm_positions: expected_keys.append('mlm_logits') self.assertSameElements(outputs.keys(), expected_keys) # Validate that the outputs are of the expected shape. expected_lm_shape = [None, num_token_predictions, vocab_size] if has_masked_lm_positions: self.assertAllEqual(expected_lm_shape, outputs['mlm_logits'].shape.as_list()) expected_sequence_output_shape = [None, sequence_length, hidden_size] self.assertAllEqual(expected_sequence_output_shape, outputs['sequence_output'].shape.as_list()) expected_pooled_output_shape = [None, hidden_size] self.assertAllEqual(expected_pooled_output_shape, outputs['pooled_output'].shape.as_list()) def test_multiple_cls_outputs(self): """Validate that the Keras object can be created.""" # Build a transformer network to use within the BERT trainer. 
vocab_size = 100 sequence_length = 512 hidden_size = 48 num_layers = 2 test_network = networks.BertEncoderV2( vocab_size=vocab_size, num_layers=num_layers, hidden_size=hidden_size, max_sequence_length=sequence_length) bert_trainer_model = bert_pretrainer.BertPretrainerV2( encoder_network=test_network, classification_heads=[layers.MultiClsHeads( inner_dim=5, cls_list=[('foo', 2), ('bar', 3)])]) num_token_predictions = 20 # Create a set of 2-dimensional inputs (the first dimension is implicit). inputs = dict( input_word_ids=tf.keras.Input(shape=(sequence_length,), dtype=tf.int32), input_mask=tf.keras.Input(shape=(sequence_length,), dtype=tf.int32), input_type_ids=tf.keras.Input(shape=(sequence_length,), dtype=tf.int32), masked_lm_positions=tf.keras.Input( shape=(num_token_predictions,), dtype=tf.int32)) # Invoke the trainer model on the inputs. This causes the layer to be built. outputs = bert_trainer_model(inputs) self.assertEqual(outputs['foo'].shape.as_list(), [None, 2]) self.assertEqual(outputs['bar'].shape.as_list(), [None, 3]) def test_v2_serialize_deserialize(self): """Validate that the BERT trainer can be serialized and deserialized.""" # Build a transformer network to use within the BERT trainer. test_network = networks.BertEncoderV2(vocab_size=100, num_layers=2) # Create a BERT trainer with the created network. (Note that all the args # are different, so we can catch any serialization mismatches.) bert_trainer_model = bert_pretrainer.BertPretrainerV2( encoder_network=test_network) # Create another BERT trainer via serialization and deserialization. config = bert_trainer_model.get_config() new_bert_trainer_model = bert_pretrainer.BertPretrainerV2.from_config( config) # Validate that the config can be forced to JSON. _ = new_bert_trainer_model.to_json() # If the serialization was successful, the new config should match the old. self.assertAllEqual(bert_trainer_model.get_config(), new_bert_trainer_model.get_config()) if __name__ == '__main__': tf.test.main()
9,900
41.676724
80
py
models
models-master/official/nlp/modeling/models/bert_classifier_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for BERT trainer network.""" from absl.testing import parameterized import tensorflow as tf from official.nlp.modeling import layers from official.nlp.modeling import networks from official.nlp.modeling.models import bert_classifier class BertClassifierTest(tf.test.TestCase, parameterized.TestCase): @parameterized.named_parameters(('single_cls', 1, False), ('3_cls', 3, False), ('3_cls_dictoutputs', 3, True)) def test_bert_trainer(self, num_classes, dict_outputs): """Validate that the Keras object can be created.""" # Build a transformer network to use within the BERT trainer. vocab_size = 100 sequence_length = 512 test_network = networks.BertEncoder( vocab_size=vocab_size, num_layers=2, dict_outputs=dict_outputs) # Create a BERT trainer with the created network. bert_trainer_model = bert_classifier.BertClassifier( test_network, num_classes=num_classes) # Create a set of 2-dimensional inputs (the first dimension is implicit). word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) # Invoke the trainer model on the inputs. This causes the layer to be built. cls_outs = bert_trainer_model([word_ids, mask, type_ids]) # Validate that the outputs are of the expected shape. expected_classification_shape = [None, num_classes] self.assertAllEqual(expected_classification_shape, cls_outs.shape.as_list()) @parameterized.named_parameters( ('single_cls', 1, False), ('2_cls', 2, False), ('single_cls_custom_head', 1, True), ('2_cls_custom_head', 2, True)) def test_bert_trainer_tensor_call(self, num_classes, use_custom_head): """Validate that the Keras object can be invoked.""" # Build a transformer network to use within the BERT trainer. (Here, we use # a short sequence_length for convenience.) test_network = networks.BertEncoder(vocab_size=100, num_layers=2) cls_head = layers.GaussianProcessClassificationHead( inner_dim=0, num_classes=num_classes) if use_custom_head else None # Create a BERT trainer with the created network. bert_trainer_model = bert_classifier.BertClassifier( test_network, num_classes=num_classes, cls_head=cls_head) # Create a set of 2-dimensional data tensors to feed into the model. word_ids = tf.constant([[1, 1], [2, 2]], dtype=tf.int32) mask = tf.constant([[1, 1], [1, 0]], dtype=tf.int32) type_ids = tf.constant([[1, 1], [2, 2]], dtype=tf.int32) # Invoke the trainer model on the tensors. In Eager mode, this does the # actual calculation. (We can't validate the outputs, since the network is # too complex: this simply ensures we're not hitting runtime errors.) 
_ = bert_trainer_model([word_ids, mask, type_ids]) @parameterized.named_parameters( ('default_cls_head', None), ('sngp_cls_head', layers.GaussianProcessClassificationHead( inner_dim=0, num_classes=4))) def test_serialize_deserialize(self, cls_head): """Validate that the BERT trainer can be serialized and deserialized.""" # Build a transformer network to use within the BERT trainer. test_network = networks.BertEncoder(vocab_size=100, num_layers=2) # Create a BERT trainer with the created network. (Note that all the args # are different, so we can catch any serialization mismatches.) bert_trainer_model = bert_classifier.BertClassifier( test_network, num_classes=4, initializer='zeros', cls_head=cls_head) # Create another BERT trainer via serialization and deserialization. config = bert_trainer_model.get_config() new_bert_trainer_model = bert_classifier.BertClassifier.from_config(config) # Validate that the config can be forced to JSON. _ = new_bert_trainer_model.to_json() # If the serialization was successful, the new config should match the old. self.assertAllEqual(bert_trainer_model.get_config(), new_bert_trainer_model.get_config()) if __name__ == '__main__': tf.test.main()
file_length: 4,815 | avg_line_length: 43.592593 | max_line_length: 80 | extension_type: py
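bert_classifier_test.py above exercises BertClassifier with symbolic inputs and tiny constant tensors. A minimal eager sketch of the same usage; the token ids and the three-way head below are arbitrary illustrative values.

import tensorflow as tf
from official.nlp.modeling import networks
from official.nlp.modeling.models import bert_classifier

encoder = networks.BertEncoder(vocab_size=100, num_layers=2)
model = bert_classifier.BertClassifier(encoder, num_classes=3)

word_ids = tf.constant([[5, 9, 2, 0], [7, 3, 0, 0]], dtype=tf.int32)
mask = tf.constant([[1, 1, 1, 0], [1, 1, 0, 0]], dtype=tf.int32)
type_ids = tf.zeros_like(word_ids)

logits = model([word_ids, mask, type_ids])      # shape (2, 3)
probs = tf.nn.softmax(logits, axis=-1)          # per-example class probabilities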
models
models-master/official/nlp/modeling/models/bert_span_labeler.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """BERT Question Answering model.""" # pylint: disable=g-classes-have-attributes import collections import tensorflow as tf from official.nlp.modeling import networks @tf.keras.utils.register_keras_serializable(package='Text') class BertSpanLabeler(tf.keras.Model): """Span labeler model based on a BERT-style transformer-based encoder. This is an implementation of the network structure surrounding a transformer encoder as described in "BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding" (https://arxiv.org/abs/1810.04805). The BertSpanLabeler allows a user to pass in a transformer encoder, and instantiates a span labeling network based on a single dense layer. *Note* that the model is constructed by [Keras Functional API](https://keras.io/guides/functional_api/). Args: network: A transformer network. This network should output a sequence output and a classification output. Furthermore, it should expose its embedding table via a `get_embedding_table` method. initializer: The initializer (if any) to use in the span labeling network. Defaults to a Glorot uniform initializer. output: The output style for this network. Can be either `logit`' or `predictions`. """ def __init__(self, network, initializer='glorot_uniform', output='logits', **kwargs): # We want to use the inputs of the passed network as the inputs to this # Model. To do this, we need to keep a handle to the network inputs for use # when we construct the Model object at the end of init. inputs = network.inputs # Because we have a copy of inputs to create this Model object, we can # invoke the Network object with its own input tensors to start the Model. outputs = network(inputs) if isinstance(outputs, list): sequence_output = outputs[0] else: sequence_output = outputs['sequence_output'] # The input network (typically a transformer model) may get outputs from all # layers. When this case happens, we retrieve the last layer output. if isinstance(sequence_output, list): sequence_output = sequence_output[-1] # This is an instance variable for ease of access to the underlying task # network. span_labeling = networks.SpanLabeling( input_width=sequence_output.shape[-1], initializer=initializer, output=output, name='span_labeling') start_logits, end_logits = span_labeling(sequence_output) # Use identity layers wrapped in lambdas to explicitly name the output # tensors. This allows us to use string-keyed dicts in Keras fit/predict/ # evaluate calls. start_logits = tf.keras.layers.Lambda( tf.identity, name='start_positions')( start_logits) end_logits = tf.keras.layers.Lambda( tf.identity, name='end_positions')( end_logits) logits = [start_logits, end_logits] # b/164516224 # Once we've created the network using the Functional API, we call # super().__init__ as though we were invoking the Functional API Model # constructor, resulting in this object having all the properties of a model # created using the Functional API. 
Once super().__init__ is called, we # can assign attributes to `self` - note that all `self` assignments are # below this line. super(BertSpanLabeler, self).__init__( inputs=inputs, outputs=logits, **kwargs) self._network = network config_dict = { 'network': network, 'initializer': initializer, 'output': output, } # We are storing the config dict as a namedtuple here to ensure checkpoint # compatibility with an earlier version of this model which did not track # the config dict attribute. TF does not track immutable attrs which # do not contain Trackables, so by creating a config namedtuple instead of # a dict we avoid tracking it. config_cls = collections.namedtuple('Config', config_dict.keys()) self._config = config_cls(**config_dict) self.span_labeling = span_labeling @property def checkpoint_items(self): return dict(encoder=self._network) def get_config(self): return dict(self._config._asdict()) @classmethod def from_config(cls, config, custom_objects=None): return cls(**config)
file_length: 4,997 | avg_line_length: 38.666667 | max_line_length: 80 | extension_type: py
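bert_span_labeler.py builds its outputs as a [start_logits, end_logits] pair over the input sequence. A small usage sketch under assumed toy sizes (vocab 100, two layers, arbitrary token ids); the argmax step is just one simple way to turn the logits into a predicted span.

import tensorflow as tf
from official.nlp.modeling import networks
from official.nlp.modeling.models import bert_span_labeler

encoder = networks.BertEncoder(vocab_size=100, num_layers=2)
model = bert_span_labeler.BertSpanLabeler(encoder)

word_ids = tf.constant([[5, 9, 2, 4], [7, 3, 8, 0]], dtype=tf.int32)
mask = tf.constant([[1, 1, 1, 1], [1, 1, 1, 0]], dtype=tf.int32)
type_ids = tf.zeros_like(word_ids)

start_logits, end_logits = model([word_ids, mask, type_ids])
# Both tensors have shape (batch, seq_len); argmax gives one candidate span.
start = tf.argmax(start_logits, axis=-1)
end = tf.argmax(end_logits, axis=-1)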
models
models-master/official/nlp/modeling/models/bert_token_classifier_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for BERT token classifier.""" from absl.testing import parameterized import tensorflow as tf from official.nlp.modeling import networks from official.nlp.modeling.models import bert_token_classifier class BertTokenClassifierTest(tf.test.TestCase, parameterized.TestCase): @parameterized.parameters((True, True), (False, False)) def test_bert_trainer(self, dict_outputs, output_encoder_outputs): """Validate that the Keras object can be created.""" # Build a transformer network to use within the BERT trainer. vocab_size = 100 sequence_length = 512 hidden_size = 768 test_network = networks.BertEncoder( vocab_size=vocab_size, num_layers=2, max_sequence_length=sequence_length, dict_outputs=dict_outputs, hidden_size=hidden_size) # Create a BERT trainer with the created network. num_classes = 3 bert_trainer_model = bert_token_classifier.BertTokenClassifier( test_network, num_classes=num_classes, output_encoder_outputs=output_encoder_outputs) # Create a set of 2-dimensional inputs (the first dimension is implicit). word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) # Invoke the trainer model on the inputs. This causes the layer to be built. outputs = bert_trainer_model([word_ids, mask, type_ids]) if output_encoder_outputs: logits = outputs['logits'] encoder_outputs = outputs['encoder_outputs'] self.assertAllEqual(encoder_outputs.shape.as_list(), [None, sequence_length, hidden_size]) else: logits = outputs['logits'] # Validate that the outputs are of the expected shape. expected_classification_shape = [None, sequence_length, num_classes] self.assertAllEqual(expected_classification_shape, logits.shape.as_list()) def test_bert_trainer_tensor_call(self): """Validate that the Keras object can be invoked.""" # Build a transformer network to use within the BERT trainer. (Here, we use # a short sequence_length for convenience.) test_network = networks.BertEncoder( vocab_size=100, num_layers=2, max_sequence_length=2) # Create a BERT trainer with the created network. bert_trainer_model = bert_token_classifier.BertTokenClassifier( test_network, num_classes=2) # Create a set of 2-dimensional data tensors to feed into the model. word_ids = tf.constant([[1, 1], [2, 2]], dtype=tf.int32) mask = tf.constant([[1, 1], [1, 0]], dtype=tf.int32) type_ids = tf.constant([[1, 1], [2, 2]], dtype=tf.int32) # Invoke the trainer model on the tensors. In Eager mode, this does the # actual calculation. (We can't validate the outputs, since the network is # too complex: this simply ensures we're not hitting runtime errors.) _ = bert_trainer_model([word_ids, mask, type_ids]) def test_serialize_deserialize(self): """Validate that the BERT trainer can be serialized and deserialized.""" # Build a transformer network to use within the BERT trainer. 
(Here, we use # a short sequence_length for convenience.) test_network = networks.BertEncoder( vocab_size=100, num_layers=2, max_sequence_length=5) # Create a BERT trainer with the created network. (Note that all the args # are different, so we can catch any serialization mismatches.) bert_trainer_model = bert_token_classifier.BertTokenClassifier( test_network, num_classes=4, initializer='zeros', output='predictions') # Create another BERT trainer via serialization and deserialization. config = bert_trainer_model.get_config() new_bert_trainer_model = ( bert_token_classifier.BertTokenClassifier.from_config(config)) # Validate that the config can be forced to JSON. _ = new_bert_trainer_model.to_json() # If the serialization was successful, the new config should match the old. self.assertAllEqual(bert_trainer_model.get_config(), new_bert_trainer_model.get_config()) if __name__ == '__main__': tf.test.main()
file_length: 4,835 | avg_line_length: 41.421053 | max_line_length: 80 | extension_type: py
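The token-classifier tests above stop at checking output shapes. A short eager sketch of the default 'logits' output, with illustrative token ids and an argmax to recover per-token class ids:

import tensorflow as tf
from official.nlp.modeling import networks
from official.nlp.modeling.models import bert_token_classifier

encoder = networks.BertEncoder(vocab_size=100, num_layers=2, max_sequence_length=8)
model = bert_token_classifier.BertTokenClassifier(encoder, num_classes=3)

word_ids = tf.constant([[5, 9, 2, 0], [7, 3, 1, 0]], dtype=tf.int32)
mask = tf.constant([[1, 1, 1, 0], [1, 1, 1, 0]], dtype=tf.int32)
type_ids = tf.zeros_like(word_ids)

outputs = model([word_ids, mask, type_ids])
logits = outputs['logits']               # (batch, seq_len, num_classes)
tag_ids = tf.argmax(logits, axis=-1)     # per-token class ids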
models
models-master/official/nlp/modeling/models/electra_pretrainer_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for ELECTRA pre trainer network.""" import tensorflow as tf from official.nlp.modeling import networks from official.nlp.modeling.models import electra_pretrainer class ElectraPretrainerTest(tf.test.TestCase): def test_electra_pretrainer(self): """Validate that the Keras object can be created.""" # Build a transformer network to use within the ELECTRA trainer. vocab_size = 100 sequence_length = 512 test_generator_network = networks.BertEncoder( vocab_size=vocab_size, num_layers=2, max_sequence_length=sequence_length, dict_outputs=True) test_discriminator_network = networks.BertEncoder( vocab_size=vocab_size, num_layers=2, max_sequence_length=sequence_length, dict_outputs=True) # Create a ELECTRA trainer with the created network. num_classes = 3 num_token_predictions = 2 eletrca_trainer_model = electra_pretrainer.ElectraPretrainer( generator_network=test_generator_network, discriminator_network=test_discriminator_network, vocab_size=vocab_size, num_classes=num_classes, num_token_predictions=num_token_predictions, disallow_correct=True) # Create a set of 2-dimensional inputs (the first dimension is implicit). word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) lm_positions = tf.keras.Input( shape=(num_token_predictions,), dtype=tf.int32) lm_ids = tf.keras.Input(shape=(num_token_predictions,), dtype=tf.int32) inputs = { 'input_word_ids': word_ids, 'input_mask': mask, 'input_type_ids': type_ids, 'masked_lm_positions': lm_positions, 'masked_lm_ids': lm_ids } # Invoke the trainer model on the inputs. This causes the layer to be built. outputs = eletrca_trainer_model(inputs) lm_outs = outputs['lm_outputs'] cls_outs = outputs['sentence_outputs'] disc_logits = outputs['disc_logits'] disc_label = outputs['disc_label'] # Validate that the outputs are of the expected shape. expected_lm_shape = [None, num_token_predictions, vocab_size] expected_classification_shape = [None, num_classes] expected_disc_logits_shape = [None, sequence_length] expected_disc_label_shape = [None, sequence_length] self.assertAllEqual(expected_lm_shape, lm_outs.shape.as_list()) self.assertAllEqual(expected_classification_shape, cls_outs.shape.as_list()) self.assertAllEqual(expected_disc_logits_shape, disc_logits.shape.as_list()) self.assertAllEqual(expected_disc_label_shape, disc_label.shape.as_list()) def test_electra_trainer_tensor_call(self): """Validate that the Keras object can be invoked.""" # Build a transformer network to use within the ELECTRA trainer. (Here, we # use a short sequence_length for convenience.) 
test_generator_network = networks.BertEncoder( vocab_size=100, num_layers=4, max_sequence_length=3, dict_outputs=True) test_discriminator_network = networks.BertEncoder( vocab_size=100, num_layers=4, max_sequence_length=3, dict_outputs=True) # Create a ELECTRA trainer with the created network. eletrca_trainer_model = electra_pretrainer.ElectraPretrainer( generator_network=test_generator_network, discriminator_network=test_discriminator_network, vocab_size=100, num_classes=2, num_token_predictions=2) # Create a set of 2-dimensional data tensors to feed into the model. word_ids = tf.constant([[1, 1, 1], [2, 2, 2]], dtype=tf.int32) mask = tf.constant([[1, 1, 1], [1, 0, 0]], dtype=tf.int32) type_ids = tf.constant([[1, 1, 1], [2, 2, 2]], dtype=tf.int32) lm_positions = tf.constant([[0, 1], [0, 2]], dtype=tf.int32) lm_ids = tf.constant([[10, 20], [20, 30]], dtype=tf.int32) inputs = { 'input_word_ids': word_ids, 'input_mask': mask, 'input_type_ids': type_ids, 'masked_lm_positions': lm_positions, 'masked_lm_ids': lm_ids } # Invoke the trainer model on the tensors. In Eager mode, this does the # actual calculation. (We can't validate the outputs, since the network is # too complex: this simply ensures we're not hitting runtime errors.) _ = eletrca_trainer_model(inputs) def test_serialize_deserialize(self): """Validate that the ELECTRA trainer can be serialized and deserialized.""" # Build a transformer network to use within the BERT trainer. (Here, we use # a short sequence_length for convenience.) test_generator_network = networks.BertEncoder( vocab_size=100, num_layers=4, max_sequence_length=3) test_discriminator_network = networks.BertEncoder( vocab_size=100, num_layers=4, max_sequence_length=3) # Create a ELECTRA trainer with the created network. (Note that all the args # are different, so we can catch any serialization mismatches.) electra_trainer_model = electra_pretrainer.ElectraPretrainer( generator_network=test_generator_network, discriminator_network=test_discriminator_network, vocab_size=100, num_classes=2, num_token_predictions=2) # Create another BERT trainer via serialization and deserialization. config = electra_trainer_model.get_config() new_electra_trainer_model = electra_pretrainer.ElectraPretrainer.from_config( config) # Validate that the config can be forced to JSON. _ = new_electra_trainer_model.to_json() # If the serialization was successful, the new config should match the old. self.assertAllEqual(electra_trainer_model.get_config(), new_electra_trainer_model.get_config()) if __name__ == '__main__': tf.test.main()
file_length: 6,503 | avg_line_length: 41.509804 | max_line_length: 81 | extension_type: py
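electra_pretrainer_test.py already shows the dictionary-style call; the sketch below repeats it eagerly with small illustrative encoders and reads two of the output heads named in the first test (generator LM outputs, sentence outputs, discriminator logits and labels).

import tensorflow as tf
from official.nlp.modeling import networks
from official.nlp.modeling.models import electra_pretrainer

generator = networks.BertEncoder(
    vocab_size=100, num_layers=2, max_sequence_length=8, dict_outputs=True)
discriminator = networks.BertEncoder(
    vocab_size=100, num_layers=2, max_sequence_length=8, dict_outputs=True)
pretrainer = electra_pretrainer.ElectraPretrainer(
    generator_network=generator,
    discriminator_network=discriminator,
    vocab_size=100, num_classes=2, num_token_predictions=2)

inputs = {
    'input_word_ids': tf.constant([[5, 9, 2], [7, 3, 1]], dtype=tf.int32),
    'input_mask': tf.constant([[1, 1, 1], [1, 1, 0]], dtype=tf.int32),
    'input_type_ids': tf.zeros((2, 3), dtype=tf.int32),
    'masked_lm_positions': tf.constant([[0, 1], [0, 2]], dtype=tf.int32),
    'masked_lm_ids': tf.constant([[10, 20], [20, 30]], dtype=tf.int32),
}
outputs = pretrainer(inputs)
print(outputs['lm_outputs'].shape)     # generator MLM logits
print(outputs['disc_logits'].shape)    # discriminator logits, (batch, seq_len)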
models
models-master/official/nlp/modeling/models/dual_encoder.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Trainer network for dual encoder style models.""" # pylint: disable=g-classes-have-attributes import collections import tensorflow as tf from official.nlp.modeling import layers @tf.keras.utils.register_keras_serializable(package='Text') class DualEncoder(tf.keras.Model): """A dual encoder model based on a transformer-based encoder. This is an implementation of the dual encoder network structure based on the transfomer stack, as described in ["Language-agnostic BERT Sentence Embedding"](https://arxiv.org/abs/2007.01852) The DualEncoder allows a user to pass in a transformer stack, and build a dual encoder model based on the transformer stack. Args: network: A transformer network which should output an encoding output. max_seq_length: The maximum allowed sequence length for transformer. normalize: If set to True, normalize the encoding produced by transfomer. logit_scale: The scaling factor of dot products when doing training. logit_margin: The margin between positive and negative when doing training. output: The output style for this network. Can be either `logits` or `predictions`. If set to `predictions`, it will output the embedding producted by transformer network. """ def __init__(self, network: tf.keras.Model, max_seq_length: int = 32, normalize: bool = True, logit_scale: float = 1.0, logit_margin: float = 0.0, output: str = 'logits', **kwargs) -> None: if output == 'logits': left_word_ids = tf.keras.layers.Input( shape=(max_seq_length,), dtype=tf.int32, name='left_word_ids') left_mask = tf.keras.layers.Input( shape=(max_seq_length,), dtype=tf.int32, name='left_mask') left_type_ids = tf.keras.layers.Input( shape=(max_seq_length,), dtype=tf.int32, name='left_type_ids') else: # Keep the consistant with legacy BERT hub module input names. 
left_word_ids = tf.keras.layers.Input( shape=(max_seq_length,), dtype=tf.int32, name='input_word_ids') left_mask = tf.keras.layers.Input( shape=(max_seq_length,), dtype=tf.int32, name='input_mask') left_type_ids = tf.keras.layers.Input( shape=(max_seq_length,), dtype=tf.int32, name='input_type_ids') left_inputs = [left_word_ids, left_mask, left_type_ids] left_outputs = network(left_inputs) if isinstance(left_outputs, list): left_sequence_output, left_encoded = left_outputs else: left_sequence_output = left_outputs['sequence_output'] left_encoded = left_outputs['pooled_output'] if normalize: left_encoded = tf.keras.layers.Lambda( lambda x: tf.nn.l2_normalize(x, axis=1))( left_encoded) if output == 'logits': right_word_ids = tf.keras.layers.Input( shape=(max_seq_length,), dtype=tf.int32, name='right_word_ids') right_mask = tf.keras.layers.Input( shape=(max_seq_length,), dtype=tf.int32, name='right_mask') right_type_ids = tf.keras.layers.Input( shape=(max_seq_length,), dtype=tf.int32, name='right_type_ids') right_inputs = [right_word_ids, right_mask, right_type_ids] right_outputs = network(right_inputs) if isinstance(right_outputs, list): _, right_encoded = right_outputs else: right_encoded = right_outputs['pooled_output'] if normalize: right_encoded = tf.keras.layers.Lambda( lambda x: tf.nn.l2_normalize(x, axis=1))( right_encoded) dot_products = layers.MatMulWithMargin( logit_scale=logit_scale, logit_margin=logit_margin, name='dot_product') inputs = [ left_word_ids, left_mask, left_type_ids, right_word_ids, right_mask, right_type_ids ] left_logits, right_logits = dot_products(left_encoded, right_encoded) outputs = dict(left_logits=left_logits, right_logits=right_logits) elif output == 'predictions': inputs = [left_word_ids, left_mask, left_type_ids] # To keep consistent with legacy BERT hub modules, the outputs are # "pooled_output" and "sequence_output". outputs = dict( sequence_output=left_sequence_output, pooled_output=left_encoded) else: raise ValueError('output type %s is not supported' % output) # b/164516224 # Once we've created the network using the Functional API, we call # super().__init__ as though we were invoking the Functional API Model # constructor, resulting in this object having all the properties of a model # created using the Functional API. Once super().__init__ is called, we # can assign attributes to `self` - note that all `self` assignments are # below this line. super(DualEncoder, self).__init__(inputs=inputs, outputs=outputs, **kwargs) config_dict = { 'network': network, 'max_seq_length': max_seq_length, 'normalize': normalize, 'logit_scale': logit_scale, 'logit_margin': logit_margin, 'output': output, } # We are storing the config dict as a namedtuple here to ensure checkpoint # compatibility with an earlier version of this model which did not track # the config dict attribute. TF does not track immutable attrs which # do not contain Trackables, so by creating a config namedtuple instead of # a dict we avoid tracking it. config_cls = collections.namedtuple('Config', config_dict.keys()) self._config = config_cls(**config_dict) self.network = network def get_config(self): return dict(self._config._asdict()) @classmethod def from_config(cls, config, custom_objects=None): return cls(**config) @property def checkpoint_items(self): """Returns a dictionary of items to be additionally checkpointed.""" items = dict(encoder=self.network) return items
file_length: 6,583 | avg_line_length: 39.392638 | max_line_length: 80 | extension_type: py
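dual_encoder.py, in 'logits' mode, expects six inputs (word ids, mask and type ids for the left and right towers) and returns a dict with left_logits and right_logits. A rough sketch with randomly generated toy segments; the sizes and the fake_segment helper are illustrative, not part of the library.

import tensorflow as tf
from official.nlp.modeling import networks
from official.nlp.modeling.models import dual_encoder

encoder = networks.BertEncoder(vocab_size=100, num_layers=2, max_sequence_length=8)
model = dual_encoder.DualEncoder(network=encoder, max_seq_length=8, output='logits')

def fake_segment(batch=2, seq=8):
  # Random token ids standing in for a tokenized sentence (illustrative only).
  word_ids = tf.random.uniform((batch, seq), maxval=100, dtype=tf.int32)
  mask = tf.ones((batch, seq), dtype=tf.int32)
  type_ids = tf.zeros((batch, seq), dtype=tf.int32)
  return word_ids, mask, type_ids

left = fake_segment()
right = fake_segment()
outputs = model([*left, *right])
print(outputs['left_logits'].shape)   # similarity logits between the two towers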
models
models-master/official/nlp/modeling/models/seq2seq_transformer.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Implement Seq2Seq Transformer model by TF official NLP library. Model paper: https://arxiv.org/pdf/1706.03762.pdf """ import math import tensorflow as tf from official.modeling import tf_utils from official.nlp.modeling import layers from official.nlp.modeling.ops import beam_search EOS_ID = 1 class Seq2SeqTransformer(tf.keras.Model): """Transformer model with Keras. Implemented as described in: https://arxiv.org/pdf/1706.03762.pdf The Transformer model consists of an encoder and decoder. The input is an int sequence (or a batch of sequences). The encoder produces a continuous representation, and the decoder uses the encoder output to generate probabilities for the output sequence. """ def __init__(self, vocab_size=33708, embedding_width=512, dropout_rate=0.0, padded_decode=False, decode_max_length=None, extra_decode_length=0, beam_size=4, alpha=0.6, encoder_layer=None, decoder_layer=None, eos_id=EOS_ID, **kwargs): """Initialize layers to build Transformer model. Args: vocab_size: Size of vocabulary. embedding_width: Size of hidden layer for embedding. dropout_rate: Dropout probability. padded_decode: Whether to max_sequence_length padding is used. If set False, max_sequence_length padding is not used. decode_max_length: maximum number of steps to decode a sequence. extra_decode_length: Beam search will run extra steps to decode. beam_size: Number of beams for beam search alpha: The strength of length normalization for beam search. encoder_layer: An initialized encoder layer. decoder_layer: An initialized decoder layer. eos_id: Id of end of sentence token. **kwargs: other keyword arguments. 
""" super().__init__(**kwargs) self._vocab_size = vocab_size self._embedding_width = embedding_width self._dropout_rate = dropout_rate self._padded_decode = padded_decode self._decode_max_length = decode_max_length self._extra_decode_length = extra_decode_length self._beam_size = beam_size self._alpha = alpha self._eos_id = eos_id self.embedding_lookup = layers.OnDeviceEmbedding( vocab_size=self._vocab_size, embedding_width=self._embedding_width, initializer=tf.random_normal_initializer( mean=0., stddev=self._embedding_width**-0.5), scale_factor=self._embedding_width**0.5) self.encoder_layer = encoder_layer self.decoder_layer = decoder_layer self.position_embedding = layers.RelativePositionEmbedding( hidden_size=self._embedding_width) self.encoder_dropout = tf.keras.layers.Dropout(rate=self._dropout_rate) self.decoder_dropout = tf.keras.layers.Dropout(rate=self._dropout_rate) def get_config(self): config = { "vocab_size": self._vocab_size, "hidden_size": self._embedding_width, "dropout_rate": self._dropout_rate, "padded_decode": self._padded_decode, "decode_max_length": self._decode_max_length, "eos_id": self._eos_id, "extra_decode_length": self._extra_decode_length, "beam_size": self._beam_size, "alpha": self._alpha, "encoder_layer": self.encoder_layer, "decoder_layer": self.decoder_layer, } base_config = super(Seq2SeqTransformer, self).get_config() return dict(list(base_config.items()) + list(config.items())) def _embedding_linear(self, embedding_matrix, x): """Uses embeddings as linear transformation weights.""" embedding_matrix = tf.cast(embedding_matrix, dtype=self.compute_dtype) x = tf.cast(x, dtype=self.compute_dtype) batch_size = tf.shape(x)[0] length = tf.shape(x)[1] hidden_size = tf.shape(x)[2] vocab_size = tf.shape(embedding_matrix)[0] x = tf.reshape(x, [-1, hidden_size]) logits = tf.matmul(x, embedding_matrix, transpose_b=True) return tf.reshape(logits, [batch_size, length, vocab_size]) def _parse_inputs(self, inputs): """Parses the `call` inputs and returns an uniformed output.""" sources = inputs.get("inputs", None) input_mask = inputs.get("input_masks", None) embedded = inputs.get("embedded_inputs", None) if sources is None and embedded is not None: embedded_inputs = embedded boolean_mask = input_mask input_shape = tf_utils.get_shape_list(embedded, expected_rank=3) source_dtype = embedded.dtype elif sources is not None: embedded_inputs = self.embedding_lookup(sources) boolean_mask = tf.not_equal(sources, 0) input_shape = tf_utils.get_shape_list(sources, expected_rank=2) source_dtype = sources.dtype else: raise KeyError( "The call method expects either `inputs` or `embedded_inputs` and " "`input_masks` as input features.") return embedded_inputs, boolean_mask, input_shape, source_dtype def call(self, inputs): # pytype: disable=signature-mismatch # overriding-parameter-count-checks """Calculate target logits or inferred target sequences. Args: inputs: a dictionary of tensors. Feature `inputs` (optional): int tensor with shape `[batch_size, input_length]`. Feature `embedded_inputs` (optional): float tensor with shape `[batch_size, input_length, embedding_width]`. Feature `targets` (optional): None or int tensor with shape `[batch_size, target_length]`. Feature `input_masks` (optional): When providing the `embedded_inputs`, the dictionary must provide a boolean mask marking the filled time steps. The shape of the tensor is `[batch_size, input_length]`. Either `inputs` or `embedded_inputs` and `input_masks` must be present in the input dictionary. 
In the second case the projection of the integer tokens to the transformer embedding space is skipped and `input_masks` is expected to be present. Returns: If targets is defined, then return logits for each word in the target sequence, which is a float tensor with shape `(batch_size, target_length, vocab_size)`. If target is `None`, then generate output sequence one token at a time and returns a dictionary { outputs: `(batch_size, decoded_length)` scores: `(batch_size, 1)`} Even when `float16` is used, the output tensor(s) are always `float32`. Raises: NotImplementedError: If try to use padded decode method on CPU/GPUs. """ # Prepare inputs to the layer stack by adding positional encodings and # applying dropout. targets = inputs.get("targets", None) (embedded_inputs, boolean_mask, input_shape, source_dtype) = self._parse_inputs(inputs) embedding_mask = tf.cast(boolean_mask, embedded_inputs.dtype) embedded_inputs *= tf.expand_dims(embedding_mask, -1) # Attention_mask generation. attention_mask = tf.cast( tf.reshape(boolean_mask, [input_shape[0], 1, input_shape[1]]), dtype=source_dtype) broadcast_ones = tf.ones( shape=[input_shape[0], input_shape[1], 1], dtype=source_dtype) attention_mask = broadcast_ones * attention_mask pos_encoding = self.position_embedding(embedded_inputs) pos_encoding = tf.cast(pos_encoding, embedded_inputs.dtype) encoder_inputs = embedded_inputs + pos_encoding encoder_inputs = self.encoder_dropout(encoder_inputs) encoder_outputs = self.encoder_layer( encoder_inputs, attention_mask=attention_mask) if targets is None: if self._padded_decode: max_decode_length = self._decode_max_length else: max_decode_length = self._decode_max_length or ( tf.shape(encoder_outputs)[1] + self._extra_decode_length) symbols_to_logits_fn = self._get_symbols_to_logits_fn(max_decode_length) batch_size = tf.shape(encoder_outputs)[0] # Create initial set of IDs that will be passed to symbols_to_logits_fn. initial_ids = tf.zeros([batch_size], dtype=tf.int32) # Create cache storing decoder attention values for each layer. init_decode_length = (max_decode_length if self._padded_decode else 0) num_heads = self.decoder_layer.num_attention_heads dim_per_head = self._embedding_width // num_heads # Cache dtype needs to match beam_search dtype. # pylint: disable=g-complex-comprehension cache = { str(layer): { "key": tf.zeros( [batch_size, init_decode_length, num_heads, dim_per_head], dtype=self.compute_dtype), "value": tf.zeros( [batch_size, init_decode_length, num_heads, dim_per_head], dtype=self.compute_dtype) } for layer in range(self.decoder_layer.num_layers) } # pylint: enable=g-complex-comprehension # Add encoder output and attention bias to the cache. encoder_outputs = tf.cast(encoder_outputs, dtype=self.compute_dtype) attention_mask = tf.cast( tf.reshape(boolean_mask, [input_shape[0], 1, input_shape[1]]), dtype=self.compute_dtype) cache["encoder_outputs"] = encoder_outputs cache["encoder_decoder_attention_mask"] = attention_mask # Use beam search to find the top beam_size sequences and scores. 
decoded_ids, scores = beam_search.sequence_beam_search( symbols_to_logits_fn=symbols_to_logits_fn, initial_ids=initial_ids, initial_cache=cache, vocab_size=self._vocab_size, beam_size=self._beam_size, alpha=self._alpha, max_decode_length=max_decode_length, eos_id=self._eos_id, padded_decode=self._padded_decode, dtype=self.compute_dtype) # Get the top sequence for each batch element top_decoded_ids = decoded_ids[:, 0, 1:] top_scores = scores[:, 0] return {"outputs": top_decoded_ids, "scores": top_scores} # Shift targets to the right, and remove the last element targets = tf.pad(targets, [[0, 0], [1, 0]])[:, :-1] decoder_inputs = self.embedding_lookup(targets) length = tf.shape(decoder_inputs)[1] pos_encoding = self.position_embedding(decoder_inputs) pos_encoding = tf.cast(pos_encoding, embedded_inputs.dtype) decoder_inputs += pos_encoding decoder_inputs = self.decoder_dropout(decoder_inputs) decoder_shape = tf_utils.get_shape_list(decoder_inputs, expected_rank=3) batch_size = decoder_shape[0] decoder_length = decoder_shape[1] self_attention_mask = tf.linalg.band_part(tf.ones([length, length]), -1, 0) self_attention_mask = tf.reshape(self_attention_mask, [1, length, length]) self_attention_mask = tf.tile(self_attention_mask, [batch_size, 1, 1]) attention_mask = tf.cast( tf.expand_dims(boolean_mask, axis=1), dtype=source_dtype) attention_mask = tf.tile(attention_mask, [1, decoder_length, 1]) outputs = self.decoder_layer( decoder_inputs, encoder_outputs, self_attention_mask=self_attention_mask, cross_attention_mask=attention_mask) logits = self._embedding_linear(self.embedding_lookup.embeddings, outputs) # Model outputs should be float32 to avoid numeric issues. # https://www.tensorflow.org/guide/mixed_precision#building_the_model logits = tf.cast(logits, tf.float32) return logits def _get_symbols_to_logits_fn(self, max_decode_length): """Returns a decoding function that calculates logits of the next tokens.""" timing_signal = self.position_embedding( inputs=None, length=max_decode_length + 1) timing_signal = tf.cast(timing_signal, dtype=self.compute_dtype) decoder_self_attention_mask = tf.linalg.band_part( tf.ones([max_decode_length, max_decode_length], dtype=self.compute_dtype), -1, 0) decoder_self_attention_mask = tf.reshape( decoder_self_attention_mask, [1, max_decode_length, max_decode_length]) def symbols_to_logits_fn(ids, i, cache): """Generate logits for next potential IDs. Args: ids: Current decoded sequences. int tensor with shape `(batch_size * beam_size, i + 1)`. i: Loop index. cache: Dictionary of values storing the encoder output, encoder-decoder attention bias, and previous decoder attention values. Returns: Tuple of (logits with shape `(batch_size * beam_size, vocab_size)`, updated cache values) """ # Set decoder input to the last generated IDs decoder_input = ids[:, -1:] # Preprocess decoder input by getting embeddings and adding timing signal. decoder_input = self.embedding_lookup(decoder_input) decoder_input += timing_signal[i] if self._padded_decode: # indexing does not work on TPU. 
bias_shape = decoder_self_attention_mask.shape.as_list() self_attention_mask = tf.slice(decoder_self_attention_mask, [0, i, 0], [bias_shape[0], 1, bias_shape[2]]) else: self_attention_mask = decoder_self_attention_mask[:, i:i + 1, :i + 1] decoder_shape = tf_utils.get_shape_list(decoder_input, expected_rank=3) batch_size = decoder_shape[0] decoder_length = decoder_shape[1] self_attention_mask = tf.tile(self_attention_mask, [batch_size, 1, 1]) attention_mask = cache.get("encoder_decoder_attention_mask") attention_mask = tf.tile(attention_mask, [1, decoder_length, 1]) decoder_outputs = self.decoder_layer( decoder_input, cache.get("encoder_outputs"), self_attention_mask=self_attention_mask, cross_attention_mask=attention_mask, cache=cache, decode_loop_step=i if self._padded_decode else None) decoder_outputs = tf.cast(decoder_outputs, dtype=self.compute_dtype) logits = self._embedding_linear(self.embedding_lookup.embeddings, decoder_outputs) logits = tf.squeeze(logits, axis=[1]) return logits, cache return symbols_to_logits_fn class TransformerEncoder(tf.keras.layers.Layer): """Transformer encoder. Transformer encoder is made up of N identical layers. Each layer is composed of the sublayers: 1. Self-attention layer 2. Feedforward network (which is 2 fully-connected layers) """ def __init__(self, num_layers=6, num_attention_heads=8, intermediate_size=2048, activation="relu", dropout_rate=0.0, attention_dropout_rate=0.0, use_bias=False, norm_first=True, norm_epsilon=1e-6, intermediate_dropout=0.0, **kwargs): """Initialize a Transformer encoder. Args: num_layers: Number of layers. num_attention_heads: Number of attention heads. intermediate_size: Size of the intermediate (Feedforward) layer. activation: Activation for the intermediate layer. dropout_rate: Dropout probability. attention_dropout_rate: Dropout probability for attention layers. use_bias: Whether to enable use_bias in attention layer. If set False, use_bias in attention layer is disabled. norm_first: Whether to normalize inputs to attention and intermediate dense layers. If set False, output of attention and intermediate dense layers is normalized. norm_epsilon: Epsilon value to initialize normalization layers. intermediate_dropout: Dropout probability for intermediate_dropout_layer. **kwargs: key word arguemnts passed to tf.keras.layers.Layer. 
""" super(TransformerEncoder, self).__init__(**kwargs) self.num_layers = num_layers self.num_attention_heads = num_attention_heads self._intermediate_size = intermediate_size self._activation = activation self._dropout_rate = dropout_rate self._attention_dropout_rate = attention_dropout_rate self._use_bias = use_bias self._norm_first = norm_first self._norm_epsilon = norm_epsilon self._intermediate_dropout = intermediate_dropout def build(self, input_shape): """Implements build() for the layer.""" self.encoder_layers = [] for i in range(self.num_layers): self.encoder_layers.append( layers.TransformerEncoderBlock( num_attention_heads=self.num_attention_heads, inner_dim=self._intermediate_size, inner_activation=self._activation, output_dropout=self._dropout_rate, attention_dropout=self._attention_dropout_rate, use_bias=self._use_bias, norm_first=self._norm_first, norm_epsilon=self._norm_epsilon, inner_dropout=self._intermediate_dropout, attention_initializer=attention_initializer(input_shape[2]), name=("layer_%d" % i))) self.output_normalization = tf.keras.layers.LayerNormalization( epsilon=self._norm_epsilon, dtype="float32") super(TransformerEncoder, self).build(input_shape) def get_config(self): config = { "num_layers": self.num_layers, "num_attention_heads": self.num_attention_heads, "intermediate_size": self._intermediate_size, "activation": self._activation, "dropout_rate": self._dropout_rate, "attention_dropout_rate": self._attention_dropout_rate, "use_bias": self._use_bias, "norm_first": self._norm_first, "norm_epsilon": self._norm_epsilon, "intermediate_dropout": self._intermediate_dropout } base_config = super(TransformerEncoder, self).get_config() return dict(list(base_config.items()) + list(config.items())) def call(self, encoder_inputs, attention_mask=None): """Return the output of the encoder. Args: encoder_inputs: A tensor with shape `(batch_size, input_length, hidden_size)`. attention_mask: A mask for the encoder self-attention layer with shape `(batch_size, input_length, input_length)`. Returns: Output of encoder which is a `float32` tensor with shape `(batch_size, input_length, hidden_size)`. """ for layer_idx in range(self.num_layers): encoder_inputs = self.encoder_layers[layer_idx]( [encoder_inputs, attention_mask]) output_tensor = encoder_inputs output_tensor = self.output_normalization(output_tensor) return output_tensor class TransformerDecoder(tf.keras.layers.Layer): """Transformer decoder. Like the encoder, the decoder is made up of N identical layers. Each layer is composed of the sublayers: 1. Self-attention layer 2. Multi-headed attention layer combining encoder outputs with results from the previous self-attention layer. 3. Feedforward network (2 fully-connected layers) """ def __init__(self, num_layers=6, num_attention_heads=8, intermediate_size=2048, activation="relu", dropout_rate=0.0, attention_dropout_rate=0.0, use_bias=False, norm_first=True, norm_epsilon=1e-6, intermediate_dropout=0.0, **kwargs): """Initialize a Transformer decoder. Args: num_layers: Number of layers. num_attention_heads: Number of attention heads. intermediate_size: Size of the intermediate (Feedforward) layer. activation: Activation for the intermediate layer. dropout_rate: Dropout probability. attention_dropout_rate: Dropout probability for attention layers. use_bias: Whether to enable use_bias in attention layer. If set `False`, use_bias in attention layer is disabled. norm_first: Whether to normalize inputs to attention and intermediate dense layers. 
If set `False`, output of attention and intermediate dense layers is normalized. norm_epsilon: Epsilon value to initialize normalization layers. intermediate_dropout: Dropout probability for intermediate_dropout_layer. **kwargs: key word arguemnts passed to tf.keras.layers.Layer. """ super(TransformerDecoder, self).__init__(**kwargs) self.num_layers = num_layers self.num_attention_heads = num_attention_heads self._intermediate_size = intermediate_size self._activation = activation self._dropout_rate = dropout_rate self._attention_dropout_rate = attention_dropout_rate self._use_bias = use_bias self._norm_first = norm_first self._norm_epsilon = norm_epsilon self._intermediate_dropout = intermediate_dropout def build(self, input_shape): """Implements build() for the layer.""" self.decoder_layers = [] for i in range(self.num_layers): self.decoder_layers.append( layers.TransformerDecoderBlock( num_attention_heads=self.num_attention_heads, intermediate_size=self._intermediate_size, intermediate_activation=self._activation, dropout_rate=self._dropout_rate, attention_dropout_rate=self._attention_dropout_rate, use_bias=self._use_bias, norm_first=self._norm_first, norm_epsilon=self._norm_epsilon, intermediate_dropout=self._intermediate_dropout, attention_initializer=attention_initializer(input_shape[2]), name=("layer_%d" % i))) self.output_normalization = tf.keras.layers.LayerNormalization( epsilon=1e-6, dtype="float32") super(TransformerDecoder, self).build(input_shape) def get_config(self): config = { "num_layers": self.num_layers, "num_attention_heads": self.num_attention_heads, "intermediate_size": self._intermediate_size, "activation": self._activation, "dropout_rate": self._dropout_rate, "attention_dropout_rate": self._attention_dropout_rate, "use_bias": self._use_bias, "norm_first": self._norm_first, "norm_epsilon": self._norm_epsilon, "intermediate_dropout": self._intermediate_dropout } base_config = super(TransformerDecoder, self).get_config() return dict(list(base_config.items()) + list(config.items())) def call(self, target, memory, self_attention_mask=None, cross_attention_mask=None, cache=None, decode_loop_step=None, return_all_decoder_outputs=False): """Return the output of the decoder layer stacks. Args: target: A tensor with shape `(batch_size, target_length, hidden_size)`. memory: A tensor with shape `(batch_size, input_length, hidden_size)`. self_attention_mask: A tensor with shape `(batch_size, target_len, target_length)`, the mask for decoder self-attention layer. cross_attention_mask: A tensor with shape `(batch_size, target_length, input_length)` which is the mask for encoder-decoder attention layer. cache: (Used for fast decoding) A nested dictionary storing previous decoder self-attention values. The items are: {layer_n: {"k": A tensor with shape `(batch_size, i, key_channels)`, "v": A tensor with shape `(batch_size, i, value_channels)`}, ...} decode_loop_step: An integer, the step number of the decoding loop. Used only for autoregressive inference on TPU. return_all_decoder_outputs: Return all decoder layer outputs. Note that the outputs are layer normed. This is useful when introducing per layer auxiliary loss. Returns: Output of decoder. float32 tensor with shape `(batch_size, target_length, hidden_size`). """ output_tensor = target decoder_outputs = [] for layer_idx in range(self.num_layers): transformer_inputs = [ output_tensor, memory, cross_attention_mask, self_attention_mask ] # Gets the cache for decoding. 
if cache is None: output_tensor, _ = self.decoder_layers[layer_idx](transformer_inputs) else: cache_layer_idx = str(layer_idx) output_tensor, cache[cache_layer_idx] = self.decoder_layers[layer_idx]( transformer_inputs, cache=cache[cache_layer_idx], decode_loop_step=decode_loop_step) if return_all_decoder_outputs: decoder_outputs.append(self.output_normalization(output_tensor)) if return_all_decoder_outputs: return decoder_outputs else: return self.output_normalization(output_tensor) def attention_initializer(hidden_size): """Initializer for attention layers in Seq2SeqTransformer.""" hidden_size = int(hidden_size) limit = math.sqrt(6.0 / (hidden_size + hidden_size)) return tf.keras.initializers.RandomUniform(minval=-limit, maxval=limit)
file_length: 25,791 | avg_line_length: 40.333333 | max_line_length: 100 | extension_type: py
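seq2seq_transformer.py documents two call modes: with 'targets' the model returns per-token logits, and without them it runs beam search and returns decoded ids and scores. A small eager sketch under assumed toy sizes (two layers, 32-wide embeddings, vocab 100); it mirrors the documented call contract rather than any recommended configuration.

import tensorflow as tf
from official.nlp.modeling.models import seq2seq_transformer

encoder = seq2seq_transformer.TransformerEncoder(
    num_layers=2, num_attention_heads=4, intermediate_size=64)
decoder = seq2seq_transformer.TransformerDecoder(
    num_layers=2, num_attention_heads=4, intermediate_size=64)
model = seq2seq_transformer.Seq2SeqTransformer(
    vocab_size=100, embedding_width=32, beam_size=2, decode_max_length=10,
    encoder_layer=encoder, decoder_layer=decoder)

sources = tf.constant([[4, 7, 9, 0], [5, 3, 0, 0]], dtype=tf.int32)
targets = tf.constant([[6, 2, 1, 0], [8, 1, 0, 0]], dtype=tf.int32)

# Training-style call: per-token logits over the vocabulary.
logits = model({'inputs': sources, 'targets': targets})   # (2, 4, 100)

# Inference-style call: beam search returns decoded ids and their scores.
decoded = model({'inputs': sources})
print(decoded['outputs'].shape, decoded['scores'].shape)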
models
models-master/official/nlp/modeling/models/bert_token_classifier.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """BERT token classifier.""" # pylint: disable=g-classes-have-attributes import collections import tensorflow as tf @tf.keras.utils.register_keras_serializable(package='Text') class BertTokenClassifier(tf.keras.Model): """Token classifier model based on a BERT-style transformer-based encoder. This is an implementation of the network structure surrounding a transformer encoder as described in "BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding" (https://arxiv.org/abs/1810.04805). The BertTokenClassifier allows a user to pass in a transformer stack, and instantiates a token classification network based on the passed `num_classes` argument. *Note* that the model is constructed by [Keras Functional API](https://keras.io/guides/functional_api/). Args: network: A transformer network. This network should output a sequence output and a classification output. Furthermore, it should expose its embedding table via a `get_embedding_table` method. num_classes: Number of classes to predict from the classification network. initializer: The initializer (if any) to use in the classification networks. Defaults to a Glorot uniform initializer. output: The output style for this network. Can be either `logits` or `predictions`. dropout_rate: The dropout probability of the token classification head. output_encoder_outputs: Whether to include intermediate sequence output in the final output. """ def __init__(self, network, num_classes, initializer='glorot_uniform', output='logits', dropout_rate=0.1, output_encoder_outputs=False, **kwargs): # We want to use the inputs of the passed network as the inputs to this # Model. To do this, we need to keep a handle to the network inputs for use # when we construct the Model object at the end of init. inputs = network.inputs # Because we have a copy of inputs to create this Model object, we can # invoke the Network object with its own input tensors to start the Model. outputs = network(inputs) if isinstance(outputs, list): sequence_output = outputs[0] else: sequence_output = outputs['sequence_output'] sequence_output = tf.keras.layers.Dropout(rate=dropout_rate)( sequence_output) classifier = tf.keras.layers.Dense( num_classes, activation=None, kernel_initializer=initializer, name='predictions/transform/logits') logits = classifier(sequence_output) if output == 'logits': output_tensors = {'logits': logits} elif output == 'predictions': output_tensors = { 'predictions': tf.keras.layers.Activation(tf.nn.log_softmax)(logits) } else: raise ValueError( ('Unknown `output` value "%s". 
`output` can be either "logits" or ' '"predictions"') % output) if output_encoder_outputs: output_tensors['encoder_outputs'] = sequence_output # b/164516224 # Once we've created the network using the Functional API, we call # super().__init__ as though we were invoking the Functional API Model # constructor, resulting in this object having all the properties of a model # created using the Functional API. Once super().__init__ is called, we # can assign attributes to `self` - note that all `self` assignments are # below this line. super(BertTokenClassifier, self).__init__( inputs=inputs, outputs=output_tensors, **kwargs) self._network = network config_dict = { 'network': network, 'num_classes': num_classes, 'initializer': initializer, 'output': output, 'output_encoder_outputs': output_encoder_outputs } # We are storing the config dict as a namedtuple here to ensure checkpoint # compatibility with an earlier version of this model which did not track # the config dict attribute. TF does not track immutable attrs which # do not contain Trackables, so by creating a config namedtuple instead of # a dict we avoid tracking it. config_cls = collections.namedtuple('Config', config_dict.keys()) self._config = config_cls(**config_dict) self.classifier = classifier self.logits = logits @property def checkpoint_items(self): return dict(encoder=self._network) def get_config(self): return dict(self._config._asdict()) @classmethod def from_config(cls, config, custom_objects=None): return cls(**config)
file_length: 5,216 | avg_line_length: 37.932836 | max_line_length: 80 | extension_type: py
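bert_token_classifier.py also supports output='predictions' (log-softmax instead of raw logits) and output_encoder_outputs=True (exposing the encoder's sequence output alongside the head). A brief sketch of those two options with illustrative sizes:

import tensorflow as tf
from official.nlp.modeling import networks
from official.nlp.modeling.models import bert_token_classifier

encoder = networks.BertEncoder(vocab_size=100, num_layers=2, max_sequence_length=8)
model = bert_token_classifier.BertTokenClassifier(
    encoder, num_classes=4, output='predictions', output_encoder_outputs=True)

word_ids = tf.constant([[5, 9, 2, 0]], dtype=tf.int32)
mask = tf.constant([[1, 1, 1, 0]], dtype=tf.int32)
type_ids = tf.zeros_like(word_ids)

outputs = model([word_ids, mask, type_ids])
log_probs = outputs['predictions']        # log-softmax over the 4 classes
sequence = outputs['encoder_outputs']     # (1, 4, hidden_size) encoder features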
models
models-master/official/nlp/modeling/models/xlnet_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for XLNet classifier network.""" from absl.testing import parameterized import numpy as np import tensorflow as tf from official.nlp.modeling import networks from official.nlp.modeling.models import xlnet def _get_xlnet_base() -> tf.keras.layers.Layer: """Returns a trivial base XLNet model.""" return networks.XLNetBase( vocab_size=100, num_layers=2, hidden_size=4, num_attention_heads=2, head_size=2, inner_size=2, dropout_rate=0., attention_dropout_rate=0., attention_type='bi', bi_data=True, initializer=tf.keras.initializers.RandomNormal(stddev=0.1), two_stream=False, tie_attention_biases=True, reuse_length=0, inner_activation='relu') class XLNetMaskedLMTest(tf.test.TestCase): def test_xlnet_masked_lm_head(self): hidden_size = 10 seq_length = 8 batch_size = 2 masked_lm = xlnet.XLNetMaskedLM(vocab_size=10, hidden_size=hidden_size, initializer='glorot_uniform') sequence_data = np.random.uniform(size=(batch_size, seq_length)) embedding_table = np.random.uniform(size=(hidden_size, hidden_size)) mlm_output = masked_lm(sequence_data, embedding_table) self.assertAllClose(mlm_output.shape, (batch_size, hidden_size)) class XLNetPretrainerTest(tf.test.TestCase): def test_xlnet_trainer(self): """Validates that the Keras object can be created.""" seq_length = 4 num_predictions = 2 # Build a simple XLNet based network to use with the XLNet trainer. xlnet_base = _get_xlnet_base() # Create an XLNet trainer with the created network. xlnet_trainer_model = xlnet.XLNetPretrainer(network=xlnet_base) inputs = dict( input_word_ids=tf.keras.layers.Input( shape=(seq_length,), dtype=tf.int32, name='input_word_ids'), input_type_ids=tf.keras.layers.Input( shape=(seq_length,), dtype=tf.int32, name='input_type_ids'), input_mask=tf.keras.layers.Input( shape=(seq_length,), dtype=tf.int32, name='input_mask'), permutation_mask=tf.keras.layers.Input( shape=(seq_length, seq_length,), dtype=tf.int32, name='permutation_mask'), target_mapping=tf.keras.layers.Input( shape=(num_predictions, seq_length), dtype=tf.int32, name='target_mapping'), masked_tokens=tf.keras.layers.Input( shape=(seq_length,), dtype=tf.int32, name='masked_tokens')) logits, _ = xlnet_trainer_model(inputs) # [None, hidden_size, vocab_size] expected_output_shape = [None, 4, 100] self.assertAllEqual(expected_output_shape, logits.shape.as_list()) def test_xlnet_tensor_call(self): """Validates that the Keras object can be invoked.""" seq_length = 4 batch_size = 2 num_predictions = 2 # Build a simple XLNet based network to use with the XLNet trainer. xlnet_base = _get_xlnet_base() # Create an XLNet trainer with the created network. 
xlnet_trainer_model = xlnet.XLNetPretrainer(network=xlnet_base) sequence_shape = (batch_size, seq_length) inputs = dict( input_word_ids=np.random.randint( 10, size=sequence_shape, dtype='int32'), input_type_ids=np.random.randint(2, size=sequence_shape, dtype='int32'), input_mask=np.random.randint(2, size=sequence_shape).astype('int32'), permutation_mask=np.random.randint( 2, size=(batch_size, seq_length, seq_length)).astype('int32'), target_mapping=np.random.randint( 10, size=(num_predictions, seq_length), dtype='int32'), masked_tokens=np.random.randint( 10, size=sequence_shape, dtype='int32')) xlnet_trainer_model(inputs) def test_serialize_deserialize(self): """Validates that the XLNet trainer can be serialized and deserialized.""" # Build a simple XLNet based network to use with the XLNet trainer. xlnet_base = _get_xlnet_base() # Create an XLNet trainer with the created network. xlnet_trainer_model = xlnet.XLNetPretrainer( network=xlnet_base, mlm_activation='gelu', mlm_initializer='random_normal') # Create another XLNet trainer via serialization and deserialization. config = xlnet_trainer_model.get_config() new_xlnet_trainer_model = xlnet.XLNetPretrainer.from_config( config) # Validate that the config can be forced to JSON. _ = new_xlnet_trainer_model.to_json() # If serialization was successful, then the new config should match the old. self.assertAllEqual(xlnet_trainer_model.get_config(), new_xlnet_trainer_model.get_config()) class XLNetClassifierTest(tf.test.TestCase, parameterized.TestCase): def test_xlnet_trainer(self): """Validate that the Keras object can be created.""" num_classes = 2 seq_length = 4 # Build a simple XLNet based network to use with the XLNet trainer. xlnet_base = _get_xlnet_base() # Create an XLNet trainer with the created network. xlnet_trainer_model = xlnet.XLNetClassifier( network=xlnet_base, num_classes=num_classes, initializer=tf.keras.initializers.RandomNormal(stddev=0.1), summary_type='last', dropout_rate=0.1) inputs = dict( input_word_ids=tf.keras.layers.Input( shape=(seq_length,), dtype=tf.int32, name='input_word_ids'), input_type_ids=tf.keras.layers.Input( shape=(seq_length,), dtype=tf.int32, name='input_type_ids'), input_mask=tf.keras.layers.Input( shape=(seq_length,), dtype=tf.int32, name='input_mask'), permutation_mask=tf.keras.layers.Input( shape=(seq_length, seq_length,), dtype=tf.int32, name='permutation_mask'), masked_tokens=tf.keras.layers.Input( shape=(seq_length,), dtype=tf.int32, name='masked_tokens')) logits = xlnet_trainer_model(inputs) expected_classification_shape = [None, num_classes] self.assertAllEqual(expected_classification_shape, logits.shape.as_list()) @parameterized.parameters(1, 2) def test_xlnet_tensor_call(self, num_classes): """Validates that the Keras object can be invoked.""" seq_length = 4 batch_size = 2 # Build a simple XLNet based network to use with the XLNet trainer. xlnet_base = _get_xlnet_base() # Create an XLNet trainer with the created network. 
xlnet_trainer_model = xlnet.XLNetClassifier( network=xlnet_base, num_classes=num_classes, initializer=tf.keras.initializers.RandomNormal(stddev=0.1), summary_type='last', dropout_rate=0.1) sequence_shape = (batch_size, seq_length) inputs = dict( input_word_ids=np.random.randint( 10, size=sequence_shape, dtype='int32'), input_type_ids=np.random.randint(2, size=sequence_shape, dtype='int32'), input_mask=np.random.randint(2, size=sequence_shape).astype('int32'), permutation_mask=np.random.randint( 2, size=(batch_size, seq_length, seq_length)).astype('int32'), masked_tokens=np.random.randint( 10, size=sequence_shape, dtype='int32')) xlnet_trainer_model(inputs) def test_serialize_deserialize(self): """Validates that the XLNet trainer can be serialized and deserialized.""" # Build a simple XLNet based network to use with the XLNet trainer. xlnet_base = _get_xlnet_base() # Create an XLNet trainer with the created network. xlnet_trainer_model = xlnet.XLNetClassifier( network=xlnet_base, num_classes=2, initializer=tf.keras.initializers.RandomNormal(stddev=0.1), summary_type='last', dropout_rate=0.1) # Create another XLNet trainer via serialization and deserialization. config = xlnet_trainer_model.get_config() new_xlnet_trainer_model = xlnet.XLNetClassifier.from_config( config) # Validate that the config can be forced to JSON. _ = new_xlnet_trainer_model.to_json() # If serialization was successful, then the new config should match the old. self.assertAllEqual(xlnet_trainer_model.get_config(), new_xlnet_trainer_model.get_config()) class XLNetSpanLabelerTest(tf.test.TestCase): def test_xlnet_trainer(self): """Validate that the Keras object can be created.""" top_n = 2 seq_length = 4 # Build a simple XLNet based network to use with the XLNet trainer. xlnet_base = _get_xlnet_base() # Create an XLNet trainer with the created network. xlnet_trainer_model = xlnet.XLNetSpanLabeler( network=xlnet_base, start_n_top=top_n, end_n_top=top_n, initializer=tf.keras.initializers.RandomNormal(stddev=0.1), span_labeling_activation='tanh', dropout_rate=0.1) inputs = dict( input_word_ids=tf.keras.layers.Input( shape=(seq_length,), dtype=tf.int32, name='input_word_ids'), input_type_ids=tf.keras.layers.Input( shape=(seq_length,), dtype=tf.int32, name='input_type_ids'), input_mask=tf.keras.layers.Input( shape=(seq_length,), dtype=tf.int32, name='input_mask'), paragraph_mask=tf.keras.layers.Input( shape=(seq_length,), dtype=tf.int32, name='paragraph_mask'), class_index=tf.keras.layers.Input( shape=(), dtype=tf.int32, name='class_index'), start_positions=tf.keras.layers.Input( shape=(), dtype=tf.int32, name='start_positions')) outputs = xlnet_trainer_model(inputs) self.assertIsInstance(outputs, dict) # Test tensor value calls for the created model. 
batch_size = 2 sequence_shape = (batch_size, seq_length) inputs = dict( input_word_ids=np.random.randint( 10, size=sequence_shape, dtype='int32'), input_type_ids=np.random.randint(2, size=sequence_shape, dtype='int32'), input_mask=np.random.randint(2, size=sequence_shape).astype('int32'), paragraph_mask=np.random.randint( 1, size=(sequence_shape)).astype('int32'), class_index=np.random.randint(1, size=(batch_size)).astype('uint8'), start_positions=tf.random.uniform( shape=(batch_size,), maxval=5, dtype=tf.int32)) common_keys = { 'start_logits', 'end_logits', 'start_predictions', 'end_predictions', 'class_logits', } inference_keys = { 'start_top_predictions', 'end_top_predictions', 'start_top_index', 'end_top_index', } outputs = xlnet_trainer_model(inputs) self.assertSetEqual(common_keys | inference_keys, set(outputs.keys())) outputs = xlnet_trainer_model(inputs, training=True) self.assertIsInstance(outputs, dict) self.assertSetEqual(common_keys, set(outputs.keys())) self.assertIsInstance(outputs, dict) def test_serialize_deserialize(self): """Validates that the XLNet trainer can be serialized and deserialized.""" # Build a simple XLNet based network to use with the XLNet trainer. xlnet_base = _get_xlnet_base() # Create an XLNet trainer with the created network. xlnet_trainer_model = xlnet.XLNetSpanLabeler( network=xlnet_base, start_n_top=2, end_n_top=2, initializer=tf.keras.initializers.RandomNormal(stddev=0.1), span_labeling_activation='tanh', dropout_rate=0.1) # Create another XLNet trainer via serialization and deserialization. config = xlnet_trainer_model.get_config() new_xlnet_trainer_model = xlnet.XLNetSpanLabeler.from_config( config) # Validate that the config can be forced to JSON. _ = new_xlnet_trainer_model.to_json() # If serialization was successful, then the new config should match the old. self.assertAllEqual(xlnet_trainer_model.get_config(), new_xlnet_trainer_model.get_config()) if __name__ == '__main__': tf.test.main()
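The tests above build their random inputs inline. The helper below is an illustrative sketch (not part of the test file) that packages the same randomly generated tensors, mainly to document the shapes XLNetPretrainer expects; the default sizes mirror the tests and are otherwise arbitrary.

import numpy as np


def random_pretrainer_inputs(batch_size=2, seq_length=4, num_predictions=2,
                             vocab_size=10):
  """Returns a random input dict shaped like the XLNetPretrainer test inputs."""
  sequence_shape = (batch_size, seq_length)
  return dict(
      # Token ids, segment ids and padding mask: [batch_size, seq_length].
      input_word_ids=np.random.randint(
          vocab_size, size=sequence_shape, dtype='int32'),
      input_type_ids=np.random.randint(2, size=sequence_shape, dtype='int32'),
      input_mask=np.random.randint(2, size=sequence_shape).astype('int32'),
      # Permutation mask: [batch_size, seq_length, seq_length].
      permutation_mask=np.random.randint(
          2, size=(batch_size, seq_length, seq_length)).astype('int32'),
      # Target mapping, shaped as in the tests: [num_predictions, seq_length].
      target_mapping=np.random.randint(
          vocab_size, size=(num_predictions, seq_length), dtype='int32'),
      # Masked-token indicator: [batch_size, seq_length].
      masked_tokens=np.random.randint(
          vocab_size, size=sequence_shape, dtype='int32'))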
file_length: 12608
avg_line_length: 38.158385
max_line_length: 80
extension_type: py

repo: models
file: models-master/official/nlp/modeling/models/t5.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Implement T5 Transformer model by TF official NLP library. Model paper: https://arxiv.org/pdf/1910.10683.pdf T5TransformerParams and T5Transformer are public interfaces. Other modules are implementation details, so users should never build libraries depending on them. To use with Keras, users can wrap them within Keras customized layers. """ import dataclasses import functools import math from typing import Callable, Dict, Optional, Sequence, Text, Union import numpy as np import tensorflow as tf from official.modeling import tf_utils ShapeLike = Union[int, Sequence[int], tf.TensorShape] Initializer = Callable[..., tf.Tensor] class Module(tf.Module): """The nn Module extends from the tf.Module.""" def __init__(self, dtype: tf.DType = tf.float32, name: Optional[Text] = None): """Initializes the nn Module. Args: dtype: the variable allocation dtype. name: a string for the module name. """ super().__init__(name=name) self.dtype = dtype def create_variable(self, name: Text, shape: ShapeLike, initializer: Initializer, dtype: tf.DType = tf.float32, **kwargs): initializer = tf_utils.clone_initializer(initializer) return tf.Variable(initializer(shape, dtype=dtype, **kwargs), name=name) def read_variable(self, variable: tf.Variable, as_dtype: Optional[tf.DType] = None): if as_dtype is not None: variable = tf.cast(variable, dtype=as_dtype) return variable @tf.custom_gradient def dense_gradient(x: tf.Tensor): """Identity operation whose gradient is converted to a ``tf.Tensor``. >>> embedding = tf.Variable(tf.random.normal([3, 3])) >>> with tf.GradientTape() as tape: ... y = tf.nn.embedding_lookup(dense_gradient(embedding), [1]) >>> tape.gradient(y, embedding).numpy() array([[ 0., 0., 0.], [ 1., 1., 1.], [ 0., 0., 0.]], dtype=float32) Args: x: A ``tf.Tensor``. Returns: The input ``tf.Tensor`` and a dense identity gradient function. """ def grad(dy): if isinstance(dy, tf.IndexedSlices): return tf.convert_to_tensor(dy) else: return dy return x, grad def make_attention_mask(query_input, key_input, pairwise_fn=tf.multiply, dtype=tf.float32): """Mask-making helper for attention weights. In case of 1d inputs (i.e., `[batch..., len_q]`, `[batch..., len_kv]`, the attention weights will be `[batch..., heads, len_q, len_kv]` and this function will produce `[batch..., 1, len_q, len_kv]`. Args: query_input: a batched, flat input of query_length size key_input: a batched, flat input of key_length size pairwise_fn: broadcasting elementwise comparison function dtype: mask return dtype Returns: A `[batch..., 1, len_q, len_kv]` shaped mask for 1d attention. """ mask = pairwise_fn( tf.expand_dims(query_input, axis=-1), tf.expand_dims(key_input, axis=-2)) mask = tf.expand_dims(mask, axis=-3) return tf.cast(mask, dtype=dtype) def make_causal_mask(x, dtype=tf.float32): """Make a causal mask for self-attention. 
In case of 1d inputs (i.e., `[batch..., len]`, the self-attention weights will be `[batch..., heads, len, len]` and this function will produce a causal mask of shape `[batch..., 1, len, len]`. Args: x: input array of shape `[batch..., len]` dtype: mask return dtype Returns: A `[batch..., 1, len, len]` shaped causal mask for 1d attention. """ x_shape = tf.shape(x) idxs = tf.broadcast_to(tf.range(x_shape[-1], dtype=tf.int32), x_shape) return make_attention_mask(idxs, idxs, tf.greater_equal, dtype=dtype) class Embed(Module): """Embedding Module. A parameterized function from integers [0, n) to d-dimensional vectors. """ def __init__(self, vocab_size: int, features: int, embeddings_initializer: Optional[Initializer] = None, compute_dtype: tf.DType = tf.float32, **kwargs): super().__init__(**kwargs) self.vocab_size = vocab_size self.features = features self.compute_dtype = compute_dtype if embeddings_initializer: self.embed_init = embeddings_initializer else: self.embed_init = tf.keras.initializers.TruncatedNormal(stddev=1.0) with self.name_scope: self.embeddings = self.create_variable( "embedding", [self.vocab_size, self.features], self.embed_init, dtype=self.dtype) @tf.Module.with_name_scope def __call__(self, inputs: tf.Tensor, one_hot: bool = True): """Embeds the inputs along the last dimension. Args: inputs: input data, the last dimension is to embed. one_hot: whether to use one-hot matmul to gather embeddings. Returns: The output shape follows the input, with an additional `features` dimension appended. """ if one_hot: flat_inputs = tf.reshape(inputs, [-1]) one_hot_data = tf.one_hot( flat_inputs, depth=self.vocab_size, dtype=self.compute_dtype) embeddings = tf.matmul( one_hot_data, self.read_variable(self.embeddings, as_dtype=self.compute_dtype)) input_shape = tf_utils.get_shape_list(inputs) embeddings = tf.reshape(embeddings, input_shape + [self.features]) return embeddings else: return tf.nn.embedding_lookup( dense_gradient( self.read_variable(self.embeddings, as_dtype=self.compute_dtype)), inputs) def attend(self, query): """Attends over the embedding using a query tensor. Args: query: array with last dimension equal the feature depth `features` of the embedding. Returns: An tensor with final dim `num_embeddings` corresponding to the batched inner-product of the array of query vectors against each embedding. Commonly used for weight-sharing between embeddings and logit transform in NLP models. """ return tf.matmul( query, self.read_variable(self.embeddings, as_dtype=query.dtype), transpose_b=True) class RMSNorm(Module): """A layernorm module in the T5 style. No bias and no subtraction of mean. """ def __init__(self, hidden_size: int, epsilon: float = 1e-6, **kwargs): super().__init__(**kwargs) self.variance_epsilon = epsilon with self.name_scope: self.weight = self.create_variable( "scale", [hidden_size], dtype=self.dtype, initializer=tf.keras.initializers.Ones()) @tf.Module.with_name_scope def __call__(self, x): # Keeps the computation inside the layer norm to be float32. 
compute_dtype = x.dtype x = tf.cast(x, dtype=tf.float32) variance = tf.math.reduce_mean(tf.math.square(x), axis=-1, keepdims=True) x = x * tf.math.rsqrt(variance + self.variance_epsilon) x = tf.cast(x, dtype=compute_dtype) return self.read_variable(self.weight, as_dtype=compute_dtype) * x class Linear(Module): """Linear module, optionally including bias.""" def __init__(self, in_features: int, out_features: int, use_bias: bool = True, w_init: Optional[Initializer] = None, b_init: Optional[Initializer] = None, **kwargs): """Constructs a `Linear` module.""" super().__init__(**kwargs) self.in_features = in_features self.out_features = out_features self.use_bias = use_bias self.w_init = w_init if self.use_bias: self.b_init = b_init if b_init else tf.keras.initializers.Zeros() elif b_init is not None: raise ValueError("When not using a bias the b_init must be None.") with self.name_scope: if self.w_init is None: stddev = 1 / math.sqrt(self.in_features) self.w_init = tf.keras.initializers.HeNormal() self.w = self.create_variable( "kernel", [self.in_features, self.out_features], initializer=self.w_init, dtype=self.dtype) if self.use_bias: self.b = self.create_variable( "bias", [self.out_features], initializer=self.b_init, dtype=self.dtype) @tf.Module.with_name_scope def __call__(self, inputs: tf.Tensor) -> tf.Tensor: outputs = tf.matmul(inputs, self.read_variable(self.w, as_dtype=inputs.dtype)) if self.use_bias: outputs = tf.add(outputs, self.read_variable(self.b, as_dtype=inputs.dtype)) return outputs class Linear3D(Module): """Linear3D module, optionally including bias. Kernel stored as 2d parameter for compatibility with Adafactor optimizer. """ def __init__(self, in_features: int, out_features: int, num_heads: int, use_bias: bool = True, to_3d: bool = True, w_init: Optional[Initializer] = None, b_init: Optional[Initializer] = None, **kwargs): """Constructs a `Linear3D` module.""" super().__init__(**kwargs) self.in_features = in_features self.out_features = out_features self.num_heads = num_heads self.use_bias = use_bias self.to_3d = to_3d self.w_init = w_init if self.to_3d: self.kernel_2d_shape = (self.in_features, self.num_heads * self.out_features) self.kernel_3d_shape = (self.in_features, self.num_heads, self.out_features) self.bias_shape = (self.num_heads, self.out_features) bias_rank = 2 else: self.kernel_2d_shape = (self.in_features * self.num_heads, self.out_features) self.kernel_3d_shape = (self.num_heads, self.in_features, self.out_features) self.bias_shape = (self.out_features,) bias_rank = 1 if self.use_bias: self.b_init = b_init or tf.keras.initializers.Zeros() elif b_init is not None: raise ValueError("When not using a bias the b_init must be None.") with self.name_scope: if self.w_init is None: self.w_init = tf.keras.initializers.HeNormal() self.w = self.create_variable( "kernel", self.kernel_2d_shape, initializer=self.w_init, dtype=self.dtype) if self.use_bias: self.b = self.create_variable( "bias", self.bias_shape, initializer=self.b_init, dtype=self.dtype) @tf.Module.with_name_scope def __call__(self, inputs: tf.Tensor) -> tf.Tensor: # B: batch size # S: From Sequence length # D: dimension # N: Number of heads # H: head size compute_dtype = inputs.dtype w = self.read_variable(self.w, as_dtype=compute_dtype) w = tf.reshape(w, self.kernel_3d_shape) if self.to_3d: outputs = tf.einsum("BSD,DNH->BSNH", inputs, w) else: outputs = tf.einsum("BSNH,NHD->BSD", inputs, w) if self.use_bias: outputs = tf.add(outputs, self.read_variable(self.b, as_dtype=compute_dtype)) return outputs class 
Dropout(Module): """Randomly drop units in the input at a given rate.""" def __init__(self, rate: float, **kwargs): """Constructs a Dropout module. Args: rate: Probability that each element of x is discarded. Must be a scalar in the range `[0, 1)`. **kwargs: other keyword args. """ super().__init__(**kwargs) self._rate = rate @tf.Module.with_name_scope def __call__(self, x: tf.Tensor, training: bool, noise_shape: Optional[ShapeLike] = None) -> tf.Tensor: """call method for the Dropout module. Args: x: the input tensor. training: whether it is performing training pass. noise_shape: (Optional) Shape vector controlling the shape of the random noise used to apply dropout. If not set this will be the shape of the input. If set it should be broadcastable to the input shape. Returns: A tensor after applying dropout. """ if not training: return x return tf.nn.dropout(x, rate=self._rate, noise_shape=noise_shape) class FFN(Module): """Feed-forward Network. No layer norm, output dropout, or skip connection.""" activation_map = { "relu": tf.nn.relu, "gelu": functools.partial(tf.nn.gelu, approximate=True), "swish": tf.nn.silu, "silu": tf.nn.silu, } def __init__(self, d_model: int, d_ff: int, activations: Sequence[str], use_bias: bool = False, dropout_rate: Optional[float] = 0.0, layer_norm_epsilon: Optional[float] = 1e-6, weight_initializer: Optional[Initializer] = None, bias_initializer: Optional[Initializer] = None, **kwargs): super().__init__(**kwargs) self.use_bias = use_bias with self.name_scope: self.wi = [] self.activations = activations for idx, act_fn in enumerate(activations): if (act_fn is not None and act_fn != "linear" and act_fn not in self.activation_map): raise ValueError("Invalid activation function string is passed: %s" % act_fn) dense_name = "wi" if len(activations) == 1 else f"wi_{idx}" self.wi.append( Linear( d_model, d_ff, use_bias=self.use_bias, w_init=weight_initializer, b_init=bias_initializer, dtype=self.dtype, name=dense_name)) self.wo = Linear( d_ff, d_model, use_bias=self.use_bias, w_init=weight_initializer, b_init=bias_initializer, dtype=self.dtype, name="wo") self.dropout = Dropout(rate=dropout_rate) @tf.Module.with_name_scope def __call__(self, hidden_states: tf.Tensor, training: bool = False) -> tf.Tensor: h = hidden_states factors = [] for wi, act_fn in zip(self.wi, self.activations): if act_fn is None or act_fn == "linear": factors.append(wi(h)) else: factors.append(self.activation_map[act_fn](wi(h))) h = functools.reduce(tf.math.multiply, factors) h_shape = tf_utils.get_shape_list(h) h_shape[-2] = 1 h = self.dropout(h, noise_shape=h_shape, training=training) h = self.wo(h) return h class RelativePositionEmbedding(Module): """Relative position embeddings of T5 style.""" def __init__(self, num_heads: int, relative_attention_num_buckets: int = 32, relative_attention_max_distance: int = 128, bidirectional: bool = True, embeddings_initializer: Optional[Initializer] = None, compute_dtype: tf.DType = tf.float32, **kwargs): super().__init__(**kwargs) self.num_heads = num_heads self.relative_attention_num_buckets = relative_attention_num_buckets self.bidirectional = bidirectional self.relative_attention_max_distance = relative_attention_max_distance with self.name_scope: self.relative_attention_bias = Embed( vocab_size=self.relative_attention_num_buckets, features=self.num_heads, embeddings_initializer=embeddings_initializer, dtype=self.dtype, compute_dtype=compute_dtype, name="rel_embedding") @staticmethod def _relative_position_bucket(relative_position, bidirectional=True, 
num_buckets=32, max_distance=128): """Translate relative position to a bucket number for relative attention. The relative position is defined as memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for small absolute relative_position and larger buckets for larger absolute relative_positions. All relative positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket. This should allow for more graceful generalization to longer sequences than the model has been trained on. Args: relative_position: an int32 Tensor bidirectional: a boolean - whether the attention is bidirectional num_buckets: an integer max_distance: an integer Returns: a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets) """ ret = 0 n = -relative_position if bidirectional: num_buckets //= 2 ret += tf.cast(tf.math.less(n, 0), tf.int32) * num_buckets n = tf.math.abs(n) else: n = tf.math.maximum(n, 0) # now n is in the range [0, inf) max_exact = num_buckets // 2 is_small = tf.math.less(n, max_exact) val_if_large = max_exact + tf.dtypes.cast( tf.math.log( tf.cast(n, tf.float32) / max_exact + np.finfo(np.float32).eps) / math.log(max_distance / max_exact) * (num_buckets - max_exact), tf.int32, ) val_if_large = tf.math.minimum(val_if_large, num_buckets - 1) ret += tf.where(is_small, n, val_if_large) return ret @tf.Module.with_name_scope def __call__(self, qlen, klen): context_position = tf.range(qlen)[:, None] memory_position = tf.range(klen)[None, :] relative_position = memory_position - context_position # shape (qlen, klen) rp_bucket = self._relative_position_bucket( relative_position, bidirectional=self.bidirectional, num_buckets=self.relative_attention_num_buckets, max_distance=self.relative_attention_max_distance) values = self.relative_attention_bias(rp_bucket) values = tf.expand_dims( tf.transpose(values, [2, 0, 1]), axis=0) # shape (1, num_heads, qlen, klen) return values class MultiHeadAttention(Module): """T5 Attention from Mesh TensorFlow.""" def __init__(self, d_model: int, d_kv: int, num_heads: int, use_bias: bool = False, dropout_rate: Optional[float] = 0.0, rescale_query: bool = False, weight_initializer: Optional[Initializer] = None, bias_initializer: Optional[Initializer] = None, **kwargs): super().__init__(**kwargs) with self.name_scope: self.d_model = d_model self.d_kv = d_kv self.num_heads = num_heads self.rescale_query = rescale_query self.use_bias = use_bias if rescale_query or weight_initializer is None: query_w_init = weight_initializer else: init_std_rescaling = tf.math.sqrt(tf.cast(self.d_kv, dtype=self.dtype)) query_w_init = ( lambda *args, **kwargs: ( # pylint: disable=g-long-lambda tf_utils.clone_initializer(weight_initializer) (*args, **kwargs) / init_std_rescaling)) self.q = Linear3D( self.d_model, self.d_kv, num_heads=self.num_heads, use_bias=self.use_bias, w_init=query_w_init, b_init=bias_initializer, dtype=self.dtype, name="q") self.k = Linear3D( self.d_model, self.d_kv, num_heads=self.num_heads, use_bias=self.use_bias, w_init=weight_initializer, b_init=bias_initializer, dtype=self.dtype, name="k") self.v = Linear3D( self.d_model, self.d_kv, num_heads=self.num_heads, use_bias=self.use_bias, w_init=weight_initializer, b_init=bias_initializer, dtype=self.dtype, name="v") self.o = Linear3D( self.d_kv, self.d_model, num_heads=self.num_heads, 
use_bias=self.use_bias, to_3d=False, w_init=weight_initializer, b_init=bias_initializer, dtype=self.dtype, name="o") self.dropout = Dropout(dropout_rate) def _update_cache(self, key, value, cache, decode_position): """Updates cache states and gets full-length key/value tensors.""" # Combines cached keys and values with new keys and values. # TPU one-hot handling. key_seq_dim = cache["key"].shape.as_list()[1] indices = tf.reshape( tf.one_hot(decode_position, key_seq_dim, dtype=key.dtype), [1, key_seq_dim, 1, 1]) key = cache["key"] + key * indices value_seq_dim = cache["value"].shape.as_list()[1] indices = tf.reshape( tf.one_hot(decode_position, value_seq_dim, dtype=value.dtype), [1, value_seq_dim, 1, 1]) value = cache["value"] + value * indices # Update cache cache["key"] = key cache["value"] = value return key, value @tf.Module.with_name_scope def __call__(self, query, mask=None, kv=None, position_bias=None, cache: Optional[Dict[str, tf.Tensor]] = None, decode_position=None, training=False): """MultiHeadAttention at work. Args: query: Tensor of shape (bs, qlen, d_model). mask: None or Tensor of shape (bs, n_heads, qlen, klen). kv: None or Tensor of shape (bs, klen, d_model). position_bias: None or Tensor of shape (bs, n_heads, qlen, klen). cache: If not None, cache["key"] and cache["value"] are Tensors of shape (bs, klen, n_heads, d_kv). decode_position: If not None, which position of the sequence we are decoding for. Ranges from 0 to klen - 1. training: Effects the behavior of dropout. Returns: A dictionary, output["context"] is the output after attention, output["cache"] contains updated cache for the next round of autoregressive decoding. """ # Input is (bs, qlen, d_model) use_cache = cache is not None if kv is None: kv = query q = self.q(query) if self.rescale_query: q /= tf.math.sqrt(tf.cast(self.d_kv, dtype=q.dtype)) k = self.k(kv) v = self.v(kv) if use_cache: k, v = self._update_cache(k, v, cache, decode_position) # NOTE: T5 does not explicitly rescale the attention logits by # 1/sqrt(q_dim)! This is folded into the initializers of the # linear transformations, which is equivalent under Adafactor. scores = tf.einsum("bqnd,bknd->bnqk", q, k) # (bs, n_heads, qlen, klen) if position_bias is not None: # If position_bias is None, the input embedings should already include # position embeddings. if use_cache: bias_shape = position_bias.shape.as_list() position_bias = tf.slice( position_bias, [0, 0, decode_position, 0], [bias_shape[0], bias_shape[1], 1, bias_shape[3]]) scores += position_bias if mask is not None: scores += mask # (bs, n_heads, qlen, klen) weights = tf.nn.softmax(tf.cast(scores, tf.float32), axis=-1) # weights shape = (bs, n_heads, qlen, klen) weights = tf.cast(weights, scores.dtype) weight_shape = tf_utils.get_shape_list(weights) # NOTE: T5 broadcasts along the "length" dim, but unclear which one that # corresponds to. We assume it is the query dimension. 
# (bs, n_heads, qlen, klen) weight_shape[-2] = 1 weights = self.dropout(weights, training=training, noise_shape=weight_shape) c = tf.einsum("bnqk,bknd->bqnd", weights, v) c = self.o(c) outputs = dict(context=c) if cache: outputs["cache"] = cache return outputs class SelfAttention(Module): """Self attention block including residual connection.""" def __init__(self, d_model: int, d_kv: int, num_heads: int, dropout_rate: Optional[float] = 0.0, layer_norm_epsilon: Optional[float] = 1e-6, rescale_query: bool = False, weight_initializer: Optional[Initializer] = None, bias_initializer: Optional[Initializer] = None, **kwargs): super().__init__(**kwargs) with self.name_scope: self.self_attention = MultiHeadAttention( d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate, rescale_query=rescale_query, weight_initializer=weight_initializer, bias_initializer=bias_initializer, dtype=self.dtype, name="attention") self.layer_norm = RMSNorm( hidden_size=d_model, epsilon=layer_norm_epsilon, dtype=self.dtype, name="layer_norm") self.dropout = Dropout(dropout_rate) @tf.Module.with_name_scope def __call__(self, hidden_states, attention_mask=None, position_bias=None, cache=None, decode_position=None, training=False): norm_x = self.layer_norm(hidden_states) attention_outputs = self.self_attention( query=norm_x, mask=attention_mask, position_bias=position_bias, cache=cache, decode_position=decode_position, training=training) y = attention_outputs.pop("context") tensor_shape = tf_utils.get_shape_list(y) tensor_shape[-2] = 1 y = self.dropout(y, noise_shape=tensor_shape, training=training) layer_output = hidden_states + y attention_outputs["layer_output"] = layer_output return attention_outputs class CrossAttention(Module): """Cross attention block including residual connection.""" def __init__(self, d_model: int, d_kv: int, num_heads: int, dropout_rate: Optional[float] = 0.0, layer_norm_epsilon: Optional[float] = 1e-6, rescale_query: bool = False, weight_initializer: Optional[Initializer] = None, bias_initializer: Optional[Initializer] = None, **kwargs): super().__init__(**kwargs) with self.name_scope: self.cross_attention = MultiHeadAttention( d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate, rescale_query=rescale_query, weight_initializer=weight_initializer, bias_initializer=bias_initializer, dtype=self.dtype, name="attention") self.layer_norm = RMSNorm( hidden_size=d_model, epsilon=layer_norm_epsilon, dtype=self.dtype, name="layer_norm") self.dropout = Dropout(dropout_rate) @tf.Module.with_name_scope def __call__(self, hidden_states, kv, attention_mask=None, position_bias=None, cache=None, training=False): norm_x = self.layer_norm(hidden_states) attention_outputs = self.cross_attention( query=norm_x, kv=kv, mask=attention_mask, position_bias=position_bias, cache=cache, training=training) y = attention_outputs.pop("context") tensor_shape = tf_utils.get_shape_list(y) tensor_shape[-2] = 1 y = self.dropout(y, noise_shape=tensor_shape, training=training) layer_output = hidden_states + y attention_outputs["layer_output"] = layer_output return attention_outputs class EncoderBlock(Module): """Transformer Encoder Block with only self attention.""" def __init__(self, d_model: int, d_kv: int, num_heads: int, d_ff: int, ffn_activations: Sequence[str] = ("relu",), dropout_rate: Optional[float] = 0.0, layer_norm_epsilon: Optional[float] = 1e-6, rescale_query: bool = False, weight_initializer: Optional[Initializer] = None, bias_initializer: Optional[Initializer] = None, **kwargs): 
super().__init__(**kwargs) with self.name_scope: self.self_attention = SelfAttention( d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate, rescale_query=rescale_query, weight_initializer=weight_initializer, bias_initializer=bias_initializer, dtype=self.dtype, name="self_attention") self.ffn_layer_norm = RMSNorm( hidden_size=d_model, epsilon=layer_norm_epsilon, dtype=self.dtype, name="ffn_layer_norm") self.ffn = FFN( d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, activations=ffn_activations, weight_initializer=weight_initializer, bias_initializer=bias_initializer, dtype=self.dtype, name="ffn") self.ffn_output_dropout = Dropout(dropout_rate) @tf.Module.with_name_scope def __call__(self, hidden_states, attention_mask=None, position_bias=None, training=False): attention_outputs = self.self_attention( hidden_states, attention_mask=attention_mask, position_bias=position_bias, training=training) attn_output = attention_outputs["layer_output"] ffn_output = self.ffn_layer_norm(attn_output) ffn_output = self.ffn(ffn_output, training=training) tensor_shape = tf_utils.get_shape_list(ffn_output) tensor_shape[-2] = 1 ffn_output = self.ffn_output_dropout( ffn_output, noise_shape=tensor_shape, training=training) ffn_output = attn_output + ffn_output return ffn_output class EncDecoderBlock(Module): """Transformer Decoder Block with enc-decoder cross attention.""" def __init__(self, d_model: int, d_kv: int, num_heads: int, d_ff: int, ffn_activations: Sequence[str] = ("relu",), dropout_rate: Optional[float] = 0.0, layer_norm_epsilon: Optional[float] = 1e-6, rescale_query: bool = False, weight_initializer: Optional[Initializer] = None, bias_initializer: Optional[Initializer] = None, **kwargs): super().__init__(**kwargs) with self.name_scope: self.self_attention = SelfAttention( d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate, rescale_query=rescale_query, weight_initializer=weight_initializer, bias_initializer=bias_initializer, dtype=self.dtype, name="self_attention") self.cross_attention = CrossAttention( d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate, rescale_query=rescale_query, weight_initializer=weight_initializer, bias_initializer=bias_initializer, dtype=self.dtype, name="cross_attention") self.ffn_layer_norm = RMSNorm( hidden_size=d_model, epsilon=layer_norm_epsilon, dtype=self.dtype, name="ffn_layer_norm") self.ffn = FFN( d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, activations=ffn_activations, weight_initializer=weight_initializer, bias_initializer=bias_initializer, dtype=self.dtype, name="ffn") self.ffn_output_dropout = Dropout(dropout_rate,) @tf.Module.with_name_scope def __call__(self, hidden_states, encoder_hidden_states, attention_mask=None, encoder_decoder_mask=None, position_bias=None, cache=None, decode_position=None, training=False): self_attention_outputs = self.self_attention( hidden_states, attention_mask=attention_mask, decode_position=decode_position, position_bias=position_bias, cache=cache, training=training) if "cache" in self_attention_outputs: cache = self_attention_outputs["cache"] # No relative position bias is used for encoder-decoder cross attention. 
cross_attention_outputs = self.cross_attention( self_attention_outputs["layer_output"], kv=encoder_hidden_states, attention_mask=encoder_decoder_mask, training=training) attn_output = cross_attention_outputs["layer_output"] ffn_output = self.ffn_layer_norm(attn_output) ffn_output = self.ffn(ffn_output, training=training) tensor_shape = tf_utils.get_shape_list(ffn_output) tensor_shape[-2] = 1 ffn_output = self.ffn_output_dropout( ffn_output, noise_shape=tensor_shape, training=training) ffn_output = attn_output + ffn_output return ffn_output, cache @dataclasses.dataclass class T5TransformerParams: """Transformer parameters.""" num_layers: int d_model: int d_kv: int num_heads: int d_ff: int vocab_size: int target_vocab_size: Optional[int] = None dropout_rate: float = 0.0 layer_norm_epsilon: float = 1e-6 shared_embedding: bool = False vocab_embeddings_initializer: Optional[Initializer] = None relative_attention_num_buckets: int = 32 relative_attention_max_distance: int = 128 relative_embeddings_initializer: Optional[Initializer] = None weight_initializer: Optional[Initializer] = (tf.keras.initializers.HeNormal()) bias_initializer: Optional[Initializer] = None rescale_query: bool = False bidirectional: bool = True ffn_activations: Sequence[str] = ("relu",) logits_via_embedding: bool = True num_decoder_layers: Optional[int] = None one_hot_embedding: bool = True layer_sharing: bool = False # If true, uses one relative embedding for all encoder layers and one for all # decoder layers. Otherwise, have relative embedding for each layer. use_shared_relative_position_bias: bool = True class Encoder(Module): """Transformer Model Encoder for sequence to sequence.""" def __init__(self, config: T5TransformerParams, shared_embedding: Optional[tf.Variable] = None, compute_dtype: tf.DType = tf.float32, **kwargs): super().__init__(**kwargs) self.config = config self.compute_dtype = compute_dtype self.embed_dim = config.d_model with self.name_scope: # Input Embedding. if shared_embedding is None: self.input_embed = Embed( vocab_size=self.config.vocab_size, features=self.config.d_model, embeddings_initializer=self.config.vocab_embeddings_initializer, dtype=self.dtype, compute_dtype=self.compute_dtype, name="input_embedding") else: self.input_embed = shared_embedding # Creates an alias to the input embed for encoder-only models. 
self.word_embed = self.input_embed if config.use_shared_relative_position_bias: self.relative_embedding = RelativePositionEmbedding( num_heads=self.config.num_heads, relative_attention_num_buckets=self.config .relative_attention_num_buckets, relative_attention_max_distance=self.config .relative_attention_max_distance, bidirectional=self.config.bidirectional, embeddings_initializer=self.config.relative_embeddings_initializer, dtype=self.dtype, compute_dtype=self.compute_dtype, name="relative_posemb") else: self.relative_embeddings = [] for layer_idx in range(self.config.num_layers): relative_embedding = RelativePositionEmbedding( num_heads=self.config.num_heads, relative_attention_num_buckets=self.config .relative_attention_num_buckets, relative_attention_max_distance=self.config .relative_attention_max_distance, bidirectional=self.config.bidirectional, embeddings_initializer=self.config .relative_embeddings_initializer, dtype=self.dtype, compute_dtype=self.compute_dtype, name=f"relative_posemb_{layer_idx}") self.relative_embeddings.append(relative_embedding) self.input_dropout = Dropout(self.config.dropout_rate,) self.encoder_layers = [] for layer_idx in range(self.config.num_layers): if self.config.layer_sharing and layer_idx > 0: self.encoder_layers.append(self.encoder_layers[0]) else: self.encoder_layers.append( EncoderBlock( d_model=self.config.d_model, d_kv=self.config.d_kv, num_heads=self.config.num_heads, d_ff=self.config.d_ff, dropout_rate=self.config.dropout_rate, ffn_activations=self.config.ffn_activations, rescale_query=self.config.rescale_query, weight_initializer=self.config.weight_initializer, bias_initializer=self.config.bias_initializer, dtype=self.dtype, name="encoder_block_%d" % layer_idx)) self.output_norm = RMSNorm( hidden_size=self.config.d_model, epsilon=self.config.layer_norm_epsilon, dtype=self.dtype, name="final_layer_norm") self.output_dropout = Dropout(self.config.dropout_rate,) @tf.Module.with_name_scope def get_relpos_bias(self, input_length: int, dense_inputs: tf.Tensor, layer_idx: Optional[int] = None) -> tf.Tensor: if self.config.use_shared_relative_position_bias: position_bias = self.relative_embedding(input_length, input_length) else: position_bias = self.relative_embeddings[layer_idx](input_length, input_length) if dense_inputs is not None: # Here we ignore relative position bias for dense embeddings. # TODO(yejiayu): If we proceed to video use cases, rework this part. dense_input_length = tf_utils.get_shape_list(dense_inputs)[1] # Position bias shape: [batch, 1, len, len] paddings = tf.constant([[0, 0], [0, 0], [0, dense_input_length], [0, dense_input_length]]) position_bias = tf.pad(position_bias, paddings, "CONSTANT") return position_bias @tf.Module.with_name_scope def __call__(self, inputs=None, encoder_mask=None, dense_inputs=None, training=False): """Applies Transformer model on the inputs. Args: inputs: input word ids. Optional if dense data are provided. encoder_mask: the encoder self-attention mask. dense_inputs: dense input data. Concat after the embedding if word ids are provided. training: whether it is training pass, affecting dropouts. Returns: output of a transformer encoder. """ # Casts inputs to the dtype. 
if encoder_mask is not None: encoder_mask = tf.cast(encoder_mask, self.compute_dtype) cfg = self.config inputs_array = [] if inputs is not None: inputs_array.append( self.input_embed(inputs, one_hot=cfg.one_hot_embedding)) if dense_inputs is not None: inputs_array.append(dense_inputs) if not inputs_array: raise ValueError("At least one of inputs and dense_inputs must not be " "None.") x = tf.concat(inputs_array, axis=1) tensor_shape = tf_utils.get_shape_list(x) tensor_shape[-2] = 1 x = self.input_dropout(x, noise_shape=tensor_shape, training=training) if inputs is not None: input_length = tf_utils.get_shape_list(inputs)[1] else: input_length = 0 for i in range(cfg.num_layers): position_bias = self.get_relpos_bias(input_length, dense_inputs, i) x = self.encoder_layers[i]( x, attention_mask=encoder_mask, position_bias=position_bias, training=training) encoded = self.output_norm(x) encoded = self.output_dropout(encoded, training=training) return encoded class Decoder(Module): """Transformer Model Decoder for sequence to sequence.""" def __init__(self, config: T5TransformerParams, shared_embedding: Optional[tf.Variable] = None, compute_dtype: tf.DType = tf.float32, **kwargs): super().__init__(**kwargs) self.config = config self.compute_dtype = compute_dtype if self.config.num_decoder_layers is None: self.config.num_decoder_layers = self.config.num_layers if not hasattr( self.config, "target_vocab_size") or self.config.target_vocab_size is None: self.config.target_vocab_size = self.config.vocab_size with self.name_scope: # Target Embedding. if shared_embedding is None: self.target_embed = Embed( vocab_size=self.config.target_vocab_size, features=self.config.d_model, embeddings_initializer=self.config.vocab_embeddings_initializer, dtype=self.dtype, compute_dtype=self.compute_dtype, name="target_embedding") else: self.target_embed = shared_embedding self.target_dropout = Dropout(self.config.dropout_rate,) # Position bias for the target self attention. 
if config.use_shared_relative_position_bias: self.relative_embedding = RelativePositionEmbedding( num_heads=self.config.num_heads, relative_attention_num_buckets=self.config .relative_attention_num_buckets, relative_attention_max_distance=self.config .relative_attention_max_distance, bidirectional=self.config.bidirectional, embeddings_initializer=self.config.relative_embeddings_initializer, dtype=self.dtype, compute_dtype=self.compute_dtype, name="relative_posemb") else: self.relative_embeddings = [] for layer_idx in range(self.config.num_decoder_layers): relative_embedding = RelativePositionEmbedding( num_heads=self.config.num_heads, relative_attention_num_buckets=self.config .relative_attention_num_buckets, relative_attention_max_distance=self.config .relative_attention_max_distance, bidirectional=self.config.bidirectional, embeddings_initializer=self.config .relative_embeddings_initializer, dtype=self.dtype, compute_dtype=self.compute_dtype, name=f"relative_posemb_{layer_idx}") self.relative_embeddings.append(relative_embedding) self.decoder_layers = [] for layer_idx in range(self.config.num_decoder_layers): if self.config.layer_sharing and layer_idx > 0: self.decoder_layers.append(self.decoder_layers[0]) else: self.decoder_layers.append( EncDecoderBlock( d_model=self.config.d_model, d_kv=self.config.d_kv, num_heads=self.config.num_heads, d_ff=self.config.d_ff, dropout_rate=self.config.dropout_rate, ffn_activations=self.config.ffn_activations, rescale_query=self.config.rescale_query, weight_initializer=self.config.weight_initializer, bias_initializer=self.config.bias_initializer, dtype=self.dtype, name="decoder_block_%d" % layer_idx)) self.output_norm = RMSNorm( hidden_size=self.config.d_model, epsilon=self.config.layer_norm_epsilon, dtype=self.dtype, name="final_layer_norm") self.output_dropout = Dropout(self.config.dropout_rate,) if not self.config.logits_via_embedding: self.logits_dense = Linear( in_features=self.config.d_model, out_features=self.config.target_vocab_size, use_bias=False, dtype=self.dtype, name="logits") @tf.Module.with_name_scope def get_relpos_bias(self, input_length: int, layer_idx: int) -> tf.Tensor: if self.config.use_shared_relative_position_bias: return self.relative_embedding(input_length, input_length) else: return self.relative_embeddings[layer_idx](input_length, input_length) @tf.Module.with_name_scope def __call__(self, decoder_input_tokens, encoded, decoder_mask=None, encoder_decoder_mask=None, decode=False, decode_position=None, cache=None, max_decode_len=None, training=False): """Applies Transformer model on the inputs. Args: decoder_input_tokens: the decoder input tokens. encoded: the encoder outputs. decoder_mask: the decoder self-attention mask. encoder_decoder_mask: the cross-attention mask. decode: Whether to perform autoregressive decoding. decode_position: integer, the position to decode. cache: The cache dictionary of key, value tensors. max_decode_len: An optional integer specifying the maximum decoding length. Note that this is only used for defining the relative position embedding parameters. training: Whether it is training pass, affecting dropouts. Returns: output of a transformer encoder including 1. logits: Logits for each word in the vocab. 2. raw_logits: Logits along the moded dimension. 3. cache: Used for decoding in inference mode. """ cfg = self.config # Casts inputs to the dtype. 
encoded = tf.cast(encoded, self.compute_dtype) if decoder_mask is not None: decoder_mask = tf.cast(decoder_mask, self.compute_dtype) if encoder_decoder_mask is not None: encoder_decoder_mask = tf.cast(encoder_decoder_mask, self.compute_dtype) x = self.target_embed(decoder_input_tokens, one_hot=cfg.one_hot_embedding) tensor_shape = tf_utils.get_shape_list(x) tensor_shape[-2] = 1 x = self.target_dropout(x, noise_shape=tensor_shape, training=training) for i in range(cfg.num_decoder_layers): if cache is not None: position_bias = self.get_relpos_bias(max_decode_len, i) else: input_length = tf_utils.get_shape_list(decoder_input_tokens)[1] position_bias = self.get_relpos_bias(input_length, i) if cache is None: x, _ = self.decoder_layers[i]( x, encoder_hidden_states=encoded, attention_mask=decoder_mask, encoder_decoder_mask=encoder_decoder_mask, position_bias=position_bias, training=training) else: x, cache[i] = self.decoder_layers[i]( x, encoder_hidden_states=encoded, attention_mask=decoder_mask, encoder_decoder_mask=encoder_decoder_mask, position_bias=position_bias, decode_position=decode_position, cache=cache[i], training=training) output = self.output_norm(x) tensor_shape = tf_utils.get_shape_list(output) tensor_shape[-2] = 1 output = self.target_dropout( output, noise_shape=tensor_shape, training=training) if self.config.logits_via_embedding: logits = self.target_embed.attend(output) logits = logits / math.sqrt(cfg.d_model) else: logits = self.logits_dense(output) return dict(logits=logits, cache=cache, raw_logits=output) class T5Transformer(Module): """Transformer Encoder+Decoder for sequence to sequence.""" def __init__(self, config: T5TransformerParams, compute_dtype: tf.DType = tf.float32, **kwargs): super().__init__(**kwargs) # Builds the model components. 
shared_embedding = config.shared_embedding self.compute_dtype = compute_dtype self.decoder_cfg = dataclasses.replace(config, bidirectional=False) if self.decoder_cfg.num_decoder_layers is None: self.decoder_cfg.num_decoder_layers = self.decoder_cfg.num_layers self.encoder_cfg = dataclasses.replace(config, bidirectional=True) with self.name_scope: if shared_embedding: self.shared_embedding = Embed( vocab_size=config.vocab_size, features=config.d_model, embeddings_initializer=config.vocab_embeddings_initializer, dtype=self.dtype, compute_dtype=self.compute_dtype, name="shared") else: self.shared_embedding = None self.encoder = Encoder( self.encoder_cfg, self.shared_embedding, dtype=self.dtype, compute_dtype=self.compute_dtype) self.decoder = Decoder( self.decoder_cfg, self.shared_embedding, dtype=self.dtype, compute_dtype=self.compute_dtype) def encode(self, encoder_input_tokens=None, encoder_segment_ids=None, encoder_dense_inputs=None, encoder_dense_segment_ids=None, training=False): eligible_position_array = [] if encoder_input_tokens is not None: eligible_position_array.append( tf.cast(tf.not_equal(encoder_input_tokens, 0), self.compute_dtype)) if encoder_dense_inputs is not None: eligible_dense_positions = tf.cast( tf.reduce_any(tf.not_equal(encoder_dense_inputs, 0), axis=-1), self.compute_dtype) eligible_position_array.append(eligible_dense_positions) if not eligible_position_array: raise ValueError("At least one of encoder_input_tokens and" " encoder_dense_inputs must be provided.") eligible_positions = tf.concat(eligible_position_array, axis=1) encoder_mask = make_attention_mask( eligible_positions, eligible_positions, dtype=tf.bool) encoder_segment_id_array = [] if encoder_segment_ids is not None: encoder_segment_id_array.append(encoder_segment_ids) if encoder_dense_segment_ids is not None: encoder_segment_id_array.append(encoder_dense_segment_ids) if encoder_segment_id_array: encoder_segment_ids = tf.concat(encoder_segment_id_array, axis=1) segment_mask = make_attention_mask( encoder_segment_ids, encoder_segment_ids, tf.equal, dtype=tf.bool) encoder_mask = tf.math.logical_and(encoder_mask, segment_mask) encoder_mask = (1.0 - tf.cast(encoder_mask, self.compute_dtype)) * -1e9 return self.encoder( encoder_input_tokens, encoder_mask, encoder_dense_inputs, training=training) def decode( self, encoded, decoder_target_tokens, encoder_input_tokens=None, # only used for masks encoder_dense_inputs=None, decoder_input_tokens=None, encoder_segment_ids=None, encoder_dense_segment_ids=None, decoder_segment_ids=None, decode_position=None, cache=None, max_decode_len=None, decode=False, training=False) -> Dict[str, tf.Tensor]: eligible_inputs_array = [] if encoder_input_tokens is not None: eligible_inputs = tf.cast( tf.not_equal(encoder_input_tokens, 0), self.compute_dtype) eligible_inputs_array.append(eligible_inputs) if encoder_dense_inputs is not None: eligible_dense_inputs = tf.cast( tf.reduce_any(tf.not_equal(encoder_dense_inputs, 0), axis=-1), self.compute_dtype) eligible_inputs_array.append(eligible_dense_inputs) eligible_inputs = tf.concat(eligible_inputs_array, axis=1) if decode: # For decoding, the decoder_input_tokens is the decoder_target_tokens. 
decoder_input_tokens = decoder_target_tokens # fast autoregressive decoding uses only a special encoder-decoder mask decoder_mask = None encoder_decoder_mask = make_attention_mask( tf.cast( tf.not_equal(tf.ones_like(decoder_target_tokens), 0), self.compute_dtype), eligible_inputs, dtype=tf.bool) else: # Note that, masks should be created using decoder_target_tokens. eligible_targets = tf.cast( tf.not_equal(decoder_target_tokens, 0), self.compute_dtype) decoder_mask = tf.math.logical_and( make_attention_mask( eligible_targets, eligible_targets, dtype=tf.bool), make_causal_mask(decoder_target_tokens, dtype=tf.bool)) encoder_decoder_mask = make_attention_mask( eligible_targets, eligible_inputs, dtype=tf.bool) if encoder_segment_ids is not None: if decoder_mask is not None: decoder_mask = tf.math.logical_and( decoder_mask, make_attention_mask( decoder_segment_ids, decoder_segment_ids, tf.equal, dtype=tf.bool)) if encoder_dense_segment_ids is not None: encoder_segment_ids = tf.concat( [encoder_segment_ids, encoder_dense_segment_ids], axis=1) encoder_decoder_mask = tf.math.logical_and( encoder_decoder_mask, make_attention_mask( decoder_segment_ids, encoder_segment_ids, tf.equal, dtype=tf.bool)) if decoder_mask is not None: decoder_mask = (1.0 - tf.cast(decoder_mask, self.compute_dtype)) * -1e9 encoder_decoder_mask = ( 1.0 - tf.cast(encoder_decoder_mask, self.compute_dtype)) * -1e9 outputs = self.decoder( decoder_input_tokens, encoded, decode_position=decode_position, decoder_mask=decoder_mask, encoder_decoder_mask=encoder_decoder_mask, cache=cache, max_decode_len=max_decode_len, decode=decode, training=training) outputs["encoded"] = encoded return outputs @tf.Module.with_name_scope def __call__(self, encoder_input_tokens=None, decoder_target_tokens=None, encoder_dense_inputs=None, encoder_dense_segment_ids=None, decoder_input_tokens=None, encoder_segment_ids=None, decoder_segment_ids=None, training=False): """Applies Transformer model on the inputs. Args: encoder_input_tokens: input tokens to the encoder. decoder_target_tokens: target tokens to the decoder. encoder_dense_inputs: input dense vectors to the encoder. encoder_dense_segment_ids: dense input segmentation info for packed decoder_input_tokens: input tokens to the decoder, only required for training. encoder_segment_ids: input segmentation info for packed examples. examples. decoder_segment_ids: target segmentation info for packed examples. training: whether it is training pass, affecting dropouts. Returns: a dictionary of logits/cache. """ encoded = self.encode( encoder_input_tokens=encoder_input_tokens, encoder_segment_ids=encoder_segment_ids, encoder_dense_inputs=encoder_dense_inputs, encoder_dense_segment_ids=encoder_dense_segment_ids, training=training) outputs = self.decode( encoded=encoded, decoder_target_tokens=decoder_target_tokens, encoder_input_tokens=encoder_input_tokens, # only used for masks. encoder_dense_inputs=encoder_dense_inputs, # only used for masks. decoder_input_tokens=decoder_input_tokens, encoder_segment_ids=encoder_segment_ids, encoder_dense_segment_ids=encoder_dense_segment_ids, decoder_segment_ids=decoder_segment_ids, training=training) outputs["encoded"] = encoded return outputs @property def checkpoint_items(self): return dict(encoder=self.encoder, decoder=self.decoder)
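As a rough usage sketch (not part of t5.py; the hyperparameter values and tensor sizes below are illustrative), a tiny T5Transformer can be built from T5TransformerParams and run in teacher-forcing mode as follows. Token id 0 is treated as padding by the attention masks, so the random ids start at 1.

import numpy as np
import tensorflow as tf

from official.nlp.modeling.models import t5

# Tiny, illustrative configuration; real models use much larger values.
config = t5.T5TransformerParams(
    num_layers=2,
    d_model=16,
    d_kv=8,
    num_heads=2,
    d_ff=32,
    vocab_size=100,
    dropout_rate=0.1,
    shared_embedding=True)
model = t5.T5Transformer(config)

batch_size, source_length, target_length = 2, 6, 4
encoder_input_tokens = tf.constant(
    np.random.randint(1, 100, size=(batch_size, source_length), dtype=np.int32))
decoder_input_tokens = tf.constant(
    np.random.randint(1, 100, size=(batch_size, target_length), dtype=np.int32))
decoder_target_tokens = tf.constant(
    np.random.randint(1, 100, size=(batch_size, target_length), dtype=np.int32))

outputs = model(
    encoder_input_tokens=encoder_input_tokens,
    decoder_target_tokens=decoder_target_tokens,
    decoder_input_tokens=decoder_input_tokens,
    training=False)
print(outputs["logits"].shape)  # (batch_size, target_length, vocab_size) = (2, 4, 100)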
file_length: 57312
avg_line_length: 35.389206
max_line_length: 80
extension_type: py

repo: models
file: models-master/official/nlp/modeling/models/xlnet.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """XLNet models.""" # pylint: disable=g-classes-have-attributes from typing import Any, Mapping, Optional, Union import tensorflow as tf from official.nlp.modeling import layers from official.nlp.modeling import networks class XLNetMaskedLM(tf.keras.layers.Layer): """XLNet pretraining head.""" def __init__(self, vocab_size: int, hidden_size: int, initializer: str = 'glorot_uniform', activation: str = 'gelu', name=None, **kwargs): super().__init__(name=name, **kwargs) self._vocab_size = vocab_size self._hidden_size = hidden_size self._initializer = initializer self._activation = activation def build(self, input_shape): self.dense = tf.keras.layers.Dense( units=self._hidden_size, activation=self._activation, kernel_initializer=self._initializer, name='transform/dense') self.layer_norm = tf.keras.layers.LayerNormalization( axis=-1, epsilon=1e-12, name='transform/LayerNorm') self.bias = self.add_weight( 'output_bias/bias', shape=(self._vocab_size,), initializer='zeros', trainable=True) super().build(input_shape) def call(self, sequence_data: tf.Tensor, embedding_table: tf.Tensor): lm_data = self.dense(sequence_data) lm_data = self.layer_norm(lm_data) lm_data = tf.matmul(lm_data, embedding_table, transpose_b=True) logits = tf.nn.bias_add(lm_data, self.bias) return logits def get_config(self) -> Mapping[str, Any]: config = { 'vocab_size': self._vocab_size, 'hidden_size': self._hidden_size, 'initializer': self._initializer } base_config = super(XLNetMaskedLM, self).get_config() return dict(list(base_config.items()) + list(config.items())) @tf.keras.utils.register_keras_serializable(package='Text') class XLNetPretrainer(tf.keras.Model): """XLNet-based pretrainer. This is an implementation of the network structure surrounding a Transformer-XL encoder as described in "XLNet: Generalized Autoregressive Pretraining for Language Understanding" (https://arxiv.org/abs/1906.08237). Args: network: An XLNet/Transformer-XL based network. This network should output a sequence output and list of `state` tensors. mlm_activation: The activation (if any) to use in the Masked LM network. If None, then no activation will be used. mlm_initializer: The initializer (if any) to use in the masked LM. Defaults to a Glorot uniform initializer. 
""" def __init__( self, network: Union[tf.keras.layers.Layer, tf.keras.Model], mlm_activation=None, mlm_initializer='glorot_uniform', name: Optional[str] = None, **kwargs): super().__init__(name=name, **kwargs) self._config = { 'network': network, 'mlm_activation': mlm_activation, 'mlm_initializer': mlm_initializer, } self._network = network self._hidden_size = network.get_config()['hidden_size'] self._vocab_size = network.get_config()['vocab_size'] self._activation = mlm_activation self._initializer = mlm_initializer self._masked_lm = XLNetMaskedLM( vocab_size=self._vocab_size, hidden_size=self._hidden_size, initializer=self._initializer) def call(self, inputs: Mapping[str, Any]): # pytype: disable=signature-mismatch # overriding-parameter-count-checks input_word_ids = inputs['input_word_ids'] input_type_ids = inputs['input_type_ids'] masked_tokens = inputs['masked_tokens'] permutation_mask = inputs['permutation_mask'] target_mapping = inputs['target_mapping'] state = inputs.get('state', None) attention_output, state = self._network( input_ids=input_word_ids, segment_ids=input_type_ids, input_mask=None, state=state, permutation_mask=permutation_mask, target_mapping=target_mapping, masked_tokens=masked_tokens) embedding_table = self._network.get_embedding_lookup_table() mlm_outputs = self._masked_lm( sequence_data=attention_output, embedding_table=embedding_table) return mlm_outputs, state def get_config(self) -> Mapping[str, Any]: return self._config @classmethod def from_config(cls, config, custom_objects=None): return cls(**config) @property def checkpoint_items(self): return dict(encoder=self._network) @tf.keras.utils.register_keras_serializable(package='Text') class XLNetClassifier(tf.keras.Model): """Classifier model based on XLNet. This is an implementation of the network structure surrounding a Transformer-XL encoder as described in "XLNet: Generalized Autoregressive Pretraining for Language Understanding" (https://arxiv.org/abs/1906.08237). Note: This model does not use utilize the memory mechanism used in the original XLNet Classifier. Args: network: An XLNet/Transformer-XL based network. This network should output a sequence output and list of `state` tensors. num_classes: Number of classes to predict from the classification network. initializer: The initializer (if any) to use in the classification networks. Defaults to a RandomNormal initializer. summary_type: Method used to summarize a sequence into a compact vector. dropout_rate: The dropout probability of the cls head. head_name: Name of the classification head. """ def __init__( self, network: Union[tf.keras.layers.Layer, tf.keras.Model], num_classes: int, initializer: tf.keras.initializers.Initializer = 'random_normal', summary_type: str = 'last', dropout_rate: float = 0.1, head_name: str = 'sentence_prediction', # pytype: disable=annotation-type-mismatch # typed-keras **kwargs): super().__init__(**kwargs) self._network = network self._initializer = initializer self._summary_type = summary_type self._num_classes = num_classes self._config = { 'network': network, 'initializer': initializer, 'num_classes': num_classes, 'summary_type': summary_type, 'dropout_rate': dropout_rate, 'head_name': head_name, } if summary_type == 'last': cls_token_idx = -1 elif summary_type == 'first': cls_token_idx = 0 else: raise ValueError('Invalid summary type provided: %s.' 
% summary_type) self.classifier = layers.ClassificationHead( inner_dim=network.get_config()['hidden_size'], num_classes=num_classes, initializer=initializer, dropout_rate=dropout_rate, cls_token_idx=cls_token_idx, name=head_name) def call(self, inputs: Mapping[str, Any]): # pytype: disable=signature-mismatch # overriding-parameter-count-checks input_ids = inputs['input_word_ids'] segment_ids = inputs['input_type_ids'] input_mask = tf.cast(inputs['input_mask'], tf.float32) state = inputs.get('mems', None) attention_output, _ = self._network( input_ids=input_ids, segment_ids=segment_ids, input_mask=input_mask, state=state) logits = self.classifier(attention_output) return logits def get_config(self): return self._config @classmethod def from_config(cls, config, custom_objects=None): return cls(**config) @property def checkpoint_items(self): items = dict(encoder=self._network) if hasattr(self.classifier, 'checkpoint_items'): for key, item in self.classifier.checkpoint_items.items(): items['.'.join([self.classifier.name, key])] = item return items @tf.keras.utils.register_keras_serializable(package='Text') class XLNetSpanLabeler(tf.keras.Model): """Span labeler model based on XLNet. This is an implementation of the network structure surrounding a Transformer-XL encoder as described in "XLNet: Generalized Autoregressive Pretraining for Language Understanding" (https://arxiv.org/abs/1906.08237). Args: network: A transformer network. This network should output a sequence output and a classification output. Furthermore, it should expose its embedding table via a "get_embedding_table" method. start_n_top: Beam size for span start. end_n_top: Beam size for span end. dropout_rate: The dropout rate for the span labeling layer. span_labeling_activation: The activation for the span labeling head. initializer: The initializer (if any) to use in the span labeling network. Defaults to a Glorot uniform initializer. """ def __init__( self, network: Union[tf.keras.layers.Layer, tf.keras.Model], start_n_top: int = 5, end_n_top: int = 5, dropout_rate: float = 0.1, span_labeling_activation: tf.keras.initializers.Initializer = 'tanh', initializer: tf.keras.initializers.Initializer = 'glorot_uniform', # pytype: disable=annotation-type-mismatch # typed-keras **kwargs): super().__init__(**kwargs) self._config = { 'network': network, 'start_n_top': start_n_top, 'end_n_top': end_n_top, 'dropout_rate': dropout_rate, 'span_labeling_activation': span_labeling_activation, 'initializer': initializer, } network_config = network.get_config() try: input_width = network_config['inner_size'] self._xlnet_base = True except KeyError: # BertEncoder uses 'intermediate_size' due to legacy naming. 
input_width = network_config['intermediate_size'] self._xlnet_base = False self._network = network self._initializer = initializer self._start_n_top = start_n_top self._end_n_top = end_n_top self._dropout_rate = dropout_rate self._activation = span_labeling_activation self.span_labeling = networks.XLNetSpanLabeling( input_width=input_width, start_n_top=self._start_n_top, end_n_top=self._end_n_top, activation=self._activation, dropout_rate=self._dropout_rate, initializer=self._initializer) def call(self, inputs: Mapping[str, Any]): # pytype: disable=signature-mismatch # overriding-parameter-count-checks input_word_ids = inputs['input_word_ids'] input_type_ids = inputs['input_type_ids'] input_mask = inputs['input_mask'] class_index = inputs['class_index'] paragraph_mask = inputs['paragraph_mask'] start_positions = inputs.get('start_positions', None) if self._xlnet_base: attention_output, _ = self._network( input_ids=input_word_ids, segment_ids=input_type_ids, input_mask=input_mask) else: network_output_dict = self._network(dict( input_word_ids=input_word_ids, input_type_ids=input_type_ids, input_mask=input_mask)) attention_output = network_output_dict['sequence_output'] outputs = self.span_labeling( sequence_data=attention_output, class_index=class_index, paragraph_mask=paragraph_mask, start_positions=start_positions) return outputs @property def checkpoint_items(self): return dict(encoder=self._network) def get_config(self): return self._config @classmethod def from_config(cls, config, custom_objects=None): return cls(**config)
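# Illustrative usage sketch (not part of the original file). It assumes
# `xlnet_encoder` is an already-constructed XLNet/Transformer-XL network (for
# example `networks.XLNetBase`); its constructor arguments are omitted here
# because they depend on the chosen configuration. The feature keys below are
# the ones read by XLNetClassifier.call().
import tensorflow as tf


def classify_with_xlnet(xlnet_encoder, num_classes=3, batch_size=2,
                        seq_length=128):
  """Wraps a pre-built XLNet encoder in an XLNetClassifier and runs it."""
  model = XLNetClassifier(
      network=xlnet_encoder,
      num_classes=num_classes,
      summary_type='last',
      dropout_rate=0.1)
  features = {
      'input_word_ids': tf.zeros((batch_size, seq_length), dtype=tf.int32),
      'input_type_ids': tf.zeros((batch_size, seq_length), dtype=tf.int32),
      'input_mask': tf.ones((batch_size, seq_length), dtype=tf.int32),
  }
  logits = model(features)  # shape: (batch_size, num_classes)
  return logits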
12,004
33.696532
131
py
models
models-master/official/nlp/modeling/models/t5_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for t5.""" from absl.testing import parameterized import numpy as np import tensorflow as tf from tensorflow.python.distribute import combinations from tensorflow.python.distribute import strategy_combinations from official.nlp.modeling.models import t5 def _create_cache(batch_size, init_decode_length, num_heads, head_size, dtype=tf.float32): if num_heads is None: kv_shape = [batch_size, init_decode_length, head_size] else: kv_shape = [batch_size, init_decode_length, num_heads, head_size] return { "key": tf.zeros(kv_shape, dtype=dtype), "value": tf.zeros(kv_shape, dtype=dtype) } class ModulesTest(tf.test.TestCase, parameterized.TestCase): @parameterized.named_parameters(("bfloat16", tf.bfloat16), ("float32", tf.float32)) def test_embed(self, dtype): l = t5.Embed(vocab_size=5, features=4, compute_dtype=dtype, name="foo") inputs = np.array([[2, 3], [1, 2]], dtype=np.int32) inputs = tf.convert_to_tensor(inputs) one_hot_outputs = l(inputs, one_hot=True) gather_outputs = l(inputs, one_hot=False) self.assertEqual(one_hot_outputs.shape, (2, 2, 4)) self.assertLen(l.trainable_variables, 1) self.assertAllClose(one_hot_outputs, gather_outputs) outputs = l.attend(query=tf.zeros((2, 2, 4), dtype)) self.assertEqual(outputs.shape, (2, 2, 5)) # Test initializers. 
l = t5.Embed( vocab_size=5, features=4, compute_dtype=dtype, name="foo", embeddings_initializer=tf.keras.initializers.Zeros()) self.assertAllClose(l(inputs), tf.zeros((2, 2, 4), dtype)) @parameterized.named_parameters(("bfloat16", tf.bfloat16), ("float32", tf.float32)) def test_rms_norm(self, dtype): l = t5.RMSNorm(hidden_size=4, epsilon=0.0, name="foo") inputs = tf.ones((2, 4), dtype=dtype) outputs = l(inputs) self.assertAllEqual(l(inputs), inputs) self.assertEqual(outputs.dtype, dtype) self.assertLen(l.trainable_variables, 1) self.assertIn("foo/scale", l.trainable_variables[0].name) @parameterized.named_parameters(("bfloat16", tf.bfloat16), ("float32", tf.float32)) def test_linear(self, dtype): l = t5.Linear( in_features=4, out_features=4, w_init=tf.keras.initializers.Ones(), name="foo") inputs = tf.ones((2, 4), dtype=dtype) outputs = l(inputs) self.assertEqual(outputs.shape, inputs.shape) self.assertEqual(outputs.dtype, dtype) self.assertLen(l.trainable_variables, 2) def test_linear3d(self): batch_size = 2 l = t5.Linear3D( in_features=4, out_features=4, num_heads=2, to_3d=True, w_init=tf.keras.initializers.Ones(), name="foo") inputs = np.ones((batch_size, 2, 4), dtype=np.float32) self.assertEqual(l(inputs).shape, (batch_size, 2, 2, 4)) l = t5.Linear3D( in_features=2, out_features=4, num_heads=2, to_3d=False, w_init=tf.keras.initializers.Ones(), name="foo") inputs = np.ones((batch_size, 2, 2, 2), dtype=np.float32) self.assertEqual(l(inputs).shape, (batch_size, 2, 4)) def test_ffn(self): inputs = np.ones((2, 4), dtype=np.float32) for activation in ["relu", "linear", "gelu", "swish"]: l = t5.FFN( d_model=4, d_ff=8, use_bias=True, dropout_rate=0.1, activations=[activation], name="foo") self.assertEqual(l(inputs).shape, inputs.shape) self.assertLen(l.trainable_variables, 4) l = t5.FFN( d_model=4, d_ff=8, dropout_rate=0.1, activations=["linear", "gelu"], name="bar") self.assertLen(l.trainable_variables, 3) self.assertEqual(l(inputs).shape, inputs.shape) @parameterized.named_parameters(("bfloat16", tf.bfloat16), ("float32", tf.float32)) def test_relative_position(self, dtype): l = t5.RelativePositionEmbedding( num_heads=4, bidirectional=False, embeddings_initializer=tf.keras.initializers.Ones(), compute_dtype=dtype, name="foo") self.assertEqual(l(4, 2).shape, (1, 4, 4, 2)) l = t5.RelativePositionEmbedding( num_heads=4, bidirectional=True, embeddings_initializer=tf.keras.initializers.Ones(), compute_dtype=dtype, name="bar") outputs = l(4, 2) self.assertEqual(outputs.shape, (1, 4, 4, 2)) self.assertEqual(outputs.dtype, dtype) def test_masks(self): causal_mask = t5.make_causal_mask(np.zeros((2, 5))) self.assertEqual(causal_mask.shape, (2, 1, 5, 5)) @combinations.generate( combinations.combine( distribution=[ strategy_combinations.default_strategy, strategy_combinations.cloud_tpu_strategy, ], mode="eager")) def test_attention(self, distribution): num_heads, head_size = 2, 4 from_seq_length, to_seq_length = 4, 6 batch_size = 2 pos_embed = t5.RelativePositionEmbedding( num_heads=4, bidirectional=False, embeddings_initializer=tf.keras.initializers.Ones(), name="pos_embed") position_bias = pos_embed(from_seq_length, from_seq_length) l = t5.MultiHeadAttention(d_model=4, d_kv=2, num_heads=4, dropout_rate=0.1) query = tf.convert_to_tensor( np.ones((batch_size, from_seq_length, 4), dtype=np.float32)) self.assertEqual( l(query, position_bias=position_bias)["context"].shape, query.shape) kv = tf.convert_to_tensor( np.ones((batch_size, to_seq_length, 4), dtype=np.float32)) position_bias = 
pos_embed(from_seq_length, to_seq_length) outputs = l(query, kv=kv, position_bias=position_bias) self.assertEqual(outputs["context"].shape, query.shape) with distribution.scope(): l = t5.MultiHeadAttention( d_model=4, d_kv=head_size, num_heads=num_heads, dropout_rate=0.1) @tf.function def step(inputs): def _step_fn(inputs): cache = _create_cache(batch_size, from_seq_length, num_heads, head_size) mask = t5.make_causal_mask(tf.ones((batch_size, 1))) return l( query=inputs, mask=mask, cache=cache, decode_position=decode_position) outputs = distribution.run(_step_fn, args=(inputs,)) return tf.nest.map_structure(distribution.experimental_local_results, outputs) decode_position = 2 query = tf.convert_to_tensor(np.ones((2, 1, 4), dtype=np.float32)) local_outputs = step(query) self.assertEqual(local_outputs["context"][0].shape, (2, 1, 4)) self.assertNotEqual( np.sum(local_outputs["cache"]["key"][0][:, decode_position, ...].numpy()), 0.0) class T5Test(tf.test.TestCase, parameterized.TestCase): @combinations.generate( combinations.combine( distribution=[ strategy_combinations.default_strategy, strategy_combinations.cloud_tpu_strategy, ], mode="eager")) def test_attention_layers(self, distribution): num_heads, head_size = 2, 2 from_seq_length = 4 # TPU decoding should pre-allocate the entire sequence. batch_size = 2 with distribution.scope(): pos_embed = t5.RelativePositionEmbedding( num_heads=head_size, bidirectional=False, embeddings_initializer=tf.keras.initializers.Ones(), name="pos_embed") l = t5.SelfAttention( d_model=4, d_kv=head_size, num_heads=num_heads, dropout_rate=0.1) decode_position = 2 @tf.function def step(inputs): def _step_fn(inputs): cache = _create_cache(batch_size, from_seq_length, num_heads, head_size) mask = t5.make_causal_mask(tf.ones((batch_size, 1))) position_bias = pos_embed(from_seq_length, from_seq_length) return l( hidden_states=inputs, cache=cache, attention_mask=mask, decode_position=decode_position, position_bias=position_bias) outputs = distribution.run(_step_fn, args=(inputs,)) return tf.nest.map_structure(distribution.experimental_local_results, outputs) query = tf.convert_to_tensor(np.ones((2, 1, 4), dtype=np.float32)) local_outputs = step(query) self.assertEqual(local_outputs["layer_output"][0].shape, (2, 1, 4)) self.assertNotEqual( np.sum( local_outputs["cache"]["key"][0][:, decode_position, :, :].numpy()), 0.0) l = t5.CrossAttention( d_model=4, d_kv=head_size, num_heads=num_heads, dropout_rate=0.1) to_seq_length = 6 query = tf.convert_to_tensor( np.ones((2, from_seq_length, 4), dtype=np.float32)) kv = tf.convert_to_tensor( np.ones((2, to_seq_length, 4), dtype=np.float32)) @tf.function def step_cross_attn(inputs): def _step_fn(inputs): query, kv = inputs mask = t5.make_attention_mask( tf.ones((batch_size, from_seq_length)), tf.ones((batch_size, to_seq_length))) return l(hidden_states=query, kv=kv, attention_mask=mask) outputs = distribution.run(_step_fn, args=(inputs,)) return tf.nest.map_structure(distribution.experimental_local_results, outputs) local_outputs = step_cross_attn((query, kv)) self.assertEqual(local_outputs["layer_output"][0].shape, (2, from_seq_length, 4)) def test_encoder_block(self): batch_size = 2 from_seq_length = 5 d_model = 4 l = t5.EncoderBlock(d_model=4, d_kv=3, num_heads=2, d_ff=8, name="foo") pos_embed = t5.RelativePositionEmbedding( num_heads=2, bidirectional=True, embeddings_initializer=tf.keras.initializers.Ones(), name="bar") attention_mask = t5.make_attention_mask( tf.ones((batch_size, from_seq_length)), tf.ones((batch_size, 
from_seq_length))) position_bias = pos_embed(from_seq_length, from_seq_length) inputs = tf.ones((batch_size, from_seq_length, d_model), dtype=tf.float32) outputs = l( inputs, attention_mask=attention_mask, position_bias=position_bias) self.assertEqual(outputs.shape, (batch_size, from_seq_length, d_model)) def test_encdec_block(self): batch_size = 2 from_seq_length = 5 to_seq_length = 3 d_model = 4 l = t5.EncDecoderBlock(d_model=4, d_kv=3, num_heads=2, d_ff=8, name="foo") pos_embed = t5.RelativePositionEmbedding( num_heads=2, bidirectional=True, embeddings_initializer=tf.keras.initializers.Ones(), name="bar") encoder_decoder_mask = t5.make_attention_mask( tf.ones((batch_size, from_seq_length)), tf.ones((batch_size, to_seq_length))) position_bias = pos_embed(from_seq_length, from_seq_length) inputs = tf.ones((batch_size, from_seq_length, d_model), dtype=tf.float32) encoder_hidden_states = tf.ones((batch_size, to_seq_length, d_model), dtype=tf.float32) outputs = l( inputs, encoder_hidden_states, encoder_decoder_mask=encoder_decoder_mask, position_bias=position_bias) self.assertEqual(outputs[0].shape, (batch_size, from_seq_length, d_model)) @parameterized.named_parameters(("bfloat16", tf.bfloat16), ("float32", tf.float32)) def test_encoder(self, dtype): config = t5.T5TransformerParams( num_layers=2, d_model=4, d_kv=3, num_heads=4, d_ff=16, vocab_size=10, vocab_embeddings_initializer=tf.keras.initializers.Ones(), relative_embeddings_initializer=tf.keras.initializers.Ones()) encoder = t5.Encoder(config, compute_dtype=dtype) encoded = encoder(tf.zeros((4, 8), dtype=tf.int32)) self.assertEqual(encoded.shape, (4, 8, config.d_model)) @parameterized.named_parameters(("bfloat16", tf.bfloat16), ("float32", tf.float32)) def test_encoder_with_dense(self, dtype): config = t5.T5TransformerParams( num_layers=2, d_model=4, d_kv=3, num_heads=4, d_ff=16, vocab_size=10, vocab_embeddings_initializer=tf.keras.initializers.Ones(), relative_embeddings_initializer=tf.keras.initializers.Ones()) encoder = t5.Encoder(config, compute_dtype=dtype) encoded = encoder( tf.zeros((4, 8), dtype=tf.int32), dense_inputs=tf.ones((4, 2, 4), dtype=dtype)) self.assertEqual(encoded.shape, (4, 10, config.d_model)) @parameterized.named_parameters(("bfloat16", tf.bfloat16), ("float32", tf.float32)) def test_encoder_only_dense(self, dtype): config = t5.T5TransformerParams( num_layers=2, d_model=4, d_kv=3, num_heads=4, d_ff=16, vocab_size=10, vocab_embeddings_initializer=tf.keras.initializers.Ones(), relative_embeddings_initializer=tf.keras.initializers.Ones()) encoder = t5.Encoder(config, compute_dtype=dtype) encoded = encoder(dense_inputs=tf.ones((4, 2, 4), dtype=dtype)) self.assertEqual(encoded.shape, (4, 2, config.d_model)) def test_decoder(self): max_decode_len = 10 config = t5.T5TransformerParams( num_layers=2, d_model=4, d_kv=3, num_heads=4, d_ff=16, vocab_size=10, vocab_embeddings_initializer=tf.keras.initializers.Ones(), relative_embeddings_initializer=tf.keras.initializers.Ones()) decoder = t5.Decoder(config) batch_size = 4 targets = tf.zeros((4, 8), dtype=tf.int32) encoded = tf.zeros((4, 8, config.d_model), dtype=tf.float32) outputs = decoder(targets, encoded) logits = outputs["logits"] cache = outputs["cache"] self.assertEqual(logits.shape, (4, 8, config.vocab_size)) cache = {} cache[0] = _create_cache(batch_size, max_decode_len, config.num_heads, config.d_kv) cache[1] = _create_cache(batch_size, max_decode_len, config.num_heads, config.d_kv) targets = tf.zeros((4, 1), dtype=tf.int32) outputs = decoder( targets, encoded, 
decode_position=2, cache=cache, decode=True, max_decode_len=max_decode_len) logits = outputs["logits"] cache = outputs["cache"] self.assertEqual(logits.shape, (batch_size, 1, config.vocab_size)) for entry in cache.values(): for tensor in entry.values(): self.assertNotAllEqual(tensor.numpy()[:, 2, :, :], 0.0) @parameterized.named_parameters( ("t5_10", ("relu",), True, 26, False, tf.float32), ("t5_11", ("gelu", "linear"), False, 29, False, tf.float32), ("t5_10_bfloat16", ("relu",), True, 26, False, tf.bfloat16), ("t5_11_bfloat16", ("gelu", "linear"), False, 29, False, tf.bfloat16), ("t5_10_layer_sharing", ("relu",), True, 26, True, tf.float32), ("t5_11_layer_sharing", ("gelu", "linear"), False, 29, True, tf.float32), ("t5_10_bfloat16_layer_sharing", ("relu",), True, 26, True, tf.bfloat16), ("t5_11_bfloat16_layer_sharing", ("gelu", "linear"), False, 29, True, tf.bfloat16)) def test_transformer(self, ffn_activations, logits_via_embedding, expect_num_variables, layer_sharing, dtype): max_decode_len = 10 config = t5.T5TransformerParams( num_layers=1, d_model=8, d_kv=4, num_heads=4, d_ff=32, vocab_size=10, shared_embedding=True, layer_sharing=layer_sharing, ffn_activations=ffn_activations, logits_via_embedding=logits_via_embedding) transformer = t5.T5Transformer(config, compute_dtype=dtype) self.assertLen(transformer.trainable_variables, expect_num_variables) inputs = tf.convert_to_tensor( np.array([[2, 2, 1, 3, 1, 0], [3, 3, 1, 2, 2, 1]])) segments = tf.convert_to_tensor( np.array([[1, 1, 1, 2, 2, 0], [1, 1, 1, 2, 2, 2]])) outputs = transformer( encoder_input_tokens=inputs, decoder_input_tokens=inputs, decoder_target_tokens=inputs, encoder_segment_ids=segments, decoder_segment_ids=segments) cache = {} batch_size = 2 cache[0] = _create_cache( batch_size, max_decode_len, config.num_heads, config.d_kv, dtype=dtype) outputs = transformer.decode( encoder_input_tokens=inputs, encoded=outputs["encoded"], decoder_target_tokens=tf.ones((batch_size, 1), dtype=tf.int32), decode_position=1, decode=True, max_decode_len=max_decode_len, cache=cache) self.assertEqual(outputs["logits"].shape, (batch_size, 1, config.vocab_size)) for v in transformer.trainable_variables: print(v.name, v.shape) self.assertEqual(v.dtype, tf.float32) @parameterized.named_parameters( ("t5_10_dense", ("relu",), True, 26, False, tf.float32),) def test_transformer_with_dense(self, ffn_activations, logits_via_embedding, expect_num_variables, layer_sharing, dtype): max_decode_len = 10 config = t5.T5TransformerParams( num_layers=1, d_model=8, d_kv=4, num_heads=4, d_ff=32, vocab_size=10, shared_embedding=True, layer_sharing=layer_sharing, ffn_activations=ffn_activations, logits_via_embedding=logits_via_embedding) transformer = t5.T5Transformer(config, compute_dtype=dtype) self.assertLen(transformer.trainable_variables, expect_num_variables) inputs = tf.convert_to_tensor( np.array([[2, 2, 1, 3, 1, 0], [3, 3, 1, 2, 2, 1]])) segments = tf.convert_to_tensor( np.array([[1, 1, 1, 2, 2, 0], [1, 1, 1, 2, 2, 2]])) dense_inputs = tf.convert_to_tensor(np.random.randn(2, 2, 8), dtype=dtype) dense_segments = tf.convert_to_tensor(np.array([[1, 2], [1, 2]])) outputs = transformer( encoder_input_tokens=inputs, encoder_dense_inputs=dense_inputs, decoder_input_tokens=inputs, decoder_target_tokens=inputs, encoder_segment_ids=segments, encoder_dense_segment_ids=dense_segments, decoder_segment_ids=segments) cache = {} batch_size = 2 cache[0] = _create_cache( batch_size, max_decode_len, config.num_heads, config.d_kv, dtype=dtype) outputs = transformer.decode( 
encoder_input_tokens=inputs, encoder_dense_inputs=dense_inputs, encoded=outputs["encoded"], decoder_target_tokens=tf.ones((batch_size, 1), dtype=tf.int32), decode_position=1, decode=True, max_decode_len=max_decode_len, cache=cache) self.assertEqual(outputs["logits"].shape, (batch_size, 1, config.vocab_size)) for v in transformer.trainable_variables: print(v.name, v.shape) self.assertEqual(v.dtype, tf.float32) @parameterized.named_parameters( ("t5_10_dense_layerwise_relpos", ("relu",), True, 26, False, tf.float32, False, 1), ("t5_10_dense_shared_relpos_d2", ("relu",), True, 39, False, tf.float32, True, 2), ("t5_10_dense_layerwise_relpos_d2", ("relu",), True, 40, False, tf.float32, False, 2), ) def test_transformer_with_lw_relpos(self, ffn_activations, logits_via_embedding, expect_num_variables, layer_sharing, dtype, use_shared_relpos, num_decoder_layers): max_decode_len = 10 config = t5.T5TransformerParams( num_layers=1, num_decoder_layers=num_decoder_layers, d_model=8, d_kv=4, num_heads=4, d_ff=32, vocab_size=10, shared_embedding=True, layer_sharing=layer_sharing, ffn_activations=ffn_activations, logits_via_embedding=logits_via_embedding, use_shared_relative_position_bias=use_shared_relpos) transformer = t5.T5Transformer(config, compute_dtype=dtype) self.assertLen(transformer.trainable_variables, expect_num_variables) inputs = tf.convert_to_tensor( np.array([[2, 2, 1, 3, 1, 0], [3, 3, 1, 2, 2, 1]])) segments = tf.convert_to_tensor( np.array([[1, 1, 1, 2, 2, 0], [1, 1, 1, 2, 2, 2]])) dense_inputs = tf.convert_to_tensor(np.random.randn(2, 2, 8), dtype=dtype) dense_segments = tf.convert_to_tensor(np.array([[1, 2], [1, 2]])) outputs = transformer( encoder_input_tokens=inputs, encoder_dense_inputs=dense_inputs, decoder_input_tokens=inputs, decoder_target_tokens=inputs, encoder_segment_ids=segments, encoder_dense_segment_ids=dense_segments, decoder_segment_ids=segments) cache = {} batch_size = 2 for i in range(num_decoder_layers): cache[i] = _create_cache( batch_size, max_decode_len, config.num_heads, config.d_kv, dtype=dtype) outputs = transformer.decode( encoder_input_tokens=inputs, encoder_dense_inputs=dense_inputs, encoded=outputs["encoded"], decoder_target_tokens=tf.ones((batch_size, 1), dtype=tf.int32), decode_position=1, decode=True, max_decode_len=max_decode_len, cache=cache) self.assertEqual(outputs["logits"].shape, (batch_size, 1, config.vocab_size)) for v in transformer.trainable_variables: print(v.name, v.shape) self.assertEqual(v.dtype, tf.float32) @parameterized.named_parameters( ("t5_10", ("relu",), True, 26, False, tf.float32),) def test_transformer_with_dense_only(self, ffn_activations, logits_via_embedding, expect_num_variables, layer_sharing, dtype): max_decode_len = 10 config = t5.T5TransformerParams( num_layers=1, d_model=8, d_kv=4, num_heads=4, d_ff=32, vocab_size=10, shared_embedding=True, layer_sharing=layer_sharing, ffn_activations=ffn_activations, logits_via_embedding=logits_via_embedding) transformer = t5.T5Transformer(config, compute_dtype=dtype) self.assertLen(transformer.trainable_variables, expect_num_variables) decoder_inputs = tf.convert_to_tensor( np.array([[2, 2, 1, 3, 1, 0], [3, 3, 1, 2, 2, 1]])) decoder_segments = tf.convert_to_tensor( np.array([[1, 1, 1, 2, 2, 0], [1, 1, 1, 2, 2, 2]])) dense_inputs = tf.convert_to_tensor(np.random.randn(2, 2, 8), dtype=dtype) dense_segments = tf.convert_to_tensor(np.array([[1, 2], [1, 2]])) outputs = transformer( encoder_dense_inputs=dense_inputs, encoder_dense_segment_ids=dense_segments, decoder_input_tokens=decoder_inputs, 
decoder_target_tokens=decoder_inputs, decoder_segment_ids=decoder_segments) cache = {} batch_size = 2 cache[0] = _create_cache( batch_size, max_decode_len, config.num_heads, config.d_kv, dtype=dtype) outputs = transformer.decode( encoder_dense_inputs=dense_inputs, encoded=outputs["encoded"], decoder_target_tokens=tf.ones((batch_size, 1), dtype=tf.int32), decode_position=1, decode=True, max_decode_len=max_decode_len, cache=cache) self.assertEqual(outputs["logits"].shape, (batch_size, 1, config.vocab_size)) for v in transformer.trainable_variables: print(v.name, v.shape) self.assertEqual(v.dtype, tf.float32) @parameterized.named_parameters( ("t5_10", ("relu",), True, 39, tf.float32, 2), ("t5_10_bfloat16", ("relu",), True, 39, tf.bfloat16, 2)) def test_transformer_different_num_decoder_layers(self, ffn_activations, logits_via_embedding, expect_num_variables, dtype, num_decoder_layers): max_decode_len = 10 config = t5.T5TransformerParams( num_decoder_layers=num_decoder_layers, num_layers=1, d_model=8, d_kv=4, num_heads=4, d_ff=32, vocab_size=10, shared_embedding=True, ffn_activations=ffn_activations, logits_via_embedding=logits_via_embedding) transformer = t5.T5Transformer(config, compute_dtype=dtype) self.assertLen(transformer.trainable_variables, expect_num_variables) inputs = tf.convert_to_tensor( np.array([[2, 2, 1, 3, 1, 0], [3, 3, 1, 2, 2, 1]])) segments = tf.convert_to_tensor( np.array([[1, 1, 1, 2, 2, 0], [1, 1, 1, 2, 2, 2]])) outputs = transformer( encoder_input_tokens=inputs, decoder_input_tokens=inputs, decoder_target_tokens=inputs, encoder_segment_ids=segments, decoder_segment_ids=segments) cache = {} batch_size = 2 for i in range(num_decoder_layers): cache[i] = _create_cache( batch_size, max_decode_len, config.num_heads, config.d_kv, dtype=dtype) outputs = transformer.decode( encoder_input_tokens=inputs, encoded=outputs["encoded"], decoder_target_tokens=tf.ones((batch_size, 1), dtype=tf.int32), decode_position=1, decode=True, max_decode_len=max_decode_len, cache=cache) self.assertEqual(outputs["logits"].shape, (batch_size, 1, config.vocab_size)) for v in transformer.trainable_variables: print(v.name, v.shape) self.assertEqual(v.dtype, tf.float32) if __name__ == "__main__": tf.test.main()
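# Illustrative usage sketch (not part of the original test file): builds a tiny
# T5Transformer with the same constructor arguments exercised above and runs a
# single teacher-forced forward pass. All sizes and the token/segment values
# below are placeholders.
import numpy as np
import tensorflow as tf
from official.nlp.modeling.models import t5


def tiny_t5_forward_pass():
  config = t5.T5TransformerParams(
      num_layers=1,
      d_model=8,
      d_kv=4,
      num_heads=4,
      d_ff=32,
      vocab_size=10,
      shared_embedding=True,
      ffn_activations=("relu",),
      logits_via_embedding=True)
  transformer = t5.T5Transformer(config)
  tokens = tf.convert_to_tensor(
      np.array([[2, 2, 1, 3, 1, 0], [3, 3, 1, 2, 2, 1]]))
  segments = tf.convert_to_tensor(
      np.array([[1, 1, 1, 2, 2, 0], [1, 1, 1, 2, 2, 2]]))
  outputs = transformer(
      encoder_input_tokens=tokens,
      decoder_input_tokens=tokens,
      decoder_target_tokens=tokens,
      encoder_segment_ids=segments,
      decoder_segment_ids=segments)
  # outputs["encoded"] holds the encoder representations consumed by
  # transformer.decode() during incremental decoding (see the tests above).
  return outputs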
27,024
36.639276
80
py
models
models-master/official/nlp/modeling/models/dual_encoder_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for dual encoder network.""" from absl.testing import parameterized import tensorflow as tf from official.nlp.modeling import networks from official.nlp.modeling.models import dual_encoder class DualEncoderTest(tf.test.TestCase, parameterized.TestCase): @parameterized.parameters((192, 'logits'), (768, 'predictions')) def test_dual_encoder(self, hidden_size, output): """Validate that the Keras object can be created.""" # Build a transformer network to use within the dual encoder model. vocab_size = 100 sequence_length = 512 test_network = networks.BertEncoder( vocab_size=vocab_size, num_layers=2, hidden_size=hidden_size, dict_outputs=True) # Create a dual encoder model with the created network. dual_encoder_model = dual_encoder.DualEncoder( test_network, max_seq_length=sequence_length, output=output) # Create a set of 2-dimensional inputs (the first dimension is implicit). left_word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) left_mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) left_type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) right_word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) right_mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) right_type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) if output == 'logits': outputs = dual_encoder_model([ left_word_ids, left_mask, left_type_ids, right_word_ids, right_mask, right_type_ids ]) _ = outputs['left_logits'] elif output == 'predictions': outputs = dual_encoder_model([left_word_ids, left_mask, left_type_ids]) # Validate that the outputs are of the expected shape. expected_sequence_shape = [None, sequence_length, 768] self.assertAllEqual(expected_sequence_shape, outputs['sequence_output'].shape.as_list()) left_encoded = outputs['pooled_output'] expected_encoding_shape = [None, 768] self.assertAllEqual(expected_encoding_shape, left_encoded.shape.as_list()) @parameterized.parameters((192, 'logits'), (768, 'predictions')) def test_dual_encoder_tensor_call(self, hidden_size, output): """Validate that the Keras object can be invoked.""" # Build a transformer network to use within the dual encoder model. del hidden_size sequence_length = 2 test_network = networks.BertEncoder(vocab_size=100, num_layers=2) # Create a dual encoder model with the created network. dual_encoder_model = dual_encoder.DualEncoder( test_network, max_seq_length=sequence_length, output=output) # Create a set of 2-dimensional data tensors to feed into the model. word_ids = tf.constant([[1, 1], [2, 2]], dtype=tf.int32) mask = tf.constant([[1, 1], [1, 0]], dtype=tf.int32) type_ids = tf.constant([[1, 1], [2, 2]], dtype=tf.int32) # Invoke the model model on the tensors. In Eager mode, this does the # actual calculation. (We can't validate the outputs, since the network is # too complex: this simply ensures we're not hitting runtime errors.) 
    if output == 'logits':
      _ = dual_encoder_model(
          [word_ids, mask, type_ids, word_ids, mask, type_ids])
    elif output == 'predictions':
      _ = dual_encoder_model([word_ids, mask, type_ids])

  def test_serialize_deserialize(self):
    """Validate that the dual encoder model can be serialized / deserialized."""
    # Build a transformer network to use within the dual encoder model.
    sequence_length = 32
    test_network = networks.BertEncoder(vocab_size=100, num_layers=2)

    # Create a dual encoder model with the created network. (Note that all the
    # args are different, so we can catch any serialization mismatches.)
    dual_encoder_model = dual_encoder.DualEncoder(
        test_network, max_seq_length=sequence_length, output='predictions')

    # Create another dual encoder model via serialization and deserialization.
    config = dual_encoder_model.get_config()
    new_dual_encoder = dual_encoder.DualEncoder.from_config(config)

    # Validate that the config can be forced to JSON.
    _ = new_dual_encoder.to_json()

    # If the serialization was successful, the new config should match the old.
    self.assertAllEqual(dual_encoder_model.get_config(),
                        new_dual_encoder.get_config())


if __name__ == '__main__':
  tf.test.main()
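# Illustrative usage sketch (not part of the original test file): encodes two
# pieces of text with the same DualEncoder in 'predictions' mode and scores the
# pair with a cosine similarity. The similarity step is only one way the pooled
# encodings might be used; it is not part of the model itself, and the token
# ids below are placeholders.
import tensorflow as tf
from official.nlp.modeling import networks
from official.nlp.modeling.models import dual_encoder


def encode_and_score():
  seq_len = 8
  encoder = networks.BertEncoder(vocab_size=100, num_layers=2)
  model = dual_encoder.DualEncoder(
      encoder, max_seq_length=seq_len, output='predictions')

  def encode(word_ids):
    mask = tf.cast(tf.not_equal(word_ids, 0), tf.int32)
    type_ids = tf.zeros_like(word_ids)
    return model([word_ids, mask, type_ids])['pooled_output']

  left = encode(tf.constant([[1, 2, 3, 4, 0, 0, 0, 0]], dtype=tf.int32))
  right = encode(tf.constant([[5, 6, 7, 0, 0, 0, 0, 0]], dtype=tf.int32))
  score = tf.reduce_sum(
      tf.nn.l2_normalize(left, axis=-1) * tf.nn.l2_normalize(right, axis=-1),
      axis=-1)
  return score  # cosine similarity per example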
5,120
42.398305
80
py
models
models-master/official/nlp/modeling/models/bert_span_labeler_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for BERT trainer network.""" from absl.testing import parameterized import tensorflow as tf from official.nlp.modeling import networks from official.nlp.modeling.models import bert_span_labeler class BertSpanLabelerTest(tf.test.TestCase, parameterized.TestCase): @parameterized.parameters(True, False) def test_bert_trainer(self, dict_outputs): """Validate that the Keras object can be created.""" # Build a transformer network to use within the BERT trainer. vocab_size = 100 sequence_length = 512 test_network = networks.BertEncoder( vocab_size=vocab_size, num_layers=2, dict_outputs=dict_outputs) # Create a BERT trainer with the created network. bert_trainer_model = bert_span_labeler.BertSpanLabeler(test_network) # Create a set of 2-dimensional inputs (the first dimension is implicit). word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) # Invoke the trainer model on the inputs. This causes the layer to be built. cls_outs = bert_trainer_model([word_ids, mask, type_ids]) # Validate that there are 2 outputs are of the expected shape. self.assertLen(cls_outs, 2) expected_shape = [None, sequence_length] for out in cls_outs: self.assertAllEqual(expected_shape, out.shape.as_list()) def test_bert_trainer_named_compilation(self): """Validate compilation using explicit output names.""" # Build a transformer network to use within the BERT trainer. vocab_size = 100 test_network = networks.BertEncoder(vocab_size=vocab_size, num_layers=2) # Create a BERT trainer with the created network. bert_trainer_model = bert_span_labeler.BertSpanLabeler(test_network) # Attempt to compile the model using a string-keyed dict of output names to # loss functions. This will validate that the outputs are named as we # expect. bert_trainer_model.compile( optimizer='sgd', loss={ 'start_positions': 'mse', 'end_positions': 'mse' }) def test_bert_trainer_tensor_call(self): """Validate that the Keras object can be invoked.""" # Build a transformer network to use within the BERT trainer. (Here, we use # a short sequence_length for convenience.) test_network = networks.BertEncoder(vocab_size=100, num_layers=2) # Create a BERT trainer with the created network. bert_trainer_model = bert_span_labeler.BertSpanLabeler(test_network) # Create a set of 2-dimensional data tensors to feed into the model. word_ids = tf.constant([[1, 1], [2, 2]], dtype=tf.int32) mask = tf.constant([[1, 1], [1, 0]], dtype=tf.int32) type_ids = tf.constant([[1, 1], [2, 2]], dtype=tf.int32) # Invoke the trainer model on the tensors. In Eager mode, this does the # actual calculation. (We can't validate the outputs, since the network is # too complex: this simply ensures we're not hitting runtime errors.) 
_ = bert_trainer_model([word_ids, mask, type_ids]) def test_serialize_deserialize(self): """Validate that the BERT trainer can be serialized and deserialized.""" # Build a transformer network to use within the BERT trainer. test_network = networks.BertEncoder(vocab_size=100, num_layers=2) # Create a BERT trainer with the created network. (Note that all the args # are different, so we can catch any serialization mismatches.) bert_trainer_model = bert_span_labeler.BertSpanLabeler(test_network) # Create another BERT trainer via serialization and deserialization. config = bert_trainer_model.get_config() new_bert_trainer_model = bert_span_labeler.BertSpanLabeler.from_config( config) # Validate that the config can be forced to JSON. _ = new_bert_trainer_model.to_json() # If the serialization was successful, the new config should match the old. self.assertAllEqual(bert_trainer_model.get_config(), new_bert_trainer_model.get_config()) if __name__ == '__main__': tf.test.main()
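# Illustrative usage sketch (not part of the original test file): compiles a
# small BertSpanLabeler with per-output losses keyed by the output names
# validated above, then fits it on one batch of random data. The losses assume
# the default raw-logit outputs; all sizes and data are placeholders.
import numpy as np
import tensorflow as tf
from official.nlp.modeling import networks
from official.nlp.modeling.models import bert_span_labeler


def tiny_span_labeler_fit():
  batch, seq_len = 4, 16
  encoder = networks.BertEncoder(vocab_size=100, num_layers=2)
  model = bert_span_labeler.BertSpanLabeler(encoder)
  span_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
  model.compile(
      optimizer='sgd',
      loss={'start_positions': span_loss, 'end_positions': span_loss})
  features = {
      'input_word_ids': np.random.randint(100, size=(batch, seq_len)),
      'input_mask': np.ones((batch, seq_len), dtype=np.int32),
      'input_type_ids': np.zeros((batch, seq_len), dtype=np.int32),
  }
  targets = {
      'start_positions': np.random.randint(seq_len, size=(batch,)),
      'end_positions': np.random.randint(seq_len, size=(batch,)),
  }
  model.fit(features, targets, epochs=1, verbose=0)
  return model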
4,769
40.842105
80
py
models
models-master/official/nlp/modeling/models/electra_pretrainer.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Trainer network for ELECTRA models.""" # pylint: disable=g-classes-have-attributes import copy import tensorflow as tf from official.modeling import tf_utils from official.nlp.modeling import layers @tf.keras.utils.register_keras_serializable(package='Text') class ElectraPretrainer(tf.keras.Model): """ELECTRA network training model. This is an implementation of the network structure described in "ELECTRA: Pre-training Text Encoders as Discriminators Rather Than Generators" ( https://arxiv.org/abs/2003.10555). The ElectraPretrainer allows a user to pass in two transformer models, one for generator, the other for discriminator, and instantiates the masked language model (at generator side) and classification networks (at discriminator side) that are used to create the training objectives. *Note* that the model is constructed by Keras Subclass API, where layers are defined inside `__init__` and `call()` implements the computation. Args: generator_network: A transformer network for generator, this network should output a sequence output and an optional classification output. discriminator_network: A transformer network for discriminator, this network should output a sequence output vocab_size: Size of generator output vocabulary num_classes: Number of classes to predict from the classification network for the generator network (not used now) num_token_predictions: Number of tokens to predict from the masked LM. mlm_activation: The activation (if any) to use in the masked LM and classification networks. If None, no activation will be used. mlm_initializer: The initializer (if any) to use in the masked LM and classification networks. Defaults to a Glorot uniform initializer. output_type: The output style for this network. Can be either `logits` or `predictions`. 
disallow_correct: Whether to disallow the generator to generate the exact same token in the original sentence """ def __init__(self, generator_network, discriminator_network, vocab_size, num_classes, num_token_predictions, mlm_activation=None, mlm_initializer='glorot_uniform', output_type='logits', disallow_correct=False, **kwargs): super(ElectraPretrainer, self).__init__() self._config = { 'generator_network': generator_network, 'discriminator_network': discriminator_network, 'vocab_size': vocab_size, 'num_classes': num_classes, 'num_token_predictions': num_token_predictions, 'mlm_activation': mlm_activation, 'mlm_initializer': mlm_initializer, 'output_type': output_type, 'disallow_correct': disallow_correct, } for k, v in kwargs.items(): self._config[k] = v self.generator_network = generator_network self.discriminator_network = discriminator_network self.vocab_size = vocab_size self.num_classes = num_classes self.num_token_predictions = num_token_predictions self.mlm_activation = mlm_activation self.mlm_initializer = mlm_initializer self.output_type = output_type self.disallow_correct = disallow_correct self.masked_lm = layers.MaskedLM( embedding_table=generator_network.get_embedding_table(), activation=mlm_activation, initializer=tf_utils.clone_initializer(mlm_initializer), output=output_type, name='generator_masked_lm') self.classification = layers.ClassificationHead( inner_dim=generator_network.get_config()['hidden_size'], num_classes=num_classes, initializer=tf_utils.clone_initializer(mlm_initializer), name='generator_classification_head') self.discriminator_projection = tf.keras.layers.Dense( units=discriminator_network.get_config()['hidden_size'], activation=mlm_activation, kernel_initializer=tf_utils.clone_initializer(mlm_initializer), name='discriminator_projection_head') self.discriminator_head = tf.keras.layers.Dense( units=1, kernel_initializer=tf_utils.clone_initializer(mlm_initializer)) def call(self, inputs): # pytype: disable=signature-mismatch # overriding-parameter-count-checks """ELECTRA forward pass. Args: inputs: A dict of all inputs, same as the standard BERT model. Returns: outputs: A dict of pretrainer model outputs, including (1) lm_outputs: A `[batch_size, num_token_predictions, vocab_size]` tensor indicating logits on masked positions. (2) sentence_outputs: A `[batch_size, num_classes]` tensor indicating logits for nsp task. (3) disc_logits: A `[batch_size, sequence_length]` tensor indicating logits for discriminator replaced token detection task. (4) disc_label: A `[batch_size, sequence_length]` tensor indicating target labels for discriminator replaced token detection task. """ input_word_ids = inputs['input_word_ids'] input_mask = inputs['input_mask'] input_type_ids = inputs['input_type_ids'] masked_lm_positions = inputs['masked_lm_positions'] ### Generator ### sequence_output = self.generator_network( [input_word_ids, input_mask, input_type_ids])['sequence_output'] # The generator encoder network may get outputs from all layers. 
if isinstance(sequence_output, list): sequence_output = sequence_output[-1] lm_outputs = self.masked_lm(sequence_output, masked_lm_positions) sentence_outputs = self.classification(sequence_output) ### Sampling from generator ### fake_data = self._get_fake_data(inputs, lm_outputs, duplicate=True) ### Discriminator ### disc_input = fake_data['inputs'] disc_label = fake_data['is_fake_tokens'] disc_sequence_output = self.discriminator_network([ disc_input['input_word_ids'], disc_input['input_mask'], disc_input['input_type_ids'] ])['sequence_output'] # The discriminator encoder network may get outputs from all layers. if isinstance(disc_sequence_output, list): disc_sequence_output = disc_sequence_output[-1] disc_logits = self.discriminator_head( self.discriminator_projection(disc_sequence_output)) disc_logits = tf.squeeze(disc_logits, axis=-1) outputs = { 'lm_outputs': lm_outputs, 'sentence_outputs': sentence_outputs, 'disc_logits': disc_logits, 'disc_label': disc_label, } return outputs def _get_fake_data(self, inputs, mlm_logits, duplicate=True): """Generate corrupted data for discriminator. Args: inputs: A dict of all inputs, same as the input of `call()` function mlm_logits: The generator's output logits duplicate: Whether to copy the original inputs dict during modifications Returns: A dict of generated fake data """ inputs = unmask(inputs, duplicate) if self.disallow_correct: disallow = tf.one_hot( inputs['masked_lm_ids'], depth=self.vocab_size, dtype=tf.float32) else: disallow = None sampled_tokens = tf.stop_gradient( sample_from_softmax(mlm_logits, disallow=disallow)) sampled_tokids = tf.argmax(sampled_tokens, -1, output_type=tf.int32) updated_input_ids, masked = scatter_update(inputs['input_word_ids'], sampled_tokids, inputs['masked_lm_positions']) labels = masked * (1 - tf.cast( tf.equal(updated_input_ids, inputs['input_word_ids']), tf.int32)) updated_inputs = get_updated_inputs( inputs, duplicate, input_word_ids=updated_input_ids) return { 'inputs': updated_inputs, 'is_fake_tokens': labels, 'sampled_tokens': sampled_tokens } @property def checkpoint_items(self): """Returns a dictionary of items to be additionally checkpointed.""" items = dict(encoder=self.discriminator_network) return items def get_config(self): return self._config @classmethod def from_config(cls, config, custom_objects=None): return cls(**config) def scatter_update(sequence, updates, positions): """Scatter-update a sequence. Args: sequence: A `[batch_size, seq_len]` or `[batch_size, seq_len, depth]` tensor. updates: A tensor of size `batch_size*seq_len(*depth)`. positions: A `[batch_size, n_positions]` tensor. Returns: updated_sequence: A `[batch_size, seq_len]` or `[batch_size, seq_len, depth]` tensor of "sequence" with elements at "positions" replaced by the values at "updates". Updates to index 0 are ignored. If there are duplicated positions the update is only applied once. updates_mask: A `[batch_size, seq_len]` mask tensor of which inputs were updated. 
""" shape = tf_utils.get_shape_list(sequence, expected_rank=[2, 3]) depth_dimension = (len(shape) == 3) if depth_dimension: batch_size, seq_len, depth = shape else: batch_size, seq_len = shape depth = 1 sequence = tf.expand_dims(sequence, -1) n_positions = tf_utils.get_shape_list(positions)[1] shift = tf.expand_dims(seq_len * tf.range(batch_size), -1) flat_positions = tf.reshape(positions + shift, [-1, 1]) flat_updates = tf.reshape(updates, [-1, depth]) updates = tf.scatter_nd(flat_positions, flat_updates, [batch_size * seq_len, depth]) updates = tf.reshape(updates, [batch_size, seq_len, depth]) flat_updates_mask = tf.ones([batch_size * n_positions], tf.int32) updates_mask = tf.scatter_nd(flat_positions, flat_updates_mask, [batch_size * seq_len]) updates_mask = tf.reshape(updates_mask, [batch_size, seq_len]) not_first_token = tf.concat([ tf.zeros((batch_size, 1), tf.int32), tf.ones((batch_size, seq_len - 1), tf.int32) ], -1) updates_mask *= not_first_token updates_mask_3d = tf.expand_dims(updates_mask, -1) # account for duplicate positions if sequence.dtype == tf.float32: updates_mask_3d = tf.cast(updates_mask_3d, tf.float32) updates /= tf.maximum(1.0, updates_mask_3d) else: assert sequence.dtype == tf.int32 updates = tf.math.floordiv(updates, tf.maximum(1, updates_mask_3d)) updates_mask = tf.minimum(updates_mask, 1) updates_mask_3d = tf.minimum(updates_mask_3d, 1) updated_sequence = (((1 - updates_mask_3d) * sequence) + (updates_mask_3d * updates)) if not depth_dimension: updated_sequence = tf.squeeze(updated_sequence, -1) return updated_sequence, updates_mask def sample_from_softmax(logits, disallow=None): """Implement softmax sampling using gumbel softmax trick. Args: logits: A `[batch_size, num_token_predictions, vocab_size]` tensor indicating the generator output logits for each masked position. disallow: If `None`, we directly sample tokens from the logits. Otherwise, this is a tensor of size `[batch_size, num_token_predictions, vocab_size]` indicating the true word id in each masked position. Returns: sampled_tokens: A `[batch_size, num_token_predictions, vocab_size]` one hot tensor indicating the sampled word id in each masked position. """ if disallow is not None: logits -= 1000.0 * disallow uniform_noise = tf.random.uniform( tf_utils.get_shape_list(logits), minval=0, maxval=1) gumbel_noise = -tf.math.log(-tf.math.log(uniform_noise + 1e-9) + 1e-9) # Here we essentially follow the original paper and use temperature 1.0 for # generator output logits. sampled_tokens = tf.one_hot( tf.argmax(tf.nn.softmax(logits + gumbel_noise), -1, output_type=tf.int32), logits.shape[-1]) return sampled_tokens def unmask(inputs, duplicate): unmasked_input_word_ids, _ = scatter_update(inputs['input_word_ids'], inputs['masked_lm_ids'], inputs['masked_lm_positions']) return get_updated_inputs( inputs, duplicate, input_word_ids=unmasked_input_word_ids) def get_updated_inputs(inputs, duplicate, **kwargs): if duplicate: new_inputs = copy.copy(inputs) else: new_inputs = inputs for k, v in kwargs.items(): new_inputs[k] = v return new_inputs
13,036
37.916418
100
py
models
models-master/official/nlp/modeling/models/__init__.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Models are combinations of `tf.keras` layers and models that can be trained. Several pre-built canned models are provided to train encoder networks. These models are intended as both convenience functions and canonical examples. """ from official.nlp.modeling.models.bert_classifier import BertClassifier from official.nlp.modeling.models.bert_pretrainer import * from official.nlp.modeling.models.bert_span_labeler import BertSpanLabeler from official.nlp.modeling.models.bert_token_classifier import BertTokenClassifier from official.nlp.modeling.models.dual_encoder import DualEncoder from official.nlp.modeling.models.electra_pretrainer import ElectraPretrainer from official.nlp.modeling.models.seq2seq_transformer import * from official.nlp.modeling.models.t5 import T5Transformer from official.nlp.modeling.models.t5 import T5TransformerParams from official.nlp.modeling.models.xlnet import XLNetClassifier from official.nlp.modeling.models.xlnet import XLNetPretrainer from official.nlp.modeling.models.xlnet import XLNetSpanLabeler
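# Illustrative usage sketch (not part of the original file): the canned models
# exported here are typically paired with an encoder network from
# `official.nlp.modeling.networks`; the sizes below are placeholders.
from official.nlp.modeling import models
from official.nlp.modeling import networks

encoder = networks.BertEncoder(vocab_size=30522, num_layers=4)
classifier = models.BertClassifier(network=encoder, num_classes=2)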
1,654
50.71875
82
py
models
models-master/official/nlp/modeling/models/bert_classifier.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """BERT cls-token classifier.""" # pylint: disable=g-classes-have-attributes import collections import tensorflow as tf from official.nlp.modeling import layers @tf.keras.utils.register_keras_serializable(package='Text') class BertClassifier(tf.keras.Model): """Classifier model based on a BERT-style transformer-based encoder. This is an implementation of the network structure surrounding a transformer encoder as described in "BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding" (https://arxiv.org/abs/1810.04805). The BertClassifier allows a user to pass in a transformer stack, and instantiates a classification network based on the passed `num_classes` argument. If `num_classes` is set to 1, a regression network is instantiated. *Note* that the model is constructed by [Keras Functional API](https://keras.io/guides/functional_api/). Args: network: A transformer network. This network should output a sequence output and a classification output. Furthermore, it should expose its embedding table via a "get_embedding_table" method. num_classes: Number of classes to predict from the classification network. initializer: The initializer (if any) to use in the classification networks. Defaults to a Glorot uniform initializer. dropout_rate: The dropout probability of the cls head. use_encoder_pooler: Whether to use the pooler layer pre-defined inside the encoder. head_name: Name of the classification head. cls_head: (Optional) The layer instance to use for the classifier head. It should take in the output from network and produce the final logits. If set, the arguments ('num_classes', 'initializer', 'dropout_rate', 'use_encoder_pooler', 'head_name') will be ignored. """ def __init__(self, network, num_classes, initializer='glorot_uniform', dropout_rate=0.1, use_encoder_pooler=True, head_name='sentence_prediction', cls_head=None, **kwargs): self.num_classes = num_classes self.head_name = head_name self.initializer = initializer self.use_encoder_pooler = use_encoder_pooler # We want to use the inputs of the passed network as the inputs to this # Model. To do this, we need to keep a handle to the network inputs for use # when we construct the Model object at the end of init. inputs = network.inputs if use_encoder_pooler: # Because we have a copy of inputs to create this Model object, we can # invoke the Network object with its own input tensors to start the Model. 
outputs = network(inputs) if isinstance(outputs, list): cls_inputs = outputs[1] else: cls_inputs = outputs['pooled_output'] cls_inputs = tf.keras.layers.Dropout(rate=dropout_rate)(cls_inputs) else: outputs = network(inputs) if isinstance(outputs, list): cls_inputs = outputs[0] else: cls_inputs = outputs['sequence_output'] if cls_head: classifier = cls_head else: classifier = layers.ClassificationHead( inner_dim=0 if use_encoder_pooler else cls_inputs.shape[-1], num_classes=num_classes, initializer=initializer, dropout_rate=dropout_rate, name=head_name) predictions = classifier(cls_inputs) # b/164516224 # Once we've created the network using the Functional API, we call # super().__init__ as though we were invoking the Functional API Model # constructor, resulting in this object having all the properties of a model # created using the Functional API. Once super().__init__ is called, we # can assign attributes to `self` - note that all `self` assignments are # below this line. super(BertClassifier, self).__init__( inputs=inputs, outputs=predictions, **kwargs) self._network = network self._cls_head = cls_head config_dict = self._make_config_dict() # We are storing the config dict as a namedtuple here to ensure checkpoint # compatibility with an earlier version of this model which did not track # the config dict attribute. TF does not track immutable attrs which # do not contain Trackables, so by creating a config namedtuple instead of # a dict we avoid tracking it. config_cls = collections.namedtuple('Config', config_dict.keys()) self._config = config_cls(**config_dict) self.classifier = classifier @property def checkpoint_items(self): items = dict(encoder=self._network) if hasattr(self.classifier, 'checkpoint_items'): for key, item in self.classifier.checkpoint_items.items(): items['.'.join([self.classifier.name, key])] = item return items def get_config(self): return dict(self._config._asdict()) @classmethod def from_config(cls, config, custom_objects=None): return cls(**config) def _make_config_dict(self): return { 'network': self._network, 'num_classes': self.num_classes, 'head_name': self.head_name, 'initializer': self.initializer, 'use_encoder_pooler': self.use_encoder_pooler, 'cls_head': self._cls_head, }
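# Illustrative usage sketch (not part of the original file): wraps a small
# BertEncoder in a BertClassifier and trains it for one step on random data.
# The loss assumes the classification head emits raw logits; encoder size,
# optimizer and data below are placeholders.
import numpy as np
import tensorflow as tf
from official.nlp.modeling import networks


def tiny_classifier_fit():
  batch, seq_len, num_classes = 8, 16, 3
  encoder = networks.BertEncoder(vocab_size=100, num_layers=2)
  model = BertClassifier(
      network=encoder, num_classes=num_classes, dropout_rate=0.1)
  model.compile(
      optimizer='adam',
      loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
      metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
  features = {
      'input_word_ids': np.random.randint(100, size=(batch, seq_len)),
      'input_mask': np.ones((batch, seq_len), dtype=np.int32),
      'input_type_ids': np.zeros((batch, seq_len), dtype=np.int32),
  }
  labels = np.random.randint(num_classes, size=(batch,))
  model.fit(features, labels, epochs=1, verbose=0)
  return model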
5,896
38.844595
80
py
models
models-master/official/nlp/modeling/layers/block_diag_feedforward.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Keras-based gated feedforward layer.""" # pylint: disable=g-classes-have-attributes from typing import Optional import tensorflow as tf from official.modeling import tf_utils class BlockDiagFeedforward(tf.keras.layers.Layer): """Block diagonal feedforward layer. This layer replaces the weight matrix of the output_dense layer with a block diagonal matrix to save layer parameters and FLOPs. A linear mixing layer can be added optionally to improve layer expressibility. Args: intermediate_size: Size of the intermediate layer. intermediate_activation: Activation for the intermediate layer. dropout: Dropout probability for the output dropout. num_blocks: The number of blocks for the block diagonal matrix of the output_dense layer. apply_mixing: Apply linear mixing if True. kernel_initializer: Initializer for dense layer kernels. bias_initializer: Initializer for dense layer biases. kernel_regularizer: Regularizer for dense layer kernels. bias_regularizer: Regularizer for dense layer biases. activity_regularizer: Regularizer for dense layer activity. kernel_constraint: Constraint for dense layer kernels. bias_constraint: Constraint for dense layer kernels. """ def __init__( self, intermediate_size: int, intermediate_activation: str, dropout: float, num_blocks: int = 1, apply_mixing: bool = True, kernel_initializer: str = "glorot_uniform", bias_initializer: str = "zeros", kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None, bias_regularizer: Optional[tf.keras.regularizers.Regularizer] = None, activity_regularizer: Optional[tf.keras.regularizers.Regularizer] = None, kernel_constraint: Optional[tf.keras.constraints.Constraint] = None, bias_constraint: Optional[tf.keras.constraints.Constraint] = None, **kwargs): # pylint: disable=g-doc-args super().__init__(**kwargs) self._intermediate_size = intermediate_size self._intermediate_activation = intermediate_activation self._dropout = dropout self._num_blocks = num_blocks self._apply_mixing = apply_mixing if intermediate_size % num_blocks != 0: raise ValueError("Intermediate_size (%d) isn't a multiple of num_blocks " "(%d)." 
% (intermediate_size, num_blocks)) self._kernel_initializer = tf.keras.initializers.get(kernel_initializer) self._bias_initializer = tf.keras.initializers.get(bias_initializer) self._kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer) self._bias_regularizer = tf.keras.regularizers.get(bias_regularizer) self._activity_regularizer = tf.keras.regularizers.get(activity_regularizer) self._kernel_constraint = tf.keras.constraints.get(kernel_constraint) self._bias_constraint = tf.keras.constraints.get(bias_constraint) def build(self, input_shape): hidden_size = input_shape.as_list()[-1] common_kwargs = dict( kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer, activity_regularizer=self._activity_regularizer, kernel_constraint=self._kernel_constraint, bias_constraint=self._bias_constraint) self._intermediate_dense = tf.keras.layers.EinsumDense( "abc,cde->abde", output_shape=(None, self._num_blocks, self._intermediate_size // self._num_blocks), bias_axes="de", name="intermediate", kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer), bias_initializer=tf_utils.clone_initializer(self._bias_initializer), **common_kwargs) policy = tf.keras.mixed_precision.global_policy() if policy.name == "mixed_bfloat16": # bfloat16 causes BERT with the LAMB optimizer to not converge # as well, so we use float32. policy = tf.float32 self._intermediate_activation_layer = tf.keras.layers.Activation( self._intermediate_activation, dtype=policy) self._output_dense = tf.keras.layers.EinsumDense( "abde,deo->abdo", output_shape=(None, self._num_blocks, hidden_size // self._num_blocks), bias_axes="do", name="output", kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer), bias_initializer=tf_utils.clone_initializer(self._bias_initializer), **common_kwargs) if self._apply_mixing: self._output_mixing = tf.keras.layers.EinsumDense( "abdo,de->abeo", output_shape=(None, self._num_blocks, hidden_size // self._num_blocks), name="output_mixing", kernel_initializer=tf_utils.clone_initializer( self._kernel_initializer), bias_initializer=tf_utils.clone_initializer(self._bias_initializer), **common_kwargs) self._output_reshape = tf.keras.layers.Reshape((-1, hidden_size)) self._output_dropout = tf.keras.layers.Dropout(rate=self._dropout) def get_config(self): config = { "intermediate_size": self._intermediate_size, "intermediate_activation": self._intermediate_activation, "dropout": self._dropout, "num_blocks": self._num_blocks, "apply_mixing": self._apply_mixing, "kernel_initializer": tf.keras.initializers.serialize(self._kernel_initializer), "bias_initializer": tf.keras.initializers.serialize(self._bias_initializer), "kernel_regularizer": tf.keras.regularizers.serialize(self._kernel_regularizer), "bias_regularizer": tf.keras.regularizers.serialize(self._bias_regularizer), "activity_regularizer": tf.keras.regularizers.serialize(self._activity_regularizer), "kernel_constraint": tf.keras.constraints.serialize(self._kernel_constraint), "bias_constraint": tf.keras.constraints.serialize(self._bias_constraint) } base_config = super().get_config() return dict(list(base_config.items()) + list(config.items())) def call(self, inputs): intermediate_output = self._intermediate_dense(inputs) intermediate_output = self._intermediate_activation_layer( intermediate_output) layer_output = self._output_dense(intermediate_output) if self._apply_mixing: layer_output = self._output_mixing(layer_output) layer_output = self._output_reshape(layer_output) layer_output = 
self._output_dropout(layer_output) return layer_output
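A minimal usage sketch for the layer above. The module name `block_diag_feedforward` and the specific sizes are assumptions for illustration; per the constructor check and the `build()` logic, `intermediate_size` must be divisible by `num_blocks`, and in practice the input's hidden size should be as well, since the output dense uses `hidden_size // num_blocks` per block before the final reshape.

```python
import tensorflow as tf
from official.nlp.modeling.layers import block_diag_feedforward  # module name assumed

layer = block_diag_feedforward.BlockDiagFeedforward(
    intermediate_size=128,          # must be divisible by num_blocks
    intermediate_activation="gelu",
    dropout=0.1,
    num_blocks=4,
    apply_mixing=True)              # adds the optional linear mixing step

x = tf.random.normal([2, 16, 64])   # [batch, seq_length, hidden]; hidden divisible by num_blocks
y = layer(x)                        # block-diagonal feedforward, same output shape as input
print(y.shape)                      # (2, 16, 64)
```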
7,233
40.815029
80
py
models
models-master/official/nlp/modeling/layers/transformer_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for Keras-based transformer block layer.""" import tensorflow as tf from official.nlp.modeling.layers import transformer def _create_cache(batch_size, init_decode_length, num_heads, head_size): return { 'key': tf.zeros([batch_size, init_decode_length, num_heads, head_size], dtype=tf.float32), 'value': tf.zeros([batch_size, init_decode_length, num_heads, head_size], dtype=tf.float32) } class TransformerDecoderBlockTest(tf.test.TestCase): def test_decoder_block_with_cache(self): num_attention_heads = 2 hidden_size = 16 decoder_block = transformer.TransformerDecoderBlock( num_attention_heads=num_attention_heads, intermediate_size=32, intermediate_activation='relu', dropout_rate=0.1, attention_dropout_rate=0.1) # Forward path. dummy_tensor = tf.zeros([2, 4, 16], dtype=tf.float32) dummy_mask = tf.zeros([2, 4, 4], dtype=tf.float32) inputs = [dummy_tensor, dummy_tensor, dummy_mask, dummy_mask] cache = _create_cache(2, 0, num_attention_heads, hidden_size // num_attention_heads) output, cache = decoder_block(inputs, cache) self.assertEqual(output.shape, (2, 4, hidden_size)) self.assertEqual(cache['value'].shape, (2, 4, 2, 8)) def test_use_bias_norm_first(self): num_attention_heads = 2 hidden_size = 16 decoder_block = transformer.TransformerDecoderBlock( num_attention_heads=num_attention_heads, intermediate_size=32, intermediate_activation='relu', dropout_rate=0.1, attention_dropout_rate=0.1, use_bias=False, norm_first=True, norm_epsilon=1e-6, intermediate_dropout=0.1, attention_initializer=tf.keras.initializers.RandomUniform( minval=0., maxval=1.)) # Forward path. dummy_tensor = tf.zeros([2, 4, 16], dtype=tf.float32) dummy_mask = tf.zeros([2, 4, 4], dtype=tf.float32) inputs = [dummy_tensor, dummy_tensor, dummy_mask, dummy_mask] output, _ = decoder_block(inputs) self.assertEqual(output.shape, (2, 4, hidden_size)) def test_get_config(self): num_attention_heads = 2 decoder_block = transformer.TransformerDecoderBlock( num_attention_heads=num_attention_heads, intermediate_size=32, intermediate_activation='relu', dropout_rate=0.1, attention_dropout_rate=0.1, use_bias=False, norm_first=True, norm_epsilon=1e-6, intermediate_dropout=0.1, attention_initializer=tf.keras.initializers.RandomUniform( minval=0., maxval=1.)) decoder_block_config = decoder_block.get_config() new_decoder_block = transformer.TransformerDecoderBlock.from_config( decoder_block_config) self.assertEqual(decoder_block_config, new_decoder_block.get_config()) if __name__ == '__main__': tf.test.main()
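The pattern the tests above exercise, collected into a standalone sketch: build a `TransformerDecoderBlock`, feed it `[target, memory, self-attention mask, cross-attention mask]`, and pass a cache dict (here with an empty decode length, as in `_create_cache`) for incremental decoding. The shapes mirror the test values and are illustrative, not requirements.

```python
import tensorflow as tf
from official.nlp.modeling.layers import transformer

num_heads, hidden_size = 2, 16
head_size = hidden_size // num_heads
block = transformer.TransformerDecoderBlock(
    num_attention_heads=num_heads,
    intermediate_size=32,
    intermediate_activation='relu',
    dropout_rate=0.1,
    attention_dropout_rate=0.1)

target = tf.zeros([2, 4, hidden_size])   # decoder input
memory = tf.zeros([2, 4, hidden_size])   # encoder output
mask = tf.zeros([2, 4, 4])               # [batch, target_len, source_len]
cache = {
    'key': tf.zeros([2, 0, num_heads, head_size]),
    'value': tf.zeros([2, 0, num_heads, head_size]),
}

output, cache = block([target, memory, mask, mask], cache)
print(output.shape)            # (2, 4, 16)
print(cache['value'].shape)    # (2, 4, 2, 8)
```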
3,563
35.367347
74
py
models
models-master/official/nlp/modeling/layers/factorized_embedding_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for FactorizedEmbedding layer.""" import numpy as np import tensorflow as tf from official.nlp.modeling.layers import factorized_embedding class FactorizedEmbeddingTest(tf.test.TestCase): def test_layer_creation(self): vocab_size = 31 embedding_width = 27 output_dim = 45 test_layer = factorized_embedding.FactorizedEmbedding( vocab_size=vocab_size, embedding_width=embedding_width, output_dim=output_dim) # Create a 2-dimensional input (the first dimension is implicit). sequence_length = 23 input_tensor = tf.keras.Input(shape=(sequence_length), dtype=tf.int32) output_tensor = test_layer(input_tensor) # The output should be the same as the input, save that it has an extra # embedding_width dimension on the end. expected_output_shape = [None, sequence_length, output_dim] self.assertEqual(expected_output_shape, output_tensor.shape.as_list()) self.assertEqual(output_tensor.dtype, tf.float32) def test_layer_invocation(self): vocab_size = 31 embedding_width = 27 output_dim = 45 test_layer = factorized_embedding.FactorizedEmbedding( vocab_size=vocab_size, embedding_width=embedding_width, output_dim=output_dim) # Create a 2-dimensional input (the first dimension is implicit). sequence_length = 23 input_tensor = tf.keras.Input(shape=(sequence_length), dtype=tf.int32) output_tensor = test_layer(input_tensor) # Create a model from the test layer. model = tf.keras.Model(input_tensor, output_tensor) # Invoke the model on test data. We can't validate the output data itself # (the NN is too complex) but this will rule out structural runtime errors. batch_size = 3 input_data = np.random.randint( vocab_size, size=(batch_size, sequence_length)) output = model.predict(input_data) self.assertEqual(tf.float32, output.dtype) if __name__ == "__main__": tf.test.main()
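An eager-mode variant of what the tests above check, using the same parameter values: token ids are embedded with a small `[vocab_size, embedding_width]` table and then projected up to `output_dim`, so the output's last dimension is `output_dim`.

```python
import numpy as np
import tensorflow as tf
from official.nlp.modeling.layers import factorized_embedding

layer = factorized_embedding.FactorizedEmbedding(
    vocab_size=31, embedding_width=27, output_dim=45)

ids = np.random.randint(31, size=(3, 23))           # [batch, seq_length]
embeddings = layer(tf.constant(ids, dtype=tf.int32))
print(embeddings.shape)                              # (3, 23, 45)
```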
2,579
35.338028
79
py
models
models-master/official/nlp/modeling/layers/spectral_normalization.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Normalization layers. ## References: [1] Yuichi Yoshida, Takeru Miyato. Spectral Norm Regularization for Improving the Generalizability of Deep Learning. _arXiv preprint arXiv:1705.10941_, 2017. https://arxiv.org/abs/1705.10941 [2] Takeru Miyato, Toshiki Kataoka, Masanori Koyama, Yuichi Yoshida. Spectral normalization for generative adversarial networks. In _International Conference on Learning Representations_, 2018. [3] Henry Gouk, Eibe Frank, Bernhard Pfahringer, Michael Cree. Regularisation of neural networks by enforcing lipschitz continuity. _arXiv preprint arXiv:1804.04368_, 2018. https://arxiv.org/abs/1804.04368 """ import numpy as np import tensorflow as tf class SpectralNormalization(tf.keras.layers.Wrapper): """Implements spectral normalization for Dense layer.""" def __init__(self, layer, iteration=1, norm_multiplier=0.95, training=True, aggregation=tf.VariableAggregation.MEAN, inhere_layer_name=False, **kwargs): """Initializer. Args: layer: (tf.keras.layers.Layer) A TF Keras layer to apply normalization to. iteration: (int) The number of power iteration to perform to estimate weight matrix's singular value. norm_multiplier: (float) Multiplicative constant to threshold the normalization. Usually under normalization, the singular value will converge to this value. training: (bool) Whether to perform power iteration to update the singular value estimate. aggregation: (tf.VariableAggregation) Indicates how a distributed variable will be aggregated. Accepted values are constants defined in the class tf.VariableAggregation. inhere_layer_name: (bool) Whether to inhere the name of the input layer. **kwargs: (dict) Other keyword arguments for the layers.Wrapper class. """ self.iteration = iteration self.do_power_iteration = training self.aggregation = aggregation self.norm_multiplier = norm_multiplier # Set layer name. wrapper_name = kwargs.pop('name', None) if inhere_layer_name: wrapper_name = layer.name if not isinstance(layer, tf.keras.layers.Layer): raise ValueError('`layer` must be a `tf.keras.layer.Layer`. 
' 'Observed `{}`'.format(layer)) super().__init__( layer, name=wrapper_name, **kwargs) def build(self, input_shape): # pytype: disable=signature-mismatch # overriding-parameter-count-checks super().build(input_shape) self.layer.kernel._aggregation = self.aggregation # pylint: disable=protected-access self._dtype = self.layer.kernel.dtype self.w = self.layer.kernel self.w_shape = self.w.shape.as_list() self.v = self.add_weight( shape=(1, np.prod(self.w_shape[:-1])), initializer=tf.initializers.random_normal(), trainable=False, name='v', dtype=self.dtype, aggregation=self.aggregation) self.u = self.add_weight( shape=(1, self.w_shape[-1]), initializer=tf.initializers.random_normal(), trainable=False, name='u', dtype=self.dtype, aggregation=self.aggregation) self.update_weights() def call(self, inputs, *, training=None): training = self.do_power_iteration if training is None else training if training: u_update_op, v_update_op, w_update_op = self.update_weights( training=training) output = self.layer(inputs) w_restore_op = self.restore_weights() # Register update ops. self.add_update(u_update_op) self.add_update(v_update_op) self.add_update(w_update_op) self.add_update(w_restore_op) else: output = self.layer(inputs) return output def update_weights(self, *, training=True): w_reshaped = tf.reshape(self.w, [-1, self.w_shape[-1]]) u_hat = self.u v_hat = self.v if training: for _ in range(self.iteration): v_hat = tf.nn.l2_normalize(tf.matmul(u_hat, tf.transpose(w_reshaped))) u_hat = tf.nn.l2_normalize(tf.matmul(v_hat, w_reshaped)) sigma = tf.matmul(tf.matmul(v_hat, w_reshaped), tf.transpose(u_hat)) # Convert sigma from a 1x1 matrix to a scalar. sigma = tf.reshape(sigma, []) u_update_op = self.u.assign(u_hat) v_update_op = self.v.assign(v_hat) # Bound spectral norm to be not larger than self.norm_multiplier. w_norm = tf.cond((self.norm_multiplier / sigma) < 1, lambda: # pylint:disable=g-long-lambda (self.norm_multiplier / sigma) * self.w, lambda: self.w) w_update_op = self.layer.kernel.assign(w_norm) return u_update_op, v_update_op, w_update_op def restore_weights(self): """Restores layer weights to maintain gradient update (See Alg 1 of [1]).""" return self.layer.kernel.assign(self.w) class SpectralNormalizationConv2D(tf.keras.layers.Wrapper): """Implements spectral normalization for Conv2D layer based on [3].""" def __init__(self, layer, iteration=1, norm_multiplier=0.95, training=True, aggregation=tf.VariableAggregation.MEAN, legacy_mode=False, **kwargs): """Initializer. Args: layer: (tf.keras.layers.Layer) A TF Keras layer to apply normalization to. iteration: (int) The number of power iteration to perform to estimate weight matrix's singular value. norm_multiplier: (float) Multiplicative constant to threshold the normalization. Usually under normalization, the singular value will converge to this value. training: (bool) Whether to perform power iteration to update the singular value estimate. aggregation: (tf.VariableAggregation) Indicates how a distributed variable will be aggregated. Accepted values are constants defined in the class tf.VariableAggregation. legacy_mode: (bool) Whether to use the legacy implementation where the dimension of the u and v vectors are set to the batch size. It should not be enabled unless for backward compatibility reasons. **kwargs: (dict) Other keyword arguments for the layers.Wrapper class. 
""" self.iteration = iteration self.do_power_iteration = training self.aggregation = aggregation self.norm_multiplier = norm_multiplier self.legacy_mode = legacy_mode # Set layer attributes. layer._name += '_spec_norm' if not isinstance(layer, tf.keras.layers.Conv2D): raise ValueError( 'layer must be a `tf.keras.layer.Conv2D` instance. You passed: {input}' .format(input=layer)) super().__init__(layer, **kwargs) def build(self, input_shape): # pytype: disable=signature-mismatch # overriding-parameter-count-checks if not self.layer.built: self.layer.build(input_shape) self.layer.kernel._aggregation = self.aggregation # pylint: disable=protected-access self._dtype = self.layer.kernel.dtype # Shape (kernel_size_1, kernel_size_2, in_channel, out_channel). self.w = self.layer.kernel self.w_shape = self.w.shape.as_list() self.strides = self.layer.strides # Set the dimensions of u and v vectors. batch_size = input_shape[0] uv_dim = batch_size if self.legacy_mode else 1 # Resolve shapes. in_height = input_shape[1] in_width = input_shape[2] in_channel = self.w_shape[2] out_height = in_height // self.strides[0] out_width = in_width // self.strides[1] out_channel = self.w_shape[3] self.in_shape = (uv_dim, in_height, in_width, in_channel) self.out_shape = (uv_dim, out_height, out_width, out_channel) self.v = self.add_weight( shape=self.in_shape, initializer=tf.initializers.random_normal(), trainable=False, name='v', dtype=self.dtype, aggregation=self.aggregation) self.u = self.add_weight( shape=self.out_shape, initializer=tf.initializers.random_normal(), trainable=False, name='u', dtype=self.dtype, aggregation=self.aggregation) super().build() def call(self, inputs): u_update_op, v_update_op, w_update_op = self.update_weights() output = self.layer(inputs) w_restore_op = self.restore_weights() # Register update ops. self.add_update(u_update_op) self.add_update(v_update_op) self.add_update(w_update_op) self.add_update(w_restore_op) return output def update_weights(self): """Computes power iteration for convolutional filters based on [3].""" # Initialize u, v vectors. u_hat = self.u v_hat = self.v if self.do_power_iteration: for _ in range(self.iteration): # Updates v. v_ = tf.nn.conv2d_transpose( u_hat, self.w, output_shape=self.in_shape, strides=self.strides, padding='SAME') v_hat = tf.nn.l2_normalize(tf.reshape(v_, [1, -1])) v_hat = tf.reshape(v_hat, v_.shape) # Updates u. u_ = tf.nn.conv2d(v_hat, self.w, strides=self.strides, padding='SAME') u_hat = tf.nn.l2_normalize(tf.reshape(u_, [1, -1])) u_hat = tf.reshape(u_hat, u_.shape) v_w_hat = tf.nn.conv2d(v_hat, self.w, strides=self.strides, padding='SAME') sigma = tf.matmul(tf.reshape(v_w_hat, [1, -1]), tf.reshape(u_hat, [-1, 1])) # Convert sigma from a 1x1 matrix to a scalar. sigma = tf.reshape(sigma, []) u_update_op = self.u.assign(u_hat) v_update_op = self.v.assign(v_hat) w_norm = tf.cond((self.norm_multiplier / sigma) < 1, lambda: # pylint:disable=g-long-lambda (self.norm_multiplier / sigma) * self.w, lambda: self.w) w_update_op = self.layer.kernel.assign(w_norm) return u_update_op, v_update_op, w_update_op def restore_weights(self): """Restores layer weights to maintain gradient update (See Alg 1 of [1]).""" return self.layer.kernel.assign(self.w)
10,728
35.003356
106
py
models
models-master/official/nlp/modeling/layers/transformer_encoder_block.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Keras-based TransformerEncoder block layer.""" from typing import Any, Optional from absl import logging import tensorflow as tf from official.modeling import tf_utils from official.nlp.modeling.layers import util @tf.keras.utils.register_keras_serializable(package="Text") class TransformerEncoderBlock(tf.keras.layers.Layer): """TransformerEncoderBlock layer. This layer implements the Transformer Encoder from "Attention Is All You Need". (https://arxiv.org/abs/1706.03762), which combines a `tf.keras.layers.MultiHeadAttention` layer with a two-layer feedforward network. References: [Attention Is All You Need](https://arxiv.org/abs/1706.03762) [BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding](https://arxiv.org/abs/1810.04805) """ def __init__(self, num_attention_heads, inner_dim, inner_activation, output_range=None, kernel_initializer="glorot_uniform", bias_initializer="zeros", kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, use_bias=True, norm_first=False, norm_epsilon=1e-12, output_dropout=0.0, attention_dropout=0.0, inner_dropout=0.0, attention_initializer=None, attention_axes=None, use_query_residual=True, key_dim=None, value_dim=None, output_last_dim=None, diff_q_kv_att_layer_norm=False, return_attention_scores=False, **kwargs): """Initializes `TransformerEncoderBlock`. Note: If `output_last_dim` is used and `use_query_residual` is `True`, the `output_last_dim`'s value must equal the first input's last dimension for the query residual connection to work. This is because the residual connection after the multi-head-attention requires their dimensions to match. If `use_query_residual` is `False`, the `output_last_dim` dictactes the last dimension of the output of this module and the multi-head-attention. E.g. let's say input dims are `[batch_size, seq_dim, input_last_dim]`. Scenario 1: If `output_last_dim` is not `None`, then the output dims of this module would be `[batch_size, seq_dim, output_last_dim]`. Note `key_dim` is overriden by `output_last_dim`. Scenario 2: If `output_last_dim` is `None` and `key_dim` is not `None`, then the output dims of this module would be `[batch_size, seq_dim, key_dim]`. Scenario 3: If the `output_last_dim` and `key_dim` are both `None`, the output dims would be `[batch_size, seq_dim, input_last_dim]`. Args: num_attention_heads: Number of attention heads. inner_dim: The output dimension of the first Dense layer in a two-layer feedforward network. inner_activation: The activation for the first Dense layer in a two-layer feedforward network. output_range: the sequence output range, [0, output_range) for slicing the target sequence. `None` means the target sequence is not sliced. kernel_initializer: Initializer for dense layer kernels. bias_initializer: Initializer for dense layer biases. kernel_regularizer: Regularizer for dense layer kernels. 
bias_regularizer: Regularizer for dense layer biases. activity_regularizer: Regularizer for dense layer activity. kernel_constraint: Constraint for dense layer kernels. bias_constraint: Constraint for dense layer kernels. use_bias: Whether to enable use_bias in attention layer. If set False, use_bias in attention layer is disabled. norm_first: Whether to normalize inputs to attention and intermediate dense layers. If set False, output of attention and intermediate dense layers is normalized. norm_epsilon: Epsilon value to initialize normalization layers. output_dropout: Dropout probability for the post-attention and output dropout. attention_dropout: Dropout probability for within the attention layer. inner_dropout: Dropout probability for the first Dense layer in a two-layer feedforward network. attention_initializer: Initializer for kernels of attention layers. If set `None`, attention layers use kernel_initializer as initializer for kernel. attention_axes: axes over which the attention is applied. `None` means attention over all axes, but batch, heads, and features. use_query_residual: Toggle to execute residual connection after attention. key_dim: `key_dim` for the `tf.keras.layers.MultiHeadAttention`. If `None`, we use the first `input_shape`'s last dim. value_dim: `value_dim` for the `tf.keras.layers.MultiHeadAttention`. output_last_dim: Final dimension of the output of this module. This also dictates the value for the final dimension of the multi-head-attention. When it's `None`, we use, in order of decreasing precedence, `key_dim` * `num_heads` or the first `input_shape`'s last dim as the output's last dim. diff_q_kv_att_layer_norm: If `True`, create a separate attention layer norm layer for query and key-value if `norm_first` is `True`. Invalid to set to `True` if `norm_first` is `False`. return_attention_scores: If `True`, the output of this layer will be a tuple and additionally contain the attention scores in the shape of `[batch_size, num_attention_heads, seq_dim, seq_dim]`. **kwargs: keyword arguments. """ util.filter_kwargs(kwargs) super().__init__(**kwargs) # Deprecation warning. if output_range is not None: logging.warning("`output_range` is available as an argument for `call()`." 
"The `output_range` as __init__ argument is deprecated.") self._num_heads = num_attention_heads self._inner_dim = inner_dim self._inner_activation = inner_activation self._attention_dropout_rate = attention_dropout self._output_dropout_rate = output_dropout self._output_range = output_range self._kernel_initializer = tf.keras.initializers.get(kernel_initializer) self._bias_initializer = tf.keras.initializers.get(bias_initializer) self._kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer) self._bias_regularizer = tf.keras.regularizers.get(bias_regularizer) self._activity_regularizer = tf.keras.regularizers.get(activity_regularizer) self._kernel_constraint = tf.keras.constraints.get(kernel_constraint) self._bias_constraint = tf.keras.constraints.get(bias_constraint) self._use_bias = use_bias self._norm_first = norm_first self._norm_epsilon = norm_epsilon self._inner_dropout = inner_dropout self._use_query_residual = use_query_residual self._key_dim = key_dim self._value_dim = value_dim self._output_last_dim = output_last_dim self._diff_q_kv_att_layer_norm = diff_q_kv_att_layer_norm self._return_attention_scores = return_attention_scores if attention_initializer: self._attention_initializer = tf.keras.initializers.get( attention_initializer) else: self._attention_initializer = tf_utils.clone_initializer( self._kernel_initializer) self._attention_axes = attention_axes if self._diff_q_kv_att_layer_norm and not self._norm_first: raise ValueError("Setting `diff_q_and_kv_attention_layer_norm` to True" "when `norm_first` is False is invalid.") def build(self, input_shape): if isinstance(input_shape, tf.TensorShape): input_tensor_shape = input_shape elif isinstance(input_shape, (list, tuple)): input_tensor_shape = tf.TensorShape(input_shape[0]) else: raise ValueError( "The type of input shape argument is not supported, got: %s" % type(input_shape)) einsum_equation = "abc,cd->abd" if len(input_tensor_shape.as_list()) > 3: einsum_equation = "...bc,cd->...bd" hidden_size = input_tensor_shape[-1] if hidden_size % self._num_heads != 0: logging.warning( "The input size (%d) is not a multiple of the number of attention " "heads (%d)", hidden_size, self._num_heads) if self._key_dim is None: self._key_dim = int(hidden_size // self._num_heads) if self._output_last_dim is None: last_output_shape = hidden_size else: last_output_shape = self._output_last_dim common_kwargs = dict( bias_regularizer=self._bias_regularizer, activity_regularizer=self._activity_regularizer, kernel_constraint=self._kernel_constraint, bias_constraint=self._bias_constraint) self._attention_layer = tf.keras.layers.MultiHeadAttention( num_heads=self._num_heads, key_dim=self._key_dim, value_dim=self._value_dim, dropout=self._attention_dropout_rate, use_bias=self._use_bias, kernel_initializer=self._attention_initializer, bias_initializer=tf_utils.clone_initializer(self._bias_initializer), attention_axes=self._attention_axes, output_shape=self._output_last_dim, name="self_attention", **common_kwargs) self._attention_dropout = tf.keras.layers.Dropout( rate=self._attention_dropout_rate) # Use float32 in layernorm for numeric stability. # It is probably safe in mixed_float16, but we haven't validated this yet. 
self._attention_layer_norm = ( tf.keras.layers.LayerNormalization( name="self_attention_layer_norm", axis=-1, epsilon=self._norm_epsilon, dtype=tf.float32)) self._attention_layer_norm_kv = self._attention_layer_norm if self._diff_q_kv_att_layer_norm: self._attention_layer_norm_kv = ( tf.keras.layers.LayerNormalization( name="self_attention_layer_norm_kv", axis=-1, epsilon=self._norm_epsilon, dtype=tf.float32)) self._intermediate_dense = tf.keras.layers.EinsumDense( einsum_equation, output_shape=(None, self._inner_dim), bias_axes="d", kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer), bias_initializer=tf_utils.clone_initializer(self._bias_initializer), name="intermediate", **common_kwargs) policy = tf.keras.mixed_precision.global_policy() if policy.name == "mixed_bfloat16": # bfloat16 causes BERT with the LAMB optimizer to not converge # as well, so we use float32. # TODO(b/154538392): Investigate this. policy = tf.float32 self._intermediate_activation_layer = tf.keras.layers.Activation( self._inner_activation, dtype=policy) self._inner_dropout_layer = tf.keras.layers.Dropout( rate=self._inner_dropout) self._output_dense = tf.keras.layers.EinsumDense( einsum_equation, output_shape=(None, last_output_shape), bias_axes="d", name="output", kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer), bias_initializer=tf_utils.clone_initializer(self._bias_initializer), **common_kwargs) self._output_dropout = tf.keras.layers.Dropout( rate=self._output_dropout_rate) # Use float32 in layernorm for numeric stability. self._output_layer_norm = tf.keras.layers.LayerNormalization( name="output_layer_norm", axis=-1, epsilon=self._norm_epsilon, dtype=tf.float32) super().build(input_shape) def get_config(self): config = { "num_attention_heads": self._num_heads, "inner_dim": self._inner_dim, "inner_activation": self._inner_activation, "output_dropout": self._output_dropout_rate, "attention_dropout": self._attention_dropout_rate, "output_range": self._output_range, "kernel_initializer": tf_utils.serialize_initializer( self._kernel_initializer, use_legacy_format=True ), "bias_initializer": tf_utils.serialize_initializer( self._bias_initializer, use_legacy_format=True ), "kernel_regularizer": tf_utils.serialize_regularizer( self._kernel_regularizer, use_legacy_format=True ), "bias_regularizer": tf_utils.serialize_regularizer( self._bias_regularizer, use_legacy_format=True ), "activity_regularizer": tf_utils.serialize_regularizer( self._activity_regularizer, use_legacy_format=True ), "kernel_constraint": tf_utils.serialize_constraint( self._kernel_constraint, use_legacy_format=True ), "bias_constraint": tf_utils.serialize_constraint( self._bias_constraint, use_legacy_format=True ), "use_bias": self._use_bias, "norm_first": self._norm_first, "norm_epsilon": self._norm_epsilon, "inner_dropout": self._inner_dropout, "attention_initializer": tf_utils.serialize_initializer( self._attention_initializer, use_legacy_format=True ), "attention_axes": self._attention_axes, "use_query_residual": self._use_query_residual, "key_dim": self._key_dim, "value_dim": self._value_dim, "output_last_dim": self._output_last_dim, "diff_q_kv_att_layer_norm": self._diff_q_kv_att_layer_norm, } base_config = super().get_config() return dict(list(base_config.items()) + list(config.items())) def call(self, inputs: Any, output_range: Optional[tf.Tensor] = None) -> Any: """Transformer self-attention encoder block call. Args: inputs: a single tensor or a list of tensors. 
`input tensor` as the single sequence of embeddings. [`input tensor`, `attention mask`] to have the additional attention mask. [`query tensor`, `key value tensor`, `attention mask`] to have separate input streams for the query, and key/value to the multi-head attention. output_range: the sequence output range, [0, output_range) for slicing the target sequence. `None` means the target sequence is not sliced. If you would like to have no change to the model training, it is better to only set the `output_range` for serving. Returns: An output tensor with the same dimensions as input/query tensor. """ if isinstance(inputs, (list, tuple)): if len(inputs) == 2: input_tensor, attention_mask = inputs key_value = None elif len(inputs) == 3: input_tensor, key_value, attention_mask = inputs else: raise ValueError("Unexpected inputs to %s with length at %d" % (self.__class__, len(inputs))) else: input_tensor, key_value, attention_mask = (inputs, None, None) if output_range is None: output_range = self._output_range if output_range: if self._norm_first: source_tensor = input_tensor[:, 0:output_range, :] input_tensor = self._attention_layer_norm(input_tensor) if key_value is not None: key_value = self._attention_layer_norm_kv(key_value) target_tensor = input_tensor[:, 0:output_range, :] if attention_mask is not None: attention_mask = attention_mask[:, 0:output_range, :] else: if self._norm_first: source_tensor = input_tensor input_tensor = self._attention_layer_norm(input_tensor) if key_value is not None: key_value = self._attention_layer_norm_kv(key_value) target_tensor = input_tensor if key_value is None: key_value = input_tensor if self._return_attention_scores: attention_output, attention_scores = self._attention_layer( query=target_tensor, value=key_value, attention_mask=attention_mask, return_attention_scores=True) else: attention_output = self._attention_layer( query=target_tensor, value=key_value, attention_mask=attention_mask) attention_output = self._attention_dropout(attention_output) if self._norm_first: # Important to not combine `self._norm_first` and # `self._use_query_residual` into one if clause because else is only for # `_norm_first == False`. if self._use_query_residual: attention_output = source_tensor + attention_output else: if self._use_query_residual: attention_output = target_tensor + attention_output attention_output = self._attention_layer_norm(attention_output) if self._norm_first: source_attention_output = attention_output attention_output = self._output_layer_norm(attention_output) inner_output = self._intermediate_dense(attention_output) inner_output = self._intermediate_activation_layer(inner_output) inner_output = self._inner_dropout_layer(inner_output) layer_output = self._output_dense(inner_output) layer_output = self._output_dropout(layer_output) if self._norm_first: layer_output = source_attention_output + layer_output else: # During mixed precision training, layer norm output is always fp32 for # now. Casts fp32 for the subsequent add. layer_output = tf.cast(layer_output, tf.float32) layer_output = self._output_layer_norm(layer_output + attention_output) if self._return_attention_scores: return layer_output, attention_scores else: return layer_output
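A minimal usage sketch of the encoder block above (sizes are illustrative). As described in `call()`, a single tensor, `[tensor, attention mask]`, or `[query, key_value, attention mask]` are all accepted; here the two-element form is used with a `[batch, target_len, source_len]` mask, and `norm_first=True` selects the pre-layer-norm variant.

```python
import tensorflow as tf
from official.nlp.modeling.layers import transformer_encoder_block

block = transformer_encoder_block.TransformerEncoderBlock(
    num_attention_heads=4,
    inner_dim=256,
    inner_activation="gelu",
    output_dropout=0.1,
    attention_dropout=0.1,
    norm_first=True)

x = tf.random.normal([2, 8, 64])    # [batch, seq_length, hidden]
mask = tf.ones([2, 8, 8])           # 1 = attend, 0 = mask out
y = block([x, mask])
print(y.shape)                      # (2, 8, 64)
```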
18,367
43.474576
80
py
models
models-master/official/nlp/modeling/layers/kernel_attention.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Keras-based kernel attention layer.""" import functools import math import tensorflow as tf from official.modeling import tf_utils _NUMERIC_STABLER = 1e-6 class KernelMask(tf.keras.layers.Layer): """Creates kernel attention mask. inputs: from_tensor: 2D or 3D Tensor of shape [batch_size, from_seq_length, ...]. mask: a Tensor of shape [batch_size, from_seq_length] which indicates which part of the inputs we should not attend. Returns: float Tensor of shape [batch_size, from_seq_length] that KernelAttention takes as mask. """ def call(self, inputs, mask): mask = tf.cast(mask, inputs.dtype) return mask def pad_to_chunk_length(tensor, axis, chunk_length, padding=None): """Pads a tensor so that shape[axis] is divisible by chunk_length. Args: tensor: Input tensor to pad. axis: Axis to pad along. chunk_length: The output tensor will have shape[axis] divisible by chunk_length. padding: Pad the input tensor across the axis from either left or right if padding is set to "left" or "right"; applies no padding if padding is set to None. In the latter case, the axis dimension of the input tensor must be divisible by the chunk_length. Returns: Padded tensor with shape[axis] divisible by chunk_length. """ if padding is None: return tensor shape = tf.shape(tensor) rank = tf.rank(tensor) if axis < 0: axis += rank axis_length = shape[axis] pad_length = -axis_length % chunk_length if padding == "right": axis_paddings = [[0, pad_length]] elif padding == "left": axis_paddings = [[pad_length, 0]] else: raise ValueError( "Illegal padding value; must be one of \"left\", \"right\" or None.") paddings = tf.concat([ tf.zeros([axis, 2], dtype=tf.int32), axis_paddings, tf.zeros([rank - axis - 1, 2], dtype=tf.int32) ], axis=0) return tf.pad(tensor, paddings) def split_tensor_into_chunks(tensor, axis, chunk_length): """Reshape tensor along given axis using chunk_length. Args: tensor: Input tensor. axis: Reshape tensor along this axis. chunk_length: Split the axis into [axis/chunk_length, chunk_length] Returns: Reshaped tensor. """ shape = tf.shape(tensor) num_chunks = shape[axis] // chunk_length new_shape = tf.concat( [shape[:axis], [num_chunks, chunk_length], shape[(axis + 1):]], axis=0) return tf.reshape(tensor, new_shape) def rectangular_window_sum(tensor, window_length): """Summarizes tensor elements over a sliding rectangular window. Sums elements of the input tensor of shape [B, T', C', H, dim] across a rectangular window sliding along the dimension T'. Args: tensor: Tensor of shape `[B, T', C', H, dim]`. window_length: The length of the rectangular window. Returns: A tensor of shape [B, T', C', H, dim] containing sums over the window. 
""" tensor_cumsum = tf.cumsum(tensor, axis=-4) tensor_winsum = tensor_cumsum - tf.pad( tensor_cumsum, [[0, 0], [window_length, 0], [0, 0], [0, 0], [0, 0]])[:, :-window_length] return tensor_winsum def weighted_window_sum(tensor, window_length, window_weights): """Summarizes tensor elements over a sliding weighted window. Computes a weighted sum of elements of the input tensor of shape [B, T', C', H, dim] across a window sliding along the dimension T'. Args: tensor: Tensor of shape `[B, T', C', H, dim]`. window_length: The length of the window. window_weights: Tensor of shape [window_length] containing window weights. Returns: A tensor of shape [B, T', C', H, dim] containing sums over the window. """ # Flatten the last three dimensions of the [B, T', C', H, dim] shape # into a single channels dimension. tensor_shape = tf.shape(tensor) tensor_2d = tf.reshape(tensor, [tensor_shape[0], tensor_shape[1], 1, -1]) # Apply the same weights to all channels. conv_filter = tf.tile( tf.reshape(window_weights, [-1, 1, 1, 1]), multiples=[1, 1, tf.shape(tensor_2d)[-1], 1]) tensor_winsum_2d = tf.nn.depthwise_conv2d( tensor_2d, conv_filter, strides=[1, 1, 1, 1], padding=[[0, 0], [window_length - 1, 0], [0, 0], [0, 0]]) # Unflatten the channels dimension into the original shape. tensor_winsum = tf.reshape(tensor_winsum_2d, tensor_shape) return tensor_winsum def causal_windowed_performer_attention(query_matrix, key_matrix, value_matrix, chunk_length, window_length, window_decay=None, padding=None, cache=None): """Applies windowed causal kernel attention with query, key, value tensors. We partition the T-length input sequence into N chunks, each of chunk_length tokens (thus: T = N * chunk_length). Within each chunk, we apply bidirectional (non-causal) Performers’ implicit attention and we model relationships between different chunks using Performers’ causal attention. We consider windowed causal variant of performer, where the current chunk attends only to the window of window_length of the most recent chunks. Below is an example with T=9, chunk_length=3, window_length=2. In this example 1 indicates attention is computed between the pair while 0 indicates attention is not computed between the pairs: 111000000 111000000 111000000 111111000 111111000 111111000 000111111 000111111 000111111 User can ensure sequence_length is divisible by chunk_length or use padding="left"/"right" to pad the sequence length either at the left or right respectively and make it divisible by chunk_length. Args: query_matrix: Kernel query `Tensor` of shape `[B, T, H, dim]`. key_matrix: Kernel key `Tensor` of shape `[B, T, H, dim]`. value_matrix: Value `Tensor` of shape `[B, T, H, out_dim]`. chunk_length: Length of each chunk in tokens. window_length: Length of attention window in chunks. window_decay: Float window decay factor or `None`. If set, exponentially decay past attention window values by this factor before summation. padding: Pad the query, value and key input tensors across the axis from either left or right if padding is set to "left" or "right"; apply no padding if padding is set to None. In the latter case, the axis dimension of the query, value and key input tensors must be divisible by the chunk_length. cache: Cache to accumulate history in memory. Used at inferecne time (streaming, decoding) for causal attention. Returns: Window causal performer attention of shape `[B, T, H, out_dim]`. 
""" if cache is None: # Training old_shape = tf.shape(value_matrix) query_matrix = pad_to_chunk_length(query_matrix, -3, chunk_length, padding) key_matrix = pad_to_chunk_length(key_matrix, -3, chunk_length, padding) value_matrix = pad_to_chunk_length(value_matrix, -3, chunk_length, padding) new_shape = tf.shape(value_matrix) chunked_query_matrix = split_tensor_into_chunks( query_matrix, -3, chunk_length) # [-1, T//chunk_length, chunk_length, N, dim] chunked_key_matrix = split_tensor_into_chunks( key_matrix, -3, chunk_length) # [-1, T//chunk_length, chunk_length, N, dim] chunked_value_matrix = split_tensor_into_chunks( value_matrix, -3, chunk_length) # [-1, T//chunk_length, chunk_length, N, out_dim] kp_v = tf.einsum("BTCHD,BTCHO->BTHDO", chunked_key_matrix, chunked_value_matrix) k_sum = tf.math.reduce_sum(chunked_key_matrix, axis=-3, keepdims=True) if window_decay is None: kp_v_winsum = rectangular_window_sum(kp_v, window_length) k_winsum = rectangular_window_sum(k_sum, window_length) else: # Compute exponentially decaying weights. decaying_weights = tf.math.pow( tf.convert_to_tensor(window_decay, dtype=value_matrix.dtype), tf.range(window_length - 1, -1, delta=-1, dtype=value_matrix.dtype)) kp_v_winsum = weighted_window_sum(kp_v, window_length, decaying_weights) k_winsum = weighted_window_sum(k_sum, window_length, decaying_weights) numerator = tf.einsum( "BTCHD,BTHDO->BTCHO", chunked_query_matrix, kp_v_winsum) k_winsum = tf.squeeze(k_winsum, -3) denominator = tf.einsum("BTCHD,BTHD->BTCH", chunked_query_matrix, k_winsum) denominator = tf.expand_dims(denominator, -1) + _NUMERIC_STABLER attention = numerator / denominator attention = tf.reshape(attention, new_shape) start = tf.zeros([old_shape.shape[0]], dtype=old_shape.dtype) attention = tf.slice(attention, start, old_shape) # Queued window cache (drop instead of decay) not yet supported. else: # Streaming if window_decay is None or window_decay > 1.0 or window_decay < 0.0: raise ValueError("window_decay should be in (0.0, 1.0) and not None.") kv = window_decay * cache["kv"] + tf.einsum( "BTHD,BTHO->BHOD", key_matrix, value_matrix) cache["kv"] = kv k_sum = window_decay * cache["k_sum"] + tf.reduce_sum(key_matrix, axis=1) cache["k_sum"] = k_sum denominator = tf.einsum("BTHD,BHD->BTH", query_matrix, k_sum) # The below is equivalent to but converts to TF Lite better than: # tf.einsum("BTHD,BTH->BTHD", # query_matrix, 1.0 / (denominator + _NUMERIC_STABLER)) inverse_denominator = 1.0 / (denominator + _NUMERIC_STABLER) # Add another dimension to align for the broadcast multiplication. fused_query_denominator = query_matrix * tf.expand_dims(inverse_denominator, -1) attention = tf.einsum("BTHD,BHOD->BTHO", fused_query_denominator, kv) return attention def create_projection_matrix(m, d, seed=None): r"""Constructs the matrix of random projections. Constructs a matrix of random orthogonal projections. Each projection vector has direction chosen uniformly at random length taken from the \chi(d) distribution.). Args: m: number of random projections. d: dimensionality of each random projection. seed: random seed used to construct projections. If not, we use the stateful api. Returns: The matrix of random projections of the shape [m, d]. """ nb_full_blocks = math.ceil(m / d) block_list = tf.TensorArray( tf.float32, size=tf.cast(nb_full_blocks, dtype=tf.int32)) stateful = False if seed is None: stateful = True # dummy seed to make sure the graph compiles though the path is not taken. 
seed = tf.constant([0, 1]) current_seed = seed for i in range(nb_full_blocks): if stateful: unstructured_block = tf.random.normal((d, d)) else: unstructured_block = tf.random.stateless_normal((d, d), seed=current_seed) current_seed = tf.random.stateless_uniform([2], seed=current_seed, minval=None, dtype=tf.int32) q, _ = tf.linalg.qr(unstructured_block) q = tf.transpose(q) block_list = block_list.write(i, q) final_matrix = block_list.concat()[:m] if stateful is None: multiplier = tf.norm(tf.random.normal((m, d)), axis=1) else: multiplier = tf.norm( tf.random.stateless_normal((m, d), seed=current_seed), axis=1) return tf.linalg.matmul(tf.linalg.diag(multiplier), final_matrix) def _generalized_kernel(x, y, is_query, projection_matrix, f, h): """Generalized kernel in RETHINKING ATTENTION WITH PERFORMERS. Args: x: The feature being transformed with shape [B, T, N ,H]. y: The extra stats-tensor of shape [B, T, N ,H]. is_query: True if x is a query-tensor. projection_matrix: The matrix with shape [M, H] that we projecct x to, where M is the number of projections. f: A non-linear function applied on x or projected x. h: A muliplier which is a function of x applied after projected and transformed. Only applied if projection_matrix is not None. Returns: Transformed feature. """ del y del is_query if projection_matrix is None: return h(x) * f(x) else: x_projected = tf.einsum("BTNH,MH->BTNM", x, projection_matrix) return h(x) * f(x_projected) / tf.math.sqrt( tf.cast(tf.shape(projection_matrix)[0], tf.float32)) def expplus(data_orig, other_data, is_query, projection_matrix=None, numerical_stabilizer=0.000001, normalize_data=True, numerical_renormalizer=True, extra_renormalize_exp_fun=False): """FAVOR++ mechanism from the CRT paper: https://arxiv.org/abs/2205.15317 . Args: data_orig: data tensor of shape [B,T,H,D] for which random features aree to be computed other_data: additional tensor of the shape [B,F,H,D] used to collect stats to determine the exact instantiation of the random feature mechanism is_query: boolean indicating whether <data_orig> tensor is a query tensor projection_matrix: tensor of the shape [M,D] encoding random projections for random features (M stands for the number of random features) numerical_stabilizer: numerical stabilizer for the kernel features normalize_data: whether to sqrt-d-normalize queries/keys as in the regular attention numerical_renormalizer: whether to apply additional renormalization for numerical stability extra_renormalize_exp_fun: extra renormalizer for the exponential mapping applied to construct random features Returns: Random feature map tensor for the unbiased softmax-kernel estimation. 
""" data = data_orig if projection_matrix is None: return data_orig projection_matrix = tf.cast(projection_matrix, data.dtype) if normalize_data: data_normalizer = 1.0 / tf.math.sqrt( (tf.math.sqrt(tf.dtypes.cast(data.shape[-1], data.dtype)))) else: data_normalizer = 1.0 lengths = tf.math.square(data) lengths = tf.reduce_sum(lengths, axis=tf.keras.backend.ndim(data) - 1) lengths = tf.expand_dims(lengths, axis=tf.keras.backend.ndim(data) - 1) lengths = tf.math.sqrt(lengths) data /= lengths ratio = 1.0 / tf.math.sqrt( tf.dtypes.cast(projection_matrix.shape[0], data.dtype)) data_dash = tf.einsum("blhd,md->blhm", data_normalizer * data, projection_matrix) diag_data = tf.math.square(data) diag_data = tf.math.reduce_sum( diag_data, axis=tf.keras.backend.ndim(data) - 1) diag_data = (diag_data / 2.0) * data_normalizer * data_normalizer diag_data = tf.expand_dims(diag_data, axis=tf.keras.backend.ndim(data) - 1) # Calculating coefficients A, B of the FAVOR++ mechanism: _, l, _, _ = tf_utils.get_shape_list(data_orig) l = tf.cast(l, dtype=tf.float32) first_sum_of_squares = tf.math.square(data) first_sum_of_squares = tf.math.reduce_sum( first_sum_of_squares, axis=(1, -1), keepdims=True) first_sum_of_squares *= (data_normalizer * data_normalizer) first_sum_of_squares /= l # data.shape[1] second_sum_of_squares = tf.math.square(other_data) second_sum_of_squares = tf.math.reduce_sum( second_sum_of_squares, axis=(1, -1), keepdims=True) second_sum_of_squares *= (data_normalizer * data_normalizer) second_sum_of_squares /= l # other_data.shape[1] data_sum = tf.math.reduce_sum(data, axis=(1,), keepdims=True) other_data_sum = tf.math.reduce_sum(other_data, axis=(1,), keepdims=True) d_prod = tf.einsum("blhd,blhd->blh", data_sum, other_data_sum) d_prod = tf.expand_dims(d_prod, axis=-1) d_prod *= (data_normalizer * data_normalizer) d_prod *= (2.0 / (l * l)) ave = first_sum_of_squares + second_sum_of_squares + d_prod dim = projection_matrix.shape[-1] a_coeff = (1.0 / (4.0 * ave)) * ( tf.math.sqrt((2.0 * ave + dim) * (2.0 * ave + dim) + 8.0 * dim * ave) - 2.0 * ave - dim) a_coeff = (1.0 - 1.0 / a_coeff) / 8.0 b_coeff = tf.math.sqrt(1.0 - 4.0 * a_coeff) d_coeff = tf.math.pow(1.0 - 4.0 * a_coeff, dim / 4.0) a_coeff = tf.stop_gradient(a_coeff) b_coeff = tf.stop_gradient(b_coeff) d_coeff = tf.stop_gradient(d_coeff) # Calculating diag_omega for the FAVOR++ mechanism: diag_omega = tf.math.square(projection_matrix) diag_omega = tf.math.reduce_sum( diag_omega, axis=tf.keras.backend.ndim(projection_matrix) - 1) diag_omega = tf.expand_dims(diag_omega, axis=0) diag_omega = tf.expand_dims(diag_omega, axis=0) diag_omega = tf.expand_dims(diag_omega, axis=0) diag_omega = a_coeff * diag_omega if numerical_renormalizer: if is_query: last_dims_t = (len(data_dash.shape) - 1,) stab = b_coeff * tf.math.reduce_max( data_dash, axis=last_dims_t, keepdims=True) else: stab = b_coeff * tf.math.reduce_max(data_dash, keepdims=True) if extra_renormalize_exp_fun: extra_stab = tf.reduce_max(diag_data, axis=1, keepdims=True) stab = tf.math.maximum(stab, extra_stab) data_dash = ratio * d_coeff * ( tf.math.exp(b_coeff * data_dash - stab - diag_data + diag_omega) + numerical_stabilizer) else: data_dash = ratio * d_coeff * ( tf.math.exp(b_coeff * data_dash - diag_data + diag_omega) + numerical_stabilizer) return data_dash # pylint: disable=g-long-lambda _CAUSAL_SUPPORT_TRANSFORM_MAP = { "elu": functools.partial( _generalized_kernel, f=lambda x: tf.keras.activations.elu(x) + 1, h=lambda x: 1), "relu": functools.partial( _generalized_kernel, # Improve 
numerical stability and avoid NaNs in some cases by adding # a tiny epsilon. f=lambda x: tf.keras.activations.relu(x) + 1e-3, h=lambda x: 1), "square": functools.partial(_generalized_kernel, f=tf.math.square, h=lambda x: 1), "exp": functools.partial( _generalized_kernel, # Avoid exp explosion by shifting. f=lambda x: tf.math.exp(x - tf.math.reduce_max( x, axis=[1, 2, 3], keepdims=True)), h=lambda x: tf.math.exp(-0.5 * tf.math.reduce_sum( tf.math.square(x), axis=-1, keepdims=True)), ), "expmod": functools.partial( _generalized_kernel, # Avoid exp explosion by shifting. f=lambda x: tf.math.exp(x - tf.math.reduce_max( x, axis=[1, 2, 3], keepdims=True)), h=lambda x: tf.math.exp(-0.5 * tf.math.sqrt( tf.cast(tf.shape(x)[-1], tf.float32))), ), "identity": functools.partial(_generalized_kernel, f=lambda x: x, h=lambda x: 1) } _NON_CAUSAL_SUPPORT_TRANSFORM_MAP = { "expplus": expplus, } _TRANSFORM_MAP = { **_CAUSAL_SUPPORT_TRANSFORM_MAP, **_NON_CAUSAL_SUPPORT_TRANSFORM_MAP } # pylint: enable=g-long-lambda class KernelAttention(tf.keras.layers.MultiHeadAttention): """A variant of efficient transformers which replaces softmax with kernels. This module combines ideas from the two following papers: Rethinking Attention with Performers (https://arxiv.org/abs/2009.14794) - exp (Lemma 1, positive), relu - random/deterministic projection Chefs' Random Tables: Non-Trigonometric Random Features (https://arxiv.org/abs/2205.15317) - expplus (OPRF mechanism) Transformers are RNNs: Fast Autoregressive Transformers with Linear Attention (https://arxiv.org/abs/2006.16236) - elu with the theory of approximating angular Performer kernels from go/performer. The module enables computing efficient attention in both: long sequence and shorter sequence regimes. In the former setting, the attention matrix is never explicitly computed and instead its low-rank decomposition obtained with given kernel feature maps is leveraged to conduct attention module calculations (see: https://arxiv.org/abs/2006.16236). In the latter setting, attention matrix is constructed, but kernel features providing dimensionality reduction are applied, resulting in more efficient computation of the attention matrix. """ def __init__(self, feature_transform="exp", num_random_features=256, seed=0, redraw=False, is_short_seq=False, begin_kernel=0, scale=None, scale_by_length=False, use_causal_windowed=False, causal_chunk_length=1, causal_window_length=3, causal_window_decay=None, causal_padding=None, **kwargs): r"""Constructor of KernelAttention. Args: feature_transform: A non-linear transform of the keys and queries. Possible transforms are "elu", "relu", "square", "exp", "expplus", "expmod", "identity". num_random_features: Number of random features to be used for projection. if num_random_features <= 0, no production is used before transform. seed: The seed to begin drawing random features. Once the seed is set, the psedo number generation is determinisitc. Users should pass different seed for different layers. For multi-worker, each layer will use the same projection at each step. redraw: Whether to redraw projection every forward pass during training. The argument is only effective when num_random_features > 0. is_short_seq: boolean predicate indicating whether input data consists of very short sequences or not; in most cases this should be False (default option). begin_kernel: Apply kernel_attention after this sequence id and apply softmax attention before this. 
scale: The value to scale the dot product as described in `Attention Is All You Need`. If None, we use 1/sqrt(dk) as described in the paper. scale_by_length: boolean predicate indicating whether additionally scale the dot product based on key length. Set as log_512^(n) to stablize attention entropy against length. Refer to https://kexue.fm/archives/8823 for details. use_causal_windowed: If true perform windowed causal attention. See causal_windowed_performer_attention function docstring for more details. causal_chunk_length: Length of each chunk in tokens. causal_window_length: Length of attention window in chunks. causal_window_decay: Float window decay factor or `None`. If set, exponentially decay past attention window values by this factor before summation. causal_padding: Pad the query, value and key input tensors across the axis from either left or right if padding is set to "left" or "right"; apply no padding if padding is set to None. In the latter case, the axis dimension of the query, value and key input tensors must be divisible by the chunk_length. **kwargs: The same arguments `MultiHeadAttention` layer. """ if feature_transform not in _TRANSFORM_MAP: raise ValueError("Unsupported feature_transform. The supported " "feature_transform are %s. " "Got '%s'." % (_TRANSFORM_MAP.keys(), feature_transform)) if num_random_features <= 0 and redraw: raise ValueError( "There is nothing to redraw when num_random_features <= 0.") self._feature_transform = feature_transform self._num_random_features = num_random_features self._redraw = redraw self._is_short_seq = is_short_seq self._begin_kernel = begin_kernel self._scale_by_length = scale_by_length # We use the seed for two scenarios: # 1. inference # 2. no redraw self._seed = seed super().__init__(**kwargs) if scale is None: self._scale = 1.0 / math.sqrt(float(self._key_dim)) else: self._scale = scale self._projection_matrix = None if num_random_features > 0: self._projection_matrix = create_projection_matrix( self._num_random_features, self._key_dim, tf.constant([self._seed, self._seed + 1])) self.use_causal_windowed = use_causal_windowed self.causal_chunk_length = causal_chunk_length self.causal_window_length = causal_window_length self.causal_window_decay = causal_window_decay self.causal_padding = causal_padding if self.use_causal_windowed and self._is_short_seq: raise ValueError( "use_causal_windowed and short_seq methods are mutually exclusive") def _compute_attention(self, query, key, value, feature_transform, is_short_seq, attention_mask=None, cache=None, training=False, numeric_stabler=_NUMERIC_STABLER): """Applies kernel attention with query, key, value tensors. This function defines the computation inside `call` with projected multi-head Q, K, V inputs. Users can override this function for customized attention implementation. Args: query: Projected query `Tensor` of shape `[B, T, N, key_dim]`. key: Projected key `Tensor` of shape `[B, S, N, key_dim]`. value: Projected value `Tensor` of shape `[B, S, N, value_dim]`. feature_transform: A non-linear transform of the keys and quries. is_short_seq: boolean predicate indicating whether input data consists of short or long sequences; usually short sequence is defined as having length L <= 1024. attention_mask: a boolean mask of shape `[B, S]`, that prevents attenting to masked positions. Note that the mask is only appied to the keys. User may want to mask the output if query contains pads. cache: Cache to accumulate history in memory. 
Used at inferecne time (streaming, decoding) for causal attention. training: Python boolean indicating whether the layer should behave in training mode (adding dropout) or in inference mode (doing nothing). numeric_stabler: A scalar value added to avoid divide by 0. Returns: attention_output: Multi-headed outputs of attention computation. """ projection_matrix = None if self._num_random_features > 0: if self._redraw and training: projection_matrix = create_projection_matrix(self._num_random_features, self._key_dim) else: projection_matrix = self._projection_matrix if self._scale_by_length: scale = tf.math.log(tf.reduce_sum(attention_mask, axis=-1)) * self._scale / math.log(512) scale = tf.reshape(scale, [-1, 1, 1, 1]) else: scale = self._scale if is_short_seq: # Note: Applying scalar multiply at the smaller end of einsum improves # XLA performance, but may introduce slight numeric differences in # the Transformer attention head. query = query * scale else: # Note: we suspect spliting the scale to key, query yields smaller # approximation variance when random projection is used. # For simplicity, we also split when there's no random projection. key *= tf.math.sqrt(scale) query *= tf.math.sqrt(scale) key_prime = _TRANSFORM_MAP[feature_transform](key, query, False, projection_matrix) query_prime = _TRANSFORM_MAP[feature_transform](query, key, True, projection_matrix) if attention_mask is not None: key_prime = tf.einsum("BSNH,BS->BSNH", key_prime, attention_mask) if is_short_seq: attention_scores = tf.einsum("BTNH,BSNH->BTSN", query_prime, key_prime) attention_scores = tf.nn.softmax(attention_scores, axis=2) attention_output = tf.einsum("BTSN,BSNH->BTNH", attention_scores, value) elif self.use_causal_windowed: attention_output = causal_windowed_performer_attention( query_prime, key_prime, value, chunk_length=self.causal_chunk_length, window_length=self.causal_window_length, window_decay=self.causal_window_decay, padding=self.causal_padding, cache=cache) else: kv = tf.einsum("BSNH,BSND->BNDH", key_prime, value) denominator = 1.0 / ( tf.einsum("BTNH,BNH->BTN", query_prime, tf.reduce_sum(key_prime, axis=1)) + _NUMERIC_STABLER) attention_output = tf.einsum("BTNH,BNDH,BTN->BTND", query_prime, kv, denominator) return attention_output def _build_from_signature(self, query, value, key=None): super()._build_from_signature(query=query, value=value, key=key) # pytype: disable=attribute-error # typed-keras if self._begin_kernel > 0: common_kwargs = dict( kernel_initializer=self._kernel_initializer, bias_initializer=self._bias_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer, activity_regularizer=self._activity_regularizer, kernel_constraint=self._kernel_constraint, bias_constraint=self._bias_constraint) self._output_dense_softmax = self._make_output_dense( self._query_shape.rank - 1, common_kwargs, name="attention_output_softmax") self._dropout_softmax = tf.keras.layers.Dropout(rate=self._dropout) def call(self, query, value, key=None, attention_mask=None, cache=None, training=False): """Compute attention with kernel mechanism. Args: query: Query `Tensor` of shape `[B, T, dim]`. value: Value `Tensor` of shape `[B, S, dim]`. key: Optional key `Tensor` of shape `[B, S, dim]`. If not given, will use `value` for both `key` and `value`, which is the most common case. attention_mask: a boolean mask of shape `[B, S]`, that prevents attenting to masked positions. Note that the mask is only appied to the keys. User may want to mask the output if query contains pads. 
      cache: Cache to accumulate history in memory. Used at inference time
        (streaming, decoding) for causal attention.
      training: Python boolean indicating whether the layer should behave in
        training mode (adding dropout) or in inference mode (doing nothing).

    Returns:
      Multi-headed outputs of attention computation.
    """
    if cache is not None:
      if training:
        raise ValueError(
            "Cache is not supported when training is True.")
      if not self.use_causal_windowed:
        raise ValueError(
            "Cache is not supported for non use_causal_windowed case.")
      if self._begin_kernel:
        raise ValueError(
            "Cache is not supported when begin_kernel is set since the "
            "behavior is too complicated.")
      if self._feature_transform in _NON_CAUSAL_SUPPORT_TRANSFORM_MAP:
        raise ValueError("Cache is not supported for feature_transform %s" %
                         (self._feature_transform))

    if not self._built_from_signature:
      self._build_from_signature(query=query, value=value, key=key)
    if key is None:
      key = value

    # N = `num_attention_heads`
    # H = `size_per_head`
    # `query` = [B, T, N, H]
    query = self._query_dense(query)

    # `key` = [B, S, N, H]
    key = self._key_dense(key)

    # `value` = [B, S, N, D]
    value = self._value_dense(value)

    if self._begin_kernel > 0:
      attention_output_softmax = self._compute_attention(
          query[:, :self._begin_kernel], key, value, "identity", True,
          attention_mask, training)
      attention_output_softmax = self._dropout_softmax(attention_output_softmax)
      attention_output_softmax = self._output_dense_softmax(
          attention_output_softmax)
      attention_output_kernel = self._compute_attention(
          query[:, self._begin_kernel:], key, value, self._feature_transform,
          self._is_short_seq, attention_mask, training)
      attention_output_kernel = self._dropout_layer(attention_output_kernel)
      attention_output_kernel = self._output_dense(attention_output_kernel)
      attention_output = tf.concat(
          [attention_output_softmax, attention_output_kernel], axis=1)
    else:
      attention_output = self._compute_attention(query, key, value,
                                                 self._feature_transform,
                                                 self._is_short_seq,
                                                 attention_mask, cache,
                                                 training)
      # This is actually dropping out entire tokens to attend to, which might
      # seem a bit unusual, but is taken from the original Transformer paper.
      attention_output = self._dropout_layer(attention_output)
      attention_output = self._output_dense(attention_output)
    return attention_output

  def get_config(self):
    config = {
        "feature_transform": self._feature_transform,
        "num_random_features": self._num_random_features,
        "seed": self._seed,
        "redraw": self._redraw,
        "is_short_seq": self._is_short_seq,
        "begin_kernel": self._begin_kernel,
        "scale": self._scale,
        "scale_by_length": self._scale_by_length,
        "use_causal_windowed": self.use_causal_windowed,
        "causal_chunk_length": self.causal_chunk_length,
        "causal_window_length": self.causal_window_length,
        "causal_window_decay": self.causal_window_decay,
        "causal_padding": self.causal_padding,
    }
    base_config = super().get_config()
    return dict(list(base_config.items()) + list(config.items()))
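A minimal usage sketch of the kernelized attention layer above. The keyword arguments mirror the keys returned by `get_config` in this file, but the class name (`KernelAttention`), the module path, the `"exp"` transform key, and the shapes are assumptions for illustration and should be checked against the actual module before use.

import tensorflow as tf
from official.nlp.modeling.layers import kernel_attention  # assumed module path

# Linear-time path: long sequence, random-feature (Performer-style) transform.
layer = kernel_attention.KernelAttention(   # assumed class name
    num_heads=8,
    key_dim=16,
    feature_transform="exp",      # assumed key of _TRANSFORM_MAP
    num_random_features=128)      # > 0 triggers create_projection_matrix
x = tf.random.uniform((2, 512, 128))        # [B, T, dim]
mask = tf.ones((2, 512))                    # [B, S] mask over the keys
y = layer(query=x, value=x, attention_mask=mask)
# With the default output projection the output keeps the query shape,
# i.e. (2, 512, 128) here.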
34,871
40.317536
118
py
models
models-master/official/nlp/modeling/layers/position_embedding.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Keras-based positional embedding layer."""
# pylint: disable=g-classes-have-attributes

import math
from typing import Optional

import tensorflow as tf

from official.modeling import tf_utils

Initializer = tf.keras.initializers.Initializer


@tf.keras.utils.register_keras_serializable(package="Text")
class PositionEmbedding(tf.keras.layers.Layer):
  """Creates a positional embedding.

  Example:
  ```python
  position_embedding = PositionEmbedding(max_length=100)
  inputs = tf.keras.Input((100, 32), dtype=tf.float32)
  outputs = position_embedding(inputs)
  ```

  Args:
    max_length: The maximum size of the dynamic sequence.
    initializer: The initializer to use for the embedding weights. Defaults to
      "glorot_uniform".
    seq_axis: The axis of the input tensor where we add the embeddings.

  Reference: This layer creates a positional embedding as described in
  [BERT: Pre-training of Deep Bidirectional Transformers for Language
  Understanding](https://arxiv.org/abs/1810.04805).
  """

  def __init__(self,
               max_length,
               initializer="glorot_uniform",
               seq_axis=1,
               **kwargs):

    super().__init__(**kwargs)
    if max_length is None:
      raise ValueError(
          "`max_length` must be an Integer, not `None`."
      )
    self._max_length = max_length
    self._initializer = tf.keras.initializers.get(initializer)
    self._seq_axis = seq_axis

  def get_config(self):
    config = {
        "max_length": self._max_length,
        "initializer": tf.keras.initializers.serialize(self._initializer),
        "seq_axis": self._seq_axis,
    }
    base_config = super(PositionEmbedding, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))

  def build(self, input_shape):
    dimension_list = input_shape
    width = dimension_list[-1]
    weight_sequence_length = self._max_length

    self._position_embeddings = self.add_weight(
        "embeddings",
        shape=[weight_sequence_length, width],
        initializer=self._initializer)

    super().build(input_shape)

  def call(self, inputs):
    input_shape = tf.shape(inputs)
    actual_seq_len = input_shape[self._seq_axis]
    position_embeddings = self._position_embeddings[:actual_seq_len, :]
    new_shape = [1 for _ in inputs.get_shape().as_list()]
    new_shape[self._seq_axis] = actual_seq_len
    new_shape[-1] = position_embeddings.get_shape().as_list()[-1]
    position_embeddings = tf.reshape(position_embeddings, new_shape)
    return tf.broadcast_to(position_embeddings, input_shape)


@tf.keras.utils.register_keras_serializable(package="Text")
class RelativePositionEmbedding(tf.keras.layers.Layer):
  """Creates a positional embedding.

  This layer calculates the position encoding as a mix of sine and cosine
  functions with geometrically increasing wavelengths. Defined and formulated
  in "Attention is All You Need", section 3.5.
  (https://arxiv.org/abs/1706.03762).

  Args:
    hidden_size: Size of the hidden layer.
    min_timescale: Minimum scale that will be applied at each position.
    max_timescale: Maximum scale that will be applied at each position.
""" def __init__(self, hidden_size: int, min_timescale: float = 1.0, max_timescale: float = 1.0e4, **kwargs): # We need to have a default dtype of float32, since the inputs (which Keras # usually uses to infer the dtype) will always be int32. # We compute the positional encoding in float32 even if the model uses # float16, as many of the ops used, like log and exp, are numerically # unstable in float16. if "dtype" not in kwargs: kwargs["dtype"] = "float32" super().__init__(**kwargs) self._hidden_size = hidden_size self._min_timescale = min_timescale self._max_timescale = max_timescale def get_config(self): config = { "hidden_size": self._hidden_size, "min_timescale": self._min_timescale, "max_timescale": self._max_timescale, } base_config = super(RelativePositionEmbedding, self).get_config() return dict(list(base_config.items()) + list(config.items())) def call(self, inputs, length=None): """Implements call() for the layer. Args: inputs: An tensor whose second dimension will be used as `length`. If `None`, the other `length` argument must be specified. length: An optional integer specifying the number of positions. If both `inputs` and `length` are spcified, `length` must be equal to the second dimension of `inputs`. Returns: A tensor in shape of `(length, hidden_size)`. """ if inputs is None and length is None: raise ValueError("If inputs is None, `length` must be set in " "RelativePositionEmbedding().") if inputs is not None: input_shape = tf_utils.get_shape_list(inputs) if length is not None and length != input_shape[1]: raise ValueError( "If inputs is not None, `length` must equal to input_shape[1].") length = input_shape[1] position = tf.cast(tf.range(length), tf.float32) num_timescales = self._hidden_size // 2 min_timescale, max_timescale = self._min_timescale, self._max_timescale log_timescale_increment = ( math.log(float(max_timescale) / float(min_timescale)) / (tf.cast(num_timescales, tf.float32) - 1)) inv_timescales = min_timescale * tf.exp( tf.cast(tf.range(num_timescales), tf.float32) * -log_timescale_increment) scaled_time = tf.expand_dims(position, 1) * tf.expand_dims( inv_timescales, 0) position_embeddings = tf.concat( [tf.sin(scaled_time), tf.cos(scaled_time)], axis=1) return position_embeddings def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128): """Translate relative position to a bucket number for relative attention. The relative position is defined as memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to position. If `bidirectional=False`, then positive relative positions are invalid. We use smaller buckets for small absolute relative_position and larger buckets for larger absolute relative_positions. All relative positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket. This should allow for more graceful generalization to longer sequences than the model has been trained on. 
Args: relative_position: An int32 Tensor bidirectional: A boolean - whether the attention is bidirectional num_buckets: An integer max_distance: An integer Returns: A Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets) """ ret = 0 n = -relative_position if bidirectional: num_buckets //= 2 ret += tf.cast(tf.math.less(n, 0), tf.int32) * num_buckets n = tf.math.abs(n) else: n = tf.math.maximum(n, 0) # now n is in the range [0, inf) max_exact = num_buckets // 2 is_small = tf.math.less(n, max_exact) val_if_large = max_exact + tf.dtypes.cast( tf.math.log(tf.cast(n, tf.float32) / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact), tf.int32, ) val_if_large = tf.math.minimum(val_if_large, num_buckets - 1) ret += tf.where(is_small, n, val_if_large) return ret @tf.keras.utils.register_keras_serializable(package="Text") class RelativePositionBias(tf.keras.layers.Layer): """Relative position embedding via per-head bias in T5 style. Reference implementation in MeshTF: https://github.com/tensorflow/mesh/blob/master/mesh_tensorflow/transformer/transformer_layers.py#L1000 This layer implements the relative position bias used in "Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer" (https://arxiv.org/abs/1910.10683) """ def __init__(self, num_heads: int, relative_attention_num_buckets: int = 32, relative_attention_max_distance: int = 128, bidirectional: bool = True, embeddings_initializer: Optional[Initializer] = None, **kwargs): super().__init__(**kwargs) self.num_heads = num_heads self.relative_attention_num_buckets = relative_attention_num_buckets self.bidirectional = bidirectional self.relative_attention_max_distance = relative_attention_max_distance if embeddings_initializer: self._embed_init = embeddings_initializer else: self._embed_init = tf.keras.initializers.TruncatedNormal(stddev=1.0) with tf.name_scope(self.name): self._relative_attention_bias = self.add_weight( "rel_embedding", shape=[self.relative_attention_num_buckets, self.num_heads], initializer=self._embed_init, dtype=self.dtype, trainable=True) def get_config(self): config = { "num_heads": self.num_heads, "relative_attention_num_buckets": self.relative_attention_num_buckets, "relative_attention_max_distance": self.relative_attention_max_distance, "bidirectional": self.bidirectional, "embeddings_initializer": tf.keras.initializers.serialize(self._embed_init), } base_config = super().get_config() return dict(list(base_config.items()) + list(config.items())) def call(self, query: tf.Tensor, key: tf.Tensor): """Implements the forward pass. Args: query: query input tensor shape [batch, query length, hidden size]. key: key input tensor shape [batch, key length, hidden size]. Returns: A tensor in shape of [batch, heads, query length, key length]. """ batch_size, qlen = tf_utils.get_shape_list(query)[:2] klen = tf_utils.get_shape_list(key)[1] context_position = tf.range(qlen)[:, None] memory_position = tf.range(klen)[None, :] relative_position = memory_position - context_position rp_bucket = _relative_position_bucket( relative_position, bidirectional=self.bidirectional, num_buckets=self.relative_attention_num_buckets, max_distance=self.relative_attention_max_distance) values = tf.nn.embedding_lookup(self._relative_attention_bias, rp_bucket) values = tf.expand_dims( tf.transpose(values, [2, 0, 1]), axis=0) # shape (1, num_heads, qlen, klen) values = tf.tile(values, [batch_size, 1, 1, 1]) return values
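A small stand-alone illustration of how `_relative_position_bucket` above distributes offsets, assuming the module is importable as `official.nlp.modeling.layers.position_embedding`: nearby offsets receive their own buckets, distant offsets share logarithmically sized buckets, and in the bidirectional case negative and positive offsets occupy disjoint halves of the bucket range.

import tensorflow as tf
from official.nlp.modeling.layers.position_embedding import (
    _relative_position_bucket)  # module-private; imported only for illustration

offsets = tf.constant([-128, -16, -2, -1, 0, 1, 2, 16, 128])
buckets = _relative_position_bucket(
    offsets, bidirectional=True, num_buckets=32, max_distance=128)
print(buckets.numpy())
# Small |offset| values map to distinct buckets near 0 (negative offsets) or
# near num_buckets // 2 (positive offsets); |offset| >= max_distance saturates
# at the largest bucket of its half.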
11,337
35.811688
104
py
models
models-master/official/nlp/modeling/layers/talking_heads_attention_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Tests for the attention layer."""

from absl.testing import parameterized
import numpy as np
import tensorflow as tf

from official.nlp.modeling.layers import talking_heads_attention


# This test is revised based on attention.MultiHeadAttentionTest.
class TalkingHeadsAttentionTest(tf.test.TestCase, parameterized.TestCase):

  @parameterized.named_parameters(
      ("key_value_same_proj", None, None, [40, 80]),
      ("key_value_different_proj", 32, 60, [40, 60]),
  )
  def test_non_masked_attention(self, value_dim, output_shape, output_dims):
    """Test that the attention layer can be created without a mask tensor."""
    test_layer = talking_heads_attention.TalkingHeadsAttention(
        num_heads=12,
        key_dim=64,
        value_dim=value_dim,
        output_shape=output_shape)
    # Create a 3-dimensional input (the first dimension is implicit).
    query = tf.keras.Input(shape=(40, 80))
    value = tf.keras.Input(shape=(20, 80))
    output = test_layer(query=query, value=value)
    self.assertEqual(output.shape.as_list(), [None] + output_dims)

  def test_non_masked_self_attention(self):
    """Test with one input (self-attention) and no mask tensor."""
    test_layer = talking_heads_attention.TalkingHeadsAttention(
        num_heads=12, key_dim=64)
    # Create a 3-dimensional input (the first dimension is implicit).
    query = tf.keras.Input(shape=(40, 80))
    output = test_layer(query=query, value=query)
    self.assertEqual(output.shape.as_list(), [None, 40, 80])

  def test_attention_scores(self):
    """Test attention outputs with coefficients."""
    test_layer = talking_heads_attention.TalkingHeadsAttention(
        num_heads=12, key_dim=64)
    # Create a 3-dimensional input (the first dimension is implicit).
    query = tf.keras.Input(shape=(40, 80))
    output, coef = test_layer(query=query, value=query,
                              return_attention_scores=True)
    self.assertEqual(output.shape.as_list(), [None, 40, 80])
    self.assertEqual(coef.shape.as_list(), [None, 12, 40, 40])

  @parameterized.named_parameters(("with_bias", True), ("no_bias", False))
  def test_masked_attention(self, use_bias):
    """Test with a mask tensor."""
    test_layer = talking_heads_attention.TalkingHeadsAttention(
        num_heads=12, key_dim=2, use_bias=use_bias)
    # Create a 3-dimensional input (the first dimension is implicit).
    batch_size = 3
    query = tf.keras.Input(shape=(4, 8))
    value = tf.keras.Input(shape=(2, 8))
    mask_tensor = tf.keras.Input(shape=(4, 2))
    output = test_layer(query=query, value=value, attention_mask=mask_tensor)

    # Create a model containing the test layer.
    model = tf.keras.Model([query, value, mask_tensor], output)

    # Generate data for the input (non-mask) tensors.
    from_data = 10 * np.random.random_sample((batch_size, 4, 8))
    to_data = 10 * np.random.random_sample((batch_size, 2, 8))

    # Invoke the data with a random set of mask data. This should mask at least
    # one element.
    mask_data = np.random.randint(2, size=(batch_size, 4, 2))
    masked_output_data = model.predict([from_data, to_data, mask_data])

    # Invoke the same data, but with a null mask (where no elements are masked).
null_mask_data = np.ones((batch_size, 4, 2)) unmasked_output_data = model.predict([from_data, to_data, null_mask_data]) # Because one data is masked and one is not, the outputs should not be the # same. self.assertNotAllClose(masked_output_data, unmasked_output_data) # Tests the layer with three inputs: Q, K, V. key = tf.keras.Input(shape=(2, 8)) output = test_layer( query=query, value=value, key=key, attention_mask=mask_tensor) model = tf.keras.Model([query, value, key, mask_tensor], output) masked_output_data = model.predict([from_data, to_data, to_data, mask_data]) unmasked_output_data = model.predict( [from_data, to_data, to_data, null_mask_data]) # Because one data is masked and one is not, the outputs should not be the # same. self.assertNotAllClose(masked_output_data, unmasked_output_data) if use_bias: self.assertLen(test_layer._query_dense.trainable_variables, 2) self.assertLen(test_layer._output_dense.trainable_variables, 2) else: self.assertLen(test_layer._query_dense.trainable_variables, 1) self.assertLen(test_layer._output_dense.trainable_variables, 1) def test_initializer(self): """Test with a specified initializer.""" test_layer = talking_heads_attention.TalkingHeadsAttention( num_heads=12, key_dim=64, kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02)) # Create a 3-dimensional input (the first dimension is implicit). query = tf.keras.Input(shape=(40, 80)) output = test_layer(query=query, value=query) self.assertEqual(output.shape.as_list(), [None, 40, 80]) @parameterized.named_parameters( ("4d_inputs_one_free_batch", [3, 4], [3, 2], [4, 2], (2,)), ("4D_inputs_2D_attention", [3, 4], [3, 2], [3, 4, 3, 2], (1, 2)), ("5D_inputs_2D_attention", [5, 3, 4], [5, 3, 2], [3, 4, 3, 2], (2, 3))) def test_high_dim_attention(self, q_dims, v_dims, mask_dims, attention_axes): """Test with a mask tensor.""" test_layer = talking_heads_attention.TalkingHeadsAttention( num_heads=12, key_dim=2, attention_axes=attention_axes) batch_size, hidden_size = 3, 8 # Generate data for the input (non-mask) tensors. query_shape = [batch_size] + q_dims + [hidden_size] value_shape = [batch_size] + v_dims + [hidden_size] mask_shape = [batch_size] + mask_dims query = 10 * np.random.random_sample(query_shape) value = 10 * np.random.random_sample(value_shape) # Invoke the data with a random set of mask data. This should mask at least # one element. mask_data = np.random.randint(2, size=mask_shape).astype("bool") output = test_layer(query=query, value=value, attention_mask=mask_data) # Invoke the same data, but with a null mask (where no elements are masked). null_mask_data = np.ones(mask_shape) unmasked_output = test_layer( query=query, value=value, attention_mask=null_mask_data) # Because one data is masked and one is not, the outputs should not be the # same. self.assertNotAllClose(output, unmasked_output) if __name__ == "__main__": tf.test.main()
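A stand-alone version of the pattern exercised by `test_attention_scores` above, assuming `TalkingHeadsAttention` is importable exactly as in this test file; it is included only to make the expected shapes concrete outside the test harness.

import tensorflow as tf
from official.nlp.modeling.layers import talking_heads_attention

layer = talking_heads_attention.TalkingHeadsAttention(num_heads=12, key_dim=64)
x = tf.random.uniform((2, 40, 80))  # [batch, seq, width]
output, scores = layer(query=x, value=x, return_attention_scores=True)
# output: (2, 40, 80); scores: (2, 12, 40, 40), matching the assertions above.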
7,012
43.106918
80
py
models
models-master/official/nlp/modeling/layers/rezero_transformer.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Keras-based rezero-transformer block layer (Transformer with ReZero)."""
# pylint: disable=g-classes-have-attributes

from typing import Optional
from absl import logging
import gin
import tensorflow as tf

from official.modeling import tf_utils
from official.nlp.modeling.layers import util


@tf.keras.utils.register_keras_serializable(package="Text")
@gin.configurable
class ReZeroTransformer(tf.keras.layers.Layer):
  """Transformer layer with ReZero.

  This layer implements the Transformer from "Attention Is All You Need".
  (https://arxiv.org/abs/1706.03762).
  The residual connection implements the ReZero method.
  (https://arxiv.org/abs/2003.04887)

  Args:
    num_attention_heads: Number of attention heads.
    inner_dim: The output dimension of the first Dense layer in a two-layer
      feedforward network.
    inner_activation: The activation for the first Dense layer in a two-layer
      feedforward network.
    dropout_rate: Dropout probability for the post-attention and output
      dropout.
    attention_dropout_rate: Dropout probability for within the attention layer.
    output_range: the sequence output range, [0, output_range) by slicing the
      target sequence. `None` means the target sequence is not sliced.
    kernel_initializer: Initializer for dense layer kernels.
    bias_initializer: Initializer for dense layer biases.
    kernel_regularizer: Regularizer for dense layer kernels.
    bias_regularizer: Regularizer for dense layer biases.
    activity_regularizer: Regularizer for dense layer activity.
    kernel_constraint: Constraint for dense layer kernels.
    bias_constraint: Constraint for dense layer biases.
    use_layer_norm: Whether to add layer_norm on top of the ReZero residual.
    share_rezero: Whether the attention layer and the FFN layer share the same
      alpha.
  """

  def __init__(self,
               num_attention_heads,
               inner_dim=768,
               inner_activation=tf_utils.get_activation("gelu"),
               dropout_rate=0.0,
               attention_dropout_rate=0.0,
               output_range=None,
               kernel_initializer="glorot_uniform",
               bias_initializer="zeros",
               kernel_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               kernel_constraint=None,
               bias_constraint=None,
               use_layer_norm=False,
               share_rezero=True,
               **kwargs):
    # attention_dropout will override attention_dropout_rate.
    # This is to unify the input params with TransformerEncoderBlock.
    attention_dropout_rate = kwargs.pop("attention_dropout",
                                        attention_dropout_rate)
    dropout_rate = kwargs.pop("output_dropout", dropout_rate)
    inner_dim = kwargs.pop("intermediate_size", inner_dim)
    inner_activation = kwargs.pop("intermediate_activation", inner_activation)
    util.filter_kwargs(kwargs)
    super().__init__(**kwargs)

    # Deprecation warning.
    if output_range is not None:
      logging.warning("`output_range` is available as an argument for `call()`."
"The `output_range` as __init__ argument is deprecated.") self._num_heads = num_attention_heads self._inner_dim = inner_dim self._inner_activation = inner_activation self._attention_dropout_rate = attention_dropout_rate self._dropout_rate = dropout_rate self._output_range = output_range self._kernel_initializer = tf.keras.initializers.get(kernel_initializer) self._bias_initializer = tf.keras.initializers.get(bias_initializer) self._kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer) self._bias_regularizer = tf.keras.regularizers.get(bias_regularizer) self._kernel_constraint = tf.keras.constraints.get(kernel_constraint) self._bias_constraint = tf.keras.constraints.get(bias_constraint) self._use_layer_norm = use_layer_norm self._share_rezero = share_rezero def build(self, input_shape): if isinstance(input_shape, tf.TensorShape): input_tensor_shape = input_shape elif isinstance(input_shape, (list, tuple)): input_tensor_shape = tf.TensorShape(input_shape[0]) else: raise ValueError( "The type of input shape argument is not supported, got: %s" % type(input_shape)) if len(input_tensor_shape.as_list()) != 3: raise ValueError("TransformerLayer expects a three-dimensional input of " "shape [batch, sequence, width].") batch_size, sequence_length, hidden_size = input_tensor_shape if len(input_shape) == 2: mask_tensor_shape = tf.TensorShape(input_shape[1]) expected_mask_tensor_shape = tf.TensorShape( [batch_size, sequence_length, sequence_length]) if not expected_mask_tensor_shape.is_compatible_with(mask_tensor_shape): raise ValueError("When passing a mask tensor to TransformerLayer, the " "mask tensor must be of shape [batch, " "sequence_length, sequence_length] (here %s). Got a " "mask tensor of shape %s." % (expected_mask_tensor_shape, mask_tensor_shape)) if hidden_size % self._num_heads != 0: raise ValueError( "The input size (%d) is not a multiple of the number of attention " "heads (%d)" % (hidden_size, self._num_heads)) self._attention_head_size = int(hidden_size // self._num_heads) common_kwargs = dict( kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer, activity_regularizer=self._activity_regularizer, kernel_constraint=self._kernel_constraint, bias_constraint=self._bias_constraint) self._attention_layer = tf.keras.layers.MultiHeadAttention( num_heads=self._num_heads, key_dim=self._attention_head_size, dropout=self._attention_dropout_rate, name="self_attention", kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer), bias_initializer=tf_utils.clone_initializer(self._bias_initializer), **common_kwargs) self._attention_dropout = tf.keras.layers.Dropout(rate=self._dropout_rate) if self._use_layer_norm: # Use float32 in layernorm for numeric stability. # It is probably safe in mixed_float16, but we haven't validated this yet. self._attention_layer_norm = ( tf.keras.layers.LayerNormalization( name="self_attention_layer_norm", axis=-1, epsilon=1e-12, dtype=tf.float32)) self._intermediate_dense = tf.keras.layers.EinsumDense( "abc,cd->abd", output_shape=(None, self._inner_dim), bias_axes="d", name="intermediate", kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer), bias_initializer=tf_utils.clone_initializer(self._bias_initializer), **common_kwargs) policy = tf.keras.mixed_precision.global_policy() if policy.name == "mixed_bfloat16": # bfloat16 causes BERT with the LAMB optimizer to not converge # as well, so we use float32. # TODO(b/154538392): Investigate this. 
policy = tf.float32 self._inner_activation_layer = tf.keras.layers.Activation( self._inner_activation, dtype=policy) self._output_dense = tf.keras.layers.EinsumDense( "abc,cd->abd", output_shape=(None, hidden_size), bias_axes="d", name="output", kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer), bias_initializer=tf_utils.clone_initializer(self._bias_initializer), **common_kwargs) self._output_dropout = tf.keras.layers.Dropout(rate=self._dropout_rate) if self._use_layer_norm: # Use float32 in layernorm for numeric stability. self._output_layer_norm = tf.keras.layers.LayerNormalization( name="output_layer_norm", axis=-1, epsilon=1e-12, dtype=tf.float32) self._rezero_a = self.add_weight( name="rezero_alpha", initializer=tf.keras.initializers.Zeros(), trainable=True, dtype=tf.float32) if self._share_rezero: self._rezero_a_ffn = self._rezero_a else: self._rezero_a_ffn = self.add_weight( name="rezero_alpha_ffn", initializer=tf.keras.initializers.Zeros(), trainable=True, dtype=tf.float32) super().build(input_shape) def get_config(self): config = { "num_attention_heads": self._num_heads, "inner_dim": self._inner_dim, "inner_activation": self._inner_activation, "dropout_rate": self._dropout_rate, "attention_dropout_rate": self._attention_dropout_rate, "output_range": self._output_range, "use_layer_norm": self._use_layer_norm, "share_rezero": self._share_rezero, "kernel_initializer": tf.keras.initializers.serialize(self._kernel_initializer), "bias_initializer": tf.keras.initializers.serialize(self._bias_initializer), "kernel_regularizer": tf.keras.regularizers.serialize(self._kernel_regularizer), "bias_regularizer": tf.keras.regularizers.serialize(self._bias_regularizer), "activity_regularizer": tf.keras.regularizers.serialize(self._activity_regularizer), "kernel_constraint": tf.keras.constraints.serialize(self._kernel_constraint), "bias_constraint": tf.keras.constraints.serialize(self._bias_constraint), } base_config = super().get_config() return dict(list(base_config.items()) + list(config.items())) def reset_rezero(self): self._rezero_a.assign(0.) if not self._share_rezero: self._rezero_a_ffn.assign(0.) 
def call(self, inputs, output_range: Optional[tf.Tensor] = None) -> tf.Tensor: if isinstance(inputs, (list, tuple)): if len(inputs) == 2: input_tensor, attention_mask = inputs key_value = None elif len(inputs) == 3: input_tensor, key_value, attention_mask = inputs else: raise ValueError("Unexpected inputs to %s with length at %d" % (self.__class__, len(inputs))) else: input_tensor, key_value, attention_mask = (inputs, None, None) if output_range is None: output_range = self._output_range if output_range: target_tensor = input_tensor[:, 0:output_range, :] if attention_mask is not None: attention_mask = attention_mask[:, 0:output_range, :] else: target_tensor = input_tensor if key_value is None: key_value = input_tensor attention_output = self._attention_layer( query=target_tensor, value=key_value, attention_mask=attention_mask) attention_output = self._attention_dropout(attention_output) attention_output = target_tensor + self._rezero_a * attention_output if self._use_layer_norm: attention_output = self._attention_layer_norm(attention_output) else: attention_output = tf.cast(attention_output, tf.float32) intermediate_output = self._intermediate_dense(attention_output) intermediate_output = self._inner_activation_layer(intermediate_output) layer_output = self._output_dense(intermediate_output) layer_output = self._output_dropout(layer_output) # During mixed precision training, attention_output is from layer norm and # is always fp32 for now. Cast layer_output to fp32 for the subsequent add. layer_output = attention_output + tf.cast(self._rezero_a_ffn * layer_output, tf.float32) if self._use_layer_norm: layer_output = self._output_layer_norm(layer_output) return layer_output
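A minimal usage sketch of the block defined above (assuming it is importable as `official.nlp.modeling.layers.rezero_transformer`). It exercises the `[inputs, attention_mask]` calling convention handled in `call()` and illustrates the ReZero property that a freshly initialized block acts as the identity, since both residual branches are scaled by an alpha initialized to zero (with the default `use_layer_norm=False` and zero dropout).

import tensorflow as tf
from official.nlp.modeling.layers import rezero_transformer

block = rezero_transformer.ReZeroTransformer(
    num_attention_heads=4, inner_dim=128, inner_activation="relu")
hidden = tf.random.uniform((2, 16, 64))   # [batch, seq, width]; width % heads == 0
mask = tf.ones((2, 16, 16))               # [batch, seq, seq] attention mask
out = block([hidden, mask])
# out.shape == (2, 16, 64); with rezero_alpha == 0 at initialization, out is
# equal to `hidden` (up to dtype casts), because both residual branches vanish.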
12,529
41.764505
80
py
models
models-master/official/nlp/modeling/layers/mixing_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for mixing.py.""" import numpy as np import tensorflow as tf from official.nlp.modeling.layers import mixing class MixingTest(tf.test.TestCase): def test_base_mixing_layer(self): inputs = tf.random.uniform((3, 8, 16), minval=0, maxval=10, dtype=tf.float32) with self.assertRaisesRegex(NotImplementedError, "Abstract method"): _ = mixing.MixingLayer()(query=inputs, value=inputs) def test_fourier_layer(self): batch_size = 4 max_seq_length = 8 hidden_dim = 16 inputs = tf.random.uniform((batch_size, max_seq_length, hidden_dim), minval=0, maxval=10, dtype=tf.float32) outputs = mixing.FourierTransformLayer(use_fft=True)( query=inputs, value=inputs) self.assertEqual(outputs.shape, (batch_size, max_seq_length, hidden_dim)) def test_hartley_layer(self): batch_size = 3 max_seq_length = 16 hidden_dim = 4 inputs = tf.random.uniform((batch_size, max_seq_length, hidden_dim), minval=0, maxval=12, dtype=tf.float32) outputs = mixing.HartleyTransformLayer(use_fft=True)( query=inputs, value=inputs) self.assertEqual(outputs.shape, (batch_size, max_seq_length, hidden_dim)) def test_linear_mixing_layer(self): batch_size = 2 max_seq_length = 4 hidden_dim = 3 inputs = tf.ones((batch_size, max_seq_length, hidden_dim), dtype=tf.float32) outputs = mixing.LinearTransformLayer( kernel_initializer=tf.keras.initializers.Ones())( query=inputs, value=inputs) # hidden_dim * (max_seq_length * 1) = 12. expected_outputs = [ [ [12., 12., 12.], [12., 12., 12.], [12., 12., 12.], [12., 12., 12.], ], [ [12., 12., 12.], [12., 12., 12.], [12., 12., 12.], [12., 12., 12.], ], ] np.testing.assert_allclose(outputs, expected_outputs, rtol=1e-6, atol=1e-6) def test_pick_fourier_transform(self): # Ensure we don't hit an edge case which exceeds the fixed numerical error. tf.random.set_seed(1) np.random.seed(1) batch_size = 3 max_seq_length = 4 hidden_dim = 8 fft = mixing._pick_fourier_transform( use_fft=True, max_seq_length=max_seq_length, hidden_dim=hidden_dim) dft_matmul = mixing._pick_fourier_transform( use_fft=False, max_seq_length=max_seq_length, hidden_dim=hidden_dim) inputs = tf.random.uniform([batch_size, max_seq_length, hidden_dim]) inputs = tf.cast(inputs, tf.complex64) np.testing.assert_allclose( fft(inputs), dft_matmul(inputs), rtol=1e-6, atol=1e-6) if __name__ == "__main__": tf.test.main()
3,549
31.272727
80
py
models
models-master/official/nlp/modeling/layers/bigbird_attention.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Keras-based bigbird attention layer."""

import numpy as np
import tensorflow as tf

MAX_SEQ_LEN = 4096


def create_band_mask_from_inputs(from_blocked_mask, to_blocked_mask):
  """Create 3D attention mask from a 2D tensor mask.

  Args:
    from_blocked_mask: 2D Tensor of shape [batch_size,
      from_seq_length//from_block_size, from_block_size].
    to_blocked_mask: int32 Tensor of shape [batch_size,
      to_seq_length//to_block_size, to_block_size].

  Returns:
    float Tensor of shape [batch_size, 1, from_seq_length//from_block_size-4,
      from_block_size, 3*to_block_size].
  """
  exp_blocked_to_pad = tf.concat([
      to_blocked_mask[:, 1:-3], to_blocked_mask[:, 2:-2],
      to_blocked_mask[:, 3:-1]
  ], 2)
  band_mask = tf.einsum("BLQ,BLK->BLQK", from_blocked_mask[:, 2:-2],
                        exp_blocked_to_pad)
  band_mask = tf.expand_dims(band_mask, 1)
  return band_mask


def bigbird_block_rand_mask(from_seq_length,
                            to_seq_length,
                            from_block_size,
                            to_block_size,
                            num_rand_blocks,
                            last_idx=-1):
  """Create adjacency list of random attention.

  Args:
    from_seq_length: int. length of from sequence.
    to_seq_length: int. length of to sequence.
    from_block_size: int. size of block in from sequence.
    to_block_size: int. size of block in to sequence.
    num_rand_blocks: int. Number of random chunks per row.
    last_idx: if -1 then num_rand_blocks blocks are chosen anywhere in the to
      sequence; if positive then num_rand_blocks blocks are chosen only up to
      last_idx.

  Returns:
    adjacency list of size from_seq_length//from_block_size-2 by
      num_rand_blocks
  """
  assert from_seq_length//from_block_size == to_seq_length//to_block_size, \
      "Error the number of blocks needs to be same!"
rand_attn = np.zeros( (from_seq_length // from_block_size - 2, num_rand_blocks), dtype=np.int32) middle_seq = np.arange(1, to_seq_length // to_block_size - 1, dtype=np.int32) last = to_seq_length // to_block_size - 1 if last_idx > (2 * to_block_size): last = (last_idx // to_block_size) - 1 r = num_rand_blocks # shorthand for i in range(1, from_seq_length // from_block_size - 1): start = i - 2 end = i if i == 1: rand_attn[i - 1, :] = np.random.permutation(middle_seq[2:last])[:r] elif i == 2: rand_attn[i - 1, :] = np.random.permutation(middle_seq[3:last])[:r] elif i == from_seq_length // from_block_size - 3: rand_attn[i - 1, :] = np.random.permutation(middle_seq[:last])[:r] # Missing -3: should have been sliced till last-3 elif i == from_seq_length // from_block_size - 2: rand_attn[i - 1, :] = np.random.permutation(middle_seq[:last])[:r] # Missing -4: should have been sliced till last-4 else: if start > last: start = last rand_attn[i - 1, :] = np.random.permutation(middle_seq[:start])[:r] elif (end + 1) == last: rand_attn[i - 1, :] = np.random.permutation(middle_seq[:start])[:r] else: rand_attn[i - 1, :] = np.random.permutation( np.concatenate((middle_seq[:start], middle_seq[end + 1:last])))[:r] return rand_attn def create_rand_mask_from_inputs(from_blocked_mask, to_blocked_mask, rand_attn, num_attention_heads, num_rand_blocks, batch_size, from_seq_length, from_block_size): """Create 3D attention mask from a 2D tensor mask. Args: from_blocked_mask: 2D Tensor of shape [batch_size, from_seq_length//from_block_size, from_block_size]. to_blocked_mask: int32 Tensor of shape [batch_size, to_seq_length//to_block_size, to_block_size]. rand_attn: [batch_size, num_attention_heads, from_seq_length//from_block_size-2, num_rand_blocks] num_attention_heads: int. Number of attention heads. num_rand_blocks: int. Number of random chunks per row. batch_size: int. Batch size for computation. from_seq_length: int. length of from sequence. from_block_size: int. size of block in from sequence. Returns: float Tensor of shape [batch_size, num_attention_heads, from_seq_length//from_block_size-2, from_block_size, num_rand_blocks*to_block_size]. """ num_windows = from_seq_length // from_block_size - 2 rand_mask = tf.reshape( tf.gather(to_blocked_mask, rand_attn, batch_dims=1), [ batch_size, num_attention_heads, num_windows, num_rand_blocks * from_block_size ]) rand_mask = tf.einsum("BLQ,BHLK->BHLQK", from_blocked_mask[:, 1:-1], rand_mask) return rand_mask def bigbird_block_sparse_attention( query_layer, key_layer, value_layer, band_mask, from_mask, to_mask, from_blocked_mask, to_blocked_mask, rand_attn, num_attention_heads, num_rand_blocks, size_per_head, batch_size, from_seq_length, to_seq_length, from_block_size, to_block_size): """BigBird attention sparse calculation using blocks in linear time. Assumes from_seq_length//from_block_size == to_seq_length//to_block_size. Args: query_layer: float Tensor of shape [batch_size, num_attention_heads, from_seq_length, size_per_head] key_layer: float Tensor of shape [batch_size, num_attention_heads, to_seq_length, size_per_head] value_layer: float Tensor of shape [batch_size, num_attention_heads, to_seq_length, size_per_head] band_mask: (optional) int32 Tensor of shape [batch_size, 1, from_seq_length//from_block_size-4, from_block_size, 3*to_block_size]. The values should be 1 or 0. The attention scores will effectively be set to -infinity for any positions in the mask that are 0, and will be unchanged for positions that are 1. 
from_mask: (optional) int32 Tensor of shape [batch_size, 1, from_seq_length, 1]. The values should be 1 or 0. The attention scores will effectively be set to -infinity for any positions in the mask that are 0, and will be unchanged for positions that are 1. to_mask: (optional) int32 Tensor of shape [batch_size, 1, 1, to_seq_length]. The values should be 1 or 0. The attention scores will effectively be set to -infinity for any positions in the mask that are 0, and will be unchanged for positions that are 1. from_blocked_mask: (optional) int32 Tensor of shape [batch_size, from_seq_length//from_block_size, from_block_size]. Same as from_mask, just reshaped. to_blocked_mask: (optional) int32 Tensor of shape [batch_size, to_seq_length//to_block_size, to_block_size]. Same as to_mask, just reshaped. rand_attn: [batch_size, num_attention_heads, from_seq_length//from_block_size-2, num_rand_blocks] num_attention_heads: int. Number of attention heads. num_rand_blocks: int. Number of random chunks per row. size_per_head: int. Size of each attention head. batch_size: int. Batch size for computation. from_seq_length: int. length of from sequence. to_seq_length: int. length of to sequence. from_block_size: int. size of block in from sequence. to_block_size: int. size of block in to sequence. Returns: float Tensor of shape [batch_size, from_seq_length, num_attention_heads, size_per_head]. """ rand_attn = tf.expand_dims(rand_attn, 0) rand_attn = tf.repeat(rand_attn, batch_size, 0) rand_mask = create_rand_mask_from_inputs( from_blocked_mask, to_blocked_mask, rand_attn, num_attention_heads, num_rand_blocks, batch_size, from_seq_length, from_block_size, ) # Define shorthands h = num_attention_heads r = num_rand_blocks d = size_per_head b = batch_size m = from_seq_length n = to_seq_length wm = from_block_size wn = to_block_size dtype = query_layer.dtype query_layer = tf.transpose(query_layer, perm=[0, 2, 1, 3]) key_layer = tf.transpose(key_layer, perm=[0, 2, 1, 3]) value_layer = tf.transpose(value_layer, perm=[0, 2, 1, 3]) blocked_query_matrix = tf.reshape(query_layer, (b, h, m // wm, wm, -1)) blocked_key_matrix = tf.reshape(key_layer, (b, h, n // wn, wn, -1)) blocked_value_matrix = tf.reshape(value_layer, (b, h, n // wn, wn, -1)) gathered_key = tf.reshape( tf.gather(blocked_key_matrix, rand_attn, batch_dims=2, name="gather_key"), (b, h, m // wm - 2, r * wn, -1)) # [b, h, n//wn-2, r, wn, -1] gathered_value = tf.reshape( tf.gather( blocked_value_matrix, rand_attn, batch_dims=2, name="gather_value"), (b, h, m // wm - 2, r * wn, -1)) # [b, h, n//wn-2, r, wn, -1] first_product = tf.einsum( "BHQD,BHKD->BHQK", blocked_query_matrix[:, :, 0], key_layer) # [b, h, wm, -1] x [b, h, n, -1] ==> [b, h, wm, n] first_product = tf.multiply(first_product, 1.0 / np.sqrt(d)) first_product += (1.0 - tf.cast(to_mask, dtype=dtype)) * -10000.0 first_attn_weights = tf.nn.softmax(first_product) # [b, h, wm, n] first_context_layer = tf.einsum( "BHQK,BHKD->BHQD", first_attn_weights, value_layer) # [b, h, wm, n] x [b, h, n, -1] ==> [b, h, wm, -1] first_context_layer = tf.expand_dims(first_context_layer, 2) second_key_mat = tf.concat([ blocked_key_matrix[:, :, 0], blocked_key_matrix[:, :, 1], blocked_key_matrix[:, :, 2], blocked_key_matrix[:, :, -1], gathered_key[:, :, 0] ], 2) # [b, h, (4+r)*wn, -1] second_value_mat = tf.concat([ blocked_value_matrix[:, :, 0], blocked_value_matrix[:, :, 1], blocked_value_matrix[:, :, 2], blocked_value_matrix[:, :, -1], gathered_value[:, :, 0] ], 2) # [b, h, (4+r)*wn, -1] second_product = tf.einsum( 
"BHQD,BHKD->BHQK", blocked_query_matrix[:, :, 1], second_key_mat ) # [b, h, wm, -1] x [b, h, (4+r)*wn, -1] ==> [b, h, wm, (4+r)*wn] second_seq_pad = tf.concat([ to_mask[:, :, :, :3 * wn], to_mask[:, :, :, -wn:], tf.ones([b, 1, 1, r * wn], dtype=dtype) ], 3) second_rand_pad = tf.concat([ tf.ones([b, h, wm, 4 * wn], dtype=dtype), rand_mask[:, :, 0] ], 3) second_product = tf.multiply(second_product, 1.0 / np.sqrt(d)) second_product += (1.0 - tf.minimum(second_seq_pad, second_rand_pad)) * -10000.0 second_attn_weights = tf.nn.softmax(second_product) # [b , h, wm, (4+r)*wn] second_context_layer = tf.einsum( "BHQK,BHKD->BHQD", second_attn_weights, second_value_mat ) # [b, h, wm, (4+r)*wn] x [b, h, (4+r)*wn, -1] ==> [b, h, wm, -1] second_context_layer = tf.expand_dims(second_context_layer, 2) exp_blocked_key_matrix = tf.concat([ blocked_key_matrix[:, :, 1:-3], blocked_key_matrix[:, :, 2:-2], blocked_key_matrix[:, :, 3:-1] ], 3) # [b, h, m//wm-4, 3*wn, -1] exp_blocked_value_matrix = tf.concat([ blocked_value_matrix[:, :, 1:-3], blocked_value_matrix[:, :, 2:-2], blocked_value_matrix[:, :, 3:-1] ], 3) # [b, h, m//wm-4, 3*wn, -1] middle_query_matrix = blocked_query_matrix[:, :, 2:-2] inner_band_product = tf.einsum( "BHLQD,BHLKD->BHLQK", middle_query_matrix, exp_blocked_key_matrix ) # [b, h, m//wm-4, wm, -1] x [b, h, m//wm-4, 3*wn, -1] # ==> [b, h, m//wm-4, wm, 3*wn] inner_band_product = tf.multiply(inner_band_product, 1.0 / np.sqrt(d)) rand_band_product = tf.einsum( "BHLQD,BHLKD->BHLQK", middle_query_matrix, gathered_key[:, :, 1:-1]) # [b, h, m//wm-4, wm, -1] x [b, h, m//wm-4, r*wn, -1] # ==> [b, h, m//wm-4, wm, r*wn] rand_band_product = tf.multiply(rand_band_product, 1.0 / np.sqrt(d)) first_band_product = tf.einsum( "BHLQD,BHKD->BHLQK", middle_query_matrix, blocked_key_matrix[:, :, 0] ) # [b, h, m//wm-4, wm, -1] x [b, h, wn, -1] ==> [b, h, m//wm-4, wm, wn] first_band_product = tf.multiply(first_band_product, 1.0 / np.sqrt(d)) last_band_product = tf.einsum( "BHLQD,BHKD->BHLQK", middle_query_matrix, blocked_key_matrix[:, :, -1] ) # [b, h, m//wm-4, wm, -1] x [b, h, wn, -1] ==> [b, h, m//wm-4, wm, wn] last_band_product = tf.multiply(last_band_product, 1.0 / np.sqrt(d)) inner_band_product += (1.0 - band_mask) * -10000.0 first_band_product += (1.0 - tf.expand_dims(to_mask[:, :, :, :wn], 3)) * -10000.0 last_band_product += (1.0 - tf.expand_dims(to_mask[:, :, :, -wn:], 3)) * -10000.0 rand_band_product += (1.0 - rand_mask[:, :, 1:-1]) * -10000.0 band_product = tf.concat([ first_band_product, inner_band_product, rand_band_product, last_band_product ], -1) # [b, h, m//wm-4, wm, (5+r)*wn] attn_weights = tf.nn.softmax(band_product) # [b, h, m//wm-4, wm, (5+r)*wn] context_layer = tf.einsum( "BHLQK,BHLKD->BHLQD", attn_weights[:, :, :, :, wn:4 * wn], exp_blocked_value_matrix ) # [b, h, m//wm-4, wm, 3*wn] x [b, h, m//wm-4, 3*wn, -1] # ==> [b, h, m//wm-4, wm, -1] context_layer += tf.einsum( "BHLQK,BHLKD->BHLQD", attn_weights[:, :, :, :, 4 * wn:-wn], gathered_value[:, :, 1:-1] ) # [b, h, m//wm-4, wm, r*wn] x [b, h, m//wm-4, r*wn, -1] # ==> [b, h, m//wm-4, wm, -1] context_layer += tf.einsum( "BHLQK,BHKD->BHLQD", attn_weights[:, :, :, :, :wn], blocked_value_matrix[:, :, 0] ) # [b, h, m//wm-4, wm, wn] x [b, h, wn, -1] ==> [b, h, m//wm-4, wm, -1] context_layer += tf.einsum( "BHLQK,BHKD->BHLQD", attn_weights[:, :, :, :, -wn:], blocked_value_matrix[:, :, -1] ) # [b, h, m//wm-4, wm, wn] x [b, h, wn, -1] ==> [b, h, m//wm-4, wm, -1] second_last_key_mat = tf.concat([ blocked_key_matrix[:, :, 0], blocked_key_matrix[:, :, -3], 
blocked_key_matrix[:, :, -2], blocked_key_matrix[:, :, -1], gathered_key[:, :, -1] ], 2) # [b, h, (4+r)*wn, -1] second_last_value_mat = tf.concat([ blocked_value_matrix[:, :, 0], blocked_value_matrix[:, :, -3], blocked_value_matrix[:, :, -2], blocked_value_matrix[:, :, -1], gathered_value[:, :, -1] ], 2) # [b, h, (4+r)*wn, -1] second_last_product = tf.einsum( "BHQD,BHKD->BHQK", blocked_query_matrix[:, :, -2], second_last_key_mat ) # [b, h, wm, -1] x [b, h, (4+r)*wn, -1] ==> [b, h, wm, (4+r)*wn] second_last_seq_pad = tf.concat([ to_mask[:, :, :, :wn], to_mask[:, :, :, -3 * wn:], tf.ones([b, 1, 1, r * wn], dtype=dtype) ], 3) second_last_rand_pad = tf.concat( [tf.ones([b, h, wm, 4 * wn], dtype=dtype), rand_mask[:, :, -1]], 3) second_last_product = tf.multiply(second_last_product, 1.0 / np.sqrt(d)) second_last_product += ( 1.0 - tf.minimum(second_last_seq_pad, second_last_rand_pad)) * -10000.0 second_last_attn_weights = tf.nn.softmax( second_last_product) # [b, h, wm, (4+r)*wn] second_last_context_layer = tf.einsum( "BHQK,BHKD->BHQD", second_last_attn_weights, second_last_value_mat ) # [b, h, wm, (4+r)*wn] x [b, h, (4+r)*wn, -1] ==> [b, h, wm, -1] second_last_context_layer = tf.expand_dims(second_last_context_layer, 2) last_product = tf.einsum( "BHQD,BHKD->BHQK", blocked_query_matrix[:, :, -1], key_layer) # [b, h, wm, -1] x [b, h, n, -1] ==> [b, h, wm, n] last_product = tf.multiply(last_product, 1.0 / np.sqrt(d)) last_product += (1.0 - to_mask) * -10000.0 last_attn_weights = tf.nn.softmax(last_product) # [b, h, wm, n] last_context_layer = tf.einsum( "BHQK,BHKD->BHQD", last_attn_weights, value_layer) # [b, h, wm, n] x [b, h, n, -1] ==> [b, h, wm, -1] last_context_layer = tf.expand_dims(last_context_layer, 2) context_layer = tf.concat([ first_context_layer, second_context_layer, context_layer, second_last_context_layer, last_context_layer ], 2) context_layer = tf.reshape(context_layer, (b, h, m, -1)) * from_mask context_layer = tf.transpose(context_layer, (0, 2, 1, 3)) return context_layer class BigBirdMasks(tf.keras.layers.Layer): """Creates bigbird attention masks.""" def __init__(self, block_size, **kwargs): super().__init__(**kwargs) self._block_size = block_size def call(self, inputs, mask): encoder_shape = tf.shape(mask) mask = tf.cast(mask, inputs.dtype) batch_size, seq_length = encoder_shape[0], encoder_shape[1] # reshape for blocking blocked_encoder_mask = tf.reshape( mask, (batch_size, seq_length // self._block_size, self._block_size)) encoder_from_mask = tf.reshape(mask, (batch_size, 1, seq_length, 1)) encoder_to_mask = tf.reshape(mask, (batch_size, 1, 1, seq_length)) band_mask = create_band_mask_from_inputs(blocked_encoder_mask, blocked_encoder_mask) return [band_mask, encoder_from_mask, encoder_to_mask, blocked_encoder_mask] @tf.keras.utils.register_keras_serializable(package="Text") class BigBirdAttention(tf.keras.layers.MultiHeadAttention): """BigBird, a sparse attention mechanism. This layer follows the paper "Big Bird: Transformers for Longer Sequences" (https://arxiv.org/abs/2007.14062). It reduces this quadratic dependency of attention computation to linear. Arguments are the same as `MultiHeadAttention` layer. """ def __init__(self, num_rand_blocks=3, from_block_size=64, to_block_size=64, max_rand_mask_length=MAX_SEQ_LEN, seed=None, **kwargs): super().__init__(**kwargs) self._num_rand_blocks = num_rand_blocks self._from_block_size = from_block_size self._to_block_size = to_block_size self._seed = seed # Generates random attention. 
np.random.seed(self._seed) # pylint: disable=g-complex-comprehension rand_attn = [ bigbird_block_rand_mask( max_rand_mask_length, max_rand_mask_length, from_block_size, to_block_size, num_rand_blocks, last_idx=1024) for _ in range(self._num_heads) ] # pylint: enable=g-complex-comprehension rand_attn = np.stack(rand_attn, axis=0) self.rand_attn = tf.constant(rand_attn, dtype=tf.int32) def _compute_attention(self, query, key, value, attention_mask=None): (band_mask, encoder_from_mask, encoder_to_mask, blocked_encoder_mask) = attention_mask query_shape = tf.shape(query) from_seq_length = query_shape[1] to_seq_length = tf.shape(key)[1] rand_attn = self.rand_attn[:, :(from_seq_length // self._from_block_size - 2)] return bigbird_block_sparse_attention( query, key, value, band_mask, encoder_from_mask, encoder_to_mask, blocked_encoder_mask, blocked_encoder_mask, num_attention_heads=self._num_heads, num_rand_blocks=self._num_rand_blocks, size_per_head=self._key_dim, batch_size=query_shape[0], from_seq_length=from_seq_length, to_seq_length=to_seq_length, from_block_size=self._from_block_size, to_block_size=self._to_block_size, rand_attn=rand_attn) def call(self, query, value, key=None, attention_mask=None, **kwargs): # pytype: disable=signature-mismatch # overriding-parameter-count-checks if not self._built_from_signature: self._build_from_signature(query=query, value=value, key=key) if key is None: key = value # N = `num_attention_heads` # H = `size_per_head` # `query` = [B, T, N ,H] query = self._query_dense(query) # `key` = [B, S, N, H] key = self._key_dense(key) # `value` = [B, S, N, H] value = self._value_dense(value) attention_output = self._compute_attention(query, key, value, attention_mask) attention_output.set_shape([None, None, self._num_heads, self._value_dim]) attention_output = self._output_dense(attention_output) return attention_output def get_config(self): config = { "num_rand_blocks": self._num_rand_blocks, "from_block_size": self._from_block_size, "to_block_size": self._to_block_size, "seed": self._seed } base_config = super().get_config() return dict(list(base_config.items()) + list(config.items()))
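A minimal end-to-end sketch tying `BigBirdMasks` to `BigBirdAttention` above. The module path and the concrete shapes are assumptions chosen to satisfy the constraints visible in this file: the sequence length must be a multiple of the block size, and it is kept at 1024 here because random block indices are drawn up to `last_idx=1024` at construction, so shorter inputs could gather out-of-range block indices.

import tensorflow as tf
from official.nlp.modeling.layers import bigbird_attention  # assumed module path

block_size, heads, hidden = 64, 4, 128
inputs = tf.random.uniform((2, 1024, hidden))   # [batch, seq, width]
padding_mask = tf.ones((2, 1024))               # 1 = real token, 0 = padding

# BigBirdMasks returns [band_mask, from_mask, to_mask, blocked_mask], which
# BigBirdAttention._compute_attention unpacks.
masks = bigbird_attention.BigBirdMasks(block_size=block_size)(
    inputs, padding_mask)
layer = bigbird_attention.BigBirdAttention(
    num_heads=heads,
    key_dim=hidden // heads,
    num_rand_blocks=3,
    from_block_size=block_size,
    to_block_size=block_size,
    seed=0)
out = layer(query=inputs, value=inputs, attention_mask=masks)
# out.shape == (2, 1024, 128)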
21,101
41.803245
147
py
models
models-master/official/nlp/modeling/layers/reuse_transformer_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for Keras-based transformer block layer.""" from absl.testing import parameterized import numpy as np import tensorflow as tf from official.nlp.modeling.layers import reuse_transformer @parameterized.named_parameters( ('base', reuse_transformer.ReuseTransformer)) class ReuseTransformerLayerTest(tf.test.TestCase, parameterized.TestCase): def tearDown(self): super(ReuseTransformerLayerTest, self).tearDown() tf.keras.mixed_precision.set_global_policy('float32') def test_layer_creation(self, transformer_cls): test_layer = transformer_cls( num_attention_heads=10, inner_dim=2048, inner_activation='relu') sequence_length = 21 width = 80 # Create a 3-dimensional input (the first dimension is implicit). data_tensor = tf.keras.Input(shape=(sequence_length, width)) output_tensor, _ = test_layer(data_tensor) # The default output of a transformer layer should be the same as the input. self.assertEqual(data_tensor.shape.as_list(), output_tensor.shape.as_list()) def test_layer_creation_with_mask(self, transformer_cls): test_layer = transformer_cls( num_attention_heads=10, inner_dim=2048, inner_activation='relu') sequence_length = 21 width = 80 # Create a 3-dimensional input (the first dimension is implicit). data_tensor = tf.keras.Input(shape=(sequence_length, width)) # Create a 2-dimensional input (the first dimension is implicit). mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length)) output_tensor, _ = test_layer([data_tensor, mask_tensor]) # The default output of a transformer layer should be the same as the input. self.assertEqual(data_tensor.shape.as_list(), output_tensor.shape.as_list()) def test_layer_invocation(self, transformer_cls): test_layer = transformer_cls( num_attention_heads=10, inner_dim=2048, inner_activation='relu') sequence_length = 21 width = 80 # Create a 3-dimensional input (the first dimension is implicit). data_tensor = tf.keras.Input(shape=(sequence_length, width)) output_tensor = test_layer(data_tensor) # Create a model from the test layer. model = tf.keras.Model(data_tensor, output_tensor) # Invoke the model on test data. We can't validate the output data itself # (the NN is too complex) but this will rule out structural runtime errors. batch_size = 6 input_data = np.random.random_sample( (batch_size, sequence_length, width)) _ = model.predict(input_data) def test_layer_invocation_with_mask(self, transformer_cls): test_layer = transformer_cls( num_attention_heads=10, inner_dim=2048, inner_activation='relu') sequence_length = 21 width = 80 # Create a 3-dimensional input (the first dimension is implicit). data_tensor = tf.keras.Input(shape=(sequence_length, width)) # Create a 2-dimensional input (the first dimension is implicit). mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length)) output_tensor = test_layer([data_tensor, mask_tensor]) # Create a model from the test layer. 
model = tf.keras.Model([data_tensor, mask_tensor], output_tensor) # Invoke the model on test data. We can't validate the output data itself # (the NN is too complex) but this will rule out structural runtime errors. batch_size = 6 input_data = np.random.random_sample( (batch_size, sequence_length, width)) # The attention mask should be of shape (batch, from_seq_len, to_seq_len), # which here is (batch, sequence_length, sequence_length) mask_data = np.random.randint( 2, size=(batch_size, sequence_length, sequence_length)) _ = model.predict([input_data, mask_data]) def test_layer_output_range(self, transformer_cls): test_layer = transformer_cls( num_attention_heads=10, inner_dim=2048, inner_activation='relu') sequence_length = 21 width = 80 batch_size = 6 input_data = np.random.random_sample( (batch_size, sequence_length, width)) mask_data = np.random.randint( 2, size=(batch_size, sequence_length, sequence_length)) output_tensor, _ = test_layer([input_data, mask_data]) # The layer only attends to the first token and outputs the first token # embedding. new_layer = transformer_cls( num_attention_heads=10, inner_dim=2048, inner_activation='relu', output_range=1) _ = new_layer([input_data, mask_data]) new_layer.set_weights(test_layer.get_weights()) new_output_tensor, _ = new_layer([input_data, mask_data]) self.assertAllClose( new_output_tensor, output_tensor[:, 0:1, :], atol=0.002, rtol=0.01) def test_layer_output_range_with_relative_pe(self, transformer_cls): test_layer = transformer_cls( num_attention_heads=10, inner_dim=2048, inner_activation='relu', use_relative_pe=True) sequence_length = 21 width = 80 batch_size = 6 input_data = np.random.random_sample( (batch_size, sequence_length, width)) mask_data = np.random.randint( 2, size=(batch_size, sequence_length, sequence_length)) output_tensor, _ = test_layer([input_data, mask_data]) # The layer only attends to the first token and outputs the first token # embedding. new_layer = transformer_cls( num_attention_heads=10, inner_dim=2048, inner_activation='relu', output_range=1, use_relative_pe=True) _ = new_layer([input_data, mask_data]) new_layer.set_weights(test_layer.get_weights()) new_output_tensor, _ = new_layer([input_data, mask_data]) self.assertAllClose( new_output_tensor, output_tensor[:, 0:1, :], atol=0.002, rtol=0.01) def test_layer_output_range_without_mask(self, transformer_cls): test_layer = transformer_cls( num_attention_heads=10, inner_dim=2048, inner_activation='relu', norm_first=True) sequence_length = 21 width = 80 batch_size = 6 input_data = np.random.random_sample( (batch_size, sequence_length, width)) output_tensor, _ = test_layer(input_data) # The layer only attends to the first token and outputs the first token # embedding. 
new_layer = transformer_cls( num_attention_heads=10, inner_dim=2048, inner_activation='relu', output_range=1, norm_first=True) _ = new_layer(input_data) new_layer.set_weights(test_layer.get_weights()) new_output_tensor, _ = new_layer(input_data) self.assertAllClose( new_output_tensor, output_tensor[:, 0:1, :], atol=0.002, rtol=0.01) def test_layer_output_range_with_pre_norm(self, transformer_cls): test_layer = transformer_cls( num_attention_heads=10, inner_dim=2048, inner_activation='relu', norm_first=True) sequence_length = 21 width = 80 batch_size = 6 input_data = np.random.random_sample( (batch_size, sequence_length, width)) mask_data = np.random.randint( 2, size=(batch_size, sequence_length, sequence_length)) output_tensor, _ = test_layer([input_data, mask_data]) # The layer only attends to the first token and outputs the first token # embedding. new_layer = transformer_cls( num_attention_heads=10, inner_dim=2048, inner_activation='relu', output_range=1, norm_first=True) _ = new_layer([input_data, mask_data]) new_layer.set_weights(test_layer.get_weights()) new_output_tensor, _ = new_layer([input_data, mask_data]) self.assertAllClose( new_output_tensor, output_tensor[:, 0:1, :], atol=0.002, rtol=0.01) def test_layer_invocation_with_float16_dtype(self, transformer_cls): tf.keras.mixed_precision.set_global_policy('mixed_float16') test_layer = transformer_cls( num_attention_heads=10, inner_dim=2048, inner_activation='relu') sequence_length = 21 width = 80 # Create a 3-dimensional input (the first dimension is implicit). data_tensor = tf.keras.Input(shape=(sequence_length, width)) # Create a 2-dimensional input (the first dimension is implicit). mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length)) output_tensor = test_layer([data_tensor, mask_tensor]) # Create a model from the test layer. model = tf.keras.Model([data_tensor, mask_tensor], output_tensor) # Invoke the model on test data. We can't validate the output data itself # (the NN is too complex) but this will rule out structural runtime errors. batch_size = 6 input_data = (np.random.random_sample( (batch_size, sequence_length, width))) # The attention mask should be of shape (batch, from_seq_len, to_seq_len), # which here is (batch, sequence_length, sequence_length) mask_data = np.random.randint( 2, size=(batch_size, sequence_length, sequence_length)) _ = model.predict([input_data, mask_data]) def test_transform_with_initializer(self, transformer_cls): test_layer = transformer_cls( num_attention_heads=10, inner_dim=2048, inner_activation='relu', kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02)) sequence_length = 21 width = 80 # Create a 3-dimensional input (the first dimension is implicit). data_tensor = tf.keras.Input(shape=(sequence_length, width)) output, _ = test_layer(data_tensor) # The default output of a transformer layer should be the same as the input. self.assertEqual(data_tensor.shape.as_list(), output.shape.as_list()) def test_dynamic_layer_sequence(self, transformer_cls): test_layer = transformer_cls( num_attention_heads=10, inner_dim=2048, inner_activation='relu', kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02)) # Create a 3-dimensional input (the first dimension is implicit). 
width = 30 input_tensor = tf.keras.Input(shape=(None, width)) output_tensor, _ = test_layer(input_tensor) model = tf.keras.Model(input_tensor, output_tensor) input_length = 17 input_data = np.ones((1, input_length, width)) output_data = model.predict(input_data) self.assertAllEqual([1, input_length, width], output_data.shape) class ReuseTransformerArgumentTest(tf.test.TestCase, parameterized.TestCase): def test_use_bias_norm_first(self): num_attention_heads = 2 hidden_size = 16 encoder_block = reuse_transformer.ReuseTransformer( num_attention_heads=num_attention_heads, inner_dim=32, inner_activation='relu', output_dropout=0.1, attention_dropout=0.1, use_bias=False, norm_first=True, norm_epsilon=1e-6, inner_dropout=0.1, attention_initializer=tf.keras.initializers.RandomUniform( minval=0., maxval=1.)) # Forward path. dummy_tensor = tf.zeros([2, 4, 16], dtype=tf.float32) dummy_mask = tf.zeros([2, 4, 4], dtype=tf.float32) inputs = [dummy_tensor, dummy_mask] output, _ = encoder_block(inputs) self.assertEqual(output.shape, (2, 4, hidden_size)) def test_get_config(self): num_attention_heads = 2 encoder_block = reuse_transformer.ReuseTransformer( num_attention_heads=num_attention_heads, inner_dim=32, inner_activation='relu', output_dropout=0.1, attention_dropout=0.1, use_bias=False, norm_first=True, norm_epsilon=1e-6, inner_dropout=0.1, attention_initializer=tf.keras.initializers.RandomUniform( minval=0., maxval=1.)) encoder_block_config = encoder_block.get_config() new_encoder_block = reuse_transformer.ReuseTransformer.from_config( encoder_block_config) self.assertEqual(encoder_block_config, new_encoder_block.get_config()) @parameterized.parameters({'attention_axes': None}, {'attention_axes': [1]}, {'attention_axes': [2]}, {'attention_axes': [1, 2]}) def test_several_attention_axes(self, attention_axes): test_layer = reuse_transformer.ReuseTransformer( inner_dim=32, inner_activation='relu', output_dropout=0.1, attention_dropout=0.1, use_bias=False, norm_first=True, norm_epsilon=1e-6, inner_dropout=0.1, num_attention_heads=10, attention_axes=attention_axes) num_rows = 21 num_cols = 13 width = 80 # Create a 3-dimensional input (the first dimension is implicit). data_tensor = tf.keras.Input(shape=(num_rows, num_cols, width)) output_tensor, _ = test_layer(data_tensor) # The default output of a transformer layer should be the same as the input. self.assertEqual(data_tensor.shape.as_list(), output_tensor.shape.as_list()) @parameterized.named_parameters( ('plain', False, False, False), ('plain_returnscore', False, True, False), ('plain_with_relative_pe', False, False, True), ('reuse_all', True, False, False), ('reuse_all_returnscore', True, True, False), ('reuse_all_with_relative_pe', True, False, True), ('reuse_5', 5, False, False), ('reuse_5_returnscore', 5, True, False), ('reuse_5_with_relative_pe', 5, False, True),) def test_layer_invocation_with_mask(self, reuse_attention, return_attention_scores, use_relative_pe): test_layer = reuse_transformer.ReuseTransformer( num_attention_heads=10, inner_dim=2048, inner_activation='relu', reuse_attention=reuse_attention, use_relative_pe=use_relative_pe) sequence_length = 21 width = 80 # Create a 3-dimensional input (the first dimension is implicit). data_tensor = tf.keras.Input(shape=(sequence_length, width)) # Create a 2-dimensional input (the first dimension is implicit). 
mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length)) return_scores_tensor = tf.keras.Input(shape=(1,)) reuse_attention_scores = tf.keras.Input( shape=(10, sequence_length, sequence_length)) output_tensor, _ = test_layer( [data_tensor, mask_tensor, reuse_attention_scores]) # Create a model from the test layer. model = tf.keras.Model( ([data_tensor, mask_tensor, reuse_attention_scores], return_scores_tensor), output_tensor) # Invoke the model on test data. We can't validate the output data itself # (the NN is too complex) but this will rule out structural runtime errors. batch_size = 6 input_data = np.random.random_sample( (batch_size, sequence_length, width)) # The attention mask should be of shape (batch, from_seq_len, to_seq_len), # which here is (batch, sequence_length, sequence_length) mask_data = np.random.randint( 2, size=(batch_size, sequence_length, sequence_length)) reuse_scores = np.random.rand( batch_size, 10, sequence_length, sequence_length) _ = model.predict([input_data, mask_data, reuse_scores], return_attention_scores) @parameterized.named_parameters( ('without_relative_pe_with_pe_max_seq_length_10', False, 10), ('with_relative_pe_with_pe_max_seq_length_10', True, 10), ('without_relative_pe_with_pe_max_seq_length_100', False, 100), ('with_relative_pe_with_pe_max_seq_length_100', True, 100)) def test_layer_invocation_with_float16_with_relative_pe( self, use_relative_pe, pe_max_seq_length): tf.keras.mixed_precision.set_global_policy('mixed_float16') test_layer = reuse_transformer.ReuseTransformer( num_attention_heads=10, inner_dim=2048, inner_activation='relu', use_relative_pe=use_relative_pe, pe_max_seq_length=pe_max_seq_length) sequence_length = 21 width = 80 # Create a 3-dimensional input (the first dimension is implicit). data_tensor = tf.keras.Input(shape=(sequence_length, width)) # Create a 2-dimensional input (the first dimension is implicit). mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length)) output_tensor = test_layer([data_tensor, mask_tensor]) # Create a model from the test layer. model = tf.keras.Model([data_tensor, mask_tensor], output_tensor) # Invoke the model on test data. We can't validate the output data itself # (the NN is too complex) but this will rule out structural runtime errors. batch_size = 6 input_data = (np.random.random_sample( (batch_size, sequence_length, width))) # The attention mask should be of shape (batch, from_seq_len, to_seq_len), # which here is (batch, sequence_length, sequence_length) mask_data = np.random.randint( 2, size=(batch_size, sequence_length, sequence_length)) _ = model.predict([input_data, mask_data]) if __name__ == '__main__': tf.test.main()
17,478
40.916067
80
py
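A minimal usage sketch of the layer these tests exercise (not part of the original test file). It assumes `official.nlp.modeling.layers.reuse_transformer` is importable, e.g. from a tf-models checkout, and reuses the illustrative shapes from the tests above.

```python
# Hypothetical usage sketch; shapes mirror the test constants above.
import numpy as np
import tensorflow as tf

from official.nlp.modeling.layers import reuse_transformer

layer = reuse_transformer.ReuseTransformer(
    num_attention_heads=10, inner_dim=2048, inner_activation='relu')

batch_size, sequence_length, width = 6, 21, 80
input_data = np.random.random_sample((batch_size, sequence_length, width))
mask_data = np.random.randint(
    2, size=(batch_size, sequence_length, sequence_length))

# As in the tests, the layer returns (output, attention_scores).
output, attention_scores = layer([input_data, mask_data])
print(output.shape)  # (6, 21, 80)
```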
models
models-master/official/nlp/modeling/layers/transformer_xl.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Keras-based Transformer XL layer.""" from absl import logging import tensorflow as tf from official.modeling import tf_utils from official.nlp.modeling.layers import relative_attention def _cache_memory(current_state, previous_state, memory_length, reuse_length=0): """Caches hidden states into memory. Args: current_state: `Tensor`, the current state. previous_state: `Tensor`, the previous state. memory_length: `int`, the number of tokens to cache. reuse_length: `int`, the number of tokens in the current batch to be cached and reused in the future. Returns: A `Tensor`, representing the cached state with stopped gradients. """ if memory_length is None or memory_length == 0: return None else: if reuse_length > 0: current_state = current_state[:, :reuse_length, :] if previous_state is None: new_mem = current_state[:, -memory_length:, :] else: new_mem = tf.concat( [previous_state, current_state], 1)[:, -memory_length:, :] return tf.stop_gradient(new_mem) @tf.keras.utils.register_keras_serializable(package="Text") class TransformerXLBlock(tf.keras.layers.Layer): """Transformer XL block. This implements a Transformer XL block from "Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context" (https://arxiv.org/abs/1901.02860). This block is further extended to allow for the Transformer-XL re-parameterization in "XLNet: Generalized Autoregressive Pretraining for Language Understanding" (https://arxiv.org/abs/1906.08237). Given an input stream, this block computes attention, applies dropouts and layer norms and feeds into the FFN network. **Note: This layer is currently experimental. Attributes: vocab_size: The size of the token vocabulary. hidden_size: The size of the transformer hidden layers. num_attention_heads: The number of attention heads. head_size: The dimension size of each attention head. inner_size: The inner size for the transformer layers. dropout_rate: Dropout rate for the output of this layer. attention_dropout_rate: Dropout rate on attention probabilities. two_stream: Whether or not to use `TwoStreamRelativeAttention` used in the XLNet pretrainer. If `False`, then it will use `MultiHeadRelativeAttention` as in Transformer XL. norm_epsilon: Epsilon value to initialize normalization layers. inner_activation: The activation to use for the inner FFN layers. kernel_initializer: Initializer for dense layer kernels. inner_dropout: Dropout probability for the inner dropout layer. 
""" def __init__(self, vocab_size, hidden_size, num_attention_heads, head_size, inner_size, dropout_rate, attention_dropout_rate, two_stream=False, norm_epsilon=1e-12, inner_activation="relu", kernel_initializer="variance_scaling", inner_dropout=0.0, **kwargs): """Initializes TransformerXLBlock layer.""" super().__init__(**kwargs) self._vocab_size = vocab_size self._num_heads = num_attention_heads self._head_size = head_size self._hidden_size = hidden_size self._inner_size = inner_size self._dropout_rate = dropout_rate self._attention_dropout_rate = attention_dropout_rate self._inner_activation = inner_activation self._norm_epsilon = norm_epsilon self._kernel_initializer = kernel_initializer self._inner_dropout = inner_dropout self._two_stream = two_stream if two_stream: self._attention_layer_type = relative_attention.TwoStreamRelativeAttention else: self._attention_layer_type = relative_attention.MultiHeadRelativeAttention def build(self, input_shape): input_tensor = input_shape[0] if len(input_shape) == 2 else input_shape input_tensor_shape = tf.TensorShape(input_tensor) if len(input_tensor_shape.as_list()) != 3: raise ValueError("TransformerLayer expects a three-dimensional input of " "shape [batch, sequence, width].") batch_size, sequence_length, hidden_size = input_tensor_shape if len(input_shape) == 2: mask_tensor_shape = tf.TensorShape(input_shape[1]) expected_mask_tensor_shape = tf.TensorShape( [batch_size, sequence_length, sequence_length]) if not expected_mask_tensor_shape.is_compatible_with(mask_tensor_shape): raise ValueError("When passing a mask tensor to TransformerXLBlock, " "the mask tensor must be of shape [batch, " "sequence_length, sequence_length] (here %s). Got a " "mask tensor of shape %s." % (expected_mask_tensor_shape, mask_tensor_shape)) if hidden_size % self._num_heads != 0: raise ValueError( "The input size (%d) is not a multiple of the number of attention " "heads (%d)" % (hidden_size, self._num_heads)) self._attention_layer = self._attention_layer_type( num_heads=self._num_heads, key_dim=self._head_size, value_dim=self._head_size, dropout=self._attention_dropout_rate, use_bias=False, kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer), name="rel_attn") self._attention_dropout = tf.keras.layers.Dropout( rate=self._attention_dropout_rate) self._attention_layer_norm = tf.keras.layers.LayerNormalization( name="self_attention_layer_norm", axis=-1, epsilon=self._norm_epsilon, dtype=tf.float32) self._inner_dense = tf.keras.layers.EinsumDense( "abc,cd->abd", output_shape=(None, self._inner_size), bias_axes="d", kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer), name="inner") self._inner_activation_layer = tf.keras.layers.Activation( self._inner_activation) self._inner_dropout_layer = tf.keras.layers.Dropout( rate=self._inner_dropout) self._output_dense = tf.keras.layers.EinsumDense( "abc,cd->abd", output_shape=(None, hidden_size), bias_axes="d", name="output", kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer)) self._output_dropout = tf.keras.layers.Dropout(rate=self._dropout_rate) self._output_layer_norm = tf.keras.layers.LayerNormalization( name="output_layer_norm", axis=-1, epsilon=self._norm_epsilon) super().build(input_shape) def get_config(self): config = { "vocab_size": self._vocab_size, "hidden_size": self._hidden_size, "num_attention_heads": self._num_heads, "head_size": self._head_size, "inner_size": self._inner_size, "dropout_rate": self._dropout_rate, "attention_dropout_rate": 
self._attention_dropout_rate, "two_stream": self._two_stream, "norm_epsilon": self._norm_epsilon, "inner_activation": self._inner_activation, "kernel_initializer": self._kernel_initializer, "inner_dropout": self._inner_dropout, } base_config = super().get_config() return dict(list(base_config.items()) + list(config.items())) def call(self, content_stream, content_attention_bias, positional_attention_bias, relative_position_encoding=None, segment_matrix=None, segment_encoding=None, segment_attention_bias=None, state=None, content_attention_mask=None, query_stream=None, query_attention_mask=None, target_mapping=None): """Implements `call` for the Layer. Args: content_stream: `Tensor`, the input content stream. This is the standard input to Transformer XL and is commonly referred to as `h` in XLNet. content_attention_bias: Bias `Tensor` for content based attention of shape `[num_heads, dim]`. positional_attention_bias: Bias `Tensor` for position based attention of shape `[num_heads, dim]`. relative_position_encoding: Relative positional encoding `Tensor` of shape `[B, L, dim]`. segment_matrix: Optional `Tensor` of shape `[B, S, S + M]`. Used in XLNet, but not in Transformer XL. segment_encoding: Optional `Tensor` of shape `[2, num_heads, dim]`. Used in XLNet, but not in Transformer XL. segment_attention_bias: Optional bias `Tensor` for segment based attention of shape `[num_heads, dim]`. state: Optional `Tensor` of shape `[B, M, E]`, where M is the length of the state or memory. If passed, this is also attended over as in Transformer XL. content_attention_mask: Optional `Tensor` representing the mask that is added to content attention logits. If state is not None, the mask source sequence dimension should extend M. query_stream: Optional `Tensor`, the query stream. This is introduced in `TwoStreamRelativeAttention`/XLNet pretrainer. This is ignored if `two_stream` is `False`. query_attention_mask: Optional `Tensor` representing the mask that is added to query attention logits. If state is not None, the mask source sequence dimension should extend M. target_mapping: Optional `Tensor` representing the target mapping when calculating query attention. Returns: A `dict` object, containing the key value pairs for `content_attention` and (if `two_stream` is `True`) `query_attention`. """ if not self._two_stream and query_stream is not None: logging.warning("`query_stream` was provided but two stream attention is " "disabled. 
`query_stream` will be ignored.") if self._two_stream: attention_kwargs = dict( content_stream=content_stream, query_stream=query_stream, query_attention_mask=query_attention_mask, target_mapping=target_mapping, content_attention_mask=content_attention_mask) else: attention_kwargs = dict( query=content_stream, value=content_stream, key=content_stream, attention_mask=content_attention_mask) common_attention_kwargs = dict( content_attention_bias=content_attention_bias, relative_position_encoding=relative_position_encoding, positional_attention_bias=positional_attention_bias, segment_matrix=segment_matrix, segment_encoding=segment_encoding, segment_attention_bias=segment_attention_bias, state=state) attention_kwargs.update(common_attention_kwargs) attention_output = self._attention_layer(**attention_kwargs) if self._two_stream: attention_streams = attention_output input_streams = [content_stream, query_stream] else: attention_streams = [attention_output] input_streams = [content_stream] attention_keys = ["content_attention", "query_attention"] attention_output = {} for attention_stream, input_stream, attention_key in zip( attention_streams, input_streams, attention_keys): attention_stream = self._attention_dropout(attention_stream) attention_stream = self._attention_layer_norm( attention_stream + input_stream) inner_output = self._inner_dense(attention_stream) inner_output = self._inner_activation_layer( inner_output) inner_output = self._inner_dropout_layer( inner_output) layer_output = self._output_dense(inner_output) layer_output = self._output_dropout(layer_output) layer_output = self._output_layer_norm(layer_output + attention_stream) attention_output[attention_key] = layer_output return attention_output class TransformerXL(tf.keras.layers.Layer): """Transformer XL. This layer combines multiple Transformer XL blocks from "Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context" (https://arxiv.org/abs/1901.02860). This layer handles the attention biases as well as memory caching and reuse as in Transformer XL and XLNet. Attributes: vocab_size: The number of tokens in vocabulary. num_layers: The number of layers. hidden_size: The hidden size. num_attention_heads: The number of attention heads. head_size: The dimension size of each attention head. inner_size: The hidden size in feed-forward layers. dropout_rate: Dropout rate used in each Transformer XL block. attention_dropout_rate: Dropout rate on attention probabilities. two_stream: Whether or not to use `TwoStreamRelativeAttention` used in the XLNet pretrainer. If `False`, then it will use `MultiHeadRelativeAttention` as in Transformer XL. initializer: The initializer to use for attention biases. tie_attention_biases: Whether or not to tie biases together. If `True`, then each Transformer XL block shares the same trainable attention bias. If `False`, then each block has its own attention bias. This is usually set to `True`. memory_length: The number of tokens to cache. reuse_length: The number of tokens in the current batch to be cached and reused in the future. inner_activation: The activation to use in the inner layers for Transformer XL blocks. Typically "relu" or "gelu". 
""" def __init__(self, vocab_size, num_layers, hidden_size, num_attention_heads, head_size, inner_size, dropout_rate, attention_dropout_rate, initializer, two_stream=False, tie_attention_biases=True, memory_length=None, reuse_length=None, inner_activation="relu", **kwargs): """Initializes TransformerXL.""" super().__init__(**kwargs) self._vocab_size = vocab_size self._initializer = initializer self._num_layers = num_layers self._hidden_size = hidden_size self._num_attention_heads = num_attention_heads self._head_size = head_size self._inner_size = inner_size self._inner_activation = inner_activation self._dropout_rate = dropout_rate self._attention_dropout_rate = attention_dropout_rate self._tie_attention_biases = tie_attention_biases self._two_stream = two_stream self._memory_length = memory_length self._reuse_length = reuse_length if self._tie_attention_biases: attention_bias_shape = [self._num_attention_heads, self._head_size] else: attention_bias_shape = [self._num_layers, self._num_attention_heads, self._head_size] self.content_attention_bias = self.add_weight( "content_attention_bias", shape=attention_bias_shape, dtype=tf.float32, initializer=tf_utils.clone_initializer(self._initializer)) self.positional_attention_bias = self.add_weight( "positional_attention_bias", shape=attention_bias_shape, dtype=tf.float32, initializer=tf_utils.clone_initializer(self._initializer)) self.segment_attention_bias = self.add_weight( "segment_attention_bias", shape=attention_bias_shape, dtype=tf.float32, initializer=tf_utils.clone_initializer(self._initializer)) self.transformer_xl_layers = [] for i in range(self._num_layers): self.transformer_xl_layers.append( TransformerXLBlock( vocab_size=self._vocab_size, hidden_size=self._head_size * self._num_attention_heads, num_attention_heads=self._num_attention_heads, head_size=self._head_size, inner_size=self._inner_size, dropout_rate=self._dropout_rate, attention_dropout_rate=self._attention_dropout_rate, norm_epsilon=1e-12, inner_activation=self._inner_activation, two_stream=self._two_stream, kernel_initializer="variance_scaling", name="layer_%d" % i)) self.output_dropout = tf.keras.layers.Dropout(rate=self._dropout_rate) def get_config(self): config = { "vocab_size": self._vocab_size, "num_layers": self._num_layers, "hidden_size": self._hidden_size, "num_attention_heads": self._num_attention_heads, "head_size": self._head_size, "inner_size": self._inner_size, "dropout_rate": self._dropout_rate, "attention_dropout_rate": self._attention_dropout_rate, "initializer": self._initializer, "two_stream": self._two_stream, "tie_attention_biases": self._tie_attention_biases, "memory_length": self._memory_length, "reuse_length": self._reuse_length, "inner_activation": self._inner_activation, } base_config = super().get_config() return dict(list(base_config.items()) + list(config.items())) def call(self, content_stream, relative_position_encoding, segment_matrix=None, segment_embedding=None, state=None, content_attention_mask=None, query_stream=None, query_attention_mask=None, target_mapping=None): """Implements call() for the layer. Args: content_stream: `Tensor`, the input content stream. This is the standard input to Transformer XL and is commonly referred to as `h` in XLNet. relative_position_encoding: Relative positional encoding `Tensor` of shape `[B, L, dim]`. segment_matrix: Optional `Tensor` of shape `[B, S, S + M]`. Used in XLNet, but not in Transformer XL. segment_embedding: Optional `Tensor` of shape `[2, num_heads, dim]`. 
Used in XLNet, but not in Transformer XL. state: Optional `Tensor` of shape `[B, M, E]`, where M is the length of the state or memory. If passed, this is also attended over as in Transformer XL. content_attention_mask: Optional `Tensor` representing the mask that is added to content attention logits. If state is not None, the mask source sequence dimension should extend M. query_stream: Optional `Tensor`, the query stream. This is introduced in `TwoStreamRelativeAttention`/XLNet pretrainer. This is ignored if `two_stream` is `False`. query_attention_mask: Optional `Tensor` representing the mask that is added to query attention logits. If state is not None, the mask source sequence dimension should extend M. target_mapping: Optional `Tensor` representing the target mapping when calculating query attention. Returns: A tuple consisting of the attention output and the list of cached memory states. The attention output is `content_attention` if `two_stream` is `False`, otherwise it is `query_attention`. """ new_mems = [] if state is None: state = [None] * self._num_layers for i in range(self._num_layers): # cache new mems new_mems.append( _cache_memory(content_stream, state[i], self._memory_length, self._reuse_length)) # segment bias if segment_matrix is None: segment_attention_bias = None segment_encoding = None else: segment_attention_bias = (self.segment_attention_bias if self._tie_attention_biases else self.segment_attention_bias[i]) segment_encoding = segment_embedding[i] content_attention_bias = (self.content_attention_bias if self._tie_attention_biases else self.content_attention_bias[i]) positional_attention_bias = (self.positional_attention_bias if self._tie_attention_biases else self.positional_attention_bias[i]) transformer_xl_layer = self.transformer_xl_layers[i] transformer_xl_output = transformer_xl_layer( content_stream=content_stream, content_attention_bias=content_attention_bias, positional_attention_bias=positional_attention_bias, relative_position_encoding=relative_position_encoding, segment_matrix=segment_matrix, segment_encoding=segment_encoding, segment_attention_bias=segment_attention_bias, state=state[i], content_attention_mask=content_attention_mask, query_attention_mask=query_attention_mask, query_stream=query_stream, target_mapping=target_mapping) content_stream = transformer_xl_output["content_attention"] if self._two_stream: query_stream = transformer_xl_output["query_attention"] else: query_stream = None if self._two_stream: output_stream = query_stream else: output_stream = content_stream return output_stream, new_mems
22,180
38.538324
80
py
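A small sketch of the memory-caching helper defined near the top of the file above. It calls the module-private `_cache_memory` directly, which is fine for illustration but not a public API; the tensor shapes are made up.

```python
import tensorflow as tf

from official.nlp.modeling.layers import transformer_xl

current_state = tf.random.normal([2, 8, 16])   # [batch, seq_len, hidden]
previous_state = tf.random.normal([2, 4, 16])  # [batch, memory, hidden]

# Keeps the last `memory_length` tokens of concat(previous, current),
# with gradients stopped so memory is not backpropagated through.
new_mem = transformer_xl._cache_memory(
    current_state, previous_state, memory_length=6)
print(new_mem.shape)  # (2, 6, 16)
```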
models
models-master/official/nlp/modeling/layers/masked_lm.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Masked language model network.""" # pylint: disable=g-classes-have-attributes import tensorflow as tf @tf.keras.utils.register_keras_serializable(package='Text') class MaskedLM(tf.keras.layers.Layer): """Masked language model network head for BERT modeling. This layer implements a masked language model based on the provided transformer based encoder. It assumes that the encoder network being passed has a "get_embedding_table()" method. Example: ```python encoder=modeling.networks.BertEncoder(...) lm_layer=MaskedLM(embedding_table=encoder.get_embedding_table()) ``` Args: embedding_table: The embedding table from encoder network. activation: The activation, if any, for the dense layer. initializer: The initializer for the dense layer. Defaults to a Glorot uniform initializer. output: The output style for this layer. Can be either 'logits' or 'predictions'. """ def __init__(self, embedding_table, activation=None, initializer='glorot_uniform', output='logits', name=None, **kwargs): super().__init__(name=name, **kwargs) self.embedding_table = embedding_table self.activation = activation self.initializer = tf.keras.initializers.get(initializer) if output not in ('predictions', 'logits'): raise ValueError( ('Unknown `output` value "%s". `output` can be either "logits" or ' '"predictions"') % output) self._output_type = output def build(self, input_shape): self._vocab_size, hidden_size = self.embedding_table.shape self.dense = tf.keras.layers.Dense( hidden_size, activation=self.activation, kernel_initializer=self.initializer, name='transform/dense') self.layer_norm = tf.keras.layers.LayerNormalization( axis=-1, epsilon=1e-12, name='transform/LayerNorm') self.bias = self.add_weight( 'output_bias/bias', shape=(self._vocab_size,), initializer='zeros', trainable=True) super().build(input_shape) def call(self, sequence_data, masked_positions): masked_lm_input = self._gather_indexes(sequence_data, masked_positions) lm_data = self.dense(masked_lm_input) lm_data = self.layer_norm(lm_data) lm_data = tf.matmul(lm_data, self.embedding_table, transpose_b=True) logits = tf.nn.bias_add(lm_data, self.bias) masked_positions_length = masked_positions.shape.as_list()[1] or tf.shape( masked_positions)[1] logits = tf.reshape(logits, [-1, masked_positions_length, self._vocab_size]) if self._output_type == 'logits': return logits return tf.nn.log_softmax(logits) def get_config(self): raise NotImplementedError('MaskedLM cannot be directly serialized because ' 'it has variable sharing logic.') def _gather_indexes(self, sequence_tensor, positions): """Gathers the vectors at the specific positions, for performance. Args: sequence_tensor: Sequence output of shape (`batch_size`, `seq_length`, num_hidden) where num_hidden is number of hidden units. 
positions: Position ids of tokens in the sequence to mask for pretraining, with dimension (batch_size, num_predictions) where `num_predictions` is the maximum number of tokens to mask out and predict per sequence. Returns: Masked out sequence tensor of shape (batch_size * num_predictions, num_hidden). """ sequence_shape = tf.shape(sequence_tensor) batch_size, seq_length = sequence_shape[0], sequence_shape[1] width = sequence_tensor.shape.as_list()[2] or sequence_shape[2] flat_offsets = tf.reshape( tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1]) flat_positions = tf.reshape( positions + tf.cast(flat_offsets, positions.dtype), [-1]) flat_sequence_tensor = tf.reshape(sequence_tensor, [batch_size * seq_length, width]) output_tensor = tf.gather(flat_sequence_tensor, flat_positions) return output_tensor
4,798
37.392
80
py
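A standalone sketch of the MaskedLM head above, using a dummy embedding table in place of an encoder's table; the vocabulary and hidden sizes are illustrative.

```python
import tensorflow as tf

from official.nlp.modeling.layers import masked_lm

vocab_size, hidden_size = 100, 16
embedding_table = tf.Variable(tf.random.normal([vocab_size, hidden_size]))
lm_head = masked_lm.MaskedLM(embedding_table=embedding_table, output='logits')

sequence_output = tf.random.normal([2, 10, hidden_size])  # [batch, seq, hidden]
masked_positions = tf.constant([[1, 3], [0, 5]])          # [batch, num_predictions]

logits = lm_head(sequence_output, masked_positions=masked_positions)
print(logits.shape)  # (2, 2, 100)
```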
models
models-master/official/nlp/modeling/layers/attention_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for the attention layer.""" import numpy as np import tensorflow as tf from official.nlp.modeling.layers import attention def _create_cache(batch_size, init_decode_length, num_heads, head_size): return { "key": tf.zeros([batch_size, init_decode_length, num_heads, head_size], dtype=tf.float32), "value": tf.zeros([batch_size, init_decode_length, num_heads, head_size], dtype=tf.float32) } class CachedAttentionTest(tf.test.TestCase): def test_masked_attention(self): """Test with a mask tensor.""" num_heads, head_size = 2, 2 # Create a 3-dimensional input (the first dimension is implicit). from_seq_length = 4 batch_size = 3 # GPU/CPU case. init_decode_length = 0 # Directly tests the keras layer. cache = _create_cache(batch_size, init_decode_length, num_heads, head_size) layer = attention.CachedAttention(num_heads=num_heads, key_dim=head_size) # Generate data for the input (non-mask) tensors. from_data = tf.zeros((batch_size, from_seq_length, 8), dtype=np.float32) # Invoke the data with a random set of mask data. This should mask at least # one element. mask_data = np.random.randint( 2, size=(batch_size, from_seq_length, from_seq_length)) masked_output_data, cache = layer( query=from_data, value=from_data, attention_mask=mask_data, cache=cache) self.assertEqual(masked_output_data.shape, (3, 4, 8)) self.assertEqual(cache["value"].shape, (3, 4, 2, 2)) # Tests inputs without cache. masked_output_data, cache = layer( query=from_data, value=from_data, attention_mask=mask_data) self.assertEqual(masked_output_data.shape, (3, 4, 8)) self.assertIsNone(cache) def test_padded_decode(self): """Test with a mask tensor.""" num_heads, head_size = 2, 2 from_seq_length = 4 # TPU decoding should pre-allocate the entire sequence. batch_size = 3 init_decode_length = from_seq_length # Directly tests the keras layer. cache = _create_cache(batch_size, init_decode_length, num_heads, head_size) layer = attention.CachedAttention(num_heads=num_heads, key_dim=head_size) # Generate data for the input (non-mask) tensors. from_data = tf.zeros((batch_size, from_seq_length, 8), dtype=np.float32) decode_loop_step = 2 mask_data = np.random.randint( 2, size=(batch_size, from_seq_length, from_seq_length), dtype=np.int32) # Testing the invocation directly as Keras cannot consume inputs correctly. masked_output_data, cache = layer( query=from_data, value=from_data, attention_mask=mask_data, cache=cache, decode_loop_step=decode_loop_step) self.assertEqual(masked_output_data.shape, (3, 4, 8)) self.assertEqual(cache["value"].shape, (3, 4, 2, 2)) if __name__ == "__main__": tf.test.main()
3,526
36.126316
80
py
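A sketch mirroring the cached-attention test above: run the layer once with an empty cache and inspect the populated cache. All sizes are the same illustrative constants used in the test.

```python
import numpy as np
import tensorflow as tf

from official.nlp.modeling.layers import attention

num_heads, head_size = 2, 2
batch_size, from_seq_length = 3, 4

layer = attention.CachedAttention(num_heads=num_heads, key_dim=head_size)
cache = {
    "key": tf.zeros([batch_size, 0, num_heads, head_size]),
    "value": tf.zeros([batch_size, 0, num_heads, head_size]),
}
from_data = tf.zeros((batch_size, from_seq_length, 8), dtype=tf.float32)
mask_data = np.random.randint(
    2, size=(batch_size, from_seq_length, from_seq_length))

output, cache = layer(
    query=from_data, value=from_data, attention_mask=mask_data, cache=cache)
print(output.shape, cache["value"].shape)  # (3, 4, 8) (3, 4, 2, 2)
```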
models
models-master/official/nlp/modeling/layers/tn_transformer_expand_condense.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """TN-BERT TNTransformerExpandCondense employing Expand-Condense layer instead of Dense.""" # pylint: disable=g-classes-have-attributes # Import libraries import gin import tensorflow as tf from official.modeling import tf_utils from official.nlp.modeling.layers.tn_expand_condense import TNExpandCondense @tf.keras.utils.register_keras_serializable(package="Text") @gin.configurable class TNTransformerExpandCondense(tf.keras.layers.Layer): """Transformer layer using tensor network Expand-Condense layer. This layer implements the Transformer from transformer.py, with a single tensor network layer replacing the usual intermediate and output Dense layers. Args: num_attention_heads: Number of attention heads. intermediate_size: Size of the intermediate layer. intermediate_activation: Activation for the intermediate layer. dropout_rate: Dropout probability for the post-attention and output dropout. attention_dropout_rate: Dropout probability for within the attention layer. output_range: the sequence output range, [0, output_range) by slicing the target sequence. `None` means the target sequence is not sliced. kernel_initializer: Initializer for dense layer kernels. bias_initializer: Initializer for dense layer biases. kernel_regularizer: Regularizer for dense layer kernels. bias_regularizer: Regularizer for dense layer biases. activity_regularizer: Regularizer for dense layer activity. kernel_constraint: Constraint for dense layer kernels. bias_constraint: Constraint for dense layer kernels. use_bias: Whether to enable use_bias in attention layer. If set to False, use_bias in attention layer is disabled. norm_first: Whether to normalize inputs to attention and intermediate dense layers. If set False, output of attention and intermediate dense layers is normalized. norm_epsilon: Epsilon value to initialize normalization layers. intermediate_dropout: Dropout probability for intermediate_dropout_layer. attention_initializer: Initializer for kernels of attention layers. If set `None`, attention layers use kernel_initializer as initializer for kernel. 
""" def __init__(self, num_attention_heads, intermediate_size, intermediate_activation, dropout_rate=0.0, attention_dropout_rate=0.0, output_range=None, kernel_initializer="glorot_uniform", bias_initializer="zeros", kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, use_bias=True, norm_first=False, norm_epsilon=1e-12, intermediate_dropout=0.0, attention_initializer=None, **kwargs): super().__init__(**kwargs) self._num_heads = num_attention_heads self._intermediate_size = intermediate_size self._intermediate_activation = intermediate_activation self._attention_dropout_rate = attention_dropout_rate self._dropout_rate = dropout_rate self._output_range = output_range self._kernel_initializer = tf.keras.initializers.get(kernel_initializer) self._bias_initializer = tf.keras.initializers.get(bias_initializer) self._kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer) self._bias_regularizer = tf.keras.regularizers.get(bias_regularizer) self._activity_regularizer = tf.keras.regularizers.get(activity_regularizer) self._kernel_constraint = tf.keras.constraints.get(kernel_constraint) self._bias_constraint = tf.keras.constraints.get(bias_constraint) self._use_bias = use_bias self._norm_first = norm_first self._norm_epsilon = norm_epsilon self._intermediate_dropout = intermediate_dropout if attention_initializer: self._attention_initializer = tf.keras.initializers.get( attention_initializer) else: self._attention_initializer = tf_utils.clone_initializer( self._kernel_initializer) def build(self, input_shape): input_tensor = input_shape[0] if len(input_shape) == 2 else input_shape input_tensor_shape = tf.TensorShape(input_tensor) if len(input_tensor_shape.as_list()) != 3: raise ValueError( "TNTransformerExpandCondense expects a three-dimensional input of " "shape [batch, sequence, width].") batch_size, sequence_length, hidden_size = input_tensor_shape if len(input_shape) == 2: mask_tensor_shape = tf.TensorShape(input_shape[1]) expected_mask_tensor_shape = tf.TensorShape( [batch_size, sequence_length, sequence_length]) if not expected_mask_tensor_shape.is_compatible_with(mask_tensor_shape): raise ValueError( "When passing a mask tensor to TNTransformerExpandCondense, the " "mask tensor must be of shape [batch, " "sequence_length, sequence_length] (here %s). Got a " "mask tensor of shape %s." % (expected_mask_tensor_shape, mask_tensor_shape)) if hidden_size % self._num_heads != 0: raise ValueError( "The input size (%d) is not a multiple of the number of attention " "heads (%d)" % (hidden_size, self._num_heads)) self._attention_head_size = int(hidden_size // self._num_heads) common_kwargs = dict( kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer, activity_regularizer=self._activity_regularizer, kernel_constraint=self._kernel_constraint, bias_constraint=self._bias_constraint) self._attention_layer = tf.keras.layers.MultiHeadAttention( num_heads=self._num_heads, key_dim=self._attention_head_size, dropout=self._attention_dropout_rate, use_bias=self._use_bias, kernel_initializer=self._attention_initializer, bias_initializer=tf_utils.clone_initializer(self._bias_initializer), name="self_attention", **common_kwargs) self._attention_dropout = tf.keras.layers.Dropout(rate=self._dropout_rate) # Use float32 in layernorm for numeric stability. # It is probably safe in mixed_float16, but we haven't validated this yet. 
self._attention_layer_norm = ( tf.keras.layers.LayerNormalization( name="self_attention_layer_norm", axis=-1, epsilon=self._norm_epsilon, dtype=tf.float32)) # Substitute Dense layers with a single Expand-Condense layer. self._output_dense = TNExpandCondense( 4, use_bias=True, activation=self._intermediate_activation, kernel_initializer=self._kernel_initializer, bias_initializer=self._bias_initializer) self._output_dropout = tf.keras.layers.Dropout(rate=self._dropout_rate) # Use float32 in layernorm for numeric stability. self._output_layer_norm = tf.keras.layers.LayerNormalization( name="output_layer_norm", axis=-1, epsilon=self._norm_epsilon, dtype=tf.float32) super().build(input_shape) def get_config(self): config = { "num_attention_heads": self._num_heads, "intermediate_size": self._intermediate_size, "intermediate_activation": self._intermediate_activation, "dropout_rate": self._dropout_rate, "attention_dropout_rate": self._attention_dropout_rate, "output_range": self._output_range, "kernel_initializer": tf.keras.initializers.serialize(self._kernel_initializer), "bias_initializer": tf.keras.initializers.serialize(self._bias_initializer), "kernel_regularizer": tf.keras.regularizers.serialize(self._kernel_regularizer), "bias_regularizer": tf.keras.regularizers.serialize(self._bias_regularizer), "activity_regularizer": tf.keras.regularizers.serialize(self._activity_regularizer), "kernel_constraint": tf.keras.constraints.serialize(self._kernel_constraint), "bias_constraint": tf.keras.constraints.serialize(self._bias_constraint), "use_bias": self._use_bias, "norm_first": self._norm_first, "norm_epsilon": self._norm_epsilon, "intermediate_dropout": self._intermediate_dropout, "attention_initializer": tf.keras.initializers.serialize(self._attention_initializer) } base_config = super().get_config() return dict(list(base_config.items()) + list(config.items())) def call(self, inputs): if isinstance(inputs, (list, tuple)) and len(inputs) == 2: input_tensor, attention_mask = inputs else: input_tensor, attention_mask = (inputs, None) if self._output_range: target_tensor = input_tensor[:, 0:self._output_range, :] attention_mask = attention_mask[:, 0:self._output_range, :] else: if self._norm_first: source_tensor = input_tensor input_tensor = self._attention_layer_norm(input_tensor) target_tensor = input_tensor attention_output = self._attention_layer( query=target_tensor, value=input_tensor, attention_mask=attention_mask) attention_output = self._attention_dropout(attention_output) if self._norm_first: attention_output = source_tensor + attention_output else: attention_output = self._attention_layer_norm(target_tensor + attention_output) if self._norm_first: source_attention_output = attention_output attention_output = self._output_layer_norm(attention_output) layer_output = self._output_dense(attention_output) layer_output = self._output_dropout(layer_output) # During mixed precision training, attention_output is from layer norm and # is always fp32 for now. Cast layer_output to fp32 for the subsequent # add. layer_output = tf.cast(layer_output, tf.float32) if self._norm_first: layer_output = source_attention_output + layer_output else: layer_output = self._output_layer_norm(layer_output + attention_output) return layer_output
11,022
42.058594
91
py
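An illustrative sketch of the TN-BERT block above. The Expand-Condense layer asserts that the hidden width is a multiple of 128 and divisible by the number of heads, so width 256 with 4 heads is used here; these numbers are assumptions, not values from the original file.

```python
import tensorflow as tf

from official.nlp.modeling.layers import tn_transformer_expand_condense

layer = tn_transformer_expand_condense.TNTransformerExpandCondense(
    num_attention_heads=4,
    intermediate_size=1024,
    intermediate_activation='relu')

batch_size, seq_len, width = 2, 8, 256  # width must be a multiple of 128
data = tf.random.normal([batch_size, seq_len, width])
mask = tf.ones([batch_size, seq_len, seq_len])

output = layer([data, mask])
print(output.shape)  # (2, 8, 256)
```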
models
models-master/official/nlp/modeling/layers/tn_expand_condense.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ExpandCondense tensor network layer used in TN-BERT.""" # pylint: disable=g-classes-have-attributes from typing import List, Optional, Text, Any, Dict import tensorflow as tf from official.modeling import tf_utils Layer = tf.keras.layers.Layer activations = tf.keras.activations initializers = tf.keras.initializers @tf.keras.utils.register_keras_serializable(package='Text') class TNExpandCondense(Layer): """A TPU-optimized TensorNetwork layer. Designed for use in models that currently use Dense layers to achieve up projection followed by down projection. This layer is a TPU-optimized combination of 3 operations: Expand, Apply Activation, and Condense. The layer projects up from `input_shape[-1]` to `input_shape[-1] * proj_multiplier`, applies `self.activation`, and then condenses back to `input_shape[-1]`. Note the input shape and output shape will be identical. Args: proj_multiplier: Positive integer, multiple of `input_shape[-1]` to project up to. Must be one of `[2, 4, 6, 8]`. use_bias: Boolean, whether the layer uses a bias vector. activation: Activation function to use between Expand and Condense. If you don't specify anything, no activation is applied (ie. "linear" activation: `a(x) = x`). kernel_initializer: Initializer for the weight matrices. bias_initializer: Initializer for the bias vector. Input shape: N-D tensor with shape: `(batch_size, ..., input_shape[-1])`. Output shape: N-D tensor with shape: `(batch_size, ..., input_shape[-1])`. """ def __init__(self, proj_multiplier: int, use_bias: Optional[bool] = True, activation: Optional[Text] = 'relu', kernel_initializer: Optional[Text] = 'glorot_uniform', bias_initializer: Optional[Text] = 'zeros', **kwargs) -> None: # Allow specification of input_dim instead of input_shape, # for compatability with Keras layers that support this if 'input_shape' not in kwargs and 'input_dim' in kwargs: kwargs['input_shape'] = (kwargs.pop('input_dim'),) super().__init__(**kwargs) assert proj_multiplier in [ 2, 4, 6, 8, 10, 12 ], 'proj_multiplier needs to be one of [2, 4, 6, 8, 10, 12]' self.proj_multiplier = proj_multiplier self.use_bias = use_bias self.activation = activations.get(activation) self.kernel_initializer = initializers.get(kernel_initializer) self.bias_initializer = initializers.get(bias_initializer) def build(self, input_shape: List[int]) -> None: # Disable the attribute-defined-outside-init violations in this function # pylint: disable=attribute-defined-outside-init if input_shape[-1] is None: raise ValueError( 'The last dimension of the inputs to `TNExpandCondense` ' 'should be defined. 
Found `None`.') super().build(input_shape) self.proj_size = self.proj_multiplier * input_shape[-1] assert (self.proj_size // input_shape[-1]) * input_shape[ -1] == self.proj_size, (f'{self.proj_size} / {input_shape[-1]} must be ' f'round') assert (input_shape[-1] // 128 ) * 128 == input_shape[-1], f'{input_shape[-1]} / 128 must be round' self.w1 = self.add_weight( name='w1', shape=(input_shape[-1], input_shape[-1]), trainable=True, initializer=tf_utils.clone_initializer(self.kernel_initializer)) self.w2 = self.add_weight( name='w2', shape=(128, (128 * (self.proj_size // input_shape[-1]))), trainable=True, initializer=tf_utils.clone_initializer(self.kernel_initializer)) self.w3 = self.add_weight( name='w3', shape=(128 * (self.proj_size // input_shape[-1]), 128), trainable=True, initializer=tf_utils.clone_initializer(self.kernel_initializer)) self.w4 = self.add_weight( name='w4', shape=(input_shape[-1] // 128, 128, input_shape[-1]), trainable=True, initializer=tf_utils.clone_initializer(self.kernel_initializer)) if self.use_bias: self.bias = self.add_weight( name='b', shape=(input_shape[-1] // 128, 1, 128 * (self.proj_size // input_shape[-1])), trainable=True, initializer=self.bias_initializer) else: self.bias = None def call(self, inputs: tf.Tensor, **kwargs): orig_shape = tf.shape(inputs) input_dim = inputs.shape[-1] tmp = tf.reshape(inputs, (-1, input_dim)) # Shape is (BatchSeq, input_dim) # Expansion network tmp = tf.einsum('ab,Qb->aQ', self.w1, tmp) # Note: Letter Q will always represent the BatchSeq axis. tmp = tf.reshape(tmp, (input_dim // 128, 128, -1)) tmp = tf.einsum('abQ,bd->aQd', tmp, self.w2) # Apply activation and then Condense tmp = self.activation(tmp + self.bias) tmp = tf.einsum('aQd,db->aQb', tmp, self.w3) tmp = tf.einsum('aQb,abd->Qd', tmp, self.w4) out = tf.reshape(tmp, orig_shape) return out def compute_output_shape(self, input_shape: List[int]) -> List[int]: return input_shape def get_config(self) -> Dict[Any, Any]: """Returns the config of the layer. The same layer can be reinstantiated later (without its trained weights) from this configuration. Returns: Python dictionary containing the configuration of the layer. """ config = {} # Include the layer-specific arguments args = ['proj_multiplier', 'use_bias'] for arg in args: config[arg] = getattr(self, arg) # Serialize the activation config['activation'] = activations.serialize(getattr(self, 'activation')) # Serialize the initializers decomp_initializers = ['kernel_initializer', 'bias_initializer'] for initializer_arg in decomp_initializers: config[initializer_arg] = initializers.serialize( getattr(self, initializer_arg)) # Get base config base_config = super().get_config() return dict(list(base_config.items()) + list(config.items()))
6,703
35.63388
80
py
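A direct usage sketch of TNExpandCondense on its own. The last input dimension must be a multiple of 128, and the output shape matches the input shape; the sizes below are illustrative.

```python
import tensorflow as tf

from official.nlp.modeling.layers import tn_expand_condense

layer = tn_expand_condense.TNExpandCondense(proj_multiplier=4, activation='relu')

x = tf.random.normal([2, 8, 128])  # [batch, seq, width] with width % 128 == 0
y = layer(x)                       # projects up to width * 4, then back down
print(y.shape)  # (2, 8, 128)
```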
models
models-master/official/nlp/modeling/layers/mobile_bert_layers.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """MobileBERT embedding and transformer layers.""" import tensorflow as tf from official.modeling import tf_utils from official.nlp.modeling.layers import on_device_embedding from official.nlp.modeling.layers import position_embedding @tf.keras.utils.register_keras_serializable(package='Text') class NoNorm(tf.keras.layers.Layer): """Apply element-wise linear transformation to the last dimension.""" def __init__(self, name=None): super().__init__(name=name) def build(self, shape): kernal_size = shape[-1] self.bias = self.add_weight('beta', shape=[kernal_size], initializer='zeros') self.scale = self.add_weight('gamma', shape=[kernal_size], initializer='ones') def call(self, feature): output = feature * self.scale + self.bias return output def _get_norm_layer(normalization_type='no_norm', name=None): """Get normlization layer. Args: normalization_type: String. The type of normalization_type, only `no_norm` and `layer_norm` are supported. name: Name for the norm layer. Returns: layer norm class. """ if normalization_type == 'no_norm': layer = NoNorm(name=name) elif normalization_type == 'layer_norm': layer = tf.keras.layers.LayerNormalization( name=name, axis=-1, epsilon=1e-12, dtype=tf.float32) else: raise NotImplementedError('Only "no_norm" and "layer_norm" and supported.') return layer @tf.keras.utils.register_keras_serializable(package='Text') class MobileBertEmbedding(tf.keras.layers.Layer): """Performs an embedding lookup for MobileBERT. This layer includes word embedding, token type embedding, position embedding. """ def __init__(self, word_vocab_size, word_embed_size, type_vocab_size, output_embed_size, max_sequence_length=512, normalization_type='no_norm', initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02), dropout_rate=0.1, **kwargs): """Class initialization. Args: word_vocab_size: Number of words in the vocabulary. word_embed_size: Word embedding size. type_vocab_size: Number of word types. output_embed_size: Embedding size for the final embedding output. max_sequence_length: Maximum length of input sequence. normalization_type: String. The type of normalization_type, only `no_norm` and `layer_norm` are supported. initializer: The initializer to use for the embedding weights and linear projection weights. dropout_rate: Dropout rate. **kwargs: keyword arguments. 
""" super().__init__(**kwargs) self.word_vocab_size = word_vocab_size self.word_embed_size = word_embed_size self.type_vocab_size = type_vocab_size self.output_embed_size = output_embed_size self.max_sequence_length = max_sequence_length self.normalization_type = normalization_type self.initializer = tf.keras.initializers.get(initializer) self.dropout_rate = dropout_rate self.word_embedding = on_device_embedding.OnDeviceEmbedding( self.word_vocab_size, self.word_embed_size, initializer=tf_utils.clone_initializer(self.initializer), name='word_embedding') self.type_embedding = on_device_embedding.OnDeviceEmbedding( self.type_vocab_size, self.output_embed_size, initializer=tf_utils.clone_initializer(self.initializer), name='type_embedding') self.pos_embedding = position_embedding.PositionEmbedding( max_length=max_sequence_length, initializer=tf_utils.clone_initializer(self.initializer), name='position_embedding') self.word_embedding_proj = tf.keras.layers.EinsumDense( 'abc,cd->abd', output_shape=[None, self.output_embed_size], kernel_initializer=tf_utils.clone_initializer(self.initializer), bias_axes='d', name='embedding_projection') self.layer_norm = _get_norm_layer(normalization_type, 'embedding_norm') self.dropout_layer = tf.keras.layers.Dropout( self.dropout_rate, name='embedding_dropout') def get_config(self): config = { 'word_vocab_size': self.word_vocab_size, 'word_embed_size': self.word_embed_size, 'type_vocab_size': self.type_vocab_size, 'output_embed_size': self.output_embed_size, 'max_sequence_length': self.max_sequence_length, 'normalization_type': self.normalization_type, 'initializer': tf.keras.initializers.serialize(self.initializer), 'dropout_rate': self.dropout_rate } base_config = super(MobileBertEmbedding, self).get_config() return dict(list(base_config.items()) + list(config.items())) def call(self, input_ids, token_type_ids=None): word_embedding_out = self.word_embedding(input_ids) word_embedding_out = tf.concat( [tf.pad(word_embedding_out[:, 1:], ((0, 0), (0, 1), (0, 0))), word_embedding_out, tf.pad(word_embedding_out[:, :-1], ((0, 0), (1, 0), (0, 0)))], axis=2) word_embedding_out = self.word_embedding_proj(word_embedding_out) pos_embedding_out = self.pos_embedding(word_embedding_out) embedding_out = word_embedding_out + pos_embedding_out if token_type_ids is not None: type_embedding_out = self.type_embedding(token_type_ids) embedding_out += type_embedding_out embedding_out = self.layer_norm(embedding_out) embedding_out = self.dropout_layer(embedding_out) return embedding_out @tf.keras.utils.register_keras_serializable(package='Text') class MobileBertTransformer(tf.keras.layers.Layer): """Transformer block for MobileBERT. An implementation of one layer (block) of Transformer with bottleneck and inverted-bottleneck for MobilerBERT. Original paper for MobileBERT: https://arxiv.org/pdf/2004.02984.pdf """ def __init__(self, hidden_size=512, num_attention_heads=4, intermediate_size=512, intermediate_act_fn='relu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, intra_bottleneck_size=128, use_bottleneck_attention=False, key_query_shared_bottleneck=True, num_feedforward_networks=4, normalization_type='no_norm', initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02), **kwargs): """Class initialization. Args: hidden_size: Hidden size for the Transformer input and output tensor. num_attention_heads: Number of attention heads in the Transformer. intermediate_size: The size of the "intermediate" (a.k.a., feed forward) layer. 
intermediate_act_fn: The non-linear activation function to apply to the output of the intermediate/feed-forward layer. hidden_dropout_prob: Dropout probability for the hidden layers. attention_probs_dropout_prob: Dropout probability of the attention probabilities. intra_bottleneck_size: Size of bottleneck. use_bottleneck_attention: Use attention inputs from the bottleneck transformation. If true, the following `key_query_shared_bottleneck` will be ignored. key_query_shared_bottleneck: Whether to share linear transformation for keys and queries. num_feedforward_networks: Number of stacked feed-forward networks. normalization_type: The type of normalization_type, only `no_norm` and `layer_norm` are supported. `no_norm` represents the element-wise linear transformation for the student model, as suggested by the original MobileBERT paper. `layer_norm` is used for the teacher model. initializer: The initializer to use for the embedding weights and linear projection weights. **kwargs: keyword arguments. Raises: ValueError: A Tensor shape or parameter is invalid. """ super().__init__(**kwargs) self.hidden_size = hidden_size self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.intermediate_act_fn = intermediate_act_fn self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.intra_bottleneck_size = intra_bottleneck_size self.use_bottleneck_attention = use_bottleneck_attention self.key_query_shared_bottleneck = key_query_shared_bottleneck self.num_feedforward_networks = num_feedforward_networks self.normalization_type = normalization_type self.initializer = tf.keras.initializers.get(initializer) if intra_bottleneck_size % num_attention_heads != 0: raise ValueError( (f'The bottleneck size {intra_bottleneck_size} is not a multiple ' f'of the number of attention heads {num_attention_heads}.')) attention_head_size = int(intra_bottleneck_size / num_attention_heads) self.block_layers = {} # add input bottleneck dense_layer_2d = tf.keras.layers.EinsumDense( 'abc,cd->abd', output_shape=[None, self.intra_bottleneck_size], bias_axes='d', kernel_initializer=tf_utils.clone_initializer(self.initializer), name='bottleneck_input/dense') layer_norm = _get_norm_layer(self.normalization_type, name='bottleneck_input/norm') self.block_layers['bottleneck_input'] = [dense_layer_2d, layer_norm] if self.key_query_shared_bottleneck: dense_layer_2d = tf.keras.layers.EinsumDense( 'abc,cd->abd', output_shape=[None, self.intra_bottleneck_size], bias_axes='d', kernel_initializer=tf_utils.clone_initializer(self.initializer), name='kq_shared_bottleneck/dense') layer_norm = _get_norm_layer(self.normalization_type, name='kq_shared_bottleneck/norm') self.block_layers['kq_shared_bottleneck'] = [dense_layer_2d, layer_norm] # add attention layer attention_layer = tf.keras.layers.MultiHeadAttention( num_heads=self.num_attention_heads, key_dim=attention_head_size, value_dim=attention_head_size, dropout=self.attention_probs_dropout_prob, output_shape=self.intra_bottleneck_size, kernel_initializer=tf_utils.clone_initializer(self.initializer), name='attention') layer_norm = _get_norm_layer(self.normalization_type, name='attention/norm') self.block_layers['attention'] = [attention_layer, layer_norm] # add stacked feed-forward networks self.block_layers['ffn'] = [] for ffn_layer_idx in range(self.num_feedforward_networks): layer_prefix = f'ffn_layer_{ffn_layer_idx}' layer_name = layer_prefix + '/intermediate_dense' intermediate_layer = 
tf.keras.layers.EinsumDense( 'abc,cd->abd', activation=self.intermediate_act_fn, output_shape=[None, self.intermediate_size], bias_axes='d', kernel_initializer=tf_utils.clone_initializer(self.initializer), name=layer_name) layer_name = layer_prefix + '/output_dense' output_layer = tf.keras.layers.EinsumDense( 'abc,cd->abd', output_shape=[None, self.intra_bottleneck_size], bias_axes='d', kernel_initializer=tf_utils.clone_initializer(self.initializer), name=layer_name) layer_name = layer_prefix + '/norm' layer_norm = _get_norm_layer(self.normalization_type, name=layer_name) self.block_layers['ffn'].append([intermediate_layer, output_layer, layer_norm]) # add output bottleneck bottleneck = tf.keras.layers.EinsumDense( 'abc,cd->abd', output_shape=[None, self.hidden_size], activation=None, bias_axes='d', kernel_initializer=tf_utils.clone_initializer(self.initializer), name='bottleneck_output/dense') dropout_layer = tf.keras.layers.Dropout( self.hidden_dropout_prob, name='bottleneck_output/dropout') layer_norm = _get_norm_layer(self.normalization_type, name='bottleneck_output/norm') self.block_layers['bottleneck_output'] = [bottleneck, dropout_layer, layer_norm] def get_config(self): config = { 'hidden_size': self.hidden_size, 'num_attention_heads': self.num_attention_heads, 'intermediate_size': self.intermediate_size, 'intermediate_act_fn': self.intermediate_act_fn, 'hidden_dropout_prob': self.hidden_dropout_prob, 'attention_probs_dropout_prob': self.attention_probs_dropout_prob, 'intra_bottleneck_size': self.intra_bottleneck_size, 'use_bottleneck_attention': self.use_bottleneck_attention, 'key_query_shared_bottleneck': self.key_query_shared_bottleneck, 'num_feedforward_networks': self.num_feedforward_networks, 'normalization_type': self.normalization_type, 'initializer': tf.keras.initializers.serialize(self.initializer), } base_config = super(MobileBertTransformer, self).get_config() return dict(list(base_config.items()) + list(config.items())) def call(self, input_tensor, attention_mask=None, return_attention_scores=False): """Implementes the forward pass. Args: input_tensor: Float tensor of shape `(batch_size, seq_length, hidden_size)`. attention_mask: (optional) int32 tensor of shape `(batch_size, seq_length, seq_length)`, with 1 for positions that can be attended to and 0 in positions that should not be. return_attention_scores: If return attention score. Returns: layer_output: Float tensor of shape `(batch_size, seq_length, hidden_size)`. attention_scores (Optional): Only when return_attention_scores is True. Raises: ValueError: A Tensor shape or parameter is invalid. 
""" input_width = input_tensor.shape.as_list()[-1] if input_width != self.hidden_size: raise ValueError( (f'The width of the input tensor {input_width} != ' f'hidden size {self.hidden_size}')) prev_output = input_tensor # input bottleneck dense_layer = self.block_layers['bottleneck_input'][0] layer_norm = self.block_layers['bottleneck_input'][1] layer_input = dense_layer(prev_output) layer_input = layer_norm(layer_input) if self.use_bottleneck_attention: key_tensor = layer_input query_tensor = layer_input value_tensor = layer_input elif self.key_query_shared_bottleneck: dense_layer = self.block_layers['kq_shared_bottleneck'][0] layer_norm = self.block_layers['kq_shared_bottleneck'][1] shared_attention_input = dense_layer(prev_output) shared_attention_input = layer_norm(shared_attention_input) key_tensor = shared_attention_input query_tensor = shared_attention_input value_tensor = prev_output else: key_tensor = prev_output query_tensor = prev_output value_tensor = prev_output # attention layer attention_layer = self.block_layers['attention'][0] layer_norm = self.block_layers['attention'][1] attention_output, attention_scores = attention_layer( query_tensor, value_tensor, key_tensor, attention_mask, return_attention_scores=True, ) attention_output = layer_norm(attention_output + layer_input) # stacked feed-forward networks layer_input = attention_output for ffn_idx in range(self.num_feedforward_networks): intermediate_layer = self.block_layers['ffn'][ffn_idx][0] output_layer = self.block_layers['ffn'][ffn_idx][1] layer_norm = self.block_layers['ffn'][ffn_idx][2] intermediate_output = intermediate_layer(layer_input) layer_output = output_layer(intermediate_output) layer_output = layer_norm(layer_output + layer_input) layer_input = layer_output # output bottleneck bottleneck = self.block_layers['bottleneck_output'][0] dropout_layer = self.block_layers['bottleneck_output'][1] layer_norm = self.block_layers['bottleneck_output'][2] layer_output = bottleneck(layer_output) layer_output = dropout_layer(layer_output) layer_output = layer_norm(layer_output + prev_output) if return_attention_scores: return layer_output, attention_scores else: return layer_output @tf.keras.utils.register_keras_serializable(package='Text') class MobileBertMaskedLM(tf.keras.layers.Layer): """Masked language model network head for BERT modeling. This layer implements a masked language model based on the provided transformer based encoder. It assumes that the encoder network being passed has a "get_embedding_table()" method. Different from canonical BERT's masked LM layer, when the embedding width is smaller than hidden_size, it adds an extra output weights in shape [vocab_size, (hidden_size - embedding_width)]. """ def __init__(self, embedding_table, activation=None, initializer='glorot_uniform', output='logits', output_weights_use_proj=False, **kwargs): """Class initialization. Args: embedding_table: The embedding table from encoder network. activation: The activation, if any, for the dense layer. initializer: The initializer for the dense layer. Defaults to a Glorot uniform initializer. output: The output style for this layer. Can be either `logits` or `predictions`. output_weights_use_proj: Use projection instead of concating extra output weights, this may reduce the MLM task accuracy but will reduce the model params as well. **kwargs: keyword arguments. 
""" super().__init__(**kwargs) self.embedding_table = embedding_table self.activation = activation self.initializer = tf.keras.initializers.get(initializer) if output not in ('predictions', 'logits'): raise ValueError( ('Unknown `output` value "%s". `output` can be either "logits" or ' '"predictions"') % output) self._output_type = output self._output_weights_use_proj = output_weights_use_proj def build(self, input_shape): self._vocab_size, embedding_width = self.embedding_table.shape hidden_size = input_shape[-1] self.dense = tf.keras.layers.Dense( hidden_size, activation=self.activation, kernel_initializer=tf_utils.clone_initializer(self.initializer), name='transform/dense') if hidden_size > embedding_width: if self._output_weights_use_proj: self.extra_output_weights = self.add_weight( 'output_weights_proj', shape=(embedding_width, hidden_size), initializer=tf_utils.clone_initializer(self.initializer), trainable=True) else: self.extra_output_weights = self.add_weight( 'extra_output_weights', shape=(self._vocab_size, hidden_size - embedding_width), initializer=tf_utils.clone_initializer(self.initializer), trainable=True) elif hidden_size == embedding_width: self.extra_output_weights = None else: raise ValueError( 'hidden size %d cannot be smaller than embedding width %d.' % (hidden_size, embedding_width)) self.layer_norm = tf.keras.layers.LayerNormalization( axis=-1, epsilon=1e-12, name='transform/LayerNorm') self.bias = self.add_weight( 'output_bias/bias', shape=(self._vocab_size,), initializer='zeros', trainable=True) super(MobileBertMaskedLM, self).build(input_shape) def call(self, sequence_data, masked_positions): masked_lm_input = self._gather_indexes(sequence_data, masked_positions) lm_data = self.dense(masked_lm_input) lm_data = self.layer_norm(lm_data) if self.extra_output_weights is None: lm_data = tf.matmul(lm_data, self.embedding_table, transpose_b=True) else: if self._output_weights_use_proj: lm_data = tf.matmul( lm_data, self.extra_output_weights, transpose_b=True) lm_data = tf.matmul(lm_data, self.embedding_table, transpose_b=True) else: lm_data = tf.matmul( lm_data, tf.concat([self.embedding_table, self.extra_output_weights], axis=1), transpose_b=True) logits = tf.nn.bias_add(lm_data, self.bias) masked_positions_length = masked_positions.shape.as_list()[1] or tf.shape( masked_positions)[1] logits = tf.reshape(logits, [-1, masked_positions_length, self._vocab_size]) if self._output_type == 'logits': return logits return tf.nn.log_softmax(logits) def get_config(self): raise NotImplementedError('MaskedLM cannot be directly serialized because ' 'it has variable sharing logic.') def _gather_indexes(self, sequence_tensor, positions): """Gathers the vectors at the specific positions. Args: sequence_tensor: Sequence output of `BertModel` layer of shape `(batch_size, seq_length, num_hidden)` where `num_hidden` is number of hidden units of `BertModel` layer. positions: Positions ids of tokens in sequence to mask for pretraining of with dimension `(batch_size, num_predictions)` where `num_predictions` is maximum number of tokens to mask out and predict per each sequence. Returns: Masked out sequence tensor of shape `(batch_size * num_predictions, num_hidden)`. 
""" sequence_shape = tf.shape(sequence_tensor) batch_size, seq_length = sequence_shape[0], sequence_shape[1] width = sequence_tensor.shape.as_list()[2] or sequence_shape[2] flat_offsets = tf.reshape( tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1]) flat_positions = tf.reshape(positions + flat_offsets, [-1]) flat_sequence_tensor = tf.reshape(sequence_tensor, [batch_size * seq_length, width]) output_tensor = tf.gather(flat_sequence_tensor, flat_positions) return output_tensor
23,471
39.75
80
py
models
models-master/official/nlp/modeling/layers/transformer_scaffold.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Keras-based transformer scaffold layer.""" # pylint: disable=g-classes-have-attributes from absl import logging import gin import tensorflow as tf from official.modeling import tf_utils from official.nlp.modeling.layers import attention from official.nlp.modeling.layers import util @tf.keras.utils.register_keras_serializable(package="Text") @gin.configurable class TransformerScaffold(tf.keras.layers.Layer): """Transformer scaffold layer. This layer implements the Transformer from "Attention Is All You Need". (https://arxiv.org/abs/1706.03762), with a customizable attention layer and feedforward layer option. Users can pass a class to `attention_cls`/`feedforward_cls` and associated config to `attention_cfg`/`feedforward_cfg`, in which case the scaffold will instantiate the class with the config, or pass a class instance to `attention_cls`/`feedforward_cls`. Args: num_attention_heads: Number of attention heads. inner_dim: The output dimension of the first Dense layer in a two-layer feedforward network. inner_activation: The activation for the first Dense layer in a two-layer feedforward network. attention_cls: A class to instantiate attention layer, or a layer instance. attention_cfg: The config with which to instantiate `attention_cls`. Ignored if attention_cls is a layer instance or None. If `attention_cls` is a class, but `attention_cfg` is None, following kwargs will be used to instantiate the attention instance: { "num_heads": num_attention_heads, "key_dim": int(hidden_size // num_attention_heads), "dropout": attention_dropout_rate, "name": "self_attention" }, where `hidden_size` is the input tensor's last dimension. feedforward_cls: A class to instantiate feedforward layer, or a layer instance. If None, will use the standard feedforward layer as described in "Attention Is All You Need" paper. If not None, the instantiated feedforward layer is expected to take the output of attention as input and its output is this transformer layer's output. feedforward_cfg: The config with which to instantiate `feedforward_cls`. Ignored if feedforward_cls is a layer instance or is None. If `feedforward_cls` is a class, but `feedforward_cfg` is None, following kwargs will be used to instantiate the feedforward instance: { "inner_dim": inner_dim, "inner_activation": inner_activation, "dropout": dropout_rate, "name": "feedforward" }. dropout_rate: Dropout probability for the post-attention and output dropout. attention_dropout_rate: Dropout probability for within the attention layer. norm_first: Whether to normalize inputs to attention and intermediate dense layers. If set False, output of attention and intermediate dense layers is normalized. norm_epsilon: Epsilon value to initialize normalization layers. kernel_initializer: Initializer for dense layer kernels. bias_initializer: Initializer for dense layer biases. kernel_regularizer: Regularizer for dense layer kernels. 
bias_regularizer: Regularizer for dense layer biases. activity_regularizer: Regularizer for dense layer activity. kernel_constraint: Constraint for dense layer kernels. bias_constraint: Constraint for dense layer kernels. """ def __init__(self, num_attention_heads, inner_dim=768, inner_activation=tf_utils.get_activation("gelu"), attention_cls=attention.MultiHeadAttention, attention_cfg=None, feedforward_cls=None, feedforward_cfg=None, dropout_rate=0.0, attention_dropout_rate=0.0, norm_first=False, norm_epsilon=1e-12, kernel_initializer="glorot_uniform", bias_initializer="zeros", kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, **kwargs): inner_dim = kwargs.pop("intermediate_size", inner_dim) inner_activation = kwargs.pop("inner_activation", inner_activation) util.filter_kwargs(kwargs) super().__init__(**kwargs) self._attention_cfg = attention_cfg self._attention_cls = attention_cls self._feedforward_cls = feedforward_cls self._feedforward_cfg = feedforward_cfg self._norm_first = norm_first self._norm_epsilon = norm_epsilon self._num_heads = num_attention_heads self._inner_dim = inner_dim self._inner_activation = inner_activation self._attention_dropout_rate = attention_dropout_rate self._dropout_rate = dropout_rate self._kernel_initializer = tf.keras.initializers.get(kernel_initializer) self._bias_initializer = tf.keras.initializers.get(bias_initializer) self._kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer) self._bias_regularizer = tf.keras.regularizers.get(bias_regularizer) self._kernel_constraint = tf.keras.constraints.get(kernel_constraint) self._bias_constraint = tf.keras.constraints.get(bias_constraint) def build(self, input_shape): if isinstance(input_shape, tf.TensorShape): input_tensor_shape = input_shape elif isinstance(input_shape, (list, tuple)): input_tensor_shape = tf.TensorShape(input_shape[0]) else: raise ValueError( "The type of input shape argument is not supported, got: %s" % type(input_shape)) if len(input_tensor_shape.as_list()) != 3: raise ValueError( "TransformerScaffold expects a three-dimensional input of " "shape [batch, sequence, width].") hidden_size = input_tensor_shape[-1] if hidden_size % self._num_heads != 0: raise ValueError( "The input size (%d) is not a multiple of the number of attention " "heads (%d)" % (hidden_size, self._num_heads)) self._attention_head_size = int(hidden_size // self._num_heads) common_kwargs = dict( kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer, activity_regularizer=self._activity_regularizer, kernel_constraint=self._kernel_constraint, bias_constraint=self._bias_constraint) def get_layer_instance(instance_or_cls, config, default_config): if isinstance(instance_or_cls, tf.keras.layers.Layer): return instance_or_cls elif isinstance(instance_or_cls, dict): return get_layer_instance( tf.keras.utils.deserialize_keras_object(instance_or_cls), config, default_config, ) else: if config is None: return instance_or_cls(**default_config) else: return instance_or_cls(**config) default_attention_cfg = { "kernel_initializer": tf_utils.clone_initializer( self._kernel_initializer), "bias_initializer": tf_utils.clone_initializer(self._bias_initializer), "num_heads": self._num_heads, "key_dim": self._attention_head_size, "dropout": self._attention_dropout_rate, "name": "self_attention" } default_attention_cfg.update(common_kwargs) self._attention_layer = get_layer_instance( self._attention_cls, config=self._attention_cfg, 
default_config=default_attention_cfg) if self._feedforward_cls is not None: default_feedforward_cfg = { "kernel_initializer": tf_utils.clone_initializer( self._kernel_initializer), "bias_initializer": tf_utils.clone_initializer( self._bias_initializer), "inner_dim": self._inner_dim, "inner_activation": self._inner_activation, # TODO(hongkuny): try to update all ffn block args. "intermediate_size": self._inner_dim, "intermediate_activation": self._inner_activation, "dropout": self._dropout_rate, "name": "feedforward", } default_feedforward_cfg.update(common_kwargs) self._feedforward_block = get_layer_instance( self._feedforward_cls, config=self._feedforward_cfg, default_config=default_feedforward_cfg) else: self._feedforward_block = None # self._dropout_rate controls dropout rates at two places: # after attention, and after FFN. self._attention_dropout = tf.keras.layers.Dropout(rate=self._dropout_rate) # Use float32 in layernorm for numeric stability. # It is probably safe in mixed_float16, but we haven't validated this yet. self._attention_layer_norm = ( tf.keras.layers.LayerNormalization( name="self_attention_layer_norm", axis=-1, epsilon=self._norm_epsilon, dtype=tf.float32)) if self._feedforward_block is None: self._intermediate_dense = tf.keras.layers.EinsumDense( "abc,cd->abd", output_shape=(None, self._inner_dim), bias_axes="d", name="intermediate", kernel_initializer=tf_utils.clone_initializer( self._kernel_initializer), bias_initializer=tf_utils.clone_initializer(self._bias_initializer), **common_kwargs) policy = tf.keras.mixed_precision.global_policy() if policy.name == "mixed_bfloat16": # bfloat16 causes BERT with the LAMB optimizer to not converge # as well, so we use float32. # TODO(b/154538392): Investigate this. policy = tf.float32 self._intermediate_activation_layer = tf.keras.layers.Activation( self._inner_activation, dtype=policy) self._output_dense = tf.keras.layers.EinsumDense( "abc,cd->abd", output_shape=(None, hidden_size), bias_axes="d", name="output", kernel_initializer=tf_utils.clone_initializer( self._kernel_initializer), bias_initializer=tf_utils.clone_initializer(self._bias_initializer), **common_kwargs) self._output_dropout = tf.keras.layers.Dropout(rate=self._dropout_rate) # Use float32 in layernorm for numeric stability. 
self._output_layer_norm = tf.keras.layers.LayerNormalization( name="output_layer_norm", axis=-1, epsilon=self._norm_epsilon, dtype=tf.float32) super().build(input_shape) logging.info("%s configs: %s", self.__class__.__name__, self.get_config()) def get_config(self): config = { "attention_cls": self._attention_layer, "feedforward_cls": self._feedforward_block, "num_attention_heads": self._num_heads, "inner_dim": self._inner_dim, "inner_activation": self._inner_activation, "dropout_rate": self._dropout_rate, "attention_dropout_rate": self._attention_dropout_rate, "norm_first": self._norm_first, "norm_epsilon": self._norm_epsilon, "kernel_initializer": tf_utils.serialize_initializer( self._kernel_initializer, use_legacy_format=True ), "bias_initializer": tf_utils.serialize_initializer( self._bias_initializer, use_legacy_format=True ), "kernel_regularizer": tf_utils.serialize_regularizer( self._kernel_regularizer, use_legacy_format=True ), "bias_regularizer": tf_utils.serialize_regularizer( self._bias_regularizer, use_legacy_format=True ), "activity_regularizer": tf_utils.serialize_regularizer( self._activity_regularizer, use_legacy_format=True ), "kernel_constraint": tf_utils.serialize_constraint( self._kernel_constraint, use_legacy_format=True ), "bias_constraint": tf_utils.serialize_constraint( self._bias_constraint, use_legacy_format=True ), } base_config = super().get_config() return dict(list(base_config.items()) + list(config.items())) def call(self, inputs, training=None): if isinstance(inputs, (list, tuple)): if len(inputs) == 2: input_tensor, attention_mask = inputs key_value = None elif len(inputs) == 3: input_tensor, key_value, attention_mask = inputs else: raise ValueError("Unexpected inputs to %s with length at %d" % (self.__class__, len(inputs))) else: input_tensor, key_value, attention_mask = (inputs, None, None) if key_value is None: key_value = input_tensor if self._norm_first: source_tensor = input_tensor input_tensor = self._attention_layer_norm(input_tensor, training=training) attention_output = self._attention_layer( query=input_tensor, value=key_value, attention_mask=attention_mask, training=training) attention_output = self._attention_dropout(attention_output, training=training) if self._norm_first: attention_output = source_tensor + attention_output else: attention_output = self._attention_layer_norm(input_tensor + attention_output, training=training) if self._norm_first: source_attention_output = attention_output attention_output = self._output_layer_norm(attention_output, training=training) if self._feedforward_block is None: intermediate_output = self._intermediate_dense(attention_output) intermediate_output = self._intermediate_activation_layer( intermediate_output) layer_output = self._output_dense(intermediate_output, training=training) layer_output = self._output_dropout(layer_output, training=training) # During mixed precision training, attention_output is from layer norm # and is always fp32 for now. Cast layer_output to fp32 for the subsequent # add. 
      layer_output = tf.cast(layer_output, tf.float32)
      if self._norm_first:
        layer_output = source_attention_output + layer_output
      else:
        layer_output = self._output_layer_norm(layer_output + attention_output,
                                               training=training)
    else:
      if self._norm_first:
        # If norm_first, assume the feedforward block will not apply layer
        # norm.
        layer_output = self._feedforward_block(attention_output,
                                               training=training)
        layer_output += source_attention_output
      else:
        # Attention: if not norm_first, assume that the feedforward block does
        # apply layer norm. The feedforward block also applies the residual
        # connection. Please read `GatedFeedforward` as a concrete example.
        layer_output = self._feedforward_block(attention_output,
                                               training=training)

    return layer_output
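# --- Editor's usage sketch (not part of the original file) -------------------
# A hypothetical helper showing the scaffold with its defaults
# (MultiHeadAttention plus the built-in dense feedforward). Shapes are
# illustrative; the input width must be divisible by num_attention_heads.
def _transformer_scaffold_usage_sketch():
  layer = TransformerScaffold(
      num_attention_heads=8, inner_dim=1024, inner_activation="relu")
  hidden = tf.random.normal((2, 12, 64))  # width 64 is divisible by 8 heads
  mask = tf.ones((2, 12, 12))             # 1 = may attend, 0 = masked out
  return layer([hidden, mask])            # output shape (2, 12, 64)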
15,694
42.597222
80
py
models
models-master/official/nlp/modeling/layers/tn_transformer_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for TN-BERT transformer.""" from absl.testing import parameterized import numpy as np import tensorflow as tf from official.nlp.modeling.layers.tn_transformer_expand_condense import TNTransformerExpandCondense @parameterized.named_parameters(('tn', TNTransformerExpandCondense)) class TransformerLayerTest(tf.test.TestCase, parameterized.TestCase): def tearDown(self): super(TransformerLayerTest, self).tearDown() tf.keras.mixed_precision.set_global_policy('float32') def test_layer_creation(self, transformer_cls): test_layer = transformer_cls( num_attention_heads=16, intermediate_size=2048, intermediate_activation='relu') sequence_length = 21 width = 256 # Create a 3-dimensional input (the first dimension is implicit). data_tensor = tf.keras.Input(shape=(sequence_length, width)) output_tensor = test_layer(data_tensor) # The default output of a transformer layer should be the same as the input. self.assertEqual(data_tensor.shape.as_list(), output_tensor.shape.as_list()) def test_layer_creation_with_mask(self, transformer_cls): test_layer = transformer_cls( num_attention_heads=16, intermediate_size=2048, intermediate_activation='relu') sequence_length = 21 width = 256 # Create a 3-dimensional input (the first dimension is implicit). data_tensor = tf.keras.Input(shape=(sequence_length, width)) # Create a 2-dimensional input (the first dimension is implicit). mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length)) output_tensor = test_layer([data_tensor, mask_tensor]) # The default output of a transformer layer should be the same as the input. self.assertEqual(data_tensor.shape.as_list(), output_tensor.shape.as_list()) def test_layer_creation_with_incorrect_mask_fails(self, transformer_cls): test_layer = transformer_cls( num_attention_heads=16, intermediate_size=2048, intermediate_activation='relu') sequence_length = 21 width = 256 # Create a 3-dimensional input (the first dimension is implicit). data_tensor = tf.keras.Input(shape=(sequence_length, width)) # Create a 2-dimensional input (the first dimension is implicit). mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length - 3)) with self.assertRaisesRegex(ValueError, 'When passing a mask tensor.*'): _ = test_layer([data_tensor, mask_tensor]) def test_layer_invocation(self, transformer_cls): test_layer = transformer_cls( num_attention_heads=16, intermediate_size=2048, intermediate_activation='relu') sequence_length = 21 width = 256 # Create a 3-dimensional input (the first dimension is implicit). data_tensor = tf.keras.Input(shape=(sequence_length, width)) output_tensor = test_layer(data_tensor) # Create a model from the test layer. model = tf.keras.Model(data_tensor, output_tensor) # Invoke the model on test data. We can't validate the output data itself # (the NN is too complex) but this will rule out structural runtime errors. 
batch_size = 6 input_data = 16 * np.random.random_sample( (batch_size, sequence_length, width)) _ = model.predict(input_data) def test_layer_invocation_with_mask(self, transformer_cls): test_layer = transformer_cls( num_attention_heads=16, intermediate_size=2048, intermediate_activation='relu') sequence_length = 21 width = 256 # Create a 3-dimensional input (the first dimension is implicit). data_tensor = tf.keras.Input(shape=(sequence_length, width)) # Create a 2-dimensional input (the first dimension is implicit). mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length)) output_tensor = test_layer([data_tensor, mask_tensor]) # Create a model from the test layer. model = tf.keras.Model([data_tensor, mask_tensor], output_tensor) # Invoke the model on test data. We can't validate the output data itself # (the NN is too complex) but this will rule out structural runtime errors. batch_size = 6 input_data = 16 * np.random.random_sample( (batch_size, sequence_length, width)) # The attention mask should be of shape (batch, from_seq_len, to_seq_len), # which here is (batch, sequence_length, sequence_length) mask_data = np.random.randint( 2, size=(batch_size, sequence_length, sequence_length)) _ = model.predict([input_data, mask_data]) def test_layer_output_range(self, transformer_cls): test_layer = transformer_cls( num_attention_heads=16, intermediate_size=2048, intermediate_activation='relu') sequence_length = 21 width = 256 batch_size = 6 input_data = 16 * np.random.random_sample( (batch_size, sequence_length, width)) mask_data = np.random.randint( 2, size=(batch_size, sequence_length, sequence_length)) output_tensor = test_layer([input_data, mask_data]) # The layer only attends to the first token and outputs the first token # embeeding. new_layer = transformer_cls( num_attention_heads=16, intermediate_size=2048, intermediate_activation='relu', output_range=1) _ = new_layer([input_data, mask_data]) new_layer.set_weights(test_layer.get_weights()) new_output_tensor = new_layer([input_data, mask_data]) self.assertAllClose( new_output_tensor, output_tensor[:, 0:1, :], atol=5e-5, rtol=0.003) def test_layer_invocation_with_float16_dtype(self, transformer_cls): tf.keras.mixed_precision.set_global_policy('mixed_float16') test_layer = transformer_cls( num_attention_heads=16, intermediate_size=2048, intermediate_activation='relu') sequence_length = 21 width = 256 # Create a 3-dimensional input (the first dimension is implicit). data_tensor = tf.keras.Input(shape=(sequence_length, width)) # Create a 2-dimensional input (the first dimension is implicit). mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length)) output_tensor = test_layer([data_tensor, mask_tensor]) # Create a model from the test layer. model = tf.keras.Model([data_tensor, mask_tensor], output_tensor) # Invoke the model on test data. We can't validate the output data itself # (the NN is too complex) but this will rule out structural runtime errors. 
batch_size = 6 input_data = (16 * np.random.random_sample( (batch_size, sequence_length, width))) # The attention mask should be of shape (batch, from_seq_len, to_seq_len), # which here is (batch, sequence_length, sequence_length) mask_data = np.random.randint( 2, size=(batch_size, sequence_length, sequence_length)) _ = model.predict([input_data, mask_data]) def test_transform_with_initializer(self, transformer_cls): test_layer = transformer_cls( num_attention_heads=16, intermediate_size=2048, intermediate_activation='relu', kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02)) sequence_length = 21 width = 256 # Create a 3-dimensional input (the first dimension is implicit). data_tensor = tf.keras.Input(shape=(sequence_length, width)) output = test_layer(data_tensor) # The default output of a transformer layer should be the same as the input. self.assertEqual(data_tensor.shape.as_list(), output.shape.as_list()) def test_dynamic_layer_sequence(self, transformer_cls): test_layer = transformer_cls( num_attention_heads=16, intermediate_size=2048, intermediate_activation='relu', kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02)) # Create a 3-dimensional input (the first dimension is implicit). width = 256 input_tensor = tf.keras.Input(shape=(None, width)) output_tensor = test_layer(input_tensor) model = tf.keras.Model(input_tensor, output_tensor) input_length = 17 input_data = np.ones((1, input_length, width)) output_data = model.predict(input_data) self.assertAllEqual([1, input_length, width], output_data.shape) if __name__ == '__main__': tf.test.main()
8,883
41.104265
99
py
models
models-master/official/nlp/modeling/layers/on_device_embedding.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Keras-based one-hot embedding layer.""" # pylint: disable=g-classes-have-attributes import tensorflow as tf @tf.keras.utils.register_keras_serializable(package="Text") class OnDeviceEmbedding(tf.keras.layers.Layer): """Performs an embedding lookup suitable for accelerator devices. This layer uses either tf.gather or tf.one_hot to translate integer indices to float embeddings. Args: vocab_size: Number of elements in the vocabulary. embedding_width: Output size of the embedding layer. initializer: The initializer to use for the embedding weights. Defaults to "glorot_uniform". use_one_hot: Whether to use tf.one_hot over tf.gather for the embedding lookup. Defaults to False (that is, using tf.gather). Setting this option to True may improve performance, especially on small vocabulary sizes, but will generally require more memory. scale_factor: Whether to scale the output embeddings. Defaults to None (that is, not to scale). Setting this option to a float will let values in output embeddings multiplied by scale_factor. weight_fallback_dtype: When keras mix precision inferred wrong dtype for varibales, `weight_fallback_dtype` will be used to define the dtype of weights. """ def __init__(self, vocab_size, embedding_width, initializer="glorot_uniform", use_one_hot=False, scale_factor=None, weight_fallback_dtype=tf.float32, **kwargs): super().__init__(**kwargs) self._vocab_size = vocab_size self._embedding_width = embedding_width self._initializer = initializer self._use_one_hot = use_one_hot self._scale_factor = scale_factor # Backup control of the weight dtype because Keras mix precision sometimes # depends on the input to infer the compute dtype, but the inputs of # this layer are int type. self._weight_fallback_dtype = weight_fallback_dtype def get_config(self): config = { "vocab_size": self._vocab_size, "embedding_width": self._embedding_width, "initializer": self._initializer, "use_one_hot": self._use_one_hot, "scale_factor": self._scale_factor, "weight_fallback_dtype": self._weight_fallback_dtype, } base_config = super().get_config() return dict(list(base_config.items()) + list(config.items())) def build(self, input_shape): if ( self.dtype is not None and not tf.dtypes.as_dtype(self.dtype).is_floating ): # Keras failed to infer the right dtype. dtype = self._weight_fallback_dtype else: dtype = self.dtype self.embeddings = self.add_weight( "embeddings", shape=[self._vocab_size, self._embedding_width], initializer=self._initializer, dtype=dtype) super().build(input_shape) def call(self, inputs): flat_inputs = tf.reshape(inputs, [-1]) if self._use_one_hot: dtype = self.compute_dtype if not tf.dtypes.as_dtype(dtype).is_floating: # TensorFlow 1 compatibility. 
In TF1, self.compute_dtype is int32 # instead of a floating-point dtype, as the dtype is inferred from the # dtype of the inputs dtype = self._weight_fallback_dtype one_hot_data = tf.one_hot( flat_inputs, depth=self._vocab_size, dtype=dtype) embeddings = tf.matmul(one_hot_data, self.embeddings) else: embeddings = tf.gather(self.embeddings, flat_inputs) embeddings = tf.reshape( embeddings, tf.concat([tf.shape(inputs), [self._embedding_width]], axis=0)) embeddings.set_shape(inputs.shape.as_list() + [self._embedding_width]) if self._scale_factor: embeddings *= self._scale_factor return embeddings @property def vocab_size(self): return self._vocab_size @property def embedding_width(self): return self._embedding_width
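# --- Editor's usage sketch (not part of the original file) -------------------
# A hypothetical helper with illustrative values: the one-hot matmul path and
# Transformer-style sqrt(width) output scaling via `scale_factor`.
def _on_device_embedding_usage_sketch():
  ids = tf.constant([[3, 1, 4], [1, 5, 9]])  # (batch=2, seq_length=3)
  lookup = OnDeviceEmbedding(
      vocab_size=16,
      embedding_width=8,
      use_one_hot=True,       # one-hot matmul instead of tf.gather
      scale_factor=8 ** 0.5)  # multiply output embeddings by sqrt(8)
  return lookup(ids)          # output shape (2, 3, 8)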
4,572
36.178862
80
py
models
models-master/official/nlp/modeling/layers/mixing.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Keras-based mixing layers. Based on the mixing layers use by FNet (https://aclanthology.org/2022.naacl-main.319/) and Sparse Mixers (https://arxiv.org/abs/2205.12399). Mixing layers can be used as drop in replacements for self-attention layers. For interoperability with attention layers, we use the same `query` and `value` call signature. Note: These mixing layers currently only support encoder stacks. Decoder stacks can be supported in the future by utilizing the `value` inputs. """ import enum import functools from typing import Callable, Tuple, Union import gin import numpy as np from scipy import linalg import tensorflow as tf from official.modeling import tf_utils _Initializer = Union[str, tf.keras.initializers.Initializer] default_kernel_initializer = tf.keras.initializers.TruncatedNormal(stddev=2e-2) @gin.constants_from_enum class MixingMechanism(enum.Enum): """Determines the type of mixing layer. Possible options: FOURIER: Fourier Transform mixing. LINEAR: Mixing using dense matrix multiplications with learnable weights. HARTLEY: Hartley Transform mixing. """ FOURIER = "fourier" HARTLEY = "hartley" LINEAR = "linear" class MixingLayer(tf.keras.layers.Layer): """Mixing layer base class. This class cannot be used directly. It just specifies the API for mixing layer subclasses. For interoperability with attention layers, we use the same `query` and `value` call signature. Based on the mixing layers use by FNet (https://aclanthology.org/2022.naacl-main.319/) and Sparse Mixers (https://arxiv.org/abs/2205.12399). """ def __init__(self, name: str = "mixing", **kwargs): """Initializes layer. Args: name: Name for layer. **kwargs: Keyword arguments. """ super().__init__(name=name, **kwargs) def call(self, query: tf.Tensor, value: tf.Tensor, **kwargs) -> tf.Tensor: """Calls the layer. Subclasses should return tensors of shape <float>[batch_size, max_seq_length, hidden_dim]. Args: query: Batch of input embeddings, typically of shape <float>[batch_size, max_seq_length, hidden_dim]. value: Unused. Included to match attention layer API. **kwargs: Optional arguments to catch unused attention keyword arguments. Raises: NotImplementedError. This class should not be called directly. """ raise NotImplementedError("Abstract method") class FourierTransformLayer(MixingLayer): """Fourier Transform layer. Applies 2D Fourier Transform over final two dimensions of `query` inputs - typically the sequence and hidden dimensions. """ def __init__(self, use_fft: bool = False, name: str = "fourier_transform", **kwargs): """Initializes layer. Args: use_fft: Whether to use Fast Fourier Transform (True) or the Discrete Fourier Transform (DFT) matrix (False) to compute the Fourier Transform. See _pick_fourier_transform() for recommendations on when to use FFT or DFT. name: Name for layer. **kwargs: Keyword arguments. 
""" super().__init__(name=name, **kwargs) self.use_fft = use_fft def build(self, input_shape: Tuple[int, ...]): """Picks the Fourier Transform implementation.""" self.fourier_transform = _pick_fourier_transform( self.use_fft, max_seq_length=input_shape[-2], hidden_dim=input_shape[-1]) def call(self, query: tf.Tensor, value: tf.Tensor, **kwargs) -> tf.Tensor: """Applies layer to `query`. Args: query: Batch of input embeddings, typically of shape <float>[batch_size, max_seq_length, hidden_dim]. value: Unused. Included to match attention layer API. **kwargs: Optional arguments to catch unused attention keyword arguments. Returns: Real part of discrete Fourier Transform of `query` inputs with shape <float32>[batch_size, max_seq_length, hidden_dim]. """ del value # Ignored by encoder-only mixing layers query = tf.cast(query, tf.complex64) return tf.math.real(self.fourier_transform(query)) class HartleyTransformLayer(MixingLayer): """Hartley Transform layer. Applies 2D Hartley Transform over final two dimensions of `query` inputs - typically the sequence and hidden dimensions. """ def __init__(self, use_fft: bool = False, name: str = "hartley_transform", **kwargs): """Initializes layer. Args: use_fft: Whether to use Fast Fourier Transform (True) or the Discrete Fourier Transform (DFT) matrix (False) to compute the Hartley Transform. See _pick_fourier_transform() for recommendations on when to use FFT or DFT. name: Name for layer. **kwargs: Keyword arguments. """ super().__init__(name=name, **kwargs) self.use_fft = use_fft def build(self, input_shape: Tuple[int, ...]): """Picks the Fourier Transform implementation.""" self.fourier_transform = _pick_fourier_transform( self.use_fft, max_seq_length=input_shape[-2], hidden_dim=input_shape[-1]) def call(self, query: tf.Tensor, value: tf.Tensor, **kwargs) -> tf.Tensor: """Applies layer to `query`. Args: query: Batch of input embeddings, typically of shape <float>[batch_size, max_seq_length, hidden_dim]. value: Unused. Included to match attention layer API. **kwargs: Optional arguments to catch unused attention keyword arguments. Returns: Real part of discrete Hartley Transform of `query` inputs with shape <float32>[batch_size, max_seq_length, hidden_dim]. """ del value # Ignored by encoder-only mixing layers query = tf.cast(query, tf.complex64) frequencies = self.fourier_transform(query) return tf.math.real(frequencies) - tf.math.imag(frequencies) class LinearTransformLayer(MixingLayer): """Dense, linear transformation layer. Applies matrix multiplications over sequence and hidden dimensions. """ def __init__(self, kernel_initializer: _Initializer = default_kernel_initializer, name: str = "linear_transform", **kwargs): """Initializes layer. Args: kernel_initializer: Initialization scheme for kernel. name: Name for layer. **kwargs: Keyword arguments. """ super().__init__(name=name, **kwargs) self.kernel_initializer = kernel_initializer def build(self, input_shape: Tuple[int, ...]): """Creates the hidden and sequence matrix variables of the layer.""" self.mat_hidden = self.add_weight( shape=(input_shape[-1], input_shape[-1]), initializer=tf_utils.clone_initializer(self.kernel_initializer), trainable=True, name="hidden_kernel") self.mat_seq = self.add_weight( shape=(input_shape[-2], input_shape[-2]), initializer=tf_utils.clone_initializer(self.kernel_initializer), trainable=True, name="seq_kernel") def call(self, query: tf.Tensor, value: tf.Tensor, **kwargs) -> tf.Tensor: """Applies layer to `query`. 
Args: query: Batch of input embeddings, typically of shape <float>[batch_size, max_seq_length, hidden_dim]. value: Unused. Included to match attention layer API. **kwargs: Optional arguments to catch unused attention keyword arguments. Returns: Linearly transformed `query` inputs with shape <float>[batch_size, max_seq_length, hidden_dim]. """ del value # Ignored by encoder-only mixing layers return tf.einsum("bij,jk,ni->bnk", query, self.mat_hidden, self.mat_seq) def _pick_fourier_transform( use_fft: bool, max_seq_length: int, hidden_dim: int) -> Callable[[tf.Tensor], tf.Tensor]: """Returns FFT or DFT Fourier Transform implementation. On TPUs, we recommend using the Discrete Fourier Transform (DFT) matrix (use_fft=False), except for very long sequence lengths. On GPUs and CPUs, the Fast Fourier Transform (use_fft=True) is generally optimal for all sequence lengths. Note: When using the FFT it is recommended to use a sequence length that is a power of 2. Args: use_fft: If True, return FFT. Otherwise, return DFT matrix. max_seq_length: Maximum sequence length of inputs. Only used if use_fft=False. hidden_dim: Size of hidden dimension of inputs. Only used if use_fft=False. Returns: Fourier Transform. """ if use_fft: return tf.signal.fft2d else: dft_mat_seq = linalg.dft(max_seq_length).astype(np.complex64) dft_mat_hidden = linalg.dft(hidden_dim).astype(np.complex64) def two_dim_matmul(x: tf.Tensor, matrix_dim_one: tf.Tensor, matrix_dim_two: tf.Tensor) -> tf.Tensor: """Applies 2D matrix multiplication to input tensors of rank >= 2.""" return tf.einsum("...ij,jk,ni->...nk", tf.cast(x, tf.complex64), matrix_dim_two, matrix_dim_one) return functools.partial( two_dim_matmul, matrix_dim_one=dft_mat_seq, matrix_dim_two=dft_mat_hidden)
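# --- Editor's usage sketch (not part of the original file) -------------------
# A hypothetical helper showing a mixing layer as a drop-in replacement for
# self-attention. `value` is part of the attention-compatible signature but is
# ignored by these encoder-only layers, so the input is simply passed twice.
# Power-of-two sizes are used because the FFT path prefers them.
def _mixing_usage_sketch():
  x = tf.random.normal((2, 8, 16))  # (batch, max_seq_length, hidden_dim)
  fourier = FourierTransformLayer(use_fft=True)  # FFT; DFT matrix if False
  mixed_fourier = fourier(query=x, value=x)      # real part of 2D FFT
  linear = LinearTransformLayer()                # learnable seq/hidden mixing
  mixed_linear = linear(query=x, value=x)
  return mixed_fourier, mixed_linear             # both shaped (2, 8, 16)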
9,733
33.034965
80
py
models
models-master/official/nlp/modeling/layers/spectral_normalization_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for normalization layers. ## References: [1] Hanie Sedghi, Vineet Gupta, Philip M. Long. The Singular Values of Convolutional Layers. In _International Conference on Learning Representations_, 2019. """ from absl.testing import parameterized import numpy as np import tensorflow as tf from official.nlp.modeling.layers import spectral_normalization DenseLayer = tf.keras.layers.Dense(10) Conv2DLayer = tf.keras.layers.Conv2D(filters=64, kernel_size=3, padding='valid') def _compute_spectral_norm(weight): if weight.ndim > 2: # Computes Conv2D via FFT transform as in [1]. weight = np.fft.fft2(weight, weight.shape[1:3], axes=[0, 1]) return np.max(np.linalg.svd(weight, compute_uv=False)) class NormalizationTest(tf.test.TestCase, parameterized.TestCase): def setUp(self): super(NormalizationTest, self).setUp() self.num_iterations = 1000 self.norm_multiplier = 0.95 @parameterized.named_parameters( ('Dense', (None, 10), DenseLayer, spectral_normalization.SpectralNormalization), ('Conv2D', (None, 32, 32, 3), Conv2DLayer, spectral_normalization.SpectralNormalizationConv2D)) def test_spec_norm_magnitude(self, input_shape, layer, norm_wrapper): """Tests if the weights spectral norm converges to norm_multiplier.""" layer.build(input_shape) sn_layer = norm_wrapper( layer, iteration=self.num_iterations, norm_multiplier=self.norm_multiplier) # Perform normalization. sn_layer.build(input_shape) sn_layer.update_weights() normalized_kernel = sn_layer.layer.kernel.numpy() spectral_norm_computed = _compute_spectral_norm(normalized_kernel) spectral_norm_expected = self.norm_multiplier self.assertAllClose( spectral_norm_computed, spectral_norm_expected, atol=1e-1) # Test that the normalized layer is K-Lipschitz. In particular, if the layer # is a function f, then ||f(x1) - f(x2)||_2 <= K * ||(x1 - x2)||_2, where K # is the norm multiplier. new_input_shape = (16,) + input_shape[1:] new_input = tf.random.uniform(new_input_shape) delta_vec = tf.random.uniform(new_input_shape) output1 = sn_layer(new_input) output2 = sn_layer(new_input + delta_vec) delta_input = tf.norm(tf.reshape(delta_vec, (-1,))).numpy() delta_output = tf.norm(tf.reshape(output2 - output1, (-1,))).numpy() self.assertLessEqual(delta_output, self.norm_multiplier * delta_input) if __name__ == '__main__': tf.test.main()
3,111
34.770115
80
py
models
models-master/official/nlp/modeling/layers/on_device_embedding_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for Keras-based one-hot embedding layer.""" import numpy as np import tensorflow as tf from official.nlp.modeling.layers import on_device_embedding class OnDeviceEmbeddingTest(tf.test.TestCase): def test_layer_creation(self): vocab_size = 31 embedding_width = 27 test_layer = on_device_embedding.OnDeviceEmbedding( vocab_size=vocab_size, embedding_width=embedding_width) # Create a 2-dimensional input (the first dimension is implicit). sequence_length = 23 input_tensor = tf.keras.Input(shape=(sequence_length), dtype=tf.int32) output_tensor = test_layer(input_tensor) # The output should be the same as the input, save that it has an extra # embedding_width dimension on the end. expected_output_shape = [None, sequence_length, embedding_width] self.assertEqual(expected_output_shape, output_tensor.shape.as_list()) self.assertEqual(output_tensor.dtype, tf.float32) def test_layer_creation_with_mixed_precision(self): vocab_size = 31 embedding_width = 27 test_layer = on_device_embedding.OnDeviceEmbedding( vocab_size=vocab_size, embedding_width=embedding_width, dtype="mixed_float16") # Create a 2-dimensional input (the first dimension is implicit). sequence_length = 23 input_tensor = tf.keras.Input(shape=(sequence_length), dtype=tf.int32) output_tensor = test_layer(input_tensor) # The output should be the same as the input, save that it has an extra # embedding_width dimension on the end. expected_output_shape = [None, sequence_length, embedding_width] self.assertEqual(expected_output_shape, output_tensor.shape.as_list()) self.assertEqual(output_tensor.dtype, tf.float16) def test_layer_invocation(self): vocab_size = 31 embedding_width = 27 test_layer = on_device_embedding.OnDeviceEmbedding( vocab_size=vocab_size, embedding_width=embedding_width) # Create a 2-dimensional input (the first dimension is implicit). sequence_length = 23 input_tensor = tf.keras.Input(shape=(sequence_length), dtype=tf.int32) output_tensor = test_layer(input_tensor) # Create a model from the test layer. model = tf.keras.Model(input_tensor, output_tensor) # Invoke the model on test data. We can't validate the output data itself # (the NN is too complex) but this will rule out structural runtime errors. batch_size = 3 input_data = np.random.randint( vocab_size, size=(batch_size, sequence_length)) output = model.predict(input_data) self.assertEqual(tf.float32, output.dtype) def test_layer_invocation_with_mixed_precision(self): vocab_size = 31 embedding_width = 27 test_layer = on_device_embedding.OnDeviceEmbedding( vocab_size=vocab_size, embedding_width=embedding_width, dtype="mixed_float16") # Create a 2-dimensional input (the first dimension is implicit). sequence_length = 23 input_tensor = tf.keras.Input(shape=(sequence_length), dtype=tf.int32) output_tensor = test_layer(input_tensor) # Create a model from the test layer. model = tf.keras.Model(input_tensor, output_tensor) # Invoke the model on test data. 
We can't validate the output data itself # (the NN is too complex) but this will rule out structural runtime errors. batch_size = 3 input_data = np.random.randint( vocab_size, size=(batch_size, sequence_length)) output = model.predict(input_data) self.assertEqual(tf.float16, output.dtype) def test_one_hot_layer_creation(self): vocab_size = 31 embedding_width = 27 test_layer = on_device_embedding.OnDeviceEmbedding( vocab_size=vocab_size, embedding_width=embedding_width, use_one_hot=True) # Create a 2-dimensional input (the first dimension is implicit). sequence_length = 23 input_tensor = tf.keras.Input(shape=(sequence_length), dtype=tf.int32) output_tensor = test_layer(input_tensor) # The output should be the same as the input, save that it has an extra # embedding_width dimension on the end. expected_output_shape = [None, sequence_length, embedding_width] self.assertEqual(expected_output_shape, output_tensor.shape.as_list()) self.assertEqual(output_tensor.dtype, tf.float32) def test_one_hot_layer_creation_with_mixed_precision(self): vocab_size = 31 embedding_width = 27 test_layer = on_device_embedding.OnDeviceEmbedding( vocab_size=vocab_size, embedding_width=embedding_width, dtype="mixed_float16", use_one_hot=True) # Create a 2-dimensional input (the first dimension is implicit). sequence_length = 23 input_tensor = tf.keras.Input(shape=(sequence_length), dtype=tf.int32) output_tensor = test_layer(input_tensor) # The output should be the same as the input, save that it has an extra # embedding_width dimension on the end. expected_output_shape = [None, sequence_length, embedding_width] self.assertEqual(expected_output_shape, output_tensor.shape.as_list()) self.assertEqual(output_tensor.dtype, tf.float16) def test_one_hot_layer_invocation(self): vocab_size = 31 embedding_width = 27 test_layer = on_device_embedding.OnDeviceEmbedding( vocab_size=vocab_size, embedding_width=embedding_width, use_one_hot=True) # Create a 2-dimensional input (the first dimension is implicit). sequence_length = 23 input_tensor = tf.keras.Input(shape=(sequence_length), dtype=tf.int32) output_tensor = test_layer(input_tensor) # Create a model from the test layer. model = tf.keras.Model(input_tensor, output_tensor) # Invoke the model on test data. We can't validate the output data itself # (the NN is too complex) but this will rule out structural runtime errors. batch_size = 3 input_data = np.random.randint( vocab_size, size=(batch_size, sequence_length)) output = model.predict(input_data) self.assertEqual(tf.float32, output.dtype) def test_one_hot_layer_invocation_with_mixed_precision(self): vocab_size = 31 embedding_width = 27 test_layer = on_device_embedding.OnDeviceEmbedding( vocab_size=vocab_size, embedding_width=embedding_width, dtype="mixed_float16", use_one_hot=True) # Create a 2-dimensional input (the first dimension is implicit). sequence_length = 23 input_tensor = tf.keras.Input(shape=(sequence_length), dtype=tf.int32) output_tensor = test_layer(input_tensor) # Create a model from the test layer. model = tf.keras.Model(input_tensor, output_tensor) # Invoke the model on test data. We can't validate the output data itself # (the NN is too complex) but this will rule out structural runtime errors. 
batch_size = 3 input_data = np.random.randint( vocab_size, size=(batch_size, sequence_length)) output = model.predict(input_data) self.assertEqual(tf.float16, output.dtype) def test_use_scale_layer_invocation(self): vocab_size = 31 embedding_width = 27 test_layer = on_device_embedding.OnDeviceEmbedding( vocab_size=vocab_size, embedding_width=embedding_width, scale_factor=embedding_width**0.5) # Create a 2-dimensional input (the first dimension is implicit). sequence_length = 23 input_tensor = tf.keras.Input(shape=(sequence_length), dtype=tf.int32) output_tensor = test_layer(input_tensor) # Create a model from the test layer. model = tf.keras.Model(input_tensor, output_tensor) # Invoke the model on test data. We can't validate the output data itself # (the NN is too complex) but this will rule out structural runtime errors. batch_size = 3 input_data = np.random.randint( vocab_size, size=(batch_size, sequence_length)) output = model.predict(input_data) self.assertEqual(tf.float32, output.dtype) if __name__ == "__main__": tf.test.main()
8,579
39.857143
79
py
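A minimal standalone sketch of the lookup these tests exercise: an embedding configured with use_one_hot=True can be written as a one-hot matrix product, which matches a plain tf.gather lookup. The equivalence below is an illustration only, not taken from the layer's implementation; vocab_size, embedding_width and the batch shape mirror the test values.

import numpy as np
import tensorflow as tf

vocab_size, embedding_width, sequence_length, batch_size = 31, 27, 23, 3
table = tf.random.normal([vocab_size, embedding_width])
ids = tf.constant(
    np.random.randint(vocab_size, size=(batch_size, sequence_length)), tf.int32)

# Standard lookup: [batch, seq, embedding_width].
gathered = tf.gather(table, ids)
# One-hot formulation of the same lookup.
one_hot = tf.one_hot(ids, depth=vocab_size, dtype=table.dtype)
via_matmul = tf.einsum('bsv,vd->bsd', one_hot, table)

np.testing.assert_allclose(gathered.numpy(), via_matmul.numpy(), atol=1e-5)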
models
models-master/official/nlp/modeling/layers/moe_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for moe.py.""" import numpy as np import tensorflow as tf from official.nlp.modeling.layers import moe def small_config(): """Creates a small model config that can be used by all tests.""" config = {} config['d_ff'] = 32 config['output_dropout'] = 0.1 config['num_experts'] = 2 config['expert_d_ff'] = 33 config['expert_dropout_rate'] = 0.1 config['jitter_noise'] = 0.1 config['train_capacity_factor'] = 1.0 config['eval_capacity_factor'] = 1.0 config['examples_per_group'] = 2.0 config['backbone_d_ff'] = 13 return config def make_input_ones(batch_size: int = 4, seq_length: int = 10, hidden_dim: int = 7) -> tf.Tensor: return tf.ones((batch_size, seq_length, hidden_dim)) def make_experts_input_ones(num_groups: int = 1, num_experts: int = 2, expert_capacity: int = 5, hidden_dim: int = 7) -> tf.Tensor: return tf.ones((num_groups, num_experts, expert_capacity, hidden_dim)) class MoeTest(tf.test.TestCase): def tearDown(self): super().tearDown() tf.keras.mixed_precision.set_global_policy('float32') def test_router_z_loss_dtype(self): x = tf.constant([[[10.0, 5.0]]], dtype=tf.float32) y = moe._router_z_loss(x) expected = (5 + np.log(np.exp(5) + 1))**2 self.assertAllClose(expected, y, atol=1e-7) self.assertDTypeEqual(y, tf.float32) def test_router_z_loss_shape(self): x = make_input_ones(2, 5, 7) y = moe._router_z_loss(x) expected = (np.log(7) + 1)**2 self.assertAllClose(expected, y, atol=1e-7) def test_experts_choose_masked_router_dtype_shape(self): tf.keras.mixed_precision.set_global_policy('mixed_bfloat16') num_groups = 2 tokens_per_group = 3 hidden_dim = tokens_per_group num_experts = tokens_per_group expert_capacity = 2 x = np.zeros([num_groups, tokens_per_group, hidden_dim]) x[0, 0, 0] += 1 x[0, :2, :2] += 1 x[1, 1:, 1:] += 1 x[1, -1, -1] += 1 router = moe.ExpertsChooseMaskedRouter( num_experts=num_experts, jitter_noise=0.1, use_bias=True, kernel_initializer=tf.keras.initializers.get('identity'), bias_initializer=tf.keras.initializers.get('ones')) router_mask = router(x, expert_capacity=expert_capacity, training=False) self.assertDTypeEqual(router_mask.dispatch_mask, tf.bfloat16) self.assertDTypeEqual(router_mask.combine_array, tf.bfloat16) expect_shape = [num_groups, tokens_per_group, num_experts, expert_capacity] self.assertEqual(expect_shape, router_mask.dispatch_mask.shape) self.assertEqual(expect_shape, router_mask.combine_array.shape) # top_k call may not be sorted, so can't compare the output directly # Check that the output contains only 0s and 1s out_dm = router_mask.dispatch_mask.numpy() self.assertSetEqual({0, 1}, set(out_dm.flatten().astype(np.int32))) # Check that the right tokens for selected out_dm_indices = np.dot( out_dm.transpose((0, 2, 3, 1)), np.arange(tokens_per_group)) # Shape [num_groups, num_experts, expert_capacity] self.assertSetEqual({0, 1}, set(out_dm_indices[0, 0, :].astype(np.int32))) self.assertSetEqual({1, 2}, set(out_dm_indices[0, 1, :].astype(np.int32))) 
self.assertSetEqual({1, 2}, set(out_dm_indices[0, 2, :].astype(np.int32))) self.assertSetEqual({0, 1}, set(out_dm_indices[1, 0, :].astype(np.int32))) self.assertSetEqual({0, 1}, set(out_dm_indices[1, 1, :].astype(np.int32))) self.assertSetEqual({1, 2}, set(out_dm_indices[1, 2, :].astype(np.int32))) out_ca = router_mask.combine_array.numpy() out_ca = np.dot(out_ca, np.ones((expert_capacity,))) expected_combine_array = np.array([[[0.66, 0.0, 0.0], [0.42, 0.42, 0.16], [0.0, 0.33, 0.33]], [[0.33, 0.33, 0.0], [0.16, 0.42, 0.42], [0.0, 0.0, 0.66]]]) self.assertAllClose(expected_combine_array, out_ca, atol=1e-2) def test_feed_forward_shape_and_vars(self): config = small_config() layer = moe.FeedForward( d_ff=config['d_ff'], output_dropout=config['output_dropout']) inputs = make_input_ones() outputs = layer(inputs) self.assertAllEqual(tf.shape(inputs), tf.shape(outputs)) var_names = sorted([v.name for v in layer.trainable_variables]) self.assertAllEqual([ 'feed_forward/intermediate/bias:0', 'feed_forward/intermediate/kernel:0', 'feed_forward/output/bias:0', 'feed_forward/output/kernel:0' ], var_names) def test_feed_forward_manual(self): config = small_config() layer = moe.FeedForward( d_ff=config['d_ff'], output_dropout=config['output_dropout'], activation=tf.keras.activations.relu, kernel_initializer=tf.keras.initializers.get('ones'), bias_initializer=tf.keras.initializers.get('ones')) inputs = make_input_ones(1, 2, 3) outputs = layer(inputs, training=False) manual_outputs = tf.constant([[[129.0, 129.0, 129.0], [129.0, 129.0, 129.0]]]) self.assertAllClose(manual_outputs, outputs, atol=1e-7) def test_feed_forward_experts_shape_and_vars(self): config = small_config() layer = moe.FeedForwardExperts( num_experts=config['num_experts'], d_ff=config['expert_d_ff'], output_dropout=config['expert_dropout_rate']) inputs = make_experts_input_ones() outputs = layer(inputs) self.assertAllEqual(tf.shape(inputs), tf.shape(outputs)) var_names = sorted([v.name for v in layer.trainable_variables]) self.assertAllEqual([ 'experts/intermediate/bias:0', 'experts/intermediate/kernel:0', 'experts/output/bias:0', 'experts/output/kernel:0' ], var_names) def test_feed_forward_experts_manual(self): config = small_config() layer = moe.FeedForwardExperts( num_experts=1, d_ff=config['expert_d_ff'], output_dropout=config['expert_dropout_rate'], activation=tf.keras.activations.relu, kernel_initializer=tf.keras.initializers.get('ones'), bias_initializer=tf.keras.initializers.get('ones')) inputs = make_experts_input_ones(1, 1, 2, 3) outputs = layer(inputs, training=False) manual_outputs = tf.constant([[[[133.0, 133.0, 133.0], [133.0, 133.0, 133.0]]]]) self.assertAllClose(manual_outputs, outputs, atol=1e-7) def test_moe_layer(self): config = small_config() experts = moe.FeedForwardExperts( num_experts=config['num_experts'], d_ff=config['expert_d_ff'], output_dropout=config['expert_dropout_rate']) router = moe.ExpertsChooseMaskedRouter( config['num_experts'], jitter_noise=config['jitter_noise']) moe_layer = moe.MoeLayer( experts, router, train_capacity_factor=config['train_capacity_factor'], eval_capacity_factor=config['eval_capacity_factor'], examples_per_group=config['examples_per_group']) inputs = make_input_ones() outputs = moe_layer(inputs, training=True) self.assertAllEqual(tf.shape(inputs), tf.shape(outputs)) var_names = sorted([v.name for v in moe_layer.trainable_variables]) self.assertAllEqual([ 'moe/experts/intermediate/bias:0', 'moe/experts/intermediate/kernel:0', 'moe/experts/output/bias:0', 
'moe/experts/output/kernel:0', 'moe/router/router_weights/bias:0', 'moe/router/router_weights/kernel:0' ], var_names) self.assertLen(moe_layer.losses, 1) metrics = [metric.name for metric in moe_layer.metrics] self.assertSetEqual( { 'router_z_loss', 'unscaled_router_z_loss', 'load_balancing_loss', 'fraction_tokens_left_behind', 'router_confidence', 'expert_usage' }, set(metrics)) def test_moe_layer_with_backbone(self): config = small_config() experts = moe.FeedForwardExperts( num_experts=config['num_experts'], d_ff=config['expert_d_ff'], output_dropout=config['expert_dropout_rate']) router = moe.ExpertsChooseMaskedRouter( config['num_experts'], jitter_noise=config['jitter_noise']) moe_layer = moe.MoeLayer( experts, router, train_capacity_factor=config['train_capacity_factor'], eval_capacity_factor=config['eval_capacity_factor'], examples_per_group=config['examples_per_group']) layer = moe.MoeLayerWithBackbone(moe_layer, config['backbone_d_ff']) inputs = make_input_ones() outputs = layer(inputs) self.assertAllEqual(tf.shape(inputs), tf.shape(outputs)) if __name__ == '__main__': tf.test.main()
9,404
38.351464
80
py
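A hand check of the constant expected in test_router_z_loss_dtype above, as a sketch: both expected values in the z-loss tests are consistent with the squared log-sum-exp of the router logits averaged over tokens, which is assumed here to be what moe._router_z_loss computes.

import numpy as np

logits = np.array([[[10.0, 5.0]]])            # [num_groups, tokens_per_group, num_experts]
log_z = np.log(np.exp(logits).sum(axis=-1))   # log-sum-exp over experts, per token
z_loss = np.mean(log_z ** 2)

# log(e**10 + e**5) = 5 + log(e**5 + 1), matching the test's expected value.
assert np.isclose(z_loss, (5 + np.log(np.exp(5) + 1)) ** 2)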
models
models-master/official/nlp/modeling/layers/cls_head.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """A Classification head layer which is common used with sequence encoders.""" import tensorflow as tf from official.modeling import tf_utils from official.nlp.modeling.layers import gaussian_process from official.nlp.modeling.layers import spectral_normalization class ClassificationHead(tf.keras.layers.Layer): """Pooling head for sentence-level classification tasks.""" def __init__(self, inner_dim, num_classes, cls_token_idx=0, activation="tanh", dropout_rate=0.0, initializer="glorot_uniform", **kwargs): """Initializes the `ClassificationHead`. Args: inner_dim: The dimensionality of inner projection layer. If 0 or `None` then only the output projection layer is created. num_classes: Number of output classes. cls_token_idx: The index inside the sequence to pool. activation: Dense layer activation. dropout_rate: Dropout probability. initializer: Initializer for dense layer kernels. **kwargs: Keyword arguments. """ super().__init__(**kwargs) self.dropout_rate = dropout_rate self.inner_dim = inner_dim self.num_classes = num_classes self.activation = tf_utils.get_activation(activation) self.initializer = tf.keras.initializers.get(initializer) self.cls_token_idx = cls_token_idx if self.inner_dim: self.dense = tf.keras.layers.Dense( units=self.inner_dim, activation=self.activation, kernel_initializer=tf_utils.clone_initializer(self.initializer), name="pooler_dense") self.dropout = tf.keras.layers.Dropout(rate=self.dropout_rate) self.out_proj = tf.keras.layers.Dense( units=num_classes, kernel_initializer=tf_utils.clone_initializer(self.initializer), name="logits") def call(self, features: tf.Tensor, only_project: bool = False): """Implements call(). Args: features: a rank-3 Tensor when self.inner_dim is specified, otherwise it is a rank-2 Tensor. only_project: a boolean. If True, we return the intermediate Tensor before projecting to class logits. Returns: a Tensor, if only_project is True, shape= [batch size, hidden size]. If only_project is False, shape= [batch size, num classes]. """ if not self.inner_dim: x = features else: x = features[:, self.cls_token_idx, :] # take <CLS> token. 
x = self.dense(x) if only_project: return x x = self.dropout(x) x = self.out_proj(x) return x def get_config(self): config = { "cls_token_idx": self.cls_token_idx, "dropout_rate": self.dropout_rate, "num_classes": self.num_classes, "inner_dim": self.inner_dim, "activation": tf.keras.activations.serialize(self.activation), "initializer": tf.keras.initializers.serialize(self.initializer), } config.update(super(ClassificationHead, self).get_config()) return config @classmethod def from_config(cls, config, custom_objects=None): return cls(**config) @property def checkpoint_items(self): return {self.dense.name: self.dense} class MultiClsHeads(tf.keras.layers.Layer): """Pooling heads sharing the same pooling stem.""" def __init__(self, inner_dim, cls_list, cls_token_idx=0, activation="tanh", dropout_rate=0.0, initializer="glorot_uniform", **kwargs): """Initializes the `MultiClsHeads`. Args: inner_dim: The dimensionality of inner projection layer. If 0 or `None` then only the output projection layer is created. cls_list: a list of pairs of (classification problem name and the numbers of classes. cls_token_idx: The index inside the sequence to pool. activation: Dense layer activation. dropout_rate: Dropout probability. initializer: Initializer for dense layer kernels. **kwargs: Keyword arguments. """ super().__init__(**kwargs) self.dropout_rate = dropout_rate self.inner_dim = inner_dim self.cls_list = cls_list self.activation = tf_utils.get_activation(activation) self.initializer = tf.keras.initializers.get(initializer) self.cls_token_idx = cls_token_idx if self.inner_dim: self.dense = tf.keras.layers.Dense( units=inner_dim, activation=self.activation, kernel_initializer=tf_utils.clone_initializer(self.initializer), name="pooler_dense") self.dropout = tf.keras.layers.Dropout(rate=self.dropout_rate) self.out_projs = [] for name, num_classes in cls_list: self.out_projs.append( tf.keras.layers.Dense( units=num_classes, kernel_initializer=tf_utils.clone_initializer(self.initializer), name=name)) def call(self, features: tf.Tensor, only_project: bool = False): """Implements call(). Args: features: a rank-3 Tensor when self.inner_dim is specified, otherwise it is a rank-2 Tensor. only_project: a boolean. If True, we return the intermediate Tensor before projecting to class logits. Returns: If only_project is True, a Tensor with shape= [batch size, hidden size]. If only_project is False, a dictionary of Tensors. """ if not self.inner_dim: x = features else: x = features[:, self.cls_token_idx, :] # take <CLS> token. x = self.dense(x) if only_project: return x x = self.dropout(x) outputs = {} for proj_layer in self.out_projs: outputs[proj_layer.name] = proj_layer(x) return outputs def get_config(self): config = { "dropout_rate": self.dropout_rate, "cls_token_idx": self.cls_token_idx, "cls_list": self.cls_list, "inner_dim": self.inner_dim, "activation": tf.keras.activations.serialize(self.activation), "initializer": tf.keras.initializers.serialize(self.initializer), } config.update(super().get_config()) return config @classmethod def from_config(cls, config, custom_objects=None): return cls(**config) @property def checkpoint_items(self): items = {self.dense.name: self.dense} items.update({v.name: v for v in self.out_projs}) return items class GaussianProcessClassificationHead(ClassificationHead): """Gaussian process-based pooling head for sentence classification. This class implements a classifier head for BERT encoder that is based on the spectral-normalized neural Gaussian process (SNGP) [1]. 
SNGP is a simple method to improve a neural network's uncertainty quantification ability without sacrificing accuracy or lantency. It applies spectral normalization to the hidden pooler layer, and then replaces the dense output layer with a Gaussian process. [1]: Jeremiah Liu et al. Simple and Principled Uncertainty Estimation with Deterministic Deep Learning via Distance Awareness. In _Neural Information Processing Systems_, 2020. https://arxiv.org/abs/2006.10108 """ def __init__(self, inner_dim, num_classes, cls_token_idx=0, activation="tanh", dropout_rate=0.0, initializer="glorot_uniform", use_spec_norm=True, use_gp_layer=True, temperature=None, **kwargs): """Initializes the `GaussianProcessClassificationHead`. Args: inner_dim: The dimensionality of inner projection layer. If 0 or `None` then only the output projection layer is created. num_classes: Number of output classes. cls_token_idx: The index inside the sequence to pool. activation: Dense layer activation. dropout_rate: Dropout probability. initializer: Initializer for dense layer kernels. use_spec_norm: Whether to apply spectral normalization to pooler layer. use_gp_layer: Whether to use Gaussian process as the output layer. temperature: The temperature parameter to be used for mean-field approximation during inference. If None then no mean-field adjustment is applied. **kwargs: Additional keyword arguments. """ # Collects spectral normalization and Gaussian process args from kwargs. self.use_spec_norm = use_spec_norm self.use_gp_layer = use_gp_layer self.spec_norm_kwargs = extract_spec_norm_kwargs(kwargs) self.gp_layer_kwargs = extract_gp_layer_kwargs(kwargs) self.temperature = temperature super().__init__( inner_dim=inner_dim, num_classes=num_classes, cls_token_idx=cls_token_idx, activation=activation, dropout_rate=dropout_rate, initializer=initializer, **kwargs) # Applies spectral normalization to the dense pooler layer. if self.use_spec_norm and hasattr(self, "dense"): self.dense = spectral_normalization.SpectralNormalization( self.dense, inhere_layer_name=True, **self.spec_norm_kwargs) # Replace Dense output layer with the Gaussian process layer. if use_gp_layer: self.out_proj = gaussian_process.RandomFeatureGaussianProcess( self.num_classes, kernel_initializer=tf_utils.clone_initializer(self.initializer), name="logits", **self.gp_layer_kwargs) def call(self, features, training=False, return_covmat=False): """Returns model output. Dring training, the model returns raw logits. During evaluation, the model returns uncertainty adjusted logits, and (optionally) the covariance matrix. Arguments: features: A tensor of input features, shape (batch_size, feature_dim). training: Whether the model is in training mode. return_covmat: Whether the model should also return covariance matrix if `use_gp_layer=True`. During training, it is recommended to set `return_covmat=False` to be compatible with the standard Keras pipelines (e.g., `model.fit()`). Returns: logits: Uncertainty-adjusted predictive logits, shape (batch_size, num_classes). covmat: (Optional) Covariance matrix, shape (batch_size, batch_size). Returned only when return_covmat=True. """ logits = super().call(features) # Extracts logits and covariance matrix from model output. if self.use_gp_layer: logits, covmat = logits else: covmat = None # Computes the uncertainty-adjusted logits during evaluation. 
if not training: logits = gaussian_process.mean_field_logits( logits, covmat, mean_field_factor=self.temperature) if return_covmat and covmat is not None: return logits, covmat return logits def reset_covariance_matrix(self): """Resets covariance matrix of the Gaussian process layer.""" if hasattr(self.out_proj, "reset_covariance_matrix"): self.out_proj.reset_covariance_matrix() def get_config(self): config = dict( use_spec_norm=self.use_spec_norm, use_gp_layer=self.use_gp_layer) config.update(self.spec_norm_kwargs) config.update(self.gp_layer_kwargs) config["temperature"] = self.temperature config.update(super(GaussianProcessClassificationHead, self).get_config()) return config def extract_gp_layer_kwargs(kwargs): """Extracts Gaussian process layer configs from a given kwarg.""" return dict( num_inducing=kwargs.pop("num_inducing", 1024), normalize_input=kwargs.pop("normalize_input", True), gp_cov_momentum=kwargs.pop("gp_cov_momentum", 0.999), gp_cov_ridge_penalty=kwargs.pop("gp_cov_ridge_penalty", 1.), scale_random_features=kwargs.pop("scale_random_features", False), l2_regularization=kwargs.pop("l2_regularization", 1e-6), gp_cov_likelihood=kwargs.pop("gp_cov_likelihood", "gaussian"), return_gp_cov=kwargs.pop("return_gp_cov", True), return_random_features=kwargs.pop("return_random_features", False), use_custom_random_features=kwargs.pop("use_custom_random_features", True), custom_random_features_initializer=kwargs.pop( "custom_random_features_initializer", "random_normal"), custom_random_features_activation=kwargs.pop( "custom_random_features_activation", None)) def extract_spec_norm_kwargs(kwargs): """Extracts spectral normalization configs from a given kwarg.""" return dict( iteration=kwargs.pop("iteration", 1), norm_multiplier=kwargs.pop("norm_multiplier", .99)) class PerQueryDenseHead(tf.keras.layers.Layer): """Pooling head used for EncT5 style models. This module projects each query to use a different projection. For a input shape= [bs, num_queries, hidden_size], it projects each query to (features). Ending up with shape= [bs, num_queries, features]. For example, for classification with a few classes, one may use num_queries as 1 and features as number of classes. For multilabel classification, one may use num_queries as number of classes and features as 2. So each query represents a binary classification of one label. """ def __init__(self, num_queries: int, features: int, use_bias: bool = False, kernel_initializer: str = "glorot_uniform", **kwargs): """Initializes the `PerQueryDenseHead`. Args: num_queries: number of queries (the learnable embeddings in the input sequences) from the decoder. features: int with numbers of output features. Each query with be projected to this number with a different projection. use_bias: whether to add a bias to the output. kernel_initializer: Initializer for dense layer kernels. **kwargs: Keyword arguments. """ super().__init__(**kwargs) self.num_queries = num_queries self.features = features self.use_bias = use_bias self.kernel_initializer = tf.keras.initializers.get(kernel_initializer) def build(self, input_shape): input_shape = tf.TensorShape(input_shape) # Hidden size. 
last_dim = tf.compat.dimension_value(input_shape[-1]) self.hidden_size = last_dim self.kernel = self.add_weight( "kernel", shape=[self.num_queries, last_dim, self.features], initializer=self.kernel_initializer, dtype=self.dtype, trainable=True) if self.use_bias: self.bias = self.add_weight( "bias", shape=[ self.num_queries, self.features, ], dtype=self.dtype, trainable=True) else: self.bias = None def call(self, inputs: tf.Tensor) -> tf.Tensor: """Implements call(). Args: inputs: a rank-3 Tensor of shape= [bs, num_queries, hidden_size]. Returns: A Tensor, shape= [batch size, num_queries, features]. """ outputs = tf.einsum("bqh,qhf->bqf", inputs, self.kernel) if self.use_bias: outputs += self.bias return outputs def get_config(self): config = { "num_queries": self.num_queries, "features": self.features, "kernel_initializer": tf.keras.activations.serialize(self.kernel_initializer), } config.update(super(PerQueryDenseHead, self).get_config()) return config @classmethod def from_config(cls, config, custom_objects=None): return cls(**config)
16,198
34.138829
80
py
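A short usage sketch for ClassificationHead, with shapes following the docstrings above; the batch, sequence and hidden sizes are illustrative.

import tensorflow as tf
from official.nlp.modeling.layers import cls_head

head = cls_head.ClassificationHead(inner_dim=64, num_classes=3, dropout_rate=0.1)
sequence_output = tf.random.normal([8, 16, 64])     # [batch, seq_length, hidden]
logits = head(sequence_output)                      # pools the <CLS> token -> [8, 3]
pooled = head(sequence_output, only_project=True)   # intermediate features -> [8, 64]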
models
models-master/official/nlp/modeling/layers/gated_feedforward.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Keras-based gated feedforward layer.""" # pylint: disable=g-classes-have-attributes import gin import tensorflow as tf from official.modeling import tf_utils from official.nlp.modeling.layers import util @tf.keras.utils.register_keras_serializable(package="Text") @gin.configurable class GatedFeedforward(tf.keras.layers.Layer): """Gated linear feedforward layer. This layer follows the paper "GLU Variants Improve Transformer" (https://arxiv.org/abs/2002.05202). In additional, it allows to stack multiple feedforward blocks and specify the position of dropout layer. Args: intermediate_size: Size of the intermediate layer. intermediate_activation: Activation for the intermediate layer. dropout: Dropout probability for the output dropout. use_gate: Whether to use gated linear units. If True, assuming `GELU` as the activation and omitting bias, will apply `GEGLU(x, W, V, W_2) = (GEGLU(xW) * xV)W2`; if False, will follow "Attention Is All You Need" (https://arxiv.org/abs/1706.03762) paper and apply `FFN(x, W, W_2) = GELU(xW_1)W_2.` num_blocks: The number of feedforward blocks to stack. Each block contains a (gated) linear layer and a fully connected layer followed by dropout, layer norm and residual. dropout_position: Where to apply the dropout, the value can be either `before_residual` or `after_residual`. If `before_residual`, will apply `layer_output = layer_norm(dropout(layer_output) + layer_input)`; if `after residual`, will apply `layer_output = dropout(layer_norm(layer_output + layer_input))`. kernel_initializer: Initializer for dense layer kernels. bias_initializer: Initializer for dense layer biases. kernel_regularizer: Regularizer for dense layer kernels. bias_regularizer: Regularizer for dense layer biases. activity_regularizer: Regularizer for dense layer activity. kernel_constraint: Constraint for dense layer kernels. bias_constraint: Constraint for dense layer kernels. 
""" def __init__(self, inner_dim=768, inner_activation=tf_utils.get_activation("gelu"), dropout=0.0, use_gate=True, apply_output_layer_norm=True, num_blocks=1, dropout_position="before_residual", kernel_initializer="glorot_uniform", bias_initializer="zeros", kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, **kwargs): inner_dim = kwargs.pop("intermediate_size", inner_dim) inner_activation = kwargs.pop("intermediate_activation", inner_activation) util.filter_kwargs(kwargs) super().__init__(**kwargs) self._inner_dim = inner_dim self._inner_activation = inner_activation self._dropout = dropout self._use_gate = use_gate self._num_blocks = num_blocks self._apply_output_layer_norm = apply_output_layer_norm self._dropout_position = dropout_position if self._dropout_position not in ("before_residual", "after_residual"): raise ValueError( "The dropout_position should be either `before_residual` or" "`after_residual`, got: %s" % self._dropout_position) self._kernel_initializer = tf.keras.initializers.get(kernel_initializer) self._bias_initializer = tf.keras.initializers.get(bias_initializer) self._kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer) self._bias_regularizer = tf.keras.regularizers.get(bias_regularizer) self._activity_regularizer = tf.keras.regularizers.get(activity_regularizer) self._kernel_constraint = tf.keras.constraints.get(kernel_constraint) self._bias_constraint = tf.keras.constraints.get(bias_constraint) def build(self, input_shape): hidden_size = input_shape.as_list()[-1] common_kwargs = dict( kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer, activity_regularizer=self._activity_regularizer, kernel_constraint=self._kernel_constraint, bias_constraint=self._bias_constraint) self._intermediate_dense = [] self._inner_activation_layers = [] self._gate_dense = [] self._output_dense = [] self._output_dropout = [] self._output_layer_norm = [] activation_policy = tf.keras.mixed_precision.global_policy() if activation_policy.name == "mixed_bfloat16": # bfloat16 causes BERT with the LAMB optimizer to not converge # as well, so we use float32. # TODO(b/154538392): Investigate this. activation_policy = tf.float32 for i in range(self._num_blocks): self._intermediate_dense.append( tf.keras.layers.EinsumDense( "abc,cd->abd", output_shape=(None, self._inner_dim), bias_axes="d", name="intermediate_%d" % i, kernel_initializer=tf_utils.clone_initializer( self._kernel_initializer), bias_initializer=tf_utils.clone_initializer( self._bias_initializer), **common_kwargs)) self._inner_activation_layers.append( tf.keras.layers.Activation( self._inner_activation, dtype=activation_policy)) if self._use_gate: self._gate_dense.append( tf.keras.layers.EinsumDense( "abc,cd->abd", output_shape=(None, self._inner_dim), bias_axes="d", name="gate_%d" % i, kernel_initializer=tf_utils.clone_initializer( self._kernel_initializer), bias_initializer=tf_utils.clone_initializer( self._bias_initializer), **common_kwargs)) self._output_dense.append( tf.keras.layers.EinsumDense( "abc,cd->abd", output_shape=(None, hidden_size), bias_axes="d", name="output_%d" % i, kernel_initializer=tf_utils.clone_initializer( self._kernel_initializer), bias_initializer=tf_utils.clone_initializer( self._bias_initializer), **common_kwargs)) self._output_dropout.append(tf.keras.layers.Dropout(rate=self._dropout)) # Use float32 in layernorm for numeric stability. 
if self._apply_output_layer_norm: self._output_layer_norm.append( tf.keras.layers.LayerNormalization( name="output_layer_norm_%d" % i, axis=-1, epsilon=1e-12, dtype=tf.float32)) def get_config(self): config = { "inner_dim": self._inner_dim, "inner_activation": self._inner_activation, "dropout": self._dropout, "use_gate": self._use_gate, "num_blocks": self._num_blocks, "dropout_position": self._dropout_position, "kernel_initializer": tf.keras.initializers.serialize(self._kernel_initializer), "bias_initializer": tf.keras.initializers.serialize(self._bias_initializer), "kernel_regularizer": tf.keras.regularizers.serialize(self._kernel_regularizer), "bias_regularizer": tf.keras.regularizers.serialize(self._bias_regularizer), "activity_regularizer": tf.keras.regularizers.serialize(self._activity_regularizer), "kernel_constraint": tf.keras.constraints.serialize(self._kernel_constraint), "bias_constraint": tf.keras.constraints.serialize(self._bias_constraint) } base_config = super().get_config() return dict(list(base_config.items()) + list(config.items())) def call(self, inputs): layer_output = inputs for i in range(self._num_blocks): layer_input = layer_output intermediate_output = self._intermediate_dense[i](layer_input) intermediate_output = self._inner_activation_layers[i]( intermediate_output) if self._use_gate: gated_linear = self._gate_dense[i](layer_input) intermediate_output = intermediate_output * gated_linear layer_output = self._output_dense[i](intermediate_output) if self._dropout_position == "before_residual": layer_output = self._output_dropout[i](layer_output) # During mixed precision training, `layer_input` may be from layer norm. # If so, it is always fp32. Cast layer_output to fp32 for the subsequent # add. if layer_input.dtype == tf.float32: layer_output = tf.cast(layer_output, tf.float32) if self._apply_output_layer_norm: layer_output = self._output_layer_norm[i](layer_output + layer_input) if self._dropout_position == "after_residual": layer_output = self._output_dropout[i](layer_output) return layer_output
9,681
41.651982
80
py
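The gated block described in the docstring, written out as a minimal einsum sketch; w, v and w2 are illustrative weights, not the layer's own variables, and biases are omitted as in the docstring's formula.

import tensorflow as tf

def geglu(x, w, v, w2):
  """GEGLU(x, W, V, W2) = (GELU(x W) * (x V)) W2, per the GLU Variants paper."""
  hidden = tf.nn.gelu(tf.einsum('bsh,hi->bsi', x, w)) * tf.einsum('bsh,hi->bsi', x, v)
  return tf.einsum('bsi,ih->bsh', hidden, w2)

x = tf.random.normal([2, 4, 16])     # [batch, seq, hidden]
w = tf.random.normal([16, 32])       # hidden -> inner_dim
v = tf.random.normal([16, 32])       # gate projection
w2 = tf.random.normal([32, 16])      # inner_dim -> hidden
print(geglu(x, w, v, w2).shape)      # (2, 4, 16)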
models
models-master/official/nlp/modeling/layers/gaussian_process.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Definitions for random feature Gaussian process layer.""" import math import tensorflow as tf _SUPPORTED_LIKELIHOOD = ('binary_logistic', 'poisson', 'gaussian') class RandomFeatureGaussianProcess(tf.keras.layers.Layer): """Gaussian process layer with random feature approximation [1]. During training, the model updates the maximum a posteriori (MAP) logits estimates and posterior precision matrix using minibatch statistics. During inference, the model divides the MAP logit estimates by the predictive standard deviation, which is equivalent to approximating the posterior mean of the predictive probability via the mean-field approximation. User can specify different types of random features by setting `use_custom_random_features=True`, and change the initializer and activations of the custom random features. For example: MLP Kernel: initializer='random_normal', activation=tf.nn.relu RBF Kernel: initializer='random_normal', activation=tf.math.cos A linear kernel can also be specified by setting gp_kernel_type='linear' and `use_custom_random_features=True`. [1]: Ali Rahimi and Benjamin Recht. Random Features for Large-Scale Kernel Machines. In _Neural Information Processing Systems_, 2007. https://people.eecs.berkeley.edu/~brecht/papers/07.rah.rec.nips.pdf Attributes: units: (int) The dimensionality of layer. num_inducing: (int) The number of random features for the approximation. is_training: (tf.bool) Whether the layer is set in training mode. If so the layer updates the Gaussian process' variance estimate using statistics computed from the incoming minibatches. """ def __init__(self, units, num_inducing=1024, gp_kernel_type='gaussian', gp_kernel_scale=1., gp_output_bias=0., normalize_input=False, gp_kernel_scale_trainable=False, gp_output_bias_trainable=False, gp_cov_momentum=0.999, gp_cov_ridge_penalty=1., scale_random_features=True, use_custom_random_features=True, custom_random_features_initializer=None, custom_random_features_activation=None, l2_regularization=1e-6, gp_cov_likelihood='gaussian', return_gp_cov=True, return_random_features=False, dtype=None, name='random_feature_gaussian_process', **gp_output_kwargs): """Initializes a random-feature Gaussian process layer instance. Args: units: (int) Number of output units. num_inducing: (int) Number of random Fourier features used for approximating the Gaussian process. gp_kernel_type: (string) The type of kernel function to use for Gaussian process. Currently default to 'gaussian' which is the Gaussian RBF kernel. gp_kernel_scale: (float) The length-scale parameter of the a shift-invariant kernel function, i.e., for RBF kernel: exp(-|x1 - x2|**2 / gp_kernel_scale). gp_output_bias: (float) Scalar initial value for the bias vector. normalize_input: (bool) Whether to normalize the input to Gaussian process. gp_kernel_scale_trainable: (bool) Whether the length scale variable is trainable. 
gp_output_bias_trainable: (bool) Whether the bias is trainable. gp_cov_momentum: (float) A discount factor used to compute the moving average for posterior covariance matrix. gp_cov_ridge_penalty: (float) Initial Ridge penalty to posterior covariance matrix. scale_random_features: (bool) Whether to scale the random feature by sqrt(2. / num_inducing). use_custom_random_features: (bool) Whether to use custom random features implemented using tf.keras.layers.Dense. custom_random_features_initializer: (str or callable) Initializer for the random features. Default to random normal which approximates a RBF kernel function if activation function is cos. custom_random_features_activation: (callable) Activation function for the random feature layer. Default to cosine which approximates a RBF kernel function. l2_regularization: (float) The strength of l2 regularization on the output weights. gp_cov_likelihood: (string) Likelihood to use for computing Laplace approximation for covariance matrix. Default to `gaussian`. return_gp_cov: (bool) Whether to also return GP covariance matrix. If False then no covariance learning is performed. return_random_features: (bool) Whether to also return random features. dtype: (tf.DType) Input data type. name: (string) Layer name. **gp_output_kwargs: Additional keyword arguments to dense output layer. """ super().__init__(name=name, dtype=dtype) self.units = units self.num_inducing = num_inducing self.normalize_input = normalize_input self.gp_input_scale = 1. / tf.sqrt(gp_kernel_scale) self.gp_feature_scale = tf.sqrt(2. / float(num_inducing)) self.scale_random_features = scale_random_features self.return_random_features = return_random_features self.return_gp_cov = return_gp_cov self.gp_kernel_type = gp_kernel_type self.gp_kernel_scale = gp_kernel_scale self.gp_output_bias = gp_output_bias self.gp_kernel_scale_trainable = gp_kernel_scale_trainable self.gp_output_bias_trainable = gp_output_bias_trainable self.use_custom_random_features = use_custom_random_features self.custom_random_features_initializer = custom_random_features_initializer self.custom_random_features_activation = custom_random_features_activation self.l2_regularization = l2_regularization self.gp_output_kwargs = gp_output_kwargs self.gp_cov_momentum = gp_cov_momentum self.gp_cov_ridge_penalty = gp_cov_ridge_penalty self.gp_cov_likelihood = gp_cov_likelihood if self.use_custom_random_features: # Default to Gaussian RBF kernel. self.random_features_bias_initializer = tf.random_uniform_initializer( minval=0., maxval=2. * math.pi) if self.custom_random_features_initializer is None: self.custom_random_features_initializer = ( tf.keras.initializers.RandomNormal(stddev=1.)) if self.custom_random_features_activation is None: self.custom_random_features_activation = tf.math.cos def build(self, input_shape): # Defines model layers. 
if self.normalize_input: self._input_norm_layer = tf.keras.layers.LayerNormalization( name='gp_input_normalization') self._input_norm_layer.build(input_shape) input_shape = self._input_norm_layer.compute_output_shape(input_shape) self._random_feature = self._make_random_feature_layer( name='gp_random_feature') self._random_feature.build(input_shape) input_shape = self._random_feature.compute_output_shape(input_shape) if self.return_gp_cov: self._gp_cov_layer = LaplaceRandomFeatureCovariance( momentum=self.gp_cov_momentum, ridge_penalty=self.gp_cov_ridge_penalty, likelihood=self.gp_cov_likelihood, dtype=self.dtype, name='gp_covariance') self._gp_cov_layer.build(input_shape) self._gp_output_layer = tf.keras.layers.Dense( units=self.units, use_bias=False, kernel_regularizer=tf.keras.regularizers.l2(self.l2_regularization), dtype=self.dtype, name='gp_output_weights', **self.gp_output_kwargs) self._gp_output_layer.build(input_shape) self._gp_output_bias = tf.Variable( initial_value=[self.gp_output_bias] * self.units, dtype=self.dtype, trainable=self.gp_output_bias_trainable, name='gp_output_bias') self.built = True def _make_random_feature_layer(self, name): """Defines random feature layer depending on kernel type.""" if not self.use_custom_random_features: # Use default RandomFourierFeatures layer from tf.keras. return tf.keras.layers.experimental.RandomFourierFeatures( output_dim=self.num_inducing, kernel_initializer=self.gp_kernel_type, scale=self.gp_kernel_scale, trainable=self.gp_kernel_scale_trainable, dtype=self.dtype, name=name) if self.gp_kernel_type.lower() == 'linear': custom_random_feature_layer = tf.keras.layers.Lambda( lambda x: x, name=name) else: # Use user-supplied configurations. custom_random_feature_layer = tf.keras.layers.Dense( units=self.num_inducing, use_bias=True, activation=self.custom_random_features_activation, kernel_initializer=self.custom_random_features_initializer, bias_initializer=self.random_features_bias_initializer, trainable=False, name=name) return custom_random_feature_layer def reset_covariance_matrix(self): """Resets covariance matrix of the GP layer. This function is useful for reseting the model's covariance matrix at the beginning of a new epoch. """ self._gp_cov_layer.reset_precision_matrix() def call(self, inputs, global_step=None, training=None): # Computes random features. gp_inputs = inputs if self.normalize_input: gp_inputs = self._input_norm_layer(gp_inputs) elif self.use_custom_random_features: # Supports lengthscale for custom random feature layer by directly # rescaling the input. gp_input_scale = tf.cast(self.gp_input_scale, inputs.dtype) gp_inputs = gp_inputs * gp_input_scale gp_feature = self._random_feature(gp_inputs) if self.scale_random_features: # Scale random feature by 2. / sqrt(num_inducing) following [1]. # When using GP layer as the output layer of a nerual network, # it is recommended to turn this scaling off to prevent it from changing # the learning rate to the hidden layers. gp_feature_scale = tf.cast(self.gp_feature_scale, inputs.dtype) gp_feature = gp_feature * gp_feature_scale # Computes posterior center (i.e., MAP estimate) and variance. gp_output = self._gp_output_layer(gp_feature) + self._gp_output_bias if self.return_gp_cov: gp_covmat = self._gp_cov_layer(gp_feature, gp_output, training) # Assembles model output. 
model_output = [gp_output,] if self.return_gp_cov: model_output.append(gp_covmat) if self.return_random_features: model_output.append(gp_feature) return model_output class LaplaceRandomFeatureCovariance(tf.keras.layers.Layer): """Computes the Gaussian Process covariance using Laplace method. At training time, this layer updates the Gaussian process posterior using model features in minibatches. Attributes: momentum: (float) A discount factor used to compute the moving average for posterior precision matrix. Analogous to the momentum factor in batch normalization. If -1 then update covariance matrix using a naive sum without momentum, which is desirable if the goal is to compute the exact covariance matrix by passing through data once (say in the final epoch). ridge_penalty: (float) Initial Ridge penalty to weight covariance matrix. This value is used to stablize the eigenvalues of weight covariance estimate so that the matrix inverse can be computed for Cov = inv(t(X) * X + s * I). The ridge factor s cannot be too large since otherwise it will dominate the t(X) * X term and make covariance estimate not meaningful. likelihood: (str) The likelihood to use for computing Laplace approximation for the covariance matrix. Can be one of ('binary_logistic', 'poisson', 'gaussian'). """ def __init__(self, momentum=0.999, ridge_penalty=1., likelihood='gaussian', dtype=None, name='laplace_covariance'): if likelihood not in _SUPPORTED_LIKELIHOOD: raise ValueError( f'"likelihood" must be one of {_SUPPORTED_LIKELIHOOD}, got {likelihood}.' ) self.ridge_penalty = ridge_penalty self.momentum = momentum self.likelihood = likelihood super(LaplaceRandomFeatureCovariance, self).__init__(dtype=dtype, name=name) def compute_output_shape(self, input_shape): gp_feature_dim = input_shape[-1] return tf.TensorShape([gp_feature_dim, gp_feature_dim]) def build(self, input_shape): gp_feature_dim = input_shape[-1] # Convert gp_feature_dim to int value for TF1 compatibility. if isinstance(gp_feature_dim, tf.compat.v1.Dimension): gp_feature_dim = gp_feature_dim.value # Posterior precision matrix for the GP's random feature coefficients. self.initial_precision_matrix = ( self.ridge_penalty * tf.eye(gp_feature_dim, dtype=self.dtype)) self.precision_matrix = ( self.add_weight( name='gp_precision_matrix', shape=(gp_feature_dim, gp_feature_dim), dtype=self.dtype, initializer=tf.keras.initializers.Identity(self.ridge_penalty), trainable=False, aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA)) self.built = True def make_precision_matrix_update_op(self, gp_feature, logits, precision_matrix): """Defines update op for the precision matrix of feature weights.""" if self.likelihood != 'gaussian': if logits is None: raise ValueError( f'"logits" cannot be None when likelihood={self.likelihood}') if logits.shape[-1] != 1: raise ValueError( f'likelihood={self.likelihood} only support univariate logits.' f'Got logits dimension: {logits.shape[-1]}') batch_size = tf.shape(gp_feature)[0] batch_size = tf.cast(batch_size, dtype=gp_feature.dtype) # Computes batch-specific normalized precision matrix. if self.likelihood == 'binary_logistic': prob = tf.sigmoid(logits) prob_multiplier = prob * (1. - prob) elif self.likelihood == 'poisson': prob_multiplier = tf.exp(logits) else: prob_multiplier = 1. gp_feature_adjusted = tf.sqrt(prob_multiplier) * gp_feature precision_matrix_minibatch = tf.matmul( gp_feature_adjusted, gp_feature_adjusted, transpose_a=True) # Updates the population-wise precision matrix. 
if self.momentum > 0: # Use moving-average updates to accumulate batch-specific precision # matrices. precision_matrix_minibatch = precision_matrix_minibatch / batch_size precision_matrix_new = ( self.momentum * precision_matrix + (1. - self.momentum) * precision_matrix_minibatch) else: # Compute exact population-wise covariance without momentum. # If use this option, make sure to pass through data only once. precision_matrix_new = precision_matrix + precision_matrix_minibatch # Returns the update op. return precision_matrix.assign(precision_matrix_new) def reset_precision_matrix(self): """Resets precision matrix to its initial value. This function is useful for reseting the model's covariance matrix at the beginning of a new epoch. """ precision_matrix_reset_op = self.precision_matrix.assign( self.initial_precision_matrix) self.add_update(precision_matrix_reset_op) def compute_predictive_covariance(self, gp_feature): """Computes posterior predictive variance. Approximates the Gaussian process posterior using random features. Given training random feature Phi_tr (num_train, num_hidden) and testing random feature Phi_ts (batch_size, num_hidden). The predictive covariance matrix is computed as (assuming Gaussian likelihood): s * Phi_ts @ inv(t(Phi_tr) * Phi_tr + s * I) @ t(Phi_ts), where s is the ridge factor to be used for stablizing the inverse, and I is the identity matrix with shape (num_hidden, num_hidden). Args: gp_feature: (tf.Tensor) The random feature of testing data to be used for computing the covariance matrix. Shape (batch_size, gp_hidden_size). Returns: (tf.Tensor) Predictive covariance matrix, shape (batch_size, batch_size). """ # Computes the covariance matrix of the feature coefficient. feature_cov_matrix = tf.linalg.inv(self.precision_matrix) # Computes the covariance matrix of the gp prediction. cov_feature_product = tf.matmul( feature_cov_matrix, gp_feature, transpose_b=True) * self.ridge_penalty gp_cov_matrix = tf.matmul(gp_feature, cov_feature_product) return gp_cov_matrix def _get_training_value(self, training=None): if training is None: training = tf.keras.backend.learning_phase() if isinstance(training, int): training = bool(training) return training def call(self, inputs, logits=None, training=None): """Minibatch updates the GP's posterior precision matrix estimate. Args: inputs: (tf.Tensor) GP random features, shape (batch_size, gp_hidden_size). logits: (tf.Tensor) Pre-activation output from the model. Needed for Laplace approximation under a non-Gaussian likelihood. training: (tf.bool) whether or not the layer is in training mode. If in training mode, the gp_weight covariance is updated using gp_feature. Returns: gp_stddev (tf.Tensor): GP posterior predictive variance, shape (batch_size, batch_size). """ batch_size = tf.shape(inputs)[0] training = self._get_training_value(training) if training: # Define and register the update op for feature precision matrix. precision_matrix_update_op = self.make_precision_matrix_update_op( gp_feature=inputs, logits=logits, precision_matrix=self.precision_matrix) self.add_update(precision_matrix_update_op) # Return null estimate during training. return tf.eye(batch_size, dtype=self.dtype) else: # Return covariance estimate during inference. return self.compute_predictive_covariance(gp_feature=inputs) def mean_field_logits(logits, covariance_matrix=None, mean_field_factor=1.): """Adjust the model logits so its softmax approximates the posterior mean [1]. [1]: Zhiyun Lu, Eugene Ie, Fei Sha. 
Uncertainty Estimation with Infinitesimal Jackknife. _arXiv preprint arXiv:2006.07584_, 2020. https://arxiv.org/abs/2006.07584 Arguments: logits: A float tensor of shape (batch_size, num_classes). covariance_matrix: The covariance matrix of shape (batch_size, batch_size). If None then it assumes the covariance_matrix is an identity matrix. mean_field_factor: The scale factor for mean-field approximation, used to adjust the influence of posterior variance in posterior mean approximation. If covariance_matrix=None then it is used as the temperature parameter for temperature scaling. Returns: Tensor of adjusted logits, shape (batch_size, num_classes). """ if mean_field_factor is None or mean_field_factor < 0: return logits # Compute standard deviation. if covariance_matrix is None: variances = 1. else: variances = tf.linalg.diag_part(covariance_matrix) # Compute scaling coefficient for mean-field approximation. logits_scale = tf.sqrt(1. + variances * mean_field_factor) if len(logits.shape) > 1: # Cast logits_scale to compatible dimension. logits_scale = tf.expand_dims(logits_scale, axis=-1) return logits / logits_scale
20,440
40.294949
83
py
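A worked example of mean_field_logits from the file above; the numeric values are illustrative.

import tensorflow as tf
from official.nlp.modeling.layers import gaussian_process

logits = tf.constant([[2.0, -1.0], [0.5, 0.5]])
covmat = tf.constant([[4.0, 0.0], [0.0, 1.0]])    # per-example variances 4. and 1.
adjusted = gaussian_process.mean_field_logits(
    logits, covariance_matrix=covmat, mean_field_factor=1.0)
# Row 0 is divided by sqrt(1 + 4) and row 1 by sqrt(1 + 1).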
models
models-master/official/nlp/modeling/layers/talking_heads_attention.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Talking Head Attention layer.""" # pylint: disable=g-classes-have-attributes import math import string import gin import tensorflow as tf from official.modeling import tf_utils _CHR_IDX = string.ascii_lowercase @tf.keras.utils.register_keras_serializable(package="Text") @gin.configurable class TalkingHeadsAttention(tf.keras.layers.MultiHeadAttention): """Implements Talking-Heads Attention. This is an implementation of Talking-Heads Attention based on the paper Talking-Heads Attention (https://arxiv.org/abs/2003.02436): it enhanced multi-head attention by including linearprojections across the attention-heads dimension, immediately before and after the softmax operation. See the base class `tf.keras.layers.MultiHeadAttention` for more details. Args: num_heads: Number of attention heads. key_dim: Size of each attention head for query and key. value_dim: Size of each attention head for value. dropout: Dropout probability. use_bias: Boolean, whether the dense layers use bias vectors/matrices. output_shape: The expected shape of an output tensor, besides the batch and sequence dims. If not specified, projects back to the key feature dim. attention_axes: axes over which the attention is applied. `None` means attention over all axes, but batch, heads, and features. return_attention_scores: bool, if `True`, returns the multi-head attention scores as an additional output argument. kernel_initializer: Initializer for dense layer kernels. bias_initializer: Initializer for dense layer biases. kernel_regularizer: Regularizer for dense layer kernels. bias_regularizer: Regularizer for dense layer biases. activity_regularizer: Regularizer for dense layer activity. kernel_constraint: Constraint for dense layer kernels. bias_constraint: Constraint for dense layer kernels. """ def _build_attention(self, qkv_rank): """Builds multi-head dot-product attention computations. This function overrides base class to create additional linear projection that will be applied on attention scores before and after softmax. Args: qkv_rank: The rank of query, key, value tensors after projection. """ super(TalkingHeadsAttention, self)._build_attention(qkv_rank) # Build an equation: # (<batch_dims>, num_heads_a, ...),(num_heads_a, num_heads_b) -> # (<batch_dims>, num_heads_b, ...) # qkv_ranks has `batch_dims`, `attention_dims`, `num_heads` and `channels`. 
num_batch_dims = qkv_rank - len(self._attention_axes) - 2 # The shape of attn_scores is: # (<batch_dims>, num_heads, <query_attn_dims>, <key_attn_dims>) attn_scores_rank = num_batch_dims + 1 + len(self._attention_axes) * 2 scores_notation = _CHR_IDX[:attn_scores_rank] projection_notation = scores_notation[num_batch_dims] + ( _CHR_IDX[attn_scores_rank]) projected_scores_notation = scores_notation[:num_batch_dims] + ( _CHR_IDX[attn_scores_rank] + scores_notation[num_batch_dims + 1:]) self._talking_heads_equation = "%s,%s->%s" % ( scores_notation, projection_notation, projected_scores_notation) self._pre_softmax_weight = self.add_weight( "pre_softmax_weight", shape=(self._num_heads, self._num_heads), initializer=tf_utils.clone_initializer(self._kernel_initializer), regularizer=self._kernel_regularizer, constraint=self._kernel_constraint, dtype=self.dtype, trainable=True) self._post_softmax_weight = self.add_weight( "post_softmax_weight", shape=(self._num_heads, self._num_heads), initializer=tf_utils.clone_initializer(self._kernel_initializer), regularizer=self._kernel_regularizer, constraint=self._kernel_constraint, dtype=self.dtype, trainable=True) def _compute_attention(self, query_tensor, key_tensor, value_tensor, attention_mask=None, training=None): """Applies Dot-product attention with query, key, value tensors. This function overrides base class to apply additional linear projection on attention scores before and after softmax. Args: query_tensor: Projected query `Tensor` of shape `[B, T, N, key_dim]`. key_tensor: Projected key `Tensor` of shape `[B, T, N, key_dim]`. value_tensor: Projected value `Tensor` of shape `[B, T, N, value_dim]`. attention_mask: a boolean mask of shape `[B, T, S]`, that prevents attention to certain positions. training: Python boolean indicating whether the layer should behave in training mode (adding dropout) or in inference mode (doing nothing). Returns: attention_output: Multi-headed outputs of attention computation. attention_scores: Multi-headed attention weights. """ # Take the dot product between "query" and "key" to get the raw # attention scores. attention_scores = tf.einsum(self._dot_product_equation, key_tensor, query_tensor) attention_scores = tf.multiply(attention_scores, 1.0 / math.sqrt(float(self._key_dim))) # Apply linear projection before softmax attention_scores = tf.einsum(self._talking_heads_equation, attention_scores, self._pre_softmax_weight) # Normalize the attention scores to probabilities. # `attention_scores` = [B, N, T, S] attention_scores = self._masked_softmax(attention_scores, attention_mask) # Apply linear projection after softmax attention_scores = tf.einsum(self._talking_heads_equation, attention_scores, self._post_softmax_weight) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_scores_dropout = self._dropout_layer( attention_scores, training=training) # `context_layer` = [B, T, N, H] attention_output = tf.einsum(self._combine_equation, attention_scores_dropout, value_tensor) return attention_output, attention_scores
6,903
42.696203
80
py
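Usage sketch: since TalkingHeadsAttention subclasses tf.keras.layers.MultiHeadAttention, it is invoked the same way; the shapes below are illustrative.

import tensorflow as tf
from official.nlp.modeling.layers import talking_heads_attention

layer = talking_heads_attention.TalkingHeadsAttention(num_heads=4, key_dim=16)
query = tf.random.normal([2, 8, 64])     # [batch, target_seq, hidden]
value = tf.random.normal([2, 8, 64])     # [batch, source_seq, hidden]
output = layer(query, value)             # [2, 8, 64]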
models
models-master/official/nlp/modeling/layers/self_attention_mask.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Keras layer that creates a self-attention mask."""

from typing import Optional

import tensorflow as tf


def get_mask(inputs: tf.Tensor,
             to_mask: tf.Tensor,
             dtype: Optional[tf.DType] = None) -> tf.Tensor:
  """Gets a 3D self-attention mask.

  Args:
    inputs: from_tensor: 2D or 3D Tensor of shape
      [batch_size, from_seq_length, ...].
    to_mask: int32 Tensor of shape [batch_size, to_seq_length].
    dtype: the output Tensor dtype.

  Returns:
    float Tensor of shape [batch_size, from_seq_length, to_seq_length].
  """
  from_shape = tf.shape(inputs)
  batch_size = from_shape[0]
  from_seq_length = from_shape[1]
  dtype = inputs.dtype if dtype is None else dtype

  to_shape = tf.shape(to_mask)
  to_seq_length = to_shape[1]
  to_mask = tf.cast(
      tf.reshape(to_mask, [batch_size, 1, to_seq_length]), dtype=dtype)

  return tf.broadcast_to(to_mask, [batch_size, from_seq_length, to_seq_length])


@tf.keras.utils.register_keras_serializable(package='Text')
class SelfAttentionMask(tf.keras.layers.Layer):
  """Create 3D attention mask from a 2D tensor mask.

  inputs[0]: from_tensor: 2D or 3D Tensor of shape
    [batch_size, from_seq_length, ...].
  inputs[1]: to_mask: int32 Tensor of shape [batch_size, to_seq_length].

  Returns:
    float Tensor of shape [batch_size, from_seq_length, to_seq_length].
  """

  def call(self, inputs, to_mask=None):
    if isinstance(inputs, list) and to_mask is None:
      to_mask = inputs[1]
      inputs = inputs[0]
    return get_mask(inputs, to_mask)

2,163
32.292308
79
py
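Since the row above holds the complete self_attention_mask module, a short usage sketch may help: it shows how a 2D padding mask is broadcast into the 3D shape attention layers consume, using only the two entry points defined above. The tensor values and shapes are made up for illustration.

import tensorflow as tf
from official.nlp.modeling.layers import self_attention_mask

embeddings = tf.zeros([2, 4, 16])           # [batch_size, from_seq_length, width]
padding_mask = tf.constant([[1, 1, 1, 0],
                            [1, 1, 0, 0]])  # [batch_size, to_seq_length]

# Functional form.
mask_3d = self_attention_mask.get_mask(embeddings, padding_mask)
print(mask_3d.shape)  # (2, 4, 4), dtype float32 inherited from `embeddings`

# Layer form, same result; it also accepts the legacy [inputs, to_mask] list input.
layer = self_attention_mask.SelfAttentionMask()
mask_3d = layer(embeddings, padding_mask)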
models
models-master/official/nlp/modeling/layers/mobile_bert_layers_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from absl.testing import parameterized
import numpy as np
import tensorflow as tf

from official.nlp.modeling.layers import mobile_bert_layers
from official.nlp.modeling.networks import mobile_bert_encoder


def generate_fake_input(batch_size=1, seq_len=5, vocab_size=10000, seed=0):
  """Generate consistent fake integer input sequences."""
  np.random.seed(seed)
  fake_input = []
  for _ in range(batch_size):
    fake_input.append([])
    for _ in range(seq_len):
      fake_input[-1].append(np.random.randint(0, vocab_size))
  fake_input = np.asarray(fake_input)
  return fake_input


class MobileBertEncoderTest(parameterized.TestCase, tf.test.TestCase):

  def test_embedding_layer_with_token_type(self):
    layer = mobile_bert_layers.MobileBertEmbedding(10, 8, 2, 16)
    input_seq = tf.Variable([[2, 3, 4, 5]])
    token_type = tf.Variable([[0, 1, 1, 1]])
    output = layer(input_seq, token_type)
    output_shape = output.shape.as_list()
    expected_shape = [1, 4, 16]
    self.assertListEqual(output_shape, expected_shape, msg=None)

  def test_embedding_layer_without_token_type(self):
    layer = mobile_bert_layers.MobileBertEmbedding(10, 8, 2, 16)
    input_seq = tf.Variable([[2, 3, 4, 5]])
    output = layer(input_seq)
    output_shape = output.shape.as_list()
    expected_shape = [1, 4, 16]
    self.assertListEqual(output_shape, expected_shape, msg=None)

  def test_embedding_layer_get_config(self):
    layer = mobile_bert_layers.MobileBertEmbedding(
        word_vocab_size=16,
        word_embed_size=32,
        type_vocab_size=4,
        output_embed_size=32,
        max_sequence_length=32,
        normalization_type='layer_norm',
        initializer=tf.keras.initializers.TruncatedNormal(stddev=0.01),
        dropout_rate=0.5)
    layer_config = layer.get_config()
    new_layer = mobile_bert_layers.MobileBertEmbedding.from_config(
        layer_config)
    self.assertEqual(layer_config, new_layer.get_config())

  def test_no_norm(self):
    layer = mobile_bert_layers.NoNorm()
    feature = tf.random.normal([2, 3, 4])
    output = layer(feature)
    output_shape = output.shape.as_list()
    expected_shape = [2, 3, 4]
    self.assertListEqual(output_shape, expected_shape, msg=None)

  @parameterized.named_parameters(('with_kq_shared_bottleneck', False),
                                  ('without_kq_shared_bottleneck', True))
  def test_transfomer_kq_shared_bottleneck(self, is_kq_shared):
    feature = tf.random.uniform([2, 3, 512])
    layer = mobile_bert_layers.MobileBertTransformer(
        key_query_shared_bottleneck=is_kq_shared)
    output = layer(feature)
    output_shape = output.shape.as_list()
    expected_shape = [2, 3, 512]
    self.assertListEqual(output_shape, expected_shape, msg=None)

  def test_transfomer_with_mask(self):
    feature = tf.random.uniform([2, 3, 512])
    input_mask = [[[0., 0., 1.], [0., 0., 1.], [0., 0., 1.]],
                  [[0., 1., 1.], [0., 1., 1.], [0., 1., 1.]]]
    input_mask = np.asarray(input_mask)
    layer = mobile_bert_layers.MobileBertTransformer()
    output = layer(feature, input_mask)
    output_shape = output.shape.as_list()
    expected_shape = [2, 3, 512]
    self.assertListEqual(output_shape, expected_shape, msg=None)

  def test_transfomer_return_attention_score(self):
    sequence_length = 5
    num_attention_heads = 8
    feature = tf.random.uniform([2, sequence_length, 512])
    layer = mobile_bert_layers.MobileBertTransformer(
        num_attention_heads=num_attention_heads)
    _, attention_score = layer(feature, return_attention_scores=True)
    expected_shape = [2, num_attention_heads, sequence_length, sequence_length]
    self.assertListEqual(
        attention_score.shape.as_list(), expected_shape, msg=None)

  def test_transformer_get_config(self):
    layer = mobile_bert_layers.MobileBertTransformer(
        hidden_size=32,
        num_attention_heads=2,
        intermediate_size=48,
        intermediate_act_fn='gelu',
        hidden_dropout_prob=0.5,
        attention_probs_dropout_prob=0.4,
        intra_bottleneck_size=64,
        use_bottleneck_attention=True,
        key_query_shared_bottleneck=False,
        num_feedforward_networks=2,
        normalization_type='layer_norm',
        initializer=tf.keras.initializers.TruncatedNormal(stddev=0.01),
        name='block')
    layer_config = layer.get_config()
    new_layer = mobile_bert_layers.MobileBertTransformer.from_config(
        layer_config)
    self.assertEqual(layer_config, new_layer.get_config())


class MobileBertMaskedLMTest(tf.test.TestCase):

  def create_layer(self,
                   vocab_size,
                   hidden_size,
                   embedding_width,
                   output='predictions',
                   xformer_stack=None):
    # First, create a transformer stack that we can use to get the LM's
    # vocabulary weight.
    if xformer_stack is None:
      xformer_stack = mobile_bert_encoder.MobileBERTEncoder(
          word_vocab_size=vocab_size,
          num_blocks=1,
          hidden_size=hidden_size,
          num_attention_heads=4,
          word_embed_size=embedding_width)

    # Create a maskedLM from the transformer stack.
    test_layer = mobile_bert_layers.MobileBertMaskedLM(
        embedding_table=xformer_stack.get_embedding_table(), output=output)
    return test_layer

  def test_layer_creation(self):
    vocab_size = 100
    sequence_length = 32
    hidden_size = 64
    embedding_width = 32
    num_predictions = 21
    test_layer = self.create_layer(
        vocab_size=vocab_size,
        hidden_size=hidden_size,
        embedding_width=embedding_width)

    # Make sure that the output tensor of the masked LM is the right shape.
    lm_input_tensor = tf.keras.Input(shape=(sequence_length, hidden_size))
    masked_positions = tf.keras.Input(shape=(num_predictions,), dtype=tf.int32)
    output = test_layer(lm_input_tensor, masked_positions=masked_positions)

    expected_output_shape = [None, num_predictions, vocab_size]
    self.assertEqual(expected_output_shape, output.shape.as_list())

  def test_layer_invocation_with_external_logits(self):
    vocab_size = 100
    sequence_length = 32
    hidden_size = 64
    embedding_width = 32
    num_predictions = 21
    xformer_stack = mobile_bert_encoder.MobileBERTEncoder(
        word_vocab_size=vocab_size,
        num_blocks=1,
        hidden_size=hidden_size,
        num_attention_heads=4,
        word_embed_size=embedding_width)
    test_layer = self.create_layer(
        vocab_size=vocab_size,
        hidden_size=hidden_size,
        embedding_width=embedding_width,
        xformer_stack=xformer_stack,
        output='predictions')
    logit_layer = self.create_layer(
        vocab_size=vocab_size,
        hidden_size=hidden_size,
        embedding_width=embedding_width,
        xformer_stack=xformer_stack,
        output='logits')

    # Create a model from the masked LM layer.
    lm_input_tensor = tf.keras.Input(shape=(sequence_length, hidden_size))
    masked_positions = tf.keras.Input(shape=(num_predictions,), dtype=tf.int32)
    output = test_layer(lm_input_tensor, masked_positions)
    logit_output = logit_layer(lm_input_tensor, masked_positions)
    logit_output = tf.keras.layers.Activation(tf.nn.log_softmax)(logit_output)
    logit_layer.set_weights(test_layer.get_weights())
    model = tf.keras.Model([lm_input_tensor, masked_positions], output)
    logits_model = tf.keras.Model(([lm_input_tensor, masked_positions]),
                                  logit_output)

    # Invoke the masked LM on some fake data to make sure there are no runtime
    # errors in the code.
    batch_size = 3
    lm_input_data = 10 * np.random.random_sample(
        (batch_size, sequence_length, hidden_size))
    masked_position_data = np.random.randint(
        sequence_length, size=(batch_size, num_predictions))

    # ref_outputs = model.predict([lm_input_data, masked_position_data])
    # outputs = logits_model.predict([lm_input_data, masked_position_data])
    ref_outputs = model([lm_input_data, masked_position_data])
    outputs = logits_model([lm_input_data, masked_position_data])

    # Ensure that the tensor shapes are correct.
    expected_output_shape = (batch_size, num_predictions, vocab_size)
    self.assertEqual(expected_output_shape, ref_outputs.shape)
    self.assertEqual(expected_output_shape, outputs.shape)
    self.assertAllClose(ref_outputs, outputs)

  def test_layer_invocation(self):
    vocab_size = 100
    sequence_length = 32
    hidden_size = 64
    embedding_width = 32
    num_predictions = 21
    test_layer = self.create_layer(
        vocab_size=vocab_size,
        hidden_size=hidden_size,
        embedding_width=embedding_width)

    # Create a model from the masked LM layer.
    lm_input_tensor = tf.keras.Input(shape=(sequence_length, hidden_size))
    masked_positions = tf.keras.Input(shape=(num_predictions,), dtype=tf.int32)
    output = test_layer(lm_input_tensor, masked_positions)
    model = tf.keras.Model([lm_input_tensor, masked_positions], output)

    # Invoke the masked LM on some fake data to make sure there are no runtime
    # errors in the code.
    batch_size = 3
    lm_input_data = 10 * np.random.random_sample(
        (batch_size, sequence_length, hidden_size))
    masked_position_data = np.random.randint(
        2, size=(batch_size, num_predictions))
    _ = model.predict([lm_input_data, masked_position_data])

  def test_unknown_output_type_fails(self):
    with self.assertRaisesRegex(ValueError, 'Unknown `output` value "bad".*'):
      _ = self.create_layer(
          vocab_size=8, hidden_size=8, embedding_width=4, output='bad')

  def test_hidden_size_smaller_than_embedding_width(self):
    hidden_size = 8
    sequence_length = 32
    num_predictions = 20
    with self.assertRaisesRegex(
        ValueError,
        'hidden size 8 cannot be smaller than embedding width 16.'):
      test_layer = self.create_layer(
          vocab_size=8, hidden_size=8, embedding_width=16)
      lm_input_tensor = tf.keras.Input(shape=(sequence_length, hidden_size))
      masked_positions = tf.keras.Input(
          shape=(num_predictions,), dtype=tf.int32)
      _ = test_layer(lm_input_tensor, masked_positions)


if __name__ == '__main__':
  tf.test.main()
10,880
38.711679
80
py
models
models-master/official/nlp/modeling/layers/per_dim_scale_attention.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Keras-based attention layer with learnable per dim scaling."""

import gin
import numpy as np
import tensorflow as tf


@gin.configurable
@tf.keras.utils.register_keras_serializable(package='Text')
class PerDimScaleAttention(tf.keras.layers.MultiHeadAttention):
  """Learn scales for individual dims.

  It can improve quality but might hurt training stability.
  """

  def _build_from_signature(self, query, value, key=None):
    super()._build_from_signature(query=query, value=value, key=key)  # pytype: disable=attribute-error
    self._scale_dim = self._key_dim
    with tf.init_scope():
      self.per_dim_scale = self.add_weight(
          name='per_dim_scale',
          shape=(self._scale_dim,),
          initializer='zeros',
          dtype=self.dtype,
          trainable=True)

  def _scale_query(self, query):
    # 1.0/tf.nn.softplus(0.0) = 1.442695041. Hard code this number so that we
    # can avoid unnecessary XLA op fusion mess on TPU.
    r_softplus_0 = 1.442695041
    scale = tf.constant(
        r_softplus_0 / np.sqrt(float(self._scale_dim)), dtype=query.dtype)
    scale *= tf.nn.softplus(self.per_dim_scale)
    return query * scale

  def _compute_attention(self,
                         query,
                         key,
                         value,
                         attention_mask=None,
                         training=None):
    query = self._scale_query(query)
    attention_scores = tf.einsum(self._dot_product_equation, key, query)
    attention_scores = self._masked_softmax(attention_scores, attention_mask)

    attention_scores_dropout = self._dropout_layer(
        attention_scores, training=training)

    # `context_layer` = [B, T, N, H]
    attention_output = tf.einsum(self._combine_equation,
                                 attention_scores_dropout, value)
    return attention_output, attention_scores

  def call(  # pytype: disable=signature-mismatch  # overriding-parameter-count-checks
      self,
      query,
      value,
      key=None,
      attention_mask=None,
      return_attention_scores=False,
      training=None,
  ):
    if not self._built_from_signature:
      self._build_from_signature(query=query, value=value, key=key)
    if key is None:
      key = value

    # N = `num_attention_heads`
    # H = `size_per_head`
    # `query` = [B, T, N ,H]
    query = self._query_dense(query)

    # `key` = [B, S, N, H]
    key = self._key_dense(key)

    # `value` = [B, S, N, H]
    value = self._value_dense(value)

    attention_output, attention_scores = self._compute_attention(
        query, key, value, attention_mask, training)
    attention_output = self._output_dense(attention_output)

    if return_attention_scores:
      return attention_output, attention_scores
    return attention_output
3,406
32.401961
103
py
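PerDimScaleAttention above swaps the fixed 1/sqrt(key_dim) query scaling for a learned, softplus-gated scale per key dimension. The following sketch is an illustration only (the shapes and values are assumptions, not taken from the repository); it shows that the layer can be used as a drop-in replacement for tf.keras.layers.MultiHeadAttention, and that because per_dim_scale is initialized to zeros, the first forward pass reproduces the usual 1/sqrt(key_dim) scaling, since 1.442695041 * softplus(0) = 1.

import tensorflow as tf
from official.nlp.modeling.layers import per_dim_scale_attention

query = tf.random.uniform([2, 8, 64])   # [batch_size, target_length, width]
value = tf.random.uniform([2, 10, 64])  # [batch_size, source_length, width]

attention = per_dim_scale_attention.PerDimScaleAttention(
    num_heads=4, key_dim=16)
output = attention(query, value)
print(output.shape)  # (2, 8, 64)

# At initialization per_dim_scale == 0, so the effective scale equals
# 1.442695041 / sqrt(key_dim) * softplus(0) = 1 / sqrt(key_dim).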