# ==== gunyah-hypervisor-develop/tools/build/__init__.py (empty file) ====
# ==== gunyah-hypervisor-develop/tools/build/gen_ver.py ====
#!/usr/bin/env python3
# coding: utf-8
#
# © 2021 Qualcomm Innovation Center, Inc. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
import argparse
import subprocess
import sys
from datetime import datetime
from genfile import GenFile
def main():
args = argparse.ArgumentParser()
args.add_argument("-o", "--output", help="Output file (default: stdout)",
default=None)
args.add_argument("-c", dest='commit', help="Use specified GIT revision",
default="HEAD")
args.add_argument("-C", dest='path',
help="Run GIT in the specified directory", default=None)
args.add_argument("-n", dest='now', action='store_true',
help="Use now as timestamp")
options = args.parse_args()
cwd = options.path
ret = subprocess.run(['git', 'diff', options.commit, '--quiet'],
cwd=cwd, stdout=subprocess.PIPE)
dirty = ret.returncode
    ret = subprocess.run(['git', 'rev-parse', '--short', options.commit],
                         cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if ret.returncode:
raise Exception('git rev-parse failed\n', ret.stderr)
rev = ret.stdout.decode("utf-8").strip()
id = rev + ('-dirty' if dirty else '')
if options.now or dirty:
utcnow = datetime.utcnow()
utcnow = utcnow.replace(microsecond=0)
time = utcnow.isoformat(sep=' ')
time = time + ' UTC'
else:
        ret = subprocess.run(['git', 'show', '-s', '--pretty=%cd',
                              '--date=iso-local', options.commit],
                             cwd=cwd, env={'TZ': 'UTC'},
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        if ret.returncode:
            raise Exception('git show failed\n', ret.stderr)
time = ret.stdout.decode("utf-8").strip()
time = time.replace('+0000', 'UTC')
out = '// This file is automatically generated.\n'
out += '// Do not manually resolve conflicts! Contact ' \
'hypervisor.team for assistance.\n'
out += '#define HYP_GIT_VERSION {:s}\n'.format(id)
out += '#define HYP_BUILD_DATE \"{:s}\"\n'.format(time)
if options.output:
with GenFile(options.output, 'w', encoding='utf-8') as f:
f.write(out)
else:
sys.stdout.write(out)
if __name__ == '__main__':
sys.exit(main())
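# Illustrative output of this script (hypothetical revision and date, assuming a clean
# working tree so the commit timestamp is used):
#
#   // This file is automatically generated.
#   // Do not manually resolve conflicts! Contact hypervisor.team for assistance.
#   #define HYP_GIT_VERSION abc1234
#   #define HYP_BUILD_DATE "2021-06-01 12:00:00 UTC"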
# ==== EmpTransfo-master/train_full.py ====
# Copyright (c) 2019-present, HuggingFace Inc.
# All rights reserved. This source code is licensed under the BSD-style license found in the LICENSE file in the root directory of this source tree.
import os
import math
import logging
from pprint import pformat
from argparse import ArgumentParser
from collections import defaultdict
from itertools import chain
import torch
from torch.nn.parallel import DistributedDataParallel
from torch.utils.data import DataLoader, TensorDataset
from ignite.engine import Engine, Events
from ignite.handlers import ModelCheckpoint
from ignite.metrics import Accuracy, Loss, MetricsLambda, RunningAverage
from ignite.contrib.handlers import ProgressBar, PiecewiseLinear
from config import Config
from ignite.contrib.handlers.tensorboard_logger import TensorboardLogger, OutputHandler, OptimizerParamsHandler
from pytorch_pretrained_bert import (OpenAIAdam, OpenAIGPTDoubleHeadsModel, OpenAIGPTTokenizer,
GPT2DoubleHeadsModel, GPT2Tokenizer, WEIGHTS_NAME, CONFIG_NAME,
BertModel, BertTokenizer)
from utils import get_dataset, get_dataset_for_daily_dialog
SPECIAL_TOKENS = ["<bos>", "<eos>", "<speaker1>", "<speaker2>",
"<no_emotion>", "<happiness>", "<surprise>", "<sadness>", "<disgust>", "<anger>", "<fear>",
"<directive>", "<inform>", "<commissive>", "<question>",
"<pad>"]
MODEL_INPUTS = ["input_ids", "mc_token_ids", "lm_labels", "mc_labels", "token_type_ids", "token_emotion_ids"]
PADDED_INPUTS = ["input_ids", "lm_labels", "token_type_ids", "token_emotion_ids"]
logger = logging.getLogger(__file__)
def average_distributed_scalar(scalar, config):
""" Average a scalar over the nodes if we are in distributed training. We use this for distributed evaluation. """
if config.local_rank == -1:
return scalar
scalar_t = torch.tensor(scalar, dtype=torch.float, device=config.device) / torch.distributed.get_world_size()
torch.distributed.all_reduce(scalar_t, op=torch.distributed.ReduceOp.SUM)
return scalar_t.item()
def pad_dataset(dataset, padding=0):
""" Pad the dataset. This could be optimized by defining a Dataset class and padd only batches but this is simpler. """
max_l = max(len(x) for x in dataset["input_ids"])
for name in PADDED_INPUTS:
dataset[name] = [x + [padding if name != "lm_labels" else -1] * (max_l - len(x)) for x in dataset[name]]
return dataset
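# Minimal sketch of what pad_dataset does (hypothetical toy values):
#
#   d = {"input_ids": [[1, 2, 3], [4]], "lm_labels": [[5, 6, 7], [8]],
#        "token_type_ids": [[0, 0, 0], [1]], "token_emotion_ids": [[2, 2, 2], [3]]}
#   pad_dataset(d, padding=9)
#   # every PADDED_INPUTS entry is right-padded to the longest "input_ids" length (3):
#   # d["input_ids"][1] -> [4, 9, 9] and d["lm_labels"][1] -> [8, -1, -1]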
def build_input_from_segments(history, emotions, reply, candidate_emotion, tokenizer, lm_labels=False, with_eos=True):
""" Build a sequence of input from 3 segments: persona, history and last reply """
bos, eos, speaker1, speaker2 = tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS[:4])
instance = {}
#sequence = [[bos] + history[0] + list(chain(*history[1:]))] + [reply + ([eos] if with_eos else [])] #seq = [personas, history, reply] concatenate all persona sentences
    sequence = [[bos] + history[0]] + history[1:] + [reply + ([eos] if with_eos else [])]
    sequence = [[speaker2 if (len(sequence) - i) % 2 else speaker1] + s for i, s in enumerate(sequence)]
all_emotions = emotions + [candidate_emotion]
sequence = [[all_emotions[i]] + s for i, s in enumerate(sequence)]
instance["input_ids"] = list(chain(*sequence))
instance["token_type_ids"] = [speaker2 if i % 2 else speaker1 for i, s in enumerate(sequence) for _ in s] # the last for is for repeating the speaker1 and speaker2 for all tokens
instance["token_emotion_ids"] = [emotions[i] for i, s in enumerate(sequence[:-1]) for _ in s]+[candidate_emotion]*len(sequence[-1])
instance["mc_token_ids"] = len(instance["input_ids"]) - 1
instance["lm_labels"] = [-1] * len(instance["input_ids"])
if lm_labels:
instance["lm_labels"] = ([-1] * sum(len(s) for s in sequence[:-1])) + [-1] + sequence[-1][1:] #all -1 except for reply, reply is just the ids
return instance, sequence
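# Worked sketch of the layout this builder produces (symbolic tokens, hypothetical
# two-turn history): with history = [h0, h1], emotions = [e0, e1], reply = r and
# candidate_emotion = ec, the three segments become
#
#   [e0, <speaker2>, <bos>, *h0]   # first turn, prefixed with <bos>
#   [e1, <speaker1>, *h1]          # speaker tokens alternate so the reply gets <speaker2>
#   [ec, <speaker2>, *r, <eos>]    # candidate reply, closed with <eos>
#
# input_ids is the concatenation of these segments, token_type_ids repeats an
# alternating speaker id over each segment, and token_emotion_ids repeats each
# segment's emotion id over its tokens.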
def get_data_loaders(config, tokenizer):
""" Prepare the dataset for training and evaluation """
personachat = get_dataset_for_daily_dialog(tokenizer, config.dataset_path, config.dataset_cache, SPECIAL_TOKENS)
# personachat["train"] = personachat["train"][:100]
# personachat["valid"] = personachat["valid"][:10]
logger.info("Build inputs and labels")
datasets = {"train": defaultdict(list), "valid": defaultdict(list)}
gpu_max_length = 310
for dataset_name, dataset in personachat.items():
num_candidates = len(dataset[0]["utterances"][0]["candidates"])
if config.num_candidates > 0 and dataset_name == 'train':
num_candidates = min(config.num_candidates, num_candidates)
for dialog in dataset:
for utterance in dialog["utterances"]:
history = utterance["history"][-(2*config.max_history+1):]
emotions = utterance["emotion"][-(2 * config.max_history + 1):]
for j, candidate in enumerate(utterance["candidates"][-num_candidates:]):
lm_labels = bool(j == num_candidates-1) #the true label is always the last one in list of candidates
candidate_emotion = utterance['candidates_emotions'][j]
instance, _ = build_input_from_segments(history, emotions, candidate, candidate_emotion, tokenizer, lm_labels)
#print(len(instance["input_ids"]))
if len(instance["input_ids"]) > gpu_max_length:
truncated_history = [hist[:10] for hist in history]
truncated_candidate = candidate[:10]
instance, _ = build_input_from_segments(truncated_history, emotions, truncated_candidate, candidate_emotion, tokenizer, lm_labels)
for input_name, input_array in instance.items():
datasets[dataset_name][input_name].append(input_array)
datasets[dataset_name]["mc_labels"].append(num_candidates - 1)
datasets[dataset_name]["n_candidates"] = num_candidates
logger.info("Pad inputs and convert to Tensor")
tensor_datasets = {"train": [], "valid": []}
for dataset_name, dataset in datasets.items():
dataset = pad_dataset(dataset, padding=tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS[-1]))
for input_name in MODEL_INPUTS:
tensor = torch.tensor(dataset[input_name])
if input_name != "mc_labels":
tensor = tensor.view((-1, datasets[dataset_name]["n_candidates"]) + tensor.shape[1:])
tensor_datasets[dataset_name].append(tensor)
logger.info("Build train and validation dataloaders")
train_dataset, valid_dataset = TensorDataset(*tensor_datasets["train"]), TensorDataset(*tensor_datasets["valid"])
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset) if config.distributed else None
valid_sampler = torch.utils.data.distributed.DistributedSampler(valid_dataset) if config.distributed else None
train_loader = DataLoader(train_dataset, sampler=train_sampler, batch_size=config.train_batch_size, shuffle=False)
valid_loader = DataLoader(valid_dataset, sampler=valid_sampler, batch_size=config.valid_batch_size, shuffle=False)
logger.info("Train dataset (Batch, Candidates, Seq length): {}".format(train_dataset.tensors[0].shape))
logger.info("Valid dataset (Batch, Candidates, Seq length): {}".format(valid_dataset.tensors[0].shape))
return train_loader, valid_loader, train_sampler, valid_sampler
def train():
config_file = "configs/train_full_config.json"
config = Config.from_json_file(config_file)
# logging is set to INFO (resp. WARN) for main (resp. auxiliary) process. logger.info => log main process only, logger.warning => log all processes
logging.basicConfig(level=logging.INFO if config.local_rank in [-1, 0] else logging.WARN)
logger.warning("Running process %d", config.local_rank) # This is a logger.warning: it will be printed by all distributed processes
logger.info("Arguments: %s", pformat(config))
# Initialize distributed training if needed
config.distributed = (config.local_rank != -1)
if config.distributed:
torch.cuda.set_device(config.local_rank)
config.device = torch.device("cuda", config.local_rank)
torch.distributed.init_process_group(backend='nccl', init_method='env://')
logger.info("Prepare tokenizer, pretrained model and optimizer - add special tokens for fine-tuning")
tokenizer_class = GPT2Tokenizer if "gpt2" in config.model_checkpoint else OpenAIGPTTokenizer
tokenizer = tokenizer_class.from_pretrained(config.model_checkpoint)
model_class = GPT2DoubleHeadsModel if "gpt2" in config.model_checkpoint else OpenAIGPTDoubleHeadsModel
model = model_class.from_pretrained(config.model_checkpoint)
tokenizer.set_special_tokens(SPECIAL_TOKENS)
model.set_num_special_tokens(len(SPECIAL_TOKENS))
model.to(config.device)
optimizer = OpenAIAdam(model.parameters(), lr=config.lr)
# Prepare model for FP16 and distributed training if needed (order is important, distributed should be the last)
if config.fp16:
from apex import amp # Apex is only required if we use fp16 training
model, optimizer = amp.initialize(model, optimizer, opt_level=config.fp16)
if config.distributed:
model = DistributedDataParallel(model, device_ids=[config.local_rank], output_device=config.local_rank)
logger.info("Prepare datasets")
train_loader, val_loader, train_sampler, valid_sampler = get_data_loaders(config, tokenizer)
# Training function and trainer
def update(engine, batch):
model.train()
input_ids, mc_token_ids, lm_labels, mc_labels, token_type_ids, token_emotion_ids = tuple(input_tensor.to(config.device) for input_tensor in batch)
lm_loss, mc_loss = model(input_ids, mc_token_ids, lm_labels, mc_labels, token_type_ids, token_emotion_ids)
loss = (lm_loss * config.lm_coef + mc_loss * config.mc_coef) / config.gradient_accumulation_steps
if config.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), config.max_norm)
else:
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), config.max_norm)
if engine.state.iteration % config.gradient_accumulation_steps == 0:
optimizer.step()
optimizer.zero_grad()
return loss.item()
trainer = Engine(update)
# Evaluation function and evaluator (evaluator output is the input of the metrics)
def inference(engine, batch):
model.eval()
with torch.no_grad():
batch = tuple(input_tensor.to(config.device) for input_tensor in batch)
input_ids, mc_token_ids, lm_labels, mc_labels, token_type_ids, token_emotion_ids = batch
#logger.info(tokenizer.decode(input_ids[0, -1, :].tolist()))
model_outputs = model(input_ids, mc_token_ids, token_type_ids=token_type_ids, token_emotion_ids=token_emotion_ids)
lm_logits, mc_logits = model_outputs[0], model_outputs[1] # So we can also use GPT2 outputs
lm_logits_flat_shifted = lm_logits[..., :-1, :].contiguous().view(-1, lm_logits.size(-1))
lm_labels_flat_shifted = lm_labels[..., 1:].contiguous().view(-1)
return (lm_logits_flat_shifted, mc_logits), (lm_labels_flat_shifted, mc_labels)
evaluator = Engine(inference)
# Attach evaluation to trainer: we evaluate when we start the training and at the end of each epoch
trainer.add_event_handler(Events.EPOCH_COMPLETED, lambda _: evaluator.run(val_loader))
if config.n_epochs < 1:
trainer.add_event_handler(Events.COMPLETED, lambda _: evaluator.run(val_loader))
if config.eval_before_start:
trainer.add_event_handler(Events.STARTED, lambda _: evaluator.run(val_loader))
# Make sure distributed data samplers split the dataset nicely between the distributed processes
if config.distributed:
trainer.add_event_handler(Events.EPOCH_STARTED, lambda engine: train_sampler.set_epoch(engine.state.epoch))
evaluator.add_event_handler(Events.EPOCH_STARTED, lambda engine: valid_sampler.set_epoch(engine.state.epoch))
# Linearly decrease the learning rate from lr to zero
scheduler = PiecewiseLinear(optimizer, "lr", [(0, config.lr), (config.n_epochs * len(train_loader), 0.0)])
trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)
# Prepare metrics - note how we compute distributed metrics
RunningAverage(output_transform=lambda x: x).attach(trainer, "loss")
metrics = {"nll": Loss(torch.nn.CrossEntropyLoss(ignore_index=-1), output_transform=lambda x: (x[0][0], x[1][0])),
"accuracy": Accuracy(output_transform=lambda x: (x[0][1], x[1][1]))}
metrics.update({"average_nll": MetricsLambda(average_distributed_scalar, metrics["nll"], config),
"average_accuracy": MetricsLambda(average_distributed_scalar, metrics["accuracy"], config)})
metrics["average_ppl"] = MetricsLambda(math.exp, metrics["average_nll"])
for name, metric in metrics.items():
metric.attach(evaluator, name)
# On the main process: add progress bar, tensorboard, checkpoints and save model, configuration and tokenizer before we start to train
if config.local_rank in [-1, 0]:
pbar = ProgressBar(persist=True)
pbar.attach(trainer, metric_names=["loss"])
evaluator.add_event_handler(Events.COMPLETED, lambda _: pbar.log_message("Validation: %s" % pformat(evaluator.state.metrics)))
tb_logger = TensorboardLogger(log_dir=config.log_dir)
tb_logger.attach(trainer, log_handler=OutputHandler(tag="training", metric_names=["loss"]), event_name=Events.ITERATION_COMPLETED)
tb_logger.attach(trainer, log_handler=OptimizerParamsHandler(optimizer), event_name=Events.ITERATION_STARTED)
tb_logger.attach(evaluator, log_handler=OutputHandler(tag="validation", metric_names=list(metrics.keys()), another_engine=trainer), event_name=Events.EPOCH_COMPLETED)
checkpoint_handler = ModelCheckpoint(tb_logger.writer.log_dir, 'checkpoint', save_interval=1, n_saved=3)
        trainer.add_event_handler(Events.EPOCH_COMPLETED, checkpoint_handler, {'mymodel': getattr(model, 'module', model)})  # "getattr" takes care of distributed encapsulation
torch.save(config, tb_logger.writer.log_dir + '/model_training_args.bin')
getattr(model, 'module', model).config.to_json_file(os.path.join(tb_logger.writer.log_dir, CONFIG_NAME))
tokenizer.save_vocabulary(tb_logger.writer.log_dir)
# Run the training
trainer.run(train_loader, max_epochs=config.n_epochs)
# On the main process: close tensorboard logger and rename the last checkpoint (for easy re-loading with OpenAIGPTModel.from_pretrained method)
if config.local_rank in [-1, 0] and config.n_epochs > 0:
os.rename(checkpoint_handler._saved[-1][1][-1], os.path.join(tb_logger.writer.log_dir, WEIGHTS_NAME)) # TODO: PR in ignite to have better access to saved file paths (cleaner)
tb_logger.close()
if __name__ == "__main__":
train()
# ==== EmpTransfo-master/evaluate.py ====
# Copyright (c) 2019-present, HuggingFace Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import logging
import random
from argparse import ArgumentParser
from itertools import chain
from pprint import pformat
import numpy as np
import torch
import torch.nn.functional as F
from tqdm import tqdm
from config import InteractConfig
from pytorch_pretrained_bert import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer, GPT2LMHeadModel, GPT2Tokenizer
from utils import download_pretrained_model, get_dataset, _bleu, _f1_score
def build_input_from_segments(persona, history, reply, tokenizer, SPECIAL_TOKENS, lm_labels=False, with_eos=True):
""" Build a sequence of input from 3 segments: persona, history and last reply """
bos, eos, speaker1, speaker2 = tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS[:-1])
instance = {}
sequence = [[bos] + list(chain(*persona))] + history + [
reply + ([eos] if with_eos else [])] # seq = [personas, history, reply] concatenate all persona sentences
sequence = [sequence[0]] + [[speaker2 if (len(sequence) - i) % 2 else speaker1] + s for i, s in
enumerate(sequence[1:])]
instance["input_ids"] = list(chain(*sequence))
instance["token_type_ids"] = [speaker2 if i % 2 else speaker1 for i, s in enumerate(sequence) for _ in
s] # the last for is for repeating the speaker1 and speaker2 for all tokens
instance["mc_token_ids"] = len(instance["input_ids"]) - 1
instance["lm_labels"] = [-1] * len(instance["input_ids"])
if lm_labels:
instance["lm_labels"] = ([-1] * sum(len(s) for s in sequence[:-1])) + [-1] + sequence[-1][1:] # all -1 except for reply, reply is just the ids
return instance, sequence
def top_filtering(logits, top_k=0, top_p=0.0, threshold=-float('Inf'), filter_value=-float('Inf')):
""" Filter a distribution of logits using top-k, top-p (nucleus) and/or threshold filtering
Args:
logits: logits distribution shape (..., vocabulary size)
top_k: <=0: no filtering, >0: keep only top k tokens with highest probability.
top_p: <=0.0: no filtering, >0.0: keep only a subset S of candidates, where S is the smallest subset
whose total probability mass is greater than or equal to the threshold top_p.
In practice, we select the highest probability tokens whose cumulative probability mass exceeds
the threshold top_p.
threshold: a minimal threshold to keep logits
"""
top_k = min(top_k, logits.size(-1))
if top_k > 0:
# Remove all tokens with a probability less than the last token in the top-k tokens
indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
logits[indices_to_remove] = filter_value
if top_p > 0.0:
# Compute cumulative probabilities of sorted tokens
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cumulative_probabilities = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
# Remove tokens with cumulative probability above the threshold
sorted_indices_to_remove = cumulative_probabilities > top_p
# Shift the indices to the right to keep also the first token above the threshold
sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
sorted_indices_to_remove[..., 0] = 0
# Back to unsorted indices and set them to -infinity
indices_to_remove = sorted_indices[sorted_indices_to_remove]
logits[indices_to_remove] = filter_value
indices_to_remove = logits < threshold
logits[indices_to_remove] = filter_value
return logits
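# Minimal usage sketch (toy logits; the values are illustrative only):
#
#   logits = torch.tensor([1.0, 3.0, 0.5, 2.0])
#   filtered = top_filtering(logits.clone(), top_k=2)  # keeps 3.0 and 2.0, rest -> -inf
#   probs = F.softmax(filtered, dim=-1)                # masked entries get probability ~0
#   next_token = torch.multinomial(probs, 1)           # sample from the surviving tokens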
def get_emotions(dataset):
for data in tqdm(dataset['valid']):
utterances = data['utterances']
for utterance in utterances:
true_emotion = utterance["emotion"]
def calculate_metrics(args, model, tokenizer, dataset, special_tokens):
special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens)
all_blues = []
all_f1_scores = []
all_true_sentences = []
all_predicted_sentences = []
for data in tqdm(dataset['valid']):
personality = data['personality']
utterances = data['utterances']
        #utterance = utterances[-1]  # only the longest conversation
for utterance in utterances:
true_label = utterance['candidates'][-1]
history = utterance['history']
predicted_output = []
for i in range(args.max_length):
instance, _ = build_input_from_segments(personality, history, predicted_output, tokenizer, special_tokens, with_eos=False)
try:
if len(instance["input_ids"]) > 310:
truncated_history = [hist[:5] for hist in history]
instance, _ = build_input_from_segments(personality, truncated_history, predicted_output, tokenizer, special_tokens, with_eos=False)
input_ids = torch.tensor(instance["input_ids"], device=args.device).unsqueeze(0)
token_type_ids = torch.tensor(instance["token_type_ids"], device=args.device).unsqueeze(0)
logits = model(input_ids, token_type_ids=token_type_ids)
                except Exception:
                    print("exception")
                    continue
if "gpt2" == args.model:
logits = logits[0]
logits = logits[0, -1, :] / args.temperature
logits = top_filtering(logits, top_k=args.top_k, top_p=args.top_p)
probs = F.softmax(logits, dim=-1)
prev = torch.topk(probs, 1)[1] if args.no_sample else torch.multinomial(probs, 1)
# if i < args.min_length and prev.item() in special_tokens_ids:
# k=0
# while prev.item() in special_tokens_ids and k < 100:
# prev = torch.multinomial(probs, num_samples=1)
# k+=1
if i < args.min_length:
prev = torch.multinomial(probs, num_samples=1)
# if prev.item() in special_tokens_ids:
# break
predicted_output.append(prev.item())
predicted_sentence = tokenizer.decode(predicted_output, skip_special_tokens=True)
true_sentence = tokenizer.decode(true_label, skip_special_tokens=True)
#looks like zero gives the best results
all_predicted_sentences.append(predicted_sentence)
all_true_sentences.append(true_sentence)
bleus = [_bleu(predicted_sentence, [true_sentence], method="method"+str(i)) for i in [0,1,2,3,5]]
#bleu = _bleu(predicted_sentence, [true_sentence])
f1_score = _f1_score(predicted_sentence, [true_sentence])
#print(f1_score)
all_blues.append(bleus)
all_f1_scores.append(f1_score)
#compare predicted and label with bleu
print("avg bleu", np.array(all_blues).mean(axis=0))
print("avg f1 score", np.mean(all_f1_scores))
print("max bleu", np.array(all_blues).max(axis=0))
def run():
config_file = "configs/interact_config.json"
config = InteractConfig.from_json_file(config_file)
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__file__)
logger.info(pformat(config))
if config.model_checkpoint == "":
config.model_checkpoint = download_pretrained_model()
random.seed(config.seed)
torch.random.manual_seed(config.seed)
torch.cuda.manual_seed(config.seed)
logger.info("Get pretrained model and tokenizer")
tokenizer_class = GPT2Tokenizer if "gpt2" == config.model else OpenAIGPTTokenizer
tokenizer = tokenizer_class.from_pretrained(config.model_checkpoint)
model_class = GPT2LMHeadModel if "gpt2" == config.model else OpenAIGPTLMHeadModel
model = model_class.from_pretrained(config.model_checkpoint)
model.to(config.device)
model.eval()
dataset = get_dataset(tokenizer, config.dataset_path, config.dataset_cache)
special_tokens = ["<bos>", "<eos>", "<speaker1>", "<speaker2>", "<pad>"]
calculate_metrics(config, model, tokenizer, dataset, special_tokens)
if __name__ == "__main__":
run()
# ==== EmpTransfo-master/utils.py ====
# Copyright (c) 2019-present, HuggingFace Inc.
# All rights reserved. This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import json
import logging
import os
import tarfile
import tempfile
import re
import torch
from pytorch_pretrained_bert import cached_path
from collections import Counter
try:
from nltk.translate import bleu_score as nltkbleu
except ImportError:
# User doesn't have nltk installed, so we can't use it for bleu
# We'll just turn off things, but we might want to warn the user
nltkbleu = None
PERSONACHAT_URL = "https://s3.amazonaws.com/datasets.huggingface.co/personachat/personachat_self_original.json"
HF_FINETUNED_MODEL = "https://s3.amazonaws.com/models.huggingface.co/transfer-learning-chatbot/finetuned_chatbot_gpt.tar.gz"
logger = logging.getLogger(__file__)
re_art = re.compile(r'\b(a|an|the)\b')
re_punc = re.compile(r'[!"#$%&()*+,-./:;<=>?@\[\]\\^`{|}~_\']')
def normalize_answer(s):
"""Lower text and remove punctuation, articles and extra whitespace."""
def remove_articles(text):
return re_art.sub(' ', text)
def white_space_fix(text):
return ' '.join(text.split())
def remove_punc(text):
return re_punc.sub(' ', text) # convert punctuation to spaces
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
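# e.g. normalize_answer("The Quick, Brown Fox!") -> "quick brown fox"
# (lower-cased, punctuation and articles replaced by spaces, whitespace collapsed)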
def download_pretrained_model():
""" Download and extract finetuned model from S3 """
resolved_archive_file = cached_path(HF_FINETUNED_MODEL)
tempdir = tempfile.mkdtemp()
logger.info("extracting archive file {} to temp dir {}".format(resolved_archive_file, tempdir))
with tarfile.open(resolved_archive_file, 'r:gz') as archive:
archive.extractall(tempdir)
return tempdir
def get_dataset(tokenizer, dataset_path, dataset_cache=None):
""" Get PERSONACHAT from S3 """
dataset_path = dataset_path or PERSONACHAT_URL
    dataset_cache = dataset_cache + '_' + type(tokenizer).__name__  # To avoid using GPT cache for GPT-2 and vice-versa
if dataset_cache and os.path.isfile(dataset_cache):
logger.info("Load tokenized dataset from cache at %s", dataset_cache)
dataset = torch.load(dataset_cache)
else:
logger.info("Download dataset from %s", dataset_path)
personachat_file = cached_path(dataset_path)
with open(personachat_file, "r", encoding="utf-8") as f:
dataset = json.loads(f.read())
logger.info("Tokenize and encode the dataset")
def tokenize(obj):
if isinstance(obj, str):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
if isinstance(obj, dict):
return dict((n, tokenize(o)) for n, o in obj.items())
return list(tokenize(o) for o in obj)
dataset = tokenize(dataset)
if dataset_cache:
torch.save(dataset, dataset_cache)
return dataset
def get_dataset_for_daily_dialog(tokenizer, dataset_path, dataset_cache=None, special_tokens=None):
""" Get PERSONACHAT from S3 """
dataset_path = dataset_path or PERSONACHAT_URL
    dataset_cache = dataset_cache + '_' + type(tokenizer).__name__  # To avoid using GPT cache for GPT-2 and vice-versa
if dataset_cache and os.path.isfile(dataset_cache):
logger.info("Load tokenized dataset from cache at %s", dataset_cache)
dataset = torch.load(dataset_cache)
else:
logger.info("Download dataset from %s", dataset_path)
personachat_file = cached_path(dataset_path)
with open(personachat_file, "r", encoding="utf-8") as f:
dataset = json.loads(f.read())
logger.info("Tokenize and encode the dataset")
def tokenize(obj):
if isinstance(obj, str):
if obj in special_tokens:
return tokenizer.convert_tokens_to_ids(obj)
else:
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
if isinstance(obj, dict):
return dict((n, tokenize(o)) for n, o in obj.items())
return list(tokenize(o) for o in obj)
dataset = tokenize(dataset)
if dataset_cache:
torch.save(dataset, dataset_cache)
return dataset
def get_dataset_personalities(tokenizer, dataset_path, dataset_cache=None):
""" Get personalities from PERSONACHAT """
dataset_path = dataset_path or PERSONACHAT_URL
    dataset_cache = dataset_cache + '_' + type(tokenizer).__name__  # To avoid using GPT cache for GPT-2 and vice-versa
if os.path.isfile(dataset_cache):
logger.info("Load tokenized dataset from cache at %s", dataset_cache)
personachat = torch.load(dataset_cache)
else:
logger.info("Download PERSONACHAT dataset from %s", dataset_path)
personachat_file = cached_path(dataset_path)
with open(personachat_file, "r", encoding="utf-8") as f:
personachat = json.loads(f.read())
logger.info("Tokenize and encode the dataset")
def tokenize(obj):
if isinstance(obj, str):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
if isinstance(obj, dict):
return dict((n, tokenize(o)) for n, o in obj.items())
return list(tokenize(o) for o in obj)
personachat = tokenize(personachat)
torch.save(personachat, dataset_cache)
logger.info("Filter personalities")
personalities = []
for dataset in personachat.values():
for dialog in dataset:
personalities.append(dialog["personality"])
logger.info("Gathered {} personalities".format(len(personalities)))
return personalities
def _prec_recall_f1_score(pred_items, gold_items):
"""
Compute precision, recall and f1 given a set of gold and prediction items.
:param pred_items: iterable of predicted values
:param gold_items: iterable of gold values
:return: tuple (p, r, f1) for precision, recall, f1
"""
common = Counter(gold_items) & Counter(pred_items)
num_same = sum(common.values())
if num_same == 0:
return 0, 0, 0
precision = 1.0 * num_same / len(pred_items)
recall = 1.0 * num_same / len(gold_items)
f1 = (2 * precision * recall) / (precision + recall)
return precision, recall, f1
def _f1_score(guess, answers):
"""Return the max F1 score between the guess and *any* answer."""
if guess is None or answers is None:
return 0
g_tokens = normalize_answer(guess).split()
    scores = [
        _prec_recall_f1_score(g_tokens, normalize_answer(a).split()) for a in answers
    ]
return max(f1 for p, r, f1 in scores)
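# e.g. _f1_score("the quick fox", ["a quick fox", "slow dog"]) -> 1.0, because after
# normalization both the guess and the first answer reduce to the tokens "quick fox".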
def _bleu(guess, answers, method=None):
"""Compute approximate BLEU score between guess and a set of answers."""
if nltkbleu is None:
# bleu library not installed, just return a default value
return None
# Warning: BLEU calculation *should* include proper tokenization and
# punctuation etc. We're using the normalize_answer for everything though,
# so we're over-estimating our BLEU scores. Also note that NLTK's bleu is
# going to be slower than fairseq's (which is written in C), but fairseq's
# requires that everything be in arrays of ints (i.e. as tensors). NLTK's
# works with strings, which is better suited for this module.
if method == "method0":
smoothing_func = nltkbleu.SmoothingFunction(epsilon=1e-12).method0
elif method == "method1":
smoothing_func = nltkbleu.SmoothingFunction(epsilon=1e-12).method1
elif method == "method2":
smoothing_func = nltkbleu.SmoothingFunction(epsilon=1e-12).method2
elif method == "method3":
smoothing_func = nltkbleu.SmoothingFunction(epsilon=1e-12).method3
elif method == "method4":
smoothing_func = nltkbleu.SmoothingFunction(epsilon=1e-12).method4
elif method == "method5":
smoothing_func = nltkbleu.SmoothingFunction(epsilon=1e-12).method5
elif method == "method6":
smoothing_func = nltkbleu.SmoothingFunction(epsilon=1e-12).method6
elif method == "method7":
smoothing_func = nltkbleu.SmoothingFunction(epsilon=1e-12).method7
else:
smoothing_func = nltkbleu.SmoothingFunction(epsilon=1e-12).method3
return nltkbleu.sentence_bleu(
[normalize_answer(a).split(" ") for a in answers],
normalize_answer(guess).split(" "),
smoothing_function=smoothing_func,
)
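# Usage sketch (toy strings; returns None when nltk is not installed):
#
#   _bleu("a quick fox", ["the quick fox"], method="method3")
#   # -> sentence-level BLEU of the normalized guess against the normalized answers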
class AttrDict(dict):
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
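# Usage sketch: attribute-style access over a plain dict (hypothetical keys):
#
#   d = AttrDict(lr=5e-5, n_epochs=2)
#   d.lr  # -> 5e-05, the same entry as d["lr"]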
# ==== EmpTransfo-master/train_emotion_recognition.py ====
# Copyright (c) 2019-present, HuggingFace Inc.
# All rights reserved. This source code is licensed under the BSD-style license found in the LICENSE file in the root directory of this source tree.
import os
import math
import logging
from pprint import pformat
from argparse import ArgumentParser
from collections import defaultdict
from itertools import chain
import torch
from torch.nn.parallel import DistributedDataParallel
from torch.utils.data import DataLoader, TensorDataset
from ignite.engine import Engine, Events
from ignite.handlers import ModelCheckpoint
from ignite.metrics import Accuracy, Recall, Loss, MetricsLambda, RunningAverage, Precision, ConfusionMatrix
from ignite.contrib.handlers import ProgressBar, PiecewiseLinear
from ignite.contrib.handlers.tensorboard_logger import TensorboardLogger, OutputHandler, OptimizerParamsHandler
from config import Config
from pytorch_pretrained_bert import (OpenAIAdam, OpenAIGPTDoubleHeadLMEmotionRecognitionModel, OpenAIGPTTokenizer,
GPT2DoubleHeadsModel, GPT2Tokenizer, WEIGHTS_NAME, CONFIG_NAME)
from utils import get_dataset, get_dataset_for_daily_dialog
SPECIAL_TOKENS = ["<bos>", "<eos>", "<speaker1>", "<speaker2>",
"<no_emotion>", "<happiness>", "<surprise>", "<sadness>", "<disgust>", "<anger>", "<fear>",
"<directive>", "<inform>", "<commissive>", "<question>",
"<pad>"]
MODEL_INPUTS = ["input_ids", "mc_token_ids", "lm_labels", "mc_labels", "token_type_ids", "token_emotion_ids"]
PADDED_INPUTS = ["input_ids", "lm_labels", "token_type_ids", "token_emotion_ids"]
logger = logging.getLogger(__file__)
def average_distributed_scalar(scalar, config):
""" Average a scalar over the nodes if we are in distributed training. We use this for distributed evaluation. """
if config.local_rank == -1:
return scalar
scalar_t = torch.tensor(scalar, dtype=torch.float, device=config.device) / torch.distributed.get_world_size()
torch.distributed.all_reduce(scalar_t, op=torch.distributed.ReduceOp.SUM)
return scalar_t.item()
def pad_dataset(dataset, padding=0):
""" Pad the dataset. This could be optimized by defining a Dataset class and padd only batches but this is simpler. """
max_l = max(len(x) for x in dataset["input_ids"])
for name in PADDED_INPUTS:
dataset[name] = [x + [padding if name != "lm_labels" else -1] * (max_l - len(x)) for x in dataset[name]]
return dataset
def get_emotion_label(tokenizer, candidate_emotion):
_, _, _, _, no_emotion_id, happiness_id, surprise_id, sadness_id, disgust_id, anger_id, fear_id, _, _, _, _, _ = tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS)
if candidate_emotion == happiness_id:
return 0
elif candidate_emotion == surprise_id:
return 1
elif candidate_emotion == sadness_id:
return 2
elif candidate_emotion == disgust_id:
return 3
elif candidate_emotion == anger_id:
return 4
elif candidate_emotion == fear_id:
return 5
elif candidate_emotion == no_emotion_id:
return 6
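# Equivalent lookup-table sketch for the chain above (same id -> label mapping):
#
#   emotion_label = {happiness_id: 0, surprise_id: 1, sadness_id: 2, disgust_id: 3,
#                    anger_id: 4, fear_id: 5, no_emotion_id: 6}.get(candidate_emotion)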
def build_input_from_segments(history, emotions, reply, true_emotion, tokenizer, with_eos=True):
""" Build a sequence of input from 3 segments: persona, history and last reply """
bos, eos, speaker1, speaker2 = tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS[:4])
#tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS[-1])
instance = {}
# sequence = [[bos] + history[0] + list(chain(*history[1:]))] + [reply + ([eos] if with_eos else [])] #seq = [personas, history, reply] concatenate all persona sentences
sequence = [[bos] + history[0]] + history[1:] + [reply + ([eos] if with_eos else [])]
sequence = [[speaker2 if (len(sequence)-i) % 2 else speaker1] + s for i, s in enumerate(sequence)]
instance["input_ids"] = list(chain(*sequence))
instance["token_type_ids"] = [speaker2 if i % 2 else speaker1 for i, s in enumerate(sequence) for _ in s] # the last for is for repeating the speaker1 and speaker2 for all tokens
#instance["token_emotion_ids"] = [emotions[i] for i, s in enumerate(sequence[:-1]) for _ in s] + [true_emotion] * len(sequence[-1])
instance["token_emotion_ids"] = [emotions[i] for i, s in enumerate(sequence[:-1]) for _ in s]
instance["mc_token_ids"] = len(instance["input_ids"]) - 1
instance["mc_labels"] = get_emotion_label(tokenizer, true_emotion)
instance["lm_labels"] = ([-1] * sum(len(s) for s in sequence[:-1])) + [-1] + sequence[-1][1:] #all -1 except for reply, reply is just the ids
return instance, sequence
def get_data_loaders(config, tokenizer):
""" Prepare the dataset for training and evaluation """
personachat = get_dataset_for_daily_dialog(tokenizer, config.dataset_path, config.dataset_cache, SPECIAL_TOKENS)
# personachat["train"] = personachat["train"][:100]
# personachat["valid"] = personachat["valid"][:10]
logger.info("Build inputs and labels")
datasets = {"train": defaultdict(list), "valid": defaultdict(list)}
gpu_max_length = 310
for dataset_name, dataset in personachat.items():
        num_candidates = 2  # len(dataset[0]["utterances"][0]["candidates"])
if config.num_candidates > 0 and dataset_name == 'train':
num_candidates = min(config.num_candidates, num_candidates)
for dialog in dataset:
for utterance in dialog["utterances"]:
history = utterance["history"][-(2 * config.max_history + 1):]
emotions = utterance["emotion"][-(2 * config.max_history + 1):]
reply = utterance["candidates"][-1]
true_emotion = utterance['candidates_emotions'][-1]
if true_emotion == tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS)[4]:
continue
instance, _ = build_input_from_segments(history,
emotions,
reply,
true_emotion,
tokenizer)
if len(instance["input_ids"]) > gpu_max_length:
truncated_history = [hist[:10] for hist in history]
truncated_candidate = reply[:10]
true_emotion = utterance['candidates_emotions'][-1]
instance, _ = build_input_from_segments(truncated_history,
emotions,
truncated_candidate,
true_emotion,
tokenizer)
for input_name, input_array in instance.items():
datasets[dataset_name][input_name].append(input_array)
datasets[dataset_name]["n_candidates"] = num_candidates
logger.info("Pad inputs and convert to Tensor")
tensor_datasets = {"train": [], "valid": []}
for dataset_name, dataset in datasets.items():
dataset = pad_dataset(dataset, padding=tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS[-1]))
for input_name in MODEL_INPUTS:
tensor = torch.tensor(dataset[input_name])
#if input_name != "mc_labels":
# tensor = tensor.view((-1, datasets[dataset_name]["n_candidates"]) + tensor.shape[1:])
tensor_datasets[dataset_name].append(tensor)
logger.info("Build train and validation dataloaders")
train_dataset, valid_dataset = TensorDataset(*tensor_datasets["train"]), TensorDataset(*tensor_datasets["valid"])
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset) if config.distributed else None
valid_sampler = torch.utils.data.distributed.DistributedSampler(valid_dataset) if config.distributed else None
train_loader = DataLoader(train_dataset, sampler=train_sampler, batch_size=config.train_batch_size, shuffle=False)
valid_loader = DataLoader(valid_dataset, sampler=valid_sampler, batch_size=config.valid_batch_size, shuffle=False)
logger.info("Train dataset (Batch, Candidates, Seq length): {}".format(train_dataset.tensors[0].shape))
logger.info("Valid dataset (Batch, Candidates, Seq length): {}".format(valid_dataset.tensors[0].shape))
return train_loader, valid_loader, train_sampler, valid_sampler
def train():
config_file = "configs/train_emotion_recognition_config.json"
config = Config.from_json_file(config_file)
# logging is set to INFO (resp. WARN) for main (resp. auxiliary) process. logger.info => log main process only, logger.warning => log all processes
logging.basicConfig(level=logging.INFO if config.local_rank in [-1, 0] else logging.WARN)
logger.warning("Running process %d", config.local_rank) # This is a logger.warning: it will be printed by all distributed processes
logger.info("Arguments: %s", pformat(config))
# Initialize distributed training if needed
config.distributed = (config.local_rank != -1)
if config.distributed:
torch.cuda.set_device(config.local_rank)
config.device = torch.device("cuda", config.local_rank)
torch.distributed.init_process_group(backend='nccl', init_method='env://')
logger.info("Prepare tokenizer, pretrained model and optimizer - add special tokens for fine-tuning")
tokenizer_class = GPT2Tokenizer if "gpt2" in config.model_checkpoint else OpenAIGPTTokenizer
tokenizer = tokenizer_class.from_pretrained(config.model_checkpoint)
model_class = OpenAIGPTDoubleHeadLMEmotionRecognitionModel
model = model_class.from_pretrained(config.model_checkpoint)
tokenizer.set_special_tokens(SPECIAL_TOKENS)
model.set_num_special_tokens(len(SPECIAL_TOKENS))
model.to(config.device)
optimizer = OpenAIAdam(model.parameters(), lr=config.lr)
# Prepare model for FP16 and distributed training if needed (order is important, distributed should be the last)
if config.fp16:
from apex import amp # Apex is only required if we use fp16 training
model, optimizer = amp.initialize(model, optimizer, opt_level=config.fp16)
if config.distributed:
model = DistributedDataParallel(model, device_ids=[config.local_rank], output_device=config.local_rank)
logger.info("Prepare datasets")
train_loader, val_loader, train_sampler, valid_sampler = get_data_loaders(config, tokenizer)
# Training function and trainer
def update(engine, batch):
model.train()
input_ids, mc_token_ids, lm_labels, mc_labels, token_type_ids, token_emotion_ids = tuple(input_tensor.to(config.device) for input_tensor in batch)
#token_emotion_ids = None
lm_loss, mc_loss = model(input_ids, mc_token_ids, lm_labels, mc_labels, token_type_ids, token_emotion_ids)
loss = (lm_loss * config.lm_coef + mc_loss * config.mc_coef) / config.gradient_accumulation_steps
if config.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), config.max_norm)
else:
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), config.max_norm)
if engine.state.iteration % config.gradient_accumulation_steps == 0:
optimizer.step()
optimizer.zero_grad()
return loss.item()
trainer = Engine(update)
# Evaluation function and evaluator (evaluator output is the input of the metrics)
def inference(engine, batch):
model.eval()
with torch.no_grad():
batch = tuple(input_tensor.to(config.device) for input_tensor in batch)
input_ids, mc_token_ids, lm_labels, mc_labels, token_type_ids, token_emotion_ids = batch
#token_emotion_ids = None
model_outputs = model(input_ids, mc_token_ids, token_type_ids=token_type_ids, token_emotion_ids=token_emotion_ids)
lm_logits, mc_logits = model_outputs[0], model_outputs[1] # So we can also use GPT2 outputs
lm_logits_flat_shifted = lm_logits[..., :-1, :].contiguous().view(-1, lm_logits.size(-1))
lm_labels_flat_shifted = lm_labels[..., 1:].contiguous().view(-1)
return (lm_logits_flat_shifted, mc_logits), (lm_labels_flat_shifted, mc_labels)
evaluator = Engine(inference)
# Attach evaluation to trainer: we evaluate when we start the training and at the end of each epoch
trainer.add_event_handler(Events.EPOCH_COMPLETED, lambda _: evaluator.run(val_loader))
if config.n_epochs < 1:
trainer.add_event_handler(Events.COMPLETED, lambda _: evaluator.run(val_loader))
if config.eval_before_start:
trainer.add_event_handler(Events.STARTED, lambda _: evaluator.run(val_loader))
# Make sure distributed data samplers split the dataset nicely between the distributed processes
if config.distributed:
trainer.add_event_handler(Events.EPOCH_STARTED, lambda engine: train_sampler.set_epoch(engine.state.epoch))
evaluator.add_event_handler(Events.EPOCH_STARTED, lambda engine: valid_sampler.set_epoch(engine.state.epoch))
# Linearly decrease the learning rate from lr to zero
scheduler = PiecewiseLinear(optimizer, "lr", [(0, config.lr), (config.n_epochs * len(train_loader), 0.0)])
trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)
# Prepare metrics - note how we compute distributed metrics
RunningAverage(output_transform=lambda x: x).attach(trainer, "loss")
metrics = {"nll": Loss(torch.nn.CrossEntropyLoss(ignore_index=-1), output_transform=lambda x: (x[0][0], x[1][0])),
"accuracy": Accuracy(output_transform=lambda x: (x[0][1], x[1][1]))}
metrics.update({"precision": Precision(output_transform=lambda x: (x[0][1], x[1][1])),
"recall": Recall(output_transform=lambda x: (x[0][1], x[1][1]))})
metrics.update({"average_nll": MetricsLambda(average_distributed_scalar, metrics["nll"], config),
"average_accuracy": MetricsLambda(average_distributed_scalar, metrics["accuracy"], config)})
metrics.update({"confusion_matrix": ConfusionMatrix(num_classes=6, output_transform=lambda x: (x[0][1], x[1][1]))})
metrics["average_ppl"] = MetricsLambda(math.exp, metrics["average_nll"])
for name, metric in metrics.items():
metric.attach(evaluator, name)
# On the main process: add progress bar, tensorboard, checkpoints and save model, configuration and tokenizer before we start to train
if config.local_rank in [-1, 0]:
pbar = ProgressBar(persist=True)
pbar.attach(trainer, metric_names=["loss"])
evaluator.add_event_handler(Events.COMPLETED, lambda _: pbar.log_message("Validation: %s" % pformat(evaluator.state.metrics)))
tb_logger = TensorboardLogger(log_dir=config.log_dir)
tb_logger.attach(trainer, log_handler=OutputHandler(tag="training", metric_names=["loss"]), event_name=Events.ITERATION_COMPLETED)
tb_logger.attach(trainer, log_handler=OptimizerParamsHandler(optimizer), event_name=Events.ITERATION_STARTED)
tb_logger.attach(evaluator, log_handler=OutputHandler(tag="validation", metric_names=list(metrics.keys()), another_engine=trainer), event_name=Events.EPOCH_COMPLETED)
checkpoint_handler = ModelCheckpoint(tb_logger.writer.log_dir, 'checkpoint', save_interval=1, n_saved=3)
        trainer.add_event_handler(Events.EPOCH_COMPLETED, checkpoint_handler, {'mymodel': getattr(model, 'module', model)})  # "getattr" takes care of distributed encapsulation
torch.save(config, tb_logger.writer.log_dir + '/model_training_args.bin')
getattr(model, 'module', model).config.to_json_file(os.path.join(tb_logger.writer.log_dir, CONFIG_NAME))
tokenizer.save_vocabulary(tb_logger.writer.log_dir)
# Run the training
trainer.run(train_loader, max_epochs=config.n_epochs)
# On the main process: close tensorboard logger and rename the last checkpoint (for easy re-loading with OpenAIGPTModel.from_pretrained method)
if config.local_rank in [-1, 0] and config.n_epochs > 0:
os.rename(checkpoint_handler._saved[-1][1][-1], os.path.join(tb_logger.writer.log_dir, WEIGHTS_NAME)) # TODO: PR in ignite to have better access to saved file paths (cleaner)
tb_logger.close()
if __name__ == "__main__":
train()
# ==== EmpTransfo-master/interact.py ====
# Copyright (c) 2019-present, HuggingFace Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import logging
import random
from argparse import ArgumentParser
from itertools import chain
from pprint import pformat
import torch
import torch.nn.functional as F
from config import InteractConfig
from pytorch_pretrained_bert import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer, GPT2LMHeadModel, GPT2Tokenizer, \
BertTokenizer
from pytorch_pretrained_bert.modeling import BertLMHeadModel
from utils import get_dataset_personalities, download_pretrained_model, get_dataset
def build_input_from_segments(history, reply, tokenizer, SPECIAL_TOKENS, lm_labels=False, with_eos=True):
""" Build a sequence of input from 3 segments: persona, history and last reply """
bos, eos, speaker1, speaker2 = tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS[:-1])
persona = []
instance = {}
sequence = [[bos] + list(chain(*persona))] + history + [
reply + ([eos] if with_eos else [])] # seq = [personas, history, reply] concatenate all persona sentences
sequence = [sequence[0]] + [[speaker2 if (len(sequence) - i) % 2 else speaker1] + s for i, s in
enumerate(sequence[1:])]
instance["input_ids"] = list(chain(*sequence))
instance["token_type_ids"] = [speaker2 if i % 2 else speaker1 for i, s in enumerate(sequence) for _ in
s] # the last for is for repeating the speaker1 and speaker2 for all tokens
instance["mc_token_ids"] = len(instance["input_ids"]) - 1
instance["lm_labels"] = [-1] * len(instance["input_ids"])
if lm_labels:
instance["lm_labels"] = ([-1] * sum(len(s) for s in sequence[:-1])) + [-1] + sequence[-1][1:] # all -1 except for reply, reply is just the ids
return instance, sequence
def top_filtering(logits, top_k=0, top_p=0.0, threshold=-float('Inf'), filter_value=-float('Inf')):
""" Filter a distribution of logits using top-k, top-p (nucleus) and/or threshold filtering
Args:
logits: logits distribution shape (..., vocabulary size)
top_k: <=0: no filtering, >0: keep only top k tokens with highest probability.
top_p: <=0.0: no filtering, >0.0: keep only a subset S of candidates, where S is the smallest subset
whose total probability mass is greater than or equal to the threshold top_p.
In practice, we select the highest probability tokens whose cumulative probability mass exceeds
the threshold top_p.
threshold: a minimal threshold to keep logits
"""
top_k = min(top_k, logits.size(-1))
if top_k > 0:
# Remove all tokens with a probability less than the last token in the top-k tokens
indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
logits[indices_to_remove] = filter_value
if top_p > 0.0:
# Compute cumulative probabilities of sorted tokens
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cumulative_probabilities = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
# Remove tokens with cumulative probability above the threshold
sorted_indices_to_remove = cumulative_probabilities > top_p
# Shift the indices to the right to keep also the first token above the threshold
sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
sorted_indices_to_remove[..., 0] = 0
# Back to unsorted indices and set them to -infinity
indices_to_remove = sorted_indices[sorted_indices_to_remove]
logits[indices_to_remove] = filter_value
indices_to_remove = logits < threshold
logits[indices_to_remove] = filter_value
return logits
def sample_sequence(history, tokenizer, model, args, SPECIAL_TOKENS, current_output=None):
special_tokens_ids = tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS)
if current_output is None:
current_output = []
for i in range(args.max_length):
instance, sequence = build_input_from_segments(history, current_output, tokenizer, SPECIAL_TOKENS,
with_eos=False)
input_ids = torch.tensor(instance["input_ids"], device=args.device).unsqueeze(0)
token_type_ids = torch.tensor(instance["token_type_ids"], device=args.device).unsqueeze(0)
logits = model(input_ids, token_type_ids=token_type_ids)
if "gpt2" == args.model:
logits = logits[0]
logits = logits[0, -1, :] / args.temperature
logits = top_filtering(logits, top_k=args.top_k, top_p=args.top_p)
probs = F.softmax(logits, dim=-1)
prev = torch.topk(probs, 1)[1] if args.no_sample else torch.multinomial(probs, 1)
        if i < args.min_length and prev.item() in special_tokens_ids:
            while prev.item() in special_tokens_ids:
                if probs.max().item() == 1:
                    break  # avoid an endless loop when a special token holds all the probability mass
                prev = torch.multinomial(probs, num_samples=1)
        if prev.item() in special_tokens_ids:
            break
current_output.append(prev.item())
return current_output
def run():
config_file = "configs/interact_config.json"
config = InteractConfig.from_json_file(config_file)
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__file__)
logger.info(pformat(config))
if config.model_checkpoint == "":
config.model_checkpoint = download_pretrained_model()
torch.random.manual_seed(config.seed)
torch.cuda.manual_seed(config.seed)
logger.info("Get pretrained model and tokenizer")
if config.model == "bert":
tokenizer_class = BertTokenizer
model_class = BertLMHeadModel
elif config.model == "gpt2":
tokenizer_class = GPT2Tokenizer
model_class = GPT2LMHeadModel
else:
tokenizer_class = OpenAIGPTTokenizer
model_class = OpenAIGPTLMHeadModel
SPECIAL_TOKENS = ["<bos>", "<eos>", "<speaker1>", "<speaker2>", "<pad>"]
tokenizer = tokenizer_class.from_pretrained(config.model_checkpoint)
model = model_class.from_pretrained(config.model_checkpoint)
model.to(config.device)
model.eval()
history = []
while True:
raw_text = input(">>> ")
while not raw_text:
print('Prompt should not be empty!')
raw_text = input(">>> ")
history.append(tokenizer.encode(raw_text))
with torch.no_grad():
out_ids = sample_sequence(history, tokenizer, model, config, SPECIAL_TOKENS)
history.append(out_ids)
history = history[-(2 * config.max_history + 1):]
out_text = tokenizer.decode(out_ids, skip_special_tokens=True)
print(out_text)
if __name__ == "__main__":
run()
# ==== EmpTransfo-master/config.py ====
import json
class Config:
def __init__(self,
dataset_path="",
dataset_cache="",
model_checkpoint="",
num_candidates=2,
do_lower_case=True,
max_history=2,
train_batch_size=4,
valid_batch_size=4,
gradient_accumulation_steps=8,
lr=5e-5,
warmup_proportion=0.1,
lm_coef=1,
mc_coef=1,
max_norm=10,
n_epochs=2,
personality_permutations=1,
eval_before_start=False,
device="cpu",
fp16="",
local_rank=-1,
log_dir="",
):
self.dataset_path = dataset_path
self.dataset_cache = dataset_cache
self.model_checkpoint = model_checkpoint
self.num_candidates = num_candidates
self.do_lower_case = do_lower_case
self.max_history = max_history
self.train_batch_size = train_batch_size
self.valid_batch_size = valid_batch_size
self.gradient_accumulation_steps = gradient_accumulation_steps
self.lr = lr
self.warmup_proportion = warmup_proportion
self.lm_coef = lm_coef
self.mc_coef = mc_coef
self.max_norm = max_norm
self.n_epochs = n_epochs
self.personality_permutations = personality_permutations
self.eval_before_start = eval_before_start
self.device = device
self.fp16 = fp16
self.local_rank = local_rank
self.log_dir = log_dir
@classmethod
def from_dict(cls, json_object):
config = Config()
for key in json_object:
config.__dict__[key] = json_object[key]
return config
@classmethod
def from_json_file(cls, json_file):
with open(json_file) as f:
config_json = f.read()
return cls.from_dict(json.loads(config_json))
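# Usage sketch (assumes a JSON file whose keys match the constructor arguments, e.g.
# the configs/train_full_config.json referenced by the training scripts):
#
#   config = Config.from_json_file("configs/train_full_config.json")
#   config.train_batch_size  # value from the JSON, or the constructor default (4)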
class InteractConfig:
def __init__(self,
dataset_path="",
model="",
dataset_cache="",
model_checkpoint="",
max_history="",
device="",
no_sample="",
max_length="",
min_length="",
seed="",
temperature="",
top_k="",
top_p=""
):
self.dataset_path = dataset_path
self.model = model
self.dataset_cache = dataset_cache
self.model_checkpoint = model_checkpoint
self.max_history = max_history
self.device = device
self.no_sample = no_sample
self.max_length = max_length
self.min_length = min_length
self.seed = seed
self.temperature = temperature
self.top_k = top_k
self.top_p = top_p
@classmethod
def from_dict(cls, json_object):
config = InteractConfig()
for key in json_object:
config.__dict__[key] = json_object[key]
return config
@classmethod
def from_json_file(cls, json_file):
with open(json_file) as f:
config_json = f.read()
return cls.from_dict(json.loads(config_json))
# ==== EmpTransfo-master/train.py ====
# Copyright (c) 2019-present, HuggingFace Inc.
# All rights reserved. This source code is licensed under the BSD-style license found in the LICENSE file in the root directory of this source tree.
import os
import math
import logging
from pprint import pformat
from argparse import ArgumentParser
from collections import defaultdict
from itertools import chain
from config import Config
import torch
from torch.nn.parallel import DistributedDataParallel
from torch.utils.data import DataLoader, TensorDataset
from ignite.engine import Engine, Events
from ignite.handlers import ModelCheckpoint
from ignite.metrics import Accuracy, Loss, MetricsLambda, RunningAverage
from ignite.contrib.handlers import ProgressBar, PiecewiseLinear
from ignite.contrib.handlers.tensorboard_logger import TensorboardLogger, OutputHandler, OptimizerParamsHandler
from pytorch_pretrained_bert import (OpenAIAdam, OpenAIGPTDoubleHeadsModel, OpenAIGPTTokenizer,
GPT2DoubleHeadsModel, GPT2Tokenizer, WEIGHTS_NAME, CONFIG_NAME,
BertModel, BertTokenizer)
from utils import get_dataset
SPECIAL_TOKENS = ["<bos>", "<eos>", "<speaker1>", "<speaker2>", "<pad>"]
MODEL_INPUTS = ["input_ids", "mc_token_ids", "lm_labels", "mc_labels", "token_type_ids"]
PADDED_INPUTS = ["input_ids", "lm_labels", "token_type_ids"]
logger = logging.getLogger(__file__)
def average_distributed_scalar(scalar, config):
""" Average a scalar over the nodes if we are in distributed training. We use this for distributed evaluation. """
if config.local_rank == -1:
return scalar
scalar_t = torch.tensor(scalar, dtype=torch.float, device=config.device) / torch.distributed.get_world_size()
torch.distributed.all_reduce(scalar_t, op=torch.distributed.ReduceOp.SUM)
return scalar_t.item()
def pad_dataset(dataset, padding=0):
""" Pad the dataset. This could be optimized by defining a Dataset class and padd only batches but this is simpler. """
max_l = max(len(x) for x in dataset["input_ids"])
for name in PADDED_INPUTS:
dataset[name] = [x + [padding if name != "lm_labels" else -1] * (max_l - len(x)) for x in dataset[name]]
return dataset
def build_input_from_segments(history, reply, tokenizer, lm_labels=False, with_eos=True):
""" Build a sequence of input from 3 segments: persona, history and last reply """
bos, eos, speaker1, speaker2 = tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS[:-1])
instance = {}
    sequence = [[bos] + history[0]] + history[1:] + [reply + ([eos] if with_eos else [])]
    sequence = [sequence[0]] + [[speaker2 if (len(sequence) - i) % 2 else speaker1] + s for i, s in enumerate(sequence[1:])]
instance["input_ids"] = list(chain(*sequence))
instance["token_type_ids"] = [speaker2 if i % 2 else speaker1 for i, s in enumerate(sequence) for _ in s] # the last for is for repeating the speaker1 and speaker2 for all tokens
instance["mc_token_ids"] = len(instance["input_ids"]) - 1
instance["lm_labels"] = [-1] * len(instance["input_ids"])
if lm_labels:
instance["lm_labels"] = ([-1] * sum(len(s) for s in sequence[:-1])) + [-1] + sequence[-1][1:] #all -1 except for reply, reply is just the ids
return instance, sequence
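# Illustrative layout (not executed; a two-turn history h0, h1 is assumed):
#   tokens:         [<bos> h0] [<speaker2> h1] [<speaker1> reply <eos>]
#   token_type_ids: speaker1 ids over segment 0, speaker2 over segment 1,
#                   speaker1 over the reply segment
#   lm_labels:      -1 everywhere except the reply tokens (when lm_labels=True)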
def get_data_loaders(config, tokenizer):
""" Prepare the dataset for training and evaluation """
personachat = get_dataset(tokenizer, config.dataset_path, config.dataset_cache)
logger.info("Build inputs and labels")
datasets = {"train": defaultdict(list), "valid": defaultdict(list)}
    gpu_max_length = 310  # this depends on the GPU memory size; with more GPU memory you can increase this to include longer inputs
for dataset_name, dataset in personachat.items():
num_candidates = len(dataset[0]["utterances"][0]["candidates"])
if config.num_candidates > 0 and dataset_name == 'train':
num_candidates = min(config.num_candidates, num_candidates)
for dialog in dataset:
for utterance in dialog["utterances"]:
history = utterance["history"][-(2*config.max_history+1):]
for j, candidate in enumerate(utterance["candidates"][-num_candidates:]):
                    lm_labels = bool(j == num_candidates - 1)  # the true label is always the last one in the list of candidates
instance, _ = build_input_from_segments(history, candidate, tokenizer, lm_labels)
if len(instance["input_ids"]) > gpu_max_length:
truncated_history = [hist[:10] for hist in history]
truncated_candidate = candidate[:10]
instance, _ = build_input_from_segments(truncated_history, truncated_candidate, tokenizer, lm_labels)
for input_name, input_array in instance.items():
datasets[dataset_name][input_name].append(input_array)
datasets[dataset_name]["mc_labels"].append(num_candidates - 1)
datasets[dataset_name]["n_candidates"] = num_candidates
logger.info("Pad inputs and convert to Tensor")
tensor_datasets = {"train": [], "valid": []}
for dataset_name, dataset in datasets.items():
dataset = pad_dataset(dataset, padding=tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS[-1]))
for input_name in MODEL_INPUTS:
tensor = torch.tensor(dataset[input_name])
if input_name != "mc_labels":
tensor = tensor.view((-1, datasets[dataset_name]["n_candidates"]) + tensor.shape[1:])
tensor_datasets[dataset_name].append(tensor)
logger.info("Build train and validation dataloaders")
train_dataset, valid_dataset = TensorDataset(*tensor_datasets["train"]), TensorDataset(*tensor_datasets["valid"])
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset) if config.distributed else None
valid_sampler = torch.utils.data.distributed.DistributedSampler(valid_dataset) if config.distributed else None
train_loader = DataLoader(train_dataset, sampler=train_sampler, batch_size=config.train_batch_size, shuffle=False)
valid_loader = DataLoader(valid_dataset, sampler=valid_sampler, batch_size=config.valid_batch_size, shuffle=False)
logger.info("Train dataset (Batch, Candidates, Seq length): {}".format(train_dataset.tensors[0].shape))
logger.info("Valid dataset (Batch, Candidates, Seq length): {}".format(valid_dataset.tensors[0].shape))
return train_loader, valid_loader, train_sampler, valid_sampler
def train():
config_file = "configs/train_full_config.json"
config = Config.from_json_file(config_file)
# logging is set to INFO (resp. WARN) for main (resp. auxiliary) process. logger.info => log main process only, logger.warning => log all processes
logging.basicConfig(level=logging.INFO if config.local_rank in [-1, 0] else logging.WARN)
logger.warning("Running process %d", config.local_rank) # This is a logger.warning: it will be printed by all distributed processes
logger.info("Arguments: %s", pformat(config))
# Initialize distributed training if needed
config.distributed = (config.local_rank != -1)
if config.distributed:
torch.cuda.set_device(config.local_rank)
config.device = torch.device("cuda", config.local_rank)
torch.distributed.init_process_group(backend='nccl', init_method='env://')
logger.info("Prepare tokenizer, pretrained model and optimizer - add special tokens for fine-tuning")
tokenizer_class = GPT2Tokenizer if "gpt2" in config.model_checkpoint else OpenAIGPTTokenizer
tokenizer = tokenizer_class.from_pretrained(config.model_checkpoint)
model_class = GPT2DoubleHeadsModel if "gpt2" in config.model_checkpoint else OpenAIGPTDoubleHeadsModel
model = model_class.from_pretrained(config.model_checkpoint)
tokenizer.set_special_tokens(SPECIAL_TOKENS)
model.set_num_special_tokens(len(SPECIAL_TOKENS))
model.to(config.device)
optimizer = OpenAIAdam(model.parameters(), lr=config.lr)
# Prepare model for FP16 and distributed training if needed (order is important, distributed should be the last)
if config.fp16:
from apex import amp # Apex is only required if we use fp16 training
model, optimizer = amp.initialize(model, optimizer, opt_level=config.fp16)
if config.distributed:
model = DistributedDataParallel(model, device_ids=[config.local_rank], output_device=config.local_rank)
logger.info("Prepare datasets")
train_loader, val_loader, train_sampler, valid_sampler = get_data_loaders(config, tokenizer)
# Training function and trainer
def update(engine, batch):
model.train()
batch = tuple(input_tensor.to(config.device) for input_tensor in batch)
lm_loss, mc_loss = model(*batch)
loss = (lm_loss * config.lm_coef + mc_loss * config.mc_coef) / config.gradient_accumulation_steps
if config.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), config.max_norm)
else:
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), config.max_norm)
if engine.state.iteration % config.gradient_accumulation_steps == 0:
optimizer.step()
optimizer.zero_grad()
return loss.item()
trainer = Engine(update)
# Evaluation function and evaluator (evaluator output is the input of the metrics)
def inference(engine, batch):
model.eval()
with torch.no_grad():
batch = tuple(input_tensor.to(config.device) for input_tensor in batch)
input_ids, mc_token_ids, lm_labels, mc_labels, token_type_ids = batch
#logger.info(tokenizer.decode(input_ids[0, -1, :].tolist()))
model_outputs = model(input_ids, mc_token_ids, token_type_ids=token_type_ids)
lm_logits, mc_logits = model_outputs[0], model_outputs[1] # So we can also use GPT2 outputs
lm_logits_flat_shifted = lm_logits[..., :-1, :].contiguous().view(-1, lm_logits.size(-1))
lm_labels_flat_shifted = lm_labels[..., 1:].contiguous().view(-1)
return (lm_logits_flat_shifted, mc_logits), (lm_labels_flat_shifted, mc_labels)
evaluator = Engine(inference)
# Attach evaluation to trainer: we evaluate when we start the training and at the end of each epoch
trainer.add_event_handler(Events.EPOCH_COMPLETED, lambda _: evaluator.run(val_loader))
if config.n_epochs < 1:
trainer.add_event_handler(Events.COMPLETED, lambda _: evaluator.run(val_loader))
if config.eval_before_start:
trainer.add_event_handler(Events.STARTED, lambda _: evaluator.run(val_loader))
# Make sure distributed data samplers split the dataset nicely between the distributed processes
if config.distributed:
trainer.add_event_handler(Events.EPOCH_STARTED, lambda engine: train_sampler.set_epoch(engine.state.epoch))
evaluator.add_event_handler(Events.EPOCH_STARTED, lambda engine: valid_sampler.set_epoch(engine.state.epoch))
# Linearly decrease the learning rate from lr to zero
scheduler = PiecewiseLinear(optimizer, "lr", [(0, config.lr), (config.n_epochs * len(train_loader), 0.0)])
trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)
# Prepare metrics - note how we compute distributed metrics
RunningAverage(output_transform=lambda x: x).attach(trainer, "loss")
metrics = {"nll": Loss(torch.nn.CrossEntropyLoss(ignore_index=-1), output_transform=lambda x: (x[0][0], x[1][0])),
"accuracy": Accuracy(output_transform=lambda x: (x[0][1], x[1][1]))}
metrics.update({"average_nll": MetricsLambda(average_distributed_scalar, metrics["nll"], config),
"average_accuracy": MetricsLambda(average_distributed_scalar, metrics["accuracy"], config)})
metrics["average_ppl"] = MetricsLambda(math.exp, metrics["average_nll"])
for name, metric in metrics.items():
metric.attach(evaluator, name)
# On the main process: add progress bar, tensorboard, checkpoints and save model, configuration and tokenizer before we start to train
if config.local_rank in [-1, 0]:
pbar = ProgressBar(persist=True)
pbar.attach(trainer, metric_names=["loss"])
evaluator.add_event_handler(Events.COMPLETED, lambda _: pbar.log_message("Validation: %s" % pformat(evaluator.state.metrics)))
tb_logger = TensorboardLogger(log_dir=config.log_dir)
tb_logger.attach(trainer, log_handler=OutputHandler(tag="training", metric_names=["loss"]), event_name=Events.ITERATION_COMPLETED)
tb_logger.attach(trainer, log_handler=OptimizerParamsHandler(optimizer), event_name=Events.ITERATION_STARTED)
tb_logger.attach(evaluator, log_handler=OutputHandler(tag="validation", metric_names=list(metrics.keys()), another_engine=trainer), event_name=Events.EPOCH_COMPLETED)
checkpoint_handler = ModelCheckpoint(tb_logger.writer.log_dir, 'checkpoint', save_interval=1, n_saved=3)
        trainer.add_event_handler(Events.EPOCH_COMPLETED, checkpoint_handler, {'mymodel': getattr(model, 'module', model)})  # "getattr" takes care of distributed encapsulation
torch.save(config, tb_logger.writer.log_dir + '/model_training_args.bin')
getattr(model, 'module', model).config.to_json_file(os.path.join(tb_logger.writer.log_dir, CONFIG_NAME))
tokenizer.save_vocabulary(tb_logger.writer.log_dir)
# Run the training
trainer.run(train_loader, max_epochs=config.n_epochs)
# On the main process: close tensorboard logger and rename the last checkpoint (for easy re-loading with OpenAIGPTModel.from_pretrained method)
if config.local_rank in [-1, 0] and config.n_epochs > 0:
os.rename(checkpoint_handler._saved[-1][1][-1], os.path.join(tb_logger.writer.log_dir, WEIGHTS_NAME)) # TODO: PR in ignite to have better access to saved file paths (cleaner)
tb_logger.close()
if __name__ == "__main__":
train()
| 14,215 | 58.233333 | 183 | py |
EmpTransfo | EmpTransfo-master/train_multihead.py | # Copyright (c) 2019-present, HuggingFace Inc.
# All rights reserved. This source code is licensed under the BSD-style license found in the LICENSE file in the root directory of this source tree.
import os
import math
import logging
from pprint import pformat
from argparse import ArgumentParser
from collections import defaultdict
from itertools import chain
import torch
from torch.nn.parallel import DistributedDataParallel
from torch.utils.data import DataLoader, TensorDataset
from ignite.engine import Engine, Events
from ignite.handlers import ModelCheckpoint
from ignite.metrics import Accuracy, Loss, MetricsLambda, RunningAverage
from ignite.contrib.handlers import ProgressBar, PiecewiseLinear
from config import Config
from ignite.contrib.handlers.tensorboard_logger import TensorboardLogger, OutputHandler, OptimizerParamsHandler
from pytorch_pretrained_bert import (OpenAIAdam, OpenAIGPTMultiHeadModel, OpenAIGPTTokenizer,
GPT2DoubleHeadsModel, GPT2Tokenizer, WEIGHTS_NAME, CONFIG_NAME,
BertModel, BertTokenizer)
from utils import get_dataset, get_dataset_for_daily_dialog
SPECIAL_TOKENS = ["<bos>", "<eos>", "<speaker1>", "<speaker2>",
"<no_emotion>", "<happiness>", "<surprise>", "<sadness>", "<disgust>", "<anger>", "<fear>",
"<work>", "<finance>", "<relationship>", "<attitude_and_emotion>", "<culture_and_education>",
"<school_life>", "<tourism>", "<ordinary_life>", "<politics>", "<health>",
"<directive>", "<inform>", "<commissive>", "<question>",
"<pad>"]
MODEL_INPUTS = ["input_ids", "ec_token_ids", "sc_token_ids", "lm_labels", "ec_labels", "sc_labels",
"token_type_ids", "token_emotion_ids", "token_action_ids"]
PADDED_INPUTS = ["input_ids", "lm_labels", "token_type_ids", "token_emotion_ids", "token_action_ids"]
logger = logging.getLogger(__file__)
def average_distributed_scalar(scalar, config):
""" Average a scalar over the nodes if we are in distributed training. We use this for distributed evaluation. """
if config.local_rank == -1:
return scalar
scalar_t = torch.tensor(scalar, dtype=torch.float, device=config.device) / torch.distributed.get_world_size()
torch.distributed.all_reduce(scalar_t, op=torch.distributed.ReduceOp.SUM)
return scalar_t.item()
def pad_dataset(dataset, padding=0):
""" Pad the dataset. This could be optimized by defining a Dataset class and padd only batches but this is simpler. """
max_l = max(len(x) for x in dataset["input_ids"])
for name in PADDED_INPUTS:
dataset[name] = [x + [padding if name != "lm_labels" else -1] * (max_l - len(x)) for x in dataset[name]]
return dataset
def get_emotion_label(tokenizer, candidate_emotion):
no_emotion_id, happiness_id, surprise_id, sadness_id, disgust_id, anger_id, fear_id = tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS[4:11])
if candidate_emotion == no_emotion_id:
return 0
elif candidate_emotion == happiness_id:
return 1
elif candidate_emotion == surprise_id:
return 2
elif candidate_emotion == sadness_id:
return 3
elif candidate_emotion == disgust_id:
return 4
elif candidate_emotion == anger_id:
return 5
elif candidate_emotion == fear_id:
return 6
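# Equivalent lookup-based sketch (same mapping assumed; shown for clarity):
#   emotion_ids = tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS[4:11])
#   label = {eid: i for i, eid in enumerate(emotion_ids)}[candidate_emotion]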
def build_input_from_segments(topic, history, emotions, actions, reply, candidate_emotion, candidate_act, tokenizer, lm_labels=False, with_eos=True):
    """ Build a sequence of input from the topic, dialog history and last reply """
bos, eos, speaker1, speaker2, no_emotion = tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS[:5])
inform = tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS[-4])
emotions = [no_emotion] + emotions
actions = [inform] + actions
instance = {}
sequence = [[bos] + [topic]] + history + [reply + ([eos] if with_eos else [])]
sequence = [[speaker2 if (len(sequence) - i) % 2 else speaker1] + s for i, s in enumerate(sequence)]
instance["input_ids"] = list(chain(*sequence))
instance["token_type_ids"] = [speaker2 if i % 2 else speaker1 for i, s in enumerate(sequence) for _ in
s] # the last for is for repeating the speaker1 and speaker2 for all tokens
instance["token_emotion_ids"] = [emotions[i] for i, s in enumerate(sequence[:-1]) for _ in s] + [
candidate_emotion] * len(sequence[-1])
instance["token_action_ids"] = [actions[i] for i, s in enumerate(sequence[:-1]) for _ in s] + [canidate_act] * len(
sequence[-1])
instance["ec_token_ids"] = len(instance["input_ids"]) - 1
instance["sc_token_ids"] = len(instance["input_ids"]) - 2
instance["ec_labels"] = -1
instance["lm_labels"] = [-1] * len(instance["input_ids"])
if lm_labels:
instance["lm_labels"] = ([-1] * sum(len(s) for s in sequence[:-1])) + [-1] + sequence[-1][
1:] # all -1 except for reply, reply is just the ids
instance["ec_labels"] = get_emotion_label(tokenizer, candidate_emotion)
return instance, sequence
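# Illustrative layout (not executed): [<bos> <topic>] [h0] ... [reply <eos>],
# each segment prefixed with an alternating speaker token. token_emotion_ids
# and token_action_ids run in parallel to input_ids, with the reply segment
# filled by the candidate's emotion/act id; the emotion head reads from the
# last position (ec_token_ids) and the next-sentence head from the one
# before it (sc_token_ids).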
def get_data_loaders(config, tokenizer):
""" Prepare the dataset for training and evaluation """
personachat = get_dataset_for_daily_dialog(tokenizer, config.dataset_path, config.dataset_cache, SPECIAL_TOKENS)
logger.info("Build inputs and labels")
datasets = {"train": defaultdict(list), "valid": defaultdict(list)}
gpu_max_length = 310
for dataset_name, dataset in personachat.items():
num_candidates = len(dataset[0]["utterances"][0]["candidates"])
if config.num_candidates > 0 and dataset_name == 'train':
num_candidates = min(config.num_candidates, num_candidates)
for dialog in dataset:
topic = dialog["topic"]
for utterance in dialog["utterances"]:
history = utterance["history"][-(2 * config.max_history + 1):]
emotions = utterance["emotion"][-(2 * config.max_history + 1):]
actions = utterance["act"][-(2 * config.max_history + 1):]
for j, candidate in enumerate(utterance["candidates"][-num_candidates:]):
                    lm_labels = bool(
                        j == num_candidates - 1)  # the true label is always the last one in the list of candidates
candidate_emotion = utterance['candidates_emotions'][j]
candidate_act = utterance['candidates_acts'][j]
instance, _ = build_input_from_segments(topic, history, emotions, actions, candidate,
candidate_emotion, candidate_act, tokenizer, lm_labels)
if len(instance["input_ids"]) > gpu_max_length:
truncated_history = [hist[:10] for hist in history]
truncated_candidate = candidate[:10]
instance, _ = build_input_from_segments(topic, truncated_history, emotions, actions,
truncated_candidate,
candidate_emotion, candidate_act, tokenizer, lm_labels)
for input_name, input_array in instance.items():
datasets[dataset_name][input_name].append(input_array)
datasets[dataset_name]["sc_labels"].append(num_candidates - 1)
datasets[dataset_name]["n_candidates"] = num_candidates
logger.info("Pad inputs and convert to Tensor")
tensor_datasets = {"train": [], "valid": []}
for dataset_name, dataset in datasets.items():
dataset = pad_dataset(dataset, padding=tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS[-1]))
for input_name in MODEL_INPUTS:
tensor = torch.tensor(dataset[input_name])
if input_name != "sc_labels":
tensor = tensor.view((-1, datasets[dataset_name]["n_candidates"]) + tensor.shape[1:])
tensor_datasets[dataset_name].append(tensor)
logger.info("Build train and validation dataloaders")
train_dataset, valid_dataset = TensorDataset(*tensor_datasets["train"]), TensorDataset(*tensor_datasets["valid"])
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset) if config.distributed else None
valid_sampler = torch.utils.data.distributed.DistributedSampler(valid_dataset) if config.distributed else None
train_loader = DataLoader(train_dataset, sampler=train_sampler, batch_size=config.train_batch_size, shuffle=False)
valid_loader = DataLoader(valid_dataset, sampler=valid_sampler, batch_size=config.valid_batch_size, shuffle=False)
logger.info("Train dataset (Batch, Candidates, Seq length): {}".format(train_dataset.tensors[0].shape))
logger.info("Valid dataset (Batch, Candidates, Seq length): {}".format(valid_dataset.tensors[0].shape))
return train_loader, valid_loader, train_sampler, valid_sampler
def train():
config_file = "configs/train_multihead_config.json"
config = Config.from_json_file(config_file)
ec_coef = 1
sc_coef = 1
# logging is set to INFO (resp. WARN) for main (resp. auxiliary) process. logger.info => log main process only, logger.warning => log all processes
logging.basicConfig(level=logging.INFO if config.local_rank in [-1, 0] else logging.WARN)
logger.warning("Running process %d",
config.local_rank) # This is a logger.warning: it will be printed by all distributed processes
logger.info("Arguments: %s", pformat(config))
# Initialize distributed training if needed
config.distributed = (config.local_rank != -1)
if config.distributed:
torch.cuda.set_device(config.local_rank)
config.device = torch.device("cuda", config.local_rank)
torch.distributed.init_process_group(backend='nccl', init_method='env://')
logger.info("Prepare tokenizer, pretrained model and optimizer - add special tokens for fine-tuning")
tokenizer_class = OpenAIGPTTokenizer
tokenizer = tokenizer_class.from_pretrained(config.model_checkpoint)
model_class = OpenAIGPTMultiHeadModel
model = model_class.from_pretrained(config.model_checkpoint)
tokenizer.set_special_tokens(SPECIAL_TOKENS)
model.set_num_special_tokens(len(SPECIAL_TOKENS))
model.to(config.device)
optimizer = OpenAIAdam(model.parameters(), lr=config.lr)
# Prepare model for FP16 and distributed training if needed (order is important, distributed should be the last)
if config.fp16:
from apex import amp # Apex is only required if we use fp16 training
model, optimizer = amp.initialize(model, optimizer, opt_level=config.fp16)
if config.distributed:
model = DistributedDataParallel(model, device_ids=[config.local_rank], output_device=config.local_rank)
logger.info("Prepare datasets")
train_loader, val_loader, train_sampler, valid_sampler = get_data_loaders(config, tokenizer)
# Training function and trainer
def update(engine, batch):
model.train()
input_ids, ec_token_ids, sc_token_ids, lm_labels, ec_labels, sc_labels, token_type_ids, token_emotion_ids, token_action_ids = tuple(
input_tensor.to(config.device) for input_tensor in batch)
lm_loss, emotion_loss, sentence_loss = model(input_ids, ec_token_ids, sc_token_ids,
lm_labels, ec_labels, sc_labels, token_type_ids,
token_emotion_ids, token_action_ids)
loss = (lm_loss * config.lm_coef + emotion_loss * ec_coef + sentence_loss * sc_coef) / config.gradient_accumulation_steps
if config.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), config.max_norm)
else:
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), config.max_norm)
if engine.state.iteration % config.gradient_accumulation_steps == 0:
optimizer.step()
optimizer.zero_grad()
return loss.item()
trainer = Engine(update)
# Evaluation function and evaluator (evaluator output is the input of the metrics)
def inference(engine, batch):
model.eval()
with torch.no_grad():
batch = tuple(input_tensor.to(config.device) for input_tensor in batch)
input_ids, ec_token_ids, sc_token_ids, lm_labels, ec_labels, \
sc_labels, token_type_ids, token_emotion_ids, token_action_ids = batch
# logger.info(tokenizer.decode(input_ids[0, -1, :].tolist()))
model_outputs = model(input_ids, ec_token_ids, sc_token_ids, token_type_ids=token_type_ids,
token_emotion_ids=token_emotion_ids,
token_action_ids=token_action_ids)
lm_logits, mc_logits = model_outputs[0], model_outputs[2] # So we can also use GPT2 outputs
lm_logits_flat_shifted = lm_logits[..., :-1, :].contiguous().view(-1, lm_logits.size(-1))
lm_labels_flat_shifted = lm_labels[..., 1:].contiguous().view(-1)
return (lm_logits_flat_shifted, mc_logits), (lm_labels_flat_shifted, sc_labels)
evaluator = Engine(inference)
# Attach evaluation to trainer: we evaluate when we start the training and at the end of each epoch
trainer.add_event_handler(Events.EPOCH_COMPLETED, lambda _: evaluator.run(val_loader))
if config.n_epochs < 1:
trainer.add_event_handler(Events.COMPLETED, lambda _: evaluator.run(val_loader))
if config.eval_before_start:
trainer.add_event_handler(Events.STARTED, lambda _: evaluator.run(val_loader))
# Make sure distributed data samplers split the dataset nicely between the distributed processes
if config.distributed:
trainer.add_event_handler(Events.EPOCH_STARTED, lambda engine: train_sampler.set_epoch(engine.state.epoch))
evaluator.add_event_handler(Events.EPOCH_STARTED, lambda engine: valid_sampler.set_epoch(engine.state.epoch))
# Linearly decrease the learning rate from lr to zero
scheduler = PiecewiseLinear(optimizer, "lr", [(0, config.lr), (config.n_epochs * len(train_loader), 0.0)])
trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)
# Prepare metrics - note how we compute distributed metrics
RunningAverage(output_transform=lambda x: x).attach(trainer, "loss")
metrics = {"nll": Loss(torch.nn.CrossEntropyLoss(ignore_index=-1), output_transform=lambda x: (x[0][0], x[1][0])),
"accuracy": Accuracy(output_transform=lambda x: (x[0][1], x[1][1]))}
metrics.update({"average_nll": MetricsLambda(average_distributed_scalar, metrics["nll"], config),
"average_accuracy": MetricsLambda(average_distributed_scalar, metrics["accuracy"], config)})
metrics["average_ppl"] = MetricsLambda(math.exp, metrics["average_nll"])
for name, metric in metrics.items():
metric.attach(evaluator, name)
# On the main process: add progress bar, tensorboard, checkpoints and save model, configuration and tokenizer before we start to train
if config.local_rank in [-1, 0]:
pbar = ProgressBar(persist=True)
pbar.attach(trainer, metric_names=["loss"])
evaluator.add_event_handler(Events.COMPLETED,
lambda _: pbar.log_message("Validation: %s" % pformat(evaluator.state.metrics)))
tb_logger = TensorboardLogger(log_dir=config.log_dir)
tb_logger.attach(trainer, log_handler=OutputHandler(tag="training", metric_names=["loss"]),
event_name=Events.ITERATION_COMPLETED)
tb_logger.attach(trainer, log_handler=OptimizerParamsHandler(optimizer), event_name=Events.ITERATION_STARTED)
tb_logger.attach(evaluator, log_handler=OutputHandler(tag="validation", metric_names=list(metrics.keys()),
another_engine=trainer),
event_name=Events.EPOCH_COMPLETED)
checkpoint_handler = ModelCheckpoint(tb_logger.writer.log_dir, 'checkpoint', save_interval=1, n_saved=3)
trainer.add_event_handler(Events.EPOCH_COMPLETED, checkpoint_handler, {
            'mymodel': getattr(model, 'module', model)})  # "getattr" takes care of distributed encapsulation
torch.save(config, tb_logger.writer.log_dir + '/model_training_args.bin')
getattr(model, 'module', model).config.to_json_file(os.path.join(tb_logger.writer.log_dir, CONFIG_NAME))
tokenizer.save_vocabulary(tb_logger.writer.log_dir)
# Run the training
trainer.run(train_loader, max_epochs=config.n_epochs)
# On the main process: close tensorboard logger and rename the last checkpoint (for easy re-loading with OpenAIGPTModel.from_pretrained method)
if config.local_rank in [-1, 0] and config.n_epochs > 0:
os.rename(checkpoint_handler._saved[-1][1][-1], os.path.join(tb_logger.writer.log_dir,
WEIGHTS_NAME)) # TODO: PR in ignite to have better access to saved file paths (cleaner)
tb_logger.close()
if __name__ == "__main__":
train()
| 17,664 | 55.800643 | 174 | py |
EmpTransfo | EmpTransfo-master/eval_emotion_recognition.py | # Copyright (c) 2019-present, HuggingFace Inc.
# All rights reserved. This source code is licensed under the BSD-style license found in the LICENSE file in the root directory of this source tree.
import logging
from pprint import pformat
from collections import defaultdict
from itertools import chain
import torch
from torch.nn.parallel import DistributedDataParallel
from torch.utils.data import DataLoader, TensorDataset
from config import Config
from pytorch_pretrained_bert import (OpenAIAdam, OpenAIGPTDoubleHeadLMEmotionRecognitionModel, OpenAIGPTTokenizer,
GPT2DoubleHeadsModel, GPT2Tokenizer, WEIGHTS_NAME, CONFIG_NAME,
BertModel, BertTokenizer)
from utils import get_dataset, get_dataset_for_daily_dialog
SPECIAL_TOKENS = ["<bos>", "<eos>", "<speaker1>", "<speaker2>",
"<no_emotion>", "<happiness>", "<surprise>", "<sadness>", "<disgust>", "<anger>", "<fear>",
"<directive>", "<inform>", "<commissive>", "<question>",
"<pad>"]
MODEL_INPUTS = ["input_ids", "mc_token_ids", "lm_labels", "mc_labels", "token_type_ids", "token_emotion_ids"]
PADDED_INPUTS = ["input_ids", "lm_labels", "token_type_ids", "token_emotion_ids"]
logger = logging.getLogger(__file__)
def average_distributed_scalar(scalar, config):
""" Average a scalar over the nodes if we are in distributed training. We use this for distributed evaluation. """
if config.local_rank == -1:
return scalar
scalar_t = torch.tensor(scalar, dtype=torch.float, device=config.device) / torch.distributed.get_world_size()
torch.distributed.all_reduce(scalar_t, op=torch.distributed.ReduceOp.SUM)
return scalar_t.item()
def pad_dataset(dataset, padding=0):
""" Pad the dataset. This could be optimized by defining a Dataset class and padd only batches but this is simpler. """
max_l = max(len(x) for x in dataset["input_ids"])
for name in PADDED_INPUTS:
dataset[name] = [x + [padding if name != "lm_labels" else -1] * (max_l - len(x)) for x in dataset[name]]
return dataset
def get_emotion_label(tokenizer, candidate_emotion):
    no_emotion_id, happiness_id, surprise_id, sadness_id, disgust_id, anger_id, fear_id = tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS[4:11])
if candidate_emotion == happiness_id:
return 0
elif candidate_emotion == surprise_id:
return 1
elif candidate_emotion == sadness_id:
return 2
elif candidate_emotion == disgust_id:
return 3
elif candidate_emotion == anger_id:
return 4
elif candidate_emotion == fear_id:
return 5
elif candidate_emotion == no_emotion_id:
return 6
def build_input_from_segments(history, emotions, reply, true_emotion, tokenizer, with_eos=True):
""" Build a sequence of input from 3 segments: persona, history and last reply """
bos, eos, speaker1, speaker2 = tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS[:4])
instance = {}
sequence = [[bos] + history[0]] + history[1:] + [reply + ([eos] if with_eos else [])]
sequence = [[speaker2 if (len(sequence)-i) % 2 else speaker1] + s for i, s in enumerate(sequence)]
instance["input_ids"] = list(chain(*sequence))
instance["token_type_ids"] = [speaker2 if i % 2 else speaker1 for i, s in enumerate(sequence) for _ in s] # the last for is for repeating the speaker1 and speaker2 for all tokens
#instance["token_emotion_ids"] = [emotions[i] for i, s in enumerate(sequence[:-1]) for _ in s] + [true_emotion] * len(sequence[-1])
instance["token_emotion_ids"] = [emotions[i] for i, s in enumerate(sequence[:-1]) for _ in s]
instance["mc_token_ids"] = len(instance["input_ids"]) - 1
instance["mc_labels"] = get_emotion_label(tokenizer, true_emotion)
instance["lm_labels"] = ([-1] * sum(len(s) for s in sequence[:-1])) + [-1] + sequence[-1][1:] #all -1 except for reply, reply is just the ids
return instance, sequence
def get_data_loaders(config, tokenizer):
""" Prepare the dataset for training and evaluation """
personachat = get_dataset_for_daily_dialog(tokenizer, config.dataset_path, config.dataset_cache, SPECIAL_TOKENS)
#personachat["train"] = personachat["train"][:100]
#personachat["valid"] = personachat["valid"][:10]
logger.info("Build inputs and labels")
datasets = {"train": defaultdict(list), "valid": defaultdict(list)}
    num_truncated = 0  # count of instances that had to be truncated
for dataset_name, dataset in personachat.items():
        num_candidates = 2  # len(dataset[0]["utterances"][0]["candidates"])
if config.num_candidates > 0 and dataset_name == 'train':
num_candidates = min(config.num_candidates, num_candidates)
for dialog in dataset:
for utterance in dialog["utterances"]:
history = utterance["history"][-(2 * config.max_history + 1):]
emotions = utterance["emotion"][-(2 * config.max_history + 1):]
reply = utterance["candidates"][-1]
true_emotion = utterance['candidates_emotions'][-1]
if true_emotion == tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS)[4]:
continue
instance, _ = build_input_from_segments(history,
emotions,
reply,
true_emotion,
tokenizer)
if len(instance["input_ids"]) > 310:
truncated_history = [hist[:10] for hist in history]
truncated_candidate = reply[:10]
true_emotion = utterance['candidates_emotions'][-1]
instance, _ = build_input_from_segments(truncated_history,
emotions,
truncated_candidate,
true_emotion,
tokenizer)
                    num_truncated += 1
for input_name, input_array in instance.items():
datasets[dataset_name][input_name].append(input_array)
#datasets[dataset_name]["mc_labels"].append(num_candidates - 1)
datasets[dataset_name]["n_candidates"] = num_candidates
    logger.info("Instances requiring truncation: %d", num_truncated)
logger.info("Pad inputs and convert to Tensor")
tensor_datasets = {"train": [], "valid": []}
for dataset_name, dataset in datasets.items():
dataset = pad_dataset(dataset, padding=tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS[-1]))
for input_name in MODEL_INPUTS:
tensor = torch.tensor(dataset[input_name])
#if input_name != "mc_labels":
# tensor = tensor.view((-1, datasets[dataset_name]["n_candidates"]) + tensor.shape[1:])
tensor_datasets[dataset_name].append(tensor)
logger.info("Build train and validation dataloaders")
train_dataset, valid_dataset = TensorDataset(*tensor_datasets["train"]), TensorDataset(*tensor_datasets["valid"])
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset) if config.distributed else None
valid_sampler = torch.utils.data.distributed.DistributedSampler(valid_dataset) if config.distributed else None
train_loader = DataLoader(train_dataset, sampler=train_sampler, batch_size=config.train_batch_size, shuffle=False)
valid_loader = DataLoader(valid_dataset, sampler=valid_sampler, batch_size=config.valid_batch_size, shuffle=False)
logger.info("Train dataset (Batch, Candidates, Seq length): {}".format(train_dataset.tensors[0].shape))
logger.info("Valid dataset (Batch, Candidates, Seq length): {}".format(valid_dataset.tensors[0].shape))
return train_loader, valid_loader, train_sampler, valid_sampler
def evaluate():
config_file = "configs/train_full_pipeline_config.json"
config = Config.from_json_file(config_file)
# logging is set to INFO (resp. WARN) for main (resp. auxiliary) process. logger.info => log main process only, logger.warning => log all processes
logging.basicConfig(level=logging.INFO if config.local_rank in [-1, 0] else logging.WARN)
logger.warning("Running process %d", config.local_rank) # This is a logger.warning: it will be printed by all distributed processes
logger.info("Arguments: %s", pformat(config))
# Initialize distributed training if needed
config.distributed = (config.local_rank != -1)
if config.distributed:
torch.cuda.set_device(config.local_rank)
config.device = torch.device("cuda", config.local_rank)
torch.distributed.init_process_group(backend='nccl', init_method='env://')
logger.info("Prepare tokenizer, pretrained model and optimizer - add special tokens for fine-tuning")
tokenizer_class = GPT2Tokenizer if "gpt2" in config.model_checkpoint else OpenAIGPTTokenizer
tokenizer = tokenizer_class.from_pretrained(config.model_checkpoint)
model_class = GPT2DoubleHeadsModel if "gpt2" in config.model_checkpoint else OpenAIGPTDoubleHeadLMEmotionRecognitionModel
model = model_class.from_pretrained(config.model_checkpoint)
tokenizer.set_special_tokens(SPECIAL_TOKENS)
model.set_num_special_tokens(len(SPECIAL_TOKENS))
model.to(config.device)
optimizer = OpenAIAdam(model.parameters(), lr=config.lr)
# Prepare model for FP16 and distributed training if needed (order is important, distributed should be the last)
if config.fp16:
from apex import amp # Apex is only required if we use fp16 training
model, optimizer = amp.initialize(model, optimizer, opt_level=config.fp16)
if config.distributed:
model = DistributedDataParallel(model, device_ids=[config.local_rank], output_device=config.local_rank)
logger.info("Prepare datasets")
train_loader, val_loader, train_sampler, valid_sampler = get_data_loaders(config, tokenizer)
    # Evaluation loop: measure emotion-classification accuracy on the validation set
model.eval()
num_correct = 0
    num_all = len(val_loader.dataset)  # number of validation examples (len(val_loader) would count batches)
for batch in val_loader:
with torch.no_grad():
batch = tuple(input_tensor.to(config.device) for input_tensor in batch)
input_ids, mc_token_ids, lm_labels, mc_labels, token_type_ids, token_emotion_ids = batch
model_outputs = model(input_ids, mc_token_ids, token_type_ids=token_type_ids, token_emotion_ids=token_emotion_ids)
lm_logits, mc_logits = model_outputs[0], model_outputs[1] # So we can also use GPT2 outputs
indices = torch.argmax(mc_logits, dim=1)
correct = torch.eq(indices, mc_labels).view(-1)
num_correct += torch.sum(correct).item()
    logger.info("Emotion recognition accuracy: %.4f", num_correct / num_all)
if __name__ == "__main__":
    evaluate()
| 11,203 | 52.607656 | 182 | py |
EmpTransfo | EmpTransfo-master/pytorch_pretrained_bert/optimization.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch optimization for BERT model."""
import math
import torch
from torch.optim import Optimizer
from torch.optim.optimizer import required
from torch.nn.utils import clip_grad_norm_
import logging
import abc
import sys
logger = logging.getLogger(__name__)
if sys.version_info >= (3, 4):
ABC = abc.ABC
else:
ABC = abc.ABCMeta('ABC', (), {})
class _LRSchedule(ABC):
""" Parent of all LRSchedules here. """
warn_t_total = False # is set to True for schedules where progressing beyond t_total steps doesn't make sense
def __init__(self, warmup=0.002, t_total=-1, **kw):
"""
:param warmup: what fraction of t_total steps will be used for linear warmup
:param t_total: how many training steps (updates) are planned
:param kw:
"""
super(_LRSchedule, self).__init__(**kw)
if t_total < 0:
logger.warning("t_total value of {} results in schedule not being applied".format(t_total))
if not 0.0 <= warmup < 1.0 and not warmup == -1:
raise ValueError("Invalid warmup: {} - should be in [0.0, 1.0[ or -1".format(warmup))
warmup = max(warmup, 0.)
self.warmup, self.t_total = float(warmup), float(t_total)
self.warned_for_t_total_at_progress = -1
def get_lr(self, step, nowarn=False):
"""
:param step: which of t_total steps we're on
:param nowarn: set to True to suppress warning regarding training beyond specified 't_total' steps
:return: learning rate multiplier for current update
"""
if self.t_total < 0:
return 1.
progress = float(step) / self.t_total
ret = self.get_lr_(progress)
        # warning for exceeding t_total (only active for schedules with warn_t_total set)
if not nowarn and self.warn_t_total and progress > 1. and progress > self.warned_for_t_total_at_progress:
logger.warning(
"Training beyond specified 't_total'. Learning rate multiplier set to {}. Please set 't_total' of {} correctly."
.format(ret, self.__class__.__name__))
self.warned_for_t_total_at_progress = progress
# end warning
return ret
@abc.abstractmethod
def get_lr_(self, progress):
"""
:param progress: value between 0 and 1 (unless going beyond t_total steps) specifying training progress
:return: learning rate multiplier for current update
"""
return 1.
class ConstantLR(_LRSchedule):
def get_lr_(self, progress):
return 1.
class WarmupCosineSchedule(_LRSchedule):
"""
Linearly increases learning rate from 0 to 1 over `warmup` fraction of training steps.
Decreases learning rate from 1. to 0. over remaining `1 - warmup` steps following a cosine curve.
If `cycles` (default=0.5) is different from default, learning rate follows cosine function after warmup.
"""
warn_t_total = True
def __init__(self, warmup=0.002, t_total=-1, cycles=.5, **kw):
"""
:param warmup: see LRSchedule
:param t_total: see LRSchedule
:param cycles: number of cycles. Default: 0.5, corresponding to cosine decay from 1. at progress==warmup and 0 at progress==1.
:param kw:
"""
super(WarmupCosineSchedule, self).__init__(warmup=warmup, t_total=t_total, **kw)
self.cycles = cycles
def get_lr_(self, progress):
if progress < self.warmup:
return progress / self.warmup
else:
progress = (progress - self.warmup) / (1 - self.warmup) # progress after warmup
return 0.5 * (1. + math.cos(math.pi * self.cycles * 2 * progress))
class WarmupCosineWithHardRestartsSchedule(WarmupCosineSchedule):
"""
Linearly increases learning rate from 0 to 1 over `warmup` fraction of training steps.
If `cycles` (default=1.) is different from default, learning rate follows `cycles` times a cosine decaying
learning rate (with hard restarts).
"""
def __init__(self, warmup=0.002, t_total=-1, cycles=1., **kw):
super(WarmupCosineWithHardRestartsSchedule, self).__init__(warmup=warmup, t_total=t_total, cycles=cycles, **kw)
assert(cycles >= 1.)
def get_lr_(self, progress):
if progress < self.warmup:
return progress / self.warmup
else:
progress = (progress - self.warmup) / (1 - self.warmup) # progress after warmup
ret = 0.5 * (1. + math.cos(math.pi * ((self.cycles * progress) % 1)))
return ret
class WarmupCosineWithWarmupRestartsSchedule(WarmupCosineWithHardRestartsSchedule):
"""
All training progress is divided in `cycles` (default=1.) parts of equal length.
Every part follows a schedule with the first `warmup` fraction of the training steps linearly increasing from 0. to 1.,
followed by a learning rate decreasing from 1. to 0. following a cosine curve.
"""
def __init__(self, warmup=0.002, t_total=-1, cycles=1., **kw):
assert(warmup * cycles < 1.)
warmup = warmup * cycles if warmup >= 0 else warmup
super(WarmupCosineWithWarmupRestartsSchedule, self).__init__(warmup=warmup, t_total=t_total, cycles=cycles, **kw)
def get_lr_(self, progress):
progress = progress * self.cycles % 1.
if progress < self.warmup:
return progress / self.warmup
else:
progress = (progress - self.warmup) / (1 - self.warmup) # progress after warmup
ret = 0.5 * (1. + math.cos(math.pi * progress))
return ret
class WarmupConstantSchedule(_LRSchedule):
"""
Linearly increases learning rate from 0 to 1 over `warmup` fraction of training steps.
Keeps learning rate equal to 1. after warmup.
"""
def get_lr_(self, progress):
if progress < self.warmup:
return progress / self.warmup
return 1.
class WarmupLinearSchedule(_LRSchedule):
"""
Linearly increases learning rate from 0 to 1 over `warmup` fraction of training steps.
Linearly decreases learning rate from 1. to 0. over remaining `1 - warmup` steps.
"""
warn_t_total = True
def get_lr_(self, progress):
if progress < self.warmup:
return progress / self.warmup
return max((progress - 1.) / (self.warmup - 1.), 0.)
SCHEDULES = {
None: ConstantLR,
"none": ConstantLR,
"warmup_cosine": WarmupCosineSchedule,
"warmup_constant": WarmupConstantSchedule,
"warmup_linear": WarmupLinearSchedule
}
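# Sketch of the warmup_linear multiplier over training (hypothetical numbers):
#   sched = WarmupLinearSchedule(warmup=0.1, t_total=100)
#   [round(sched.get_lr(s), 2) for s in (0, 5, 10, 55, 100)]
#   # -> [0.0, 0.5, 1.0, 0.5, 0.0]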
class BertAdam(Optimizer):
"""Implements BERT version of Adam algorithm with weight decay fix.
Params:
lr: learning rate
warmup: portion of t_total for the warmup, -1 means no warmup. Default: -1
t_total: total number of training steps for the learning
rate schedule, -1 means constant learning rate of 1. (no warmup regardless of warmup setting). Default: -1
schedule: schedule to use for the warmup (see above).
Can be `'warmup_linear'`, `'warmup_constant'`, `'warmup_cosine'`, `'none'`, `None` or a `_LRSchedule` object (see below).
If `None` or `'none'`, learning rate is always kept constant.
Default : `'warmup_linear'`
b1: Adams b1. Default: 0.9
b2: Adams b2. Default: 0.999
e: Adams epsilon. Default: 1e-6
weight_decay: Weight decay. Default: 0.01
max_grad_norm: Maximum norm for the gradients (-1 means no clipping). Default: 1.0
"""
def __init__(self, params, lr=required, warmup=-1, t_total=-1, schedule='warmup_linear',
b1=0.9, b2=0.999, e=1e-6, weight_decay=0.01, max_grad_norm=1.0, **kwargs):
if lr is not required and lr < 0.0:
raise ValueError("Invalid learning rate: {} - should be >= 0.0".format(lr))
if not isinstance(schedule, _LRSchedule) and schedule not in SCHEDULES:
raise ValueError("Invalid schedule parameter: {}".format(schedule))
if not 0.0 <= b1 < 1.0:
raise ValueError("Invalid b1 parameter: {} - should be in [0.0, 1.0[".format(b1))
if not 0.0 <= b2 < 1.0:
raise ValueError("Invalid b2 parameter: {} - should be in [0.0, 1.0[".format(b2))
if not e >= 0.0:
raise ValueError("Invalid epsilon value: {} - should be >= 0.0".format(e))
# initialize schedule object
if not isinstance(schedule, _LRSchedule):
schedule_type = SCHEDULES[schedule]
schedule = schedule_type(warmup=warmup, t_total=t_total)
else:
if warmup != -1 or t_total != -1:
logger.warning("warmup and t_total on the optimizer are ineffective when _LRSchedule object is provided as schedule. "
"Please specify custom warmup and t_total in _LRSchedule object.")
defaults = dict(lr=lr, schedule=schedule,
b1=b1, b2=b2, e=e, weight_decay=weight_decay,
max_grad_norm=max_grad_norm)
super(BertAdam, self).__init__(params, defaults)
def get_lr(self):
lr = []
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
if len(state) == 0:
return [0]
lr_scheduled = group['lr']
lr_scheduled *= group['schedule'].get_lr(state['step'])
lr.append(lr_scheduled)
return lr
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['next_m'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['next_v'] = torch.zeros_like(p.data)
next_m, next_v = state['next_m'], state['next_v']
beta1, beta2 = group['b1'], group['b2']
# Add grad clipping
if group['max_grad_norm'] > 0:
clip_grad_norm_(p, group['max_grad_norm'])
# Decay the first and second moment running average coefficient
# In-place operations to update the averages at the same time
next_m.mul_(beta1).add_(1 - beta1, grad)
next_v.mul_(beta2).addcmul_(1 - beta2, grad, grad)
update = next_m / (next_v.sqrt() + group['e'])
# Just adding the square of the weights to the loss function is *not*
# the correct way of using L2 regularization/weight decay with Adam,
# since that will interact with the m and v parameters in strange ways.
#
# Instead we want to decay the weights in a manner that doesn't interact
# with the m/v parameters. This is equivalent to adding the square
# of the weights to the loss with plain (non-momentum) SGD.
if group['weight_decay'] > 0.0:
update += group['weight_decay'] * p.data
lr_scheduled = group['lr']
lr_scheduled *= group['schedule'].get_lr(state['step'])
update_with_lr = lr_scheduled * update
p.data.add_(-update_with_lr)
state['step'] += 1
# step_size = lr_scheduled * math.sqrt(bias_correction2) / bias_correction1
# No bias correction
# bias_correction1 = 1 - beta1 ** state['step']
# bias_correction2 = 1 - beta2 ** state['step']
return loss
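# Minimal usage sketch (hypothetical model and step budget):
#   optimizer = BertAdam(model.parameters(), lr=5e-5, warmup=0.1, t_total=1000)
#   loss.backward()
#   optimizer.step()
#   optimizer.zero_grad()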
| 13,028 | 42 | 139 | py |
EmpTransfo | EmpTransfo-master/pytorch_pretrained_bert/optimization_openai.py | # coding=utf-8
# Copyright 2018 The Open AI Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch optimization for OpenAI GPT model."""
import math
import torch
from torch.optim import Optimizer
from torch.optim.optimizer import required
from torch.nn.utils import clip_grad_norm_
import logging
from .optimization import SCHEDULES, _LRSchedule, WarmupCosineWithWarmupRestartsSchedule, \
WarmupCosineWithHardRestartsSchedule, WarmupCosineSchedule, WarmupLinearSchedule, WarmupConstantSchedule
logger = logging.getLogger(__name__)
class OpenAIAdam(Optimizer):
"""Implements Open AI version of Adam algorithm with weight decay fix.
"""
def __init__(self, params, lr=required, schedule='warmup_linear', warmup=-1, t_total=-1,
b1=0.9, b2=0.999, e=1e-8, weight_decay=0,
vector_l2=False, max_grad_norm=-1, **kwargs):
if lr is not required and lr < 0.0:
raise ValueError("Invalid learning rate: {} - should be >= 0.0".format(lr))
if not isinstance(schedule, _LRSchedule) and schedule not in SCHEDULES:
raise ValueError("Invalid schedule parameter: {}".format(schedule))
if not 0.0 <= b1 < 1.0:
raise ValueError("Invalid b1 parameter: {} - should be in [0.0, 1.0[".format(b1))
if not 0.0 <= b2 < 1.0:
raise ValueError("Invalid b2 parameter: {} - should be in [0.0, 1.0[".format(b2))
if not e >= 0.0:
raise ValueError("Invalid epsilon value: {} - should be >= 0.0".format(e))
# initialize schedule object
if not isinstance(schedule, _LRSchedule):
schedule_type = SCHEDULES[schedule]
schedule = schedule_type(warmup=warmup, t_total=t_total)
else:
if warmup != -1 or t_total != -1:
logger.warning("warmup and t_total on the optimizer are ineffective when _LRSchedule object is provided as schedule. "
"Please specify custom warmup and t_total in _LRSchedule object.")
defaults = dict(lr=lr, schedule=schedule,
b1=b1, b2=b2, e=e, weight_decay=weight_decay, vector_l2=vector_l2,
max_grad_norm=max_grad_norm)
super(OpenAIAdam, self).__init__(params, defaults)
def get_lr(self):
lr = []
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
if len(state) == 0:
return [0]
lr_scheduled = group['lr']
lr_scheduled *= group['schedule'].get_lr(state['step'])
lr.append(lr_scheduled)
return lr
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p.data)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['b1'], group['b2']
state['step'] += 1
# Add grad clipping
if group['max_grad_norm'] > 0:
clip_grad_norm_(p, group['max_grad_norm'])
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(1 - beta1, grad)
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
denom = exp_avg_sq.sqrt().add_(group['e'])
bias_correction1 = 1 - beta1 ** state['step']
bias_correction2 = 1 - beta2 ** state['step']
lr_scheduled = group['lr']
lr_scheduled *= group['schedule'].get_lr(state['step'])
step_size = lr_scheduled * math.sqrt(bias_correction2) / bias_correction1
p.data.addcdiv_(-step_size, exp_avg, denom)
# Add weight decay at the end (fixed version)
if (len(p.size()) > 1 or group['vector_l2']) and group['weight_decay'] > 0:
p.data.add_(-lr_scheduled * group['weight_decay'], p.data)
return loss
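# Minimal usage sketch (hypothetical values; mirrors BertAdam usage):
#   optimizer = OpenAIAdam(model.parameters(), lr=6.25e-5, warmup=0.002,
#                          t_total=n_updates, max_grad_norm=1.0)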
| 5,517 | 42.109375 | 134 | py |
EmpTransfo | EmpTransfo-master/pytorch_pretrained_bert/__main__.py | # coding: utf8
def main():
import sys
if (len(sys.argv) != 4 and len(sys.argv) != 5) or sys.argv[1] not in [
"convert_tf_checkpoint_to_pytorch",
"convert_openai_checkpoint",
"convert_transfo_xl_checkpoint",
"convert_gpt2_checkpoint",
]:
print(
"Should be used as one of: \n"
">> `pytorch_pretrained_bert convert_tf_checkpoint_to_pytorch TF_CHECKPOINT TF_CONFIG PYTORCH_DUMP_OUTPUT`, \n"
">> `pytorch_pretrained_bert convert_openai_checkpoint OPENAI_GPT_CHECKPOINT_FOLDER_PATH PYTORCH_DUMP_OUTPUT [OPENAI_GPT_CONFIG]`, \n"
">> `pytorch_pretrained_bert convert_transfo_xl_checkpoint TF_CHECKPOINT_OR_DATASET PYTORCH_DUMP_OUTPUT [TF_CONFIG]` or \n"
">> `pytorch_pretrained_bert convert_gpt2_checkpoint TF_CHECKPOINT PYTORCH_DUMP_OUTPUT [GPT2_CONFIG]`")
else:
if sys.argv[1] == "convert_tf_checkpoint_to_pytorch":
try:
from .convert_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
print("pytorch_pretrained_bert can only be used from the commandline to convert TensorFlow models in PyTorch, "
"In that case, it requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions.")
raise
if len(sys.argv) != 5:
# pylint: disable=line-too-long
print("Should be used as `pytorch_pretrained_bert convert_tf_checkpoint_to_pytorch TF_CHECKPOINT TF_CONFIG PYTORCH_DUMP_OUTPUT`")
else:
PYTORCH_DUMP_OUTPUT = sys.argv.pop()
TF_CONFIG = sys.argv.pop()
TF_CHECKPOINT = sys.argv.pop()
convert_tf_checkpoint_to_pytorch(TF_CHECKPOINT, TF_CONFIG, PYTORCH_DUMP_OUTPUT)
elif sys.argv[1] == "convert_openai_checkpoint":
from .convert_openai_checkpoint_to_pytorch import convert_openai_checkpoint_to_pytorch
OPENAI_GPT_CHECKPOINT_FOLDER_PATH = sys.argv[2]
PYTORCH_DUMP_OUTPUT = sys.argv[3]
if len(sys.argv) == 5:
OPENAI_GPT_CONFIG = sys.argv[4]
else:
OPENAI_GPT_CONFIG = ""
convert_openai_checkpoint_to_pytorch(OPENAI_GPT_CHECKPOINT_FOLDER_PATH,
OPENAI_GPT_CONFIG,
PYTORCH_DUMP_OUTPUT)
elif sys.argv[1] == "convert_transfo_xl_checkpoint":
try:
from .convert_transfo_xl_checkpoint_to_pytorch import convert_transfo_xl_checkpoint_to_pytorch
except ImportError:
print("pytorch_pretrained_bert can only be used from the commandline to convert TensorFlow models in PyTorch, "
"In that case, it requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions.")
raise
if 'ckpt' in sys.argv[2].lower():
TF_CHECKPOINT = sys.argv[2]
TF_DATASET_FILE = ""
else:
TF_DATASET_FILE = sys.argv[2]
TF_CHECKPOINT = ""
PYTORCH_DUMP_OUTPUT = sys.argv[3]
if len(sys.argv) == 5:
TF_CONFIG = sys.argv[4]
else:
TF_CONFIG = ""
convert_transfo_xl_checkpoint_to_pytorch(TF_CHECKPOINT, TF_CONFIG, PYTORCH_DUMP_OUTPUT, TF_DATASET_FILE)
else:
try:
from .convert_gpt2_checkpoint_to_pytorch import convert_gpt2_checkpoint_to_pytorch
except ImportError:
print("pytorch_pretrained_bert can only be used from the commandline to convert TensorFlow models in PyTorch, "
"In that case, it requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions.")
raise
TF_CHECKPOINT = sys.argv[2]
PYTORCH_DUMP_OUTPUT = sys.argv[3]
if len(sys.argv) == 5:
TF_CONFIG = sys.argv[4]
else:
TF_CONFIG = ""
convert_gpt2_checkpoint_to_pytorch(TF_CHECKPOINT, TF_CONFIG, PYTORCH_DUMP_OUTPUT)
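# Example invocations (all paths are placeholders):
#   python -m pytorch_pretrained_bert convert_openai_checkpoint ./openai_ckpt ./out_dir
#   python -m pytorch_pretrained_bert convert_gpt2_checkpoint ./gpt2_ckpt ./out_dir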
if __name__ == '__main__':
main()
| 4,393 | 51.309524 | 145 | py |
EmpTransfo | EmpTransfo-master/pytorch_pretrained_bert/convert_gpt2_checkpoint_to_pytorch.py | # coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert OpenAI GPT checkpoint."""
from __future__ import absolute_import, division, print_function
import argparse
from io import open
import torch
from pytorch_pretrained_bert.modeling_gpt2 import (CONFIG_NAME, WEIGHTS_NAME,
GPT2Config,
GPT2Model,
load_tf_weights_in_gpt2)
def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path):
# Construct model
if gpt2_config_file == "":
config = GPT2Config()
else:
config = GPT2Config(gpt2_config_file)
model = GPT2Model(config)
# Load weights from numpy
load_tf_weights_in_gpt2(model, gpt2_checkpoint_path)
# Save pytorch-model
pytorch_weights_dump_path = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
pytorch_config_dump_path = pytorch_dump_folder_path + '/' + CONFIG_NAME
print("Save PyTorch model to {}".format(pytorch_weights_dump_path))
torch.save(model.state_dict(), pytorch_weights_dump_path)
print("Save configuration file to {}".format(pytorch_config_dump_path))
with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
f.write(config.to_json_string())
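# Example command line (placeholder paths):
#   python convert_gpt2_checkpoint_to_pytorch.py \
#     --gpt2_checkpoint_path ./gpt2/model.ckpt --pytorch_dump_folder_path ./out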
if __name__ == "__main__":
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--gpt2_checkpoint_path",
default = None,
type = str,
required = True,
help = "Path the TensorFlow checkpoint path.")
parser.add_argument("--pytorch_dump_folder_path",
default = None,
type = str,
required = True,
help = "Path to the output PyTorch model.")
parser.add_argument("--gpt2_config_file",
default = "",
type = str,
help = "An optional config json file corresponding to the pre-trained OpenAI model. \n"
"This specifies the model architecture.")
args = parser.parse_args()
convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path,
args.gpt2_config_file,
args.pytorch_dump_folder_path)
| 3,017 | 40.342466 | 111 | py |
EmpTransfo | EmpTransfo-master/pytorch_pretrained_bert/convert_openai_checkpoint_to_pytorch.py | # coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert OpenAI GPT checkpoint."""
from __future__ import absolute_import, division, print_function
import argparse
from io import open
import torch
from pytorch_pretrained_bert.modeling_openai import (CONFIG_NAME, WEIGHTS_NAME,
OpenAIGPTConfig,
OpenAIGPTModel,
load_tf_weights_in_openai_gpt)
def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path, openai_config_file, pytorch_dump_folder_path):
# Construct model
if openai_config_file == "":
config = OpenAIGPTConfig()
else:
config = OpenAIGPTConfig(openai_config_file)
model = OpenAIGPTModel(config)
# Load weights from numpy
load_tf_weights_in_openai_gpt(model, openai_checkpoint_folder_path)
# Save pytorch-model
pytorch_weights_dump_path = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
pytorch_config_dump_path = pytorch_dump_folder_path + '/' + CONFIG_NAME
print("Save PyTorch model to {}".format(pytorch_weights_dump_path))
torch.save(model.state_dict(), pytorch_weights_dump_path)
print("Save configuration file to {}".format(pytorch_config_dump_path))
with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
f.write(config.to_json_string())
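# A minimal sketch (not part of the original script) of calling the converter
# directly; the paths below are hypothetical placeholders.
def _demo_convert_openai(checkpoint_folder="/tmp/openai-gpt",
                         config_file="",
                         dump_dir="/tmp/openai-gpt-pytorch"):
    # An empty config_file string selects the default OpenAIGPTConfig, as above.
    convert_openai_checkpoint_to_pytorch(checkpoint_folder, config_file, dump_dir)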
if __name__ == "__main__":
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--openai_checkpoint_folder_path",
default = None,
type = str,
required = True,
                        help = "Path to the TensorFlow checkpoint folder.")
parser.add_argument("--pytorch_dump_folder_path",
default = None,
type = str,
required = True,
help = "Path to the output PyTorch model.")
parser.add_argument("--openai_config_file",
default = "",
type = str,
help = "An optional config json file corresponding to the pre-trained OpenAI model. \n"
"This specifies the model architecture.")
args = parser.parse_args()
convert_openai_checkpoint_to_pytorch(args.openai_checkpoint_folder_path,
args.openai_config_file,
args.pytorch_dump_folder_path)
| 3,106 | 41.561644 | 118 | py |
EmpTransfo | EmpTransfo-master/pytorch_pretrained_bert/tokenization.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes."""
from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import logging
import os
import unicodedata
from io import open
from .file_utils import cached_path
logger = logging.getLogger(__name__)
PRETRAINED_VOCAB_ARCHIVE_MAP = {
'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt",
'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-vocab.txt",
'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-vocab.txt",
'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-vocab.txt",
'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-vocab.txt",
'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-vocab.txt",
'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-vocab.txt",
'bert-base-german-cased': "https://int-deepset-models-bert.s3.eu-central-1.amazonaws.com/pytorch/bert-base-german-cased-vocab.txt",
}
PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP = {
'bert-base-uncased': 512,
'bert-large-uncased': 512,
'bert-base-cased': 512,
'bert-large-cased': 512,
'bert-base-multilingual-uncased': 512,
'bert-base-multilingual-cased': 512,
'bert-base-chinese': 512,
'bert-base-german-cased': 512,
}
VOCAB_NAME = 'vocab.txt'
def load_vocab(vocab_file):
"""Loads a vocabulary file into a dictionary."""
vocab = collections.OrderedDict()
index = 0
with open(vocab_file, "r", encoding="utf-8") as reader:
while True:
token = reader.readline()
if not token:
break
token = token.strip()
vocab[token] = index
index += 1
return vocab
def whitespace_tokenize(text):
"""Runs basic whitespace cleaning and splitting on a piece of text."""
text = text.strip()
if not text:
return []
tokens = text.split()
return tokens
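# A tiny sketch (not in the original file) of whitespace_tokenize on messy
# input; the expected values follow directly from strip() + split() above.
def _demo_whitespace_tokenize():
    assert whitespace_tokenize("  Hello   world \n") == ["Hello", "world"]
    assert whitespace_tokenize("   ") == []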
class BertTokenizer(object):
"""Runs end-to-end tokenization: punctuation splitting + wordpiece"""
def __init__(self, vocab_file, do_lower_case=True, max_len=None, do_basic_tokenize=True,
never_split=("[UNK]", "[SEP]", "[PAD]", "[CLS]",
"[MASK]", "[BOS]", "[EOS]", "[SPEAKER1]", "[SPEAKER2]")):
"""Constructs a BertTokenizer.
Args:
vocab_file: Path to a one-wordpiece-per-line vocabulary file
do_lower_case: Whether to lower case the input
                Only has an effect when do_basic_tokenize=True
do_basic_tokenize: Whether to do basic tokenization before wordpiece.
max_len: An artificial maximum length to truncate tokenized sequences to;
Effective maximum length is always the minimum of this
value (if specified) and the underlying BERT model's
sequence length.
never_split: List of tokens which will never be split during tokenization.
                Only has an effect when do_basic_tokenize=True
"""
if not os.path.isfile(vocab_file):
raise ValueError(
"Can't find a vocabulary file at path '{}'. To load the vocabulary from a Google pretrained "
"model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file))
self.vocab = load_vocab(vocab_file)
self.ids_to_tokens = collections.OrderedDict(
[(ids, tok) for tok, ids in self.vocab.items()])
self.do_basic_tokenize = do_basic_tokenize
if do_basic_tokenize:
self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case,
never_split=never_split)
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)
self.max_len = max_len if max_len is not None else int(1e12)
def tokenize(self, text):
split_tokens = []
if self.do_basic_tokenize:
for token in self.basic_tokenizer.tokenize(text):
for sub_token in self.wordpiece_tokenizer.tokenize(token):
split_tokens.append(sub_token)
else:
split_tokens = self.wordpiece_tokenizer.tokenize(text)
return split_tokens
def convert_tokens_to_ids(self, tokens):
"""Converts a sequence of tokens into ids using the vocab."""
ids = []
for token in tokens:
ids.append(self.vocab[token])
if len(ids) > self.max_len:
logger.warning(
"Token indices sequence length is longer than the specified maximum "
" sequence length for this BERT model ({} > {}). Running this"
" sequence through BERT will result in indexing errors".format(len(ids), self.max_len)
)
return ids
def convert_ids_to_tokens(self, ids):
"""Converts a sequence of ids in wordpiece tokens using the vocab."""
tokens = []
for i in ids:
tokens.append(self.ids_to_tokens[i])
return tokens
def save_vocabulary(self, vocab_path):
"""Save the tokenizer vocabulary to a directory or file."""
index = 0
if os.path.isdir(vocab_path):
vocab_file = os.path.join(vocab_path, VOCAB_NAME)
with open(vocab_file, "w", encoding="utf-8") as writer:
for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
if index != token_index:
logger.warning("Saving vocabulary to {}: vocabulary indices are not consecutive."
" Please check that the vocabulary is not corrupted!".format(vocab_file))
index = token_index
writer.write(token + u'\n')
index += 1
return vocab_file
def encode(self, text):
return self.convert_tokens_to_ids(self.tokenize(text))
def decode(self, tokens, skip_special_tokens=False, clean_up_tokenization_spaces=True):
text = ' '.join(self.convert_ids_to_tokens(tokens))
if clean_up_tokenization_spaces:
text = text.replace('<unk>', '')
            text = (text.replace(' .', '.').replace(' ?', '?').replace(' !', '!')
                    .replace(' ,', ',').replace(" ' ", "'").replace(" n't", "n't")
                    .replace(" 'm", "'m").replace(" do not", " don't")
                    .replace(" 's", "'s").replace(" 've", "'ve").replace(" 're", "'re"))
return text
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, cache_dir=None, *inputs, **kwargs):
"""
Instantiate a PreTrainedBertModel from a pre-trained model file.
Download and cache the pre-trained model file if needed.
"""
if pretrained_model_name_or_path in PRETRAINED_VOCAB_ARCHIVE_MAP:
vocab_file = PRETRAINED_VOCAB_ARCHIVE_MAP[pretrained_model_name_or_path]
if '-cased' in pretrained_model_name_or_path and kwargs.get('do_lower_case', True):
logger.warning("The pre-trained model you are loading is a cased model but you have not set "
"`do_lower_case` to False. We are setting `do_lower_case=False` for you but "
"you may want to check this behavior.")
kwargs['do_lower_case'] = False
elif '-cased' not in pretrained_model_name_or_path and not kwargs.get('do_lower_case', True):
logger.warning("The pre-trained model you are loading is an uncased model but you have set "
"`do_lower_case` to False. We are setting `do_lower_case=True` for you "
"but you may want to check this behavior.")
kwargs['do_lower_case'] = True
else:
vocab_file = pretrained_model_name_or_path
if os.path.isdir(vocab_file):
vocab_file = os.path.join(vocab_file, VOCAB_NAME)
# redirect to the cache, if necessary
try:
resolved_vocab_file = cached_path(vocab_file, cache_dir=cache_dir)
except EnvironmentError:
logger.error(
"Model name '{}' was not found in model name list ({}). "
"We assumed '{}' was a path or url but couldn't find any file "
"associated to this path or url.".format(
pretrained_model_name_or_path,
', '.join(PRETRAINED_VOCAB_ARCHIVE_MAP.keys()),
vocab_file))
return None
if resolved_vocab_file == vocab_file:
logger.info("loading vocabulary file {}".format(vocab_file))
else:
logger.info("loading vocabulary file {} from cache at {}".format(
vocab_file, resolved_vocab_file))
if pretrained_model_name_or_path in PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP:
            # if we're using a pretrained model, ensure the tokenizer won't index sequences longer
# than the number of positional embeddings
max_len = PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP[pretrained_model_name_or_path]
kwargs['max_len'] = min(kwargs.get('max_len', int(1e12)), max_len)
# Instantiate tokenizer.
tokenizer = cls(resolved_vocab_file, *inputs, **kwargs)
return tokenizer
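# A minimal usage sketch (the vocab path is a hypothetical placeholder) of the
# round trip this class exposes: encode == tokenize + convert_tokens_to_ids,
# and decode maps ids back to cleaned-up text.
def _demo_bert_tokenizer(vocab_file="vocab.txt"):
    tokenizer = BertTokenizer(vocab_file, do_lower_case=True)
    ids = tokenizer.encode("unaffable")
    return tokenizer.decode(ids)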
class BasicTokenizer(object):
"""Runs basic tokenization (punctuation splitting, lower casing, etc.)."""
def __init__(self,
do_lower_case=True,
never_split=("[UNK]", "[SEP]", "[PAD]", "[CLS]", "[MASK]")):
"""Constructs a BasicTokenizer.
Args:
do_lower_case: Whether to lower case the input.
"""
self.do_lower_case = do_lower_case
self.never_split = never_split
def tokenize(self, text):
"""Tokenizes a piece of text."""
text = self._clean_text(text)
# This was added on November 1st, 2018 for the multilingual and Chinese
# models. This is also applied to the English models now, but it doesn't
# matter since the English models were not trained on any Chinese data
# and generally don't have any Chinese data in them (there are Chinese
# characters in the vocabulary because Wikipedia does have some Chinese
        # words in the English Wikipedia).
text = self._tokenize_chinese_chars(text)
orig_tokens = whitespace_tokenize(text)
split_tokens = []
for token in orig_tokens:
if self.do_lower_case and token not in self.never_split:
token = token.lower()
token = self._run_strip_accents(token)
split_tokens.extend(self._run_split_on_punc(token))
output_tokens = whitespace_tokenize(" ".join(split_tokens))
return output_tokens
def _run_strip_accents(self, text):
"""Strips accents from a piece of text."""
text = unicodedata.normalize("NFD", text)
output = []
for char in text:
cat = unicodedata.category(char)
if cat == "Mn":
continue
output.append(char)
return "".join(output)
def _run_split_on_punc(self, text):
"""Splits punctuation on a piece of text."""
if text in self.never_split:
return [text]
chars = list(text)
i = 0
start_new_word = True
output = []
while i < len(chars):
char = chars[i]
if _is_punctuation(char):
output.append([char])
start_new_word = True
else:
if start_new_word:
output.append([])
start_new_word = False
output[-1].append(char)
i += 1
return ["".join(x) for x in output]
def _tokenize_chinese_chars(self, text):
"""Adds whitespace around any CJK character."""
output = []
for char in text:
cp = ord(char)
if self._is_chinese_char(cp):
output.append(" ")
output.append(char)
output.append(" ")
else:
output.append(char)
return "".join(output)
def _is_chinese_char(self, cp):
"""Checks whether CP is the codepoint of a CJK character."""
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
        # like all of the other languages.
if ((cp >= 0x4E00 and cp <= 0x9FFF) or #
(cp >= 0x3400 and cp <= 0x4DBF) or #
(cp >= 0x20000 and cp <= 0x2A6DF) or #
(cp >= 0x2A700 and cp <= 0x2B73F) or #
(cp >= 0x2B740 and cp <= 0x2B81F) or #
(cp >= 0x2B820 and cp <= 0x2CEAF) or
(cp >= 0xF900 and cp <= 0xFAFF) or #
(cp >= 0x2F800 and cp <= 0x2FA1F)): #
return True
return False
def _clean_text(self, text):
"""Performs invalid character removal and whitespace cleanup on text."""
output = []
for char in text:
cp = ord(char)
if cp == 0 or cp == 0xfffd or _is_control(char):
continue
if _is_whitespace(char):
output.append(" ")
else:
output.append(char)
return "".join(output)
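# A short sketch of BasicTokenizer on accented, punctuated input; the expected
# output is an assumption that follows the lowercasing, accent-stripping and
# punctuation-splitting rules implemented above.
def _demo_basic_tokenizer():
    bt = BasicTokenizer(do_lower_case=True)
    return bt.tokenize(u"Héllo, World!")  # -> ["hello", ",", "world", "!"]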
class WordpieceTokenizer(object):
"""Runs WordPiece tokenization."""
def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=100):
self.vocab = vocab
self.unk_token = unk_token
self.max_input_chars_per_word = max_input_chars_per_word
def tokenize(self, text):
"""Tokenizes a piece of text into its word pieces.
This uses a greedy longest-match-first algorithm to perform tokenization
using the given vocabulary.
For example:
input = "unaffable"
output = ["un", "##aff", "##able"]
Args:
text: A single token or whitespace separated tokens. This should have
already been passed through `BasicTokenizer`.
Returns:
A list of wordpiece tokens.
"""
output_tokens = []
for token in whitespace_tokenize(text):
chars = list(token)
if len(chars) > self.max_input_chars_per_word:
output_tokens.append(self.unk_token)
continue
is_bad = False
start = 0
sub_tokens = []
while start < len(chars):
end = len(chars)
cur_substr = None
while start < end:
substr = "".join(chars[start:end])
if start > 0:
substr = "##" + substr
if substr in self.vocab:
cur_substr = substr
break
end -= 1
if cur_substr is None:
is_bad = True
break
sub_tokens.append(cur_substr)
start = end
if is_bad:
output_tokens.append(self.unk_token)
else:
output_tokens.extend(sub_tokens)
return output_tokens
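# A self-contained sketch of the greedy longest-match-first algorithm with a
# toy vocabulary (the vocabulary below is an assumption for illustration only;
# it reproduces the "unaffable" example from the docstring).
def _demo_wordpiece_tokenizer():
    vocab = {"un": 0, "##aff": 1, "##able": 2, "[UNK]": 3}
    wp = WordpieceTokenizer(vocab=vocab)
    return wp.tokenize("unaffable")  # -> ["un", "##aff", "##able"]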
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
    # \t, \n, and \r are technically control characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat.startswith("C"):
return True
return False
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
(cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
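# A quick sketch exercising the three character-class predicates above.
def _demo_char_classes():
    assert _is_whitespace(" ") and _is_whitespace("\t")
    assert _is_control("\x00") and not _is_control("\n")
    assert _is_punctuation("$") and not _is_punctuation("a")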
| 18,169 | 40.770115 | 135 | py |
EmpTransfo | EmpTransfo-master/pytorch_pretrained_bert/modeling.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model."""
from __future__ import absolute_import, division, print_function, unicode_literals
import copy
import json
import logging
import math
import os
import sys
from io import open
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from .file_utils import cached_path, WEIGHTS_NAME, CONFIG_NAME
#from pytorch_pretrained_bert.modeling_openai import OpenAIGPTModel, OpenAIGPTLMHead
logger = logging.getLogger(__name__)
PRETRAINED_MODEL_ARCHIVE_MAP = {
'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-pytorch_model.bin",
'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-pytorch_model.bin",
'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-pytorch_model.bin",
'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-pytorch_model.bin",
'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-pytorch_model.bin",
'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-pytorch_model.bin",
'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-pytorch_model.bin",
'bert-base-german-cased': "https://int-deepset-models-bert.s3.eu-central-1.amazonaws.com/pytorch/bert-base-german-cased-pytorch_model.bin",
'bert-large-uncased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-pytorch_model.bin",
'bert-large-cased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-pytorch_model.bin",
'bert-large-uncased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-finetuned-squad-pytorch_model.bin",
'bert-large-cased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-finetuned-squad-pytorch_model.bin",
'bert-base-cased-finetuned-mrpc': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-finetuned-mrpc-pytorch_model.bin",
}
PRETRAINED_CONFIG_ARCHIVE_MAP = {
'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-config.json",
'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-config.json",
'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-config.json",
'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-config.json",
'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-config.json",
'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-config.json",
'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-config.json",
'bert-base-german-cased': "https://int-deepset-models-bert.s3.eu-central-1.amazonaws.com/pytorch/bert-base-german-cased-config.json",
'bert-large-uncased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-config.json",
'bert-large-cased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-config.json",
'bert-large-uncased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-finetuned-squad-config.json",
'bert-large-cased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-finetuned-squad-config.json",
'bert-base-cased-finetuned-mrpc': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-finetuned-mrpc-config.json",
}
BERT_CONFIG_NAME = 'bert_config.json'
TF_WEIGHTS_NAME = 'model.ckpt'
def prune_linear_layer(layer, index, dim=0):
""" Prune a linear layer (a model parameters) to keep only entries in index.
Return the pruned layer as a new layer with requires_grad=True.
Used to remove heads.
"""
index = index.to(layer.weight.device)
W = layer.weight.index_select(dim, index).clone().detach()
if layer.bias is not None:
if dim == 1:
b = layer.bias.clone().detach()
else:
b = layer.bias[index].clone().detach()
new_size = list(layer.weight.size())
new_size[dim] = len(index)
new_layer = nn.Linear(new_size[1], new_size[0], bias=layer.bias is not None).to(layer.weight.device)
new_layer.weight.requires_grad = False
new_layer.weight.copy_(W.contiguous())
new_layer.weight.requires_grad = True
if layer.bias is not None:
new_layer.bias.requires_grad = False
new_layer.bias.copy_(b.contiguous())
new_layer.bias.requires_grad = True
return new_layer
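# A minimal sketch of prune_linear_layer: keep output units 0 and 2 of a toy
# nn.Linear (sizes and indices here are arbitrary, for illustration only).
def _demo_prune_linear_layer():
    layer = nn.Linear(3, 4)
    pruned = prune_linear_layer(layer, torch.tensor([0, 2]), dim=0)
    return pruned.weight.shape  # torch.Size([2, 3])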
def load_tf_weights_in_bert(model, tf_checkpoint_path):
""" Load tf checkpoints in a pytorch model
"""
try:
import re
import numpy as np
import tensorflow as tf
except ImportError:
        print("Loading TensorFlow models in PyTorch requires TensorFlow to be installed. Please see "
              "https://www.tensorflow.org/install/ for installation instructions.")
raise
tf_path = os.path.abspath(tf_checkpoint_path)
print("Converting TensorFlow checkpoint from {}".format(tf_path))
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
arrays = []
for name, shape in init_vars:
print("Loading TF weight {} with shape {}".format(name, shape))
array = tf.train.load_variable(tf_path, name)
names.append(name)
arrays.append(array)
for name, array in zip(names, arrays):
name = name.split('/')
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v
# which are not required for using pretrained model
if any(n in ["adam_v", "adam_m", "global_step"] for n in name):
print("Skipping {}".format("/".join(name)))
continue
pointer = model
for m_name in name:
if re.fullmatch(r'[A-Za-z]+_\d+', m_name):
l = re.split(r'_(\d+)', m_name)
else:
l = [m_name]
if l[0] == 'kernel' or l[0] == 'gamma':
pointer = getattr(pointer, 'weight')
elif l[0] == 'output_bias' or l[0] == 'beta':
pointer = getattr(pointer, 'bias')
elif l[0] == 'output_weights':
pointer = getattr(pointer, 'weight')
elif l[0] == 'squad':
pointer = getattr(pointer, 'classifier')
else:
try:
pointer = getattr(pointer, l[0])
except AttributeError:
print("Skipping {}".format("/".join(name)))
continue
if len(l) >= 2:
num = int(l[1])
pointer = pointer[num]
if m_name[-11:] == '_embeddings':
pointer = getattr(pointer, 'weight')
elif m_name == 'kernel':
array = np.transpose(array)
try:
assert pointer.shape == array.shape
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
print("Initialize PyTorch weight {}".format(name))
pointer.data = torch.from_numpy(array)
return model
def gelu(x):
"""Implementation of the gelu activation function.
For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
Also see https://arxiv.org/abs/1606.08415
"""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
def swish(x):
return x * torch.sigmoid(x)
ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish}
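# A small numeric sketch comparing the exact erf-based gelu above with the tanh
# approximation used by OpenAI GPT (quoted in the gelu docstring).
def _demo_gelu_vs_tanh_approx():
    x = torch.linspace(-3, 3, 7)
    approx = 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
    return torch.max(torch.abs(gelu(x) - approx))  # small (~1e-3 or less) on this range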
class BertConfig(object):
"""Configuration class to store the configuration of a `BertModel`.
"""
def __init__(self,
vocab_size_or_config_json_file,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02,
layer_norm_eps=1e-12):
"""Constructs BertConfig.
Args:
            vocab_size_or_config_json_file: Vocabulary size of `input_ids` in `BertModel`.
hidden_size: Size of the encoder layers and the pooler layer.
num_hidden_layers: Number of hidden layers in the Transformer encoder.
num_attention_heads: Number of attention heads for each attention layer in
the Transformer encoder.
intermediate_size: The size of the "intermediate" (i.e., feed-forward)
layer in the Transformer encoder.
hidden_act: The non-linear activation function (function or string) in the
encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
            hidden_dropout_prob: The dropout probability for all fully connected
layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob: The dropout ratio for the attention
probabilities.
max_position_embeddings: The maximum sequence length that this model might
ever be used with. Typically set this to something large just in case
(e.g., 512 or 1024 or 2048).
type_vocab_size: The vocabulary size of the `token_type_ids` passed into
`BertModel`.
            initializer_range: The stddev of the truncated_normal_initializer for
initializing all weight matrices.
layer_norm_eps: The epsilon used by LayerNorm.
"""
if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2
and isinstance(vocab_size_or_config_json_file, unicode)):
with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
json_config = json.loads(reader.read())
for key, value in json_config.items():
self.__dict__[key] = value
elif isinstance(vocab_size_or_config_json_file, int):
self.vocab_size = vocab_size_or_config_json_file
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
else:
            raise ValueError("First argument must be either a vocabulary size (int) "
"or the path to a pretrained model config file (str)")
@classmethod
def from_dict(cls, json_object):
"""Constructs a `BertConfig` from a Python dictionary of parameters."""
config = BertConfig(vocab_size_or_config_json_file=-1)
for key, value in json_object.items():
config.__dict__[key] = value
return config
@classmethod
def from_json_file(cls, json_file):
"""Constructs a `BertConfig` from a json file of parameters."""
with open(json_file, "r", encoding='utf-8') as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
def __repr__(self):
return str(self.to_json_string())
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
def to_json_file(self, json_file_path):
""" Save this instance to a json file."""
with open(json_file_path, "w", encoding='utf-8') as writer:
writer.write(self.to_json_string())
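# A minimal round-trip sketch for BertConfig: build from an int vocab size,
# serialize with to_json_string, and rebuild via from_dict.
def _demo_config_roundtrip():
    config = BertConfig(vocab_size_or_config_json_file=32000)
    clone = BertConfig.from_dict(json.loads(config.to_json_string()))
    return clone.hidden_size == config.hidden_size  # True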
try:
from apex.normalization.fused_layer_norm import FusedLayerNorm as BertLayerNorm
except ImportError:
logger.info("Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex .")
class BertLayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-12):
"""Construct a layernorm module in the TF style (epsilon inside the square root).
"""
super(BertLayerNorm, self).__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.weight * x + self.bias
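# A quick sketch: at initialization (weight=1, bias=0) BertLayerNorm reduces
# the last dimension to roughly zero mean and unit variance.
def _demo_layer_norm():
    ln = BertLayerNorm(8)
    y = ln(torch.randn(2, 8))
    return y.mean(-1).abs().max()  # close to 0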
class BertEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings.
"""
def __init__(self, config):
super(BertEmbeddings, self).__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_ids, token_type_ids=None):
seq_length = input_ids.size(1)
position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
words_embeddings = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = words_embeddings + position_embeddings + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
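# A shape-level sketch of BertEmbeddings with a toy config (the sizes here are
# illustrative assumptions, not the BERT-base defaults).
def _demo_embeddings_shape():
    config = BertConfig(vocab_size_or_config_json_file=100, hidden_size=16,
                        num_hidden_layers=1, num_attention_heads=2,
                        intermediate_size=32)
    emb = BertEmbeddings(config)
    return emb(torch.tensor([[1, 2, 3]])).shape  # torch.Size([1, 3, 16])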
class BertSelfAttention(nn.Module):
def __init__(self, config, output_attentions=False, keep_multihead_output=False):
super(BertSelfAttention, self).__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads))
self.output_attentions = output_attentions
self.keep_multihead_output = keep_multihead_output
self.multihead_output = None
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, hidden_states, attention_mask, head_mask=None):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        # Apply the attention mask (precomputed for all layers in the BertModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
if self.keep_multihead_output:
self.multihead_output = context_layer
self.multihead_output.retain_grad()
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
if self.output_attentions:
return attention_probs, context_layer
return context_layer
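# A shape sketch of BertSelfAttention with an all-zeros additive mask (toy
# sizes; the mask convention matches the one built in BertModel.forward below).
def _demo_self_attention_shape():
    config = BertConfig(vocab_size_or_config_json_file=100, hidden_size=16,
                        num_hidden_layers=1, num_attention_heads=2,
                        intermediate_size=32)
    attn = BertSelfAttention(config)
    hidden = torch.randn(1, 4, 16)
    mask = torch.zeros(1, 1, 1, 4)  # additive mask: 0.0 means attend everywhere
    return attn(hidden, mask).shape  # torch.Size([1, 4, 16])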
class BertSelfOutput(nn.Module):
def __init__(self, config):
super(BertSelfOutput, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertAttention(nn.Module):
def __init__(self, config, output_attentions=False, keep_multihead_output=False):
super(BertAttention, self).__init__()
self.output_attentions = output_attentions
self.self = BertSelfAttention(config, output_attentions=output_attentions,
keep_multihead_output=keep_multihead_output)
self.output = BertSelfOutput(config)
def prune_heads(self, heads):
if len(heads) == 0:
return
mask = torch.ones(self.self.num_attention_heads, self.self.attention_head_size)
for head in heads:
mask[head] = 0
mask = mask.view(-1).contiguous().eq(1)
index = torch.arange(len(mask))[mask].long()
# Prune linear layers
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
# Update hyper params
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
def forward(self, input_tensor, attention_mask, head_mask=None):
self_output = self.self(input_tensor, attention_mask, head_mask)
if self.output_attentions:
attentions, self_output = self_output
attention_output = self.output(self_output, input_tensor)
if self.output_attentions:
return attentions, attention_output
return attention_output
class BertIntermediate(nn.Module):
def __init__(self, config):
super(BertIntermediate, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str) or (sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode)):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class BertOutput(nn.Module):
def __init__(self, config):
super(BertOutput, self).__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertLayer(nn.Module):
def __init__(self, config, output_attentions=False, keep_multihead_output=False):
super(BertLayer, self).__init__()
self.output_attentions = output_attentions
self.attention = BertAttention(config, output_attentions=output_attentions,
keep_multihead_output=keep_multihead_output)
self.intermediate = BertIntermediate(config)
self.output = BertOutput(config)
def forward(self, hidden_states, attention_mask, head_mask=None):
attention_output = self.attention(hidden_states, attention_mask, head_mask)
if self.output_attentions:
attentions, attention_output = attention_output
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
if self.output_attentions:
return attentions, layer_output
return layer_output
class BertEncoder(nn.Module):
def __init__(self, config, output_attentions=False, keep_multihead_output=False):
super(BertEncoder, self).__init__()
self.output_attentions = output_attentions
layer = BertLayer(config, output_attentions=output_attentions,
keep_multihead_output=keep_multihead_output)
self.layer = nn.ModuleList([copy.deepcopy(layer) for _ in range(config.num_hidden_layers)])
def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True, head_mask=None):
all_encoder_layers = []
all_attentions = []
for i, layer_module in enumerate(self.layer):
hidden_states = layer_module(hidden_states, attention_mask, head_mask[i])
if self.output_attentions:
attentions, hidden_states = hidden_states
all_attentions.append(attentions)
if output_all_encoded_layers:
all_encoder_layers.append(hidden_states)
if not output_all_encoded_layers:
all_encoder_layers.append(hidden_states)
if self.output_attentions:
return all_attentions, all_encoder_layers
return all_encoder_layers
class BertPooler(nn.Module):
def __init__(self, config):
super(BertPooler, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class BertPredictionHeadTransform(nn.Module):
def __init__(self, config):
super(BertPredictionHeadTransform, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
if isinstance(config.hidden_act, str) or (sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode)):
self.transform_act_fn = ACT2FN[config.hidden_act]
else:
self.transform_act_fn = config.hidden_act
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class BertLMPredictionHead(nn.Module):
def __init__(self, config, bert_model_embedding_weights):
super(BertLMPredictionHead, self).__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(bert_model_embedding_weights.size(1),
bert_model_embedding_weights.size(0),
bias=False)
self.decoder.weight = bert_model_embedding_weights
self.bias = nn.Parameter(torch.zeros(bert_model_embedding_weights.size(0)))
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states) + self.bias
return hidden_states
class BertOnlyMLMHead(nn.Module):
def __init__(self, config, bert_model_embedding_weights):
super(BertOnlyMLMHead, self).__init__()
self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights)
def forward(self, sequence_output):
prediction_scores = self.predictions(sequence_output)
return prediction_scores
class BertOnlyNSPHead(nn.Module):
def __init__(self, config):
super(BertOnlyNSPHead, self).__init__()
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, pooled_output):
seq_relationship_score = self.seq_relationship(pooled_output)
return seq_relationship_score
class BertPreTrainingHeads(nn.Module):
def __init__(self, config, bert_model_embedding_weights):
super(BertPreTrainingHeads, self).__init__()
self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights)
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, sequence_output, pooled_output):
prediction_scores = self.predictions(sequence_output)
seq_relationship_score = self.seq_relationship(pooled_output)
return prediction_scores, seq_relationship_score
class BertPreTrainedModel(nn.Module):
""" An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
"""
def __init__(self, config, *inputs, **kwargs):
super(BertPreTrainedModel, self).__init__()
if not isinstance(config, BertConfig):
raise ValueError(
"Parameter config in `{}(config)` should be an instance of class `BertConfig`. "
"To create a model from a Google pretrained model use "
"`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
self.__class__.__name__, self.__class__.__name__
))
self.config = config
def init_bert_weights(self, module):
""" Initialize the weights.
"""
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
elif isinstance(module, BertLayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *inputs, **kwargs):
"""
Instantiate a BertPreTrainedModel from a pre-trained model file or a pytorch state dict.
Download and cache the pre-trained model file if needed.
Params:
pretrained_model_name_or_path: either:
- a str with the name of a pre-trained model to load selected in the list of:
. `bert-base-uncased`
. `bert-large-uncased`
. `bert-base-cased`
. `bert-large-cased`
. `bert-base-multilingual-uncased`
. `bert-base-multilingual-cased`
. `bert-base-chinese`
. `bert-base-german-cased`
. `bert-large-uncased-whole-word-masking`
. `bert-large-cased-whole-word-masking`
- a path or url to a pretrained model archive containing:
. `bert_config.json` a configuration file for the model
. `pytorch_model.bin` a PyTorch dump of a BertForPreTraining instance
- a path or url to a pretrained model archive containing:
. `bert_config.json` a configuration file for the model
. `model.chkpt` a TensorFlow checkpoint
from_tf: should we load the weights from a locally saved TensorFlow checkpoint
cache_dir: an optional path to a folder in which the pre-trained models will be cached.
                state_dict: an optional state dictionary (collections.OrderedDict object) to use instead of Google pre-trained models
*inputs, **kwargs: additional input for the specific Bert class
(ex: num_labels for BertForSequenceClassification)
"""
state_dict = kwargs.get('state_dict', None)
kwargs.pop('state_dict', None)
cache_dir = kwargs.get('cache_dir', None)
kwargs.pop('cache_dir', None)
from_tf = kwargs.get('from_tf', False)
kwargs.pop('from_tf', None)
if pretrained_model_name_or_path in PRETRAINED_MODEL_ARCHIVE_MAP:
archive_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name_or_path]
config_file = PRETRAINED_CONFIG_ARCHIVE_MAP[pretrained_model_name_or_path]
else:
if from_tf:
# Directly load from a TensorFlow checkpoint
archive_file = os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME)
config_file = os.path.join(pretrained_model_name_or_path, BERT_CONFIG_NAME)
else:
archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)
config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
# redirect to the cache, if necessary
try:
resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir)
except EnvironmentError:
if pretrained_model_name_or_path in PRETRAINED_MODEL_ARCHIVE_MAP:
logger.error(
"Couldn't reach server at '{}' to download pretrained weights.".format(
archive_file))
else:
logger.error(
"Model name '{}' was not found in model name list ({}). "
"We assumed '{}' was a path or url but couldn't find any file "
"associated to this path or url.".format(
pretrained_model_name_or_path,
', '.join(PRETRAINED_MODEL_ARCHIVE_MAP.keys()),
archive_file))
return None
try:
resolved_config_file = cached_path(config_file, cache_dir=cache_dir)
except EnvironmentError:
if pretrained_model_name_or_path in PRETRAINED_CONFIG_ARCHIVE_MAP:
logger.error(
"Couldn't reach server at '{}' to download pretrained model configuration file.".format(
config_file))
else:
logger.error(
"Model name '{}' was not found in model name list ({}). "
"We assumed '{}' was a path or url but couldn't find any file "
"associated to this path or url.".format(
pretrained_model_name_or_path,
', '.join(PRETRAINED_CONFIG_ARCHIVE_MAP.keys()),
config_file))
return None
if resolved_archive_file == archive_file and resolved_config_file == config_file:
logger.info("loading weights file {}".format(archive_file))
logger.info("loading configuration file {}".format(config_file))
else:
logger.info("loading weights file {} from cache at {}".format(
archive_file, resolved_archive_file))
logger.info("loading configuration file {} from cache at {}".format(
config_file, resolved_config_file))
# Load config
config = BertConfig.from_json_file(resolved_config_file)
logger.info("Model config {}".format(config))
# Instantiate model.
model = cls(config, *inputs, **kwargs)
if state_dict is None and not from_tf:
state_dict = torch.load(resolved_archive_file, map_location='cpu')
if from_tf:
# Directly load from a TensorFlow checkpoint
            return load_tf_weights_in_bert(model, resolved_archive_file)
# Load from a PyTorch state_dict
old_keys = []
new_keys = []
for key in state_dict.keys():
new_key = None
if 'gamma' in key:
new_key = key.replace('gamma', 'weight')
if 'beta' in key:
new_key = key.replace('beta', 'bias')
if new_key:
old_keys.append(key)
new_keys.append(new_key)
for old_key, new_key in zip(old_keys, new_keys):
state_dict[new_key] = state_dict.pop(old_key)
missing_keys = []
unexpected_keys = []
error_msgs = []
# copy state_dict so _load_from_state_dict can modify it
metadata = getattr(state_dict, '_metadata', None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
def load(module, prefix=''):
local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
module._load_from_state_dict(
state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + '.')
start_prefix = ''
if not hasattr(model, 'bert') and any(s.startswith('bert.') for s in state_dict.keys()):
start_prefix = 'bert.'
load(model, prefix=start_prefix)
if len(missing_keys) > 0:
logger.info("Weights of {} not initialized from pretrained model: {}".format(
model.__class__.__name__, missing_keys))
if len(unexpected_keys) > 0:
logger.info("Weights from pretrained model not used in {}: {}".format(
model.__class__.__name__, unexpected_keys))
if len(error_msgs) > 0:
raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format(
model.__class__.__name__, "\n\t".join(error_msgs)))
return model
class BertModel(BertPreTrainedModel):
"""BERT model ("Bidirectional Embedding Representations from a Transformer").
Params:
`config`: a BertConfig class instance with the configuration to build a new model
        `output_attentions`: If True, also output attention weights computed by the model at each layer. Default: False
`keep_multihead_output`: If True, saves output of the multi-head attention module with its gradient.
This can be used to compute head importance metrics. Default: False
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
            type indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`output_all_encoded_layers`: boolean which controls the content of the `encoded_layers` output as described below. Default: `True`.
`head_mask`: an optional torch.Tensor of shape [num_heads] or [num_layers, num_heads] with indices between 0 and 1.
            It's a mask to be used to nullify some heads of the transformer. 1.0 => head is not masked, 0.0 => head is masked.
Outputs: Tuple of (encoded_layers, pooled_output)
        `encoded_layers`: controlled by `output_all_encoded_layers` argument:
- `output_all_encoded_layers=True`: outputs a list of the full sequences of encoded-hidden-states at the end
of each attention block (i.e. 12 full sequences for BERT-base, 24 for BERT-large), each
encoded-hidden-state is a torch.FloatTensor of size [batch_size, sequence_length, hidden_size],
- `output_all_encoded_layers=False`: outputs only the full sequence of hidden-states corresponding
to the last attention block of shape [batch_size, sequence_length, hidden_size],
`pooled_output`: a torch.FloatTensor of size [batch_size, hidden_size] which is the output of a
            classifier pretrained on top of the hidden state associated with the first token of the
input (`CLS`) to train on the Next-Sentence task (see BERT's paper).
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = modeling.BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = modeling.BertModel(config=config)
all_encoder_layers, pooled_output = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config, output_attentions=False, keep_multihead_output=False):
super(BertModel, self).__init__(config)
self.output_attentions = output_attentions
self.embeddings = BertEmbeddings(config)
self.encoder = BertEncoder(config, output_attentions=output_attentions,
keep_multihead_output=keep_multihead_output)
self.pooler = BertPooler(config)
self.apply(self.init_bert_weights)
def prune_heads(self, heads_to_prune):
""" Prunes heads of the model.
heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
def get_multihead_outputs(self):
""" Gather all multi-head outputs.
Return: list (layers) of multihead module outputs with gradients
"""
return [layer.attention.self.multihead_output for layer in self.encoder.layer]
def forward(self, input_ids, token_type_ids=None, attention_mask=None, output_all_encoded_layers=True,
head_mask=None):
if attention_mask is None:
attention_mask = torch.ones_like(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
        # this attention mask is simpler than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
# Prepare head mask if needed
        # 1.0 in head_mask indicates we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        if head_mask is not None:
            if head_mask.dim() == 1:
                head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
                head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1)
            elif head_mask.dim() == 2:
                head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)  # We can specify head_mask for each layer
            head_mask = head_mask.to(dtype=next(self.parameters()).dtype)  # switch to float if needed + fp16 compatibility
        else:
            head_mask = [None] * self.config.num_hidden_layers
embedding_output = self.embeddings(input_ids, token_type_ids)
encoded_layers = self.encoder(embedding_output,
extended_attention_mask,
output_all_encoded_layers=output_all_encoded_layers,
head_mask=head_mask)
if self.output_attentions:
all_attentions, encoded_layers = encoded_layers
sequence_output = encoded_layers[-1]
pooled_output = self.pooler(sequence_output)
if not output_all_encoded_layers:
encoded_layers = encoded_layers[-1]
if self.output_attentions:
return all_attentions, encoded_layers, pooled_output
return encoded_layers, pooled_output
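# A tiny numeric sketch of the additive mask trick used in forward() above:
# mask value 1 maps to 0.0 (attend) and 0 maps to -10000.0, which the softmax
# turns into a near-zero attention probability.
def _demo_extended_attention_mask():
    attention_mask = torch.tensor([[1, 1, 0]])
    ext = attention_mask.unsqueeze(1).unsqueeze(2).float()
    return (1.0 - ext) * -10000.0  # shape [1, 1, 1, 3]; last entry is -10000.0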
class BertForPreTraining(BertPreTrainedModel):
"""BERT model with pre-training heads.
This module comprises the BERT model followed by the two pre-training heads:
- the masked language modeling head, and
- the next sentence classification head.
Params:
`config`: a BertConfig class instance with the configuration to build a new model
        `output_attentions`: If True, also output attention weights computed by the model at each layer. Default: False
`keep_multihead_output`: If True, saves output of the multi-head attention module with its gradient.
This can be used to compute head importance metrics. Default: False
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
            type indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`masked_lm_labels`: optional masked language modeling labels: torch.LongTensor of shape [batch_size, sequence_length]
with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked), the loss
is only computed for the labels set in [0, ..., vocab_size]
`next_sentence_label`: optional next sentence classification loss: torch.LongTensor of shape [batch_size]
with indices selected in [0, 1].
0 => next sentence is the continuation, 1 => next sentence is a random sentence.
`head_mask`: an optional torch.Tensor of shape [num_heads] or [num_layers, num_heads] with indices between 0 and 1.
            It's a mask to be used to nullify some heads of the transformer. 1.0 => head is not masked, 0.0 => head is masked.
Outputs:
if `masked_lm_labels` and `next_sentence_label` are not `None`:
Outputs the total_loss which is the sum of the masked language modeling loss and the next
sentence classification loss.
if `masked_lm_labels` or `next_sentence_label` is `None`:
Outputs a tuple comprising
- the masked language modeling logits of shape [batch_size, sequence_length, vocab_size], and
- the next sentence classification logits of shape [batch_size, 2].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = BertForPreTraining(config)
masked_lm_logits_scores, seq_relationship_logits = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config, output_attentions=False, keep_multihead_output=False):
super(BertForPreTraining, self).__init__(config)
self.output_attentions = output_attentions
self.bert = BertModel(config, output_attentions=output_attentions,
keep_multihead_output=keep_multihead_output)
self.cls = BertPreTrainingHeads(config, self.bert.embeddings.word_embeddings.weight)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None,
next_sentence_label=None, head_mask=None):
outputs = self.bert(input_ids, token_type_ids, attention_mask,
output_all_encoded_layers=False, head_mask=head_mask)
if self.output_attentions:
all_attentions, sequence_output, pooled_output = outputs
else:
sequence_output, pooled_output = outputs
prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
if masked_lm_labels is not None and next_sentence_label is not None:
loss_fct = CrossEntropyLoss(ignore_index=-1)
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
total_loss = masked_lm_loss + next_sentence_loss
return total_loss
elif self.output_attentions:
return all_attentions, prediction_scores, seq_relationship_score
return prediction_scores, seq_relationship_score
class BertForMaskedLM(BertPreTrainedModel):
"""BERT model with the masked language modeling head.
This module comprises the BERT model followed by the masked language modeling head.
Params:
`config`: a BertConfig class instance with the configuration to build a new model
`output_attentions`: If True, also output attentions weights computed by the model at each layer. Default: False
`keep_multihead_output`: If True, saves output of the multi-head attention module with its gradient.
This can be used to compute head importance metrics. Default: False
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`masked_lm_labels`: masked language modeling labels: torch.LongTensor of shape [batch_size, sequence_length]
with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked), the loss
is only computed for the labels set in [0, ..., vocab_size]
`head_mask`: an optional torch.Tensor of shape [num_heads] or [num_layers, num_heads] with indices between 0 and 1.
            It's a mask to be used to nullify some heads of the transformer. 1.0 => head is not masked, 0.0 => head is masked.
Outputs:
if `masked_lm_labels` is not `None`:
Outputs the masked language modeling loss.
if `masked_lm_labels` is `None`:
Outputs the masked language modeling logits of shape [batch_size, sequence_length, vocab_size].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = BertForMaskedLM(config)
masked_lm_logits_scores = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config, output_attentions=False, keep_multihead_output=False):
super(BertForMaskedLM, self).__init__(config)
self.output_attentions = output_attentions
self.bert = BertModel(config, output_attentions=output_attentions,
keep_multihead_output=keep_multihead_output)
self.cls = BertOnlyMLMHead(config, self.bert.embeddings.word_embeddings.weight)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None, head_mask=None):
        # Flatten multiple-choice shaped inputs [batch_size, num_choices, seq_len]
        # to [batch_size * num_choices, seq_len] before feeding the encoder.
        input_ids = input_ids.view(-1, input_ids.size(-1))
        token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
        attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
outputs = self.bert(input_ids, token_type_ids, attention_mask,
output_all_encoded_layers=False,
head_mask=head_mask)
if self.output_attentions:
all_attentions, sequence_output, _ = outputs
else:
sequence_output, _ = outputs
prediction_scores = self.cls(sequence_output)
if masked_lm_labels is not None:
            # Note: unlike a causal LM head, no logit/label shifting is applied
            # here; the labels align position-wise with the (masked) inputs.
loss_fct = CrossEntropyLoss(ignore_index=-1)
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
return masked_lm_loss
elif self.output_attentions:
return all_attentions, prediction_scores
return prediction_scores
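# Hedged usage sketch (illustrative, not part of the original file): this modified
# BertForMaskedLM also accepts multiple-choice shaped inputs, which forward()
# flattens before the encoder. Tensor values below are arbitrary examples.
#
#   input_ids = torch.LongTensor([[[31, 51, 99], [15, 5, 0]]])           # (1, 2, 3)
#   masked_lm_labels = torch.LongTensor([[[-1, 51, -1], [-1, -1, -1]]])  # -1 = ignore
#   loss = model(input_ids, masked_lm_labels=masked_lm_labels)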
class BertForNextSentencePrediction(BertPreTrainedModel):
"""BERT model with next sentence prediction head.
This module comprises the BERT model followed by the next sentence classification head.
Params:
`config`: a BertConfig class instance with the configuration to build a new model
`output_attentions`: If True, also output attentions weights computed by the model at each layer. Default: False
`keep_multihead_output`: If True, saves output of the multi-head attention module with its gradient.
This can be used to compute head importance metrics. Default: False
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`next_sentence_label`: next sentence classification loss: torch.LongTensor of shape [batch_size]
with indices selected in [0, 1].
0 => next sentence is the continuation, 1 => next sentence is a random sentence.
`head_mask`: an optional torch.Tensor of shape [num_heads] or [num_layers, num_heads] with indices between 0 and 1.
            It's a mask to be used to nullify some heads of the transformer. 1.0 => head is not masked, 0.0 => head is masked.
Outputs:
if `next_sentence_label` is not `None`:
Outputs the total_loss which is the sum of the masked language modeling loss and the next
sentence classification loss.
if `next_sentence_label` is `None`:
Outputs the next sentence classification logits of shape [batch_size, 2].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = BertForNextSentencePrediction(config)
seq_relationship_logits = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config, output_attentions=False, keep_multihead_output=False):
super(BertForNextSentencePrediction, self).__init__(config)
self.output_attentions = output_attentions
self.bert = BertModel(config, output_attentions=output_attentions,
keep_multihead_output=keep_multihead_output)
self.cls = BertOnlyNSPHead(config)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, next_sentence_label=None, head_mask=None):
outputs = self.bert(input_ids, token_type_ids, attention_mask,
output_all_encoded_layers=False,
head_mask=head_mask)
if self.output_attentions:
all_attentions, _, pooled_output = outputs
else:
_, pooled_output = outputs
seq_relationship_score = self.cls(pooled_output)
if next_sentence_label is not None:
loss_fct = CrossEntropyLoss(ignore_index=-1)
next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
return next_sentence_loss
elif self.output_attentions:
return all_attentions, seq_relationship_score
return seq_relationship_score
class BertForSequenceClassification(BertPreTrainedModel):
"""BERT model for classification.
This module is composed of the BERT model with a linear layer on top of
the pooled output.
Params:
`config`: a BertConfig class instance with the configuration to build a new model
`output_attentions`: If True, also output attentions weights computed by the model at each layer. Default: False
`keep_multihead_output`: If True, saves output of the multi-head attention module with its gradient.
This can be used to compute head importance metrics. Default: False
`num_labels`: the number of classes for the classifier. Default = 2.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
with the word token indices in the vocabulary. Items in the batch should begin with the special "CLS" token. (see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`labels`: labels for the classification output: torch.LongTensor of shape [batch_size]
with indices selected in [0, ..., num_labels].
`head_mask`: an optional torch.Tensor of shape [num_heads] or [num_layers, num_heads] with indices between 0 and 1.
            It's a mask to be used to nullify some heads of the transformer. 1.0 => head is not masked, 0.0 => head is masked.
Outputs:
if `labels` is not `None`:
Outputs the CrossEntropy classification loss of the output with the labels.
if `labels` is `None`:
Outputs the classification logits of shape [batch_size, num_labels].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
num_labels = 2
model = BertForSequenceClassification(config, num_labels)
logits = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config, num_labels=2, output_attentions=False, keep_multihead_output=False):
super(BertForSequenceClassification, self).__init__(config)
self.output_attentions = output_attentions
self.num_labels = num_labels
self.bert = BertModel(config, output_attentions=output_attentions,
keep_multihead_output=keep_multihead_output)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, num_labels)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None, head_mask=None):
outputs = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False,
head_mask=head_mask)
if self.output_attentions:
all_attentions, _, pooled_output = outputs
else:
_, pooled_output = outputs
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
return loss
elif self.output_attentions:
return all_attentions, logits
return logits
class BertForMultipleChoice(BertPreTrainedModel):
"""BERT model for multiple choice tasks.
This module is composed of the BERT model with a linear layer on top of
the pooled output.
Params:
`config`: a BertConfig class instance with the configuration to build a new model
`output_attentions`: If True, also output attentions weights computed by the model at each layer. Default: False
`keep_multihead_output`: If True, saves output of the multi-head attention module with its gradient.
This can be used to compute head importance metrics. Default: False
`num_choices`: the number of classes for the classifier. Default = 2.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, num_choices, sequence_length]
            with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, num_choices, sequence_length]
with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A`
and type 1 corresponds to a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, num_choices, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`labels`: labels for the classification output: torch.LongTensor of shape [batch_size]
with indices selected in [0, ..., num_choices].
`head_mask`: an optional torch.Tensor of shape [num_heads] or [num_layers, num_heads] with indices between 0 and 1.
            It's a mask to be used to nullify some heads of the transformer. 1.0 => head is not masked, 0.0 => head is masked.
Outputs:
if `labels` is not `None`:
Outputs the CrossEntropy classification loss of the output with the labels.
if `labels` is `None`:
            Outputs the classification logits of shape [batch_size, num_choices].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[[31, 51, 99], [15, 5, 0]], [[12, 16, 42], [14, 28, 57]]])
input_mask = torch.LongTensor([[[1, 1, 1], [1, 1, 0]],[[1,1,0], [1, 0, 0]]])
token_type_ids = torch.LongTensor([[[0, 0, 1], [0, 1, 0]],[[0, 1, 1], [0, 0, 1]]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
num_choices = 2
model = BertForMultipleChoice(config, num_choices)
logits = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config, num_choices=2, output_attentions=False, keep_multihead_output=False):
super(BertForMultipleChoice, self).__init__(config)
self.output_attentions = output_attentions
self.num_choices = num_choices
self.bert = BertModel(config, output_attentions=output_attentions,
keep_multihead_output=keep_multihead_output)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, 1)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None, head_mask=None):
flat_input_ids = input_ids.view(-1, input_ids.size(-1))
flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
outputs = self.bert(flat_input_ids, flat_token_type_ids, flat_attention_mask, output_all_encoded_layers=False,
head_mask=head_mask)
if self.output_attentions:
all_attentions, _, pooled_output = outputs
else:
_, pooled_output = outputs
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.view(-1, self.num_choices)
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
return loss
elif self.output_attentions:
return all_attentions, reshaped_logits
return reshaped_logits
class BertMultipleChoice(BertPreTrainedModel):
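    """Multiple-choice classification head without the BERT encoder.
    Scores a pre-computed pooled output of shape [batch_size * num_choices, hidden_size]
    (dropout followed by a linear layer) and reshapes the logits to [batch_size, num_choices].
    Returns the CrossEntropy loss if `labels` is given, otherwise the reshaped logits.
    """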
def __init__(self, config, num_choices=2, output_attentions=False, keep_multihead_output=False):
super(BertMultipleChoice, self).__init__(config)
self.output_attentions = output_attentions
self.num_choices = num_choices
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, 1)
self.apply(self.init_bert_weights)
def forward(self, pooled_output, num_choices=2, labels=None, head_mask=None):
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.view(-1, num_choices)
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
return loss
return reshaped_logits
class BertForTokenClassification(BertPreTrainedModel):
"""BERT model for token-level classification.
This module is composed of the BERT model with a linear layer on top of
the full hidden state of the last layer.
Params:
`config`: a BertConfig class instance with the configuration to build a new model
`output_attentions`: If True, also output attentions weights computed by the model at each layer. Default: False
`keep_multihead_output`: If True, saves output of the multi-head attention module with its gradient.
This can be used to compute head importance metrics. Default: False
`num_labels`: the number of classes for the classifier. Default = 2.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`labels`: labels for the classification output: torch.LongTensor of shape [batch_size, sequence_length]
with indices selected in [0, ..., num_labels].
`head_mask`: an optional torch.Tensor of shape [num_heads] or [num_layers, num_heads] with indices between 0 and 1.
            It's a mask to be used to nullify some heads of the transformer. 1.0 => head is not masked, 0.0 => head is masked.
Outputs:
if `labels` is not `None`:
Outputs the CrossEntropy classification loss of the output with the labels.
if `labels` is `None`:
Outputs the classification logits of shape [batch_size, sequence_length, num_labels].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
num_labels = 2
model = BertForTokenClassification(config, num_labels)
logits = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config, num_labels=2, output_attentions=False, keep_multihead_output=False):
super(BertForTokenClassification, self).__init__(config)
self.output_attentions = output_attentions
self.num_labels = num_labels
self.bert = BertModel(config, output_attentions=output_attentions,
keep_multihead_output=keep_multihead_output)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, num_labels)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None, head_mask=None):
outputs = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False,
head_mask=head_mask)
if self.output_attentions:
all_attentions, sequence_output, _ = outputs
else:
sequence_output, _ = outputs
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
if labels is not None:
loss_fct = CrossEntropyLoss()
# Only keep active parts of the loss
if attention_mask is not None:
active_loss = attention_mask.view(-1) == 1
active_logits = logits.view(-1, self.num_labels)[active_loss]
active_labels = labels.view(-1)[active_loss]
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
return loss
elif self.output_attentions:
return all_attentions, logits
return logits
class BertForQuestionAnswering(BertPreTrainedModel):
"""BERT model for Question Answering (span extraction).
This module is composed of the BERT model with a linear layer on top of
the sequence output that computes start_logits and end_logits
Params:
`config`: a BertConfig class instance with the configuration to build a new model
`output_attentions`: If True, also output attentions weights computed by the model at each layer. Default: False
`keep_multihead_output`: If True, saves output of the multi-head attention module with its gradient.
This can be used to compute head importance metrics. Default: False
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`start_positions`: position of the first token for the labeled span: torch.LongTensor of shape [batch_size].
            Positions are clamped to the length of the sequence and positions outside of the sequence are not taken
into account for computing the loss.
`end_positions`: position of the last token for the labeled span: torch.LongTensor of shape [batch_size].
            Positions are clamped to the length of the sequence and positions outside of the sequence are not taken
into account for computing the loss.
`head_mask`: an optional torch.Tensor of shape [num_heads] or [num_layers, num_heads] with indices between 0 and 1.
            It's a mask to be used to nullify some heads of the transformer. 1.0 => head is not masked, 0.0 => head is masked.
Outputs:
if `start_positions` and `end_positions` are not `None`:
Outputs the total_loss which is the sum of the CrossEntropy loss for the start and end token positions.
if `start_positions` or `end_positions` is `None`:
Outputs a tuple of start_logits, end_logits which are the logits respectively for the start and end
position tokens of shape [batch_size, sequence_length].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = BertForQuestionAnswering(config)
start_logits, end_logits = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config, output_attentions=False, keep_multihead_output=False):
super(BertForQuestionAnswering, self).__init__(config)
self.output_attentions = output_attentions
self.bert = BertModel(config, output_attentions=output_attentions,
keep_multihead_output=keep_multihead_output)
self.qa_outputs = nn.Linear(config.hidden_size, 2)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, start_positions=None,
end_positions=None, head_mask=None):
outputs = self.bert(input_ids, token_type_ids, attention_mask,
output_all_encoded_layers=False,
head_mask=head_mask)
if self.output_attentions:
all_attentions, sequence_output, _ = outputs
else:
sequence_output, _ = outputs
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)
end_logits = end_logits.squeeze(-1)
if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, the position tensors may carry an extra dimension; squeeze it
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions.clamp_(0, ignored_index)
end_positions.clamp_(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
return total_loss
elif self.output_attentions:
return all_attentions, start_logits, end_logits
return start_logits, end_logits
class BertLMHeadModel(BertPreTrainedModel):
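    """BERT encoder with a language modeling head, trained with a causal (shifted) loss.
    BERT is bidirectional, so using it as a left-to-right LM is an approximation;
    `forward` flattens any extra choice dimension, then shifts logits/labels like a
    standard causal LM when `lm_labels` is given.
    """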
def __init__(self, config, output_attentions=False):
super(BertLMHeadModel, self).__init__(config)
self.bert = BertModel(config, output_attentions=output_attentions)
self.lm_head = BertOnlyMLMHead(config, self.bert.embeddings.word_embeddings.weight)
self.apply(self.init_bert_weights)
def forward(self, input_ids, input_mask=None, lm_labels=None, token_type_ids=None, position_ids=None):
input_shape = input_ids.size() # (B, C, F)
flat_input_ids = input_ids.view(-1, input_ids.size(-1))
flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
flat_attention_mask = input_mask.view(-1, input_mask.size(-1)) if input_mask is not None else None
hidden_states, pooled_output = self.bert(flat_input_ids, flat_token_type_ids, flat_attention_mask,
output_all_encoded_layers=False)
lm_logits = self.lm_head(hidden_states)
if lm_labels is not None:
shift_logits = lm_logits[..., :-1, :].contiguous()
shift_labels = lm_labels[..., 1:].contiguous()
loss_fct = CrossEntropyLoss(ignore_index=-1)
loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
return loss
return lm_logits
class BertDoubleHeadsModel(BertPreTrainedModel):
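    """BERT with two heads, mirroring OpenAIGPTDoubleHeadsModel: a (shifted) LM head
    and a multiple-choice head. Inputs are multiple-choice shaped
    [batch_size, num_choices, seq_len]. Returns a list of the requested losses when
    `lm_labels`/`mc_labels` are given, otherwise (lm_logits, mc_logits).
    """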
def __init__(self, config, output_attentions=False):
super(BertDoubleHeadsModel, self).__init__(config)
self.bert = BertModel(config, output_attentions=output_attentions)
self.lm_head = BertOnlyMLMHead(config, self.bert.embeddings.word_embeddings.weight)
self.multiple_choice_head = BertMultipleChoice(config)
self.apply(self.init_bert_weights)
def forward(self, input_ids, mc_token_ids, input_mask=None, lm_labels=None, mc_labels=None, token_type_ids=None,
position_ids=None):
input_shape = input_ids.size() # (B, C, F)
flat_input_ids = input_ids.view(-1, input_ids.size(-1))
flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
flat_attention_mask = input_mask.view(-1, input_mask.size(-1)) if input_mask is not None else None
hidden_states, pooled_output = self.bert(flat_input_ids, flat_token_type_ids, flat_attention_mask,
output_all_encoded_layers=False)
num_choices = input_shape[1]
        output_shape = input_shape + (hidden_states.size(-1),)
hidden_states = hidden_states.view(*output_shape)
lm_logits = self.lm_head(hidden_states)
mc_logits = self.multiple_choice_head(pooled_output, num_choices=num_choices)
losses = []
if lm_labels is not None:
            # BERT is not a causal language model, so a shifted LM loss is not
            # strictly well-defined; empirically it still trains reasonably well here.
shift_logits = lm_logits[..., :-1, :].contiguous()
shift_labels = lm_labels[..., 1:].contiguous()
loss_fct = CrossEntropyLoss(ignore_index=-1)
losses.append(loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)))
if mc_labels is not None:
loss_fct = CrossEntropyLoss()
losses.append(loss_fct(mc_logits.view(-1, mc_logits.size(-1)), mc_labels.view(-1)))
if losses:
return losses
return lm_logits, mc_logits
# class BertOpenAIDoubleHeadsModel(BertPreTrainedModel):
# def __init__(self, config, output_attentions=False):
# super(BertOpenAIDoubleHeadsModel, self).__init__(config)
# self.bert = BertModel(config, output_attentions=output_attentions)
# self.transformer = OpenAIGPTModel(config, output_attentions=output_attentions)
# self.lm_head = OpenAIGPTLMHead(self.transformer.tokens_embed.weight, config)
# self.multiple_choice_head = BertMultipleChoice(config)
# self.apply(self.init_bert_weights)
#
# def forward(self, input_ids, mc_token_ids, input_mask=None, lm_labels=None, mc_labels=None, token_type_ids=None,
# position_ids=None):
# input_shape = input_ids.size() # (B, C, F)
# flat_input_ids = input_ids.view(-1, input_ids.size(-1))
# flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
# flat_attention_mask = input_mask.view(-1, input_mask.size(-1)) if input_mask is not None else None
# bert_hidden_states, pooled_output = self.bert(flat_input_ids, flat_token_type_ids, flat_attention_mask,
# output_all_encoded_layers=False)
#
# transformer_hidden_states = self.transformer(input_ids, position_ids, token_type_ids)
#
# num_choices = input_shape[1]
# # output_shape = (input_shape) + (hidden_states.size(-1),)
# # hidden_states = hidden_states.view(*output_shape)
#
# lm_logits = self.lm_head(transformer_hidden_states)
# mc_logits = self.multiple_choice_head(pooled_output, num_choices=num_choices)
# losses = []
# if lm_labels is not None:
# shift_logits = lm_logits[..., :-1, :].contiguous()
# shift_labels = lm_labels[..., 1:].contiguous()
# loss_fct = CrossEntropyLoss(ignore_index=-1)
# losses.append(loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)))
# if mc_labels is not None:
# loss_fct = CrossEntropyLoss()
# losses.append(loss_fct(mc_logits.view(-1, mc_logits.size(-1)), mc_labels.view(-1)))
# if losses:
# return losses
# return lm_logits, mc_logits | 85,247 | 51.524954 | 187 | py |
EmpTransfo | EmpTransfo-master/pytorch_pretrained_bert/modeling_gpt2.py | # coding=utf-8
# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch OpenAI GPT-2 model."""
from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import copy
import json
import logging
import math
import os
import shutil
import tarfile
import tempfile
import sys
from io import open
import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss
from torch.nn.parameter import Parameter
from .file_utils import cached_path, CONFIG_NAME, WEIGHTS_NAME
from .modeling import BertLayerNorm as LayerNorm
logger = logging.getLogger(__name__)
PRETRAINED_MODEL_ARCHIVE_MAP = {"gpt2": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-pytorch_model.bin",
"gpt2-medium": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-medium-pytorch_model.bin"}
PRETRAINED_CONFIG_ARCHIVE_MAP = {"gpt2": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-config.json",
"gpt2-medium": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-medium-config.json"}
def load_tf_weights_in_gpt2(model, gpt2_checkpoint_path):
""" Load tf checkpoints in a pytorch model
"""
try:
import re
import numpy as np
import tensorflow as tf
except ImportError:
print("Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions.")
raise
tf_path = os.path.abspath(gpt2_checkpoint_path)
print("Converting TensorFlow checkpoint from {}".format(tf_path))
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
arrays = []
for name, shape in init_vars:
print("Loading TF weight {} with shape {}".format(name, shape))
array = tf.train.load_variable(tf_path, name)
names.append(name)
arrays.append(array.squeeze())
for name, array in zip(names, arrays):
name = name[6:] # skip "model/"
name = name.split('/')
pointer = model
for m_name in name:
if re.fullmatch(r'[A-Za-z]+\d+', m_name):
l = re.split(r'(\d+)', m_name)
else:
l = [m_name]
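            # Map TF variable name suffixes onto PyTorch module attributes:
            # 'w'/'g' -> weight, 'b' -> bias, 'wpe'/'wte' -> embedding weights.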
if l[0] == 'w' or l[0] == 'g':
pointer = getattr(pointer, 'weight')
elif l[0] == 'b':
pointer = getattr(pointer, 'bias')
elif l[0] == 'wpe' or l[0] == 'wte':
pointer = getattr(pointer, l[0])
pointer = getattr(pointer, 'weight')
else:
pointer = getattr(pointer, l[0])
if len(l) >= 2:
num = int(l[1])
pointer = pointer[num]
try:
assert pointer.shape == array.shape
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
print("Initialize PyTorch weight {}".format(name))
pointer.data = torch.from_numpy(array)
return model
def gelu(x):
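    # Tanh approximation of the Gaussian Error Linear Unit (GELU) activation,
    # as used by the original GPT/GPT-2 implementations.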
return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
class GPT2Config(object):
"""Configuration class to store the configuration of a `GPT2Model`.
"""
def __init__(
self,
vocab_size_or_config_json_file=50257,
n_special=0,
n_positions=1024,
n_ctx=1024,
n_embd=768,
n_layer=12,
n_head=12,
resid_pdrop=0.1,
embd_pdrop=0.1,
attn_pdrop=0.1,
layer_norm_epsilon=1e-5,
initializer_range=0.02,
predict_special_tokens=True
):
"""Constructs GPT2Config.
Args:
vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `GPT2Model` or a configuration json file.
            n_special: The number of special tokens to learn during fine-tuning ('[SEP]', '[CLS]', ...)
n_positions: Number of positional embeddings.
n_ctx: Size of the causal mask (usually same as n_positions).
n_embd: Dimensionality of the embeddings and hidden states.
n_layer: Number of hidden layers in the Transformer encoder.
n_head: Number of attention heads for each attention layer in
the Transformer encoder.
layer_norm_epsilon: epsilon to use in the layer norm layers
            resid_pdrop: The dropout probability for all fully connected
layers in the embeddings, encoder, and pooler.
attn_pdrop: The dropout ratio for the attention
probabilities.
embd_pdrop: The dropout ratio for the embeddings.
            initializer_range: The stddev of the truncated_normal_initializer for
                initializing all weight matrices.
            predict_special_tokens: should we predict special tokens (when the model has an LM head)
"""
if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2
and isinstance(vocab_size_or_config_json_file, unicode)):
with open(vocab_size_or_config_json_file, "r", encoding="utf-8") as reader:
json_config = json.loads(reader.read())
for key, value in json_config.items():
self.__dict__[key] = value
elif isinstance(vocab_size_or_config_json_file, int):
self.vocab_size = vocab_size_or_config_json_file
self.n_special = n_special
self.n_ctx = n_ctx
self.n_positions = n_positions
self.n_embd = n_embd
self.n_layer = n_layer
self.n_head = n_head
self.resid_pdrop = resid_pdrop
self.embd_pdrop = embd_pdrop
self.attn_pdrop = attn_pdrop
self.layer_norm_epsilon = layer_norm_epsilon
self.initializer_range = initializer_range
self.predict_special_tokens = predict_special_tokens
else:
raise ValueError(
"First argument must be either a vocabulary size (int)"
"or the path to a pretrained model config file (str)"
)
@property
def total_tokens_embeddings(self):
return self.vocab_size + self.n_special
@classmethod
def from_dict(cls, json_object):
"""Constructs a `GPT2Config` from a Python dictionary of parameters."""
config = GPT2Config(vocab_size_or_config_json_file=-1)
for key, value in json_object.items():
config.__dict__[key] = value
return config
@classmethod
def from_json_file(cls, json_file):
"""Constructs a `GPT2Config` from a json file of parameters."""
with open(json_file, "r", encoding="utf-8") as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
def __repr__(self):
return str(self.to_json_string())
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
def to_json_file(self, json_file_path):
""" Save this instance to a json file."""
with open(json_file_path, "w", encoding='utf-8') as writer:
writer.write(self.to_json_string())
class Conv1D(nn.Module):
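    """A linear layer with transposed weight storage ([nx, nf] instead of [nf, nx]),
    matching the Conv1D (kernel size 1) layout of the original OpenAI TF checkpoints.
    """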
def __init__(self, nf, nx):
super(Conv1D, self).__init__()
self.nf = nf
w = torch.empty(nx, nf)
nn.init.normal_(w, std=0.02)
self.weight = Parameter(w)
self.bias = Parameter(torch.zeros(nf))
def forward(self, x):
size_out = x.size()[:-1] + (self.nf,)
x = torch.addmm(self.bias, x.view(-1, x.size(-1)), self.weight)
x = x.view(*size_out)
return x
class Attention(nn.Module):
def __init__(self, nx, n_ctx, config, scale=False, output_attentions=False):
super(Attention, self).__init__()
n_state = nx # in Attention: n_state=768 (nx=n_embd)
# [switch nx => n_state from Block to Attention to keep identical to TF implem]
assert n_state % config.n_head == 0
self.register_buffer("bias", torch.tril(torch.ones(n_ctx, n_ctx)).view(1, 1, n_ctx, n_ctx))
self.n_head = config.n_head
self.split_size = n_state
self.scale = scale
self.output_attentions = output_attentions
self.c_attn = Conv1D(n_state * 3, nx)
self.c_proj = Conv1D(n_state, nx)
self.attn_dropout = nn.Dropout(config.attn_pdrop)
self.resid_dropout = nn.Dropout(config.resid_pdrop)
def _attn(self, q, k, v):
w = torch.matmul(q, k)
if self.scale:
w = w / math.sqrt(v.size(-1))
nd, ns = w.size(-2), w.size(-1)
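        # Slice the cached causal mask to the current key/query lengths, then keep
        # allowed positions and push masked ones to a large negative value pre-softmax.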
b = self.bias[:, :, ns-nd:ns, :ns]
w = w * b - 1e4 * (1 - b)
w = nn.Softmax(dim=-1)(w)
w = self.attn_dropout(w)
if self.output_attentions:
return w, torch.matmul(w, v)
return torch.matmul(w, v)
def merge_heads(self, x):
x = x.permute(0, 2, 1, 3).contiguous()
new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)
return x.view(*new_x_shape) # in Tensorflow implem: fct merge_states
def split_heads(self, x, k=False):
new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head)
x = x.view(*new_x_shape) # in Tensorflow implem: fct split_states
if k:
return x.permute(0, 2, 3, 1) # (batch, head, head_features, seq_length)
else:
return x.permute(0, 2, 1, 3) # (batch, head, seq_length, head_features)
def forward(self, x, layer_past=None):
x = self.c_attn(x)
query, key, value = x.split(self.split_size, dim=2)
query = self.split_heads(query)
key = self.split_heads(key, k=True)
value = self.split_heads(value)
if layer_past is not None:
past_key, past_value = layer_past[0].transpose(-2, -1), layer_past[1] # transpose back cf below
key = torch.cat((past_key, key), dim=-1)
value = torch.cat((past_value, value), dim=-2)
present = torch.stack((key.transpose(-2, -1), value)) # transpose to have same shapes for stacking
a = self._attn(query, key, value)
if self.output_attentions:
attentions, a = a
a = self.merge_heads(a)
a = self.c_proj(a)
a = self.resid_dropout(a)
if self.output_attentions:
return attentions, a, present
return a, present
class MLP(nn.Module):
def __init__(self, n_state, config): # in MLP: n_state=3072 (4 * n_embd)
super(MLP, self).__init__()
nx = config.n_embd
self.c_fc = Conv1D(n_state, nx)
self.c_proj = Conv1D(nx, n_state)
self.act = gelu
self.dropout = nn.Dropout(config.resid_pdrop)
def forward(self, x):
h = self.act(self.c_fc(x))
h2 = self.c_proj(h)
return self.dropout(h2)
class Block(nn.Module):
def __init__(self, n_ctx, config, scale=False, output_attentions=False):
super(Block, self).__init__()
nx = config.n_embd
self.output_attentions = output_attentions
self.ln_1 = LayerNorm(nx, eps=config.layer_norm_epsilon)
self.attn = Attention(nx, n_ctx, config, scale, output_attentions)
self.ln_2 = LayerNorm(nx, eps=config.layer_norm_epsilon)
self.mlp = MLP(4 * nx, config)
def forward(self, x, layer_past=None):
output_attn = self.attn(self.ln_1(x), layer_past=layer_past)
if self.output_attentions:
attentions, a, present = output_attn
else:
a, present = output_attn
x = x + a
m = self.mlp(self.ln_2(x))
x = x + m
if self.output_attentions:
return attentions, x, present
return x, present
class GPT2LMHead(nn.Module):
""" Language Model Head for the transformer """
def __init__(self, model_embeddings_weights, config):
super(GPT2LMHead, self).__init__()
self.n_embd = config.n_embd
self.vocab_size = config.vocab_size
self.predict_special_tokens = config.predict_special_tokens
embed_shape = model_embeddings_weights.shape
self.decoder = nn.Linear(embed_shape[1], embed_shape[0], bias=False)
self.set_embeddings_weights(model_embeddings_weights)
def set_embeddings_weights(self, model_embeddings_weights, predict_special_tokens=True):
self.predict_special_tokens = predict_special_tokens
self.decoder.weight = model_embeddings_weights # Tied weights
def forward(self, hidden_state):
lm_logits = self.decoder(hidden_state)
if not self.predict_special_tokens:
lm_logits = lm_logits[..., :self.vocab_size]
return lm_logits
class GPT2MultipleChoiceHead(nn.Module):
""" Classifier Head for the transformer """
def __init__(self, config):
super(GPT2MultipleChoiceHead, self).__init__()
self.n_embd = config.n_embd
        self.dropout = nn.Dropout2d(config.resid_pdrop)  # to reproduce the noise_shape parameter of the TF implementation
self.linear = nn.Linear(config.n_embd, 1)
nn.init.normal_(self.linear.weight, std=0.02)
nn.init.normal_(self.linear.bias, 0)
def forward(self, hidden_states, mc_token_ids):
# Classification logits
# hidden_state (bsz, num_choices, seq_length, hidden_size)
# mc_token_ids (bsz, num_choices)
mc_token_ids = mc_token_ids.unsqueeze(-1).unsqueeze(-1).expand(-1, -1, -1, hidden_states.size(-1))
# (bsz, num_choices, 1, hidden_size)
multiple_choice_h = hidden_states.gather(2, mc_token_ids).squeeze(2)
# (bsz, num_choices, hidden_size)
multiple_choice_h = self.dropout(multiple_choice_h.transpose(1, 2)).transpose(1, 2)
multiple_choice_logits = self.linear(multiple_choice_h).squeeze(-1)
# (bsz, num_choices)
return multiple_choice_logits
class GPT2PreTrainedModel(nn.Module):
""" An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
"""
def __init__(self, config, *inputs, **kwargs):
super(GPT2PreTrainedModel, self).__init__()
if not isinstance(config, GPT2Config):
raise ValueError(
"Parameter config in `{}(config)` should be an instance of class `GPT2Config`. "
"To create a model from a pretrained model use "
"`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
self.__class__.__name__, self.__class__.__name__
)
)
self.config = config
def init_weights(self, module):
""" Initialize the weights.
"""
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
elif isinstance(module, LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *inputs, **kwargs):
"""
Instantiate a GPT2PreTrainedModel from a pre-trained model file or a pytorch state dict.
Download and cache the pre-trained model file if needed.
Params:
pretrained_model_name_or_path: either:
- a str with the name of a pre-trained model to load selected in the list of:
. `gpt2`
- a path or url to a pretrained model archive containing:
. `gpt2_config.json` a configuration file for the model
. `pytorch_model.bin` a PyTorch dump of a GPT2Model instance
- a path or url to a pretrained model archive containing:
. `gpt2_config.json` a configuration file for the model
. a TensorFlow checkpoint with trained weights
from_tf: should we load the weights from a locally saved TensorFlow checkpoint
cache_dir: an optional path to a folder in which the pre-trained models will be cached.
state_dict: an optional state dictionary (collections.OrderedDict object) to use instead of pre-trained models
*inputs, **kwargs: additional input for the specific GPT2 class
"""
state_dict = kwargs.get('state_dict', None)
kwargs.pop('state_dict', None)
cache_dir = kwargs.get('cache_dir', None)
kwargs.pop('cache_dir', None)
from_tf = kwargs.get('from_tf', False)
kwargs.pop('from_tf', None)
num_special_tokens = kwargs.get('num_special_tokens', None)
kwargs.pop('num_special_tokens', None)
if pretrained_model_name_or_path in PRETRAINED_MODEL_ARCHIVE_MAP:
archive_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name_or_path]
config_file = PRETRAINED_CONFIG_ARCHIVE_MAP[pretrained_model_name_or_path]
else:
archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)
config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
# redirect to the cache, if necessary
try:
resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir)
resolved_config_file = cached_path(config_file, cache_dir=cache_dir)
except EnvironmentError:
logger.error(
"Model name '{}' was not found in model name list ({}). "
"We assumed '{}' was a path or url but couldn't find files {} and {} "
"at this path or url.".format(
pretrained_model_name_or_path, ", ".join(PRETRAINED_MODEL_ARCHIVE_MAP.keys()), pretrained_model_name_or_path,
archive_file, config_file
)
)
return None
if resolved_archive_file == archive_file and resolved_config_file == config_file:
logger.info("loading weights file {}".format(archive_file))
logger.info("loading configuration file {}".format(config_file))
else:
logger.info("loading weights file {} from cache at {}".format(
archive_file, resolved_archive_file))
logger.info("loading configuration file {} from cache at {}".format(
config_file, resolved_config_file))
# Load config
config = GPT2Config.from_json_file(resolved_config_file)
logger.info("Model config {}".format(config))
# Instantiate model.
model = cls(config, *inputs, **kwargs)
if state_dict is None and not from_tf:
state_dict = torch.load(resolved_archive_file, map_location='cpu')
if from_tf:
# Directly load from a TensorFlow checkpoint (stored as NumPy array)
return load_tf_weights_in_gpt2(model, resolved_archive_file)
old_keys = []
new_keys = []
for key in state_dict.keys():
new_key = None
if key.endswith(".g"):
new_key = key[:-2] + ".weight"
elif key.endswith(".b"):
new_key = key[:-2] + ".bias"
elif key.endswith(".w"):
new_key = key[:-2] + ".weight"
if new_key:
old_keys.append(key)
new_keys.append(new_key)
for old_key, new_key in zip(old_keys, new_keys):
state_dict[new_key] = state_dict.pop(old_key)
missing_keys = []
unexpected_keys = []
error_msgs = []
# copy state_dict so _load_from_state_dict can modify it
metadata = getattr(state_dict, "_metadata", None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
def load(module, prefix=""):
local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
module._load_from_state_dict(
state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs
)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + ".")
start_model = model
if hasattr(model, "transformer") and all(not s.startswith('transformer.') for s in state_dict.keys()):
start_model = model.transformer
load(start_model, prefix="")
if len(missing_keys) > 0:
logger.info(
"Weights of {} not initialized from pretrained model: {}".format(model.__class__.__name__, missing_keys)
)
if len(unexpected_keys) > 0:
logger.info(
"Weights from pretrained model not used in {}: {}".format(model.__class__.__name__, unexpected_keys)
)
if len(error_msgs) > 0:
raise RuntimeError(
"Error(s) in loading state_dict for {}:\n\t{}".format(model.__class__.__name__, "\n\t".join(error_msgs))
)
# Add additional embeddings for special tokens if needed
        # This step also makes sure we are still sharing the output and input embeddings after loading weights
model.set_num_special_tokens(num_special_tokens if num_special_tokens is not None else config.n_special)
return model
class GPT2Model(GPT2PreTrainedModel):
"""OpenAI GPT-2 model ("Language Models are Unsupervised Multitask Learners").
    GPT-2 uses a single embedding matrix to store the word and special embeddings.
Special tokens embeddings are additional tokens that are not pre-trained: [SEP], [CLS]...
Special tokens need to be trained during the fine-tuning if you use them.
    The number of special embeddings can be controlled using the `set_num_special_tokens(num_special_tokens)` function.
    The embeddings are ordered as follows in the token embeddings matrix:
[0, ----------------------
... -> word embeddings
config.vocab_size - 1, ______________________
config.vocab_size,
... -> special embeddings
config.vocab_size + config.n_special - 1] ______________________
where total_tokens_embeddings can be obtained as config.total_tokens_embeddings and is:
total_tokens_embeddings = config.vocab_size + config.n_special
    You should use the associated indices to index the embeddings.
Params:
config: a GPT2Config class instance with the configuration to build a new model
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] (or more generally [d_1, ..., d_n, sequence_length]
            where d_1 ... d_n are arbitrary dimensions) with the word BPE token indices selected in the range [0, config.vocab_size[
`position_ids`: an optional torch.LongTensor with the same shape as input_ids
with the position indices (selected in the range [0, config.n_positions - 1[.
`token_type_ids`: an optional torch.LongTensor with the same shape as input_ids
You can use it to add a third type of embedding to each input token in the sequence
(the previous two being the word and position embeddings).
The input, position and token_type embeddings are summed inside the Transformer before the first
self-attention block.
`past`: an optional list of torch.LongTensor that contains pre-computed hidden-states
(key and values in the attention blocks) to speed up sequential decoding
(this is the presents output of the model, cf. below).
Outputs a tuple consisting of:
`hidden_states`: the encoded-hidden-states at the top of the model
as a torch.FloatTensor of size [batch_size, sequence_length, hidden_size]
            (or more generally [d_1, ..., d_n, hidden_size] where d_1 ... d_n are the dimensions of input_ids)
`presents`: a list of pre-computed hidden-states (key and values in each attention blocks) as
torch.FloatTensors. They can be reused to speed up sequential decoding.
Example usage:
```python
# Already been converted into BPE token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
config = modeling_gpt2.GPT2Config()
model = modeling_gpt2.GPT2Model(config)
hidden_states, presents = model(input_ids)
```
"""
def __init__(self, config, output_attentions=False):
super(GPT2Model, self).__init__(config)
self.output_attentions = output_attentions
self.wte = nn.Embedding(config.total_tokens_embeddings, config.n_embd)
self.wpe = nn.Embedding(config.n_positions, config.n_embd)
self.drop = nn.Dropout(config.embd_pdrop)
block = Block(config.n_ctx, config, scale=True, output_attentions=output_attentions)
self.h = nn.ModuleList([copy.deepcopy(block) for _ in range(config.n_layer)])
self.ln_f = LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
self.apply(self.init_weights)
def set_num_special_tokens(self, num_special_tokens):
" Update input embeddings with new embedding matrice if needed "
if self.config.n_special == num_special_tokens:
return
# Update config
self.config.n_special = num_special_tokens
# Build new embeddings and initialize all new embeddings (in particular the special tokens)
old_embed = self.wte
self.wte = nn.Embedding(self.config.total_tokens_embeddings, self.config.n_embd)
self.wte.to(old_embed.weight.device)
self.init_weights(self.wte)
# Copy word embeddings from the previous weights
self.wte.weight.data[:self.config.vocab_size, :] = old_embed.weight.data[:self.config.vocab_size, :]
def forward(self, input_ids, position_ids=None, token_type_ids=None, past=None):
if past is None:
past_length = 0
past = [None] * len(self.h)
else:
past_length = past[0][0].size(-2)
if position_ids is None:
position_ids = torch.arange(past_length, input_ids.size(-1) + past_length, dtype=torch.long, device=input_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_ids.size(-1))
position_ids = position_ids.view(-1, position_ids.size(-1))
inputs_embeds = self.wte(input_ids)
position_embeds = self.wpe(position_ids)
if token_type_ids is not None:
token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))
token_type_embeds = self.wte(token_type_ids)
else:
token_type_embeds = 0
hidden_states = inputs_embeds + position_embeds + token_type_embeds
hidden_states = self.drop(hidden_states)
presents = []
all_attentions = []
for block, layer_past in zip(self.h, past):
if self.output_attentions:
attentions, hidden_states, present = block(hidden_states, layer_past)
all_attentions.append(attentions)
else:
hidden_states, present = block(hidden_states, layer_past)
presents.append(present)
hidden_states = self.ln_f(hidden_states)
output_shape = input_shape + (hidden_states.size(-1),)
if self.output_attentions:
return all_attentions, hidden_states.view(*output_shape), presents
return hidden_states.view(*output_shape), presents
class GPT2LMHeadModel(GPT2PreTrainedModel):
"""OpenAI GPT-2 model with a Language Modeling head ("Language Models are Unsupervised Multitask Learners").
Params:
config: a GPT2Config class instance with the configuration to build a new model
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] (or more generally [d_1, ..., d_n, sequence_length]
            where d_1 ... d_n are arbitrary dimensions) with the word BPE token indices selected in the range [0, config.vocab_size[
`position_ids`: an optional torch.LongTensor with the same shape as input_ids
with the position indices (selected in the range [0, config.n_positions - 1[.
`token_type_ids`: an optional torch.LongTensor with the same shape as input_ids
You can use it to add a third type of embedding to each input token in the sequence
(the previous two being the word and position embeddings).
The input, position and token_type embeddings are summed inside the Transformer before the first
self-attention block.
`lm_labels`: optional language modeling labels: torch.LongTensor of shape [batch_size, sequence_length]
with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked), the loss
is only computed for the labels set in [0, ..., vocab_size]
`past`: an optional list of torch.LongTensor that contains pre-computed hidden-states
(key and values in the attention blocks) to speed up sequential decoding
(this is the presents output of the model, cf. below).
Outputs:
if `lm_labels` is not `None`:
Outputs the language modeling loss.
else a tuple:
`lm_logits`: the language modeling logits as a torch.FloatTensor of size [batch_size, sequence_length, config.vocab_size]
                (or more generally [d_1, ..., d_n, config.vocab_size] where d_1 ... d_n are the dimensions of input_ids)
`presents`: a list of pre-computed hidden-states (key and values in each attention blocks) as
torch.FloatTensors. They can be reused to speed up sequential decoding.
Example usage:
```python
# Already been converted into BPE token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
config = modeling_gpt2.GPT2Config()
model = modeling_gpt2.GPT2LMHeadModel(config)
lm_logits, presents = model(input_ids)
```
"""
def __init__(self, config, output_attentions=False):
super(GPT2LMHeadModel, self).__init__(config)
self.transformer = GPT2Model(config, output_attentions=output_attentions)
self.lm_head = GPT2LMHead(self.transformer.wte.weight, config)
self.apply(self.init_weights)
def set_num_special_tokens(self, num_special_tokens, predict_special_tokens=True):
""" Update input and output embeddings with new embedding matrice
Make sure we are sharing the embeddings
"""
self.config.predict_special_tokens = self.transformer.config.predict_special_tokens = predict_special_tokens
self.transformer.set_num_special_tokens(num_special_tokens)
self.lm_head.set_embeddings_weights(self.transformer.wte.weight, predict_special_tokens=predict_special_tokens)
def forward(self, input_ids, position_ids=None, token_type_ids=None, lm_labels=None, past=None):
transformer_output = self.transformer(input_ids, position_ids, token_type_ids, past)
if self.transformer.output_attentions:
all_attentions, hidden_states, presents = transformer_output
else:
hidden_states, presents = transformer_output
lm_logits = self.lm_head(hidden_states)
if lm_labels is not None:
# Shift so that tokens < n predict n
shift_logits = lm_logits[..., :-1, :].contiguous()
shift_labels = lm_labels[..., 1:].contiguous()
# Flatten the tokens
loss_fct = CrossEntropyLoss(ignore_index=-1)
loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)),
shift_labels.view(-1))
return loss
if self.transformer.output_attentions:
return all_attentions, lm_logits, presents
return lm_logits, presents
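# --- Illustrative sketch (not from the original source; added for clarity) ---
# The `past`/`presents` pair above enables incremental decoding: keys and values
# computed for earlier tokens are cached, so each generation step only runs the
# transformer over the newly produced token. A minimal sketch, assuming the
# GPT2Config defined earlier in this file and a tiny randomly initialized model:
def _example_incremental_decoding():
    config = GPT2Config(vocab_size_or_config_json_file=1000, n_positions=64,
                        n_ctx=64, n_embd=32, n_layer=2, n_head=2)
    model = GPT2LMHeadModel(config)
    model.eval()
    with torch.no_grad():
        prompt = torch.randint(0, 1000, (1, 5))  # (batch, seq_len)
        lm_logits, presents = model(prompt)      # full pass over the prompt
        next_token = lm_logits[:, -1, :].argmax(dim=-1, keepdim=True)
        # Second step: feed only the new token plus the cached keys/values.
        lm_logits, presents = model(next_token, past=presents)
    return lm_logits.shape  # (1, 1, vocab)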
class GPT2DoubleHeadsModel(GPT2PreTrainedModel):
"""OpenAI GPT-2 model with a Language Modeling and a Multiple Choice head ("Language Models are Unsupervised Multitask Learners").
Params:
config: a GPT2Config class instance with the configuration to build a new model
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, num_choices, sequence_length] with the BPE token
indices selected in the range [0, config.vocab_size[
`mc_token_ids`: a torch.LongTensor of shape [batch_size, num_choices] with the index of the token from
which we should take the hidden state to feed the multiple choice classifier (usually last token of the sequence)
`position_ids`: an optional torch.LongTensor with the same shape as input_ids
with the position indices (selected in the range [0, config.n_positions - 1[.
`token_type_ids`: an optional torch.LongTensor with the same shape as input_ids
You can use it to add a third type of embedding to each input token in the sequence
(the previous two being the word and position embeddings).
The input, position and token_type embeddings are summed inside the Transformer before the first
self-attention block.
`lm_labels`: optional language modeling labels: torch.LongTensor of shape [batch_size, num_choices, sequence_length]
with indices selected in [-1, 0, ..., config.vocab_size]. All labels set to -1 are ignored (masked), the loss
is only computed for the labels set in [0, ..., config.vocab_size]
`multiple_choice_labels`: optional multiple choice labels: torch.LongTensor of shape [batch_size]
with indices selected in [0, ..., num_choices].
`past`: an optional list of torch.LongTensor that contains pre-computed hidden-states
(key and values in the attention blocks) to speed up sequential decoding
(this is the presents output of the model, cf. below).
Outputs:
if `lm_labels` and `multiple_choice_labels` are not `None`:
Outputs a tuple of losses with the language modeling loss and the multiple choice loss.
else: a tuple with
`lm_logits`: the language modeling logits as a torch.FloatTensor of size [batch_size, num_choices, sequence_length, config.vocab_size]
`multiple_choice_logits`: the multiple choice logits as a torch.FloatTensor of size [batch_size, num_choices]
`presents`: a list of pre-computed hidden-states (key and values in each attention blocks) as
torch.FloatTensors. They can be reused to speed up sequential decoding.
Example usage:
```python
# Already been converted into BPE token ids
input_ids = torch.LongTensor([[[31, 51, 99], [15, 5, 0]]]) # (bsz, number of choice, seq length)
mc_token_ids = torch.LongTensor([[2], [1]]) # (bsz, number of choice)
config = modeling_gpt2.GPT2Config()
model = modeling_gpt2.GPT2DoubleHeadsModel(config)
lm_logits, multiple_choice_logits, presents = model(input_ids, mc_token_ids)
```
"""
def __init__(self, config, output_attentions=False):
super(GPT2DoubleHeadsModel, self).__init__(config)
self.transformer = GPT2Model(config, output_attentions=output_attentions)
self.lm_head = GPT2LMHead(self.transformer.wte.weight, config)
self.multiple_choice_head = GPT2MultipleChoiceHead(config)
self.apply(self.init_weights)
def set_num_special_tokens(self, num_special_tokens, predict_special_tokens=True):
""" Update input and output embeddings with new embedding matrice
Make sure we are sharing the embeddings
"""
self.config.predict_special_tokens = self.transformer.config.predict_special_tokens = predict_special_tokens
self.transformer.set_num_special_tokens(num_special_tokens)
self.lm_head.set_embeddings_weights(self.transformer.wte.weight, predict_special_tokens=predict_special_tokens)
def forward(self, input_ids, mc_token_ids, lm_labels=None, mc_labels=None, token_type_ids=None, position_ids=None, past=None):
transformer_output = self.transformer(input_ids, position_ids, token_type_ids, past)
if self.transformer.output_attentions:
all_attentions, hidden_states, presents = transformer_output
else:
hidden_states, presents = transformer_output
lm_logits = self.lm_head(hidden_states)
mc_logits = self.multiple_choice_head(hidden_states, mc_token_ids)
losses = []
if lm_labels is not None:
shift_logits = lm_logits[..., :-1, :].contiguous()
shift_labels = lm_labels[..., 1:].contiguous()
loss_fct = CrossEntropyLoss(ignore_index=-1)
losses.append(loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)))
if mc_labels is not None:
loss_fct = CrossEntropyLoss()
losses.append(loss_fct(mc_logits.view(-1, mc_logits.size(-1)), mc_labels.view(-1)))
if losses:
return losses
if self.transformer.output_attentions:
return all_attentions, lm_logits, mc_logits, presents
return lm_logits, mc_logits, presents
| 38,587 | 45.944039 | 146 | py |
EmpTransfo | EmpTransfo-master/pytorch_pretrained_bert/modeling_openai.py | # coding=utf-8
# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch OpenAI GPT model."""
from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import copy
import json
import logging
import math
import os
import shutil
import tarfile
import tempfile
import sys
from io import open
import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss
from torch.nn.parameter import Parameter
from .file_utils import cached_path, CONFIG_NAME, WEIGHTS_NAME
from .modeling import BertLayerNorm as LayerNorm
logger = logging.getLogger(__name__)
PRETRAINED_MODEL_ARCHIVE_MAP = {
"openai-gpt": "https://s3.amazonaws.com/models.huggingface.co/bert/openai-gpt-pytorch_model.bin"}
PRETRAINED_CONFIG_ARCHIVE_MAP = {
"openai-gpt": "https://s3.amazonaws.com/models.huggingface.co/bert/openai-gpt-config.json"}
def load_tf_weights_in_openai_gpt(model, openai_checkpoint_folder_path):
""" Load tf pre-trained weights in a pytorch model (from NumPy arrays here)
"""
import re
import numpy as np
print("Loading weights...")
names = json.load(open(openai_checkpoint_folder_path + '/parameters_names.json', "r", encoding='utf-8'))
shapes = json.load(open(openai_checkpoint_folder_path + '/params_shapes.json', "r", encoding='utf-8'))
offsets = np.cumsum([np.prod(shape) for shape in shapes])
init_params = [np.load(openai_checkpoint_folder_path + '/params_{}.npy'.format(n)) for n in range(10)]
init_params = np.split(np.concatenate(init_params, 0), offsets)[:-1]
init_params = [param.reshape(shape) for param, shape in zip(init_params, shapes)]
# This was used when we had a single embedding matrix for positions and tokens
# init_params[0] = np.concatenate([init_params[1], init_params[0]], 0)
# del init_params[1]
init_params = [arr.squeeze() for arr in init_params]
try:
assert model.tokens_embed.weight.shape == init_params[1].shape
assert model.positions_embed.weight.shape == init_params[0].shape
except AssertionError as e:
e.args += (model.tokens_embed.weight.shape, init_params[1].shape)
e.args += (model.positions_embed.weight.shape, init_params[0].shape)
raise
model.tokens_embed.weight.data = torch.from_numpy(init_params[1])
model.positions_embed.weight.data = torch.from_numpy(init_params[0])
names.pop(0)
# Pop position and token embedding arrays
init_params.pop(0)
init_params.pop(0)
for name, array in zip(names, init_params): # names[1:n_transfer], init_params[1:n_transfer]):
name = name[6:] # skip "model/"
assert name[-2:] == ":0"
name = name[:-2]
name = name.split('/')
pointer = model
for m_name in name:
if re.fullmatch(r'[A-Za-z]+\d+', m_name):
l = re.split(r'(\d+)', m_name)
else:
l = [m_name]
if l[0] == 'g':
pointer = getattr(pointer, 'weight')
elif l[0] == 'b':
pointer = getattr(pointer, 'bias')
elif l[0] == 'w':
pointer = getattr(pointer, 'weight')
else:
pointer = getattr(pointer, l[0])
if len(l) >= 2:
num = int(l[1])
pointer = pointer[num]
try:
assert pointer.shape == array.shape
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
print("Initialize PyTorch weight {}".format(name))
pointer.data = torch.from_numpy(array)
return model
def gelu(x):
    # Gaussian Error Linear Unit, tanh approximation (Hendrycks & Gimpel, 2016).
    return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
def swish(x):
    # Swish / SiLU activation: x * sigmoid(x).
    return x * torch.sigmoid(x)
# Activation lookup keyed by config.afn; the functional form of ReLU is used so
# that every entry is directly callable on a tensor (nn.ReLU is a Module class).
ACT_FNS = {"relu": nn.functional.relu, "swish": swish, "gelu": gelu}
class OpenAIGPTConfig(object):
"""Configuration class to store the configuration of a `OpenAIGPTModel`.
"""
def __init__(
self,
vocab_size_or_config_json_file=40478,
n_special=0,
n_positions=512,
n_ctx=512,
n_embd=768,
n_layer=12,
n_head=12,
afn="gelu",
resid_pdrop=0.1,
embd_pdrop=0.1,
attn_pdrop=0.1,
layer_norm_epsilon=1e-5,
initializer_range=0.02,
predict_special_tokens=True
):
"""Constructs OpenAIGPTConfig.
Args:
            vocab_size_or_config_json_file: Vocabulary size of `input_ids` in `OpenAIGPTModel` or a configuration json file.
n_special: The number of special tokens to learn during fine-tuning ('[SEP]', '[CLF]', ...)
n_positions: Number of positional embeddings.
n_ctx: Size of the causal mask (usually same as n_positions).
n_embd: Dimensionality of the embeddings and hidden states.
n_layer: Number of hidden layers in the Transformer encoder.
n_head: Number of attention heads for each attention layer in
the Transformer encoder.
afn: The non-linear activation function (function or string) in the
encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
            resid_pdrop: The dropout probability for all fully connected
layers in the embeddings, encoder, and pooler.
attn_pdrop: The dropout ratio for the attention
probabilities.
embd_pdrop: The dropout ratio for the embeddings.
layer_norm_epsilon: epsilon to use in the layer norm layers
            initializer_range: The stddev of the truncated_normal_initializer for
initializing all weight matrices.
predict_special_tokens: should we predict special tokens (when the model has a LM head)
"""
if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2
and isinstance(vocab_size_or_config_json_file, unicode)):
with open(vocab_size_or_config_json_file, "r", encoding="utf-8") as reader:
json_config = json.loads(reader.read())
for key, value in json_config.items():
self.__dict__[key] = value
elif isinstance(vocab_size_or_config_json_file, int):
self.vocab_size = vocab_size_or_config_json_file
self.n_special = n_special
self.n_ctx = n_ctx
self.n_positions = n_positions
self.n_embd = n_embd
self.n_layer = n_layer
self.n_head = n_head
self.afn = afn
self.resid_pdrop = resid_pdrop
self.embd_pdrop = embd_pdrop
self.attn_pdrop = attn_pdrop
self.layer_norm_epsilon = layer_norm_epsilon
self.initializer_range = initializer_range
self.predict_special_tokens = predict_special_tokens
else:
raise ValueError(
"First argument must be either a vocabulary size (int)"
"or the path to a pretrained model config file (str)"
)
@property
def total_tokens_embeddings(self):
return self.vocab_size + self.n_special
@classmethod
def from_dict(cls, json_object):
"""Constructs a `OpenAIGPTConfig` from a Python dictionary of parameters."""
config = OpenAIGPTConfig(vocab_size_or_config_json_file=-1)
for key, value in json_object.items():
config.__dict__[key] = value
return config
@classmethod
def from_json_file(cls, json_file):
"""Constructs a `OpenAIGPTConfig` from a json file of parameters."""
with open(json_file, "r", encoding="utf-8") as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
def __repr__(self):
return str(self.to_json_string())
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
def to_json_file(self, json_file_path):
""" Save this instance to a json file."""
with open(json_file_path, "w", encoding='utf-8') as writer:
writer.write(self.to_json_string())
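# --- Illustrative sketch (not from the original source; added for clarity) ---
# The config round-trips through JSON, which is how a fine-tuned model's
# hyper-parameters are persisted next to its weights (see CONFIG_NAME above).
# A minimal sketch:
def _example_config_roundtrip():
    config = OpenAIGPTConfig(vocab_size_or_config_json_file=40478, n_special=5)
    restored = OpenAIGPTConfig.from_dict(json.loads(config.to_json_string()))
    # total_tokens_embeddings is derived: vocab_size + n_special
    assert restored.total_tokens_embeddings == 40478 + 5
    return restored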
class Conv1D(nn.Module):
def __init__(self, nf, rf, nx):
super(Conv1D, self).__init__()
self.rf = rf
self.nf = nf
if rf == 1: # faster 1x1 conv
w = torch.empty(nx, nf)
nn.init.normal_(w, std=0.02)
self.weight = Parameter(w)
self.bias = Parameter(torch.zeros(nf))
else: # was used to train LM
raise NotImplementedError
def forward(self, x):
if self.rf == 1:
size_out = x.size()[:-1] + (self.nf,)
x = torch.addmm(self.bias, x.view(-1, x.size(-1)), self.weight)
x = x.view(*size_out)
else:
raise NotImplementedError
return x
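# --- Illustrative sketch (not from the original source; added for clarity) ---
# Conv1D with rf == 1 is simply a linear projection whose weight is stored
# transposed relative to nn.Linear (shape (nx, nf) rather than (nf, nx)); the
# layout matches the original TensorFlow checkpoints. A quick equivalence check:
def _example_conv1d_equivalence():
    conv = Conv1D(nf=8, rf=1, nx=4)
    linear = nn.Linear(4, 8)
    linear.weight.data = conv.weight.data.t()  # transpose into nn.Linear layout
    linear.bias.data = conv.bias.data
    x = torch.randn(2, 3, 4)  # (batch, seq, nx)
    assert torch.allclose(conv(x), linear(x), atol=1e-6)
    return conv(x).shape  # (2, 3, 8)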
class Attention(nn.Module):
def __init__(self, nx, n_ctx, config, scale=False, output_attentions=False):
super(Attention, self).__init__()
n_state = nx # in Attention: n_state=768 (nx=n_embd)
# [switch nx => n_state from Block to Attention to keep identical to TF implem]
assert n_state % config.n_head == 0
self.register_buffer("bias", torch.tril(torch.ones(n_ctx, n_ctx)).view(1, 1, n_ctx, n_ctx))
self.n_head = config.n_head
self.split_size = n_state
self.scale = scale
self.output_attentions = output_attentions
self.c_attn = Conv1D(n_state * 3, 1, nx) # (out_channels, size_conv, in_channels)
self.c_proj = Conv1D(n_state, 1, nx)
self.attn_dropout = nn.Dropout(config.attn_pdrop)
self.resid_dropout = nn.Dropout(config.resid_pdrop)
def _attn(self, q, k, v):
w = torch.matmul(q, k)
if self.scale:
w = w / math.sqrt(v.size(-1))
# w = w * self.bias + -1e9 * (1 - self.bias) # TF implem method: mask_attn_weights
# XD: self.b may be larger than w, so we need to crop it
b = self.bias[:, :, : w.size(-2), : w.size(-1)]
w = w * b + -1e9 * (1 - b)
w = nn.Softmax(dim=-1)(w)
w = self.attn_dropout(w)
if self.output_attentions:
return w, torch.matmul(w, v)
return torch.matmul(w, v)
def merge_heads(self, x):
x = x.permute(0, 2, 1, 3).contiguous()
new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)
return x.view(*new_x_shape) # in Tensorflow implem: fct merge_states
def split_heads(self, x, k=False):
new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head)
x = x.view(*new_x_shape) # in Tensorflow implem: fct split_states
if k:
return x.permute(0, 2, 3, 1)
else:
return x.permute(0, 2, 1, 3)
def forward(self, x):
x = self.c_attn(x)
query, key, value = x.split(self.split_size, dim=2)
query = self.split_heads(query)
key = self.split_heads(key, k=True)
value = self.split_heads(value)
a = self._attn(query, key, value)
if self.output_attentions:
attentions, a = a
a = self.merge_heads(a)
a = self.c_proj(a)
a = self.resid_dropout(a)
if self.output_attentions:
return attentions, a
return a
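# --- Illustrative sketch (not from the original source; added for clarity) ---
# The `bias` buffer registered in Attention is a lower-triangular causal mask:
# position i may only attend to positions <= i. The masking arithmetic used in
# _attn, shown on a toy 3x3 score matrix:
def _example_causal_mask():
    n_ctx = 3
    bias = torch.tril(torch.ones(n_ctx, n_ctx)).view(1, 1, n_ctx, n_ctx)
    scores = torch.zeros(1, 1, n_ctx, n_ctx)
    masked = scores * bias + -1e9 * (1 - bias)  # future positions pushed to -1e9
    probs = nn.Softmax(dim=-1)(masked)
    # Row 0 attends only to position 0, row 1 to positions 0-1, and so on.
    assert torch.allclose(probs[0, 0, 0], torch.tensor([1.0, 0.0, 0.0]))
    return probs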
class MLP(nn.Module):
def __init__(self, n_state, config): # in MLP: n_state=3072 (4 * n_embd)
super(MLP, self).__init__()
nx = config.n_embd
self.c_fc = Conv1D(n_state, 1, nx)
self.c_proj = Conv1D(nx, 1, n_state)
self.act = ACT_FNS[config.afn]
self.dropout = nn.Dropout(config.resid_pdrop)
def forward(self, x):
h = self.act(self.c_fc(x))
h2 = self.c_proj(h)
return self.dropout(h2)
class Block(nn.Module):
def __init__(self, n_ctx, config, scale=False, output_attentions=False):
super(Block, self).__init__()
nx = config.n_embd
self.output_attentions = output_attentions
self.attn = Attention(nx, n_ctx, config, scale, output_attentions)
self.ln_1 = LayerNorm(nx, eps=config.layer_norm_epsilon)
self.mlp = MLP(4 * nx, config)
self.ln_2 = LayerNorm(nx, eps=config.layer_norm_epsilon)
def forward(self, x):
a = self.attn(x)
if self.output_attentions:
attentions, a = a
n = self.ln_1(x + a)
m = self.mlp(n)
h = self.ln_2(n + m)
if self.output_attentions:
return attentions, h
return h
class OpenAIGPTLMHead(nn.Module):
""" Language Model Head for the transformer """
def __init__(self, model_embeddings_weights, config):
super(OpenAIGPTLMHead, self).__init__()
self.n_embd = config.n_embd
self.vocab_size = config.vocab_size
self.predict_special_tokens = config.predict_special_tokens
embed_shape = model_embeddings_weights.shape
self.decoder = nn.Linear(embed_shape[1], embed_shape[0], bias=False)
self.set_embeddings_weights(model_embeddings_weights)
def set_embeddings_weights(self, model_embeddings_weights, predict_special_tokens=True):
self.predict_special_tokens = predict_special_tokens
embed_shape = model_embeddings_weights.shape
self.decoder.weight = model_embeddings_weights # Tied weights
def forward(self, hidden_state):
lm_logits = self.decoder(hidden_state)
if not self.predict_special_tokens:
lm_logits = lm_logits[..., :self.vocab_size]
return lm_logits
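# --- Illustrative sketch (not from the original source; added for clarity) ---
# The LM head ties its decoder weight to the token embedding matrix: logits are
# dot products between hidden states and embedding rows, and both views update
# together during training. A minimal sketch:
def _example_weight_tying():
    config = OpenAIGPTConfig(vocab_size_or_config_json_file=100, n_embd=16)
    embed = nn.Embedding(config.total_tokens_embeddings, config.n_embd)
    head = OpenAIGPTLMHead(embed.weight, config)
    assert head.decoder.weight is embed.weight  # the very same Parameter object
    hidden = torch.randn(2, 5, 16)
    return head(hidden).shape  # (2, 5, total_tokens_embeddings)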
class OpenAIGPTMultipleChoiceHead(nn.Module):
""" Classifier Head for the transformer """
def __init__(self, config):
super(OpenAIGPTMultipleChoiceHead, self).__init__()
self.n_embd = config.n_embd
self.dropout = nn.Dropout2d(config.resid_pdrop) # To reproduce the noise_shape parameter of TF implementation
self.linear = nn.Linear(config.n_embd, 1)
nn.init.normal_(self.linear.weight, std=0.02)
        nn.init.constant_(self.linear.bias, 0)  # zero-initialize the classifier bias
def forward(self, hidden_states, mc_token_ids):
# Classification logits
# hidden_state (bsz, num_choices, seq_length, hidden_size)
# mc_token_ids (bsz, num_choices)
mc_token_ids = mc_token_ids.unsqueeze(-1).unsqueeze(-1).expand(-1, -1, -1, hidden_states.size(-1))
# mc_token_ids (bsz, num_choices, 1, hidden_size)
multiple_choice_h = hidden_states.gather(2, mc_token_ids).squeeze(2)
# multiple_choice_h (bsz, num_choices, hidden_size)
multiple_choice_h = self.dropout(multiple_choice_h.transpose(1, 2)).transpose(1, 2)
multiple_choice_logits = self.linear(multiple_choice_h).squeeze(-1)
# (bsz, num_choices)
return multiple_choice_logits
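# --- Illustrative sketch (not from the original source; added for clarity) ---
# The gather above pools one hidden state per candidate: the one at the index
# held in mc_token_ids (usually the last token of each sequence). A shape
# walk-through with toy sizes:
def _example_mc_gather():
    bsz, num_choices, seq_len, hidden = 2, 3, 7, 16
    hidden_states = torch.randn(bsz, num_choices, seq_len, hidden)
    mc_token_ids = torch.full((bsz, num_choices), seq_len - 1, dtype=torch.long)
    idx = mc_token_ids.unsqueeze(-1).unsqueeze(-1).expand(-1, -1, -1, hidden)
    pooled = hidden_states.gather(2, idx).squeeze(2)  # (bsz, num_choices, hidden)
    assert torch.equal(pooled, hidden_states[:, :, -1, :])
    return pooled.shape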
class OpenAIGPTPreTrainedModel(nn.Module):
""" An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
"""
def __init__(self, config, *inputs, **kwargs):
super(OpenAIGPTPreTrainedModel, self).__init__()
if not isinstance(config, OpenAIGPTConfig):
raise ValueError(
"Parameter config in `{}(config)` should be an instance of class `OpenAIGPTConfig`. "
"To create a model from a pretrained model use "
"`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
self.__class__.__name__, self.__class__.__name__
)
)
self.config = config
def init_weights(self, module):
""" Initialize the weights.
"""
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
elif isinstance(module, LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, num_special_tokens=None, *inputs, **kwargs):
"""
Instantiate a OpenAIGPTPreTrainedModel from a pre-trained model file or a pytorch state dict.
Download and cache the pre-trained model file if needed.
Params:
pretrained_model_name_or_path: either:
- a str with the name of a pre-trained model to load selected in the list of:
. `openai-gpt`
- a path or url to a pretrained model archive containing:
. `openai_gpt_config.json` a configuration file for the model
. `pytorch_model.bin` a PyTorch dump of a OpenAIGPTModel instance
- a path or url to a pretrained model archive containing:
. `openai-gpt-config.json` a configuration file for the model
. a series of NumPy files containing OpenAI TensorFlow trained weights
from_tf: should we load the weights from a locally saved TensorFlow checkpoint
cache_dir: an optional path to a folder in which the pre-trained models will be cached.
            state_dict: an optional state dictionary (collections.OrderedDict object) to use instead of pre-trained models
*inputs, **kwargs: additional input for the specific OpenAI-GPT class
"""
state_dict = kwargs.get('state_dict', None)
kwargs.pop('state_dict', None)
cache_dir = kwargs.get('cache_dir', None)
kwargs.pop('cache_dir', None)
from_tf = kwargs.get('from_tf', False)
kwargs.pop('from_tf', None)
if pretrained_model_name_or_path in PRETRAINED_MODEL_ARCHIVE_MAP:
archive_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name_or_path]
config_file = PRETRAINED_CONFIG_ARCHIVE_MAP[pretrained_model_name_or_path]
else:
archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)
config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
# redirect to the cache, if necessary
try:
resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir)
resolved_config_file = cached_path(config_file, cache_dir=cache_dir)
except EnvironmentError:
logger.error(
"Model name '{}' was not found in model name list ({}). "
"We assumed '{}' was a path or url but couldn't find files {} and {} "
"at this path or url.".format(
pretrained_model_name_or_path, ", ".join(PRETRAINED_MODEL_ARCHIVE_MAP.keys()),
pretrained_model_name_or_path,
archive_file, config_file
)
)
return None
if resolved_archive_file == archive_file and resolved_config_file == config_file:
logger.info("loading weights file {}".format(archive_file))
logger.info("loading configuration file {}".format(config_file))
else:
logger.info("loading weights file {} from cache at {}".format(
archive_file, resolved_archive_file))
logger.info("loading configuration file {} from cache at {}".format(
config_file, resolved_config_file))
# Load config
config = OpenAIGPTConfig.from_json_file(resolved_config_file)
logger.info("Model config {}".format(config))
# Instantiate model.
model = cls(config, *inputs, **kwargs)
if state_dict is None and not from_tf:
state_dict = torch.load(resolved_archive_file, map_location='cpu')
if from_tf:
# Directly load from a TensorFlow checkpoint (stored as NumPy array)
return load_tf_weights_in_openai_gpt(model, resolved_archive_file)
old_keys = []
new_keys = []
for key in state_dict.keys():
new_key = None
if key.endswith(".g"):
new_key = key[:-2] + ".weight"
elif key.endswith(".b"):
new_key = key[:-2] + ".bias"
elif key.endswith(".w"):
new_key = key[:-2] + ".weight"
if new_key:
old_keys.append(key)
new_keys.append(new_key)
for old_key, new_key in zip(old_keys, new_keys):
state_dict[new_key] = state_dict.pop(old_key)
missing_keys = []
unexpected_keys = []
error_msgs = []
# copy state_dict so _load_from_state_dict can modify it
metadata = getattr(state_dict, "_metadata", None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
def load(module, prefix=""):
local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
module._load_from_state_dict(
state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs
)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + ".")
start_model = model
if hasattr(model, "transformer") and all(not s.startswith('transformer.') for s in state_dict.keys()):
start_model = model.transformer
load(start_model, prefix="")
if len(missing_keys) > 0:
logger.info(
"Weights of {} not initialized from pretrained model: {}".format(model.__class__.__name__, missing_keys)
)
if len(unexpected_keys) > 0:
logger.info(
"Weights from pretrained model not used in {}: {}".format(model.__class__.__name__, unexpected_keys)
)
if len(error_msgs) > 0:
raise RuntimeError(
"Error(s) in loading state_dict for {}:\n\t{}".format(model.__class__.__name__, "\n\t".join(error_msgs))
)
# Add additional embeddings for special tokens if needed
# This step also make sure we are still sharing the output and input embeddings after loading weights
model.set_num_special_tokens(num_special_tokens if num_special_tokens is not None else config.n_special)
return model
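# --- Illustrative usage (not from the original source; added for clarity) ----
# Typical loading pattern; 'openai-gpt' resolves through the archive maps above.
# The first call needs network access (weights are cached afterwards), and the
# number of special tokens (5) is an arbitrary example value:
def _example_from_pretrained():
    model = OpenAIGPTDoubleHeadsModel.from_pretrained('openai-gpt',
                                                      num_special_tokens=5)
    model.eval()
    return model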
class OpenAIGPTModel(OpenAIGPTPreTrainedModel):
"""OpenAI GPT model ("Improving Language Understanding by Generative Pre-Training").
    OpenAI GPT uses a single embedding matrix to store the word and special embeddings.
    Special token embeddings are additional tokens that are not pre-trained: [SEP], [CLS]...
    Special tokens need to be trained during fine-tuning if you use them.
    The number of special embeddings can be controlled using the `set_num_special_tokens(num_special_tokens)` function.
    The embeddings are ordered as follows in the token embedding matrix:
[0, ----------------------
... -> word embeddings
config.vocab_size - 1, ______________________
config.vocab_size,
... -> special embeddings
config.vocab_size + config.n_special - 1] ______________________
where total_tokens_embeddings can be obtained as config.total_tokens_embeddings and is:
total_tokens_embeddings = config.vocab_size + config.n_special
    You should use the associated indices to index the embeddings.
Params:
config: a OpenAIGPTConfig class instance with the configuration to build a new model
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] (or more generally [d_1, ..., d_n, sequence_length]
            where d_1 ... d_n are arbitrary dimensions) with the word BPE token indices selected in the range [0, total_tokens_embeddings[
`position_ids`: an optional torch.LongTensor with the same shape as input_ids
with the position indices (selected in the range [0, config.n_positions - 1[.
`token_type_ids`: an optional torch.LongTensor with the same shape as input_ids
You can use it to add a third type of embedding to each input token in the sequence
(the previous two being the word and position embeddings).
The input, position and token_type embeddings are summed inside the Transformer before the first
self-attention block.
Outputs:
`hidden_states`: the encoded-hidden-states at the top of the model
as a torch.FloatTensor of size [batch_size, sequence_length, hidden_size]
            (or more generally [d_1, ..., d_n, hidden_size] where d_1 ... d_n are the dimensions of input_ids)
Example usage:
```python
# Already been converted into BPE token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
config = modeling_openai.OpenAIGPTConfig()
model = modeling_openai.OpenAIGPTModel(config)
hidden_states = model(input_ids)
```
"""
def __init__(self, config, output_attentions=False):
super(OpenAIGPTModel, self).__init__(config)
self.output_attentions = output_attentions
self.tokens_embed = nn.Embedding(config.total_tokens_embeddings, config.n_embd)
self.positions_embed = nn.Embedding(config.n_positions, config.n_embd)
self.drop = nn.Dropout(config.embd_pdrop)
block = Block(config.n_ctx, config, scale=True, output_attentions=output_attentions)
self.h = nn.ModuleList([copy.deepcopy(block) for _ in range(config.n_layer)])
self.apply(self.init_weights)
def set_num_special_tokens(self, num_special_tokens):
" Update input embeddings with new embedding matrice if needed "
if self.config.n_special == num_special_tokens:
return
# Update config
self.config.n_special = num_special_tokens
# Build new embeddings and initialize all new embeddings (in particular the special tokens)
old_embed = self.tokens_embed
self.tokens_embed = nn.Embedding(self.config.total_tokens_embeddings, self.config.n_embd)
self.tokens_embed.to(old_embed.weight.device)
self.init_weights(self.tokens_embed)
# Copy word embeddings from the previous weights
self.tokens_embed.weight.data[:self.config.vocab_size, :] = old_embed.weight.data[:self.config.vocab_size, :]
def forward(self, input_ids, position_ids=None, token_type_ids=None, token_emotion_ids=None, token_action_ids=None):
if position_ids is None:
            # This was used when we had a single embedding matrix for position and token embeddings
# start = self.config.vocab_size + self.config.n_special
# end = start + input_ids.size(-1)
# position_ids = torch.arange(start, end, dtype=torch.long, device=input_ids.device)
position_ids = torch.arange(input_ids.size(-1), dtype=torch.long, device=input_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_ids.size(-1))
position_ids = position_ids.view(-1, position_ids.size(-1))
inputs_embeds = self.tokens_embed(input_ids)
position_embeds = self.positions_embed(position_ids)
if token_type_ids is not None:
token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))
token_type_embeds = self.tokens_embed(token_type_ids)
else:
token_type_embeds = 0
if token_emotion_ids is not None:
token_emotion_ids = token_emotion_ids.view(-1, token_emotion_ids.size(-1))
token_emotion_embeds = self.tokens_embed(token_emotion_ids)
else:
token_emotion_embeds = 0
if token_action_ids is not None:
token_action_ids = token_action_ids.view(-1, token_action_ids.size(-1))
token_action_embeds = self.tokens_embed(token_action_ids)
else:
token_action_embeds = 0
hidden_states = inputs_embeds + position_embeds + token_type_embeds + token_emotion_embeds + token_action_embeds
hidden_states = self.drop(hidden_states)
all_attentions = []
for block in self.h:
if self.output_attentions:
attentions, hidden_states = block(hidden_states)
all_attentions.append(attentions)
else:
hidden_states = block(hidden_states)
output_shape = input_shape + (hidden_states.size(-1),)
if self.output_attentions:
return all_attentions, hidden_states.view(*output_shape)
return hidden_states.view(*output_shape)
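# --- Illustrative sketch (not from the original source; added for clarity) ---
# Forward pass of the model above with the extra embedding channels this fork
# adds: token_type, emotion and action ids all index into the shared token
# embedding matrix, so their values must lie in [0, total_tokens_embeddings[.
# A minimal sketch with a tiny randomly initialized config:
def _example_openai_gpt_forward():
    config = OpenAIGPTConfig(vocab_size_or_config_json_file=200, n_special=10,
                             n_positions=32, n_ctx=32, n_embd=24, n_layer=2,
                             n_head=2)
    model = OpenAIGPTModel(config)
    input_ids = torch.randint(0, 200, (2, 8))
    # 205 is one of the 10 special-token slots appended after the vocabulary.
    token_emotion_ids = torch.full((2, 8), 205, dtype=torch.long)
    hidden = model(input_ids, token_emotion_ids=token_emotion_ids)
    return hidden.shape  # (2, 8, 24)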
class OpenAIGPTLMHeadModel(OpenAIGPTPreTrainedModel):
"""OpenAI GPT model with a Language Modeling head ("Improving Language Understanding by Generative Pre-Training").
    OpenAI GPT uses a single embedding matrix to store the word and special embeddings.
    Special token embeddings are additional tokens that are not pre-trained: [SEP], [CLS]...
    Special tokens need to be trained during fine-tuning if you use them.
    The number of special embeddings can be controlled using the `set_num_special_tokens(num_special_tokens)` function.
    The embeddings are ordered as follows in the token embedding matrix:
[0, ----------------------
... -> word embeddings
config.vocab_size - 1, ______________________
config.vocab_size,
... -> special embeddings
config.vocab_size + config.n_special - 1] ______________________
where total_tokens_embeddings can be obtained as config.total_tokens_embeddings and is:
total_tokens_embeddings = config.vocab_size + config.n_special
    You should use the associated indices to index the embeddings.
Params:
config: a OpenAIGPTConfig class instance with the configuration to build a new model
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] (or more generally [d_1, ..., d_n, sequence_length]
            where d_1 ... d_n are arbitrary dimensions) with the word BPE token indices selected in the range [0, total_tokens_embeddings[
`position_ids`: an optional torch.LongTensor with the same shape as input_ids
with the position indices (selected in the range [0, config.n_positions - 1[.
`token_type_ids`: an optional torch.LongTensor with the same shape as input_ids
You can use it to add a third type of embedding to each input token in the sequence
(the previous two being the word and position embeddings).
The input, position and token_type embeddings are summed inside the Transformer before the first
self-attention block.
`lm_labels`: optional language modeling labels: torch.LongTensor of shape [batch_size, sequence_length]
with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked), the loss
is only computed for the labels set in [0, ..., vocab_size]
Outputs:
if `lm_labels` is not `None`:
Outputs the language modeling loss.
else:
`lm_logits`: the language modeling logits as a torch.FloatTensor of size [batch_size, sequence_length, total_tokens_embeddings]
                (or more generally [d_1, ..., d_n, total_tokens_embeddings] where d_1 ... d_n are the dimensions of input_ids)
Example usage:
```python
# Already been converted into BPE token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
config = modeling_openai.OpenAIGPTConfig()
model = modeling_openai.OpenAIGPTLMHeadModel(config)
lm_logits = model(input_ids)
```
"""
def __init__(self, config, output_attentions=False):
super(OpenAIGPTLMHeadModel, self).__init__(config)
self.transformer = OpenAIGPTModel(config, output_attentions=output_attentions)
self.lm_head = OpenAIGPTLMHead(self.transformer.tokens_embed.weight, config)
self.apply(self.init_weights)
def set_num_special_tokens(self, num_special_tokens, predict_special_tokens=True):
""" Update input and output embeddings with new embedding matrice
Make sure we are sharing the embeddings
"""
self.config.predict_special_tokens = self.transformer.config.predict_special_tokens = predict_special_tokens
self.transformer.set_num_special_tokens(num_special_tokens)
self.lm_head.set_embeddings_weights(self.transformer.tokens_embed.weight,
predict_special_tokens=predict_special_tokens)
def forward(self, input_ids, position_ids=None, token_type_ids=None, lm_labels=None):
hidden_states = self.transformer(input_ids, position_ids, token_type_ids)
if self.transformer.output_attentions:
all_attentions, hidden_states = hidden_states
lm_logits = self.lm_head(hidden_states)
if lm_labels is not None:
# Shift so that tokens < n predict n
shift_logits = lm_logits[..., :-1, :].contiguous()
shift_labels = lm_labels[..., 1:].contiguous()
# Flatten the tokens
loss_fct = CrossEntropyLoss(ignore_index=-1)
loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)),
shift_labels.view(-1))
return loss
if self.transformer.output_attentions:
return all_attentions, lm_logits
return lm_logits
class OpenAIGPTDoubleHeadsModel(OpenAIGPTPreTrainedModel):
"""OpenAI GPT model with a Language Modeling and a Multiple Choice head ("Improving Language Understanding by Generative Pre-Training").
    OpenAI GPT uses a single embedding matrix to store the word and special embeddings.
    Special token embeddings are additional tokens that are not pre-trained: [SEP], [CLS]...
    Special tokens need to be trained during fine-tuning if you use them.
    The number of special embeddings can be controlled using the `set_num_special_tokens(num_special_tokens)` function.
    The embeddings are ordered as follows in the token embedding matrix:
[0, ----------------------
... -> word embeddings
config.vocab_size - 1, ______________________
config.vocab_size,
... -> special embeddings
config.vocab_size + config.n_special - 1] ______________________
where total_tokens_embeddings can be obtained as config.total_tokens_embeddings and is:
total_tokens_embeddings = config.vocab_size + config.n_special
    You should use the associated indices to index the embeddings.
Params:
config: a OpenAIGPTConfig class instance with the configuration to build a new model
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, num_choices, sequence_length] with the BPE token
indices selected in the range [0, total_tokens_embeddings[
`mc_token_ids`: a torch.LongTensor of shape [batch_size, num_choices] with the index of the token from
which we should take the hidden state to feed the multiple choice classifier (usually last token of the sequence)
`position_ids`: an optional torch.LongTensor with the same shape as input_ids
with the position indices (selected in the range [0, config.n_positions - 1[.
`token_type_ids`: an optional torch.LongTensor with the same shape as input_ids
You can use it to add a third type of embedding to each input token in the sequence
(the previous two being the word and position embeddings).
The input, position and token_type embeddings are summed inside the Transformer before the first
self-attention block.
`lm_labels`: optional language modeling labels: torch.LongTensor of shape [batch_size, num_choices, sequence_length]
with indices selected in [-1, 0, ..., total_tokens_embeddings]. All labels set to -1 are ignored (masked), the loss
is only computed for the labels set in [0, ..., total_tokens_embeddings]
`multiple_choice_labels`: optional multiple choice labels: torch.LongTensor of shape [batch_size]
with indices selected in [0, ..., num_choices].
Outputs:
if `lm_labels` and `multiple_choice_labels` are not `None`:
Outputs a tuple of losses with the language modeling loss and the multiple choice loss.
else: a tuple with
`lm_logits`: the language modeling logits as a torch.FloatTensor of size [batch_size, num_choices, sequence_length, total_tokens_embeddings]
`multiple_choice_logits`: the multiple choice logits as a torch.FloatTensor of size [batch_size, num_choices]
Example usage:
```python
# Already been converted into BPE token ids
input_ids = torch.LongTensor([[[31, 51, 99], [15, 5, 0]]]) # (bsz, number of choice, seq length)
mc_token_ids = torch.LongTensor([[2], [1]]) # (bsz, number of choice)
config = modeling_openai.OpenAIGPTConfig()
model = modeling_openai.OpenAIGPTDoubleHeadsModel(config)
lm_logits, multiple_choice_logits = model(input_ids, mc_token_ids)
```
"""
def __init__(self, config, output_attentions=False):
super(OpenAIGPTDoubleHeadsModel, self).__init__(config)
self.transformer = OpenAIGPTModel(config, output_attentions=output_attentions)
self.lm_head = OpenAIGPTLMHead(self.transformer.tokens_embed.weight, config)
self.multiple_choice_head = OpenAIGPTMultipleChoiceHead(config)
self.apply(self.init_weights)
def set_num_special_tokens(self, num_special_tokens, predict_special_tokens=True):
""" Update input and output embeddings with new embedding matrice
Make sure we are sharing the embeddings
"""
self.config.predict_special_tokens = self.transformer.config.predict_special_tokens = predict_special_tokens
self.transformer.set_num_special_tokens(num_special_tokens)
self.lm_head.set_embeddings_weights(self.transformer.tokens_embed.weight,
predict_special_tokens=predict_special_tokens)
def forward(self, input_ids, mc_token_ids, lm_labels=None, mc_labels=None,
token_type_ids=None, token_emotion_ids=None, token_action_ids=None, position_ids=None):
hidden_states = self.transformer(input_ids, position_ids, token_type_ids, token_emotion_ids, token_action_ids)
if self.transformer.output_attentions:
all_attentions, hidden_states = hidden_states
lm_logits = self.lm_head(hidden_states)
mc_logits = self.multiple_choice_head(hidden_states, mc_token_ids)
losses = []
        if lm_labels is not None:  # lm_labels of all -1 marks a distractor (negative) candidate; those positions are ignored via ignore_index=-1
shift_logits = lm_logits[..., :-1, :].contiguous()
shift_labels = lm_labels[..., 1:].contiguous()
loss_fct = CrossEntropyLoss(ignore_index=-1)
losses.append(loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)))
if mc_labels is not None:
loss_fct = CrossEntropyLoss()
losses.append(loss_fct(mc_logits.view(-1, mc_logits.size(-1)), mc_labels.view(-1)))
if losses:
return losses
if self.transformer.output_attentions:
return all_attentions, lm_logits, mc_logits
return lm_logits, mc_logits
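# --- Illustrative sketch (not from the original source; added for clarity) ---
# One training step for the double-heads model: candidates are stacked on a
# num_choices axis, lm_labels are -1 everywhere for distractors (ignored via
# ignore_index=-1), and mc_labels holds the gold candidate index. The loss
# weights (1.0 and 0.5) are arbitrary example values:
def _example_double_heads_step():
    config = OpenAIGPTConfig(vocab_size_or_config_json_file=200, n_special=10,
                             n_positions=32, n_ctx=32, n_embd=24, n_layer=2,
                             n_head=2)
    model = OpenAIGPTDoubleHeadsModel(config)
    bsz, num_choices, seq_len = 2, 2, 8
    input_ids = torch.randint(0, 200, (bsz, num_choices, seq_len))
    mc_token_ids = torch.full((bsz, num_choices), seq_len - 1, dtype=torch.long)
    lm_labels = input_ids.clone()
    lm_labels[:, 0, :] = -1                        # choice 0 is a distractor
    mc_labels = torch.ones(bsz, dtype=torch.long)  # choice 1 is the gold one
    lm_loss, mc_loss = model(input_ids, mc_token_ids, lm_labels, mc_labels)
    total = 1.0 * lm_loss + 0.5 * mc_loss
    total.backward()
    return total.item()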
###############################################################################
class OpenAIGPTEmotionChoiceHead(nn.Module):
""" Classifier Head for the transformer """
def __init__(self, config):
super(OpenAIGPTEmotionChoiceHead, self).__init__()
self.n_embd = config.n_embd
self.dropout = nn.Dropout2d(config.resid_pdrop) # To reproduce the noise_shape parameter of TF implementation
num_emotions = 7
self.linear = nn.Linear(config.n_embd, num_emotions)
nn.init.normal_(self.linear.weight, std=0.02)
        nn.init.constant_(self.linear.bias, 0)  # zero-initialize the classifier bias
def forward(self, hidden_states, mc_token_ids):
# Classification logits
# hidden_state (bsz, seq_length, hidden_size)
# mc_token_ids (bsz,)
mc_token_ids = mc_token_ids.unsqueeze(-1).unsqueeze(-1).expand(-1, -1, hidden_states.size(-1))
# mc_token_ids (bsz, 1, hidden_size)
multiple_choice_h = hidden_states.gather(1, mc_token_ids).squeeze(1)
# multiple_choice_h (bsz, hidden_size)
multiple_choice_h = self.dropout(multiple_choice_h)
multiple_choice_logits = self.linear(multiple_choice_h)
# (bsz, num_choices)
return multiple_choice_logits
class OpenAIGPTBatchedEmotionChoiceHead(nn.Module):
    """ Emotion classification head applied to every candidate (batched over choices) """
    def __init__(self, config):
super(OpenAIGPTBatchedEmotionChoiceHead, self).__init__()
self.n_embd = config.n_embd
self.dropout = nn.Dropout2d(config.resid_pdrop) # To reproduce the noise_shape parameter of TF implementation
num_emotions = 7
self.linear = nn.Linear(config.n_embd, num_emotions)
nn.init.normal_(self.linear.weight, std=0.02)
        nn.init.constant_(self.linear.bias, 0)  # zero-initialize the classifier bias
def forward(self, hidden_states, mc_token_ids):
# Classification logits
# hidden_state (bsz, num_choices, seq_length, hidden_size)
# mc_token_ids (bsz, num_choices)
mc_token_ids = mc_token_ids.unsqueeze(-1).unsqueeze(-1).expand(-1, -1, -1, hidden_states.size(-1))
# mc_token_ids (bsz, num_choices, 1, hidden_size)
multiple_choice_h = hidden_states.gather(2, mc_token_ids).squeeze(2)
# multiple_choice_h (bsz, num_choices, hidden_size)
multiple_choice_h = self.dropout(multiple_choice_h.transpose(1, 2)).transpose(1, 2)
multiple_choice_logits = self.linear(multiple_choice_h).squeeze(-1)
# (bsz, num_choices)
return multiple_choice_logits
class OpenAIGPTEmotionHead(nn.Module):
""" Classifier Head for the transformer """
def __init__(self, config):
super(OpenAIGPTEmotionHead, self).__init__()
self.n_embd = config.n_embd
self.dropout = nn.Dropout2d(config.resid_pdrop) # To reproduce the noise_shape parameter of TF implementation
        num_classes = 2  # note: this probably needs to be 1 (a single logit) for a binary head
self.linear = nn.Linear(config.n_embd, num_classes)
nn.init.normal_(self.linear.weight, std=0.02)
        nn.init.constant_(self.linear.bias, 0)  # zero-initialize the classifier bias
def forward(self, hidden_states, mc_token_ids):
# Classification logits
# hidden_state (bsz, seq_length, hidden_size)
# mc_token_ids (bsz,)
mc_token_ids = mc_token_ids.unsqueeze(-1).unsqueeze(-1).expand(-1, -1, hidden_states.size(-1))
# mc_token_ids (bsz, 1, hidden_size)
multiple_choice_h = hidden_states.gather(1, mc_token_ids).squeeze(1)
# multiple_choice_h (bsz, hidden_size)
multiple_choice_h = self.dropout(multiple_choice_h)
multiple_choice_logits = self.linear(multiple_choice_h)
# (bsz, num_choices)
return multiple_choice_logits
class OpenAIGPTDoubleHeadLMEmotionRecognitionModel(OpenAIGPTPreTrainedModel):
    """ OpenAI GPT with a language modeling head plus an emotion classification head """
    def __init__(self, config, output_attentions=False):
super(OpenAIGPTDoubleHeadLMEmotionRecognitionModel, self).__init__(config)
self.transformer = OpenAIGPTModel(config, output_attentions=output_attentions)
self.lm_head = OpenAIGPTLMHead(self.transformer.tokens_embed.weight, config)
self.emotion_choice_head = OpenAIGPTEmotionChoiceHead(config)
self.apply(self.init_weights)
def set_num_special_tokens(self, num_special_tokens, predict_special_tokens=True):
""" Update input and output embeddings with new embedding matrice
Make sure we are sharing the embeddings
"""
self.config.predict_special_tokens = self.transformer.config.predict_special_tokens = predict_special_tokens
self.transformer.set_num_special_tokens(num_special_tokens)
self.lm_head.set_embeddings_weights(self.transformer.tokens_embed.weight,
predict_special_tokens=predict_special_tokens)
def forward(self, input_ids, mc_token_ids, lm_labels=None, mc_labels=None, token_type_ids=None,
token_emotion_ids=None, position_ids=None):
hidden_states = self.transformer(input_ids, position_ids, token_type_ids, token_emotion_ids)
if self.transformer.output_attentions:
all_attentions, hidden_states = hidden_states
lm_logits = self.lm_head(hidden_states)
mc_logits = self.emotion_choice_head(hidden_states, mc_token_ids)
losses = []
        if lm_labels is not None:  # lm_labels of all -1 marks a distractor (negative) candidate; those positions are ignored via ignore_index=-1
shift_logits = lm_logits[..., :-1, :].contiguous()
shift_labels = lm_labels[..., 1:].contiguous()
loss_fct = CrossEntropyLoss(ignore_index=-1)
losses.append(loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)))
if mc_labels is not None:
loss_fct = CrossEntropyLoss(ignore_index=-1)
# loss_fct = CrossEntropyLoss()
losses.append(loss_fct(mc_logits.view(-1, mc_logits.size(-1)), mc_labels.view(-1)))
if losses:
return losses
if self.transformer.output_attentions:
return all_attentions, lm_logits, mc_logits
return lm_logits, mc_logits
class OpenAIGPTForEmotionDetection(OpenAIGPTPreTrainedModel):
    """ OpenAI GPT with a language modeling head plus a binary emotion detection head """
    def __init__(self, config, output_attentions=False):
super(OpenAIGPTForEmotionDetection, self).__init__(config)
self.transformer = OpenAIGPTModel(config, output_attentions=output_attentions)
self.lm_head = OpenAIGPTLMHead(self.transformer.tokens_embed.weight, config)
self.emotion_classification_head = OpenAIGPTEmotionHead(config)
self.apply(self.init_weights)
def set_num_special_tokens(self, num_special_tokens, predict_special_tokens=True):
""" Update input and output embeddings with new embedding matrice
Make sure we are sharing the embeddings
"""
self.config.predict_special_tokens = self.transformer.config.predict_special_tokens = predict_special_tokens
self.transformer.set_num_special_tokens(num_special_tokens)
self.lm_head.set_embeddings_weights(self.transformer.tokens_embed.weight,
predict_special_tokens=predict_special_tokens)
def forward(self, input_ids, mc_token_ids, lm_labels=None,
mc_labels=None, token_type_ids=None, position_ids=None, token_emotion_ids=None):
hidden_states = self.transformer(input_ids, position_ids, token_type_ids, token_emotion_ids=token_emotion_ids)
if self.transformer.output_attentions:
all_attentions, hidden_states = hidden_states
lm_logits = self.lm_head(hidden_states)
mc_logits = self.emotion_classification_head(hidden_states, mc_token_ids)
losses = []
        if lm_labels is not None:  # lm_labels of all -1 marks a distractor (negative) candidate; those positions are ignored via ignore_index=-1
shift_logits = lm_logits[..., :-1, :].contiguous()
shift_labels = lm_labels[..., 1:].contiguous()
loss_fct = CrossEntropyLoss(ignore_index=-1)
losses.append(loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)))
if mc_labels is not None:
# loss_fct = CrossEntropyLoss(ignore_index=-1)
loss_fct = CrossEntropyLoss()
losses.append(loss_fct(mc_logits.view(-1, mc_logits.size(-1)), mc_labels.view(-1)))
if losses:
return losses
if self.transformer.output_attentions:
return all_attentions, lm_logits, mc_logits
return lm_logits, mc_logits
class OpenAIGPTMultiHeadModel(OpenAIGPTPreTrainedModel):
    """ OpenAI GPT with language modeling, emotion classification and sentence (multiple choice) heads """
    def __init__(self, config, output_attentions=False):
super(OpenAIGPTMultiHeadModel, self).__init__(config)
self.transformer = OpenAIGPTModel(config, output_attentions=output_attentions)
self.lm_head = OpenAIGPTLMHead(self.transformer.tokens_embed.weight, config)
self.emotion_choice_head = OpenAIGPTBatchedEmotionChoiceHead(config)
self.sentence_choice_head = OpenAIGPTMultipleChoiceHead(config)
self.apply(self.init_weights)
def set_num_special_tokens(self, num_special_tokens, predict_special_tokens=True):
""" Update input and output embeddings with new embedding matrice
Make sure we are sharing the embeddings
"""
self.config.predict_special_tokens = self.transformer.config.predict_special_tokens = predict_special_tokens
self.transformer.set_num_special_tokens(num_special_tokens)
self.lm_head.set_embeddings_weights(self.transformer.tokens_embed.weight,
predict_special_tokens=predict_special_tokens)
def forward(self, input_ids, ec_token_ids, sc_token_ids, lm_labels=None,
ec_labels=None, sc_labels=None, token_type_ids=None,
token_emotion_ids=None, token_action_ids=None,
position_ids=None):
hidden_states = self.transformer(input_ids, position_ids, token_type_ids, token_emotion_ids)
if self.transformer.output_attentions:
all_attentions, hidden_states = hidden_states
lm_logits = self.lm_head(hidden_states)
emotion_logits = self.emotion_choice_head(hidden_states, ec_token_ids)
sentence_logits = self.sentence_choice_head(hidden_states, sc_token_ids)
losses = []
        if lm_labels is not None:  # lm_labels of all -1 marks a distractor (negative) candidate; those positions are ignored via ignore_index=-1
shift_logits = lm_logits[..., :-1, :].contiguous()
shift_labels = lm_labels[..., 1:].contiguous()
loss_fct = CrossEntropyLoss(ignore_index=-1)
losses.append(loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)))
if ec_labels is not None:
loss_fct = CrossEntropyLoss(ignore_index=-1)
losses.append(loss_fct(emotion_logits.view(-1, emotion_logits.size(-1)), ec_labels.view(-1)))
if sc_labels is not None:
loss_fct = CrossEntropyLoss(ignore_index=-1)
losses.append(loss_fct(sentence_logits.view(-1, sentence_logits.size(-1)), sc_labels.view(-1)))
if losses:
return losses
if self.transformer.output_attentions:
return all_attentions, lm_logits, emotion_logits, sentence_logits
return lm_logits, emotion_logits, sentence_logits
| 53,002 | 47.626606 | 186 | py |
EmpTransfo | EmpTransfo-master/pytorch_pretrained_bert/convert_transfo_xl_checkpoint_to_pytorch.py | # coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert Transformer XL checkpoint and datasets."""
from __future__ import absolute_import, division, print_function
import argparse
import os
import sys
from io import open
import torch
import pytorch_pretrained_bert.tokenization_transfo_xl as data_utils
from pytorch_pretrained_bert.modeling_transfo_xl import (CONFIG_NAME,
WEIGHTS_NAME,
TransfoXLConfig,
TransfoXLLMHeadModel,
load_tf_weights_in_transfo_xl)
from pytorch_pretrained_bert.tokenization_transfo_xl import (CORPUS_NAME,
VOCAB_NAME)
if sys.version_info[0] == 2:
import cPickle as pickle
else:
import pickle
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules['data_utils'] = data_utils
sys.modules['vocabulary'] = data_utils
def convert_transfo_xl_checkpoint_to_pytorch(tf_checkpoint_path,
transfo_xl_config_file,
pytorch_dump_folder_path,
transfo_xl_dataset_file):
if transfo_xl_dataset_file:
# Convert a pre-processed corpus (see original TensorFlow repo)
with open(transfo_xl_dataset_file, "rb") as fp:
corpus = pickle.load(fp, encoding="latin1")
# Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
pytorch_vocab_dump_path = pytorch_dump_folder_path + '/' + VOCAB_NAME
print("Save vocabulary to {}".format(pytorch_vocab_dump_path))
corpus_vocab_dict = corpus.vocab.__dict__
torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)
corpus_dict_no_vocab = corpus.__dict__
corpus_dict_no_vocab.pop('vocab', None)
pytorch_dataset_dump_path = pytorch_dump_folder_path + '/' + CORPUS_NAME
print("Save dataset to {}".format(pytorch_dataset_dump_path))
torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)
if tf_checkpoint_path:
# Convert a pre-trained TensorFlow model
config_path = os.path.abspath(transfo_xl_config_file)
tf_path = os.path.abspath(tf_checkpoint_path)
print("Converting Transformer XL checkpoint from {} with config at {}".format(tf_path, config_path))
# Initialise PyTorch model
if transfo_xl_config_file == "":
config = TransfoXLConfig()
else:
config = TransfoXLConfig(transfo_xl_config_file)
print("Building PyTorch model from configuration: {}".format(str(config)))
model = TransfoXLLMHeadModel(config)
model = load_tf_weights_in_transfo_xl(model, config, tf_path)
# Save pytorch-model
pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
print("Save PyTorch model to {}".format(os.path.abspath(pytorch_weights_dump_path)))
torch.save(model.state_dict(), pytorch_weights_dump_path)
print("Save configuration file to {}".format(os.path.abspath(pytorch_config_dump_path)))
with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
f.write(config.to_json_string())
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path",
default = None,
type = str,
required = True,
help = "Path to the folder to store the PyTorch model or dataset/vocab.")
parser.add_argument("--tf_checkpoint_path",
default = "",
type = str,
help = "An optional path to a TensorFlow checkpoint path to be converted.")
parser.add_argument("--transfo_xl_config_file",
default = "",
type = str,
help = "An optional config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture.")
parser.add_argument("--transfo_xl_dataset_file",
default = "",
type = str,
help = "An optional dataset file to be converted in a vocabulary.")
args = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file)
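# --- Illustrative usage (not from the original source; added for clarity) ----
# Example invocations with placeholder paths:
#
# Convert a pre-processed Transformer-XL corpus pickle:
# python convert_transfo_xl_checkpoint_to_pytorch.py \
# --pytorch_dump_folder_path ./dump --transfo_xl_dataset_file ./corpus.pkl
#
# Convert a TensorFlow checkpoint (optionally with a config file):
# python convert_transfo_xl_checkpoint_to_pytorch.py \
# --pytorch_dump_folder_path ./dump --tf_checkpoint_path ./model.ckpt \
# --transfo_xl_config_file ./config.json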
| 5,671 | 47.478632 | 121 | py |
EmpTransfo | EmpTransfo-master/pytorch_pretrained_bert/file_utils.py | """
Utilities for working with the local dataset cache.
This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp
Copyright by the AllenNLP authors.
"""
from __future__ import (absolute_import, division, print_function, unicode_literals)
import sys
import json
import logging
import os
import shutil
import tempfile
import fnmatch
from functools import wraps
from hashlib import sha256
from io import open
import boto3
import requests
from botocore.exceptions import ClientError
from tqdm import tqdm
try:
from torch.hub import _get_torch_home
torch_cache_home = _get_torch_home()
except ImportError:
torch_cache_home = os.path.expanduser(
os.getenv('TORCH_HOME', os.path.join(
os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch')))
default_cache_path = os.path.join(torch_cache_home, 'pytorch_pretrained_bert')
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
try:
from pathlib import Path
PYTORCH_PRETRAINED_BERT_CACHE = Path(
os.getenv('PYTORCH_PRETRAINED_BERT_CACHE', default_cache_path))
except (AttributeError, ImportError):
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv('PYTORCH_PRETRAINED_BERT_CACHE',
default_cache_path)
CONFIG_NAME = "config.json"
WEIGHTS_NAME = "pytorch_model.bin"
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
def url_to_filename(url, etag=None):
"""
Convert `url` into a hashed filename in a repeatable way.
If `etag` is specified, append its hash to the url's, delimited
by a period.
"""
url_bytes = url.encode('utf-8')
url_hash = sha256(url_bytes)
filename = url_hash.hexdigest()
if etag:
etag_bytes = etag.encode('utf-8')
etag_hash = sha256(etag_bytes)
filename += '.' + etag_hash.hexdigest()
return filename
def filename_to_url(filename, cache_dir=None):
"""
Return the url and etag (which may be ``None``) stored for `filename`.
Raise ``EnvironmentError`` if `filename` or its stored metadata do not exist.
"""
if cache_dir is None:
cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
cache_path = os.path.join(cache_dir, filename)
if not os.path.exists(cache_path):
raise EnvironmentError("file {} not found".format(cache_path))
meta_path = cache_path + '.json'
if not os.path.exists(meta_path):
raise EnvironmentError("file {} not found".format(meta_path))
with open(meta_path, encoding="utf-8") as meta_file:
metadata = json.load(meta_file)
url = metadata['url']
etag = metadata['etag']
return url, etag
def cached_path(url_or_filename, cache_dir=None):
"""
Given something that might be a URL (or might be a local path),
determine which. If it's a URL, download the file and cache it, and
return the path to the cached file. If it's already a local path,
make sure the file exists and then return the path.
"""
if cache_dir is None:
cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
if sys.version_info[0] == 3 and isinstance(url_or_filename, Path):
url_or_filename = str(url_or_filename)
if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
parsed = urlparse(url_or_filename)
if parsed.scheme in ('http', 'https', 's3'):
# URL, so get it from the cache (downloading if necessary)
return get_from_cache(url_or_filename, cache_dir)
elif os.path.exists(url_or_filename):
# File, and it exists.
return url_or_filename
elif parsed.scheme == '':
# File, but it doesn't exist.
raise EnvironmentError("file {} not found".format(url_or_filename))
else:
# Something unknown
raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
def split_s3_path(url):
"""Split a full s3 path into the bucket name and path."""
parsed = urlparse(url)
if not parsed.netloc or not parsed.path:
raise ValueError("bad s3 path {}".format(url))
bucket_name = parsed.netloc
s3_path = parsed.path
# Remove '/' at beginning of path.
if s3_path.startswith("/"):
s3_path = s3_path[1:]
return bucket_name, s3_path
def s3_request(func):
"""
Wrapper function for s3 requests in order to create more helpful error
messages.
"""
@wraps(func)
def wrapper(url, *args, **kwargs):
try:
return func(url, *args, **kwargs)
except ClientError as exc:
if int(exc.response["Error"]["Code"]) == 404:
raise EnvironmentError("file {} not found".format(url))
else:
raise
return wrapper
@s3_request
def s3_etag(url):
"""Check ETag on S3 object."""
s3_resource = boto3.resource("s3")
bucket_name, s3_path = split_s3_path(url)
s3_object = s3_resource.Object(bucket_name, s3_path)
return s3_object.e_tag
@s3_request
def s3_get(url, temp_file):
"""Pull a file directly from S3."""
s3_resource = boto3.resource("s3")
bucket_name, s3_path = split_s3_path(url)
s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)
def http_get(url, temp_file):
req = requests.get(url, stream=True)
content_length = req.headers.get('Content-Length')
total = int(content_length) if content_length is not None else None
progress = tqdm(unit="B", total=total)
for chunk in req.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
progress.update(len(chunk))
temp_file.write(chunk)
progress.close()
def get_from_cache(url, cache_dir=None):
"""
Given a URL, look for the corresponding dataset in the local cache.
If it's not there, download it. Then return the path to the cached file.
"""
if cache_dir is None:
cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
# Get eTag to add to filename, if it exists.
if url.startswith("s3://"):
etag = s3_etag(url)
else:
try:
response = requests.head(url, allow_redirects=True)
if response.status_code != 200:
etag = None
else:
etag = response.headers.get("ETag")
except EnvironmentError:
etag = None
if sys.version_info[0] == 2 and etag is not None:
etag = etag.decode('utf-8')
filename = url_to_filename(url, etag)
# get cache path to put the file
cache_path = os.path.join(cache_dir, filename)
# If we don't have a connection (etag is None) and can't identify the file
# try to get the last downloaded one
if not os.path.exists(cache_path) and etag is None:
matching_files = fnmatch.filter(os.listdir(cache_dir), filename + '.*')
matching_files = list(filter(lambda s: not s.endswith('.json'), matching_files))
if matching_files:
cache_path = os.path.join(cache_dir, matching_files[-1])
if not os.path.exists(cache_path):
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with tempfile.NamedTemporaryFile() as temp_file:
logger.info("%s not found in cache, downloading to %s", url, temp_file.name)
# GET file object
if url.startswith("s3://"):
s3_get(url, temp_file)
else:
http_get(url, temp_file)
# we are copying the file before closing it, so flush to avoid truncation
temp_file.flush()
# shutil.copyfileobj() starts at the current position, so go to the start
temp_file.seek(0)
logger.info("copying %s to cache at %s", temp_file.name, cache_path)
with open(cache_path, 'wb') as cache_file:
shutil.copyfileobj(temp_file, cache_file)
logger.info("creating metadata file for %s", cache_path)
meta = {'url': url, 'etag': etag}
meta_path = cache_path + '.json'
with open(meta_path, 'w') as meta_file:
output_string = json.dumps(meta)
if sys.version_info[0] == 2 and isinstance(output_string, str):
output_string = unicode(output_string, 'utf-8') # The beauty of python 2
meta_file.write(output_string)
logger.info("removing temp file %s", temp_file.name)
return cache_path
def read_set_from_file(filename):
'''
Extract a de-duped collection (set) of text from a file.
Expected file format is one item per line.
'''
collection = set()
with open(filename, 'r', encoding='utf-8') as file_:
for line in file_:
collection.add(line.rstrip())
return collection
def get_file_extension(path, dot=True, lower=True):
ext = os.path.splitext(path)[1]
ext = ext if dot else ext[1:]
return ext.lower() if lower else ext
| 9,347 | 32.385714 | 98 | py |
EmpTransfo | EmpTransfo-master/pytorch_pretrained_bert/convert_tf_checkpoint_to_pytorch.py | # coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert BERT checkpoint."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
import argparse
import tensorflow as tf
import torch
import numpy as np
from pytorch_pretrained_bert.modeling import BertConfig, BertForPreTraining, load_tf_weights_in_bert
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
# Initialise PyTorch model
config = BertConfig.from_json_file(bert_config_file)
print("Building PyTorch model from configuration: {}".format(str(config)))
model = BertForPreTraining(config)
# Load weights from tf checkpoint
load_tf_weights_in_bert(model, tf_checkpoint_path)
# Save pytorch-model
print("Save PyTorch model to {}".format(pytorch_dump_path))
torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--tf_checkpoint_path",
default = None,
type = str,
required = True,
help = "Path the TensorFlow checkpoint path.")
parser.add_argument("--bert_config_file",
default = None,
type = str,
required = True,
help = "The config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture.")
parser.add_argument("--pytorch_dump_path",
default = None,
type = str,
required = True,
help = "Path to the output PyTorch model.")
args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path,
args.bert_config_file,
args.pytorch_dump_path)
| 2,593 | 37.716418 | 101 | py |
EmpTransfo | EmpTransfo-master/pytorch_pretrained_bert/__init__.py | __version__ = "0.6.2"
from .tokenization import BertTokenizer, BasicTokenizer, WordpieceTokenizer
from .tokenization_openai import OpenAIGPTTokenizer
from .tokenization_transfo_xl import (TransfoXLTokenizer, TransfoXLCorpus)
from .tokenization_gpt2 import GPT2Tokenizer
from .modeling import (BertConfig, BertModel, BertForPreTraining,
BertForMaskedLM, BertForNextSentencePrediction,
BertForSequenceClassification, BertForMultipleChoice,
BertForTokenClassification, BertForQuestionAnswering,
load_tf_weights_in_bert)
from .modeling_openai import (OpenAIGPTConfig, OpenAIGPTModel,
OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTDoubleHeadLMEmotionRecognitionModel,
OpenAIGPTForEmotionDetection,
OpenAIGPTMultiHeadModel,
load_tf_weights_in_openai_gpt)
from .modeling_transfo_xl import (TransfoXLConfig, TransfoXLModel, TransfoXLLMHeadModel,
load_tf_weights_in_transfo_xl)
from .modeling_gpt2 import (GPT2Config, GPT2Model,
GPT2LMHeadModel, GPT2DoubleHeadsModel, GPT2MultipleChoiceHead,
load_tf_weights_in_gpt2)
from .optimization import BertAdam
from .optimization_openai import OpenAIAdam
from .file_utils import PYTORCH_PRETRAINED_BERT_CACHE, cached_path, WEIGHTS_NAME, CONFIG_NAME
| 1,498 | 54.518519 | 124 | py |
EmpTransfo | EmpTransfo-master/pytorch_pretrained_bert/tokenization_gpt2.py | # coding=utf-8
# Copyright 2018 The Open AI Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for OpenAI GPT."""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import sys
import json
import logging
import os
import regex as re
from io import open
try:
from functools import lru_cache
except ImportError:
# Just a dummy decorator to get the checks to run on python2
# because honestly I don't want to support a byte-level unicode BPE tokenizer on python 2 right now.
def lru_cache():
return lambda func: func
from .file_utils import cached_path
logger = logging.getLogger(__name__)
PRETRAINED_VOCAB_ARCHIVE_MAP = {
'gpt2': "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-vocab.json",
'gpt2-medium': "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-medium-vocab.json",
}
PRETRAINED_MERGES_ARCHIVE_MAP = {
'gpt2': "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-merges.txt",
'gpt2-medium': "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-medium-merges.txt",
}
PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP = {
'gpt2': 1024,
}
VOCAB_NAME = 'vocab.json'
MERGES_NAME = 'merges.txt'
SPECIAL_TOKENS_NAME = 'special_tokens.txt'
@lru_cache()
def bytes_to_unicode():
"""
    Returns a mapping of utf-8 bytes to corresponding unicode strings.
The reversible bpe codes work on unicode strings.
This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
    This is a significant percentage of your normal, say, 32K bpe vocab.
To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
And avoids mapping to whitespace/control characters the bpe code barfs on.
"""
_chr = unichr if sys.version_info[0] == 2 else chr
bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1))
cs = bs[:]
n = 0
for b in range(2**8):
if b not in bs:
bs.append(b)
cs.append(2**8+n)
n += 1
cs = [_chr(n) for n in cs]
return dict(zip(bs, cs))
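# Worked example of the mapping built above (a sketch): bytes kept in `bs`
# map to themselves, while excluded bytes are shifted into unused code points
# starting at 256. Bytes 0-31 are excluded first, so the space byte (0x20)
# gets n = 32 and maps to chr(256 + 32) = u'\u0120' ('Ġ') -- which is why
# word-initial GPT-2 vocab entries start with 'Ġ':
#
#   byte_encoder = bytes_to_unicode()
#   byte_encoder[ord('A')]   # -> 'A'
#   byte_encoder[ord(' ')]   # -> 'Ġ'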
def get_pairs(word):
"""Return set of symbol pairs in a word.
Word is represented as tuple of symbols (symbols being variable-length strings).
"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs
class GPT2Tokenizer(object):
"""
GPT-2 BPE tokenizer. Peculiarities:
- Byte-level BPE
"""
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, cache_dir=None, *inputs, **kwargs):
"""
Instantiate a GPT2Tokenizer from a pre-trained model file.
Download and cache the pre-trained model file if needed.
"""
if pretrained_model_name_or_path in PRETRAINED_VOCAB_ARCHIVE_MAP:
vocab_file = PRETRAINED_VOCAB_ARCHIVE_MAP[pretrained_model_name_or_path]
merges_file = PRETRAINED_MERGES_ARCHIVE_MAP[pretrained_model_name_or_path]
special_tokens_file = None
else:
vocab_file = os.path.join(pretrained_model_name_or_path, VOCAB_NAME)
merges_file = os.path.join(pretrained_model_name_or_path, MERGES_NAME)
special_tokens_file = os.path.join(pretrained_model_name_or_path, SPECIAL_TOKENS_NAME)
if not os.path.exists(special_tokens_file):
special_tokens_file = None
else:
logger.info("loading special tokens file {}".format(special_tokens_file))
# redirect to the cache, if necessary
try:
resolved_vocab_file = cached_path(vocab_file, cache_dir=cache_dir)
resolved_merges_file = cached_path(merges_file, cache_dir=cache_dir)
except EnvironmentError:
logger.error(
"Model name '{}' was not found in model name list ({}). "
"We assumed '{}' was a path or url but couldn't find files {} and {} "
"at this path or url.".format(
pretrained_model_name_or_path,
', '.join(PRETRAINED_VOCAB_ARCHIVE_MAP.keys()),
pretrained_model_name_or_path,
vocab_file, merges_file))
return None
if resolved_vocab_file == vocab_file and resolved_merges_file == merges_file:
logger.info("loading vocabulary file {}".format(vocab_file))
logger.info("loading merges file {}".format(merges_file))
else:
logger.info("loading vocabulary file {} from cache at {}".format(
vocab_file, resolved_vocab_file))
logger.info("loading merges file {} from cache at {}".format(
merges_file, resolved_merges_file))
if pretrained_model_name_or_path in PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP:
            # if we're using a pretrained model, ensure the tokenizer won't index sequences longer
# than the number of positional embeddings
max_len = PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP[pretrained_model_name_or_path]
kwargs['max_len'] = min(kwargs.get('max_len', int(1e12)), max_len)
# Instantiate tokenizer.
if special_tokens_file and 'special_tokens' not in kwargs:
special_tokens = open(special_tokens_file, encoding='utf-8').read().split('\n')[:-1]
else:
special_tokens = kwargs.pop('special_tokens', [])
tokenizer = cls(resolved_vocab_file, resolved_merges_file, special_tokens=special_tokens, *inputs, **kwargs)
return tokenizer
def __init__(self, vocab_file, merges_file, errors='replace', special_tokens=None, max_len=None):
self.max_len = max_len if max_len is not None else int(1e12)
        self.encoder = json.load(open(vocab_file, encoding='utf-8'))
self.decoder = {v:k for k,v in self.encoder.items()}
self.errors = errors # how to handle errors in decoding
self.byte_encoder = bytes_to_unicode()
self.byte_decoder = {v:k for k, v in self.byte_encoder.items()}
bpe_data = open(merges_file, encoding='utf-8').read().split('\n')[1:-1]
bpe_merges = [tuple(merge.split()) for merge in bpe_data]
self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
self.cache = {}
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
self.special_tokens = {}
self.special_tokens_decoder = {}
self.set_special_tokens(special_tokens)
def __len__(self):
return len(self.encoder) + len(self.special_tokens)
def set_special_tokens(self, special_tokens):
""" Add a list of additional tokens to the encoder.
The additional tokens are indexed starting from the last index of the
current vocabulary in the order of the `special_tokens` list.
"""
if not special_tokens:
self.special_tokens = {}
self.special_tokens_decoder = {}
return
self.special_tokens = dict((tok, len(self.encoder) + i) for i, tok in enumerate(special_tokens))
self.special_tokens_decoder = {v:k for k, v in self.special_tokens.items()}
logger.info("Special tokens {}".format(self.special_tokens))
def bpe(self, token):
if token in self.cache:
return self.cache[token]
word = tuple(token)
pairs = get_pairs(word)
if not pairs:
return token
while True:
bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf')))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
new_word.extend(word[i:j])
i = j
                except ValueError:
new_word.extend(word[i:])
break
if word[i] == first and i < len(word)-1 and word[i+1] == second:
new_word.append(first+second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = ' '.join(word)
self.cache[token] = word
return word
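    # Worked sketch of the merge loop above (ranks are illustrative, not the
    # real GPT-2 merge table): starting from ('h','e','l','l','o'), if
    # ('l','l') has the lowest rank it merges first -> ('h','e','ll','o');
    # if ('e','ll') ranks next -> ('h','ell','o'); and so on until no pair
    # remains in bpe_ranks. The final symbols are joined with spaces and
    # memoized in self.cache, so repeated tokens cost only a dict lookup.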
def tokenize(self, text):
""" Tokenize a string. """
bpe_tokens = []
for token in re.findall(self.pat, text):
if sys.version_info[0] == 2:
token = ''.join(self.byte_encoder[ord(b)] for b in token)
else:
token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(' '))
return bpe_tokens
def convert_tokens_to_ids(self, tokens):
""" Converts a sequence of tokens into ids using the vocab. """
ids = []
if isinstance(tokens, str) or (sys.version_info[0] == 2 and isinstance(tokens, unicode)):
if tokens in self.special_tokens:
return self.special_tokens[tokens]
else:
return self.encoder.get(tokens, 0)
for token in tokens:
if token in self.special_tokens:
ids.append(self.special_tokens[token])
else:
ids.append(self.encoder.get(token, 0))
if len(ids) > self.max_len:
logger.warning(
"Token indices sequence length is longer than the specified maximum "
" sequence length for this OpenAI GPT model ({} > {}). Running this"
" sequence through the model will result in indexing errors".format(len(ids), self.max_len)
)
return ids
def convert_ids_to_tokens(self, ids, skip_special_tokens=False):
"""Converts a sequence of ids in BPE tokens using the vocab."""
tokens = []
for i in ids:
if i in self.special_tokens_decoder:
if not skip_special_tokens:
tokens.append(self.special_tokens_decoder[i])
else:
tokens.append(self.decoder[i])
return tokens
def encode(self, text):
return self.convert_tokens_to_ids(self.tokenize(text))
def decode(self, tokens, skip_special_tokens=False, clean_up_tokenization_spaces=True):
text = ''.join(self.convert_ids_to_tokens(tokens, skip_special_tokens=skip_special_tokens))
text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors)
if clean_up_tokenization_spaces:
text = text.replace('<unk>', '')
text = text.replace(' .', '.').replace(' ?', '?').replace(' !', '!').replace(' ,', ','
).replace(" ' ", "'").replace(" n't", "n't").replace(" 'm", "'m").replace(" do not", " don't"
).replace(" 's", "'s").replace(" 've", "'ve").replace(" 're", "'re")
return text
def save_vocabulary(self, vocab_path):
"""Save the tokenizer vocabulary and merge files to a directory."""
if not os.path.isdir(vocab_path):
logger.error("Vocabulary path ({}) should be a directory".format(vocab_path))
return
vocab_file = os.path.join(vocab_path, VOCAB_NAME)
merge_file = os.path.join(vocab_path, MERGES_NAME)
special_tokens_file = os.path.join(vocab_path, SPECIAL_TOKENS_NAME)
with open(vocab_file, 'w', encoding='utf-8') as f:
f.write(json.dumps(self.encoder, ensure_ascii=False))
index = 0
with open(merge_file, "w", encoding="utf-8") as writer:
writer.write(u'#version: 0.2\n')
for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
if index != token_index:
logger.warning("Saving vocabulary to {}: BPE merge indices are not consecutive."
" Please check that the tokenizer is not corrupted!".format(merge_file))
index = token_index
writer.write(' '.join(bpe_tokens) + u'\n')
index += 1
index = len(self.encoder)
with open(special_tokens_file, 'w', encoding='utf-8') as writer:
for token, token_index in sorted(self.special_tokens.items(), key=lambda kv: kv[1]):
if index != token_index:
logger.warning("Saving special tokens vocabulary to {}: BPE indices are not consecutive."
" Please check that the tokenizer is not corrupted!".format(special_tokens_file))
index = token_index
writer.write(token + u'\n')
index += 1
return vocab_file, merge_file, special_tokens_file
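# A minimal round-trip sketch ('gpt2' is the shortcut defined in the archive
# maps at the top of this file; the vocab and merges files are downloaded and
# cached on first use):
#
#   tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
#   ids = tokenizer.encode("Hello world")
#   tokenizer.decode(ids)   # -> "Hello world" (byte-level BPE round-trips)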
| 13,902 | 43.560897 | 116 | py |
EmpTransfo | EmpTransfo-master/pytorch_pretrained_bert/modeling_transfo_xl.py | # coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch Transformer XL model.
Adapted from https://github.com/kimiyoung/transformer-xl.
In particular https://github.com/kimiyoung/transformer-xl/blob/master/pytorch/mem_transformer.py
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import copy
import json
import math
import logging
import tarfile
import tempfile
import shutil
import collections
import sys
from io import open
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import CrossEntropyLoss
from torch.nn.parameter import Parameter
from .modeling import BertLayerNorm as LayerNorm
from .modeling_transfo_xl_utilities import ProjectedAdaptiveLogSoftmax, sample_logits
from .file_utils import cached_path, CONFIG_NAME, WEIGHTS_NAME
logger = logging.getLogger(__name__)
PRETRAINED_MODEL_ARCHIVE_MAP = {
'transfo-xl-wt103': "https://s3.amazonaws.com/models.huggingface.co/bert/transfo-xl-wt103-pytorch_model.bin",
}
PRETRAINED_CONFIG_ARCHIVE_MAP = {
'transfo-xl-wt103': "https://s3.amazonaws.com/models.huggingface.co/bert/transfo-xl-wt103-config.json",
}
TF_WEIGHTS_NAME = 'model.ckpt'
def build_tf_to_pytorch_map(model, config):
""" A map of modules from TF to PyTorch.
This time I use a map to keep the PyTorch model as identical to the original PyTorch model as possible.
"""
tf_to_pt_map = {}
if hasattr(model, 'transformer'):
# We are loading in a TransfoXLLMHeadModel => we will load also the Adaptive Softmax
tf_to_pt_map.update({
"transformer/adaptive_softmax/cutoff_0/cluster_W": model.crit.cluster_weight,
"transformer/adaptive_softmax/cutoff_0/cluster_b": model.crit.cluster_bias})
for i, (out_l, proj_l, tie_proj) in enumerate(zip(
model.crit.out_layers,
model.crit.out_projs,
config.tie_projs)):
layer_str = "transformer/adaptive_softmax/cutoff_%d/" % i
if config.tie_weight:
tf_to_pt_map.update({
layer_str + 'b': out_l.bias})
else:
raise NotImplementedError
# I don't think this is implemented in the TF code
tf_to_pt_map.update({
layer_str + 'lookup_table': out_l.weight,
layer_str + 'b': out_l.bias})
if not tie_proj:
tf_to_pt_map.update({
layer_str + 'proj': proj_l
})
# Now load the rest of the transformer
model = model.transformer
# Embeddings
for i, (embed_l, proj_l) in enumerate(zip(model.word_emb.emb_layers, model.word_emb.emb_projs)):
layer_str = "transformer/adaptive_embed/cutoff_%d/" % i
tf_to_pt_map.update({
layer_str + 'lookup_table': embed_l.weight,
layer_str + 'proj_W': proj_l
})
# Transformer blocks
for i, b in enumerate(model.layers):
layer_str = "transformer/layer_%d/" % i
tf_to_pt_map.update({
layer_str + "rel_attn/LayerNorm/gamma": b.dec_attn.layer_norm.weight,
layer_str + "rel_attn/LayerNorm/beta": b.dec_attn.layer_norm.bias,
layer_str + "rel_attn/o/kernel": b.dec_attn.o_net.weight,
layer_str + "rel_attn/qkv/kernel": b.dec_attn.qkv_net.weight,
layer_str + "rel_attn/r/kernel": b.dec_attn.r_net.weight,
layer_str + "ff/LayerNorm/gamma": b.pos_ff.layer_norm.weight,
layer_str + "ff/LayerNorm/beta": b.pos_ff.layer_norm.bias,
layer_str + "ff/layer_1/kernel": b.pos_ff.CoreNet[0].weight,
layer_str + "ff/layer_1/bias": b.pos_ff.CoreNet[0].bias,
layer_str + "ff/layer_2/kernel": b.pos_ff.CoreNet[3].weight,
layer_str + "ff/layer_2/bias": b.pos_ff.CoreNet[3].bias,
})
# Relative positioning biases
if config.untie_r:
r_r_list = []
r_w_list = []
for b in model.layers:
r_r_list.append(b.dec_attn.r_r_bias)
r_w_list.append(b.dec_attn.r_w_bias)
else:
r_r_list = [model.r_r_bias]
r_w_list = [model.r_w_bias]
tf_to_pt_map.update({
'transformer/r_r_bias': r_r_list,
'transformer/r_w_bias': r_w_list})
return tf_to_pt_map
def load_tf_weights_in_transfo_xl(model, config, tf_path):
""" Load tf checkpoints in a pytorch model
"""
try:
import numpy as np
import tensorflow as tf
except ImportError:
print("Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions.")
raise
# Build TF to PyTorch weights loading map
tf_to_pt_map = build_tf_to_pytorch_map(model, config)
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
tf_weights = {}
for name, shape in init_vars:
print("Loading TF weight {} with shape {}".format(name, shape))
array = tf.train.load_variable(tf_path, name)
tf_weights[name] = array
for name, pointer in tf_to_pt_map.items():
assert name in tf_weights
array = tf_weights[name]
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v,
        # which are not required when using the pretrained model
if 'kernel' in name or 'proj' in name:
array = np.transpose(array)
if ('r_r_bias' in name or 'r_w_bias' in name) and len(pointer) > 1:
            # Here we will split the TF weights
assert len(pointer) == array.shape[0]
for i, p_i in enumerate(pointer):
arr_i = array[i, ...]
try:
assert p_i.shape == arr_i.shape
except AssertionError as e:
e.args += (p_i.shape, arr_i.shape)
raise
print("Initialize PyTorch weight {} for layer {}".format(name, i))
p_i.data = torch.from_numpy(arr_i)
else:
try:
assert pointer.shape == array.shape
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
print("Initialize PyTorch weight {}".format(name))
pointer.data = torch.from_numpy(array)
tf_weights.pop(name, None)
tf_weights.pop(name + '/Adam', None)
tf_weights.pop(name + '/Adam_1', None)
print("Weights not copied to PyTorch model: {}".format(', '.join(tf_weights.keys())))
return model
class TransfoXLConfig(object):
"""Configuration class to store the configuration of a `TransfoXLModel`.
"""
def __init__(self,
vocab_size_or_config_json_file=267735,
cutoffs=[20000, 40000, 200000],
d_model=1024,
d_embed=1024,
n_head=16,
d_head=64,
d_inner=4096,
div_val=4,
pre_lnorm=False,
n_layer=18,
tgt_len=128,
ext_len=0,
mem_len=1600,
clamp_len=1000,
same_length=True,
proj_share_all_but_first=True,
attn_type=0,
sample_softmax=-1,
adaptive=True,
tie_weight=True,
dropout=0.1,
dropatt=0.0,
untie_r=True,
init="normal",
init_range=0.01,
proj_init_std=0.01,
init_std=0.02):
"""Constructs TransfoXLConfig.
Args:
            vocab_size_or_config_json_file: Vocabulary size of `input_ids` in `TransfoXLModel` or a configuration json file.
cutoffs: cutoffs for the adaptive softmax
d_model: Dimensionality of the model's hidden states.
d_embed: Dimensionality of the embeddings
d_head: Dimensionality of the model's heads.
            div_val: divisor for the adaptive input and softmax embedding sizes
pre_lnorm: apply LayerNorm to the input instead of the output
d_inner: Inner dimension in FF
n_layer: Number of hidden layers in the Transformer encoder.
n_head: Number of attention heads for each attention layer in
the Transformer encoder.
tgt_len: number of tokens to predict
ext_len: length of the extended context
mem_len: length of the retained previous heads
same_length: use the same attn length for all tokens
proj_share_all_but_first: True to share all but first projs, False not to share.
attn_type: attention type. 0 for Transformer-XL, 1 for Shaw et al, 2 for Vaswani et al, 3 for Al Rfou et al.
clamp_len: use the same pos embeddings after clamp_len
sample_softmax: number of samples in sampled softmax
adaptive: use adaptive softmax
tie_weight: tie the word embedding and softmax weights
            dropout: The dropout probability for all fully connected
                layers in the embeddings and encoder.
dropatt: The dropout ratio for the attention probabilities.
untie_r: untie relative position biases
init: parameter initializer to use
init_range: parameters initialized by U(-init_range, init_range).
            proj_init_std: projection parameters initialized by N(0, proj_init_std)
init_std: parameters initialized by N(0, init_std)
"""
if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2
and isinstance(vocab_size_or_config_json_file, unicode)):
with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
json_config = json.loads(reader.read())
for key, value in json_config.items():
self.__dict__[key] = value
elif isinstance(vocab_size_or_config_json_file, int):
self.n_token = vocab_size_or_config_json_file
self.cutoffs = []
self.cutoffs.extend(cutoffs)
self.tie_weight = tie_weight
if proj_share_all_but_first:
self.tie_projs = [False] + [True] * len(self.cutoffs)
else:
self.tie_projs = [False] + [False] * len(self.cutoffs)
self.d_model = d_model
self.d_embed = d_embed
self.d_head = d_head
self.d_inner = d_inner
self.div_val = div_val
self.pre_lnorm = pre_lnorm
self.n_layer = n_layer
self.n_head = n_head
self.tgt_len = tgt_len
self.ext_len = ext_len
self.mem_len = mem_len
self.same_length = same_length
self.attn_type = attn_type
self.clamp_len = clamp_len
self.sample_softmax = sample_softmax
self.adaptive = adaptive
self.dropout = dropout
self.dropatt = dropatt
self.untie_r = untie_r
self.init = init
self.init_range = init_range
self.proj_init_std = proj_init_std
self.init_std = init_std
else:
raise ValueError("First argument must be either a vocabulary size (int)"
"or the path to a pretrained model config file (str)")
@classmethod
def from_dict(cls, json_object):
"""Constructs a `TransfoXLConfig` from a Python dictionary of parameters."""
config = TransfoXLConfig(vocab_size_or_config_json_file=-1)
for key, value in json_object.items():
config.__dict__[key] = value
return config
@classmethod
def from_json_file(cls, json_file):
"""Constructs a `TransfoXLConfig` from a json file of parameters."""
with open(json_file, "r", encoding='utf-8') as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
def __repr__(self):
return str(self.to_json_string())
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
def to_json_file(self, json_file_path):
""" Save this instance to a json file."""
with open(json_file_path, "w", encoding='utf-8') as writer:
writer.write(self.to_json_string())
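# A minimal configuration sketch (the constructor defaults above match the
# released 'transfo-xl-wt103' setup; the json path is a hypothetical
# placeholder):
#
#   config = TransfoXLConfig()                       # wt103-sized defaults
#   config.to_json_file('transfo_xl_config.json')
#   same_config = TransfoXLConfig.from_json_file('transfo_xl_config.json')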
class PositionalEmbedding(nn.Module):
def __init__(self, demb):
super(PositionalEmbedding, self).__init__()
self.demb = demb
inv_freq = 1 / (10000 ** (torch.arange(0.0, demb, 2.0) / demb))
self.register_buffer('inv_freq', inv_freq)
def forward(self, pos_seq, bsz=None):
sinusoid_inp = torch.ger(pos_seq, self.inv_freq)
pos_emb = torch.cat([sinusoid_inp.sin(), sinusoid_inp.cos()], dim=-1)
if bsz is not None:
return pos_emb[:,None,:].expand(-1, bsz, -1)
else:
return pos_emb[:,None,:]
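    # In formula form, for position p and frequency index i in [0, demb/2),
    # the tensor computed above is
    #     pos_emb[p, i]          = sin(p / 10000^(2i/demb))
    #     pos_emb[p, i + demb/2] = cos(p / 10000^(2i/demb))
    # i.e. the sin and cos halves are concatenated rather than interleaved;
    # torch.ger builds the outer product pos_seq x inv_freq.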
class PositionwiseFF(nn.Module):
def __init__(self, d_model, d_inner, dropout, pre_lnorm=False):
super(PositionwiseFF, self).__init__()
self.d_model = d_model
self.d_inner = d_inner
self.dropout = dropout
self.CoreNet = nn.Sequential(
nn.Linear(d_model, d_inner), nn.ReLU(inplace=True),
nn.Dropout(dropout),
nn.Linear(d_inner, d_model),
nn.Dropout(dropout),
)
self.layer_norm = LayerNorm(d_model)
self.pre_lnorm = pre_lnorm
def forward(self, inp):
if self.pre_lnorm:
##### layer normalization + positionwise feed-forward
core_out = self.CoreNet(self.layer_norm(inp))
##### residual connection
output = core_out + inp
else:
##### positionwise feed-forward
core_out = self.CoreNet(inp)
##### residual connection + layer normalization
output = self.layer_norm(inp + core_out)
return output
class MultiHeadAttn(nn.Module):
def __init__(self, n_head, d_model, d_head, dropout, dropatt=0,
pre_lnorm=False, r_r_bias=None, r_w_bias=None):
super(MultiHeadAttn, self).__init__()
self.n_head = n_head
self.d_model = d_model
self.d_head = d_head
self.dropout = dropout
self.q_net = nn.Linear(d_model, n_head * d_head, bias=False)
self.kv_net = nn.Linear(d_model, 2 * n_head * d_head, bias=False)
self.drop = nn.Dropout(dropout)
self.dropatt = nn.Dropout(dropatt)
self.o_net = nn.Linear(n_head * d_head, d_model, bias=False)
self.layer_norm = LayerNorm(d_model)
self.scale = 1 / (d_head ** 0.5)
self.pre_lnorm = pre_lnorm
if r_r_bias is None or r_w_bias is None: # Biases are not shared
self.r_r_bias = nn.Parameter(torch.Tensor(self.n_head, self.d_head))
self.r_w_bias = nn.Parameter(torch.Tensor(self.n_head, self.d_head))
else:
self.r_r_bias = r_r_bias
self.r_w_bias = r_w_bias
def forward(self, h, attn_mask=None, mems=None):
##### multihead attention
# [hlen x bsz x n_head x d_head]
if mems is not None:
c = torch.cat([mems, h], 0)
else:
c = h
if self.pre_lnorm:
##### layer normalization
c = self.layer_norm(c)
head_q = self.q_net(h)
head_k, head_v = torch.chunk(self.kv_net(c), 2, -1)
head_q = head_q.view(h.size(0), h.size(1), self.n_head, self.d_head)
head_k = head_k.view(c.size(0), c.size(1), self.n_head, self.d_head)
head_v = head_v.view(c.size(0), c.size(1), self.n_head, self.d_head)
# [qlen x klen x bsz x n_head]
attn_score = torch.einsum('ibnd,jbnd->ijbn', (head_q, head_k))
attn_score.mul_(self.scale)
if attn_mask is not None and attn_mask.any().item():
if attn_mask.dim() == 2:
attn_score.masked_fill_(attn_mask[None,:,:,None], -float('inf'))
elif attn_mask.dim() == 3:
attn_score.masked_fill_(attn_mask[:,:,:,None], -float('inf'))
# [qlen x klen x bsz x n_head]
attn_prob = F.softmax(attn_score, dim=1)
attn_prob = self.dropatt(attn_prob)
# [qlen x klen x bsz x n_head] + [klen x bsz x n_head x d_head] -> [qlen x bsz x n_head x d_head]
attn_vec = torch.einsum('ijbn,jbnd->ibnd', (attn_prob, head_v))
attn_vec = attn_vec.contiguous().view(
attn_vec.size(0), attn_vec.size(1), self.n_head * self.d_head)
##### linear projection
attn_out = self.o_net(attn_vec)
attn_out = self.drop(attn_out)
if self.pre_lnorm:
##### residual connection
output = h + attn_out
else:
##### residual connection + layer normalization
output = self.layer_norm(h + attn_out)
return output
class RelMultiHeadAttn(nn.Module):
def __init__(self, n_head, d_model, d_head, dropout, dropatt=0,
tgt_len=None, ext_len=None, mem_len=None, pre_lnorm=False,
r_r_bias=None, r_w_bias=None):
super(RelMultiHeadAttn, self).__init__()
self.n_head = n_head
self.d_model = d_model
self.d_head = d_head
self.dropout = dropout
self.qkv_net = nn.Linear(d_model, 3 * n_head * d_head, bias=False)
self.drop = nn.Dropout(dropout)
self.dropatt = nn.Dropout(dropatt)
self.o_net = nn.Linear(n_head * d_head, d_model, bias=False)
self.layer_norm = LayerNorm(d_model)
self.scale = 1 / (d_head ** 0.5)
self.pre_lnorm = pre_lnorm
if r_r_bias is None or r_w_bias is None: # Biases are not shared
self.r_r_bias = nn.Parameter(torch.Tensor(self.n_head, self.d_head))
self.r_w_bias = nn.Parameter(torch.Tensor(self.n_head, self.d_head))
else:
self.r_r_bias = r_r_bias
self.r_w_bias = r_w_bias
def _parallelogram_mask(self, h, w, left=False):
mask = torch.ones((h, w)).byte()
m = min(h, w)
mask[:m,:m] = torch.triu(mask[:m,:m])
mask[-m:,-m:] = torch.tril(mask[-m:,-m:])
if left:
return mask
else:
return mask.flip(0)
def _shift(self, x, qlen, klen, mask, left=False):
if qlen > 1:
zero_pad = torch.zeros((x.size(0), qlen-1, x.size(2), x.size(3)),
device=x.device, dtype=x.dtype)
else:
zero_pad = torch.zeros(0, device=x.device, dtype=x.dtype)
if left:
mask = mask.flip(1)
x_padded = torch.cat([zero_pad, x], dim=1).expand(qlen, -1, -1, -1)
else:
x_padded = torch.cat([x, zero_pad], dim=1).expand(qlen, -1, -1, -1)
x = x_padded.masked_select(mask[:,:,None,None]) \
.view(qlen, klen, x.size(2), x.size(3))
return x
def _rel_shift(self, x, zero_triu=False):
zero_pad_shape = (x.size(0), 1) + x.size()[2:]
zero_pad = torch.zeros(zero_pad_shape, device=x.device, dtype=x.dtype)
x_padded = torch.cat([zero_pad, x], dim=1)
x_padded_shape = (x.size(1) + 1, x.size(0)) + x.size()[2:]
x_padded = x_padded.view(*x_padded_shape)
x = x_padded[1:].view_as(x)
if zero_triu:
ones = torch.ones((x.size(0), x.size(1)))
x = x * torch.tril(ones, x.size(1) - x.size(0))[:,:,None,None]
return x
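    # Worked sketch of the relative-shift trick above, batch/head dims
    # elided: for x of shape (qlen, klen), padding a zero column and
    # reshaping makes out[q, k] read x[q, k + qlen - 1 - q], i.e. row q
    # shifts left by (qlen - 1 - q). With qlen = klen = 3:
    #     [[a0 a1 a2],        [[a2  0 b0],
    #      [b0 b1 b2],   ->    [b1 b2  0],
    #      [c0 c1 c2]]         [c0 c1 c2]]
    # The wrapped-around entries (the 0/b0 in row 0, etc.) land on positions
    # removed by the causal attention mask, so they never reach the softmax.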
def forward(self, w, r, attn_mask=None, mems=None):
raise NotImplementedError
class RelPartialLearnableMultiHeadAttn(RelMultiHeadAttn):
def __init__(self, *args, **kwargs):
super(RelPartialLearnableMultiHeadAttn, self).__init__(*args, **kwargs)
self.r_net = nn.Linear(self.d_model, self.n_head * self.d_head, bias=False)
def forward(self, w, r, attn_mask=None, mems=None):
qlen, rlen, bsz = w.size(0), r.size(0), w.size(1)
if mems is not None:
cat = torch.cat([mems, w], 0)
if self.pre_lnorm:
w_heads = self.qkv_net(self.layer_norm(cat))
else:
w_heads = self.qkv_net(cat)
r_head_k = self.r_net(r)
w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)
w_head_q = w_head_q[-qlen:]
else:
if self.pre_lnorm:
w_heads = self.qkv_net(self.layer_norm(w))
else:
w_heads = self.qkv_net(w)
r_head_k = self.r_net(r)
w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)
klen = w_head_k.size(0)
w_head_q = w_head_q.view(qlen, bsz, self.n_head, self.d_head) # qlen x bsz x n_head x d_head
        w_head_k = w_head_k.view(klen, bsz, self.n_head, self.d_head)           # klen x bsz x n_head x d_head
        w_head_v = w_head_v.view(klen, bsz, self.n_head, self.d_head)           # klen x bsz x n_head x d_head
        r_head_k = r_head_k.view(rlen, self.n_head, self.d_head)                # rlen x n_head x d_head
#### compute attention score
rw_head_q = w_head_q + self.r_w_bias # qlen x bsz x n_head x d_head
AC = torch.einsum('ibnd,jbnd->ijbn', (rw_head_q, w_head_k)) # qlen x klen x bsz x n_head
rr_head_q = w_head_q + self.r_r_bias
BD = torch.einsum('ibnd,jnd->ijbn', (rr_head_q, r_head_k)) # qlen x klen x bsz x n_head
BD = self._rel_shift(BD)
# [qlen x klen x bsz x n_head]
attn_score = AC + BD
attn_score.mul_(self.scale)
#### compute attention probability
if attn_mask is not None and attn_mask.any().item():
if attn_mask.dim() == 2:
attn_score = attn_score.float().masked_fill(
attn_mask[None,:,:,None], -1e30).type_as(attn_score)
elif attn_mask.dim() == 3:
attn_score = attn_score.float().masked_fill(
attn_mask[:,:,:,None], -1e30).type_as(attn_score)
# [qlen x klen x bsz x n_head]
attn_prob = F.softmax(attn_score, dim=1)
attn_prob = self.dropatt(attn_prob)
#### compute attention vector
attn_vec = torch.einsum('ijbn,jbnd->ibnd', (attn_prob, w_head_v))
# [qlen x bsz x n_head x d_head]
attn_vec = attn_vec.contiguous().view(
attn_vec.size(0), attn_vec.size(1), self.n_head * self.d_head)
##### linear projection
attn_out = self.o_net(attn_vec)
attn_out = self.drop(attn_out)
if self.pre_lnorm:
##### residual connection
output = w + attn_out
else:
##### residual connection + layer normalization
output = self.layer_norm(w + attn_out)
return output
class RelLearnableMultiHeadAttn(RelMultiHeadAttn):
def __init__(self, *args, **kwargs):
super(RelLearnableMultiHeadAttn, self).__init__(*args, **kwargs)
def forward(self, w, r_emb, r_w_bias, r_bias, attn_mask=None, mems=None):
# r_emb: [klen, n_head, d_head], used for term B
# r_w_bias: [n_head, d_head], used for term C
# r_bias: [klen, n_head], used for term D
qlen, bsz = w.size(0), w.size(1)
if mems is not None:
cat = torch.cat([mems, w], 0)
if self.pre_lnorm:
w_heads = self.qkv_net(self.layer_norm(cat))
else:
w_heads = self.qkv_net(cat)
w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)
w_head_q = w_head_q[-qlen:]
else:
if self.pre_lnorm:
w_heads = self.qkv_net(self.layer_norm(w))
else:
w_heads = self.qkv_net(w)
w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)
klen = w_head_k.size(0)
w_head_q = w_head_q.view(qlen, bsz, self.n_head, self.d_head)
w_head_k = w_head_k.view(klen, bsz, self.n_head, self.d_head)
w_head_v = w_head_v.view(klen, bsz, self.n_head, self.d_head)
if klen > r_emb.size(0):
r_emb_pad = r_emb[0:1].expand(klen-r_emb.size(0), -1, -1)
r_emb = torch.cat([r_emb_pad, r_emb], 0)
r_bias_pad = r_bias[0:1].expand(klen-r_bias.size(0), -1)
r_bias = torch.cat([r_bias_pad, r_bias], 0)
else:
r_emb = r_emb[-klen:]
r_bias = r_bias[-klen:]
#### compute attention score
rw_head_q = w_head_q + r_w_bias[None] # qlen x bsz x n_head x d_head
AC = torch.einsum('ibnd,jbnd->ijbn', (rw_head_q, w_head_k)) # qlen x klen x bsz x n_head
B_ = torch.einsum('ibnd,jnd->ijbn', (w_head_q, r_emb)) # qlen x klen x bsz x n_head
D_ = r_bias[None, :, None] # 1 x klen x 1 x n_head
BD = self._rel_shift(B_ + D_)
# [qlen x klen x bsz x n_head]
attn_score = AC + BD
attn_score.mul_(self.scale)
#### compute attention probability
if attn_mask is not None and attn_mask.any().item():
if attn_mask.dim() == 2:
attn_score.masked_fill_(attn_mask[None,:,:,None], -float('inf'))
elif attn_mask.dim() == 3:
attn_score.masked_fill_(attn_mask[:,:,:,None], -float('inf'))
# [qlen x klen x bsz x n_head]
attn_prob = F.softmax(attn_score, dim=1)
attn_prob = self.dropatt(attn_prob)
#### compute attention vector
attn_vec = torch.einsum('ijbn,jbnd->ibnd', (attn_prob, w_head_v))
# [qlen x bsz x n_head x d_head]
attn_vec = attn_vec.contiguous().view(
attn_vec.size(0), attn_vec.size(1), self.n_head * self.d_head)
##### linear projection
attn_out = self.o_net(attn_vec)
attn_out = self.drop(attn_out)
if self.pre_lnorm:
##### residual connection
output = w + attn_out
else:
##### residual connection + layer normalization
output = self.layer_norm(w + attn_out)
return output
class DecoderLayer(nn.Module):
def __init__(self, n_head, d_model, d_head, d_inner, dropout, **kwargs):
super(DecoderLayer, self).__init__()
self.dec_attn = MultiHeadAttn(n_head, d_model, d_head, dropout, **kwargs)
self.pos_ff = PositionwiseFF(d_model, d_inner, dropout,
pre_lnorm=kwargs.get('pre_lnorm'))
def forward(self, dec_inp, dec_attn_mask=None, mems=None):
output = self.dec_attn(dec_inp, attn_mask=dec_attn_mask,
mems=mems)
output = self.pos_ff(output)
return output
class RelLearnableDecoderLayer(nn.Module):
def __init__(self, n_head, d_model, d_head, d_inner, dropout,
**kwargs):
super(RelLearnableDecoderLayer, self).__init__()
self.dec_attn = RelLearnableMultiHeadAttn(n_head, d_model, d_head, dropout,
**kwargs)
self.pos_ff = PositionwiseFF(d_model, d_inner, dropout,
pre_lnorm=kwargs.get('pre_lnorm'))
def forward(self, dec_inp, r_emb, r_w_bias, r_bias, dec_attn_mask=None, mems=None):
output = self.dec_attn(dec_inp, r_emb, r_w_bias, r_bias,
attn_mask=dec_attn_mask,
mems=mems)
output = self.pos_ff(output)
return output
class RelPartialLearnableDecoderLayer(nn.Module):
def __init__(self, n_head, d_model, d_head, d_inner, dropout,
**kwargs):
super(RelPartialLearnableDecoderLayer, self).__init__()
self.dec_attn = RelPartialLearnableMultiHeadAttn(n_head, d_model,
d_head, dropout, **kwargs)
self.pos_ff = PositionwiseFF(d_model, d_inner, dropout,
pre_lnorm=kwargs.get('pre_lnorm'))
def forward(self, dec_inp, r, dec_attn_mask=None, mems=None):
output = self.dec_attn(dec_inp, r,
attn_mask=dec_attn_mask,
mems=mems)
output = self.pos_ff(output)
return output
class AdaptiveEmbedding(nn.Module):
def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1,
sample_softmax=False):
super(AdaptiveEmbedding, self).__init__()
self.n_token = n_token
self.d_embed = d_embed
self.cutoffs = cutoffs + [n_token]
self.div_val = div_val
self.d_proj = d_proj
self.emb_scale = d_proj ** 0.5
self.cutoff_ends = [0] + self.cutoffs
self.emb_layers = nn.ModuleList()
self.emb_projs = nn.ParameterList()
if div_val == 1:
self.emb_layers.append(
nn.Embedding(n_token, d_embed, sparse=sample_softmax>0)
)
if d_proj != d_embed:
self.emb_projs.append(nn.Parameter(torch.Tensor(d_proj, d_embed)))
else:
for i in range(len(self.cutoffs)):
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i+1]
d_emb_i = d_embed // (div_val ** i)
self.emb_layers.append(nn.Embedding(r_idx-l_idx, d_emb_i))
self.emb_projs.append(nn.Parameter(torch.Tensor(d_proj, d_emb_i)))
def forward(self, inp):
if self.div_val == 1:
embed = self.emb_layers[0](inp)
if self.d_proj != self.d_embed:
embed = F.linear(embed, self.emb_projs[0])
else:
param = next(self.parameters())
inp_flat = inp.view(-1)
emb_flat = torch.zeros([inp_flat.size(0), self.d_proj],
dtype=param.dtype, device=param.device)
for i in range(len(self.cutoffs)):
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
mask_i = (inp_flat >= l_idx) & (inp_flat < r_idx)
indices_i = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
inp_i = inp_flat.index_select(0, indices_i) - l_idx
emb_i = self.emb_layers[i](inp_i)
emb_i = F.linear(emb_i, self.emb_projs[i])
emb_flat.index_copy_(0, indices_i, emb_i)
embed_shape = inp.size() + (self.d_proj,)
embed = emb_flat.view(embed_shape)
embed.mul_(self.emb_scale)
return embed
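    # Illustrative partition for the wt103 defaults (cutoffs
    # [20000, 40000, 200000], n_token 267735, d_embed 1024, div_val 4):
    # ids in [0, 20000) get 1024-dim embeddings, [20000, 40000) 256-dim,
    # [40000, 200000) 64-dim and [200000, 267735) 16-dim; each cluster is
    # projected back to d_proj above before entering the shared layers.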
class TransfoXLPreTrainedModel(nn.Module):
""" An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
"""
def __init__(self, config, *inputs, **kwargs):
super(TransfoXLPreTrainedModel, self).__init__()
if not isinstance(config, TransfoXLConfig):
raise ValueError(
"Parameter config in `{}(config)` should be an instance of class `TransfoXLConfig`. "
"To create a model from a pretrained model use "
"`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
self.__class__.__name__, self.__class__.__name__
))
self.config = config
def init_weight(self, weight):
if self.config.init == 'uniform':
nn.init.uniform_(weight, -self.config.init_range, self.config.init_range)
elif self.config.init == 'normal':
nn.init.normal_(weight, 0.0, self.config.init_std)
def init_bias(self, bias):
nn.init.constant_(bias, 0.0)
def init_weights(self, m):
""" Initialize the weights.
"""
classname = m.__class__.__name__
if classname.find('Linear') != -1:
if hasattr(m, 'weight') and m.weight is not None:
self.init_weight(m.weight)
if hasattr(m, 'bias') and m.bias is not None:
self.init_bias(m.bias)
elif classname.find('AdaptiveEmbedding') != -1:
if hasattr(m, 'emb_projs'):
for i in range(len(m.emb_projs)):
if m.emb_projs[i] is not None:
nn.init.normal_(m.emb_projs[i], 0.0, self.config.proj_init_std)
elif classname.find('Embedding') != -1:
if hasattr(m, 'weight'):
self.init_weight(m.weight)
elif classname.find('ProjectedAdaptiveLogSoftmax') != -1:
if hasattr(m, 'cluster_weight') and m.cluster_weight is not None:
self.init_weight(m.cluster_weight)
if hasattr(m, 'cluster_bias') and m.cluster_bias is not None:
self.init_bias(m.cluster_bias)
if hasattr(m, 'out_projs'):
for i in range(len(m.out_projs)):
if m.out_projs[i] is not None:
nn.init.normal_(m.out_projs[i], 0.0, self.config.proj_init_std)
elif classname.find('LayerNorm') != -1:
if hasattr(m, 'weight'):
nn.init.normal_(m.weight, 1.0, self.config.init_std)
if hasattr(m, 'bias') and m.bias is not None:
self.init_bias(m.bias)
elif classname.find('TransformerLM') != -1:
if hasattr(m, 'r_emb'):
self.init_weight(m.r_emb)
if hasattr(m, 'r_w_bias'):
self.init_weight(m.r_w_bias)
if hasattr(m, 'r_r_bias'):
self.init_weight(m.r_r_bias)
if hasattr(m, 'r_bias'):
self.init_bias(m.r_bias)
def set_num_special_tokens(self, num_special_tokens):
pass
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *inputs, **kwargs):
"""
Instantiate a TransfoXLPreTrainedModel from a pre-trained model file or a pytorch state dict.
Download and cache the pre-trained model file if needed.
Params:
pretrained_model_name_or_path: either:
- a str with the name of a pre-trained model to load selected in the list of:
. `transfo-xl-wt103`
- a path or url to a pretrained model archive containing:
. `transfo_xl_config.json` a configuration file for the model
. `pytorch_model.bin` a PyTorch dump of a TransfoXLModel instance
- a path or url to a pretrained model archive containing:
. `transfo_xl_config.json` a configuration file for the model
. `model.chkpt` a TensorFlow checkpoint
from_tf: should we load the weights from a locally saved TensorFlow checkpoint
cache_dir: an optional path to a folder in which the pre-trained models will be cached.
            state_dict: an optional state dictionary (collections.OrderedDict object) to use instead of pre-trained models
*inputs, **kwargs: additional input for the specific TransformerXL class
"""
state_dict = kwargs.get('state_dict', None)
kwargs.pop('state_dict', None)
cache_dir = kwargs.get('cache_dir', None)
kwargs.pop('cache_dir', None)
from_tf = kwargs.get('from_tf', False)
kwargs.pop('from_tf', None)
if pretrained_model_name_or_path in PRETRAINED_MODEL_ARCHIVE_MAP:
archive_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name_or_path]
config_file = PRETRAINED_CONFIG_ARCHIVE_MAP[pretrained_model_name_or_path]
else:
archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)
config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
# redirect to the cache, if necessary
try:
resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir)
resolved_config_file = cached_path(config_file, cache_dir=cache_dir)
except EnvironmentError:
logger.error(
"Model name '{}' was not found in model name list ({}). "
"We assumed '{}' was a path or url but couldn't find files {} and {} "
"at this path or url.".format(
pretrained_model_name_or_path,
', '.join(PRETRAINED_MODEL_ARCHIVE_MAP.keys()),
pretrained_model_name_or_path,
archive_file, config_file))
return None
if resolved_archive_file == archive_file and resolved_config_file == config_file:
logger.info("loading weights file {}".format(archive_file))
logger.info("loading configuration file {}".format(config_file))
else:
logger.info("loading weights file {} from cache at {}".format(
archive_file, resolved_archive_file))
logger.info("loading configuration file {} from cache at {}".format(
config_file, resolved_config_file))
# Load config
config = TransfoXLConfig.from_json_file(resolved_config_file)
logger.info("Model config {}".format(config))
# Instantiate model.
model = cls(config, *inputs, **kwargs)
if state_dict is None and not from_tf:
state_dict = torch.load(resolved_archive_file, map_location='cpu')
if from_tf:
# Directly load from a TensorFlow checkpoint
return load_tf_weights_in_transfo_xl(model, config, pretrained_model_name_or_path)
missing_keys = []
unexpected_keys = []
error_msgs = []
# copy state_dict so _load_from_state_dict can modify it
metadata = getattr(state_dict, '_metadata', None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
def load(module, prefix=''):
local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
module._load_from_state_dict(
state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + '.')
start_prefix = ''
if not hasattr(model, 'transformer') and any(s.startswith('transformer.') for s in state_dict.keys()):
start_prefix = 'transformer.'
load(model, prefix=start_prefix)
if len(missing_keys) > 0:
logger.info("Weights of {} not initialized from pretrained model: {}".format(
model.__class__.__name__, missing_keys))
if len(unexpected_keys) > 0:
logger.info("Weights from pretrained model not used in {}: {}".format(
model.__class__.__name__, unexpected_keys))
if len(error_msgs) > 0:
raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format(
model.__class__.__name__, "\n\t".join(error_msgs)))
# Make sure we are still sharing the input and output embeddings
if hasattr(model, 'tie_weights'):
model.tie_weights()
return model
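    # Minimal loading sketch ('transfo-xl-wt103' is the shortcut defined in
    # the archive maps at the top of this file; weights are downloaded and
    # cached on first use):
    #
    #   model = TransfoXLLMHeadModel.from_pretrained('transfo-xl-wt103')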
class TransfoXLModel(TransfoXLPreTrainedModel):
"""Transformer XL model ("Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context").
    Transformer XL uses relative positioning (with sinusoidal patterns) and adaptive softmax inputs, which means that:
- you don't need to specify positioning embeddings indices
        - the tokens in the vocabulary have to be sorted by decreasing frequency.
Params:
config: a TransfoXLConfig class instance with the configuration to build a new model
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the token indices selected in the range [0, self.config.n_token)
        `mems`: optional memory of hidden states from previous forward passes
as a list (num layers) of hidden states at the entry of each layer
each hidden states has shape [self.config.mem_len, bsz, self.config.d_model]
            Note that the first two dimensions are transposed in `mems` with regard to `input_ids` and `target`
Outputs:
A tuple of (last_hidden_state, new_mems)
`last_hidden_state`: the encoded-hidden-states at the top of the model
as a torch.FloatTensor of size [batch_size, sequence_length, self.config.d_model]
`new_mems`: list (num layers) of updated mem states at the entry of each layer
each mem state is a torch.FloatTensor of size [self.config.mem_len, batch_size, self.config.d_model]
            Note that the first two dimensions are transposed in `mems` with regard to `input_ids` and `target`
Example usage:
```python
# Already been converted into BPE token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_ids_next = torch.LongTensor([[53, 21, 1], [64, 23, 100]])
config = TransfoXLConfig()
model = TransfoXLModel(config)
last_hidden_state, new_mems = model(input_ids)
# Another time on input_ids_next using the memory:
last_hidden_state, new_mems = model(input_ids_next, new_mems)
```
"""
def __init__(self, config):
super(TransfoXLModel, self).__init__(config)
self.n_token = config.n_token
self.d_embed = config.d_embed
self.d_model = config.d_model
self.n_head = config.n_head
self.d_head = config.d_head
self.word_emb = AdaptiveEmbedding(config.n_token, config.d_embed, config.d_model, config.cutoffs,
div_val=config.div_val)
self.drop = nn.Dropout(config.dropout)
self.n_layer = config.n_layer
self.tgt_len = config.tgt_len
self.mem_len = config.mem_len
self.ext_len = config.ext_len
self.max_klen = config.tgt_len + config.ext_len + config.mem_len
self.attn_type = config.attn_type
if not config.untie_r:
self.r_w_bias = nn.Parameter(torch.Tensor(self.n_head, self.d_head))
self.r_r_bias = nn.Parameter(torch.Tensor(self.n_head, self.d_head))
self.layers = nn.ModuleList()
if config.attn_type == 0: # the default attention
for i in range(config.n_layer):
self.layers.append(
RelPartialLearnableDecoderLayer(
config.n_head, config.d_model, config.d_head, config.d_inner, config.dropout,
tgt_len=config.tgt_len, ext_len=config.ext_len, mem_len=config.mem_len,
dropatt=config.dropatt, pre_lnorm=config.pre_lnorm,
r_w_bias=None if config.untie_r else self.r_w_bias,
r_r_bias=None if config.untie_r else self.r_r_bias)
)
elif config.attn_type == 1: # learnable embeddings
for i in range(config.n_layer):
self.layers.append(
RelLearnableDecoderLayer(
config.n_head, config.d_model, config.d_head, config.d_inner, config.dropout,
tgt_len=config.tgt_len, ext_len=config.ext_len, mem_len=config.mem_len,
dropatt=config.dropatt, pre_lnorm=config.pre_lnorm,
r_w_bias=None if config.untie_r else self.r_w_bias,
r_r_bias=None if config.untie_r else self.r_r_bias)
)
elif config.attn_type in [2, 3]: # absolute embeddings
for i in range(config.n_layer):
self.layers.append(
DecoderLayer(
config.n_head, config.d_model, config.d_head, config.d_inner, config.dropout,
dropatt=config.dropatt, pre_lnorm=config.pre_lnorm,
r_w_bias=None if config.untie_r else self.r_w_bias,
r_r_bias=None if config.untie_r else self.r_r_bias)
)
self.same_length = config.same_length
self.clamp_len = config.clamp_len
if self.attn_type == 0: # default attention
self.pos_emb = PositionalEmbedding(self.d_model)
elif self.attn_type == 1: # learnable
self.r_emb = nn.Parameter(torch.Tensor(
self.n_layer, self.max_klen, self.n_head, self.d_head))
self.r_bias = nn.Parameter(torch.Tensor(
self.n_layer, self.max_klen, self.n_head))
elif self.attn_type == 2: # absolute standard
self.pos_emb = PositionalEmbedding(self.d_model)
elif self.attn_type == 3: # absolute deeper SA
self.r_emb = nn.Parameter(torch.Tensor(
self.n_layer, self.max_klen, self.n_head, self.d_head))
self.apply(self.init_weights)
def backward_compatible(self):
self.sample_softmax = -1
def reset_length(self, tgt_len, ext_len, mem_len):
self.tgt_len = tgt_len
self.mem_len = mem_len
self.ext_len = ext_len
def init_mems(self, data):
if self.mem_len > 0:
mems = []
param = next(self.parameters())
for i in range(self.n_layer):
empty = torch.zeros(self.mem_len, data.size(1), self.config.d_model,
dtype=param.dtype, device=param.device)
mems.append(empty)
return mems
else:
return None
def _update_mems(self, hids, mems, qlen, mlen):
# does not deal with None
if mems is None: return None
# mems is not None
assert len(hids) == len(mems), 'len(hids) != len(mems)'
# There are `mlen + qlen` steps that can be cached into mems
# For the next step, the last `ext_len` of the `qlen` tokens
# will be used as the extended context. Hence, we only cache
# the tokens from `mlen + qlen - self.ext_len - self.mem_len`
# to `mlen + qlen - self.ext_len`.
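        # Illustrative example (assuming mem_len=4, ext_len=0): with mlen=4
        # cached steps and qlen=3 new steps, end_idx = 4 + 3 = 7 and
        # beg_idx = 7 - 4 = 3, so the newest 4 of the 7 concatenated hidden
        # states are kept as the next memory.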
with torch.no_grad():
new_mems = []
            end_idx = mlen + max(0, qlen - self.ext_len)
beg_idx = max(0, end_idx - self.mem_len)
for i in range(len(hids)):
cat = torch.cat([mems[i], hids[i]], dim=0)
new_mems.append(cat[beg_idx:end_idx].detach())
return new_mems
def _forward(self, dec_inp, mems=None):
qlen, bsz = dec_inp.size()
word_emb = self.word_emb(dec_inp)
mlen = mems[0].size(0) if mems is not None else 0
klen = mlen + qlen
if self.same_length:
all_ones = word_emb.new_ones(qlen, klen)
mask_len = klen - self.mem_len
if mask_len > 0:
mask_shift_len = qlen - mask_len
else:
mask_shift_len = qlen
dec_attn_mask = (torch.triu(all_ones, 1+mlen)
+ torch.tril(all_ones, -mask_shift_len)).byte()[:, :, None] # -1
else:
dec_attn_mask = torch.triu(
word_emb.new_ones(qlen, klen), diagonal=1+mlen).byte()[:,:,None]
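        # Illustrative example (assuming qlen=2, mlen=1): diagonal=1+mlen=2
        # masks key j for query i whenever j - i >= 2, so query 0 attends to
        # the memory slot and itself, while query 1 attends to everything up
        # to and including itself.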
hids = []
if self.attn_type == 0: # default
pos_seq = torch.arange(klen-1, -1, -1.0, device=word_emb.device,
dtype=word_emb.dtype)
if self.clamp_len > 0:
pos_seq.clamp_(max=self.clamp_len)
pos_emb = self.pos_emb(pos_seq)
core_out = self.drop(word_emb)
pos_emb = self.drop(pos_emb)
for i, layer in enumerate(self.layers):
hids.append(core_out)
mems_i = None if mems is None else mems[i]
core_out = layer(core_out, pos_emb, dec_attn_mask=dec_attn_mask, mems=mems_i)
elif self.attn_type == 1: # learnable
core_out = self.drop(word_emb)
for i, layer in enumerate(self.layers):
hids.append(core_out)
if self.clamp_len > 0:
r_emb = self.r_emb[i][-self.clamp_len :]
r_bias = self.r_bias[i][-self.clamp_len :]
else:
r_emb, r_bias = self.r_emb[i], self.r_bias[i]
mems_i = None if mems is None else mems[i]
core_out = layer(core_out, r_emb, self.r_w_bias[i],
r_bias, dec_attn_mask=dec_attn_mask, mems=mems_i)
elif self.attn_type == 2: # absolute
pos_seq = torch.arange(klen - 1, -1, -1.0, device=word_emb.device,
dtype=word_emb.dtype)
if self.clamp_len > 0:
pos_seq.clamp_(max=self.clamp_len)
pos_emb = self.pos_emb(pos_seq)
core_out = self.drop(word_emb + pos_emb[-qlen:])
for i, layer in enumerate(self.layers):
hids.append(core_out)
mems_i = None if mems is None else mems[i]
if mems_i is not None and i == 0:
mems_i += pos_emb[:mlen]
core_out = layer(core_out, dec_attn_mask=dec_attn_mask,
mems=mems_i)
elif self.attn_type == 3:
core_out = self.drop(word_emb)
for i, layer in enumerate(self.layers):
hids.append(core_out)
mems_i = None if mems is None else mems[i]
if mems_i is not None and mlen > 0:
cur_emb = self.r_emb[i][:-qlen]
cur_size = cur_emb.size(0)
if cur_size < mlen:
cur_emb_pad = cur_emb[0:1].expand(mlen-cur_size, -1, -1)
cur_emb = torch.cat([cur_emb_pad, cur_emb], 0)
else:
cur_emb = cur_emb[-mlen:]
mems_i += cur_emb.view(mlen, 1, -1)
core_out += self.r_emb[i][-qlen:].view(qlen, 1, -1)
core_out = layer(core_out, dec_attn_mask=dec_attn_mask,
mems=mems_i)
core_out = self.drop(core_out)
        new_mems = self._update_mems(hids, mems, qlen, mlen)
return core_out, new_mems
def forward(self, input_ids, mems=None):
""" Params:
input_ids :: [bsz, len]
            mems :: optional mems from previous forward passes (or init_mems)
list (num layers) of mem states at the entry of each layer
shape :: [self.config.mem_len, bsz, self.config.d_model]
Note that the first two dimensions are transposed in `mems` with regards to `input_ids` and `target`
Returns:
tuple (last_hidden, new_mems) where:
new_mems: list (num layers) of mem states at the entry of each layer
shape :: [self.config.mem_len, bsz, self.config.d_model]
last_hidden: output of the last layer:
shape :: [bsz, len, self.config.d_model]
"""
# the original code for Transformer-XL used shapes [len, bsz] but we want a unified interface in the library
# so we transpose here from shape [bsz, len] to shape [len, bsz]
input_ids = input_ids.transpose(0, 1).contiguous()
if mems is None:
mems = self.init_mems(input_ids)
last_hidden, new_mems = self._forward(input_ids, mems=mems)
# We transpose back here to shape [bsz, len, hidden_dim]
last_hidden = last_hidden.transpose(0, 1).contiguous()
return (last_hidden, new_mems)
class TransfoXLLMHeadModel(TransfoXLPreTrainedModel):
"""Transformer XL model ("Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context").
    This model adds an (adaptive) softmax head on top of the TransfoXLModel.
    Transformer XL uses relative positioning (with sinusoidal patterns) and adaptive softmax inputs, which means that:
    - you don't need to specify positioning embeddings indices
    - the tokens in the vocabulary have to be sorted by decreasing frequency.
Call self.tie_weights() if you update/load the weights of the transformer to keep the weights tied.
Params:
config: a TransfoXLConfig class instance with the configuration to build a new model
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
with the token indices selected in the range [0, self.config.n_token[
`target`: an optional torch.LongTensor of shape [batch_size, sequence_length]
with the target token indices selected in the range [0, self.config.n_token[
`mems`: an optional memory of hidden states from previous forward passes
as a list (num layers) of hidden states at the entry of each layer
each hidden states has shape [self.config.mem_len, bsz, self.config.d_model]
Note that the first two dimensions are transposed in `mems` with regards to `input_ids` and `target`
Outputs:
        A tuple of (softmax_output, new_mems)
        `softmax_output`: output of the (adaptive) softmax:
            if target is None:
                log probabilities of tokens, shape [batch_size, sequence_length, n_tokens]
            else:
                Negative log likelihood of shape [batch_size, sequence_length]
`new_mems`: list (num layers) of updated mem states at the entry of each layer
each mem state is a torch.FloatTensor of size [self.config.mem_len, batch_size, self.config.d_model]
Note that the first two dimensions are transposed in `mems` with regards to `input_ids` and `target`
Example usage:
```python
# Already been converted into BPE token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_ids_next = torch.LongTensor([[53, 21, 1], [64, 23, 100]])
config = TransfoXLConfig()
        model = TransfoXLLMHeadModel(config)
        softmax_output, new_mems = model(input_ids)
        # Another time on input_ids_next using the memory:
        softmax_output, new_mems = model(input_ids_next, mems=new_mems)
```
"""
def __init__(self, config):
super(TransfoXLLMHeadModel, self).__init__(config)
self.transformer = TransfoXLModel(config)
self.sample_softmax = config.sample_softmax
# use sampled softmax
if config.sample_softmax > 0:
self.out_layer = nn.Linear(config.d_model, config.n_token)
self.sampler = LogUniformSampler(config.n_token, config.sample_softmax)
# use adaptive softmax (including standard softmax)
else:
self.crit = ProjectedAdaptiveLogSoftmax(config.n_token, config.d_embed, config.d_model,
config.cutoffs, div_val=config.div_val)
self.apply(self.init_weights)
self.tie_weights()
def tie_weights(self):
""" Run this to be sure output and input (adaptive) softmax weights are tied """
# sampled softmax
if self.sample_softmax > 0:
if self.config.tie_weight:
self.out_layer.weight = self.transformer.word_emb.weight
# adaptive softmax (including standard softmax)
else:
if self.config.tie_weight:
for i in range(len(self.crit.out_layers)):
self.crit.out_layers[i].weight = self.transformer.word_emb.emb_layers[i].weight
if self.config.tie_projs:
for i, tie_proj in enumerate(self.config.tie_projs):
if tie_proj and self.config.div_val == 1 and self.config.d_model != self.config.d_embed:
self.crit.out_projs[i] = self.transformer.word_emb.emb_projs[0]
elif tie_proj and self.config.div_val != 1:
self.crit.out_projs[i] = self.transformer.word_emb.emb_projs[i]
def reset_length(self, tgt_len, ext_len, mem_len):
self.transformer.reset_length(tgt_len, ext_len, mem_len)
def init_mems(self, data):
return self.transformer.init_mems(data)
def forward(self, input_ids, target=None, mems=None):
""" Params:
input_ids :: [bsz, len]
target :: [bsz, len]
Returns:
tuple(softmax_output, new_mems) where:
new_mems: list (num layers) of hidden states at the entry of each layer
                    shape :: [mem_len, bsz, self.config.d_model] :: Warning: shapes are transposed here with regard to input_ids
                softmax_output: output of the (adaptive) softmax:
                    if target is None:
                        log probabilities of tokens, shape :: [bsz, len, n_tokens]
                    else:
                        Negative log likelihood of shape :: [bsz, len]
"""
bsz = input_ids.size(0)
tgt_len = input_ids.size(1)
last_hidden, new_mems = self.transformer(input_ids, mems)
pred_hid = last_hidden[:, -tgt_len:]
if self.sample_softmax > 0 and self.training:
assert self.config.tie_weight
logit = sample_logits(self.transformer.word_emb, self.out_layer.bias, target, pred_hid, self.sampler)
softmax_output = -F.log_softmax(logit, -1)[:, :, 0]
else:
softmax_output = self.crit(pred_hid.view(-1, pred_hid.size(-1)), target)
if target is None:
softmax_output = softmax_output.view(bsz, tgt_len, -1)
else:
softmax_output = softmax_output.view(bsz, tgt_len)
# We transpose back
return (softmax_output, new_mems)
| 59,065 | 41.40201 | 131 | py |
EmpTransfo | EmpTransfo-master/pytorch_pretrained_bert/tokenization_transfo_xl.py | # coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Tokenization classes for Transformer XL model.
Adapted from https://github.com/kimiyoung/transformer-xl.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import glob
import logging
import os
import sys
from collections import Counter, OrderedDict
from io import open
import unicodedata
import torch
import numpy as np
from .file_utils import cached_path
if sys.version_info[0] == 2:
import cPickle as pickle
else:
import pickle
logger = logging.getLogger(__name__)
PRETRAINED_VOCAB_ARCHIVE_MAP = {
'transfo-xl-wt103': "https://s3.amazonaws.com/models.huggingface.co/bert/transfo-xl-wt103-vocab.bin",
}
VOCAB_NAME = 'vocab.bin'
PRETRAINED_CORPUS_ARCHIVE_MAP = {
'transfo-xl-wt103': "https://s3.amazonaws.com/models.huggingface.co/bert/transfo-xl-wt103-corpus.bin",
}
CORPUS_NAME = 'corpus.bin'
class TransfoXLTokenizer(object):
"""
Transformer-XL tokenizer adapted from Vocab class in https://github.com/kimiyoung/transformer-xl
"""
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, cache_dir=None, *inputs, **kwargs):
"""
Instantiate a TransfoXLTokenizer.
The TransfoXLTokenizer.
"""
if pretrained_model_name_or_path in PRETRAINED_VOCAB_ARCHIVE_MAP:
vocab_file = PRETRAINED_VOCAB_ARCHIVE_MAP[pretrained_model_name_or_path]
else:
if os.path.isdir(pretrained_model_name_or_path):
vocab_file = os.path.join(pretrained_model_name_or_path, VOCAB_NAME)
else:
vocab_file = pretrained_model_name_or_path
# redirect to the cache, if necessary
try:
resolved_vocab_file = cached_path(vocab_file, cache_dir=cache_dir)
except EnvironmentError:
logger.error(
"Model name '{}' was not found in model name list ({}). "
"We assumed '{}' was a path or url but couldn't find files {} "
"at this path or url.".format(
pretrained_model_name_or_path,
', '.join(PRETRAINED_VOCAB_ARCHIVE_MAP.keys()),
pretrained_model_name_or_path,
vocab_file))
return None
if resolved_vocab_file == vocab_file:
logger.info("loading vocabulary file {}".format(vocab_file))
else:
logger.info("loading vocabulary file {} from cache at {}".format(
vocab_file, resolved_vocab_file))
# Instantiate tokenizer.
tokenizer = cls(*inputs, **kwargs)
vocab_dict = torch.load(resolved_vocab_file)
for key, value in vocab_dict.items():
tokenizer.__dict__[key] = value
return tokenizer
def __init__(self, special=[], min_freq=0, max_size=None, lower_case=False,
delimiter=None, vocab_file=None, never_split=("<unk>", "<eos>", "<formula>")):
self.counter = Counter()
self.special = special
self.min_freq = min_freq
self.max_size = max_size
self.lower_case = lower_case
self.delimiter = delimiter
self.vocab_file = vocab_file
self.never_split = never_split
def count_file(self, path, verbose=False, add_eos=False):
if verbose: print('counting file {} ...'.format(path))
assert os.path.exists(path)
sents = []
with open(path, 'r', encoding='utf-8') as f:
for idx, line in enumerate(f):
if verbose and idx > 0 and idx % 500000 == 0:
print(' line {}'.format(idx))
symbols = self.tokenize(line, add_eos=add_eos)
self.counter.update(symbols)
sents.append(symbols)
return sents
def count_sents(self, sents, verbose=False):
"""
sents : a list of sentences, each a list of tokenized symbols
"""
if verbose: print('counting {} sents ...'.format(len(sents)))
for idx, symbols in enumerate(sents):
if verbose and idx > 0 and idx % 500000 == 0:
print(' line {}'.format(idx))
self.counter.update(symbols)
def _build_from_file(self, vocab_file):
self.idx2sym = []
self.sym2idx = OrderedDict()
with open(vocab_file, 'r', encoding='utf-8') as f:
for line in f:
symb = line.strip().split()[0]
self.add_symbol(symb)
if '<UNK>' in self.sym2idx:
self.unk_idx = self.sym2idx['<UNK>']
elif '<unk>' in self.sym2idx:
self.unk_idx = self.sym2idx['<unk>']
else:
            raise ValueError('No <unk> token in vocabulary')
    def save_vocabulary(self, vocab_path):
        """Save the tokenizer vocabulary to a directory or file."""
        if os.path.isdir(vocab_path):
            vocab_file = os.path.join(vocab_path, VOCAB_NAME)
        else:
            vocab_file = vocab_path
        torch.save(self.__dict__, vocab_file)
        return vocab_file
def build_vocab(self):
if self.vocab_file:
print('building vocab from {}'.format(self.vocab_file))
self._build_from_file(self.vocab_file)
print('final vocab size {}'.format(len(self)))
else:
print('building vocab with min_freq={}, max_size={}'.format(
self.min_freq, self.max_size))
self.idx2sym = []
self.sym2idx = OrderedDict()
for sym in self.special:
self.add_special(sym)
for sym, cnt in self.counter.most_common(self.max_size):
if cnt < self.min_freq: break
self.add_symbol(sym)
print('final vocab size {} from {} unique tokens'.format(
len(self), len(self.counter)))
def encode_file(self, path, ordered=False, verbose=False, add_eos=True,
add_double_eos=False):
if verbose: print('encoding file {} ...'.format(path))
assert os.path.exists(path)
encoded = []
with open(path, 'r', encoding='utf-8') as f:
for idx, line in enumerate(f):
if verbose and idx > 0 and idx % 500000 == 0:
print(' line {}'.format(idx))
symbols = self.tokenize(line, add_eos=add_eos,
add_double_eos=add_double_eos)
encoded.append(self.convert_to_tensor(symbols))
if ordered:
encoded = torch.cat(encoded)
return encoded
def encode_sents(self, sents, ordered=False, verbose=False):
if verbose: print('encoding {} sents ...'.format(len(sents)))
encoded = []
for idx, symbols in enumerate(sents):
if verbose and idx > 0 and idx % 500000 == 0:
print(' line {}'.format(idx))
encoded.append(self.convert_to_tensor(symbols))
if ordered:
encoded = torch.cat(encoded)
return encoded
def add_special(self, sym):
if sym not in self.sym2idx:
self.idx2sym.append(sym)
self.sym2idx[sym] = len(self.idx2sym) - 1
setattr(self, '{}_idx'.format(sym.strip('<>')), self.sym2idx[sym])
def add_symbol(self, sym):
if sym not in self.sym2idx:
self.idx2sym.append(sym)
self.sym2idx[sym] = len(self.idx2sym) - 1
def get_sym(self, idx):
assert 0 <= idx < len(self), 'Index {} out of vocabulary range'.format(idx)
return self.idx2sym[idx]
def get_idx(self, sym):
if sym in self.sym2idx:
return self.sym2idx[sym]
else:
# print('encounter unk {}'.format(sym))
# assert '<eos>' not in sym
if hasattr(self, 'unk_idx'):
return self.sym2idx.get(sym, self.unk_idx)
# Backward compatibility with pre-trained models
elif '<unk>' in self.sym2idx:
return self.sym2idx['<unk>']
elif '<UNK>' in self.sym2idx:
return self.sym2idx['<UNK>']
else:
raise ValueError('Token not in vocabulary and no <unk> token in vocabulary for replacement')
def convert_ids_to_tokens(self, indices):
"""Converts a sequence of indices in symbols using the vocab."""
return [self.get_sym(idx) for idx in indices]
def convert_tokens_to_ids(self, symbols):
"""Converts a sequence of symbols into ids using the vocab."""
return [self.get_idx(sym) for sym in symbols]
def convert_to_tensor(self, symbols):
return torch.LongTensor(self.convert_tokens_to_ids(symbols))
def decode(self, indices, exclude=None):
"""Converts a sequence of indices in a string."""
if exclude is None:
return ' '.join([self.get_sym(idx) for idx in indices])
else:
return ' '.join([self.get_sym(idx) for idx in indices if idx not in exclude])
def __len__(self):
return len(self.idx2sym)
def tokenize(self, line, add_eos=False, add_double_eos=False):
line = line.strip()
# convert to lower case
if self.lower_case:
line = line.lower()
# empty delimiter '' will evaluate False
if self.delimiter == '':
symbols = line
else:
symbols = line.split(self.delimiter)
if add_double_eos: # lm1b
return ['<S>'] + symbols + ['<S>']
elif add_eos:
return symbols + ['<eos>']
else:
return symbols
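    # Illustrative sketch: with the default whitespace delimiter,
    # tokenize('hello world', add_eos=True) returns ['hello', 'world', '<eos>'].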
class LMOrderedIterator(object):
def __init__(self, data, bsz, bptt, device='cpu', ext_len=None):
"""
data -- LongTensor -- the LongTensor is strictly ordered
"""
self.bsz = bsz
self.bptt = bptt
self.ext_len = ext_len if ext_len is not None else 0
self.device = device
# Work out how cleanly we can divide the dataset into bsz parts.
self.n_step = data.size(0) // bsz
# Trim off any extra elements that wouldn't cleanly fit (remainders).
data = data.narrow(0, 0, self.n_step * bsz)
# Evenly divide the data across the bsz batches.
self.data = data.view(bsz, -1).t().contiguous().to(device)
# Number of mini-batches
self.n_batch = (self.n_step + self.bptt - 1) // self.bptt
def get_batch(self, i, bptt=None):
if bptt is None: bptt = self.bptt
seq_len = min(bptt, self.data.size(0) - 1 - i)
end_idx = i + seq_len
beg_idx = max(0, i - self.ext_len)
data = self.data[beg_idx:end_idx]
target = self.data[i+1:i+1+seq_len]
data_out = data.transpose(0, 1).contiguous().to(self.device)
target_out = target.transpose(0, 1).contiguous().to(self.device)
return data_out, target_out, seq_len
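    # Illustrative sketch (assuming bptt=3, ext_len=0): get_batch(i) returns
    # data = tokens[i:i+3] and target = tokens[i+1:i+4] for each of the bsz
    # streams, both transposed to shape [bsz, seq_len].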
def get_fixlen_iter(self, start=0):
for i in range(start, self.data.size(0) - 1, self.bptt):
yield self.get_batch(i)
def get_varlen_iter(self, start=0, std=5, min_len=5, max_deviation=3):
max_len = self.bptt + max_deviation * std
i = start
while True:
bptt = self.bptt if np.random.random() < 0.95 else self.bptt / 2.
bptt = min(max_len, max(min_len, int(np.random.normal(bptt, std))))
data, target, seq_len = self.get_batch(i, bptt)
i += seq_len
yield data, target, seq_len
if i >= self.data.size(0) - 2:
break
def __iter__(self):
return self.get_fixlen_iter()
class LMShuffledIterator(object):
def __init__(self, data, bsz, bptt, device='cpu', ext_len=None, shuffle=False):
"""
data -- list[LongTensor] -- there is no order among the LongTensors
"""
self.data = data
self.bsz = bsz
self.bptt = bptt
self.ext_len = ext_len if ext_len is not None else 0
self.device = device
self.shuffle = shuffle
def get_sent_stream(self):
# index iterator
epoch_indices = np.random.permutation(len(self.data)) if self.shuffle \
else np.array(range(len(self.data)))
# sentence iterator
for idx in epoch_indices:
yield self.data[idx]
def stream_iterator(self, sent_stream):
# streams for each data in the batch
streams = [None] * self.bsz
data = torch.LongTensor(self.bptt, self.bsz)
target = torch.LongTensor(self.bptt, self.bsz)
n_retain = 0
while True:
# data : [n_retain+bptt x bsz]
# target : [bptt x bsz]
data[n_retain:].fill_(-1)
target.fill_(-1)
valid_batch = True
for i in range(self.bsz):
n_filled = 0
try:
while n_filled < self.bptt:
if streams[i] is None or len(streams[i]) <= 1:
streams[i] = next(sent_stream)
# number of new tokens to fill in
n_new = min(len(streams[i]) - 1, self.bptt - n_filled)
# first n_retain tokens are retained from last batch
data[n_retain+n_filled:n_retain+n_filled+n_new, i] = \
streams[i][:n_new]
target[n_filled:n_filled+n_new, i] = \
streams[i][1:n_new+1]
streams[i] = streams[i][n_new:]
n_filled += n_new
except StopIteration:
valid_batch = False
break
if not valid_batch:
return
data_out = data.transpose(0, 1).contiguous().to(self.device)
target_out = target.transpose(0, 1).contiguous().to(self.device)
yield data_out, target_out, self.bptt
n_retain = min(data.size(0), self.ext_len)
if n_retain > 0:
data[:n_retain] = data[-n_retain:]
data.resize_(n_retain + self.bptt, data.size(1))
def __iter__(self):
# sent_stream is an iterator
sent_stream = self.get_sent_stream()
for batch in self.stream_iterator(sent_stream):
yield batch
class LMMultiFileIterator(LMShuffledIterator):
def __init__(self, paths, vocab, bsz, bptt, device='cpu', ext_len=None,
shuffle=False):
self.paths = paths
self.vocab = vocab
self.bsz = bsz
self.bptt = bptt
self.ext_len = ext_len if ext_len is not None else 0
self.device = device
self.shuffle = shuffle
def get_sent_stream(self, path):
sents = self.vocab.encode_file(path, add_double_eos=True)
if self.shuffle:
np.random.shuffle(sents)
sent_stream = iter(sents)
return sent_stream
def __iter__(self):
if self.shuffle:
np.random.shuffle(self.paths)
for path in self.paths:
# sent_stream is an iterator
sent_stream = self.get_sent_stream(path)
for batch in self.stream_iterator(sent_stream):
yield batch
class TransfoXLCorpus(object):
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, cache_dir=None, *inputs, **kwargs):
"""
Instantiate a pre-processed corpus.
"""
vocab = TransfoXLTokenizer.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
if pretrained_model_name_or_path in PRETRAINED_CORPUS_ARCHIVE_MAP:
corpus_file = PRETRAINED_CORPUS_ARCHIVE_MAP[pretrained_model_name_or_path]
else:
corpus_file = os.path.join(pretrained_model_name_or_path, CORPUS_NAME)
# redirect to the cache, if necessary
try:
resolved_corpus_file = cached_path(corpus_file, cache_dir=cache_dir)
except EnvironmentError:
logger.error(
"Corpus '{}' was not found in corpus list ({}). "
"We assumed '{}' was a path or url but couldn't find files {} "
"at this path or url.".format(
pretrained_model_name_or_path,
', '.join(PRETRAINED_VOCAB_ARCHIVE_MAP.keys()),
pretrained_model_name_or_path,
corpus_file))
return None
if resolved_corpus_file == corpus_file:
logger.info("loading corpus file {}".format(corpus_file))
else:
logger.info("loading corpus file {} from cache at {}".format(
corpus_file, resolved_corpus_file))
        # Instantiate corpus.
corpus = cls(*inputs, **kwargs)
corpus_dict = torch.load(resolved_corpus_file)
for key, value in corpus_dict.items():
corpus.__dict__[key] = value
corpus.vocab = vocab
if corpus.train is not None:
corpus.train = torch.tensor(corpus.train, dtype=torch.long)
if corpus.valid is not None:
corpus.valid = torch.tensor(corpus.valid, dtype=torch.long)
if corpus.test is not None:
corpus.test = torch.tensor(corpus.test, dtype=torch.long)
return corpus
def __init__(self, *args, **kwargs):
self.vocab = TransfoXLTokenizer(*args, **kwargs)
self.dataset = None
self.train = None
self.valid = None
self.test = None
def build_corpus(self, path, dataset):
self.dataset = dataset
if self.dataset in ['ptb', 'wt2', 'enwik8', 'text8']:
self.vocab.count_file(os.path.join(path, 'train.txt'))
self.vocab.count_file(os.path.join(path, 'valid.txt'))
self.vocab.count_file(os.path.join(path, 'test.txt'))
elif self.dataset == 'wt103':
self.vocab.count_file(os.path.join(path, 'train.txt'))
elif self.dataset == 'lm1b':
train_path_pattern = os.path.join(
path, '1-billion-word-language-modeling-benchmark-r13output',
'training-monolingual.tokenized.shuffled', 'news.en-*')
train_paths = glob.glob(train_path_pattern)
# the vocab will load from file when build_vocab() is called
self.vocab.build_vocab()
if self.dataset in ['ptb', 'wt2', 'wt103']:
self.train = self.vocab.encode_file(
os.path.join(path, 'train.txt'), ordered=True)
self.valid = self.vocab.encode_file(
os.path.join(path, 'valid.txt'), ordered=True)
self.test = self.vocab.encode_file(
os.path.join(path, 'test.txt'), ordered=True)
elif self.dataset in ['enwik8', 'text8']:
self.train = self.vocab.encode_file(
os.path.join(path, 'train.txt'), ordered=True, add_eos=False)
self.valid = self.vocab.encode_file(
os.path.join(path, 'valid.txt'), ordered=True, add_eos=False)
self.test = self.vocab.encode_file(
os.path.join(path, 'test.txt'), ordered=True, add_eos=False)
elif self.dataset == 'lm1b':
self.train = train_paths
self.valid = self.vocab.encode_file(
os.path.join(path, 'valid.txt'), ordered=False, add_double_eos=True)
self.test = self.vocab.encode_file(
os.path.join(path, 'test.txt'), ordered=False, add_double_eos=True)
def get_iterator(self, split, *args, **kwargs):
if split == 'train':
if self.dataset in ['ptb', 'wt2', 'wt103', 'enwik8', 'text8']:
data_iter = LMOrderedIterator(self.train, *args, **kwargs)
elif self.dataset == 'lm1b':
kwargs['shuffle'] = True
data_iter = LMMultiFileIterator(self.train, self.vocab, *args, **kwargs)
elif split in ['valid', 'test']:
data = self.valid if split == 'valid' else self.test
if self.dataset in ['ptb', 'wt2', 'wt103', 'enwik8', 'text8']:
data_iter = LMOrderedIterator(data, *args, **kwargs)
elif self.dataset == 'lm1b':
data_iter = LMShuffledIterator(data, *args, **kwargs)
return data_iter
def get_lm_corpus(datadir, dataset):
fn = os.path.join(datadir, 'cache.pt')
fn_pickle = os.path.join(datadir, 'cache.pkl')
    if os.path.exists(fn):
        print('Loading cached dataset...')
        corpus = torch.load(fn)
    elif os.path.exists(fn_pickle):
        print('Loading cached dataset from pickle...')
        with open(fn_pickle, "rb") as fp:
            corpus = pickle.load(fp)
else:
print('Producing dataset {}...'.format(dataset))
kwargs = {}
if dataset in ['wt103', 'wt2']:
kwargs['special'] = ['<eos>']
kwargs['lower_case'] = False
elif dataset == 'ptb':
kwargs['special'] = ['<eos>']
kwargs['lower_case'] = True
elif dataset == 'lm1b':
kwargs['special'] = []
kwargs['lower_case'] = False
kwargs['vocab_file'] = os.path.join(datadir, '1b_word_vocab.txt')
elif dataset in ['enwik8', 'text8']:
pass
        corpus = TransfoXLCorpus(**kwargs)
        corpus.build_corpus(datadir, dataset)
torch.save(corpus, fn)
return corpus
| 22,060 | 36.582624 | 110 | py |
EmpTransfo | EmpTransfo-master/pytorch_pretrained_bert/tokenization_openai.py | # coding=utf-8
# Copyright 2018 The Open AI Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for OpenAI GPT."""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import json
import logging
import os
import re
import sys
from io import open
from tqdm import tqdm
from .file_utils import cached_path
from .tokenization import BasicTokenizer
logger = logging.getLogger(__name__)
PRETRAINED_VOCAB_ARCHIVE_MAP = {
'openai-gpt': "https://s3.amazonaws.com/models.huggingface.co/bert/openai-gpt-vocab.json",
}
PRETRAINED_MERGES_ARCHIVE_MAP = {
'openai-gpt': "https://s3.amazonaws.com/models.huggingface.co/bert/openai-gpt-merges.txt",
}
PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP = {
'openai-gpt': 512,
}
VOCAB_NAME = 'vocab.json'
MERGES_NAME = 'merges.txt'
SPECIAL_TOKENS_NAME = 'special_tokens.txt'
def get_pairs(word):
"""
Return set of symbol pairs in a word.
word is represented as tuple of symbols (symbols being variable-length strings)
"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs
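# Illustrative sketch (not part of the original module):
# get_pairs(('l', 'o', 'w</w>')) returns {('l', 'o'), ('o', 'w</w>')}.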
def text_standardize(text):
"""
fixes some issues the spacy tokenizer had on books corpus
also does some whitespace standardization
"""
text = text.replace('—', '-')
text = text.replace('–', '-')
text = text.replace('―', '-')
text = text.replace('…', '...')
text = text.replace('´', "'")
text = re.sub(r'''(-+|~+|!+|"+|;+|\?+|\++|,+|\)+|\(+|\\+|\/+|\*+|\[+|\]+|}+|{+|\|+|_+)''', r' \1 ', text)
text = re.sub(r'\s*\n\s*', ' \n ', text)
text = re.sub(r'[^\S\n]+', ' ', text)
return text.strip()
class OpenAIGPTTokenizer(object):
"""
BPE tokenizer. Peculiarities:
- lower case all inputs
- uses SpaCy tokenizer and ftfy for pre-BPE tokenization if they are installed, fallback to BERT's BasicTokenizer if not.
- argument special_tokens and function set_special_tokens:
can be used to add additional symbols (ex: "__classify__") to a vocabulary.
"""
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, cache_dir=None, *inputs, **kwargs):
"""
Instantiate a PreTrainedBertModel from a pre-trained model file.
Download and cache the pre-trained model file if needed.
"""
if pretrained_model_name_or_path in PRETRAINED_VOCAB_ARCHIVE_MAP:
vocab_file = PRETRAINED_VOCAB_ARCHIVE_MAP[pretrained_model_name_or_path]
merges_file = PRETRAINED_MERGES_ARCHIVE_MAP[pretrained_model_name_or_path]
special_tokens_file = None
else:
vocab_file = os.path.join(pretrained_model_name_or_path, VOCAB_NAME)
merges_file = os.path.join(pretrained_model_name_or_path, MERGES_NAME)
special_tokens_file = os.path.join(pretrained_model_name_or_path, SPECIAL_TOKENS_NAME)
if not os.path.exists(special_tokens_file):
special_tokens_file = None
else:
logger.info("loading special tokens file {}".format(special_tokens_file))
# redirect to the cache, if necessary
try:
resolved_vocab_file = cached_path(vocab_file, cache_dir=cache_dir)
resolved_merges_file = cached_path(merges_file, cache_dir=cache_dir)
except EnvironmentError:
logger.error(
"Model name '{}' was not found in model name list ({}). "
"We assumed '{}' was a path or url but couldn't find files {} and {} "
"at this path or url.".format(
pretrained_model_name_or_path,
', '.join(PRETRAINED_VOCAB_ARCHIVE_MAP.keys()),
pretrained_model_name_or_path,
vocab_file, merges_file))
return None
if resolved_vocab_file == vocab_file and resolved_merges_file == merges_file:
logger.info("loading vocabulary file {}".format(vocab_file))
logger.info("loading merges file {}".format(merges_file))
else:
logger.info("loading vocabulary file {} from cache at {}".format(
vocab_file, resolved_vocab_file))
logger.info("loading merges file {} from cache at {}".format(
merges_file, resolved_merges_file))
if pretrained_model_name_or_path in PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP:
            # if we're using a pretrained model, ensure the tokenizer won't index sequences longer
# than the number of positional embeddings
max_len = PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP[pretrained_model_name_or_path]
kwargs['max_len'] = min(kwargs.get('max_len', int(1e12)), max_len)
# Instantiate tokenizer.
if special_tokens_file and 'special_tokens' not in kwargs:
special_tokens = open(special_tokens_file, encoding='utf-8').read().split('\n')[:-1]
else:
special_tokens = kwargs.pop('special_tokens', [])
tokenizer = cls(resolved_vocab_file, resolved_merges_file, special_tokens=special_tokens, *inputs, **kwargs)
return tokenizer
def __init__(self, vocab_file, merges_file, special_tokens=None, max_len=None):
try:
import ftfy
import spacy
self.nlp = spacy.load('en_core_web_sm', disable=['parser', 'tagger', 'ner', 'textcat'])
self.fix_text = ftfy.fix_text
except ImportError:
logger.warning("ftfy or spacy is not installed using BERT BasicTokenizer instead of SpaCy & ftfy.")
self.nlp = BasicTokenizer(do_lower_case=True,
never_split=special_tokens if special_tokens is not None else [])
self.fix_text = None
self.max_len = max_len if max_len is not None else int(1e12)
self.encoder = json.load(open(vocab_file, encoding="utf-8"))
self.decoder = {v:k for k,v in self.encoder.items()}
merges = open(merges_file, encoding='utf-8').read().split('\n')[1:-1]
merges = [tuple(merge.split()) for merge in merges]
self.bpe_ranks = dict(zip(merges, range(len(merges))))
self.cache = {}
self.special_tokens = {}
self.special_tokens_decoder = {}
self.set_special_tokens(special_tokens)
def __len__(self):
return len(self.encoder) + len(self.special_tokens)
def set_special_tokens(self, special_tokens):
""" Add a list of additional tokens to the encoder.
The additional tokens are indexed starting from the last index of the
current vocabulary in the order of the `special_tokens` list.
"""
if not special_tokens:
self.special_tokens = {}
self.special_tokens_decoder = {}
return
self.special_tokens = dict((tok, len(self.encoder) + i) for i, tok in enumerate(special_tokens))
self.special_tokens_decoder = {v:k for k, v in self.special_tokens.items()}
if self.fix_text is None:
# Using BERT's BasicTokenizer: we can update the tokenizer
self.nlp.never_split = special_tokens
logger.info("Special tokens {}".format(self.special_tokens))
def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token[:-1]) + (token[-1] + '</w>',)
pairs = get_pairs(word)
if not pairs:
return token+'</w>'
while True:
bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
new_word.extend(word[i:j])
i = j
                except ValueError:
new_word.extend(word[i:])
break
if word[i] == first and i < len(word)-1 and word[i+1] == second:
new_word.append(first+second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = ' '.join(word)
if word == '\n </w>':
word = '\n</w>'
self.cache[token] = word
return word
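    # Illustrative sketch (assuming merge ranks ('l', 'o') = 0 and
    # ('lo', 'w</w>') = 1): bpe('low') rewrites ('l', 'o', 'w</w>') ->
    # ('lo', 'w</w>') -> ('low</w>',) and returns 'low</w>'.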
def tokenize(self, text):
""" Tokenize a string. """
split_tokens = []
if self.fix_text is None:
# Using BERT's BasicTokenizer
text = self.nlp.tokenize(text)
for token in text:
split_tokens.extend([t for t in self.bpe(token).split(' ')])
else:
# Using SpaCy & ftfy (original tokenization process of OpenAI GPT)
text = self.nlp(text_standardize(self.fix_text(text)))
for token in text:
split_tokens.extend([t for t in self.bpe(token.text.lower()).split(' ')])
return split_tokens
def convert_tokens_to_ids(self, tokens):
""" Converts a sequence of tokens into ids using the vocab. """
ids = []
if isinstance(tokens, str) or (sys.version_info[0] == 2 and isinstance(tokens, unicode)):
if tokens in self.special_tokens:
return self.special_tokens[tokens]
else:
return self.encoder.get(tokens, 0)
for token in tokens:
if token in self.special_tokens:
ids.append(self.special_tokens[token])
else:
ids.append(self.encoder.get(token, 0))
if len(ids) > self.max_len:
logger.warning(
"Token indices sequence length is longer than the specified maximum "
" sequence length for this OpenAI GPT model ({} > {}). Running this"
" sequence through the model will result in indexing errors".format(len(ids), self.max_len)
)
return ids
def convert_ids_to_tokens(self, ids, skip_special_tokens=False):
"""Converts a sequence of ids in BPE tokens using the vocab."""
tokens = []
for i in ids:
if i in self.special_tokens_decoder:
if not skip_special_tokens:
tokens.append(self.special_tokens_decoder[i])
else:
tokens.append(self.decoder[i])
return tokens
def encode(self, text):
return self.convert_tokens_to_ids(self.tokenize(text))
def decode(self, ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
"""Converts a sequence of ids in a string."""
tokens = self.convert_ids_to_tokens(ids, skip_special_tokens=skip_special_tokens)
out_string = ''.join(tokens).replace('</w>', ' ').strip()
if clean_up_tokenization_spaces:
out_string = out_string.replace('<unk>', '')
out_string = out_string.replace(' .', '.').replace(' ?', '?').replace(' !', '!').replace(' ,', ','
).replace(" ' ", "'").replace(" n't", "n't").replace(" 'm", "'m").replace(" do not", " don't"
).replace(" 's", "'s").replace(" 've", "'ve").replace(" 're", "'re")
return out_string
def save_vocabulary(self, vocab_path):
"""Save the tokenizer vocabulary and merge files to a directory."""
if not os.path.isdir(vocab_path):
logger.error("Vocabulary path ({}) should be a directory".format(vocab_path))
return
vocab_file = os.path.join(vocab_path, VOCAB_NAME)
merge_file = os.path.join(vocab_path, MERGES_NAME)
special_tokens_file = os.path.join(vocab_path, SPECIAL_TOKENS_NAME)
with open(vocab_file, 'w', encoding='utf-8') as f:
f.write(json.dumps(self.encoder, ensure_ascii=False))
index = 0
with open(merge_file, "w", encoding="utf-8") as writer:
writer.write(u'#version: 0.2\n')
for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
if index != token_index:
logger.warning("Saving vocabulary to {}: BPE merge indices are not consecutive."
" Please check that the tokenizer is not corrupted!".format(merge_file))
index = token_index
writer.write(' '.join(bpe_tokens) + u'\n')
index += 1
index = len(self.encoder)
with open(special_tokens_file, 'w', encoding='utf-8') as writer:
for token, token_index in sorted(self.special_tokens.items(), key=lambda kv: kv[1]):
if index != token_index:
logger.warning("Saving special tokens vocabulary to {}: BPE indices are not consecutive."
" Please check that the tokenizer is not corrupted!".format(special_tokens_file))
index = token_index
writer.write(token + u'\n')
index += 1
return vocab_file, merge_file, special_tokens_file
| 13,922 | 43.340764 | 129 | py |
EmpTransfo | EmpTransfo-master/pytorch_pretrained_bert/modeling_transfo_xl_utilities.py | # coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Utilities for PyTorch Transformer XL model.
Directly adapted from https://github.com/kimiyoung/transformer-xl.
"""
from collections import defaultdict
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
# CUDA_MAJOR = int(torch.version.cuda.split('.')[0])
# CUDA_MINOR = int(torch.version.cuda.split('.')[1])
class ProjectedAdaptiveLogSoftmax(nn.Module):
def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1,
keep_order=False):
super(ProjectedAdaptiveLogSoftmax, self).__init__()
self.n_token = n_token
self.d_embed = d_embed
self.d_proj = d_proj
self.cutoffs = cutoffs + [n_token]
self.cutoff_ends = [0] + self.cutoffs
self.div_val = div_val
self.shortlist_size = self.cutoffs[0]
self.n_clusters = len(self.cutoffs) - 1
self.head_size = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))
self.out_layers = nn.ModuleList()
self.out_projs = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs)):
if d_proj != d_embed:
self.out_projs.append(
nn.Parameter(torch.Tensor(d_proj, d_embed))
)
else:
self.out_projs.append(None)
self.out_layers.append(nn.Linear(d_embed, n_token))
else:
for i in range(len(self.cutoffs)):
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i+1]
d_emb_i = d_embed // (div_val ** i)
self.out_projs.append(
nn.Parameter(torch.Tensor(d_proj, d_emb_i))
)
self.out_layers.append(nn.Linear(d_emb_i, r_idx-l_idx))
self.keep_order = keep_order
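    # Illustrative sketch (assuming n_token=10, cutoffs=[4, 8]): tokens 0-3
    # live in the head together with 2 cluster logits (head_size=6); tokens
    # 4-7 and 8-9 form two tail clusters whose log-probs combine the head's
    # cluster log-prob with the tail softmax log-prob.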
def _compute_logit(self, hidden, weight, bias, proj):
if proj is None:
logit = F.linear(hidden, weight, bias=bias)
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
proj_hid = F.linear(hidden, proj.t().contiguous())
logit = F.linear(proj_hid, weight, bias=bias)
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def forward(self, hidden, target=None, keep_order=False):
'''
Params:
hidden :: [len*bsz x d_proj]
target :: [len*bsz]
Return:
                if target is not None:
                    out :: [len*bsz] Negative log likelihood
                else:
                    out :: [len*bsz x n_tokens] log probabilities of tokens over the vocabulary
            We could replace this implementation by the native PyTorch one
            if theirs had an option to set bias on all clusters.
here: https://github.com/pytorch/pytorch/blob/dbe6a7a9ff1a364a8706bf5df58a1ca96d2fd9da/torch/nn/modules/adaptive.py#L138
'''
if target is not None:
target = target.view(-1)
if hidden.size(0) != target.size(0):
raise RuntimeError('Input and target should have the same size '
'in the batch dimension.')
if self.n_clusters == 0:
logit = self._compute_logit(hidden, self.out_layers[0].weight,
self.out_layers[0].bias, self.out_projs[0])
if target is not None:
out = -F.log_softmax(logit, dim=-1) \
.gather(1, target.unsqueeze(1)).squeeze(1)
else:
out = F.log_softmax(logit, dim=-1)
else:
# construct weights and biases
weights, biases = [], []
for i in range(len(self.cutoffs)):
if self.div_val == 1:
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
weight_i = self.out_layers[0].weight[l_idx:r_idx]
bias_i = self.out_layers[0].bias[l_idx:r_idx]
else:
weight_i = self.out_layers[i].weight
bias_i = self.out_layers[i].bias
if i == 0:
weight_i = torch.cat(
[weight_i, self.cluster_weight], dim=0)
bias_i = torch.cat(
[bias_i, self.cluster_bias], dim=0)
weights.append(weight_i)
biases.append(bias_i)
head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
head_logprob = F.log_softmax(head_logit, dim=1)
if target is None:
out = hidden.new_empty((head_logit.size(0), self.n_token))
else:
out = torch.zeros_like(target, dtype=hidden.dtype, device=hidden.device)
offset = 0
cutoff_values = [0] + self.cutoffs
for i in range(len(cutoff_values) - 1):
l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]
if target is not None:
mask_i = (target >= l_idx) & (target < r_idx)
indices_i = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
target_i = target.index_select(0, indices_i) - l_idx
head_logprob_i = head_logprob.index_select(0, indices_i)
hidden_i = hidden.index_select(0, indices_i)
else:
hidden_i = hidden
if i == 0:
if target is not None:
logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
else:
out[:, :self.cutoffs[0]] = head_logprob[:, :self.cutoffs[0]]
else:
weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
tail_logprob_i = F.log_softmax(tail_logit_i, dim=1)
cluster_prob_idx = self.cutoffs[0] + i - 1 # No probability for the head cluster
if target is not None:
logprob_i = head_logprob_i[:, cluster_prob_idx] \
+ tail_logprob_i.gather(1, target_i[:, None]).squeeze(1)
else:
logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
out[:, l_idx:r_idx] = logprob_i
if target is not None:
if (hasattr(self, 'keep_order') and self.keep_order) or keep_order:
out.index_copy_(0, indices_i, -logprob_i)
else:
out[offset:offset+logprob_i.size(0)].copy_(-logprob_i)
offset += logprob_i.size(0)
return out
def log_prob(self, hidden):
r""" Computes log probabilities for all :math:`n\_classes`
From: https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/adaptive.py
Args:
hidden (Tensor): a minibatch of examples
Returns:
log-probabilities of for each class :math:`c`
in range :math:`0 <= c <= n\_classes`, where :math:`n\_classes` is a
parameter passed to ``AdaptiveLogSoftmaxWithLoss`` constructor.
Shape:
- Input: :math:`(N, in\_features)`
- Output: :math:`(N, n\_classes)`
"""
if self.n_clusters == 0:
logit = self._compute_logit(hidden, self.out_layers[0].weight,
self.out_layers[0].bias, self.out_projs[0])
return F.log_softmax(logit, dim=-1)
else:
# construct weights and biases
weights, biases = [], []
for i in range(len(self.cutoffs)):
if self.div_val == 1:
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
weight_i = self.out_layers[0].weight[l_idx:r_idx]
bias_i = self.out_layers[0].bias[l_idx:r_idx]
else:
weight_i = self.out_layers[i].weight
bias_i = self.out_layers[i].bias
if i == 0:
weight_i = torch.cat(
[weight_i, self.cluster_weight], dim=0)
bias_i = torch.cat(
[bias_i, self.cluster_bias], dim=0)
weights.append(weight_i)
biases.append(bias_i)
head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
out = hidden.new_empty((head_logit.size(0), self.n_token))
head_logprob = F.log_softmax(head_logit, dim=1)
cutoff_values = [0] + self.cutoffs
for i in range(len(cutoff_values) - 1):
start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
out[:, :self.cutoffs[0]] = head_logprob[:, :self.cutoffs[0]]
else:
weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i)
tail_logprob_i = F.log_softmax(tail_logit_i, dim=1)
                    # The cluster log-prob lives at head index cutoffs[0] + i - 1, as in forward above.
                    logprob_i = head_logprob[:, self.cutoffs[0] + i - 1, None] + tail_logprob_i
                    out[:, start_idx:stop_idx] = logprob_i
return out
class LogUniformSampler(object):
def __init__(self, range_max, n_sample):
"""
Reference : https://github.com/tensorflow/tensorflow/blob/r1.10/tensorflow/python/ops/candidate_sampling_ops.py
`P(class) = (log(class + 2) - log(class + 1)) / log(range_max + 1)`
expected count can be approximated by 1 - (1 - p)^n
and we use a numerically stable version -expm1(num_tries * log1p(-p))
Our implementation fixes num_tries at 2 * n_sample, and the actual #samples will vary from run to run
"""
with torch.no_grad():
self.range_max = range_max
log_indices = torch.arange(1., range_max+2., 1.).log_()
self.dist = (log_indices[1:] - log_indices[:-1]) / log_indices[-1]
# print('P', self.dist.numpy().tolist()[-30:])
self.log_q = (- (-self.dist.double().log1p_() * 2 * n_sample).expm1_()).log_().float()
self.n_sample = n_sample
def sample(self, labels):
"""
labels: [b1, b2]
Return
true_log_probs: [b1, b2]
samp_log_probs: [n_sample]
neg_samples: [n_sample]
"""
# neg_samples = torch.empty(0).long()
n_sample = self.n_sample
n_tries = 2 * n_sample
with torch.no_grad():
neg_samples = torch.multinomial(self.dist, n_tries, replacement=True).unique()
device = labels.device
neg_samples = neg_samples.to(device)
true_log_probs = self.log_q[labels].to(device)
samp_log_probs = self.log_q[neg_samples].to(device)
return true_log_probs, samp_log_probs, neg_samples
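    # Illustrative sketch (assuming range_max=4): P(0) = log(2)/log(5) ~ 0.43,
    # so low token ids (the most frequent under a Zipf-sorted vocabulary) are
    # proposed as negatives most often.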
def sample_logits(embedding, bias, labels, inputs, sampler):
"""
embedding: an nn.Embedding layer
bias: [n_vocab]
labels: [b1, b2]
inputs: [b1, b2, n_emb]
sampler: you may use a LogUniformSampler
Return
logits: [b1, b2, 1 + n_sample]
"""
true_log_probs, samp_log_probs, neg_samples = sampler.sample(labels)
n_sample = neg_samples.size(0)
b1, b2 = labels.size(0), labels.size(1)
all_ids = torch.cat([labels.view(-1), neg_samples])
all_w = embedding(all_ids)
true_w = all_w[: -n_sample].view(b1, b2, -1)
sample_w = all_w[- n_sample:].view(n_sample, -1)
all_b = bias[all_ids]
true_b = all_b[: -n_sample].view(b1, b2)
sample_b = all_b[- n_sample:]
hit = (labels[:, :, None] == neg_samples).detach()
true_logits = torch.einsum('ijk,ijk->ij',
[true_w, inputs]) + true_b - true_log_probs
sample_logits = torch.einsum('lk,ijk->ijl',
[sample_w, inputs]) + sample_b - samp_log_probs
sample_logits.masked_fill_(hit, -1e30)
logits = torch.cat([true_logits[:, :, None], sample_logits], -1)
return logits
# class LogUniformSampler(object):
# def __init__(self, range_max, unique=False):
# """
# Reference : https://github.com/tensorflow/tensorflow/blob/r1.10/tensorflow/python/ops/candidate_sampling_ops.py
# `P(class) = (log(class + 2) - log(class + 1)) / log(range_max + 1)`
# """
# self.range_max = range_max
# log_indices = torch.arange(1., range_max+2., 1.).log_()
# self.dist = (log_indices[1:] - log_indices[:-1]) / log_indices[-1]
# self.unique = unique
# if self.unique:
# self.exclude_mask = torch.ByteTensor(range_max).fill_(0)
# def sample(self, n_sample, labels):
# pos_sample, new_labels = labels.unique(return_inverse=True)
# n_pos_sample = pos_sample.size(0)
# n_neg_sample = n_sample - n_pos_sample
# if self.unique:
# self.exclude_mask.index_fill_(0, pos_sample, 1)
# sample_dist = self.dist.clone().masked_fill_(self.exclude_mask, 0)
# self.exclude_mask.index_fill_(0, pos_sample, 0)
# else:
# sample_dist = self.dist
# neg_sample = torch.multinomial(sample_dist, n_neg_sample)
# sample = torch.cat([pos_sample, neg_sample])
# sample_prob = self.dist[sample]
# return new_labels, sample, sample_prob
if __name__ == '__main__':
S, B = 3, 4
n_vocab = 10000
n_sample = 5
H = 32
labels = torch.LongTensor(S, B).random_(0, n_vocab)
# sampler = LogUniformSampler(n_vocab, unique=False)
# new_labels, sample, sample_prob = sampler.sample(n_sample, labels)
sampler = LogUniformSampler(n_vocab, n_sample)#, unique=True)
# true_probs, samp_probs, neg_samples = sampler.sample(n_sample, labels)
# print('true_probs', true_probs.numpy().tolist())
# print('samp_probs', samp_probs.numpy().tolist())
# print('neg_samples', neg_samples.numpy().tolist())
# print('sum', torch.sum(sampler.dist).item())
# assert torch.all(torch.sort(sample.unique())[0].eq(torch.sort(sample)[0])).item()
embedding = nn.Embedding(n_vocab, H)
bias = torch.zeros(n_vocab)
inputs = torch.Tensor(S, B, H).normal_()
    logits = sample_logits(embedding, bias, labels, inputs, sampler)
    print('logits', logits.detach().numpy().tolist())
    print('logits shape', logits.size())
| 16,108 | 38.972705 | 132 | py |
potapov_interpolation | potapov_interpolation-master/setup.py | from setuptools import setup
setup(name='Potapov_interpolation',
version='0.1',
description='Treating feedback with delays in quantum systems',
url='https://github.com/tabakg/potapov_interpolation/',
author='Gil Tabak',
author_email='[email protected]',
license='GNU',
packages=['Potapov_Code'],
install_requires=[
'matplotlib',
'sympy',
'numpy',
'QNET==1.4.1',
],
dependency_links = [
'[email protected]:mabuchilab/QNET.git#egg=QNET-1.4.1',
],
zip_safe=False)
| 555 | 25.47619 | 67 | py |
potapov_interpolation | potapov_interpolation-master/Potapov_Code/Roots.py | # -*- coding: utf-8 -*-
"""
Created on Sat Feb 28 20:15:35 2015
@author: gil
@title: Rootfinder
Find the roots of a function f in the complex plane inside of a rectangular region.
We implement the method in the following paper:
Delves, L. M., and J. N. Lyness. "A numerical method for locating the zeros of
an analytic function." Mathematics of computation 21.100 (1967): 543-560.
Alternative code using a similar method can be found here:
http://cpc.cs.qub.ac.uk/summaries/ADKW_v1_0.html
The main idea is to compute contour integrals of functions of the form
:math:`z^k f'/f` around the contour, for integer values of k. Here :math:`f'` denotes the
derivative of f. The resulting values of the contour integrals are proportional
to :math:`\sum_i z_i^k`, where i is the index of the roots.
Throughout we denote :math:`f_{frac} = f'/f`.
I have also tried several optimizations and strategies for numerical stability.
"""
import numpy as np
from itertools import chain
from scipy import integrate
import math
import cmath as cm
from functions import limit
def Muller(x1,x2,x3,f,tol = 1e-5,N=400,verbose=False):
'''
A method that works well for finding roots locally in the complex plane.
Uses three points for initial guess, x1,x2,x3.
Args:
x1,x2,x3 (complex numbers): initial points for the algorithm.
f (function): complex valued function for which to find roots.
tol (optional[float]): tolerance.
        N (optional[int]): maximum number of iterations.
verbose (optional[boolean]): print warnings.
Returns:
estimated root of the function f.
'''
n = 0
x = x3
if x1 == x2:
if verbose:
print "Muller needs x1 and x2 different!!!"
print "x1 = x2 = ", x1
return x3
if x2 == x3:
if verbose:
print "Muller needs x2 and x3 different!!!"
print "x2 = x3 = ", x2
return x3
if x1 == x3:
if verbose:
print "Muller needs x1 and x3 different!!!"
print "x1 = x3 = ", x1
return x3
while n < N and abs(f(x3))>tol:
n+=1
q = (x3 - x2) / (x2 - x1)
A = q * f(x3) - q*(1.+q)*f(x2)+q**2.*f(x1)
B = (2.*q+1.)*f(x3)-(1.+q)**2.*f(x2)+q**2.*f(x1)
C = (1.+q)*f(x3)
D1 = B+cm.sqrt(B**2-4.*A*C)
D2 = B-cm.sqrt(B**2-4.*A*C)
if abs(D1) > abs(D2):
D = D1
elif D1 == D2 == 0:
if abs(f(x3))< tol:
return x3
else:
if verbose:
print "Desired tolerance not reached and Muller denominator diverges.",
"Please try different parameters in Muller for better results."
return x3
else: D = D2
x = x3 - (x3-x2)*2.*C / D
x1 = x2
x2 = x3
x3 = x
#print x
return x
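# Illustrative sketch (not part of the original module): for f(z) = z**2 + 1,
#   Muller(0.5j, 0.8j, 1.2j, lambda z: z**2 + 1.)
# converges to (approximately) 1j, the root of f nearest the initial guesses.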
def residues(f_frac,roots):
'''
    Finds the residues of :math:`f_{frac} = f'/f` given the location of some roots of f.
    The roots of f are the poles of f_frac.
    Args:
        f_frac (function): a complex function, assumed to be :math:`f'/f`.
roots (a list of complex numbers): the roots of f; poles of f_frac.
Returns:
A list of residues of f_frac.
'''
return [limit(lambda z: (z-root)*f_frac(z),root) for root in roots]
def new_f_frac(f_frac,z0,residues,roots,val=None):
'''
    Evaluates f_frac after some roots and their residues are subtracted.
    This function does NOT check for division by zero or whether the
    values become too large.
We assume here that the poles are of order 1.
Args:
f_frac (function): function for which roots will be subtracted.
z0 (complex number): point where new_f_frac is evaluated.
residues (list of complex numbers): The corresponding residues to subtract.
roots (list of complex numbers): The corresponding roots to subtract.
val (optional[complex number]): We can impose a value f_frac(z0) if we wish.
Returns:
The new value of f_frac(z0) once the chosen poles have been subtracted.
'''
if val == None:
val = f_frac(z0)
for res,root in zip(residues,roots):
val -= res/(z0-root)
return val
def new_f_frac_safe(f_frac,z0,residues,roots,max_ok,val=None,verbose=False):
'''
    Evaluates f_frac after some roots and their residues are subtracted.
The safe version checks for large values and division by zero.
If the value of f_frac(z0) is too large, subtracting the roots of f becomes
numerically unstable. In this case, we approximate the new function f_frac
by using the limit function.
We assume here that the poles are of order 1.
Args:
f_frac (function): function for which roots will be subtracted.
z0 (complex number): point where new_f_frac is evaluated.
residues (list of complex numbers): The corresponding residues to
subtract.
roots (list of complex numbers): The corresponding roots to subtract.
val (optional[complex number]): We can impose a value f_frac(z0) if
we wish.
        max_ok (float): maximum absolute value of f_frac(z0) to use.
verbose (optional[boolean]): print warnings.
Returns:
The new value of f_frac(z0) once the chosen poles have been subtracted.
'''
try:
        if val is None:
val = f_frac(z0)
if abs(val) < max_ok:
return new_f_frac(f_frac,z0,residues,roots,val)
else:
return limit(lambda z: new_f_frac(f_frac,z,residues,roots),z0)
except ZeroDivisionError:
if verbose:
print 'division by zero in new_f_frac_safe'
return limit(lambda z: new_f_frac(f_frac,z,residues,roots),z0)
def find_roots(y_smooth,c,num_roots_to_find):
'''
    Given the values y_smooth, locations c, and the number of roots to
    recover, find the roots using the polynomial trick (via Newton's
    identities).
    Args:
        y_smooth (list of complex numbers): points along the smoothed-out
            boundary.
        c (list of complex numbers): the contour points at which y_smooth
            was evaluated.
        num_roots_to_find (int): the number of roots to recover.
    Returns:
        (numpy array): the estimated roots.
    '''
    p=[0] ## p[k] will hold the k-th power sum of the roots; p[0] is a placeholder
for i in xrange(1,num_roots_to_find+1):
p.append(integrate.trapz([el*z**i for el,z in zip(y_smooth,c)],c) )
e = [1.]
for k in xrange(1,num_roots_to_find+1):
s = 0.
for i in xrange(1,k+1):
s += (-1.)**(i-1)*e[k-i]*p[i]
e.append(s / k)
coeff = [e[k]*(-1.)**(2.*num_roots_to_find-k)
for k in xrange(0,num_roots_to_find+1)]
return np.roots(coeff)
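## Worked example (added, not in the original file): if the power sums are
## p_1 = 3 and p_2 = 5 (true for roots {1, 2}), Newton's identities give
## e_1 = p_1 = 3 and e_2 = (e_1*p_1 - p_2)/2 = 2, so the reconstructed
## polynomial is z**2 - 3*z + 2, whose roots are 1 and 2.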
def combine(eps=1e-5,*args):
'''
chain together several lists and purge redundancies.
Args:
eps (optional[float]): tolerance for purging elements.
args (lists): several lists.
Returns:
A list of combined elements.
'''
lst = list(chain(*args))
return purge(lst, eps)
def purge(lst,eps=1e-14):
'''
Get rid of redundant elements in a list. There is a precision cutoff eps.
Args:
lst (list): elements.
eps (optional[float]): precision cutoff.
Returns:
A list without redundant elements.
'''
if len(lst) == 0:
return []
for el in lst[:-1]:
if abs(el-lst[-1]) < eps:
return purge(lst[:-1],eps)
return purge(lst[:-1],eps) + [lst[-1]]
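## Example (added): purge([1., 2., 1. + 1e-15]) returns [1., 2.]; the
## earliest member of each cluster of nearly-equal values is kept.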
def linspace(c1,c2,num=50):
'''
    Make a linspace analogue for complex numbers.
Args:
c1,c2 (complex numbers): The two points along which to draw a line.
num (optional [int]): number of points along the line.
Returns:
        a list of num points starting at c1 and advancing toward c2, stopping
        one step short of c2 (so consecutive edges of a closed contour do not
        repeat corner points).
'''
x1 = c1.real
y1 = c1.imag
x2 = c2.real*(num-1.)/num+x1*(1.)/num
y2 = c2.imag*(num-1.)/num+y1*(1.)/num
return [real+imag*1j for real,imag in zip(np.linspace(x1,x2,num=num),
np.linspace(y1,y2,num=num)) ]
def get_boundary(x_cent,y_cent,width,height,N):
'''
Make a rectangle centered at x_cent,y_cent. Find points along this rectangle.
I use the convention that width/height make up half the dimensions of the rectangle.
Args:
x_cent,y_cent (floats): the coordinates of the center of the rectangle.
width,height (float): The (half) width and height of the rectangle.
N (int): number of points to use along each edge.
Returns:
A list of points along the edge of the rectangle in the complex plane.
'''
c1 = x_cent-width+(y_cent-height)*1j
c2 = x_cent+width+(y_cent-height)*1j
c3 = x_cent+width+(y_cent+height)*1j
c4 = x_cent-width+(y_cent+height)*1j
return linspace(c1,c2,num=N)+\
linspace(c2,c3,num=N)+\
linspace(c3,c4,num=N)+\
linspace(c4,c1,num=N)
def inside_boundary(roots_near_boundary,x_cent,y_cent,width,height):
'''
Takes roots and the specification of a rectangular region
returns the roots in the interior (and ON the boundary) of the region.
Args:
roots_near_boundary (list of complex numbers): roots near the boundary.
x_cent,y_cent (floats): coordinates of the center of the region.
width,height (floats): The (half) width of height of the rectangle.
Returns:
Roots in the interior and on the boundary of the rectangle.
'''
return [root for root in roots_near_boundary if
x_cent - width <= root.real <= x_cent + width and \
y_cent - height <= root.imag <= y_cent + height]
def get_max(y):
'''
return the :math:`IQR + median` to determine a maximum permissible value to use
in the numerically safe function new_f_frac_safe.
'''
q75, q50, q25 = np.percentile(y, [75 , 50, 25])
IQR = q75-q25
return q50+IQR
def find_maxes(y):
'''
Given a list of numbers, find the indices where local maxima happen.
Args:
y(list of floats).
Returns:
list of indices where maxima occur.
'''
maxes = []
    for i in xrange(-2,len(y)-2):  ## negative indices wrap around the closed contour
if y[i-1] < y[i] > y[i+1]:
maxes.append(i)
return maxes
def count_roots_rect(f,fp,x_cent,y_cent,width,height,N=10,outlier_coeff=100.,
max_steps=5,known_roots=None,verbose=False):
'''
I assume f is analytic with simple (i.e. order one) zeros.
TODO:
save values along edges if iterating to a smaller rectangle
extend to other kinds of functions, e.g. function with non-simple zeros.
Args:
f (function): the function for which the roots (i.e. zeros) will be
found.
fp (function): the derivative of f.
x_cent,y_cent (floats): The center of the rectangle in the complex
plane.
width,height (floats): half the width and height of the rectangular
region.
N (optional[int]): Number of points to sample per edge
outlier_coeff (float): multiplier for coefficient used when subtracting
poles to improve numerical stability. See new_f_frac_safe.
        max_steps (optional[int]): included for signature compatibility with
            get_roots_rect; not used here.
        known_roots (optional[list of complex numbers]): included for
            signature compatibility with get_roots_rect; not used here.
        verbose (optional[boolean]): print warnings.
    Returns:
        (complex): the contour integral of f'/(2*pi*i*f), which approximates
        the number of roots of f inside the rectangle.
'''
c = get_boundary(x_cent,y_cent,width,height,N)
f_frac = lambda z: fp(z)/(2j*np.pi*f(z))
y = [f_frac(z) for z in c]
    I0 = integrate.trapz(y, c) ## approximate number of roots inside the contour
return I0
def get_roots_rect(f,fp,x_cent,y_cent,width,height,N=10,outlier_coeff=100.,
max_steps=5,known_roots=None,verbose=False):
'''
I assume f is analytic with simple (i.e. order one) zeros.
TODO:
save values along edges if iterating to a smaller rectangle
extend to other kinds of functions, e.g. function with non-simple zeros.
Args:
f (function): the function for which the roots (i.e. zeros) will be
found.
fp (function): the derivative of f.
x_cent,y_cent (floats): The center of the rectangle in the complex
plane.
width,height (floats): half the width and height of the rectangular
region.
        N (optional[int]): number of points to sample per edge.
        outlier_coeff (optional[float]): multiplier for the cutoff used when
            subtracting poles to improve numerical stability. See
            new_f_frac_safe.
        max_steps (optional[int]): number of iterations allowed for the
            algorithm to recurse on smaller rectangles.
        known_roots (optional[list of complex numbers]): roots of f that are
            already known.
verbose (optional[boolean]): print warnings.
Returns:
A list of roots for the function f inside the rectangle determined by
the values x_cent,y_cent,width, and height.
'''
c = get_boundary(x_cent,y_cent,width,height,N)
f_frac = lambda z: fp(z)/(2j*np.pi*f(z))
y = [f_frac(z) for z in c]
outliers = find_maxes(map(abs,y))
roots_near_boundary = []
for outlier_index in outliers:
try:
            r = Muller(c[outlier_index-2], c[outlier_index+2],
                (c[outlier_index])/2, f, verbose=verbose)
roots_near_boundary.append(r)
        except Exception:
            ## Muller may fail to converge near a boundary outlier; skip it.
            pass
if known_roots is None:
known_roots=[]
subtracted_roots = purge(roots_near_boundary+known_roots)
## we don't need the roots far outside the boundary
subtracted_roots = inside_boundary(subtracted_roots,
x_cent,y_cent,width+2.,height+2.)
max_ok = abs(outlier_coeff*get_max(y))
subtracted_residues = residues(f_frac,subtracted_roots)
y_smooth = [new_f_frac_safe(f_frac,z_el,subtracted_residues,
subtracted_roots,max_ok,y_el,verbose)
for y_el,z_el in zip(y,c)]
I0 = integrate.trapz(y_smooth, c) ##approx number of roots not subtracted
    if verbose:
        print "Approximate number of roots in rectangle = ", I0
## If there's only a few roots, find them.
if I0 < 10:
num_roots_interior = int(round(abs(I0)))
if num_roots_interior == 0:
return inside_boundary(subtracted_roots,x_cent,y_cent,width,height)
if verbose:
if abs(num_roots_interior-I0)>0.005:
print "Warning!! Number of roots may be imprecise for this N."
print "Increase N for greater precision."
print "Approx number of roots in current rect = ", abs(I0)
rough_roots = find_roots(y_smooth,c,num_roots_interior)
Muller_all = np.vectorize(Muller)
##TODO: best way to pick points for Muller method below
##TODO: catch error in case Muller diverges (unlikely for these points)
        interior_roots = purge(Muller_all(rough_roots-1e-5,rough_roots+1e-5,
                            rough_roots,f,verbose=verbose).tolist())
combined_roots = purge(roots_near_boundary + interior_roots)
else:
combined_roots = purge(roots_near_boundary)
## if some interior roots are missed or if there were many roots,
## subdivide the rectangle and search recursively.
    if (I0>=10 or len(combined_roots) < num_roots_interior) and max_steps != 0:
x_list = [x_cent - width / 2.,x_cent - width / 2.,
x_cent + width / 2.,x_cent + width / 2.]
y_list = [y_cent - height / 2.,y_cent + height / 2.,
y_cent - height / 2.,y_cent + height / 2.]
for x,y in zip(x_list,y_list):
roots_from_subrectangle = get_roots_rect(f,fp,x,y,
width/2.,height/2.,N,outlier_coeff,
max_steps=max_steps-1,known_roots=combined_roots)
combined_roots = purge(combined_roots + roots_from_subrectangle)
elif max_steps == 0:
if verbose:
print "max_steps exceeded. Some interior roots might be missing."
return inside_boundary(combined_roots,x_cent,y_cent,width,height)
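if __name__ == '__main__':
    ## Minimal self-test (a sketch added to illustrate usage, not part of
    ## the original module): recover the two simple roots of z**2 + 1
    ## inside a rectangle centered at the origin.
    f = lambda z: z**2 + 1.
    fp = lambda z: 2.*z
    found = get_roots_rect(f, fp, 0., 0., 2., 2., N=100)
    print sorted(found, key=lambda z: z.imag) ## expect values near -1j, 1j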
| 16,077 | 31.220441 | 91 | py |
potapov_interpolation | potapov_interpolation-master/Potapov_Code/phase_matching.py | import sympy as sp
import numpy as np
import scipy.constants
from sympy.utilities.autowrap import ufuncify
from scipy import interpolate
import scipy.constants as consts
import matplotlib.pyplot as plt
from functions import timeit
from functions import make_dict_values_to_lists_of_inputs
import itertools
@timeit
def setup_ranges(max_i,base,min_value = 6.,max_value = 11.):
ranges= {}
for i in range(max_i+1):
ranges[i] = np.linspace(min_value,max_value,1+pow(base,i+1))
return ranges
@timeit
def initial_voxels(ranges,k_of_nu1_nu2,max_i,base,starting_i,eps):
solution_containing_voxels = {}
eps_current = eps * pow(base,max_i-starting_i)
solution_containing_voxels[starting_i] = {}
for i1,om1 in enumerate(ranges[starting_i]):
for i2,om2 in enumerate(ranges[starting_i]):
err = k_of_nu1_nu2(om1,om2)
if abs(err) < eps_current:
solution_containing_voxels[starting_i][i1,i2] = err
return solution_containing_voxels
@timeit
def add_high_res_voxels(ranges,k_of_nu1_nu2,max_i,base,starting_i,eps,solution_containing_voxels):
for i in range(starting_i+1,max_i+1):
eps_current = eps * pow(base,max_i-i)
solution_containing_voxels[i] = {}
for (i1,i2) in solution_containing_voxels[i-1]:
step_size = int(base/2)
max_length = pow(base,i+1)
for i1_new in range(max(0,i1*base-step_size),min(max_length,i1*base+step_size+1)):
for i2_new in range(max(0,i2*base-step_size),min(max_length,i2*base+step_size+1)):
err = k_of_nu1_nu2(ranges[i][i1_new],ranges[i][i2_new])
if abs(err) < eps_current:
solution_containing_voxels[i][i1_new,i2_new] = err
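## Search strategy above (added summary): the grid at level i has
## base**(i+1) + 1 points per axis, and the tolerance is loosened to
## eps * base**(max_i - i) so coarse levels do not discard voxels that a
## finer level would accept; only voxels flagged at level i-1 are rescanned
## at level i.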
@timeit
def plot_voxels(solution_containing_voxels,i,base):
    voxels = np.zeros((1+pow(base,i+1),1+pow(base,i+1)))
    for (i1,i2) in solution_containing_voxels[i]:
        voxels[i1,i2] = 1
    ## `plot_arr` was undefined and `base` was a free global in the original;
    ## display the voxel grid directly instead.
    plt.imshow(voxels,origin='lower')
    plt.show()
def voxel_solutions(ranges,k_of_nu1_nu2,max_i,base,starting_i,eps):
solution_containing_voxels = initial_voxels(ranges,k_of_nu1_nu2,max_i,
base,starting_i,eps)
add_high_res_voxels(ranges,k_of_nu1_nu2,max_i,base,starting_i,eps,
solution_containing_voxels)
return solution_containing_voxels
######### TODO: put the above methods in their own file.
def generate_k_func(pols=(1,1,-1),n_symb = None):
lambd,nu,nu1,nu2,nu3,nu4 = sp.symbols(
'lambda nu nu_1 nu_2 nu_3 nu_4')
l2 = lambd **2
if n_symb is None:
def n_symb(pol=1):
'''Valid for lambda between 0.5 and 5. (units are microns)'''
s = 1.
if pol == 1:
s += 2.6734 * l2 / (l2 - 0.01764)
s += 1.2290 * l2 / (l2 - 0.05914)
s += 12.614 * l2 / (l2 - 474.6)
else:
s += 2.9804 * l2 / (l2 - 0.02047)
s += 0.5981 * l2 / (l2 - 0.0666)
s += 8.9543 * l2 / (l2 - 416.08)
return sp.sqrt(s)
def k_symb(symbol=nu,pol=1):
'''k is accurate for nu inputs between 6-60 (units are 1e13 Hz).'''
return ((n_symb(pol=pol) * symbol )
.subs(lambd,scipy.constants.c / (symbol*1e7)))
expressions = [k_symb(nu1,pols[0]),
k_symb(nu2,pols[1]),
k_symb(nu3,pols[2])]
dispersion_difference_function = sum(expressions)
dispersion_difference_function = dispersion_difference_function.subs(
nu3,-nu1-nu2)
k_of_nu1_nu2 = ufuncify([nu1,nu2],
dispersion_difference_function)
return k_of_nu1_nu2
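## Usage sketch (added, not in the original file):
##   k_mismatch = generate_k_func(pols=(1,1,-1))
##   err = k_mismatch(7.,8.) ## phase mismatch for nu1=7, nu2=8 (units 1e13 Hz)
## Phase-matched pairs (nu1,nu2) are those where err is close to zero.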
def make_positive_keys_chi2(pos_nus_lst,chi,eps=1e-4,
starting_i = 0,max_i = 2,base = 10,pols = None):
'''
takes a Hamiltonian and a Chi_nonlin with chi_order = 2 and generates
pairs of positive numbers (nu1,nu2) corresponding to solutions
(nu1,nu2,-nu1-nu2) of the phase matching problem. Here,
the nu1,nu2 are selected from ham.omegas (assumed to be positive).
TODO: In the future, k_of_nu1_nu2 will be generated from
chi_nonlin.refraction_index_func in the form of a function
n_symb(pol=+1) -> sympy expression in lambd.
TODO:
if the error isn't being used, can use a set instead of a dict
for solution_containing_voxels.
'''
    if pols is None: ## some default values for polarization
pols = (1,1,-1)
### In the future, k_of_nu1_nu2 will be generated from chi_nonlin.refraction_index_func
k_of_nu1_nu2 = generate_k_func(pols)
## get the positive nus. Make a dict to the original index.
min_value = min(pos_nus_lst)
max_value = max(pos_nus_lst)
ranges = setup_ranges(max_i,base,min_value = min_value,max_value = max_value)
Delta = ranges[max_i][1] - ranges[max_i][0] ## spacing in grid used
## get index values
values = [ int(round( (freq - min_value) / Delta)) for freq in pos_nus_lst]
## make a dict to remember which frequencies belong in which grid voxel.
grid_indices_to_unrounded = make_dict_values_to_lists_of_inputs(values,pos_nus_lst)
grid_indices_to_ham_index = make_dict_values_to_lists_of_inputs(values,range(len(pos_nus_lst)))
solution_containing_voxels = voxel_solutions(ranges,k_of_nu1_nu2,
max_i,base,starting_i,eps)
## Let's figure out which indices we can expect for nu3
spacing = (max_value-min_value)/ pow(base,max_i+1)
num_indices_from_zero = min_value / spacing ## float, round up or down
solutions_nu1_and_nu2 = solution_containing_voxels[max_i].keys()
solution_indices = []
for indices in solutions_nu1_and_nu2:
for how_to_round_last_index in range(2):
last_index = (sum(indices)
+ int(num_indices_from_zero)
+ how_to_round_last_index)
            if last_index < 0 or last_index >= len(ranges[max_i]):
                break ## nu3 index falls outside the sampled grid
current_grid_indices = (indices[0],indices[1],last_index)
if all([ind in grid_indices_to_ham_index for ind in current_grid_indices]):
for it in itertools.product(*[grid_indices_to_ham_index[ind] for ind in current_grid_indices]):
solution_indices.append(it)
return solution_indices
if __name__ == "__main__":
pols = (1,1,-1)
k_of_nu1_nu2 = generate_k_func(pols)
eps = 0.006
starting_i = 0
max_i = 2
base = 10
min_value = 6.
max_value = 20.
ranges = setup_ranges(max_i,base,min_value,max_value)
solution_containing_voxels = voxel_solutions(ranges,k_of_nu1_nu2,
max_i,base,starting_i,eps)
| 6,774 | 37.714286 | 111 | py |
potapov_interpolation | potapov_interpolation-master/Potapov_Code/Time_Sims_nonlin.py | # -*- coding: utf-8 -*-
"""
Created on Mon Mar 31 2015
@author: gil
@title: Time_Sims_Nonlin.py
"""
import Roots
import Potapov
import Time_Delay_Network
import functions
import Hamiltonian
import numpy as np
import numpy.linalg as la
import sympy as sp
import itertools
import matplotlib.pyplot as plt
from scipy.integrate import ode
from scipy.integrate import quad
def make_f(eq_mot,B,a_in):
r'''Equations of motion, including possibly nonlinear internal dynamics.
Args:
eq_mot (function): The equations of motion, which map
:math:`(t,a) \to v`. Here \math:`t` is a scalar corresponding to time,
:math:`a` is an array of inputs correpsonding to the internal degrees
of freedom, and :math:`v` is a complex-valued column matrix describing
the gradient.
B (matrix): The matrix multiplying the inputs to the system.
a_in (function): The inputs to the system.
Returns:
(function):
A function that maps :math:`(t,a) \to f'(t,a)`, where t is a scalar
(time), and a is an array representing the state of the system.
'''
return lambda t,a: np.asarray(eq_mot(t,a)+B*a_in(t)).T[0]
def make_f_lin(A,B,a_in):
r'''Linear equations of motion
Args:
A (matrix): The matrix for the linear equations of motion:
:math:`\frac{d}{dt}\begin{pmatrix} a \\ a^+ \end{pmatrix} = A \begin{pmatrix} a \\ a^+ \end{pmatrix}+ B \breve a_{in} (t).`
B (matrix): The matrix multiplying the inputs to the system.
a_in (function): The inputs to the system :math:`\breve a`.
Returns:
(function);
A function that maps :math:`(t,a) \to f'(t,a)`, where t is a scalar
(time), and a is an array representing the state of the system.
'''
return lambda t,a: np.asarray(A*np.asmatrix(a).T+B*a_in(t)).T[0]
def run_ODE(f, a_in, C, D, num_of_variables, T = 10, dt = 0.01, y0 = None):
'''Run the ODE for the given set of equations and record the outputs.
Args:
f (function): Evolution of the system.
a_in (function): inputs as a function of time.
C,D (matrices): matrices to use to obtain output from system state and
input.
num_of_variables (int): number of variables the system has.
T (optional[positive float]): length of simulation.
        dt (optional[float]): time step used by the simulation.
        y0 (optional[column matrix]): initial state; defaults to zeros.
Returns:
(array):
An array Y of outputs.
'''
if y0 is None:
y0 = np.asmatrix([0.]*num_of_variables).T
r = ode(f).set_integrator('zvode', method='bdf')
r.set_initial_value(y0, 0.)
Y = []
while r.successful() and r.t < T:
Y.append(C*r.y+D*a_in(r.t))
r.integrate(r.t+dt)
return Y
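## Usage sketch (added; shapes are assumptions, not from the original file):
## for a linear system with matrices A,B,C,D and a constant drive,
##   a_in = lambda t: np.asmatrix([1.]*B.shape[1]).T
##   f = make_f_lin(A,B,a_in)
##   Y = run_ODE(f, a_in, C, D, num_of_variables=A.shape[0])
## Y[k] is the output matrix C*a + D*a_in at time k*dt.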
if __name__ == "__main__" and False:
Ex = Time_Delay_Network.Example3(r1 = 0.9, r3 = 0.9, max_linewidth=15.,max_freq=25.)
Ex.run_Potapov()
modes = functions.spatial_modes(Ex.roots,Ex.M1,Ex.E)
M = len(Ex.roots)
A,B,C,D = Ex.get_Potapov_ABCD(doubled=False)
A_d,B_d,C_d,D_d = Ex.get_Potapov_ABCD(doubled=True)
ham = Hamiltonian.Hamiltonian(Ex.roots,modes,Ex.delays,Omega=-1j*A)
ham.make_chi_nonlinearity(delay_indices=0,start_nonlin=0,
length_nonlin=0.1,indices_of_refraction=1.,
chi_order=3,chi_function=None,)
ham.make_H()
eq_mot = ham.make_eq_motion()
a_in = lambda t: np.asmatrix([1.]*np.shape(D_d)[-1]).T ## make a sample input function
## find f for the linear and nonlinear systems
    ## This module is being run directly, so call its functions without the
    ## module prefix (the original referenced Time_Sims_nonlin.* here).
    f = make_f(eq_mot,B_d,a_in)
    f_lin = make_f_lin(A_d,B_d,a_in)

    Y_lin = run_ODE(f_lin, a_in, C_d, D_d, 2*M, T = 15, dt = 0.01)
    Y_nonlin = run_ODE(f, a_in, C_d, D_d, 2*M, T = 15, dt = 0.01)

    for i in range(2):
        plt.plot([abs(y)[i][0,0] for y in Y_nonlin ])
plt.savefig('sample_graph'+str(i)+'.pdf',format='pdf')
| 4,036 | 29.816794 | 135 | py |
potapov_interpolation | potapov_interpolation-master/Potapov_Code/functions.py | # -*- coding: utf-8 -*-
"""
Created on Mon Mar 2 13:59:30 2015
@author: Gil Tabak
@title: Potapov
Functions used by other files.
"""
import numpy as np
import numpy.linalg as la
import scipy.constants as consts
from fractions import gcd
import time
import math
def make_dict_values_to_lists_of_inputs(values,inputs):
'''
Make a dictionary mapping value to lists of corresponding inputs.
Args:
values (list of floats):
Values in a list, corresponding to the inputs.
inputs (list of floats):
Inputs in a list.
Returns:
D (dict):
dictionary mapping value to lists of corresponding inputs.
'''
D = {}
for k, v in zip(values,inputs):
if not math.isnan(k):
D.setdefault(k, []).append(v)
return D
def timeit(method):
'''
from https://www.andreas-jung.com/contents/a-python-decorator-for-measuring-the-execution-time-of-methods
'''
def timed(*args, **kw):
ts = time.time()
result = method(*args, **kw)
te = time.time()
print '%r %2.2f sec' % \
(method.__name__, te-ts)
return result
return timed
def gcd_lst(lst):
l = len(lst)
if l == 0:
return None
elif l == 1:
return lst[0]
elif l == 2:
return gcd(lst[0],lst[1])
else:
return gcd(lst[0],gcd_lst(lst[1:]))
def der(f,z,eps = 1e-5):
'''
Estimate the derivative of the function f at z
Args:
f (function): the function to use.
z (complex number): point at which to evaluate derivative.
eps(optional[complex number]): number to perturb z to find derivative.
Returns:
Derivative of f. (complex):
'''
return (f(z+eps)-f(z-eps))/(2*eps)
def limit(f,z0,N=10,eps=1e-3):
'''
    Takes a possibly matrix-valued function f and its simple pole z0 and
    returns :math:`\lim_{z \to z_0} f(z)`. Estimates the value based on N
    surrounding points at a distance eps.
Args:
f (function): the function for which the limit will be found.
z0 (complex number): The value at which the limit is evaluated.
N (int): number of points used in the estimate.
eps (optional[float]):
distance from z0 at which estimating points are placed.
Returns:
Limit value (complex):
        The estimated value of :math:`\lim_{z \to z_0} f(z)`.
'''
t=np.linspace(0.,2.*np.pi*(N-1.)/N,num=N)
c=np.exp(1j*t)*eps
try:
s=sum(f(z0 + c_el) for c_el in c)/float(N)
return s
    except Exception:
        print "Something went wrong in estimating the limit."
return
def factorial(n):
'''Find the factorial of n.
Args:
n (integer).
Returns:
factorial of n. (int):
'''
end = 1
for k in xrange(1,n+1):
end *= k
return end
def pade_approx(n):
    '''Numerator coefficients of the symmetric Pade approximation of :math:`e^z` of order n.
Args:
n (integer).
Returns:
Coefficients for Pade approximation numerator. (float):
'''
output = [0]*(n+1)
for k in xrange(0,n+1):
output[n-k] = float(factorial(2*n-k)) * factorial(n) / \
float((factorial(2*n) )* factorial(k) * factorial(n - k) )
return output
def pade_roots(n):
'''Extract roots of Pade polynomial.
Args:
n (integer).
Returns:
Roots of Pade polynomial. (list of complex numbers) :
'''
return np.roots(pade_approx(n))
def Q(z,n):
r'''Numerator of Pade approximation of :math:`e^z`.
Args:
n (integer): order of approximation.
z (complex number): point of evaluation.
Returns:
Value of Numerator of Pade approximation. (float):
'''
coeffs = pade_approx(n)
sum = 0
for i in xrange(0,n+1):
sum += coeffs[i]*pow(z,n-i)
return sum
def Pade(n,z):
    r'''Pade approximation of :math:`e^z`.
Args:
n (integer): order of approximation
z (complex number): point of evaluation.
Returns:
Value of Pade approximation. (float):
'''
return Q(z,n)/Q(-z,n)
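## Example (added): Pade(3, 1.) approximates e = 2.71828...; the symmetric
## (n,n) Pade approximant of exp matches its Taylor series through order 2n.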
def double_up(M1,M2=None):
r'''
Takes a given matrix M1 and an optional matrix M2 and generates a
doubled-up matrix to use for simulations when the doubled-up notation
is needed. i.e.
.. math::
        \begin{pmatrix}
            M_1 & M_2
        \end{pmatrix}
        \to
        \begin{pmatrix}
            M_1 & M_2 \\
            M_2^\# & M_1^\#
        \end{pmatrix}
In the case M2 == None, it becomes replaced by the zero matrix.
Args:
M1 (matrix): matrix to double-up
M2 (matrix): optional second matrix to double-up
Returns:
(complex-valued matrix):
The doubled-up matrix.
'''
    if M2 is None:
M2 = np.zeros_like(M1)
top = np.hstack([M1,M2])
bottom = np.hstack([np.conj(M2),np.conj(M1)])
return np.vstack([top,bottom])
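## Example (added): double_up(np.matrix([[1j]])) returns
##   [[ 1j, 0 ],
##    [ 0, -1j ]]
## i.e. M1 and its conjugate on the diagonal blocks, with M2 (zero when
## omitted) on the off-diagonal blocks.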
def spatial_modes(roots,M1,E,delays=None):
'''
    Obtain the spatial mode profile at each node up to a constant.
If the delays are provided, the modes will be normalized using the delays.
Otherwise, the modes will not be normalized.
Args:
roots (list of complex numbers): The eigenvalues of the system.
M1 (matrix): The connectivity matrix among internal nodes.
E (matrix-valued function): Time-delay matrix.
delays (optional[list of floats]): List of delays in the network.
Returns:
A list of spatial eigenvectors. (list of complex-valued column matrices):
'''
spatial_vecs = []
for i in xrange(len(roots)):
evals,evecs = la.eig(M1*E(roots[i]))
spatial_vecs.append(evecs[:,np.argmin(abs(1.-evals))])
    if delays is None:
return spatial_vecs
if type(delays) != list:
raise Exception('delays must be a list of delays.')
for mode in spatial_vecs:
mode /= _norm_of_mode(mode,delays)
return spatial_vecs
def inner_product_of_two_modes(root1,root2,v1,v2,delays,eps=1e-7,
func=lambda z : z.imag):
'''
This function integrates two spatial modes against each other
along the various delays of the system. Each delay follows a
node in the system.
The frequency is assumed to be the imaginary part of each root.
Args:
root1,root2 (complex number): the two roots.
v1,v2 (column matrices):
the amplitude of each mode at the
various nodes.
delays (list of floats):
The duration of each delay following
each node in the system.
eps(optional[float]):
cutoff for two frequencies being equal
        func (optional[function]):
used to transform the roots. Default
value is set to lambda z: z.imag, meaning we take the frequency
of each mode.
Returns:
The inner product of the two modes. (complex):
Sanity check: if root1==root2 and v1==v2, returns real value.
'''
s = 0j
for delay,e1,e2 in zip(delays,v1,v2):
if abs(func(root1-root2)) < eps:
s+=e1*np.conj(e2)*delay
else:
s += (e1*e2.H*1j*(np.exp(-1j*delay*func(root1-root2)) - 1. )
/func(root1-root2) )
return s[0,0]
def _norm_of_mode(mode,delays):
'''
Find the norm of the given mode
Args:
mode (vector):
column of complex numbers describing the amplitude of
each mode at the various nodes.
delays (list of floats):
time delays in the network.
Returns:
norm (float):
the norm of the mode.
'''
return np.sqrt(inner_product_of_two_modes(0,0,mode,mode,delays))
def make_normalized_inner_product_matrix(roots,modes,delays,eps=1e-12,
func=lambda z : z.imag):
'''
Given a list of roots and a list of vectors representing the
electric field at each node of the corresponding nodes, compute
the normalized matrix representing the inner products among the
various modes.
TODO: add weights for different delays to account for geometry.
Args:
roots (list of complex numbers):
The roots of the various eigenmodes.
modes (list of column matrices):
the amplitudes of the modes at
various nodes.
delays (list of floats):
The duration of each delay following
each node in the system.
eps(optional[float]):
cutoff for two frequencies being equal.
        func (optional[function]):
used to transform the roots. Default
value is set to lambda z: z.imag, meaning we take the frequency
of each mode.
Returns:
inner product matrix (complex-valued matrix):
A matrix of normalized inner products representing the geometric
overlap of the various given modes in the system.
'''
dim = len(roots)
norms = [0]*dim
for i,(root,v) in enumerate(zip(roots,modes)):
norms[i] = inner_product_of_two_modes(root,root,v,v,delays,eps=eps,
func=func)
inner_prods = np.zeros((dim,dim),dtype='complex_')
for i in range(dim):
for j in range(dim):
inner_prods[i,j] = ((inner_product_of_two_modes(roots[i],roots[j],
modes[i],modes[j],delays)) /
np.sqrt(norms[i]*norms[j]) )
return inner_prods
def make_nonlinear_interaction(natural_freqs, modes, delays, delay_indices,
start_nonlin,length_nonlin,plus_or_minus_arr,
indices_of_refraction = None,
eps=1e-5):
'''
This function takes several (say M) natural_freqs and their corresponding modes,
as well as the (N) delay lengths of the network, and determines the term
we need to add to the Hamiltonian corresponding to the resulting
    nonlinearity. We assume there is a crystal that starts at start_nonlin
    along the indicated delay line and has length length_nonlin. The
    plus_or_minus_arr is an array of
    length M of 1 or -1 used to determine whether a mode corresponds to a
creation (1, a^\dag) or annihilation (-1,a) operator. The corresponding
electric field integrated will be E^\dag for 1 and E for -1.
The k-vectors are computed from the following formula:
k = omega / v_p = omega n(omega) / c.
If the indices of refraction n(omega_i) are given, we use them to compute
the phase-mismatch delta_k. Otherwise we assume they are all equal to 1.
Args:
natural_freqs (list of floats):
The natural frequencies of the
various eigenmodes (i.e. the imaginary component of each root).
modes (list of column matrices):
the amplitudes of the modes at
various nodes.
delays (list of floats):
The duration of each delay following
each node in the system.
delay_indices (int OR list/tuple of ints):
the index representing the
delay line along which the nonlinearity lies. If given a list/tuple
then the nonlinearity interacts the N different modes.
start_nonlin (float OR list/tuple of floats):
the beginning of the
nonlinearity. If a list/tuple then each nonlinearity begins at a
different time along its corresponding delay line.
length_nonlin (float):
duration of the nonlinearity in terms of length.
plus_or_minus_arr (array of 1s and -1s):
Creation/annihilation of
a photon in each of the given modes.
indices_of_refraction (float/int or list/tuple of float/int):
the
indices of refraction corresponding to the various modes. If float
or int then all are the same.
eps(optional[float]):
cutoff for two frequencies being equal.
Returns:
nonlinear interaction (complex):
strength of nonlinearity.
'''
M = len(natural_freqs)
if len(modes) != M:
raise Exception('number of modes different than number of natural_freqs.')
if type(delay_indices) == int:
delay_indices = [delay_indices] * M
elif not type(delay_indices) in [list,tuple]:
raise Exception('delay_indices must be an int or a list/tuple')
if type(start_nonlin) in [int,float]:
start_nonlin = [start_nonlin] * M
elif not type(start_nonlin) in [list,tuple]:
raise Exception('start_nonlin must be an int/float or a list/tuple')
if length_nonlin < 0:
raise Exception('length_nonlin must be greater than 0.')
for delay_index,start_loc in zip(delay_indices,start_nonlin):
if start_loc < 0:
raise Exception('each element of start_nonlin must be greater than 0.')
# Below is the condition we would need to check when
# the index of refraction is 1. In the case the index of refraction
# is different, length_nonlin is multiplied by the refractiive index.
# However, the duration of the delay lengthens by the same amount so the
# condition remains unchanged.
if length_nonlin / consts.c + start_loc > delays[delay_index]:
raise Exception('length_nonlin + start_loc must be less than the '
+'delay of index delay_index for start_loc in '
+'start_nonlin and delay_index in delay_indices.')
if indices_of_refraction is None:
indices_of_refraction = [1.] * M
elif type(indices_of_refraction) in [float,int]:
indices_of_refraction = [float(indices_of_refraction)] * M
elif not type(indices_of_refraction) in [list,tuple]:
raise Exception('indices_of_refraction is not a float, integer, list, '
+'tuple, or None.')
def pick_conj(m,sign):
if sign == 1:
return m
elif sign == -1:
return np.conj(m)
else:
raise Exception('bad input value -- must be 1 or -1.')
values_at_nodes = [m_vec[delay_index,0] for m_vec,delay_index
in zip(modes,delay_indices)]
delta_k = ( sum([n*omega*sign for n,omega,sign
in zip(indices_of_refraction,natural_freqs,plus_or_minus_arr)]))
const = np.prod([pick_conj(m*np.exp(-1j*delta_k*start_loc),sign)
for m,sign,start_loc
in zip(values_at_nodes,plus_or_minus_arr,start_nonlin)])
if abs(delta_k) < eps: ## delta_k \approx 0
# print "delta_k is less than epsilon, approximating linearly."
return const * length_nonlin
else:
return 1j*const*(np.exp(-1j*delta_k*length_nonlin) - 1 ) / delta_k
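## Consistency check for the closed form above (added worked equation):
##   integral_0^L exp(-1j*delta_k*x) dx = (exp(-1j*delta_k*L) - 1)/(-1j*delta_k)
##                                      = 1j*(exp(-1j*delta_k*L) - 1)/delta_k,
## which tends to L as delta_k -> 0, matching the branch used when
## abs(delta_k) < eps.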
| 14,913 | 30.006237 | 109 | py |
potapov_interpolation | potapov_interpolation-master/Potapov_Code/Time_Delay_Network.py | # -*- coding: utf-8 -*-
"""
Created on Mon Mar 2 17:37:37 2015
@author: gil
@title: examples
"""
import Roots
import Potapov
import numpy as np
import numpy.linalg as la
from numpy import pi
from colorsys import hls_to_rgb
import sympy as sp
import matplotlib.pyplot as plt
#import mpmath as mp ## for complex-valued plots
from functions import double_up
from functions import der
from functions import Pade
from functions import spatial_modes
from functions import gcd_lst
import matplotlib.patches as patches
from sympy.utilities.autowrap import ufuncify
from decimal import Decimal
def plot_all(L,dx,labels,colors,lw,name,*args):
'''
A method to plot the absolute value and phase of each component for a list
of matrix-valued functions in the complex plane along an axis.
Args:
L (float): plot from 0 to L.
dx (float): distance between points.
labels (list of str): labels to use.
colors (list of srt): indicators of color for different curves.
lw (float): line width to use.
name (str): name of the file to save.
* args (a list of functions): A list of functions to plot.
Returns:
None.
'''
delta = np.pi
    x = np.linspace(0,L,int(2.*L/dx))
[rows,cols] = args[0](0).shape
plt.figure(figsize=(18,12))
for k,func in enumerate([np.abs,np.angle]):
for i in xrange(rows):
for j in xrange(cols):
plt.subplot(2*rows,cols,1+j+(2*i+k)*cols)
plt.tight_layout()
s = "norm of component " \
if k == 0 else "phase of component "
s += "["+str(i)+","+str(j)+"]"
plt.tick_params(labelsize=20)
plt.title(s, fontsize=24)
plt.xlabel('frequency (rad)', fontsize=20)
for l,arg in enumerate(args):
y = np.asarray([func(arg(x_el*1j)[i,j]) for x_el in x ])
jumps = np.r_[0, np.where(np.abs(np.diff(y)) > delta)[0] + 1, y.size]
for m in range(jumps.size-1):
start, end = jumps[m:m + 2]
if m == 0:
plt.plot(x[start:end], y[start:end], colors[l],
label = labels[l], lw = lw)
else:
plt.plot(x[start:end], y[start:end], colors[l], lw = lw)
if k == 0: ## plotting abs
plt.axis([0,L,0,1])
else: ## ploting angle
plt.axis([0,L,-np.pi,np.pi])
art = []
lgd = plt.legend( loc=9, bbox_to_anchor=(0.5, -0.1),shadow=True, fancybox=True, fontsize=18)
art.append(lgd)
plt.savefig(name,additional_artists=art,
bbox_inches="tight")
return
class Time_Delay_Network():
'''
A class to contain the information of a passive linear network with time
delays.
Attributes:
max_freq (optional [float]): maximum height in the complex plane.
max_linewidth (optional [float]): maximum width in the complex plane.
N (optional [int]): number of points to use on the contour for finding
the roots/poles of the network.
center_freq (optional [float] ): how much to move the frame up or down
the complex plane.
'''
def __init__(self,max_freq=30.,max_linewidth=1.,N=1000,center_freq = 0.):
self.max_freq = max_freq
self.max_linewidth = max_linewidth
self.N = N
self.Potapov_ran = False
self.center_freq = center_freq
return
def _make_decimal_delays(self,):
self.Decimal_delays = map(lambda x: Decimal(str(x)),self.delays)
self.Decimal_gcd = self._find_commensurate(self.Decimal_delays)
def make_roots(self):
'''Generate the roots given the denominator of the transfer function.
'''
        self.roots = Roots.get_roots_rect(self.T_denom,self.Tp_denom,
                        -self.max_linewidth/2.,self.center_freq,
                        self.max_linewidth/2.,self.max_freq,N=self.N)
return
def _find_commensurate(self,delays):
'''
Find the 'gcd' but for Decimal numbers.
Args:
delays(list of Demicals): numbers whose gcd will be found.
Returns:
Decimal gcd.
'''
mult = min([d.as_tuple().exponent for d in delays])
power = 10**-mult
delays = map(lambda x: x*power,delays)
int_gcd = gcd_lst(delays)
return int_gcd/power
def _make_T_denom_sym(self,):
r'''
A method to prepare the symbolic expression T_denom_sym for further
computations. This expression represents the denominator in terms of
a symbol x, which represents the shortest time delay in the network.
'''
self._make_decimal_delays()
self.x = sp.symbols('x')
E_sym = sp.Matrix(np.zeros_like(self.M1))
for i,delay in enumerate(self.Decimal_delays):
E_sym[i,i] = self.x**int(delay / self.Decimal_gcd)
M1_sym = sp.Matrix(self.M1)
self.T_denom_sym = sp.apart((E_sym - M1_sym).det())
## I use apart above because sympy yields a function that is not
## completely reduced. Alternatively, can use *.as_numer_denom()
## and take the first component for the numerator. However, this could
## results in spurious roots if the denominator is nontrivial.
return
# def get_symbolic_frequency_perturbation_T_and_z(self,simplify = False):
# r'''
# A method to prepare the symbolic expression T_denom_sym for further
# computations. This expression represents the denominator in terms of
# the various delays :math:`T_1,...,T_k` and the complex variable
# :math:`z`.
#
# This method treats the various delays as separate variables.
# To get the expression we Taylor expand about both :math:`\Delta_z`
# as well as :math:`\Delta T_j`.
#
# Args:
# simplify (optiona[boolean]): simplify the output sympy expression.
# '''
# M = len(self.delays)
# self._make_decimal_delays()
# try:
# self.z,self.z_Delta,self.Ts,self.Ts_Delta
# except:
# self.z, self.z_Delta = sp.symbols('z dz')
# self.Ts = [sp.symbols('T_'+str(i)) for i in range(M)]
# self.Ts_Delta = [sp.symbols('dT_'+str(i)) for i in range(M)]
#
# xs = [sp.symbols('x_'+str(i)) for i in range(4) ]
# E_sym = sp.Matrix(np.zeros_like(self.M1))
# for i,delay in enumerate(self.Decimal_delays):
# E_sym[i,i] = xs[i]
# M1_sym = sp.Matrix(self.M1)
# num, den = (E_sym - M1_sym).det().as_numer_denom()
# D = {x: sp.exp(-self.z*T) for x,T in zip(xs,self.Ts)}
# exp_periodic = num.subs(D)
# T_expression = sum([exp_periodic.diff(T)*T_d
# for T,T_d in zip(self.Ts,self.Ts_Delta)])
#
# ## Next solve for the first-order perturbation.
# ## The commented-out line might be slow -- the code below does the same.
# #sol = sp.solve(T_expression + exp_periodic.diff(z)*z_Delta, z_Delta)[0]
#
# diff_z = exp_periodic.diff(self.z)
# T_temps = [sp.symbols('T_temp_'+str(i)) for i in range(M)]
# D_tmp = {self.z*T:T_t for T,T_t in zip(self.Ts,T_temps)}
# D_inv = {T_t:self.z*T for T,T_t in zip(self.Ts,T_temps)}
# D = {T:dT for T,dT in zip(self.Ts,self.Ts_Delta)}
# diff_z2 = diff_z.subs(D_tmp)
# diff_z3 = diff_z2.subs(D)
# diff_z4 = diff_z3.subs(D_inv)
# sol = -self.z*diff_z4 / diff_z
#
# return sp.simplify(sol) if simplify else sol
def get_symbolic_frequency_perturbation_z(self,):
r'''
A method to prepare the symbolic expression T_denom_sym for further
computations. This expression represents the denominator in terms of
the various delays :math:`T_1,...,T_k` and the complex variable
:math:`z`.
The inputs should be :math:`z,(T_1+\Delta T_1,...,T_k+\Delta T_k)`
This method treats the various delays as separate variables.
Returns:
A pair of two symbolic expressions (tuple):
T_denom_sym and its derivative in :math:`z`.
'''
M = len(self.delays)
self._make_decimal_delays()
try:
self.z,self.z_Delta,self.Ts
except:
self.z, self.z_Delta = sp.symbols('z dz',complex=True)
self.Ts = [sp.symbols('T_'+str(i),real=True) for i in range(M)]
xs = [sp.symbols('x_'+str(i)) for i in range(M) ]
E_sym = sp.Matrix(np.zeros_like(self.M1))
for i,delay in enumerate(self.Decimal_delays):
E_sym[i,i] = xs[i]
M1_sym = sp.Matrix(self.M1)
num, den = (E_sym - M1_sym).det().as_numer_denom()
D = {x: sp.exp(self.z*T) for x,T in zip(xs,self.Ts)}
exp_periodic = num.subs(D)
diff_z = exp_periodic.diff(self.z)
return (exp_periodic, diff_z)
# def get_frequency_pertub_func_T_and_z(self,simplify = False):
# sym_freq_pert = self.get_symbolic_frequency_perturbation_T_and_z(simplify = simplify)
# def new_func(z_num,Ts_num,Delta_Ts_num):
# D = {T:T_num for T,T_num in zip(self.Ts,Ts_num)}
# D.update({self.z:z_num})
# D.update({Delta_T:Delta_T_num for Delta_T,Delta_T_num in zip(self.Ts_Delta,Delta_Ts_num)})
# return complex((sym_freq_pert.subs(D)).evalf())
# return new_func
# ## lambdify below seems to ignore the imaginary part.
# #return sp.lambdify( (self.z,self.Ts,self.Ts_Delta), sym_freq_pert.expand(complex=True))
def _get_newtons_func(self,expression):
'''
Takes an expression in terms of Ts and sp.exp(-z*T) for T in Ts,
and returns a complex-valued function for it.
Here :math:`z = x + i y` is a complex number.
Args:
expression (sympy expression): A symbolic expression.
Returns:
a function in x,y,Ts that returns the value of the input expression.
'''
x,y = sp.symbols('x y', real = True)
D = {sp.exp(self.z*T): sp.exp(x*T)*(1j*sp.sin(y*T)+sp.cos(y*T)) for T in self.Ts}
expression2 = expression.subs(D)
num_real,num_imag = expression2.expand().as_real_imag()
f_r = ufuncify( [x,y]+self.Ts, num_real)
f_i = ufuncify( [x,y]+self.Ts, num_imag)
return lambda x,y,Ts: f_r(x,y,*Ts)+f_i(x,y,*Ts)*1j
def get_frequency_pertub_func_z(self,use_ufuncify = True):
'''
Generates a function that can be used to perturb roots using
Newton's method. This function has form :math:`-f(z) / f'(z)`
when the time delays are held fixed.
We give two ways to generate the perturbative function. One is
by directly plugging in numbers into a sympy expression and the
second is by using the ufuncify method to creative a wrapper for
the function.
Args:
use_ufuncify (optional [boolean]): whether to use ufuncify
or not.
Returns:
Newton's method function (function):
The function to use for Newton's method in :math:`z`,
:math:`-f(z) / f'(z)`.
'''
sym_freq_pert = self.get_symbolic_frequency_perturbation_z()
if not use_ufuncify:
sym_freq_pert = -sym_freq_pert[0] / sym_freq_pert[1]
def func(z_num,Ts_num):
D = {T:T_num for T,T_num in zip(self.Ts,Ts_num)}
D.update({self.z:z_num})
return complex((sym_freq_pert.subs(D)).evalf())
return func
else:
real_imag_func_num = self._get_newtons_func(sym_freq_pert[0])
real_imag_func_denom = self._get_newtons_func(sym_freq_pert[1])
def func(z_num,Ts_num):
x,y = z_num.real,z_num.imag
return - ( real_imag_func_num(x,y,Ts_num) /
real_imag_func_denom (x,y,Ts_num) )
return func
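    ## Newton-iteration sketch (added; usage is an assumption, not from the
    ## original file): since func returns -f(z)/f'(z), a root for perturbed
    ## delays Ts_num can be tracked via
    ##   step = net.get_frequency_pertub_func_z()
    ##   z = z0
    ##   for _ in range(20):
    ##       z = z + step(z, Ts_num)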
def get_minimizing_function_z(self,):
r'''
Minimizing this function gives the adjusted roots.
Gives a function to minimize, its arguments are
:math:`x,y,Ts`. Also gives its derivative.
Returns:
Minimizing function (function):
A function of :math:`x,y,*Ts` to minimize in the two variables,
:math:`x,y`.
'''
expression = self.get_symbolic_frequency_perturbation_z()[0]
x,y = sp.symbols('x y', real = True)
D = {sp.exp(self.z*T): sp.exp(x*T)*(1j*sp.sin(y*T)+sp.cos(y*T)) for T in self.Ts}
expression2 = expression.subs(D)
num_real,num_imag = expression2.expand().as_real_imag()
        ## Gradient of |f|^2 = num_real**2 + num_imag**2; the original code
        ## dropped the num_imag term from d/dx and differentiated num_real
        ## (rather than num_imag) with respect to y.
        diff_x = num_real*num_real.diff(x) + num_imag*num_imag.diff(x)
        diff_y = num_real*num_real.diff(y) + num_imag*num_imag.diff(y)
        func = ufuncify( [x,y]+self.Ts, num_real**2 + num_imag**2)
        dfunc_x = ufuncify( [x,y]+self.Ts, 2*diff_x)
        dfunc_y = ufuncify( [x,y]+self.Ts, 2*diff_y)
return func, lambda x,y,*Ts: np.asarray([dfunc_x(x,y,*Ts),dfunc_y(x,y,*Ts)])
def _find_instances_in_range_good_initial_point(self,z,freq_range,T):
'''
Find numbers of the form :math:`z + Tni` where :math:`T` is the
period and :math:`n` is an integer inside the given frequency range.
Assumes the given z is in the desired frequency range.
Args:
z (complex number).
freq_range (2-tuple): (minimum frequency, maximum frequency).
Returns:
(list of complex numbers):
list of numbers of the desired form.
'''
lst_in_range = [z]
num_below = int((z.imag - freq_range[0])/T )
num_above = int((freq_range[1] - z.imag)/T )
above_range = (np.asarray(range(num_above))+1) * T
below_range = (np.asarray(range(num_below))+1) * T
lst_in_range += [z + 1j * disp for disp in above_range]
lst_in_range += [z - 1j * disp for disp in below_range]
return lst_in_range
def _find_instances_in_range(self,z,freq_range,T):
'''
Find numbers of the form :math:`z + Tni` where :math:`T` is the
period and :math:`n` is an integer inside the given frequency range.
Args:
z (complex number).
freq_range (2-tuple): (minimum frequency, maximum frequency).
Returns:
(list of complex numbers):
list of numbers of the desired form. Empty list if none exist.
'''
if z.imag >= freq_range[0] and z.imag <= freq_range[1]:
return self._find_instances_in_range_good_initial_point(z,freq_range,T)
elif z.imag > freq_range[1]:
min_dist = (int((z.imag - freq_range[1])/T)+1) * T
max_dist = int((z.imag - freq_range[0]) / T) * T
if min_dist > max_dist:
return []
else:
return self._find_instances_in_range_good_initial_point(
z - 1j*min_dist,freq_range,T)
else:
min_dist = (int((freq_range[0] - z.imag)/T)+1) * T
max_dist = int((freq_range[1] - z.imag)/T) * T
if min_dist > max_dist:
return []
else:
return self._find_instances_in_range_good_initial_point(
z + 1j*min_dist,freq_range,T)
def make_commensurate_roots(self,list_of_ranges = []):
'''
Assuming the delays are commensurate, obtain all the roots within the
frequency ranges of interest. Sets self.roots a list of complex roots
in the desired frequency ranges.
Args:
list_of_ranges (optional [list of 2-tuples]): list of frequency
ranges of interest in the form:
(minimum frequency, maximum frequency).
Returns:
None.
'''
self._make_T_denom_sym()
poly = sp.Poly(self.T_denom_sym, self.x)
poly_coeffs = poly.all_coeffs()
roots = np.roots(poly_coeffs)
zs = np.asarray(map(lambda r: np.log(r) / float(self.Decimal_gcd),
roots))
T_gcd = 2.*np.pi / float(self.Decimal_gcd)
self.map_root_to_commensurate_index = {}
lst_to_return = []
for freq_range in list_of_ranges:
for i,r in enumerate(zs):
prev_len = len(lst_to_return)
new_roots = self._find_instances_in_range(r,freq_range,T_gcd)
len_new_roots = len(new_roots)
lst_to_return += new_roots
for j in range(prev_len,prev_len + len_new_roots):
self.map_root_to_commensurate_index[j] = i
self.roots = lst_to_return
self.commensurate_roots = zs
return
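    ## How the commensurate search works (added summary): substituting
    ## x = exp(z*Decimal_gcd) turns the characteristic function into a
    ## polynomial in x; each polynomial root r yields the root family
    ##   z = log(r)/Decimal_gcd + 2*pi*1j*n/Decimal_gcd, n an integer,
    ## and _find_instances_in_range enumerates the members of each family
    ## inside the requested frequency windows.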
def make_commensurate_vecs(self,):
self.commensurate_vecs = Potapov.get_Potapov_vecs(
self.T,self.commensurate_roots)
self.vecs = map(
lambda i: self.commensurate_vecs[self.map_root_to_commensurate_index[i]],
range(len(self.roots)) )
return
def make_T_Testing(self):
'''Generate the approximating transfer function using the identified
poles of the transfer function.
'''
self.T_testing = Potapov.get_Potapov(self.T,self.roots,self.vecs)
return
def make_vecs(self):
'''Generate an ordered list of vectors representing the form of the
Potapov factors.
'''
self.vecs = Potapov.get_Potapov_vecs(self.T,self.roots)
return
def make_spatial_modes(self,):
'''Generate the spatial modes of the network.
'''
self.spatial_modes = spatial_modes(self.roots,self.M1,self.E,delays=self.delays)
return
def run_Potapov(self, commensurate_roots = False, filtering_roots = True):
'''Run the entire Potapov procedure to find all important information.
The generated roots, vecs, approximated transfer function T_Testing,
and the spatial_modes are all stored in the class.
Args:
commensurate_roots (optional[boolean]): which root-finding method
to use.
filtering_roots (optional[boolean]): makes sure the poles of the
transfer function all have negative real part. Drops ones that
might not.
Returns:
None.
'''
self.Potapov_ran = True
if commensurate_roots:
self.make_commensurate_roots([(-self.max_freq,self.max_freq)])
if filtering_roots:
self.roots = [r for r in self.roots if r.real <= 0]
self.make_commensurate_vecs()
else:
self.make_roots()
if filtering_roots:
self.roots = [r for r in self.roots if r.real <= 0]
self.make_vecs()
self.make_T_Testing()
self.make_spatial_modes()
return
def get_outputs(self):
'''Get some of the relevant outputs from the Potapov procedure.
Returns:
`self.T`,`self.T_testing`,`self.roots`,`self.vecs` (tuple):
The original transfer function, the approximating generated
transfer function, the identified poles of the transfer function,
and the vectors representing the form of the Potapov factors.
Raises:
Exception: Must have `self.T,self.T_testing,self.roots,self.vecs`.
'''
if self.Potapov_ran:
return self.T,self.T_testing,self.roots,self.vecs
else:
raise Exception("Must run Potapov to get outputs!!!")
return
def get_Potapov_ABCD(self,z=0.,doubled=False):
'''
Find the ABCD matrices from the Time_Delay_Network.
Args:
z (optional [complex number]): location where to estimate D.
Return:
(tuple of matrices):
A,B,C,D matrices.
'''
A,B,C,D = Potapov.get_Potapov_ABCD(self.roots,self.vecs,self.T,z=z)
if not doubled:
return A,B,C,D
else:
A_d,C_d,D_d = map(double_up,(A,C,D))
B_d = -double_up(C.H)
return A_d,B_d,C_d,D_d
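## Usage sketch for Time_Delay_Network (added, not in the original file):
##   net = Example3(max_freq=50., max_linewidth=3.)
##   net.run_Potapov()
##   T, T_approx, roots, vecs = net.get_outputs()
##   A,B,C,D = net.get_Potapov_ABCD(doubled=False)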
class Example1(Time_Delay_Network):
'''
Single input, single output with a single delay.
'''
def __init__(self, max_freq=30.,max_linewidth=1.,N=1000, center_freq = 0.,
tau = 0.3,r = 0.8):
Time_Delay_Network.__init__(self, max_freq,max_linewidth,N,center_freq)
self.tau = tau
self.delays = [tau]
self.r = r
self.M1=np.matrix([[r]])
self.E = lambda z: np.exp(-z*self.tau)
self.T = lambda z: np.matrix([(np.exp(-z*self.tau) - self.r)/
(1.-self.r* np.exp(-z*self.tau))])
self.T_denom = lambda z: (1.-self.r* np.exp(-z*self.tau))
self.Tp_denom = lambda z: der(self.T_denom,z)
class Example2(Time_Delay_Network):
'''
Two inputs, two outputs with a delay (i.e. Fabry-Perot).
'''
def __init__(self, max_freq=10.,max_linewidth=10.,N=1000, center_freq = 0.,
r=0.9,tau = 1.):
Time_Delay_Network.__init__(self, max_freq,max_linewidth,N,center_freq)
self.r = r
self.delays = [tau]
e = lambda z: np.exp(-z*tau)
dim = 2
self.M1 = np.matrix([[0,r],[r,0]])
self.E = lambda z: np.matrix([[e(z),0],[0,e(z)]])
self.T_denom = lambda z: (1.-r**2* e(z)**2)
self.T = lambda z: -r*np.eye(dim) + ((1.-r**2.)/self.T_denom(z)) * \
np.matrix([[r*e(z)**2,e(z)],[e(z),r*e(z)**2]])
self.Tp_denom = lambda z: der(self.T_denom,z)
class Example3(Time_Delay_Network):
'''
Two inputs and two outputs, with four delays and third mirror
This corresponds to figures 7 and 8 in our paper.
'''
def __init__(self, max_freq=60.,max_linewidth=1.,N=5000, center_freq = 0.,
r1=0.9,r2=0.4,r3=0.8,
tau1 = 0.1, tau2 = 0.23,tau3 = 0.1,tau4 = 0.17,
):
Time_Delay_Network.__init__(self, max_freq,max_linewidth,N,center_freq)
self.r1 = r1
self.r2 = r2
self.r3 = r3
self.delays =[tau1,tau2,tau3,tau4]
t1 = np.sqrt(1-r1**2)
t2 = np.sqrt(1-r2**2)
t3 = np.sqrt(1-r3**2)
dim = 4
self.M1 = np.matrix([[0,-r1,0,0],
[-r2,0,t2,0],
[0,0,0,-r3],
[t2,0,r2,0]])
self.M2 = np.matrix([[t1,0],
[0,0],
[0,t3],
[0,0]])
self.M3 = np.matrix([[0,t1,0,0],
[0,0,0,t3]])
self.M4 = np.matrix([[r1,0],
[0,r3]])
E = lambda z: np.matrix([[np.exp(-tau1*z),0,0,0],
[0,np.exp(-tau2*z),0,0],
[0,0,np.exp(-tau3*z),0],
[0,0,0,np.exp(-tau4*z)]])
self.E = E
self.T_denom = lambda z: la.det(np.eye(dim) - self.M1*E(z))
self.Tp_denom = lambda z: der(self.T_denom,z)
self.T = lambda z: self.M3*E(z)*la.inv(np.eye(dim) - self.M1*E(z))*self.M2+self.M4
class Example4(Time_Delay_Network):
'''
Two inputs and two outputs, with free delay (i.e. not in a loop).
This corresponds to figures 9 and 10 in our paper.
'''
def __init__(self, max_freq=100.,max_linewidth=3.,N=5000,center_freq = 0.):
Time_Delay_Network.__init__(self, max_freq,max_linewidth,N,center_freq)
tau1 = 0.1
tau2 = 0.039
tau3 = 0.11
tau4 = 0.08
self.delays = [tau1,tau2,tau3,tau4]
r = 0.9
t = np.sqrt(1-r**2)
dim = 4
M1 = np.matrix([[0,0,-r,0],
[r,0,0,0],
[0,r,0,t],
[t,0,0,0]])
self.M1 = M1
M2 = np.matrix([[t,0],
[0,t],
[0,0],
[0,-r]])
M3 = np.matrix([[0,0,t,0],
[0,t,0,-r]])
M4 = np.matrix([[r,0],
[0,0]])
E = lambda z: np.matrix([[np.exp(-tau1*z),0,0,0],
[0,np.exp(-tau2*z),0,0],
[0,0,np.exp(-tau3*z),0],
[0,0,0,np.exp(-tau4*z)]])
self.E = E
self.T_denom = lambda z: la.det(np.eye(dim) - M1*E(z))
self.Tp_denom = lambda z: der(self.T_denom,z)
self.T = lambda z: M3*E(z)*la.inv(np.eye(dim) - M1*E(z))*M2+M4
class Example5(Time_Delay_Network):
'''
Modified example 4, with analytic term.
'''
def __init__(self, max_freq=50.,max_linewidth=3.,N=1000,center_freq = 0.,):
Time_Delay_Network.__init__(self, max_freq ,max_linewidth,N,center_freq)
tau1 = 0.1
tau2 = 0.039
tau3 = 0.11
tau4 = 0.08
self.delays = [tau1,tau2,tau3,tau4]
r = 0.9
t = np.sqrt(1-r**2)
dim = 4
M1 = np.matrix([[0,0,-r,0],
[r,0,0,0],
[0,r,0,t],
[t,0,0,0]])
self.M1=M1
M2 = np.matrix([[t,0],
[0,t],
[0,0],
[0,-r]])
M3 = np.matrix([[0,0,t,0],
[0,t,0,-r]])
M4 = np.matrix([[r,0],
[0,0]])
E = lambda z: np.matrix([[np.exp(-(tau1+tau4)*z),0,0,0],
[0,np.exp(-(tau2-tau4)*z),0,0],
[0,0,np.exp(-tau3*z),0],
[0,0,0,1.]])
self.E=E
self.T_denom = lambda z: la.det(np.eye(dim) - M1*E(z))
self.Tp_denom = lambda z: der(self.T_denom,z)
self.T = lambda z: M3*E(z)*la.inv(np.eye(dim) - M1*E(z))*M2+M4
def example6_pade():
'''
This example is the same as example 3, but we return a Pade approximation
instead of a Potapov approximation. Instead of returnings roots, etc., we
return a different kind of function (see below).
This is used for figure 14 of our paper.
Returns:
T (matrix-valued function in complex number `z` and integer `n`):
An approximation to :math:`T(z)` using Pade approximation of
order :math:`n`.
'''
tau1 = 0.1
tau2 = 0.23
tau3 = 0.1
tau4 = 0.17
r1 = 0.9
r2 = 0.4
r3 = 0.8
t1 = np.sqrt(1-r1**2)
t2 = np.sqrt(1-r2**2)
t3 = np.sqrt(1-r3**2)
dim = 4
M1 = np.matrix([[0,-r1,0,0],
[-r2,0,t2,0],
[0,0,0,-r3],
[t2,0,r2,0]])
M2 = np.matrix([[t1,0],
[0,0],
[0,t3],
[0,0]])
M3 = np.matrix([[0,t1,0,0],
[0,0,0,t3]])
M4 = np.matrix([[r1,0],
[0,r3]])
def E(z,n):
taus = [tau1,tau2,tau3,tau4]
tau_tot = sum(taus)
ns = [np.int(np.round(n*t)) for t in taus]
while (sum(ns) < n):
j = np.argmax([abs(t/tau_tot - float(i)/n) for t,i in zip(taus,ns)])
ns[j] +=1
while (sum(ns) > n):
j = np.argmax([abs(t/tau_tot - float(i)/n) for t,i in zip(taus,ns)])
ns[j] -=1
return np.matrix([[Pade(ns[0],-z*tau1),0,0,0],
[0,Pade(ns[1],-z*tau2),0,0],
[0,0,Pade(ns[2],-tau3*z),0],
[0,0,0,Pade(ns[3],-tau4*z)]])
T = lambda z,n: M3*E(z,n)*la.inv(np.eye(dim) - M1*E(z,n))*M2+M4
return T
def plot3D(f,N = 2000,x_min = -1.5, x_max = 1.5, y_min = -25., y_max = 25., name = "complex_plane_plot.pdf",
            title = 'Complex Frequency Response Diagram',
xlabel = r'Cavity damping rate ($\kappa$)',
ylabel = r'Detuning ($\Delta$)'):
'''
Make a color and hue plot in the complex plane for a given function.
Used code from http://stackoverflow.com/questions/17044052/mathplotlib-imshow-complex-2d-array
Args:
f (function): to plot.
N(optional[int]): number of points to use per dimension.
Returns:
None.
'''
def colorize(z):
r = np.abs(z)
arg = np.angle(z)
h = (arg) / (2 * pi) + 0.5
l = 1.0 - 1.0/(1.0 + r**0.3)
s = 0.8
c = np.vectorize(hls_to_rgb) (h,l,s) # --> tuple
c = np.array(c) # --> array of (3,n,m) shape, but need (n,m,3)
c = c.swapaxes(0,2)
return c
x,y = np.ogrid[x_min:x_max:N*1j, y_min:y_max:N*1j]
z = x + 1j*y
f_vec = np.vectorize(f)
w = f_vec(z)
img = colorize(w)
plt.imshow(img, extent = [x_min,x_max,y_min,y_max], aspect = 0.1)
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.savefig(name)
plt.show()
#fig = mp.cplot(f, [-10,10], [-10, 10], points = points)
# fig.savefig("complex_plane_plot.pdf")
# return
if __name__ == "__main__":
    print 'Running Time_Delay_Network.py'
################
## Plot for complex-valued functions with and without time delay
## This part uses mpmath
################
# tau=1.; r=0.8
# E = Example1(max_freq = 500., max_linewidth = 1.0,tau=tau,r=r)
# E.run_Potapov()
#
# T,T_1,roots1,vecs1 = E.get_outputs()
#
# f = lambda z: (mp.exp(-z*tau) - r)/(1.-r* mp.exp(-z*tau))*mp.exp(-z*tau)
# f2 = lambda z: (mp.exp(-z*tau) - r)/(1.-r* mp.exp(-z*tau))
# plot3D(f,N = 50)
## plot3D(f2,N = 500000)
################
## Plot for complex-valued functions for example 3.
################
E = Example3(max_freq = 50,max_linewidth = 3.0)
E.run_Potapov(commensurate_roots = True)
T,T_1,roots1,vecs1 = E.get_outputs()
f = lambda z: la.det(T(z))
plot3D(f,N = 1000,x_min = 0., x_max = 3.0, y_min = -35.,y_max = 35.)
# plot3D(f2,N = 500000)
################
## Input/output plot for example 1
################
# L = 100.
# dx = 0.3
# freqs = [30.,50.,80.,100.]
# T_ls = []; roots_ls = []; vecs_ls = []
#
# for freq in freqs:
# E = Example1(max_freq = freq)
# E.run_Potapov()
# T,T_,roots,vecs = E.get_outputs()
# T_ls.append(T_)
# roots_ls.append(roots)
# vecs_ls.append(vecs)
#
# labels = ['Original T'] + ['Potapov T of Order '+str(len(r))
# for r in roots_ls]
# colors = ['b','r--','y--','m--','c--']
#
# plot_all(L,dx,labels,colors,0.5,'example_tmp.pdf',T,*T_ls)
################
## Input/output plot for example 3
################
# L = 100.
# dx = 0.3
# freqs = [30.,50.,80.,100.]
# T_ls = []; roots_ls = []; vecs_ls = []
#
# for freq in freqs:
# E = Example3(max_freq = freq)
# E.run_Potapov()
# T,T_,roots,vecs = E.get_outputs()
# T_ls.append(T_)
# roots_ls.append(roots)
# vecs_ls.append(vecs)
#
# labels = ['Original T'] + ['Potapov T of Order '+str(len(r))
# for r in roots_ls]
# colors = ['b','r--','y--','m--','c--']
#
# plot_all(L,dx,labels,colors,0.5,'figure_8_v3.pdf',T,*T_ls)
###########
## Testing example 3 as above, but now using commensurate roots.
###########
# L = 1000.
# dx = 0.5
# freqs = [300.,500.,800.,1000.]
# T_ls = []; roots_ls = []; vecs_ls = []
#
# for freq in freqs:
# E = Example3(max_freq = freq)
# E.run_Potapov(commensurate_roots=True)
# T,T_,roots,vecs = E.get_outputs()
# T_ls.append(T_)
# roots_ls.append(roots)
# vecs_ls.append(vecs)
#
# labels = ['Original T'] + ['Potapov T of Order '+str(len(r))
# for r in roots_ls]
# colors = ['b','r--','y--','m--','c--']
#
# plot_all(L,dx,labels,colors,0.5,'figure_8_commensurate.pdf',T,*T_ls)
################
## Input/output plot for example 4
################
# L = 100.
# dx = 0.05
# freqs = [50.,80.,100.,125.]
# T_ls = []; roots_ls = []; vecs_ls = []
#
# for freq in freqs:
# E = Example4(max_freq = freq)
# E.run_Potapov()
# T,T_,roots,vecs = E.get_outputs()
# T_ls.append(T_)
# roots_ls.append(roots)
# vecs_ls.append(vecs)
#
# E5 = Example5(max_freq=30.)
# E5.run_Potapov()
# T_correct,T_1_correct,roots1_correct,vecs1_correct = E5.get_outputs()
# T_ls = [T_correct] + T_ls
#
# labels = ['Original T','T with feedforward removed'] + \
# ['Potapov T of Order ' +str(len(r)) for r in roots_ls]
# colors = ['b','black','r--','y--','m--','c--']
#
# plot_all(L,dx,labels,colors, 0.5,'figure_10_v4.pdf',
# T,*T_ls)
#################
### Input/output plot for example 5
#################
#
# L = 100.
# dx = 0.05
# freqs = [30.,50.,65.,80.]
# T_ls = []; roots_ls = []; vecs_ls = []
#
# for freq in freqs:
# E = Example5(max_freq = freq)
# E.run_Potapov()
# T,T_,roots,vecs = E.get_outputs()
# T_ls.append(T_)
# roots_ls.append(roots)
# vecs_ls.append(vecs)
#
# labels = ['Original T'] + ['Potapov T of Order '+str(len(r))
# for r in roots_ls]
# colors = ['b','r--','y--','m--','c--']
#
# plot_all(L,dx,labels,colors,0.5,'example_tmp2.pdf',T,*T_ls)
#################
### Input/output plot for example 6
#################
# E = Example3()
# T_orig,T_1,roots1,vecs1 = E.get_outputs()
# T = example6_pade()
#
# L = 100.
# dx = 0.05
# ns = [9,15,19]
#
# args = [T_orig]+[lambda z: T(z,9),lambda z: T(z,15),lambda z: T(z,19)]
# lw = 0.5
# name = "figure_14_v3.pdf"
#
# labels = ['Original T'] + ['Pade T of order ' + str(n) for n in ns]
# colors = ['b','k--','r--','y--']
#
# plot_all(L,dx,labels,colors,0.5,'figure_14_v3.pdf',*args)
###########
## make scatter plot for the roots and poles of example 3
###########
# E = Example3(max_freq = 400.)
# E.run_Potapov()
# T,T_3,roots3,vecs3 = E.get_outputs()
# fig = plt.figure(figsize=(3,10))
#
# ax2 = fig.add_subplot(111)
# ax2.add_patch(
# patches.Rectangle(
# (0.3,-0),
# 0.5,
# 100,
# fill=False # remove background
# )
# )
#
# ax2 = fig.add_subplot(111)
# ax2.add_patch(
# patches.Rectangle(
# (0.3,-0),
# 0.5,
# 200,
# fill=False # remove background
# )
# )
# fig.suptitle('Zero-pole scatter plot', fontsize=20)
#
#
# plt.xlim(-1.,1.)
# plt.ylim(-400,400)
#
# plt.xlabel('linewidth', fontsize=18)
# plt.ylabel('frequency', fontsize=16)
# plt.scatter(map(lambda z: z.real, roots3),map(lambda z: z.imag, roots3))
# poles = map(lambda z: -z, roots3)
#
# plt.scatter(map(lambda z: z.real, poles),map(lambda z: z.imag, poles),c="red")
#
# plt.show()
##########
## make scatter plot for the roots and poles of example 4
##########
#
#print "making scatter plot for example 4"
#
#import matplotlib.patches as patches
#
#
# E = Example4(max_freq = 400.)
# E.run_Potapov()
# T,T_4,roots4,vecs4 = E.get_outputs()
#scl = 1
#fig = plt.figure(figsize=(6*scl,10*scl))
#
#
#ax2 = fig.add_subplot(111)
#ax2.add_patch(
# patches.Rectangle(
# (-2.9,-50),
# 5.8,
# 100,
# linestyle = 'dashed',
# color = 'red',
# fill=False # remove background
# )
#)
#
#ax2.add_patch(
# patches.Rectangle(
# (-2.95,-100),
# 5.9,
# 200,
# linestyle = 'dashed',
# color = 'blue',
# fill=False # remove background
# )
#)
#
#ax2.add_patch(
# patches.Rectangle(
# (-3,-150),
# 6,
# 300,
# linestyle = 'dashed',
# color = 'green',
# fill=False # remove background
# )
#)
#
#fig.suptitle('Pole-zero plot in the s-plane', fontsize=20)
#
#
#plt.xlim(-4.,4.)
#plt.ylim(-200,200)
#
#plt.axhline(0, color='black')
#plt.axvline(0, color='black')
#
#plt.xlabel('Re(s)', fontsize=18)
#plt.ylabel('Im(s)', fontsize=16)
#xs = plt.scatter(map(lambda z: z.real, roots4),map(lambda z: z.imag, roots4),
# s=80, facecolors='none', edgecolors='black',label='zero')
#poles = map(lambda z: -z, roots4)
#
#os = plt.scatter(map(lambda z: z.real, poles),map(lambda z: z.imag, poles),
# s=80,c="black",marker='x',label='pole')
#
#plt.rcParams['legend.scatterpoints'] = 1
#
#plt.legend( handles=[xs,os])
#
#plt.savefig('eg4_scatter.pdf')
#
#plt.show()
| 37,357 | 32.385165 | 108 | py |
potapov_interpolation | potapov_interpolation-master/Potapov_Code/Hamiltonian.py | # -*- coding: utf-8 -*-
"""
Created on Mon Mar 31 2015
@author: gil
@title: Hamiltonian.py
"""
import Roots
import Potapov
import Time_Delay_Network
import functions
import phase_matching
import phase_matching_hash
import numpy as np
import numpy.linalg as la
import sympy as sp
import scipy.constants as consts
from scipy.optimize import minimize
import itertools
import copy
#from sympy.printing.theanocode import theano_function
from sympy.physics.quantum import Dagger as sp_Dagger
from sympy.physics.quantum.boson import *
from sympy.physics.quantum.operatorordering import *
from qnet.algebra.circuit_algebra import *
class Chi_nonlin():
r'''Class to store the information in a particular nonlinear chi element.
Attributes:
delay_indices (list of indices):
indices of delays to use.
start_nonlin (positive float or list of positive floats):
location of
nonlinear crystal with respect to each edge.
length_nonlin (float):
length of the nonlinear element.
refraction_index_func (function):
the index of refraction as a
function of the natural frequency :math:`\omega` AND the polarization pol.
chi_order (optional [int]):
order of nonlinearity.
chi_function (optional [function]):
strength of nonlinearity.
first (chi_order+1) args are frequencies,
next (chi_order+1) args are indices of polarization.
'''
def __init__(self,delay_indices,start_nonlin,length_nonlin,
refraction_index_func = None,
chi_order=3,chi_function = None):
self.delay_indices = delay_indices
self.start_nonlin = start_nonlin
self.length_nonlin = length_nonlin
self.chi_order = chi_order
if refraction_index_func is None:
self.refraction_index_func = lambda *args: 1.
else:
self.refraction_index_func = refraction_index_func
if chi_function is None:
self.chi_function = lambda *args: 1.
else:
self.chi_function = chi_function
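## A minimal construction sketch (all values are hypothetical, for
## illustration only):
##
## chi2 = Chi_nonlin(delay_indices = [0], start_nonlin = 0.,
## length_nonlin = 0.1,
## refraction_index_func = lambda omega, pol: 2.2,
## chi_order = 2,
## chi_function = lambda *args: 1e-12)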
class Hamiltonian():
'''A class to create a sympy expression for the Hamiltonian of a network.
Attributes:
roots (list of complex numbers):
the poles of the transfer function.
omegas (list of floats):
The natural frequencies of the modes.
modes (list of complex-valued column matrices):
Modes of the network.
delays (list of floats):
The delays in the network.
Omega (optional [matrix]):
Quadratic Hamiltonian term for linear
dynamics.
nonlin_coeff (optional [float]):
Overall scaling for the nonlinearities.
polarizations (optional [list]):
The polarizations of the respective
modes. These should match the arguments in Chi_nonlin.chi_func.
cross_sectional_area (float):
Area of beams, used to determines the
scaling for the various modes.
chi_nonlinearities (list):
A list of Chi_nonlin instances.
TODO: Return L operator for QNET.
TODO: decide what to do with roots of negative imaginary part (negative freq.)
TODO: maybe re-organize the data into a dict of the form mode_inde: (root,mode,etc)
TODO: Replace python floats by mpmath variables for arbitrary precision
'''
def __init__(self,roots,modes,delays,
Omega = None,
nonlin_coeff = 1.,polarizations = None,
cross_sectional_area = 1e-10,
chi_nonlinearities = None,
using_qnet_symbols = False,
):
self.roots = roots
self._update_omegas()
self.m = len(roots)
self.modes = modes
self.delays = delays
self.cross_sectional_area = cross_sectional_area
self.Delta_delays = np.zeros((self.m,len(self.delays)))
self.volumes = self.mode_volumes()
self.normalize_modes()
self.E_field_weights = self.make_E_field_weights()
self.using_qnet_symbols = using_qnet_symbols
self.t = sp.symbols('t')
self.H = 0.
self.nonlin_coeff = nonlin_coeff
if chi_nonlinearities is None:
self.chi_nonlinearities = []
else:
self.chi_nonlinearities = chi_nonlinearities
if Omega is None:
self.Omega = np.asmatrix(np.zeros((self.m,self.m)))
else:
self.Omega = Omega
if polarizations is None:
self.polarizations = [1.]*len(self.delays)
else:
self.polarizations = polarizations
if self.using_qnet_symbols:
self.a = [Destroy(i) for i in range(self.m)]
else:
#self.a = [sp.symbols('a_'+str(i)) for i in range(self.m)]
self.a = [BosonOp('a_'+str(i)) for i in range(self.m)]
def _update_omegas(self,):
self.omegas = map(lambda z: z.imag / (2.*consts.pi), self.roots)
return
def Dagger(self, symbol):
if self.using_qnet_symbols:
return symbol.dag()
else:
return sp_Dagger(symbol)
# def adjusted_delays(self):
# '''
#
# '''
# N = len(self.delays)
# M = len(self.Delta_delays)
# delay_adjustment = [0.]*N
# for i in range(M):
# for j in range(N):
# delay_adjustment[j] += self.Delta_delays[i,j] / M
# return map(sum, zip(delay_adjustment, self.delays))
def make_Delta_delays(self,):
'''
Each different frequency will experience a different shift in delay
lengths due to all nonlinearities present.
We will store those shifts as a list of lists in the class.
This list is called Delta_delays.
The ith list will be the shifts in all the original delays for the ith
root (i.e. frequency).
Returns:
None.
'''
self.Delta_delays = np.zeros((self.m,len(self.delays)))
for chi in self.chi_nonlinearities:
for i,omega in enumerate(self.omegas):
for delay_index in chi.delay_indices:
pol = self.polarizations[delay_index]
index_of_refraction = chi.refraction_index_func(omega,pol)
self.Delta_delays[i,delay_index] = (
(index_of_refraction - 1.)* chi.length_nonlin/consts.c)
# print i,delay_index,'delta delay is', self.Delta_delays[i,delay_index]
# print "Delta delays are", self.Delta_delays
return
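## Sketch of the resulting layout (hypothetical numbers): with m = 2
## roots and 3 delays, Delta_delays could be
## [[0., 1.2e-12, 0.],
## [0., 1.5e-12, 0.]]
## where row i holds the delay shifts experienced by the i-th root.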
def perturb_roots_z(self,perturb_func,eps = 1e-12):
'''
One approach to perturbing the roots is to use Newton's method.
This is done here using a function perturb_func that corresponds to
:math:`-f(z) / f'(z)` when the time delays are held fixed.
The function perturb_func is generated in
get_frequency_pertub_func_z.
Args:
perturb_func (function):
The Newton's method function.
eps (optional [float]):
Desired precision for convergence.
'''
max_count = 10
for j in range(max_count):
self.make_Delta_delays() #delays depend on omegas
old_roots = copy.copy(self.roots)
for i,root in enumerate(self.roots):
pert = perturb_func(root,map(sum,zip(self.delays,self.Delta_delays[i])))
self.roots[i] += pert
self._update_omegas()
if all([abs(new-old) < eps for new,old in zip(self.roots,old_roots)]):
print "root adjustment converged!"
break
else:
"root adjustment aborted."
# def minimize_roots_z(self,func,dfunc,eps = 1e-12):
# '''
# One approach to perturb the roots is to use a function in :math:`x,y`
# that becomes minimized at a zero. This is done here.
#
# The result is an update to roots, omegas, and Delta_delays.
#
# Args:
# func, dfuncs (functions):
# Functions in x,y. The first becomes
# minimized at a zero and the second is the gradient in x,y.
# These functions are generated in
# Time_Delay_Network.get_minimizing_function_z.
#
# eps (optional [float]):
# Desired precision for convergence.
#
# '''
# max_count = 1
# for j in range(max_count):
# self.make_Delta_delays()
# old_roots = copy.copy(self.roots)
# for i,root in enumerate(self.roots):
# fun_z = lambda x,y: func(x,y,*map(sum,zip(self.delays,self.Delta_delays[i])))
# fun_z_2 = lambda arr: fun_z(*arr)
# dfun_z = lambda x,y: dfunc(x,y,*map(sum,zip(self.delays,self.Delta_delays[i])))
# dfun_z_2 = lambda arr: dfun_z(*arr)
# x0 = np.asarray([root.real,root.imag])
# minimized = minimize(fun_z_2,x0,jac = dfun_z_2).x
# self.roots[i] = minimized[0] + minimized[1] * 1j
# #print self.roots
# self._update_omegas()
# if all([abs(new-old) < eps for new,old in zip(self.roots,old_roots)]):
# print "root adjustment converged!"
# break
# else:
# "root adjustment aborted."
# # def perturb_roots_T_and_z(self,perturb_func,eps = 1e-15):
# r'''
# For each root, use the corresponding perturbations in the delays
# to generate the perturbation of the root.
#
# Args:
# perturb_func (function): A function whose input is a tuple of the
# form (z,Ts,Ts_Delta), where z is a complex number, while each
# Ts and Ts_Delta are lists of floats.
#
# Each network pole `z^*` with the corresponding perturbed
# delays will satisfy `perturb_func(z^*,Ts,Ts_Delta) = 0`.
# '''
# # print 'roots before'
# # print self.roots
#
# max_count = 200
# for j in range(max_count):
# old_roots = copy.copy(self.roots)
# for i,root in enumerate(self.roots):
# #updated_delays = map(sum,zip(self.delays,self.Delta_delays[i]))
# #self.make_Delta_delays()
# #updated_delta_delays = map(sum,zip(self.delays,self.Delta_delays[i]))
# pert = perturb_func(root,self.delays,self.Delta_delays[i])
# # print pert
# self.roots[i] += pert
# self._update_omegas()
# if all([abs(new-old) < eps for new,old in zip(self.roots,old_roots)]):
# break
#
# # print 'roots after:'
# # print self.roots
# return
def make_chi_nonlinearity(self,delay_indices,start_nonlin,
length_nonlin,refraction_index_func = None,
chi_order=3,chi_function = None):
r'''Add an instance of Chi_nonlin to Hamiltonian.
Args:
delay_indices (int OR list/tuple of ints):
The index representing the
delay line along which the nonlinearity lies. If given a list/tuple
then the nonlinearity interacts the N different modes.
start_nonlin (float OR list/tuple of floats):
The beginning of the
nonlinearity. If a list/tuple then each nonlinearity begins at a
different time along its corresponding delay line.
length_nonlin (float):
Duration of the nonlinearity in terms of
length. (Units in length)
refraction_index_func (function):
The indices of refraction as a
function of the natural frequency :math:`/omega`.
chi_order (optional [int]):
Order of the chi nonlinearity.
chi_function (function):
A function of 2*chi_order+2 parameters that
returns the strength of the interaction for given frequency
combinations and polarizations. The first chi_order+1 parameters
correspond to frequencies and the next chi_order+1 parameters
correspond to the various polarizations.
TODO: check units everywhere, including f versus \omega = 2 \pi f.
'''
if isinstance(delay_indices, int):
delay_indices = [delay_indices]
chi_nonlinearity = Chi_nonlin(delay_indices,start_nonlin,
length_nonlin,refraction_index_func=refraction_index_func,
chi_order=chi_order,chi_function=chi_function)
self.chi_nonlinearities.append(chi_nonlinearity)
def normalize_modes(self,):
''' Normalize the modes of Hamiltonian.
'''
for i,mode in enumerate(self.modes):
mode /= functions._norm_of_mode(mode,map(sum, zip(self.delays,self.Delta_delays[i])))
def mode_volumes(self,):
'''Find the effective volume of each mode to normalize the field.
Returns:
A list of the effective lengths of the various modes.
'''
volumes = []
for i,mode in enumerate(self.modes):
for j,delay in enumerate(map(sum, zip(self.delays,self.Delta_delays[i]))):
volumes.append( delay * abs(mode[j,0]**2) *
self.cross_sectional_area )
return volumes
def make_nonlin_term_sympy(self,combination,pm_arr):
'''Make symbolic nonlinear term using sympy.
Example:
>>> combination = [1,2,3]; pm_arr = [-1,1,1]
>>> print ham.make_nonlin_term_sympy(combination,pm_arr)
a_1*Dagger(a_2)*Dagger(a_3)
Args:
combination (tuple/list of integers):
indices of which terms to include.
pm_arr (tuple/list of +1 and -1):
Creation and
annihilation indicators for the respective terms in combination.
Returns:
(sympy expression):
symbolic expression for the combination of creation and annihilation
operators.
'''
r = 1
for index,sign in zip(combination,pm_arr):
if sign == 1:
r*= self.Dagger(self.a[index])
else:
r *= self.a[index]
return r
def phase_weight(self,combination,pm_arr,chi):
'''The weight to give to each nonlinear term characterized by the given
combination and pm_arr.
Args:
combination (list/tuple of integers):
Which modes/roots to pick
pm_arr (list of +1 and -1):
creation and annihilation of modes
chi (Chi_nonlin):
The chi nonlinearity for which to compute
the phase coefficient.
Returns:
The weight to add to the Hamiltonian.
'''
omegas_to_use = np.array([self.omegas[i] for i in combination])
modes_to_use = [self.modes[i] for i in combination]
polarizations_to_use = [self.polarizations[i] for i in chi.delay_indices]
indices_of_refraction = [chi.refraction_index_func(omega,pol)
for omega,pol in zip(omegas_to_use,polarizations_to_use)]
return functions.make_nonlinear_interaction(
omegas_to_use, modes_to_use, self.delays, chi.delay_indices,
chi.start_nonlin, chi.length_nonlin, pm_arr,
indices_of_refraction)
def make_phase_matching_weights(self,weight_keys,chi,
filtering_phase_weights = False ,eps = 1e-5):
'''Make a dict to store the weights for the selected components and the
creation/annihilation information.
Args:
weight_keys (list of tuples):
Keys for weights to consider.
Each key is a tuple consisting of two
components: the first is a tuple of the indices of modes and the
second is a tuple of +1 and -1.
filtering_phase_weights (optional[boolean]):
Whether or not to filter the phase_matching_weights by the size
of their values. The cutoff for their absolute value is given
by eps.
eps (optional [float]):
Cutoff for filtering of weights.
Returns:
Weights (dict):
A dictionary of weights with values corresponding to the
phase matching coefficients.
'''
weights = {}
for comb,pm_arr in weight_keys:
weights[comb,pm_arr] = self.phase_weight(comb,pm_arr,chi)
if filtering_phase_weights:
weights = {k:v for k,v in weights.iteritems() if abs(v) > eps}
return weights
def E_field_weight(self,mode_index):
r'''Make the weights for each field component :math:`E_i(n) = [\text{weight}] (a+a^\dagger)`.
Args:
mode_index (int):
The index of the mode.
Returns:
Weight of E-field (float):
It has form:
:math:`[\hbar \omega(n) / (2 V_{eff}(n))]^{1/2}`.
Here :math:`\hbar` takes its SI value and the permittivity factor is omitted.
'''
omega = self.omegas[mode_index]
#eps0 = consts.epsilon_0
hbar = consts.hbar
return np.sqrt(hbar * abs(omega) / (2 * self.volumes[mode_index]) ) ## / eps0
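## A rough numerical sketch (hypothetical values): for |omega| ~ 1e14 Hz
## and an effective volume of ~1e-12 m^3, the weight is
## sqrt(hbar * 1e14 / (2 * 1e-12)) ~ 7.3e-5 in SI units.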
def make_E_field_weights(self,):
'''
Returns:
Weights (dict):
A dictionary from mode index to the E-field weight.
TODO: In the make_positive_keys_chi2 function, generate and pass the
correct polarization functions.
'''
weights = {}
for mode_index in range(self.m):
weights[mode_index] = self.E_field_weight(mode_index)
return weights
# def _setup_ranges(max_i,base):
# ranges= {}
# for i in range(max_i+1):
# ranges[i] = np.linspace(6.,11.,1+pow(base,i+1))
# return ranges
# def _make_positive_keys_chi2(chi):
# '''
# Returns:
#
# '''
# if chi.chi_order != 2:
# raise Exception('chi must of order 2 for this method.')
#
# ## TODO: get actual modes
def make_weight_keys(self,chi, key_types = 'all_keys',pols = (1,1,-1), res=(1e-1,1e-1,1e-1)):
r'''
Make a list of keys for which various weights will be determined.
Each key is a tuple consisting of two
components: the first is a tuple of the indices of modes and the
second is a tuple of +1 and -1.
Args:
chi (Chi_nonlin):
the nonlinearity for which the weight will be
found.
Returns:
Keys (list):
A list of keys of the type described.
TODO: pass the k(lambda) function from chi to the function called from
phase_matching or phase_matching_hash.
'''
weight_keys=[]
if key_types == 'all_keys':
list_of_pm_arr = list(itertools.product([-1, 1],
repeat=chi.chi_order+1))
field_combinations = itertools.combinations_with_replacement(
range(self.m), chi.chi_order+1) ##generator
for combination in field_combinations:
for pm_arr in list_of_pm_arr:
weight_keys.append( (tuple(combination),tuple(pm_arr)) )
return weight_keys
elif not all([el >= 0 for el in self.omegas]):
## We need all omegas to be positive.
print "Not all omegas are positive!"
else: ## key_types != 'all_keys' and all omegas are positive
def filter_by_polarization(positive_omega_indices):
'''Filter by polarizations.'''
return [indices for indices in positive_omega_indices
if all([pols[j] == self.polarizations[i] for j,i in enumerate(indices)])]
def sign_tuple(indices,flip = False):
'''Tuple of the signs of the indices tuple.'''
if flip:
return tuple(map(lambda z: -int(np.sign(z)),indices))
else:
return tuple(map(lambda z: int(np.sign(z)),indices))
def generate_positive_omega_keys(chi,indices_method,pols=pols):
'''Make indices for weights, assuming first two are the SAME sign.'''
## Multiplied by 1e-13 * 2 * pi for units.
pos_nus_lst = [ (1e-13 * 2 * consts.pi * omega) for omega in self.omegas if omega >= 0.]
positive_omega_indices = indices_method(pos_nus_lst,chi,pols = pols)
positive_omega_indices = filter_by_polarization(positive_omega_indices)
weight_keys = ( [(indices,sign_tuple(indices)) for indices in positive_omega_indices]
+ [(indices,sign_tuple(indices, flip = True)) for indices in positive_omega_indices] )
return weight_keys
def generate_all_permutations_of_omega_keys(chi,indices_method,pols=pols,permutations=None):
weight_keys = []
for perm in permutations:
if perm is None:
weight_keys.append(generate_positive_omega_keys(chi,indices_method,pols=pols))
else:
pols_perm = tuple([pols[p] for p in perm])
permuted_keys = generate_positive_omega_keys(chi,indices_method,pols=pols_perm)
## apply the permutation index-wise to each key to undo the reordering
unpermuted_keys = [(tuple(el[0][p] for p in perm),
tuple(el[1][p] for p in perm)) for el in permuted_keys]
weight_keys.append(unpermuted_keys)
return weight_keys
if key_types == 'search_voxels' and chi.chi_order == 2:
permutations = [None, (2,1,0), (0,2,1)]
return generate_all_permutations_of_omega_keys(chi,phase_matching.make_positive_keys_chi2,pols=pols,permutations=permutations)
elif key_types == 'hash_method' and chi.chi_order == 3:
permutations = [None, (0,2,1,3), (0,2,3,1), (2,0,1,3), (2,0,3,1), (2,3,0,1)]
def make_positive_keys_chi3_fixed_res(pos_nus_lst,chi,pols = pols):
return phase_matching_hash.make_positive_keys_chi3(pos_nus_lst,chi,pols = pols,res = res)
return generate_all_permutations_of_omega_keys(chi,make_positive_keys_chi3_fixed_res,pols=pols,permutations=permutations)
print "key_types not known or doesn't match chi_order."
def make_nonlin_H(self,filtering_phase_weights=False,eps=1e-5):
'''Make a nonlinear Hamiltonian based on nonlinear interaction terms
Args:
filtering_phase_weights (optional[boolean]):
Whether or not to filter the phase_matching_weights by the size
of their values. The cutoff for their absolute value is given
by eps.
eps (optional[float]):
Cutoff for the significance of a particular term.
Returns:
Expression (sympy expression):
A symbolic expression for the nonlinear Hamiltonian.
TODO: Make separate dictionaries for values of chi_function,
for phase_matching_weights, and for producs of E_field_weights. filter
the keys before generating terms.
TODO: Make fast function for integrator; combine with make_f and make_f_lin
'''
H_nonlin_sp = sp.Float(0.)
self.make_dict_H_nonlin()
for term_identifier, value in self.Hamiltonian_dict_nonlin.iteritems():
H_nonlin_sp += self.make_nonlin_term_sympy(*term_identifier) * value
return H_nonlin_sp
def make_lin_H(self,Omega):
'''Make a linear Hamiltonian based on Omega.
Args:
Omega (complex-valued matrix):
A matrix that describes the Hamiltonian of the system.
Returns:
Expression (sympy expression):
A symbolic expression for the nonlinear Hamiltonian.
'''
self.make_dict_H_lin(Omega)
H_lin_sp = sp.Float(0.)
for i in range(self.m):
for j in range(self.m):
H_lin_sp += self.Dagger(self.a[i])*self.a[j]*Omega[i,j]
return H_lin_sp
def make_H(self,eps=1e-5):
r'''Make a Hamiltonian combining the linear and nonlinear parts.
The term -1j*A carries the effective linear Hamiltonian, including the
decay term :math:`-\frac{i}{2} L^\dagger L`. However, this term does
not include material effects including dielectric and nonlinear terms.
It also does not include a term with contribution from the inputs.
If one wishes to include terms due to coherent input, one can impose a
linear Hamiltonian term consistent with the classical equations of
motion. This yields the usual term :math:`i(a \alpha^* - a^\dagger \alpha)`.
To obtain the form :math:`A = i \Omega - \frac{1}{2} C^\dagger C` with
:math:`Omega` Hermitian, we notice :math:`A` can be split into Hermitian
and anti-Hermitian parts. The anti-Hermitian part of A describes the
closed dynamics only and the Hermitian part corresponds to the decay
terms due to the coupling to the environment at the input/output ports.
Args:
Omega (complex-valued matrix):
Describes the Hamiltonian of the system.
eps (optional[float]):
Cutoff for the significance of a particular term.
Note:
Omega = -1j*A <--- full dynamics (not necessarily Hermitian)
Omega = (A-A.H)/(2j) <--- closed dynamics only (Hermitian part of above)
Returns:
Expression (sympy expression):
A symbolic expression for the full Hamiltonian.
'''
H_nonlin = self.make_nonlin_H(eps)
H_lin = self.make_lin_H(self.Omega)
self.H = H_lin + H_nonlin * self.nonlin_coeff
# self.H = normal_order((self.H).expand())
return self.H
def make_dict_H_lin(self,Omega):
r''' Using the current information about the modes, generate a
dictionary mapping each key ((i,j),(+1,-1)) to its coefficient Omega[i,j].
'''
self.Hamiltonian_dict_lin = {}
for i in range(self.m):
for j in range(self.m):
combination = (i,j)
pm_arr = (+1,-1) ## all terms have form a_i.dag()*a_j
self.Hamiltonian_dict_lin[(combination,pm_arr)] = Omega[i,j]
def make_dict_H_nonlin(self,filtering_phase_weights=False,eps=1e-5):
r''' Using the current information about the modes and
chi_nonlinearities, generate a dictionary mapping each key
(combination,pm_arr) to its nonlinear coefficient.
'''
self.Hamiltonian_dict_nonlin = {}
for chi in self.chi_nonlinearities:
weight_keys = self.make_weight_keys(chi)
phase_matching_weights = self.make_phase_matching_weights(
weight_keys,chi,filtering_phase_weights,eps)
for combination,pm_arr in phase_matching_weights:
omegas_to_use = map(lambda i: self.omegas[i],combination)
omegas_with_sign = [omega * pm for omega,pm
in zip(omegas_to_use,pm_arr)]
pols = map(lambda i: self.polarizations[i],chi.delay_indices)
chi_args = omegas_with_sign + pols
self.Hamiltonian_dict_nonlin.setdefault( (combination,pm_arr),0.)
self.Hamiltonian_dict_nonlin[(combination,pm_arr)] += (
chi.chi_function(*chi_args) *
phase_matching_weights[combination,pm_arr] *
np.prod([self.E_field_weights[i] for i in combination]) )
def move_to_rotating_frame(self, freqs = 0.,include_time_terms = True):
r'''Moves the symbolic Hamiltonian to a rotating frame.
We apply a change of basis :math:`a_j \to a_j e^{- i \omega_j t}` for
each mode :math:`a_j`. This method modifies the symbolic Hamiltonian,
so to use it the Hamiltonian should already be constructed and stored.
Args:
freqs (optional [real number or list/tuple]):
Frequency or list
of frequencies to use to displace the Hamiltonian.
include_time_terms (optional [boolean]):
If this is set to true,
we include the terms :math:`e^{- i \omega_j t}` in the Hamiltonian
resulting from a change of basis. This can be set to False if all
such terms have already been eliminated (i.e. if the rotating wave
approximation has been applied).
TODO: replace the sine and cosine stuff with something nicer.
Maybe utilize the _get_real_imag_func method in Time_Delay_Network.
'''
if type(freqs) in [float,long,int]:
if freqs == 0.:
return
else:
self.move_to_rotating_frame([freqs]*self.m)
elif type(freqs) in [list,tuple]:
for op,freq in zip(self.a,freqs):
self.H -= freq * self.Dagger(op)* op
self.H = (self.H).expand()
if include_time_terms:
for op,freq in zip(self.a,freqs):
self.H = (self.H).subs({
self.Dagger(op) : self.Dagger(op)*( sp.cos(freq * self.t)
+ sp.I * sp.sin(freq * self.t) ),
op : op * ( sp.cos(freq * self.t)
- sp.I * sp.sin(freq * self.t) ),
})
### Sympy has issues with the complex exponential...
# self.H = (self.H).subs({
# self.Dagger(op) : self.Dagger(op)*sp.exp(sp.I * freq * self.t),
# op : op*sp.exp(-sp.I * freq * self.t),
# })
###
self.H = (self.H).expand()
else:
print "freqs should be a real number or list of real numbers."
return
def make_eq_motion(self,):
r'''Input is a tuple or list; output is a complex-valued column matrix.
This generates Hamilton's equations of motion for a and a^H.
These equations are CLASSICAL equations of motion. This means
we replace the operators with c-numbers. The order of the operators
will yield different results, so we assume the Hamiltonian is already
in the desired order (e.g. normally ordered).
These equations of motion will not show effects of squeezing. To do
this, we will need a full quantum picture.
Returns:
Equations of Motion (function):
A function that yields the Hamiltonian equations of motion based on
the Hamiltonian given. The equations of motion map
:math:`(t,a) \to v`, where :math:`t` is a scalar corresponding to
time, :math:`a` is an array of inputs corresponding to the internal
degrees of freedom, and :math:`v` is a complex-valued column matrix
describing the gradient.
'''
if self.using_qnet_symbols:
print "Warning: The Hamiltonian should be regular c-numbers!"
print "Returning None"
return None
## c-numbers
b = [sp.symbols('b'+str(i)) for i in range(self.m)]
b_conj = [sp.symbols('b_H'+str(i)) for i in range(self.m)]
D_to_c_numbers = {self.a[i] : b[i] for i in range(self.m)}
D_to_c_numbers.update({self.Dagger(self.a[i]) : b_conj[i] for
i in range(self.m)})
H_conj = self.Dagger(self.H)
H_c_numbers = self.H.subs(D_to_c_numbers)
H_conj_c_numbers = H_conj.subs(D_to_c_numbers) ## don't have to copy
## classical equations of motion
diff_ls = ([1j*sp.diff(H_c_numbers,var) for var in b_conj ] +
[-1j*sp.diff(H_conj_c_numbers,var) for var in b])
fs = ([sp.lambdify( (self.t, b + b_conj), expression)
for expression in diff_ls ])
return lambda t,arr: (np.asmatrix([ sp.N ( f(t,arr) )
for f in fs], dtype = 'complex128' )).T
#### In future implementations, we can use theano to make calculations faster
#### right now however theano does not have good support for complex numbers.
###f = theano_function([self.t]+b+b_H, diff_ls) ## f(t,b[0],b[1],...)
###F = lambda t,args: np.asarray(f(t,*args)) ## F(t,(b[0],b[1],...))
#return F
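## A minimal usage sketch (assumes `ham` is a Hamiltonian instance with
## using_qnet_symbols = False whose symbolic H was already built by make_H):
##
## eq_mot = ham.make_eq_motion()
## y0 = np.zeros(2 * ham.m, dtype = 'complex128')
## grad0 = eq_mot(0., y0) ## complex column matrix of length 2m at t = 0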
| 33,044 | 38.527512 | 142 | py |
potapov_interpolation | potapov_interpolation-master/Potapov_Code/__init__.py | import Roots
import Potapov
import Time_Delay_Network
import Time_Sims
import functions
import tests
import numpy as np
import numpy.linalg as la
import matplotlib.pyplot as plt
def contour_plot(Mat):
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(abs(Mat), interpolation='nearest')
fig.colorbar(cax)
if __name__ == "__main__" and False:
Ex = Time_Delay_Network.Example3(r1 = 0.999, r3 = 0.999)
Ex.run_Potapov()
E = Ex.E
roots = Ex.roots
M1 = Ex.M1
delays = Ex.delays
modes = functions.spatial_modes(roots,M1,E)
Mat = functions.make_normalized_inner_product_matrix(roots,modes,delays)
contour_plot(Mat)
print Ex.max_freq
print Ex.max_linewidth
print roots
| 745 | 18.631579 | 76 | py |
potapov_interpolation | potapov_interpolation-master/Potapov_Code/Potapov.py | # -*- coding: utf-8 -*-
"""
Created on Mon Mar 2 13:59:30 2015
@author: Gil Tabak
@title: Potapov
The code in this file implements the procedure for finding Blaschke-Potapov
products to approximate given functions near poles.
Please see section 6.2 in our manuscript for details: http://arxiv.org/abs/1510.08942
(to be published in EPJ QT).
"""
import numpy as np
import functions as f
import matplotlib.pyplot as plt
import numpy.linalg as la
def plot(L,dx,func,(i,j),*args):
'''A nice function for plotting components of matrix-valued functions.
Args:
L (float):
length along which to plot.
dx (float):
step length to take.
func (function):
complex matrix-valued function.
i,j (tuple of ints):
coordinate to plot.
args (functions):
Desired transformations on the inputs.
'''
x = np.linspace(-L,L,int(2.*L/dx))
for arg in args:
plt.plot(x,[func(arg(x_el*1j)[i,j]) for x_el in x ])
return
def Potapov_prod(z,poles,vecs,N):
r'''
Evaluates the Blaschke-Potapov product determined by the given poles and
projector eigenvectors at the point z.
Args:
z (complex number):
value where product is evaluated.
poles (list of complex numbers):
The poles of the Potapov product.
vecs (list of complex-valued matrices):
The eigenvectors corresponding to
the orthogonal projectors of the Potapov product.
N (int):
Dimensionality of the the range.
Returns:
(matrix):
Complex-valued matrix of size :math:`N \times N`.
'''
R = np.asmatrix(np.eye(N))
for pole_i,vec in zip(poles,vecs):
Pi = vec*vec.H
R = R*(np.eye(N) - Pi + Pi * ( z + pole_i.conjugate() )/( z - pole_i) )
return R
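# For a single pole p with a one-dimensional unit eigenvector, the product
# reduces to the scalar Blaschke factor (z + conj(p)) / (z - p), e.g.:
#
# p = -1. + 2j
# vec = np.asmatrix([[1.]])
# w = Potapov_prod(1j, [p], [vec], 1)[0,0] # equals (1j + conj(p)) / (1j - p)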
def get_Potapov_vecs(T,poles):
'''
Given a transfer function T and some poles, compute the residues about the
poles and generate the eigenvectors to use for constructing the projectors
in the Blaschke-Potapov factorization.
'''
N = T(0).shape[0]
found_vecs = []
for pole in poles:
L = (la.inv(Potapov_prod(pole,poles,found_vecs,N)) *
f.limit(lambda z: (z-pole)*T(z),pole) ) ## Current bottleneck O(n^2).
[eigvals,eigvecs] = la.eig(L)
index = np.argmax(map(abs,eigvals))
big_vec = np.asmatrix(eigvecs[:,index])
found_vecs.append(big_vec)
return found_vecs
def get_Potapov(T,poles,found_vecs):
'''
Given a transfer function T and some poles, generate the Blaschke-Potapov
product to reconstruct or approximate T, assuming that T can be represented
by the Blaschke-Potapov product with the given poles. Also match the values
of the functions at zero.
If T is a Blaschke-Potapov function and the given poles are the only poles,
then T will be reconstructed.
In general, there is possibly an analytic term that is not captured by using
a Blaschke-Potapov approximation.
Args:
T (matrix-valued function):
A given meromorphic function.
poles (a list of complex valued numbers):
The given poles of T.
vecs (list of complex-valued matrices):
The eigenvectors corresponding to
the orthogonal projectors of the Potapov product.
Returns:
Potapov product (matrix-valued function):
equal to T at z=0 and approximating T
using a Potapov product generated by its poles and residues.
'''
N = T(0).shape[0]
return lambda z: T(0)*Potapov_prod(0,poles,found_vecs,N).H*\
Potapov_prod(z,poles,found_vecs,N)
def prod(z,U,eigenvectors,eigenvalues):
'''
Return the Blaschke-Potapov product with the given eigenvalues and
eigenvectors and constant unitary factor U evaluated at z.
Args:
z (complex number):
where product is evaluated.
U (complex-valued matrix):
A unitary matrix.
eigenvectors (list of complex-valued matrices):
eigenvectors to use.
eigenvalues (list of complex numbers):
eigenvalues to use.
Returns:
Product (complex-valued matrix):
The Potapov product evaluated at z.
'''
if eigenvectors==[] or eigenvalues == []:
return U
else:
vec = eigenvectors[-1]
val = eigenvalues[-1]
N = U.shape[0]
return prod(z,U,eigenvectors[:-1],eigenvalues[:-1])*\
(np.eye(N) - vec*vec.H + vec*vec.H*(z+val.conjugate())/(z-val))
def finite_transfer_function(U,eigenvectors,eigenvalues):
'''
Give a rational Blaschke-Potapov product of z with the given
eigenvalues and eigenvectors and constant unitary factor U.
Args:
U (complex-valued matrix):
A unitary matrix.
eigenvectors (list of complex-valued matrices):
eigenvectors to use.
eigenvalues (list of complex numbers):
eigenvalues to use.
Returns:
Transfer function (function):
A function that takes a complex number and returns the Potapov product
evaluated at that number.
'''
return lambda z: prod(z,U,eigenvectors,eigenvalues)
def normalize(vec):
'''
Normalize a vector.
Args:
vec (complex-valued matrix):
A vector.
Returns:
(vector):
The normalized vector.
'''
return vec / la.norm(vec)
def estimate_D(A,B,C,T,z):
r'''
Estimate the scattering matrix S=D using the ABC matrices
the transfer function T at a frequency :math:`z = i \omega`.
Try to satisfy
:math:`T(z) = D + C(zI-A)^{-1}B`
Args:
A,B,C (matrices):
The A,B, and C matrices of the
state-space representation.
T (matrix-valued function):
The input/output function
to estimate.
z (complex number):
The location at which the scattering
matrix will be estimated.
Returns:
D (matrix):
The estimated S=D scaterring matrix based on the value of
the function T and the ABC matrices.
'''
N = np.shape(A)[0]
return T(z)+C*la.inv(A-z*np.eye(N))*B
def get_ABCD(val, vec):
'''
Make the ABCD model of a single Potapov factor given some eigenvalue
and eigenvector.
The ABCD model can be used to obtain the dynamics of a linear system.
Args:
val (complex number):
an eigenvalue.
vec (complex-valued matrix):
an eigenvector.
Returns:
[A,B,C,D] (list):
Four matrices representing the ABCD model.
'''
N = vec.shape[0]
q = np.sqrt( -(val+val.conjugate()) )
return [val*vec.H*vec, -q*vec.H, q*vec, np.eye(N)]
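# A minimal sketch for a single mode (hypothetical pole; the pole must
# have negative real part so that q = sqrt(-2*Re(val)) is real):
#
# val = -0.5 + 10j
# vec = np.asmatrix([[1.]])
# A,B,C,D = get_ABCD(val, vec)
# # yields A = [[val]], B = [[-1.]], C = [[1.]], D = [[1.]] since q = 1.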
def get_Potapov_ABCD(poles,vecs,T=None,z=None):
'''
Combine the ABCD models for the different degrees of freedom.
Args:
poles (a list of complex numbers):
given eigenvalues.
vecs (a list of complex-valued matrices):
given eigenvectors.
Returns:
[A,B,C,D] (list):
Four matrices representing the ABCD model.
'''
if min(len(poles),len(vecs)) < 1:
print "Emptry list into get_Potapov_ABCD"
elif min(len(poles),len(vecs)) == 1:
return get_ABCD(poles[0],vecs[0])
else:
[A1,B1,C1,D1] = get_Potapov_ABCD(poles[1:], vecs[1:])
[A2,B2,C2,D2] = get_ABCD(poles[0],vecs[0])
O = np.zeros((A1.shape[0],A2.shape[1]))
A_first_row_block = np.hstack((A1,O))
A_second_row_block = np.hstack((B2 * C1, A2))
A = np.vstack((A_first_row_block,A_second_row_block))
B = np.vstack(( B1, B2*D1))
C = np.hstack(( D2*C1, C2))
if T is not None and z is not None:
D = estimate_D(A,B,C,T,z)
else:
D = D2*D1
return [A,B,C,D]
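# Combining two one-dimensional modes (hypothetical poles and vectors):
#
# poles = [-1. + 5j, -2. - 3j]
# vecs = [np.asmatrix([[1.]]), np.asmatrix([[1.]])]
# A,B,C,D = get_Potapov_ABCD(poles, vecs)
# # A is 2x2, B is 2x1, C is 1x2, and D is 1x1.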
| 8,231 | 27.28866 | 85 | py |
potapov_interpolation | potapov_interpolation-master/Potapov_Code/Time_Sims.py | # -*- coding: utf-8 -*-
"""
Created on Mon Mar 2 15:52:32 2015
@author: gil
@title: Time_Sims
"""
#example for using ODE library
from scipy.integrate import ode
import numpy as np
import matplotlib.pyplot as plt
import Time_Delay_Network
import Potapov
def time_sim(Example, omega = 0., t1=150, dt=0.05, freq=None,
port_in = 0, port_out = [0,1], kind='FP',
):
'''
Takes an example and simulates it up to time t1 in increments of dt.
freq indicates the maximum frequency where we look for modes
omega indicates the frequency of driving. omega = 0 is DC.
port_in and port_out are where the system is driven.
'''
E = Example(max_freq = freq) if freq != None else Example()
E.run_Potapov()
T,T_testing,poles,vecs = E.get_outputs()
print "number of poles is ", len(poles)
num = len(poles)
[A,B,C,D] = Potapov.get_Potapov_ABCD(poles,vecs)
y0 = np.matrix([[0]]*A.shape[1])
t0 = 0
force_func = lambda t: np.cos(omega*t)
r = ode(f).set_integrator('zvode', method='bdf')
r.set_initial_value(y0, t0).set_f_params(A,B,force_func,port_in)
Y = [C*y0+D*force_func(t0)]
while r.successful() and r.t < t1:
r.integrate(r.t+dt)
#print r.t, r.y
u = force_func(r.t)
Y.append(C*r.y+D*u)
time = np.linspace(t0,t1,len(Y))
plot_time(time,Y,port_out,port_in,num=num,kind=kind)
return
def stack_func_port(force_func,forcing_port,t,max_size):
u = np.vstack( (np.matrix([[0]]*forcing_port),force_func(t)) ) \
if forcing_port > 0 else np.matrix([[force_func(t)]])
u = np.vstack( (u,np.matrix([[0]]*(max_size-forcing_port-1)) )) \
if (max_size-forcing_port-1) > 0 else u
return u
def test_stacking():
print "u ", stack_func_port(np.sin,1)
def f(t, y, A,B, force_func,forcing_port):
u = stack_func_port(force_func,forcing_port,t,B.shape[1])
return A*np.asmatrix(y).T+B*np.asmatrix(u)
def plot_time(time,y,port_out,port_in,num=0,kind='FP',format = 'pdf'):
#plt.figure(1)
plt.figure(figsize=(9,6))
y_coords = [ [np.abs(y_el[i,port_in]) for y_el in y] for i in port_out]
plt.xlabel('time',fontsize=24)
plt.ylabel('Norm of Output',fontsize=24)
plt.title('Time domain output with '+ str(num) \
+(' Mode' if num == 1 else ' Modes'), fontsize=28 )
[plt.plot(time,y_coords[i],label='Output port '+str(i)) for i in port_out]
plt.tight_layout()
plt.rcParams['legend.numpoints'] = 1
plt.legend(loc = 5,fontsize=24)
plt.tick_params(labelsize=20)
plt.savefig(kind + str(num)+ '.' + format,format=format)
return
if __name__ == "__main__" and False:
'''
Run a single simulation of a Fabry-Perot cavity.
freq is the maximum frequency to look for poles.
Setting it to 1 only gets the pole at zero.
'''
###########################################
eg = Time_Delay_Network.Example2
kind = 'FP' ## Fabry-Perot
time_sim(eg,port_out = [1],t1=50,dt=0.0005, freq = 1.)
###########################################
'''
Run several simulations of FP
'''
# eg = Time_Delay_Network.Example2
# kind = 'FP' ## Fabry-Perot
#
# for num in xrange(5,7):
# time_sim(eg,port_out = [0,1],t1=50,freq = 1.+np.pi*num,\
# kind=kind)
# ###########################################
'''
Run a double - Fabry Perot (i.e. three mirrors)
'''
#eg = Time_Delay_Network.Example3
#kind = 'DFP' ##'double'-Fabry-Perot
#for num in xrange(105,106):
# time_sim(eg,port_out = 1,t1=10,dt=0.0001,freq = 1+np.pi*num,\
# kind=kind)
###########################################
| 3,668 | 29.831933 | 78 | py |
potapov_interpolation | potapov_interpolation-master/Potapov_Code/phase_matching_hash.py | import sympy as sp
import numpy as np
import scipy.constants
from sympy.utilities.autowrap import ufuncify
from scipy import interpolate
import matplotlib.pyplot as plt
from functions import timeit
from functions import make_dict_values_to_lists_of_inputs
def generate_k_func_4wv(pols=(-1,-1,-1,-1), n_symb = None):
'''
Generates two functions whose difference vanishes when the phase
and frequency matching conditions are satisfied.
Args:
pols (optional[tuple]): list of polarizations for the four freqs.
n_sym (optional[function]): index of refraction as a function of nu.
Returns:
diff_func_4wv_1 (function):
a function of only two variables phi1 and phi2
diff_func_4wv_2 (function):
a function of only two variables phi1 and nu3
'''
## from http://refractiveindex.info/?shelf=main&book=LiNbO3&page=Zelmon-o
lambd,nu,nu1,nu2,nu3,nu4 = sp.symbols(
'lambda nu nu_1 nu_2 nu_3 nu_4')
l2 = lambd **2
if n_symb is None:
def n_symb(pol=1):
s = 1.
if pol == 1:
s += 2.6734 * l2 / (l2 - 0.01764)
s += 1.2290 * l2 / (l2 - 0.05914)
s += 12.614 * l2 / (l2 - 474.6)
else: # pol = -1
s += 2.9804 * l2 / (l2 - 0.02047)
s += 0.5981 * l2 / (l2 - 0.0666)
s += 8.9543 * l2 / (l2 - 416.08)
return sp.sqrt(s)
def k_symb(symbol=nu,pol=1):
'''
k is accurate for nu inputs between 6-60.
'''
return ((n_symb(pol=pol) * symbol )
.subs(lambd,scipy.constants.c / (symbol*1e7))) ## / scipy.constants.c
phi1, phi2 = sp.symbols('phi_1 phi_2')
ex1 = ( (k_symb(nu1,pol=pols[0])
+k_symb(nu2,pol=pols[1]))
.expand().subs({nu1:(phi1 + phi2)/2, nu2: (phi1-phi2)/2}) )
ex2 = -((k_symb(nu3,pol=pols[2])
+k_symb(nu4,pol=pols[3]))
.expand().subs(nu4,-phi1-nu3) )
diff_func_4wv_1 = ufuncify([phi1,phi2], ex1)
diff_func_4wv_2 = ufuncify([phi1,nu3], ex2)
def diff_func_4wv_1_ranges_checked(phi1,phi2):
nus = [phi1 + phi2, phi1 - phi2]
if any([abs(nu) < 6. or abs(nu) > 60. for nu in nus]):
return float('NaN')
else:
return diff_func_4wv_1(phi1,phi2)
def diff_func_4wv_2_ranges_checked(phi1,nu3):
if abs(phi1) < 6. or abs(phi1) > 60.:
return float('NaN')
else:
return diff_func_4wv_2(phi1,nu3)
return diff_func_4wv_1_ranges_checked, diff_func_4wv_2_ranges_checked
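## A minimal usage sketch (values are hypothetical; nu inputs must stay in
## the validated 6-60 range or NaN is returned):
##
## f1, f2 = generate_k_func_4wv(pols = (-1,-1,-1,-1))
## err = f1(32., -11.) - f2(32., -21.) ## small |err| means phase matching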
def eps_multiply_digitize(y,eps):
'''
Divide input by epsilong and round.
'''
return map(lambda el: int(el/eps), y)
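## Worked example of the bucketing trick used below: with eps = 1e-4 the
## values 1.00009 and 1.00011 differ by far less than eps but digitize to
## adjacent buckets 10000 and 10001. Checking each bucket together with the
## one above it (y_rounded_up) catches near-matches across bucket edges.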
@timeit
def make_matching_dict_hash(diff_func_4wv_1,diff_func_4wv_2,
phi1_range,phi2_range,nu3_range,eps=2e-4):
'''
Make a dictionary mapping points close to solutions of a phase-matching condition
to error values using hash tables.
Args:
phi1_range (numpy array):
list of phi1 values
phi2_range (numpy array):
list of phi2 values
nu3_range (numpy array):
list of nu3 values
eps (optional[float]):
error allowed for phase-matching condition
Returns:
Dictionary (dict):
dict mapping points to error values.
'''
phi2_indices = range(len(phi2_range))
nu3_indices = range(len(nu3_range))
matching_dict = {}
for phi1_index,phi1 in enumerate(phi1_range):
y1 = diff_func_4wv_1(phi1,phi2_range)
y2 = diff_func_4wv_2(phi1,nu3_range)
y1_rounded = eps_multiply_digitize(y1,eps)
y1_rounded_up = [ind + 1 for ind in y1_rounded]
y2_rounded = eps_multiply_digitize(y2,eps)
y2_rounded_up = [ind + 1 for ind in y2_rounded]
D1 = make_dict_values_to_lists_of_inputs(
y1_rounded+y1_rounded_up,2*phi2_indices)
D2 = make_dict_values_to_lists_of_inputs(
y2_rounded+y2_rounded_up,2*nu3_indices)
inter = set(D1.keys()) & set(D2.keys())
for el in inter:
for ind1 in D1[el]:
for ind2 in D2[el]:
err = y1[ind1]-y2[ind2]
if abs(err) < eps:
matching_dict[phi1_index,ind1,ind2] = err
return matching_dict
def make_positive_keys_chi3(pos_nus_lst,chi,eps=2e-4,pols = None, res = (1e-2,1e-2,1e-2) ):
'''
TODO: In the future, diff_func_4wv_1,diff_func_4wv_2 will be generated from
chi_nonlin.refraction_index_func in the form of a function
n_symb(pol=+1) -> sympy expression in lambd.
'''
## get the positive nus. Make a dict to the original index.
min_nu_value = min(pos_nus_lst)
max_nu_value = max(pos_nus_lst)
## phi1 = (nu1 + nu2) / 2, nu's are positive
phi1_min = min_nu_value
phi1_max = max_nu_value
## phi2 = (nu1 - nu2) / 2, nu's are positive, so phi2 spans a symmetric range
phi2_min = (min_nu_value - max_nu_value) / 2
phi2_max = (max_nu_value - min_nu_value) / 2
## nu_3 has no transformations
nu3_min = min_nu_value
nu3_max = max_nu_value
phi1_range = np.arange(phi1_min,phi1_max,res[0])
phi2_range = np.arange(phi2_min,phi2_max,res[1])
nu3_range = np.arange(nu3_min,nu3_max,res[2])
phi1_indices = range(len(phi1_range))
phi2_indices = range(len(phi2_range))
diff_func_4wv_1,diff_func_4wv_2 = generate_k_func_4wv(pols=pols)
matching_dict_hash = make_matching_dict_hash(
diff_func_4wv_1,diff_func_4wv_2,
phi1_range,phi2_range,nu3_range,eps=eps)
return matching_dict_hash
# def f_phi12_nu3(phi1,phi2,nu3):
# nus = [phi1 + phi2, phi1 - phi2, nu3]
# if any([abs(nu) < 6. or abs(nu) > 60. for nu in nus]):
# return float('NaN')
# return (diff_func_4wv_1(phi1_range[phi1],phi2_range[phi2])
# - diff_func_4wv_2(phi1_range[phi1],nu3_range[nu3]) )
if __name__ == "__main__":
eps = 2e-4
phi1_min = 30.
phi1_max = 34.
phi2_min = -13
phi2_max = -9
nu3_min = -26.
nu3_max = -16.
phi1_range = np.arange(phi1_min,phi1_max,0.01)
phi2_range = np.arange(phi2_min,phi2_max,0.01)
nu3_range = np.arange(nu3_min,nu3_max,0.01)
phi1_indices = range(len(phi1_range))
phi2_indices = range(len(phi2_range))
diff_func_4wv_1,diff_func_4wv_2 = generate_k_func_4wv(pols=(-1,-1,-1,-1))
matching_dict_hash = make_matching_dict_hash(
diff_func_4wv_1,diff_func_4wv_2,
phi1_range,phi2_range,nu3_range,eps=eps)
print len(matching_dict_hash)
| 6,627 | 31.019324 | 91 | py |
potapov_interpolation | potapov_interpolation-master/Potapov_Code/tests/tests_Hamiltonian_dict.py | from .. import Potapov
from .. import Roots
from .. import Time_Delay_Network
from .. import functions
from .. import Time_Sims_nonlin
from .. import Hamiltonian
from .. import phase_matching
import numpy.testing as testing
import sympy as sp
import numpy as np
import numpy.linalg as la
from scipy.integrate import ode
import scipy.constants as consts
import matplotlib.pyplot as plt
import time
def make_lin_H_from_dict(ham,Omega,eps=1e-3):
'''Make a linear Hamiltonian based on Omega.
Args:
Omega (complex-valued matrix):
A matrix that describes the Hamiltonian of the system.
Returns:
Expression (sympy expression):
A symbolic expression for the nonlinear Hamiltonian.
'''
ham.make_dict_H_lin(Omega)
H_lin_sp = sp.Float(0.)
for i in range(ham.m):
for j in range(ham.m):
H_lin_sp += ham.Dagger(ham.a[i])*ham.a[j] * ham.Hamiltonian_dict_lin[(i,j),(+1,-1)]
return H_lin_sp
def make_nonlin_H(ham,filtering_phase_weights=False,eps=1e-5):
'''Old version of make_nonlin_H. To be used for testing.
'''
H_nonlin_sp = sp.Float(0.)
for chi in ham.chi_nonlinearities:
weight_keys = ham.make_weight_keys(chi)
phase_matching_weights = ham.make_phase_matching_weights(
weight_keys,chi,filtering_phase_weights,eps)
for combination,pm_arr in phase_matching_weights:
omegas_to_use = map(lambda i: ham.omegas[i],combination)
omegas_with_sign = [omega * pm for omega,pm
in zip(omegas_to_use,pm_arr)]
pols = map(lambda i: ham.polarizations[i],chi.delay_indices)
chi_args = omegas_with_sign + pols
H_nonlin_sp += ( ham.make_nonlin_term_sympy(combination,pm_arr) *
chi.chi_function(*chi_args) *
phase_matching_weights[combination,pm_arr] *
np.prod([ham.E_field_weights[i] for i in combination]) )
return H_nonlin_sp
def compare_dict_with_old_hams():
'''
Make the symbolic expression either directly or with a dictionary and
confirm that the results are the same.
'''
## Make a sample Time_Delay_Network, changing some parameters.
X = Time_Delay_Network.Example3(r1 = 0.7, r3 = 0.7, max_linewidth=35.,max_freq=15.)
## run the Potapov procedure.
## Setting commensurate_roots to True will tell the program to identify
## the roots utilizing the periodic structure of the roots.
X.run_Potapov(commensurate_roots = True)
## Get the roots, modes, and delays from the Time_Delay_Network.
modes = X.spatial_modes
roots = X.roots
delays = X.delays
## Generate ABCD matrices for the passive system.
## These matrices are not doubled up.
A,B,C,D = X.get_Potapov_ABCD(doubled=False)
## Generate doubled-up ABCD matrices for the passive system.
A_d,B_d,C_d,D_d = X.get_Potapov_ABCD(doubled=True)
M = len(A)
## make an instance of Hamiltonian.
## The non-Hermitian part of A dictates the linear internal dynamics of the system
## the Hermitian part of A dictates the linear decay of the internal modes.
ham = Hamiltonian.Hamiltonian(roots,modes,delays,Omega=-1j*A,nonlin_coeff = 0.)
## Add a chi nonlinearity to ham.
ham.make_chi_nonlinearity(delay_indices=[0],start_nonlin=0,
length_nonlin=0.1,
chi_order=3)
eps = 1e-5
Omega = -1j*A ## the same Omega used to construct ham
lin_H_sp_from_dict = make_lin_H_from_dict(ham,Omega)
lin_H_sp = ham.make_lin_H(Omega)
expr_lin = lin_H_sp_from_dict - lin_H_sp
D = {el:1 for el in ham.a}
D.update({ham.Dagger(el):1 for el in ham.a})
expr_lin = expr_lin.subs(D)
assert (abs(expr_lin) < eps)
ham.make_dict_H_nonlin()
nonlin_H_sp_from_dict = ham.make_nonlin_H() ## built via the dictionary
nonlin_H_sp = make_nonlin_H(ham) ## built directly by the old version above
expr_nonlin = nonlin_H_sp_from_dict - nonlin_H_sp
D = {el:1 for el in ham.a}
D.update({ham.Dagger(el):1 for el in ham.a})
expr_nonlin = expr_nonlin.subs(D)
assert (abs(expr_nonlin) < eps)
if __name__ == "__main__":
compare_dict_with_old_hams()
| 4,177 | 32.693548 | 99 | py |
potapov_interpolation | potapov_interpolation-master/Potapov_Code/tests/tests_commensurate_root.py | from .. import Potapov
from .. import Roots
from .. import Time_Delay_Network
from .. import functions
from .. import Time_Sims_nonlin
from .. import Hamiltonian
from .. import phase_matching
import numpy.testing as testing
import numpy as np
import numpy.linalg as la
from scipy.integrate import ode
import scipy.constants as consts
import matplotlib.pyplot as plt
import time
def test_commensurate_vecs_example_3():
times = [time.clock()]
X = Time_Delay_Network.Example3(tau1 = 0.1, tau2 = 0.2,tau3 = 0.1,tau4 = 0.2,)
times.append(time.clock())
X.make_commensurate_roots([(-60000,60000)])
times.append(time.clock())
X.make_commensurate_vecs()
times.append(time.clock())
times.append(time.clock())
#print len(X.vecs)
assert(len(X.roots) == len(X.vecs))
times.append(time.clock())
X.make_T_Testing()
times.append(time.clock())
T_testing = X.T_testing
T = X.T
print abs(T(-10j)-T_testing(-10j))
print abs(T(-100j)-T_testing(-100j))
print abs(T(-200j)-T_testing(-200j))
print [times[i+1]-times[i] for i in range(len(times)-1)]
if __name__ == "__main__":
test_commensurate_vecs_example_3()
| 1,218 | 26.088889 | 82 | py |
potapov_interpolation | potapov_interpolation-master/Potapov_Code/tests/tests_phase_matching.py | from .. import Potapov
from .. import Roots
from .. import Time_Delay_Network
from .. import functions
from .. import Time_Sims_nonlin
from .. import Hamiltonian
from .. import phase_matching
import numpy.testing as testing
import numpy as np
import numpy.linalg as la
from scipy.integrate import ode
import scipy.constants as consts
import random
import matplotlib.pyplot as plt
import time
def plot_arr(arr,name='no_name'):
fig = plt.figure(figsize=(15,15))
ax = fig.add_subplot(111)
cax = ax.matshow(np.asmatrix(arr), interpolation='nearest')
fig.colorbar(cax)
plt.savefig('plot of matrix ' + name + '.pdf')
def plot_voxels(solution_containing_voxels,base,i):
voxels = np.zeros((1+pow(base,i+1),1+pow(base,i+1)))
for (i1,i2) in solution_containing_voxels[i]:
voxels[i1,i2] = 1
plot_arr(voxels, name='voxels with resolution ' + str(i) )
def test_phase_matching_chi_2(plot=False,
eps = 0.006,
starting_i = 0,
max_i = 2,
base = 10,
pols = (1,1,-1),
min_value = 6.,
max_value = 11.,
):
k_of_omega1_omega2 = phase_matching.generate_k_func(pols)
ranges = phase_matching.setup_ranges(max_i,base,min_value = min_value,max_value = max_value)
solution_containing_voxels = phase_matching.voxel_solutions(ranges,k_of_omega1_omega2,
max_i,base,starting_i,eps)
if plot:
for i in range(max_i+1):
plot_voxels(solution_containing_voxels,base,i)
def test_get_freqs_from_ham():
pols = (1,1,-1)
eps = 0.006
starting_i = 0
max_i = 2
base = 10
min_value = 6.
max_value = 20.
## The positive nu's to use.
pos_nus_lst = np.random.uniform(min_value,max_value,20000)
## assign random polarizations
polarizations = 2*np.random.randint(0,2,20000)-1
## Generate interacting triplets
positive_omega_indices = phase_matching.make_positive_keys_chi2(pos_nus_lst,None,pols = pols)
print len(positive_omega_indices)
positive_omega_indices = [indices for indices in positive_omega_indices
if all([pols[j] == polarizations[i] for j,i in enumerate(indices)]) ]
print len(positive_omega_indices)
def test_Hamiltonian_calling_make_weight_keys( pols = (1,1,-1),
eps = 0.006,
starting_i = 0,
max_i = 2,
base = 10,
min_value = 6.,
max_value = 20.):
## The positive nu's to use.
pos_nus_lst = np.random.uniform(min_value,max_value,50000)
ham = Hamiltonian.Hamiltonian([],[],[])
ham.omegas = [nu * 1e13 / (2*consts.pi) for nu in pos_nus_lst]
## assign random polarizations
ham.polarizations = 2*np.random.randint(0,2,50000)-1
## make a nonlinearity of order 2 (make_weight_keys checks for this)
ham.make_chi_nonlinearity(delay_indices=[0],start_nonlin=0,
length_nonlin=0.1,
chi_order=2)
chi = ham.chi_nonlinearities[0]
## Use the make_weight_keys() with the selected ham and chi with
## the correct key_types
weight_keys = Hamiltonian.Hamiltonian.make_weight_keys(ham, chi, key_types = 'search_voxels', pols = (1,1,-1) )
L = len(weight_keys)
print L
if L > 0:
print weight_keys[0]
else:
print "zero weight keys"
if __name__ == "__main__":
test_phase_matching_chi_2(plot=True)
test_get_freqs_from_ham()
| 3,821 | 31.389831 | 115 | py |
potapov_interpolation | potapov_interpolation-master/Potapov_Code/tests/tests_time_perturbations.py | from .. import Potapov
from .. import Roots
from .. import Time_Delay_Network
from .. import functions
from .. import Time_Sims_nonlin
from .. import Hamiltonian
from .. import phase_matching
import numpy.testing as testing
import numpy as np
import numpy.linalg as la
from scipy.integrate import ode
import scipy.constants as consts
import matplotlib.pyplot as plt
import time
def test_altered_delay_pert(plot=False,eps=1e-5):
r'''
We will have a method to shift the delays in the network before the
commensurate root analysis, which will be based on taking the average
Delta_delays that result from the nonlinearities over the different
frequencies. We test this here.
It also tests the corresponding perturbation in the frequencies.
We assume that the refraction_index_func and the input delays into
the Time_Delay_Network have been adjusted so that refraction_index_func
is close to zero in the desired frequency range.
There are several effects of the delays being different for different
modes. The most important one is an effective detuning for different
modes (as well as decay). There are other effects as well. The effective
mode volume will also change (this is taken into account in the
Hamitonian class). However, this is not taken into account in the Potapov
expansion because it becomes computationally difficult and the effect
will be small. This could be done in principle. The time delays in the
transfer function could be written as a function of frequency,
:math:`T = T(\omega)`.
The above function can be analytically continued to the complex plane.
Then the transfer function would be expressed
in terms of :math:`exp(-z T) = exp ( -z T (z))`.
Once this is done, the complex root-finding procedure can be applied.
The difficulty in using this approach is that the resulting functions no
longer have a periodic structure that we could identify when the delays
were commensurate.
'''
Ex = Time_Delay_Network.Example3( max_linewidth=15.,max_freq=500.)
Ex.run_Potapov(commensurate_roots=True)
modes = Ex.spatial_modes
A,B,C,D = Ex.get_Potapov_ABCD(doubled=False)
ham = Hamiltonian.Hamiltonian(Ex.roots,modes,Ex.delays,Omega=-1j*A,
nonlin_coeff = 1.)
## This nonlinearity will depend on the frequency.
chi_nonlin_test = Hamiltonian.Chi_nonlin(delay_indices=[0],start_nonlin=0,
length_nonlin=0.1*consts.c)
chi_nonlin_test.refraction_index_func = lambda freq, pol: 1. + abs(freq / (5000*np.pi))
ham.chi_nonlinearities.append(chi_nonlin_test)
## update delays, which are different becuase of the nonlinearity.
ham.make_Delta_delays()
#print ham.Delta_delays
## Perturb the roots to account for deviations in the index of refraction
## as a function of frequency.
# print ham.roots
perturb_func = Ex.get_frequency_pertub_func_z(use_ufuncify = True)
ham.perturb_roots_z(perturb_func)
# print ham.roots
print len(ham.roots)
# plt.plot(ham.omegas)
if plot:
plt.scatter(np.asarray(ham.roots).real,np.asarray(ham.roots).imag)
plt.show()
# TODO: make a function to perturb in several steps to avoid root-skipping.
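# Hedged sketch of the analytic-continuation idea described in the docstring of
# test_altered_delay_pert above: when a delay depends on frequency, T = T(omega),
# the transfer function's exponential factors become exp(-z * T(z)) after
# continuing T to the complex plane. The toy linear delay model below (with
# made-up parameters T0 and slope) is for illustration only; it is not a
# quantity defined elsewhere in this package.
def _illustrate_frequency_dependent_delay(z, T0=1.0, slope=1e-3):
    '''Evaluate exp(-z*T(z)) for the toy delay model T(z) = T0 + slope*z.'''
    T_of_z = T0 + slope * z
    return np.exp(-z * T_of_z)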
def test_delay_perturbations(eps=1e-5):
'''
    This function tests the perturbations of the delays for each frequency.
It also tests the corresponding perturbation in the frequencies.
'''
Ex = Time_Delay_Network.Example3( max_linewidth=15.,max_freq=30.)
Ex.run_Potapov(commensurate_roots=True)
modes = Ex.spatial_modes
M = len(Ex.roots)
A,B,C,D = Ex.get_Potapov_ABCD(doubled=False)
ham = Hamiltonian.Hamiltonian(Ex.roots,modes,Ex.delays,Omega=-1j*A,
nonlin_coeff = 0.)
ham.make_chi_nonlinearity(delay_indices=[0],start_nonlin=0,
length_nonlin=0.1*consts.c)
ham.make_Delta_delays()
#print ham.Delta_delays
for row in ham.Delta_delays:
for el in row:
assert(el == 0)
## Now let's make a non-trivial nonlinearity.
## turn on the nonlin_coeff
ham.nonlin_coeff = 1.
## set the index of refraction to be 2 for the nonlinearity
ham.chi_nonlinearities[0].refraction_index_func = lambda *args: 2.
ham.make_Delta_delays()
# print ham.Delta_delays
## Next, generate the perturb_func and perturb the roots
#print Ex.roots
perturb_func = Ex.get_frequency_pertub_func_z(use_ufuncify = True)
ham.perturb_roots_z(perturb_func)
if __name__ == "__main__":
test_altered_delay_pert(plot=True)
| 4,649 | 34.769231 | 91 | py |
potapov_interpolation | potapov_interpolation-master/Potapov_Code/tests/tests_Hamiltonian.py | from .. import Potapov
from .. import Roots
from .. import Time_Delay_Network
from .. import functions
from .. import Time_Sims_nonlin
from .. import Hamiltonian
from .. import phase_matching
import numpy.testing as testing
import numpy as np
import numpy.linalg as la
from scipy.integrate import ode
import scipy.constants as consts
import matplotlib.pyplot as plt
import time
from sympy.physics.quantum import *
from sympy.physics.quantum.boson import *
from sympy.physics.quantum.operatorordering import *
from qnet.algebra.circuit_algebra import *
def test_Hamiltonian_with_doubled_equations(eps=1e-5):
'''
This method tests various methods in Hamiltonian and Time_Sims_nonlin.
In particular, we compare the output from the classical equations of motion
that results directly from the ABCD model versus the classical Hamiltonian
equations of motion when we set the coefficient of the nonlinearity to zero.
This method will NOT test the details of the nonlinear Hamiltonian.
Args:
eps[optional(float)]: how closely each point in time along the two
tested trajectories should match.
'''
## Make a sample Time_Delay_Network, changing some parameters.
X = Time_Delay_Network.Example3(r1 = 0.7, r3 = 0.7, max_linewidth=35.,max_freq=15.)
## run the Potapov procedure.
## Setting commensurate_roots to True will tell the program to identify
## the roots utilizing the periodic structure of the roots.
X.run_Potapov(commensurate_roots = True)
## Get the roots, modes, and delays from the Time_Delay_Network.
modes = X.spatial_modes
roots = X.roots
delays = X.delays
    ## Generate ABCD matrices for the passive system.
    ## These matrices are not doubled up.
    A,B,C,D = X.get_Potapov_ABCD(doubled=False)
    ## Generate doubled-up ABCD matrices for the passive system.
    ## These matrices are doubled up.
    A_d,B_d,C_d,D_d = X.get_Potapov_ABCD(doubled=True)
M = len(A)
## make an instance of Hamiltonian.
## The non-Hermitian part of A dictates the linear internal dynamics of the system
## the Hermitian part of A dictates the linear decay of the internal modes.
ham = Hamiltonian.Hamiltonian(roots,modes,delays,Omega=-1j*A,nonlin_coeff = 0.)
## Add a chi nonlinearity to ham.
ham.make_chi_nonlinearity(delay_indices=[0],start_nonlin=0,
length_nonlin=0.1,
chi_order=3)
H = ham.make_H()
## Make the classical equation of motion from Hamilton's equations.
eq_mot = ham.make_eq_motion()
## make a sample input function
a_in = lambda t: np.asmatrix([1.]*np.shape(D_d)[-1]).T
## find f, the system evolution function from Hamilton's equations
f = Time_Sims_nonlin.make_f(eq_mot,B_d,a_in)
## Generate the linear equations of motion from the original linear system matrices
f_lin = Time_Sims_nonlin.make_f_lin(A_d,B_d,a_in)
## Simulate the systems (both linear and nonlinear).
Y_lin = Time_Sims_nonlin.run_ODE(f_lin, a_in, C_d, D_d, 2*M, T = 15, dt = 0.01)
Y_nonlin = Time_Sims_nonlin.run_ODE(f, a_in, C_d, D_d, 2*M, T = 15, dt = 0.01)
for y_lin,y_nonlin in zip(Y_lin,Y_nonlin):
assert abs(sum(y_lin - y_nonlin)) < eps
if __name__ == "__main__":
test_Hamiltonian_with_doubled_equations()
| 3,371 | 33.762887 | 87 | py |
potapov_interpolation | potapov_interpolation-master/Potapov_Code/tests/tests_Time_Delay_Network.py | from .. import Potapov
from .. import Roots
from .. import Time_Delay_Network
from .. import functions
from .. import Time_Sims_nonlin
from .. import Hamiltonian
from .. import phase_matching
import numpy.testing as testing
import numpy as np
import numpy.linalg as la
from scipy.integrate import ode
import scipy.constants as consts
import matplotlib.pyplot as plt
import time
def test_example_1():
Ex = Time_Delay_Network.Example1()
Ex.run_Potapov()
E = Ex.E
roots = Ex.roots
M1 = Ex.M1
delays = Ex.delays
modes = functions.spatial_modes(roots,M1,E,delays)
assert( len(roots) == 3)
def test_example_2():
Ex = Time_Delay_Network.Example2()
Ex.run_Potapov()
E = Ex.E
roots = Ex.roots
M1 = Ex.M1
delays = Ex.delays
modes = functions.spatial_modes(roots,M1,E,delays)
assert( len(roots) == 7)
def test_example_3():
Ex = Time_Delay_Network.Example3()
Ex.run_Potapov()
E = Ex.E
roots = Ex.roots
M1 = Ex.M1
delays = Ex.delays
modes = functions.spatial_modes(roots,M1,E,delays)
assert( len(roots) == 11)
def test_example_4():
Ex = Time_Delay_Network.Example4()
Ex.run_Potapov()
E = Ex.E
roots = Ex.roots
M1 = Ex.M1
delays = Ex.delays
modes = functions.spatial_modes(roots,M1,E,delays)
assert( len(roots) == 8)
def test_commensurate_roots_example_3():
X = Time_Delay_Network.Example3()
X.make_commensurate_roots()
assert(len(X.roots) == 0)
X.make_commensurate_roots([(0,1000)])
# assert(len(X.roots) == 91)
# X.make_commensurate_roots([(0,10000)])
# assert(len(X.roots) == 931)
# X.make_commensurate_roots([(0,10000),(1e15,1e15 +10000)])
# assert(len(X.roots) == 1891)
if __name__ == "__main__":
test_example_1()
test_example_2()
test_example_3()
test_example_4()
test_commensurate_roots_example_3()
| 1,887 | 23.205128 | 63 | py |
potapov_interpolation | potapov_interpolation-master/Potapov_Code/tests/__init__.py | from .. import Potapov
from .. import Roots
from .. import Time_Delay_Network
from .. import functions
from .. import Time_Sims_nonlin
from .. import Hamiltonian
from .. import phase_matching
import numpy.testing as testing
import numpy as np
import numpy.linalg as la
from scipy.integrate import ode
import scipy.constants as consts
import matplotlib.pyplot as plt
import time
| 380 | 21.411765 | 33 | py |
potapov_interpolation | potapov_interpolation-master/Potapov_Code/tests/tests_Potapov.py | from .. import Potapov
from .. import Roots
from .. import Time_Delay_Network
from .. import functions
from .. import Time_Sims_nonlin
from .. import Hamiltonian
from .. import phase_matching
import numpy.testing as testing
import numpy as np
import numpy.linalg as la
from scipy.integrate import ode
import scipy.constants as consts
import matplotlib.pyplot as plt
import time
def test_Potapov_1(eps=1e-7):
'''
Generate a finite_transfer_function from eigenvectors and eigenvalues.
Then generate a Potapov product from the finite transfer function. These
should be analytically equal. We test to see if they are close within some
precision.
'''
vals = [1-1j,-1+1j, 2+2j]
vecs = [ Potapov.normalize(np.matrix([-5.,4j])).T, Potapov.normalize(np.matrix([1j,3.]).T),
Potapov.normalize(np.matrix([2j,7.]).T)]
T = Potapov.finite_transfer_function(np.eye(2),vecs,vals)
T_test = Potapov.get_Potapov(T,vals,vecs)
points = [0.,10j,10.,-10j,10.+10j]
assert all(np.amax(abs(T(z) - T_test(z))) < eps for z in points)
if __name__ == "__main__":
test_Potapov_1()
| 1,120 | 28.5 | 95 | py |
potapov_interpolation | potapov_interpolation-master/Potapov_Code/tests/tests_Roots.py | from .. import Potapov
from .. import Roots
from .. import Time_Delay_Network
from .. import functions
from .. import Time_Sims_nonlin
from .. import Hamiltonian
from .. import phase_matching
import numpy.testing as testing
import numpy as np
import numpy.linalg as la
from scipy.integrate import ode
import scipy.constants as consts
import matplotlib.pyplot as plt
import time
def get_root_bounds(roots):
x_lmt = [None,None]
y_lmt = [None,None]
for root in roots:
if x_lmt[0] is None or x_lmt[0]>root.real:
x_lmt[0] = root.real
if x_lmt[1] is None or x_lmt[1]<root.real:
x_lmt[1] = root.real
if y_lmt[0] is None or y_lmt[0]>root.imag:
y_lmt[0] = root.imag
if y_lmt[1] is None or y_lmt[1]<root.imag:
y_lmt[1] = root.imag
return x_lmt, y_lmt
def almost_equal(el1,el2,eps=1e-7):
if abs(el1 - el2) < eps:
return True
else: return False
def two_sets_almost_equal(S1,S2,eps=1e-7):
'''
Tests if two iterables have the same elements up to some tolerance eps.
Args:
S1,S2 (lists): two lists
eps (optional[float]): precision for testing each elements
Returns:
True if the two sets are equal up to eps, false otherwise
'''
if len(S1) != len(S2):
return False
    ran2 = list(range(len(S2)))  # list, so that matched indices can be removed below
for i in range(len(S1)):
found_match = False
for j in ran2:
if almost_equal(S1[i],S2[j],eps):
found_match = True
ran2.remove(j)
break
if not found_match:
return False
return True
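# Quick illustration of two_sets_almost_equal (hypothetical values, not part of
# the test suite): two_sets_almost_equal([1.0, 2.0], [2.0 + 1e-9, 1.0]) is True,
# since every element finds a match within eps regardless of ordering, while
# two_sets_almost_equal([1.0], [1.0, 2.0]) is False because the lengths differ.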
def test_Roots_1():
'''
Make a square of length just under 5*pi. Find the roots of sine.
'''
N=5000
f = lambda z: np.sin(z)
fp = lambda z: np.cos(z)
x_cent = 0.
y_cent = 0.
width = 5.*np.pi-1e-5
height = 5.*np.pi-1e-5
ret, retRoots = Roots.get_roots_rect(f,fp,x_cent,y_cent,width,height,N)
roots = np.asarray(retRoots)
roots_inside_boundary = Roots.inside_boundary(roots,x_cent,y_cent,width,height)
    print(two_sets_almost_equal(np.asarray(roots_inside_boundary)/np.pi,
        [-4.,-3.,-2.,-1.,-0.,1.,2.,3.,4.] ))
def test_Roots_2():
'''
Make a square of length just over 5*pi. Find the roots of sine.
'''
N=5000
f = lambda z: np.sin(z)
fp = lambda z: np.cos(z)
x_cent = 0.
y_cent = 0.
width = 5.*np.pi+1e-5
height = 5.*np.pi+1e-5
ret, retRoots = Roots.get_roots_rect(f,fp,x_cent,y_cent,width,height,N)
roots = np.asarray(retRoots)
roots_inside_boundary = Roots.inside_boundary(roots,x_cent,y_cent,width,height)
    print(two_sets_almost_equal(np.asarray(roots_inside_boundary)/np.pi,
        [-5.,-4.,-3.,-2.,-1.,-0.,1.,2.,3.,4.,5.] ))
def test_Poly_Roots(N, printRoots=False, printPolys=False, printParams=False, doubleOnWarning=False):
print "\nN=" + str(N)
coeff = []
for n in range(N):
coeff.append((n+1)*1.0+(n+1)*1.0j)
roots_numpy = np.roots(coeff)
bnds = get_root_bounds(roots_numpy)
poly = np.poly1d(coeff)
poly_diff = np.polyder(poly)
    N = 5000  # N is reused here as the root finder's sample-point count
max_steps = 5
f = lambda z: poly(z)
fp = lambda z: poly_diff(z)
width = (bnds[0][1]-bnds[0][0])/2.
height = (bnds[1][1]-bnds[1][0])/2.
x_cent = bnds[0][0] + width
y_cent = bnds[1][0] + height
width += 0.1
height += 0.1
if printPolys:
        print(poly)
        print(poly_diff)
ret = -1
while ret==-1 or (doubleOnWarning and ret!=0):
# Doubling is for test purposes.
if ret & Roots.warn_imprecise_roots:
N *= 2
elif ret & Roots.warn_max_steps_exceeded:
max_steps *= 2
if printParams:
print "x_cent:" + str(x_cent)
print "y_cent:" + str(y_cent)
print "width:" + str(width)
print "height:" + str(height)
print "N:" + str(N)
print "max_steps:" + str(max_steps)
ret, roots_gil = Roots.get_roots_rect(f,fp,x_cent,y_cent,width,height,N,max_steps=max_steps,verbose=False,summary=True)
roots_gil = np.asarray(roots_gil)
roots_gil = Roots.inside_boundary(roots_gil,x_cent,y_cent,width,height)
print "\t" + str(len(roots_numpy)) + " numpy roots"
print "\t" + str(len(roots_gil)) + " gil roots"
common = 0
for root_numpy in roots_numpy:
for root_gil in roots_gil:
if almost_equal(root_numpy, root_gil,eps=1e-5):
common += 1
break
print "\t" + str(common) + " common roots"
if printRoots:
for root in sorted(roots_numpy):
            print(str(root) + " \t" + str(f(root)))
        print()
        for root in sorted(roots_gil):
            print(str(root) + " \t" + str(f(root)))
def test_Roots_3(printRoots=False, printPolys=False, printParams=False, doubleOnWarning=False):
for N in range(2,51):
test_Poly_Roots(N,printRoots,printPolys,printParams,doubleOnWarning)
if __name__ == "__main__":
test_Roots_1()
test_Roots_2()
test_Roots_3()
| 5,024 | 28.733728 | 127 | py |
potapov_interpolation | potapov_interpolation-master/Potapov_Code/tests/tests_phase_matching_hash.py | from .. import Potapov
from .. import Roots
from .. import Time_Delay_Network
from .. import functions
from .. import Time_Sims_nonlin
from .. import Hamiltonian
from .. import phase_matching_hash
import numpy.testing as testing
import numpy as np
import numpy.linalg as la
from scipy.integrate import ode
import scipy.constants as consts
import random
import matplotlib.pyplot as plt
import time
def test_make_positive_keys_chi3( eps = 2e-4,
pols = (-1,-1,-1,-1),
res = (1e-1,1e-1,1e-1),
min_value = 10.,
max_value = 55.
):
pos_nus_lst = np.random.uniform(min_value,max_value,500)
    ## make a Hamiltonian with a chi-3 nonlinearity.
ham = Hamiltonian.Hamiltonian([],[],[])
ham.make_chi_nonlinearity(delay_indices=[0],start_nonlin=0,
length_nonlin=0.1,
chi_order=3)
chi = ham.chi_nonlinearities[0]
matching_dict_hash = phase_matching_hash.make_positive_keys_chi3(pos_nus_lst, chi, eps=eps, pols = pols, res = res )
    print(len(matching_dict_hash))
return
def test_Hamiltonian_calling_make_weight_keys(eps = 2e-4,
pols = (-1,-1,-1,-1),
min_value = 10.,
max_value = 55.):
## The positive nu's to use.
pos_nus_lst = np.random.uniform(min_value,max_value,500)
ham = Hamiltonian.Hamiltonian([],[],[])
ham.omegas = [nu * 1e13 / (2*consts.pi) for nu in pos_nus_lst]
## assign random polarizations
ham.polarizations = 2*np.random.randint(0,2,500)-1
    ## make a nonlinearity of order 3 (make_weight_keys checks for this)
ham.make_chi_nonlinearity(delay_indices=[0],start_nonlin=0,
length_nonlin=0.1,
chi_order=3)
chi = ham.chi_nonlinearities[0]
## Use the make_weight_keys() with the selected ham and chi with
## the correct key_types
weight_keys = Hamiltonian.Hamiltonian.make_weight_keys(ham, chi, key_types = 'hash_method', pols = pols, )
L = len(weight_keys)
    print(L)
    if L > 0:
        print(weight_keys[0])
    else:
        print("zero weight keys")
return
if __name__ == "__main__":
    test_make_positive_keys_chi3()
    test_Hamiltonian_calling_make_weight_keys()
| 2,465 | 31.88 | 120 | py |
potapov_interpolation | potapov_interpolation-master/docs/conf.py | # -*- coding: utf-8 -*-
#
# Potapov_interpolation documentation build configuration file, created by
# sphinx-quickstart on Mon Apr 25 15:36:39 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# from recommonmark.parser import CommonMarkParser
#
# source_parsers = {
# '.md': CommonMarkParser,
# }
source_suffix = ['.rst', '.md']
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../../Potapov_Code'))
import mock
MOCK_MODULES = ['numpy', 'scipy', 'matplotlib', 'matplotlib.pyplot',
'numpy.linalg','scipy.constants','scipy.integrate',
'scipy.optimize',
'numpy.testing',
'matplotlib.patches',
'qnet.algebra.circuit_algebra',
'sympy.utilities.autowrap'
#'mpmath','mpmath.libmp','mpmath.libmp.backend',
#'mpmath.libmp.libmpc','mpmath.libmp.libmpf',
#'mpmath.libmp.gammazeta',
]
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = mock.Mock()
sys.modules.update((mod_name, mock.Mock()) for mod_name in MOCK_MODULES)
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinxcontrib.napoleon',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Potapov_interpolation'
copyright = u'2016, Gil Tabak'
author = u'Gil Tabak'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Potapov_interpolationdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Potapov_interpolation.tex', u'Potapov\\_interpolation Documentation',
u'Gil Tabak', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'potapov_interpolation', u'Potapov_interpolation Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Potapov_interpolation', u'Potapov_interpolation Documentation',
author, 'Potapov_interpolation', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
| 10,438 | 30.923547 | 85 | py |
gnn_cff | gnn_cff-main/graphconv.py | """Torch modules for graph convolutions (GCN)."""
# pylint: disable= no-member, arguments-differ, invalid-name
import torch as th
from torch import nn
from torch.nn import init
from .... import function as fn
from ....base import DGLError
from ....utils import expand_as_pair
from ....transform import reverse
from ....convert import block_to_graph
from ....heterograph import DGLBlock
class EdgeWeightNorm(nn.Module):
r"""
Description
-----------
This module normalizes positive scalar edge weights on a graph
following the form in `GCN <https://arxiv.org/abs/1609.02907>`__.
Mathematically, setting ``norm='both'`` yields the following normalization term:
.. math:
c_{ji} = (\sqrt{\sum_{k\in\mathcal{N}(j)}e_{jk}}\sqrt{\sum_{k\in\mathcal{N}(i)}e_{ki}})
And, setting ``norm='right'`` yields the following normalization term:
.. math:
c_{ji} = (\sum_{k\in\mathcal{N}(i)}}e_{ki})
where :math:`e_{ji}` is the scalar weight on the edge from node :math:`j` to node :math:`i`.
The module returns the normalized weight :math:`e_{ji} / c_{ji}`.
Parameters
----------
norm : str, optional
The normalizer as specified above. Default is `'both'`.
eps : float, optional
A small offset value in the denominator. Default is 0.
Examples
--------
>>> import dgl
>>> import numpy as np
>>> import torch as th
>>> from dgl.nn import EdgeWeightNorm, GraphConv
>>> g = dgl.graph(([0,1,2,3,2,5], [1,2,3,4,0,3]))
>>> g = dgl.add_self_loop(g)
>>> feat = th.ones(6, 10)
>>> edge_weight = th.tensor([0.5, 0.6, 0.4, 0.7, 0.9, 0.1, 1, 1, 1, 1, 1, 1])
>>> norm = EdgeWeightNorm(norm='both')
>>> norm_edge_weight = norm(g, edge_weight)
>>> conv = GraphConv(10, 2, norm='none', weight=True, bias=True)
>>> res = conv(g, feat, edge_weight=norm_edge_weight)
>>> print(res)
tensor([[-1.1849, -0.7525],
[-1.3514, -0.8582],
[-1.2384, -0.7865],
[-1.9949, -1.2669],
[-1.3658, -0.8674],
[-0.8323, -0.5286]], grad_fn=<AddBackward0>)
"""
def __init__(self, norm='both', eps=0.):
super(EdgeWeightNorm, self).__init__()
self._norm = norm
self._eps = eps
def forward(self, graph, edge_weight):
r"""
Description
-----------
Compute normalized edge weight for the GCN model.
Parameters
----------
graph : DGLGraph
The graph.
edge_weight : torch.Tensor
Unnormalized scalar weights on the edges.
The shape is expected to be :math:`(|E|)`.
Returns
-------
torch.Tensor
The normalized edge weight.
Raises
------
DGLError
Case 1:
The edge weight is multi-dimensional. Currently this module
only supports a scalar weight on each edge.
Case 2:
The edge weight has non-positive values with ``norm='both'``.
This will trigger square root and division by a non-positive number.
"""
with graph.local_scope():
if isinstance(graph, DGLBlock):
graph = block_to_graph(graph)
if len(edge_weight.shape) > 1:
raise DGLError('Currently the normalization is only defined '
'on scalar edge weight. Please customize the '
'normalization for your high-dimensional weights.')
if self._norm == 'both' and th.any(edge_weight <= 0).item():
raise DGLError('Non-positive edge weight detected with `norm="both"`. '
'This leads to square root of zero or negative values.')
dev = graph.device
graph.srcdata['_src_out_w'] = th.ones((graph.number_of_src_nodes())).float().to(dev)
graph.dstdata['_dst_in_w'] = th.ones((graph.number_of_dst_nodes())).float().to(dev)
graph.edata['_edge_w'] = edge_weight
if self._norm == 'both':
reversed_g = reverse(graph)
reversed_g.edata['_edge_w'] = edge_weight
reversed_g.update_all(fn.copy_edge('_edge_w', 'm'), fn.sum('m', 'out_weight'))
degs = reversed_g.dstdata['out_weight'] + self._eps
norm = th.pow(degs, -0.5)
graph.srcdata['_src_out_w'] = norm
if self._norm != 'none':
graph.update_all(fn.copy_edge('_edge_w', 'm'), fn.sum('m', 'in_weight'))
degs = graph.dstdata['in_weight'] + self._eps
if self._norm == 'both':
norm = th.pow(degs, -0.5)
else:
norm = 1.0 / degs
graph.dstdata['_dst_in_w'] = norm
graph.apply_edges(lambda e: {'_norm_edge_weights': e.src['_src_out_w'] * \
e.dst['_dst_in_w'] * \
e.data['_edge_w']})
return graph.edata['_norm_edge_weights']
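# Worked illustration of the normalizers above (assumed toy numbers): for an
# edge j -> i where node j has weighted out-degree 4.0 and node i has weighted
# in-degree 9.0, norm='both' returns e_ji / (sqrt(4.0) * sqrt(9.0)) = e_ji / 6.0,
# while norm='right' returns e_ji / 9.0.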
# # pylint: disable=W0235
# class GraphConv(nn.Module):
# r"""
#
# Description
# -----------
# Graph convolution was introduced in `GCN <https://arxiv.org/abs/1609.02907>`__
# and mathematically is defined as follows:
#
# .. math::
# h_i^{(l+1)} = \sigma(b^{(l)} + \sum_{j\in\mathcal{N}(i)}\frac{1}{c_{ji}}h_j^{(l)}W^{(l)})
#
# where :math:`\mathcal{N}(i)` is the set of neighbors of node :math:`i`,
# :math:`c_{ji}` is the product of the square root of node degrees
# (i.e., :math:`c_{ji} = \sqrt{|\mathcal{N}(j)|}\sqrt{|\mathcal{N}(i)|}`),
# and :math:`\sigma` is an activation function.
#
# If a weight tensor on each edge is provided, the weighted graph convolution is defined as:
#
# .. math::
# h_i^{(l+1)} = \sigma(b^{(l)} + \sum_{j\in\mathcal{N}(i)}\frac{e_{ji}}{c_{ji}}h_j^{(l)}W^{(l)})
#
# where :math:`e_{ji}` is the scalar weight on the edge from node :math:`j` to node :math:`i`.
# This is NOT equivalent to the weighted graph convolutional network formulation in the paper.
#
# To customize the normalization term :math:`c_{ji}`, one can first set ``norm='none'`` for
# the model, and send the pre-normalized :math:`e_{ji}` to the forward computation. We provide
# :class:`~dgl.nn.pytorch.EdgeWeightNorm` to normalize scalar edge weight following the GCN paper.
#
# Parameters
# ----------
# in_feats : int
# Input feature size; i.e, the number of dimensions of :math:`h_j^{(l)}`.
# out_feats : int
# Output feature size; i.e., the number of dimensions of :math:`h_i^{(l+1)}`.
# norm : str, optional
# How to apply the normalizer. If is `'right'`, divide the aggregated messages
# by each node's in-degrees, which is equivalent to averaging the received messages.
# If is `'none'`, no normalization is applied. Default is `'both'`,
# where the :math:`c_{ji}` in the paper is applied.
# weight : bool, optional
# If True, apply a linear layer. Otherwise, aggregating the messages
# without a weight matrix.
# bias : bool, optional
# If True, adds a learnable bias to the output. Default: ``True``.
# activation : callable activation function/layer or None, optional
# If not None, applies an activation function to the updated node features.
# Default: ``None``.
# allow_zero_in_degree : bool, optional
# If there are 0-in-degree nodes in the graph, output for those nodes will be invalid
# since no message will be passed to those nodes. This is harmful for some applications
# causing silent performance regression. This module will raise a DGLError if it detects
# 0-in-degree nodes in input graph. By setting ``True``, it will suppress the check
# and let the users handle it by themselves. Default: ``False``.
#
# Attributes
# ----------
# weight : torch.Tensor
# The learnable weight tensor.
# bias : torch.Tensor
# The learnable bias tensor.
#
# Note
# ----
# Zero in-degree nodes will lead to invalid output value. This is because no message
    # will be passed to those nodes, the aggregation function will be applied on empty input.
# A common practice to avoid this is to add a self-loop for each node in the graph if
# it is homogeneous, which can be achieved by:
#
# >>> g = ... # a DGLGraph
# >>> g = dgl.add_self_loop(g)
#
# Calling ``add_self_loop`` will not work for some graphs, for example, heterogeneous graph
# since the edge type can not be decided for self_loop edges. Set ``allow_zero_in_degree``
# to ``True`` for those cases to unblock the code and handle zero-in-degree nodes manually.
    # A common practice to handle this is to filter out the zero-in-degree nodes when using
    # the output after conv.
#
# Examples
# --------
# >>> import dgl
# >>> import numpy as np
# >>> import torch as th
# >>> from dgl.nn import GraphConv
#
# >>> # Case 1: Homogeneous graph
# >>> g = dgl.graph(([0,1,2,3,2,5], [1,2,3,4,0,3]))
# >>> g = dgl.add_self_loop(g)
# >>> feat = th.ones(6, 10)
# >>> conv = GraphConv(10, 2, norm='both', weight=True, bias=True)
# >>> res = conv(g, feat)
# >>> print(res)
# tensor([[ 1.3326, -0.2797],
# [ 1.4673, -0.3080],
# [ 1.3326, -0.2797],
# [ 1.6871, -0.3541],
# [ 1.7711, -0.3717],
# [ 1.0375, -0.2178]], grad_fn=<AddBackward0>)
# >>> # allow_zero_in_degree example
# >>> g = dgl.graph(([0,1,2,3,2,5], [1,2,3,4,0,3]))
# >>> conv = GraphConv(10, 2, norm='both', weight=True, bias=True, allow_zero_in_degree=True)
# >>> res = conv(g, feat)
# >>> print(res)
# tensor([[-0.2473, -0.4631],
# [-0.3497, -0.6549],
# [-0.3497, -0.6549],
# [-0.4221, -0.7905],
# [-0.3497, -0.6549],
# [ 0.0000, 0.0000]], grad_fn=<AddBackward0>)
#
# >>> # Case 2: Unidirectional bipartite graph
# >>> u = [0, 1, 0, 0, 1]
# >>> v = [0, 1, 2, 3, 2]
# >>> g = dgl.heterograph({('_U', '_E', '_V') : (u, v)})
# >>> u_fea = th.rand(2, 5)
# >>> v_fea = th.rand(4, 5)
# >>> conv = GraphConv(5, 2, norm='both', weight=True, bias=True)
# >>> res = conv(g, (u_fea, v_fea))
# >>> res
# tensor([[-0.2994, 0.6106],
# [-0.4482, 0.5540],
# [-0.5287, 0.8235],
# [-0.2994, 0.6106]], grad_fn=<AddBackward0>)
# """
# def __init__(self,
# in_feats,
# out_feats,
# norm='both',
# weight=True,
# bias=True,
# activation=None,
# allow_zero_in_degree=False):
# super(GraphConv, self).__init__()
# if norm not in ('none', 'both', 'right'):
# raise DGLError('Invalid norm value. Must be either "none", "both" or "right".'
# ' But got "{}".'.format(norm))
# self._in_feats = in_feats
# self._out_feats = out_feats
# self._norm = norm
# self._allow_zero_in_degree = allow_zero_in_degree
#
# if weight:
# self.weight = nn.Parameter(th.Tensor(in_feats, out_feats))
# else:
# self.register_parameter('weight', None)
#
# if bias:
# self.bias = nn.Parameter(th.Tensor(out_feats))
# else:
# self.register_parameter('bias', None)
#
# self.reset_parameters()
#
# self._activation = activation
#
# def reset_parameters(self):
# r"""
#
# Description
# -----------
# Reinitialize learnable parameters.
#
# Note
# ----
# The model parameters are initialized as in the
# `original implementation <https://github.com/tkipf/gcn/blob/master/gcn/layers.py>`__
# where the weight :math:`W^{(l)}` is initialized using Glorot uniform initialization
# and the bias is initialized to be zero.
#
# """
# if self.weight is not None:
# init.xavier_uniform_(self.weight)
# if self.bias is not None:
# init.zeros_(self.bias)
#
# def set_allow_zero_in_degree(self, set_value):
# r"""
#
# Description
# -----------
# Set allow_zero_in_degree flag.
#
# Parameters
# ----------
# set_value : bool
# The value to be set to the flag.
# """
# self._allow_zero_in_degree = set_value
#
# def forward(self, graph, feat, weight=None, edge_weight=None):
# r"""
#
# Description
# -----------
# Compute graph convolution.
#
# Parameters
# ----------
# graph : DGLGraph
# The graph.
# feat : torch.Tensor or pair of torch.Tensor
# If a torch.Tensor is given, it represents the input feature of shape
# :math:`(N, D_{in})`
# where :math:`D_{in}` is size of input feature, :math:`N` is the number of nodes.
# If a pair of torch.Tensor is given, which is the case for bipartite graph, the pair
# must contain two tensors of shape :math:`(N_{in}, D_{in_{src}})` and
# :math:`(N_{out}, D_{in_{dst}})`.
# weight : torch.Tensor, optional
# Optional external weight tensor.
# edge_weight : torch.Tensor, optional
# Optional tensor on the edge. If given, the convolution will weight
# with regard to the message.
#
# Returns
# -------
# torch.Tensor
# The output feature
#
# Raises
# ------
# DGLError
# Case 1:
# If there are 0-in-degree nodes in the input graph, it will raise DGLError
# since no message will be passed to those nodes. This will cause invalid output.
# The error can be ignored by setting ``allow_zero_in_degree`` parameter to ``True``.
#
# Case 2:
# External weight is provided while at the same time the module
# has defined its own weight parameter.
#
# Note
# ----
# * Input shape: :math:`(N, *, \text{in_feats})` where * means any number of additional
# dimensions, :math:`N` is the number of nodes.
# * Output shape: :math:`(N, *, \text{out_feats})` where all but the last dimension are
# the same shape as the input.
# * Weight shape: :math:`(\text{in_feats}, \text{out_feats})`.
# """
# with graph.local_scope():
# if not self._allow_zero_in_degree:
# if (graph.in_degrees() == 0).any():
# raise DGLError('There are 0-in-degree nodes in the graph, '
# 'output for those nodes will be invalid. '
# 'This is harmful for some applications, '
# 'causing silent performance regression. '
# 'Adding self-loop on the input graph by '
# 'calling `g = dgl.add_self_loop(g)` will resolve '
# 'the issue. Setting ``allow_zero_in_degree`` '
# 'to be `True` when constructing this module will '
# 'suppress the check and let the code run.')
# aggregate_fn = fn.copy_src('h', 'm')
# if edge_weight is not None:
# assert edge_weight.shape[0] == graph.number_of_edges()
# graph.edata['_edge_weight'] = edge_weight
# aggregate_fn = fn.u_mul_e('h', '_edge_weight', 'm')
#
# # (BarclayII) For RGCN on heterogeneous graphs we need to support GCN on bipartite.
# feat_src, feat_dst = expand_as_pair(feat, graph)
# if self._norm == 'both':
# degs = graph.out_degrees().float().clamp(min=1)
# norm = th.pow(degs, -0.5)
# shp = norm.shape + (1,) * (feat_src.dim() - 1)
# norm = th.reshape(norm, shp)
# feat_src = feat_src * norm
#
# if weight is not None:
# if self.weight is not None:
# raise DGLError('External weight is provided while at the same time the'
# ' module has defined its own weight parameter. Please'
# ' create the module with flag weight=False.')
# else:
# weight = self.weight
#
# if self._in_feats > self._out_feats:
# # mult W first to reduce the feature size for aggregation.
# if weight is not None:
# feat_src = th.matmul(feat_src, weight)
# graph.srcdata['h'] = feat_src
# graph.update_all(aggregate_fn, fn.sum(msg='m', out='h'))
# rst = graph.dstdata['h']
# else:
# # aggregate first then mult W
# graph.srcdata['h'] = feat_src
# graph.update_all(aggregate_fn, fn.sum(msg='m', out='h'))
# rst = graph.dstdata['h']
# if weight is not None:
# rst = th.matmul(rst, weight)
#
# if self._norm != 'none':
# degs = graph.in_degrees().float().clamp(min=1)
# if self._norm == 'both':
# norm = th.pow(degs, -0.5)
# else:
# norm = 1.0 / degs
# shp = norm.shape + (1,) * (feat_dst.dim() - 1)
# norm = th.reshape(norm, shp)
# rst = rst * norm
#
# if self.bias is not None:
# rst = rst + self.bias
#
# if self._activation is not None:
# rst = self._activation(rst)
#
# return rst
#
# def extra_repr(self):
# """Set the extra representation of the module,
# which will come into effect when printing the model.
# """
# summary = 'in={_in_feats}, out={_out_feats}'
# summary += ', normalization={_norm}'
# if '_activation' in self.__dict__:
# summary += ', activation={_activation}'
# return summary.format(**self.__dict__)
class GraphConv(nn.Module):
r"""
Description
-----------
Graph convolution was introduced in `GCN <https://arxiv.org/abs/1609.02907>`__
and mathematically is defined as follows:
.. math::
h_i^{(l+1)} = \sigma(b^{(l)} + \sum_{j\in\mathcal{N}(i)}\frac{1}{c_{ij}}h_j^{(l)}W^{(l)})
where :math:`\mathcal{N}(i)` is the set of neighbors of node :math:`i`,
:math:`c_{ij}` is the product of the square root of node degrees
(i.e., :math:`c_{ij} = \sqrt{|\mathcal{N}(i)|}\sqrt{|\mathcal{N}(j)|}`),
and :math:`\sigma` is an activation function.
Parameters
----------
in_feats : int
Input feature size; i.e, the number of dimensions of :math:`h_j^{(l)}`.
out_feats : int
Output feature size; i.e., the number of dimensions of :math:`h_i^{(l+1)}`.
norm : str, optional
How to apply the normalizer. If is `'right'`, divide the aggregated messages
by each node's in-degrees, which is equivalent to averaging the received messages.
If is `'none'`, no normalization is applied. Default is `'both'`,
where the :math:`c_{ij}` in the paper is applied.
weight : bool, optional
If True, apply a linear layer. Otherwise, aggregating the messages
without a weight matrix.
bias : bool, optional
If True, adds a learnable bias to the output. Default: ``True``.
activation : callable activation function/layer or None, optional
If not None, applies an activation function to the updated node features.
Default: ``None``.
allow_zero_in_degree : bool, optional
If there are 0-in-degree nodes in the graph, output for those nodes will be invalid
since no message will be passed to those nodes. This is harmful for some applications
causing silent performance regression. This module will raise a DGLError if it detects
0-in-degree nodes in input graph. By setting ``True``, it will suppress the check
and let the users handle it by themselves. Default: ``False``.
Attributes
----------
weight : torch.Tensor
The learnable weight tensor.
bias : torch.Tensor
The learnable bias tensor.
Note
----
Zero in-degree nodes will lead to invalid output value. This is because no message
    will be passed to those nodes, the aggregation function will be applied on empty input.
A common practice to avoid this is to add a self-loop for each node in the graph if
it is homogeneous, which can be achieved by:
>>> g = ... # a DGLGraph
>>> g = dgl.add_self_loop(g)
Calling ``add_self_loop`` will not work for some graphs, for example, heterogeneous graph
since the edge type can not be decided for self_loop edges. Set ``allow_zero_in_degree``
    to ``True`` for those cases to unblock the code and handle zero-in-degree nodes manually.
    A common practice to handle this is to filter out the zero-in-degree nodes when using
    the output after conv.
Examples
--------
>>> import dgl
>>> import numpy as np
>>> import torch as th
>>> from dgl.nn import GraphConv
>>> # Case 1: Homogeneous graph
>>> g = dgl.graph(([0,1,2,3,2,5], [1,2,3,4,0,3]))
>>> g = dgl.add_self_loop(g)
>>> feat = th.ones(6, 10)
>>> conv = GraphConv(10, 2, norm='both', weight=True, bias=True)
>>> res = conv(g, feat)
>>> print(res)
tensor([[ 1.3326, -0.2797],
[ 1.4673, -0.3080],
[ 1.3326, -0.2797],
[ 1.6871, -0.3541],
[ 1.7711, -0.3717],
[ 1.0375, -0.2178]], grad_fn=<AddBackward0>)
>>> # allow_zero_in_degree example
>>> g = dgl.graph(([0,1,2,3,2,5], [1,2,3,4,0,3]))
>>> conv = GraphConv(10, 2, norm='both', weight=True, bias=True, allow_zero_in_degree=True)
>>> res = conv(g, feat)
>>> print(res)
tensor([[-0.2473, -0.4631],
[-0.3497, -0.6549],
[-0.3497, -0.6549],
[-0.4221, -0.7905],
[-0.3497, -0.6549],
[ 0.0000, 0.0000]], grad_fn=<AddBackward0>)
>>> # Case 2: Unidirectional bipartite graph
>>> u = [0, 1, 0, 0, 1]
>>> v = [0, 1, 2, 3, 2]
    >>> g = dgl.heterograph({('_U', '_E', '_V') : (u, v)})
>>> u_fea = th.rand(2, 5)
>>> v_fea = th.rand(4, 5)
>>> conv = GraphConv(5, 2, norm='both', weight=True, bias=True)
>>> res = conv(g, (u_fea, v_fea))
>>> res
tensor([[-0.2994, 0.6106],
[-0.4482, 0.5540],
[-0.5287, 0.8235],
[-0.2994, 0.6106]], grad_fn=<AddBackward0>)
"""
def __init__(self,
in_feats,
out_feats,
norm='both',
weight=True,
bias=True,
activation=None,
allow_zero_in_degree=False):
super(GraphConv, self).__init__()
if norm not in ('none', 'both', 'right'):
raise DGLError('Invalid norm value. Must be either "none", "both" or "right".'
' But got "{}".'.format(norm))
self._in_feats = in_feats
self._out_feats = out_feats
self._norm = norm
self._allow_zero_in_degree = allow_zero_in_degree
if weight:
self.weight = nn.Parameter(th.Tensor(in_feats, out_feats))
else:
self.register_parameter('weight', None)
if bias:
self.bias = nn.Parameter(th.Tensor(out_feats))
else:
self.register_parameter('bias', None)
self.reset_parameters()
self._activation = activation
def reset_parameters(self):
r"""
Description
-----------
Reinitialize learnable parameters.
Note
----
The model parameters are initialized as in the
`original implementation <https://github.com/tkipf/gcn/blob/master/gcn/layers.py>`__
where the weight :math:`W^{(l)}` is initialized using Glorot uniform initialization
and the bias is initialized to be zero.
"""
if self.weight is not None:
init.xavier_uniform_(self.weight)
if self.bias is not None:
init.zeros_(self.bias)
def set_allow_zero_in_degree(self, set_value):
r"""
Description
-----------
Set allow_zero_in_degree flag.
Parameters
----------
set_value : bool
The value to be set to the flag.
"""
self._allow_zero_in_degree = set_value
def forward(self, graph, feat, eweight, weight=None):
r"""
Description
-----------
Compute graph convolution.
Parameters
----------
graph : DGLGraph
The graph.
feat : torch.Tensor or pair of torch.Tensor
If a torch.Tensor is given, it represents the input feature of shape
:math:`(N, D_{in})`
where :math:`D_{in}` is size of input feature, :math:`N` is the number of nodes.
If a pair of torch.Tensor is given, which is the case for bipartite graph, the pair
must contain two tensors of shape :math:`(N_{in}, D_{in_{src}})` and
:math:`(N_{out}, D_{in_{dst}})`.
eweight : torch.Tensor of shape (E, 1)
Edge weights, E for the number of edges.
weight : torch.Tensor, optional
Optional external weight tensor.
Returns
-------
torch.Tensor
The output feature
Raises
------
DGLError
Case 1:
If there are 0-in-degree nodes in the input graph, it will raise DGLError
since no message will be passed to those nodes. This will cause invalid output.
The error can be ignored by setting ``allow_zero_in_degree`` parameter to ``True``.
Case 2:
External weight is provided while at the same time the module
has defined its own weight parameter.
Note
----
* Input shape: :math:`(N, *, \text{in_feats})` where * means any number of additional
dimensions, :math:`N` is the number of nodes.
* Output shape: :math:`(N, *, \text{out_feats})` where all but the last dimension are
the same shape as the input.
* Weight shape: :math:`(\text{in_feats}, \text{out_feats})`.
"""
with graph.local_scope():
if not self._allow_zero_in_degree:
if (graph.in_degrees() == 0).any():
raise DGLError('There are 0-in-degree nodes in the graph, '
'output for those nodes will be invalid. '
'This is harmful for some applications, '
'causing silent performance regression. '
'Adding self-loop on the input graph by '
'calling `g = dgl.add_self_loop(g)` will resolve '
'the issue. Setting ``allow_zero_in_degree`` '
'to be `True` when constructing this module will '
'suppress the check and let the code run.')
# (BarclayII) For RGCN on heterogeneous graphs we need to support GCN on bipartite.
feat_src, feat_dst = expand_as_pair(feat, graph)
if self._norm == 'both':
degs = graph.out_degrees().float().clamp(min=1)
norm = th.pow(degs, -0.5)
shp = norm.shape + (1,) * (feat_src.dim() - 1)
norm = th.reshape(norm, shp)
feat_src = feat_src * norm
if weight is not None:
if self.weight is not None:
raise DGLError('External weight is provided while at the same time the'
' module has defined its own weight parameter. Please'
' create the module with flag weight=False.')
else:
weight = self.weight
# Set edge weights
graph.edata['w'] = eweight
if self._in_feats > self._out_feats:
# mult W first to reduce the feature size for aggregation.
if weight is not None:
feat_src = th.matmul(feat_src, weight)
graph.srcdata['h'] = feat_src
# Changed from fn.copy_src to fn.u_mul_e
graph.update_all(fn.u_mul_e(lhs_field='h', rhs_field='w', out='m'),
fn.sum(msg='m', out='h'))
rst = graph.dstdata['h']
else:
# aggregate first then mult W
graph.srcdata['h'] = feat_src
# Changed from fn.copy_src to fn.u_mul_e
graph.update_all(fn.u_mul_e(lhs_field='h', rhs_field='w', out='m'),
fn.sum(msg='m', out='h'))
rst = graph.dstdata['h']
if weight is not None:
rst = th.matmul(rst, weight)
if self._norm != 'none':
degs = graph.in_degrees().float().clamp(min=1)
if self._norm == 'both':
norm = th.pow(degs, -0.5)
else:
norm = 1.0 / degs
shp = norm.shape + (1,) * (feat_dst.dim() - 1)
norm = th.reshape(norm, shp)
rst = rst * norm
if self.bias is not None:
rst = rst + self.bias
if self._activation is not None:
rst = self._activation(rst)
return rst
def extra_repr(self):
"""Set the extra representation of the module,
which will come into effect when printing the model.
"""
summary = 'in={_in_feats}, out={_out_feats}'
summary += ', normalization={_norm}'
if '_activation' in self.__dict__:
summary += ', activation={_activation}'
        return summary.format(**self.__dict__)
 | 30,598 | 40.072483 | 102 | py |
gnn_cff | gnn_cff-main/models/gcn.py | import numpy as np
from dgl.nn.pytorch import GraphConv
import dgl
import torch
# class GCNGraphNew(torch.nn.Module):
# def __init__(self, in_feats, h_feats):
# super(GCNGraphNew, self).__init__()
# self.conv1 = GraphConv(in_feats, h_feats)
# self.conv2 = GraphConv(h_feats, h_feats)
# self.conv3 = GraphConv(h_feats, h_feats)
# self.dense = torch.nn.Linear(h_feats, 1)
# self.maxpool = dgl.nn.pytorch.glob.MaxPooling()
# def forward(self, g, in_feat, e_weight):
# h = self.conv1(g, in_feat, e_weight)
# h = torch.nn.functional.relu(h)
# h = self.conv2(g, h, e_weight)
# h = torch.nn.functional.relu(h)
# h = self.conv3(g, h, e_weight)
# h = torch.nn.functional.relu(h)
# g.ndata['h'] = h
# h = self.maxpool(g, h) # pooling
# h = self.dense(h)
# h = torch.nn.functional.sigmoid(h)
# return h
class GCNGraph(torch.nn.Module):
def __init__(self, in_feats, h_feats):
super(GCNGraph, self).__init__()
self.conv1 = GraphConv(in_feats, h_feats)
self.conv2 = GraphConv(h_feats, h_feats)
self.conv3 = GraphConv(h_feats, h_feats)
self.dense1 = torch.nn.Linear(h_feats, 16)
self.dense2 = torch.nn.Linear(16, 8)
self.dense3 = torch.nn.Linear(8, 1)
def forward(self, g, in_feat, e_weight):
h = self.conv1(g, in_feat, e_weight)
h = torch.nn.functional.relu(h)
h = self.conv2(g, h, e_weight)
h = torch.nn.functional.relu(h)
h = self.conv3(g, h, e_weight)
h = torch.nn.functional.relu(h)
g.ndata['h'] = h
h = dgl.mean_nodes(g, 'h') # pooling
h = self.dense1(h)
h = torch.nn.functional.relu(h)
h = self.dense2(h)
h = torch.nn.functional.relu(h)
h = self.dense3(h)
        h = torch.sigmoid(h)  # torch.nn.functional.sigmoid is deprecated
return h
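# Minimal usage sketch for GCNGraph (a toy graph with assumed shapes; it relies
# on the patched GraphConv from graphconv.py, which takes edge weights as its
# third positional argument, rather than stock DGL):
#
#   g = dgl.add_self_loop(dgl.graph(([0, 1, 2], [1, 2, 0])))
#   g.ndata['feat'] = torch.ones(g.num_nodes(), 7)
#   g.edata['weight'] = torch.ones(g.num_edges())
#   model = GCNGraph(in_feats=7, h_feats=32)
#   prob = model(g, g.ndata['feat'].float(), g.edata['weight'])  # shape (1, 1)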
class GCNNodeBAShapes(torch.nn.Module):
# TODO
def __init__(self, in_feats, h_feats, num_classes, device, if_exp=False):
super(GCNNodeBAShapes, self).__init__()
self.conv1 = GraphConv(in_feats, h_feats)
self.conv2 = GraphConv(h_feats, h_feats)
self.conv3 = GraphConv(h_feats, num_classes)
self.if_exp = if_exp
self.device = device
def forward(self, g, in_feat, e_weight, target_node):
# map target node index
x = torch.cat((torch.tensor([0]).to(self.device), torch.cumsum(g.batch_num_nodes(), dim=0)), dim=0)[:-1]
target_node = target_node + x
h = self.conv1(g, in_feat, e_weight)
h = torch.nn.functional.relu(h)
h = self.conv2(g, h, e_weight)
h = torch.nn.functional.relu(h)
h = self.conv3(g, h, e_weight)
        if self.if_exp:  # in explanation mode, add a softmax layer
            h = torch.nn.functional.softmax(h, dim=-1)
g.ndata['h'] = h
return g.ndata['h'][target_node]
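# Index-mapping illustration for the cumsum offset trick used above (assumed
# batch): for a batch of two graphs with 5 and 7 nodes, batch_num_nodes() is
# [5, 7], so the offsets are [0, 5]; per-graph target nodes [2, 3] are shifted
# to the batched node ids [2, 8].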
class GCNNodeTreeCycles(torch.nn.Module):
# TODO
def __init__(self, in_feats, h_feats, num_classes, if_exp=False):
super(GCNNodeTreeCycles, self).__init__()
self.conv1 = GraphConv(in_feats, h_feats)
self.conv2 = GraphConv(h_feats, h_feats)
self.conv3 = GraphConv(h_feats, num_classes)
self.if_exp = if_exp
def forward(self, g, in_feat, e_weight, target_node):
# map target node index
x = torch.cat((torch.tensor([0]), torch.cumsum(g.batch_num_nodes(), dim=0)), dim=0)[:-1]
target_node = target_node + x
h = self.conv1(g, in_feat, e_weight)
h = torch.nn.functional.relu(h)
h = self.conv2(g, h, e_weight)
h = torch.nn.functional.relu(h)
h = self.conv3(g, h, e_weight)
        if self.if_exp:  # in explanation mode, add a sigmoid layer
            h = torch.sigmoid(h)  # torch.nn.functional.sigmoid is deprecated
g.ndata['h'] = h
return g.ndata['h'][target_node]
class GCNNodeCiteSeer(torch.nn.Module):
# TODO
def __init__(self, in_feats, h_feats, num_classes, if_exp=False):
super(GCNNodeCiteSeer, self).__init__()
self.conv1 = GraphConv(in_feats, h_feats)
self.conv2 = GraphConv(h_feats, num_classes)
self.if_exp = if_exp
def forward(self, g, in_feat, e_weight, target_node):
# map target node index
x = torch.cat((torch.tensor([0]), torch.cumsum(g.batch_num_nodes(), dim=0)), dim=0)[:-1]
target_node = target_node + x
h = self.conv1(g, in_feat, e_weight)
h = torch.nn.functional.relu(h)
h = self.conv2(g, h, e_weight)
        if self.if_exp:  # in explanation mode, add a softmax layer
            h = torch.nn.functional.softmax(h, dim=-1)
g.ndata['h'] = h
return g.ndata['h'][target_node]
| 4,777 | 36.622047 | 112 | py |
gnn_cff | gnn_cff-main/models/explainer_models.py | import numpy as np
import torch
import math
import tqdm
import sys
import matplotlib.pyplot as plt
import networkx as nx
from utils.common_utils import mutag_dgl_to_networkx, get_mutag_color_dict, ba_shapes_dgl_to_networkx
class GraphExplainerEdge(torch.nn.Module):
def __init__(self, base_model, G_dataset, test_indices, args, fix_exp=None):
super(GraphExplainerEdge, self).__init__()
self.base_model = base_model
self.base_model.eval()
self.G_dataset = G_dataset
self.test_indices = test_indices
self.args = args
if fix_exp:
self.fix_exp = fix_exp * 2
else:
self.fix_exp = None
def explain_nodes_gnn_stats(self):
exp_dict = {} # {'gid': masked_adj, 'gid': mask_adj}
num_dict = {} # {'gid': exp_num, 'gid': exp_num}
        num = 200  # cap on the number of test graphs to explain
for gid in tqdm.tqdm(self.test_indices[:num]):
ori_pred = self.base_model(self.G_dataset.graphs[gid],
self.G_dataset.graphs[gid].ndata['feat'].float(),
self.G_dataset.graphs[gid].edata['weight'])[0, 0]
pred_label = torch.round(ori_pred)
ori_label = self.G_dataset.labels[gid]
if pred_label == 1 and ori_label == 1: # only explain why the graph is predicted as mutagenic
masked_adj, exp_num = self.explain(gid, ori_pred)
exp_dict[gid] = masked_adj
num_dict[gid] = exp_num
print('average number of exps:', sum(num_dict.values()) / len(num_dict.keys()))
PN = self.compute_pn(exp_dict)
PS = self.compute_ps(exp_dict)
acc, pre, rec, f1 = self.compute_precision_recall(exp_dict)
print('PN', PN)
print('PS', PS)
print('FNS', 2 * PN * PS / (PN + PS))
print('acc: ', acc, ' pre: ', pre, ' rec: ', rec, ' f1: ', f1)
return PN, PS, 2 * PN * PS / (PN + PS), sum(num_dict.values()) / len(num_dict.keys()), acc, pre, rec, f1
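    # Metric notes (descriptive only): PN is the fraction of explained graphs
    # whose prediction flips below 0.5 once the explanation edges are removed
    # (necessity); PS is the fraction still predicted above 0.5 when only the
    # explanation edges are kept (sufficiency); FNS = 2*PN*PS/(PN+PS) is their
    # harmonic mean.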
def explain(self, gid, ori_pred):
# only generate exps for the correct predictions for now (to be consistent with GNN Explainer).
explainer = ExplainModelGraph(
graph=self.G_dataset.graphs[gid],
base_model=self.base_model,
args=self.args
)
if self.args.gpu:
explainer = explainer.cuda()
# train explainer
optimizer = torch.optim.Adam(explainer.parameters(), lr=self.args.lr, weight_decay=0)
explainer.train()
for epoch in range(self.args.num_epochs):
explainer.zero_grad()
pred1, pred2 = explainer()
bpr1, bpr2, l1, loss = explainer.loss(
pred1[0, 0], pred2[0, 0], ori_pred, self.args.gam, self.args.lam, self.args.alp)
# if epoch % 200 == 0:
# print('bpr1: ', self.args.lam * self.args.alp * bpr1,
# 'bpr2:', self.args.lam * (1 - self.args.alp) * bpr2,
# 'l1', l1,
# 'loss', loss)
# print('bpr: ', 50 * bpr, 'l1', l1, 'loss', loss)
loss.backward()
optimizer.step()
        masked_adj = explainer.get_masked_adj()
new_edge_num = len(masked_adj[masked_adj > self.args.mask_thresh])
exp_num = new_edge_num / 2
return masked_adj, exp_num
def compute_pn(self, exp_dict):
pn_count = 0
for gid, masked_adj in exp_dict.items():
graph = self.G_dataset.graphs[gid]
if self.fix_exp:
thresh = masked_adj.flatten().sort(descending=True)[0][self.fix_exp+1]
else:
thresh = self.args.mask_thresh
ps_adj = (masked_adj > thresh).float()
pn_adj = graph.edata['weight'] - ps_adj
new_pre = self.base_model(graph, graph.ndata['feat'].float(), pn_adj)[0, 0]
if new_pre < 0.5:
pn_count += 1
pn = pn_count / len(exp_dict.keys())
return pn
def compute_ps(self, exp_dict):
ps_count = 0
for gid, masked_adj in exp_dict.items():
graph = self.G_dataset.graphs[gid]
if self.fix_exp:
thresh = masked_adj.flatten().sort(descending=True)[0][self.fix_exp+1]
else:
thresh = self.args.mask_thresh
ps_adj = (masked_adj > thresh).float()
new_pre = self.base_model(graph, graph.ndata['feat'].float(), ps_adj)[0, 0]
if new_pre > 0.5:
ps_count += 1
ps = ps_count / len(exp_dict.keys())
return ps
def compute_precision_recall(self, exp_dict):
pres = []
recalls = []
f1s = []
accs = []
for gid, masked_adj in exp_dict.items():
if self.fix_exp:
thresh = masked_adj.flatten().sort(descending=True)[0][self.fix_exp+1]
else:
thresh = self.args.mask_thresh
e_labels = self.G_dataset[gid][0].edata['label']
            new_edges = (masked_adj > thresh).numpy()
            old_edges = (self.G_dataset[gid][0].edata['weight'] > thresh).numpy()
int_map = map(int, new_edges)
new_edges = list(int_map)
int_map = map(int, old_edges)
old_edges = list(int_map)
exp_list = np.array(new_edges)
TP = 0
FP = 0
TN = 0
FN = 0
for i in range(len(e_labels)):
if exp_list[i] == 1:
if e_labels[i] == 1:
TP += 1
else:
FP += 1
else:
if e_labels[i] == 1:
FN += 1
else:
TN += 1
if TP != 0:
pre = TP / (TP + FP)
rec = TP / (TP + FN)
acc = (TP + TN) / (TP + FP + TN + FN)
f1 = 2 * pre * rec / (pre + rec)
else:
pre = 0
rec = 0
f1 = 0
acc = (TP + TN) / (TP + FP + TN + FN)
pres.append(pre)
recalls.append(rec)
f1s.append(f1)
accs.append(acc)
return np.mean(accs), np.mean(pres), np.mean(recalls), np.mean(f1s)
class ExplainModelGraph(torch.nn.Module):
def __init__(self, graph, base_model, args):
super(ExplainModelGraph, self).__init__()
self.graph = graph
self.num_nodes = len(self.graph.nodes())
self.base_model = base_model
self.args = args
self.adj_mask = self.construct_adj_mask()
# For masking diagonal entries
self.diag_mask = torch.ones(self.num_nodes, self.num_nodes) - torch.eye(self.num_nodes)
if self.args.gpu:
self.diag_mask = self.diag_mask.cuda()
def forward(self):
masked_adj = self.get_masked_adj()
# should be reversed in the future
pred1 = self.base_model(self.graph, self.graph.ndata['feat'].float(), masked_adj) # factual
pred2 = self.base_model(self.graph, self.graph.ndata['feat'].float(), self.graph.edata['weight'] - masked_adj) # counterfactual
return pred1, pred2
def loss(self, pred1, pred2, ori_pred, gam, lam, alp):
relu = torch.nn.ReLU()
bpr1 = relu(gam + 0.5 - pred1) # factual
bpr2 = relu(gam + pred2 - 0.5) # counterfactual
masked_adj = self.get_masked_adj()
L1 = torch.linalg.norm(masked_adj, ord=1)
loss = L1 + lam * (alp * bpr1 + (1 - alp) * bpr2)
return bpr1, bpr2, L1, loss
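    # Objective for the binary graph task, matching the code above:
    #   loss = ||M||_1 + lam * (alp * ReLU(gam + 0.5 - y_f)
    #                           + (1 - alp) * ReLU(gam + y_cf - 0.5))
    # where M is the relaxed mask, y_f the prediction on the masked (factual)
    # graph, and y_cf the prediction on its complement (counterfactual) graph.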
def construct_adj_mask(self):
mask = torch.nn.Parameter(torch.FloatTensor(self.num_nodes, self.num_nodes))
std = torch.nn.init.calculate_gain("relu") * math.sqrt(
2.0 / (self.num_nodes + self.num_nodes)
)
with torch.no_grad():
mask.normal_(1.0, std)
return mask
def get_masked_adj(self):
sym_mask = torch.sigmoid(self.adj_mask)
sym_mask = (sym_mask + sym_mask.t()) / 2
adj = self.graph.edata['weight']
flatten_sym_mask = torch.reshape(sym_mask, (-1, ))
masked_adj = adj * flatten_sym_mask
# masked_adj = masked_adj * self.diag_mask
return masked_adj
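# A minimal, self-contained sketch (added for illustration; not part of the
# original pipeline) of the relaxed-mask mechanics used by get_masked_adj
# above: unconstrained parameters are squashed with a sigmoid, symmetrized,
# applied to the flattened edge weights, and finally binarized. The names
# `_toy_mask_demo`, `soft`, and `hard` are hypothetical.
def _toy_mask_demo(n=4, thresh=0.5):
    import torch
    mask = torch.randn(n, n)            # unconstrained mask parameters
    sym = torch.sigmoid(mask)           # squash entries into (0, 1)
    sym = (sym + sym.t()) / 2           # enforce symmetry, as in the model
    adj = torch.ones(n * n)             # flattened dense edge weights
    soft = adj * sym.reshape(-1)        # relaxed (soft) explanation mask
    hard = (soft > thresh).float()      # binarized explanation
    return soft, hard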
class NodeExplainerEdgeMulti(torch.nn.Module):
def __init__(self, base_model, G_dataset, test_indices, args, fix_exp=None):
super(NodeExplainerEdgeMulti, self).__init__()
self.base_model = base_model
self.base_model.eval()
self.G_dataset = G_dataset
self.test_indices = test_indices
self.args = args
if fix_exp:
self.fix_exp = fix_exp * 2
else:
self.fix_exp = None
def explain_nodes_gnn_stats(self):
exp_dict = {} # {'gid': masked_adj, 'gid': mask_adj}
num_dict = {} # {'gid': exp_num, 'gid': exp_num}
pred_label_dict = {}
t_gid = []
for gid in tqdm.tqdm(self.test_indices):
ori_pred = self.base_model(self.G_dataset.graphs[gid],
self.G_dataset.graphs[gid].ndata['feat'].float(),
self.G_dataset.graphs[gid].edata['weight'], self.G_dataset.targets[gid])[0]
ori_pred_label = torch.argmax(ori_pred)
if self.args.dataset == 'citeseer':
ori_label = self.G_dataset.labels[gid]
else:
ori_label = torch.argmax(self.G_dataset.labels[gid])
if self.args.dataset == 'citeseer' or (ori_pred_label != 0 and ori_label != 0):
t_gid.append(gid)
masked_adj, exp_num = self.explain(gid, ori_pred_label)
exp_dict[gid] = masked_adj
num_dict[gid] = exp_num
pred_label_dict[gid] = ori_pred_label
print('average number of exps:', sum(num_dict.values()) / len(num_dict.keys()))
PN = self.compute_pn(exp_dict, pred_label_dict)
PS = self.compute_ps(exp_dict, pred_label_dict)
if self.args.dataset == 'citeseer':
acc = -1
pre = -1
rec = -1
f1 = -1
else:
acc, pre, rec, f1 = self.compute_precision_recall(exp_dict)
print('PN', PN)
print('PS', PS)
print('PNS', 2 * PN * PS / (PN + PS))
print('ave exp', sum(num_dict.values()) / len(num_dict.keys()))
print('acc: ', acc, ' pre: ', pre, ' rec: ', rec, ' f1: ', f1)
return PN, PS, 2 * PN * PS / (PN + PS), sum(num_dict.values()) / len(num_dict.keys()), acc, pre, rec, f1
def explain(self, gid, pred_label):
explainer = ExplainModelNodeMulti(
graph=self.G_dataset.graphs[gid],
base_model=self.base_model,
target_node=self.G_dataset.targets[gid],
args=self.args
)
if self.args.gpu:
explainer = explainer.cuda()
optimizer = torch.optim.Adam(explainer.parameters(), lr=self.args.lr, weight_decay=0)
explainer.train()
for epoch in range(self.args.num_epochs):
explainer.zero_grad()
pred1, pred2 = explainer()
bpr1, bpr2, l1, loss = explainer.loss(
pred1[0], pred2[0], pred_label, self.args.gam, self.args.lam, self.args.alp)
# if epoch % 201 == 0:
# print('bpr1: ', self.args.lam * self.args.alp * bpr1,
# 'bpr2:', self.args.lam * (1 - self.args.alp) * bpr2,
# 'l1', l1,
# 'loss', loss)
loss.backward()
optimizer.step()
masked_adj = explainer.get_masked_adj()
new_edge_num = len(masked_adj[masked_adj > self.args.mask_thresh])
exp_num = new_edge_num / 2
return masked_adj, exp_num
def compute_pn(self, exp_dict, pred_label_dict):
pn_count = 0
for gid, masked_adj in exp_dict.items():
graph = self.G_dataset.graphs[gid]
target = self.G_dataset.targets[gid]
ori_pred_label = pred_label_dict[gid]
if self.fix_exp:
if self.fix_exp > (len(masked_adj.flatten()) - 1):
thresh = masked_adj.flatten().sort(descending=True)[0][len(masked_adj.flatten()) - 1]
else:
thresh = masked_adj.flatten().sort(descending=True)[0][self.fix_exp + 1]
else:
thresh = self.args.mask_thresh
ps_adj = (masked_adj > thresh).float()
pn_adj = graph.edata['weight'] - ps_adj
new_pre = self.base_model(graph, graph.ndata['feat'].float(), pn_adj, target)[0]
new_label = torch.argmax(new_pre)
if new_label != ori_pred_label:
pn_count += 1
pn = pn_count / len(exp_dict.keys())
return pn
def compute_ps(self, exp_dict, pred_label_dict):
ps_count = 0
for gid, masked_adj in exp_dict.items():
graph = self.G_dataset.graphs[gid]
target = self.G_dataset.targets[gid]
ori_pred_label = pred_label_dict[gid]
if self.fix_exp:
if self.fix_exp > (len(masked_adj.flatten()) - 1):
thresh = masked_adj.flatten().sort(descending=True)[0][len(masked_adj.flatten()) - 1]
else:
thresh = masked_adj.flatten().sort(descending=True)[0][self.fix_exp + 1]
else:
thresh = self.args.mask_thresh
ps_adj = (masked_adj > thresh).float()
new_pre = self.base_model(graph, graph.ndata['feat'].float(), ps_adj, target)[0]
new_label = torch.argmax(new_pre)
if new_label == ori_pred_label:
ps_count += 1
ps = ps_count / len(exp_dict.keys())
return ps
def compute_precision_recall(self, exp_dict):
pres = []
recalls = []
f1s = []
accs = []
for gid, masked_adj in exp_dict.items():
if self.fix_exp:
if self.fix_exp > (len(masked_adj.flatten()) - 1):
thresh = masked_adj.flatten().sort(descending=True)[0][len(masked_adj.flatten()) - 1]
else:
thresh = masked_adj.flatten().sort(descending=True)[0][self.fix_exp + 1]
else:
thresh = self.args.mask_thresh
e_labels = self.G_dataset[gid][0].edata['gt']
            new_edges = (masked_adj > thresh).numpy()
            old_edges = (self.G_dataset[gid][0].edata['weight'] > thresh).numpy()
int_map = map(int, new_edges)
new_edges = list(int_map)
int_map = map(int, old_edges)
old_edges = list(int_map)
exp_list = np.array(new_edges)
TP = 0
FP = 0
TN = 0
FN = 0
for i in range(len(e_labels)):
if exp_list[i] == 1:
if e_labels[i] == 1:
TP += 1
else:
FP += 1
else:
if e_labels[i] == 1:
FN += 1
else:
TN += 1
# print('TP', TP, 'FP', FP, 'TN', TN, 'FN', FN)
if TP != 0:
pre = TP / (TP + FP)
rec = TP / (TP + FN)
acc = (TP + TN) / (TP + FP + TN + FN)
f1 = 2 * pre * rec / (pre + rec)
else:
pre = 0
rec = 0
f1 = 0
acc = (TP + TN) / (TP + FP + TN + FN)
pres.append(pre)
recalls.append(rec)
f1s.append(f1)
accs.append(acc)
return np.mean(accs), np.mean(pres), np.mean(recalls), np.mean(f1s)
class ExplainModelNodeMulti(torch.nn.Module):
"""
explain BA-shapes and CiteSeer
"""
def __init__(self, graph, base_model, target_node, args):
super(ExplainModelNodeMulti, self).__init__()
self.graph = graph
self.num_nodes = len(self.graph.nodes())
self.base_model = base_model
self.target_node = target_node
self.args = args
self.adj_mask = self.construct_adj_mask()
# For masking diagonal entries
self.diag_mask = torch.ones(self.num_nodes, self.num_nodes) - torch.eye(self.num_nodes)
if self.args.gpu:
self.diag_mask = self.diag_mask.cuda()
def forward(self):
masked_adj = self.get_masked_adj()
pred1 = self.base_model(self.graph, self.graph.ndata['feat'].float(),
masked_adj, self.target_node)
pred2 = self.base_model(self.graph, self.graph.ndata['feat'].float(),
self.graph.edata['weight'] - masked_adj,
self.target_node)
return pred1, pred2
def loss(self, pred1, pred2, pred_label, gam, lam, alp):
relu = torch.nn.ReLU()
f_next = torch.max(torch.cat((pred1[:pred_label],
pred1[pred_label+1:])))
cf_next = torch.max(torch.cat((pred2[:pred_label],
pred2[pred_label+1:])))
bpr1 = relu(gam + f_next - pred1[pred_label])
bpr2 = relu(gam + pred2[pred_label] - cf_next)
masked_adj = self.get_masked_adj()
L1 = torch.linalg.norm(masked_adj, ord=1)
loss = L1 + lam * (alp * bpr1 + (1 - alp) * bpr2)
return bpr1, bpr2, L1, loss
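    # Multi-class analogue of the binary objective: the factual term asks the
    # masked graph to keep pred_label ahead of the strongest competing class
    # (f_next) by margin gam, while the counterfactual term asks the
    # complement graph to push some other class (cf_next) ahead of pred_label.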
def construct_adj_mask(self):
mask = torch.nn.Parameter(torch.FloatTensor(self.num_nodes, self.num_nodes))
std = torch.nn.init.calculate_gain("relu") * math.sqrt(
2.0 / (self.num_nodes + self.num_nodes)
)
with torch.no_grad():
mask.normal_(1.0, std)
return mask
def get_masked_adj(self):
sym_mask = torch.sigmoid(self.adj_mask)
sym_mask = (sym_mask + sym_mask.t()) / 2
adj = self.graph.edata['weight']
flatten_sym_mask = torch.reshape(sym_mask, (-1, ))
masked_adj = adj * flatten_sym_mask
return masked_adj
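# A small, self-contained sketch (added for illustration; not part of the
# original pipeline) of the runner-up margin used in the multi-class loss
# above: the predicted class score is excised with torch.cat and the max of
# the remaining scores is the strongest competitor. All names are hypothetical
# and gam = 0.5 is assumed, matching the scripts' default.
def _toy_margin_demo():
    import torch
    pred = torch.tensor([0.1, 0.7, 0.2])   # toy class scores
    label = 1                              # predicted class index
    runner_up = torch.max(torch.cat((pred[:label], pred[label + 1:])))
    relu = torch.nn.ReLU()
    factual_margin = relu(0.5 + runner_up - pred[label])
    return runner_up, factual_margin      # tensor(0.2000), tensor(0.)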
class NodeExplainerFeatureMulti(torch.nn.Module):
def __init__(self, base_model, G_dataset, test_indices, args, fix_exp=None):
super(NodeExplainerFeatureMulti, self).__init__()
self.base_model = base_model
self.base_model.eval()
self.G_dataset = G_dataset
self.test_indices = test_indices
self.args = args
if fix_exp:
self.fix_exp = fix_exp * 2
else:
self.fix_exp = None
def explain_nodes_gnn_stats(self):
exp_dict = {} # {'gid': masked_adj, 'gid': mask_adj}
num_dict = {} # {'gid': exp_num, 'gid': exp_num}
pred_label_dict = {}
for gid in tqdm.tqdm(self.test_indices[:51]):
ori_pred = self.base_model(self.G_dataset.graphs[gid],
self.G_dataset.graphs[gid].ndata['feat'].float(),
self.G_dataset.graphs[gid].edata['weight'], self.G_dataset.targets[gid])[0]
ori_pred_label = torch.argmax(ori_pred)
if self.args.dataset == 'citeseer':
ori_label = self.G_dataset.labels[gid]
else:
ori_label = torch.argmax(self.G_dataset.labels[gid])
            if self.args.dataset == 'citeseer' or (ori_pred_label != 0 and ori_label != 0):  # only explain nodes predicted and labeled on the motif (non-base class)
print('explain gid: ', gid)
                print('num of edges (both directions): ', torch.sum(self.G_dataset[gid][0].edata['weight']))
masked_feat, exp_num = self.explain(gid, ori_pred_label)
exp_dict[gid] = masked_feat
num_dict[gid] = exp_num
pred_label_dict[gid] = ori_pred_label
print('average number of exps:', sum(num_dict.values()) / len(num_dict.keys()))
PN = self.compute_pn(exp_dict, pred_label_dict)
PS = self.compute_ps(exp_dict, pred_label_dict)
if self.args.dataset == 'citeseer':
acc = -1
pre = -1
rec = -1
f1 = -1
else:
acc, pre, rec, f1 = self.compute_precision_recall(exp_dict)
print('PN', PN)
print('PS', PS)
print('PNS', 2 * PN * PS / (PN + PS))
print('ave exp', sum(num_dict.values()) / len(num_dict.keys()))
print('acc: ', acc, ' pre: ', pre, ' rec: ', rec, ' f1: ', f1)
return PN, PS, 2 * PN * PS / (PN + PS), sum(num_dict.values()) / len(num_dict.keys()), acc, pre, rec, f1
def explain(self, gid, pred_label):
# only generate exps for the correct predictions for now (to be consistent with GNN Explainer).
explainer = ExplainModelNodeMultiFeature(
graph=self.G_dataset.graphs[gid],
base_model=self.base_model,
target_node=self.G_dataset.targets[gid],
args=self.args
)
print('ori label', self.G_dataset.labels[gid])
print('ori feat num', torch.sum(self.G_dataset.graphs[gid].ndata['feat']))
if self.args.gpu:
explainer = explainer.cuda()
# train explainer
optimizer = torch.optim.Adam(explainer.parameters(), lr=self.args.lr, weight_decay=0)
explainer.train()
for epoch in range(self.args.num_epochs):
explainer.zero_grad()
pred1, pred2 = explainer()
bpr1, bpr2, l1, loss = explainer.loss(
pred1[0], pred2[0], pred_label, self.args.gam, self.args.lam, self.args.alp)
if epoch % 200 == 0:
print('bpr1: ', self.args.lam * self.args.alp * bpr1,
'bpr2:', self.args.lam * (1 - self.args.alp) * bpr2,
'l1', l1,
'loss', loss)
loss.backward()
optimizer.step()
masked_feat = explainer.get_masked_feat()
new_feat_num = len(masked_feat[masked_feat > self.args.mask_thresh])
exp_num = new_feat_num
print('exp num', exp_num)
return masked_feat, exp_num
def compute_pn(self, exp_dict, pred_label_dict):
pn_count = 0
for gid, masked_feat in exp_dict.items():
graph = self.G_dataset.graphs[gid]
target = self.G_dataset.targets[gid]
ori_pred_label = pred_label_dict[gid]
if self.fix_exp:
thresh = masked_feat.flatten().sort(descending=True)[0][self.fix_exp+1]
else:
thresh = self.args.mask_thresh
ps_feat = (masked_feat > thresh).float()
pn_feat = graph.ndata['feat'] - ps_feat
new_pre = self.base_model(graph, pn_feat.float(), graph.edata['weight'], target)[0]
new_label = torch.argmax(new_pre)
if new_label != ori_pred_label:
pn_count += 1
pn = pn_count / len(exp_dict.keys())
return pn
def compute_ps(self, exp_dict, pred_label_dict):
ps_count = 0
for gid, masked_feat in exp_dict.items():
graph = self.G_dataset.graphs[gid]
target = self.G_dataset.targets[gid]
ori_pred_label = pred_label_dict[gid]
if self.fix_exp:
thresh = masked_feat.flatten().sort(descending=True)[0][self.fix_exp+1]
else:
thresh = self.args.mask_thresh
ps_feat = (masked_feat > thresh).float()
new_pre = self.base_model(graph, ps_feat.float(), graph.edata['weight'], target)[0]
new_label = torch.argmax(new_pre)
if new_label == ori_pred_label:
ps_count += 1
ps = ps_count / len(exp_dict.keys())
return ps
def compute_precision_recall(self, exp_dict):
pres = []
recalls = []
f1s = []
accs = []
for gid, masked_adj in exp_dict.items():
if self.fix_exp:
thresh = masked_adj.flatten().sort(descending=True)[0][self.fix_exp+1]
else:
thresh = self.args.mask_thresh
e_labels = self.G_dataset[gid][0].edata['gt']
            new_edges = (masked_adj > thresh).numpy()
            old_edges = (self.G_dataset[gid][0].edata['weight'] > thresh).numpy()
int_map = map(int, new_edges)
new_edges = list(int_map)
int_map = map(int, old_edges)
old_edges = list(int_map)
exp_list = np.array(new_edges)
TP = 0
FP = 0
TN = 0
FN = 0
for i in range(len(e_labels)):
if exp_list[i] == 1:
if e_labels[i] == 1:
TP += 1
else:
FP += 1
else:
if e_labels[i] == 1:
FN += 1
else:
TN += 1
if TP != 0:
pre = TP / (TP + FP)
rec = TP / (TP + FN)
acc = (TP + TN) / (TP + FP + TN + FN)
f1 = 2 * pre * rec / (pre + rec)
else:
pre = 0
rec = 0
f1 = 0
acc = (TP + TN) / (TP + FP + TN + FN)
pres.append(pre)
recalls.append(rec)
f1s.append(f1)
accs.append(acc)
return np.mean(accs), np.mean(pres), np.mean(recalls), np.mean(f1s)
class ExplainModelNodeMultiFeature(torch.nn.Module):
"""
explain BA-shapes and CiteSeer
"""
def __init__(self, graph, base_model, target_node, args):
super(ExplainModelNodeMultiFeature, self).__init__()
self.graph = graph
self.num_nodes = len(self.graph.nodes())
self.feat = self.graph.ndata['feat']
self.feat_dim = self.feat.shape[1]
self.base_model = base_model
self.target_node = target_node
self.args = args
self.feat_mask = self.construct_feat_mask()
def forward(self):
masked_feat = self.get_masked_feat() # masked adj is always the exp sub graph
pred1 = self.base_model(self.graph, masked_feat.float(),
self.graph.edata['weight'], self.target_node)
pred2 = self.base_model(self.graph, (self.feat - masked_feat).float(),
self.graph.edata['weight'],
self.target_node)
return pred1, pred2
def loss(self, pred1, pred2, pred_label, gam, lam, alp):
relu = torch.nn.ReLU()
f_next = torch.max(torch.cat((pred1[:pred_label],
pred1[pred_label+1:])))
cf_next = torch.max(torch.cat((pred2[:pred_label],
pred2[pred_label+1:])))
bpr1 = relu(gam + f_next - pred1[pred_label])
bpr2 = relu(gam + pred2[pred_label] - cf_next)
masked_feat = self.get_masked_feat()
        L1 = torch.linalg.norm(masked_feat)  # note: default Frobenius norm here, unlike ord=1 for the edge masks
loss = L1 + lam * (alp * bpr1 + (1 - alp) * bpr2)
return bpr1, bpr2, L1, loss
def construct_feat_mask(self):
"""
construct mask for feature vector
:return:
"""
mask = torch.nn.Parameter(torch.FloatTensor(self.num_nodes, self.feat_dim))
std = torch.nn.init.calculate_gain("relu") * math.sqrt(
2.0 / (self.num_nodes + self.feat_dim)
)
with torch.no_grad():
mask.normal_(1.0, std)
return mask
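    # The mask is initialized around 1.0 with a Xavier-style std, so
    # sigmoid(mask) starts near 0.73 and most features are initially kept.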
def get_masked_feat(self):
feat_mask = torch.sigmoid(self.feat_mask)
masked_feat = self.feat * feat_mask
return masked_feat | 27,933 | 40.079412 | 142 | py |
gnn_cff | gnn_cff-main/scripts/exp_node_tree_cycles.py | import os
import numpy as np
import torch
from utils.argument import arg_parse_exp_node_tree_cycles
from models.explainer_models import NodeExplainerEdgeMulti
from models.gcn import GCNNodeTreeCycles
from utils.preprocessing.tree_cycles_preprocessing import TreeCyclesDataset
import sys
if __name__ == "__main__":
torch.manual_seed(1000)
np.random.seed(0)
np.set_printoptions(threshold=sys.maxsize)
exp_args = arg_parse_exp_node_tree_cycles()
print("argument:\n", exp_args)
model_path = exp_args.model_path
train_indices = np.load(os.path.join(model_path, 'train_indices.pickle'), allow_pickle=True)
test_indices = np.load(os.path.join(model_path, 'test_indices.pickle'), allow_pickle=True)
G_dataset = TreeCyclesDataset(load_path=os.path.join(model_path))
# targets = np.load(os.path.join(model_path, 'targets.pickle'), allow_pickle=True) # the target node to explain
graphs = G_dataset.graphs
labels = G_dataset.labels
targets = G_dataset.targets
if exp_args.gpu:
device = torch.device('cuda:%s' % exp_args.cuda)
else:
device = 'cpu'
base_model = GCNNodeTreeCycles(G_dataset.feat_dim, 32, num_classes=2, if_exp=True).to(device)
base_model.load_state_dict(torch.load(os.path.join(model_path, 'model.model')))
# fix the base model
for param in base_model.parameters():
param.requires_grad = False
# Create explainer
explainer = NodeExplainerEdgeMulti(
base_model=base_model,
G_dataset=G_dataset,
args=exp_args,
test_indices=test_indices,
# fix_exp=6
)
explainer.explain_nodes_gnn_stats()
| 1,650 | 35.688889 | 116 | py |
gnn_cff | gnn_cff-main/scripts/exp_node_ba_shapes.py | import os
import numpy as np
import torch
from utils.argument import arg_parse_exp_node_ba_shapes
from models.explainer_models import NodeExplainerEdgeMulti
from models.gcn import GCNNodeBAShapes
from utils.preprocessing.ba_shapes_preprocessing import BAShapesDataset
import sys
if __name__ == "__main__":
torch.manual_seed(0)
np.random.seed(0)
np.set_printoptions(threshold=sys.maxsize)
exp_args = arg_parse_exp_node_ba_shapes()
print("argument:\n", exp_args)
model_path = exp_args.model_path
train_indices = np.load(os.path.join(model_path, 'train_indices.pickle'), allow_pickle=True)
test_indices = np.load(os.path.join(model_path, 'test_indices.pickle'), allow_pickle=True)
G_dataset = BAShapesDataset(load_path=os.path.join(model_path))
# targets = np.load(os.path.join(model_path, 'targets.pickle'), allow_pickle=True) # the target node to explain
graphs = G_dataset.graphs
labels = G_dataset.labels
targets = G_dataset.targets
if exp_args.gpu:
device = torch.device('cuda:%s' % exp_args.cuda)
else:
device = 'cpu'
base_model = GCNNodeBAShapes(G_dataset.feat_dim, 16, num_classes=4, device=device, if_exp=True).to(device)
base_model.load_state_dict(torch.load(os.path.join(model_path, 'model.model')))
# fix the base model
for param in base_model.parameters():
param.requires_grad = False
# Create explainer
explainer = NodeExplainerEdgeMulti(
base_model=base_model,
G_dataset=G_dataset,
args=exp_args,
test_indices=test_indices,
# fix_exp=6
)
explainer.explain_nodes_gnn_stats() | 1,646 | 37.302326 | 116 | py |
gnn_cff | gnn_cff-main/scripts/train_graph_classification.py | import numpy as np
import torch
import os
import time
from pathlib import Path
from models.gcn import GCNGraph
from utils.argument import arg_parse_train_graph_mutag_0
from utils.graph_init import graph_init_real
from torch.utils.data.sampler import SubsetRandomSampler
from dgl.dataloading import GraphDataLoader
def train_graph_classification(args):
if args.gpu:
device = torch.device('cuda:%s' % args.cuda)
else:
device = 'cpu'
# device = 'cpu'
out_path = os.path.join(args.save_dir, args.dataset + "_logs")
G_dataset = graph_init_real(args.dataset)
Path(out_path).mkdir(parents=True, exist_ok=True)
num_examples = len(G_dataset)
num_train = int(num_examples * args.train_ratio)
train_indices = np.unique(np.random.choice(np.arange(num_examples), num_train, replace=False))
test_indices = np.unique(np.array([i for i in np.arange(num_examples) if i not in train_indices]))
train_sampler = SubsetRandomSampler(train_indices)
test_sampler = SubsetRandomSampler(test_indices)
train_dataloader = GraphDataLoader(
G_dataset, sampler=train_sampler, batch_size=128, drop_last=False)
test_dataloader = GraphDataLoader(
G_dataset, sampler=test_sampler, batch_size=128, drop_last=False)
model = GCNGraph(G_dataset.feat_dim, 128).to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=1e-5)
loss_fn = torch.nn.BCELoss()
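    # BCELoss expects probabilities in [0, 1]; this assumes (the model code is
    # not shown here) that GCNGraph ends with a sigmoid, which is consistent
    # with the 0.5 decision threshold used below.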
for epoch in range(args.num_epochs):
begin = time.time()
losses = []
num_correct = 0
num_train = 0
for batched_graph, labels in train_dataloader:
batched_graph = batched_graph.to(device)
labels = labels.to(device)
optimizer.zero_grad()
pred = model(batched_graph, batched_graph.ndata['feat'].float(), batched_graph.edata['weight']).squeeze()
num_correct += ((pred >= 0.5).int() == labels).sum().item()
num_train += len(labels)
loss = loss_fn(pred, labels.float())
losses.append(loss.to('cpu').detach().numpy())
loss.backward()
optimizer.step()
print('epoch:%d' % epoch, 'loss:', np.mean(losses), 'Train accuracy:', num_correct / num_train)
print('time', time.time() - begin)
# evaluate
num_correct = 0
num_tests = 0
for batched_graph, labels in train_dataloader:
batched_graph = batched_graph.to(device)
labels = labels.to(device)
pred = model(batched_graph, batched_graph.ndata['feat'].float(), batched_graph.edata['weight']).squeeze()
num_correct += ((pred >= 0.5).int() == labels).sum().item()
num_tests += len(labels)
print('Final train accuracy:', num_correct / num_tests)
num_correct = 0
num_tests = 0
for batched_graph, labels in test_dataloader:
batched_graph = batched_graph.to(device)
labels = labels.to(device)
pred = model(batched_graph, batched_graph.ndata['feat'].float(), batched_graph.edata['weight']).squeeze()
num_correct += ((pred >= 0.5).int() == labels).sum().item()
num_tests += len(labels)
print('Test accuracy:', num_correct / num_tests)
train_indices.dump(os.path.join(out_path, 'train_indices.pickle'))
test_indices.dump(os.path.join(out_path, 'test_indices.pickle'))
G_dataset.save_(os.path.join(out_path, 'dgl_graph.bin'))
torch.save(model.state_dict(), os.path.join(out_path, 'model.model'))
return True
if __name__ == "__main__":
torch.manual_seed(0)
np.random.seed(0)
train_args = arg_parse_train_graph_mutag_0()
if train_args.gpu:
os.environ["CUDA_VISIBLE_DEVICES"] = train_args.cuda
print("Using CUDA", train_args.cuda)
else:
print("Using CPU")
train_graph_classification(train_args)
| 3,833 | 39.787234 | 117 | py |
gnn_cff | gnn_cff-main/scripts/exp_graph.py | import os
import numpy as np
import torch
from utils.argument import arg_parse_exp_graph_mutag_0
from models.explainer_models import GraphExplainerEdge
from models.gcn import GCNGraph
from utils.preprocessing.mutag_preprocessing_0 import MutagDataset0
import sys
if __name__ == "__main__":
np.set_printoptions(threshold=sys.maxsize)
torch.manual_seed(0)
np.random.seed(0)
exp_args = arg_parse_exp_graph_mutag_0()
print("argument:\n", exp_args)
model_path = exp_args.model_path
train_indices = np.load(os.path.join(model_path, 'train_indices.pickle'), allow_pickle=True)
test_indices = np.load(os.path.join(model_path, 'test_indices.pickle'), allow_pickle=True)
G_dataset = MutagDataset0(load_path=os.path.join(model_path))
graphs = G_dataset.graphs
labels = G_dataset.labels
if exp_args.gpu:
device = torch.device('cuda:%s' % exp_args.cuda)
else:
device = 'cpu'
base_model = GCNGraph(G_dataset.feat_dim, 128).to(device)
base_model.load_state_dict(torch.load(os.path.join(model_path, 'model.model')))
# fix the base model
for param in base_model.parameters():
param.requires_grad = False
# Create explainer
explainer = GraphExplainerEdge(
base_model=base_model,
G_dataset=G_dataset,
args=exp_args,
test_indices=test_indices,
# fix_exp=15
)
explainer.explain_nodes_gnn_stats()
| 1,428 | 32.232558 | 96 | py |
gnn_cff | gnn_cff-main/scripts/train_node_classification.py | import numpy as np
import torch
import os
import time
from pathlib import Path
from models.gcn import GCNNodeBAShapes
from utils.argument import arg_parse_train_node_ba_shapes
from utils.graph_init import graph_init_real
from torch.utils.data.sampler import SubsetRandomSampler
from dgl.dataloading import GraphDataLoader
def train_node_classification(args):
if args.gpu:
device = torch.device('cuda:%s' % args.cuda)
else:
device = 'cpu'
# device = 'cpu'
out_path = os.path.join(args.save_dir, args.dataset + "_logs")
G_dataset = graph_init_real(args.dataset)
Path(out_path).mkdir(parents=True, exist_ok=True)
num_examples = len(G_dataset)
num_train = int(num_examples * args.train_ratio)
train_indices = np.unique(np.random.choice(np.arange(num_examples), num_train, replace=False))
test_indices = np.unique(np.array([i for i in np.arange(num_examples) if i not in train_indices]))
train_sampler = SubsetRandomSampler(train_indices)
test_sampler = SubsetRandomSampler(test_indices)
train_dataloader = GraphDataLoader(
G_dataset, sampler=train_sampler, batch_size=32, drop_last=False)
test_dataloader = GraphDataLoader(
G_dataset, sampler=test_sampler, batch_size=32, drop_last=False)
model = GCNNodeBAShapes(G_dataset.feat_dim, 16, num_classes=4, device=device).to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=1e-5)
loss_fn = torch.nn.CrossEntropyLoss()
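    # CrossEntropyLoss expects raw class scores plus integer targets, hence
    # the argmax conversion of the one-hot labels before each call below.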
begin = time.time()
for epoch in range(args.num_epochs):
losses = []
num_correct = 0
num_train = 0
# begin = time.time()
for batched_graph, labels, target_nodes in train_dataloader:
batched_graph = batched_graph.to(device)
labels = labels.to(device)
target_nodes = target_nodes.to(device)
optimizer.zero_grad()
pred = model(batched_graph, batched_graph.ndata['feat'].float(),
batched_graph.edata['weight'], target_nodes).squeeze()
# print(pred)
ori_int_labels = torch.argmax(labels, dim=1)
pre_int_labels = torch.argmax(pred, dim=1)
num_correct += (ori_int_labels == pre_int_labels).sum().item()
num_train += len(labels)
loss = loss_fn(pred, ori_int_labels)
losses.append(loss.to('cpu').detach().numpy())
loss.backward()
optimizer.step()
print('epoch:%d' % epoch, 'loss:', np.mean(losses), 'Train accuracy:', num_correct / num_train)
# evaluate
num_correct = 0
num_train = 0
for batched_graph, labels, target_nodes in train_dataloader:
batched_graph = batched_graph.to(device)
labels = labels.to(device)
target_nodes = target_nodes.to(device)
pred = model(batched_graph, batched_graph.ndata['feat'].float(),
batched_graph.edata['weight'], target_nodes).squeeze()
ori_int_labels = torch.argmax(labels, dim=1)
pre_int_labels = torch.argmax(pred, dim=1)
num_correct += (ori_int_labels == pre_int_labels).sum().item()
num_train += len(labels)
print('Final train accuracy:', num_correct / num_train)
num_correct = 0
num_tests = 0
for batched_graph, labels, target_nodes in test_dataloader:
batched_graph = batched_graph.to(device)
labels = labels.to(device)
target_nodes = target_nodes.to(device)
pred = model(batched_graph, batched_graph.ndata['feat'].float(),
batched_graph.edata['weight'], target_nodes).squeeze()
ori_int_labels = torch.argmax(labels, dim=1)
pre_int_labels = torch.argmax(pred, dim=1)
num_correct += (ori_int_labels == pre_int_labels).sum().item()
num_tests += len(labels)
print('Test accuracy:', num_correct / num_tests)
print('time: ', time.time() - begin)
train_indices.dump(os.path.join(out_path, 'train_indices.pickle'))
test_indices.dump(os.path.join(out_path, 'test_indices.pickle'))
G_dataset.save_(out_path)
torch.save(model.state_dict(), os.path.join(out_path, 'model.model'))
return True
if __name__ == "__main__":
torch.manual_seed(0)
np.random.seed(0)
train_args = arg_parse_train_node_ba_shapes()
if train_args.gpu:
os.environ["CUDA_VISIBLE_DEVICES"] = train_args.cuda
print("Using CUDA", train_args.cuda)
else:
print("Using CPU")
print(train_args)
train_node_classification(train_args)
| 4,554 | 38.95614 | 104 | py |
gnn_cff | gnn_cff-main/utils/graph_init.py | import numpy as np
def graph_init_real(dataset):
if dataset == "Mutagenicity_0":
from utils.preprocessing.mutag_preprocessing_0 import mutag_preprocessing_0
G_dataset = mutag_preprocessing_0(dataset_dir="datasets/Mutagenicity_0")
elif dataset == "NCI1":
from utils.preprocessing.nci1_preprocessing import nci1_preprocessing
G_dataset = nci1_preprocessing(dataset_dir="datasets/NCI1")
elif dataset == "BA_Shapes":
from utils.preprocessing.ba_shapes_preprocessing import ba_shapes_preprocessing
G_dataset = ba_shapes_preprocessing(dataset_dir="datasets/BA_Shapes")
elif dataset == "Tree_Cycles":
from utils.preprocessing.tree_cycles_preprocessing import tree_cycles_preprocessing
G_dataset = tree_cycles_preprocessing(dataset_dir="datasets/Tree_Cycles")
elif dataset == "citeseer":
from utils.preprocessing.citeseer_preprocessing import citeseer_preprocessing
G_dataset = citeseer_preprocessing(dataset_dir="datasets/citeseer")
else:
print('Error: Dataset not known.')
exit(2)
return G_dataset | 1,117 | 47.608696 | 91 | py |
gnn_cff | gnn_cff-main/utils/argument.py | import argparse
def arg_parse_train_graph_mutag_0():
parser = argparse.ArgumentParser()
parser.add_argument("--dataset", dest="dataset", type=str, default="Mutagenicity_0",
help="choose a graph classification task")
parser.add_argument("--gpu", dest="gpu", action="store_false", help="whether to use gpu")
parser.add_argument("--cuda", dest="cuda", type=str, default='0', help="which cuda")
parser.add_argument("--weight_decay", dest="weight_decay", type=float, default='0.005', help="L2 norm to the wights")
parser.add_argument("--opt", dest="opt", type=str, default="adam", help="optimizer")
parser.add_argument("--train_ratio", dest="train_ratio", type=float, default=0.8, help="ratio of training data")
parser.add_argument("--lr", dest="lr", type=float, default=0.002, help="learning rate")
parser.add_argument("--num_epochs", dest="num_epochs", type=int, default=1000, help="number of the training epochs")
parser.add_argument("--save_dir", dest="save_dir", type=str, default="log")
return parser.parse_args()
def arg_parse_train_node_ba_shapes():
parser = argparse.ArgumentParser()
parser.add_argument("--dataset", dest="dataset", type=str, default="BA_Shapes",
help="choose a node classification task")
parser.add_argument("--gpu", dest="gpu", action="store_false", help="whether to use gpu")
parser.add_argument("--cuda", dest="cuda", type=str, default='0', help="which cuda")
parser.add_argument("--weight_decay", dest="weight_decay", type=float, default='0.005', help="L2 norm to the wights")
parser.add_argument("--opt", dest="opt", type=str, default="adam", help="optimizer")
parser.add_argument("--train_ratio", dest="train_ratio", type=float, default=0.8, help="ratio of training data")
parser.add_argument("--lr", dest="lr", type=float, default=0.001, help="learning rate")
parser.add_argument("--num_epochs", dest="num_epochs", type=int, default=3000, help="number of the training epochs")
parser.add_argument("--save_dir", dest="save_dir", type=str, default="log")
return parser.parse_args()
def arg_parse_train_node_tree_cycles():
parser = argparse.ArgumentParser()
parser.add_argument("--dataset", dest="dataset", type=str, default="Tree_Cycles",
help="choose a node classification task")
parser.add_argument("--gpu", dest="gpu", action="store_false", help="whether to use gpu")
parser.add_argument("--cuda", dest="cuda", type=str, default='0', help="which cuda")
parser.add_argument("--weight_decay", dest="weight_decay", type=float, default='0.005', help="L2 norm to the wights")
parser.add_argument("--opt", dest="opt", type=str, default="adam", help="optimizer")
parser.add_argument("--train_ratio", dest="train_ratio", type=float, default=0.8, help="ratio of training data")
parser.add_argument("--lr", dest="lr", type=float, default=0.005, help="learning rate")
parser.add_argument("--num_epochs", dest="num_epochs", type=int, default=1000, help="number of the training epochs")
parser.add_argument("--save_dir", dest="save_dir", type=str, default="log")
return parser.parse_args()
def arg_parse_exp_graph_mutag_0():
parser = argparse.ArgumentParser()
parser.add_argument("--dataset", dest="dataset", type=str, default="Mutagenicity_0",
help="choose a graph explanation task")
parser.add_argument("--model_path", dest="model_path", type=str, default="log/Mutagenicity_0_logs",
help="path to the model that need to be explained")
parser.add_argument("--gpu", dest="gpu", action="store_true", help="whether to use gpu")
parser.add_argument("--cuda", dest="cuda", type=str, default='0', help="which cuda")
parser.add_argument("--weight_decay", dest="weight_decay", type=float, default='0.005', help="L2 norm to the wights")
parser.add_argument("--opt", dest="opt", type=str, default="adam", help="optimizer")
parser.add_argument("--lr", dest="lr", type=float, default=0.05, help="learning rate")
parser.add_argument("--num_epochs", dest="num_epochs", type=int, default=500, help="number of the training epochs")
parser.add_argument("--lam", dest="lam", type=float, default=1000,
help="hyper param control the trade-off between "
"the explanation complexity and explanation strength")
parser.add_argument("--alp", dest="alp", type=float, default=0.6,
help="hyper param control factual and counterfactual, 1 is totally factual")
parser.add_argument("--gam", dest="gam", type=float, default=.5, help="margin value for bpr loss")
parser.add_argument("--mask_thresh", dest="mask_thresh", type=float, default=.5,
help="threshold to convert relaxed adj matrix to binary")
return parser.parse_args()
def arg_parse_exp_node_ba_shapes():
parser = argparse.ArgumentParser()
parser.add_argument("--dataset", dest="dataset", type=str, default="BA_Shapes",
help="choose a node explanation task")
parser.add_argument("--model_path", dest="model_path", type=str, default="log/BA_Shapes_logs",
help="path to the model that need to be explained")
parser.add_argument("--gpu", dest="gpu", action="store_true", help="whether to use gpu")
parser.add_argument("--cuda", dest="cuda", type=str, default='0', help="which cuda")
parser.add_argument("--weight_decay", dest="weight_decay", type=float, default='0.005', help="L2 norm to the wights")
parser.add_argument("--opt", dest="opt", type=str, default="adam", help="optimizer")
parser.add_argument("--lr", dest="lr", type=float, default=0.01, help="learning rate")
parser.add_argument("--num_epochs", dest="num_epochs", type=int, default=2000, help="number of the training epochs")
parser.add_argument("--lam", dest="lam", type=float, default=500,
help="hyper param control the trade-off between "
"the explanation complexity and explanation strength")
parser.add_argument("--alp", dest="alp", type=float, default=0.6,
help="hyper param control factual and counterfactual")
parser.add_argument("--gam", dest="gam", type=float, default=0.5, help="margin value for bpr loss")
parser.add_argument("--mask_thresh", dest="mask_thresh", type=float, default=.5,
help="threshold to convert relaxed adj matrix to binary")
return parser.parse_args()
def arg_parse_exp_node_tree_cycles():
parser = argparse.ArgumentParser()
parser.add_argument("--dataset", dest="dataset", type=str, default="Tree_Cycles",
help="choose a node explanation task")
parser.add_argument("--model_path", dest="model_path", type=str, default="log/Tree_Cycles_logs",
help="path to the model that need to be explained")
parser.add_argument("--gpu", dest="gpu", action="store_true", help="whether to use gpu")
parser.add_argument("--cuda", dest="cuda", type=str, default='0', help="which cuda")
parser.add_argument("--weight_decay", dest="weight_decay", type=float, default='0.005', help="L2 norm to the wights")
parser.add_argument("--opt", dest="opt", type=str, default="adam", help="optimizer")
parser.add_argument("--lr", dest="lr", type=float, default=0.05, help="learning rate")
parser.add_argument("--num_epochs", dest="num_epochs", type=int, default=500, help="number of the training epochs")
parser.add_argument("--lam", dest="lam", type=float, default=500,
help="hyper param control the trade-off between "
"the explanation complexity and explanation strength")
parser.add_argument("--alp", dest="alp", type=float, default=0.6,
help="hyper param control factual and counterfactual")
parser.add_argument("--gam", dest="gam", type=float, default=0.5, help="margin value for bpr loss")
parser.add_argument("--mask_thresh", dest="mask_thresh", type=float, default=.1,
help="threshold to convert relaxed adj matrix to binary")
return parser.parse_args() | 8,301 | 71.191304 | 121 | py |
gnn_cff | gnn_cff-main/utils/common_utils.py | import numpy as np
import networkx as nx
def mutag_dgl_to_networkx(dgl_G):
component_dict = {0: 'C', 1: 'O', 2: 'Cl', 3: 'H', 4: 'N', 5: 'F', 6: 'Br', 7: 'S',
8: 'P', 9: 'I', 10: 'Na', 11: 'K', 12: 'Li', 13: 'Ca'}
nodes = dgl_G.nodes().numpy()
edges = np.array(list(zip(dgl_G.edges()[0], dgl_G.edges()[1])))
node_labels = dgl_G.ndata['feat'].numpy()
edge_weights = dgl_G.edata['weight'].numpy()
edge_labels = dgl_G.edata['label'].numpy()
edges = edges[np.where(edge_weights > 0)]
edge_labels = edge_labels[np.where(edge_weights > 0)]
nx_G = nx.Graph()
nx_G.add_nodes_from(nodes)
# add edge with label
for eid in range(len(edges)):
nx_G.add_edge(edges[eid][0], edges[eid][1], gt=edge_labels[eid])
for node in nx_G.nodes(data=True):
node[1]['label'] = component_dict[np.where(node_labels[node[0]] == 1.0)[0][0]]
return nx_G
def get_mutag_color_dict():
mutage_color_dict = {'C': 'tab:orange', 'O': 'tab:gray', 'Cl': 'cyan', 'H': 'tab:blue', 'N': 'blue',
'F': 'green', 'Br': 'y', 'S': 'm', 'P': 'red', 'I': 'tab:green', 'Na': 'tab: purple',
'K': 'tab:brown', 'Li': 'tab:pink', 'Ca': 'tab:olive'}
return mutage_color_dict
def read_file(f_path):
"""
read graph dataset .txt files
:param f_path: the path to the .txt file
:return: read the file (as lines) and return numpy arrays.
"""
f_list = []
with open(f_path, 'r') as f:
lines = f.readlines()
for line in lines:
items = line.replace('\n', '').split(',')
f_list.append([])
for item in items:
f_list[-1].append(int(item))
return np.array(f_list).squeeze()
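# Example: a line "1,2" yields the row [1, 2]; for files with one value per
# line the trailing squeeze() collapses the result to a 1-D array.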
def read_file_citeseer(f_path):
"""
read citeseer dataset
:param f_path: the path to the .txt file
:return: read the file (as lines) and return numpy arrays.
"""
f_list = []
with open(f_path, 'r') as f:
lines = f.readlines()
for line in lines:
items = line.replace('\n', '').split('\t')
f_list.append([])
for item in items:
f_list[-1].append(item)
return np.array(f_list).squeeze()
def ba_shapes_dgl_to_networkx(dgl_G):
nodes = dgl_G.nodes().numpy()
edges = np.array(list(zip(dgl_G.edges()[0], dgl_G.edges()[1])))
edge_weights = dgl_G.edata['weight'].numpy()
edge_labels = dgl_G.edata['gt'].numpy()
edges = edges[np.where(edge_weights > 0)]
edge_labels = edge_labels[np.where(edge_weights > 0)]
nx_G = nx.Graph() # init networkX graph
nx_G.add_nodes_from(nodes)
# add edge with label
for eid in range(len(edges)):
nx_G.add_edge(edges[eid][0], edges[eid][1], gt=edge_labels[eid])
return nx_G
def citeseer_dgl_to_networkx(dgl_G):
nodes = dgl_G.nodes().numpy()
edges = np.array(list(zip(dgl_G.edges()[0], dgl_G.edges()[1])))
edge_weights = dgl_G.edata['weight'].numpy()
edge_labels = dgl_G.edata['gt'].numpy()
edges = edges[np.where(edge_weights > 0)]
edge_labels = edge_labels[np.where(edge_weights > 0)]
nx_G = nx.Graph() # init networkX graph
nx_G.add_nodes_from(nodes)
# add edge with label
for eid in range(len(edges)):
nx_G.add_edge(edges[eid][0], edges[eid][1], gt=edge_labels[eid])
return nx_G | 3,366 | 35.204301 | 108 | py |
gnn_cff | gnn_cff-main/utils/preprocessing/ba_shapes_preprocessing.py | """Read the BA-Shapes synthetic data and create the graphs"""
import numpy as np
import os
import dgl
from dgl.data import DGLDataset
import torch
import networkx as nx
import matplotlib.pyplot as plt
from dgl import save_graphs, load_graphs
from utils.common_utils import read_file
from utils.common_utils import ba_shapes_dgl_to_networkx
class BAShapesDataset(DGLDataset):
def __init__(self, adj=None, node_labels=None, edge_labels=None, hop_num=3, feat_dim=10, load_path=None):
super().__init__(name='ba_shapes')
if load_path:
self.load_path = load_path
self.load_()
else:
self.adj = adj
self.edge_labels = edge_labels
self.node_labels = node_labels
self.hop_num = hop_num
self.feat_dim = feat_dim
self.graphs = []
self.labels = []
self.targets = []
for n_i, node in enumerate(np.arange(len(self.adj))):
n_l = self.node_labels[node]
g, new_idx = self.sub_graph_generator(node)
self.graphs.append(g)
self.labels.append(n_l)
self.targets.append(new_idx)
self.labels = torch.from_numpy(np.array(self.labels))
self.targets = torch.from_numpy(np.array(self.targets))
def sub_graph_generator(self, node):
"""
a simple bfs to find the k-hop sub-graph
:param node:
:param node_labels:
:return:
"""
sub_nodes = set() # the sub nodes in the sub graph (within k hop)
sub_nodes.add(node)
que = [node]
close_set = set()
for i in range(self.hop_num):
hop_nodes = []
while que:
tar = que.pop(0)
neighbors = np.where(self.adj[tar] == 1)[0]
hop_nodes.extend(neighbors)
sub_nodes.update(neighbors)
if tar not in close_set:
close_set.add(tar)
if len(hop_nodes) == 0:
break
for n in hop_nodes:
if n not in close_set:
que.append(n)
sub_nodes = np.sort(np.array(list(sub_nodes)))
node_new = np.where(sub_nodes == node)[0][0]
sub_edge_labels = self.edge_labels[sub_nodes][:, sub_nodes]
filtered_sub_edge_labels = np.zeros((sub_edge_labels.shape[0], sub_edge_labels.shape[1]))
sgt_nodes = set() # the sub nodes in the gt graph (within k hop)
sgt_nodes.add(node_new)
que = [node_new]
close_set = set()
for i in range(self.hop_num + 1):
hop_nodes = []
while que:
tar = que.pop(0)
neighbors = np.where(sub_edge_labels[tar] == 1)[0]
hop_nodes.extend(neighbors)
for n in neighbors:
filtered_sub_edge_labels[tar, n] = 1
filtered_sub_edge_labels[n, tar] = 1
sgt_nodes.update(neighbors)
if tar not in close_set:
close_set.add(tar)
if len(hop_nodes) == 0:
break
for n in hop_nodes:
if n not in close_set:
que.append(n)
sub_edge_labels = filtered_sub_edge_labels
sub_adj = self.adj[sub_nodes][:, sub_nodes]
sub_nodes = np.arange(len(sub_nodes))
# create dgl graph
comb = np.array(np.meshgrid(sub_nodes, sub_nodes)).T.reshape(-1, 2)
g = dgl.graph((torch.from_numpy(comb[:, 0]), torch.from_numpy(comb[:, 1])), num_nodes=len(sub_nodes))
g_feats = np.ones((len(sub_nodes), self.feat_dim))
g.ndata['feat'] = torch.from_numpy(g_feats)
edge_weights = sub_adj.reshape(1, -1)[0]
edge_gts = sub_edge_labels.reshape(1, -1)[0]
g.edata['weight'] = torch.from_numpy(edge_weights)
g.edata['gt'] = torch.from_numpy(edge_gts)
return g, node_new
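    # Two BFS passes: the first collects the hop_num-hop computational
    # subgraph around `node`; the second walks only ground-truth edges from
    # the re-indexed target, so `gt` keeps just motif edges reachable from it.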
def process(self):
print('processing')
def __getitem__(self, i):
return self.graphs[i], self.labels[i], self.targets[i]
def __len__(self):
return len(self.graphs)
def save_(self, save_path):
save_graphs(os.path.join(save_path, 'dgl_graph.bin'), self.graphs, {'labels': self.labels})
np.array(self.targets).dump(os.path.join(save_path, 'targets.pickle'))
def load_(self):
# load processed data from directory `self.save_path`
self.graphs, label_dict = load_graphs(os.path.join(self.load_path, 'dgl_graph.bin'))
self.labels = label_dict['labels']
self.feat_dim = self.graphs[0].ndata['feat'].shape[1]
self.targets = np.load(os.path.join(self.load_path, 'targets.pickle'), allow_pickle=True)
def ba_shapes_preprocessing(dataset_dir):
name = "BA_Shapes"
data = np.load(os.path.join(dataset_dir, 'syn_data.pkl'), allow_pickle=True)
adj = np.array(data[0], dtype='float32')
feats = data[1]
y_train = data[2]
y_val = data[3]
y_test = data[4]
e_labels = data[8]
e_labels = np.array(np.maximum(e_labels, e_labels.T), dtype="float32") # make symmetric
node_labels = np.array(np.logical_or(y_train, np.logical_or(y_val, y_test)), dtype=int)
G_dataset = BAShapesDataset(adj, node_labels, e_labels, hop_num=3, feat_dim=10)
return G_dataset
| 5,366 | 37.335714 | 109 | py |
gnn_cff | gnn_cff-main/utils/preprocessing/mutag_preprocessing_0.py | """Read the Mutagenicity dataset and create the graphs"""
import numpy as np
import os
import dgl
from dgl.data import DGLDataset
import torch
from dgl import save_graphs, load_graphs
from utils.common_utils import read_file
class MutagDataset0(DGLDataset):
def __init__(self, edges=None, graph_indicator=None, node_labels=None, edge_labels=None, graph_labels=None, load_path=None):
super().__init__(name='mutag0')
if load_path:
self.load_path = load_path
self.load_()
else:
for i in range(len(graph_labels)):
if graph_labels[i] == 1:
graph_labels[i] = 0
else:
graph_labels[i] = 1
self.edges = edges
self.graph_indicator = graph_indicator
self.node_labels = node_labels
self.edge_labels = edge_labels
self.graph_labels = graph_labels
self.graphs = []
self.labels = []
self.feat_dim = len(np.unique(self.node_labels))
self.component_dict = {0: 'C', 1: 'O', 2: 'Cl', 3: 'H', 4: 'N', 5: 'F', 6: 'Br', 7: 'S',
8: 'P', 9: 'I', 10: 'Na', 11: 'K', 12: 'Li', 13: 'Ca'}
# group edges
edges_group = {}
e_labels_group = {}
for e_id, edge in enumerate(self.edges):
e_label = self.edge_labels[e_id]
g_id = self.graph_indicator[edge[0]]
if g_id != self.graph_indicator[edge[1]]:
print('graph indicator error!')
exit(1)
if g_id not in edges_group.keys():
edges_group[g_id] = [edge]
e_labels_group[g_id] = [e_label]
else:
edges_group[g_id].append(edge)
e_labels_group[g_id].append(e_label)
for g_id, g_edges in edges_group.items():
g_label = self.graph_labels[g_id]
g_edges = np.array(g_edges)
g_e_labels = e_labels_group[g_id]
src = g_edges[:, 0]
dst = g_edges[:, 1]
unique_nodes = np.unique(np.concatenate((src, dst), axis=0))
g_feats = np.zeros((len(unique_nodes), self.feat_dim))
int_feats = self.node_labels[unique_nodes]
g_feats[np.arange(len(unique_nodes)), int_feats] = 1 # convert feature to one-hot vec
n_id_dict = {}
n_id_dict_reverse = {}
for i in range(len(unique_nodes)):
n_id_dict[unique_nodes[i]] = i
n_id_dict_reverse[i] = unique_nodes[i]
for i in range(len(src)):
src[i] = n_id_dict[src[i]]
dst[i] = n_id_dict[dst[i]]
num_nodes = len(np.unique(np.concatenate((src, dst), axis=0)))
adj = np.zeros((num_nodes, num_nodes), dtype='float32')
adj_e_label = np.zeros((num_nodes, num_nodes), dtype='float32')
for i in range(len(src)):
n0 = src[i]
n1 = dst[i]
adj[n0, n1] = 1.0
adj_e_label[n0, n1] = g_e_labels[i]
comb = np.array(np.meshgrid(np.arange(num_nodes), np.arange(num_nodes))).T.reshape(-1, 2)
g = dgl.graph((torch.from_numpy(comb[:, 0]), torch.from_numpy(comb[:, 1])), num_nodes=num_nodes)
g.ndata['feat'] = torch.from_numpy(g_feats)
edge_weights = adj.reshape(1, -1)[0]
edge_labels = adj_e_label.reshape(1, -1)[0]
g.edata['weight'] = torch.from_numpy(edge_weights)
g.edata['label'] = torch.from_numpy(edge_labels)
self.graphs.append(g)
self.labels.append(g_label)
self.labels = torch.from_numpy(np.array(self.labels))
def process(self):
print('processing')
def __getitem__(self, i):
return self.graphs[i], self.labels[i]
def __len__(self):
return len(self.graphs)
def save_(self, save_path):
save_graphs(save_path, self.graphs, {'labels': self.labels})
def load_(self):
# load processed data from directory `self.save_path`
self.graphs, label_dict = load_graphs(os.path.join(self.load_path, 'dgl_graph.bin'))
self.labels = label_dict['labels']
self.feat_dim = self.graphs[0].ndata['feat'].shape[1]
def mutag_preprocessing_0(dataset_dir):
name = "Mutagenicity"
edge_path = os.path.join(dataset_dir, name + "_A.txt")
edge_label_path = os.path.join(dataset_dir, name + "_edge_gt.txt")
graph_indicator_path = os.path.join(dataset_dir, name + "_graph_indicator.txt")
node_label_path = os.path.join(dataset_dir, name + "_node_labels.txt")
graph_label_path = os.path.join(dataset_dir, name + "_graph_labels.txt")
edge_data = read_file(edge_path)
edge_data = np.array(edge_data)
edge_data = edge_data - 1
edge_labels = read_file(edge_label_path)
edge_labels = np.array(edge_labels)
graph_indicator = read_file(graph_indicator_path) - 1
node_labels = read_file(node_label_path)
    graph_labels = read_file(graph_label_path)
G_dataset = MutagDataset0(edge_data, graph_indicator, node_labels, edge_labels, graph_labels)
return G_dataset
| 5,400 | 41.865079 | 128 | py |
gnn_cff | gnn_cff-main/utils/preprocessing/tree_cycles_preprocessing.py | """Read the Tree-Cycles synthetic data and create the graphs"""
import numpy as np
import os
import dgl
from dgl.data import DGLDataset
import torch
import networkx as nx
import matplotlib.pyplot as plt
from dgl import save_graphs, load_graphs
from utils.common_utils import read_file
from utils.common_utils import ba_shapes_dgl_to_networkx
class TreeCyclesDataset(DGLDataset):
def __init__(self, adj=None, node_labels=None, edge_labels=None, hop_num=3, feat_dim=10, load_path=None):
super().__init__(name='tree_cycles')
if load_path:
self.load_path = load_path
self.load_()
else:
self.adj = adj
self.edge_labels = edge_labels
self.node_labels = node_labels
self.hop_num = hop_num
self.feat_dim = feat_dim
self.graphs = []
self.labels = []
self.targets = []
for n_i, node in enumerate(np.arange(len(self.adj))):
n_l = self.node_labels[node]
g, new_idx = self.sub_graph_generator(node)
self.graphs.append(g)
self.labels.append(n_l)
self.targets.append(new_idx)
self.labels = torch.from_numpy(np.array(self.labels))
self.targets = torch.from_numpy(np.array(self.targets))
def sub_graph_generator(self, node):
"""
a simple bfs to find the k-hop sub-graph
:param node:
:param node_labels:
:return:
"""
sub_nodes = set()
sub_nodes.add(node)
que = [node]
close_set = set()
for i in range(self.hop_num):
hop_nodes = []
while que:
tar = que.pop(0)
neighbors = np.where(self.adj[tar] == 1)[0]
hop_nodes.extend(neighbors)
sub_nodes.update(neighbors)
if tar not in close_set:
close_set.add(tar)
if len(hop_nodes) == 0:
break
for n in hop_nodes:
if n not in close_set:
que.append(n)
sub_nodes = np.sort(np.array(list(sub_nodes)))
node_new = np.where(sub_nodes == node)[0][0]
sub_edge_labels = self.edge_labels[sub_nodes][:, sub_nodes]
filtered_sub_edge_labels = np.zeros((sub_edge_labels.shape[0], sub_edge_labels.shape[1]))
sgt_nodes = set()
sgt_nodes.add(node_new)
que = [node_new]
close_set = set()
for i in range(self.hop_num + 1):
hop_nodes = []
while que:
tar = que.pop(0)
neighbors = np.where(sub_edge_labels[tar] == 1)[0]
hop_nodes.extend(neighbors)
for n in neighbors:
filtered_sub_edge_labels[tar, n] = 1
filtered_sub_edge_labels[n, tar] = 1
sgt_nodes.update(neighbors)
if tar not in close_set:
close_set.add(tar)
if len(hop_nodes) == 0:
break
for n in hop_nodes:
if n not in close_set:
que.append(n)
sub_edge_labels = filtered_sub_edge_labels
sub_adj = self.adj[sub_nodes][:, sub_nodes]
sub_nodes = np.arange(len(sub_nodes))
# create dgl graph
comb = np.array(np.meshgrid(sub_nodes, sub_nodes)).T.reshape(-1, 2)
g = dgl.graph((torch.from_numpy(comb[:, 0]), torch.from_numpy(comb[:, 1])), num_nodes=len(sub_nodes))
g_feats = np.ones((len(sub_nodes), self.feat_dim))
g.ndata['feat'] = torch.from_numpy(g_feats)
edge_weights = sub_adj.reshape(1, -1)[0]
edge_gts = sub_edge_labels.reshape(1, -1)[0]
g.edata['weight'] = torch.from_numpy(edge_weights)
g.edata['gt'] = torch.from_numpy(edge_gts)
# # test plot
# nx_G = ba_shapes_dgl_to_networkx(g)
# edge_color_map = []
# for edge in nx_G.edges(data=True):
# if edge[2]['gt'] == 1:
# edge_color_map.append('red')
# else:
# edge_color_map.append('black')
# pos = nx.kamada_kawai_layout(nx_G)
# nx.draw_networkx(nx_G, pos, with_labels=True, edge_color=edge_color_map)
# ax = plt.gca()
# ax.margins(0.08)
# plt.axis("off")
# plt.tight_layout()
# # plt.savefig('mutag_%d.png' % graph_i)
# # plt.clf()
# plt.show()
return g, node_new
def process(self):
print('processing')
def __getitem__(self, i):
return self.graphs[i], self.labels[i], self.targets[i]
def __len__(self):
return len(self.graphs)
def save_(self, save_path):
save_graphs(os.path.join(save_path, 'dgl_graph.bin'), self.graphs, {'labels': self.labels})
np.array(self.targets).dump(os.path.join(save_path, 'targets.pickle'))
def load_(self):
# load processed data from directory `self.save_path`
self.graphs, label_dict = load_graphs(os.path.join(self.load_path, 'dgl_graph.bin'))
self.labels = label_dict['labels']
self.feat_dim = self.graphs[0].ndata['feat'].shape[1]
self.targets = np.load(os.path.join(self.load_path, 'targets.pickle'), allow_pickle=True)
def tree_cycles_preprocessing(dataset_dir):
name = "Tree_Cycles"
# assign path
data = np.load(os.path.join(dataset_dir, 'syn_data.pkl'), allow_pickle=True)
adj = np.array(data[0], dtype='float32')
feats = data[1]
y_train = data[2]
y_val = data[3]
y_test = data[4]
e_labels = data[8]
e_labels = np.array(np.maximum(e_labels, e_labels.T), dtype="float32")
node_labels = np.array(np.logical_or(y_train, np.logical_or(y_val, y_test)), dtype=int)
G_dataset = TreeCyclesDataset(adj, node_labels, e_labels, hop_num=3, feat_dim=10)
return G_dataset
| 5,903 | 36.367089 | 109 | py |
gnn_cff | gnn_cff-main/utils/preprocessing/citeseer_preprocessing.py | """Read the CiteSeer dataset and create the graphs"""
import numpy as np
import os
import dgl
from dgl.data import DGLDataset
import torch
import networkx as nx
import matplotlib.pyplot as plt
from dgl import save_graphs, load_graphs
from utils.common_utils import read_file_citeseer
from utils.common_utils import ba_shapes_dgl_to_networkx
class CiteSeerDataset(DGLDataset):
def __init__(self, adj=None, node_labels=None, node_feats=None, hop_num=3, load_path=None):
super().__init__(name='citeseer')
if load_path:
self.load_path = load_path
self.load_()
else:
self.adj = adj
self.node_feats = node_feats
self.node_labels = node_labels
self.hop_num = hop_num
self.feat_dim = len(node_feats[0])
self.graphs = []
self.labels = []
self.targets = []
for n_i, node in enumerate(np.arange(len(self.adj))):
n_l = self.node_labels[node]
g, new_idx = self.sub_graph_generator(node)
self.graphs.append(g)
self.labels.append(n_l)
self.targets.append(new_idx)
self.labels = torch.from_numpy(np.array(self.labels))
self.targets = torch.from_numpy(np.array(self.targets))
def sub_graph_generator(self, node):
"""
a simple bfs to find the k-hop sub-graph
:param node:
:param node_labels:
:return:
"""
# print(node)
sub_nodes = set() # the sub nodes in the sub graph (within k hop)
sub_nodes.add(node)
que = [node]
close_set = set()
for i in range(self.hop_num):
hop_nodes = []
while que:
tar = que.pop(0)
neighbors = np.where(self.adj[tar] == 1)[0]
hop_nodes.extend(neighbors)
sub_nodes.update(neighbors)
if tar not in close_set:
close_set.add(tar)
if len(hop_nodes) == 0:
break
for n in hop_nodes:
if n not in close_set:
que.append(n)
sub_nodes = np.sort(np.array(list(sub_nodes)))
node_new = np.where(sub_nodes == node)[0][0]
sub_adj = self.adj[sub_nodes][:, sub_nodes]
g_feats = self.node_feats[sub_nodes]
sub_nodes = np.arange(len(sub_nodes))
# create dgl graph
comb = np.array(np.meshgrid(sub_nodes, sub_nodes)).T.reshape(-1, 2)
g = dgl.graph((torch.from_numpy(comb[:, 0]), torch.from_numpy(comb[:, 1])), num_nodes=len(sub_nodes))
g.ndata['feat'] = torch.from_numpy(g_feats)
edge_weights = sub_adj.reshape(1, -1)[0]
g.edata['weight'] = torch.from_numpy(edge_weights)
return g, node_new
def process(self):
print('processing')
def __getitem__(self, i):
return self.graphs[i], self.labels[i], self.targets[i]
def __len__(self):
return len(self.graphs)
def save_(self, save_path):
save_graphs(os.path.join(save_path, 'dgl_graph.bin'), self.graphs, {'labels': self.labels})
np.array(self.targets).dump(os.path.join(save_path, 'targets.pickle'))
def load_(self):
# load processed data from directory `self.save_path`
self.graphs, label_dict = load_graphs(os.path.join(self.load_path, 'dgl_graph.bin'))
self.labels = label_dict['labels']
self.feat_dim = self.graphs[0].ndata['feat'].shape[1]
self.targets = np.load(os.path.join(self.load_path, 'targets.pickle'), allow_pickle=True)
def citeseer_preprocessing(dataset_dir):
name = "citeseer"
paper_type_dict = {"Agents": 0, "AI": 1, "DB": 2, "IR": 3, "ML": 4, "HCI": 5}
edge_data_path = os.path.join(dataset_dir, 'citeseer.cites')
node_info_data_path = os.path.join(dataset_dir, 'citeseer.content')
node_info_data = np.array(read_file_citeseer(node_info_data_path))
edge_data = np.array(read_file_citeseer(edge_data_path))
# filter out papers without info
valid_paper_set = set()
for info in node_info_data:
valid_paper_set.add(info[0])
valid_edge_data = []
for edge in edge_data:
if edge[0] in valid_paper_set and edge[1] in valid_paper_set:
valid_edge_data.append(edge)
edge_data = np.array(valid_edge_data) # only the edges with info
name_int_dict = {} # {'name': index}
idx = 0
for edge in edge_data:
if edge[0] not in name_int_dict:
name_int_dict[edge[0]] = idx
idx += 1
if edge[1] not in name_int_dict:
name_int_dict[edge[1]] = idx
idx += 1
for i in range(len(edge_data)):
edge_data[i][0] = name_int_dict[edge_data[i][0]]
edge_data[i][1] = name_int_dict[edge_data[i][1]]
edge_data = np.array(edge_data, dtype=int)
node_num = len(name_int_dict.keys())
feat_dim = len(node_info_data[0][1:-1])
node_labels = np.ones(node_num, dtype=int) * -1
node_feats = np.ones((node_num, feat_dim)) * -1
idx_set = set()
for i in range(len(node_info_data)):
paper_id = node_info_data[i][0]
paper_label = paper_type_dict[node_info_data[i][-1]]
paper_feat = node_info_data[i][1:-1]
paper_idx = name_int_dict[paper_id]
idx_set.add(paper_idx)
node_labels[paper_idx] = paper_label
node_feats[paper_idx] = paper_feat
# create adj matrix
adj = np.zeros((node_num, node_num), dtype='float32')
for edge in edge_data:
n0 = edge[0]
n1 = edge[1]
adj[n0, n1] = 1
adj[n1, n0] = 1
G_dataset = CiteSeerDataset(adj, node_labels, node_feats, hop_num=3)
return G_dataset
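# Example usage (hypothetical path):
#   dataset = citeseer_preprocessing('data/citeseer')  # expects citeseer.cites / citeseer.content
#   g, label, target_idx = dataset[0]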
| 5,767 | 34.170732 | 109 | py |
gnn_cff | gnn_cff-main/utils/preprocessing/nci1_preprocessing.py | """Read the NCI1 dataset and create the graphs"""
import numpy as np
import os
import dgl
from dgl.data import DGLDataset
import torch
from dgl import save_graphs, load_graphs
from utils.common_utils import read_file
class NCI1Dataset(DGLDataset):
def __init__(self, edges=None, graph_indicator=None, node_labels=None, graph_labels=None, load_path=None):
        super().__init__(name='nci1')
if load_path:
self.load_path = load_path
self.load_()
else:
self.edges = edges
self.graph_indicator = graph_indicator
self.node_labels = node_labels
self.graph_labels = graph_labels
self.graphs = []
self.labels = []
self.feat_dim = len(np.unique(self.node_labels))
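        # note: this element lookup appears to be carried over from the MUTAG preprocessing and is not used elsewhere in this file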
self.component_dict = {0: 'C', 1: 'O', 2: 'Cl', 3: 'H', 4: 'N', 5: 'F', 6: 'Br', 7: 'S',
8: 'P', 9: 'I', 10: 'Na', 11: 'K', 12: 'Li', 13: 'Ca'}
# group edges
edges_group = {}
for e_id, edge in enumerate(self.edges):
g_id = self.graph_indicator[edge[0]]
if g_id != self.graph_indicator[edge[1]]:
print('graph indicator error!')
exit(1)
if g_id not in edges_group.keys():
edges_group[g_id] = [edge]
else:
edges_group[g_id].append(edge)
for g_id, g_edges in edges_group.items():
g_label = self.graph_labels[g_id]
g_edges = np.array(g_edges)
src = g_edges[:, 0]
dst = g_edges[:, 1]
unique_nodes = np.unique(np.concatenate((src, dst), axis=0))
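                # one-hot encode the integer node labels as node features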
g_feats = np.zeros((len(unique_nodes), self.feat_dim))
int_feats = self.node_labels[unique_nodes]
g_feats[np.arange(len(unique_nodes)), int_feats] = 1
n_id_dict = {}
n_id_dict_reverse = {}
for i in range(len(unique_nodes)):
n_id_dict[unique_nodes[i]] = i
n_id_dict_reverse[i] = unique_nodes[i]
for i in range(len(src)):
src[i] = n_id_dict[src[i]]
dst[i] = n_id_dict[dst[i]]
num_nodes = len(np.unique(np.concatenate((src, dst), axis=0)))
adj = np.zeros((num_nodes, num_nodes), dtype='float32')
adj_e_label = np.zeros((num_nodes, num_nodes), dtype='float32')
for i in range(len(src)):
n0 = src[i]
n1 = dst[i]
adj[n0, n1] = 1.0
comb = np.array(np.meshgrid(np.arange(num_nodes), np.arange(num_nodes))).T.reshape(-1, 2)
g = dgl.graph((torch.from_numpy(comb[:, 0]), torch.from_numpy(comb[:, 1])), num_nodes=num_nodes)
g.ndata['feat'] = torch.from_numpy(g_feats)
edge_weights = adj.reshape(1, -1)[0]
edge_labels = adj_e_label.reshape(1, -1)[0]
g.edata['weight'] = torch.from_numpy(edge_weights)
g.edata['label'] = torch.from_numpy(edge_labels)
self.graphs.append(g)
self.labels.append(g_label)
self.labels = torch.from_numpy(np.array(self.labels))
def process(self):
print('processing')
def __getitem__(self, i):
return self.graphs[i], self.labels[i]
def __len__(self):
return len(self.graphs)
def save_(self, save_path):
save_graphs(save_path, self.graphs, {'labels': self.labels})
def load_(self):
# load processed data from directory `self.save_path`
self.graphs, label_dict = load_graphs(os.path.join(self.load_path, 'dgl_graph.bin'))
self.labels = label_dict['labels']
self.feat_dim = self.graphs[0].ndata['feat'].shape[1]
def nci1_preprocessing(dataset_dir):
name = "NCI1"
# assign path
edge_path = os.path.join(dataset_dir, name + "_A.txt")
graph_indicator_path = os.path.join(dataset_dir, name + "_graph_indicator.txt")
node_label_path = os.path.join(dataset_dir, name + "_node_labels.txt")
graph_label_path = os.path.join(dataset_dir, name + "_graph_labels.txt")
edge_data = read_file(edge_path)
edge_data = np.array(edge_data)
edge_data = edge_data - 1
graph_indicator = read_file(graph_indicator_path) - 1
node_labels = np.array(read_file(node_label_path)) - 1
    graph_labels = read_file(graph_label_path)
G_dataset = NCI1Dataset(edge_data, graph_indicator, node_labels, graph_labels)
return G_dataset
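# Example usage (hypothetical path):
#   dataset = nci1_preprocessing('data/NCI1')  # expects NCI1_A.txt, NCI1_graph_indicator.txt, ...
#   g, label = dataset[0]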
| 4,663 | 41.4 | 112 | py |
hifi-gan | hifi-gan-master/inference.py | from __future__ import absolute_import, division, print_function, unicode_literals
import glob
import os
import argparse
import json
import torch
from scipy.io.wavfile import write
from env import AttrDict
from meldataset import mel_spectrogram, MAX_WAV_VALUE, load_wav
from models import Generator
h = None
device = None
def load_checkpoint(filepath, device):
assert os.path.isfile(filepath)
print("Loading '{}'".format(filepath))
checkpoint_dict = torch.load(filepath, map_location=device)
print("Complete.")
return checkpoint_dict
def get_mel(x):
return mel_spectrogram(x, h.n_fft, h.num_mels, h.sampling_rate, h.hop_size, h.win_size, h.fmin, h.fmax)
def scan_checkpoint(cp_dir, prefix):
pattern = os.path.join(cp_dir, prefix + '*')
cp_list = glob.glob(pattern)
if len(cp_list) == 0:
return ''
return sorted(cp_list)[-1]
def inference(a):
generator = Generator(h).to(device)
state_dict_g = load_checkpoint(a.checkpoint_file, device)
generator.load_state_dict(state_dict_g['generator'])
filelist = os.listdir(a.input_wavs_dir)
os.makedirs(a.output_dir, exist_ok=True)
generator.eval()
generator.remove_weight_norm()
with torch.no_grad():
        for i, filename in enumerate(filelist):
            wav, sr = load_wav(os.path.join(a.input_wavs_dir, filename))
wav = wav / MAX_WAV_VALUE
wav = torch.FloatTensor(wav).to(device)
x = get_mel(wav.unsqueeze(0))
y_g_hat = generator(x)
audio = y_g_hat.squeeze()
audio = audio * MAX_WAV_VALUE
audio = audio.cpu().numpy().astype('int16')
            output_file = os.path.join(a.output_dir, os.path.splitext(filename)[0] + '_generated.wav')
write(output_file, h.sampling_rate, audio)
print(output_file)
def main():
print('Initializing Inference Process..')
parser = argparse.ArgumentParser()
parser.add_argument('--input_wavs_dir', default='test_files')
parser.add_argument('--output_dir', default='generated_files')
parser.add_argument('--checkpoint_file', required=True)
a = parser.parse_args()
config_file = os.path.join(os.path.split(a.checkpoint_file)[0], 'config.json')
with open(config_file) as f:
data = f.read()
global h
json_config = json.loads(data)
h = AttrDict(json_config)
torch.manual_seed(h.seed)
global device
if torch.cuda.is_available():
torch.cuda.manual_seed(h.seed)
device = torch.device('cuda')
else:
device = torch.device('cpu')
inference(a)
if __name__ == '__main__':
main()
| 2,652 | 26.635417 | 107 | py |
hifi-gan | hifi-gan-master/inference_e2e.py | from __future__ import absolute_import, division, print_function, unicode_literals
import glob
import os
import numpy as np
import argparse
import json
import torch
from scipy.io.wavfile import write
from env import AttrDict
from meldataset import MAX_WAV_VALUE
from models import Generator
h = None
device = None
def load_checkpoint(filepath, device):
assert os.path.isfile(filepath)
print("Loading '{}'".format(filepath))
checkpoint_dict = torch.load(filepath, map_location=device)
print("Complete.")
return checkpoint_dict
def scan_checkpoint(cp_dir, prefix):
pattern = os.path.join(cp_dir, prefix + '*')
cp_list = glob.glob(pattern)
if len(cp_list) == 0:
return ''
return sorted(cp_list)[-1]
def inference(a):
generator = Generator(h).to(device)
state_dict_g = load_checkpoint(a.checkpoint_file, device)
generator.load_state_dict(state_dict_g['generator'])
filelist = os.listdir(a.input_mels_dir)
os.makedirs(a.output_dir, exist_ok=True)
generator.eval()
generator.remove_weight_norm()
with torch.no_grad():
        for i, filename in enumerate(filelist):
            x = np.load(os.path.join(a.input_mels_dir, filename))
x = torch.FloatTensor(x).to(device)
y_g_hat = generator(x)
audio = y_g_hat.squeeze()
audio = audio * MAX_WAV_VALUE
audio = audio.cpu().numpy().astype('int16')
            output_file = os.path.join(a.output_dir, os.path.splitext(filename)[0] + '_generated_e2e.wav')
write(output_file, h.sampling_rate, audio)
print(output_file)
def main():
print('Initializing Inference Process..')
parser = argparse.ArgumentParser()
parser.add_argument('--input_mels_dir', default='test_mel_files')
parser.add_argument('--output_dir', default='generated_files_from_mel')
parser.add_argument('--checkpoint_file', required=True)
a = parser.parse_args()
config_file = os.path.join(os.path.split(a.checkpoint_file)[0], 'config.json')
with open(config_file) as f:
data = f.read()
global h
json_config = json.loads(data)
h = AttrDict(json_config)
torch.manual_seed(h.seed)
global device
if torch.cuda.is_available():
torch.cuda.manual_seed(h.seed)
device = torch.device('cuda')
else:
device = torch.device('cpu')
inference(a)
if __name__ == '__main__':
main()
| 2,444 | 25.868132 | 105 | py |
hifi-gan | hifi-gan-master/meldataset.py | import math
import os
import random
import torch
import torch.utils.data
import numpy as np
from librosa.util import normalize
from scipy.io.wavfile import read
from librosa.filters import mel as librosa_mel_fn
MAX_WAV_VALUE = 32768.0
def load_wav(full_path):
sampling_rate, data = read(full_path)
return data, sampling_rate
def dynamic_range_compression(x, C=1, clip_val=1e-5):
return np.log(np.clip(x, a_min=clip_val, a_max=None) * C)
def dynamic_range_decompression(x, C=1):
return np.exp(x) / C
def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
return torch.log(torch.clamp(x, min=clip_val) * C)
def dynamic_range_decompression_torch(x, C=1):
return torch.exp(x) / C
def spectral_normalize_torch(magnitudes):
output = dynamic_range_compression_torch(magnitudes)
return output
def spectral_de_normalize_torch(magnitudes):
output = dynamic_range_decompression_torch(magnitudes)
return output
mel_basis = {}
hann_window = {}
def mel_spectrogram(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False):
if torch.min(y) < -1.:
print('min value is ', torch.min(y))
if torch.max(y) > 1.:
print('max value is ', torch.max(y))
global mel_basis, hann_window
    if str(fmax)+'_'+str(y.device) not in mel_basis:  # cache the mel filterbank and window per (fmax, device)
mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)
mel_basis[str(fmax)+'_'+str(y.device)] = torch.from_numpy(mel).float().to(y.device)
hann_window[str(y.device)] = torch.hann_window(win_size).to(y.device)
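    # reflect-pad so that, with center=False below, the STFT yields one frame per hop_size samples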
y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect')
y = y.squeeze(1)
spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[str(y.device)],
center=center, pad_mode='reflect', normalized=False, onesided=True)
spec = torch.sqrt(spec.pow(2).sum(-1)+(1e-9))
spec = torch.matmul(mel_basis[str(fmax)+'_'+str(y.device)], spec)
spec = spectral_normalize_torch(spec)
return spec
def get_dataset_filelist(a):
with open(a.input_training_file, 'r', encoding='utf-8') as fi:
training_files = [os.path.join(a.input_wavs_dir, x.split('|')[0] + '.wav')
for x in fi.read().split('\n') if len(x) > 0]
with open(a.input_validation_file, 'r', encoding='utf-8') as fi:
validation_files = [os.path.join(a.input_wavs_dir, x.split('|')[0] + '.wav')
for x in fi.read().split('\n') if len(x) > 0]
return training_files, validation_files
class MelDataset(torch.utils.data.Dataset):
def __init__(self, training_files, segment_size, n_fft, num_mels,
hop_size, win_size, sampling_rate, fmin, fmax, split=True, shuffle=True, n_cache_reuse=1,
device=None, fmax_loss=None, fine_tuning=False, base_mels_path=None):
self.audio_files = training_files
random.seed(1234)
if shuffle:
random.shuffle(self.audio_files)
self.segment_size = segment_size
self.sampling_rate = sampling_rate
self.split = split
self.n_fft = n_fft
self.num_mels = num_mels
self.hop_size = hop_size
self.win_size = win_size
self.fmin = fmin
self.fmax = fmax
self.fmax_loss = fmax_loss
self.cached_wav = None
self.n_cache_reuse = n_cache_reuse
self._cache_ref_count = 0
self.device = device
self.fine_tuning = fine_tuning
self.base_mels_path = base_mels_path
def __getitem__(self, index):
filename = self.audio_files[index]
if self._cache_ref_count == 0:
audio, sampling_rate = load_wav(filename)
audio = audio / MAX_WAV_VALUE
if not self.fine_tuning:
audio = normalize(audio) * 0.95
self.cached_wav = audio
if sampling_rate != self.sampling_rate:
raise ValueError("{} SR doesn't match target {} SR".format(
sampling_rate, self.sampling_rate))
self._cache_ref_count = self.n_cache_reuse
else:
audio = self.cached_wav
self._cache_ref_count -= 1
audio = torch.FloatTensor(audio)
audio = audio.unsqueeze(0)
if not self.fine_tuning:
if self.split:
if audio.size(1) >= self.segment_size:
max_audio_start = audio.size(1) - self.segment_size
audio_start = random.randint(0, max_audio_start)
audio = audio[:, audio_start:audio_start+self.segment_size]
else:
audio = torch.nn.functional.pad(audio, (0, self.segment_size - audio.size(1)), 'constant')
mel = mel_spectrogram(audio, self.n_fft, self.num_mels,
self.sampling_rate, self.hop_size, self.win_size, self.fmin, self.fmax,
center=False)
else:
mel = np.load(
os.path.join(self.base_mels_path, os.path.splitext(os.path.split(filename)[-1])[0] + '.npy'))
mel = torch.from_numpy(mel)
if len(mel.shape) < 3:
mel = mel.unsqueeze(0)
if self.split:
frames_per_seg = math.ceil(self.segment_size / self.hop_size)
if audio.size(1) >= self.segment_size:
mel_start = random.randint(0, mel.size(2) - frames_per_seg - 1)
mel = mel[:, :, mel_start:mel_start + frames_per_seg]
audio = audio[:, mel_start * self.hop_size:(mel_start + frames_per_seg) * self.hop_size]
else:
mel = torch.nn.functional.pad(mel, (0, frames_per_seg - mel.size(2)), 'constant')
audio = torch.nn.functional.pad(audio, (0, self.segment_size - audio.size(1)), 'constant')
mel_loss = mel_spectrogram(audio, self.n_fft, self.num_mels,
self.sampling_rate, self.hop_size, self.win_size, self.fmin, self.fmax_loss,
center=False)
return (mel.squeeze(), audio.squeeze(0), filename, mel_loss.squeeze())
def __len__(self):
return len(self.audio_files)
| 6,314 | 36.366864 | 115 | py |
hifi-gan | hifi-gan-master/utils.py | import glob
import os
import matplotlib
import torch
from torch.nn.utils import weight_norm
matplotlib.use("Agg")
import matplotlib.pylab as plt
def plot_spectrogram(spectrogram):
fig, ax = plt.subplots(figsize=(10, 2))
im = ax.imshow(spectrogram, aspect="auto", origin="lower",
interpolation='none')
plt.colorbar(im, ax=ax)
fig.canvas.draw()
plt.close()
return fig
def init_weights(m, mean=0.0, std=0.01):
classname = m.__class__.__name__
if classname.find("Conv") != -1:
m.weight.data.normal_(mean, std)
def apply_weight_norm(m):
classname = m.__class__.__name__
if classname.find("Conv") != -1:
weight_norm(m)
def get_padding(kernel_size, dilation=1):
    # "same" padding for stride 1: a dilated kernel spans dilation*(kernel_size-1)+1
    # samples, so pad half of (span - 1) on each side
    return int((kernel_size*dilation - dilation)/2)
def load_checkpoint(filepath, device):
assert os.path.isfile(filepath)
print("Loading '{}'".format(filepath))
checkpoint_dict = torch.load(filepath, map_location=device)
print("Complete.")
return checkpoint_dict
def save_checkpoint(filepath, obj):
print("Saving checkpoint to {}".format(filepath))
torch.save(obj, filepath)
print("Complete.")
def scan_checkpoint(cp_dir, prefix):
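    # checkpoints are saved with 8-digit step suffixes (e.g. g_00000000), so glob for prefix + 8 characters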
pattern = os.path.join(cp_dir, prefix + '????????')
cp_list = glob.glob(pattern)
if len(cp_list) == 0:
return None
return sorted(cp_list)[-1]
| 1,377 | 22.355932 | 63 | py |
hifi-gan | hifi-gan-master/models.py | import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
from utils import init_weights, get_padding
LRELU_SLOPE = 0.1
class ResBlock1(torch.nn.Module):
def __init__(self, h, channels, kernel_size=3, dilation=(1, 3, 5)):
super(ResBlock1, self).__init__()
self.h = h
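        # pairs of (dilated conv, dilation-1 conv), each pair wrapped in a residual connection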
self.convs1 = nn.ModuleList([
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
padding=get_padding(kernel_size, dilation[0]))),
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
padding=get_padding(kernel_size, dilation[1]))),
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2],
padding=get_padding(kernel_size, dilation[2])))
])
self.convs1.apply(init_weights)
self.convs2 = nn.ModuleList([
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
padding=get_padding(kernel_size, 1))),
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
padding=get_padding(kernel_size, 1))),
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
padding=get_padding(kernel_size, 1)))
])
self.convs2.apply(init_weights)
def forward(self, x):
for c1, c2 in zip(self.convs1, self.convs2):
xt = F.leaky_relu(x, LRELU_SLOPE)
xt = c1(xt)
xt = F.leaky_relu(xt, LRELU_SLOPE)
xt = c2(xt)
x = xt + x
return x
def remove_weight_norm(self):
for l in self.convs1:
remove_weight_norm(l)
for l in self.convs2:
remove_weight_norm(l)
class ResBlock2(torch.nn.Module):
def __init__(self, h, channels, kernel_size=3, dilation=(1, 3)):
super(ResBlock2, self).__init__()
self.h = h
self.convs = nn.ModuleList([
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
padding=get_padding(kernel_size, dilation[0]))),
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
padding=get_padding(kernel_size, dilation[1])))
])
self.convs.apply(init_weights)
def forward(self, x):
for c in self.convs:
xt = F.leaky_relu(x, LRELU_SLOPE)
xt = c(xt)
x = xt + x
return x
def remove_weight_norm(self):
for l in self.convs:
remove_weight_norm(l)
class Generator(torch.nn.Module):
def __init__(self, h):
super(Generator, self).__init__()
self.h = h
self.num_kernels = len(h.resblock_kernel_sizes)
self.num_upsamples = len(h.upsample_rates)
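        # the input is an 80-band mel-spectrogram; conv_post below reduces the stack to a single waveform channel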
self.conv_pre = weight_norm(Conv1d(80, h.upsample_initial_channel, 7, 1, padding=3))
resblock = ResBlock1 if h.resblock == '1' else ResBlock2
self.ups = nn.ModuleList()
for i, (u, k) in enumerate(zip(h.upsample_rates, h.upsample_kernel_sizes)):
self.ups.append(weight_norm(
ConvTranspose1d(h.upsample_initial_channel//(2**i), h.upsample_initial_channel//(2**(i+1)),
k, u, padding=(k-u)//2)))
self.resblocks = nn.ModuleList()
for i in range(len(self.ups)):
ch = h.upsample_initial_channel//(2**(i+1))
for j, (k, d) in enumerate(zip(h.resblock_kernel_sizes, h.resblock_dilation_sizes)):
self.resblocks.append(resblock(h, ch, k, d))
self.conv_post = weight_norm(Conv1d(ch, 1, 7, 1, padding=3))
self.ups.apply(init_weights)
self.conv_post.apply(init_weights)
def forward(self, x):
x = self.conv_pre(x)
for i in range(self.num_upsamples):
x = F.leaky_relu(x, LRELU_SLOPE)
x = self.ups[i](x)
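            # multi-receptive-field fusion: average the outputs of the parallel residual blocks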
xs = None
for j in range(self.num_kernels):
if xs is None:
xs = self.resblocks[i*self.num_kernels+j](x)
else:
xs += self.resblocks[i*self.num_kernels+j](x)
x = xs / self.num_kernels
x = F.leaky_relu(x)
x = self.conv_post(x)
x = torch.tanh(x)
return x
def remove_weight_norm(self):
print('Removing weight norm...')
for l in self.ups:
remove_weight_norm(l)
for l in self.resblocks:
l.remove_weight_norm()
remove_weight_norm(self.conv_pre)
remove_weight_norm(self.conv_post)
class DiscriminatorP(torch.nn.Module):
def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
super(DiscriminatorP, self).__init__()
self.period = period
        norm_f = spectral_norm if use_spectral_norm else weight_norm
self.convs = nn.ModuleList([
norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(2, 0))),
])
self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
def forward(self, x):
fmap = []
        # 1d to 2d: fold the waveform into (t / period, period) so the 2d convolutions see period-aligned samples
b, c, t = x.shape
if t % self.period != 0: # pad first
n_pad = self.period - (t % self.period)
x = F.pad(x, (0, n_pad), "reflect")
t = t + n_pad
x = x.view(b, c, t // self.period, self.period)
for l in self.convs:
x = l(x)
x = F.leaky_relu(x, LRELU_SLOPE)
fmap.append(x)
x = self.conv_post(x)
fmap.append(x)
x = torch.flatten(x, 1, -1)
return x, fmap
class MultiPeriodDiscriminator(torch.nn.Module):
def __init__(self):
super(MultiPeriodDiscriminator, self).__init__()
self.discriminators = nn.ModuleList([
DiscriminatorP(2),
DiscriminatorP(3),
DiscriminatorP(5),
DiscriminatorP(7),
DiscriminatorP(11),
])
def forward(self, y, y_hat):
y_d_rs = []
y_d_gs = []
fmap_rs = []
fmap_gs = []
for i, d in enumerate(self.discriminators):
y_d_r, fmap_r = d(y)
y_d_g, fmap_g = d(y_hat)
y_d_rs.append(y_d_r)
fmap_rs.append(fmap_r)
y_d_gs.append(y_d_g)
fmap_gs.append(fmap_g)
return y_d_rs, y_d_gs, fmap_rs, fmap_gs
class DiscriminatorS(torch.nn.Module):
def __init__(self, use_spectral_norm=False):
super(DiscriminatorS, self).__init__()
        norm_f = spectral_norm if use_spectral_norm else weight_norm
self.convs = nn.ModuleList([
norm_f(Conv1d(1, 128, 15, 1, padding=7)),
norm_f(Conv1d(128, 128, 41, 2, groups=4, padding=20)),
norm_f(Conv1d(128, 256, 41, 2, groups=16, padding=20)),
norm_f(Conv1d(256, 512, 41, 4, groups=16, padding=20)),
norm_f(Conv1d(512, 1024, 41, 4, groups=16, padding=20)),
norm_f(Conv1d(1024, 1024, 41, 1, groups=16, padding=20)),
norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
])
self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
def forward(self, x):
fmap = []
for l in self.convs:
x = l(x)
x = F.leaky_relu(x, LRELU_SLOPE)
fmap.append(x)
x = self.conv_post(x)
fmap.append(x)
x = torch.flatten(x, 1, -1)
return x, fmap
class MultiScaleDiscriminator(torch.nn.Module):
def __init__(self):
super(MultiScaleDiscriminator, self).__init__()
self.discriminators = nn.ModuleList([
DiscriminatorS(use_spectral_norm=True),
DiscriminatorS(),
DiscriminatorS(),
])
self.meanpools = nn.ModuleList([
AvgPool1d(4, 2, padding=2),
AvgPool1d(4, 2, padding=2)
])
def forward(self, y, y_hat):
y_d_rs = []
y_d_gs = []
fmap_rs = []
fmap_gs = []
for i, d in enumerate(self.discriminators):
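            # the 2nd and 3rd scale discriminators operate on average-pooled (downsampled) audio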
if i != 0:
y = self.meanpools[i-1](y)
y_hat = self.meanpools[i-1](y_hat)
y_d_r, fmap_r = d(y)
y_d_g, fmap_g = d(y_hat)
y_d_rs.append(y_d_r)
fmap_rs.append(fmap_r)
y_d_gs.append(y_d_g)
fmap_gs.append(fmap_g)
return y_d_rs, y_d_gs, fmap_rs, fmap_gs
def feature_loss(fmap_r, fmap_g):
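    # L1 distance between real and generated feature maps at every discriminator layer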
loss = 0
for dr, dg in zip(fmap_r, fmap_g):
for rl, gl in zip(dr, dg):
loss += torch.mean(torch.abs(rl - gl))
return loss*2
def discriminator_loss(disc_real_outputs, disc_generated_outputs):
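    # LSGAN objective: push outputs on real audio toward 1 and on generated audio toward 0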
loss = 0
r_losses = []
g_losses = []
for dr, dg in zip(disc_real_outputs, disc_generated_outputs):
r_loss = torch.mean((1-dr)**2)
g_loss = torch.mean(dg**2)
loss += (r_loss + g_loss)
r_losses.append(r_loss.item())
g_losses.append(g_loss.item())
return loss, r_losses, g_losses
def generator_loss(disc_outputs):
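    # LSGAN generator objective: push discriminator outputs on generated audio toward 1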
loss = 0
gen_losses = []
for dg in disc_outputs:
l = torch.mean((1-dg)**2)
gen_losses.append(l)
loss += l
return loss, gen_losses
| 9,905 | 33.880282 | 107 | py |
hifi-gan | hifi-gan-master/train.py | import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
import itertools
import os
import time
import argparse
import json
import torch
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import DistributedSampler, DataLoader
import torch.multiprocessing as mp
from torch.distributed import init_process_group
from torch.nn.parallel import DistributedDataParallel
from env import AttrDict, build_env
from meldataset import MelDataset, mel_spectrogram, get_dataset_filelist
from models import Generator, MultiPeriodDiscriminator, MultiScaleDiscriminator, feature_loss, generator_loss,\
discriminator_loss
from utils import plot_spectrogram, scan_checkpoint, load_checkpoint, save_checkpoint
torch.backends.cudnn.benchmark = True
def train(rank, a, h):
if h.num_gpus > 1:
init_process_group(backend=h.dist_config['dist_backend'], init_method=h.dist_config['dist_url'],
world_size=h.dist_config['world_size'] * h.num_gpus, rank=rank)
torch.cuda.manual_seed(h.seed)
device = torch.device('cuda:{:d}'.format(rank))
generator = Generator(h).to(device)
mpd = MultiPeriodDiscriminator().to(device)
msd = MultiScaleDiscriminator().to(device)
if rank == 0:
print(generator)
os.makedirs(a.checkpoint_path, exist_ok=True)
print("checkpoints directory : ", a.checkpoint_path)
if os.path.isdir(a.checkpoint_path):
cp_g = scan_checkpoint(a.checkpoint_path, 'g_')
cp_do = scan_checkpoint(a.checkpoint_path, 'do_')
steps = 0
if cp_g is None or cp_do is None:
state_dict_do = None
last_epoch = -1
else:
state_dict_g = load_checkpoint(cp_g, device)
state_dict_do = load_checkpoint(cp_do, device)
generator.load_state_dict(state_dict_g['generator'])
mpd.load_state_dict(state_dict_do['mpd'])
msd.load_state_dict(state_dict_do['msd'])
steps = state_dict_do['steps'] + 1
last_epoch = state_dict_do['epoch']
if h.num_gpus > 1:
generator = DistributedDataParallel(generator, device_ids=[rank]).to(device)
mpd = DistributedDataParallel(mpd, device_ids=[rank]).to(device)
msd = DistributedDataParallel(msd, device_ids=[rank]).to(device)
optim_g = torch.optim.AdamW(generator.parameters(), h.learning_rate, betas=[h.adam_b1, h.adam_b2])
optim_d = torch.optim.AdamW(itertools.chain(msd.parameters(), mpd.parameters()),
h.learning_rate, betas=[h.adam_b1, h.adam_b2])
if state_dict_do is not None:
optim_g.load_state_dict(state_dict_do['optim_g'])
optim_d.load_state_dict(state_dict_do['optim_d'])
scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=h.lr_decay, last_epoch=last_epoch)
scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=h.lr_decay, last_epoch=last_epoch)
training_filelist, validation_filelist = get_dataset_filelist(a)
trainset = MelDataset(training_filelist, h.segment_size, h.n_fft, h.num_mels,
h.hop_size, h.win_size, h.sampling_rate, h.fmin, h.fmax, n_cache_reuse=0,
shuffle=False if h.num_gpus > 1 else True, fmax_loss=h.fmax_for_loss, device=device,
fine_tuning=a.fine_tuning, base_mels_path=a.input_mels_dir)
train_sampler = DistributedSampler(trainset) if h.num_gpus > 1 else None
train_loader = DataLoader(trainset, num_workers=h.num_workers, shuffle=False,
sampler=train_sampler,
batch_size=h.batch_size,
pin_memory=True,
drop_last=True)
if rank == 0:
validset = MelDataset(validation_filelist, h.segment_size, h.n_fft, h.num_mels,
h.hop_size, h.win_size, h.sampling_rate, h.fmin, h.fmax, False, False, n_cache_reuse=0,
fmax_loss=h.fmax_for_loss, device=device, fine_tuning=a.fine_tuning,
base_mels_path=a.input_mels_dir)
validation_loader = DataLoader(validset, num_workers=1, shuffle=False,
sampler=None,
batch_size=1,
pin_memory=True,
drop_last=True)
sw = SummaryWriter(os.path.join(a.checkpoint_path, 'logs'))
generator.train()
mpd.train()
msd.train()
for epoch in range(max(0, last_epoch), a.training_epochs):
if rank == 0:
start = time.time()
print("Epoch: {}".format(epoch+1))
if h.num_gpus > 1:
train_sampler.set_epoch(epoch)
for i, batch in enumerate(train_loader):
if rank == 0:
start_b = time.time()
x, y, _, y_mel = batch
x = torch.autograd.Variable(x.to(device, non_blocking=True))
y = torch.autograd.Variable(y.to(device, non_blocking=True))
y_mel = torch.autograd.Variable(y_mel.to(device, non_blocking=True))
y = y.unsqueeze(1)
y_g_hat = generator(x)
y_g_hat_mel = mel_spectrogram(y_g_hat.squeeze(1), h.n_fft, h.num_mels, h.sampling_rate, h.hop_size, h.win_size,
h.fmin, h.fmax_for_loss)
optim_d.zero_grad()
# MPD
y_df_hat_r, y_df_hat_g, _, _ = mpd(y, y_g_hat.detach())
loss_disc_f, losses_disc_f_r, losses_disc_f_g = discriminator_loss(y_df_hat_r, y_df_hat_g)
# MSD
y_ds_hat_r, y_ds_hat_g, _, _ = msd(y, y_g_hat.detach())
loss_disc_s, losses_disc_s_r, losses_disc_s_g = discriminator_loss(y_ds_hat_r, y_ds_hat_g)
loss_disc_all = loss_disc_s + loss_disc_f
loss_disc_all.backward()
optim_d.step()
# Generator
optim_g.zero_grad()
# L1 Mel-Spectrogram Loss
loss_mel = F.l1_loss(y_mel, y_g_hat_mel) * 45
y_df_hat_r, y_df_hat_g, fmap_f_r, fmap_f_g = mpd(y, y_g_hat)
y_ds_hat_r, y_ds_hat_g, fmap_s_r, fmap_s_g = msd(y, y_g_hat)
loss_fm_f = feature_loss(fmap_f_r, fmap_f_g)
loss_fm_s = feature_loss(fmap_s_r, fmap_s_g)
loss_gen_f, losses_gen_f = generator_loss(y_df_hat_g)
loss_gen_s, losses_gen_s = generator_loss(y_ds_hat_g)
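            # total generator loss: adversarial + feature matching + mel reconstruction (weighted by 45 above)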
loss_gen_all = loss_gen_s + loss_gen_f + loss_fm_s + loss_fm_f + loss_mel
loss_gen_all.backward()
optim_g.step()
if rank == 0:
# STDOUT logging
if steps % a.stdout_interval == 0:
with torch.no_grad():
mel_error = F.l1_loss(y_mel, y_g_hat_mel).item()
print('Steps : {:d}, Gen Loss Total : {:4.3f}, Mel-Spec. Error : {:4.3f}, s/b : {:4.3f}'.
format(steps, loss_gen_all, mel_error, time.time() - start_b))
# checkpointing
if steps % a.checkpoint_interval == 0 and steps != 0:
checkpoint_path = "{}/g_{:08d}".format(a.checkpoint_path, steps)
save_checkpoint(checkpoint_path,
{'generator': (generator.module if h.num_gpus > 1 else generator).state_dict()})
checkpoint_path = "{}/do_{:08d}".format(a.checkpoint_path, steps)
save_checkpoint(checkpoint_path,
{'mpd': (mpd.module if h.num_gpus > 1
else mpd).state_dict(),
'msd': (msd.module if h.num_gpus > 1
else msd).state_dict(),
'optim_g': optim_g.state_dict(), 'optim_d': optim_d.state_dict(), 'steps': steps,
'epoch': epoch})
# Tensorboard summary logging
if steps % a.summary_interval == 0:
sw.add_scalar("training/gen_loss_total", loss_gen_all, steps)
sw.add_scalar("training/mel_spec_error", mel_error, steps)
# Validation
if steps % a.validation_interval == 0: # and steps != 0:
generator.eval()
torch.cuda.empty_cache()
val_err_tot = 0
with torch.no_grad():
for j, batch in enumerate(validation_loader):
x, y, _, y_mel = batch
y_g_hat = generator(x.to(device))
y_mel = torch.autograd.Variable(y_mel.to(device, non_blocking=True))
y_g_hat_mel = mel_spectrogram(y_g_hat.squeeze(1), h.n_fft, h.num_mels, h.sampling_rate,
h.hop_size, h.win_size,
h.fmin, h.fmax_for_loss)
val_err_tot += F.l1_loss(y_mel, y_g_hat_mel).item()
if j <= 4:
if steps == 0:
sw.add_audio('gt/y_{}'.format(j), y[0], steps, h.sampling_rate)
sw.add_figure('gt/y_spec_{}'.format(j), plot_spectrogram(x[0]), steps)
sw.add_audio('generated/y_hat_{}'.format(j), y_g_hat[0], steps, h.sampling_rate)
y_hat_spec = mel_spectrogram(y_g_hat.squeeze(1), h.n_fft, h.num_mels,
h.sampling_rate, h.hop_size, h.win_size,
h.fmin, h.fmax)
sw.add_figure('generated/y_hat_spec_{}'.format(j),
plot_spectrogram(y_hat_spec.squeeze(0).cpu().numpy()), steps)
val_err = val_err_tot / (j+1)
sw.add_scalar("validation/mel_spec_error", val_err, steps)
generator.train()
steps += 1
scheduler_g.step()
scheduler_d.step()
if rank == 0:
print('Time taken for epoch {} is {} sec\n'.format(epoch + 1, int(time.time() - start)))
def main():
print('Initializing Training Process..')
parser = argparse.ArgumentParser()
parser.add_argument('--group_name', default=None)
parser.add_argument('--input_wavs_dir', default='LJSpeech-1.1/wavs')
parser.add_argument('--input_mels_dir', default='ft_dataset')
parser.add_argument('--input_training_file', default='LJSpeech-1.1/training.txt')
parser.add_argument('--input_validation_file', default='LJSpeech-1.1/validation.txt')
parser.add_argument('--checkpoint_path', default='cp_hifigan')
parser.add_argument('--config', default='')
parser.add_argument('--training_epochs', default=3100, type=int)
parser.add_argument('--stdout_interval', default=5, type=int)
parser.add_argument('--checkpoint_interval', default=5000, type=int)
parser.add_argument('--summary_interval', default=100, type=int)
parser.add_argument('--validation_interval', default=1000, type=int)
parser.add_argument('--fine_tuning', default=False, type=bool)
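    # note: argparse's type=bool treats any non-empty string (even "False") as True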
a = parser.parse_args()
with open(a.config) as f:
data = f.read()
json_config = json.loads(data)
h = AttrDict(json_config)
build_env(a.config, 'config.json', a.checkpoint_path)
torch.manual_seed(h.seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(h.seed)
h.num_gpus = torch.cuda.device_count()
h.batch_size = int(h.batch_size / h.num_gpus)
print('Batch size per GPU :', h.batch_size)
    else:
        # note: h.num_gpus is only assigned in the CUDA branch above, and train()
        # creates CUDA devices unconditionally, so CPU-only runs are not supported here
        pass
if h.num_gpus > 1:
mp.spawn(train, nprocs=h.num_gpus, args=(a, h,))
else:
train(0, a, h)
if __name__ == '__main__':
main()
| 12,153 | 43.683824 | 123 | py |
hifi-gan | hifi-gan-master/env.py | import os
import shutil
class AttrDict(dict):
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
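# Example: cfg = AttrDict({'lr': 2e-4}); cfg.lr and cfg['lr'] both return 2e-4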
def build_env(config, config_name, path):
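    # copy the config file next to the checkpoints unless it already lives there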
t_path = os.path.join(path, config_name)
if config != t_path:
os.makedirs(path, exist_ok=True)
shutil.copyfile(config, os.path.join(path, config_name))
| 394 | 23.6875 | 64 | py |
LowFat | LowFat-master/llvm-4.0.0.src/tools/clang/tools/clang-format/clang-format-diff.py | #!/usr/bin/env python
#
#===- clang-format-diff.py - ClangFormat Diff Reformatter ----*- python -*--===#
#
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
#
#===------------------------------------------------------------------------===#
r"""
ClangFormat Diff Reformatter
============================
This script reads input from a unified diff and reformats all the changed
lines. This is useful to reformat all the lines touched by a specific patch.
Example usage for git/svn users:
git diff -U0 --no-color HEAD^ | clang-format-diff.py -p1 -i
svn diff --diff-cmd=diff -x-U0 | clang-format-diff.py -i
"""
import argparse
import difflib
import re
import string
import subprocess
import StringIO
import sys
def main():
parser = argparse.ArgumentParser(description=
'Reformat changed lines in diff. Without -i '
'option just output the diff that would be '
'introduced.')
parser.add_argument('-i', action='store_true', default=False,
help='apply edits to files instead of displaying a diff')
parser.add_argument('-p', metavar='NUM', default=0,
help='strip the smallest prefix containing P slashes')
parser.add_argument('-regex', metavar='PATTERN', default=None,
help='custom pattern selecting file paths to reformat '
'(case sensitive, overrides -iregex)')
parser.add_argument('-iregex', metavar='PATTERN', default=
r'.*\.(cpp|cc|c\+\+|cxx|c|cl|h|hpp|m|mm|inc|js|ts|proto'
r'|protodevel|java)',
help='custom pattern selecting file paths to reformat '
'(case insensitive, overridden by -regex)')
parser.add_argument('-sort-includes', action='store_true', default=False,
help='let clang-format sort include blocks')
parser.add_argument('-v', '--verbose', action='store_true',
help='be more verbose, ineffective without -i')
parser.add_argument('-style',
help='formatting style to apply (LLVM, Google, Chromium, '
'Mozilla, WebKit)')
parser.add_argument('-binary', default='clang-format',
help='location of binary to use for clang-format')
args = parser.parse_args()
# Extract changed lines for each file.
filename = None
lines_by_file = {}
for line in sys.stdin:
match = re.search('^\+\+\+\ (.*?/){%s}(\S*)' % args.p, line)
if match:
filename = match.group(2)
if filename == None:
continue
if args.regex is not None:
if not re.match('^%s$' % args.regex, filename):
continue
else:
if not re.match('^%s$' % args.iregex, filename, re.IGNORECASE):
continue
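    # Match hunk headers such as '@@ -10,3 +12,5 @@' and record the new-file line range.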
match = re.search('^@@.*\+(\d+)(,(\d+))?', line)
if match:
start_line = int(match.group(1))
line_count = 1
if match.group(3):
line_count = int(match.group(3))
if line_count == 0:
continue
      end_line = start_line + line_count - 1
lines_by_file.setdefault(filename, []).extend(
['-lines', str(start_line) + ':' + str(end_line)])
# Reformat files containing changes in place.
for filename, lines in lines_by_file.iteritems():
if args.i and args.verbose:
print 'Formatting', filename
command = [args.binary, filename]
if args.i:
command.append('-i')
if args.sort_includes:
command.append('-sort-includes')
command.extend(lines)
if args.style:
command.extend(['-style', args.style])
p = subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=None, stdin=subprocess.PIPE)
stdout, stderr = p.communicate()
if p.returncode != 0:
      sys.exit(p.returncode)
if not args.i:
with open(filename) as f:
code = f.readlines()
formatted_code = StringIO.StringIO(stdout).readlines()
diff = difflib.unified_diff(code, formatted_code,
filename, filename,
'(before formatting)', '(after formatting)')
diff_string = string.join(diff, '')
if len(diff_string) > 0:
sys.stdout.write(diff_string)
if __name__ == '__main__':
main()
| 4,454 | 35.516393 | 80 | py |
LowFat | LowFat-master/llvm-4.0.0.src/tools/clang/tools/clang-format/clang-format-sublime.py | # This file is a minimal clang-format sublime-integration. To install:
# - Change 'binary' if clang-format is not on the path (see below).
# - Put this file into your sublime Packages directory, e.g. on Linux:
# ~/.config/sublime-text-2/Packages/User/clang-format-sublime.py
# - Add a key binding:
# { "keys": ["ctrl+shift+c"], "command": "clang_format" },
#
# With this integration you can press the bound key and clang-format will
# format the current lines and selections for all cursor positions. The lines
# or regions are extended to the next bigger syntactic entities.
#
# It operates on the current, potentially unsaved buffer and does not create
# or save any files. To revert a formatting, just undo.
from __future__ import print_function
import sublime
import sublime_plugin
import subprocess
# Change this to the full path if clang-format is not on the path.
binary = 'clang-format'
# Change this to format according to other formatting styles. See the output of
# 'clang-format --help' for a list of supported styles. The default looks for
# a '.clang-format' or '_clang-format' file to indicate the style that should be
# used.
style = 'file'
class ClangFormatCommand(sublime_plugin.TextCommand):
def run(self, edit):
encoding = self.view.encoding()
if encoding == 'Undefined':
encoding = 'utf-8'
regions = []
command = [binary, '-style', style]
for region in self.view.sel():
regions.append(region)
region_offset = min(region.a, region.b)
region_length = abs(region.b - region.a)
command.extend(['-offset', str(region_offset),
'-length', str(region_length),
'-assume-filename', str(self.view.file_name())])
old_viewport_position = self.view.viewport_position()
buf = self.view.substr(sublime.Region(0, self.view.size()))
p = subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, stdin=subprocess.PIPE)
output, error = p.communicate(buf.encode(encoding))
if error:
print(error)
self.view.replace(
edit, sublime.Region(0, self.view.size()),
output.decode(encoding))
self.view.sel().clear()
for region in regions:
self.view.sel().add(region)
# FIXME: Without the 10ms delay, the viewport sometimes jumps.
sublime.set_timeout(lambda: self.view.set_viewport_position(
old_viewport_position, False), 10)
| 2,440 | 40.372881 | 80 | py |
LowFat | LowFat-master/llvm-4.0.0.src/tools/clang/tools/clang-format/clang-format.py | # This file is a minimal clang-format vim-integration. To install:
# - Change 'binary' if clang-format is not on the path (see below).
# - Add to your .vimrc:
#
# map <C-I> :pyf <path-to-this-file>/clang-format.py<cr>
# imap <C-I> <c-o>:pyf <path-to-this-file>/clang-format.py<cr>
#
# The first line enables clang-format for NORMAL and VISUAL mode, the second
# line adds support for INSERT mode. Change "C-I" to another binding if you
# need clang-format on a different key (C-I stands for Ctrl+i).
#
# With this integration you can press the bound key and clang-format will
# format the current line in NORMAL and INSERT mode or the selected region in
# VISUAL mode. The line or region is extended to the next bigger syntactic
# entity.
#
# You can also pass in the variable "l:lines" to choose the range for
# formatting. This variable can either contain "<start line>:<end line>" or
# "all" to format the full file. So, to format the full file, write a function
# like:
# :function FormatFile()
# : let l:lines="all"
# : pyf <path-to-this-file>/clang-format.py
# :endfunction
#
# It operates on the current, potentially unsaved buffer and does not create
# or save any files. To revert a formatting, just undo.
from __future__ import print_function
import difflib
import json
import platform
import subprocess
import sys
import vim
# set g:clang_format_path to the path to clang-format if it is not on the path
# Change this to the full path if clang-format is not on the path.
binary = 'clang-format'
if vim.eval('exists("g:clang_format_path")') == "1":
binary = vim.eval('g:clang_format_path')
# Change this to format according to other formatting styles. See the output of
# 'clang-format --help' for a list of supported styles. The default looks for
# a '.clang-format' or '_clang-format' file to indicate the style that should be
# used.
style = 'file'
fallback_style = None
if vim.eval('exists("g:clang_format_fallback_style")') == "1":
fallback_style = vim.eval('g:clang_format_fallback_style')
def get_buffer(encoding):
if platform.python_version_tuple()[0] == '3':
return vim.current.buffer
return [ line.decode(encoding) for line in vim.current.buffer ]
def main():
# Get the current text.
encoding = vim.eval("&encoding")
buf = get_buffer(encoding)
text = '\n'.join(buf)
# Determine range to format.
if vim.eval('exists("l:lines")') == '1':
lines = vim.eval('l:lines')
else:
lines = '%s:%s' % (vim.current.range.start + 1, vim.current.range.end + 1)
# Determine the cursor position.
cursor = int(vim.eval('line2byte(line("."))+col(".")')) - 2
if cursor < 0:
print('Couldn\'t determine cursor position. Is your file empty?')
return
# Avoid flashing an ugly, ugly cmd prompt on Windows when invoking clang-format.
startupinfo = None
if sys.platform.startswith('win32'):
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
startupinfo.wShowWindow = subprocess.SW_HIDE
# Call formatter.
command = [binary, '-style', style, '-cursor', str(cursor)]
if lines != 'all':
command.extend(['-lines', lines])
if fallback_style:
command.extend(['-fallback-style', fallback_style])
if vim.current.buffer.name:
command.extend(['-assume-filename', vim.current.buffer.name])
p = subprocess.Popen(command,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
stdin=subprocess.PIPE, startupinfo=startupinfo)
stdout, stderr = p.communicate(input=text.encode(encoding))
# If successful, replace buffer contents.
if stderr:
print(stderr)
if not stdout:
print(
'No output from clang-format (crashed?).\n'
'Please report to bugs.llvm.org.'
)
else:
lines = stdout.decode(encoding).split('\n')
output = json.loads(lines[0])
lines = lines[1:]
sequence = difflib.SequenceMatcher(None, buf, lines)
for op in reversed(sequence.get_opcodes()):
      if op[0] != 'equal':
vim.current.buffer[op[1]:op[2]] = lines[op[3]:op[4]]
if output.get('IncompleteFormat'):
print('clang-format: incomplete (syntax errors)')
vim.command('goto %d' % (output['Cursor'] + 1))
main()
| 4,222 | 35.094017 | 82 | py |
LowFat | LowFat-master/llvm-4.0.0.src/tools/clang/tools/scan-view/share/ScanView.py | import BaseHTTPServer
import SimpleHTTPServer
import os
import sys
import urllib, urlparse
import posixpath
import StringIO
import re
import shutil
import threading
import time
import socket
import itertools
import Reporter
import ConfigParser
###
# Various patterns matched or replaced by server.
kReportFileRE = re.compile('(.*/)?report-(.*)\\.html')
kBugKeyValueRE = re.compile('<!-- BUG([^ ]*) (.*) -->')
# <!-- REPORTPROBLEM file="crashes/clang_crash_ndSGF9.mi" stderr="crashes/clang_crash_ndSGF9.mi.stderr.txt" info="crashes/clang_crash_ndSGF9.mi.info" -->
kReportCrashEntryRE = re.compile('<!-- REPORTPROBLEM (.*?)-->')
kReportCrashEntryKeyValueRE = re.compile(' ?([^=]+)="(.*?)"')
kReportReplacements = []
# Add custom javascript.
kReportReplacements.append((re.compile('<!-- SUMMARYENDHEAD -->'), """\
<script language="javascript" type="text/javascript">
function load(url) {
if (window.XMLHttpRequest) {
req = new XMLHttpRequest();
} else if (window.ActiveXObject) {
req = new ActiveXObject("Microsoft.XMLHTTP");
}
if (req != undefined) {
req.open("GET", url, true);
req.send("");
}
}
</script>"""))
# Insert additional columns.
kReportReplacements.append((re.compile('<!-- REPORTBUGCOL -->'),
'<td></td><td></td>'))
# Insert report bug and open file links.
kReportReplacements.append((re.compile('<!-- REPORTBUG id="report-(.*)\\.html" -->'),
('<td class="Button"><a href="report/\\1">Report Bug</a></td>' +
'<td class="Button"><a href="javascript:load(\'open/\\1\')">Open File</a></td>')))
kReportReplacements.append((re.compile('<!-- REPORTHEADER -->'),
'<h3><a href="/">Summary</a> > Report %(report)s</h3>'))
kReportReplacements.append((re.compile('<!-- REPORTSUMMARYEXTRA -->'),
'<td class="Button"><a href="report/%(report)s">Report Bug</a></td>'))
# Insert report crashes link.
# Disabled for the time being until we decide exactly when this should
# be enabled. Also the radar reporter needs to be fixed to report
# multiple files.
#kReportReplacements.append((re.compile('<!-- REPORTCRASHES -->'),
# '<br>These files will automatically be attached to ' +
# 'reports filed here: <a href="report_crashes">Report Crashes</a>.'))
###
# Other simple parameters
kShare = posixpath.join(posixpath.dirname(__file__), '../share/scan-view')
kConfigPath = os.path.expanduser('~/.scanview.cfg')
###
__version__ = "0.1"
__all__ = ["create_server"]
class ReporterThread(threading.Thread):
def __init__(self, report, reporter, parameters, server):
threading.Thread.__init__(self)
self.report = report
self.server = server
self.reporter = reporter
self.parameters = parameters
self.success = False
self.status = None
def run(self):
result = None
try:
if self.server.options.debug:
print >>sys.stderr, "%s: SERVER: submitting bug."%(sys.argv[0],)
self.status = self.reporter.fileReport(self.report, self.parameters)
self.success = True
time.sleep(3)
if self.server.options.debug:
print >>sys.stderr, "%s: SERVER: submission complete."%(sys.argv[0],)
except Reporter.ReportFailure,e:
self.status = e.value
except Exception,e:
s = StringIO.StringIO()
import traceback
print >>s,'<b>Unhandled Exception</b><br><pre>'
traceback.print_exc(e,file=s)
print >>s,'</pre>'
self.status = s.getvalue()
class ScanViewServer(BaseHTTPServer.HTTPServer):
def __init__(self, address, handler, root, reporters, options):
BaseHTTPServer.HTTPServer.__init__(self, address, handler)
self.root = root
self.reporters = reporters
self.options = options
self.halted = False
self.config = None
self.load_config()
def load_config(self):
self.config = ConfigParser.RawConfigParser()
# Add defaults
self.config.add_section('ScanView')
for r in self.reporters:
self.config.add_section(r.getName())
for p in r.getParameters():
if p.saveConfigValue():
self.config.set(r.getName(), p.getName(), '')
# Ignore parse errors
try:
self.config.read([kConfigPath])
except:
pass
# Save on exit
import atexit
atexit.register(lambda: self.save_config())
def save_config(self):
# Ignore errors (only called on exit).
try:
f = open(kConfigPath,'w')
self.config.write(f)
f.close()
except:
pass
def halt(self):
self.halted = True
if self.options.debug:
print >>sys.stderr, "%s: SERVER: halting." % (sys.argv[0],)
def serve_forever(self):
while not self.halted:
if self.options.debug > 1:
print >>sys.stderr, "%s: SERVER: waiting..." % (sys.argv[0],)
try:
self.handle_request()
except OSError,e:
print 'OSError',e.errno
def finish_request(self, request, client_address):
if self.options.autoReload:
import ScanView
self.RequestHandlerClass = reload(ScanView).ScanViewRequestHandler
BaseHTTPServer.HTTPServer.finish_request(self, request, client_address)
def handle_error(self, request, client_address):
# Ignore socket errors
info = sys.exc_info()
if info and isinstance(info[1], socket.error):
if self.options.debug > 1:
print >>sys.stderr, "%s: SERVER: ignored socket error." % (sys.argv[0],)
return
BaseHTTPServer.HTTPServer.handle_error(self, request, client_address)
# Borrowed from Quixote, with simplifications.
def parse_query(qs, fields=None):
if fields is None:
fields = {}
for chunk in filter(None, qs.split('&')):
if '=' not in chunk:
name = chunk
value = ''
else:
name, value = chunk.split('=', 1)
name = urllib.unquote(name.replace('+', ' '))
value = urllib.unquote(value.replace('+', ' '))
item = fields.get(name)
if item is None:
fields[name] = [value]
else:
item.append(value)
return fields
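# Example: parse_query('a=1&b=2&a=3') -> {'a': ['1', '3'], 'b': ['2']}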
class ScanViewRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
server_version = "ScanViewServer/" + __version__
dynamic_mtime = time.time()
def do_HEAD(self):
try:
SimpleHTTPServer.SimpleHTTPRequestHandler.do_HEAD(self)
except Exception,e:
self.handle_exception(e)
def do_GET(self):
try:
SimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self)
except Exception,e:
self.handle_exception(e)
def do_POST(self):
"""Serve a POST request."""
try:
length = self.headers.getheader('content-length') or "0"
try:
length = int(length)
except:
length = 0
content = self.rfile.read(length)
fields = parse_query(content)
f = self.send_head(fields)
if f:
self.copyfile(f, self.wfile)
f.close()
except Exception,e:
self.handle_exception(e)
def log_message(self, format, *args):
if self.server.options.debug:
sys.stderr.write("%s: SERVER: %s - - [%s] %s\n" %
(sys.argv[0],
self.address_string(),
self.log_date_time_string(),
format%args))
def load_report(self, report):
path = os.path.join(self.server.root, 'report-%s.html'%report)
data = open(path).read()
keys = {}
for item in kBugKeyValueRE.finditer(data):
k,v = item.groups()
keys[k] = v
return keys
def load_crashes(self):
path = posixpath.join(self.server.root, 'index.html')
data = open(path).read()
problems = []
for item in kReportCrashEntryRE.finditer(data):
fieldData = item.group(1)
fields = dict([i.groups() for i in
kReportCrashEntryKeyValueRE.finditer(fieldData)])
problems.append(fields)
return problems
def handle_exception(self, exc):
import traceback
s = StringIO.StringIO()
print >>s, "INTERNAL ERROR\n"
traceback.print_exc(exc, s)
f = self.send_string(s.getvalue(), 'text/plain')
if f:
self.copyfile(f, self.wfile)
f.close()
def get_scalar_field(self, name):
if name in self.fields:
return self.fields[name][0]
else:
return None
def submit_bug(self, c):
title = self.get_scalar_field('title')
description = self.get_scalar_field('description')
report = self.get_scalar_field('report')
reporterIndex = self.get_scalar_field('reporter')
files = []
for fileID in self.fields.get('files',[]):
try:
i = int(fileID)
except:
i = None
if i is None or i<0 or i>=len(c.files):
return (False, 'Invalid file ID')
files.append(c.files[i])
if not title:
return (False, "Missing title.")
if not description:
return (False, "Missing description.")
try:
reporterIndex = int(reporterIndex)
except:
return (False, "Invalid report method.")
# Get the reporter and parameters.
reporter = self.server.reporters[reporterIndex]
parameters = {}
for o in reporter.getParameters():
name = '%s_%s'%(reporter.getName(),o.getName())
if name not in self.fields:
return (False,
'Missing field "%s" for %s report method.'%(name,
reporter.getName()))
parameters[o.getName()] = self.get_scalar_field(name)
# Update config defaults.
if report != 'None':
self.server.config.set('ScanView', 'reporter', reporterIndex)
for o in reporter.getParameters():
if o.saveConfigValue():
name = o.getName()
self.server.config.set(reporter.getName(), name, parameters[name])
# Create the report.
bug = Reporter.BugReport(title, description, files)
# Kick off a reporting thread.
t = ReporterThread(bug, reporter, parameters, self.server)
t.start()
# Wait for thread to die...
while t.isAlive():
time.sleep(.25)
submitStatus = t.status
return (t.success, t.status)
def send_report_submit(self):
report = self.get_scalar_field('report')
c = self.get_report_context(report)
if c.reportSource is None:
reportingFor = "Report Crashes > "
fileBug = """\
<a href="/report_crashes">File Bug</a> > """%locals()
else:
reportingFor = '<a href="/%s">Report %s</a> > ' % (c.reportSource,
report)
fileBug = '<a href="/report/%s">File Bug</a> > ' % report
title = self.get_scalar_field('title')
description = self.get_scalar_field('description')
res,message = self.submit_bug(c)
if res:
statusClass = 'SubmitOk'
statusName = 'Succeeded'
else:
statusClass = 'SubmitFail'
statusName = 'Failed'
result = """
<head>
<title>Bug Submission</title>
<link rel="stylesheet" type="text/css" href="/scanview.css" />
</head>
<body>
<h3>
<a href="/">Summary</a> >
%(reportingFor)s
%(fileBug)s
Submit</h3>
<form name="form" action="">
<table class="form">
<tr><td>
<table class="form_group">
<tr>
<td class="form_clabel">Title:</td>
<td class="form_value">
<input type="text" name="title" size="50" value="%(title)s" disabled>
</td>
</tr>
<tr>
<td class="form_label">Description:</td>
<td class="form_value">
<textarea rows="10" cols="80" name="description" disabled>
%(description)s
</textarea>
</td>
</table>
</td></tr>
</table>
</form>
<h1 class="%(statusClass)s">Submission %(statusName)s</h1>
%(message)s
<p>
<hr>
<a href="/">Return to Summary</a>
</body>
</html>"""%locals()
return self.send_string(result)
def send_open_report(self, report):
try:
keys = self.load_report(report)
except IOError:
return self.send_error(400, 'Invalid report.')
file = keys.get('FILE')
if not file or not posixpath.exists(file):
return self.send_error(400, 'File does not exist: "%s"' % file)
import startfile
if self.server.options.debug:
print >>sys.stderr, '%s: SERVER: opening "%s"'%(sys.argv[0],
file)
status = startfile.open(file)
if status:
res = 'Opened: "%s"' % file
else:
res = 'Open failed: "%s"' % file
return self.send_string(res, 'text/plain')
def get_report_context(self, report):
class Context:
pass
if report is None or report == 'None':
data = self.load_crashes()
# Don't allow empty reports.
if not data:
raise ValueError, 'No crashes detected!'
c = Context()
c.title = 'clang static analyzer failures'
stderrSummary = ""
for item in data:
if 'stderr' in item:
path = posixpath.join(self.server.root, item['stderr'])
if os.path.exists(path):
lns = itertools.islice(open(path), 0, 10)
stderrSummary += '%s\n--\n%s' % (item.get('src',
'<unknown>'),
''.join(lns))
c.description = """\
The clang static analyzer failed on these inputs:
%s
STDERR Summary
--------------
%s
""" % ('\n'.join([item.get('src','<unknown>') for item in data]),
stderrSummary)
c.reportSource = None
c.navMarkup = "Report Crashes > "
c.files = []
for item in data:
c.files.append(item.get('src',''))
c.files.append(posixpath.join(self.server.root,
item.get('file','')))
c.files.append(posixpath.join(self.server.root,
item.get('clangfile','')))
c.files.append(posixpath.join(self.server.root,
item.get('stderr','')))
c.files.append(posixpath.join(self.server.root,
item.get('info','')))
# Just in case something failed, ignore files which don't
# exist.
c.files = [f for f in c.files
if os.path.exists(f) and os.path.isfile(f)]
else:
# Check that this is a valid report.
path = posixpath.join(self.server.root, 'report-%s.html' % report)
if not posixpath.exists(path):
raise ValueError, 'Invalid report ID'
keys = self.load_report(report)
c = Context()
            c.title = keys.get('DESC', 'clang error (unrecognized)')
c.description = """\
Bug reported by the clang static analyzer.
Description: %s
File: %s
Line: %s
"""%(c.title, keys.get('FILE','<unknown>'), keys.get('LINE', '<unknown>'))
c.reportSource = 'report-%s.html' % report
c.navMarkup = """<a href="/%s">Report %s</a> > """ % (c.reportSource,
report)
c.files = [path]
return c
def send_report(self, report, configOverrides=None):
def getConfigOption(section, field):
if (configOverrides is not None and
section in configOverrides and
field in configOverrides[section]):
return configOverrides[section][field]
return self.server.config.get(section, field)
# report is None is used for crashes
try:
c = self.get_report_context(report)
except ValueError, e:
return self.send_error(400, e.message)
title = c.title
description= c.description
reportingFor = c.navMarkup
if c.reportSource is None:
extraIFrame = ""
else:
extraIFrame = """\
<iframe src="/%s" width="100%%" height="40%%"
scrolling="auto" frameborder="1">
<a href="/%s">View Bug Report</a>
</iframe>""" % (c.reportSource, c.reportSource)
reporterSelections = []
reporterOptions = []
try:
active = int(getConfigOption('ScanView','reporter'))
except:
active = 0
for i,r in enumerate(self.server.reporters):
selected = (i == active)
if selected:
selectedStr = ' selected'
else:
selectedStr = ''
reporterSelections.append('<option value="%d"%s>%s</option>'%(i,selectedStr,r.getName()))
options = '\n'.join([ o.getHTML(r,title,getConfigOption) for o in r.getParameters()])
display = ('none','')[selected]
reporterOptions.append("""\
<tr id="%sReporterOptions" style="display:%s">
<td class="form_label">%s Options</td>
<td class="form_value">
<table class="form_inner_group">
%s
</table>
</td>
</tr>
"""%(r.getName(),display,r.getName(),options))
reporterSelections = '\n'.join(reporterSelections)
reporterOptionsDivs = '\n'.join(reporterOptions)
reportersArray = '[%s]'%(','.join([`r.getName()` for r in self.server.reporters]))
if c.files:
fieldSize = min(5, len(c.files))
attachFileOptions = '\n'.join(["""\
<option value="%d" selected>%s</option>""" % (i,v) for i,v in enumerate(c.files)])
attachFileRow = """\
<tr>
<td class="form_label">Attach:</td>
<td class="form_value">
<select style="width:100%%" name="files" multiple size=%d>
%s
</select>
</td>
</tr>
""" % (min(5, len(c.files)), attachFileOptions)
else:
attachFileRow = ""
result = """<html>
<head>
<title>File Bug</title>
<link rel="stylesheet" type="text/css" href="/scanview.css" />
</head>
<script language="javascript" type="text/javascript">
var reporters = %(reportersArray)s;
function updateReporterOptions() {
index = document.getElementById('reporter').selectedIndex;
for (var i=0; i < reporters.length; ++i) {
o = document.getElementById(reporters[i] + "ReporterOptions");
if (i == index) {
o.style.display = "";
} else {
o.style.display = "none";
}
}
}
</script>
<body onLoad="updateReporterOptions()">
<h3>
<a href="/">Summary</a> >
%(reportingFor)s
File Bug</h3>
<form name="form" action="/report_submit" method="post">
<input type="hidden" name="report" value="%(report)s">
<table class="form">
<tr><td>
<table class="form_group">
<tr>
<td class="form_clabel">Title:</td>
<td class="form_value">
<input type="text" name="title" size="50" value="%(title)s">
</td>
</tr>
<tr>
<td class="form_label">Description:</td>
<td class="form_value">
<textarea rows="10" cols="80" name="description">
%(description)s
</textarea>
</td>
</tr>
%(attachFileRow)s
</table>
<br>
<table class="form_group">
<tr>
<td class="form_clabel">Method:</td>
<td class="form_value">
<select id="reporter" name="reporter" onChange="updateReporterOptions()">
%(reporterSelections)s
</select>
</td>
</tr>
%(reporterOptionsDivs)s
</table>
<br>
</td></tr>
<tr><td class="form_submit">
<input align="right" type="submit" name="Submit" value="Submit">
</td></tr>
</table>
</form>
%(extraIFrame)s
</body>
</html>"""%locals()
return self.send_string(result)
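    # Top-level request dispatcher: parse the URL, handle the special
    # /report, /open, /quit, /report_submit and /report_crashes entry points,
    # then fall back to serving files out of the report directory.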
def send_head(self, fields=None):
if (self.server.options.onlyServeLocal and
self.client_address[0] != '127.0.0.1'):
return self.send_error(401, 'Unauthorized host.')
if fields is None:
fields = {}
self.fields = fields
o = urlparse.urlparse(self.path)
self.fields = parse_query(o.query, fields)
path = posixpath.normpath(urllib.unquote(o.path))
# Split the components and strip the root prefix.
components = path.split('/')[1:]
# Special case some top-level entries.
if components:
name = components[0]
if len(components)==2:
if name=='report':
return self.send_report(components[1])
elif name=='open':
return self.send_open_report(components[1])
elif len(components)==1:
if name=='quit':
self.server.halt()
return self.send_string('Goodbye.', 'text/plain')
elif name=='report_submit':
return self.send_report_submit()
elif name=='report_crashes':
overrides = { 'ScanView' : {},
'Radar' : {},
'Email' : {} }
for i,r in enumerate(self.server.reporters):
if r.getName() == 'Radar':
overrides['ScanView']['reporter'] = i
break
overrides['Radar']['Component'] = 'llvm - checker'
overrides['Radar']['Component Version'] = 'X'
return self.send_report(None, overrides)
elif name=='favicon.ico':
return self.send_path(posixpath.join(kShare,'bugcatcher.ico'))
# Match directory entries.
if components[-1] == '':
components[-1] = 'index.html'
relpath = '/'.join(components)
path = posixpath.join(self.server.root, relpath)
if self.server.options.debug > 1:
print >>sys.stderr, '%s: SERVER: sending path "%s"'%(sys.argv[0],
path)
return self.send_path(path)
def send_404(self):
self.send_error(404, "File not found")
return None
def send_path(self, path):
# If the requested path is outside the root directory, do not open it
rel = os.path.abspath(path)
if not rel.startswith(os.path.abspath(self.server.root)):
return self.send_404()
ctype = self.guess_type(path)
if ctype.startswith('text/'):
# Patch file instead
return self.send_patched_file(path, ctype)
else:
mode = 'rb'
try:
f = open(path, mode)
except IOError:
return self.send_404()
return self.send_file(f, ctype)
def send_file(self, f, ctype):
# Send the file verbatim; text files are patched separately in
# send_patched_file.
self.send_response(200)
self.send_header("Content-type", ctype)
fs = os.fstat(f.fileno())
self.send_header("Content-Length", str(fs[6]))
self.send_header("Last-Modified", self.date_time_string(fs.st_mtime))
self.end_headers()
return f
def send_string(self, s, ctype='text/html', headers=True, mtime=None):
if headers:
self.send_response(200)
self.send_header("Content-type", ctype)
self.send_header("Content-Length", str(len(s)))
if mtime is None:
mtime = self.dynamic_mtime
self.send_header("Last-Modified", self.date_time_string(mtime))
self.end_headers()
return StringIO.StringIO(s)
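    # Serve a text file after substituting a limited set of variables via the
    # kReportReplacements patterns, so static report pages can link back into
    # the server.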
def send_patched_file(self, path, ctype):
# Allow a very limited set of variables. This is pretty gross.
variables = {}
variables['report'] = ''
m = kReportFileRE.match(path)
if m:
variables['report'] = m.group(2)
try:
f = open(path,'r')
except IOError:
return self.send_404()
fs = os.fstat(f.fileno())
data = f.read()
for a,b in kReportReplacements:
data = a.sub(b % variables, data)
return self.send_string(data, ctype, mtime=fs.st_mtime)
def create_server(address, options, root):
import Reporter
reporters = Reporter.getReporters()
return ScanViewServer(address, ScanViewRequestHandler,
root,
reporters,
options)
| 25,495 | 32.197917 | 154 | py |
LowFat | LowFat-master/llvm-4.0.0.src/tools/clang/tools/scan-view/share/startfile.py | """Utility for opening a file using the default application in a cross-platform
manner. Modified from http://code.activestate.com/recipes/511443/.
"""
__version__ = '1.1x'
__all__ = ['open']
import os
import sys
import webbrowser
import subprocess
_controllers = {}
_open = None
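# Platform-specific controllers are registered below; _open ends up bound to
# the best available launcher for the current platform.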
class BaseController(object):
'''Base class for open program controllers.'''
def __init__(self, name):
self.name = name
def open(self, filename):
raise NotImplementedError
class Controller(BaseController):
'''Controller for a generic open program.'''
def __init__(self, *args):
super(Controller, self).__init__(os.path.basename(args[0]))
self.args = list(args)
def _invoke(self, cmdline):
if sys.platform[:3] == 'win':
closefds = False
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
else:
closefds = True
startupinfo = None
if (os.environ.get('DISPLAY') or sys.platform[:3] == 'win' or
sys.platform == 'darwin'):
inout = file(os.devnull, 'r+')
else:
# for TTY programs, we need stdin/out
inout = None
# if possible, put the child process in a separate process group,
# so keyboard interrupts don't affect the child process as well as
# Python
setsid = getattr(os, 'setsid', None)
if not setsid:
setsid = getattr(os, 'setpgrp', None)
pipe = subprocess.Popen(cmdline, stdin=inout, stdout=inout,
stderr=inout, close_fds=closefds,
preexec_fn=setsid, startupinfo=startupinfo)
# It is assumed that these kinds of tools (gnome-open, kfmclient,
# exo-open, xdg-open and open for OSX) exit immediately after launching
# the specific application
returncode = pipe.wait()
if hasattr(self, 'fixreturncode'):
returncode = self.fixreturncode(returncode)
return not returncode
def open(self, filename):
if isinstance(filename, basestring):
cmdline = self.args + [filename]
else:
# assume it is a sequence
cmdline = self.args + filename
try:
return self._invoke(cmdline)
except OSError:
return False
# Platform support for Windows
if sys.platform[:3] == 'win':
class Start(BaseController):
'''Controller for the win32 start progam through os.startfile.'''
def open(self, filename):
try:
os.startfile(filename)
except WindowsError:
# [Error 22] No application is associated with the specified
# file for this operation: '<URL>'
return False
else:
return True
_controllers['windows-default'] = Start('start')
_open = _controllers['windows-default'].open
# Platform support for MacOS
elif sys.platform == 'darwin':
_controllers['open']= Controller('open')
_open = _controllers['open'].open
# Platform support for Unix
else:
import commands
# @WARNING: uses the private API of the webbrowser module
from webbrowser import _iscommand
class KfmClient(Controller):
'''Controller for the KDE kfmclient program.'''
def __init__(self, kfmclient='kfmclient'):
super(KfmClient, self).__init__(kfmclient, 'exec')
self.kde_version = self.detect_kde_version()
def detect_kde_version(self):
kde_version = None
try:
info = commands.getoutput('kde-config --version')
for line in info.splitlines():
if line.startswith('KDE'):
kde_version = line.split(':')[-1].strip()
break
except (OSError, RuntimeError):
pass
return kde_version
def fixreturncode(self, returncode):
if returncode is not None and self.kde_version > '3.5.4':
return returncode
else:
return os.EX_OK
def detect_desktop_environment():
'''Checks for known desktop environments
Return the desktop environment's name, lowercase (kde, gnome, xfce)
or "generic"
'''
desktop_environment = 'generic'
if os.environ.get('KDE_FULL_SESSION') == 'true':
desktop_environment = 'kde'
elif os.environ.get('GNOME_DESKTOP_SESSION_ID'):
desktop_environment = 'gnome'
else:
try:
info = commands.getoutput('xprop -root _DT_SAVE_MODE')
if ' = "xfce4"' in info:
desktop_environment = 'xfce'
except (OSError, RuntimeError):
pass
return desktop_environment
def register_X_controllers():
if _iscommand('kfmclient'):
_controllers['kde-open'] = KfmClient()
for command in ('gnome-open', 'exo-open', 'xdg-open'):
if _iscommand(command):
_controllers[command] = Controller(command)
def get():
controllers_map = {
'gnome': 'gnome-open',
'kde': 'kde-open',
'xfce': 'exo-open',
}
desktop_environment = detect_desktop_environment()
try:
controller_name = controllers_map[desktop_environment]
return _controllers[controller_name].open
except KeyError:
if _controllers.has_key('xdg-open'):
return _controllers['xdg-open'].open
else:
return webbrowser.open
if os.environ.get("DISPLAY"):
register_X_controllers()
_open = get()
def open(filename):
'''Open a file or an URL in the registered default application.'''
return _open(filename)
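# Example usage (a minimal sketch; the path below is illustrative):
#
#   import startfile
#   if startfile.open('/tmp/report-1.html'):
#       print 'opened in the default application'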
| 5,991 | 28.372549 | 79 | py |
LowFat | LowFat-master/llvm-4.0.0.src/tools/clang/tools/scan-view/share/Reporter.py | """Methods for reporting bugs."""
import subprocess, sys, os
__all__ = ['ReportFailure', 'BugReport', 'getReporters']
#
class ReportFailure(Exception):
"""Generic exception for failures in bug reporting."""
def __init__(self, value):
self.value = value
# Collect information about a bug.
class BugReport:
def __init__(self, title, description, files):
self.title = title
self.description = description
self.files = files
# Reporter interfaces.
import email, mimetypes, smtplib
from email import encoders
from email.message import Message
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
#===------------------------------------------------------------------------===#
# ReporterParameter
#===------------------------------------------------------------------------===#
class ReporterParameter:
def __init__(self, n):
self.name = n
def getName(self):
return self.name
def getValue(self,r,bugtype,getConfigOption):
return getConfigOption(r.getName(),self.getName())
def saveConfigValue(self):
return True
class TextParameter (ReporterParameter):
def getHTML(self,r,bugtype,getConfigOption):
return """\
<tr>
<td class="form_clabel">%s:</td>
<td class="form_value"><input type="text" name="%s_%s" value="%s"></td>
</tr>"""%(self.getName(),r.getName(),self.getName(),self.getValue(r,bugtype,getConfigOption))
class SelectionParameter (ReporterParameter):
def __init__(self, n, values):
ReporterParameter.__init__(self,n)
self.values = values
def getHTML(self,r,bugtype,getConfigOption):
default = self.getValue(r,bugtype,getConfigOption)
return """\
<tr>
<td class="form_clabel">%s:</td><td class="form_value"><select name="%s_%s">
%s
</select></td>"""%(self.getName(),r.getName(),self.getName(),'\n'.join(["""\
<option value="%s"%s>%s</option>"""%(o[0],
o[0] == default and ' selected="selected"' or '',
o[1]) for o in self.values]))
#===------------------------------------------------------------------------===#
# Reporters
#===------------------------------------------------------------------------===#
class EmailReporter:
def getName(self):
return 'Email'
def getParameters(self):
return map(lambda x:TextParameter(x),['To', 'From', 'SMTP Server', 'SMTP Port'])
# Lifted from python email module examples.
def attachFile(self, outer, path):
# Guess the content type based on the file's extension. Encoding
# will be ignored, although we should check for simple things like
# gzip'd or compressed files.
ctype, encoding = mimetypes.guess_type(path)
if ctype is None or encoding is not None:
# No guess could be made, or the file is encoded (compressed), so
# use a generic bag-of-bits type.
ctype = 'application/octet-stream'
maintype, subtype = ctype.split('/', 1)
if maintype == 'text':
fp = open(path)
# Note: we should handle calculating the charset
msg = MIMEText(fp.read(), _subtype=subtype)
fp.close()
else:
fp = open(path, 'rb')
msg = MIMEBase(maintype, subtype)
msg.set_payload(fp.read())
fp.close()
# Encode the payload using Base64
encoders.encode_base64(msg)
# Set the filename parameter
msg.add_header('Content-Disposition', 'attachment', filename=os.path.basename(path))
outer.attach(msg)
def fileReport(self, report, parameters):
mainMsg = """\
BUG REPORT
---
Title: %s
Description: %s
"""%(report.title, report.description)
if not parameters.get('To'):
raise ReportFailure('No "To" address specified.')
if not parameters.get('From'):
raise ReportFailure('No "From" address specified.')
msg = MIMEMultipart()
msg['Subject'] = 'BUG REPORT: %s'%(report.title)
# FIXME: Get config parameters
msg['To'] = parameters.get('To')
msg['From'] = parameters.get('From')
msg.preamble = mainMsg
msg.attach(MIMEText(mainMsg, _subtype='plain'))
for file in report.files:
self.attachFile(msg, file)
try:
s = smtplib.SMTP(host=parameters.get('SMTP Server'),
port=parameters.get('SMTP Port'))
s.sendmail(msg['From'], msg['To'], msg.as_string())
s.close()
except:
raise ReportFailure('Unable to send message via SMTP.')
return "Message sent!"
class BugzillaReporter:
def getName(self):
return 'Bugzilla'
def getParameters(self):
return map(lambda x:TextParameter(x),['URL','Product'])
def fileReport(self, report, parameters):
raise NotImplementedError
class RadarClassificationParameter(SelectionParameter):
def __init__(self):
SelectionParameter.__init__(self,"Classification",
[['1', 'Security'], ['2', 'Crash/Hang/Data Loss'],
['3', 'Performance'], ['4', 'UI/Usability'],
['6', 'Serious Bug'], ['7', 'Other']])
def saveConfigValue(self):
return False
def getValue(self,r,bugtype,getConfigOption):
if bugtype.find("leak") != -1:
return '3'
elif bugtype.find("dereference") != -1:
return '2'
elif bugtype.find("missing ivar release") != -1:
return '3'
else:
return '7'
class RadarReporter:
@staticmethod
def isAvailable():
# FIXME: locate this .scpt file more robustly
path = os.path.join(os.path.dirname(__file__),'../share/scan-view/GetRadarVersion.scpt')
try:
p = subprocess.Popen(['osascript',path],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except:
return False
data,err = p.communicate()
res = p.wait()
# FIXME: Check version? Check for no errors?
return res == 0
def getName(self):
return 'Radar'
def getParameters(self):
return [ TextParameter('Component'), TextParameter('Component Version'),
RadarClassificationParameter() ]
def fileReport(self, report, parameters):
component = parameters.get('Component', '')
componentVersion = parameters.get('Component Version', '')
classification = parameters.get('Classification', '')
personID = ""
diagnosis = ""
config = ""
if not component.strip():
component = 'Bugs found by clang Analyzer'
if not componentVersion.strip():
componentVersion = 'X'
script = os.path.join(os.path.dirname(__file__),'../share/scan-view/FileRadar.scpt')
args = ['osascript', script, component, componentVersion, classification, personID, report.title,
report.description, diagnosis, config] + map(os.path.abspath, report.files)
# print >>sys.stderr, args
try:
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except:
raise ReportFailure("Unable to file radar (AppleScript failure).")
data, err = p.communicate()
res = p.wait()
if res:
raise ReportFailure("Unable to file radar (AppleScript failure).")
try:
values = eval(data)
except:
raise ReportFailure("Unable to process radar results.")
# We expect (int: bugID, str: message)
if len(values) != 2 or not isinstance(values[0], int):
raise ReportFailure("Unable to process radar results.")
bugID,message = values
bugID = int(bugID)
if not bugID:
raise ReportFailure(message)
return "Filed: <a href=\"rdar://%d/\">%d</a>"%(bugID,bugID)
###
def getReporters():
reporters = []
if RadarReporter.isAvailable():
reporters.append(RadarReporter())
reporters.append(EmailReporter())
return reporters
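# Example usage (a sketch; the addresses and server settings below are
# illustrative, and the expected parameters depend on the reporter chosen):
#
#   report = BugReport('title', 'description', ['/path/to/report.html'])
#   reporter = getReporters()[-1] # EmailReporter is always appended last
#   status = reporter.fileReport(report, {'To': 'dev@example.com',
#                                         'From': 'me@example.com',
#                                         'SMTP Server': 'localhost',
#                                         'SMTP Port': '25'})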
| 8,153 | 31.746988 | 105 | py |
LowFat | LowFat-master/llvm-4.0.0.src/tools/clang/tools/scan-build-py/libear/__init__.py | # -*- coding: utf-8 -*-
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
""" This module compiles the intercept library. """
import sys
import os
import os.path
import re
import tempfile
import shutil
import contextlib
import logging
__all__ = ['build_libear']
def build_libear(compiler, dst_dir):
""" Returns the full path to the 'libear' library. """
try:
src_dir = os.path.dirname(os.path.realpath(__file__))
toolset = make_toolset(src_dir)
toolset.set_compiler(compiler)
toolset.set_language_standard('c99')
toolset.add_definitions(['-D_GNU_SOURCE'])
configure = do_configure(toolset)
configure.check_function_exists('execve', 'HAVE_EXECVE')
configure.check_function_exists('execv', 'HAVE_EXECV')
configure.check_function_exists('execvpe', 'HAVE_EXECVPE')
configure.check_function_exists('execvp', 'HAVE_EXECVP')
configure.check_function_exists('execvP', 'HAVE_EXECVP2')
configure.check_function_exists('exect', 'HAVE_EXECT')
configure.check_function_exists('execl', 'HAVE_EXECL')
configure.check_function_exists('execlp', 'HAVE_EXECLP')
configure.check_function_exists('execle', 'HAVE_EXECLE')
configure.check_function_exists('posix_spawn', 'HAVE_POSIX_SPAWN')
configure.check_function_exists('posix_spawnp', 'HAVE_POSIX_SPAWNP')
configure.check_symbol_exists('_NSGetEnviron', 'crt_externs.h',
'HAVE_NSGETENVIRON')
configure.write_by_template(
os.path.join(src_dir, 'config.h.in'),
os.path.join(dst_dir, 'config.h'))
target = create_shared_library('ear', toolset)
target.add_include(dst_dir)
target.add_sources('ear.c')
target.link_against(toolset.dl_libraries())
target.link_against(['pthread'])
target.build_release(dst_dir)
return os.path.join(dst_dir, target.name)
except Exception:
logging.info("Could not build interception library.", exc_info=True)
return None
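# Example usage (a sketch; the compiler, destination path and LD_PRELOAD
# wiring are illustrative):
#
#   libear_path = build_libear('cc', '/tmp/libear')
#   if libear_path:
#       os.environ['LD_PRELOAD'] = libear_path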
def execute(cmd, *args, **kwargs):
""" Make subprocess execution silent. """
import subprocess
kwargs.update({'stdout': subprocess.PIPE, 'stderr': subprocess.STDOUT})
return subprocess.check_call(cmd, *args, **kwargs)
@contextlib.contextmanager
def TemporaryDirectory(**kwargs):
name = tempfile.mkdtemp(**kwargs)
try:
yield name
finally:
shutil.rmtree(name)
class Toolset(object):
""" Abstract class to represent different toolset. """
def __init__(self, src_dir):
self.src_dir = src_dir
self.compiler = None
self.c_flags = []
def set_compiler(self, compiler):
""" part of public interface """
self.compiler = compiler
def set_language_standard(self, standard):
""" part of public interface """
self.c_flags.append('-std=' + standard)
def add_definitions(self, defines):
""" part of public interface """
self.c_flags.extend(defines)
def dl_libraries(self):
raise NotImplementedError()
def shared_library_name(self, name):
raise NotImplementedError()
def shared_library_c_flags(self, release):
extra = ['-DNDEBUG', '-O3'] if release else []
return extra + ['-fPIC'] + self.c_flags
def shared_library_ld_flags(self, release, name):
raise NotImplementedError()
class DarwinToolset(Toolset):
def __init__(self, src_dir):
Toolset.__init__(self, src_dir)
def dl_libraries(self):
return []
def shared_library_name(self, name):
return 'lib' + name + '.dylib'
def shared_library_ld_flags(self, release, name):
extra = ['-dead_strip'] if release else []
return extra + ['-dynamiclib', '-install_name', '@rpath/' + name]
class UnixToolset(Toolset):
def __init__(self, src_dir):
Toolset.__init__(self, src_dir)
def dl_libraries(self):
return []
def shared_library_name(self, name):
return 'lib' + name + '.so'
def shared_library_ld_flags(self, release, name):
extra = []
return extra + ['-shared', '-Wl,-soname,' + name]
class LinuxToolset(UnixToolset):
def __init__(self, src_dir):
UnixToolset.__init__(self, src_dir)
def dl_libraries(self):
return ['dl']
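# Pick the toolset matching the host platform; Windows is not supported.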
def make_toolset(src_dir):
platform = sys.platform
if platform in {'win32', 'cygwin'}:
raise RuntimeError('not implemented on this platform')
elif platform == 'darwin':
return DarwinToolset(src_dir)
elif platform in {'linux', 'linux2'}:
return LinuxToolset(src_dir)
else:
return UnixToolset(src_dir)
class Configure(object):
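    """Poor man's configure step: probes the toolchain by compiling tiny
    test programs and records HAVE_* style results for config.h."""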
def __init__(self, toolset):
self.ctx = toolset
self.results = {'APPLE': sys.platform == 'darwin'}
def _try_to_compile_and_link(self, source):
try:
with TemporaryDirectory() as work_dir:
src_file = 'check.c'
with open(os.path.join(work_dir, src_file), 'w') as handle:
handle.write(source)
execute([self.ctx.compiler, src_file] + self.ctx.c_flags,
cwd=work_dir)
return True
except Exception:
return False
def check_function_exists(self, function, name):
template = "int FUNCTION(); int main() { return FUNCTION(); }"
source = template.replace("FUNCTION", function)
logging.debug('Checking function %s', function)
found = self._try_to_compile_and_link(source)
logging.debug('Checking function %s -- %s', function,
'found' if found else 'not found')
self.results.update({name: found})
def check_symbol_exists(self, symbol, include, name):
template = """#include <INCLUDE>
int main() { return ((int*)(&SYMBOL))[0]; }"""
source = template.replace('INCLUDE', include).replace("SYMBOL", symbol)
logging.debug('Checking symbol %s', symbol)
found = self._try_to_compile_and_link(source)
logging.debug('Checking symbol %s -- %s', symbol,
'found' if found else 'not found')
self.results.update({name: found})
def write_by_template(self, template, output):
def transform(line, definitions):
pattern = re.compile(r'^#cmakedefine\s+(\S+)')
m = pattern.match(line)
if m:
key = m.group(1)
if key not in definitions or not definitions[key]:
return '/* #undef {} */\n'.format(key)
else:
return '#define {}\n'.format(key)
return line
with open(template, 'r') as src_handle:
logging.debug('Writing config to %s', output)
with open(output, 'w') as dst_handle:
for line in src_handle:
dst_handle.write(transform(line, self.results))
def do_configure(toolset):
return Configure(toolset)
class SharedLibrary(object):
def __init__(self, name, toolset):
self.name = toolset.shared_library_name(name)
self.ctx = toolset
self.inc = []
self.src = []
self.lib = []
def add_include(self, directory):
self.inc.extend(['-I', directory])
def add_sources(self, source):
self.src.append(source)
def link_against(self, libraries):
self.lib.extend(['-l' + lib for lib in libraries])
def build_release(self, directory):
for src in self.src:
logging.debug('Compiling %s', src)
execute(
[self.ctx.compiler, '-c', os.path.join(self.ctx.src_dir, src),
'-o', src + '.o'] + self.inc +
self.ctx.shared_library_c_flags(True),
cwd=directory)
logging.debug('Linking %s', self.name)
execute(
[self.ctx.compiler] + [src + '.o' for src in self.src] +
['-o', self.name] + self.lib +
self.ctx.shared_library_ld_flags(True, self.name),
cwd=directory)
def create_shared_library(name, toolset):
return SharedLibrary(name, toolset)
| 8,405 | 31.206897 | 79 | py |
LowFat | LowFat-master/llvm-4.0.0.src/tools/clang/tools/scan-build-py/tests/__init__.py | # -*- coding: utf-8 -*-
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
import unittest
import tests.unit
import tests.functional.cases
def suite():
loader = unittest.TestLoader()
suite = unittest.TestSuite()
suite.addTests(loader.loadTestsFromModule(tests.unit))
suite.addTests(loader.loadTestsFromModule(tests.functional.cases))
return suite
| 489 | 24.789474 | 71 | py |
LowFat | LowFat-master/llvm-4.0.0.src/tools/clang/tools/scan-build-py/tests/unit/test_libear.py | # -*- coding: utf-8 -*-
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
import libear as sut
import unittest
import os.path
class TemporaryDirectoryTest(unittest.TestCase):
def test_creates_directory(self):
dirname = None
with sut.TemporaryDirectory() as tmpdir:
self.assertTrue(os.path.isdir(tmpdir))
dirname = tmpdir
self.assertIsNotNone(dirname)
self.assertFalse(os.path.exists(dirname))
def test_removes_directory_when_exception(self):
dirname = None
try:
with sut.TemporaryDirectory() as tmpdir:
self.assertTrue(os.path.isdir(tmpdir))
dirname = tmpdir
raise RuntimeError('message')
except:
self.assertIsNotNone(dirname)
self.assertFalse(os.path.exists(dirname))
| 964 | 30.129032 | 71 | py |
LowFat | LowFat-master/llvm-4.0.0.src/tools/clang/tools/scan-build-py/tests/unit/test_compilation.py | # -*- coding: utf-8 -*-
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
import libscanbuild.compilation as sut
import unittest
class CompilerTest(unittest.TestCase):
def test_is_compiler_call(self):
self.assertIsNotNone(sut.compiler_language(['clang']))
self.assertIsNotNone(sut.compiler_language(['clang-3.6']))
self.assertIsNotNone(sut.compiler_language(['clang++']))
self.assertIsNotNone(sut.compiler_language(['clang++-3.5.1']))
self.assertIsNotNone(sut.compiler_language(['cc']))
self.assertIsNotNone(sut.compiler_language(['c++']))
self.assertIsNotNone(sut.compiler_language(['gcc']))
self.assertIsNotNone(sut.compiler_language(['g++']))
self.assertIsNotNone(sut.compiler_language(['/usr/local/bin/gcc']))
self.assertIsNotNone(sut.compiler_language(['/usr/local/bin/g++']))
self.assertIsNotNone(sut.compiler_language(['/usr/local/bin/clang']))
self.assertIsNotNone(
sut.compiler_language(['armv7_neno-linux-gnueabi-g++']))
self.assertIsNone(sut.compiler_language([]))
self.assertIsNone(sut.compiler_language(['']))
self.assertIsNone(sut.compiler_language(['ld']))
self.assertIsNone(sut.compiler_language(['as']))
self.assertIsNone(sut.compiler_language(['/usr/local/bin/compiler']))
class SplitTest(unittest.TestCase):
def test_detect_cxx_from_compiler_name(self):
def test(cmd):
result = sut.split_command([cmd, '-c', 'src.c'])
self.assertIsNotNone(result, "wrong input for test")
return result.compiler == 'c++'
self.assertFalse(test('cc'))
self.assertFalse(test('gcc'))
self.assertFalse(test('clang'))
self.assertTrue(test('c++'))
self.assertTrue(test('g++'))
self.assertTrue(test('g++-5.3.1'))
self.assertTrue(test('clang++'))
self.assertTrue(test('clang++-3.7.1'))
self.assertTrue(test('armv7_neno-linux-gnueabi-g++'))
def test_action(self):
self.assertIsNotNone(sut.split_command(['clang', 'source.c']))
self.assertIsNotNone(sut.split_command(['clang', '-c', 'source.c']))
self.assertIsNotNone(sut.split_command(['clang', '-c', 'source.c',
'-MF', 'a.d']))
self.assertIsNone(sut.split_command(['clang', '-E', 'source.c']))
self.assertIsNone(sut.split_command(['clang', '-c', '-E', 'source.c']))
self.assertIsNone(sut.split_command(['clang', '-c', '-M', 'source.c']))
self.assertIsNone(
sut.split_command(['clang', '-c', '-MM', 'source.c']))
def test_source_file(self):
def test(expected, cmd):
self.assertEqual(expected, sut.split_command(cmd).files)
test(['src.c'], ['clang', 'src.c'])
test(['src.c'], ['clang', '-c', 'src.c'])
test(['src.C'], ['clang', '-x', 'c', 'src.C'])
test(['src.cpp'], ['clang++', '-c', 'src.cpp'])
test(['s1.c', 's2.c'], ['clang', '-c', 's1.c', 's2.c'])
test(['s1.c', 's2.c'], ['cc', 's1.c', 's2.c', '-ldep', '-o', 'a.out'])
test(['src.c'], ['clang', '-c', '-I', './include', 'src.c'])
test(['src.c'], ['clang', '-c', '-I', '/opt/me/include', 'src.c'])
test(['src.c'], ['clang', '-c', '-D', 'config=file.c', 'src.c'])
self.assertIsNone(
sut.split_command(['cc', 'this.o', 'that.o', '-o', 'a.out']))
self.assertIsNone(
sut.split_command(['cc', 'this.o', '-lthat', '-o', 'a.out']))
def test_filter_flags(self):
def test(expected, flags):
command = ['clang', '-c', 'src.c'] + flags
self.assertEqual(expected, sut.split_command(command).flags)
def same(expected):
test(expected, expected)
def filtered(flags):
test([], flags)
same([])
same(['-I', '/opt/me/include', '-DNDEBUG', '-ULIMITS'])
same(['-O', '-O2'])
same(['-m32', '-mmms'])
same(['-Wall', '-Wno-unused', '-g', '-funroll-loops'])
filtered([])
filtered(['-lclien', '-L/opt/me/lib', '-L', '/opt/you/lib'])
filtered(['-static'])
filtered(['-MD', '-MT', 'something'])
filtered(['-MMD', '-MF', 'something'])
class SourceClassifierTest(unittest.TestCase):
def test_sources(self):
self.assertIsNone(sut.classify_source('file.o'))
self.assertIsNone(sut.classify_source('file.exe'))
self.assertIsNone(sut.classify_source('/path/file.o'))
self.assertIsNone(sut.classify_source('clang'))
self.assertEqual('c', sut.classify_source('file.c'))
self.assertEqual('c', sut.classify_source('./file.c'))
self.assertEqual('c', sut.classify_source('/path/file.c'))
self.assertEqual('c++', sut.classify_source('file.c', False))
self.assertEqual('c++', sut.classify_source('./file.c', False))
self.assertEqual('c++', sut.classify_source('/path/file.c', False))
| 5,144 | 40.829268 | 79 | py |
LowFat | LowFat-master/llvm-4.0.0.src/tools/clang/tools/scan-build-py/tests/unit/test_analyze.py | # -*- coding: utf-8 -*-
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
import libscanbuild.analyze as sut
| 229 | 27.75 | 71 | py |
LowFat | LowFat-master/llvm-4.0.0.src/tools/clang/tools/scan-build-py/tests/unit/test_clang.py | # -*- coding: utf-8 -*-
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
import libear
import libscanbuild.clang as sut
import unittest
import os.path
import sys
class ClangGetVersion(unittest.TestCase):
def test_get_version_is_not_empty(self):
self.assertTrue(sut.get_version('clang'))
def test_get_version_throws(self):
with self.assertRaises(OSError):
sut.get_version('notexists')
class ClangGetArgumentsTest(unittest.TestCase):
def test_get_clang_arguments(self):
with libear.TemporaryDirectory() as tmpdir:
filename = os.path.join(tmpdir, 'test.c')
with open(filename, 'w') as handle:
handle.write('')
result = sut.get_arguments(
['clang', '-c', filename, '-DNDEBUG', '-Dvar="this is it"'],
tmpdir)
self.assertTrue('NDEBUG' in result)
self.assertTrue('var="this is it"' in result)
def test_get_clang_arguments_fails(self):
with self.assertRaises(Exception):
sut.get_arguments(['clang', '-x', 'c', 'notexist.c'], '.')
def test_get_clang_arguments_fails_badly(self):
with self.assertRaises(OSError):
sut.get_arguments(['notexist'], '.')
class ClangGetCheckersTest(unittest.TestCase):
def test_get_checkers(self):
# this test only checks that the call does not crash
result = sut.get_checkers('clang', [])
self.assertTrue(len(result))
# do check result types
string_type = unicode if sys.version_info < (3,) else str
for key, value in result.items():
self.assertEqual(string_type, type(key))
self.assertEqual(string_type, type(value[0]))
self.assertEqual(bool, type(value[1]))
def test_get_active_checkers(self):
# this test only checks that the call does not crash
result = sut.get_active_checkers('clang', [])
self.assertTrue(len(result))
# do check result types
for value in result:
self.assertEqual(str, type(value))
def test_is_active(self):
test = sut.is_active(['a', 'b.b', 'c.c.c'])
self.assertTrue(test('a'))
self.assertTrue(test('a.b'))
self.assertTrue(test('b.b'))
self.assertTrue(test('b.b.c'))
self.assertTrue(test('c.c.c.p'))
self.assertFalse(test('ab'))
self.assertFalse(test('ba'))
self.assertFalse(test('bb'))
self.assertFalse(test('c.c'))
self.assertFalse(test('b'))
self.assertFalse(test('d'))
def test_parse_checkers(self):
lines = [
'OVERVIEW: Clang Static Analyzer Checkers List',
'',
'CHECKERS:',
' checker.one Checker One description',
' checker.two',
' Checker Two description']
result = dict(sut.parse_checkers(lines))
self.assertTrue('checker.one' in result)
self.assertEqual('Checker One description', result.get('checker.one'))
self.assertTrue('checker.two' in result)
self.assertEqual('Checker Two description', result.get('checker.two'))
| 3,287 | 33.610526 | 78 | py |
LowFat | LowFat-master/llvm-4.0.0.src/tools/clang/tools/scan-build-py/tests/unit/test_intercept.py | # -*- coding: utf-8 -*-
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
import libear
import libscanbuild.intercept as sut
import unittest
import os.path
class InterceptUtilTest(unittest.TestCase):
def test_format_entry_filters_action(self):
def test(command):
trace = {'command': command, 'directory': '/opt/src/project'}
return list(sut.format_entry(trace))
self.assertTrue(test(['cc', '-c', 'file.c', '-o', 'file.o']))
self.assertFalse(test(['cc', '-E', 'file.c']))
self.assertFalse(test(['cc', '-MM', 'file.c']))
self.assertFalse(test(['cc', 'this.o', 'that.o', '-o', 'a.out']))
def test_format_entry_normalize_filename(self):
parent = os.path.join(os.sep, 'home', 'me')
current = os.path.join(parent, 'project')
def test(filename):
trace = {'directory': current, 'command': ['cc', '-c', filename]}
return list(sut.format_entry(trace))[0]['file']
self.assertEqual(os.path.join(current, 'file.c'), test('file.c'))
self.assertEqual(os.path.join(current, 'file.c'), test('./file.c'))
self.assertEqual(os.path.join(parent, 'file.c'), test('../file.c'))
self.assertEqual(os.path.join(current, 'file.c'),
test(os.path.join(current, 'file.c')))
def test_sip(self):
def create_status_report(filename, message):
content = """#!/usr/bin/env sh
echo 'sa-la-la-la'
echo 'la-la-la'
echo '{0}'
echo 'sa-la-la-la'
echo 'la-la-la'
""".format(message)
lines = [line.strip() for line in content.split('\n')]
with open(filename, 'w') as handle:
handle.write('\n'.join(lines))
handle.close()
os.chmod(filename, 0x1ff)
def create_csrutil(dest_dir, status):
filename = os.path.join(dest_dir, 'csrutil')
message = 'System Integrity Protection status: {0}'.format(status)
return create_status_report(filename, message)
def create_sestatus(dest_dir, status):
filename = os.path.join(dest_dir, 'sestatus')
message = 'SELinux status:\t{0}'.format(status)
return create_status_report(filename, message)
ENABLED = 'enabled'
DISABLED = 'disabled'
OSX = 'darwin'
LINUX = 'linux'
with libear.TemporaryDirectory() as tmpdir:
try:
saved = os.environ['PATH']
os.environ['PATH'] = tmpdir + ':' + saved
create_csrutil(tmpdir, ENABLED)
self.assertTrue(sut.is_preload_disabled(OSX))
create_csrutil(tmpdir, DISABLED)
self.assertFalse(sut.is_preload_disabled(OSX))
create_sestatus(tmpdir, ENABLED)
self.assertTrue(sut.is_preload_disabled(LINUX))
create_sestatus(tmpdir, DISABLED)
self.assertFalse(sut.is_preload_disabled(LINUX))
finally:
os.environ['PATH'] = saved
try:
saved = os.environ['PATH']
os.environ['PATH'] = ''
# shall be false when it's not in the path
self.assertFalse(sut.is_preload_disabled(OSX))
self.assertFalse(sut.is_preload_disabled(LINUX))
self.assertFalse(sut.is_preload_disabled('unix'))
finally:
os.environ['PATH'] = saved
| 3,694 | 36.323232 | 78 | py |
LowFat | LowFat-master/llvm-4.0.0.src/tools/clang/tools/scan-build-py/tests/unit/test_runner.py | # -*- coding: utf-8 -*-
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
import libear
import libscanbuild.runner as sut
import unittest
import re
import os
import os.path
class FilteringFlagsTest(unittest.TestCase):
def test_language_captured(self):
def test(flags):
cmd = ['clang', '-c', 'source.c'] + flags
opts = sut.classify_parameters(cmd)
return opts['language']
self.assertEqual(None, test([]))
self.assertEqual('c', test(['-x', 'c']))
self.assertEqual('cpp', test(['-x', 'cpp']))
def test_arch(self):
def test(flags):
cmd = ['clang', '-c', 'source.c'] + flags
opts = sut.classify_parameters(cmd)
return opts['arch_list']
self.assertEqual([], test([]))
self.assertEqual(['mips'], test(['-arch', 'mips']))
self.assertEqual(['mips', 'i386'],
test(['-arch', 'mips', '-arch', 'i386']))
def assertFlagsChanged(self, expected, flags):
cmd = ['clang', '-c', 'source.c'] + flags
opts = sut.classify_parameters(cmd)
self.assertEqual(expected, opts['flags'])
def assertFlagsUnchanged(self, flags):
self.assertFlagsChanged(flags, flags)
def assertFlagsFiltered(self, flags):
self.assertFlagsChanged([], flags)
def test_optimalizations_pass(self):
self.assertFlagsUnchanged(['-O'])
self.assertFlagsUnchanged(['-O1'])
self.assertFlagsUnchanged(['-Os'])
self.assertFlagsUnchanged(['-O2'])
self.assertFlagsUnchanged(['-O3'])
def test_include_pass(self):
self.assertFlagsUnchanged([])
self.assertFlagsUnchanged(['-include', '/usr/local/include'])
self.assertFlagsUnchanged(['-I.'])
self.assertFlagsUnchanged(['-I', '.'])
self.assertFlagsUnchanged(['-I/usr/local/include'])
self.assertFlagsUnchanged(['-I', '/usr/local/include'])
self.assertFlagsUnchanged(['-I/opt', '-I', '/opt/otp/include'])
self.assertFlagsUnchanged(['-isystem', '/path'])
self.assertFlagsUnchanged(['-isystem=/path'])
def test_define_pass(self):
self.assertFlagsUnchanged(['-DNDEBUG'])
self.assertFlagsUnchanged(['-UNDEBUG'])
self.assertFlagsUnchanged(['-Dvar1=val1', '-Dvar2=val2'])
self.assertFlagsUnchanged(['-Dvar="val ues"'])
def test_output_filtered(self):
self.assertFlagsFiltered(['-o', 'source.o'])
def test_some_warning_filtered(self):
self.assertFlagsFiltered(['-Wall'])
self.assertFlagsFiltered(['-Wnoexcept'])
self.assertFlagsFiltered(['-Wreorder', '-Wunused', '-Wundef'])
self.assertFlagsUnchanged(['-Wno-reorder', '-Wno-unused'])
def test_compile_only_flags_pass(self):
self.assertFlagsUnchanged(['-std=C99'])
self.assertFlagsUnchanged(['-nostdinc'])
self.assertFlagsUnchanged(['-isystem', '/image/debian'])
self.assertFlagsUnchanged(['-iprefix', '/usr/local'])
self.assertFlagsUnchanged(['-iquote=me'])
self.assertFlagsUnchanged(['-iquote', 'me'])
def test_compile_and_link_flags_pass(self):
self.assertFlagsUnchanged(['-fsinged-char'])
self.assertFlagsUnchanged(['-fPIC'])
self.assertFlagsUnchanged(['-stdlib=libc++'])
self.assertFlagsUnchanged(['--sysroot', '/'])
self.assertFlagsUnchanged(['-isysroot', '/'])
def test_some_flags_filtered(self):
self.assertFlagsFiltered(['-g'])
self.assertFlagsFiltered(['-fsyntax-only'])
self.assertFlagsFiltered(['-save-temps'])
self.assertFlagsFiltered(['-init', 'my_init'])
self.assertFlagsFiltered(['-sectorder', 'a', 'b', 'c'])
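# Minimal test double: call() records its argument in self.arg and returns
# the canned exit status in self.success.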
class Spy(object):
def __init__(self):
self.arg = None
self.success = 0
def call(self, params):
self.arg = params
return self.success
class RunAnalyzerTest(unittest.TestCase):
@staticmethod
def run_analyzer(content, failures_report):
with libear.TemporaryDirectory() as tmpdir:
filename = os.path.join(tmpdir, 'test.cpp')
with open(filename, 'w') as handle:
handle.write(content)
opts = {
'clang': 'clang',
'directory': os.getcwd(),
'flags': [],
'direct_args': [],
'file': filename,
'output_dir': tmpdir,
'output_format': 'plist',
'output_failures': failures_report
}
spy = Spy()
result = sut.run_analyzer(opts, spy.call)
return (result, spy.arg)
def test_run_analyzer(self):
content = "int div(int n, int d) { return n / d; }"
(result, fwds) = RunAnalyzerTest.run_analyzer(content, False)
self.assertEqual(None, fwds)
self.assertEqual(0, result['exit_code'])
def test_run_analyzer_crash(self):
content = "int div(int n, int d) { return n / d }"
(result, fwds) = RunAnalyzerTest.run_analyzer(content, False)
self.assertEqual(None, fwds)
self.assertEqual(1, result['exit_code'])
def test_run_analyzer_crash_and_forwarded(self):
content = "int div(int n, int d) { return n / d }"
(_, fwds) = RunAnalyzerTest.run_analyzer(content, True)
self.assertEqual('crash', fwds['error_type'])
self.assertEqual(1, fwds['exit_code'])
self.assertTrue(len(fwds['error_output']) > 0)
class ReportFailureTest(unittest.TestCase):
def assertUnderFailures(self, path):
self.assertEqual('failures', os.path.basename(os.path.dirname(path)))
def test_report_failure_create_files(self):
with libear.TemporaryDirectory() as tmpdir:
# create input file
filename = os.path.join(tmpdir, 'test.c')
with open(filename, 'w') as handle:
handle.write('int main() { return 0')
uname_msg = ' '.join(os.uname()) + os.linesep
error_msg = 'this is my error output'
# execute test
opts = {
'clang': 'clang',
'directory': os.getcwd(),
'flags': [],
'file': filename,
'output_dir': tmpdir,
'language': 'c',
'error_type': 'other_error',
'error_output': error_msg,
'exit_code': 13
}
sut.report_failure(opts)
# verify the result
result = dict()
pp_file = None
for root, _, files in os.walk(tmpdir):
keys = [os.path.join(root, name) for name in files]
for key in keys:
with open(key, 'r') as handle:
result[key] = handle.readlines()
if re.match(r'^(.*/)+clang(.*)\.i$', key):
pp_file = key
# preprocessor file generated
self.assertUnderFailures(pp_file)
# info file generated and content dumped
info_file = pp_file + '.info.txt'
self.assertTrue(info_file in result)
self.assertEqual('Other Error\n', result[info_file][1])
self.assertEqual(uname_msg, result[info_file][3])
# error file generated and content dumped
error_file = pp_file + '.stderr.txt'
self.assertTrue(error_file in result)
self.assertEqual([error_msg], result[error_file])
class AnalyzerTest(unittest.TestCase):
def test_nodebug_macros_appended(self):
def test(flags):
spy = Spy()
opts = {'flags': flags, 'force_debug': True}
self.assertEqual(spy.success,
sut.filter_debug_flags(opts, spy.call))
return spy.arg['flags']
self.assertEqual(['-UNDEBUG'], test([]))
self.assertEqual(['-DNDEBUG', '-UNDEBUG'], test(['-DNDEBUG']))
self.assertEqual(['-DSomething', '-UNDEBUG'], test(['-DSomething']))
def test_set_language_fall_through(self):
def language(expected, input):
spy = Spy()
input.update({'compiler': 'c', 'file': 'test.c'})
self.assertEqual(spy.success, sut.language_check(input, spy.call))
self.assertEqual(expected, spy.arg['language'])
language('c', {'language': 'c', 'flags': []})
language('c++', {'language': 'c++', 'flags': []})
def test_set_language_stops_on_not_supported(self):
spy = Spy()
input = {
'compiler': 'c',
'flags': [],
'file': 'test.java',
'language': 'java'
}
self.assertIsNone(sut.language_check(input, spy.call))
self.assertIsNone(spy.arg)
def test_set_language_sets_flags(self):
def flags(expected, input):
spy = Spy()
input.update({'compiler': 'c', 'file': 'test.c'})
self.assertEqual(spy.success, sut.language_check(input, spy.call))
self.assertEqual(expected, spy.arg['flags'])
flags(['-x', 'c'], {'language': 'c', 'flags': []})
flags(['-x', 'c++'], {'language': 'c++', 'flags': []})
def test_set_language_from_filename(self):
def language(expected, input):
spy = Spy()
input.update({'language': None, 'flags': []})
self.assertEqual(spy.success, sut.language_check(input, spy.call))
self.assertEqual(expected, spy.arg['language'])
language('c', {'file': 'file.c', 'compiler': 'c'})
language('c++', {'file': 'file.c', 'compiler': 'c++'})
language('c++', {'file': 'file.cxx', 'compiler': 'c'})
language('c++', {'file': 'file.cxx', 'compiler': 'c++'})
language('c++', {'file': 'file.cpp', 'compiler': 'c++'})
language('c-cpp-output', {'file': 'file.i', 'compiler': 'c'})
language('c++-cpp-output', {'file': 'file.i', 'compiler': 'c++'})
def test_arch_loop_sets_flags(self):
def flags(archs):
spy = Spy()
input = {'flags': [], 'arch_list': archs}
sut.arch_check(input, spy.call)
return spy.arg['flags']
self.assertEqual([], flags([]))
self.assertEqual(['-arch', 'i386'], flags(['i386']))
self.assertEqual(['-arch', 'i386'], flags(['i386', 'ppc']))
self.assertEqual(['-arch', 'sparc'], flags(['i386', 'sparc']))
def test_arch_loop_stops_on_not_supported(self):
def stop(archs):
spy = Spy()
input = {'flags': [], 'arch_list': archs}
self.assertIsNone(sut.arch_check(input, spy.call))
self.assertIsNone(spy.arg)
stop(['ppc'])
stop(['ppc64'])
@sut.require([])
def method_without_expecteds(opts):
return 0
@sut.require(['this', 'that'])
def method_with_expecteds(opts):
return 0
@sut.require([])
def method_exception_from_inside(opts):
raise Exception('here is one')
class RequireDecoratorTest(unittest.TestCase):
def test_method_without_expecteds(self):
self.assertEqual(method_without_expecteds(dict()), 0)
self.assertEqual(method_without_expecteds({}), 0)
self.assertEqual(method_without_expecteds({'this': 2}), 0)
self.assertEqual(method_without_expecteds({'that': 3}), 0)
def test_method_with_expecteds(self):
self.assertRaises(KeyError, method_with_expecteds, dict())
self.assertRaises(KeyError, method_with_expecteds, {})
self.assertRaises(KeyError, method_with_expecteds, {'this': 2})
self.assertRaises(KeyError, method_with_expecteds, {'that': 3})
self.assertEqual(method_with_expecteds({'this': 0, 'that': 3}), 0)
def test_method_exception_not_caught(self):
self.assertRaises(Exception, method_exception_from_inside, dict())
| 12,015 | 36.201238 | 78 | py |
LowFat | LowFat-master/llvm-4.0.0.src/tools/clang/tools/scan-build-py/tests/unit/__init__.py | # -*- coding: utf-8 -*-
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
from . import test_libear
from . import test_compilation
from . import test_clang
from . import test_runner
from . import test_report
from . import test_analyze
from . import test_intercept
from . import test_shell
def load_tests(loader, suite, _):
suite.addTests(loader.loadTestsFromModule(test_libear))
suite.addTests(loader.loadTestsFromModule(test_compilation))
suite.addTests(loader.loadTestsFromModule(test_clang))
suite.addTests(loader.loadTestsFromModule(test_runner))
suite.addTests(loader.loadTestsFromModule(test_report))
suite.addTests(loader.loadTestsFromModule(test_analyze))
suite.addTests(loader.loadTestsFromModule(test_intercept))
suite.addTests(loader.loadTestsFromModule(test_shell))
return suite
| 949 | 34.185185 | 71 | py |
LowFat | LowFat-master/llvm-4.0.0.src/tools/clang/tools/scan-build-py/tests/unit/test_report.py | # -*- coding: utf-8 -*-
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
import libear
import libscanbuild.report as sut
import unittest
import os
import os.path
def run_bug_parse(content):
with libear.TemporaryDirectory() as tmpdir:
file_name = os.path.join(tmpdir, 'test.html')
with open(file_name, 'w') as handle:
handle.writelines(content)
for bug in sut.parse_bug_html(file_name):
return bug
def run_crash_parse(content, preproc):
with libear.TemporaryDirectory() as tmpdir:
file_name = os.path.join(tmpdir, preproc + '.info.txt')
with open(file_name, 'w') as handle:
handle.writelines(content)
return sut.parse_crash(file_name)
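# The helpers above write the given content into a scratch directory and run
# the corresponding parser on the resulting file.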
class ParseFileTest(unittest.TestCase):
def test_parse_bug(self):
content = [
"some header\n",
"<!-- BUGDESC Division by zero -->\n",
"<!-- BUGTYPE Division by zero -->\n",
"<!-- BUGCATEGORY Logic error -->\n",
"<!-- BUGFILE xx -->\n",
"<!-- BUGLINE 5 -->\n",
"<!-- BUGCOLUMN 22 -->\n",
"<!-- BUGPATHLENGTH 4 -->\n",
"<!-- BUGMETAEND -->\n",
"<!-- REPORTHEADER -->\n",
"some tails\n"]
result = run_bug_parse(content)
self.assertEqual(result['bug_category'], 'Logic error')
self.assertEqual(result['bug_path_length'], 4)
self.assertEqual(result['bug_line'], 5)
self.assertEqual(result['bug_description'], 'Division by zero')
self.assertEqual(result['bug_type'], 'Division by zero')
self.assertEqual(result['bug_file'], 'xx')
def test_parse_bug_empty(self):
content = []
result = run_bug_parse(content)
self.assertEqual(result['bug_category'], 'Other')
self.assertEqual(result['bug_path_length'], 1)
self.assertEqual(result['bug_line'], 0)
def test_parse_crash(self):
content = [
"/some/path/file.c\n",
"Some very serious Error\n",
"bla\n",
"bla-bla\n"]
result = run_crash_parse(content, 'file.i')
self.assertEqual(result['source'], content[0].rstrip())
self.assertEqual(result['problem'], content[1].rstrip())
self.assertEqual(os.path.basename(result['file']),
'file.i')
self.assertEqual(os.path.basename(result['info']),
'file.i.info.txt')
self.assertEqual(os.path.basename(result['stderr']),
'file.i.stderr.txt')
def test_parse_real_crash(self):
import libscanbuild.runner as sut2
import re
with libear.TemporaryDirectory() as tmpdir:
filename = os.path.join(tmpdir, 'test.c')
with open(filename, 'w') as handle:
handle.write('int main() { return 0')
# produce failure report
opts = {
'clang': 'clang',
'directory': os.getcwd(),
'flags': [],
'file': filename,
'output_dir': tmpdir,
'language': 'c',
'error_type': 'other_error',
'error_output': 'some output',
'exit_code': 13
}
sut2.report_failure(opts)
# find the info file
pp_file = None
for root, _, files in os.walk(tmpdir):
keys = [os.path.join(root, name) for name in files]
for key in keys:
if re.match(r'^(.*/)+clang(.*)\.i$', key):
pp_file = key
self.assertIsNot(pp_file, None)
# read the failure report back
result = sut.parse_crash(pp_file + '.info.txt')
self.assertEqual(result['source'], filename)
self.assertEqual(result['problem'], 'Other Error')
self.assertEqual(result['file'], pp_file)
self.assertEqual(result['info'], pp_file + '.info.txt')
self.assertEqual(result['stderr'], pp_file + '.stderr.txt')
class ReportMethodTest(unittest.TestCase):
def test_chop(self):
self.assertEqual('file', sut.chop('/prefix', '/prefix/file'))
self.assertEqual('file', sut.chop('/prefix/', '/prefix/file'))
self.assertEqual('lib/file', sut.chop('/prefix/', '/prefix/lib/file'))
self.assertEqual('/prefix/file', sut.chop('', '/prefix/file'))
def test_chop_when_cwd(self):
self.assertEqual('../src/file', sut.chop('/cwd', '/src/file'))
self.assertEqual('../src/file', sut.chop('/prefix/cwd',
'/prefix/src/file'))
class GetPrefixFromCompilationDatabaseTest(unittest.TestCase):
def test_with_different_filenames(self):
self.assertEqual(
sut.commonprefix(['/tmp/a.c', '/tmp/b.c']), '/tmp')
def test_with_different_dirnames(self):
self.assertEqual(
sut.commonprefix(['/tmp/abs/a.c', '/tmp/ack/b.c']), '/tmp')
def test_no_common_prefix(self):
self.assertEqual(
sut.commonprefix(['/tmp/abs/a.c', '/usr/ack/b.c']), '/')
def test_with_single_file(self):
self.assertEqual(
sut.commonprefix(['/tmp/a.c']), '/tmp')
def test_empty(self):
self.assertEqual(
sut.commonprefix([]), '')
class ReportDirectoryTest(unittest.TestCase):
# Test that successive report directory names ascend in lexicographic
# order. This is required so that report directories from two runs of
# scan-build can be easily matched up to compare results.
def test_directory_name_comparison(self):
with libear.TemporaryDirectory() as tmpdir, \
sut.report_directory(tmpdir, False) as report_dir1, \
sut.report_directory(tmpdir, False) as report_dir2, \
sut.report_directory(tmpdir, False) as report_dir3:
self.assertLess(report_dir1, report_dir2)
self.assertLess(report_dir2, report_dir3)
| 6,173 | 37.111111 | 78 | py |
LowFat | LowFat-master/llvm-4.0.0.src/tools/clang/tools/scan-build-py/tests/unit/test_shell.py | # -*- coding: utf-8 -*-
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
import libscanbuild.shell as sut
import unittest
class ShellTest(unittest.TestCase):
def test_encode_decode_are_same(self):
def test(value):
self.assertEqual(sut.encode(sut.decode(value)), value)
test("")
test("clang")
test("clang this and that")
def test_decode_encode_are_same(self):
def test(value):
self.assertEqual(sut.decode(sut.encode(value)), value)
test([])
test(['clang'])
test(['clang', 'this', 'and', 'that'])
test(['clang', 'this and', 'that'])
test(['clang', "it's me", 'again'])
test(['clang', 'some "words" are', 'quoted'])
def test_encode(self):
self.assertEqual(sut.encode(['clang', "it's me", 'again']),
'clang "it\'s me" again')
self.assertEqual(sut.encode(['clang', "it(s me", 'again)']),
'clang "it(s me" "again)"')
self.assertEqual(sut.encode(['clang', 'redirect > it']),
'clang "redirect > it"')
self.assertEqual(sut.encode(['clang', '-DKEY="VALUE"']),
'clang -DKEY=\\"VALUE\\"')
self.assertEqual(sut.encode(['clang', '-DKEY="value with spaces"']),
'clang -DKEY=\\"value with spaces\\"')
| 1,502 | 33.953488 | 76 | py |
LowFat | LowFat-master/llvm-4.0.0.src/tools/clang/tools/scan-build-py/tests/functional/__init__.py | 0 | 0 | 0 | py |
|
LowFat | LowFat-master/llvm-4.0.0.src/tools/clang/tools/scan-build-py/tests/functional/cases/test_create_cdb.py | # -*- coding: utf-8 -*-
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
import libear
from . import make_args, silent_check_call, silent_call, create_empty_file
import unittest
import os.path
import json
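# These tests drive the real 'intercept-build' entry point against the test
# Makefile targets and assert on the generated compilation database.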
class CompilationDatabaseTest(unittest.TestCase):
@staticmethod
def run_intercept(tmpdir, args):
result = os.path.join(tmpdir, 'cdb.json')
make = make_args(tmpdir) + args
silent_check_call(
['intercept-build', '--cdb', result] + make)
return result
@staticmethod
def count_entries(filename):
with open(filename, 'r') as handler:
content = json.load(handler)
return len(content)
def test_successful_build(self):
with libear.TemporaryDirectory() as tmpdir:
result = self.run_intercept(tmpdir, ['build_regular'])
self.assertTrue(os.path.isfile(result))
self.assertEqual(5, self.count_entries(result))
def test_successful_build_with_wrapper(self):
with libear.TemporaryDirectory() as tmpdir:
result = os.path.join(tmpdir, 'cdb.json')
make = make_args(tmpdir) + ['build_regular']
silent_check_call(['intercept-build', '--cdb', result,
'--override-compiler'] + make)
self.assertTrue(os.path.isfile(result))
self.assertEqual(5, self.count_entries(result))
@unittest.skipIf(os.getenv('TRAVIS'), 'ubuntu make return -11')
def test_successful_build_parallel(self):
with libear.TemporaryDirectory() as tmpdir:
result = self.run_intercept(tmpdir, ['-j', '4', 'build_regular'])
self.assertTrue(os.path.isfile(result))
self.assertEqual(5, self.count_entries(result))
@unittest.skipIf(os.getenv('TRAVIS'), 'ubuntu env remove clang from path')
def test_successful_build_on_empty_env(self):
with libear.TemporaryDirectory() as tmpdir:
result = os.path.join(tmpdir, 'cdb.json')
make = make_args(tmpdir) + ['CC=clang', 'build_regular']
silent_check_call(['intercept-build', '--cdb', result,
'env', '-'] + make)
self.assertTrue(os.path.isfile(result))
self.assertEqual(5, self.count_entries(result))
def test_successful_build_all_in_one(self):
with libear.TemporaryDirectory() as tmpdir:
result = self.run_intercept(tmpdir, ['build_all_in_one'])
self.assertTrue(os.path.isfile(result))
self.assertEqual(5, self.count_entries(result))
def test_not_successful_build(self):
with libear.TemporaryDirectory() as tmpdir:
result = os.path.join(tmpdir, 'cdb.json')
make = make_args(tmpdir) + ['build_broken']
silent_call(
['intercept-build', '--cdb', result] + make)
self.assertTrue(os.path.isfile(result))
self.assertEqual(2, self.count_entries(result))
class ExitCodeTest(unittest.TestCase):
@staticmethod
def run_intercept(tmpdir, target):
result = os.path.join(tmpdir, 'cdb.json')
make = make_args(tmpdir) + [target]
return silent_call(
['intercept-build', '--cdb', result] + make)
def test_successful_build(self):
with libear.TemporaryDirectory() as tmpdir:
exitcode = self.run_intercept(tmpdir, 'build_clean')
self.assertFalse(exitcode)
def test_not_successful_build(self):
with libear.TemporaryDirectory() as tmpdir:
exitcode = self.run_intercept(tmpdir, 'build_broken')
self.assertTrue(exitcode)
class ResumeFeatureTest(unittest.TestCase):
@staticmethod
def run_intercept(tmpdir, target, args):
result = os.path.join(tmpdir, 'cdb.json')
make = make_args(tmpdir) + [target]
silent_check_call(
['intercept-build', '--cdb', result] + args + make)
return result
@staticmethod
def count_entries(filename):
with open(filename, 'r') as handler:
content = json.load(handler)
return len(content)
def test_overwrite_existing_cdb(self):
with libear.TemporaryDirectory() as tmpdir:
result = self.run_intercept(tmpdir, 'build_clean', [])
self.assertTrue(os.path.isfile(result))
result = self.run_intercept(tmpdir, 'build_regular', [])
self.assertTrue(os.path.isfile(result))
self.assertEqual(2, self.count_entries(result))
def test_append_to_existing_cdb(self):
with libear.TemporaryDirectory() as tmpdir:
result = self.run_intercept(tmpdir, 'build_clean', [])
self.assertTrue(os.path.isfile(result))
result = self.run_intercept(tmpdir, 'build_regular', ['--append'])
self.assertTrue(os.path.isfile(result))
self.assertEqual(5, self.count_entries(result))
class ResultFormatingTest(unittest.TestCase):
@staticmethod
def run_intercept(tmpdir, command):
result = os.path.join(tmpdir, 'cdb.json')
silent_check_call(
['intercept-build', '--cdb', result] + command,
cwd=tmpdir)
with open(result, 'r') as handler:
content = json.load(handler)
return content
def assert_creates_number_of_entries(self, command, count):
with libear.TemporaryDirectory() as tmpdir:
filename = os.path.join(tmpdir, 'test.c')
create_empty_file(filename)
command.append(filename)
cmd = ['sh', '-c', ' '.join(command)]
cdb = self.run_intercept(tmpdir, cmd)
self.assertEqual(count, len(cdb))
def test_filter_preprocessor_only_calls(self):
self.assert_creates_number_of_entries(['cc', '-c'], 1)
self.assert_creates_number_of_entries(['cc', '-c', '-E'], 0)
self.assert_creates_number_of_entries(['cc', '-c', '-M'], 0)
self.assert_creates_number_of_entries(['cc', '-c', '-MM'], 0)
def assert_command_creates_entry(self, command, expected):
with libear.TemporaryDirectory() as tmpdir:
filename = os.path.join(tmpdir, command[-1])
create_empty_file(filename)
cmd = ['sh', '-c', ' '.join(command)]
cdb = self.run_intercept(tmpdir, cmd)
self.assertEqual(' '.join(expected), cdb[0]['command'])
def test_filter_preprocessor_flags(self):
self.assert_command_creates_entry(
['cc', '-c', '-MD', 'test.c'],
['cc', '-c', 'test.c'])
self.assert_command_creates_entry(
['cc', '-c', '-MMD', 'test.c'],
['cc', '-c', 'test.c'])
self.assert_command_creates_entry(
['cc', '-c', '-MD', '-MF', 'test.d', 'test.c'],
['cc', '-c', 'test.c'])
def test_pass_language_flag(self):
self.assert_command_creates_entry(
['cc', '-c', '-x', 'c', 'test.c'],
['cc', '-c', '-x', 'c', 'test.c'])
self.assert_command_creates_entry(
['cc', '-c', 'test.c'],
['cc', '-c', 'test.c'])
def test_pass_arch_flags(self):
self.assert_command_creates_entry(
['clang', '-c', 'test.c'],
['cc', '-c', 'test.c'])
self.assert_command_creates_entry(
['clang', '-c', '-arch', 'i386', 'test.c'],
['cc', '-c', '-arch', 'i386', 'test.c'])
self.assert_command_creates_entry(
['clang', '-c', '-arch', 'i386', '-arch', 'armv7l', 'test.c'],
['cc', '-c', '-arch', 'i386', '-arch', 'armv7l', 'test.c'])
| 7,754 | 39.390625 | 78 | py |
LowFat | LowFat-master/llvm-4.0.0.src/tools/clang/tools/scan-build-py/tests/functional/cases/test_from_cdb.py | # -*- coding: utf-8 -*-
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
import libear
from . import call_and_report
import unittest
import os.path
import string
import glob
def prepare_cdb(name, target_dir):
target_file = 'build_{0}.json'.format(name)
this_dir, _ = os.path.split(__file__)
path = os.path.normpath(os.path.join(this_dir, '..', 'src'))
source_dir = os.path.join(path, 'compilation_database')
source_file = os.path.join(source_dir, target_file + '.in')
target_file = os.path.join(target_dir, 'compile_commands.json')
with open(source_file, 'r') as in_handle:
with open(target_file, 'w') as out_handle:
for line in in_handle:
temp = string.Template(line)
out_handle.write(temp.substitute(path=path))
return target_file
def run_analyzer(directory, cdb, args):
cmd = ['analyze-build', '--cdb', cdb, '--output', directory] \
+ args
return call_and_report(cmd, [])
class OutputDirectoryTest(unittest.TestCase):
def test_regular_keeps_report_dir(self):
with libear.TemporaryDirectory() as tmpdir:
cdb = prepare_cdb('regular', tmpdir)
exit_code, reportdir = run_analyzer(tmpdir, cdb, [])
self.assertTrue(os.path.isdir(reportdir))
def test_clear_deletes_report_dir(self):
with libear.TemporaryDirectory() as tmpdir:
cdb = prepare_cdb('clean', tmpdir)
exit_code, reportdir = run_analyzer(tmpdir, cdb, [])
self.assertFalse(os.path.isdir(reportdir))
def test_clear_keeps_report_dir_when_asked(self):
with libear.TemporaryDirectory() as tmpdir:
cdb = prepare_cdb('clean', tmpdir)
exit_code, reportdir = run_analyzer(tmpdir, cdb, ['--keep-empty'])
self.assertTrue(os.path.isdir(reportdir))
class ExitCodeTest(unittest.TestCase):
def test_regular_does_not_set_exit_code(self):
with libear.TemporaryDirectory() as tmpdir:
cdb = prepare_cdb('regular', tmpdir)
exit_code, __ = run_analyzer(tmpdir, cdb, [])
self.assertFalse(exit_code)
def test_clear_does_not_set_exit_code(self):
with libear.TemporaryDirectory() as tmpdir:
cdb = prepare_cdb('clean', tmpdir)
exit_code, __ = run_analyzer(tmpdir, cdb, [])
self.assertFalse(exit_code)
def test_regular_sets_exit_code_if_asked(self):
with libear.TemporaryDirectory() as tmpdir:
cdb = prepare_cdb('regular', tmpdir)
exit_code, __ = run_analyzer(tmpdir, cdb, ['--status-bugs'])
self.assertTrue(exit_code)
def test_clear_does_not_set_exit_code_if_asked(self):
with libear.TemporaryDirectory() as tmpdir:
cdb = prepare_cdb('clean', tmpdir)
exit_code, __ = run_analyzer(tmpdir, cdb, ['--status-bugs'])
self.assertFalse(exit_code)
def test_regular_sets_exit_code_if_asked_from_plist(self):
with libear.TemporaryDirectory() as tmpdir:
cdb = prepare_cdb('regular', tmpdir)
exit_code, __ = run_analyzer(
tmpdir, cdb, ['--status-bugs', '--plist'])
self.assertTrue(exit_code)
def test_clear_does_not_set_exit_code_if_asked_from_plist(self):
with libear.TemporaryDirectory() as tmpdir:
cdb = prepare_cdb('clean', tmpdir)
exit_code, __ = run_analyzer(
tmpdir, cdb, ['--status-bugs', '--plist'])
self.assertFalse(exit_code)
class OutputFormatTest(unittest.TestCase):
@staticmethod
def get_html_count(directory):
return len(glob.glob(os.path.join(directory, 'report-*.html')))
@staticmethod
def get_plist_count(directory):
return len(glob.glob(os.path.join(directory, 'report-*.plist')))
def test_default_creates_html_report(self):
with libear.TemporaryDirectory() as tmpdir:
cdb = prepare_cdb('regular', tmpdir)
exit_code, reportdir = run_analyzer(tmpdir, cdb, [])
self.assertTrue(
os.path.exists(os.path.join(reportdir, 'index.html')))
self.assertEqual(self.get_html_count(reportdir), 2)
self.assertEqual(self.get_plist_count(reportdir), 0)
def test_plist_and_html_creates_html_report(self):
with libear.TemporaryDirectory() as tmpdir:
cdb = prepare_cdb('regular', tmpdir)
exit_code, reportdir = run_analyzer(tmpdir, cdb, ['--plist-html'])
self.assertTrue(
os.path.exists(os.path.join(reportdir, 'index.html')))
self.assertEqual(self.get_html_count(reportdir), 2)
self.assertEqual(self.get_plist_count(reportdir), 5)
def test_plist_does_not_creates_html_report(self):
with libear.TemporaryDirectory() as tmpdir:
cdb = prepare_cdb('regular', tmpdir)
exit_code, reportdir = run_analyzer(tmpdir, cdb, ['--plist'])
self.assertFalse(
os.path.exists(os.path.join(reportdir, 'index.html')))
self.assertEqual(self.get_html_count(reportdir), 0)
self.assertEqual(self.get_plist_count(reportdir), 5)
class FailureReportTest(unittest.TestCase):
def test_broken_creates_failure_reports(self):
with libear.TemporaryDirectory() as tmpdir:
cdb = prepare_cdb('broken', tmpdir)
exit_code, reportdir = run_analyzer(tmpdir, cdb, [])
self.assertTrue(
os.path.isdir(os.path.join(reportdir, 'failures')))
def test_broken_does_not_creates_failure_reports(self):
with libear.TemporaryDirectory() as tmpdir:
cdb = prepare_cdb('broken', tmpdir)
exit_code, reportdir = run_analyzer(
tmpdir, cdb, ['--no-failure-reports'])
self.assertFalse(
os.path.isdir(os.path.join(reportdir, 'failures')))
class TitleTest(unittest.TestCase):
def assertTitleEqual(self, directory, expected):
import re
patterns = [
re.compile(r'<title>(?P<page>.*)</title>'),
re.compile(r'<h1>(?P<head>.*)</h1>')
]
result = dict()
index = os.path.join(directory, 'index.html')
with open(index, 'r') as handler:
for line in handler.readlines():
for regex in patterns:
match = regex.match(line.strip())
if match:
result.update(match.groupdict())
break
self.assertEqual(result['page'], result['head'])
self.assertEqual(result['page'], expected)
def test_default_title_in_report(self):
with libear.TemporaryDirectory() as tmpdir:
cdb = prepare_cdb('broken', tmpdir)
exit_code, reportdir = run_analyzer(tmpdir, cdb, [])
self.assertTitleEqual(reportdir, 'src - analyzer results')
def test_given_title_in_report(self):
with libear.TemporaryDirectory() as tmpdir:
cdb = prepare_cdb('broken', tmpdir)
exit_code, reportdir = run_analyzer(
tmpdir, cdb, ['--html-title', 'this is the title'])
self.assertTitleEqual(reportdir, 'this is the title')
| 7,382 | 39.344262 | 78 | py |
LowFat | LowFat-master/llvm-4.0.0.src/tools/clang/tools/scan-build-py/tests/functional/cases/test_exec_anatomy.py | # -*- coding: utf-8 -*-
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
import libear
import unittest
import os.path
import subprocess
import json
def run(source_dir, target_dir):
def execute(cmd):
return subprocess.check_call(cmd,
cwd=target_dir,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
execute(['cmake', source_dir])
execute(['make'])
result_file = os.path.join(target_dir, 'result.json')
expected_file = os.path.join(target_dir, 'expected.json')
execute(['intercept-build', '--cdb', result_file, './exec',
expected_file])
return (expected_file, result_file)
class ExecAnatomyTest(unittest.TestCase):
def assertEqualJson(self, expected, result):
def read_json(filename):
with open(filename) as handler:
return json.load(handler)
lhs = read_json(expected)
rhs = read_json(result)
for item in lhs:
self.assertTrue(rhs.count(item))
for item in rhs:
self.assertTrue(lhs.count(item))
def test_all_exec_calls(self):
this_dir, _ = os.path.split(__file__)
source_dir = os.path.normpath(os.path.join(this_dir, '..', 'exec'))
with libear.TemporaryDirectory() as tmp_dir:
expected, result = run(source_dir, tmp_dir)
self.assertEqualJson(expected, result)
| 1,595 | 30.294118 | 75 | py |
LowFat | LowFat-master/llvm-4.0.0.src/tools/clang/tools/scan-build-py/tests/functional/cases/test_from_cmd.py | # -*- coding: utf-8 -*-
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
import libear
from . import make_args, check_call_and_report, create_empty_file
import unittest
import os
import os.path
import glob
class OutputDirectoryTest(unittest.TestCase):
@staticmethod
def run_analyzer(outdir, args, cmd):
return check_call_and_report(
['scan-build', '--intercept-first', '-o', outdir] + args,
cmd)
def test_regular_keeps_report_dir(self):
with libear.TemporaryDirectory() as tmpdir:
make = make_args(tmpdir) + ['build_regular']
outdir = self.run_analyzer(tmpdir, [], make)
self.assertTrue(os.path.isdir(outdir))
def test_clear_deletes_report_dir(self):
with libear.TemporaryDirectory() as tmpdir:
make = make_args(tmpdir) + ['build_clean']
outdir = self.run_analyzer(tmpdir, [], make)
self.assertFalse(os.path.isdir(outdir))
def test_clear_keeps_report_dir_when_asked(self):
with libear.TemporaryDirectory() as tmpdir:
make = make_args(tmpdir) + ['build_clean']
outdir = self.run_analyzer(tmpdir, ['--keep-empty'], make)
self.assertTrue(os.path.isdir(outdir))
class RunAnalyzerTest(unittest.TestCase):
@staticmethod
def get_plist_count(directory):
return len(glob.glob(os.path.join(directory, 'report-*.plist')))
def test_interposition_works(self):
with libear.TemporaryDirectory() as tmpdir:
make = make_args(tmpdir) + ['build_regular']
outdir = check_call_and_report(
['scan-build', '--plist', '-o', tmpdir, '--override-compiler'],
make)
self.assertTrue(os.path.isdir(outdir))
self.assertEqual(self.get_plist_count(outdir), 5)
def test_intercept_wrapper_works(self):
with libear.TemporaryDirectory() as tmpdir:
make = make_args(tmpdir) + ['build_regular']
outdir = check_call_and_report(
['scan-build', '--plist', '-o', tmpdir, '--intercept-first',
'--override-compiler'],
make)
self.assertTrue(os.path.isdir(outdir))
self.assertEqual(self.get_plist_count(outdir), 5)
def test_intercept_library_works(self):
with libear.TemporaryDirectory() as tmpdir:
make = make_args(tmpdir) + ['build_regular']
outdir = check_call_and_report(
['scan-build', '--plist', '-o', tmpdir, '--intercept-first'],
make)
self.assertTrue(os.path.isdir(outdir))
self.assertEqual(self.get_plist_count(outdir), 5)
@staticmethod
def compile_empty_source_file(target_dir, is_cxx):
compiler = '$CXX' if is_cxx else '$CC'
src_file_name = 'test.cxx' if is_cxx else 'test.c'
src_file = os.path.join(target_dir, src_file_name)
obj_file = os.path.join(target_dir, 'test.o')
create_empty_file(src_file)
command = ' '.join([compiler, '-c', src_file, '-o', obj_file])
return ['sh', '-c', command]
def test_interposition_cc_works(self):
with libear.TemporaryDirectory() as tmpdir:
outdir = check_call_and_report(
['scan-build', '--plist', '-o', tmpdir, '--override-compiler'],
self.compile_empty_source_file(tmpdir, False))
self.assertEqual(self.get_plist_count(outdir), 1)
def test_interposition_cxx_works(self):
with libear.TemporaryDirectory() as tmpdir:
outdir = check_call_and_report(
['scan-build', '--plist', '-o', tmpdir, '--override-compiler'],
self.compile_empty_source_file(tmpdir, True))
self.assertEqual(self.get_plist_count(outdir), 1)
def test_intercept_cc_works(self):
with libear.TemporaryDirectory() as tmpdir:
outdir = check_call_and_report(
['scan-build', '--plist', '-o', tmpdir, '--override-compiler',
'--intercept-first'],
self.compile_empty_source_file(tmpdir, False))
self.assertEqual(self.get_plist_count(outdir), 1)
def test_intercept_cxx_works(self):
with libear.TemporaryDirectory() as tmpdir:
outdir = check_call_and_report(
['scan-build', '--plist', '-o', tmpdir, '--override-compiler',
'--intercept-first'],
self.compile_empty_source_file(tmpdir, True))
self.assertEqual(self.get_plist_count(outdir), 1)
| 4,701 | 38.512605 | 79 | py |
LowFat | LowFat-master/llvm-4.0.0.src/tools/clang/tools/scan-build-py/tests/functional/cases/__init__.py | # -*- coding: utf-8 -*-
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
import re
import os.path
import subprocess
def load_tests(loader, suite, pattern):
from . import test_from_cdb
suite.addTests(loader.loadTestsFromModule(test_from_cdb))
from . import test_from_cmd
suite.addTests(loader.loadTestsFromModule(test_from_cmd))
from . import test_create_cdb
suite.addTests(loader.loadTestsFromModule(test_create_cdb))
from . import test_exec_anatomy
suite.addTests(loader.loadTestsFromModule(test_exec_anatomy))
return suite
def make_args(target):
this_dir, _ = os.path.split(__file__)
path = os.path.normpath(os.path.join(this_dir, '..', 'src'))
return ['make', 'SRCDIR={}'.format(path), 'OBJDIR={}'.format(target), '-f',
os.path.join(path, 'build', 'Makefile')]
def silent_call(cmd, *args, **kwargs):
kwargs.update({'stdout': subprocess.PIPE, 'stderr': subprocess.STDOUT})
return subprocess.call(cmd, *args, **kwargs)
def silent_check_call(cmd, *args, **kwargs):
kwargs.update({'stdout': subprocess.PIPE, 'stderr': subprocess.STDOUT})
return subprocess.check_call(cmd, *args, **kwargs)
def call_and_report(analyzer_cmd, build_cmd):
child = subprocess.Popen(analyzer_cmd + ['-v'] + build_cmd,
universal_newlines=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
pattern = re.compile('Report directory created: (.+)')
directory = None
for line in child.stdout.readlines():
match = pattern.search(line)
if match and match.lastindex == 1:
directory = match.group(1)
break
child.stdout.close()
child.wait()
return (child.returncode, directory)
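# A hedged note on the output scanned above: with '-v', the analyzer driver is
# expected to announce its report directory with a line matching the pattern,
# e.g. 'Report directory created: /tmp/scan-build-2016-01-25-1'; the exact
# path shown here is only illustrative.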
def check_call_and_report(analyzer_cmd, build_cmd):
exit_code, result = call_and_report(analyzer_cmd, build_cmd)
if exit_code != 0:
raise subprocess.CalledProcessError(
exit_code, analyzer_cmd + build_cmd, None)
else:
return result
def create_empty_file(filename):
with open(filename, 'a') as handle:
pass
| 2,273 | 30.583333 | 79 | py |
LowFat | LowFat-master/llvm-4.0.0.src/tools/clang/tools/scan-build-py/libscanbuild/clang.py | # -*- coding: utf-8 -*-
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
""" This module is responsible for the Clang executable.
Since the Clang command-line interface is very rich, but this project uses only
a small subset of it, it makes sense to create function-specific wrappers. """
import re
import subprocess
import logging
from libscanbuild.shell import decode
__all__ = ['get_version', 'get_arguments', 'get_checkers']
# regex for activated checker
ACTIVE_CHECKER_PATTERN = re.compile(r'^-analyzer-checker=(.*)$')
def get_version(clang):
""" Returns the compiler version as string.
:param clang: the compiler we are using
:return: the version string printed to stderr """
output = subprocess.check_output([clang, '-v'], stderr=subprocess.STDOUT)
return output.decode('utf-8').splitlines()[0]
def get_arguments(command, cwd):
""" Capture Clang invocation.
:param command: the compilation command
:param cwd: the current working directory
:return: the detailed front-end invocation command """
cmd = command[:]
cmd.insert(1, '-###')
logging.debug('exec command in %s: %s', cwd, ' '.join(cmd))
output = subprocess.check_output(cmd, cwd=cwd, stderr=subprocess.STDOUT)
# The relevant information is in the last line of the output.
# Don't check if finding last line fails, would throw exception anyway.
last_line = output.decode('utf-8').splitlines()[-1]
if re.search(r'clang(.*): error:', last_line):
raise Exception(last_line)
return decode(last_line)
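# A hedged sketch of what get_arguments returns (the exact flags vary by clang
# version and host; none of these values come from this repo): for a command
# like
#   ['clang', '-c', 'test.c']
# the result is the driver's '-cc1' front-end invocation, e.g. something of
# the shape ['/usr/bin/clang', '-cc1', ..., '-main-file-name', 'test.c', ...].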
def get_active_checkers(clang, plugins):
""" Get the active checker list.
:param clang: the compiler we are using
    :param plugins: list of plugins which were requested by the user
:return: list of checker names which are active
    To get the default checkers we execute Clang and ask it to print how this
    compilation would be invoked, then take the enabled checkers out of the
    arguments. As the input file we specify stdin and pass only the language
    information. """
def get_active_checkers_for(language):
""" Returns a list of active checkers for the given language. """
load_args = [arg
for plugin in plugins
for arg in ['-Xclang', '-load', '-Xclang', plugin]]
cmd = [clang, '--analyze'] + load_args + ['-x', language, '-']
return [ACTIVE_CHECKER_PATTERN.match(arg).group(1)
for arg in get_arguments(cmd, '.')
if ACTIVE_CHECKER_PATTERN.match(arg)]
result = set()
for language in ['c', 'c++', 'objective-c', 'objective-c++']:
result.update(get_active_checkers_for(language))
return frozenset(result)
def is_active(checkers):
""" Returns a method, which classifies the checker active or not,
based on the received checker name list. """
def predicate(checker):
""" Returns True if the given checker is active. """
return any(pattern.match(checker) for pattern in predicate.patterns)
predicate.patterns = [re.compile(r'^' + a + r'(\.|$)') for a in checkers]
return predicate
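# A quick sketch of the predicate above (the checker names are illustrative):
#   checker = is_active(['core', 'unix.Malloc'])
#   checker('core.DivideZero')      # True: 'core' prefix matches
#   checker('unix.Malloc.Custom')   # True: 'unix.Malloc' prefix matches
#   checker('cplusplus.NewDelete')  # False: no configured prefix matches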
def parse_checkers(stream):
""" Parse clang -analyzer-checker-help output.
    Below the line 'CHECKERS:' are the name-description pairs. Most of them
    fit on one line, but some long-named checkers have the name and the
    description on separate lines.
    The checker name is always prefixed with two space characters and
    contains no whitespace. The description follows, separated either by a
    newline (when the name is too long) or by other space characters, and
    ends with a newline character.
:param stream: list of lines to parse
:return: generator of tuples
(<checker name>, <checker description>) """
lines = iter(stream)
# find checkers header
for line in lines:
if re.match(r'^CHECKERS:', line):
break
# find entries
state = None
for line in lines:
if state and not re.match(r'^\s\s\S', line):
yield (state, line.strip())
state = None
elif re.match(r'^\s\s\S+$', line.rstrip()):
state = line.strip()
else:
pattern = re.compile(r'^\s\s(?P<key>\S*)\s*(?P<value>.*)')
match = pattern.match(line.rstrip())
if match:
current = match.groupdict()
yield (current['key'], current['value'])
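# A sketch of the input this parser handles (the checker names and layout are
# illustrative; real '-analyzer-checker-help' output varies by clang version):
#   CHECKERS:
#     core.DivideZero       Check for division by zero
#     alpha.core.IdenticalExpr
#                           Warn about unintended use of identical expressions
# which yields ('core.DivideZero', 'Check for division by zero') and
# ('alpha.core.IdenticalExpr', 'Warn about unintended use of identical
# expressions').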
def get_checkers(clang, plugins):
""" Get all the available checkers from default and from the plugins.
:param clang: the compiler we are using
    :param plugins: list of plugins which were requested by the user
:return: a dictionary of all available checkers and its status
{<checker name>: (<checker description>, <is active by default>)} """
load = [elem for plugin in plugins for elem in ['-load', plugin]]
cmd = [clang, '-cc1'] + load + ['-analyzer-checker-help']
logging.debug('exec command: %s', ' '.join(cmd))
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
lines = output.decode('utf-8').splitlines()
is_active_checker = is_active(get_active_checkers(clang, plugins))
checkers = {
name: (description, is_active_checker(name))
for name, description in parse_checkers(lines)
}
if not checkers:
raise Exception('Could not query Clang for available checkers.')
return checkers
| 5,577 | 34.303797 | 77 | py |
LowFat | LowFat-master/llvm-4.0.0.src/tools/clang/tools/scan-build-py/libscanbuild/intercept.py | # -*- coding: utf-8 -*-
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
""" This module is responsible to capture the compiler invocation of any
build process. The result of that should be a compilation database.
This implementation is using the LD_PRELOAD or DYLD_INSERT_LIBRARIES
mechanisms provided by the dynamic linker. The related library is implemented
in C language and can be found under 'libear' directory.
The 'libear' library is capturing all child process creation and logging the
relevant information about it into separate files in a specified directory.
The parameter of this process is the output directory name, where the report
files shall be placed. This parameter is passed as an environment variable.
The module also implements compiler wrappers to intercept the compiler calls.
The module implements the build command execution and the post-processing of
the output files, which condenses them into a compilation database. """
import sys
import os
import os.path
import re
import itertools
import json
import glob
import argparse
import logging
import subprocess
from libear import build_libear, TemporaryDirectory
from libscanbuild import command_entry_point
from libscanbuild import duplicate_check, tempdir, initialize_logging
from libscanbuild.compilation import split_command
from libscanbuild.shell import encode, decode
__all__ = ['capture', 'intercept_build_main', 'intercept_build_wrapper']
GS = chr(0x1d)  # ASCII group separator: terminates one exec report
RS = chr(0x1e)  # ASCII record separator: separates fields within a report
US = chr(0x1f)  # ASCII unit separator: separates command arguments
COMPILER_WRAPPER_CC = 'intercept-cc'
COMPILER_WRAPPER_CXX = 'intercept-c++'
@command_entry_point
def intercept_build_main(bin_dir):
""" Entry point for 'intercept-build' command. """
parser = create_parser()
args = parser.parse_args()
initialize_logging(args.verbose)
logging.debug('Parsed arguments: %s', args)
if not args.build:
parser.print_help()
return 0
return capture(args, bin_dir)
def capture(args, bin_dir):
""" The entry point of build command interception. """
def post_processing(commands):
""" To make a compilation database, it needs to filter out commands
which are not compiler calls. Needs to find the source file name
from the arguments. And do shell escaping on the command.
To support incremental builds, it is desired to read elements from
an existing compilation database from a previous run. These elements
shall be merged with the new elements. """
# create entries from the current run
current = itertools.chain.from_iterable(
# creates a sequence of entry generators from an exec,
format_entry(command) for command in commands)
# read entries from previous run
if 'append' in args and args.append and os.path.isfile(args.cdb):
with open(args.cdb) as handle:
previous = iter(json.load(handle))
else:
previous = iter([])
# filter out duplicate entries from both
duplicate = duplicate_check(entry_hash)
return (entry
for entry in itertools.chain(previous, current)
if os.path.exists(entry['file']) and not duplicate(entry))
with TemporaryDirectory(prefix='intercept-', dir=tempdir()) as tmp_dir:
# run the build command
environment = setup_environment(args, tmp_dir, bin_dir)
logging.debug('run build in environment: %s', environment)
exit_code = subprocess.call(args.build, env=environment)
logging.info('build finished with exit code: %d', exit_code)
# read the intercepted exec calls
exec_traces = itertools.chain.from_iterable(
parse_exec_trace(os.path.join(tmp_dir, filename))
for filename in sorted(glob.iglob(os.path.join(tmp_dir, '*.cmd'))))
# do post processing only if that was requested
if 'raw_entries' not in args or not args.raw_entries:
entries = post_processing(exec_traces)
else:
entries = exec_traces
# dump the compilation database
with open(args.cdb, 'w+') as handle:
json.dump(list(entries), handle, sort_keys=True, indent=4)
return exit_code
def setup_environment(args, destination, bin_dir):
""" Sets up the environment for the build command.
It sets the required environment variables and execute the given command.
The exec calls will be logged by the 'libear' preloaded library or by the
'wrapper' programs. """
c_compiler = args.cc if 'cc' in args else 'cc'
cxx_compiler = args.cxx if 'cxx' in args else 'c++'
libear_path = None if args.override_compiler or is_preload_disabled(
sys.platform) else build_libear(c_compiler, destination)
environment = dict(os.environ)
environment.update({'INTERCEPT_BUILD_TARGET_DIR': destination})
if not libear_path:
        logging.debug('intercept is going to use compiler wrappers')
environment.update({
'CC': os.path.join(bin_dir, COMPILER_WRAPPER_CC),
'CXX': os.path.join(bin_dir, COMPILER_WRAPPER_CXX),
'INTERCEPT_BUILD_CC': c_compiler,
'INTERCEPT_BUILD_CXX': cxx_compiler,
'INTERCEPT_BUILD_VERBOSE': 'DEBUG' if args.verbose > 2 else 'INFO'
})
elif sys.platform == 'darwin':
        logging.debug('intercept is going to preload libear on OSX')
environment.update({
'DYLD_INSERT_LIBRARIES': libear_path,
'DYLD_FORCE_FLAT_NAMESPACE': '1'
})
else:
        logging.debug('intercept is going to preload libear on UNIX')
environment.update({'LD_PRELOAD': libear_path})
return environment
def intercept_build_wrapper(cplusplus):
""" Entry point for `intercept-cc` and `intercept-c++` compiler wrappers.
    It generates an execution report into the target directory and executes
    the wrapped compilation with the real compiler. The parameters for the
    report and the execution come from environment variables.
    Parameters which cannot have meaningful values for the 'libear'
    library are faked. """
# initialize wrapper logging
logging.basicConfig(format='intercept: %(levelname)s: %(message)s',
level=os.getenv('INTERCEPT_BUILD_VERBOSE', 'INFO'))
# write report
try:
target_dir = os.getenv('INTERCEPT_BUILD_TARGET_DIR')
if not target_dir:
raise UserWarning('exec report target directory not found')
pid = str(os.getpid())
target_file = os.path.join(target_dir, pid + '.cmd')
logging.debug('writing exec report to: %s', target_file)
with open(target_file, 'ab') as handler:
working_dir = os.getcwd()
command = US.join(sys.argv) + US
content = RS.join([pid, pid, 'wrapper', working_dir, command]) + GS
handler.write(content.encode('utf-8'))
except IOError:
logging.exception('writing exec report failed')
except UserWarning as warning:
logging.warning(warning)
# execute with real compiler
compiler = os.getenv('INTERCEPT_BUILD_CXX', 'c++') if cplusplus \
else os.getenv('INTERCEPT_BUILD_CC', 'cc')
compilation = [compiler] + sys.argv[1:]
logging.debug('execute compiler: %s', compilation)
return subprocess.call(compilation)
def parse_exec_trace(filename):
""" Parse the file generated by the 'libear' preloaded library.
    The given filename points to a file which contains the basic report
    generated by the interception library or the wrapper command. A single
    report file _might_ contain info about multiple process creations. """
logging.debug('parse exec trace file: %s', filename)
with open(filename, 'r') as handler:
content = handler.read()
for group in filter(bool, content.split(GS)):
records = group.split(RS)
yield {
'pid': records[0],
'ppid': records[1],
'function': records[2],
'directory': records[3],
'command': records[4].split(US)[:-1]
}
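# A sketch of the on-disk format parsed above, derived from the writer in
# intercept_build_wrapper (GS, RS and US are the ASCII group/record/unit
# separators defined at the top of this module):
#   <pid> RS <ppid> RS <function> RS <directory> RS <arg0> US <arg1> US ... GS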
def format_entry(exec_trace):
""" Generate the desired fields for compilation database entries. """
def abspath(cwd, name):
""" Create normalized absolute path from input filename. """
fullname = name if os.path.isabs(name) else os.path.join(cwd, name)
return os.path.normpath(fullname)
logging.debug('format this command: %s', exec_trace['command'])
compilation = split_command(exec_trace['command'])
if compilation:
for source in compilation.files:
compiler = 'c++' if compilation.compiler == 'c++' else 'cc'
command = [compiler, '-c'] + compilation.flags + [source]
            logging.debug('formatted as: %s', command)
yield {
'directory': exec_trace['directory'],
'command': encode(command),
'file': abspath(exec_trace['directory'], source)
}
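# A hedged example of one generated entry (all values are made up): a trace of
# ['clang', '-c', '-Wall', 'test.c'] executed in '/home/user/project' would
# yield roughly
#   {'directory': '/home/user/project',
#    'command': 'cc -c -Wall test.c',
#    'file': '/home/user/project/test.c'}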
def is_preload_disabled(platform):
""" Library-based interposition will fail silently if SIP is enabled,
so this should be detected. You can detect whether SIP is enabled on
Darwin by checking whether (1) there is a binary called 'csrutil' in
the path and, if so, (2) whether the output of executing 'csrutil status'
contains 'System Integrity Protection status: enabled'.
    The same problem occurs on Linux when SELinux is enabled: there the status
    query program is 'sestatus', and its output when enabled contains
    'SELinux status: enabled'. """
if platform == 'darwin':
pattern = re.compile(r'System Integrity Protection status:\s+enabled')
command = ['csrutil', 'status']
elif platform in {'linux', 'linux2'}:
pattern = re.compile(r'SELinux status:\s+enabled')
command = ['sestatus']
else:
return False
try:
lines = subprocess.check_output(command).decode('utf-8')
return any((pattern.match(line) for line in lines.splitlines()))
    except (OSError, subprocess.CalledProcessError):
        # If the status query tool is missing or fails, assume that the
        # protection is not enabled.
        return False
def entry_hash(entry):
""" Implement unique hash method for compilation database entries. """
    # For faster lookup in a set the filename is reversed
    filename = entry['file'][::-1]
    # For faster lookup in a set the directory is reversed
    directory = entry['directory'][::-1]
# On OS X the 'cc' and 'c++' compilers are wrappers for
# 'clang' therefore both call would be logged. To avoid
# this the hash does not contain the first word of the
# command.
command = ' '.join(decode(entry['command'])[1:])
return '<>'.join([filename, directory, command])
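# Worked sketch with a made-up entry:
#   entry_hash({'file': '/tmp/a.c', 'directory': '/tmp', 'command': 'cc -c a.c'})
# returns 'c.a/pmt/<>pmt/<>-c a.c' -- the reversed file name, the reversed
# directory, and the command without its first word, joined by '<>'.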
def create_parser():
""" Command line argument parser factory method. """
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'--verbose', '-v',
action='count',
default=0,
help="""Enable verbose output from '%(prog)s'. A second and third
flag increases verbosity.""")
parser.add_argument(
'--cdb',
metavar='<file>',
default="compile_commands.json",
help="""The JSON compilation database.""")
group = parser.add_mutually_exclusive_group()
group.add_argument(
'--append',
action='store_true',
help="""Append new entries to existing compilation database.""")
group.add_argument(
'--disable-filter', '-n',
dest='raw_entries',
action='store_true',
help="""Intercepted child process creation calls (exec calls) are all
logged to the output. The output is not a compilation database.
This flag is for debug purposes.""")
advanced = parser.add_argument_group('advanced options')
advanced.add_argument(
'--override-compiler',
action='store_true',
help="""Always resort to the compiler wrapper even when better
intercept methods are available.""")
advanced.add_argument(
'--use-cc',
metavar='<path>',
dest='cc',
default='cc',
help="""When '%(prog)s' analyzes a project by interposing a compiler
wrapper, which executes a real compiler for compilation and
                does other tasks (records the compiler invocation). Because of
this interposing, '%(prog)s' does not know what compiler your
project normally uses. Instead, it simply overrides the CC
environment variable, and guesses your default compiler.
If you need '%(prog)s' to use a specific compiler for
*compilation* then you can use this option to specify a path
to that compiler.""")
advanced.add_argument(
'--use-c++',
metavar='<path>',
dest='cxx',
default='c++',
help="""This is the same as "--use-cc" but for C++ code.""")
parser.add_argument(
dest='build',
nargs=argparse.REMAINDER,
help="""Command to run.""")
return parser
| 13,164 | 37.720588 | 79 | py |
LowFat | LowFat-master/llvm-4.0.0.src/tools/clang/tools/scan-build-py/libscanbuild/shell.py | # -*- coding: utf-8 -*-
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
""" This module implements basic shell escaping/unescaping methods. """
import re
import shlex
__all__ = ['encode', 'decode']
def encode(command):
""" Takes a command as list and returns a string. """
def needs_quote(word):
""" Returns true if arguments needs to be protected by quotes.
Previous implementation was shlex.split method, but that's not good
for this job. Currently is running through the string with a basic
state checking. """
reserved = {' ', '$', '%', '&', '(', ')', '[', ']', '{', '}', '*', '|',
'<', '>', '@', '?', '!'}
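        # State legend for the scan below: 0 = outside any quoting,
        # 1 = right after a backslash escape, 2 = inside double quotes,
        # 3 = inside single quotes.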
state = 0
for current in word:
if state == 0 and current in reserved:
return True
elif state == 0 and current == '\\':
state = 1
elif state == 1 and current in reserved | {'\\'}:
state = 0
elif state == 0 and current == '"':
state = 2
elif state == 2 and current == '"':
state = 0
elif state == 0 and current == "'":
state = 3
elif state == 3 and current == "'":
state = 0
return state != 0
def escape(word):
""" Do protect argument if that's needed. """
table = {'\\': '\\\\', '"': '\\"'}
escaped = ''.join([table.get(c, c) for c in word])
return '"' + escaped + '"' if needs_quote(word) else escaped
return " ".join([escape(arg) for arg in command])
def decode(string):
""" Takes a command string and returns as a list. """
def unescape(arg):
""" Gets rid of the escaping characters. """
if len(arg) >= 2 and arg[0] == arg[-1] and arg[0] == '"':
arg = arg[1:-1]
return re.sub(r'\\(["\\])', r'\1', arg)
return re.sub(r'\\([\\ $%&\(\)\[\]\{\}\*|<>@?!])', r'\1', arg)
return [unescape(arg) for arg in shlex.split(string)]
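# Round-trip sketch, consistent with the encode tests elsewhere in this repo:
#   encode(['clang', "it's me", 'again']) == 'clang "it\'s me" again'
#   decode('clang "it\'s me" again') == ['clang', "it's me", 'again']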
| 2,157 | 31.208955 | 79 | py |