repo (string, 2-99 chars) | file (string, 13-225 chars) | code (string, 0-18.3M chars) | file_length (int64, 0-18.3M) | avg_line_length (float64, 0-1.36M) | max_line_length (int64, 0-4.26M) | extension_type (string, 1 class: "py")
---|---|---|---|---|---|---
NCLS-Corpora
|
NCLS-Corpora-master/code/beaver-2task+/translate.py
|
# -*- coding: utf-8 -*-
import logging
import torch
import os
from beaver.data import build_dataset
from beaver.infer import beam_search
from beaver.model import NMTModel
from beaver.utils import parseopt, get_device, calculate_bleu
logging.basicConfig(format="%(asctime)s - %(message)s", level=logging.INFO)
opt = parseopt.parse_translate_args()
device = get_device()
def translate(dataset, fields, model):
already_1, hypothesis_1, references_1 = 0, [], []
already_2, hypothesis_2, references_2 = 0, [], []
for batch, flag in dataset:
predictions = beam_search(opt, model, batch.src, fields, flag)
if flag:
hypothesis_1 += [fields["task1_tgt"].decode(p) for p in predictions]
already_1 += len(predictions)
logging.info("Task 1: %7d/%7d" % (already_1, dataset.task1_dataset.num_examples))
else:
hypothesis_2 += [fields["task2_tgt"].decode(p) for p in predictions]
already_2 += len(predictions)
logging.info("Task 2: %7d/%7d" % (already_2, dataset.task2_dataset.num_examples))
origin_1 = sorted(zip(hypothesis_1, dataset.task1_dataset.seed), key=lambda t: t[1])
hypothesis_1 = [h for h, _ in origin_1]
with open(opt.output[0], "w", encoding="UTF-8") as out_file:
out_file.write("\n".join(hypothesis_1))
out_file.write("\n")
origin_2 = sorted(zip(hypothesis_2, dataset.task2_dataset.seed), key=lambda t: t[1])
hypothesis_2 = [h for h, _ in origin_2]
with open(opt.output[1], "w", encoding="UTF-8") as out_file:
out_file.write("\n".join(hypothesis_2))
out_file.write("\n")
logging.info("Translation finished. ")
def main():
logging.info("Build dataset...")
dataset = build_dataset(opt, [opt.input[0], opt.input[0], opt.input[1], opt.input[1]], opt.vocab, device, train=False)
fields = dataset.fields
pad_ids = {"src": fields["src"].pad_id,
"task1_tgt": fields["task1_tgt"].pad_id,
"task2_tgt": fields["task2_tgt"].pad_id}
vocab_sizes = {"src": len(fields["src"].vocab),
"task1_tgt": len(fields["task1_tgt"].vocab),
"task2_tgt": len(fields["task2_tgt"].vocab)}
# load checkpoint from model_path
logging.info("Load checkpoint from %s." % opt.model_path)
checkpoint = torch.load(opt.model_path, map_location=lambda storage, loc: storage)
logging.info("Build model...")
model = NMTModel.load_model(checkpoint["opt"], pad_ids, vocab_sizes, checkpoint["model"]).to(device).eval()
logging.info("Start translation...")
with torch.set_grad_enabled(False):
translate(dataset, fields, model)
if __name__ == '__main__':
main()
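# --- Hedged usage sketch (file names are illustrative, not from the repo):
# each task's source file is passed to build_dataset twice above because no
# reference target is needed at inference time. A typical invocation,
# matching the options in beaver/utils/parseopt.py:
#
#   python translate.py -vocab vocab.src vocab.tgt \
#       -input task1.src task2.src \
#       -output output1.txt output2.txt \
#       -model_path <dir>/checkpoint-step-xxxxxx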
| 2,731 | 33.582278 | 122 |
py
|
NCLS-Corpora
|
NCLS-Corpora-master/code/beaver-2task+/train.py
|
# -*- coding: utf-8 -*-
import logging
import torch
import torch.cuda
from beaver.data import build_dataset
from beaver.infer import beam_search
from beaver.loss import WarmAdam, LabelSmoothingLoss
from beaver.model import NMTModel
from beaver.utils import Saver
from beaver.utils import calculate_bleu
from beaver.utils import parseopt, get_device, printing_opt
from beaver.utils.metric import calculate_rouge
logging.basicConfig(format="%(asctime)s - %(message)s", level=logging.INFO)
opt = parseopt.parse_train_args()
device = get_device()
logging.info("\n" + printing_opt(opt))
saver = Saver(opt)
def valid(model, criterion_task1, criterion_task2, valid_dataset, step):
model.eval()
total_n = 0
total_task1_loss = total_task2_loss = 0.0
task1_hypothesis, task1_references = [], []
task2_hypothesis, task2_references = [], []
for i, (batch, flag) in enumerate(valid_dataset):
scores = model(batch.src, batch.tgt, flag)
if flag:
loss = criterion_task1(scores, batch.tgt)
else:
loss = criterion_task2(scores, batch.tgt)
_, predictions = scores.topk(k=1, dim=-1)
if flag: # task1
total_task1_loss += loss.data
task1_hypothesis += [valid_dataset.fields["task1_tgt"].decode(p) for p in predictions]
task1_references += [valid_dataset.fields["task1_tgt"].decode(t) for t in batch.tgt]
else:
total_task2_loss += loss.data
task2_hypothesis += [valid_dataset.fields["task2_tgt"].decode(p) for p in predictions]
task2_references += [valid_dataset.fields["task2_tgt"].decode(t) for t in batch.tgt]
total_n += 1
bleu_task1 = calculate_bleu(task1_hypothesis, task1_references)
bleu_task2 = calculate_bleu(task2_hypothesis, task2_references)
rouge1_task1, rouge2_task1 = calculate_rouge(task1_hypothesis, task1_references)
rouge1_task2, rouge2_task2 = calculate_rouge(task2_hypothesis, task2_references)
mean_task1_loss = total_task1_loss / total_n
mean_task2_loss = total_task2_loss / total_n
logging.info("loss-task1: %.2f \t loss-task2 %.2f \t bleu-task1: %3.2f\t bleu-task2: %3.2f \t rouge1-task1: %3.2f \t rouge1-task2: %3.2f \t rouge2-task1: %3.2f \t rouge2-task2: %3.2f"
% (mean_task1_loss, mean_task2_loss, bleu_task1, bleu_task2, rouge1_task1, rouge1_task2, rouge2_task1, rouge2_task2))
checkpoint = {"model": model.state_dict(), "opt": opt}
saver.save(checkpoint, step, mean_task1_loss, mean_task2_loss, bleu_task1, bleu_task2, rouge1_task1, rouge1_task2, rouge2_task1, rouge2_task2)
def train(model, criterion_task1, criterion_task2, optimizer, train_dataset, valid_dataset):
total_task1_loss = total_task2_loss = 0.0
model.zero_grad()
for i, (batch, flag) in enumerate(train_dataset):
scores = model(batch.src, batch.tgt, flag)
if flag:
loss = criterion_task1(scores, batch.tgt)
else:
loss = criterion_task2(scores, batch.tgt)
loss.backward()
if flag: # task1
total_task1_loss += loss.data
else:
total_task2_loss += loss.data
if (i + 1) % opt.grad_accum == 0:
optimizer.step()
model.zero_grad()
if optimizer.n_step % opt.report_every == 0:
                # the two tasks alternate batches, so each task accumulates
                # loss on roughly half the steps; hence the factor of 2
                mean_task1_loss = total_task1_loss / opt.report_every / opt.grad_accum * 2
                mean_task2_loss = total_task2_loss / opt.report_every / opt.grad_accum * 2
logging.info("step: %7d\t loss-task1: %.4f \t loss-task2: %.4f"
% (optimizer.n_step, mean_task1_loss, mean_task2_loss))
total_task1_loss = total_task2_loss = 0.0
if optimizer.n_step % opt.save_every == 0:
with torch.set_grad_enabled(False):
valid(model, criterion_task1, criterion_task2, valid_dataset, optimizer.n_step)
model.train()
del loss
def main():
logging.info("Build dataset...")
train_dataset = build_dataset(opt, opt.train, opt.vocab, device, train=True)
valid_dataset = build_dataset(opt, opt.valid, opt.vocab, device, train=False)
fields = valid_dataset.fields = train_dataset.fields
logging.info("Build model...")
pad_ids = {"src": fields["src"].pad_id,
"task1_tgt": fields["task1_tgt"].pad_id,
"task2_tgt": fields["task2_tgt"].pad_id}
vocab_sizes = {"src": len(fields["src"].vocab),
"task1_tgt": len(fields["task1_tgt"].vocab),
"task2_tgt": len(fields["task2_tgt"].vocab)}
model = NMTModel.load_model(opt, pad_ids, vocab_sizes).to(device)
criterion_task1 = LabelSmoothingLoss(opt.label_smoothing, vocab_sizes["task1_tgt"], pad_ids["task1_tgt"]).to(device)
criterion_task2 = LabelSmoothingLoss(opt.label_smoothing, vocab_sizes["task2_tgt"], pad_ids["task2_tgt"]).to(device)
n_step = int(opt.train_from.split("-")[-1]) if opt.train_from else 1
optimizer = WarmAdam(model.parameters(), opt.lr, opt.hidden_size, opt.warm_up, n_step)
logging.info("start training...")
train(model, criterion_task1, criterion_task2, optimizer, train_dataset, valid_dataset)
if __name__ == '__main__':
main()
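# --- Hedged usage sketch (file names are illustrative): "-train" and "-valid"
# each take four paths, ordered task1_src task1_tgt task2_src task2_tgt
# (see beaver/data/utils.py):
#
#   python train.py -train t1.src t1.tgt t2.src t2.tgt \
#       -valid v1.src v1.tgt v2.src v2.tgt \
#       -vocab vocab.src vocab.tgt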
| 5,295 | 40.700787 | 187 |
py
|
NCLS-Corpora
|
NCLS-Corpora-master/code/beaver-2task+/tools/model_average.py
|
# -*- coding: utf-8 -*-
import os
import torch
import sys
def main():
if len(sys.argv) != 3:
print("python model_average.py model_path n")
exit()
model_path = sys.argv[1]
    n = int(sys.argv[2])  # number of most recent checkpoints to average
    fs = [os.path.join(model_path, f) for f in os.listdir(model_path) if f.startswith("checkpoint")]
    fs = sorted(fs, reverse=True)[:n]  # keep the newest n checkpoint files
    n = len(fs)  # actual file count
cks = [torch.load(f, map_location=lambda storage, loc: storage) for f in fs]
first_model = cks[0]["model"] # average all weights into first model and save it
for k, _ in first_model.items():
for ck in cks[1:]:
first_model[k] = (first_model[k] + ck["model"][k])
first_model[k] = first_model[k] / n
torch.save(cks[0], os.path.join(model_path, "averaged-%s-%s" % (fs[-1].split("-")[-1], fs[0].split("-")[-1])))
if __name__ == '__main__':
main()
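# --- Hedged usage sketch (the directory name is illustrative): average the
# five newest checkpoints in a model directory; the result is written as
# "averaged-<oldest step>-<newest step>" alongside them:
#
#   python model_average.py train-yymmdd-HHMMSS 5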
| 941 | 29.387097 | 114 |
py
|
NCLS-Corpora
|
NCLS-Corpora-master/code/beaver-2task+/tools/build_vocab.py
|
# -*- coding: utf-8 -*-
import sys
import collections
log = sys.stderr.write
def main():
size = int(sys.argv[1])
counter = collections.Counter()
for line in sys.stdin:
counter.update(line.strip().split())
items = counter.most_common()
for word, _ in items[:size]:
print(word)
total = sum([c for _, c in items])
appear = sum([c for _, c in items[:size]])
log("total words: %d\n" % total)
log("words in vocab: %d\n" % appear)
log("vocab coverage: %.2f%%\n" % (1.0 * appear / total * 100))
log("total unique words: %d\n" % len(items))
if __name__ == '__main__':
main()
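# --- Hedged usage sketch (file names are illustrative): build a 30k-token
# vocabulary from tokenized text on stdin; the vocabulary goes to stdout and
# the coverage statistics to stderr:
#
#   python build_vocab.py 30000 < corpus.tok.txt > vocab.txt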
| 635 | 23.461538 | 66 |
py
|
NCLS-Corpora
|
NCLS-Corpora-master/code/beaver-2task+/beaver/__init__.py
|
# -*- coding: utf-8 -*-
| 24 | 11.5 | 23 |
py
|
NCLS-Corpora
|
NCLS-Corpora-master/code/beaver-2task+/beaver/loss/optimizers.py
|
# -*- coding: utf-8 -*-
import torch.nn as nn
import torch.optim as optim
class WarmAdam(object):
def __init__(self, params, lr, hidden_size, warm_up, n_step):
self.original_lr = lr
self.n_step = n_step
self.hidden_size = hidden_size
self.warm_up_step = warm_up
self.optimizer = optim.Adam(params, betas=[0.9, 0.998], eps=1e-9)
def step(self):
self.n_step += 1
warm_up = min(self.n_step ** (-0.5), self.n_step * self.warm_up_step ** (-1.5))
lr = self.original_lr * (self.hidden_size ** (-0.5) * warm_up)
for param_group in self.optimizer.param_groups:
param_group['lr'] = lr
self.optimizer.step()
class LabelSmoothingLoss(nn.Module):
def __init__(self, label_smoothing, tgt_vocab_size, ignore_index):
self.padding_idx = ignore_index
self.label_smoothing = label_smoothing
self.vocab_size = tgt_vocab_size
super(LabelSmoothingLoss, self).__init__()
def forward(self, output, target):
target = target[:, 1:].contiguous().view(-1)
output = output.view(-1, self.vocab_size)
non_pad_mask = target.ne(self.padding_idx)
nll_loss = -output.gather(dim=-1, index=target.view(-1, 1))[non_pad_mask].sum()
smooth_loss = -output.sum(dim=-1, keepdim=True)[non_pad_mask].sum()
eps_i = self.label_smoothing / self.vocab_size
loss = (1. - self.label_smoothing) * nll_loss + eps_i * smooth_loss
return loss / non_pad_mask.float().sum()
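# --- Hedged sketch (not part of the original file): WarmAdam.step implements
# the "Noam" schedule from "Attention Is All You Need",
#   lr(step) = lr0 * hidden_size**-0.5 * min(step**-0.5, step * warm_up**-1.5),
# rising linearly for warm_up steps and decaying as step**-0.5 afterwards.
# A standalone check of the formula:
if __name__ == "__main__":
    lr0, hidden, warm = 1.0, 512, 8000
    for s in (1, 4000, 8000, 16000):
        lr = lr0 * hidden ** -0.5 * min(s ** -0.5, s * warm ** -1.5)
        print("step %6d -> lr %.6f" % (s, lr))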
| 1,529 | 36.317073 | 87 |
py
|
NCLS-Corpora
|
NCLS-Corpora-master/code/beaver-2task+/beaver/loss/__init__.py
|
# -*- coding: utf-8 -*-
from beaver.loss.optimizers import WarmAdam, LabelSmoothingLoss
| 90 | 17.2 | 63 |
py
|
NCLS-Corpora
|
NCLS-Corpora-master/code/beaver-2task+/beaver/utils/saver.py
|
import json
import torch
import os
import datetime
class Saver(object):
def __init__(self, opt):
self.ckpt_names = []
self.model_path = opt.model_path + datetime.datetime.now().strftime("-%y%m%d-%H%M%S")
self.max_to_keep = opt.max_to_keep
os.mkdir(self.model_path)
with open(os.path.join(self.model_path, "params.json"), "w", encoding="UTF-8") as log:
log.write(json.dumps(vars(opt), indent=4) + "\n")
def save(self, save_dict, step, loss_task1, loss_task2, bleu_task1, bleu_task2, rouge1_task1, rouge1_task2, rouge2_task1, rouge2_task2):
filename = "checkpoint-step-%06d" % step
full_filename = os.path.join(self.model_path, filename)
self.ckpt_names.append(full_filename)
torch.save(save_dict, full_filename)
with open(os.path.join(self.model_path, "log"), "a", encoding="UTF-8") as log:
log.write("%s\t" % datetime.datetime.now())
log.write("step: %6d\t" % step)
log.write("loss-task1: %.2f\t" % loss_task1)
log.write("loss-task2: %.2f\t" % loss_task2)
log.write("bleu-task1: %3.2f\t" % bleu_task1)
log.write("bleu-task2: %3.2f\t" % bleu_task2)
log.write("rouge1-task1: %3.2f\t" % rouge1_task1)
log.write("rouge1-task2: %3.2f\t" % rouge1_task2)
log.write("rouge2-task1: %3.2f\t" % rouge2_task1)
log.write("rouge2-task2: %3.2f\t" % rouge2_task2)
log.write("\n")
if 0 < self.max_to_keep < len(self.ckpt_names):
earliest_ckpt = self.ckpt_names.pop(0)
os.remove(earliest_ckpt)
| 1,647 | 40.2 | 140 |
py
|
NCLS-Corpora
|
NCLS-Corpora-master/code/beaver-2task+/beaver/utils/parseopt.py
|
# -*- coding: utf-8 -*-
import argparse
import json
def common_opts(parser):
parser.add_argument("-vocab", type=str, nargs="*", help="Vocab file")
parser.add_argument("-batch_size", type=int, default=8192, help="Batch size")
parser.add_argument("-beam_size", type=int, default=4, help="Beam size")
parser.add_argument("-max_length", type=int, default=200, help="Maximum prediction length")
parser.add_argument("-length_penalty", type=float, default=0.6, help="Length penalty")
parser.add_argument("-model_path", default="train", help="Path to model checkpoint file")
parser.add_argument("-tf", action="store_true", help="Use teacher forcing for decoding")
parser.add_argument("-mono", action="store_true", help="任务1为单语摘要")
parser.add_argument("-min_length", type=int, default=1, help="Minimum prediction length")
def data_opts(parser):
parser.add_argument("-train", type=str, nargs=4, help="Training data")
parser.add_argument("-valid", type=str, nargs=4, help="Validation data")
def train_opts(parser):
parser.add_argument("-grad_accum", type=int, default=1, help="Accumulate gradients")
parser.add_argument("-max_to_keep", type=int, default=5, help="How many checkpoints to keep")
parser.add_argument("-report_every", type=int, default=1000, help="Report every n steps")
parser.add_argument("-save_every", type=int, default=2000, help="Valid and save model for every n steps")
parser.add_argument("-train_from", type=str, default=None, help="Train from checkpoint")
def model_opts(parser):
parser.add_argument("-layers", type=int, default=6, help="Number of layers")
parser.add_argument("-heads", type=int, default=8, help="Number of heads")
parser.add_argument("-hidden_size", type=int, default=512, help="Size of hidden states")
parser.add_argument("-ff_size", type=int, default=2048, help="Feed forward hidden size")
parser.add_argument("-lr", type=float, default=1.0, help="Learning rate")
parser.add_argument("-warm_up", type=int, default=8000, help="Warm up step")
parser.add_argument("-label_smoothing", type=float, default=0.1, help="Label smoothing rate")
parser.add_argument("-dropout", type=float, default=0.1, help="Dropout rate")
def translate_opts(parser):
parser.add_argument("-input", type=str, nargs=2, help="Translation data")
parser.add_argument("-truth", type=str, default=None, help="Truth target, used to calculate BLEU")
parser.add_argument("-output", default=["output1.txt", "output2.txt"], help="Path to output the predictions")
parser.add_argument("-bleu", action="store_true", help="Report BLEU")
def parse_train_args():
parser = argparse.ArgumentParser()
data_opts(parser)
train_opts(parser)
model_opts(parser)
common_opts(parser)
return parse_args(parser)
def parse_translate_args():
parser = argparse.ArgumentParser()
translate_opts(parser)
common_opts(parser)
return parse_args(parser)
def parse_args(parser):
parser.add_argument("-config", type=str, help="Config file")
opt = parser.parse_args()
if opt.config:
config = json.load(open(opt.config), object_hook=lambda d: {k: v for k, v in d.items() if k != "comment"})
parser.set_defaults(**config)
return parser.parse_args()
else:
return opt
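# --- Hedged sketch (not part of the original file): parse_args() re-parses
# the command line with defaults taken from a JSON config, so explicit flags
# still win, and the object_hook drops any key named "comment". An
# illustrative config file:
#
#   {
#       "train": ["t1.src", "t1.tgt", "t2.src", "t2.tgt"],
#       "vocab": ["vocab.src", "vocab.tgt"],
#       "batch_size": 4096,
#       "comment": "this key is ignored"
#   }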
| 3,348 | 41.935897 | 114 |
py
|
NCLS-Corpora
|
NCLS-Corpora-master/code/beaver-2task+/beaver/utils/rouge.py
|
# -*- coding: utf-8 -*-
def get_ngrams(n, text):
ngram_set = set()
text_length = len(text)
max_index_ngram = text_length - n
for i in range(max_index_ngram + 1):
ngram_set.add(tuple(text[i:i+n]))
return ngram_set
def rouge_n(evaluated_sentences, reference_sentences, n=2):  # defaults to ROUGE-2
if len(evaluated_sentences) <= 0 or len(reference_sentences) <= 0:
return 0
evaluated_ngrams = get_ngrams(n, evaluated_sentences)
reference_ngrams = get_ngrams(n, reference_sentences)
reference_ngrams_count = len(reference_ngrams)
if reference_ngrams_count == 0:
return 0
overlapping_ngrams = evaluated_ngrams.intersection(reference_ngrams)
overlapping_ngrams_count = len(overlapping_ngrams)
return overlapping_ngrams_count / reference_ngrams_count
def rouge_1(evaluated_sentences, reference_sentences):
evaluated_sentences = evaluated_sentences.split()
reference_sentences = reference_sentences.split()
return rouge_n(evaluated_sentences, reference_sentences, n=1)
def rouge_2(evaluated_sentences, reference_sentences):
evaluated_sentences = evaluated_sentences.split()
reference_sentences = reference_sentences.split()
return rouge_n(evaluated_sentences, reference_sentences, n=2)
def F_1(evaluated_sentences, reference_sentences, beta=1):
evaluated_sentences = evaluated_sentences.split()
reference_sentences = reference_sentences.split()
if len(evaluated_sentences) <= 0 or len(reference_sentences) <= 0:
return 0
evaluated_ngrams = get_ngrams(beta, evaluated_sentences) # equal to retrieved set
reference_ngrams = get_ngrams(beta, reference_sentences) # equal to relevant set
evaluated_ngrams_num = len(evaluated_ngrams)
reference_ngrams_num = len(reference_ngrams)
if reference_ngrams_num == 0 or evaluated_ngrams_num == 0:
return 0
overlapping_ngrams = evaluated_ngrams.intersection(reference_ngrams)
overlapping_ngrams_num = len(overlapping_ngrams)
if overlapping_ngrams_num == 0:
return 0
return 2*overlapping_ngrams_num / (reference_ngrams_num + evaluated_ngrams_num)
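# --- Hedged example (not part of the original file): rouge_n is n-gram recall
# over unique n-grams, and F_1 is a set-level F1 (beta is the n-gram order
# here, not an F-beta weight). For "the cat sat" vs "the cat slept" the
# unigram sets share {"the", "cat"}: rouge_1 = 2/3 and F_1 = 2*2 / (3+3) = 2/3.
if __name__ == "__main__":
    print(rouge_1("the cat sat", "the cat slept"))      # 0.666...
    print(F_1("the cat sat", "the cat slept", beta=1))  # 0.666...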
| 2,152 | 35.491525 | 86 |
py
|
NCLS-Corpora
|
NCLS-Corpora-master/code/beaver-2task+/beaver/utils/__init__.py
|
# -*- coding: utf-8 -*-
import torch.cuda
from beaver.utils.metric import calculate_bleu, file_bleu
from beaver.utils.saver import Saver
def get_device():
if torch.cuda.is_available():
return torch.device('cuda')
else:
return torch.device('cpu')
def printing_opt(opt):
return "\n".join(["%15s | %s" % (e[0], e[1]) for e in sorted(vars(opt).items(), key=lambda x: x[0])])
| 405 | 21.555556 | 105 |
py
|
NCLS-Corpora
|
NCLS-Corpora-master/code/beaver-2task+/beaver/utils/metric.py
|
import os
import re
import subprocess
import tempfile
from beaver.utils.rouge import F_1
def calculate_bleu(hypotheses, references, lowercase=False):
hypothesis_file = tempfile.NamedTemporaryFile(mode="w", encoding="UTF-8", delete=False)
hypothesis_file.write("\n".join(hypotheses) + "\n")
hypothesis_file.close()
reference_file = tempfile.NamedTemporaryFile(mode="w", encoding="UTF-8", delete=False)
reference_file.write("\n".join(references) + "\n")
reference_file.close()
return file_bleu(hypothesis_file.name, reference_file.name, lowercase)
def calculate_rouge(hypotheses, references):
rg1list = []
rg2list = []
for hypo, ref in zip(hypotheses, references):
rouge1 = F_1(hypo, ref, beta=1)
rouge2 = F_1(hypo, ref, beta=2)
rg1list.append(rouge1)
rg2list.append(rouge2)
rg1 = sum(rg1list) / len(rg1list)
rg2 = sum(rg2list) / len(rg2list)
return rg1 * 100, rg2 * 100
def file_bleu(hypothesis, reference, lowercase=False):
    # multi-bleu.perl lives in <repo>/tools, two directories above beaver/utils,
    # hence the three dirname() calls on this file's path.
    beaver_path = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
    multi_bleu_path = os.path.join(beaver_path, "tools", "multi-bleu.perl")
with open(hypothesis, "r") as read_pred, open(os.devnull, "w") as black_hole:
bleu_cmd = ["perl", multi_bleu_path]
if lowercase:
bleu_cmd += ["-lc"]
bleu_cmd += [reference]
try:
bleu_out = subprocess.check_output(bleu_cmd, stdin=read_pred, stderr=black_hole).decode("utf-8")
bleu_score = re.search(r"BLEU = (.+?),", bleu_out).group(1)
except subprocess.CalledProcessError:
bleu_score = -1.0
return float(bleu_score)
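# --- Hedged note (not part of the original file): file_bleu shells out to the
# bundled tools/multi-bleu.perl, so a perl interpreter must be on PATH; if the
# script exits with an error, -1.0 is returned. Note the temporary files above
# are created with delete=False and never removed.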
| 1,745 | 35.375 | 108 |
py
|
NCLS-Corpora
|
NCLS-Corpora-master/code/beaver-2task+/beaver/data/field.py
|
# -*- coding: utf-8 -*-
from typing import List
import torch
EOS_TOKEN = "<eos>"
BOS_TOKEN = "<bos>"
UNK_TOKEN = "<unk>"
PAD_TOKEN = "<pad>"
class Field(object):
def __init__(self, bos: bool, eos: bool, pad: bool, unk: bool):
self.bos_token = BOS_TOKEN if bos else None
self.eos_token = EOS_TOKEN if eos else None
self.unk_token = UNK_TOKEN if unk else None
self.pad_token = PAD_TOKEN if pad else None
self.vocab = None
def load_vocab(self, words: List[str], specials: List[str]):
self.vocab = Vocab(words, specials)
def process(self, batch, device):
max_len = max(len(x) for x in batch)
padded, length = [], []
for x in batch:
bos = [self.bos_token] if self.bos_token else []
eos = [self.eos_token] if self.eos_token else []
pad = [self.pad_token] * (max_len - len(x))
padded.append(bos + x + eos + pad)
length.append(len(x) + len(bos) + len(eos))
padded = torch.tensor([self.encode(ex) for ex in padded])
return padded.long().to(device)
def encode(self, tokens):
ids = []
for tok in tokens:
if tok in self.vocab.stoi:
ids.append(self.vocab.stoi[tok])
else:
ids.append(self.unk_id)
return ids
def decode(self, ids):
tokens = []
for tok in ids:
tok = self.vocab.itos[tok]
if tok == self.eos_token:
break
if tok == self.bos_token:
continue
tokens.append(tok)
return " ".join(tokens).replace("@@ ", "").replace("@@", "")
@property
def special(self):
return [tok for tok in [self.unk_token, self.pad_token, self.bos_token, self.eos_token] if tok is not None]
@property
def pad_id(self):
return self.vocab.stoi[self.pad_token]
@property
def eos_id(self):
return self.vocab.stoi[self.eos_token]
@property
def bos_id(self):
return self.vocab.stoi[self.bos_token]
@property
def unk_id(self):
return self.vocab.stoi[self.unk_token]
class Vocab(object):
def __init__(self, words: List[str], specials: List[str]):
self.itos = specials + words
self.stoi = {tok: i for i, tok in enumerate(self.itos)}
def __len__(self):
return len(self.itos)
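# --- Hedged sketch (not part of the original file): a minimal encode/decode
# round trip through Field and Vocab on CPU with a toy vocabulary.
if __name__ == "__main__":
    f = Field(bos=True, eos=True, pad=True, unk=True)
    f.load_vocab(["hello", "world"], f.special)
    batch = f.process([["hello", "world"], ["world"]], torch.device("cpu"))
    print(batch)               # 2 x 4 id matrix: bos/eos added, short row padded
    print(f.decode(batch[0]))  # "hello world"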
| 2,418 | 25.582418 | 115 |
py
|
NCLS-Corpora
|
NCLS-Corpora-master/code/beaver-2task+/beaver/data/dataset_wrapper.py
|
# -*- coding: utf-8 -*-
import itertools
from beaver.data.dataset import TranslationDataset
class Dataset(object):
def __init__(self, task1_dataset: TranslationDataset, task2_dataset: TranslationDataset):
self.task1_dataset = task1_dataset
self.task2_dataset = task2_dataset
self.fields = {
"src": task2_dataset.fields["src"],
"task1_tgt": task1_dataset.fields["tgt"],
"task2_tgt": task2_dataset.fields["tgt"]
}
def __iter__(self):
for batch1, batch2 in itertools.zip_longest(self.task1_dataset, self.task2_dataset):
if batch1 is not None:
yield batch1, True
if batch2 is not None:
yield batch2, False
| 748 | 28.96 | 93 |
py
|
NCLS-Corpora
|
NCLS-Corpora-master/code/beaver-2task+/beaver/data/utils.py
|
# -*- coding: utf-8 -*-
from beaver.data.dataset import TranslationDataset
from beaver.data.dataset_wrapper import Dataset
from beaver.data.field import Field
def build_dataset(opt, data_path, vocab_path, device, train=True):
task1_source_path = data_path[0]
task1_target_path = data_path[1]
task2_source_path = data_path[2]
task2_target_path = data_path[3]
source_field = Field(unk=True, pad=True, bos=False, eos=False)
translation_target_field = Field(unk=True, pad=True, bos=True, eos=True)
summary_target_field = Field(unk=True, pad=True, bos=True, eos=True)
source_vocab, target_vocab = vocab_path
source_special = source_field.special
translation_target_special = translation_target_field.special
summary_target_special = summary_target_field.special
with open(source_vocab, encoding="UTF-8") as f:
source_words = [line.strip() for line in f]
with open(target_vocab, encoding="UTF-8") as f:
target_words = [line.strip() for line in f]
    if opt.mono:
        # the source and the (monolingual) summary target share one vocabulary
        source_special = summary_target_special = sorted(set(source_special + summary_target_special))
source_field.load_vocab(source_words, source_special)
translation_target_field.load_vocab(target_words, translation_target_special)
if opt.mono:
summary_target_field.load_vocab(source_words, summary_target_special)
else:
summary_target_field.load_vocab(target_words, summary_target_special)
data1 = TranslationDataset(task1_source_path, task1_target_path, opt.batch_size, device, train,
{'src': source_field, 'tgt': summary_target_field})
data2 = TranslationDataset(task2_source_path, task2_target_path, opt.batch_size, device, train,
{'src': source_field, 'tgt': translation_target_field})
return Dataset(data1, data2)
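# --- Hedged note (not part of the original file): data_path is expected to be
# [task1_src, task1_tgt, task2_src, task2_tgt], matching the nargs=4
# "-train"/"-valid" options in parseopt.py. With -mono, the source and the
# task1 (summary) target share the source vocabulary; otherwise both targets
# use the target vocabulary.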
| 1,896 | 40.23913 | 102 |
py
|
NCLS-Corpora
|
NCLS-Corpora-master/code/beaver-2task+/beaver/data/dataset.py
|
# -*- coding: utf-8 -*-
import random
from collections import namedtuple
from typing import Dict
import torch
from beaver.data.field import Field
Batch = namedtuple("Batch", ['src', 'tgt', 'batch_size'])
Example = namedtuple("Example", ['src', 'tgt'])
class TranslationDataset(object):
def __init__(self,
src_path: str,
tgt_path: str,
batch_size: int,
device: torch.device,
train: bool,
fields: Dict[str, Field]):
self.batch_size = batch_size
self.train = train
self.device = device
self.fields = fields
self.sort_key = lambda ex: (len(ex.src), len(ex.tgt))
examples = []
for src_line, tgt_line in zip(read_file(src_path), read_file(tgt_path)):
examples.append(Example(src_line, tgt_line))
examples, self.seed = self.sort(examples)
self.num_examples = len(examples)
self.batches = list(batch(examples, self.batch_size))
def __iter__(self):
while True:
if self.train:
random.shuffle(self.batches)
for minibatch in self.batches:
src = self.fields["src"].process([x.src for x in minibatch], self.device)
tgt = self.fields["tgt"].process([x.tgt for x in minibatch], self.device)
yield Batch(src=src, tgt=tgt, batch_size=len(minibatch))
if not self.train:
break
def sort(self, examples):
seed = sorted(range(len(examples)), key=lambda idx: self.sort_key(examples[idx]))
return sorted(examples, key=self.sort_key), seed
def read_file(path):
with open(path, encoding="utf-8") as f:
for line in f:
yield line.strip().split()
def batch(data, batch_size):
minibatch, cur_len = [], 0
for ex in data:
minibatch.append(ex)
cur_len = max(cur_len, len(ex.src), len(ex.tgt))
if cur_len * len(minibatch) > batch_size:
yield minibatch[:-1]
minibatch, cur_len = [ex], max(len(ex.src), len(ex.tgt))
if minibatch:
yield minibatch
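# --- Hedged sketch (not part of the original file): batch() packs examples
# until max_len * n_examples would exceed the token budget, then emits the
# minibatch without the overflowing example.
if __name__ == "__main__":
    data = [Example(["a"] * n, ["b"] * n) for n in (3, 4, 5, 6)]
    print([[len(ex.src) for ex in mb] for mb in batch(data, batch_size=10)])
    # -> [[3, 4], [5], [6]]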
| 2,164 | 29.069444 | 89 |
py
|
NCLS-Corpora
|
NCLS-Corpora-master/code/beaver-2task+/beaver/data/__init__.py
|
# -*- coding: utf-8 -*-
from beaver.data.utils import build_dataset
| 69 | 16.5 | 43 |
py
|
NCLS-Corpora
|
NCLS-Corpora-master/code/beaver-2task+/beaver/infer/beam.py
|
# -*- coding: utf-8 -*-
import torch
class Beam(object):
def __init__(self, beam_size, pad, bos, eos, device, lp):
self.size = beam_size
self.alpha = lp
self.scores = torch.full([beam_size], -1e20).float().to(device)
self.scores[0] = 0.
self.hypotheses = torch.full([1, beam_size], fill_value=pad).long().to(device)
self.hypotheses[0][0] = bos
self.eos = eos
self.finished = []
@property
def current_state(self):
return self.hypotheses[-1]
def advance(self, scores, origin, tokens):
self.scores = scores
self.hypotheses = torch.index_select(self.hypotheses, 1, origin)
self.hypotheses = torch.cat([self.hypotheses, tokens.unsqueeze(0)])
for idx, tok in enumerate(self.hypotheses[-1]):
if tok == self.eos:
self.finished.append((self.scores[idx].clone(), self.hypotheses[1:, idx]))
self.scores[idx] = -1e20
@property
def done(self):
max_score = max([self.length_penalty(score, self.hypotheses.size(0)) for score in self.scores])
max_finish = max([self.length_penalty(t[0], t[1].size(0)) for t in self.finished]) if self.finished else -1e20
return bool(max_score < max_finish)
@property
def best_hypothesis(self):
finished = sorted(self.finished, key=lambda t: self.length_penalty(t[0], t[1].size(0)), reverse=True)
if not finished:
return self.hypotheses[1:, 0]
return finished[0][1]
def length_penalty(self, score, length):
return score * (6 ** self.alpha) / ((5 + length) ** self.alpha)
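# --- Hedged note (not part of the original file): length_penalty is the GNMT
# formulation, score * 6**alpha / (5 + length)**alpha, i.e. the log-probability
# divided by ((5 + length) / 6)**alpha. With alpha = 0.6 a 10-token hypothesis
# is divided by (15/6)**0.6 ~= 1.73, a milder normalisation than dividing by
# the raw length.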
| 1,652 | 32.06 | 118 |
py
|
NCLS-Corpora
|
NCLS-Corpora-master/code/beaver-2task+/beaver/infer/__init__.py
|
# -*- coding: utf-8 -*-
from beaver.infer.translator import beam_search
| 74 | 14 | 47 |
py
|
NCLS-Corpora
|
NCLS-Corpora-master/code/beaver-2task+/beaver/infer/translator.py
|
# -*- coding: utf-8 -*-
import torch
from beaver.infer.beam import Beam
def beam_search(opt, model, src, fields, flag):
batch_size = src.size(0)
beam_size = opt.beam_size
device = src.device
encoder = model.encoder
if flag:
decoder = model.task1_decoder
generator = model.task1_generator
tgt_field = fields["task1_tgt"]
num_words = model.task1_generator.vocab_size
else:
decoder = model.task2_decoder
generator = model.task2_generator
tgt_field = fields["task2_tgt"]
num_words = model.task2_generator.vocab_size
beams = [Beam(opt.beam_size, tgt_field.pad_id, tgt_field.bos_id, tgt_field.eos_id,
device, opt.length_penalty) for _ in range(batch_size)]
src = src.repeat(1, beam_size).view(batch_size*beam_size, -1)
src_pad = src.eq(fields["src"].pad_id)
src_out = encoder(src, src_pad)
beam_expander = (torch.arange(batch_size) * beam_size).view(-1, 1).to(device)
previous = None
for i in range(opt.max_length):
if all((b.done for b in beams)):
break
# [batch_size x beam_size, 1]
current_token = torch.cat([b.current_state for b in beams]).unsqueeze(-1)
tgt_pad = current_token.eq(tgt_field.pad_id)
out, previous = decoder(current_token, src_out, src_pad, tgt_pad, previous, i)
previous_score = torch.stack([b.scores for b in beams]).unsqueeze(-1)
out = generator(out).view(batch_size, beam_size, -1)
if i < opt.min_length:
out[:, :, tgt_field.eos_id] = -1e15
# find topk candidates
scores, indexes = (out + previous_score).view(batch_size, -1).topk(beam_size)
# find origins and token
origins = (indexes.view(-1) // num_words).view(batch_size, beam_size)
tokens = (indexes.view(-1) % num_words).view(batch_size, beam_size)
for j, b in enumerate(beams):
b.advance(scores[j], origins[j], tokens[j])
origins = (origins + beam_expander).view(-1)
previous = torch.index_select(previous, 0, origins)
return [b.best_hypothesis for b in beams]
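# --- Hedged note (not part of the original file): the topk above runs over the
# flattened (beam_size * vocab) scores per sentence, so index // num_words
# recovers the beam a candidate came from and index % num_words its token id;
# beam_expander then shifts beam indices into the (batch * beam) layout shared
# by src and previous.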
| 2,156 | 32.184615 | 86 |
py
|
NCLS-Corpora
|
NCLS-Corpora-master/code/beaver-2task+/beaver/model/embeddings.py
|
# -*- coding: utf-8 -*-
import math
import torch
import torch.nn as nn
def positional_encoding(dim, max_len=5000):
pe = torch.zeros(max_len, dim)
position = torch.arange(0, max_len).unsqueeze(1)
div_term = torch.exp((torch.arange(0, dim, 2, dtype=torch.float) * -(math.log(10000.0) / dim)))
pe[:, 0::2] = torch.sin(position.float() * div_term)
pe[:, 1::2] = torch.cos(position.float() * div_term)
return pe
class Embedding(nn.Module):
def __init__(self, embedding_dim, vocab_size, padding_idx, dropout):
self.word_padding_idx = padding_idx
self.embedding_dim = embedding_dim
pe = positional_encoding(embedding_dim)
super(Embedding, self).__init__()
self.embedding = nn.Embedding(vocab_size, embedding_dim, padding_idx=padding_idx)
self.register_buffer('pe', pe)
self.dropout = nn.Dropout(p=dropout)
self.reset_parameters()
def reset_parameters(self):
nn.init.normal_(self.embedding.weight, mean=0.0, std=self.embedding_dim ** -0.5)
@property
def padding_idx(self):
return self.word_padding_idx
def forward(self, x, timestep=0):
embedding = self.embedding(x) * math.sqrt(self.embedding_dim) + self.pe[timestep:timestep + x.size(1)]
return self.dropout(embedding)
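# --- Hedged check (not part of the original file): positional_encoding fills
# even columns with sin(pos / 10000**(2i/dim)) and odd columns with the
# matching cos terms, so row 0 alternates 0, 1, 0, 1, ...
if __name__ == "__main__":
    pe = positional_encoding(dim=4, max_len=3)
    print(pe.shape)  # torch.Size([3, 4])
    print(pe[0])     # tensor([0., 1., 0., 1.])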
| 1,313 | 31.04878 | 110 |
py
|
NCLS-Corpora
|
NCLS-Corpora-master/code/beaver-2task+/beaver/model/transformer.py
|
# -*- coding: utf-8 -*-
import math
import torch
import torch.nn as nn
class FeedForward(nn.Module):
def __init__(self, hidden_size, inner_size, dropout):
super(FeedForward, self).__init__()
self.linear_in = nn.Linear(hidden_size, inner_size, bias=False)
self.linear_out = nn.Linear(inner_size, hidden_size, bias=False)
self.relu = nn.ReLU()
self.dropout = nn.Dropout(dropout)
self.reset_parameters()
def reset_parameters(self):
nn.init.xavier_uniform_(self.linear_in.weight)
nn.init.xavier_uniform_(self.linear_out.weight)
def forward(self, x):
y = self.linear_in(x)
y = self.relu(y)
y = self.dropout(y)
y = self.linear_out(y)
return y
class EncoderLayer(nn.Module):
def __init__(self, hidden_size, dropout, head_count, ff_size):
super(EncoderLayer, self).__init__()
self.self_attn = MultiHeadedAttention(head_count, hidden_size, dropout)
self.feed_forward = FeedForward(hidden_size, ff_size, dropout)
self.dropout = nn.ModuleList([nn.Dropout(dropout) for _ in range(2)])
self.norm = nn.ModuleList([nn.LayerNorm(hidden_size) for _ in range(2)])
def forward(self, x, mask):
# self attention
y = self.self_attn(self.norm[0](x), mask=mask)
x = x + self.dropout[0](y)
# feed forward
y = self.feed_forward(self.norm[1](x))
x = x + self.dropout[1](y)
return x
class Encoder(nn.Module):
def __init__(self, num_layers, num_heads, hidden_size, dropout, ff_size, embedding):
self.num_layers = num_layers
super(Encoder, self).__init__()
self.embedding = embedding
self.layers = nn.ModuleList([EncoderLayer(hidden_size, dropout, num_heads, ff_size) for _ in range(num_layers)])
self.norm = nn.LayerNorm(hidden_size)
def forward(self, src, src_pad):
src_mask = src_pad.unsqueeze(1)
output = self.embedding(src)
for i in range(self.num_layers):
output = self.layers[i](output, src_mask)
return self.norm(output)
class DecoderLayer(nn.Module):
def __init__(self, hidden_size, dropout, head_count, ff_size):
super(DecoderLayer, self).__init__()
self.self_attn = MultiHeadedAttention(head_count, hidden_size, dropout)
self.src_attn = MultiHeadedAttention(head_count, hidden_size, dropout)
self.feed_forward = FeedForward(hidden_size, ff_size, dropout)
self.norm = nn.ModuleList([nn.LayerNorm(hidden_size, eps=1e-6) for _ in range(3)])
self.dropout = nn.ModuleList([nn.Dropout(dropout) for _ in range(3)])
def forward(self, x, enc_out, src_mask, tgt_mask, previous=None):
all_input = x if previous is None else torch.cat((previous, x), dim=1)
# self attention
y = self.self_attn(self.norm[0](x), self.norm[0](all_input), mask=tgt_mask)
x = x + self.dropout[0](y)
# encoder decoder attention
y = self.src_attn(self.norm[1](x), enc_out, mask=src_mask)
x = x + self.dropout[1](y)
# feed forward
y = self.feed_forward(self.norm[2](x))
x = x + self.dropout[2](y)
return x, all_input
class Decoder(nn.Module):
def __init__(self, num_layers, num_heads, hidden_size, dropout, ff_size, embedding):
self.num_layers = num_layers
super(Decoder, self).__init__()
self.embedding = embedding
self.layers = nn.ModuleList([DecoderLayer(hidden_size, dropout, num_heads, ff_size) for _ in range(num_layers)])
self.register_buffer("upper_triangle", torch.triu(torch.ones(1000, 1000), diagonal=1).byte())
self.register_buffer("zero_mask", torch.zeros(1).byte())
self.norm = nn.LayerNorm(hidden_size, eps=1e-6)
def forward(self, tgt, enc_out, src_pad, tgt_pad, previous=None, timestep=0):
output = self.embedding(tgt, timestep)
tgt_len = tgt.size(1)
src_mask = src_pad.unsqueeze(1)
tgt_mask = tgt_pad.unsqueeze(1)
upper_triangle = self.upper_triangle[:tgt_len, :tgt_len]
# tgt mask: 0 if not upper and not pad
tgt_mask = torch.gt(tgt_mask + upper_triangle, 0)
saved_inputs = []
for i in range(self.num_layers):
prev_layer = None if previous is None else previous[:, i]
tgt_mask = tgt_mask if previous is None else self.zero_mask
output, all_input = self.layers[i](output, enc_out, src_mask, tgt_mask, prev_layer)
saved_inputs.append(all_input)
return self.norm(output), torch.stack(saved_inputs, dim=1)
class MultiHeadedAttention(nn.Module):
def __init__(self, head_count, model_dim, dropout):
self.dim_per_head = model_dim // head_count
self.head_count = head_count
super(MultiHeadedAttention, self).__init__()
self.linear_q = nn.Linear(model_dim, model_dim, bias=False)
self.linear_k = nn.Linear(model_dim, model_dim, bias=False)
self.linear_v = nn.Linear(model_dim, model_dim, bias=False)
self.softmax = nn.Softmax(dim=-1)
self.dropout = nn.Dropout(dropout)
self.final_linear = nn.Linear(model_dim, model_dim)
self.reset_parameters()
def reset_parameters(self):
nn.init.xavier_uniform_(self.linear_q.weight)
nn.init.xavier_uniform_(self.linear_k.weight)
nn.init.xavier_uniform_(self.linear_v.weight)
nn.init.xavier_uniform_(self.final_linear.weight)
def forward(self, query, memory=None, mask=None):
memory = query if memory is None else memory
def split_head(x):
# B x L x D => B x h x L x d
return x.view(x.size(0), -1, self.head_count, self.dim_per_head).transpose(1, 2)
def combine_head(x):
# B x h x L x d => B x L x D
return x.transpose(1, 2).contiguous().view(x.size(0), -1, self.head_count * self.dim_per_head)
# 1) Project q, k, v.
q = split_head(self.linear_q(query))
k = split_head(self.linear_k(memory))
v = split_head(self.linear_v(memory))
# 2) Calculate and scale scores.
q = q / math.sqrt(self.dim_per_head)
scores = torch.matmul(q, k.transpose(2, 3))
mask = mask.unsqueeze(1).expand_as(scores)
scores.masked_fill_(mask, -1e18)
# 3) Apply attention dropout and compute context vectors.
weights = self.dropout(self.softmax(scores))
context = combine_head(torch.matmul(weights, v))
return self.final_linear(context)
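# --- Hedged sketch (not part of the original file): a shape check for
# MultiHeadedAttention with a toy batch and an all-False padding mask
# (B x 1 x L, broadcast over heads and query positions inside forward).
if __name__ == "__main__":
    attn = MultiHeadedAttention(head_count=4, model_dim=16, dropout=0.0)
    x = torch.randn(2, 5, 16)                      # B x L x D
    mask = torch.zeros(2, 1, 5, dtype=torch.bool)  # nothing masked
    print(attn(x, mask=mask).shape)                # torch.Size([2, 5, 16])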
| 6,591 | 35.622222 | 120 |
py
|
NCLS-Corpora
|
NCLS-Corpora-master/code/beaver-2task+/beaver/model/__init__.py
|
# -*- coding: utf-8 -*-
from beaver.model.nmt_model import NMTModel
| 70 | 13.2 | 43 |
py
|
NCLS-Corpora
|
NCLS-Corpora-master/code/beaver-2task+/beaver/model/nmt_model.py
|
# -*- coding: utf-8 -*-
from typing import Dict
import torch
import torch.nn as nn
from beaver.model.embeddings import Embedding
from beaver.model.transformer import Decoder, Encoder
class Generator(nn.Module):
def __init__(self, hidden_size: int, tgt_vocab_size: int):
self.vocab_size = tgt_vocab_size
super(Generator, self).__init__()
self.linear_hidden = nn.Linear(hidden_size, tgt_vocab_size)
self.lsm = nn.LogSoftmax(dim=-1)
self.reset_parameters()
def reset_parameters(self):
nn.init.xavier_uniform_(self.linear_hidden.weight)
def forward(self, dec_out):
score = self.linear_hidden(dec_out)
lsm_score = self.lsm(score)
return lsm_score
class NMTModel(nn.Module):
def __init__(self, encoder: Encoder,
task1_decoder: Decoder,
task2_decoder: Decoder,
task1_generator: Generator,
task2_generator: Generator):
super(NMTModel, self).__init__()
self.encoder = encoder
self.task1_decoder = task1_decoder
self.task2_decoder = task2_decoder
self.task1_generator = task1_generator
self.task2_generator = task2_generator
def forward(self, source, target, flag):
target = target[:, :-1] # shift left
source_pad = source.eq(self.encoder.embedding.word_padding_idx)
target_pad = target.eq(self.task1_decoder.embedding.word_padding_idx)
enc_out = self.encoder(source, source_pad)
if flag: # task1
decoder_outputs, _ = self.task1_decoder(target, enc_out, source_pad, target_pad)
return self.task1_generator(decoder_outputs)
else: # task2
decoder_outputs, _ = self.task2_decoder(target, enc_out, source_pad, target_pad)
return self.task2_generator(decoder_outputs)
@classmethod
def load_model(cls, model_opt,
pad_ids: Dict[str, int],
vocab_sizes: Dict[str, int],
checkpoint=None):
source_embedding = Embedding(embedding_dim=model_opt.hidden_size,
dropout=model_opt.dropout,
padding_idx=pad_ids["src"],
vocab_size=vocab_sizes["src"])
target_embedding_task2 = Embedding(embedding_dim=model_opt.hidden_size,
dropout=model_opt.dropout,
padding_idx=pad_ids["task2_tgt"],
vocab_size=vocab_sizes["task2_tgt"])
if model_opt.mono:
            # monolingual summarization: task1 shares the source embedding
target_embedding_task1 = source_embedding
else:
target_embedding_task1 = target_embedding_task2
encoder = Encoder(model_opt.layers,
model_opt.heads,
model_opt.hidden_size,
model_opt.dropout,
model_opt.ff_size,
source_embedding)
task1_decoder = Decoder(model_opt.layers,
model_opt.heads,
model_opt.hidden_size,
model_opt.dropout,
model_opt.ff_size,
target_embedding_task1)
task2_decoder = Decoder(model_opt.layers,
model_opt.heads,
model_opt.hidden_size,
model_opt.dropout,
model_opt.ff_size,
target_embedding_task2)
task1_generator = Generator(model_opt.hidden_size, vocab_sizes["task1_tgt"])
task2_generator = Generator(model_opt.hidden_size, vocab_sizes["task2_tgt"])
model = cls(encoder, task1_decoder, task2_decoder, task1_generator, task2_generator)
if checkpoint is None and model_opt.train_from:
checkpoint = torch.load(model_opt.train_from, map_location=lambda storage, loc: storage)
model.load_state_dict(checkpoint["model"])
elif checkpoint is not None:
model.load_state_dict(checkpoint)
return model
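# --- Hedged note (not part of the original file): throughout the 2-task code,
# flag=True routes a batch through the task1 (summarization) decoder/generator
# pair and flag=False through the task2 (translation) pair, matching the
# (batch, flag) tuples yielded by beaver.data.dataset_wrapper.Dataset.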
| 4,315 | 38.962963 | 100 |
py
|
labelImg2
|
labelImg2-master/setup.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
from libs.version import __version__
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
requirements = [
# TODO: Different OS have different requirements
]
required_packages = find_packages()
required_packages.append('labelImg')
APP = ['labelImg.py']
OPTIONS = {
'argv_emulation': True,
'iconfile': 'icons/app.icns'
}
setup(
app=APP,
name='labelImg2',
version=__version__,
description="LabelImg2 is a graphical image annotation tool and label object bounding boxes in images",
long_description=readme + '\n\n' + history,
author="TzuTa Lin",
author_email='[email protected]',
url='https://github.com/chinakook/labelImg2',
package_dir={'labelImg': '.'},
packages=required_packages,
entry_points={
'console_scripts': [
'labelImg2=labelImg2.labelImg:main'
]
},
include_package_data=True,
install_requires=requirements,
license="MIT license",
zip_safe=False,
keywords='labelImg labelTool development annotation deeplearning',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
package_data={'data/predefined_classes.txt': ['data/predefined_classes.txt']},
options={'py2app': OPTIONS},
setup_requires=['py2app']
)
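# --- Hedged note (not part of the original file): "python setup.py py2app"
# builds the macOS app bundle described by APP and OPTIONS above; py2app is
# declared in setup_requires, so the build pulls it in automatically.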
| 1,974 | 29.384615 | 107 |
py
|
labelImg2
|
labelImg2-master/labelImg.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import codecs
import distutils.spawn
import os
import platform
import re
import sys
import subprocess
from functools import partial
from collections import defaultdict
from libs.naturalsort import natsort
try:
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
except ImportError:
# needed for py3+qt4
# Ref:
# http://pyqt.sourceforge.net/Docs/PyQt4/incompatible_apis.html
# http://stackoverflow.com/questions/21217399/pyqt4-qtcore-qvariant-object-instead-of-a-string
if sys.version_info.major >= 3:
import sip
sip.setapi('QVariant', 2)
from PyQt4.QtGui import *
from PyQt4.QtCore import *
# Add internal libs
from libs.constants import *
from libs.lib import struct, newAction, newIcon, addActions, fmtShortcut, generateColorByText
from libs.settings import Settings
from libs.shape import Shape, DEFAULT_LINE_COLOR, DEFAULT_FILL_COLOR
from libs.canvas import Canvas
from libs.zoomWidget import ZoomWidget
from libs.labelDialog import LabelDialog
from libs.labelFile import LabelFile, LabelFileError
from libs.pascal_voc_io import PascalVocReader, XML_EXT
from libs.ustr import ustr
from libs.labelView import CLabelView, HashableQStandardItem
from libs.fileView import CFileView
__appname__ = 'labelImg2'
# Utility functions and classes.
def have_qstring():
    '''py3/qt5 drop the QString wrapper, as py3 has a native unicode str type'''
    return not (sys.version_info.major >= 3 or QT_VERSION_STR.startswith('5.'))
def util_qt_strlistclass():
return QStringList if have_qstring() else list
class WindowMixin(object):
def menu(self, title, actions=None):
menu = self.menuBar().addMenu(title)
if actions:
addActions(menu, actions)
return menu
    def toolbar(self, title, actions=None):
        toolbar = QToolBar(title)
        toolbar.setObjectName(u'%sToolBar' % title)
        if actions:
            for action in actions:
                if isinstance(action, QWidgetAction):
                    toolbar.addAction(action)
                    continue
                btn = QToolButton()
                btn.setDefaultAction(action)
                btn.setToolButtonStyle(Qt.ToolButtonIconOnly)
                toolbar.addWidget(btn)
        self.addToolBar(Qt.TopToolBarArea, toolbar)
        return toolbar
class MainWindow(QMainWindow, WindowMixin):
FIT_WINDOW, FIT_WIDTH, MANUAL_ZOOM = list(range(3))
def __init__(self, defaultFilename=None, defaultPrefdefClassFile=None, defaultSaveDir=None):
super(MainWindow, self).__init__()
self.setWindowTitle(__appname__)
# Load setting in the main thread
self.settings = Settings()
self.settings.load()
settings = self.settings
# Save as Pascal voc xml
self.defaultSaveDir = defaultSaveDir
# For loading all image under a directory
self.dirname = None
self.labelHist = []
self.lastOpenDir = None
# Whether we need to save or not.
self.dirty = False
self._noSelectionSlot = False
# Load predefined classes to the list
self.loadPredefinedClasses(defaultPrefdefClassFile)
# Main widgets and related state.
self.labelDialog = LabelDialog(parent=self, listItem=self.labelHist)
self.ShapeItemDict = {}
self.ItemShapeDict = {}
labellistLayout = QVBoxLayout()
labellistLayout.setContentsMargins(0, 0, 0, 0)
self.default_label = self.labelHist[0]
# Create a widget for edit and diffc button
self.diffcButton = QCheckBox(u'difficult')
self.diffcButton.setChecked(False)
self.diffcButton.stateChanged.connect(self.btnstate)
self.editButton = QToolButton()
self.editButton.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
labellistLayout.addWidget(self.editButton)
labellistLayout.addWidget(self.diffcButton)
# Create and add a widget for showing current label items
labelListContainer = QWidget()
labelListContainer.setLayout(labellistLayout)
self.labelList = CLabelView(self.labelHist)
self.labelModel = self.labelList.model()
self.labelModel.dataChanged.connect(self.labelDataChanged)
self.labelList.extraEditing.connect(self.updateLabelShowing)
self.labelsm = self.labelList.selectionModel()
self.labelsm.currentChanged.connect(self.labelCurrentChanged)
myHeader = self.labelList.verticalHeader()
myHeader.clicked.connect(self.labelHeaderClicked)
labellistLayout.addWidget(self.labelList)
self.dock = QDockWidget(u'Box Labels', self)
self.dock.setObjectName(u'Labels')
self.dock.setWidget(labelListContainer)
self.labelList.toggleEdit.connect(self.toggleExtraEditing)
self.fileListView = CFileView()
self.fileModel = self.fileListView.model()
self.filesm = self.fileListView.selectionModel()
self.filesm.currentChanged.connect(self.fileCurrentChanged)
filelistLayout = QVBoxLayout()
filelistLayout.setContentsMargins(0, 0, 0, 0)
self.prevButton = QToolButton()
self.nextButton = QToolButton()
self.playButton = QToolButton()
self.prevButton.setToolButtonStyle(Qt.ToolButtonIconOnly)
self.nextButton.setToolButtonStyle(Qt.ToolButtonIconOnly)
self.playButton.setToolButtonStyle(Qt.ToolButtonIconOnly)
self.controlButtonsLayout = QHBoxLayout()
self.controlButtonsLayout.setAlignment(Qt.AlignLeft)
self.controlButtonsLayout.addWidget(self.prevButton)
self.controlButtonsLayout.addWidget(self.nextButton)
self.controlButtonsLayout.addWidget(self.playButton)
filelistLayout.addLayout(self.controlButtonsLayout)
filelistLayout.addWidget(self.fileListView)
fileListContainer = QWidget()
fileListContainer.setLayout(filelistLayout)
self.filedock = QDockWidget(u'File List', self)
self.filedock.setObjectName(u'Files')
self.filedock.setWidget(fileListContainer)
self.zoomWidget = ZoomWidget()
scroll = QScrollArea()
self.canvas = Canvas(parent=scroll)
self.canvas.zoomRequest.connect(self.zoomRequest)
scroll.setWidget(self.canvas)
scroll.setWidgetResizable(True)
self.scrollBars = {
Qt.Vertical: scroll.verticalScrollBar(),
Qt.Horizontal: scroll.horizontalScrollBar()
}
self.scrollArea = scroll
self.canvas.scrollRequest.connect(self.scrollRequest)
self.canvas.newShape.connect(self.newShape)
self.canvas.shapeMoved.connect(self.setDirty)
self.canvas.selectionChanged.connect(self.shapeSelectionChanged)
self.canvas.drawingPolygon.connect(self.toggleDrawingSensitive)
self.canvas.cancelDraw.connect(self.createCancel)
self.canvas.toggleEdit.connect(self.toggleExtraEditing)
self.setCentralWidget(scroll)
self.addDockWidget(Qt.RightDockWidgetArea, self.dock)
self.addDockWidget(Qt.RightDockWidgetArea, self.filedock)
self.dock.setFeatures(QDockWidget.DockWidgetFloatable)
self.filedock.setFeatures(QDockWidget.DockWidgetFloatable)
self.displayTimer = QTimer(self)
self.displayTimer.setInterval(1000)
self.displayTimer.timeout.connect(self.autoNext)
self.playing = False
# Actions
action = partial(newAction, self)
quit = action('&Quit', self.close,
'Ctrl+Q', 'power.svg', u'Quit application')
open = action('&Open', self.openFile,
'Ctrl+O', 'open.svg', u'Open image or label file')
opendir = action('&Open Dir', self.openDirDialog,
'Ctrl+u', 'dir.svg', u'Open Dir')
changeSavedir = action('&Change Save Dir', self.changeSavedirDialog,
'Ctrl+r', 'dir.svg', u'Change default saved Annotation dir')
openAnnotation = action('&Open Annotation', self.openAnnotationDialog,
'Ctrl+Shift+O', 'open.svg', u'Open Annotation')
verify = action('&Verify Image', self.verifyImg,
'space', 'downloaded.svg', u'Verify Image')
save = action('&Save', self.saveFile,
'Ctrl+S', 'save.svg', u'Save labels to file', enabled=False)
saveAs = action('&Save As', self.saveFileAs,
'Ctrl+Shift+S', 'save.svg', u'Save labels to a different file', enabled=False)
close = action('&Close', self.closeFile, 'Ctrl+W', 'close.svg', u'Close current file')
resetAll = action('&ResetAll', self.resetAll, None, 'reset.svg', u'Reset all')
create = action('Create\nRectBox', self.createShape,
'w', 'rect.png', u'Draw a new Box', enabled=False)
createSo = action('Create\nSolidRectBox', self.createSoShape,
None, 'rect.png', None, enabled=False)
createRo = action('Create\nRotatedRBox', self.createRoShape,
'e', 'rectRo.png', u'Draw a new RotatedRBox', enabled=False)
delete = action('Delete\nRectBox', self.deleteSelectedShape,
'Delete', 'cancel2.svg', u'Delete', enabled=False)
copy = action('&Duplicate\nRectBox', self.copySelectedShape,
'Ctrl+D', 'copy.svg', u'Create a duplicate of the selected Box',
enabled=False)
showInfo = action('&About', self.showInfoDialog, None, 'info.svg', u'About')
zoom = QWidgetAction(self)
zoom.setDefaultWidget(self.zoomWidget)
self.zoomWidget.setWhatsThis(
u"Zoom in or out of the image. Also accessible with"
" %s and %s from the canvas." % (fmtShortcut("Ctrl+[-+]"),
fmtShortcut("Ctrl+Wheel")))
self.zoomWidget.setEnabled(False)
zoomIn = action('Zoom &In', partial(self.addZoom, 10),
'Ctrl++', 'zoom-in.svg', u'Increase zoom level', enabled=False)
zoomOut = action('&Zoom Out', partial(self.addZoom, -10),
'Ctrl+-', 'zoom-out.svg', u'Decrease zoom level', enabled=False)
zoomOrg = action('&Original size', partial(self.setZoom, 100),
'Ctrl+=', 'zoom100.svg', u'Zoom to original size', enabled=False)
fitWindow = action('&Fit Window', self.setFitWindow,
'Ctrl+F', 'zoomReset.svg', u'Zoom follows window size',
checkable=True, enabled=False)
fitWidth = action('Fit &Width', self.setFitWidth,
'Ctrl+Shift+F', 'fit-width.svg', u'Zoom follows window width',
checkable=True, enabled=False)
openPrevImg = action('&Prev Image', self.openPrevImg,
'a', 'previous.svg', u'Open Prev')
openNextImg = action('&Next Image', self.openNextImg,
'd', 'next.svg', u'Open Next')
play = action('Play', self.playStart,
'Ctrl+Shift+P', 'play.svg', u'auto next',
checkable=True, enabled=True)
self.prevButton.setDefaultAction(openPrevImg)
self.nextButton.setDefaultAction(openNextImg)
self.playButton.setDefaultAction(play)
# Group zoom controls into a list for easier toggling.
zoomActions = (self.zoomWidget, zoomIn, zoomOut,
zoomOrg, fitWindow, fitWidth)
self.zoomMode = self.MANUAL_ZOOM
self.scalers = {
self.FIT_WINDOW: self.scaleFitWindow,
self.FIT_WIDTH: self.scaleFitWidth,
# Set to one to scale to 100% when loading files.
self.MANUAL_ZOOM: lambda: 1,
}
edit = action('&Manage Labels', self.editLabel,
'Ctrl+M', 'tags.svg', u'Modify the label of the selected Box',
enabled=True)
self.editButton.setDefaultAction(edit)
        # Label list context menu.
labelMenu = QMenu()
addActions(labelMenu, (edit, delete))
# Store actions for further handling.
self.actions = struct(save=save, saveAs=saveAs, open=open, close=close, resetAll = resetAll,
create=create, createSo=createSo, createRo=createRo, delete=delete, edit=edit, copy=copy,
zoom=zoom, zoomIn=zoomIn, zoomOut=zoomOut, zoomOrg=zoomOrg,
fitWindow=fitWindow, fitWidth=fitWidth, play=play,
zoomActions=zoomActions,
fileMenuActions=(
open, opendir, save, saveAs, close, resetAll, quit),
beginner=(),
editMenu=(edit, copy, delete,
None),
beginnerContext=(create, createSo, createRo, copy, delete),
onLoadActive=(
close, create),
onShapesPresent=(saveAs,))
self.menus = struct(
file=self.menu('&File'),
edit=self.menu('&Edit'),
view=self.menu('&View'),
help=self.menu('&Help'),
recentFiles=QMenu('Open &Recent'),
labelList=labelMenu)
# Auto saving : Enable auto saving if pressing next
self.autoSaving = QAction("Auto Saving", self)
self.autoSaving.setCheckable(True)
self.autoSaving.setChecked(settings.get(SETTING_AUTO_SAVE, False))
# Add option to enable/disable labels being painted at the top of bounding boxes
self.paintLabelsOption = QAction("Paint Labels", self)
self.paintLabelsOption.setShortcut("Ctrl+Shift+P")
self.paintLabelsOption.setCheckable(True)
self.paintLabelsOption.setChecked(settings.get(SETTING_PAINT_LABEL, False))
self.paintLabelsOption.triggered.connect(self.togglePaintLabelsOption)
self.drawCorner = QAction('Always Draw Corner', self)
self.drawCorner.setCheckable(True)
self.drawCorner.setChecked(settings.get(SETTING_DRAW_CORNER, False))
self.drawCorner.triggered.connect(self.canvas.setDrawCornerState)
addActions(self.menus.file,
(open, opendir, changeSavedir, openAnnotation, self.menus.recentFiles, save, saveAs, close, resetAll, quit))
addActions(self.menus.help, (showInfo,))
addActions(self.menus.view, (
self.autoSaving,
self.paintLabelsOption,
self.drawCorner,
None,
None,
zoomIn, zoomOut, zoomOrg, None,
fitWindow, fitWidth))
self.menus.file.aboutToShow.connect(self.updateFileMenu)
# Custom context menu for the canvas widget:
addActions(self.canvas.menus[0], self.actions.beginnerContext)
addActions(self.canvas.menus[1], (
action('&Copy here', self.copyShape),
action('&Move here', self.moveShape)))
self.tools = self.toolbar('Tools')
self.actions.beginner = (open, opendir, changeSavedir, verify, save, None, create, createSo, createRo, copy, delete, None,
zoomIn, zoom, zoomOut, zoomOrg, fitWindow, fitWidth)
self.statusBar().showMessage('%s started.' % __appname__)
self.statusBar().show()
# Application state.
self.image = QImage()
self.filePath = ustr(defaultFilename)
self.recentFiles = []
self.maxRecent = 7
self.lineColor = None
self.fillColor = None
self.zoom_level = 100
self.fit_window = False
# Add Chris
self.difficult = False
        ## Fix the compatibility issue between qt4 and qt5: convert the QStringList to a python list
if settings.get(SETTING_RECENT_FILES):
if have_qstring():
recentFileQStringList = settings.get(SETTING_RECENT_FILES)
self.recentFiles = [ustr(i) for i in recentFileQStringList]
else:
self.recentFiles = recentFileQStringList = settings.get(SETTING_RECENT_FILES)
size = settings.get(SETTING_WIN_SIZE, QSize(600, 500))
position = settings.get(SETTING_WIN_POSE, QPoint(0, 0))
self.resize(size)
self.move(position)
saveDir = ustr(settings.get(SETTING_SAVE_DIR, None))
self.lastOpenDir = ustr(settings.get(SETTING_LAST_OPEN_DIR, None))
if self.defaultSaveDir is None and saveDir is not None and os.path.exists(saveDir):
self.defaultSaveDir = saveDir
self.statusBar().showMessage('%s started. Annotation will be saved to %s' %
(__appname__, self.defaultSaveDir))
self.statusBar().show()
self.restoreState(settings.get(SETTING_WIN_STATE, QByteArray()))
Shape.line_color = self.lineColor = QColor(settings.get(SETTING_LINE_COLOR, DEFAULT_LINE_COLOR))
Shape.fill_color = self.fillColor = QColor(settings.get(SETTING_FILL_COLOR, DEFAULT_FILL_COLOR))
self.canvas.setDrawingColor(self.lineColor)
# Add chris
Shape.difficult = self.difficult
# Populate the File menu dynamically.
self.updateFileMenu()
# Since loading the file may take some time, make sure it runs in the background.
if self.filePath and os.path.isdir(self.filePath):
self.queueEvent(partial(self.importDirImages, self.filePath or ""))
elif self.filePath:
self.queueEvent(partial(self.loadFile, self.filePath or ""))
# Callbacks:
self.zoomWidget.valueChanged.connect(self.paintCanvas)
self.populateModeActions()
# Display cursor coordinates at the right of status bar
self.labelCoordinates = QLabel('')
self.statusBar().addPermanentWidget(self.labelCoordinates)
self.imageDim = QLabel('')
self.statusBar().addPermanentWidget(self.imageDim)
self.statFile = QLabel('')
self.statusBar().addPermanentWidget(self.statFile)
        # Open dir if the default file path is a directory
if self.filePath and os.path.isdir(self.filePath):
self.openDirDialog(dirpath=self.filePath)
def noShapes(self):
return not self.ItemShapeDict
def populateModeActions(self):
tool, menu = self.actions.beginner, self.actions.beginnerContext
self.tools.clear()
addActions(self.tools, tool)
self.canvas.menus[0].clear()
addActions(self.canvas.menus[0], menu)
self.menus.edit.clear()
actions = (self.actions.create, self.actions.createSo, self.actions.createRo)
addActions(self.menus.edit, actions + self.actions.editMenu)
def setDirty(self):
self.dirty = True
self.actions.save.setEnabled(True)
def setClean(self):
self.dirty = False
self.actions.save.setEnabled(False)
self.actions.create.setEnabled(True)
self.actions.createSo.setEnabled(True)
self.actions.createRo.setEnabled(True)
def autoNext(self):
if self.playing:
suc = self.openNextImg()
if not suc:
self.actions.play.triggered.emit(False)
self.actions.play.setChecked(False)
def playStart(self, value=True):
if value:
self.playing = True
self.displayTimer.start()
else:
self.playing = False
self.displayTimer.stop()
def toggleActions(self, value=True):
"""Enable/Disable widgets which depend on an opened image."""
for z in self.actions.zoomActions:
z.setEnabled(value)
for action in self.actions.onLoadActive:
action.setEnabled(value)
def queueEvent(self, function):
QTimer.singleShot(0, function)
def status(self, message, delay=5000):
self.statusBar().showMessage(message, delay)
def resetState(self):
self.labelModel.clear()
self.labelModel.setHorizontalHeaderLabels(["Label", "Extra Info"])
self.ShapeItemDict.clear()
self.ItemShapeDict.clear()
self.filePath = None
self.imageData = None
self.labelFile = None
self.canvas.resetState()
self.labelCoordinates.clear()
self.imageDim.clear()
def labelDataChanged(self, topLeft, bottomRight):
item0 = self.labelModel.item(topLeft.row(), 0)
shape = self.ItemShapeDict[item0]
if topLeft.column() == 0:
shape.label = self.labelModel.data(topLeft)
if sys.version_info < (3, 0, 0):
shape.label = shape.label.toPyObject()
color = generateColorByText(shape.label)
item1 = self.labelModel.item(topLeft.row(), 1)
item0.setBackground(color)
item1.setBackground(color)
shape.line_color = color
shape.fill_color = color
else:
shape.extra_label = self.labelModel.data(topLeft)
if sys.version_info < (3, 0, 0):
shape.extra_label = shape.extra_label.toPyObject()
self.setDirty()
return
    def updateLabelShowing(self, index, text):
        item0 = self.labelModel.item(index.row(), 0)
        shape = self.ItemShapeDict[item0]
        shape.extra_label = text
self.canvas.update()
def addRecentFile(self, filePath):
if filePath in self.recentFiles:
self.recentFiles.remove(filePath)
elif len(self.recentFiles) >= self.maxRecent:
self.recentFiles.pop()
self.recentFiles.insert(0, filePath)
def showInfoDialog(self):
msg = u'{0} \n©Chinakook 2018. [email protected]'.format(__appname__)
QMessageBox.information(self, u'About', msg)
def createShape(self):
self.canvas.setEditing(0)
self.canvas.canDrawRotatedRect = False
self.actions.create.setEnabled(False)
self.actions.createSo.setEnabled(False)
self.actions.createRo.setEnabled(False)
def createSoShape(self):
self.canvas.setEditing(2)
self.canvas.canDrawRotatedRect = False
self.actions.create.setEnabled(False)
self.actions.createSo.setEnabled(False)
self.actions.createRo.setEnabled(False)
def createRoShape(self):
self.canvas.setEditing(0)
self.canvas.canDrawRotatedRect = True
self.actions.create.setEnabled(False)
self.actions.createSo.setEnabled(False)
self.actions.createRo.setEnabled(False)
def createCancel(self):
self.canvas.setEditing(1)
self.canvas.restoreCursor()
self.actions.create.setEnabled(True)
self.actions.createSo.setEnabled(True)
self.actions.createRo.setEnabled(True)
def toggleDrawingSensitive(self, drawing=True):
if not drawing:
self.canvas.setEditing(1)
self.canvas.restoreCursor()
self.actions.create.setEnabled(True)
self.actions.createSo.setEnabled(True)
self.actions.createRo.setEnabled(True)
def toggleDrawMode(self, edit=1):
self.canvas.setEditing(edit)
def toggleExtraEditing(self, state):
index = self.labelsm.currentIndex()
#print("ExtraEditing", self.sender())
editindex = self.labelModel.index(index.row(), 1)
self.labelList.edit(editindex)
def updateFileMenu(self):
currFilePath = self.filePath
def exists(filename):
return os.path.exists(filename)
menu = self.menus.recentFiles
menu.clear()
files = [f for f in self.recentFiles if f !=
currFilePath and exists(f)]
for i, f in enumerate(files):
icon = newIcon('print-setup.svg')
action = QAction(
icon, '&%d %s' % (i + 1, QFileInfo(f).fileName()), self)
action.triggered.connect(partial(self.loadRecent, f))
menu.addAction(action)
def editLabel(self):
if not self.canvas.editing():
return
self.labelDialog.updateListItems(self.labelHist)
res = self.labelDialog.popUp()
if res is not None:
self.labelHist, self.default_label = res
self.labelList.updateLabelList(self.labelHist)
def fileCurrentChanged(self, current, previous):
self.statFile.setText('{0}/{1}'.format(current.row()+1, current.model().rowCount()))
if self.autoSaving.isChecked():
if self.defaultSaveDir is not None:
self.labelList.earlyCommit()
if self.dirty is True:
self.fileModel.setData(previous, len(self.canvas.shapes), Qt.BackgroundRole)
self.saveFile()
else:
self.changeSavedirDialog()
return
filename = self.fileModel.data(current, Qt.EditRole)
if filename:
self.loadFile(filename)
if self.canvas.selectedShape:
self.canvas.selectedShape.selected = False
self.canvas.selectedShape = None
self.canvas.setHiding(False)
# Add chris
    def btnstate(self, item=None):
        """Handle the 'difficult' flag for examples.
        Updated on each object."""
if not self.canvas.editing():
return
item0 = self.labelModel.itemFromIndex(self.labelModel.index(self.labelsm.currentIndex().row(), 0))
if item0 is None:
item0 = self.labelModel.item(self.labelModel.rowCount() - 1,0)
difficult = self.diffcButton.isChecked()
        try:
            shape = self.ItemShapeDict[item0]
        except KeyError:
            # No shape is associated with this item (e.g. stale selection).
            return
        # Checked and update
        if difficult != shape.difficult:
            shape.difficult = difficult
            self.setDirty()
        else: # User probably changed item visibility
            #self.canvas.setShapeVisible(shape, item.checkState() == Qt.Checked)
            pass
# React to canvas signals.
def shapeSelectionChanged(self, selected=False):
if self._noSelectionSlot:
self._noSelectionSlot = False
else:
shape = self.canvas.selectedShape
if shape and shape in self.ShapeItemDict:
item0 = self.ShapeItemDict[shape]
index = self.labelModel.indexFromItem(item0)
self.labelList.selectRow(index.row())
#self.labelsm.setCurrentIndex(index, QItemSelectionModel.SelectCurrent)
else:
self.labelList.clearSelection()
self.actions.delete.setEnabled(selected)
self.actions.copy.setEnabled(selected)
def addLabel(self, shape):
shape.paintLabel = self.paintLabelsOption.isChecked()
item0 = HashableQStandardItem(shape.label)
item1 = QStandardItem(shape.extra_label)
color = generateColorByText(shape.label)
item0.setBackground(color)
item1.setBackground(color)
self.labelModel.appendRow([item0, item1])
self.ShapeItemDict[shape] = item0
self.ItemShapeDict[item0] = shape
for action in self.actions.onShapesPresent:
action.setEnabled(True)
def remLabel(self, shape):
if shape is None:
return
item0 = self.ShapeItemDict[shape]
index = self.labelModel.indexFromItem(item0)
self.labelModel.removeRows(index.row(), 1)
del self.ShapeItemDict[shape]
del self.ItemShapeDict[item0]
def loadLabels(self, shapes):
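        # Accepted per-shape tuple layouts (summary of the branches below):
        #   5 fields: label, points, line_color, fill_color, difficult
        #   6 fields: the above plus extra_label
        #   7 fields: label, points, line_color, fill_color, difficult, isRotated, direction
        #   8 fields: the above plus extra_label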
s = []
for shape_info in shapes:
if len(shape_info) == 5:
label, points, line_color, fill_color, difficult = shape_info
extra_label = ''
isRotated = False
direction = 0
elif len(shape_info) == 6:
label, points, line_color, fill_color, difficult, extra_label = shape_info
isRotated = False
direction = 0
elif len(shape_info) == 7:
label, points, line_color, fill_color, difficult, isRotated, direction = shape_info
extra_label = ''
elif len(shape_info) == 8:
label, points, line_color, fill_color, difficult, isRotated, direction, extra_label = shape_info
            else:
                # Unknown tuple layout: skip the entry instead of falling
                # through with stale values from the previous iteration.
                continue
shape = Shape(label=label)
for x, y in points:
shape.addPoint(QPointF(x, y))
shape.difficult = difficult
shape.direction = direction
shape.isRotated = isRotated
shape.extra_label = extra_label
shape.close()
s.append(shape)
if line_color:
shape.line_color = QColor(*line_color)
else:
shape.line_color = generateColorByText(label)
if fill_color:
shape.fill_color = QColor(*fill_color)
else:
shape.fill_color = generateColorByText(label)
shape.alwaysShowCorner = self.drawCorner.isChecked()
if not label in self.labelHist:
self.labelHist.append(label)
self.labelList.updateLabelList(self.labelHist)
self.addLabel(shape)
self.canvas.loadShapes(s)
def saveLabels(self, annotationFilePath):
annotationFilePath = ustr(annotationFilePath)
if self.labelFile is None:
self.labelFile = LabelFile()
self.labelFile.verified = self.canvas.verified
def format_shape(s):
return dict(label=s.label,
line_color=s.line_color.getRgb(),
fill_color=s.fill_color.getRgb(),
points=[(p.x(), p.y()) for p in s.points],
# add chris
difficult = s.difficult,
direction = s.direction,
center = s.center,
isRotated = s.isRotated,
extra_text = s.extra_label)
shapes = [format_shape(shape) for shape in self.canvas.shapes]
        # Different annotation formats can be added here
try:
if ustr(annotationFilePath[-4:]) != ".xml":
annotationFilePath += XML_EXT
print ('Img: ' + self.filePath + ' -> Its xml: ' + annotationFilePath)
self.labelFile.savePascalVocFormat(annotationFilePath, shapes, self.filePath, self.imageData,
self.lineColor.getRgb(), self.fillColor.getRgb())
return True
except LabelFileError as e:
self.errorMessage(u'Error saving label data', u'<b>%s</b>' % e)
return False
def copySelectedShape(self):
self.addLabel(self.canvas.copySelectedShape())
# fix copy and delete
self.shapeSelectionChanged(True)
def labelCurrentChanged(self, current, previous):
if current.row() < 0:
return
item0 = self.labelModel.itemFromIndex(self.labelModel.index(current.row(), 0))
if self.canvas.editing():
            self._noSelectionSlot = True
shape = self.ItemShapeDict[item0]
self.canvas.selectShape(shape)
self.diffcButton.setChecked(shape.difficult)
def labelHeaderClicked(self, index, checked):
item0 = self.labelModel.item(index, 0)
shape = self.ItemShapeDict[item0]
self.canvas.setShapeVisible(shape, checked)
# Callback functions:
def newShape(self, continous):
text = self.default_label
extra_text = ""
if text is not None:
generate_color = generateColorByText(text)
shape = self.canvas.setLastLabel(text, generate_color, generate_color, extra_text)
shape.alwaysShowCorner=self.drawCorner.isChecked()
self.addLabel(shape)
if continous:
pass
else:
self.canvas.setEditing(1)
self.actions.create.setEnabled(True)
self.actions.createSo.setEnabled(True)
self.actions.createRo.setEnabled(True)
self.setDirty()
else:
# self.canvas.undoLastLine()
self.canvas.resetAllLines()
def scrollRequest(self, delta, orientation):
#units = - delta / (8 * 15)
units = - delta / (2 * 15)
bar = self.scrollBars[orientation]
bar.setValue(bar.value() + bar.singleStep() * units)
def setZoom(self, value):
self.actions.fitWidth.setChecked(False)
self.actions.fitWindow.setChecked(False)
self.zoomMode = self.MANUAL_ZOOM
self.zoomWidget.setValue(value)
def addZoom(self, increment=10):
self.setZoom(self.zoomWidget.value() + increment)
def zoomRequest(self, delta):
# get the current scrollbar positions
# calculate the percentages ~ coordinates
h_bar = self.scrollBars[Qt.Horizontal]
v_bar = self.scrollBars[Qt.Vertical]
# get the current maximum, to know the difference after zooming
h_bar_max = h_bar.maximum()
v_bar_max = v_bar.maximum()
# get the cursor position and canvas size
# calculate the desired movement from 0 to 1
# where 0 = move left
# 1 = move right
# up and down analogous
cursor = QCursor()
pos = cursor.pos()
relative_pos = QWidget.mapFromGlobal(self, pos)
cursor_x = relative_pos.x()
cursor_y = relative_pos.y()
w = self.scrollArea.width()
h = self.scrollArea.height()
# the scaling from 0 to 1 has some padding
# you don't have to hit the very leftmost pixel for a maximum-left movement
margin = 0.1
move_x = (cursor_x - margin * w) / (w - 2 * margin * w)
move_y = (cursor_y - margin * h) / (h - 2 * margin * h)
# clamp the values from 0 to 1
move_x = min(max(move_x, 0), 1)
move_y = min(max(move_y, 0), 1)
# zoom in
units = delta / (8 * 15)
scale = 10
self.addZoom(scale * units)
# get the difference in scrollbar values
# this is how far we can move
d_h_bar_max = h_bar.maximum() - h_bar_max
d_v_bar_max = v_bar.maximum() - v_bar_max
# get the new scrollbar values
new_h_bar_value = h_bar.value() + move_x * d_h_bar_max
new_v_bar_value = v_bar.value() + move_y * d_v_bar_max
h_bar.setValue(new_h_bar_value)
v_bar.setValue(new_v_bar_value)
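        # The margin-normalized mapping above, in short (illustrative note,
        # not part of the original file): with viewport width w and margin m,
        # move_x = clamp((cursor_x - m*w) / (w - 2*m*w), 0, 1), so positions
        # inside the margin snap to 0 or 1 and the interior maps linearly
        # onto [0, 1]; move_y works the same way vertically.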
def setFitWindow(self, value=True):
if value:
self.actions.fitWidth.setChecked(False)
self.zoomMode = self.FIT_WINDOW if value else self.MANUAL_ZOOM
self.adjustScale()
def setFitWidth(self, value=True):
if value:
self.actions.fitWindow.setChecked(False)
self.zoomMode = self.FIT_WIDTH if value else self.MANUAL_ZOOM
self.adjustScale()
def loadFile(self, filePath=None):
"""Load the specified file, or the last opened file if None."""
self.resetState()
self.canvas.setEnabled(False)
if filePath is None:
filePath = self.settings.get(SETTING_FILENAME)
# Make sure that filePath is a regular python string, rather than QString
if sys.version_info < (3, 0, 0):
filePath = filePath.toPyObject()
filePath = ustr(filePath)
unicodeFilePath = ustr(filePath)
if unicodeFilePath and os.path.exists(unicodeFilePath):
if LabelFile.isLabelFile(unicodeFilePath):
try:
self.labelFile = LabelFile(unicodeFilePath)
except LabelFileError as e:
self.errorMessage(u'Error opening file',
(u"<p><b>%s</b></p>"
u"<p>Make sure <i>%s</i> is a valid label file.")
% (e, unicodeFilePath))
self.status("Error reading %s" % unicodeFilePath)
return False
self.imageData = self.labelFile.imageData
self.lineColor = QColor(*self.labelFile.lineColor)
self.fillColor = QColor(*self.labelFile.fillColor)
self.canvas.verified = self.labelFile.verified
else:
# Load image:
# read data first and store for saving into label file.
# self.imageData = read(unicodeFilePath, None)
self.labelFile = None
self.canvas.verified = False
# image = QImage.fromData(self.imageData)
# if image.isNull():
# self.errorMessage(u'Error opening file',
# u"<p>Make sure <i>%s</i> is a valid image file." % unicodeFilePath)
# self.status("Error reading %s" % unicodeFilePath)
# return False
#self.status("Loaded %s" % os.path.basename(unicodeFilePath))
reader0 = QImageReader(unicodeFilePath)
reader0.setAutoTransform(True)
# transformation = reader0.transformation()
# print(transformation)
image = reader0.read()
self.image = image
self.filePath = unicodeFilePath
self.canvas.loadPixmap(QPixmap.fromImage(image))
self.imageDim.setText('%d x %d' % (self.image.width(), self.image.height()))
if self.labelFile is not None:
self.loadLabels(self.labelFile.shapes)
self.setClean()
self.canvas.setEnabled(True)
self.adjustScale(initial=True)
self.paintCanvas()
self.addRecentFile(self.filePath)
self.toggleActions(True)
# Label xml file and show bound box according to its filename
vocReader = None
if self.defaultSaveDir is not None:
relname = os.path.relpath(self.filePath, self.dirname)
relname = os.path.splitext(relname)[0]
            # TODO: if defaultSaveDir is changed to another dir, the subdirectory must be created first
xmlPath = os.path.join(self.defaultSaveDir, relname + XML_EXT)
if os.path.exists(xmlPath) and os.path.isfile(xmlPath):
vocReader = self.loadPascalXMLByFilename(xmlPath)
else:
xmlPath = os.path.splitext(filePath)[0] + XML_EXT
if os.path.isfile(xmlPath):
vocReader = self.loadPascalXMLByFilename(xmlPath)
if vocReader is not None:
vocWidth, vocHeight, _ = vocReader.getSize()
if self.image.width() != vocWidth or self.image.height() != vocHeight:
#self.errorMessage("Image info not matched", "The width or height of annotation file is not matched with that of the image")
self.saveFile()
self.canvas.setFocus(True)
return True
return False
def resizeEvent(self, event):
if self.canvas and not self.image.isNull()\
and self.zoomMode != self.MANUAL_ZOOM:
self.adjustScale()
super(MainWindow, self).resizeEvent(event)
def paintCanvas(self):
if self.image.isNull():
return
self.canvas.scale = 0.01 * self.zoomWidget.value()
self.canvas.adjustSize()
self.canvas.update()
def adjustScale(self, initial=False):
value = self.scalers[self.FIT_WINDOW if initial else self.zoomMode]()
self.zoomWidget.setValue(int(100 * value))
def scaleFitWindow(self):
"""Figure out the size of the pixmap in order to fit the main widget."""
e = 2.0 # So that no scrollbars are generated.
w1 = self.centralWidget().width() - e
h1 = self.centralWidget().height() - e
a1 = w1 / h1
# Calculate a new scale value based on the pixmap's aspect ratio.
w2 = self.canvas.pixmap.width() - 0.0
h2 = self.canvas.pixmap.height() - 0.0
a2 = w2 / h2
return w1 / w2 if a2 >= a1 else h1 / h2
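        # If the image is proportionally wider than the widget (a2 >= a1),
        # width is the binding constraint, so scale by w1/w2; otherwise the
        # height must fit, so scale by h1/h2.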
def scaleFitWidth(self):
# The epsilon does not seem to work too well here.
w = self.centralWidget().width() - 2.0
return w / self.canvas.pixmap.width()
def closeEvent(self, event):
if not self.mayContinue():
event.ignore()
settings = self.settings
        # If images were loaded from a dir, don't reload the single file at the next start
if self.dirname is None:
settings[SETTING_FILENAME] = self.filePath if self.filePath else ''
else:
settings[SETTING_FILENAME] = ''
settings[SETTING_WIN_SIZE] = self.size()
settings[SETTING_WIN_POSE] = self.pos()
settings[SETTING_WIN_STATE] = self.saveState()
settings[SETTING_LINE_COLOR] = self.lineColor
settings[SETTING_FILL_COLOR] = self.fillColor
settings[SETTING_RECENT_FILES] = self.recentFiles
if self.defaultSaveDir and os.path.exists(self.defaultSaveDir):
settings[SETTING_SAVE_DIR] = ustr(self.defaultSaveDir)
else:
settings[SETTING_SAVE_DIR] = ""
if self.lastOpenDir and os.path.exists(self.lastOpenDir):
settings[SETTING_LAST_OPEN_DIR] = self.lastOpenDir
else:
settings[SETTING_LAST_OPEN_DIR] = ""
settings[SETTING_AUTO_SAVE] = self.autoSaving.isChecked()
settings[SETTING_DRAW_CORNER] = self.drawCorner.isChecked()
settings[SETTING_PAINT_LABEL] = self.paintLabelsOption.isChecked()
settings.save()
## User Dialogs ##
def loadRecent(self, filename):
if self.mayContinue():
self.loadFile(filename)
def scanAllImages(self, folderPath):
extensions = ['.%s' % fmt.data().decode("ascii").lower() for fmt in QImageReader.supportedImageFormats()]
images = []
for root, dirs, files in os.walk(folderPath):
for file in files:
if file.lower().endswith(tuple(extensions)):
relativePath = os.path.join(root, file)
path = ustr(os.path.abspath(relativePath))
images.append(path)
# TODO: ascii decode error in natsort
#images = natsort(images, key=lambda x: x.lower())
#images.sort(key= lambda a, b: lexicographical_compare(a,b) )
return images
def changeSavedirDialog(self, _value=False):
if self.defaultSaveDir is not None:
path = ustr(self.defaultSaveDir)
else:
path = '.'
dirpath = ustr(QFileDialog.getExistingDirectory(self,
'%s - Save annotations to the directory' % __appname__, path, QFileDialog.ShowDirsOnly
| QFileDialog.DontResolveSymlinks))
if dirpath is not None and len(dirpath) > 1:
self.defaultSaveDir = dirpath
imglist = self.scanAllImages(self.dirname)
self.fileModel.setStringList(imglist, self.dirname, self.defaultSaveDir)
            self.statusBar().showMessage('%s. Annotations will be saved to %s' %
                                         ('Changed save folder', self.defaultSaveDir))
self.statusBar().show()
def openAnnotationDialog(self, _value=False):
if self.filePath is None:
            self.statusBar().showMessage('Please select an image first')
self.statusBar().show()
return
path = os.path.dirname(ustr(self.filePath))\
if self.filePath else '.'
filters = "Open Annotation XML file (%s)" % ' '.join(['*.xml'])
        filename = ustr(QFileDialog.getOpenFileName(self, '%s - Choose an XML file' % __appname__, path, filters))
if filename:
if isinstance(filename, (tuple, list)):
filename = filename[0]
self.loadPascalXMLByFilename(filename)
def openDirDialog(self, _value=False, dirpath=None):
if not self.mayContinue():
return
defaultOpenDirPath = dirpath if dirpath else '.'
if self.lastOpenDir and os.path.exists(self.lastOpenDir):
defaultOpenDirPath = self.lastOpenDir
else:
defaultOpenDirPath = os.path.dirname(self.filePath) if self.filePath else '.'
targetDirPath = ustr(QFileDialog.getExistingDirectory(self,
'%s - Open Directory' % __appname__, defaultOpenDirPath,
QFileDialog.ShowDirsOnly | QFileDialog.DontResolveSymlinks))
self.importDirImages(targetDirPath)
def importDirImages(self, dirpath):
if not self.mayContinue() or not dirpath:
return
self.lastOpenDir = dirpath
self.dirname = dirpath
self.filePath = None
imglist = self.scanAllImages(dirpath)
self.fileModel.setStringList(imglist)
self.defaultSaveDir = dirpath
self.setWindowTitle(__appname__ + ' ' + self.dirname)
self.openNextImg()
def verifyImg(self, _value=False):
        # Proceed to the next image without a dialog if it has any label
if self.filePath is not None:
try:
self.labelFile.toggleVerify()
except AttributeError:
                # If the label file does not exist yet, create it and
                # re-save it with the verified attribute.
self.saveFile()
self.labelFile.toggleVerify()
self.fileModel.setData(self.filesm.currentIndex(), len(self.canvas.shapes), Qt.BackgroundRole)
self.canvas.verified = self.labelFile.verified
self.paintCanvas()
self.saveFile()
def openPrevImg(self, _value=False):
currIndex = self.filesm.currentIndex()
if currIndex.row() - 1 < 0:
return False
prevIndex = self.fileModel.index(currIndex.row() - 1)
self.filesm.setCurrentIndex(prevIndex, QItemSelectionModel.SelectCurrent)
return True
def openNextImg(self, _value=False):
currIndex = self.filesm.currentIndex()
if currIndex.row() + 1 >= self.fileModel.rowCount():
return False
nextIndex = self.fileModel.index(currIndex.row() + 1)
self.filesm.setCurrentIndex(nextIndex, QItemSelectionModel.SelectCurrent)
return True
def openFile(self, _value=False):
if not self.mayContinue():
return
path = os.path.dirname(ustr(self.filePath)) if self.filePath else '.'
formats = ['*.%s' % fmt.data().decode("ascii").lower() for fmt in QImageReader.supportedImageFormats()]
filters = "Image & Label files (%s)" % ' '.join(formats + ['*%s' % LabelFile.suffix])
filename = QFileDialog.getOpenFileName(self, '%s - Choose Image or Label file' % __appname__, path, filters)
if filename:
if isinstance(filename, (tuple, list)):
filename = filename[0]
self.loadFile(filename)
def saveFile(self, _value=False):
if self.defaultSaveDir is not None and len(ustr(self.defaultSaveDir)):
if self.filePath:
relname = os.path.relpath(self.filePath, self.dirname)
relname = os.path.splitext(relname)[0]
savedPath = os.path.join(ustr(self.defaultSaveDir), relname)
self._saveFile(savedPath)
else:
imgFileDir = os.path.dirname(self.filePath)
imgFileName = os.path.basename(self.filePath)
savedFileName = os.path.splitext(imgFileName)[0]
savedPath = os.path.join(imgFileDir, savedFileName)
self._saveFile(savedPath if self.labelFile
else self.saveFileDialog())
def saveFileAs(self, _value=False):
assert not self.image.isNull(), "cannot save empty image"
self._saveFile(self.saveFileDialog())
def saveFileDialog(self):
caption = '%s - Choose File' % __appname__
filters = 'File (*%s)' % LabelFile.suffix
openDialogPath = self.currentPath()
dlg = QFileDialog(self, caption, openDialogPath, filters)
dlg.setDefaultSuffix(LabelFile.suffix[1:])
dlg.setAcceptMode(QFileDialog.AcceptSave)
filenameWithoutExtension = os.path.splitext(self.filePath)[0]
dlg.selectFile(filenameWithoutExtension)
dlg.setOption(QFileDialog.DontUseNativeDialog, False)
if dlg.exec_():
fullFilePath = ustr(dlg.selectedFiles()[0])
return os.path.splitext(fullFilePath)[0] # Return file path without the extension.
return ''
def _saveFile(self, annotationFilePath):
if annotationFilePath and self.saveLabels(annotationFilePath):
self.setClean()
self.statusBar().showMessage('Saved to %s' % annotationFilePath)
self.statusBar().show()
def closeFile(self, _value=False):
if not self.mayContinue():
return
self.resetState()
self.setClean()
self.toggleActions(False)
self.canvas.setEnabled(False)
self.actions.saveAs.setEnabled(False)
def resetAll(self):
self.settings.reset()
self.close()
proc = QProcess()
proc.startDetached(os.path.abspath(__file__))
def mayContinue(self):
return not (self.dirty and not self.discardChangesDialog())
def discardChangesDialog(self):
yes, no = QMessageBox.Yes, QMessageBox.No
msg = u'You have unsaved changes, proceed anyway?'
return yes == QMessageBox.warning(self, u'Attention', msg, yes | no)
def errorMessage(self, title, message):
return QMessageBox.critical(self, title,
'<p><b>%s</b></p>%s' % (title, message))
def currentPath(self):
return os.path.dirname(self.filePath) if self.filePath else '.'
def deleteSelectedShape(self):
self.remLabel(self.canvas.deleteSelected())
self.setDirty()
if self.noShapes():
for action in self.actions.onShapesPresent:
action.setEnabled(False)
def copyShape(self):
self.canvas.endMove(copy=True)
self.addLabel(self.canvas.selectedShape)
self.setDirty()
def moveShape(self):
self.canvas.endMove(copy=False)
self.setDirty()
def loadPredefinedClasses(self, predefClassesFile):
        if os.path.exists(predefClassesFile):
with codecs.open(predefClassesFile, 'r', 'utf8') as f:
for line in f:
line = line.strip()
if self.labelHist is None:
self.labelHist = [line]
else:
self.labelHist.append(line)
def loadPascalXMLByFilename(self, xmlPath):
if self.filePath is None:
return None
        if not os.path.isfile(xmlPath):
return None
tVocParseReader = PascalVocReader(xmlPath)
shapes = tVocParseReader.getShapes()
self.loadLabels(shapes)
self.canvas.verified = tVocParseReader.verified
return tVocParseReader
def togglePaintLabelsOption(self):
paintLabelsOptionChecked = self.paintLabelsOption.isChecked()
for shape in self.canvas.shapes:
shape.paintLabel = paintLabelsOptionChecked
def inverted(color):
return QColor(*[255 - v for v in color.getRgb()])
def read(filename, default=None):
try:
with open(filename, 'rb') as f:
return f.read()
    except (IOError, OSError):
return default
def get_main_app(argv=[]):
"""
Standard boilerplate Qt application code.
Do everything but app.exec_() -- so that we can test the application in one thread
"""
app = QApplication(argv)
app.setApplicationName(__appname__)
app.setWindowIcon(newIcon("tag-black-shape.svg"))
# Usage : labelImg.py image predefClassFile saveDir
win = MainWindow(argv[1] if len(argv) >= 2 else None,
argv[2] if len(argv) >= 3 else os.path.join(
os.path.dirname(sys.argv[0]),
'data', 'predefined_classes.txt'),
argv[3] if len(argv) >= 4 else None)
win.show()
return app, win
def main():
'''construct main app and run it'''
app, _win = get_main_app(sys.argv)
return app.exec_()
if __name__ == '__main__':
sys.exit(main())
| 53,244 | 37.667393 | 144 |
py
|
labelImg2
|
labelImg2-master/__init__.py
| 0 | 0 | 0 |
py
|
|
labelImg2
|
labelImg2-master/libs/shape.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
try:
from PyQt5.QtGui import *
from PyQt5.QtCore import *
except ImportError:
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from libs.lib import distance
from libs.ustr import ustr
import sys
import math
DEFAULT_LINE_COLOR = QColor(0, 255, 0, 128)
DEFAULT_FILL_COLOR = QColor(255, 0, 0, 128)
DEFAULT_SELECT_LINE_COLOR = QColor(255, 255, 255)
DEFAULT_SELECT_FILL_COLOR = QColor(0, 128, 255, 155)
DEFAULT_VERTEX_FILL_COLOR = QColor(0, 255, 0, 255)
DEFAULT_HVERTEX_FILL_COLOR = QColor(255, 0, 0)
class Shape(object):
P_SQUARE, P_ROUND = range(2)
MOVE_VERTEX, NEAR_VERTEX = range(2)
# The following class variables influence the drawing
# of _all_ shape objects.
line_color = DEFAULT_LINE_COLOR
fill_color = DEFAULT_FILL_COLOR
select_line_color = DEFAULT_SELECT_LINE_COLOR
select_fill_color = DEFAULT_SELECT_FILL_COLOR
vertex_fill_color = DEFAULT_VERTEX_FILL_COLOR
hvertex_fill_color = DEFAULT_HVERTEX_FILL_COLOR
point_type = P_ROUND
point_size = 8
scale = 1.0
def __init__(self, label=None, line_color=None, difficult=False, paintLabel=False, extra_label=''):
self.label = label
self.points = []
self.fill = False
self.selected = False
self.difficult = difficult
self.paintLabel = paintLabel
self.extra_label = extra_label
self.direction = 0
self.center = None
self.isRotated = True
self.highlightCorner = False
self.alwaysShowCorner = False
self._highlightIndex = None
self._highlightMode = self.NEAR_VERTEX
self._highlightSettings = {
self.NEAR_VERTEX: (4, self.P_ROUND),
self.MOVE_VERTEX: (1.5, self.P_SQUARE),
}
self._closed = False
if line_color is not None:
# Override the class line_color attribute
# with an object attribute. Currently this
# is used for drawing the pending line a different color.
self.line_color = line_color
def rotate(self, theta):
for i, p in enumerate(self.points):
self.points[i] = self.rotatePoint(p, theta)
self.direction -= theta
self.direction = self.direction % (2 * math.pi)
        # self.direction is the clockwise angle between the y axis and the dotted center line
#print(self.direction * 180 / math.pi)
def rotatePoint(self, p, theta):
order = p - self.center
cosTheta = math.cos(theta)
sinTheta = math.sin(theta)
pResx = cosTheta * order.x() + sinTheta * order.y()
pResy = - sinTheta * order.x() + cosTheta * order.y()
pRes = QPointF(self.center.x() + pResx, self.center.y() + pResy)
return pRes
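    # Note (illustrative, not part of the original file): rotatePoint applies
    # the matrix [cos t, sin t; -sin t, cos t] to the offset (p - center) and
    # shifts back. Because the y axis points down in screen coordinates, a
    # positive theta appears as a counter-clockwise turn on screen; e.g. with
    # center=(0, 0) and p=(1, 0), theta=pi/2 maps p to approximately (0, -1).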
def close(self):
self.center = QPointF((self.points[0].x()+self.points[2].x()) / 2, (self.points[0].y()+self.points[2].y()) / 2)
self._closed = True
def reachMaxPoints(self):
if len(self.points) >= 4:
return True
return False
def addPoint(self, point):
if not self.reachMaxPoints():
self.points.append(point)
def popPoint(self):
if self.points:
return self.points.pop()
return None
def isClosed(self):
return self._closed
def setOpen(self):
self._closed = False
def paint(self, painter):
if self.points:
color = self.select_line_color if self.selected else self.line_color
pen = QPen(color)
# Try using integer sizes for smoother drawing(?)
pen.setWidth(max(1, int(round(2.0 / self.scale))))
#pen.setWidth(int(round(2.0/self.scale)))
painter.setPen(pen)
line_path = QPainterPath()
vrtx_path = QPainterPath()
line_path.moveTo(self.points[0])
# Uncommenting the following line will draw 2 paths
# for the 1st vertex, and make it non-filled, which
# may be desirable.
#self.drawVertex(vrtx_path, 0)
for i, p in enumerate(self.points):
line_path.lineTo(p)
self.drawVertex(vrtx_path, i)
if self.isClosed():
line_path.lineTo(self.points[0])
painter.drawPath(line_path)
if self.highlightCorner:
painter.drawPath(vrtx_path)
painter.fillPath(vrtx_path, self.vertex_fill_color)
# Draw text at the top-left
if self.paintLabel:
min_x = sys.maxsize
min_y = sys.maxsize
for point in self.points:
min_x = min(min_x, point.x())
min_y = min(min_y, point.y())
if min_x != sys.maxsize and min_y != sys.maxsize:
font = QFont()
                    font.setPointSize(int(20 / self.scale)) # TODO : max; setPointSize expects an int
font.setBold(False)
painter.setFont(font)
# TODO: optimize
if sys.version_info < (3, 0, 0) and isinstance(self.label, QVariant):
self.label = ustr(self.label.toPyObject())
                    if self.label is None:
self.label = ""
painter.setPen(QColor(255,0,0))
painter.drawText(min_x, min_y, self.extra_label)
painter.setPen(pen)
if self.fill:
color = self.select_fill_color if self.selected else self.fill_color
painter.fillPath(line_path, color)
if self.center is not None and self.isRotated:
edgemid = (self.points[0] + self.points[1]) / 2
center_path = QPainterPath()
center_path.moveTo(edgemid)
center_path.lineTo(self.center)
pen.setStyle(Qt.DotLine)
painter.setPen(pen)
painter.drawPath(center_path)
#d = self.point_size / self.scale
#center_path.addRect(self.center.x() - d / 2, self.center.y() - d / 2, d, d)
#painter.drawPath(center_path)
#painter.fillPath(center_path, self.vertex_fill_color)
self.highlightCorner = self.alwaysShowCorner
def paintNormalCenter(self, painter):
if self.center is not None:
            center_path = QPainterPath()
d = self.point_size / self.scale
center_path.addRect(self.center.x() - d / 2, self.center.y() - d / 2, d, d)
painter.drawPath(center_path)
if not self.isRotated:
painter.fillPath(center_path, QColor(0, 0, 0))
def drawVertex(self, path, i):
d = self.point_size / self.scale
shape = self.point_type
point = self.points[i]
if i == self._highlightIndex:
size, shape = self._highlightSettings[self._highlightMode]
d *= size
if self._highlightIndex is not None:
self.vertex_fill_color = self.hvertex_fill_color
else:
self.vertex_fill_color = Shape.vertex_fill_color
if shape == self.P_SQUARE:
path.addRect(point.x() - d / 2, point.y() - d / 2, d, d)
elif shape == self.P_ROUND:
path.addEllipse(point, d / 2.0, d / 2.0)
else:
assert False, "unsupported vertex shape"
def nearestVertex(self, point, epsilon):
for i, p in enumerate(self.points):
if distance(p - point) <= epsilon:
return i
return None
def containsPoint(self, point):
return self.makePath().contains(point)
def makePath(self):
path = QPainterPath(self.points[0])
for p in self.points[1:]:
path.lineTo(p)
return path
def boundingRect(self):
return self.makePath().boundingRect()
def moveBy(self, offset):
self.points = [p + offset for p in self.points]
def moveVertexBy(self, i, offset):
self.points[i] = self.points[i] + offset
def highlightVertex(self, i, action):
self._highlightIndex = i
self._highlightMode = action
def highlightClear(self):
self._highlightIndex = None
def copy(self):
shape = Shape("%s" % self.label)
shape.points = [p for p in self.points]
shape.center = self.center
shape.direction = self.direction
shape.isRotated = self.isRotated
shape.fill = self.fill
shape.selected = self.selected
shape._closed = self._closed
if self.line_color != Shape.line_color:
shape.line_color = self.line_color
if self.fill_color != Shape.fill_color:
shape.fill_color = self.fill_color
shape.difficult = self.difficult
shape.extra_label = self.extra_label
return shape
def __len__(self):
return len(self.points)
def __getitem__(self, key):
return self.points[key]
def __setitem__(self, key, value):
self.points[key] = value
| 9,215 | 32.512727 | 119 |
py
|
labelImg2
|
labelImg2-master/libs/zoomWidget.py
|
try:
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
except ImportError:
from PyQt4.QtGui import *
from PyQt4.QtCore import *
class ZoomWidget(QSpinBox):
def __init__(self, value=100):
super(ZoomWidget, self).__init__()
self.setButtonSymbols(QAbstractSpinBox.NoButtons)
self.setRange(1, 500)
self.setSuffix(' %')
self.setValue(value)
self.setToolTip(u'Zoom Level')
self.setStatusTip(self.toolTip())
self.setAlignment(Qt.AlignCenter)
self.setInputMethodHints(Qt.ImhDigitsOnly)
self.setAttribute(Qt.WA_InputMethodEnabled, False)
def minimumSizeHint(self):
height = super(ZoomWidget, self).minimumSizeHint().height()
fm = QFontMetrics(self.font())
width = fm.width(str(self.maximum()))
return QSize(width, height)
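    # Note: QFontMetrics.width() is deprecated in newer Qt 5 releases in
    # favour of horizontalAdvance(); it is presumably kept here for Qt4 /
    # older-Qt5 compatibility.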
| 899 | 29 | 67 |
py
|
labelImg2
|
labelImg2-master/libs/settings.py
|
#import json
import pickle
import os
import sys
class Settings(object):
def __init__(self):
        # By default, the settings file lives in the same folder as labelImg
self.data = {}
if sys.version_info < (3, 0, 0):
self.path = './labelImg2Settings2.pkl'
else:
self.path = './labelImg2Settings3.pkl'
def __setitem__(self, key, value):
self.data[key] = value
def __getitem__(self, key):
return self.data[key]
def get(self, key, default=None):
if key in self.data:
return self.data[key]
return default
def save(self):
if self.path:
with open(self.path, 'wb') as f:
pickle.dump(self.data, f, pickle.HIGHEST_PROTOCOL)
#json.dump(self.data, f)
return True
return False
def load(self):
if os.path.exists(self.path):
with open(self.path, 'rb') as f:
self.data = pickle.load(f)
#self.data = json.load(f)
return True
return False
def reset(self):
if os.path.exists(self.path):
os.remove(self.path)
            print('Removed settings pkl file {0}'.format(self.path))
self.data = {}
self.path = None
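# Usage sketch (illustrative only, not part of the original file):
#     s = Settings()
#     s.load()                 # False on first run, True once a pkl exists
#     s['recentFiles'] = []    # __setitem__ stores into s.data
#     s.save()                 # pickles s.data to s.path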
| 1,298 | 26.638298 | 69 |
py
|
labelImg2
|
labelImg2-master/libs/pascal_voc_io.py
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
import sys
from xml.etree import ElementTree
from xml.etree.ElementTree import Element, SubElement
from lxml import etree
import codecs
import math
XML_EXT = '.xml'
ENCODE_METHOD = 'utf-8'
class PascalVocWriter:
def __init__(self, foldername, filename, imgSize,databaseSrc='Unknown', localImgPath=None):
self.foldername = foldername
self.filename = filename
self.databaseSrc = databaseSrc
self.imgSize = imgSize
self.boxlist = []
self.roboxlist = []
self.localImgPath = localImgPath
self.verified = False
def prettify(self, elem):
"""
Return a pretty-printed XML string for the Element.
"""
rough_string = ElementTree.tostring(elem, 'utf8')
root = etree.fromstring(rough_string)
return etree.tostring(root, pretty_print=True, encoding=ENCODE_METHOD).replace(" ".encode(), "\t".encode())
# minidom does not support UTF-8
'''reparsed = minidom.parseString(rough_string)
return reparsed.toprettyxml(indent="\t", encoding=ENCODE_METHOD)'''
def genXML(self):
"""
Return XML root
"""
# Check conditions
if self.filename is None or \
self.foldername is None or \
self.imgSize is None:
return None
top = Element('annotation')
if self.verified:
top.set('verified', 'yes')
folder = SubElement(top, 'folder')
folder.text = self.foldername
filename = SubElement(top, 'filename')
filename.text = self.filename
if self.localImgPath is not None:
localImgPath = SubElement(top, 'path')
localImgPath.text = self.localImgPath
source = SubElement(top, 'source')
database = SubElement(source, 'database')
database.text = self.databaseSrc
size_part = SubElement(top, 'size')
width = SubElement(size_part, 'width')
height = SubElement(size_part, 'height')
depth = SubElement(size_part, 'depth')
width.text = str(self.imgSize[1])
height.text = str(self.imgSize[0])
if len(self.imgSize) == 3:
depth.text = str(self.imgSize[2])
else:
depth.text = '1'
segmented = SubElement(top, 'segmented')
segmented.text = '0'
return top
def addBndBox(self, xmin, ymin, xmax, ymax, name, difficult, extra):
bndbox = {'xmin': xmin, 'ymin': ymin, 'xmax': xmax, 'ymax': ymax}
bndbox['name'] = name
bndbox['difficult'] = difficult
bndbox['extra'] = extra
self.boxlist.append(bndbox)
def addRotatedBndBox(self, cx, cy, w, h, angle, name, difficult, extra):
robndbox = {'cx': cx, 'cy': cy, 'w': w, 'h': h, 'angle': angle}
robndbox['name'] = name
robndbox['difficult'] = difficult
robndbox['extra'] = extra
self.roboxlist.append(robndbox)
def appendObjects(self, top):
for each_object in self.boxlist:
object_item = SubElement(top, 'object')
name = SubElement(object_item, 'name')
try:
name.text = unicode(each_object['name'])
except NameError:
# Py3: NameError: name 'unicode' is not defined
name.text = each_object['name']
pose = SubElement(object_item, 'pose')
pose.text = "Unspecified"
truncated = SubElement(object_item, 'truncated')
if int(each_object['ymax']) == int(self.imgSize[0]) or (int(each_object['ymin'])== 1):
truncated.text = "1" # max == height or min
elif (int(each_object['xmax'])==int(self.imgSize[1])) or (int(each_object['xmin'])== 1):
truncated.text = "1" # max == width or min
else:
truncated.text = "0"
difficult = SubElement(object_item, 'difficult')
difficult.text = str( bool(each_object['difficult']) & 1 )
bndbox = SubElement(object_item, 'bndbox')
xmin = SubElement(bndbox, 'xmin')
xmin.text = str(each_object['xmin'])
ymin = SubElement(bndbox, 'ymin')
ymin.text = str(each_object['ymin'])
xmax = SubElement(bndbox, 'xmax')
xmax.text = str(each_object['xmax'])
ymax = SubElement(bndbox, 'ymax')
ymax.text = str(each_object['ymax'])
extra = SubElement(object_item, 'extra')
try:
extra.text = unicode(each_object['extra'])
except NameError:
                # Py3: NameError: name 'unicode' is not defined
extra.text = each_object['extra']
for each_object in self.roboxlist:
object_item = SubElement(top, 'object')
name = SubElement(object_item, 'name')
try:
name.text = unicode(each_object['name'])
except NameError:
# Py3: NameError: name 'unicode' is not defined
name.text = each_object['name']
pose = SubElement(object_item, 'pose')
pose.text = "Unspecified"
truncated = SubElement(object_item, 'truncated')
# if int(each_object['ymax']) == int(self.imgSize[0]) or (int(each_object['ymin'])== 1):
# truncated.text = "1" # max == height or min
# elif (int(each_object['xmax'])==int(self.imgSize[1])) or (int(each_object['xmin'])== 1):
# truncated.text = "1" # max == width or min
# else:
truncated.text = "0"
difficult = SubElement(object_item, 'difficult')
difficult.text = str( bool(each_object['difficult']) & 1 )
robndbox = SubElement(object_item, 'robndbox')
cx = SubElement(robndbox, 'cx')
cx.text = str(each_object['cx'])
cy = SubElement(robndbox, 'cy')
cy.text = str(each_object['cy'])
w = SubElement(robndbox, 'w')
w.text = str(each_object['w'])
h = SubElement(robndbox, 'h')
h.text = str(each_object['h'])
angle = SubElement(robndbox, 'angle')
angle.text = str(each_object['angle'])
extra = SubElement(object_item, 'extra')
try:
extra.text = unicode(each_object['extra'])
except NameError:
                # Py3: NameError: name 'unicode' is not defined
extra.text = each_object['extra']
def save(self, targetFile=None):
root = self.genXML()
self.appendObjects(root)
out_file = None
if targetFile is None:
out_file = codecs.open(
self.filename + XML_EXT, 'w', encoding=ENCODE_METHOD)
else:
out_file = codecs.open(targetFile, 'w', encoding=ENCODE_METHOD)
prettifyResult = self.prettify(root)
out_file.write(prettifyResult.decode('utf8'))
out_file.close()
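# Usage sketch for PascalVocWriter (illustrative; the file names and sizes
# below are made-up examples, not from the original repository):
#     writer = PascalVocWriter('imgs', 'cat.jpg', (480, 640, 3),
#                              localImgPath='imgs/cat.jpg')
#     writer.addBndBox(10, 20, 100, 200, 'cat', 0, '')
#     writer.addRotatedBndBox(55, 110, 90, 180, 0.3, 'cat', 0, '')
#     writer.save('imgs/cat.xml')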
class PascalVocReader:
def __init__(self, filepath):
# shapes type:
# [labbel, [(x1,y1), (x2,y2), (x3,y3), (x4,y4)], color, color, difficult]
self.shapes = []
self.width = 0
self.height = 0
self.depth = 0
self.filepath = filepath
self.verified = False
try:
self.parseXML()
        except Exception:
pass
def getShapes(self):
return self.shapes
def getSize(self):
return self.width, self.height, self.depth
def addShape(self, label, bndbox, difficult, extra=None):
        # float() instead of eval(): safer on untrusted XML and, unlike
        # eval(), accepts zero-padded values such as "07" under Python 3.
        xmin = int(float(bndbox.find('xmin').text))
        ymin = int(float(bndbox.find('ymin').text))
        xmax = int(float(bndbox.find('xmax').text))
        ymax = int(float(bndbox.find('ymax').text))
points = [(xmin, ymin), (xmax, ymin), (xmax, ymax), (xmin, ymax)]
if extra is not None:
self.shapes.append((label, points, None, None, difficult, extra))
else:
self.shapes.append((label, points, None, None, difficult))
def addRotatedShape(self, label, robndbox, difficult, extra=None):
cx = float(robndbox.find('cx').text)
cy = float(robndbox.find('cy').text)
w = float(robndbox.find('w').text)
h = float(robndbox.find('h').text)
angle = float(robndbox.find('angle').text)
p0x,p0y = self.rotatePoint(cx,cy, cx - w/2, cy - h/2, -angle)
p1x,p1y = self.rotatePoint(cx,cy, cx + w/2, cy - h/2, -angle)
p2x,p2y = self.rotatePoint(cx,cy, cx + w/2, cy + h/2, -angle)
p3x,p3y = self.rotatePoint(cx,cy, cx - w/2, cy + h/2, -angle)
points = [(p0x, p0y), (p1x, p1y), (p2x, p2y), (p3x, p3y)]
if extra is not None:
self.shapes.append((label, points, None, None, difficult, True, angle, extra))
else:
self.shapes.append((label, points, None, None, difficult, True, angle))
def rotatePoint(self, xc,yc, xp,yp, theta):
xoff = xp-xc
yoff = yp-yc
cosTheta = math.cos(theta)
sinTheta = math.sin(theta)
pResx = cosTheta * xoff + sinTheta * yoff
pResy = - sinTheta * xoff + cosTheta * yoff
# pRes = (xc + pResx, yc + pResy)
return xc+pResx,yc+pResy
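    # Note: rotatePoint mirrors Shape.rotatePoint in libs/shape.py, rotating
    # (xp, yp) about (xc, yc) by theta in the same screen-coordinate
    # convention; addRotatedShape passes -angle to map the axis-aligned
    # corners of the (cx, cy, w, h) box back to their on-screen positions.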
def parseXML(self):
        assert self.filepath.endswith(XML_EXT), "Unsupported file format"
parser = etree.XMLParser(encoding=ENCODE_METHOD)
xmltree = ElementTree.parse(self.filepath, parser=parser).getroot()
filename = xmltree.find('filename').text
try:
verified = xmltree.attrib['verified']
if verified == 'yes':
self.verified = True
except KeyError:
self.verified = False
sizetag = xmltree.find('size')
widthtag = sizetag.find('width')
heighttag = sizetag.find('height')
depthtag = sizetag.find('depth')
        # int() instead of eval(): the VOC size fields are plain integers.
        self.width = int(widthtag.text)
        self.height = int(heighttag.text)
        self.depth = int(depthtag.text)
for object_iter in xmltree.findall('object'):
bndbox = object_iter.find("bndbox")
if bndbox is None:
robndbox = object_iter.find('robndbox')
label = object_iter.find('name').text
# Add chris
difficult = False
if object_iter.find('difficult') is not None:
difficult = bool(int(object_iter.find('difficult').text))
extra = None
if object_iter.find('extra') is not None:
extra = object_iter.find('extra').text
if bndbox is None:
self.addRotatedShape(label, robndbox, difficult, extra)
else:
self.addShape(label, bndbox, difficult, extra)
return True
| 10,867 | 37.676157 | 116 |
py
|
labelImg2
|
labelImg2-master/libs/canvas.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
try:
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
except ImportError:
from PyQt4.QtGui import *
from PyQt4.QtCore import *
#from PyQt4.QtOpenGL import *
from .shape import Shape
from .lib import distance
import math
CURSOR_DEFAULT = Qt.ArrowCursor
CURSOR_POINT = Qt.PointingHandCursor
CURSOR_DRAW = Qt.CrossCursor
CURSOR_MOVE = Qt.ClosedHandCursor
CURSOR_GRAB = Qt.OpenHandCursor
class Canvas(QWidget):
zoomRequest = pyqtSignal(int)
scrollRequest = pyqtSignal(int, int)
newShape = pyqtSignal(bool)
selectionChanged = pyqtSignal(bool)
shapeMoved = pyqtSignal()
drawingPolygon = pyqtSignal(bool)
hideRRect = pyqtSignal(bool)
hideNRect = pyqtSignal(bool)
status = pyqtSignal(str)
cancelDraw = pyqtSignal()
toggleEdit = pyqtSignal(bool)
#CREATE, EDIT = list(range(2))
CREATE = 0
EDIT = 1
CONTINUECREATE = 2
epsilon = 7.0
def __init__(self, *args, **kwargs):
super(Canvas, self).__init__(*args, **kwargs)
# Initialise local state.
self.mode = self.EDIT
self.shapes = []
self.current = None
self.selectedShape = None # save the selected shape here
self.selectedShapeCopy = None
self.drawingLineColor = QColor(0, 0, 255)
self.drawingRectColor = QColor(0, 0, 255)
self.line = Shape(line_color=self.drawingLineColor)
self.prevPoint = QPointF()
self.offsets = QPointF(), QPointF()
self.scale = 1.0
self.pixmap = QPixmap()
#self.localScalePixmap = QPixmap()
self.visible = {}
self._hideBackround = False
self.hideBackround = False
self.hShape = None
self.hVertex = None
self._painter = QPainter()
self._cursor = CURSOR_DEFAULT
# Menus:
self.menus = (QMenu(), QMenu())
# Set widget options.
self.setMouseTracking(True)
self.setFocusPolicy(Qt.WheelFocus)
self.verified = False
self.canDrawRotatedRect = True
self.hideRotated = False
self.hideNormal = False
self.canOutOfBounding = False
self.showCenter = False
#self.setAttribute(Qt.WA_PaintOnScreen)
def setDrawingColor(self, qColor):
self.drawingLineColor = qColor
self.drawingRectColor = qColor
def enterEvent(self, ev):
self.overrideCursor(self._cursor)
def leaveEvent(self, ev):
self.restoreCursor()
def focusOutEvent(self, ev):
self.restoreCursor()
def isVisible(self, shape):
return self.visible.get(shape, True)
def drawing(self):
return self.mode == self.CREATE
def continueDrawing(self):
return self.mode == self.CONTINUECREATE
def editing(self):
return self.mode == self.EDIT
def setDrawCornerState(self, enabled):
for shape in reversed([s for s in self.shapes if self.isVisible(s)]):
shape.alwaysShowCorner=enabled
self.repaint()
self.update()
def setEditing(self, value=1):
self.mode = value
if value == self.CREATE or value == self.CONTINUECREATE: # Create
self.unHighlight()
self.deSelectShape()
self.prevPoint = QPointF()
self.repaint()
def unHighlight(self):
if self.hShape:
self.hShape.highlightClear()
self.hVertex = self.hShape = None
def selectedVertex(self):
return self.hVertex is not None
# reserve function
def updateLocalScaleMap(self, x, y):
pass
#if self.pixmap is None:
# return
#rz = 15
#x0 = int(x) - rz
#x1 = int(x) + rz
#y0 = int(y) - rz
#y1 = int(y) + rz
#self.localScalePixmap = self.pixmap.copy(x0, y0, x1 - x0 + 1, y1 - y0 + 1) #self.grab(QRect(x0, y0, x1 - x0 + 1, y1 - y0 + 1))
##self.localScalePixmap.grabWidget(self, pos.x(), pos.y(), 30, 30) # TODO: pyQt4
#w = self.localScalePixmap.width()
#h = self.localScalePixmap.height()
#self.localScalePixmap = self.localScalePixmap.scaled(w * 5, h * 5, Qt.KeepAspectRatio)
def mouseMoveEvent(self, ev):
"""Update line with last point and current coordinates."""
pos = self.transformPos(ev.pos())
# Update coordinates in status bar if image is opened
window = self.parent().window()
if window.filePath is not None:
self.parent().window().labelCoordinates.setText(
'X: %d; Y: %d' % (pos.x(), pos.y()))
# Polygon drawing.
if self.drawing():
self.overrideCursor(CURSOR_DRAW)
if self.current:
color = self.drawingLineColor
if self.outOfPixmap(pos):
# Don't allow the user to draw outside the pixmap.
# Project the point to the pixmap's edges.
pos = self.intersectionPoint(self.current[-1], pos)
elif len(self.current) > 1 and self.closeEnough(pos, self.current[0]):
# Attract line to starting point and colorise to alert the
# user:
pos = self.current[0]
color = self.current.line_color
self.overrideCursor(CURSOR_POINT)
self.current.highlightVertex(0, Shape.NEAR_VERTEX)
self.line[1] = pos
self.line.line_color = color
self.prevPoint = QPointF()
self.current.highlightClear()
else:
self.prevPoint = pos
self.updateLocalScaleMap(pos.x(), pos.y())
self.repaint()
return
if self.continueDrawing():
self.prevPoint = pos
self.repaint()
return
# Polygon copy moving.
if Qt.RightButton & ev.buttons():
#if self.selectedShapeCopy and self.prevPoint:
# self.overrideCursor(CURSOR_MOVE)
# self.boundedMoveShape(self.selectedShapeCopy, pos)
# self.repaint()
#elif self.selectedShape:
# self.selectedShapeCopy = self.selectedShape.copy()
# self.repaint()
if self.selectedVertex() and self.selectedShape.isRotated:
self.boundedRotateShape(pos)
self.shapeMoved.emit()
self.selectedShape.highlightCorner = True
self.repaint()
self.status.emit("(%d,%d)." % (pos.x(), pos.y()))
return
# Polygon/Vertex moving.
if Qt.LeftButton & ev.buttons():
if self.selectedVertex():
self.boundedMoveVertex(pos)
self.shapeMoved.emit()
if self.selectedShape:
self.selectedShape.highlightCorner = True
elif self.selectedShape and self.prevPoint:
self.overrideCursor(CURSOR_MOVE)
self.boundedMoveShape(self.selectedShape, pos)
self.shapeMoved.emit()
self.updateLocalScaleMap(pos.x(), pos.y())
self.repaint()
return
        # Just hovering over the canvas, 2 possibilities:
# - Highlight shapes
# - Highlight vertex
# Update shape/vertex fill and tooltip value accordingly.
self.setToolTip("Background")
for shape in reversed([s for s in self.shapes if self.isVisible(s)]):
# Look for a nearby vertex to highlight. If that fails,
# check if we happen to be inside a shape.
index = shape.nearestVertex(pos, self.epsilon / self.scale if self.scale > 1 else self.epsilon)
if index is not None:
if self.selectedVertex():
self.hShape.highlightClear()
self.hVertex, self.hShape = index, shape
shape.highlightCorner = True
shape.highlightVertex(index, shape.MOVE_VERTEX)
self.overrideCursor(CURSOR_POINT)
self.setToolTip("Click & drag to move point")
#self.setStatusTip(self.toolTip())
self.updateLocalScaleMap(pos.x(), pos.y())
self.update()
break
elif shape.containsPoint(pos):
if self.selectedVertex():
self.hShape.highlightClear()
self.hVertex, self.hShape = None, shape
shape.highlightCorner = True
# TODO: optimize here
self.setToolTip(
"%s\n X: (%f, %f)\nY: (%f, %f)" % (shape.label, shape.points[0].x(), shape.points[2].x(), shape.points[0].y(), shape.points[2].y()) )
#self.setStatusTip(self.toolTip())
self.overrideCursor(CURSOR_GRAB)
self.updateLocalScaleMap(pos.x(), pos.y())
self.update()
break
else: # Nothing found, clear highlights, reset state.
if self.hShape:
self.hShape.highlightClear()
#self.hShape.highlightCorner=False
self.updateLocalScaleMap(pos.x(), pos.y())
self.update()
else:
self.updateLocalScaleMap(pos.x(), pos.y())
self.repaint()
self.hVertex, self.hShape = None, None
self.overrideCursor(CURSOR_DEFAULT)
def mousePressEvent(self, ev):
pos = self.transformPos(ev.pos())
if ev.button() == Qt.LeftButton:
if self.drawing():
self.handleDrawing(pos)
if self.continueDrawing():
pass
else:
self.selectShapePoint(pos)
self.prevPoint = pos
self.repaint()
elif ev.button() == Qt.RightButton and self.editing():
self.selectShapePoint(pos)
self.prevPoint = pos
self.repaint()
def mouseReleaseEvent(self, ev):
if ev.button() == Qt.RightButton:
if self.selectedVertex() and self.selectedShape.isRotated:
return
menu = self.menus[bool(self.selectedShapeCopy)]
self.restoreCursor()
menu.exec_(self.mapToGlobal(ev.pos()))
#if not menu.exec_(self.mapToGlobal(ev.pos())) and self.selectedShapeCopy:
# # Cancel the move by deleting the shadow copy.
# self.selectedShapeCopy = None
# self.repaint()
elif ev.button() == Qt.LeftButton and self.selectedShape and self.editing():
if self.selectedVertex():
self.overrideCursor(CURSOR_POINT)
else:
self.overrideCursor(CURSOR_GRAB)
elif ev.button() == Qt.LeftButton:
pos = self.transformPos(ev.pos())
if self.drawing():
self.handleDrawing(pos)
if self.continueDrawing():
self.handleClickDrawing(pos)
def endMove(self, copy=False):
assert self.selectedShape and self.selectedShapeCopy
shape = self.selectedShapeCopy
#del shape.fill_color
#del shape.line_color
if copy:
self.shapes.append(shape)
self.selectedShape.selected = False
self.selectedShape = shape
self.repaint()
else:
self.selectedShape.points = [p for p in shape.points]
self.selectedShapeCopy = None
def hideBackroundShapes(self, value):
self.hideBackround = value
if self.selectedShape:
# Only hide other shapes if there is a current selection.
# Otherwise the user will not be able to select a shape.
self.setHiding(True)
self.repaint()
def handleDrawing(self, pos):
if self.current and self.current.reachMaxPoints() is False:
self.current.highlightCorner=True
initPos = self.current[0]
minX = initPos.x()
minY = initPos.y()
targetPos = self.line[1]
maxX = targetPos.x()
maxY = targetPos.y()
self.current.addPoint(QPointF(maxX, minY))
self.current.addPoint(targetPos)
self.current.addPoint(QPointF(minX, maxY))
self.finalise()
elif not self.outOfPixmap(pos):
self.current = Shape()
self.current.highlightCorner=True
self.current.addPoint(pos)
self.line.points = [pos, pos]
self.setHiding()
self.drawingPolygon.emit(True)
self.update()
def handleClickDrawing(self, pos):
if not self.outOfPixmap(pos):
self.current = Shape()
self.current.highlightCorner=True
minX = pos.x() - 30
maxX = pos.x() + 30
minY = pos.y() - 38
maxY = pos.y() + 38
self.current.addPoint(QPointF(minX, minY))
self.current.addPoint(QPointF(maxX, minY))
self.current.addPoint(QPointF(maxX, maxY))
self.current.addPoint(QPointF(minX, maxY))
self.finalise(continous=True)
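    # Note: the box above is a hard-coded 60x76 default (pos +/- 30 in x,
    # +/- 38 in y) for click-to-create mode; the shape can be resized later.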
def setHiding(self, enable=True):
self._hideBackround = self.hideBackround if enable else False
def canCloseShape(self):
return self.drawing() and self.current and len(self.current) > 2
def mouseDoubleClickEvent(self, ev):
# We need at least 4 points here, since the mousePress handler
# adds an extra one before this handler is called.
if self.canCloseShape() and len(self.current) > 3:
self.current.popPoint()
self.finalise()
def selectShape(self, shape):
self.deSelectShape()
shape.selected = True
self.selectedShape = shape
self.setHiding()
self.selectionChanged.emit(True)
self.update()
def selectShapePoint(self, point):
"""Select the first shape created which contains this point."""
self.deSelectShape()
if self.selectedVertex(): # A vertex is marked for selection.
index, shape = self.hVertex, self.hShape
shape.highlightVertex(index, shape.MOVE_VERTEX)
self.selectShape(shape)
return
for shape in reversed(self.shapes):
if self.isVisible(shape) and shape.containsPoint(point):
self.selectShape(shape)
self.calculateOffsets(shape, point)
return
def calculateOffsets(self, shape, point):
rect = shape.boundingRect()
x1 = rect.x() - point.x()
y1 = rect.y() - point.y()
x2 = (rect.x() + rect.width()) - point.x()
y2 = (rect.y() + rect.height()) - point.y()
self.offsets = QPointF(x1, y1), QPointF(x2, y2)
def boundedMoveVertex(self, pos):
index, shape = self.hVertex, self.hShape
point = shape[index]
if not self.canOutOfBounding and self.outOfPixmap(pos):
return
# pos = self.intersectionPoint(point, pos)
sindex = (index + 2) % 4
p2,p3,p4 = self.getAdjointPoints(shape.direction, shape[sindex], pos, index)
pcenter = (pos+p3)/2
if self.canOutOfBounding and self.outOfPixmap(pcenter):
return
        # if any pixel is out of the map, do nothing
if not self.canOutOfBounding and (self.outOfPixmap(p2) or
self.outOfPixmap(p3) or
self.outOfPixmap(p4)):
return
shiftPos = pos - point
shape.moveVertexBy(index, shiftPos)
lindex = (index + 1) % 4
rindex = (index + 3) % 4
shape[lindex] = p2
# shape[sindex] = p3
shape[rindex] = p4
shape.close()
# lshift = None
# rshift = None
# if index % 2 == 0:
# rshift = QPointF(shiftPos.x(), 0)
# lshift = QPointF(0, shiftPos.y())
# else:
# lshift = QPointF(shiftPos.x(), 0)
# rshift = QPointF(0, shiftPos.y())
# shape.moveVertexBy(rindex, rshift)
# shape.moveVertexBy(lindex, lshift)
def getAdjointPoints(self, theta, p3, p1, index):
        # p3 is the corner opposite the dragged corner p1, i.e. p3 = 2*center - p1
a1 = math.tan(theta)
        if a1 == 0:
if index % 2 == 0:
p2 = QPointF(p3.x(), p1.y())
p4 = QPointF(p1.x(), p3.y())
else:
p4 = QPointF(p3.x(), p1.y())
p2 = QPointF(p1.x(), p3.y())
else:
a3 = a1
a2 = - 1/a1
a4 = - 1/a1
b1 = p1.y() - a1 * p1.x()
b2 = p1.y() - a2 * p1.x()
b3 = p3.y() - a1 * p3.x()
b4 = p3.y() - a2 * p3.x()
if index % 2 == 0:
p2 = self.getCrossPoint(a1,b1,a4,b4)
p4 = self.getCrossPoint(a2,b2,a3,b3)
else:
p4 = self.getCrossPoint(a1,b1,a4,b4)
p2 = self.getCrossPoint(a2,b2,a3,b3)
return p2,p3,p4
def getCrossPoint(self,a1,b1,a2,b2):
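        # intersection of the lines y = a1*x + b1 and y = a2*x + b2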
x = (b2-b1)/(a1-a2)
y = (a1*b2 - a2*b1)/(a1-a2)
return QPointF(x,y)
def boundedRotateShape(self, pos):
# print("Rotate Shape2")
# judge if some vertex is out of pixma
index, shape = self.hVertex, self.hShape
point = shape[index]
angle = self.getAngle(shape.center ,pos,point)
# for i, p in enumerate(shape.points):
# if self.outOfPixmap(shape.rotatePoint(p,angle)):
# # print("out of pixmap")
# return
if not self.rotateOutOfBound(angle):
shape.rotate(angle)
self.prevPoint = pos
def getAngle(self, center, p1, p2):
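        # signed angle between the vectors center->p1 and center->p2:
        # magnitude from acos of the normalized dot product, sign from the
        # z-component of the cross product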
dx1 = p1.x() - center.x()
dy1 = p1.y() - center.y()
dx2 = p2.x() - center.x()
dy2 = p2.y() - center.y()
c = math.sqrt(dx1*dx1 + dy1*dy1) * math.sqrt(dx2*dx2 + dy2*dy2)
if c == 0: return 0
y = (dx1*dx2+dy1*dy2)/c
if y>1: return 0
angle = math.acos(y)
if (dx1*dy2-dx2*dy1)>0:
return angle
else:
return -angle
def boundedMoveShape(self, shape, pos):
if shape.isRotated and self.canOutOfBounding:
c = shape.center
dp = pos - self.prevPoint
dc = c + dp
if dc.x() < 0:
dp -= QPointF(min(0,dc.x()), 0)
if dc.y() < 0:
dp -= QPointF(0, min(0,dc.y()))
if dc.x() >= self.pixmap.width():
dp += QPointF(min(0, self.pixmap.width() - 1 - dc.x()), 0) # TODO
if dc.y() >= self.pixmap.height():
dp += QPointF(0, min(0, self.pixmap.height() - 1 - dc.y())) # TODO
else:
if self.outOfPixmap(pos):
return False # No need to move
o1 = pos + self.offsets[0]
if self.outOfPixmap(o1):
pos -= QPointF(min(0, o1.x()), min(0, o1.y()))
o2 = pos + self.offsets[1]
if self.outOfPixmap(o2):
pos += QPointF(min(0, self.pixmap.width() - 1 - o2.x()),
min(0, self.pixmap.height() - 1 - o2.y()))
dp = pos - self.prevPoint
# The next line tracks the new position of the cursor
# relative to the shape, but also results in making it
# a bit "shaky" when nearing the border and allows it to
# go outside of the shape's area for some reason. XXX
#self.calculateOffsets(self.selectedShape, pos)
if dp:
shape.moveBy(dp)
self.prevPoint = pos
shape.close()
return True
return False
def boundedMoveShape2(self, shape, pos):
if self.outOfPixmap(pos):
return False # No need to move
o1 = pos + self.offsets[0]
if self.outOfPixmap(o1):
pos -= QPointF(min(0, o1.x()), min(0, o1.y()))
o2 = pos + self.offsets[1]
if self.outOfPixmap(o2):
pos += QPointF(min(0, self.pixmap.width() - o2.x()),
min(0, self.pixmap.height() - o2.y()))
# The next line tracks the new position of the cursor
# relative to the shape, but also results in making it
# a bit "shaky" when nearing the border and allows it to
# go outside of the shape's area for some reason. XXX
#self.calculateOffsets(self.selectedShape, pos)
dp = pos - self.prevPoint
if dp:
shape.moveBy(dp)
self.prevPoint = pos
shape.close()
return True
return False
def deSelectShape(self):
if self.selectedShape:
self.selectedShape.selected = False
self.selectedShape = None
self.setHiding(False)
self.selectionChanged.emit(False)
self.update()
def deleteSelected(self):
if self.selectedShape:
shape = self.selectedShape
self.shapes.remove(self.selectedShape)
self.selectedShape = None
self.update()
return shape
def copySelectedShape(self):
if self.selectedShape:
shape = self.selectedShape.copy()
self.deSelectShape()
self.shapes.append(shape)
shape.selected = True
self.selectedShape = shape
self.boundedShiftShape(shape)
return shape
def boundedShiftShape(self, shape):
# Try to move in one direction, and if it fails in another.
# Give up if both fail.
point = shape[0]
offset = QPointF(2.0, 2.0)
self.calculateOffsets(shape, point)
self.prevPoint = point
if not self.boundedMoveShape(shape, point - offset):
self.boundedMoveShape(shape, point + offset)
def paintEvent(self, event):
if not self.pixmap:
return super(Canvas, self).paintEvent(event)
p = self._painter
#ur = event.rect()
#tmppix = QPixmap(ur.size())
#p = QPainter(tmppix)
#p.translate(-ur.x(), -ur.y())
##p.begin(self)
p.begin(self)
#p.setRenderHint(QPainter.Antialiasing)
p.setRenderHint(QPainter.HighQualityAntialiasing)
if self.scale < 1.0:
p.setRenderHint(QPainter.SmoothPixmapTransform)
p.scale(self.scale, self.scale)
p.translate(self.offsetToCenter())
p.drawPixmap(0, 0, self.pixmap)
Shape.scale = self.scale
for shape in self.shapes:
if (shape.selected or not self._hideBackround) and self.isVisible(shape):
if (shape.isRotated and not self.hideRotated) or (not shape.isRotated and not self.hideNormal):
shape.fill = shape.selected or shape == self.hShape
shape.paint(p)
elif self.showCenter:
shape.fill = shape.selected or shape == self.hShape
shape.paintNormalCenter(p) #shape.paint(p)
if self.current:
self.current.paint(p)
self.line.paint(p)
if self.selectedShapeCopy:
self.selectedShapeCopy.paint(p)
# Paint rect
if self.current is not None and len(self.line) == 2:
leftTop = self.line[0]
rightBottom = self.line[1]
rectWidth = rightBottom.x() - leftTop.x()
rectHeight = rightBottom.y() - leftTop.y()
p.setPen(self.drawingRectColor)
brush = QBrush(Qt.BDiagPattern)
p.setBrush(brush)
p.drawRect(leftTop.x(), leftTop.y(), rectWidth, rectHeight)
if (self.drawing() or self.continueDrawing()) and not self.prevPoint.isNull() and not self.outOfPixmap(self.prevPoint):
oldmode = p.compositionMode()
p.setCompositionMode(QPainter.RasterOp_SourceXorDestination)
p.setPen(QPen(QColor(255,255,255), 1/self.scale)) # TODO : limit pen width
p.drawLine(self.prevPoint.x(), 0, self.prevPoint.x(), self.pixmap.height())
p.drawLine(0, self.prevPoint.y(), self.pixmap.width(), self.prevPoint.y())
p.setCompositionMode(oldmode)
self.setAutoFillBackground(True)
if self.verified:
pal = self.palette()
pal.setColor(self.backgroundRole(), QColor(184, 239, 38, 128))
self.setPalette(pal)
else:
pal = self.palette()
pal.setColor(self.backgroundRole(), QColor(232, 232, 232, 255))
self.setPalette(pal)
#p.translate(-self.offsetToCenter())
#p.scale(1/self.scale, 1/self.scale)
#if self.localScalePixmap is not None:
# p0 = QPoint(0, 0)
# p1 = self.mapFromParent(p0)
# if p1.x() > 0:
# p0.setX(p1.x())
# if p1.y() > 0:
# p0.setY(p1.y())
# p.drawPixmap(p0.x(), p0.y(), self.localScalePixmap)
p.end()
#pp = self._painter
#pp.begin(self)
#pp.drawPixmap(0,0,tmppix)
#pp.end()
def transformPos(self, point):
"""Convert from widget-logical coordinates to painter-logical coordinates."""
return point / self.scale - self.offsetToCenter()
def offsetToCenter(self):
s = self.scale
area = super(Canvas, self).size()
w, h = self.pixmap.width() * s, self.pixmap.height() * s
aw, ah = area.width(), area.height()
x = (aw - w) / (2 * s) if aw > w else 0
y = (ah - h) / (2 * s) if ah > h else 0
return QPointF(x, y)
def outOfPixmap(self, p):
w, h = self.pixmap.width(), self.pixmap.height()
return not (0 <= p.x() <= w and 0 <= p.y() <= h)
def finalise(self, continous=False):
if self.current is None:
return
if self.current.points[0] == self.current.points[-1]:
self.current = None
self.drawingPolygon.emit(False)
self.update()
return
self.current.isRotated = self.canDrawRotatedRect
self.current.close()
self.shapes.append(self.current)
self.current = None
self.setHiding(False)
self.newShape.emit(continous) # TODO:
self.update()
def closeEnough(self, p1, p2):
#d = distance(p1 - p2)
#m = (p1-p2).manhattanLength()
# print "d %.2f, m %d, %.2f" % (d, m, d - m)
return distance(p1 - p2) < self.epsilon
def intersectionPoint(self, p1, p2):
# Cycle through each image edge in clockwise fashion,
# and find the one intersecting the current line segment.
# http://paulbourke.net/geometry/lineline2d/
size = self.pixmap.size()
points = [(0, 0),
(size.width(), 0),
(size.width(), size.height()),
(0, size.height())]
x1, y1 = p1.x(), p1.y()
x2, y2 = p2.x(), p2.y()
d, i, (x, y) = min(self.intersectingEdges((x1, y1), (x2, y2), points))
x3, y3 = points[i]
x4, y4 = points[(i + 1) % 4]
if (x, y) == (x1, y1):
# Handle cases where previous point is on one of the edges.
if x3 == x4:
return QPointF(x3, min(max(0, y2), max(y3, y4)))
else: # y3 == y4
return QPointF(min(max(0, x2), max(x3, x4)), y3)
return QPointF(x, y)
def intersectingEdges(self, x1y1, x2y2, points):
"""For each edge formed by `points', yield the intersection
with the line segment `(x1,y1) - (x2,y2)`, if it exists.
Also return the distance of `(x2,y2)' to the middle of the
edge along with its index, so that the one closest can be chosen."""
x1, y1 = x1y1
x2, y2 = x2y2
for i in range(4):
x3, y3 = points[i]
x4, y4 = points[(i + 1) % 4]
denom = (y4 - y3) * (x2 - x1) - (x4 - x3) * (y2 - y1)
nua = (x4 - x3) * (y1 - y3) - (y4 - y3) * (x1 - x3)
nub = (x2 - x1) * (y1 - y3) - (y2 - y1) * (x1 - x3)
if denom == 0:
# This covers two cases:
# nua == nub == 0: Coincident
# otherwise: Parallel
continue
ua, ub = nua / denom, nub / denom
if 0 <= ua <= 1 and 0 <= ub <= 1:
x = x1 + ua * (x2 - x1)
y = y1 + ua * (y2 - y1)
m = QPointF((x3 + x4) / 2, (y3 + y4) / 2)
d = distance(m - QPointF(x2, y2))
yield d, i, (x, y)
# These two, along with a call to adjustSize are required for the
# scroll area.
def sizeHint(self):
return self.minimumSizeHint()
def minimumSizeHint(self):
if self.pixmap:
return self.scale * self.pixmap.size()
return super(Canvas, self).minimumSizeHint()
def wheelEvent(self, ev):
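        # PyQt4 wheel events expose delta()/orientation(); PyQt5 exposes angleDelta()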
qt_version = 4 if hasattr(ev, "delta") else 5
if qt_version == 4:
if ev.orientation() == Qt.Vertical:
v_delta = ev.delta()
h_delta = 0
else:
h_delta = ev.delta()
v_delta = 0
else:
delta = ev.angleDelta()
h_delta = delta.x()
v_delta = delta.y()
mods = ev.modifiers()
if Qt.ControlModifier == int(mods) and v_delta:
self.zoomRequest.emit(v_delta)
else:
v_delta and self.scrollRequest.emit(v_delta, Qt.Vertical)
h_delta and self.scrollRequest.emit(h_delta, Qt.Horizontal)
ev.accept()
def keyPressEvent(self, ev):
key = ev.key()
if key == Qt.Key_Escape and self.current:
self.current = None
self.drawingPolygon.emit(False)
self.update()
elif key == Qt.Key_Escape and self.current is None:
if self.drawing() or self.continueDrawing():
self.cancelDraw.emit()
self.finalise()
elif key == Qt.Key_Return or key == Qt.Key_Enter:
if self.canCloseShape():
self.finalise()
if self.selectedShape:
self.toggleEdit.emit(True)
else:
if len(self.shapes) > 0:
self.selectShape(self.shapes[0])
elif key == Qt.Key_Left and self.selectedShape:
self.moveOnePixel('Left')
elif key == Qt.Key_Right and self.selectedShape:
self.moveOnePixel('Right')
elif key == Qt.Key_Up and self.selectedShape:
self.moveOnePixel('Up')
elif key == Qt.Key_Down and self.selectedShape:
self.moveOnePixel('Down')
elif key == Qt.Key_Z and self.selectedShape and\
self.selectedShape.isRotated and not self.rotateOutOfBound(0.1):
self.selectedShape.rotate(0.1)
self.shapeMoved.emit()
self.update()
elif key == Qt.Key_X and self.selectedShape and\
self.selectedShape.isRotated and not self.rotateOutOfBound(0.01):
self.selectedShape.rotate(0.01)
self.shapeMoved.emit()
self.update()
elif key == Qt.Key_C and self.selectedShape and\
self.selectedShape.isRotated and not self.rotateOutOfBound(-0.01):
self.selectedShape.rotate(-0.01)
self.shapeMoved.emit()
self.update()
elif key == Qt.Key_V and self.selectedShape and\
self.selectedShape.isRotated and not self.rotateOutOfBound(-0.1):
self.selectedShape.rotate(-0.1)
self.shapeMoved.emit()
self.update()
elif key == Qt.Key_R:
self.hideRotated = not self.hideRotated
self.hideRRect.emit(self.hideRotated)
self.update()
elif key == Qt.Key_N:
self.hideNormal = not self.hideNormal
self.hideNRect.emit(self.hideNormal)
self.update()
elif key == Qt.Key_O:
self.canOutOfBounding = not self.canOutOfBounding
elif key == Qt.Key_B:
self.showCenter = not self.showCenter
self.update()
def rotateOutOfBound(self, angle):
if self.canOutOfBounding:
return False
for i, p in enumerate(self.selectedShape.points):
if self.outOfPixmap(self.selectedShape.rotatePoint(p,angle)):
return True
return False
def moveOnePixel(self, direction):
# print(self.selectedShape.points)
if direction == 'Left' and not self.moveOutOfBound(QPointF(-1.0, 0)):
# print("move Left one pixel")
self.selectedShape.points[0] += QPointF(-1.0, 0)
self.selectedShape.points[1] += QPointF(-1.0, 0)
self.selectedShape.points[2] += QPointF(-1.0, 0)
self.selectedShape.points[3] += QPointF(-1.0, 0)
self.selectedShape.center += QPointF(-1.0, 0)
elif direction == 'Right' and not self.moveOutOfBound(QPointF(1.0, 0)):
# print("move Right one pixel")
self.selectedShape.points[0] += QPointF(1.0, 0)
self.selectedShape.points[1] += QPointF(1.0, 0)
self.selectedShape.points[2] += QPointF(1.0, 0)
self.selectedShape.points[3] += QPointF(1.0, 0)
self.selectedShape.center += QPointF(1.0, 0)
elif direction == 'Up' and not self.moveOutOfBound(QPointF(0, -1.0)):
# print("move Up one pixel")
self.selectedShape.points[0] += QPointF(0, -1.0)
self.selectedShape.points[1] += QPointF(0, -1.0)
self.selectedShape.points[2] += QPointF(0, -1.0)
self.selectedShape.points[3] += QPointF(0, -1.0)
self.selectedShape.center += QPointF(0, -1.0)
elif direction == 'Down' and not self.moveOutOfBound(QPointF(0, 1.0)):
# print("move Down one pixel")
self.selectedShape.points[0] += QPointF(0, 1.0)
self.selectedShape.points[1] += QPointF(0, 1.0)
self.selectedShape.points[2] += QPointF(0, 1.0)
self.selectedShape.points[3] += QPointF(0, 1.0)
self.selectedShape.center += QPointF(0, 1.0)
self.shapeMoved.emit()
self.repaint()
def moveOutOfBound(self, step):
points = [p1+p2 for p1, p2 in zip(self.selectedShape.points, [step]*4)]
return True in map(self.outOfPixmap, points)
def setLastLabel(self, text, line_color = None, fill_color = None, extra_text=''):
assert text
self.shapes[-1].label = text
self.shapes[-1].extra_label = extra_text
if line_color:
self.shapes[-1].line_color = line_color
if fill_color:
self.shapes[-1].fill_color = fill_color
return self.shapes[-1]
def undoLastLine(self):
assert self.shapes
self.current = self.shapes.pop()
self.current.setOpen()
self.line.points = [self.current[-1], self.current[0]]
self.drawingPolygon.emit(True)
def resetAllLines(self):
assert self.shapes
self.current = self.shapes.pop()
self.current.setOpen()
self.line.points = [self.current[-1], self.current[0]]
self.drawingPolygon.emit(True)
self.current = None
self.drawingPolygon.emit(False)
self.update()
def loadPixmap(self, pixmap):
self.pixmap = pixmap
self.shapes = []
self.repaint()
def loadShapes(self, shapes):
self.shapes = list(shapes)
self.current = None
self.repaint()
def setShapeVisible(self, shape, value):
self.visible[shape] = value
self.repaint()
def currentCursor(self):
cursor = QApplication.overrideCursor()
if cursor is not None:
cursor = cursor.shape()
return cursor
def overrideCursor(self, cursor):
self._cursor = cursor
if self.currentCursor() is None:
QApplication.setOverrideCursor(cursor)
else:
QApplication.changeOverrideCursor(cursor)
def restoreCursor(self):
QApplication.restoreOverrideCursor()
def resetState(self):
self.restoreCursor()
self.pixmap = None
#self.localScalePixmap = None
self.update()
| 36,751 | 35.460317 | 153 |
py
|
labelImg2
|
labelImg2-master/libs/ustr.py
|
import sys
def ustr(x):
'''py2/py3 unicode helper'''
if sys.version_info < (3, 0, 0):
from PyQt4.QtCore import QString
if type(x) == str:
return x.decode('utf-8')
if type(x) == QString:
return unicode(x)
return x
else:
return x # py3
| 313 | 19.933333 | 40 |
py
|
labelImg2
|
labelImg2-master/libs/constants.py
|
SETTING_FILENAME = 'filename'
SETTING_RECENT_FILES = 'recentFiles'
SETTING_WIN_SIZE = 'window/size'
SETTING_WIN_POSE = 'window/position'
SETTING_WIN_GEOMETRY = 'window/geometry'
SETTING_LINE_COLOR = 'line/color'
SETTING_FILL_COLOR = 'fill/color'
SETTING_ADVANCE_MODE = 'advanced'
SETTING_WIN_STATE = 'window/state'
SETTING_SAVE_DIR = 'savedir'
SETTING_PAINT_LABEL = 'paintlabel'
SETTING_LAST_OPEN_DIR = 'lastOpenDir'
SETTING_AUTO_SAVE = 'autosave'
SETTING_DRAW_CORNER = 'drawcorner'
SETTING_SINGLE_CLASS = 'singleclass'
FORMAT_PASCALVOC='PascalVOC'
FORMAT_YOLO='YOLO'
| 566 | 32.352941 | 40 |
py
|
labelImg2
|
labelImg2-master/libs/labelView.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import sys
try:
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
except ImportError:
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from .ustr import ustr
class HashableQStandardItem(QStandardItem):
def __init__(self, text):
super(HashableQStandardItem, self).__init__(text)
def __hash__(self):
return hash(id(self))
class CComboBoxDelegate(QStyledItemDelegate):
def __init__(self, parent, listItem):
super(CComboBoxDelegate, self).__init__(parent)
self.listItem = listItem
def updateListItem(self, listItem):
self.listItem = listItem
def createEditor(self, parent, option, index):
editor = QComboBox(parent)
for i in self.listItem:
editor.addItem(i)
editor.currentIndexChanged.connect(self.editorIndexChanged)
editor.setCurrentIndex(0)
return editor
    # commit data early to avoid losing edits when OpenNextImg is clicked
def editorIndexChanged(self, index):
combox = self.sender()
self.commitData.emit(combox)
def setEditorData(self, editor, index):
text = index.model().data(index, Qt.EditRole)
if sys.version_info < (3, 0, 0):
text = text.toPyObject()
combox = editor
tindex = combox.findText(ustr(text))
combox.setCurrentIndex(tindex)
def setModelData(self, editor, model, index):
comboBox = editor
strData = comboBox.currentText()
oldstrData = index.model().data(index, Qt.EditRole)
if strData != oldstrData:
model.setData(index, strData, Qt.EditRole)
def updateEditorGeometry(self, editor, option, index):
editor.setGeometry(option.rect)
class CEditDelegate(QStyledItemDelegate):
def __init__(self, parent):
super(CEditDelegate, self).__init__(parent)
self.editor = None
self.parent = parent
def createEditor(self, parent, option, index):
self.editor = QLineEdit(parent)
self.editor.textEdited.connect(self.textEdited)
return self.editor
def textEdited(self, str):
self.parent.extraChanged(str)
def setEditorData(self, editor, index):
return super(CEditDelegate, self).setEditorData(editor, index)
def destroyEditor(self, editor, index):
self.parent.extraChanged(index.data())
ret = super(CEditDelegate, self).destroyEditor(editor, index)
self.editor = None
return ret
def earlyCommit(self, index):
if self.editor is not None:
self.commitData.emit(self.editor)
self.destroyEditor(self.editor, index)
class CHeaderView(QHeaderView):
clicked = pyqtSignal(int, bool)
_x_offset = 3
_y_offset = 0 # This value is calculated later, based on the height of the paint rect
_width = 20
_height = 20
def __init__(self, orientation, parent=None):
super(CHeaderView, self).__init__(orientation, parent)
self.setFixedWidth(40)
self.isChecked = []
def rowsInserted(self, parent, start, end):
self.isChecked.insert(start, 1)
return super(CHeaderView, self).rowsInserted(parent, start, end)
def rowsAboutToBeRemoved(self, parent, start, end):
del self.isChecked[start]
return super(CHeaderView, self).rowsAboutToBeRemoved(parent, start, end)
def paintSection(self, painter, rect, logicalIndex):
self._y_offset = int((rect.height()-self._width)/2.)
option = QStyleOptionButton()
option.state = QStyle.State_Enabled | QStyle.State_Active
option.rect = QRect(rect.x() + self._x_offset, rect.y() + self._y_offset, self._width, self._height)
if self.isChecked[logicalIndex]:
option.state |= QStyle.State_On
else:
option.state |= QStyle.State_Off
self.style().drawPrimitive(QStyle.PE_IndicatorCheckBox, option, painter)
#self.style().drawControl(QStyle.CE_CheckBox, option, painter)
def mouseReleaseEvent(self, e):
index = self.logicalIndexAt(e.pos())
if 0 <= index < self.count():
# vertical orientation
y = self.sectionViewportPosition(index)
if self._x_offset < e.pos().x() < self._x_offset + self._width \
and y + self._y_offset < e.pos().y() < y + self._y_offset + self._height:
if self.isChecked[index] == 1:
self.isChecked[index] = 0
else:
self.isChecked[index] = 1
self.clicked.emit(index, self.isChecked[index])
self.viewport().update()
else:
                super(CHeaderView, self).mouseReleaseEvent(e)
else:
            super(CHeaderView, self).mouseReleaseEvent(e)
class CLabelView(QTableView):
extraEditing = pyqtSignal(QModelIndex, str)
toggleEdit = pyqtSignal(bool)
def __init__(self, labelHist, parent = None):
super(CLabelView, self).__init__(parent)
header = CHeaderView(Qt.Vertical, self)
self.setVerticalHeader(header)
self.label_delegate = CComboBoxDelegate(self, labelHist)
self.setItemDelegateForColumn(0, self.label_delegate)
self.extra_delegate = CEditDelegate(self)
self.setItemDelegateForColumn(1, self.extra_delegate)
self.setSelectionBehavior(QAbstractItemView.SelectRows)
self.setStyleSheet("selection-background-color: rgb(0,90,140)")
model = QStandardItemModel(self)
model.setColumnCount(2)
model.setHorizontalHeaderLabels(["Label", "Extra Info"])
self.setModel(model)
self.sm = self.selectionModel()
def extraChanged(self, str):
self.extraEditing.emit(self.sm.currentIndex(), str)
def earlyCommit(self):
# TODO: verify currentIndex
extra_index = self.model().index(self.sm.currentIndex().row(), 1)
self.extra_delegate.earlyCommit(extra_index)
def updateLabelList(self, labelHist):
self.label_delegate.updateListItem(labelHist)
def keyPressEvent(self, e):
key = e.key()
if key == Qt.Key_Return or key == Qt.Key_Enter:
if self.extra_delegate.editor is None:
self.toggleEdit.emit(True)
        return super(CLabelView, self).keyPressEvent(e)
| 6,485 | 33.5 | 108 |
py
|
labelImg2
|
labelImg2-master/libs/lib.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from math import sqrt
from .ustr import ustr
import hashlib
try:
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
except ImportError:
from PyQt4.QtGui import *
from PyQt4.QtCore import *
def newIcon(icon):
return QIcon('img/' + icon)
def newButton(text, icon=None, slot=None):
b = QPushButton(text)
if icon is not None:
b.setIcon(newIcon(icon))
if slot is not None:
b.clicked.connect(slot)
return b
def newAction(parent, text, slot=None, shortcut=None, icon=None,
tip=None, checkable=False, enabled=True):
"""Create a new action and assign callbacks, shortcuts, etc."""
a = QAction(text, parent)
if icon is not None:
a.setIcon(newIcon(icon))
if shortcut is not None:
if isinstance(shortcut, (list, tuple)):
a.setShortcuts(shortcut)
else:
a.setShortcut(shortcut)
if tip is not None:
a.setToolTip(tip)
a.setStatusTip(tip)
if slot is not None:
a.triggered.connect(slot)
if checkable:
a.setCheckable(True)
a.setEnabled(enabled)
return a
def addActions(widget, actions):
for action in actions:
if action is None:
widget.addSeparator()
elif isinstance(action, QMenu):
widget.addMenu(action)
else:
widget.addAction(action)
def labelValidator():
return QRegExpValidator(QRegExp(r'^[^ \t].+'), None)
class struct(object):
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
def distance(p):
return sqrt(p.x() * p.x() + p.y() * p.y())
def fmtShortcut(text):
mod, key = text.split('+', 1)
return '<b>%s</b>+<b>%s</b>' % (mod, key)
def generateColorByText(text):
utext = ustr(text)
s = utext #str(utext)
hashCode = int(hashlib.sha256(s.encode('utf-8')).hexdigest(), 16)
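    # split the hash into three pseudo-digits for r, g and b, so a given
    # label text always maps to the same color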
r = int((hashCode / 255) % 255)
g = int((hashCode / 65025) % 255)
b = int((hashCode / 16581375) % 255)
return QColor(r, g, b, 200)
| 2,121 | 23.113636 | 69 |
py
|
labelImg2
|
labelImg2-master/libs/labelFile.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
try:
from PyQt5.QtGui import QImage
from PyQt5.QtGui import QImageReader
except ImportError:
from PyQt4.QtGui import QImage
from PyQt4.QtGui import QImageReader
from base64 import b64encode, b64decode
from .pascal_voc_io import PascalVocWriter
from .pascal_voc_io import XML_EXT
import os
import sys
import math
class LabelFileError(Exception):
pass
class LabelFile(object):
    # May be changed when the window is created. By default, use the XML extension.
# suffix = '.lif'
suffix = XML_EXT
def __init__(self, filename=None):
self.shapes = ()
self.imagePath = None
self.imageData = None
self.verified = False
def savePascalVocFormat(self, filename, shapes, imagePath, imageData,
lineColor=None, fillColor=None, databaseSrc=None):
imgFolderPath = os.path.dirname(imagePath)
imgFolderName = os.path.split(imgFolderPath)[-1]
imgFileName = os.path.basename(imagePath)
#imgFileNameWithoutExt = os.path.splitext(imgFileName)[0]
# Read from file path because self.imageData might be empty if saving to
# Pascal format
reader0 = QImageReader(imagePath)
reader0.setAutoTransform(True)
image = reader0.read()
imageShape = [image.height(), image.width(),
1 if image.isGrayscale() else 3]
writer = PascalVocWriter(imgFolderName, imgFileName,
imageShape, localImgPath=imagePath)
writer.verified = self.verified
for shape in shapes:
points = shape['points']
label = shape['label']
# Add Chris
difficult = int(shape['difficult'])
direction = shape['direction']
isRotated = shape['isRotated']
extra_text = shape['extra_text']
if not isRotated:
bndbox = LabelFile.convertPoints2BndBox(points)
writer.addBndBox(bndbox[0], bndbox[1], bndbox[2],
bndbox[3], label, difficult, extra_text)
else: #if shape is rotated box, save as rotated bounding box
robndbox = LabelFile.convertPoints2RotatedBndBox(shape)
writer.addRotatedBndBox(robndbox[0],robndbox[1],
robndbox[2],robndbox[3],robndbox[4],label,difficult, extra_text)
writer.save(targetFile=filename)
return
def toggleVerify(self):
self.verified = not self.verified
    ''' legacy JSON (.lif) load/save (disabled)
def load(self, filename):
import json
with open(filename, 'rb') as f:
data = json.load(f)
imagePath = data['imagePath']
imageData = b64decode(data['imageData'])
lineColor = data['lineColor']
fillColor = data['fillColor']
shapes = ((s['label'], s['points'], s['line_color'], s['fill_color'])\
for s in data['shapes'])
# Only replace data after everything is loaded.
self.shapes = shapes
self.imagePath = imagePath
self.imageData = imageData
self.lineColor = lineColor
self.fillColor = fillColor
def save(self, filename, shapes, imagePath, imageData, lineColor=None, fillColor=None):
import json
with open(filename, 'wb') as f:
json.dump(dict(
shapes=shapes,
lineColor=lineColor, fillColor=fillColor,
imagePath=imagePath,
imageData=b64encode(imageData)),
f, ensure_ascii=True, indent=2)
'''
@staticmethod
def isLabelFile(filename):
fileSuffix = os.path.splitext(filename)[1].lower()
return fileSuffix == LabelFile.suffix
@staticmethod
def convertPoints2BndBox(points):
xmin = float('inf')
ymin = float('inf')
xmax = float('-inf')
ymax = float('-inf')
for p in points:
x = p[0]
y = p[1]
xmin = min(x, xmin)
ymin = min(y, ymin)
xmax = max(x, xmax)
ymax = max(y, ymax)
# Martin Kersner, 2015/11/12
# 0-valued coordinates of BB caused an error while
# training faster-rcnn object detector.
if xmin < 1:
xmin = 1
if ymin < 1:
ymin = 1
return (int(xmin), int(ymin), int(xmax), int(ymax))
@staticmethod
def convertPoints2RotatedBndBox(shape):
points = shape['points']
center = shape['center']
direction = shape['direction']
cx = center.x()
cy = center.y()
w = math.sqrt((points[0][0]-points[1][0]) ** 2 +
(points[0][1]-points[1][1]) ** 2)
h = math.sqrt((points[2][0]-points[1][0]) ** 2 +
(points[2][1]-points[1][1]) ** 2)
angle = direction % (2*math.pi)
return (round(cx,4),round(cy,4),round(w,4),round(h,4),round(angle,6))
| 5,130 | 32.318182 | 91 |
py
|
labelImg2
|
labelImg2-master/libs/naturalsort.py
|
# Simple natural order sorting API for Python.
#
# Author: Peter Odding <[email protected]>
# Last Change: November 2, 2015
# URL: https://github.com/xolox/python-naturalsort
"""Simple natural order sorting API for Python."""
# Standard library modules.
import re
__version__ = '1.5.1'
"""Semi-standard module versioning."""
integer_pattern = re.compile('([0-9]+)')
"""Compiled regular expression to match a consecutive run of digits."""
integer_type = int
"""The type used to coerce strings of digits into Python numbers."""
def natsort(l, key=None, reverse=False):
"""
Sort the given list in the way that humans expect (using natural order sorting).
:param l: An iterable of strings to sort.
:param key: An optional sort key similar to the one accepted by Python's
built in :func:`sorted()` function. Expected to produce
strings.
:param reverse: Whether to reverse the resulting sorted list.
:returns: A sorted list of strings.
"""
return sorted(l, key=lambda v: NaturalOrderKey(key and key(v) or v), reverse=reverse)
def natsort_key(s):
"""
Turn a string into a list of substrings and numbers.
:param s: The string to split.
:returns: A list of strings and/or integers.
"""
return [coerce(c) for c in integer_pattern.split(s) if c != '']
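# e.g. natsort_key('img10.png') -> ['img', 10, '.png']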
def coerce(s):
"""
Coerce strings of digits into proper integers.
:param s: A string.
:returns: An integer (if coercion is possible) or the original string.
"""
if s.isdigit():
return integer_type(s)
else:
return s
class NaturalOrderKey(object):
"""
Rich comparison for natural order sorting keys.
This class implements rich comparison operators for natural order sorting
that is compatible with both Python 2 and Python 3.
Previous versions of the `naturalsort` package directly compared the
iterables produced by :func:`natsort_key()` however in Python 3 this can
raise :exc:`~exceptions.TypeError` due to comparisons between integers and
strings (which Python 3 does not allow).
"""
def __init__(self, value):
"""
Initialize a :class:`NaturalOrderKey` object.
:param value: A string given to :func:`natsort_key()` to get the
natural order sorting key used in the rich comparison
methods implemented by this class.
"""
self.key = natsort_key(value)
self.length = len(self.key)
def __eq__(self, other):
"""Equality comparison for natural order sorting keys."""
if self.is_compatible(other):
return self.key == other.key
else:
return NotImplemented
def __ne__(self, other):
"""Non equality comparison for natural order sorting keys."""
if self.is_compatible(other):
return self.key != other.key
else:
return NotImplemented
def __lt__(self, other):
"""Less than comparison for natural order sorting keys."""
if self.is_compatible(other):
for i in range(max(self.length, other.length)):
if self.length > i:
self_item = self.key[i]
else:
self_item = None
if other.length > i:
other_item = other.key[i]
else:
other_item = None
# If the natural order keys are not of equal length one of the
# items may be unavailable (None) so we have to compensate:
#
# - If the available item is a number then the unavailable
# item is treated as the number zero. This implements zero
# padding semantics which ensures that e.g. 0.15 sorts
# before 0.15.1.
#
# - If the available item is not a number then the two items
# are treated as being equal, otherwise the second dot in
# '0.15.1' (to continue the example from above) would sort
# before the zero padding in the tokenized version of '0.15'
# which would then be [0, '.', 15, 0, 0].
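                #
                # For example, natsort_key('0.15') gives [0, '.', 15] and
                # natsort_key('0.15.1') gives [0, '.', 15, '.', 1], so
                # '0.15' correctly sorts before '0.15.1'.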
if self_item is None:
if isinstance(other_item, integer_type):
self_item = 0
else:
self_item = other_item
if other_item is None:
if isinstance(self_item, integer_type):
other_item = 0
else:
other_item = self_item
if self_item != other_item:
if not isinstance(self_item, integer_type) or not isinstance(other_item, integer_type):
# Comparisons between two integers are safe but
# otherwise we fall back to a string comparison
# to avoid type errors raised by Python 3.
self_item = str(self_item)
other_item = str(other_item)
if self_item < other_item:
return True
if self_item > other_item:
return False
return False
else:
return NotImplemented
def __le__(self, other):
"""Less than or equal comparison for natural order sorting keys."""
if self.is_compatible(other):
return self < other or self == other
else:
return NotImplemented
def __gt__(self, other):
"""Greater than comparison for natural order sorting keys."""
if self.is_compatible(other):
return not (self <= other)
else:
return NotImplemented
def __ge__(self, other):
"""Greater than or equal comparison for natural order sorting keys."""
if self.is_compatible(other):
return self > other or self == other
else:
return NotImplemented
def is_compatible(self, obj):
"""
Check if the given object has a compatible type.
:param obj: The object to check.
:returns: :data:`True` if the given object is an instance of
:class:`NaturalOrderKey`, :data:`False` otherwise.
"""
return isinstance(obj, self.__class__)
| 6,441 | 35.191011 | 107 |
py
|
labelImg2
|
labelImg2-master/libs/labelDialog.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import sys
try:
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
except ImportError:
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from .lib import newIcon, labelValidator
BB = QDialogButtonBox
class CMyListModel(QStringListModel):
def __init__(self, parent = None):
super(CMyListModel, self).__init__(parent)
self.rowColors = {}
def data(self, index, role):
if role == Qt.BackgroundRole:
if index.row() in self.rowColors:
return self.rowColors[index.row()]
return super(CMyListModel, self).data(index, role)
def setData(self, index, value, role = None):
if role == Qt.BackgroundRole:
self.rowColors[index.row()] = value
return True
return super(CMyListModel, self).setData(index, value, role)
def flags(self, index):
flags = super(CMyListModel, self).flags(index)
flags ^= Qt.ItemIsEditable
return flags
class LabelDialog(QDialog):
def __init__(self, text="Enter object label", parent=None, listItem=None):
super(LabelDialog, self).__init__(parent)
self.edit = QLineEdit()
self.edit.setText(text)
self.edit.setValidator(labelValidator())
self.edit.editingFinished.connect(self.postProcess)
self.layout = QVBoxLayout()
self.layout.addWidget(self.edit)
self.buttonBox = bb = BB(BB.Ok | BB.Cancel, Qt.Horizontal, self)
bb.button(BB.Ok).setIcon(newIcon('done'))
bb.button(BB.Cancel).setIcon(newIcon('undo'))
bb.accepted.connect(self.validate)
bb.rejected.connect(self.reject)
self.horlayout = QHBoxLayout()
self.setDefaultBtn = QPushButton("set as default")
self.setDefaultBtn.clicked.connect(self.defaultLabel)
self.addBtn = QPushButton("add")
self.addBtn.clicked.connect(self.addLabel)
self.horlayout.addWidget(self.addBtn)
self.horlayout.addWidget(self.setDefaultBtn)
self.listView = QListView(self)
self.model = CMyListModel(self.listView)
self.model.setStringList(listItem)
self.listView.setModel(self.model)
self.sm = self.listView.selectionModel()
        if listItem:  # guard against both None and an empty list
self.default_label = listItem[0]
self.model.setData(self.model.index(0), QBrush(Qt.red), Qt.BackgroundRole)
else:
self.default_label = None
self.updateListItems(listItem)
self.layout.addWidget(self.listView)
self.layout.addLayout(self.horlayout)
self.layout.addWidget(bb)
self.setLayout(self.layout)
def updateListItems(self, listItem):
self.model.setStringList(listItem)
def addLabel(self):
if not self.edit.text() in self.model.stringList():
lastrow = self.model.rowCount()
self.model.insertRows(lastrow, 1)
self.model.setData(self.model.index(lastrow), self.edit.text(), Qt.EditRole)
self.listView.setCurrentIndex(self.model.index(lastrow))
def defaultLabel(self):
curr = self.sm.currentIndex()
sl = self.model.stringList()
if sys.version_info < (3, 0, 0):
j = sl.indexOf(self.default_label)
else:
j = sl.index(self.default_label)
self.model.setData(self.model.index(j), QBrush(Qt.transparent), Qt.BackgroundRole)
self.default_label = self.model.data(curr, Qt.EditRole)
if sys.version_info < (3, 0, 0):
self.default_label = self.default_label.toPyObject()
self.model.setData(self.model.index(curr.row()), QBrush(Qt.red), Qt.BackgroundRole)
def validate(self):
try:
if self.edit.text().trimmed():
self.accept()
except AttributeError:
# PyQt5: AttributeError: 'str' object has no attribute 'trimmed'
if self.edit.text().strip():
self.accept()
def postProcess(self):
try:
self.edit.setText(self.edit.text().trimmed())
except AttributeError:
# PyQt5: AttributeError: 'str' object has no attribute 'trimmed'
self.edit.setText(self.edit.text())
def popUp(self, move=True):
self.edit.setFocus(Qt.PopupFocusReason)
if move:
self.move(QCursor.pos())
if self.exec_():
return self.model.stringList(), self.default_label
else:
            return None
| 4,640 | 30.147651 | 91 |
py
|
labelImg2
|
labelImg2-master/libs/__init__.py
| 1 | 0 | 0 |
py
|
|
labelImg2
|
labelImg2-master/libs/fileView.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
import sys
try:
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
except ImportError:
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from .pascal_voc_io import PascalVocReader, XML_EXT
class CFileListModel(QStringListModel):
def __init__(self, parent = None):
super(CFileListModel, self).__init__(parent)
self.dispList = []
def parseOne(self, s, openedDir = None, defaultSaveDir = None):
if openedDir is not None and defaultSaveDir is not None:
relname = os.path.relpath(s, openedDir)
relname = os.path.splitext(relname)[0]
xmlPath = os.path.join(defaultSaveDir, relname + XML_EXT)
else:
xmlPath = os.path.splitext(s)[0] + XML_EXT
if os.path.exists(xmlPath) and os.path.isfile(xmlPath):
tVocParser = PascalVocReader(xmlPath)
shapes = tVocParser.getShapes()
info = [os.path.split(s)[1], len(shapes), False]
else:
info = [os.path.split(s)[1], None, False]
return info
def setStringList(self, strings, openedDir = None, defaultSaveDir = None):
self.dispList = []
for s in strings:
info = self.parseOne(s, openedDir, defaultSaveDir)
self.dispList.append(info)
return super(CFileListModel, self).setStringList(strings)
def data(self, index, role):
if role == Qt.DisplayRole:
return '%s [%d]' % (self.dispList[index.row()][0], 0 if self.dispList[index.row()][1] is None else self.dispList[index.row()][1])
elif role == Qt.ToolTipRole:
return super(CFileListModel, self).data(index, Qt.EditRole)
elif role == Qt.BackgroundRole:
item = self.dispList[index.row()]
if item[1] is None or item[1] == 0:
brush = QBrush(Qt.transparent)
else:
brush = QBrush(Qt.lightGray)
if item[2]:
brush = QBrush(Qt.green)
return brush
else:
return super(CFileListModel, self).data(index, role)
def setData(self, index, value, role = None):
if role == Qt.BackgroundRole:
info = self.dispList[index.row()]
info[1] = value
info[2] = True
self.dispList[index.row()] = info
return super(CFileListModel, self).setData(index, value, role)
class CFileItemEditDelegate(QStyledItemDelegate):
def __init__(self, parent):
super(CFileItemEditDelegate, self).__init__(parent)
def createEditor(self, parent, option, index):
editor = QLineEdit(parent)
editor.setReadOnly(True)
return editor
class CFileView(QListView):
def __init__(self, parent = None):
super(CFileView, self).__init__(parent)
model = CFileListModel(self)
self.setModel(model)
delegate = CFileItemEditDelegate(self)
self.setItemDelegateForColumn(0, delegate)
| 3,115 | 31.8 | 141 |
py
|
rpn_bo
|
rpn_bo-main/Code and results/brusselator_pde_MLP.py
|
import os
os.environ['XLA_PYTHON_CLIENT_PREALLOCATE']='false'
#
from jax import vmap, random, jit
from jax import numpy as np
import numpy as onp
from rpn_bo_utilities import uniform_prior
from rpn_bo_models import EnsembleRegression
from rpn_bo_dataloaders import BootstrapLoader
from rpn_bo_acquisitions import MCAcquisition
onp.random.seed(1234)
# Helper functions
normalize = vmap(lambda x, mu, std: (x-mu)/std, in_axes=(0,0,0))
denormalize = vmap(lambda x, mu, std: x*std + mu, in_axes=(0,0,0))
# vectorial input space dimension and its search space
dim = 4
lb = np.array([0.1, 0.1, 0.01, 0.01])
ub = np.array([5.0, 5.0, 5.0, 5.0])
p_x = uniform_prior(lb, ub)
bounds = (lb, ub)
# vectorial output space dimension
N_y = 64
dim_y = 2*N_y**2
# function mapping the vectorial input x to the vectorial output consisting of the solution to the 2D Brusselator PDE evaluated at N_y x N_y grid points
def f(x):
from pde import PDE, FieldCollection, ScalarField, UnitGrid
a = x[0]
b = x[1]
d0 = x[2]
d1 = x[3]
eq = PDE(
{
"u": f"{d0} * laplace(u) + {a} - ({b} + 1) * u + u**2 * v",
"v": f"{d1} * laplace(v) + {b} * u - u**2 * v",
}
)
# initialize state
grid = UnitGrid([N_y, N_y])
u = ScalarField(grid, a, label="Field $u$")
v = b / a + 0.1 * ScalarField.random_normal(grid, label="Field $v$", seed=10)
state = FieldCollection([u, v])
sol = eq.solve(state, t_range=20, dt=1e-3)
sol_tensor = []
sol_tensor.append(sol[0].data)
sol_tensor.append(sol[1].data)
sol_tensor = onp.array(sol_tensor)
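    # penalize diverged solves: replace any NaNs with large random values so
    # the optimizer treats these inputs as poor candidates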
ss = sol_tensor[onp.isnan(sol_tensor)]
sol_tensor[onp.isnan(sol_tensor)] = 1e5 * onp.random.randn(*ss.shape)
return sol_tensor.flatten()
#### General simulation params ####
N = 5
prev = 0 # previous independent random runs
nTrSet = 10-prev # total independent random runs to perform
#### RPN-BO hyperparameters ####
num_restarts_acq = 500
nIter = 20
q1 = 2
nIter_q1 = nIter//q1
ensemble_size = 128
batch_size = N
fraction = 0.8
layers = [dim, 64, 64, dim_y]
nIter_RPN = 10000
options = {'criterion': 'EI', # LCB EI
'kappa': 2.0,
'weights': None} # exact gmm None
train_key = random.PRNGKey(0)
case = 'results/brusselator_pde_MLP'
# prediction function mapping vectorial output to scalar objective value
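# (the two outermost rows/columns of grid points get weight 1.0, interior
#  points 0.1; the objective is the variance of the weighted field, so
#  spatially homogeneous solutions score lower)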
def output(y):
y = y.reshape((2,N_y,N_y))
weighting = onp.ones((2,N_y,N_y))/10
weighting[:, [0, 1, -2, -1], :] = 1.0
weighting[:, :, [0, 1, -2, -1]] = 1.0
weighted_samples = weighting * y
return np.var(weighted_samples)
for j in range(nTrSet):
print('Train Set:',j+1)
# Initial training data
X = np.load(case+'/X_'+str(j+prev)+'.npy')
y = np.load(case+'/y_'+str(j+prev)+'.npy')
X_loc = X
y_loc = y
batch_size_loc = batch_size
# list to contain BO results
opt = []
yo_loc = vmap(output)(y_loc)
opt.append( np.min(yo_loc) )
print('Run %s, Nx %s, Ny %s, init %s, best %s' % (str(j+prev), X_loc.shape[0], y_loc.shape[0], opt[0], opt[-1]))
for it in range(nIter_q1):
# Create data set
train_key = random.split(train_key, 2)[0]
dataset = BootstrapLoader(X_loc, y_loc, batch_size_loc, ensemble_size, fraction, 1, rng_key=train_key)
(mu_X, sigma_X), (mu_y, sigma_y) = dataset.norm_const
# Initialize model
train_key = random.split(train_key, 2)[0]
model = EnsembleRegression(layers, ensemble_size, train_key)
# Train model
model.train(dataset, nIter=nIter_RPN)
@jit
def predict(x):
# accepts and returns un-normalized data
x = np.tile(x[np.newaxis,:,:], (ensemble_size, 1, 1))
x = normalize(x, mu_X, sigma_X)
params = vmap(model.get_params)(model.opt_state)
params_prior = vmap(model.get_params)(model.prior_opt_state)
opt_params = (params, params_prior)
samples = model.posterior(opt_params, x)
samples = denormalize(samples, mu_y, sigma_y)
samples = samples.reshape((samples.shape[0],samples.shape[1],2,N_y,N_y))
weighting = onp.ones((2,N_y,N_y))/10
weighting[:, [0, 1, -2, -1], :] = 1.0
weighting[:, :, [0, 1, -2, -1]] = 1.0
weighted_samples = weighting * samples
return np.var(weighted_samples, axis=(-3,-2,-1))[:,:,None]
# Fit GMM if needed for weighted acquisition functions
weights_fn = lambda x: np.ones(x.shape[0],)
kappa = options['kappa']
args = (kappa,)
acq_model = MCAcquisition(predict,
bounds,
*args,
acq_fn = options['criterion'],
output_weights=weights_fn)
# Optimize acquisition with L-BFGS to inquire new point(s)
new_X = acq_model.next_best_point(q = q1, num_restarts = num_restarts_acq)
new_X = new_X.reshape(q1,dim)
# Obtain the new data
new_y = []
for i in range(new_X.shape[0]):
new_y.append(f(new_X[i,:]))
new_y = onp.array(new_y)
# Augment training data
X_loc = np.concatenate([X_loc, new_X], axis = 0) # augment the vectorial input dataset during the BO process
y_loc = np.concatenate([y_loc, new_y], axis = 0) # augment the vectorial output dataset during the BO process
yo_loc = vmap(output)(y_loc)
opt.append( np.min(yo_loc) ) # augment the objective values of the constructed dataset during the BO process
batch_size_loc += q1
print('Run %s, Nx %s, Ny %s, init %s, best %s' % (str(j+prev), X_loc.shape[0], y_loc.shape[0], opt[0], opt[-1]))
np.save(case+'/opt_'+str(j+prev)+'.npy',onp.array(opt)) # save the constructed objective tensor by RPN-BO
| 6,056 | 32.65 | 150 |
py
|
rpn_bo
|
rpn_bo-main/Code and results/environmental_model_function_DON.py
|
import os
os.environ['XLA_PYTHON_CLIENT_PREALLOCATE']='false'
from jax import vmap, random, jit
from jax import numpy as np
import numpy as onp
from rpn_bo_utilities import uniform_prior
from rpn_bo_models import ParallelDeepOnet
from rpn_bo_dataloaders import DataGenerator_batch
from rpn_bo_acquisitions import MCAcquisition
onp.random.seed(1234)
# Helper functions
normalize = vmap(lambda x, mu, std: (x-mu)/std, in_axes=(0,0,0))
denormalize = vmap(lambda x, mu, std: x*std + mu, in_axes=(0,0,0))
# vectorial input space dimension and its search space
dim = 4
lb = np.array([7.0, 0.02, 0.01, 30.01])
ub = np.array([13.0, 0.12, 3.0, 30.295])
true_x = np.array([10.0, 0.07, 1.505, 30.1525])
p_x = uniform_prior(lb, ub)
bounds = (lb, ub)
# pollutant concentration function
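# (superposition of two instantaneous 1D diffusion point sources: a spill of
#  mass M at s=0, t=0 and a second spill at s=L, t=tau; the second term only
#  contributes once t > tau)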
def c(s,t,M,D,L,tau):
c1 = M/np.sqrt(4*np.pi*D*t)*np.exp(-s**2/4/D/t)
c2 = M/np.sqrt(4*np.pi*D*(t-tau))*np.exp(-(s-L)**2/4/D/(t-tau))
return np.where(t>tau, c1+c2, c1)
s1 = np.array([0.0, 1.0, 2.5])
t1 = np.array([15.0, 30.0, 45.0, 60.0])
ST = np.meshgrid(s1, t1)
STo = np.array(ST).T
# function mapping the vectorial input x to the vectorial output consisting of the concentration evaluation at 3x4 grid points
def f(x):
res = []
for i in range(STo.shape[0]):
resl = []
for j in range(STo.shape[1]):
resl.append( c(STo[i,j,0],STo[i,j,1],x[0],x[1],x[2],x[3]) )
res.append(np.array(resl))
return np.array(res)
#### General simulation params ####
N = 5
prev = 0 # previous independent random runs
nTrSet = 10-prev # total independent random runs to perform
#### DeepONet functional evaluation points ####
m = 4
P1 = 4
P2 = 3
Ms = 2.5
Mt = 60.0
soln_dim = 1
s1 = np.array([0.0, 1.0, 2.5])/Ms
t1 = np.array([15.0, 30.0, 45.0, 60.0])/Mt
Tm, Xm = np.meshgrid(t1, s1)
y_test_sample = np.hstack([Tm.flatten()[:,None], Xm.flatten()[:,None]])
#### RPN-BO hyperparameters ####
num_restarts_acq = 500
nIter = 30
q1 = 1
nIter_q1 = nIter//q1
N_ensemble = 128
fraction = 0.8
branch_layers = [m, 64, 64]
trunk_layers = [2, 64, 64]
nIter_RPN = 5000
options = {'criterion': 'TS', # LCB EI TS
'kappa': 2.0,
'weights': None} # exact gmm None
train_key = random.PRNGKey(0)
key_TS = random.PRNGKey(123)
case = 'results/environmental_model_function_DON'
true_y = f(true_x)
true_y = np.expand_dims(true_y, axis = 2)
for j in range(nTrSet):
print('Train Set:',j+1)
# Initial training data
X = np.load(case+'/X_'+str(j+prev)+'.npy')
y = np.load(case+'/y_'+str(j+prev)+'.npy')
X = np.array(X)
y = np.array(y)
y = y.reshape(y.shape[0],P2,P1)
y = np.expand_dims(y, axis = 3)
X_loc = X
y_loc = y
batch_size_loc = 12 # max value is P1*P2
# list to contain BO results
opt = []
yo_loc = np.sum((y_loc-true_y)**2, axis = (1,2))
opt.append( np.min(yo_loc) )
for it in range(nIter_q1):
sigma_X = X_loc.std(0)
mu_X = X_loc.mean(0)
sigma_y = y_loc.std(0)
mu_y = y_loc.mean(0)
# Create data set
usol_train = (y_loc-mu_y)/sigma_y
u0_train = (X_loc-mu_X)/sigma_X
batch_size_all_loc = int(fraction*12*X_loc.shape[0])
dataset = DataGenerator_batch(usol_train, u0_train, s1, t1, P1, P2, batch_size_loc, batch_size_all_loc, N_ensemble)
# Initialize model
model = ParallelDeepOnet(branch_layers, trunk_layers, N_ensemble, soln_dim)
# Train model
model.train(dataset, nIter=nIter_RPN)
@jit
def predict(x):
x = (x-mu_X)/sigma_X
u_test_sample = np.tile(x, (P1*P2, 1))
samples = model.predict_s(u_test_sample, y_test_sample) # N_ensemble x P1*P2 x soln_dim
samples = samples.reshape((samples.shape[0],P1,P2,samples.shape[-1])) # N_ensemble x P1 x P2 x soln_dim
samples = np.transpose(samples, (0, 2, 1, 3)) # N_ensemble x P2 x P1 x soln_dim
samples = sigma_y*samples+mu_y
samples = np.sum((samples-true_y)**2, axis = (1,2))[:,:,None]
return samples
# Fit GMM if needed for weighted acquisition functions
weights_fn = lambda x: np.ones(x.shape[0],)
if options['criterion']=='TS':
key_TS = random.split(key_TS, 2)[0]
args = (key_TS,)
else:
kappa = options['kappa']
args = (kappa,)
acq_model = MCAcquisition(predict,
bounds,
*args,
acq_fn = options['criterion'],
output_weights=weights_fn)
# Optimize acquisition with L-BFGS to inquire new point(s)
new_X = acq_model.next_best_point(q = q1, num_restarts = num_restarts_acq)
new_X = new_X.reshape(q1,dim)
# Obtain the new vectorial output
new_y = vmap(f)(new_X)
new_y = np.expand_dims(new_y, axis = 3)
# Augment training data
X_loc = np.concatenate([X_loc, new_X], axis = 0) # augment the vectorial input dataset during the BO process
y_loc = np.concatenate([y_loc, new_y], axis = 0) # augment the vectorial output dataset during the BO process
        yo_loc = np.sum((y_loc-true_y)**2, axis = (1,2))  # sum over both grid axes, matching the initialization above
opt.append( np.min(yo_loc) ) # augment the objective values of the constructed dataset during the BO process
print('Run %s, Nx %s, Ny %s, init %s, best %s' % (str(j+prev), X_loc.shape[0], y_loc.shape[0], opt[0], opt[-1]))
np.save(case+'/opt_'+str(j+prev)+'.npy',np.array(opt)) # save the constructed objective tensor by RPN-BO
| 5,739 | 32.764706 | 126 |
py
|
rpn_bo
|
rpn_bo-main/Code and results/brusselator_pde_DON.py
|
import os
os.environ['XLA_PYTHON_CLIENT_PREALLOCATE']='false'
from jax import vmap, random, jit
from jax import numpy as np
from pyDOE import lhs
import numpy as onp
from rpn_bo_utilities import uniform_prior, output_weights
from rpn_bo_models import ParallelDeepOnet
from rpn_bo_dataloaders import DataGenerator_batch
from rpn_bo_acquisitions import MCAcquisition
# vectorial input space dimension and its search space
dim = 4
lb = np.array([0.1, 0.1, 0.01, 0.01])
ub = np.array([5.0, 5.0, 5.0, 5.0])
p_x = uniform_prior(lb, ub)
# vectorial output space dimension and DeepONet functional evaluation points
N_y = 64
output_dim = (N_y, N_y, 2)
soln_dim = 2
P1 = output_dim[0]
P2 = output_dim[1]
arr_s = np.linspace(0, 1, P1)
arr_t = np.linspace(0, 1, P2)
s_grid, t_grid = np.meshgrid(arr_s, arr_t)
y_grid = np.concatenate([s_grid[:, :, None], t_grid[:, :, None]], axis=-1).reshape((-1, 2))
mu_grid = y_grid.mean(0)
sigma_grid = y_grid.std(0)
y_grid = (y_grid - mu_grid) / sigma_grid
# function mapping the vectorial input x to the vectorial output consisting of the solution to the 2D Brusselator PDE evaluated at N_y x N_y grid points
def f(x):
from pde import PDE, FieldCollection, ScalarField, UnitGrid
a = x[0]
b = x[1]
d0 = x[2]
d1 = x[3]
eq = PDE(
{
"u": f"{d0} * laplace(u) + {a} - ({b} + 1) * u + u**2 * v",
"v": f"{d1} * laplace(v) + {b} * u - u**2 * v",
}
)
# initialize state
grid = UnitGrid([N_y, N_y])
u = ScalarField(grid, a, label="Field $u$")
v = b / a + 0.1 * ScalarField.random_normal(grid, label="Field $v$", seed=10)
state = FieldCollection([u, v])
sol = eq.solve(state, t_range=20, dt=1e-3)
sol_tensor = []
sol_tensor.append(sol[0].data)
sol_tensor.append(sol[1].data)
sol_tensor = onp.array(sol_tensor)
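    # penalize diverged solves: replace any NaNs with large random values so
    # the optimizer treats these inputs as poor candidates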
ss = sol_tensor[onp.isnan(sol_tensor)]
sol_tensor[onp.isnan(sol_tensor)] = 1e5 * onp.random.randn(*ss.shape)
return np.transpose(np.array(sol_tensor),(1,2,0))
#### General simulation params ####
N = 5
prev = 0
nTrSet = 30-prev
#### RPN-BO hyperparameters ####
nIter = 30
N_ensemble = 16
batch_size = P1 * P2
batch_size_all = P1 * P2 * N
branch_layers = [dim, 64, 64]
trunk_layers = [2, 64, 64]
nIter_RPN = 1000
acq_fct = 'LCB' # 'LCB', 'TS', 'LW_LCB'
case = 'results/brusselator_pde_DON'
# prediction function mapping vectorial output to scalar objective value
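# (the two outermost rows/columns of grid points get weight 1.0, interior
#  points 0.1; the objective is the variance of the weighted field, so
#  spatially homogeneous solutions score lower)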
def output(new_y):
weighting = onp.ones((2, 64, 64)) / 10
weighting[:, [0, 1, -2, -1], :] = 1.0
weighting[:, :, [0, 1, -2, -1]] = 1.0
weighted = weighting * np.transpose(new_y, (2, 0, 1))
return np.var(weighted, axis=(-3, -2, -1))
for j in range(nTrSet):
# Initial training data
onp.random.seed(j)
X = lb + (ub - lb) * lhs(dim, N)
y = np.array([f(x) for x in X])
opt = []
opt.append(np.min(np.array([output(yi) for yi in y])))
keys, keys_ts, keys_trans, keys_noise, keys_loader = random.split(random.PRNGKey(j), nIter * N).reshape((N, nIter, -1))
print('Run %s, Nx %s, Ny %s, init %s, best %s' % (str(j+prev), X.shape[0], y.shape[0], opt[0], opt[-1]))
for it in range(nIter):
# Create data set
mu_X = X.mean(0)
sigma_X = X.std(0)
mu_y = y.mean(0)
sigma_y = y.std(0)
u0_train = (X - mu_X) / sigma_X
usol_train = (y - mu_y) / sigma_y
dataset = DataGenerator_batch(usol_train, u0_train, arr_s, arr_t, P1=P1, P2=P2, batch_size=batch_size, batch_size_all=batch_size_all, N_ensemble=N_ensemble, y=y_grid, rng_key=keys_loader[it])
# Initialize model
model = ParallelDeepOnet(branch_layers, trunk_layers, N_ensemble, soln_dim)
# Train model
model.train(dataset, nIter=nIter_RPN)
@jit
def predict(x):
x = (x - mu_X) / sigma_X
u_test_sample = vmap(lambda x: np.tile(x, (P1 * P2, 1)))(x)
samples = model.predict_s(u_test_sample.reshape((-1, dim)), np.tile(y_grid, (x.shape[0], 1)))
samples = samples.reshape((-1, P1, P2, samples.shape[-1]))
samples = vmap(lambda s: s * sigma_y + mu_y)(samples)
samples = samples.reshape((N_ensemble, x.shape[0], P1, P2, samples.shape[-1]))
samples = np.transpose(samples, (0, 1, 4, 2, 3))
weighting = onp.ones((2, 64, 64)) / 10
weighting[:, [0, 1, -2, -1], :] = 1.0
weighting[:, :, [0, 1, -2, -1]] = 1.0
weighted_samples = weighting * samples
return np.var(weighted_samples, axis=(-3, -2, -1))[:, :, None]
kappa = 2
weights_fn = lambda x: np.ones(x.shape[0])
if acq_fct == 'TS':
args = (keys_ts[it], )
num_restarts = 100
acq_fn = 'TS'
elif acq_fct == 'LCB':
weights_fn = lambda x: np.ones(x.shape[0],)
args = (kappa, )
num_restarts = 100
acq_fn = 'LCB'
elif acq_fct == 'LW_LCB':
predict_fn = lambda x: np.mean(predict(x), axis=0)
num_samples = 100
weights_fn = output_weights(predict_fn, uniform_prior(lb, ub).pdf, (lb, ub), method='gmm', num_samples=num_samples, num_comp=5)
args = (kappa, )
num_restarts = 100
acq_fn = 'LCB'
acq_model = MCAcquisition(predict, (lb, ub), *args, acq_fn=acq_fn, output_weights=weights_fn)
# Optimize acquisition with L-BFGS to inquire new point(s)
new_X = acq_model.next_best_point(q=1, num_restarts=num_restarts, seed_id=100 * j + it)
# Obtain the new data
new_y = f(new_X)
# Augment training data
X = np.concatenate([X, new_X[None, :]]) # augment the vectorial input dataset during the BO process
y = np.concatenate([y, new_y[None, :, :, :]]) # augment the vectorial output dataset during the BO process
opt.append(np.minimum(opt[-1],output(new_y))) # augment the objective values of the constructed dataset during the BO process
print('Run %s, Nx %s, Ny %s, init %s, best %s' % (str(j+prev), X.shape[0], y.shape[0], opt[0], opt[-1]))
del model, dataset
np.save(case+'/opt_'+str(j+prev)+'.npy',onp.array(opt)) # save the constructed objective tensor by RPN-BO
| 6,387 | 35.090395 | 199 |
py
|
rpn_bo
|
rpn_bo-main/Code and results/environmental_model_function_MLP.py
|
import os
os.environ['XLA_PYTHON_CLIENT_PREALLOCATE']='false'
from jax import vmap, random, jit
from jax import numpy as np
import numpy as onp
from rpn_bo_utilities import uniform_prior
from rpn_bo_models import EnsembleRegression
from rpn_bo_dataloaders import BootstrapLoader
from rpn_bo_acquisitions import MCAcquisition
onp.random.seed(1234)
# Helper functions
normalize = vmap(lambda x, mu, std: (x-mu)/std, in_axes=(0,0,0))
denormalize = vmap(lambda x, mu, std: x*std + mu, in_axes=(0,0,0))
# vectorial input space dimension and its search space
dim = 4
lb = np.array([7.0, 0.02, 0.01, 30.01])
ub = np.array([13.0, 0.12, 3.0, 30.295])
true_x = np.array([10.0, 0.07, 1.505, 30.1525])
p_x = uniform_prior(lb, ub)
bounds = (lb, ub)
# vectorial output space dimension
dim_y = 12
# pollutant concentration function
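# (superposition of two instantaneous 1D diffusion point sources: a spill of
#  mass M at s=0, t=0 and a second spill at s=L, t=tau; the second term only
#  contributes once t > tau)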
def c(s,t,M,D,L,tau):
c1 = M/np.sqrt(4*np.pi*D*t)*np.exp(-s**2/4/D/t)
c2 = M/np.sqrt(4*np.pi*D*(t-tau))*np.exp(-(s-L)**2/4/D/(t-tau))
return np.where(t>tau, c1+c2, c1)
s1 = np.array([0.0, 1.0, 2.5])
t1 = np.array([15.0, 30.0, 45.0, 60.0])
ST = np.meshgrid(s1, t1)
ST = np.array(ST).T.reshape(-1,2)
# function mapping the vectorial input x to the vectorial output consisting of the concentration evaluation at 3x4 grid points
def f(x):
res = []
for i in range(ST.shape[0]):
res.append( c(ST[i,0],ST[i,1],x[0],x[1],x[2],x[3]) )
return np.array(res)
#### General simulation params ####
N = 5
prev = 0 # previous independent random runs
nTrSet = 10-prev # total independent random runs to perform
#### RPN-BO hyperparameters ####
num_restarts_acq = 500
nIter = 30
q1 = 1
nIter_q1 = nIter//q1
ensemble_size = 128
batch_size = N
fraction = 0.8
layers = [dim, 64, 64, 64, 64, dim_y]
nIter_RPN = 5000
options = {'criterion': 'LCB', # LCB EI TS
'kappa': 2.0,
'weights': None} # exact gmm None
train_key = random.PRNGKey(0)
key_TS = random.PRNGKey(123)
case = 'results/environmental_model_function_MLP'
true_y = f(true_x)
for j in range(nTrSet):
print('Train Set:',j+1)
# Initial training data
X = np.load(case+'/X_'+str(j+prev)+'.npy')
y = np.load(case+'/y_'+str(j+prev)+'.npy')
X_loc = X
y_loc = y
batch_size_loc = batch_size
# list to contain BO results
opt = []
yo_loc = np.sum((y_loc-true_y)**2, axis = 1)
opt.append( np.min(yo_loc) )
print('Run %s, Nx %s, Ny %s, init %s, best %s' % (str(j+prev), X_loc.shape[0], y_loc.shape[0], opt[0], opt[-1]))
for it in range(nIter_q1):
# Create data set
train_key = random.split(train_key, 2)[0]
dataset = BootstrapLoader(X_loc, y_loc, batch_size_loc, ensemble_size, fraction, 1, rng_key=train_key)
(mu_X, sigma_X), (mu_y, sigma_y) = dataset.norm_const
# Initialize model
train_key = random.split(train_key, 2)[0]
model = EnsembleRegression(layers, ensemble_size, train_key)
# Train model
model.train(dataset, nIter=nIter_RPN)
@jit
def predict(x):
# accepts and returns un-normalized data
x = np.tile(x[np.newaxis,:,:], (ensemble_size, 1, 1))
x = normalize(x, mu_X, sigma_X)
params = vmap(model.get_params)(model.opt_state)
params_prior = vmap(model.get_params)(model.prior_opt_state)
opt_params = (params, params_prior)
samples = model.posterior(opt_params, x)
samples = denormalize(samples, mu_y, sigma_y)
samples = np.sum((samples-true_y)**2, axis = 2)[:,:,None]
return samples
# Fit GMM if needed for weighted acquisition functions
weights_fn = lambda x: np.ones(x.shape[0],)
if options['criterion']=='TS':
key_TS = random.split(key_TS, 2)[0]
args = (key_TS,)
else:
kappa = options['kappa']
args = (kappa,)
acq_model = MCAcquisition(predict,
bounds,
*args,
acq_fn = options['criterion'],
output_weights=weights_fn)
        # Optimize acquisition with L-BFGS to acquire new point(s)
new_X = acq_model.next_best_point(q = q1, num_restarts = num_restarts_acq)
new_X = new_X.reshape(q1,dim)
# Obtain the new vectorial output
new_y = vmap(f)(new_X)
# Augment training data
X_loc = np.concatenate([X_loc, new_X], axis = 0) # augment the vectorial input dataset during the BO process
y_loc = np.concatenate([y_loc, new_y], axis = 0) # augment the vectorial output dataset during the BO process
yo_loc = np.sum((y_loc-true_y)**2, axis = 1)
opt.append( np.min(yo_loc) ) # augment the objective values of the constructed dataset during the BO process
batch_size_loc += q1
print('Run %s, Nx %s, Ny %s, init %s, best %s' % (str(j+prev), X_loc.shape[0], y_loc.shape[0], opt[0], opt[-1]))
np.save(case+'/opt_'+str(j+prev)+'.npy',np.array(opt)) # save the constructed objective tensor by RPN-BO
| 5,266 | 33.424837 | 126 |
py
|
rpn_bo
|
rpn_bo-main/Code and results/rpn_bo_architectures.py
|
from jax import numpy as np
from jax import random
def MLP(layers, activation=np.tanh):
def init(rng_key):
def init_layer(key, d_in, d_out):
k1, k2 = random.split(key)
glorot_stddev = 1. / np.sqrt((d_in + d_out) / 2.)
W = glorot_stddev*random.normal(k1, (d_in, d_out))
b = np.zeros(d_out)
return W, b
key, *keys = random.split(rng_key, len(layers))
params = list(map(init_layer, keys, layers[:-1], layers[1:]))
return params
def apply(params, inputs):
for W, b in params[:-1]:
outputs = np.dot(inputs, W) + b
inputs = activation(outputs)
W, b = params[-1]
outputs = np.dot(inputs, W) + b
return outputs
return init, apply
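# Example usage (a minimal sketch): build a 2-16-1 network, draw its
# parameters, and evaluate it on a random batch of 8 inputs.
if __name__ == '__main__':
    init_fn, apply_fn = MLP([2, 16, 1])
    params = init_fn(random.PRNGKey(0))
    x = random.normal(random.PRNGKey(1), (8, 2))
    print(apply_fn(params, x).shape) # (8, 1)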
| 783 | 33.086957 | 69 |
py
|
rpn_bo
|
rpn_bo-main/Code and results/rpn_bo_optimizers.py
|
from scipy.optimize import minimize
def minimize_lbfgs(objective, x0, bnds = None, verbose = False, maxfun = 15000):
if verbose:
def callback_fn(params):
print("Loss: {}".format(objective(params)[0]))
else:
callback_fn = None
result = minimize(objective, x0, jac=True,
method='L-BFGS-B', bounds = bnds,
callback=callback_fn, options = {'maxfun':maxfun})
return result.x, result.fun
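# Example usage (illustrative): the objective must return a (value, gradient)
# pair, since minimize is called with jac=True.
if __name__ == '__main__':
    quad = lambda z: ((z[0] - 3.0)**2, [2.0*(z[0] - 3.0)])
    x_opt, f_opt = minimize_lbfgs(quad, [0.0])
    print(x_opt, f_opt) # approximately [3.], 0.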
| 480 | 33.357143 | 80 |
py
|
rpn_bo
|
rpn_bo-main/Code and results/optical_interferometer_MLP_step_0.py
|
from jax import numpy as np
from jax.scipy.special import logsumexp
from jax import vmap
N_y = 64 # each frame is an N_y by N_y image
xx, yy = np.meshgrid( np.arange(N_y) / N_y, np.arange(N_y) / N_y )
# prediction function mapping vectorial output to scalar objective value
def output(y):
y = y.reshape((16,N_y,N_y)) # form the 16 frames, from the vectorial shaped tensors
intens = np.exp(-((xx - 0.5) ** 2 + (yy - 0.5) ** 2) / (0.95) ** 2) * y
ivec = np.sum(intens, axis = (-1, -2))
smax = logsumexp(ivec, -1)
smin = -logsumexp(-ivec, -1)
return - (smax - smin) / (smax + smin)
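# The returned value is the negated fringe visibility V = (I_max - I_min)/(I_max + I_min)
# over the 16 frames, with logsumexp acting as a smooth surrogate for the hard
# max/min so the objective remains differentiable; minimizing it maximizes V.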
case_l = ['results/optical_interferometer_MLP']
# create new files for the vectorial inputs, vectorial outputs, and best objective values, which will be augmented with newly acquired points during the BO process
for case in case_l:
for prev in range(5):
X = np.load(case+'/X_'+str(prev)+'.npy')
y = np.load(case+'/y_'+str(prev)+'.npy')
yo = vmap(output)(y)
np.save(case+'/opt_'+str(prev)+'.npy',np.array(np.min(yo))[None])
np.save(case+'/X_loc_'+str(prev)+'.npy',X)
np.save(case+'/y_loc_'+str(prev)+'.npy',y)
| 1,179 | 37.064516 | 144 |
py
|
rpn_bo
|
rpn_bo-main/Code and results/rpn_bo_acquisitions.py
|
from jax import numpy as np
from jax import jit, vjp, random
from jax.scipy.special import expit as sigmoid
import numpy as onp
from functools import partial
from pyDOE import lhs
from tqdm import trange
from rpn_bo_optimizers import minimize_lbfgs
class MCAcquisition:
def __init__(self, posterior, bounds, *args,
acq_fn = 'LCB', output_weights=lambda x: np.ones(x.shape[0])):
self.posterior = posterior
self.bounds = bounds # domain bounds
self.args = args # arguments required by different acquisition functions
self.acq_fn = acq_fn # a string indicating the chosen acquisition function
self.weights = output_weights # a callable function returning the likelihood weighted weights
def evaluate(self, x):
# Inputs are (q x d), use vmap to vectorize across a batch
# samples[:,:,0] corresponds to the objective function
# samples[:,:,1:] corresponds to the constraints
# samples[:,:,i] are (ensemble_size x q)
q = x.shape[0]
# Common acquisition functions
if self.acq_fn == 'EI':
best = self.args[0]
samples = self.posterior(x)[:,:,0]
reparam = np.maximum(best-samples, 0)
EI = np.mean(np.max(reparam, axis=-1))
return -EI
elif self.acq_fn == 'LCB':
kappa = self.args[0]
samples = self.posterior(x)[:,:,0]
mu = np.mean(samples, axis=0, keepdims=True)
weights = self.weights(x).reshape(1,q)
reparam = mu - np.sqrt(0.5*np.pi*kappa) * weights * np.abs(samples - mu)
LCB = np.mean(np.min(reparam, axis=-1))
return LCB
elif self.acq_fn == 'TS':
rng_key = self.args[0]
samples = self.posterior(x)[:,:,0]
idx = random.randint(rng_key, (1,), minval=0, maxval=samples.shape[0])
reparam = samples[idx,:].reshape(1,q)
TS = np.mean(np.min(reparam, axis=-1))
return TS
elif self.acq_fn == 'US':
samples = self.posterior(x)[:,:,0]
mu = np.mean(samples, axis=0, keepdims=True)
weights = self.weights(x).reshape(1,q)
reparam = np.sqrt(0.5*np.pi) * weights * np.abs(samples - mu)
US = np.mean(np.max(reparam, axis=-1))
return -US
elif self.acq_fn == 'CLSF':
kappa = self.args[0]
samples = self.posterior(x)[:,:,0]
mu = np.mean(samples, axis=0, keepdims=True)
weights = self.weights(x).reshape(1,q)
reparam = np.abs(np.sqrt(0.5*np.pi) / (np.abs(mu)**(1.0/kappa) + 1e-8) * weights * np.abs(samples - mu))
CLSF = np.mean(np.max(reparam, axis=-1))
return -np.log(CLSF)
# Constrained acquisition functions
elif self.acq_fn == 'EIC':
best = self.args[0]
samples = self.posterior(x)
# Objective
objective = samples[:,:,0]
reparam = np.maximum(best-objective, 0)
EI = np.mean(np.max(reparam, axis=-1))
# Constraints
constraints = samples[:,:,1:]
indicator = sigmoid(constraints/1e-6) # a smooth indicator function
feasible = np.prod(np.mean(np.max(indicator, axis=1), axis=0))
return -EI*feasible
elif self.acq_fn == 'LCBC':
kappa = self.args[0]
threshold = self.args[1]
samples = self.posterior(x)
# Objective
objective = samples[:,:,0]
mu = np.mean(objective, axis=0, keepdims=True)
weights = self.weights(x).reshape(1,q)
reparam = mu - threshold - np.sqrt(0.5*np.pi*kappa) * weights * np.abs(objective - mu)
LCB = np.mean(np.min(reparam, axis=-1))
# Constraints
constraints = samples[:,:,1:] # (ensemble_size x q)
indicator = sigmoid(constraints/1e-6) # a smooth indicator function
feasible = np.prod(np.mean(np.max(indicator, axis=1), axis=0))
return LCB*feasible
# That's all for now..
else:
raise NotImplementedError
@partial(jit, static_argnums=(0,))
def acq_value_and_grad(self, inputs):
primals, f_vjp = vjp(self.evaluate, inputs)
grads = f_vjp(np.ones_like(primals))[0]
return primals, grads
    # the acquisition is optimized over the input domain given by bounds with multi-restart L-BFGS
def next_best_point(self, q = 1, num_restarts = 10, seed_id=0, maxfun=15000):
lb, ub = self.bounds
dim = lb.shape[0]
# Define objective that returns float64 NumPy arrays
def objective(x):
x = x.reshape(q, dim)
value, grads = self.acq_value_and_grad(x)
out = (onp.array(value, dtype=onp.float64),
onp.array(grads.flatten(), dtype=onp.float64))
return out
# Optimize with random restarts
loc, acq = [], []
onp.random.seed(seed_id)
init = lb + (ub-lb)*lhs(dim, q*num_restarts)
x0 = init.reshape(num_restarts, q, dim)
dom_bounds = tuple(map(tuple, np.tile(np.vstack((lb, ub)).T,(q,1))))
for i in trange(num_restarts):
pos, val = minimize_lbfgs(objective, x0[i,:,:].flatten(), bnds = dom_bounds, maxfun=maxfun)
loc.append(pos)
acq.append(val)
loc = np.vstack(loc)
acq = np.vstack(acq)
idx_best = np.argmin(acq)
x_new = loc[idx_best,:]
return x_new
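# Example usage (a minimal sketch with a toy stand-in for a trained RPN): the
# posterior callable must map a (q, d) batch to samples of shape
# (ensemble_size, q, 1); here 8 vertically shifted quadratics play that role.
if __name__ == '__main__':
    def toy_posterior(x):
        offsets = np.linspace(-0.1, 0.1, 8).reshape(8, 1)
        vals = np.sum((x - 0.5)**2, axis=-1)[None, :] + offsets
        return vals[:, :, None]
    acq = MCAcquisition(toy_posterior, (np.zeros(2), np.ones(2)), 2.0, acq_fn='LCB')
    print(acq.next_best_point(q=1, num_restarts=2)) # should approach [0.5, 0.5]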
| 5,600 | 42.418605 | 116 |
py
|
rpn_bo
|
rpn_bo-main/Code and results/rpn_bo_models.py
|
from jax import numpy as np
from jax import grad, vmap, random, jit
from jax.example_libraries import optimizers
from jax.nn import relu, gelu
from functools import partial
from tqdm import trange
import itertools
from rpn_bo_architectures import MLP
class EnsembleRegression:
def __init__(self, layers, ensemble_size, rng_key = random.PRNGKey(0), activation=np.tanh):
# Network initialization and evaluation functions
self.init, self.apply = MLP(layers, activation)
self.init_prior, self.apply_prior = MLP(layers, activation)
# Random keys
k1, k2, k3 = random.split(rng_key, 3)
keys_1 = random.split(k1, ensemble_size)
keys_2 = random.split(k2, ensemble_size)
        keys_3 = random.split(k3, ensemble_size)
# Initialize
params = vmap(self.init)(keys_1)
params_prior = vmap(self.init_prior)(keys_2)
# Use optimizers to set optimizer initialization and update functions
lr = optimizers.exponential_decay(1e-3, decay_steps=1000, decay_rate=0.999)
self.opt_init, \
self.opt_update, \
self.get_params = optimizers.adam(lr)
self.opt_state = vmap(self.opt_init)(params)
self.prior_opt_state = vmap(self.opt_init)(params_prior)
self.key_opt_state = vmap(self.opt_init)(keys_3)
# Logger
self.itercount = itertools.count()
self.loss_log = []
# Define the forward pass
def net_forward(self, params, params_prior, inputs):
Y_pred = self.apply(params, inputs) + self.apply_prior(params_prior, inputs)
return Y_pred
def loss(self, params, params_prior, batch):
inputs, targets = batch
# Compute forward pass
outputs = vmap(self.net_forward, (None, None, 0))(params, params_prior, inputs)
# Compute loss
loss = np.mean((targets - outputs)**2)
return loss
# Define the update step
def step(self, i, opt_state, prior_opt_state, key_opt_state, batch):
params = self.get_params(opt_state)
params_prior = self.get_params(prior_opt_state)
g = grad(self.loss)(params, params_prior, batch)
return self.opt_update(i, g, opt_state)
def monitor_loss(self, opt_state, prior_opt_state, batch):
params = self.get_params(opt_state)
params_prior = self.get_params(prior_opt_state)
loss_value = self.loss(params, params_prior, batch)
return loss_value
# Optimize parameters in a loop
def train(self, dataset, nIter = 1000):
data = iter(dataset)
pbar = trange(nIter)
# Define vectorized SGD step across the entire ensemble
v_step = jit(vmap(self.step, in_axes = (None, 0, 0, 0, 0)))
v_monitor_loss = jit(vmap(self.monitor_loss, in_axes = (0, 0, 0)))
# Main training loop
for it in pbar:
batch = next(data)
self.opt_state = v_step(it, self.opt_state, self.prior_opt_state, self.key_opt_state, batch)
# Logger
if it % 100 == 0:
loss_value = v_monitor_loss(self.opt_state, self.prior_opt_state, batch)
self.loss_log.append(loss_value)
pbar.set_postfix({'Max loss': loss_value.max()})
# Evaluates predictions at test points
@partial(jit, static_argnums=(0,))
def posterior(self, params, inputs):
params, params_prior = params
samples = vmap(self.net_forward, (0, 0, 0))(params, params_prior, inputs)
return samples
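# The ensemble implements the randomized-prior trick: each member predicts
# trainable_net(x) + frozen_prior_net(x), and only the trainable part is
# updated, so the fixed random priors keep members diverse and the spread of
# the ensemble serves as an approximate epistemic uncertainty estimate.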
class ParallelDeepOnet:
def __init__(self, branch_layers, trunk_layers, N_ensemble, dim):
self.dim = dim
# Network initialization and evaluation functions
        self.branch_init, self.branch_apply = MLP(branch_layers, activation=relu)  # alternatively: gelu
self.branch_init_prior, self.branch_apply_prior = MLP(branch_layers, activation=relu)
self.trunk_init, self.trunk_apply = MLP(trunk_layers, activation=relu)
self.trunk_init_prior, self.trunk_apply_prior = MLP(trunk_layers, activation=relu)
# Initialize
v_branch_params = vmap(self.branch_init)(random.split(random.PRNGKey(1234), N_ensemble))
v_branch_params_prior = vmap(self.branch_init_prior)(random.split(random.PRNGKey(123), N_ensemble))
v_trunk_params = vmap(self.trunk_init)(random.split(random.PRNGKey(4321), N_ensemble))
v_trunk_params_prior = vmap(self.trunk_init_prior)(random.split(random.PRNGKey(321), N_ensemble))
        # Initialize the output-mixing weight W with 0.1 for all elements
        W = 0.1*np.ones((N_ensemble, branch_layers[-1], self.dim))
        # Alternatively, initialize W with Xavier initialization (useful for checking that the method works:
        # if all output dimensions took the same value, the constant W above would yield identical predictions)
        # glorot_stddev = 1. / np.sqrt((branch_layers[-1] + self.dim) / 2.)
        # W = glorot_stddev*random.normal(random.PRNGKey(123), (N_ensemble, branch_layers[-1], self.dim))
v_params = (v_branch_params, v_trunk_params, W)
v_params_prior = (v_branch_params_prior, v_trunk_params_prior)
# Use optimizers to set optimizer initialization and update functions
lr = optimizers.exponential_decay(1e-3,decay_steps=1000,decay_rate=0.999)
# lr = 1e-4
self.opt_init, \
self.opt_update, \
self.get_params = optimizers.adam(lr)
self.v_opt_state = vmap(self.opt_init)(v_params)
self.v_prior_opt_state = vmap(self.opt_init)(v_params_prior)
# Logger
self.itercount = itertools.count()
self.loss_log = []
# Define the operator net
def operator_net(self, params, params_prior, u, y):
branch_params, trunk_params, W = params
branch_params_prior, trunk_params_prior = params_prior
B = self.branch_apply(branch_params, u) + self.branch_apply_prior(branch_params_prior, u)
T = self.trunk_apply(trunk_params, y) + self.trunk_apply_prior(trunk_params_prior, y)
#outputs = np.sum(B * T)
outputs = np.dot(B * T, W)
return outputs
@partial(jit, static_argnums=(0,))
def loss(self, params, params_prior, batch):
# Fetch data
# inputs: (u, y), shape = (N, m), (N,1)
# outputs: s, shape = (N,1)
inputs, outputs = batch
u, y = inputs
s, w = outputs
# Compute forward pass
pred = vmap(self.operator_net, (None, None, 0, 0))(params, params_prior, u, y)
# Compute loss
loss = np.mean(1./w**2 * (s - pred)**2)
return loss
# Define a compiled update step
# @partial(jit, static_argnums=(0,))
def step(self, i, opt_state, prior_opt_state, batch):
params = self.get_params(opt_state)
params_prior = self.get_params(prior_opt_state)
g = grad(self.loss, argnums = 0)(params, params_prior, batch)
return self.opt_update(i, g, opt_state)
# Optimize parameters in a loop
def train(self, dataset, nIter = 10000):
data = iter(dataset)
pbar = trange(nIter)
# Define v_step that vectorize the step operation
self.v_step = jit(vmap(self.step, in_axes = [None, 0, 0, 0]))
# Main training loop
for it in pbar:
batch = next(data)
self.v_opt_state = self.v_step(it, self.v_opt_state, self.v_prior_opt_state, batch)
# Logger
if it % 200 == 0:
params = vmap(self.get_params)(self.v_opt_state)
params_prior = vmap(self.get_params)(self.v_prior_opt_state)
branch_params_prior, trunk_params_prior = params_prior
loss_value = vmap(self.loss, (0, 0, 0))(params, params_prior, batch)
self.loss_log.append(loss_value)
pbar.set_postfix({'Max loss': loss_value.max()})
def operator_net_pred_single(self, params, params_prior, U_star, Y_star):
s_pred_single = vmap(self.operator_net, (None, None, 0, 0))(params, params_prior, U_star, Y_star)
return s_pred_single
# Evaluates predictions at test points
@partial(jit, static_argnums=(0,))
def predict_s(self, U_star, Y_star):
params = vmap(self.get_params)(self.v_opt_state)
params_prior = vmap(self.get_params)(self.v_prior_opt_state)
s_pred = vmap(self.operator_net_pred_single, (0, 0, None,None))(params, params_prior, U_star, Y_star)
return s_pred
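# In this DeepONet variant the branch net encodes the design vector u, the
# trunk net encodes the query coordinate y, and outputs = np.dot(B * T, W)
# mixes the shared latent features into soln_dim output channels; vmap over
# the ensemble axis yields N_ensemble independent randomized-prior members.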
| 8,651 | 41.411765 | 121 |
py
|
rpn_bo
|
rpn_bo-main/Code and results/optical_interferometer_DON_step_0.py
|
from jax import numpy as np
from jax.scipy.special import logsumexp
output_dim = (64, 64, 16) # 16 frames of 64 by 64 images
P1 = output_dim[0]
P2 = output_dim[1]
xx, yy = np.meshgrid( np.arange(P1) / P1, np.arange(P2) / P2 )
# prediction function mapping vectorial output to scalar objective value
def output(new_y):
intens = np.exp(-((xx - 0.5) ** 2 + (yy - 0.5) ** 2) / (0.95) ** 2) * new_y
ivec = np.sum(intens, axis = (-1, -2))
smax = logsumexp(ivec, -1)
smin = -logsumexp(-ivec, -1)
return - (smax - smin) / (smax + smin)
case_l = ['results/optical_interferometer_DON']
# create new files for the vectorial inputs, vectorial outputs, and best objective values, which will be augmented with newly acquired points during the BO process
for case in case_l:
for prev in range(5):
X = np.load(case+'/X_'+str(prev)+'.npy')
y = np.load(case+'/y_'+str(prev)+'.npy')
y = y.reshape((y.shape[0],output_dim[2],P1,P2))
errs = [output(yi) for yi in y]
np.save(case+'/opt_'+str(prev)+'.npy',np.array(np.min(np.array(errs)))[None])
y = np.transpose(y, (0, 2, 3, 1))
np.save(case+'/X_loc_'+str(prev)+'.npy',X)
np.save(case+'/y_loc_'+str(prev)+'.npy',y)
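# Note the layout convention: y is saved channels-last, (N, 64, 64, 16), to
# match the DeepONet dataloader, while the visibility objective above expects
# frames-first (16, 64, 64); the transpose performs the conversion.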
| 1,269 | 34.277778 | 144 |
py
|
rpn_bo
|
rpn_bo-main/Code and results/optical_interferometer_MLP_all_steps.py
|
import os
os.environ['XLA_PYTHON_CLIENT_PREALLOCATE']='false'
from jax import vmap, random, jit
from jax import numpy as np
from jax.scipy.special import logsumexp
from jax.nn import relu
from gym_interf import InterfEnv
import numpy as onp
from rpn_bo_utilities import uniform_prior
from rpn_bo_models import EnsembleRegression
from rpn_bo_dataloaders import BootstrapLoader
from rpn_bo_acquisitions import MCAcquisition
onp.random.seed(1234)
# Helper functions
normalize = vmap(lambda x, mu, std: (x-mu)/std, in_axes=(0,0,0))
denormalize = vmap(lambda x, mu, std: x*std + mu, in_axes=(0,0,0))
# vectorial input space dimension and its search space
dim = 4
lb = -np.ones((dim,))
ub = np.ones((dim,))
p_x = uniform_prior(lb, ub)
bounds = (lb, ub)
# vectorial output space dimension
N_y = 64
xx, yy = np.meshgrid( np.arange(N_y) / N_y, np.arange(N_y) / N_y )
dim_y = 16*N_y**2
# function mapping the vectorial input x to the vectorial output consisting of the 16 images
def f(x):
gym = InterfEnv()
gym.reset(actions=(1e-4, 1e-4, 1e-4, 1e-4))
action = x[:4]
state = gym.step(action)
return state[0].flatten()
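# Each call builds a fresh InterfEnv, applies the 4 mirror actions once, and
# flattens the resulting stack of 16 frames of N_y x N_y pixels into a single
# vector of length 16*N_y**2, which the RPN surrogate models end to end.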
#### General simulation params ####
N = 15
prev = 0 # previous independent random runs
nTrSet = 10-prev # total independent random runs to perform
#### RPN-BO hyperparameters ####
num_restarts_acq = 500
nIter = 100 - N
q1 = 1
nIter_q1 = nIter//q1
ensemble_size = 32
batch_size = N
fraction = 0.8
layers = [dim, 64, 64, 16*N_y**2]
nIter_RPN = 5000
options = {'criterion': 'LCB', # 'TS' 'LCB',
'kappa': 2.0,
'weights': None, # exact gmm None
}
train_key = random.PRNGKey(0)
key_TS = random.PRNGKey(123)
case = 'results/optical_interferometer_MLP'
# prediction function mapping vectorial output to scalar objective value
def output(y):
y = y.reshape((16,N_y,N_y))
intens = np.exp(-((xx - 0.5) ** 2 + (yy - 0.5) ** 2) / (0.95) ** 2) * y
ivec = np.sum(intens, axis = (-1, -2))
smax = logsumexp(ivec, -1)
smin = -logsumexp(-ivec, -1)
return - (smax - smin) / (smax + smin)
for j in range(nTrSet):
print('Train Set:',j+1)
# Initial training data
X = np.load(case+'/X_'+str(j+prev)+'.npy')
y = np.load(case+'/y_'+str(j+prev)+'.npy')
X_loc = X
y_loc = y
batch_size_loc = batch_size
# array to contain BO results
yo_loc = vmap(output)(y)
opt = np.array(np.min(yo_loc))[None]
print('Run %s, Nx %s, Ny %s, init %s, best %s' % (str(j+prev), X_loc.shape[0], y_loc.shape[0], opt[0], opt[-1]))
for it in range(nIter_q1):
# Create data set
train_key = random.split(train_key, 2)[0]
dataset = BootstrapLoader(X_loc, y_loc, batch_size_loc, ensemble_size, fraction, 0, rng_key=train_key)
(mu_X, sigma_X), (mu_y, sigma_y) = dataset.norm_const
# Initialize model
train_key = random.split(train_key, 2)[0]
model = EnsembleRegression(layers, ensemble_size, train_key, relu)
# Train model
model.train(dataset, nIter=nIter_RPN)
@jit
def predict(x):
# accepts and returns un-normalized data
x = np.tile(x[np.newaxis,:,:], (ensemble_size, 1, 1))
x = normalize(x, mu_X, sigma_X)
params = vmap(model.get_params)(model.opt_state)
params_prior = vmap(model.get_params)(model.prior_opt_state)
opt_params = (params, params_prior)
samples = model.posterior(opt_params, x)
samples = denormalize(samples, mu_y, sigma_y)
samples = samples.reshape((samples.shape[0],samples.shape[1],16,N_y,N_y))
intens = np.exp(-((xx - 0.5) ** 2 + (yy - 0.5) ** 2) / (0.95) ** 2) * samples
ivec = np.sum(intens, axis = (-1, -2))
smax = logsumexp(ivec, -1)
smin = -logsumexp(-ivec, -1)
v = np.exp( (smax - smin) / (smax + smin) )
return -v[:,:,None]
# Fit GMM if needed for weighted acquisition functions
weights_fn = lambda x: np.ones(x.shape[0],)
if options['criterion']=='TS':
key_TS = random.split(key_TS, 2)[0]
args = (key_TS,)
else:
kappa = options['kappa']
args = (kappa,)
acq_model = MCAcquisition(predict,
bounds,
*args,
acq_fn = options['criterion'],
output_weights=weights_fn)
        # Optimize acquisition with L-BFGS to acquire new point(s)
new_X = acq_model.next_best_point(q = q1, num_restarts = num_restarts_acq)
new_X = new_X.reshape(q1,dim)
# Obtain the new vectorial output
new_y = []
for i in range(new_X.shape[0]):
new_y.append(f(new_X[i,:]))
new_y = np.array(new_y)
# Augment training data
X_loc = np.concatenate([X_loc, new_X], axis = 0) # augment the vectorial input dataset during the BO process
y_loc = np.concatenate([y_loc, new_y], axis = 0) # augment the vectorial output dataset during the BO process
opt = np.concatenate( ( opt, np.minimum(opt[-1],output(new_y[0,:]))[None] ) , axis=0 ) # augment the objective values of the constructed dataset during the BO process
        # Save augmented datasets and objective values
np.save(case+'/X_loc_'+str(j+prev)+'.npy', X_loc) # save the constructed vectorial input dataset by RPN-BO
np.save(case+'/y_loc_'+str(j+prev)+'.npy', y_loc) # save the constructed vectorial output dataset by RPN-BO
np.save(case+'/opt_'+str(j+prev)+'.npy',opt) # save the constructed objective tensor by RPN-BO
batch_size_loc += q1
print('Run %s, Nx %s, Ny %s, init %s, best %s' % (str(j+prev), X_loc.shape[0], y_loc.shape[0], opt[0], opt[-1]))
| 5,958 | 35.335366 | 174 |
py
|
rpn_bo
|
rpn_bo-main/Code and results/optical_interferometer_DON_step_1.py
|
import os
os.environ['XLA_PYTHON_CLIENT_PREALLOCATE']='false'
from jax import vmap, random, jit
from jax import numpy as np
from jax.scipy.special import logsumexp
from rpn_bo_models import ParallelDeepOnet
from rpn_bo_dataloaders import DataGenerator_batch
from rpn_bo_acquisitions import MCAcquisition
from rpn_bo_utilities import uniform_prior
# vectorial input space dimension and its search space
dim = 4
lb = -np.ones((dim,))
ub = np.ones((dim,))
p_x = uniform_prior(lb, ub)
# vectorial output space dimension
output_dim = (64, 64, 16)
soln_dim = output_dim[2]
P1 = output_dim[0]
P2 = output_dim[1]
xx, yy = np.meshgrid( np.arange(P1) / P1, np.arange(P2) / P2 )
# initial training space
case = 'results/optical_interferometer_DON'
prev = 0
X = np.load(case+'/X_loc_'+str(prev)+'.npy')
y = np.load(case+'/y_loc_'+str(prev)+'.npy')
opt = np.load(case+'/opt_'+str(prev)+'.npy')
N = X.shape[0]
#### RPN-BO hyperparameters ####
num_restarts_acq = 500
q1 = 1
N_ensemble = 16
fraction = 1
branch_layers = [dim, 32, 32]
trunk_layers = [2, 32, 32]
nIter_RPN = 5000
acq_fct = 'EI'
batch_size = P1 * P2
batch_size_all = int(fraction * P1 * P2 * N)
#### DeepONet functional evaluation points ####
arr_s = np.linspace(0, 1, P1)
arr_t = np.linspace(0, 1, P2)
s_grid, t_grid = np.meshgrid(arr_s, arr_t)
y_grid = np.concatenate([s_grid[:, :, None], t_grid[:, :, None]], axis=-1).reshape((-1, 2))
mu_grid = y_grid.mean(0)
sigma_grid = y_grid.std(0)
y_grid = (y_grid - mu_grid) / sigma_grid
train_key = random.PRNGKey(0)
key_TS = random.PRNGKey(123)
print('Run %s, Nx %s, Ny %s, init %s, best %s' % (str(prev), X.shape[0], y.shape[0], opt[0], np.min(opt)))
# Change random seed for different optimization iterations and different random independent runs
train_key = random.split(train_key, 2)[0]
key_TS = random.split(key_TS, 2)[0]
for _ in range(prev):
    for _ in range(85):
        train_key = random.split(train_key, 2)[0]
        key_TS = random.split(key_TS, 2)[0]
for _ in range(N-15):
    train_key = random.split(train_key, 2)[0]
    key_TS = random.split(key_TS, 2)[0]
# Create data set
mu_X = np.zeros(X.shape[1],)
sigma_X = np.ones(X.shape[1],)
mu_y = np.zeros((y.shape[1],y.shape[2],y.shape[3]))
sigma_y = np.max(np.abs(y)) * np.ones((y.shape[1],y.shape[2],y.shape[3]))
u0_train = (X - mu_X) / sigma_X
usol_train = (y - mu_y) / sigma_y
dataset = DataGenerator_batch(usol_train, u0_train, arr_s, arr_t, P1=P1, P2=P2, batch_size=batch_size, batch_size_all=batch_size_all, N_ensemble=N_ensemble, rng_key=train_key, y=y_grid)
# Initialize model
train_key = random.split(train_key, 2)[0]
model = ParallelDeepOnet(branch_layers, trunk_layers, N_ensemble, soln_dim)
# Train model
model.train(dataset, nIter=nIter_RPN)
@jit
def predict(x):
x = (x - mu_X) / sigma_X
u_test_sample = vmap(lambda x: np.tile(x, (P1 * P2, 1)))(x)
samples = model.predict_s(u_test_sample.reshape((-1, dim)), np.tile(y_grid, (x.shape[0], 1)))
samples = samples.reshape((-1, P1, P2, samples.shape[-1]))
samples = vmap(lambda s: s * sigma_y + mu_y)(samples)
samples = samples.reshape((N_ensemble, x.shape[0], P1, P2, samples.shape[-1]))
samples = np.transpose(samples, (0, 1, 4, 2, 3))
intens = np.exp(-((xx - 0.5) ** 2 + (yy - 0.5) ** 2) / (0.95) ** 2) * samples
ivec = np.sum(intens, axis = (-1, -2))
smax = logsumexp(ivec, -1)
smin = -logsumexp(-ivec, -1)
v = np.exp( (smax - smin) / (smax + smin) )
return -v[:, :, None]
kappa = 2
weights_fn = lambda x: np.ones(x.shape[0])
if acq_fct == 'EI':
args = (opt[-1], )
elif acq_fct == 'TS':
key_TS = random.split(key_TS, 2)[0]
args = (key_TS, )
elif acq_fct == 'LCB':
weights_fn = lambda x: np.ones(x.shape[0],)
args = (kappa,)
acq_model = MCAcquisition(predict,
(lb, ub),
*args,
acq_fn=acq_fct,
output_weights=weights_fn)
# Optimize acquisition with L-BFGS to acquire new point(s)
new_X = acq_model.next_best_point(q=q1, num_restarts=num_restarts_acq, seed_id=85*prev + (N-15))
X = np.concatenate([X, new_X[None, :]]) # augment the vectorial input dataset during the BO process
np.save(case+'/X_loc_'+str(prev)+'.npy',X) # save the constructed vectorial input dataset by RPN-BO
| 4,309 | 33.206349 | 185 |
py
|
rpn_bo
|
rpn_bo-main/Code and results/optical_interferometer_MLP_step_2.py
|
from jax import numpy as np
from jax.scipy.special import logsumexp
from gym_interf import InterfEnv
# function mapping the vectorial input x to the vectorial output consisting of the 16 images
def f(x):
gym = InterfEnv()
gym.reset(actions=(1e-4, 1e-4, 1e-4, 1e-4))
action = x[:4]
state = gym.step(action)
return state[0].flatten()
N_y = 64 # each frame is an N_y by N_y image
xx, yy = np.meshgrid( np.arange(N_y) / N_y, np.arange(N_y) / N_y )
# prediction function mapping vectorial output to scalar objective value
def output(y):
y = y.reshape((16,N_y,N_y))
intens = np.exp(-((xx - 0.5) ** 2 + (yy - 0.5) ** 2) / (0.95) ** 2) * y
ivec = np.sum(intens, axis = (-1, -2))
smax = logsumexp(ivec, -1)
smin = -logsumexp(-ivec, -1)
return - (smax - smin) / (smax + smin)
case = 'results/optical_interferometer_MLP'
prev = 0 # change from 0 to 4 to consider a different random, independent run
X = np.load(case+'/X_loc_'+str(prev)+'.npy') # load vectorial inputs for the constructed dataset so far during the BO process
y = np.load(case+'/y_loc_'+str(prev)+'.npy') # load vectorial outputs for the constructed dataset so far during the BO process
new_y = f(X[-1,:]) # compute the vectorial output (the 16 images) of the newly acquired point
y = np.concatenate([y, new_y[None,:]], axis = 0) # augment the vectorial output dataset during the BO process
np.save(case+'/y_loc_'+str(prev)+'.npy',y) # save the constructed vectorial output dataset by RPN-BO
opt = np.load(case+'/opt_'+str(prev)+'.npy') # load best objective for the constructed dataset so far during the BO process
opt = np.concatenate( ( opt, np.minimum(opt[-1],output(new_y))[None] ) , axis=0 ) # augment the objective values of the constructed dataset during the BO process
np.save(case+'/opt_'+str(prev)+'.npy',opt) # save the constructed objective tensor by RPN-BO
print('new_X: ', X[-1,:], 'new obj:', output(new_y), 'opt obj: ',np.min(np.array(opt))) # output the newly acquired point, its corresponding objective value, and the best objective value so far in the BO process
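# Taken together, the three *_step_* scripts form one manually driven BO
# iteration: step_0 initializes the bookkeeping files, step_1 trains the RPN
# surrogate and appends a new candidate to X_loc, and this step_2 script runs
# the expensive simulator on that candidate and appends the result to y_loc
# and opt; alternating steps 1 and 2 reproduces the full loop.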
| 2,088 | 51.225 | 211 |
py
|
rpn_bo
|
rpn_bo-main/Code and results/optical_interferometer_DON_step_2.py
|
from jax import numpy as np
from jax.scipy.special import logsumexp
from gym_interf import InterfEnv
output_dim = (64, 64, 16) # 16 frames of 64 by 64 images
soln_dim = output_dim[2]
P1 = output_dim[0]
P2 = output_dim[1]
# function mapping the vectorial input x to the vectorial output consisting of the 16 images
def f(x):
gym = InterfEnv()
gym.reset(actions=(1e-4, 1e-4, 1e-4, 1e-4))
action = x[:4]
state = gym.step(action)
return state[0]
xx, yy = np.meshgrid( np.arange(P1) / P1, np.arange(P2) / P2 )
# prediction function mapping vectorial output to scalar objective value
def output(new_y):
intens = np.exp(-((xx - 0.5) ** 2 + (yy - 0.5) ** 2) / (0.95) ** 2) * new_y
ivec = np.sum(intens, axis = (-1, -2))
smax = logsumexp(ivec, -1)
smin = -logsumexp(-ivec, -1)
return - (smax - smin) / (smax + smin)
case = 'results/optical_interferometer_DON'
prev = 0 # change from 0 to 4 to consider a different random, independent run
X = np.load(case+'/X_loc_'+str(prev)+'.npy') # load vectorial inputs for the constructed dataset so far during the BO process
y = np.load(case+'/y_loc_'+str(prev)+'.npy') # load vectorial outputs for the constructed dataset so far during the BO process
new_y = f(X[-1,:]) # compute the vectorial output (the 16 images) of the newly acquired point
y = np.concatenate([ y, np.transpose(new_y[None, :, :, :], (0, 2, 3, 1)) ]) # augment the vectorial output dataset during the BO process
np.save(case+'/y_loc_'+str(prev)+'.npy',y) # save the constructed vectorial output dataset by RPN-BO
opt = np.load(case+'/opt_'+str(prev)+'.npy') # load best objective for the constructed dataset so far during the BO process
opt = np.concatenate( ( opt, np.minimum(opt[-1],output(new_y))[None] ) , axis=0 )
np.save(case+'/opt_'+str(prev)+'.npy',opt) # save the constructed objective tensor by RPN-BO
print('new_X: ', X[-1,:], 'new obj:', output(new_y), 'min obj: ',np.min(np.array(opt)))
| 1,953 | 45.52381 | 136 |
py
|
rpn_bo
|
rpn_bo-main/Code and results/optical_interferometer_MLP_step_1.py
|
import os
os.environ['XLA_PYTHON_CLIENT_PREALLOCATE']='false'
from jax import vmap, random, jit
from jax import numpy as np
from jax.scipy.special import logsumexp
from jax.nn import relu
import numpy as onp
from rpn_bo_models import EnsembleRegression
from rpn_bo_dataloaders import BootstrapLoader
from rpn_bo_acquisitions import MCAcquisition
from rpn_bo_utilities import uniform_prior
onp.random.seed(1234)
# Helper functions
normalize = vmap(lambda x, mu, std: (x-mu)/std, in_axes=(0,0,0))
denormalize = vmap(lambda x, mu, std: x*std + mu, in_axes=(0,0,0))
# vectorial input space dimension and its search space
dim = 4
lb = -np.ones((dim,))
ub = np.ones((dim,))
p_x = uniform_prior(lb, ub)
bounds = (lb, ub)
# vectorial output space dimension
N_y = 64
xx, yy = np.meshgrid( np.arange(N_y) / N_y, np.arange(N_y) / N_y )
dim_y = 16*N_y**2
# initial training space
case = 'results/optical_interferometer_MLP'
prev = 0
X = np.load(case+'/X_loc_'+str(prev)+'.npy')
y = np.load(case+'/y_loc_'+str(prev)+'.npy')
opt = np.load(case+'/opt_'+str(prev)+'.npy')
N = X.shape[0]
#### RPN-BO hyperparameters ####
num_restarts_acq = 500
q1 = 1
ensemble_size = 32
batch_size = N
fraction = 1
layers = [dim, 64, 64, dim_y]
nIter_RPN = 5000
options = {'criterion': 'LCB', # 'TS' 'LCB', EI
'kappa': 2.0,
'weights': None, # exact gmm None
}
train_key = random.PRNGKey(0)
key_TS = random.PRNGKey(123)
print('Run %s, Nx %s, Ny %s, init %s, best %s' % (str(prev), X.shape[0], y.shape[0], opt[0], np.min(opt)))
# Change random seed for different optimization iterations and different random independent runs
train_key = random.split(train_key, 2)[0]
key_TS = random.split(key_TS, 2)[0]
for _ in range(prev):
    for _ in range(85):
        train_key = random.split(train_key, 2)[0]
        key_TS = random.split(key_TS, 2)[0]
for _ in range(N-15):
    train_key = random.split(train_key, 2)[0]
    key_TS = random.split(key_TS, 2)[0]
# Create data set
dataset = BootstrapLoader(X, y, batch_size, ensemble_size, fraction, 0, rng_key=train_key)
(mu_X, sigma_X), (mu_y, sigma_y) = dataset.norm_const
# Initialize model
train_key = random.split(train_key, 2)[0]
model = EnsembleRegression(layers, ensemble_size, train_key, relu)
# Train model
model.train(dataset, nIter=nIter_RPN)
# prediction function using trained RPN
# mapping vectorial input to scalar objective value
@jit
def predict(x):
# accepts and returns un-normalized data
x = np.tile(x[np.newaxis,:,:], (ensemble_size, 1, 1))
x = normalize(x, mu_X, sigma_X)
params = vmap(model.get_params)(model.opt_state)
params_prior = vmap(model.get_params)(model.prior_opt_state)
opt_params = (params, params_prior)
samples = model.posterior(opt_params, x)
samples = denormalize(samples, mu_y, sigma_y)
samples = samples.reshape((samples.shape[0],samples.shape[1],16,N_y,N_y))
intens = np.exp(-((xx - 0.5) ** 2 + (yy - 0.5) ** 2) / (0.95) ** 2) * samples
ivec = np.sum(intens, axis = (-1, -2))
smax = logsumexp(ivec, -1)
smin = -logsumexp(-ivec, -1)
v = np.exp( (smax - smin) / (smax + smin) )
return -v[:,:,None]
# Fit GMM if needed for weighted acquisition functions
weights_fn = lambda x: np.ones(x.shape[0],)
if options['criterion']=='TS':
args = (key_TS,)
else:
kappa = options['kappa']
args = (kappa,)
acq_model = MCAcquisition(predict,
bounds,
*args,
acq_fn = options['criterion'],
output_weights=weights_fn)
# Optimize acquisition with L-BFGS to acquire new point(s)
new_X = acq_model.next_best_point(q = q1, num_restarts = num_restarts_acq, seed_id = 85*prev + (N-15))
new_X = new_X.reshape(q1,dim)
X = np.concatenate([X, new_X], axis = 0) # augment the vectorial input dataset during the BO process
np.save(case+'/X_loc_'+str(prev)+'.npy',X) # save the constructed vectorial input dataset by RPN-BO
| 3,973 | 31.842975 | 106 |
py
|
rpn_bo
|
rpn_bo-main/Code and results/rpn_bo_utilities.py
|
from jax import numpy as np
from jax import jit, vmap, random
from jax.scipy.stats import multivariate_normal, uniform
import numpy as onp
from scipy.stats import gaussian_kde
from sklearn import mixture
from pyDOE import lhs
from KDEpy import FFTKDE
def fit_kde(predict_fn, prior_pdf, bounds, num_samples=10000, bw=None):
onp.random.seed(1)
lb, ub = bounds
dim = lb.shape[0]
X = lb + (ub-lb)*lhs(dim, num_samples)
y = predict_fn(X)
weights = prior_pdf(X)
y, weights = onp.array(y), onp.array(weights)
y = y.flatten()
if bw is None:
try:
sc = gaussian_kde(y, weights=weights)
bw = onp.sqrt(sc.covariance).flatten()[0]
except:
bw = 1.0
if bw < 1e-8:
bw = 1.0
kde_pdf_x, kde_pdf_y = FFTKDE(bw=bw).fit(y, weights).evaluate()
return kde_pdf_x, kde_pdf_y
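# fit_kde estimates the push-forward density p(y) of the surrogate output under
# the input prior: it draws LHS samples of x, evaluates predict_fn, and fits a
# prior-weighted FFT-based KDE, falling back to a unit bandwidth whenever the
# weighted Scott estimate fails or degenerates.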
def fit_gmm(predict_fn, prior_pdf, bounds, num_samples, num_comp):
onp.random.seed(0)
lb, ub = bounds
dim = lb.shape[0]
# Evaluate input prior
X = lb + (ub-lb)*lhs(dim, num_samples)
p_x = prior_pdf(X)[:,None]
# Interpolate output KDE
y = predict_fn(X)
kde_pdf_x, kde_pdf_y = fit_kde(predict_fn, prior_pdf, bounds)
p_y = np.clip(np.interp(y, kde_pdf_x, kde_pdf_y), a_min=0.0) + 1e-8
# Weights
weights = p_x/p_y
# Rescale weights as probability distribution
weights = onp.array(weights, dtype = onp.float64)
weights = weights / onp.sum(weights)
# Scale inputs to [0, 1]^D
lb, ub = bounds
X = (X - lb) / (ub - lb)
# Sample from analytical w
indices = np.arange(num_samples)
idx = onp.random.choice(indices, num_samples, p=weights.flatten())
X_train = X[idx]
# fit GMM
clf = mixture.GaussianMixture(n_components=num_comp,
covariance_type='full')
clf.fit(onp.array(X_train, dtype=np.float64))
out = (np.array(clf.weights_),
np.array(clf.means_),
np.array(clf.covariances_))
return out
def output_weights(predict_fn, prior_pdf, bounds, method='exact', num_samples=10000, num_comp=2):
# Compute exact likelihood ratio
if method == 'exact':
onp.random.seed(0)
lb, ub = bounds
dim = lb.shape[0]
X = lb + (ub-lb)*lhs(dim, num_samples)
kde_pdf_x, kde_pdf_y = fit_kde(predict_fn, prior_pdf, bounds)
p_x = lambda x: prior_pdf(x)[:,None]
p_y = lambda x: np.clip(np.interp(predict_fn(x), kde_pdf_x, kde_pdf_y), a_min=0.0) + 1e-8
ratio = lambda x: p_x(x)/p_y(x)
volume = np.prod(ub-lb)
norm_const = np.mean(ratio(X))*volume
def compute_w(x):
w = ratio(x)/norm_const
return w.flatten()
# GMM approximation
elif method == 'gmm':
gmm_vars = fit_gmm(predict_fn, prior_pdf, bounds, num_samples, num_comp)
def compute_w(x):
# expects normalized inputs
weights, means, covs = gmm_vars
lb, ub = bounds
x = (x - lb) / (ub - lb)
gmm_mode = lambda w, mu, cov: w*multivariate_normal.pdf(x, mu, cov)
w = np.sum(vmap(gmm_mode)(weights, means, covs), axis = 0)
return w/np.prod(ub-lb)
elif method == 'None':
compute_w = lambda x: np.ones(x.shape[0])
else:
raise NotImplementedError
return jit(compute_w)
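# output_weights returns w(x) proportional to p(x)/p(y(x)), the likelihood
# ratio behind output-weighted ('LW') acquisition functions: inputs whose
# predicted outputs are rare under the current push-forward density are
# up-weighted, biasing the search toward extreme objective values.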
# Helper functions for computing output-weighted acquisitions
class uniform_prior:
def __init__(self, lb, ub):
self.lb = lb
self.ub = ub
self.dim = lb.shape[0]
def sample(self, rng_key, N):
return self.lb + (self.ub-self.lb)*random.uniform(rng_key, (N, self.dim))
def pdf(self, x):
return np.sum(uniform.pdf(x, self.lb, self.ub-self.lb), axis=-1)
class gaussian_prior:
def __init__(self, mu, cov):
self.mu = mu
self.cov = cov
self.dim = mu.shape[0]
def sample(self, rng_key, N):
return random.multivariate_normal(rng_key, self.mu, self.cov, (N,))
def pdf(self, x):
return multivariate_normal.pdf(x, self.mu, self.cov)
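# Example usage (illustrative): draw 4 samples from a uniform prior on the
# unit box. Note that pdf sums (rather than multiplies) the per-dimension
# densities, so for a box prior it returns a constant equal to the dimension;
# this only matters up to normalization when used as a relative weight.
if __name__ == '__main__':
    prior = uniform_prior(np.zeros(2), np.ones(2))
    xs = prior.sample(random.PRNGKey(0), 4)
    print(xs.shape, prior.pdf(xs)) # (4, 2) and an array of 2.0s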
| 4,097 | 34.327586 | 97 |
py
|
rpn_bo
|
rpn_bo-main/Code and results/create_BO_cv_plots.py
|
problem = 'comp_blades_shape' # choose from 'environment' 'brusselator' 'optical_interferometer' 'comp_blades_shape'
from matplotlib import pyplot as plt
plt.close('all')
plt.rcParams.update(plt.rcParamsDefault)
plt.rcParams.update({'font.weight': 'bold',
'font.size': 28,
'lines.linewidth': 1.5,
'axes.labelsize': 36,
'axes.titlesize': 36,
'xtick.labelsize': 28,
'ytick.labelsize': 28,
'legend.fontsize': 36,
'axes.linewidth': 4,
"pgf.texsystem": "pdflatex", # change this if using xetex or lautex
"text.usetex": True, # use LaTeX to write all text
})
plt.rcParams['text.latex.preamble'] = r'\usepackage{sfmath} \boldmath' # a plain string; newer matplotlib rejects list-valued preambles
import torch
import numpy as np
dat_file = np.load("./HOGP_results.npz", allow_pickle = True)
dat = dat_file["obj"].reshape(-1)[0]
from scipy import stats
################################################
############### environment ####################
################################################
if problem == 'environment':
N = 5
nIter = 30
q1 = 1
nTrSet = 10
dispersion_scale = 0.2
case = 'results/environmental_model_function_MLP_LCB'
case2 = 'results/environmental_model_function_MLP_EI'
case3 = 'results/environmental_model_function_MLP_TS'
case4 = 'results/environmental_model_function_DON_LCB'
case5 = 'results/environmental_model_function_DON_EI'
case6 = 'results/environmental_model_function_DON_TS'
opt_q1_RPN = []
opt_q1_RPN2 = []
opt_q1_RPN3 = []
opt_q1_RPN4 = []
opt_q1_RPN5 = []
opt_q1_RPN6 = []
for j in range(nTrSet):
opt = np.load(case+'/opt_'+str(j)+'.npy')
opt_q1_RPN.append(np.array(opt))
opt2 = np.load(case2+'/opt_'+str(j)+'.npy')
opt_q1_RPN2.append(np.array(opt2))
opt3 = np.load(case3+'/opt_'+str(j)+'.npy')
opt_q1_RPN3.append(np.array(opt3))
opt4 = np.load(case4+'/opt_'+str(j)+'.npy')
opt_q1_RPN4.append(np.array(opt4))
opt5 = np.load(case5+'/opt_'+str(j)+'.npy')
opt_q1_RPN5.append(np.array(opt5))
opt6 = np.load(case6+'/opt_'+str(j)+'.npy')
opt_q1_RPN6.append(np.array(opt6))
opt_q1_RPN = np.array(opt_q1_RPN)
opt_q1_RPN2 = np.array(opt_q1_RPN2)
opt_q1_RPN3 = np.array(opt_q1_RPN3)
opt_q1_RPN4 = np.array(opt_q1_RPN4)
opt_q1_RPN5 = np.array(opt_q1_RPN5)
opt_q1_RPN6 = np.array(opt_q1_RPN6)
m_q1_RPN, std_q1_RPN = np.median(opt_q1_RPN, axis = 0), stats.median_abs_deviation(opt_q1_RPN, axis = 0)
m_q1_RPN2, std_q1_RPN2 = np.median(opt_q1_RPN2, axis = 0), stats.median_abs_deviation(opt_q1_RPN2, axis = 0)
m_q1_RPN3, std_q1_RPN3 = np.median(opt_q1_RPN3, axis = 0), stats.median_abs_deviation(opt_q1_RPN3, axis = 0)
m_q1_RPN4, std_q1_RPN4 = np.median(opt_q1_RPN4, axis = 0), stats.median_abs_deviation(opt_q1_RPN4, axis = 0)
m_q1_RPN5, std_q1_RPN5 = np.median(opt_q1_RPN5, axis = 0), stats.median_abs_deviation(opt_q1_RPN5, axis = 0)
m_q1_RPN6, std_q1_RPN6 = np.median(opt_q1_RPN6, axis = 0), stats.median_abs_deviation(opt_q1_RPN6, axis = 0)
lower_q1_RPN = np.log10(np.clip(m_q1_RPN - dispersion_scale*std_q1_RPN, a_min=0., a_max = np.inf) + 1e-10)
upper_q1_RPN = np.log10(m_q1_RPN + dispersion_scale*std_q1_RPN + 1e-10)
lower_q1_RPN2 = np.log10(np.clip(m_q1_RPN2 - dispersion_scale*std_q1_RPN2, a_min=0., a_max = np.inf) + 1e-10)
upper_q1_RPN2 = np.log10(m_q1_RPN2 + dispersion_scale*std_q1_RPN2 + 1e-10)
lower_q1_RPN3 = np.log10(np.clip(m_q1_RPN3 - dispersion_scale*std_q1_RPN3, a_min=0., a_max = np.inf) + 1e-10)
upper_q1_RPN3 = np.log10(m_q1_RPN3 + dispersion_scale*std_q1_RPN3 + 1e-10)
lower_q1_RPN4 = np.log10(np.clip(m_q1_RPN4 - dispersion_scale*std_q1_RPN4, a_min=0., a_max = np.inf) + 1e-10)
upper_q1_RPN4 = np.log10(m_q1_RPN4 + dispersion_scale*std_q1_RPN4 + 1e-10)
lower_q1_RPN5 = np.log10(np.clip(m_q1_RPN5 - dispersion_scale*std_q1_RPN5, a_min=0., a_max = np.inf) + 1e-10)
upper_q1_RPN5 = np.log10(m_q1_RPN5 + dispersion_scale*std_q1_RPN5 + 1e-10)
lower_q1_RPN6 = np.log10(np.clip(m_q1_RPN6 - dispersion_scale*std_q1_RPN6, a_min=0., a_max = np.inf) + 1e-10)
upper_q1_RPN6 = np.log10(m_q1_RPN6 + dispersion_scale*std_q1_RPN6 + 1e-10)
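    # Curves below show the median over independent runs with a +/- 0.2*MAD
    # dispersion band; the lower band is clipped at zero before the log10
    # transform so the shaded regions stay well-defined on the log-regret axis.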
fig = plt.figure(figsize=(21, 9))
ax = plt.subplot(111)
ax.plot(N+q1*np.arange(nIter+1), np.log10(m_q1_RPN)[:nIter+1], color='black', label = r'\textbf{RPN - MLP - LCB}')
ax.fill_between(N+q1*np.arange(nIter+1), lower_q1_RPN[:nIter+1], upper_q1_RPN[:nIter+1], facecolor='black', alpha=0.3)
ax.plot(N+q1*np.arange(nIter+1), np.log10(m_q1_RPN2)[:nIter+1], color='blue', label = r'\textbf{RPN - MLP - EI}')
ax.fill_between(N+q1*np.arange(nIter+1), lower_q1_RPN2[:nIter+1], upper_q1_RPN2[:nIter+1], facecolor='blue', alpha=0.3)
ax.plot(N+q1*np.arange(nIter+1), np.log10(m_q1_RPN3)[:nIter+1], color='limegreen', label = r'\textbf{RPN - MLP - TS}')
ax.fill_between(N+q1*np.arange(nIter+1), lower_q1_RPN3[:nIter+1], upper_q1_RPN3[:nIter+1], facecolor='limegreen', alpha=0.3)
ax.plot(N+q1*np.arange(nIter+1), np.log10(m_q1_RPN4), '-.', color='black', label = r'\textbf{RPN - DON - LCB}')
    ax.fill_between(N+q1*np.arange(nIter+1), lower_q1_RPN4, upper_q1_RPN4, facecolor='black', alpha=0.3)
ax.plot(N+q1*np.arange(nIter+1), np.log10(m_q1_RPN5), '-.', color='blue', label = r'\textbf{RPN - DON - EI}')
ax.fill_between(N+q1*np.arange(nIter+1), lower_q1_RPN5, upper_q1_RPN5, facecolor='blue', alpha=0.3)
ax.plot(N+q1*np.arange(nIter+1), np.log10(m_q1_RPN6), '-.', color='limegreen', label = r'\textbf{RPN - DON - TS}')
ax.fill_between(N+q1*np.arange(nIter+1), lower_q1_RPN6, upper_q1_RPN6, facecolor='limegreen', alpha=0.3)
plt.xticks(np.arange(N, N+nIter+1, N))
plt.xlim([5,35])
ax.grid(color='Grey', linestyle='-', linewidth=0.5)
sample_means = dat["env_means"]
sample_stds = dat["env_stds"]
keys = dat["env_keys"]
key_dict = {"rnd": r'\textbf{Random}', "rnd_cf": r'\textbf{Random-CF}', "ei": r'\textbf{EI}', "ei_cf": r'\textbf{EI-CF}', \
"ei_hogp_cf": r'\textbf{EI-HOGP-CF}', "ei_hogp_cf_smooth": r'\textbf{EI-HOGP-CF + GP}'}
steps = torch.linspace(5, 35, 30)
for i, key in enumerate(keys):
ax.fill_between(steps,
sample_means[i] - sample_stds[i] / 20**0.5,
sample_means[i] + sample_stds[i] / 20**0.5,
alpha = 0.1)
ax.plot(steps, sample_means[i], '--', linewidth=3, label = key_dict[key])
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
ax.legend(loc='center left', frameon=False, fontsize = 36, bbox_to_anchor=(0.98, 0.5))
plt.xlabel(r'\textbf{Function Evaluations}')
plt.ylabel(r'\textbf{Log10(Regret)}')
plt.savefig('figures/environmental_model_function.png',dpi=300,bbox_inches='tight')
################################################
################ brusselator ###################
################################################
if problem == 'brusselator':
N = 5
nIter = 20
q1 = 1
q2 = 2
nIter_q1 = nIter//q1
nIter_q2 = nIter//q2
nIter_q1_DON = 30
nTrSet = 10
nTrSet_DON = 30
dispersion_scale = 0.2
case = 'results/brusselator_pde_MLP_LCB'
case_EI = 'results/brusselator_pde_MLP_EI'
case_q2 = 'results/brusselator_pde_MLP_EI_q_2'
case_LCB_q2 = 'results/brusselator_pde_MLP_LCB_q_2'
case2 = 'results/brusselator_pde_DON_TS'
case3 = 'results/brusselator_pde_DON_LW_LCB'
case4 = 'results/brusselator_pde_DON_LCB'
opt_q1_RPN = []
opt_q1_RPN_EI = []
opt_q2_RPN = []
opt_LCB_q2_RPN = []
opt_q1_RPN2 = []
opt_q1_RPN3 = []
opt_q1_RPN4 = []
for j in range(nTrSet):
opt = np.load(case+'/opt_'+str(j)+'.npy')
opt_q1_RPN.append(np.array(opt))
opt = np.load(case_q2+'/opt_'+str(j)+'.npy')
opt_q2_RPN.append(np.array(opt))
opt = np.load(case_EI+'/opt_'+str(j)+'.npy')
opt_q1_RPN_EI.append(np.array(opt))
opt = np.load(case_LCB_q2+'/opt_'+str(j)+'.npy')
opt_LCB_q2_RPN.append(np.array(opt))
opt_q1_RPN = np.array(opt_q1_RPN)
opt_q2_RPN = np.array(opt_q2_RPN)
opt_q1_RPN_EI = np.array(opt_q1_RPN_EI)
opt_LCB_q2_RPN = np.array(opt_LCB_q2_RPN)
for j in range(nTrSet):
opt = np.load(case2+'/opt_'+str(j)+'.npy')
opt_q1_RPN2.append(np.array(opt))
opt = np.load(case3+'/opt_'+str(j)+'.npy')
opt_q1_RPN3.append(np.array(opt))
opt = np.load(case4+'/opt_'+str(j)+'.npy')
opt_q1_RPN4.append(np.array(opt))
m_q1_RPN, std_q1_RPN = np.median(opt_q1_RPN, axis = 0), stats.median_abs_deviation(opt_q1_RPN, axis = 0)
m_q1_RPN2, std_q1_RPN2 = np.median(opt_q1_RPN2, axis = 0), stats.median_abs_deviation(opt_q1_RPN2, axis = 0)
m_q1_RPN3, std_q1_RPN3 = np.median(opt_q1_RPN3, axis = 0), stats.median_abs_deviation(opt_q1_RPN3, axis = 0)
m_q1_RPN4, std_q1_RPN4 = np.median(opt_q1_RPN4, axis = 0), stats.median_abs_deviation(opt_q1_RPN4, axis = 0)
m_q2_RPN, std_q2_RPN = np.median(opt_q2_RPN, axis = 0), stats.median_abs_deviation(opt_q2_RPN, axis = 0)
m_q1_RPN_EI, std_q1_RPN_EI = np.median(opt_q1_RPN_EI, axis = 0), stats.median_abs_deviation(opt_q1_RPN_EI, axis = 0)
m_LCB_q2_RPN, std_LCB_q2_RPN = np.median(opt_LCB_q2_RPN, axis = 0), stats.median_abs_deviation(opt_LCB_q2_RPN, axis = 0)
lower_q1_RPN = np.log10(np.clip(m_q1_RPN - dispersion_scale*std_q1_RPN, a_min=0., a_max = np.inf) + 1e-8)
upper_q1_RPN = np.log10(m_q1_RPN + dispersion_scale*std_q1_RPN + 1e-8)
lower_q1_RPN2 = np.log10(np.clip(m_q1_RPN2 - dispersion_scale*std_q1_RPN2, a_min=0., a_max = np.inf) + 1e-10)
upper_q1_RPN2 = np.log10(m_q1_RPN2 + dispersion_scale*std_q1_RPN2 + 1e-10)
lower_q1_RPN3 = np.log10(np.clip(m_q1_RPN3 - dispersion_scale*std_q1_RPN3, a_min=0., a_max = np.inf) + 1e-10)
upper_q1_RPN3 = np.log10(m_q1_RPN3 + dispersion_scale*std_q1_RPN3 + 1e-10)
lower_q1_RPN4 = np.log10(np.clip(m_q1_RPN4 - dispersion_scale*std_q1_RPN4, a_min=0., a_max = np.inf) + 1e-10)
upper_q1_RPN4 = np.log10(m_q1_RPN4 + dispersion_scale*std_q1_RPN4 + 1e-10)
lower_q2_RPN = np.log10(np.clip(m_q2_RPN - dispersion_scale*std_q2_RPN, a_min=0., a_max = np.inf) + 1e-8)
upper_q2_RPN = np.log10(m_q2_RPN + dispersion_scale*std_q2_RPN + 1e-8)
lower_q1_RPN_EI = np.log10(np.clip(m_q1_RPN_EI - dispersion_scale*std_q1_RPN_EI, a_min=0., a_max = np.inf) + 1e-8)
upper_q1_RPN_EI = np.log10(m_q1_RPN_EI + dispersion_scale*std_q1_RPN_EI + 1e-8)
lower_LCB_q2_RPN = np.log10(np.clip(m_LCB_q2_RPN - dispersion_scale*std_LCB_q2_RPN, a_min=0., a_max = np.inf) + 1e-8)
upper_LCB_q2_RPN = np.log10(m_LCB_q2_RPN + dispersion_scale*std_LCB_q2_RPN + 1e-8)
fig = plt.figure(figsize=(21, 9))
ax = plt.subplot(111)
ax.plot(N+q1*np.arange(nIter_q1+1), np.log10(m_q1_RPN), color='black', label = r'\textbf{RPN - MLP - LCB}')
ax.fill_between(N+q1*np.arange(nIter_q1+1), lower_q1_RPN, upper_q1_RPN, facecolor='black', alpha=0.3)
ax.plot(N+q2*np.arange(nIter_q2+1), np.log10(m_LCB_q2_RPN), color='slategrey', label = r'\textbf{RPN - MLP - LCB, q=2}')
ax.fill_between(N+q2*np.arange(nIter_q2+1), lower_LCB_q2_RPN, upper_LCB_q2_RPN, facecolor='slategrey', alpha=0.3)
ax.plot(N+q1*np.arange(nIter_q1+1), np.log10(m_q1_RPN_EI), color='blue', label = r'\textbf{RPN - MLP - EI}')
ax.fill_between(N+q1*np.arange(nIter_q1+1), lower_q1_RPN_EI, upper_q1_RPN_EI, facecolor='blue', alpha=0.3)
ax.plot(N+q2*np.arange(nIter_q2+1), np.log10(m_q2_RPN), color='lightskyblue', label = r'\textbf{RPN - MLP - EI, q=2}')
ax.fill_between(N+q2*np.arange(nIter_q2+1), lower_q2_RPN, upper_q2_RPN, facecolor='lightskyblue', alpha=0.3)
ax.plot(N+q1*np.arange(nIter_q1_DON+1), np.log10(m_q1_RPN4), '-.', color='black', label = r'\textbf{RPN - DON - LCB}')
ax.fill_between(N+q1*np.arange(nIter_q1_DON+1), lower_q1_RPN4, upper_q1_RPN4, facecolor='black', alpha=0.3)
ax.plot(N+q1*np.arange(nIter_q1_DON+1), np.log10(m_q1_RPN3), '-.', color='hotpink', label = r'\textbf{RPN - DON - LCB-LW}')
ax.fill_between(N+q1*np.arange(nIter_q1_DON+1), lower_q1_RPN3, upper_q1_RPN3, facecolor='hotpink', alpha=0.3)
ax.plot(N+q1*np.arange(nIter_q1_DON+1), np.log10(m_q1_RPN2), '-.', color='limegreen', label = r'\textbf{RPN - DON - TS}')
ax.fill_between(N+q1*np.arange(nIter_q1_DON+1), lower_q1_RPN2, upper_q1_RPN2, facecolor='limegreen', alpha=0.3)
plt.xticks(np.arange(N, N+nIter_q1_DON+1, N))
plt.xlim([5,35])
ax.grid(color='Grey', linestyle='-', linewidth=0.5)
sample_means = dat["pde_means"]
sample_stds = dat["pde_stds"]
keys = dat["pde_keys"]
key_dict = {"rnd": r'\textbf{Random}', "rnd_cf": r'\textbf{Random-CF}', "ei": r'\textbf{EI}', "ei_cf": r'\textbf{EI-CF}', \
"ei_hogp_cf": r'\textbf{EI-HOGP-CF}', "ei_hogp_cf_smooth": r'\textbf{EI-HOGP-CF + GP}'}
steps = torch.linspace(5, 35, 30)
for i, key in enumerate(keys):
ax.fill_between(steps,
sample_means[i] - sample_stds[i] / 20**0.5,
sample_means[i] + sample_stds[i] / 20**0.5,
alpha = 0.1)
ax.plot(steps, sample_means[i], '--', linewidth=3, label = key_dict[key])
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
ax.legend(loc='center left', frameon=False, fontsize = 36, bbox_to_anchor=(0.98, 0.5))
plt.xlabel(r'\textbf{Function Evaluations}')
plt.ylabel(r'\textbf{Log10(Variance)}')
plt.savefig('figures/brusselator_pde.png', dpi=300,bbox_inches='tight')
################################################
############ optical_interferometer ############
################################################
if problem == 'optical_interferometer':
N = 15
nIter = 85
q1 = 1
nIter_q1 = nIter//q1
nTrSet = 5
dispersion_scale = 0.2
case = 'results/optical_interferometer_MLP_LCB'
case2 = 'results/optical_interferometer_MLP_EI'
case3 = 'results/optical_interferometer_MLP_TS'
case4 = 'results/optical_interferometer_DON_EI'
opt_q1_RPN = []
opt_q1_RPN2 = []
opt_q1_RPN3 = []
opt_q1_RPN4 = []
for j in range(nTrSet):
opt = -np.load(case+'/'+'opt_'+str(j)+'.npy')[:nIter_q1+1]
opt2 = -np.load(case2+'/'+'opt_'+str(j)+'.npy')[:nIter_q1+1]
opt3 = -np.load(case3+'/'+'opt_'+str(j)+'.npy')[:nIter_q1+1]
opt4 = -np.load(case4+'/'+'opt_'+str(j)+'.npy')[:nIter_q1+1]
opt_q1_RPN.append(np.array(opt))
opt_q1_RPN2.append(np.array(opt2))
opt_q1_RPN3.append(np.array(opt3))
opt_q1_RPN4.append(np.array(opt4))
opt_q1_RPN = np.array(opt_q1_RPN)
opt_q1_RPN2 = np.array(opt_q1_RPN2)
opt_q1_RPN3 = np.array(opt_q1_RPN3)
opt_q1_RPN4 = np.array(opt_q1_RPN4)
m_q1_RPN, std_q1_RPN = np.median(opt_q1_RPN, axis = 0), stats.median_abs_deviation(opt_q1_RPN, axis = 0)
m_q1_RPN2, std_q1_RPN2 = np.median(opt_q1_RPN2, axis = 0), stats.median_abs_deviation(opt_q1_RPN2, axis = 0)
m_q1_RPN3, std_q1_RPN3 = np.median(opt_q1_RPN3, axis = 0), stats.median_abs_deviation(opt_q1_RPN3, axis = 0)
m_q1_RPN4, std_q1_RPN4 = np.median(opt_q1_RPN4, axis = 0), stats.median_abs_deviation(opt_q1_RPN4, axis = 0)
lower_q1_RPN = np.clip(m_q1_RPN - dispersion_scale*std_q1_RPN, a_min=0., a_max = np.inf) + 1e-8
upper_q1_RPN = m_q1_RPN + dispersion_scale*std_q1_RPN + 1e-8
lower_q1_RPN2 = np.clip(m_q1_RPN2 - dispersion_scale*std_q1_RPN2, a_min=0., a_max = np.inf) + 1e-8
upper_q1_RPN2 = m_q1_RPN2 + dispersion_scale*std_q1_RPN2 + 1e-8
lower_q1_RPN3 = np.clip(m_q1_RPN3 - dispersion_scale*std_q1_RPN3, a_min=0., a_max = np.inf) + 1e-8
upper_q1_RPN3 = m_q1_RPN3 + dispersion_scale*std_q1_RPN3 + 1e-8
lower_q1_RPN4 = np.clip(m_q1_RPN4 - dispersion_scale*std_q1_RPN4, a_min=0., a_max = np.inf) + 1e-8
upper_q1_RPN4 = m_q1_RPN4 + dispersion_scale*std_q1_RPN4 + 1e-8
fig = plt.figure(figsize=(21, 9))
ax = plt.subplot(111)
ax.plot(N+q1*np.arange(nIter_q1+1), m_q1_RPN, color='black', label = r'\textbf{RPN - MLP - LCB}')
ax.fill_between(N+q1*np.arange(nIter_q1+1), lower_q1_RPN, upper_q1_RPN, facecolor='black', alpha=0.3)
ax.plot(N+q1*np.arange(nIter_q1+1), m_q1_RPN2, color='blue', label = r'\textbf{RPN - MLP - EI}')
ax.fill_between(N+q1*np.arange(nIter_q1+1), lower_q1_RPN2, upper_q1_RPN2, facecolor='blue', alpha=0.3)
ax.plot(N+q1*np.arange(nIter_q1+1), m_q1_RPN3, color='limegreen', label = r'\textbf{RPN - MLP - TS}')
ax.fill_between(N+q1*np.arange(nIter_q1+1), lower_q1_RPN3, upper_q1_RPN3, facecolor='limegreen', alpha=0.3)
ax.plot(N+q1*np.arange(nIter_q1+1), m_q1_RPN4, '-.', color='blue', label = r'\textbf{RPN - DON - EI}')
ax.fill_between(N+q1*np.arange(nIter_q1+1), lower_q1_RPN4, upper_q1_RPN4, facecolor='blue', alpha=0.3)
ax.grid(color='Grey', linestyle='-', linewidth=0.5)
plt.xlim([15,100])
sample_means = dat["optics_means"]
sample_stds = dat["optics_stds"]
keys = dat["optics_keys"]
key_dict = {"rnd": r'\textbf{Random}', "rnd_cf": r'\textbf{Random-CF}', "ei": r'\textbf{EI}', "ei_cf": r'\textbf{EI-CF}', \
"ei_hogp_cf": r'\textbf{EI-HOGP-CF}', "ei_hogp_cf_smooth": r'\textbf{EI-HOGP-CF + GP}'}
steps = torch.linspace(0, 100, 100)
for i, key in enumerate(keys):
plt.fill_between(steps,
sample_means[i] - sample_stds[i] / 45**0.5,
sample_means[i] + sample_stds[i] / 45**0.5,
alpha = 0.1)
plt.plot(steps, sample_means[i], '--', linewidth=3, label = key_dict[key])
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
ax.legend(loc='center left', frameon=False, fontsize = 36, bbox_to_anchor=(0.98, 0.5))
plt.xlabel(r'\textbf{Function Evaluations}')
plt.ylabel(r'\textbf{Visibility (V)}')
plt.savefig('figures/optical_interferometer.png', dpi=300,bbox_inches='tight')
################################################
############### comp_blades_shape ###############
################################################
if problem == 'comp_blades_shape':
dispersion_scale = 0.2
x_MFGP = np.load('results/compressor_blades_shape_MLP/x_MFGP.npy')
mean_MFGP = np.load('results/compressor_blades_shape_MLP/mean_MFGP.npy')
std_MFGP = np.load('results/compressor_blades_shape_MLP/std_MFGP.npy')
x_SFGP = np.load('results/compressor_blades_shape_MLP/x_SFGP.npy')
mean_SFGP = np.load('results/compressor_blades_shape_MLP/mean_SFGP.npy')
std_SFGP = np.load('results/compressor_blades_shape_MLP/std_SFGP.npy')
x_MFRPN = np.load('results/compressor_blades_shape_MLP/x_MFRPN.npy')
mean_MFRPN = np.load('results/compressor_blades_shape_MLP/mean_MFRPN.npy')
std_MFRPN = np.load('results/compressor_blades_shape_MLP/std_MFRPN.npy')
lower_SFGP = mean_SFGP - dispersion_scale*std_SFGP
upper_SFGP = mean_SFGP + dispersion_scale*std_SFGP
lower_MFGP = mean_MFGP - dispersion_scale*std_MFGP
upper_MFGP = mean_MFGP + dispersion_scale*std_MFGP
lower_MFRPN = mean_MFRPN - dispersion_scale*std_MFRPN
upper_MFRPN = mean_MFRPN + dispersion_scale*std_MFRPN
plt.figure(figsize = (16, 9), facecolor = "w")
plt.plot(x_MFRPN,mean_MFRPN, linewidth=3, color='black', label = r'\textbf{MF - RPN - LCBC}')
plt.fill_between(x_MFRPN, lower_MFRPN, upper_MFRPN, facecolor='black', alpha=0.3)
plt.plot(x_SFGP,mean_SFGP, '--', linewidth=3, color='magenta', label = r'\textbf{SF - GP - LCBC}')
plt.fill_between(x_SFGP, lower_SFGP, upper_SFGP, facecolor='magenta', alpha=0.3)
plt.plot(x_MFGP,mean_MFGP, '--', linewidth=3, color='orange', label = r'\textbf{MF - GP - LCBC}')
plt.fill_between(x_MFGP, lower_MFGP, upper_MFGP, facecolor='orange', alpha=0.3)
plt.xlim([0,200])
plt.grid(color='Grey', linestyle='-', linewidth=0.5)
plt.legend(fontsize = 28)
    plt.xlabel(r'\textbf{Cost (unit of CFD HF evaluations) for acquired points}')
plt.ylabel(r'\textbf{Optimization objective}')
    plt.savefig('figures/comp_blades_shape.png',dpi=300,bbox_inches='tight')
    plt.show()
| 20,736 | 52.862338 | 128 |
py
|
rpn_bo
|
rpn_bo-main/Code and results/rpn_bo_dataloaders.py
|
from jax import vmap, random, jit
from jax import numpy as np
from functools import partial
from torch.utils import data
class BootstrapLoader(data.Dataset):
def __init__(self, X, y, batch_size=128, ensemble_size=32, fraction=0.5, is_Gauss=1, LF_pred=None, rng_key=random.PRNGKey(1234)):
'Initialization'
self.N = X.shape[0]
self.batch_size = batch_size
self.ensemble_size = ensemble_size
self.bootstrap_size = int(self.N*fraction)
self.is_Gauss = is_Gauss
self.key = rng_key
# Create the bootstrapped partitions
keys = random.split(rng_key, ensemble_size)
if LF_pred is None:
self.X, self.y = vmap(self.__bootstrap, (None,None,0))(X, y, keys)
else:
self.X, self.y = vmap(self.__bootstrapMF, (None,None,0,0))(X, y, LF_pred, keys)
# Each bootstrapped data-set has its own normalization constants
self.norm_const = vmap(self.normalization_constants, in_axes=(0,0))(self.X, self.y)
@partial(jit, static_argnums=(0,))
def normalization_constants(self, X, y):
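# is_Gauss == 1 standardizes inputs and outputs per bootstrap replica
# (zero mean, unit std); otherwise inputs are left unscaled and outputs
# are only rescaled by max|y|.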
if self.is_Gauss == 1:
mu_X, sigma_X = X.mean(0), X.std(0)
mu_y, sigma_y = y.mean(0), y.std(0)
else:
mu_X, sigma_X = np.zeros(X.shape[1],), np.ones(X.shape[1],)
mu_y = np.zeros(y.shape[1],)
sigma_y = np.max(np.abs(y)) * np.ones(y.shape[1],)
return (mu_X, sigma_X), (mu_y, sigma_y)
@partial(jit, static_argnums=(0,))
def __bootstrap(self, X, y, key):
idx = random.choice(key, self.N, (self.bootstrap_size,), replace=False)
inputs = X[idx,:]
targets = y[idx,:]
return inputs, targets
@partial(jit, static_argnums=(0,))
def __bootstrapMF(self, X, y, yLH, key):
idx = random.choice(key, self.N, (self.bootstrap_size,), replace=False)
inputs = np.concatenate([X[idx,:], yLH[idx,:]], axis=1)
targets = y[idx,:]
return inputs, targets
@partial(jit, static_argnums=(0,))
def __data_generation(self, key, X, y, norm_const):
'Generates data containing batch_size samples'
(mu_X, sigma_X), (mu_y, sigma_y) = norm_const
idx = random.choice(key, self.bootstrap_size, (self.batch_size,), replace=False)  # sample within the bootstrapped subset, whose length is bootstrap_size rather than N
X = X[idx,:]
y = y[idx,:]
X = (X - mu_X)/sigma_X
y = (y - mu_y)/sigma_y
return X, y
def __getitem__(self, index):
'Generate one batch of data'
self.key, subkey = random.split(self.key)
keys = random.split(self.key, self.ensemble_size)
inputs, targets = vmap(self.__data_generation, (0,0,0,0))(keys,
self.X,
self.y,
self.norm_const)
return inputs, targets
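# Minimal usage sketch (array shapes here are illustrative assumptions):
# X, y = np.ones((200, 4)), np.ones((200, 1))
# loader = BootstrapLoader(X, y, batch_size=32, ensemble_size=8, fraction=0.5)
# inputs, targets = loader[0]  # each (ensemble_size, batch_size, dim), normalized per replica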
class DataGenerator_batch(data.Dataset):
def __init__(self, usol, u0_train, s1, t1, P1 = 100, P2 = 100,
batch_size=64, batch_size_all=512, N_ensemble = 10, rng_key=random.PRNGKey(1234), y=None):
'Initialization'
self.usol = usol
self.u0_train = u0_train
self.N_train_realizations = usol.shape[0]
self.P1 = P1
self.P2 = P2
self.dim = usol.shape[-1]
u_samples_reshape = usol.reshape(self.N_train_realizations, P1*P2, self.dim) # realizations x (mxp) x dim
self.norms = vmap(np.linalg.norm, (0, None, None))(u_samples_reshape, np.inf, 0) # realizations x dim
T, X = np.meshgrid(t1, s1)
if y is None:
self.y = np.hstack([T.flatten()[:,None], X.flatten()[:,None]])
else:
self.y = y
self.batch_size = batch_size
self.batch_size_all = batch_size_all
self.N_ensemble = N_ensemble
self.key = rng_key
def __getitem__(self, index):
'Generate one batch of data'
self.key, subkey = random.split(self.key)
v_subkey = random.split(subkey, self.N_train_realizations)
u_temp, y_temp, s_temp, w_temp = self.__get_realizations(v_subkey)
self.key, subkey = random.split(self.key)
v_subkey = random.split(subkey, self.N_ensemble)
inputs, outputs = vmap(self.__data_generation, (0, None, None, None, None))(v_subkey, u_temp, y_temp, s_temp, w_temp)
return inputs, outputs
@partial(jit, static_argnums=(0,))
def __data_generation(self, key, u_temp, y_temp, s_temp, w_temp):
'Generates data containing batch_size samples'
idx = random.choice(key, self.N_train_realizations * self.batch_size, (self.batch_size_all,), replace=False)
u = u_temp[idx,:]
y = y_temp[idx,:]
s = s_temp[idx,:]
w = w_temp[idx,:]
# Construct batch
inputs = (u, y)
outputs = (s, w)
return inputs, outputs
@partial(jit, static_argnums=(0,))
def __get_realizations(self, key):
idx_train = np.arange(self.N_train_realizations)
u_temp, y_temp, s_temp, w_temp = vmap(self.__generate_one_realization_data, (0, 0, None, None, None))(key, idx_train, self.usol, self.u0_train, self.norms)
u_temp = np.float32(u_temp.reshape(self.N_train_realizations * self.batch_size,-1))
y_temp = np.float32(y_temp.reshape(self.N_train_realizations * self.batch_size,-1))
s_temp = np.float32(s_temp.reshape(self.N_train_realizations * self.batch_size,-1))
w_temp = np.float32(w_temp.reshape(self.N_train_realizations * self.batch_size,-1))
return u_temp, y_temp, s_temp, w_temp
def __generate_one_realization_data(self, key, idx, usol, u0_train, norms):
u = usol[idx]
u0 = u0_train[idx]
ww = norms[idx]
s = np.swapaxes(u, 0, 1)
s = s.reshape(self.P1*self.P2, self.dim)
u = np.tile(u0, (self.batch_size, 1))
w = np.tile(ww, (self.batch_size, 1)) # for dim > 1, otherwise, w = np.tile(ww, (self.batch_size))
idx_keep = random.choice(key, s.shape[0], (self.batch_size,), replace=False)
return u, self.y[idx_keep,:], s[idx_keep], w
| 6,289 | 42.680556 | 163 |
py
|
mcfit
|
mcfit-master/setup.py
|
from setuptools import setup
def find_version(path):
with open(path, 'r') as fp:
file = fp.read()
import re
match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
file, re.M)
if match:
return match.group(1)
raise RuntimeError("Version not found")
setup(
name = 'mcfit',
version = find_version("mcfit/__init__.py"),
description = 'Multiplicatively convolutional fast integral transforms',
url = 'https://github.com/eelregit/mcfit',
author = 'Yin Li',
author_email = '[email protected]',
license = 'GPLv3',
keywords = 'numerical integral transform FFTLog cosmology',
packages = ['mcfit', 'mcfit.tests'],
install_requires = ['numpy', 'scipy', 'mpmath'],
)
| 762 | 29.52 | 76 |
py
|
mcfit
|
mcfit-master/mcfit/kernels.py
|
from numpy import arange, exp, log, ndim, pi, sqrt
from scipy.special import gamma
try:
from scipy.special import loggamma
except ImportError:
def loggamma(x):
return log(gamma(x))
def _deriv(MK, deriv):
"""Real deriv is wrt :math:`t`, complex deriv is wrt :math:`\ln t`"""
if deriv == 0:
return MK
if isinstance(deriv, complex):
def MKderiv(z):
return (-z) ** deriv.imag * MK(z)
return MKderiv
def MKderiv(z):
poly = arange(deriv) + 1
poly = poly - z if ndim(z) == 0 else poly - z.reshape(-1, 1)
poly = poly.prod(axis=-1)
return poly * MK(z - deriv)
return MKderiv
def Mellin_BesselJ(nu, deriv=0):
def MK(z):
return exp(log(2)*(z-1) + loggamma(0.5*(nu+z)) - loggamma(0.5*(2+nu-z)))
return _deriv(MK, deriv)
def Mellin_SphericalBesselJ(nu, deriv=0):
def MK(z):
return exp(log(2)*(z-1.5) + loggamma(0.5*(nu+z)) - loggamma(0.5*(3+nu-z)))
return _deriv(MK, deriv)
def Mellin_FourierSine(deriv=0):
def MK(z):
return exp(log(2)*(z-0.5) + loggamma(0.5*(1+z)) - loggamma(0.5*(2-z)))
return _deriv(MK, deriv)
def Mellin_FourierCosine(deriv=0):
def MK(z):
return exp(log(2)*(z-0.5) + loggamma(0.5*z) - loggamma(0.5*(1-z)))
return _deriv(MK, deriv)
def Mellin_DoubleBesselJ(alpha, nu1, nu2):
import mpmath
from numpy import frompyfunc
hyp2f1 = frompyfunc(lambda *a: complex(mpmath.hyp2f1(*a)), 4, 1)
if 0 < alpha < 1:
def MK(z):
return exp(log(2)*(z-1) + log(alpha)*nu2 + loggamma(0.5*(nu1+nu2+z))
- loggamma(0.5*(2+nu1-nu2-z)) - loggamma(1+nu2)) \
* hyp2f1(0.5*(-nu1+nu2+z), 0.5*(nu1+nu2+z), 1+nu2, alpha**2)
elif alpha > 1:
def MK(z):
return exp(log(2)*(z-1) + log(alpha)*(-nu1-z) + loggamma(0.5*(nu1+nu2+z))
- loggamma(0.5*(2-nu1+nu2-z)) - loggamma(1+nu1)) \
* hyp2f1(0.5*(nu1-nu2+z), 0.5*(nu1+nu2+z), 1+nu1, alpha**-2)
elif alpha == 1:
def MK(z):
return exp(log(2)*(z-1) + loggamma(1-z) + loggamma(0.5*(nu1+nu2+z))
- loggamma(0.5*(2+nu1-nu2-z))- loggamma(0.5*(2-nu1+nu2-z))
- loggamma(0.5*(2+nu1+nu2-z)))
else:
raise ValueError
return MK
def Mellin_DoubleSphericalBesselJ(alpha, nu1, nu2):
import mpmath
from numpy import frompyfunc
hyp2f1 = frompyfunc(lambda *a: complex(mpmath.hyp2f1(*a)), 4, 1)
if 0 < alpha < 1:
def MK(z):
return pi * exp(log(2)*(z-3) + log(alpha)*nu2 + loggamma(0.5*(nu1+nu2+z))
- loggamma(0.5*(3+nu1-nu2-z)) - loggamma(1.5+nu2)) \
* hyp2f1(0.5*(-1-nu1+nu2+z), 0.5*(nu1+nu2+z), 1.5+nu2, alpha**2)
elif alpha > 1:
def MK(z):
return pi * exp(log(2)*(z-3) + log(alpha)*(-nu1-z) + loggamma(0.5*(nu1+nu2+z))
- loggamma(0.5*(3-nu1+nu2-z)) - loggamma(1.5+nu1)) \
* hyp2f1(0.5*(-1+nu1-nu2+z), 0.5*(nu1+nu2+z), 1.5+nu1, alpha**-2)
elif alpha == 1:
def MK(z):
return pi * exp(log(2)*(z-3) + loggamma(2-z) + loggamma(0.5*(nu1+nu2+z))
- loggamma(0.5*(3+nu1-nu2-z))- loggamma(0.5*(3-nu1+nu2-z))
- loggamma(0.5*(4+nu1+nu2-z)))
else:
raise ValueError
return MK
def Mellin_Tophat(dim, deriv=0):
def MK(z):
return exp(log(2)*(z-1) + loggamma(1+0.5*dim) + loggamma(0.5*z) \
- loggamma(0.5*(2+dim-z)))
return _deriv(MK, deriv)
def Mellin_TophatSq(dim, deriv=0):
if dim == 1:
def MK(z):
return -0.25*sqrt(pi) * exp(loggamma(0.5*(z-2)) - loggamma(0.5*(3-z)))
elif dim == 3:
def MK(z):
return 2.25*sqrt(pi)*(z-2)/(z-6) \
* exp(loggamma(0.5*(z-4)) - loggamma(0.5*(5-z)))
else:
def MK(z):
return exp(log(2)*(dim-1) + 2*loggamma(1+0.5*dim) \
+ loggamma(0.5*(1+dim-z)) + loggamma(0.5*z) \
- loggamma(1+dim-0.5*z) - loggamma(0.5*(2+dim-z))) / sqrt(pi)
return _deriv(MK, deriv)
def Mellin_Gauss(deriv=0):
def MK(z):
return 2**(0.5*z-1) * gamma(0.5*z)
return _deriv(MK, deriv)
def Mellin_GaussSq(deriv=0):
def MK(z):
return 0.5 * gamma(0.5*z)
return _deriv(MK, deriv)
| 4,440 | 35.401639 | 90 |
py
|
mcfit
|
mcfit-master/mcfit/cosmology.py
|
"""Cosmology applications"""
from mcfit.mcfit import mcfit
from mcfit import kernels
from numpy import pi
__all__ = ['P2xi', 'xi2P', 'TophatVar', 'GaussVar']
class P2xi(mcfit):
"""Power spectrum to correlation function.
Parameters
----------
k : see `x` in :class:`mcfit.mcfit`
l : int
order
n : int
to generalize correlation function with extra power law factor
:math:`k^n` in the integrand. If not None, the phase factor is ignored.
The tilt parameter `q` is automatically adjusted (to `q+n`) based on
the provided value
See :class:`mcfit.mcfit`
"""
def __init__(self, k, l=0, n=None, deriv=0, q=1.5, **kwargs):
self.l = l
MK = kernels.Mellin_SphericalBesselJ(l, deriv)
if n is None:
phase = (-1 if l & 2 else 1) * (1j if l & 1 else 1) # i^l
n = 0
else:
phase = 1
mcfit.__init__(self, k, MK, q+n, **kwargs)
self.prefac *= self.x**(3+n) / (2*pi)**1.5
self.postfac *= phase
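# Usage sketch (the call signature is that of mcfit.__call__):
# r, xi = P2xi(k, l=0, lowring=True)(P, extrap=True)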
class xi2P(mcfit):
"""Correlation function to power spectrum, also radial profile to its
Fourier transform.
Parameters
----------
r : see `x` in :class:`mcfit.mcfit`
l : int
order
See :class:`mcfit.mcfit`
"""
def __init__(self, r, l=0, deriv=0, q=1.5, **kwargs):
self.l = l
MK = kernels.Mellin_SphericalBesselJ(l, deriv)
mcfit.__init__(self, r, MK, q, **kwargs)
self.prefac *= self.x**3
phase = (-1 if l & 2 else 1) * (1j if l & 1 else 1) # i^l
self.postfac *= (2*pi)**1.5 / phase
class TophatVar(mcfit):
r"""Variance in a top-hat window.
Parameters
----------
k : see `x` in :class:`mcfit.mcfit`
Examples
--------
To compute :math:`\sigma_8` of a linear power spectrum :math:`P(k)`
>>> R, var = TophatVar(k, lowring=True)(P, extrap=True)
>>> from scipy.interpolate import CubicSpline
>>> varR = CubicSpline(R, var)
>>> sigma8 = numpy.sqrt(varR(8))
See :class:`mcfit.mcfit`
"""
def __init__(self, k, deriv=0, q=1.5, **kwargs):
MK = kernels.Mellin_TophatSq(3, deriv)
mcfit.__init__(self, k, MK, q, **kwargs)
self.prefac *= self.x**3 / (2 * pi**2)
class GaussVar(mcfit):
"""Variance in a Gaussian window.
Parameters
----------
k : see `x` in :class:`mcfit.mcfit`
See :class:`mcfit.mcfit`
"""
def __init__(self, k, deriv=0, q=1.5, **kwargs):
MK = kernels.Mellin_GaussSq(deriv)
mcfit.__init__(self, k, MK, q, **kwargs)
self.prefac *= self.x**3 / (2 * pi**2)
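# Usage mirrors the TophatVar example above:
# R, var = GaussVar(k, lowring=True)(P, extrap=True)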
| 2,644 | 25.717172 | 79 |
py
|
mcfit
|
mcfit-master/mcfit/__init__.py
|
"""Multiplicatively Convolutional Fast Integral Transform"""
from mcfit.mcfit import mcfit
from mcfit.transforms import *
from mcfit.cosmology import *
__version__ = "0.0.18"
| 177 | 21.25 | 60 |
py
|
mcfit
|
mcfit-master/mcfit/transforms.py
|
"""Common Integral transforms and applications"""
from mcfit.mcfit import mcfit
from mcfit import kernels
from numpy import pi
from scipy.special import gamma
__all__ = ['Hankel', 'SphericalBessel', 'DoubleBessel', 'DoubleSphericalBessel',
'FourierSine', 'FourierCosine', 'TophatSmooth', 'GaussSmooth']
class Hankel(mcfit):
"""Hankel transform pair.
Parameters
----------
nu : float
order
See :class:`mcfit.mcfit`
"""
def __init__(self, x, nu=0, deriv=0, q=1, **kwargs):
self.nu = nu
MK = kernels.Mellin_BesselJ(nu, deriv)
mcfit.__init__(self, x, MK, q, **kwargs)
self.prefac *= self.x**2
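# Example (see also mcfit/tests/test_transforms.py):
# y, G = Hankel(x, nu=0, q=1, lowring=True)(F, extrap=True)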
class SphericalBessel(mcfit):
"""Spherical Bessel transform pair.
Parameters
----------
nu : int
order
See :class:`mcfit.mcfit`
"""
def __init__(self, x, nu=0, deriv=0, q=1.5, **kwargs):
self.nu = nu
MK = kernels.Mellin_SphericalBesselJ(nu, deriv)
mcfit.__init__(self, x, MK, q, **kwargs)
self.prefac *= self.x**3
class FourierSine(mcfit):
"""Fourier sine transform pair.
See :class:`mcfit.mcfit`
"""
def __init__(self, x, deriv=0, q=0.5, **kwargs):
MK = kernels.Mellin_FourierSine(deriv)
mcfit.__init__(self, x, MK, q, **kwargs)
self.prefac *= self.x
class FourierCosine(mcfit):
"""Fourier cosine transform pair.
See :class:`mcfit.mcfit`
"""
def __init__(self, x, deriv=0, q=0.5, **kwargs):
MK = kernels.Mellin_FourierCosine(deriv)
mcfit.__init__(self, x, MK, q, **kwargs)
self.prefac *= self.x
class DoubleBessel(mcfit):
r"""Compute integrals with two Bessel functions.
.. math:: G(y_1; \alpha) \equiv G(y_1, y_2=\alpha y_1)
= \int_0^\infty F(x) J_{\nu_1}(xy_1) J_{\nu_2}(xy_2) \,x\d x
Parameters
----------
alpha : float
y2 / y1
nu : float, optional
default is 0
nu1 : float, optional
default is nu
nu2 : float, optional
default is nu
See :class:`mcfit.mcfit`
"""
def __init__(self, x, alpha, nu=0, nu1=None, nu2=None, q=None, **kwargs):
self.alpha = alpha
if nu1 is None:
nu1 = nu
if nu2 is None:
nu2 = nu
self.nu1 = nu1
self.nu2 = nu2
MK = kernels.Mellin_DoubleBesselJ(alpha, nu1, nu2)
if q is None:
q = 1
if alpha == 1:
q = 0.5
mcfit.__init__(self, x, MK, q, **kwargs)
self.prefac *= self.x**2
class DoubleSphericalBessel(mcfit):
r"""Compute integrals with two spherical Bessel functions.
.. math:: G(y_1; \alpha) \equiv G(y_1, y_2=\alpha y_1)
= \int_0^\infty F(x) j_{\nu_1}(xy_1) j_{\nu_2}(xy_2) \,x^2\d x
Parameters
----------
alpha : float
y2 / y1
nu : float, optional
default is 0
nu1 : float, optional
default is nu
nu2 : float, optional
default is nu
See :class:`mcfit.mcfit`
"""
def __init__(self, x, alpha, nu=0, nu1=None, nu2=None, q=None, **kwargs):
self.alpha = alpha
if nu1 is None:
nu1 = nu
if nu2 is None:
nu2 = nu
self.nu1 = nu1
self.nu2 = nu2
MK = kernels.Mellin_DoubleSphericalBesselJ(alpha, nu1, nu2)
if q is None:
q = 2
if alpha == 1:
q = 1.5
mcfit.__init__(self, x, MK, q, **kwargs)
self.prefac *= self.x**3
class TophatSmooth(mcfit):
"""Top-hat smoothing of a radial function.
Parameters
----------
dim : int
dimension of the smoothing filter
See :class:`mcfit.mcfit`
"""
def __init__(self, x, dim=3, deriv=0, q=0, **kwargs):
self.dim = dim
MK = kernels.Mellin_Tophat(dim, deriv)
mcfit.__init__(self, x, MK, q, **kwargs)
self.prefac *= self.x**dim / (2**(dim-1) * pi**(dim/2) * gamma(dim/2))
class GaussSmooth(mcfit):
"""Gaussian smoothing of a radial function.
Parameters
----------
dim : int
dimension of the smoothing filter
See :class:`mcfit.mcfit`
"""
def __init__(self, x, dim=3, deriv=0, q=0, **kwargs):
self.dim = dim
MK = kernels.Mellin_Gauss(deriv)
mcfit.__init__(self, x, MK, q, **kwargs)
self.prefac *= self.x**dim / (2**(dim-1) * pi**(dim/2) * gamma(dim/2))
| 4,441 | 24.676301 | 80 |
py
|
mcfit
|
mcfit-master/mcfit/mcfit.py
|
import math
import cmath
import warnings
import numpy
try:
import jax
jax.config.update("jax_enable_x64", True)
except ModuleNotFoundError as e:
JAXNotFoundError = e
class mcfit(object):
r"""Compute integral transforms as a multiplicative convolution.
The generic form is
.. math:: G(y) = \int_0^\infty F(x) K(xy) \frac{dx}x
Here :math:`F(x)` is the input function, :math:`G(y)` is the output
function, and :math:`K(xy)` is the integral kernel.
One is free to scale all three functions by a power law
.. math:: g(y) = \int_0^\infty f(x) k(xy) \frac{dx}x
in which :math:`f(x) = x^{-q} F(x)`, :math:`g(y) = y^q G(y)`, and
:math:`k(t) = t^q K(t)`.
The tilt parameter :math:`q` shifts power of :math:`x` between the input
function and the kernel.
Parameters
----------
x : (Nin,) array_like
log-spaced input argument
MK : callable
Mellin transform of the kernel
.. math:: U_K(z) \equiv \int_0^\infty t^{z-1} K(t) dt
q : float
power-law tilt, can be used to balance :math:`f` at large and small
:math:`x`. Avoid the singularities in `MK`
N : int or complex, optional
size of convolution, if complex then replaced by the smallest power of
2 that is at least `N.imag` times the size of `x`; the input function
is padded symmetrically to this size before convolution (see the
`extrap` argument for available options); `N=len(x)` turns off the
padding
lowring : bool, optional
if True and `N` is even, set `y` according to the low-ringing
condition, otherwise see `xy`
xy : float, optional
reciprocal product :math:`x_{min} y_{max} = x_{max} y_{min}` to be used
when `lowring` is False or `N` is odd.
`xy = x[0] * y_max = x[1] * y[-1] = ... = x[i] * y[-i] = ... = x[-1] * y[1] = x_max * y[0]`.
Note that :math:`x_{max}` is not included in `x` but bigger than
`x.max()` by one log interval due to the discretization of the periodic
approximant, and likewise for :math:`y_{max}`
backend : str in {'numpy', 'jax'}, optional
Which backend to use.
Attributes
----------
Nin : int
input size, and that of the output if not `keeppads`
N : int
convolution size, and that of the output if `keeppads`
x : (Nin,) ndarray
input argument
y : (Nin,) ndarray
output argument
_x_ : (N,) ndarray
padded `x`
_y_ : (N,) ndarray
padded `y`
xy : float
reciprocal product
prefac : array_like
a function of `x` (excluding the tilt factor :math:`x^{-q}`) to
convert an integral to the normal form
postfac : array_like
a function of `y` (excluding the tilt factor :math:`y^{-q}`) to
convert an integral to the normal form
xfac : (Nin,) ndarray
a function of `x` (including the tilt factor :math:`x^{-q}`) to
multiply before the convolution
yfac : (Nin,) ndarray
a function of `y` (including the tilt factor :math:`y^{-q}`) to
multiply after the convolution
_xfac_ : (N,) ndarray
padded `_xfac_`
_yfac_ : (N,) ndarray
padded `_yfac_`
Methods
-------
__call__
matrix
Examples
--------
>>> x = numpy.logspace(-3, 3, num=60, endpoint=False)
>>> A = 1 / (1 + x*x)**1.5
>>> H = mcfit.mcfit(x, mcfit.kernels.Mellin_BesselJ(0), q=1, lowring=True)
>>> y, B = H(x**2 * A, extrap=True)
>>> numpy.allclose(B, numpy.exp(-y))
More conveniently, use the Hankel transform subclass
>>> y, B = mcfit.transforms.Hankel(x, lowring=True)(A, extrap=True)
Notes
-----
The choice of the tilt q matters in practice: it should balance F(x) at
small and large x and keep the contour away from singularities of MK
(see the q parameter above).
References
----------
.. [1] J. D. Talman. Numerical Fourier and Bessel Transforms in Logarithmic Variables.
Journal of Computational Physics, 29:35-48, October 1978.
.. [2] A. J. S. Hamilton. Uncorrelated modes of the non-linear power spectrum.
MNRAS, 312:257-284, February 2000.
"""
def __init__(self, x, MK, q, N=2j, lowring=False, xy=1, backend='numpy'):
if backend == 'numpy':
self.np = numpy
#self.jit = lambda fun: fun # TODO maybe use Numba?
elif backend == 'jax':
try:
self.np = jax.numpy
#self.jit = jax.jit # TODO maybe leave it to the user? jax.jit for CPU too
except NameError:
raise JAXNotFoundError
else:
raise ValueError(f"backend {backend} not supported")
#self.__call__ = self.jit(self.__call__)
#self.matrix = self.jit(self.matrix)
self.x = self.np.asarray(x)
self.Nin = len(x)
self.MK = MK
self.q = q
self.N = N
self.lowring = lowring
self.xy = xy
self._setup()
self.prefac = 1
self.postfac = 1
@property
def prefac(self):
return self._prefac
@prefac.setter
def prefac(self, value):
self._prefac = value
self.xfac = self._prefac * self.x**(-self.q)
self._xfac_ = self._pad(self.xfac, 0, True, False)
@property
def postfac(self):
return self._postfac
@postfac.setter
def postfac(self, value):
self._postfac = value
self.yfac = self._postfac * self.y**(-self.q)
self._yfac_ = self._pad(self.yfac, 0, True, True)
def _setup(self):
if self.Nin < 2:
raise ValueError(f"input size {self.Nin} must not be smaller than 2")
Delta = math.log(self.x[-1] / self.x[0]) / (self.Nin - 1)
x_head = self.x[:8]
if not self.np.allclose(self.np.log(x_head[1:] / x_head[:-1]), Delta,
rtol=1e-3):
warnings.warn("input must be log-spaced")
if isinstance(self.N, complex):
folds = math.ceil(math.log2(self.Nin * self.N.imag))
self.N = 2**folds
if self.N < self.Nin:
raise ValueError(f"convolution size {self.N} must not be smaller than "
f"the input size {self.Nin}")
if self.lowring and self.N % 2 == 0:
lnxy = Delta / math.pi * cmath.phase(self.MK(self.q + 1j * math.pi / Delta))
self.xy = math.exp(lnxy)
else:
lnxy = math.log(self.xy)
self.y = math.exp(lnxy - Delta) / self.x[::-1]
self._x_ = self._pad(self.x, 0, True, False)
self._y_ = self._pad(self.y, 0, True, True)
m = numpy.arange(0, self.N//2 + 1)
self._u = self.MK(self.q + 2j * math.pi / self.N / Delta * m)
self._u *= numpy.exp(-2j * math.pi * lnxy / self.N / Delta * m)
self._u = self.np.asarray(self._u, dtype=(self.x[0] + 0j).dtype)
# following is unnecessary because hfft ignores the imag at Nyquist anyway
#if not self.lowring and self.N % 2 == 0:
# self._u[self.N//2] = self._u[self.N//2].real
def __call__(self, F, axis=-1, extrap=False, keeppads=False, convonly=False):
"""Evaluate the integral.
Parameters
----------
F : (..., Nin, ...) or (..., N, ...) array_like
input function; to be padded according to `extrap` in size from
`Nin` to `N`, but not if already of size `N`
axis : int, optional
axis along which to integrate
extrap : {bool, 'const'} or 2-tuple, optional
Method to extrapolate `F`.
For a 2-tuple, the two elements are for the left and right pads,
whereas a single value applies to both ends.
Options are:
* True: power-law extrapolation using the end segment
* False: zero padding
* 'const': constant padding with the end point value
keeppads : bool, optional
whether to keep the padding in the output
convonly : bool, optional
whether to skip the scaling by `_xfac_` and `_yfac_`, useful for
evaluating integral with multiple kernels
Returns
-------
y : (Nin,) or (N,) ndarray
log-spaced output argument
G : (..., Nin, ...) or (..., N, ...) ndarray
output function
"""
F = self.np.asarray(F)
to_axis = [1] * F.ndim
to_axis[axis] = -1
f = self._pad(F, axis, extrap, False)
if not convonly:
f = self._xfac_.reshape(to_axis) * f
# convolution
f = self.np.fft.rfft(f, axis=axis) # f(x_n) -> f_m
g = f * self._u.reshape(to_axis) # f_m -> g_m
g = self.np.fft.hfft(g, n=self.N, axis=axis) / self.N # g_m -> g(y_n)
if not keeppads:
G = self._unpad(g, axis, True)
if not convonly:
G = self.yfac.reshape(to_axis) * G
return self.y, G
else:
_G_ = g
if not convonly:
_G_ = self._yfac_.reshape(to_axis) * _G_
return self._y_, _G_
def inv(self):
"""Invert the transform.
After calling this method, calling the instance will do the inverse
transform. Calling this twice return the instance to the original
transform.
"""
self.x, self.y = self.y, self.x
self._x_, self._y_ = self._y_, self._x_
self.xfac, self.yfac = 1 / self.yfac, 1 / self.xfac
self._xfac_, self._yfac_ = 1 / self._yfac_, 1 / self._xfac_
self._u = 1 / self._u.conj()
def matrix(self, full=False, keeppads=True):
"""Return matrix form of the integral transform.
Parameters
----------
full : bool, optional
when False return two vector factors and convolution matrix
separately, otherwise return full transformation matrix
keeppads : bool, optional
whether to keep the padding in the output
Returns
-------
If full is False, output separately
a : (1, N) or (1, Nin) ndarray
"After" factor, `_yfac_` or `yfac`
b : (N,) or (Nin,) ndarray
"Before" factor, `_xfac_` or `xfac`
C : (N, N) or (Nin, Nin) ndarray
Convolution matrix, circulant
Otherwise, output the full matrix, combining `a`, `b`, and `C`
M : (N, N) or (Nin, Nin) ndarray
Full transformation matrix, `M = a * C * b`
Notes
-----
`M`, `a`, `b`, and `C` are padded by default.
This is not meant for evaluation with matrix multiplication but in case
one is interested in the transformation itself.
When `N` is even and `lowring` is False, :math:`C C^{-1}` and :math:`M
M^{-1}` can deviate from the identity matrix because the imaginary part
of the Nyquist modes are dropped.
The convolution matrix is a circulant matrix, with its first row and
first column being the Fourier transform of :math:`u_m`.
Indeed :math:`u_m` are the eigenvalues of the convolution matrix, that
are diagonalized by the DFT matrix.
Thus :math:`1/u_m` are the eigenvalues of the inverse convolution
matrix.
"""
v = self.np.fft.hfft(self._u, n=self.N) / self.N
idx = sum(self.np.ogrid[0:self.N, -self.N:0])
C = v[idx] # follow scipy.linalg.{circulant,toeplitz,hankel}
if keeppads:
a = self._yfac_.copy()
b = self._xfac_.copy()
else:
a = self.yfac.copy()
b = self.xfac.copy()
C = self._unpad(C, 0, True)
C = self._unpad(C, 1, False)
a = a.reshape(-1, 1)
if not full:
return a, b, C
else:
return a * C * b
def _pad(self, a, axis, extrap, out):
"""Add padding to an array.
Parameters
----------
a : (..., Nin, ...) or (..., N, ...) ndarray
array to be padded, but not if already of size `N`
axis : int
axis along which to pad
extrap : {bool, 'const'} or 2-tuple
Method to extrapolate `a`.
For a 2-tuple, the two elements are for the left and right pads,
whereas a single value applies to both ends.
Options are:
* True: power-law extrapolation using the end segment
* False: zero padding
* 'const': constant padding with the end point value
out : bool
pad the output if True, otherwise the input; the two cases have
their left and right pad sizes reversed
"""
if a.shape[axis] == self.N:
return a
elif a.shape[axis] != self.Nin:
raise ValueError("array size must be that of the input or the convolution")
axis %= a.ndim # to fix the indexing below with axis+1
to_axis = [1] * a.ndim
to_axis[axis] = -1
Npad = self.N - self.Nin
if out:
_Npad, Npad_ = Npad - Npad//2, Npad//2
else:
_Npad, Npad_ = Npad//2, Npad - Npad//2
try:
_extrap, extrap_ = extrap
except (TypeError, ValueError):
_extrap = extrap_ = extrap
if isinstance(_extrap, bool):
if _extrap:
end = self.np.take(a, self.np.array([0]), axis=axis)
ratio = self.np.take(a, self.np.array([1]), axis=axis) / end
exp = self.np.arange(-_Npad, 0).reshape(to_axis)
_a = end * ratio ** exp
else:
_a = self.np.zeros(a.shape[:axis] + (_Npad,) + a.shape[axis+1:])
elif _extrap == 'const':
end = self.np.take(a, self.np.array([0]), axis=axis)
_a = self.np.repeat(end, _Npad, axis=axis)
else:
raise ValueError(f"left extrap {_extrap} not supported")
if isinstance(extrap_, bool):
if extrap_:
end = self.np.take(a, self.np.array([-1]), axis=axis)
ratio = end / self.np.take(a, self.np.array([-2]), axis=axis)
exp = self.np.arange(1, Npad_ + 1).reshape(to_axis)
a_ = end * ratio ** exp
else:
a_ = self.np.zeros(a.shape[:axis] + (Npad_,) + a.shape[axis+1:])
elif extrap_ == 'const':
end = self.np.take(a, self.np.array([-1]), axis=axis)
a_ = self.np.repeat(end, Npad_, axis=axis)
else:
raise ValueError(f"right extrap {extrap_} not supported")
return self.np.concatenate((_a, a, a_), axis=axis)
def _unpad(self, a, axis, out):
"""Undo padding in an array.
Parameters
----------
a : (..., N, ...) or (..., Nin, ...) ndarray
array to be unpadded, but not if already of size `Nin`
axis : int
axis along which to unpad
out : bool
unpad the output if True, otherwise the input; the two cases have
their left and right pad sizes reversed
"""
if a.shape[axis] == self.Nin:
return a
elif a.shape[axis] != self.N:
raise ValueError("array size must be that of the input or the convolution")
Npad = self.N - self.Nin
if out:
_Npad, Npad_ = Npad - Npad//2, Npad//2
else:
_Npad, Npad_ = Npad//2, Npad - Npad//2
return self.np.take(a, self.np.arange(_Npad, self.N - Npad_), axis=axis)
| 15,495 | 34.541284 | 100 |
py
|
mcfit
|
mcfit-master/mcfit/tests/__init__.py
| 0 | 0 | 0 |
py
|
|
mcfit
|
mcfit-master/mcfit/tests/test_transforms.py
|
import numpy as np
from numpy.testing import assert_allclose
from mcfit.transforms import *
def test_Hankel():
def F_fun(x): return 1 / (1 + x*x)**1.5
def G_fun(y): return np.exp(-y)
x = np.logspace(-3, 3, num=60, endpoint=False)
F = F_fun(x)
H = Hankel(x, nu=0, q=1, N=128, lowring=True)
y, G = H(F, extrap=True)
assert_allclose(G, G_fun(y), rtol=1e-8, atol=1e-8)
# NOTE the range for best accuracy does not exactly "match"
y = np.logspace(-4, 2, num=60, endpoint=False)
G = G_fun(y)
H_inv = Hankel(y, nu=0, q=1, N=128, lowring=True)
x, F = H_inv(G, extrap=True)
assert_allclose(F, F_fun(x), rtol=1e-10, atol=1e-10)
| 672 | 31.047619 | 63 |
py
|
mcfit
|
mcfit-master/mcfit/tests/test_mcfit.py
|
import numpy as np
from numpy.testing import assert_allclose
from mcfit.transforms import *
def test_matrix():
N = 81
x = np.logspace(-3, 3, num=N, endpoint=False)
F = 1 / (1 + x*x)**1.5
H1 = Hankel(x, nu=0, q=1, N=N)
y, G = H1(F)
a1, b1, C1 = H1.matrix(full=False)
M1 = H1.matrix(full=True)
assert_allclose(a1.ravel() * (C1 @ (b1 * F)), G, rtol=1e-10, atol=0)
assert_allclose(M1 @ F, G, rtol=1e-10, atol=0)
H2 = Hankel(y, nu=0, q=1, N=N)
a2, b2, C2 = H2.matrix(full=False)
M2 = H2.matrix(full=True)
assert_allclose(C1 @ C2, np.eye(N), rtol=0, atol=1e-14)
assert_allclose(C2 @ C1, np.eye(N), rtol=0, atol=1e-14)
assert_allclose(M1 @ M2, np.eye(N), rtol=0, atol=1e-9)
assert_allclose(M2 @ M1, np.eye(N), rtol=0, atol=1e-9)
def test_pad():
# _x_, _y_, extrap pad, and unpad
x = np.logspace(-3, 3, num=6, endpoint=False)
_x_ = np.logspace(-6, 6, num=13, endpoint=True)
y = np.logspace(-3, 3, num=6, endpoint=False)
_y_ = np.logspace(-7, 5, num=13, endpoint=True)
H = Hankel(x, N=13, xy=1)
assert_allclose(H._x_, _x_)
assert_allclose(H._y_, _y_)
assert_allclose(H._pad(x, 0, True, False), _x_)
assert_allclose(H._pad(y, 0, True, True), _y_)
assert_allclose(H._unpad(_x_, 0, False), x)
assert_allclose(H._unpad(_y_, 0, True), y)
# zero pad and axis
a = b = np.ones((6, 6))
_a_ = np.zeros((13, 6))
_a_[3:9, :] = 1
_b_ = np.zeros((6, 13))
_b_[:, 4:10] = 1
assert_allclose(H._pad(a, 0, False, False), _a_)
assert_allclose(H._pad(b, 1, False, True), _b_)
| 1,606 | 26.706897 | 72 |
py
|
FCtL
|
FCtL-main/train_deep_globe.py
|
#!/usr/bin/env python
# coding: utf-8
from __future__ import absolute_import, division, print_function
import os
import numpy as np
import torch
import torch.nn as nn
from torchvision import transforms
from tqdm import tqdm
from dataset.deep_globe import DeepGlobe, classToRGB, is_image_file
from utils.loss import FocalLoss
from utils.lr_scheduler import LR_Scheduler
from tensorboardX import SummaryWriter
from helper import create_model_load_weights, get_optimizer, Trainer, Evaluator, collate, collate_test
from option import Options
args = Options().parse()
dataset = args.dataset
if dataset == 1:
pass
elif dataset == 2:
args.n_class = 2
args.data_path = "./data_1/"
args.model_path = "./saved_models_1/"
args.log_path = "./runs_1/"
n_class = args.n_class #2
print("n_class:",n_class)
torch.backends.cudnn.deterministic = True
data_path = args.data_path #data
model_path = args.model_path #saved_models
log_path = args.log_path #log
if not os.path.isdir(model_path): os.mkdir(model_path)
if not os.path.isdir(log_path): os.mkdir(log_path)
print("data_path:",data_path , "model_path:",model_path, "log_path",log_path)
task_name = args.task_name
print("task_name:",task_name)
mode = args.mode
train = args.train
val = args.val
print("mode:",mode, "train:",train, "val:",val)
###################################
print("preparing datasets and dataloaders......")
batch_size = args.batch_size
num_worker = 0
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
ids_train = [image_name for image_name in os.listdir(os.path.join(data_path, "train", "Sat")) if is_image_file(image_name)]
ids_test = [image_name for image_name in os.listdir(os.path.join(data_path, "offical_crossvali", "Sat")) if is_image_file(image_name)]
ids_val = [image_name for image_name in os.listdir(os.path.join(data_path, "crossvali", "Sat")) if is_image_file(image_name)]
dataset_train = DeepGlobe(dataset, os.path.join(data_path, "train"), ids_train, label=True, transform=True)
dataloader_train = torch.utils.data.DataLoader(dataset=dataset_train, batch_size=batch_size, num_workers=num_worker, collate_fn=collate, shuffle=True, pin_memory=True)
dataset_test = DeepGlobe(dataset, os.path.join(data_path, "offical_crossvali"), ids_test, label=False)
dataloader_test = torch.utils.data.DataLoader(dataset=dataset_test, batch_size=batch_size, num_workers=num_worker, collate_fn=collate_test, shuffle=False, pin_memory=True)
dataset_val = DeepGlobe(dataset, os.path.join(data_path, "crossvali"), ids_val, label=True)
dataloader_val = torch.utils.data.DataLoader(dataset=dataset_val, batch_size=batch_size, num_workers=num_worker, collate_fn=collate, shuffle=False, pin_memory=True)
print('train_len:',len(ids_train))
print('test_len:',len(ids_test))
print('val_len:',len(ids_val))
##### sizes are (w, h) ##############################
# make sure margin / 32 is over 1.5 AND size_g is divisible by 4
size_p = (args.size_p, args.size_p) # cropped local patch size 508
size_g = (args.size_g, args.size_g) # resize global image size 508
sub_batch_size = args.sub_batch_size # batch size for train local patches 6
###################################
print("creating models......")
pre_path = os.path.join(model_path, args.pre_path)
glo_path_10 = os.path.join(model_path, args.glo_path_10)
glo_path_15 = os.path.join(model_path, args.glo_path_15)
print("pre_path:", pre_path, "medium_path:", glo_path_10, "large_path:", glo_path_15)
model, global_fixed_medium, global_fixed_large = create_model_load_weights(n_class, pre_path, glo_path_10, glo_path_15, mode)
###################################
num_epochs = args.num_epochs #50
lens = args.lens
start = args.start
learning_rate = args.lr #5e-05
context10 = args.context10
context15 = args.context15
optimizer = get_optimizer(model, learning_rate)
scheduler = LR_Scheduler('poly', learning_rate, num_epochs, len(dataloader_train))
##################################
criterion1 = FocalLoss(gamma=3)
criterion = lambda x,y: criterion1(x, y)
if val:
writer = SummaryWriter(log_dir=log_path + task_name)
f_log = open(log_path + task_name + ".log", 'w')
trainer = Trainer(criterion, optimizer, n_class, size_p, size_g, sub_batch_size, mode, dataset, context10, context15)
evaluator = Evaluator(n_class, size_p, size_g, sub_batch_size, mode, train, dataset, context10, context15)
best_pred = 0.0
print("start training......")
for epoch in range(start, start + lens):
if not train:
break
trainer.set_train(model)
optimizer.zero_grad()
tbar = tqdm(dataloader_train); train_loss = 0
for i_batch, sample_batched in enumerate(tbar):
scheduler(optimizer, i_batch, epoch, best_pred) #update lr
loss = trainer.train(sample_batched, model, global_fixed_medium, global_fixed_large)
train_loss += loss.item()
score_train = trainer.get_scores()
tbar.set_description('epoch:%d Train loss: %.3f; mIoU: %.3f' % (epoch, train_loss / (i_batch + 1),
np.mean(np.nan_to_num(score_train["iou"][1:]))))
writer.add_scalar('train_loss', loss, epoch * len(dataloader_train) + i_batch)
writer.add_scalar('train_miou', np.mean(np.nan_to_num(score_train["iou"][1:])), epoch * len(dataloader_train) + i_batch)
score_train = trainer.get_scores()
trainer.reset_metrics()
# torch.cuda.empty_cache()
if (epoch+1) % 5 == 0:
with torch.no_grad():
print("evaling...")
model.eval()
tbar = tqdm(dataloader_val)
for i_batch, sample_batched in enumerate(tbar):
predictions = evaluator.eval_test(sample_batched, model, global_fixed_medium, global_fixed_large)
score_val = evaluator.get_scores()
# use [1:] since class0 is not considered in deep_globe metric
tbar.set_description('mIoU: %.3f' % (np.mean(np.nan_to_num(score_val["iou"])[1:])))
images = sample_batched['image']
labels = sample_batched['label'] # PIL images
if i_batch * batch_size + len(images) > (epoch % len(dataloader_val)) and i_batch * batch_size <= (epoch % len(dataloader_val)):
writer.add_image('image', transforms.ToTensor()(images[(epoch % len(dataloader_val)) - i_batch * batch_size]), epoch)
writer.add_image('mask', classToRGB(dataset, np.array(labels[(epoch % len(dataloader_val)) - i_batch * batch_size])) , epoch)
writer.add_image('prediction', classToRGB(dataset, predictions[(epoch % len(dataloader_val)) - i_batch * batch_size]), epoch)
torch.save(model.state_dict(), model_path + task_name + ".epoch" + str(epoch) + ".pth")
score_val = evaluator.get_scores()
evaluator.reset_metrics()
if np.mean(np.nan_to_num(score_val["iou"][1:])) > best_pred: best_pred = np.mean(np.nan_to_num(score_val["iou"][1:]))
log = ""
log = log + 'epoch [{}/{}] IoU: train = {:.4f}, val = {:.4f}'.format(epoch+1, num_epochs, np.mean(np.nan_to_num(score_train["iou"][1:])), np.mean(np.nan_to_num(score_val["iou"][1:]))) + "\n"
log = log + "train: " + str(score_train["iou"]) + "\n"
log = log + "val:" + str(score_val["iou"]) + "\n"
log += "================================\n"
print(log)
f_log.write(log)
f_log.flush()
writer.add_scalars('IoU', {'train iou': np.mean(np.nan_to_num(score_train["iou"][1:])), 'validation iou': np.mean(np.nan_to_num(score_val["iou"][1:]))}, epoch)
if val: f_log.close()
if not train:
with torch.no_grad():
print("testing...")
model.eval()
tbar = tqdm(dataloader_test)
for i_batch, sample_batched in enumerate(tbar):
predictions = evaluator.eval_test(sample_batched, model, global_fixed_medium, global_fixed_large)
images = sample_batched['image']
if not os.path.isdir("./prediction/"): os.mkdir("./prediction/")
for i in range(len(images)):
transforms.functional.to_pil_image(classToRGB(dataset, predictions[i])).save("./prediction/" + sample_batched['id'][i] + "_mask.png")
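# Example invocation (a sketch; the flags are defined in option.py and the
# task name and checkpoint file names below are purely illustrative):
# python train_deep_globe.py --dataset 2 --mode 3 --train --val --task_name fctl \
#     --pre_path pre.pth --glo_path_10 medium.pth --glo_path_15 large.pth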
| 8,219 | 46.514451 | 202 |
py
|
FCtL
|
FCtL-main/helper.py
|
#!/usr/bin/env python
# coding: utf-8
from __future__ import absolute_import, division, print_function
import cv2
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision import transforms
from utils.metrics import ConfusionMatrix
from PIL import Image, ImageOps
from models.fcn import FCN8, MiniFCN8
# torch.cuda.synchronize()
# torch.backends.cudnn.benchmark = True
torch.backends.cudnn.deterministic = True
transformer = transforms.Compose([
transforms.ToTensor(),
])
def resize(images, shape, label=False):
'''
resize PIL images
shape: (w, h)
'''
resized = list(images)
for i in range(len(images)):
if label:
resized[i] = images[i].resize(shape, Image.NEAREST)
else:
resized[i] = images[i].resize(shape, Image.BILINEAR)
return resized
def _mask_transform(mask):
target = np.array(mask).astype('int32')
return target
def masks_transform(masks, numpy=False):
'''
masks: list of PIL images
'''
targets = []
for m in masks:
targets.append(_mask_transform(m))
targets = np.array(targets)
if numpy:
return targets
else:
return torch.from_numpy(targets).long().cuda()
def images_transform(images):
'''
images: list of PIL images
'''
inputs = []
for img in images:
inputs.append(transformer(img))
inputs = torch.stack(inputs, dim=0).cuda()
return inputs
def get_patch_info(shape, p_size):
'''
shape: origin image size, (x, y)
p_size: patch size (square)
return: n_x, n_y, step_x, step_y
'''
x = shape[0]
y = shape[1]
n = m = 1
while x > n * p_size:
n += 1
while p_size - 1.0 * (x - p_size) / (n - 1) < 50:
n += 1
while y > m * p_size:
m += 1
while p_size - 1.0 * (y - p_size) / (m - 1) < 50:
m += 1
return n, m, (x - p_size) * 1.0 / (n - 1), (y - p_size) * 1.0 / (m - 1)
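# Worked example (illustrative): get_patch_info((2048, 2048), 508) returns
# (5, 5, 385.0, 385.0), i.e. a 5x5 grid of 508-px patches overlapping by 123 px.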
def global2patch(images, p_size):
'''
image/label => patches
p_size: patch size
return: list of PIL patch images; coordinates: images->patches; ratios: (h, w)
'''
patches = []; coordinates = []; templates = []; sizes = []; ratios = [(0, 0)] * len(images); patch_ones = np.ones(p_size)
for i in range(len(images)):
w, h = images[i].size
size = (h, w)
sizes.append(size)
ratios[i] = (float(p_size[0]) / size[0], float(p_size[1]) / size[1])
template = np.zeros(size)
n_x, n_y, step_x, step_y = get_patch_info(size, p_size[0])
patches.append([images[i]] * (n_x * n_y))
coordinates.append([(0, 0)] * (n_x * n_y))
for x in range(n_x):
if x < n_x - 1: top = int(np.round(x * step_x))
else: top = size[0] - p_size[0]
for y in range(n_y):
if y < n_y - 1: left = int(np.round(y * step_y))
else: left = size[1] - p_size[1]
template[top:top+p_size[0], left:left+p_size[1]] += patch_ones
coordinates[i][x * n_y + y] = (1.0 * top / size[0], 1.0 * left / size[1])
patches[i][x * n_y + y] = transforms.functional.crop(images[i], top, left, p_size[0], p_size[1]) #508 508
templates.append(Variable(torch.Tensor(template).expand(1, 1, -1, -1)).cuda())
return patches, coordinates, templates, sizes, ratios
def global2bigpatch(images, p_size, mul=2):
if mul == 1.5:
sz = int(p_size[0]/4)
elif mul == 2:
sz = int(p_size[0]/2)
elif mul == 2.5:
sz = int(p_size[0]*3/4)
elif mul == 3:
sz = int(p_size[0])
elif mul == 4:
sz = int(p_size[0]*3/2)
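# In every branch sz == p_size[0]*(mul-1)/2: pad each side by half of the extra
# context so a (mul * p_size) window can be cropped around any local patch.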
patches = []; coordinates = []; templates = []; sizes = []; ratios = [(0, 0)] * len(images); patch_ones = np.ones(p_size)
for i in range(len(images)):
w, h = images[i].size
big = ImageOps.expand(images[i],(sz, sz, sz, sz),fill='black')
size = (h, w)
n_x, n_y, step_x, step_y = get_patch_info(size, p_size[0])
patches.append([big] * (n_x * n_y))
for x in range(n_x):
if x < n_x - 1: top = int(np.round(x * step_x))
else: top = size[0] - p_size[0]
for y in range(n_y):
if y < n_y - 1: left = int(np.round(y * step_y))
else: left = size[1] - p_size[1]
patches[i][x * n_y + y] = transforms.functional.crop(big, top, left, int(p_size[0]*mul), int(p_size[1]*mul)).resize(p_size, Image.BILINEAR) #508 508
return patches#, coordinates, templates, sizes, ratios
def patch2global(patches, n_class, sizes, coordinates, p_size):
'''
predicted patches (after classify layer) => predictions
return: list of np.array
'''
predictions = [ np.zeros((n_class, size[0], size[1])) for size in sizes ]
for i in range(len(sizes)):
for j in range(len(coordinates[i])):
top, left = coordinates[i][j]
top = int(np.round(top * sizes[i][0])); left = int(np.round(left * sizes[i][1]))
predictions[i][:, top: top + p_size[0], left: left + p_size[1]] += patches[i][j]
return predictions
def collate(batch):
image = [ b['image'] for b in batch ] # w, h
label = [ b['label'] for b in batch ]
id = [ b['id'] for b in batch ]
return {'image': image, 'label': label, 'id': id}
def collate_test(batch):
image = [ b['image'] for b in batch ] # w, h
id = [ b['id'] for b in batch ]
return {'image': image, 'id': id}
def create_model_load_weights(n_class, pre_path="", glo_path_10="", glo_path_15="", mode=1):
model = FCN8(n_class, mode)
model = nn.DataParallel(model)
model = model.cuda()
if pre_path != './saved_models_1/':
print('preparing model...')
# load fixed basic global branch
partial = torch.load(pre_path)
state = model.state_dict()
# 1. filter out unnecessary keys
pretrained_dict = {k: v for k, v in partial.items() if k in state}
# 2. overwrite entries in the existing state dict
state.update(pretrained_dict)
# 3. load the new state dict
model.load_state_dict(state)
global_fixed_10 = None
if mode == 2 or mode == 3:
print('preparing global_10 model...')
global_fixed_10 = MiniFCN8(n_class)
global_fixed_10 = nn.DataParallel(global_fixed_10)
global_fixed_10 = global_fixed_10.cuda()
if glo_path_10 != './saved_models_1/':
partial = torch.load(glo_path_10)
state = global_fixed_10.state_dict()
pretrained_dict = {k: v for k, v in partial.items() if k in state}
state.update(pretrained_dict)
global_fixed_10.load_state_dict(state)
global_fixed_10.eval()
global_fixed_15 = None
if mode == 3:
print('preparing global_15 model...')
global_fixed_15 = MiniFCN8(n_class)
global_fixed_15 = nn.DataParallel(global_fixed_15)
global_fixed_15 = global_fixed_15.cuda()
if glo_path_15 != './saved_models_1/':
partial = torch.load(glo_path_15)
state = global_fixed_15.state_dict()
pretrained_dict = {k: v for k, v in partial.items() if k in state}
state.update(pretrained_dict)
global_fixed_15.load_state_dict(state)
global_fixed_15.eval()
return model, global_fixed_10, global_fixed_15
def get_optimizer(model, learning_rate=2e-5):
optimizer = torch.optim.Adam([
{'params': model.module.parameters(), 'lr': learning_rate},
], weight_decay=5e-4)
return optimizer
class Trainer(object):
def __init__(self, criterion, optimizer, n_class, size_p, size_g, sub_batch_size=6, mode=1, dataset=1, context10=2, context15=3):
self.criterion = criterion
self.optimizer = optimizer
self.metrics = ConfusionMatrix(n_class)
self.n_class = n_class
self.size_p = size_p
self.size_g = size_g
self.sub_batch_size = sub_batch_size
self.mode = mode
self.context10 = context10
self.context15 = context15
def set_train(self, model):
model.module.train()
def get_scores(self):
score = self.metrics.get_scores()
return score
def reset_metrics(self):
self.metrics.reset()
def train(self, sample, model, global_fixed_10, global_fixed_15):
images, labels = sample['image'], sample['label'] # PIL images
labels_npy = masks_transform(labels, numpy=True) # label of origin size in numpy
patches, coordinates, templates, sizes, ratios = global2patch(images, self.size_p)
label_patches, _, _, _, _ = global2patch(labels, self.size_p)
predicted_patches = [ np.zeros((len(coordinates[i]), self.n_class, self.size_p[0], self.size_p[1])) for i in range(len(images)) ]
##################1 2 3
if self.mode != 1:
big_patches_10 = global2bigpatch(images, self.size_p, self.context10)
if self.mode == 3:
big_patches_15 = global2bigpatch(images, self.size_p, self.context15)
pool5_10, pool5_15 = None, None
# training with patches ###########################################
for i in range(len(images)):
j = 0
while j < len(coordinates[i]):
patches_var = images_transform(patches[i][j : j+self.sub_batch_size]) # b, c, h, w
label_patches_var = masks_transform(label_patches[i][j : j+self.sub_batch_size])
big_patches_10_var=None
if self.mode != 1:
big_patches_10_var = images_transform(big_patches_10[i][j : j+self.sub_batch_size])
if self.mode == 3:
with torch.no_grad():
pool5_10 = global_fixed_10.forward(big_patches_10_var)
big_patches_15_var = images_transform(big_patches_15[i][j : j+self.sub_batch_size])
pool5_15 = global_fixed_15.forward(big_patches_15_var)
if self.mode == 1 or self.mode == 2:
output_patches = model.forward(patches_var, y=big_patches_10_var)
else:
output_patches = model.forward(patches_var, pool5_10, pool5_15)
loss = self.criterion(output_patches, label_patches_var)
loss.backward()
# patch predictions
predicted_patches[i][j:j+output_patches.size()[0]] = F.interpolate(output_patches, size=self.size_p, mode='nearest').data.cpu().numpy()
j += self.sub_batch_size
self.optimizer.step()
self.optimizer.zero_grad()
####################################################################################
scores = np.array(patch2global(predicted_patches, self.n_class, sizes, coordinates, self.size_p)) # merge softmax scores from patches (overlaps)
predictions = scores.argmax(1) # b, h, w
self.metrics.update(labels_npy, predictions)
return loss
class Evaluator(object):
def __init__(self, n_class, size_p, size_g, sub_batch_size=6, mode=1, val=True, dataset=1, context10=2, context15=3):
self.metrics = ConfusionMatrix(n_class)
self.n_class = n_class
self.size_p = size_p
self.size_g = size_g
self.sub_batch_size = sub_batch_size
self.mode = mode
self.val = val
self.context10 = context10
self.context15 = context15
if not val:
self.flip_range = [False, True]
self.rotate_range = [0, 1, 2, 3]
else:
self.flip_range = [False]
self.rotate_range = [0]
def get_scores(self):
score = self.metrics.get_scores()
return score
def reset_metrics(self):
self.metrics.reset()
def eval_test(self, sample, model, global_fixed_10, global_fixed_15):
with torch.no_grad():
images = sample['image']
if self.val:
labels = sample['label'] # PIL images
labels_npy = masks_transform(labels, numpy=True)
images = [ image.copy() for image in images ]
scores = [ np.zeros((1, self.n_class, images[i].size[1], images[i].size[0])) for i in range(len(images)) ]
for flip in self.flip_range:
if flip:
# images still carry the 270-degree rotation from the previous pass of the rotate loop
for b in range(len(images)):
images[b] = transforms.functional.rotate(images[b], 90) # rotate back!
images[b] = transforms.functional.hflip(images[b])
for angle in self.rotate_range:
if angle > 0:
for b in range(len(images)):
images[b] = transforms.functional.rotate(images[b], 90)
# prepare global images onto cuda
patches, coordinates, templates, sizes, ratios = global2patch(images, self.size_p)
predicted_patches = [ np.zeros((len(coordinates[i]), self.n_class, self.size_p[0], self.size_p[1])) for i in range(len(images)) ]
if self.mode == 2 or self.mode == 3:
big_patches_10 = global2bigpatch(images, self.size_p, self.context10)
if self.mode == 3:
big_patches_15 = global2bigpatch(images, self.size_p, self.context15)
# eval with patches ###########################################
for i in range(len(images)):
j = 0
while j < len(coordinates[i]):
patches_var = images_transform(patches[i][j : j+self.sub_batch_size]) # b, c, h, w
big_patches_10_var = None
if self.mode == 2 or self.mode == 3:
big_patches_10_var = images_transform(big_patches_10[i][j : j+self.sub_batch_size])
if self.mode == 1 or self.mode == 2:
output_patches = model.forward(patches_var, y=big_patches_10_var)
else: ##3
pool5_10 = global_fixed_10.forward(big_patches_10_var)
big_patches_15_var = images_transform(big_patches_15[i][j : j+self.sub_batch_size])
pool5_15 = global_fixed_15.forward(big_patches_15_var)
output_patches = model.forward(patches_var, pool5_10, pool5_15)
# patch predictions
predicted_patches[i][j:j+output_patches.size()[0]] += F.interpolate(output_patches, size=self.size_p, mode='nearest').data.cpu().numpy()
j += patches_var.size()[0]
if flip:
scores[i] += np.flip(np.rot90(np.array(patch2global(predicted_patches[i:i+1], self.n_class, sizes[i:i+1], coordinates[i:i+1], self.size_p)), k=angle, axes=(3, 2)), axis=3) # merge softmax scores from patches (overlaps)
else:
scores[i] += np.rot90(np.array(patch2global(predicted_patches[i:i+1], self.n_class, sizes[i:i+1], coordinates[i:i+1], self.size_p)), k=angle, axes=(3, 2)) # merge softmax scores from patches (overlaps)
###############################################################
# patch predictions ###########################
predictions = [ score.argmax(1)[0] for score in scores ]
if self.val:
self.metrics.update(labels_npy, predictions)
###################################################
return predictions
| 16,004 | 42.140162 | 246 |
py
|
FCtL
|
FCtL-main/option.py
|
import os
import argparse
import torch
class Options():
def __init__(self):
parser = argparse.ArgumentParser(description='PyTorch Segmentation')
# model and dataset
parser.add_argument('--n_class', type=int, default=7, help='segmentation classes')
parser.add_argument('--data_path', type=str, help='path to dataset where images store')
parser.add_argument('--model_path', type=str, help='path to store trained model files, no need to include task specific name')
parser.add_argument('--log_path', type=str, help='path to store tensorboard log files, no need to include task specific name')
parser.add_argument('--task_name', type=str, help='task name for naming saved model files and log files')
parser.add_argument('--mode', type=int, default=1, choices=[1, 2, 3], help='mode for training procedure. 1.fcn 2.fcn+1 3.fcn+2')
parser.add_argument('--dataset', type=int, default=2, choices=[1, 2], help='dataset for training procedure. 1.deep 2.IA')
parser.add_argument('--train', action='store_true', default=False, help='train')
parser.add_argument('--val', action='store_true', default=False, help='val')
parser.add_argument('--context10', type=int, default=2, help='context10')
parser.add_argument('--context15', type=int, default=3, help='context15')
parser.add_argument('--pre_path', type=str, default="", help='name for pre model path')
parser.add_argument('--glo_path_10', type=str, default="", help='name for medium model path')
parser.add_argument('--glo_path_15', type=str, default="", help='name for large model path')
parser.add_argument('--batch_size', type=int, default=6, help='batch size for origin global image (without downsampling)')
parser.add_argument('--sub_batch_size', type=int, default=6, help='batch size for using local image patches')
parser.add_argument('--size_p', type=int, default=508, help='size (in pixel) for cropped local image')
parser.add_argument('--size_g', type=int, default=508, help='size (in pixel) for resized global image')
# the parser
self.parser = parser
def parse(self):
args = self.parser.parse_args()
args.num_epochs = 100
args.start = 50
args.lens = 50
args.lr = 5e-5
return args
| 2,367 | 61.315789 | 136 |
py
|
FCtL
|
FCtL-main/dataset/deep_globe.py
|
import os
import torch.utils.data as data
import numpy as np
from PIL import Image, ImageFile
import random
from torchvision.transforms import ToTensor
from torchvision import transforms
import cv2
ImageFile.LOAD_TRUNCATED_IMAGES = True
def is_image_file(filename):
return any(filename.endswith(extension) for extension in [".png", ".jpg", ".jpeg", ".tif"])
def classToRGB(dataset, label):
l, w = label.shape[0], label.shape[1]
colmap = np.zeros(shape=(l, w, 3)).astype(np.float32)
if dataset == 1:
pass
else:
indices = np.where(label == 1)
colmap[indices[0].tolist(), indices[1].tolist(), :] = [255, 255, 255]
indices = np.where(label == 0)
colmap[indices[0].tolist(), indices[1].tolist(), :] = [0, 0, 0]
transform = ToTensor()
# plt.imshow(colmap)
# plt.show()
return transform(colmap)
def class_to_target(inputs, numClass):
batchSize, l, w = inputs.shape[0], inputs.shape[1], inputs.shape[2]
target = np.zeros(shape=(batchSize, l, w, numClass), dtype=np.float32)
for index in range(numClass):
indices = np.where(inputs == index)
temp = np.zeros(shape=numClass, dtype=np.float32)
temp[index] = 1
target[indices[0].tolist(), indices[1].tolist(), indices[2].tolist(), :] = temp
return target.transpose(0, 3, 1, 2)
def label_bluring(inputs):
batchSize, numClass, height, width = inputs.shape
outputs = np.ones((batchSize, numClass, height, width), dtype=np.float64)
for batchCnt in range(batchSize):
for index in range(numClass):
outputs[batchCnt, index, ...] = cv2.GaussianBlur(inputs[batchCnt, index, ...].astype(np.float64), (7, 7), 0)
return outputs
class DeepGlobe(data.Dataset):
"""input and label image dataset"""
def __init__(self, dataset, root, ids, label=False, transform=False):
super(DeepGlobe, self).__init__()
"""
Args:
fileDir(string): directory with all the input images.
transform(callable, optional): Optional transform to be applied on a sample
"""
self.dataset = dataset
self.root = root
self.label = label
self.transform = transform
self.ids = ids
self.color_jitter = transforms.ColorJitter(brightness=0.3, contrast=0.3, saturation=0.3, hue=0.04)
def __getitem__(self, index):
sample = {}
sample['id'] = self.ids[index][:-8]
image = Image.open(os.path.join(self.root, "Sat/" + self.ids[index])) # w, h
sample['image'] = image
if self.label:
if self.dataset == 1 :
pass
else:
label = Image.open(os.path.join(self.root, 'Label/' + self.ids[index].replace('_sat.tif', '_mask.png')))
sample['label'] = label
if self.transform and self.label:
image, label = self._transform(image, label)
sample['image'] = image
sample['label'] = label
return sample
def _transform(self, image, label):
if np.random.random() > 0.5:
image = transforms.functional.hflip(image)
label = transforms.functional.hflip(label)
if np.random.random() > 0.5:
degree = random.choice([90, 180, 270])
image = transforms.functional.rotate(image, degree)
label = transforms.functional.rotate(label, degree)
return image, label
def __len__(self):
return len(self.ids)
| 3,527 | 32.6 | 120 |
py
|
FCtL
|
FCtL-main/dataset/__init__.py
| 0 | 0 | 0 |
py
|
|
FCtL
|
FCtL-main/models/base_model.py
|
import logging
import torch.nn as nn
import numpy as np
class BaseModel(nn.Module):
def __init__(self):
super(BaseModel, self).__init__()
self.logger = logging.getLogger(self.__class__.__name__)
def forward(self):
raise NotImplementedError
def summary(self):
model_parameters = filter(lambda p: p.requires_grad, self.parameters())
nbr_params = sum([np.prod(p.size()) for p in model_parameters])
def __str__(self):
model_parameters = filter(lambda p: p.requires_grad, self.parameters())
nbr_params = sum([np.prod(p.size()) for p in model_parameters])
return super(BaseModel, self).__str__()
#return summary(self, input_shape=(2, 3, 224, 224))
| 734 | 32.409091 | 79 |
py
|
FCtL
|
FCtL-main/models/fcn.py
|
from .base_model import BaseModel
import torch.nn as nn
import torch.nn.functional as F
from torchvision import models
from .helpers import get_upsampling_weight
import torch
from itertools import chain
from .FCtL import FCtL
class MiniFCN8(BaseModel):
def __init__(self, num_classes, pretrained=True):
super(MiniFCN8, self).__init__()
self.vgg = models.vgg16(pretrained)
self.features = list(self.vgg.features.children())
self.classifier = list(self.vgg.classifier.children())
# Pad the input to enable small inputs and allow matching feature maps
self.features[0].padding = (100, 100)
        # Enable ceil in max pool, to avoid different sizes when upsampling
for layer in self.features:
if 'MaxPool' in layer.__class__.__name__:
layer.ceil_mode = True
self.big_pool3 = nn.Sequential(*self.features[:17])
self.big_pool4 = nn.Sequential(*self.features[17:24])
self.big_pool5 = nn.Sequential(*self.features[24:])
def forward(self, x):
pool3_2 = self.big_pool3(x)
pool4_2 = self.big_pool4(pool3_2)
pool5_2 = self.big_pool5(pool4_2)
return pool5_2
class FCN8(BaseModel):
def __init__(self, num_classes, mode=1, pretrained=True, freeze_bn=False, freeze_backbone=False):
super(FCN8, self).__init__()
self.mode = mode
self.vgg = models.vgg16(pretrained)
self.features = list(self.vgg.features.children())
self.classifier = list(self.vgg.classifier.children())
# Pad the input to enable small inputs and allow matching feature maps
self.features[0].padding = (100, 100)
        # Enable ceil in max pool, to avoid different sizes when upsampling
for layer in self.features:
if 'MaxPool' in layer.__class__.__name__:
layer.ceil_mode = True
# Extract pool3, pool4 and pool5 from the VGG net
self.pool3 = nn.Sequential(*self.features[:17])
self.pool4 = nn.Sequential(*self.features[17:24])
self.pool5 = nn.Sequential(*self.features[24:])
if self.mode == 2:
self.big_pool3 = nn.Sequential(*self.features[:17])
self.big_pool4 = nn.Sequential(*self.features[17:24])
self.big_pool5 = nn.Sequential(*self.features[24:])
if self.mode == 2 or self.mode == 3:
self.big_attention = FCtL(512, 512)
        # Adjust the depth of pool3 and pool4 to num_classes
self.adj_pool3 = nn.Conv2d(256, num_classes, kernel_size=1)
self.adj_pool4 = nn.Conv2d(512, num_classes, kernel_size=1)
# Replace the FC layer of VGG with conv layers
conv6 = nn.Conv2d(512, 4096, kernel_size=7)
conv7 = nn.Conv2d(4096, 4096, kernel_size=1)
output = nn.Conv2d(4096, num_classes, kernel_size=1)
# Copy the weights from VGG's FC pretrained layers
conv6.weight.data.copy_(self.classifier[0].weight.data.view(
conv6.weight.data.size()))
conv6.bias.data.copy_(self.classifier[0].bias.data)
conv7.weight.data.copy_(self.classifier[3].weight.data.view(
conv7.weight.data.size()))
conv7.bias.data.copy_(self.classifier[3].bias.data)
# Get the outputs
self.output = nn.Sequential(conv6, nn.ReLU(inplace=True), nn.Dropout(),
conv7, nn.ReLU(inplace=True), nn.Dropout(),
output)
        # We'll need three upsampling layers, upsampling (x2 +2) the outputs,
        # upsampling (x2 +2) the addition of pool4 and the upsampled output,
        # and upsampling (x8 +8) the final value (pool3 + added output and pool4)
self.up_output = nn.ConvTranspose2d(num_classes, num_classes,
kernel_size=4, stride=2, bias=False)
self.up_pool4_out = nn.ConvTranspose2d(num_classes, num_classes,
kernel_size=4, stride=2, bias=False)
self.up_final = nn.ConvTranspose2d(num_classes, num_classes,
kernel_size=16, stride=8, bias=False)
        # We'll use bilinear kernels for the upsampling weights
self.up_output.weight.data.copy_(
get_upsampling_weight(num_classes, num_classes, 4))
self.up_pool4_out.weight.data.copy_(
get_upsampling_weight(num_classes, num_classes, 4))
self.up_final.weight.data.copy_(
get_upsampling_weight(num_classes, num_classes, 16))
        # We'll freeze the weights; this is fixed upsampling, not a learned deconv
for m in self.modules():
if isinstance(m, nn.ConvTranspose2d):
m.weight.requires_grad = False
if freeze_bn: self.freeze_bn()
if freeze_backbone:
set_trainable([self.pool3, self.pool4, self.pool5], False)
def forward(self, x, pool5_10=None, pool5_15=None, y=None):
        img_H, img_W = x.size()[2], x.size()[3]
# Forward the image
pool3 = self.pool3(x)
pool4 = self.pool4(pool3)
pool5 = self.pool5(pool4)
if self.mode == 2:
pool3_10 = self.big_pool3(y)
pool4_10 = self.big_pool4(pool3_10)
pool5_10 = self.big_pool5(pool4_10)
if self.mode == 2 or self.mode == 3:
pool5 = self.big_attention(pool5, pool5_10, pool5_15)
output = self.output(pool5)
        # Get the outputs and upsample them
up_output = self.up_output(output) #7*36*36
# Adjust pool4 and add the uped-outputs to pool4
adjstd_pool4 = self.adj_pool4(0.01 * pool4)
add_out_pool4 = self.up_pool4_out(adjstd_pool4[:, :, 5: (5 + up_output.size()[2]),
5: (5 + up_output.size()[3])]
+ up_output)
# Adjust pool3 and add it to the uped last addition
adjstd_pool3 = self.adj_pool3(0.0001 * pool3)
final_value = self.up_final(adjstd_pool3[:, :, 9: (9 + add_out_pool4.size()[2]), 9: (9 + add_out_pool4.size()[3])]
+ add_out_pool4)
# Remove the corresponding padded regions to the input img size
        final_value = final_value[:, :, 31: (31 + img_H), 31: (31 + img_W)].contiguous()
return final_value
def get_backbone_params(self):
return chain(self.pool3.parameters(), self.pool4.parameters(), self.pool5.parameters(), self.output.parameters())
def get_decoder_params(self):
return chain(self.up_output.parameters(), self.adj_pool4.parameters(), self.up_pool4_out.parameters(),
self.adj_pool3.parameters(), self.up_final.parameters())
def freeze_bn(self):
for module in self.modules():
if isinstance(module, nn.BatchNorm2d): module.eval()
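# --- Shape-check sketch (not part of the original file, commented out
# because the relative imports above require loading this module as part of
# the package): runs FCN8 in its default single-input mode (mode=1, so the
# FCtL attention branch is skipped) on a random image. pretrained=False
# avoids downloading VGG16 weights.
#
# model = FCN8(num_classes=7, mode=1, pretrained=False).eval()
# x = torch.randn(1, 3, 224, 224)
# with torch.no_grad():
#     out = model(x)
# print(out.shape)  # torch.Size([1, 7, 224, 224]), one channel per class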
| 6,910 | 43.301282 | 122 |
py
|
FCtL
|
FCtL-main/models/FCtL.py
|
import torch
import torch.nn.functional as F
from torch import nn
from torch.nn import init
import math
class _FCtL(nn.Module):
def __init__(self, inplanes, planes, lr_mult, weight_init_scale):
conv_nd = nn.Conv2d
bn_nd = nn.BatchNorm2d
super(_FCtL, self).__init__()
self.conv_value = conv_nd(inplanes, inplanes, kernel_size=1, bias=False)
self.conv_value_1 = conv_nd(inplanes, inplanes, kernel_size=1, bias=False)
self.conv_value_2 = conv_nd(inplanes, inplanes, kernel_size=1, bias=False)
self.conv_out = None
self.conv_query = conv_nd(inplanes, planes, kernel_size=1)
self.conv_key = conv_nd(inplanes, planes, kernel_size=1)
self.conv_query_1 = conv_nd(inplanes, planes, kernel_size=1)
self.conv_key_1 = conv_nd(inplanes, planes, kernel_size=1)
self.conv_query_2 = conv_nd(inplanes, planes, kernel_size=1)
self.conv_key_2 = conv_nd(inplanes, planes, kernel_size=1)
self.in_1 = conv_nd(512, 512, kernel_size=1)
self.in_2 = conv_nd(512, 512, kernel_size=1)
self.in_3 = conv_nd(512, 512, kernel_size=1)
self.trans = conv_nd(512*3, 512*3, kernel_size=1)
self.out_1 = conv_nd(512, 512, kernel_size=1)
self.out_2 = conv_nd(512, 512, kernel_size=1)
self.out_3 = conv_nd(512, 512, kernel_size=1)
self.softmax = nn.Softmax(dim=2)
self.softmax_H = nn.Softmax(dim=0)
self.gamma = nn.Parameter(torch.zeros(1))
self.gamma_1 = nn.Parameter(torch.zeros(1))
self.gamma_2 = nn.Parameter(torch.zeros(1))
self.weight_init_scale = weight_init_scale
self.reset_parameters()
self.reset_lr_mult(lr_mult)
self.reset_weight_and_weight_decay()
def reset_parameters(self):
for m in self.modules():
if isinstance(m, nn.Conv3d) or isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv1d):
init.normal_(m.weight, 0, 0.01)
if m.bias is not None:
init.zeros_(m.bias)
m.inited = True
def reset_lr_mult(self, lr_mult):
if lr_mult is not None:
for m in self.modules():
m.lr_mult = lr_mult
else:
            print('lr_mult not changed')
def reset_weight_and_weight_decay(self):
init.normal_(self.conv_query.weight, 0, 0.01*self.weight_init_scale)
init.normal_(self.conv_key.weight, 0, 0.01*self.weight_init_scale)
self.conv_query.weight.wd=0.0
self.conv_query.bias.wd=0.0
self.conv_key.weight.wd=0.0
self.conv_key.bias.wd=0.0
def forward(self, x, y=None, z=None):
residual = x
value = self.conv_value(y)
value = value.view(value.size(0), value.size(1), -1)
out_sim = None
if z is not None:
value_1 = self.conv_value_1(z)
value_1 = value_1.view(value_1.size(0), value_1.size(1), -1)
out_sim_1 = None
value_2 = self.conv_value_2(x)
value_2 = value_2.view(value_2.size(0), value_2.size(1), -1)
out_sim_2 = None
query = self.conv_query(x)
key = self.conv_key(y)
query = query.view(query.size(0), query.size(1), -1)
key = key.view(key.size(0), key.size(1), -1)
if z is not None:
query_1 = self.conv_query_1(x)
key_1 = self.conv_key_1(z)
query_1 = query_1.view(query_1.size(0), query_1.size(1), -1)
key_1 = key_1.view(key_1.size(0), key_1.size(1), -1)
query_2 = self.conv_query_2(x)
key_2 = self.conv_key_2(x)
query_2 = query_2.view(query_2.size(0), query_2.size(1), -1)
key_2 = key_2.view(key_2.size(0), key_2.size(1), -1)
sim_map = torch.bmm(query.transpose(1, 2), key)
sim_map = self.softmax(sim_map)
out_sim = torch.bmm(sim_map, value.transpose(1, 2))
out_sim = out_sim.transpose(1, 2)
out_sim = out_sim.view(out_sim.size(0), out_sim.size(1), *x.size()[2:])
out_sim = self.gamma * out_sim
if z is not None:
sim_map_1 = torch.bmm(query_1.transpose(1, 2), key_1)
sim_map_1 = self.softmax(sim_map_1)
out_sim_1 = torch.bmm(sim_map_1, value_1.transpose(1, 2))
out_sim_1 = out_sim_1.transpose(1, 2)
out_sim_1 = out_sim_1.view(out_sim_1.size(0), out_sim_1.size(1), *x.size()[2:])
out_sim_1 = self.gamma_1 * out_sim_1
sim_map_2 = torch.bmm(query_2.transpose(1, 2), key_2)
sim_map_2 = self.softmax(sim_map_2)
out_sim_2 = torch.bmm(sim_map_2, value_2.transpose(1, 2))
out_sim_2 = out_sim_2.transpose(1, 2)
out_sim_2 = out_sim_2.view(out_sim_2.size(0), out_sim_2.size(1), *x.size()[2:])
out_sim_2 = self.gamma_2 * out_sim_2
if z is not None:
H_1 = self.in_1(out_sim)
H_2 = self.in_2(out_sim_1)
H_3 = self.in_3(out_sim_2)
H_cat = torch.cat((H_1, H_2, H_3), 1)
H_tra = self.trans(H_cat)
H_spl = torch.split(H_tra, 512, dim=1)
H_4 = torch.sigmoid(self.out_1(H_spl[0]))
H_5 = torch.sigmoid(self.out_2(H_spl[1]))
H_6 = torch.sigmoid(self.out_3(H_spl[2]))
H_st = torch.stack((H_4, H_5, H_6), 0)
H_all = self.softmax_H(H_st)
if z is not None:
out = residual + H_all[0] * out_sim + H_all[1] * out_sim_1 + H_all[2] * out_sim_2
else:
out = residual + out_sim
return out
class FCtL(_FCtL):
def __init__(self, inplanes, planes, lr_mult=None, weight_init_scale=1.0):
super(FCtL, self).__init__(inplanes=inplanes, planes=planes, lr_mult=lr_mult, weight_init_scale=weight_init_scale)
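# --- Smoke test (not part of the original file): fuses a local feature map
# with two same-shaped context maps, mirroring how FCN8 (mode 2/3) calls this
# module; the 512 channels match the hard-coded in_1/in_2/in_3 convolutions.
if __name__ == "__main__":
    attn = FCtL(512, 512).eval()
    local = torch.randn(1, 512, 18, 18)
    ctx_a = torch.randn(1, 512, 18, 18)
    ctx_b = torch.randn(1, 512, 18, 18)
    with torch.no_grad():
        fused = attn(local, ctx_a, ctx_b)
    print(fused.shape)  # torch.Size([1, 512, 18, 18])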
| 5,866 | 39.462069 | 122 |
py
|
FCtL
|
FCtL-main/models/model_store.py
|
"""Model store which provides pretrained models."""
from __future__ import print_function
__all__ = ['get_model_file', 'purge']
import os
import zipfile
from .utils import download, check_sha1
_model_sha1 = {name: checksum for checksum, name in [
('ebb6acbbd1d1c90b7f446ae59d30bf70c74febc1', 'resnet50'),
('2a57e44de9c853fa015b172309a1ee7e2d0e4e2a', 'resnet101'),
('0d43d698c66aceaa2bc0309f55efdd7ff4b143af', 'resnet152'),
('2e22611a7f3992ebdee6726af169991bc26d7363', 'deepten_minc'),
('662e979de25a389f11c65e9f1df7e06c2c356381', 'fcn_resnet50_ade'),
('eeed8e582f0fdccdba8579e7490570adc6d85c7c', 'fcn_resnet50_pcontext'),
('54f70c772505064e30efd1ddd3a14e1759faa363', 'psp_resnet50_ade'),
('075195c5237b778c718fd73ceddfa1376c18dfd0', 'deeplab_resnet50_ade'),
('5ee47ee28b480cc781a195d13b5806d5bbc616bf', 'encnet_resnet101_coco'),
('4de91d5922d4d3264f678b663f874da72e82db00', 'encnet_resnet50_pcontext'),
('9f27ea13d514d7010e59988341bcbd4140fcc33d', 'encnet_resnet101_pcontext'),
('07ac287cd77e53ea583f37454e17d30ce1509a4a', 'encnet_resnet50_ade'),
('3f54fa3b67bac7619cd9b3673f5c8227cf8f4718', 'encnet_resnet101_ade'),
]}
encoding_repo_url = 'https://hangzh.s3.amazonaws.com/'
_url_format = '{repo_url}encoding/models/{file_name}.zip'
def short_hash(name):
if name not in _model_sha1:
raise ValueError('Pretrained model for {name} is not available.'.format(name=name))
return _model_sha1[name][:8]
def get_model_file(name, root=os.path.join('~', '.encoding', 'models')):
r"""Return location for the pretrained on local file system.
This function will download from online model zoo when model cannot be found or has mismatch.
The root directory will be created if it doesn't exist.
Parameters
----------
name : str
Name of the model.
root : str, default '~/.encoding/models'
Location for keeping the model parameters.
Returns
-------
file_path
Path to the requested pretrained model file.
"""
file_name = '{name}-{short_hash}'.format(name=name, short_hash=short_hash(name))
root = os.path.expanduser(root)
file_path = os.path.join(root, file_name+'.pth')
sha1_hash = _model_sha1[name]
if os.path.exists(file_path):
if check_sha1(file_path, sha1_hash):
return file_path
else:
            print(('Mismatch in the content of model file {} detected.'
                   ' Downloading again.').format(file_path))
else:
print('Model file {} is not found. Downloading.'.format(file_path))
if not os.path.exists(root):
os.makedirs(root)
zip_file_path = os.path.join(root, file_name+'.zip')
repo_url = os.environ.get('ENCODING_REPO', encoding_repo_url)
if repo_url[-1] != '/':
repo_url = repo_url + '/'
download(_url_format.format(repo_url=repo_url, file_name=file_name),
path=zip_file_path,
overwrite=True)
with zipfile.ZipFile(zip_file_path) as zf:
zf.extractall(root)
os.remove(zip_file_path)
if check_sha1(file_path, sha1_hash):
return file_path
else:
raise ValueError('Downloaded file has different hash. Please try again.')
def purge(root=os.path.join('~', '.encoding', 'models')):
r"""Purge all pretrained model files in local file store.
Parameters
----------
root : str, default '~/.encoding/models'
Location for keeping the model parameters.
"""
root = os.path.expanduser(root)
files = os.listdir(root)
for f in files:
if f.endswith(".pth"):
os.remove(os.path.join(root, f))
def pretrained_model_list():
return list(_model_sha1.keys())
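# --- Usage sketch (not part of the original file, commented out because it
# triggers a download to ~/.encoding/models on first use):
#
# path = get_model_file('resnet50')
# print(path)  # e.g. ~/.encoding/models/resnet50-ebb6acbb.pth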
| 3,714 | 36.525253 | 97 |
py
|
FCtL
|
FCtL-main/models/__init__.py
| 0 | 0 | 0 |
py
|
|
FCtL
|
FCtL-main/models/helpers.py
|
import os
import torch
import torch.nn as nn
import numpy as np
import math
import PIL
def dir_exists(path):
if not os.path.exists(path):
os.makedirs(path)
def initialize_weights(*models):
for model in models:
for m in model.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight.data, nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1.)
m.bias.data.fill_(1e-4)
elif isinstance(m, nn.Linear):
m.weight.data.normal_(0.0, 0.0001)
m.bias.data.zero_()
def get_upsampling_weight(in_channels, out_channels, kernel_size):
factor = (kernel_size + 1) // 2
if kernel_size % 2 == 1:
center = factor - 1
else:
center = factor - 0.5
og = np.ogrid[:kernel_size, :kernel_size]
filt = (1 - abs(og[0] - center) / factor) * (1 - abs(og[1] - center) / factor)
weight = np.zeros((in_channels, out_channels, kernel_size, kernel_size), dtype=np.float64)
weight[list(range(in_channels)), list(range(out_channels)), :, :] = filt
return torch.from_numpy(weight).float()
def colorize_mask(mask, palette):
zero_pad = 256 * 3 - len(palette)
for i in range(zero_pad):
palette.append(0)
new_mask = PIL.Image.fromarray(mask.astype(np.uint8)).convert('P')
new_mask.putpalette(palette)
return new_mask
def set_trainable_attr(m,b):
m.trainable = b
for p in m.parameters(): p.requires_grad = b
def apply_leaf(m, f):
c = m if isinstance(m, (list, tuple)) else list(m.children())
if isinstance(m, nn.Module):
f(m)
if len(c)>0:
for l in c:
apply_leaf(l,f)
def set_trainable(l, b):
apply_leaf(l, lambda m: set_trainable_attr(m,b))
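# --- Quick check (not part of the original file): get_upsampling_weight
# builds the separable bilinear interpolation kernel used to initialise the
# ConvTranspose2d layers in fcn.py.
if __name__ == "__main__":
    w = get_upsampling_weight(1, 1, 4)
    print(w.shape)  # torch.Size([1, 1, 4, 4])
    print(w[0, 0])  # outer product of [0.25, 0.75, 0.75, 0.25]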
| 1,837 | 31.245614 | 94 |
py
|
FCtL
|
FCtL-main/utils/lr_scheduler.py
|
import math
class LR_Scheduler(object):
"""Learning Rate Scheduler
Step mode: ``lr = baselr * 0.1 ^ {floor(epoch-1 / lr_step)}``
Cosine mode: ``lr = baselr * 0.5 * (1 + cos(iter/maxiter))``
Poly mode: ``lr = baselr * (1 - iter/maxiter) ^ 0.9``
Args:
args: :attr:`args.lr_scheduler` lr scheduler mode (`cos`, `poly`),
:attr:`args.lr` base learning rate, :attr:`args.epochs` number of epochs,
:attr:`args.lr_step`
iters_per_epoch: number of iterations per epoch
"""
def __init__(self, mode, base_lr, num_epochs, iters_per_epoch=0,
lr_step=0, warmup_epochs=0):
self.mode = mode
print('Using {} LR Scheduler!'.format(self.mode))
self.lr = base_lr
if mode == 'step':
assert lr_step
self.lr_step = lr_step
self.iters_per_epoch = iters_per_epoch
self.N = num_epochs * iters_per_epoch
self.epoch = -1
self.warmup_iters = warmup_epochs * iters_per_epoch
def __call__(self, optimizer, i, epoch, best_pred):
T = epoch * self.iters_per_epoch + i
if self.mode == 'cos':
lr = 0.5 * self.lr * (1 + math.cos(1.0 * T / self.N * math.pi))
elif self.mode == 'poly':
lr = self.lr * pow((1 - 1.0 * T / self.N), 0.9)
elif self.mode == 'step':
lr = self.lr * (0.1 ** (epoch // self.lr_step))
else:
            raise NotImplementedError(self.mode)
# warm up lr schedule
if self.warmup_iters > 0 and T < self.warmup_iters:
lr = lr * 1.0 * T / self.warmup_iters
if epoch > self.epoch:
            print('\n=>Epochs %i, learning rate = %.7f, previous best = %.4f' % (epoch, lr, best_pred))
self.epoch = epoch
assert lr >= 0
self._adjust_learning_rate(optimizer, lr)
def _adjust_learning_rate(self, optimizer, lr):
if len(optimizer.param_groups) == 1:
optimizer.param_groups[0]['lr'] = lr
else:
# enlarge the lr at the head
for i in range(len(optimizer.param_groups)):
if optimizer.param_groups[i]['lr'] > 0: optimizer.param_groups[i]['lr'] = lr
# optimizer.param_groups[0]['lr'] = lr
# for i in range(1, len(optimizer.param_groups)):
# optimizer.param_groups[i]['lr'] = lr * 10
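# --- Usage sketch (not part of the original file): drives the scheduler with
# a stub optimizer so the decay curve can be inspected without torch; only a
# .param_groups list is needed by _adjust_learning_rate above.
if __name__ == "__main__":
    class _StubOptimizer:
        def __init__(self, lr):
            self.param_groups = [{'lr': lr}]

    opt = _StubOptimizer(lr=0.01)
    sched = LR_Scheduler('poly', base_lr=0.01, num_epochs=2, iters_per_epoch=5)
    for epoch in range(2):
        for i in range(5):
            sched(opt, i, epoch, best_pred=0.0)
    print(opt.param_groups[0]['lr'])  # ~0.00126 after the last poly step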
| 2,393 | 37 | 92 |
py
|
FCtL
|
FCtL-main/utils/loss.py
|
import torch.nn as nn
import torch.nn.functional as F
import torch
def one_hot(index, classes):
    # index is flattened (after the optional ignore mask) ##################
size = index.size()[:1] + (classes,)
view = index.size()[:1] + (1,)
#####################################################
mask = torch.Tensor(size).fill_(0).cuda()
index = index.view(view)
ones = 1.
return mask.scatter_(1, index, ones)
class FocalLoss(nn.Module):
def __init__(self, gamma=0, eps=1e-7, size_average=True, one_hot=True, ignore=None):
super(FocalLoss, self).__init__()
self.gamma = gamma
self.eps = eps
self.size_average = size_average
self.one_hot = one_hot
self.ignore = ignore
def forward(self, input, target):
        '''
        Pixels whose target equals self.ignore are masked out before computing the loss.
        '''
B, C, H, W = input.size()
input = input.permute(0, 2, 3, 1).contiguous().view(-1, C) # B * H * W, C = P, C
        target = target.view(-1)  # flatten to (B*H*W,)
if self.ignore is not None:
valid = (target != self.ignore)
input = input[valid]
target = target[valid]
if self.one_hot: target = one_hot(target, input.size(1))
probs = F.softmax(input, dim=1)
probs = (probs * target).sum(1)
probs = probs.clamp(self.eps, 1. - self.eps)
log_p = probs.log()
batch_loss = -(torch.pow((1 - probs), self.gamma)) * log_p
if self.size_average:
loss = batch_loss.mean()
else:
loss = batch_loss.sum()
return loss
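# --- Usage sketch (not part of the original file): FocalLoss on random
# logits. The one_hot() helper above allocates its mask with .cuda(), so the
# check is guarded by a CUDA availability test.
if __name__ == "__main__":
    if torch.cuda.is_available():
        criterion = FocalLoss(gamma=2, ignore=0)
        logits = torch.randn(2, 7, 8, 8).cuda()
        target = torch.randint(0, 7, (2, 8, 8)).cuda()
        print(criterion(logits, target).item())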
| 1,585 | 27.321429 | 90 |
py
|
FCtL
|
FCtL-main/utils/metrics.py
|
import numpy as np
import math
class ConfusionMatrix(object):
def __init__(self, n_classes):
self.n_classes = n_classes
# axis = 0: target
# axis = 1: prediction
self.confusion_matrix = np.zeros((n_classes, n_classes))
def _fast_hist(self, label_true, label_pred, n_class):
mask = (label_true >= 0) & (label_true < n_class)
hist = np.bincount(n_class * label_true[mask].astype(int) + label_pred[mask], minlength=n_class**2).reshape(n_class, n_class)
return hist
def update(self, label_trues, label_preds):
for lt, lp in zip(label_trues, label_preds):
tmp = self._fast_hist(lt.flatten(), lp.flatten(), self.n_classes)
self.confusion_matrix += tmp
def get_scores(self):
hist = self.confusion_matrix
intersect = np.diag(hist)
union = hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist)
iou = intersect / union
mean_iou = np.mean(np.nan_to_num(iou[1:]))
return {
'iou': iou,
'iou_mean': mean_iou,
}
def reset(self):
self.confusion_matrix = np.zeros((self.n_classes, self.n_classes))
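# --- Quick check (not part of the original file): IoU on a toy prediction.
# Note that get_scores() averages iou[1:], i.e. class 0 is treated as
# background and excluded from the mean.
if __name__ == "__main__":
    truth = np.array([[0, 1], [2, 2]])
    pred = np.array([[0, 1], [2, 1]])
    cm = ConfusionMatrix(n_classes=3)
    cm.update([truth], [pred])
    print(cm.get_scores())  # iou = [1.0, 0.5, 0.5], iou_mean = 0.5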
| 1,230 | 31.394737 | 133 |
py
|
FCtL
|
FCtL-main/utils/__init__.py
| 0 | 0 | 0 |
py
|
|
andi_datasets
|
andi_datasets-master/setup.py
|
from pkg_resources import parse_version
from configparser import ConfigParser
import setuptools
assert parse_version(setuptools.__version__)>=parse_version('36.2')
# note: all settings are in settings.ini; edit there, not here
config = ConfigParser(delimiters=['='])
config.read('settings.ini')
cfg = config['DEFAULT']
cfg_keys = 'version description keywords author author_email'.split()
expected = cfg_keys + "lib_name user branch license status min_python audience language".split()
for o in expected: assert o in cfg, "missing expected setting: {}".format(o)
setup_cfg = {o:cfg[o] for o in cfg_keys}
licenses = {
'apache2': ('Apache Software License 2.0','OSI Approved :: Apache Software License'),
'mit': ('MIT License', 'OSI Approved :: MIT License'),
'gpl2': ('GNU General Public License v2', 'OSI Approved :: GNU General Public License v2 (GPLv2)'),
'gpl3': ('GNU General Public License v3', 'OSI Approved :: GNU General Public License v3 (GPLv3)'),
'bsd3': ('BSD License', 'OSI Approved :: BSD License'),
}
statuses = [ '1 - Planning', '2 - Pre-Alpha', '3 - Alpha',
'4 - Beta', '5 - Production/Stable', '6 - Mature', '7 - Inactive' ]
py_versions = '3.6 3.7 3.8 3.9 3.10'.split()
requirements = cfg.get('requirements','').split()
if cfg.get('pip_requirements'): requirements += cfg.get('pip_requirements','').split()
min_python = cfg['min_python']
lic = licenses.get(cfg['license'].lower(), (cfg['license'], None))
dev_requirements = (cfg.get('dev_requirements') or '').split()
setuptools.setup(
name = cfg['lib_name'],
license = lic[0],
classifiers = [
'Development Status :: ' + statuses[int(cfg['status'])],
'Intended Audience :: ' + cfg['audience'].title(),
'Natural Language :: ' + cfg['language'].title(),
] + ['Programming Language :: Python :: '+o for o in py_versions[py_versions.index(min_python):]] + (['License :: ' + lic[1] ] if lic[1] else []),
url = cfg['git_url'],
packages = setuptools.find_packages(),
include_package_data = True,
install_requires = requirements,
extras_require={ 'dev': dev_requirements },
dependency_links = cfg.get('dep_links','').split(),
python_requires = '>=' + cfg['min_python'],
long_description = open('README.md').read(),
long_description_content_type = 'text/markdown',
zip_safe = False,
entry_points = {
'console_scripts': cfg.get('console_scripts','').split(),
'nbdev': [f'{cfg.get("lib_path")}={cfg.get("lib_path")}._modidx:d']
},
**setup_cfg)
| 2,539 | 44.357143 | 150 |
py
|
andi_datasets
|
andi_datasets-master/andi_datasets/datasets_theory.py
|
# AUTOGENERATED! DO NOT EDIT! File to edit: ../source_nbs/lib_nbs/datasets_theory.ipynb.
# %% auto 0
__all__ = ['datasets_theory']
# %% ../source_nbs/lib_nbs/datasets_theory.ipynb 3
import numpy as np
import os
import inspect
import h5py
from tqdm.auto import trange
import csv
# %% ../source_nbs/lib_nbs/datasets_theory.ipynb 4
from .utils_trajectories import normalize
from .models_theory import models_theory as models_theory
# %% ../source_nbs/lib_nbs/datasets_theory.ipynb 6
class datasets_theory():
def __init__(self):
'''
This class generates, saves and loads datasets of theoretical trajectories simulated
from various diffusion models (available at andi_datasets.models_theory).
'''
self._dimension = 1
self._get_models()
def _get_models(self):
'''Loading subclass of models'''
if self._dimension == 1:
self._models = models_theory._oneD()
elif self._dimension == 2:
self._models = models_theory._twoD()
elif self._dimension == 3:
self._models = models_theory._threeD()
else:
raise ValueError('Our current understanding of the physical world is three dimensional and so are the diffusion models available in this class')
available_models = inspect.getmembers(self._models, inspect.ismethod)
self.avail_models_name = [x[0] for x in available_models]
self.avail_models_func = [x[1] for x in available_models]
def create_dataset(self, T, N_models, exponents, models,
dimension = 1,
save_trajectories = False, load_trajectories = False,
path = 'datasets/',
N_save = 1000, t_save = 1000):
'''
Creates a dataset of trajectories via the theoretical models defined in `.models_theory`. Check our tutorials for use cases of this function.
Parameters
----------
T : int
Length of the trajectories.
N_models : int, numpy.array
- if int, number of trajectories per class (i.e. exponent and model) in the dataset.
            - if numpy.array, number of trajectories per class: size (number of models)x(number of classes)
exponents : float, array
Anomalous exponents to include in the dataset. Allows for two digit precision.
models : bool, int, list
Labels of the models to include in the dataset.
            Correspondence between models and labels is given by the position of each model in self.avail_models_name.
If int/list, choose the given models. If False, choose all of them.
        dimension : int
Dimensions of the generated trajectories. Three possible values: 1, 2 and 3.
save_trajectories : bool
If True, the module saves a .h5 file for each model considered, with N_save trajectories
and T = T_save.
load_trajectories : bool
If True, the module loads the trajectories of an .h5 file.
path : str
Path to the folder where to save/load the trajectories dataset.
N_save : int
            Number of trajectories to save for each exponent/model.
            Advice: save a big dataset at the beginning (t_save ~ 1e3 and N_save ~ 1e4),
            which then allows you to load any other combination of T and N_models.
t_save : int
Length of the trajectories to be saved. See comments on N_save.
Returns
-------
numpy.array
            - Dataset of trajectories of size Nx(T+2), with the following structure:
o First column: model label
o Second column: value of the anomalous exponent
o 2:T columns: trajectories
'''
'''Managing probable errors in inputs'''
if T < 2:
raise ValueError('The time of the trajectories has to be bigger than 1.')
if isinstance(exponents, int) or isinstance(exponents, float):
exponents = [exponents]
'''Managing folders of the datasets'''
if save_trajectories or load_trajectories:
if load_trajectories:
save_trajectories = False
if not os.path.exists(path) and load_trajectories:
raise FileNotFoundError('The directory from where you want to load the dataset does not exist')
if not os.path.exists(path) and save_trajectories:
os.makedirs(path)
'''Establish dimensions and corresponding models'''
self._dimension = dimension
self._get_models()
'''Managing models to load'''
# Load from a list of models
if isinstance(models, list):
self._models_name = [self.avail_models_name[idx] for idx in models]
self._models_func = [self.avail_models_func[idx] for idx in models]
# Load from a single model
elif isinstance(models, int) and not isinstance(models, bool):
self._models_name = [self.avail_models_name[models]]
self._models_func = [self.avail_models_func[models]]
# Load all available models
else:
self._models_name = self.avail_models_name
self._models_func = self.avail_models_func
'''Managing number of trajectory per class:
- Defines array num_class as a function of N'''
if isinstance(N_models, int):
n_per_class = N_models*np.ones((len(self._models_name), len(exponents)))
elif type(N_models).__module__ == np.__name__:
if len(self._models_name) != N_models.shape[0] or len(exponents) != N_models.shape[1]:
                raise ValueError('Mismatch between the dimensions of N and the number of different classes. '+
                                 f'N must be either an int (balanced classes) or an array of size {len(self._models_name)}x'
                                 f'{len(exponents)} (imbalanced classes).')
n_per_class = N_models
else:
raise TypeError('Type of variable N not recognized.')
'''Defining default values for saved datasets'''
N_save = np.ones_like(n_per_class)*N_save
        # If the number of trajectories requested for a given class is bigger
        # than N_save, we change the value of N_save for that particular class.
N_save = np.max([N_save, n_per_class], axis = 0)
''' Loading/Saving/Creating datasets'''
if load_trajectories:
data_models = self._load_trajectories(T = T,
exponents = exponents,
models_name = self._models_name,
dimension = self._dimension,
n_per_class = n_per_class,
path = path,
N_save = N_save,
t_save = t_save)
elif save_trajectories:
self._save_trajectories(exponents = exponents,
dimension = self._dimension,
models_name = self._models_name,
models_func = self._models_func,
path = path,
n_per_class = n_per_class,
N_save = N_save,
t_save = t_save)
data_models = self._load_trajectories(T = T,
exponents = exponents,
dimension = self._dimension,
models_name = self._models_name,
n_per_class = n_per_class,
path = path,
N_save = N_save,
t_save = t_save)
else:
data_models = self._create_trajectories(T = T,
exponents = exponents,
dimension = self._dimension,
models_name = self._models_name,
models_func = self._models_func,
n_per_class = n_per_class)
return data_models
def _load_trajectories(self, T, exponents, dimension,
models_name, n_per_class,
path, N_save = 1000, t_save = 1000):
        ''' Load trajectories from an h5py file at the given path. The names of the datasets in the
        file have the following structure:
        '(exponent with 2 digit precision)_T_(length of trajectories in the dataset)_N_(number of trajectories in the dataset)'
Arguments:
:T (int):
- length of the trajectories.
:exponents (array):
- anomalous exponents to include in the dataset. Allows for two digit precision.
:dimension (int):
- Dimensions of the generated trajectories. Three possible values: 1, 2 and 3.
:models_name (list of str):
- names of the models to include in the output dataset.
:n_per_class:
- number of trajectories to consider per exponent/model.
:path (str):
- path to the folder from where to load the trajectories dataset.
:t_save (int):
- length of the trajectories in the datasets to load.
:N_save (array):
- number of trajectories contained in the datasets to load.
Return:
:dataset (numpy.array):
                - Dataset of trajectories of size (number of models)x(T+2), with the following structure:
o First column: model label
o Second column: value of the anomalous exponent
o 2:T columns: trajectories'''
'''Establish dimensions and corresponding models'''
self._dimension = dimension
self._get_models()
if isinstance(models_name, int):
models_name = [models_name]
for idx_m, name in enumerate(models_name):
hf = h5py.File(path+name+'.h5', 'r+')
for idx_e, exp in enumerate(exponents):
name_dataset = f'{exp:.2f}_T_{t_save}_N_'+ \
str(int(N_save[idx_m, idx_e]))+f'_dim_{self._dimension}'
n = int(n_per_class[idx_m, idx_e])
if n == 0:
continue
try:
data = (hf.get(name_dataset)[()][:n,:self._dimension*T])
                except TypeError:
                    raise TypeError('The dataset you want to load does not exist.')
data = self._label_trajectories(trajs = data, model_name = name, exponent = exp)
if idx_e + idx_m == 0:
dataset = data
else:
dataset = np.concatenate((dataset, data), axis = 0)
return dataset
def _save_trajectories(self, exponents, models_name, models_func, path, n_per_class,
N_save = 1000, t_save = 1000, dimension = 1):
''' Saves a dataset for the exponents and models considered.
Arguments:
:exponents (array):
- anomalous exponents to include in the dataset. Allows for two digit precision.
:models_name (list of str):
- names of the models to include in the output dataset.
:models_func (list of funcs):
- function generating the models to include in the output dataset.
:path (str):
- path to the folder where to save the trajectories dataset.
:t_save (int):
- length of the trajectories to save in the datasets.
:N_save (array):
- number of trajectories to include in the datasets saved.
:dimension (int):
- Dimensions of the generated trajectories. Three possible values: 1, 2 and 3.
No return '''
'''Establish dimensions and corresponding models'''
self._dimension = dimension
self._get_models()
for idx_m, (name, func) in enumerate(zip(models_name, models_func)):
if os.path.isfile(path+name+'.h5'):
action = 'r+'
else:
action = 'w'
with h5py.File(path+name+'.h5', action) as hf:
for idx_e, exp in enumerate(exponents):
if n_per_class[idx_m, idx_e] == 0:
continue
n = int(N_save[idx_m, idx_e])
name_dataset = f'{exp:.2f}_T_{t_save}_N_{n}_dim_{self._dimension}'
if name_dataset not in hf:
data = np.zeros((n, self._dimension*t_save))
# TQDM variables
tq = trange(n)
tq.set_postfix(saving = True, model = name, exponent = exp)
for i in tq:
data[i, :] = func(t_save, exp)
hf.create_dataset(name_dataset, data=data)
else:
                    print(f'The dataset for {name} with exponent {round(exp,3)}'
                          +' already exists, no need to save it again.')
def _create_trajectories(self, T, exponents, dimension, models_name, models_func, n_per_class):
        ''' Create a dataset for the exponents and models considered.
Arguments:
:T (int):
- length of the trajectories.
:exponents (array):
- anomalous exponents to include in the dataset. Allows for two digit precision.
:dimension (int):
- Dimensions of the generated trajectories. Three possible values: 1, 2 and 3.
:models_name (list of str):
- names of the models to include in the output dataset.
:models_func (list of funcs):
- function generating the models to include in the output dataset.
:n_per_class:
- number of trajectories to consider per exponent/model.
Return:
:dataset (numpy.array):
                - Dataset of trajectories of size (number of models)x(T+2), with the following structure:
o First column: model label.
o Second column: value of the anomalous exponent.
o 2:T columns: trajectories.'''
for idx_m, (name, func) in enumerate(zip(models_name, models_func)):
for idx_e, exp in enumerate(exponents):
n = int(n_per_class[idx_m, idx_e])
data = np.zeros((n, self._dimension*T))
for i in range(n):
data[i, :] = func(T, exp)
data = self._label_trajectories(trajs = data, model_name = name, exponent = exp)
if idx_e + idx_m == 0:
dataset = data
else:
dataset = np.concatenate((dataset, data), axis = 0)
return dataset
def _label_trajectories(self, trajs, model_name, exponent):
        ''' Labels the given trajectories with the corresponding model and exponent labels.
        For models, the label corresponds to the position of the model in self.avail_models_name.
        For exponents, the label is the value of the exponent.
Arguments:
:trajs (numpy array):
- trajectories to label
:model_name (str):
- name of the model from which the trajectories are coming from.
:exponent (float):
- Anomalous exponent of the trajectories.
Return:
:trajs (numpy array):
                - Labelled trajectories, with the following structure:
                    o First column: model label
                    o Second column: exponent label
o Rest of the array: trajectory. '''
label_model = self.avail_models_name.index(model_name)
labels_mod = np.ones((trajs.shape[0], 1))*label_model
labels_alpha = np.ones((trajs.shape[0], 1))*exponent
trajs = np.concatenate((labels_mod, labels_alpha, trajs), axis = 1)
return trajs
def create_noisy_localization_dataset(self,
dataset = False,
T = False, N = False, exponents = False, models = False, dimension = 1,
noise_func = False, sigma = 1, mu = 0,
save_trajectories = False, load_trajectories = False,
path = 'datasets/',
N_save = 1000, t_save = 1000):
'''
Create a dataset of noisy trajectories.
This function creates trajectories with _create_trajectories and then adds given noise to them.
All parameters are the same as _create_trajectories but noise_func.
Parameters
----------
dataset : bool, numpy array
If False, creates a dataset with the given parameters.
If numpy array, dataset to which the function applies the noise.
noise_func : bool, function
If False, the noise added to the trajectories will be Gaussian distributed, with
variance sigma and mean value mu.
If function, uses the given function to generate noise to be added to the trajectory.
The function must have as input two ints, N and M and the output must be a matrix of size NxM.
Returns
-------
numpy.array
            Dataset of trajectories of size Nx(T+2), with the following structure:
o First column: model label
o Second column: value of the anomalous exponent
o 2:T columns: trajectories'''
        # dataset defaults to the bool False ("build a new one"); .any() would fail on it
        if dataset is False:
dataset = self.create_dataset(T, N, exponents, models, dimension,
save_trajectories, load_trajectories,
path,
N_save, t_save)
# Add the noise to the trajectories
trajs = dataset[:, 2:].reshape(dataset.shape[0]*dimension, T)
trajs = self._add_noisy_localization(trajs, noise_func, sigma, mu)
dataset[:, 2:] = trajs.reshape(dataset.shape[0], T*dimension)
return dataset
def create_noisy_diffusion_dataset(self,
dataset = False,
T = False, N = False, exponents = False, models = False, dimension = 1,
diffusion_coefficients = False, sigma = 1, mu = 0,
save_trajectories = False, load_trajectories = False,
path = 'datasets/',
N_save = 1000, t_save = 1000):
'''
Create a dataset of noisy trajectories.
This function creates trajectories with `_create_trajectories` and then adds given noise to them.
All arguments are the same as `_create_trajectories` but dataset and diffusion_coefficients.
Parameters
----------
dataset : bool, numpy array
- If False, creates a dataset with the given parameters.
- If numpy array, dataset to which the function applies the noise.
diffusion_coefficient : bool, function
- If False, the diffusion noise added to the trajectories will
be Gaussian distributed, with variance sigma and mean value mu.
- If numpy array, multiply the displacements by them.
Returns
--------
data_models : numpy.array
            Dataset of trajectories of size Nx(T+2), with the following structure:
o First column: model label
o Second column: value of the anomalous exponent
o 2:T columns: trajectories'''
        # dataset defaults to the bool False ("build a new one"); .any() would fail on it
        if dataset is False:
dataset = self.create_dataset(T, N, exponents, models, dimension,
save_trajectories, load_trajectories,
path,
N_save, t_save)
# Add the noise to the trajectories
trajs = dataset[:, 2:].reshape(dataset.shape[0]*dimension, T)
trajs = self._add_noisy_diffusion(trajs, diffusion_coefficients, sigma = sigma, mu = mu)
dataset[:, 2:] = trajs.reshape(dataset.shape[0], T*dimension)
return dataset
@staticmethod
def _add_noisy_localization(trajs, noise_func = False, sigma = 1, mu = 0):
if isinstance(noise_func, np.ndarray):
noise_matrix = noise_func
elif not noise_func:
            noise_matrix = sigma*np.random.randn(*trajs.shape)+mu  # unpack shape; randn takes ints
elif hasattr(noise_func, '__call__'):
noise_matrix = noise_func(trajs)
else:
raise ValueError('noise_func has to be either False for Gaussian noise, a Python function or numpy array.')
trajs += noise_matrix
return trajs
    @staticmethod
    def _add_noisy_diffusion(trajs, diffusion_coefficients = False, sigma = 1, mu = 0):
# First normalize the trajectories
trajs = normalize(trajs)
# Check if diffusion coefficient are an array
if isinstance(diffusion_coefficients, np.ndarray):
pass
        # If no new diffusion coefficients given, sample new ones randomly
        elif not diffusion_coefficients:
            diffusion_coefficients = sigma*np.random.randn(trajs.shape[0])+mu
# Apply new diffusion coefficients
trajs = (trajs.transpose()*diffusion_coefficients).transpose()
return trajs
@staticmethod
def create_segmented_dataset(dataset1, dataset2, dimension = 1,
final_length = 200, random_shuffle = False):
'''
Creates a dataset with trajectories which change diffusive feature (either model or anomalous exponent) after a time 't_change'.
Parameters
----------
dataset1 : numpy.array
Array of size Nx(t+2), where the first columns values correspond
to the labels of the model and anomalous exponent. The rest
correspond to the trajectories of length t.
dataset2 : numpy.array
Same as dataset1
dimension : int
Dimensions of the generated trajectories. Three possible values: 1, 2 and 3.
final_length : int
Length of the output trajectories.
random_shuffle : bool
If True, shuffles the first axis of dataset1 and dataset2.
Returns
-------
numpy.array
Array of size Nx(t+5) whose columns represent:
o Column 0: changing time
o Column 1,2: labels first part of the trajectory (model, exponent)
o Column 3,4: labels second part of the trajectory (model, exponent)
                o Column 5:(t+5): trajectories of length t.
'''
'''Establish dimensions and corresponding models'''
if dataset1.shape[0] != dataset2.shape[0]:
raise ValueError(f'Input datasets must have the same number of trajectories. Current ones have size {dataset1.shape[0]} and {dataset2.shape[0]}.')
if dataset1.shape[1]-2 < final_length or dataset2.shape[1]-2 < final_length:
raise ValueError(f'The trajectories in the input datasets are too short. They must be at least {final_length} steps long.')
if random_shuffle:
np.random.shuffle(dataset1)
np.random.shuffle(dataset2)
n_trajs = dataset1.shape[0]
trajs_1 = np.copy(dataset1[:, 2:].reshape(n_trajs, dimension, int((dataset1.shape[1]-2)/dimension)))
trajs_2 = np.copy(dataset2[:, 2:].reshape(n_trajs, dimension, int((dataset2.shape[1]-2)/dimension)))
trajs_1 = trajs_1[:, :, :final_length]
trajs_2 = trajs_2[:, :, :final_length]
t_change = np.random.randint(1, final_length, n_trajs)
seg_dataset = np.zeros((n_trajs, dimension*final_length+5))
for idx, (tC, traj1, traj2, label1, label2) in enumerate(zip(t_change,
trajs_1, trajs_2,
dataset1[:, :2], dataset2[:, :2])):
seg_dataset[idx, 0] = tC
seg_dataset[idx, 1:5] = np.append(label1, label2)
if dimension == 1:
seg_dataset[idx, 5:tC+5] = traj1[:, :tC]
seg_dataset[idx, tC+5:] = traj2[:, tC:final_length]-traj2[:, tC]+traj1[:, tC]
elif dimension == 2 or dimension == 3:
traj2 = (traj2.transpose()-traj2[:, tC]+traj1[:, tC]).transpose()
traj1[:,tC:] = 0
traj2[:, :tC] = 0
seg_dataset[idx, 5:] = (traj1 + traj2).reshape(dimension*final_length)
return seg_dataset
@staticmethod
    def _save_row(data:np.array, # Row to be appended to the file
file:str # File where to append data
):
        ''' Auxiliary function to append data to existing files using csv. '''
with open(file, 'a') as f:
writer = csv.writer(f, delimiter=';', lineterminator='\n',)
writer.writerow(data)
@staticmethod
def _cut_trajectory(traj, t_cut, dim=1):
''' Takes a trajectory and cuts it to `t_cut` length. '''
cut_traj = traj.reshape(dim, -1)[:, :t_cut]
return cut_traj.reshape(-1)
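# --- Usage sketch (not part of the original file, commented out because the
# relative imports above require loading this module as part of the package):
# builds a small balanced 1D dataset; model indices follow
# self.avail_models_name (with the standard AnDi ordering, index 2 is fbm).
#
# AD = datasets_theory()
# data = AD.create_dataset(T=100, N_models=5, exponents=[0.5, 1.0], models=[2])
# print(data.shape)  # (10, 102): [model label, exponent, 100 positions]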
| 27,969 | 47.559028 | 158 |
py
|
andi_datasets
|
andi_datasets-master/andi_datasets/models_phenom.py
|
# AUTOGENERATED! DO NOT EDIT! File to edit: ../source_nbs/lib_nbs/models_phenom.ipynb.
# %% auto 0
__all__ = ['models_phenom']
# %% ../source_nbs/lib_nbs/models_phenom.ipynb 2
import numpy as np
from stochastic.processes.noise import FractionalGaussianNoise as FGN
from .utils_trajectories import gaussian
import warnings
# %% ../source_nbs/lib_nbs/models_phenom.ipynb 5
class models_phenom():
def __init__(self):
'''
This class handles the generation of trajectories from different theoretical models.
'''
# We define here the bounds of the anomalous exponent and diffusion coefficient
self.bound_D = [1e-12, 1e6]
self.bound_alpha = [0, 1.999]
# We also define the value in which we consider directed motion
self.alpha_directed = 1.9
# Diffusion state labels: the position of each type defines its numerical label
# i: immobile/trapped; c: confined; f: free-diffusive (normal and anomalous); d: directed
self.lab_state = ['i', 'c', 'f', 'd']
# %% ../source_nbs/lib_nbs/models_phenom.ipynb 7
class models_phenom(models_phenom):
@staticmethod
def disp_fbm(alpha : float,
D : float,
T: int,
deltaT : int = 1):
''' Generates normalized Fractional Gaussian noise. This means that, in
general:
<x^2(t)> = 2Dt^alpha
and in particular:
<x^2(t = 1)> = 2D
Parameters
----------
alpha : float in [0,2]
Anomalous exponent
D : float
Diffusion coefficient
T : int
Number of displacements to generate
deltaT : int, optional
Sampling time
Returns
-------
numpy.array
Array containing T displacements of given parameters
'''
# Generate displacements
disp = FGN(hurst = alpha/2).sample(n = T)
# Normalization factor
disp *= np.sqrt(T)**(alpha)
# Add D
disp *= np.sqrt(2*D*deltaT)
return disp
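# --- Numerical check (not part of the original file, commented out because
# this module uses relative imports): the normalization above targets
# <x^2(t=1)> = 2D, which can be verified over many realizations.
#
# first_disp = np.array([models_phenom.disp_fbm(alpha=0.5, D=2.0, T=100)[0]
#                        for _ in range(5000)])
# print(np.mean(first_disp**2))  # ~ 2*D = 4 within sampling error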
# %% ../source_nbs/lib_nbs/models_phenom.ipynb 15
class models_phenom(models_phenom):
@staticmethod
def _constraint_alpha(alpha_1, alpha_2, epsilon_a):
''' Defines the metric for constraining the changes in anomalous
exponent'''
return alpha_1 - alpha_2 < epsilon_a
@staticmethod
def _constraint_d(d1, d2, gamma_d):
        ''' Defines the metric for constraining the changes in diffusion
        coefficient'''
if gamma_d < 1:
return d2 > d1*gamma_d
if gamma_d > 1:
return d2 < d1*gamma_d
@staticmethod
def _sample_diff_parameters(alphas : list, # List containing the parameters to sample anomalous exponent in state (adapt to sampling function)
Ds : list, # List containing the parameters to sample the diffusion coefficient in state (adapt to sampling function).
num_states : int, # Number of diffusive states.
epsilon_a : float, # Minimum distance between anomalous exponents of various states.
gamma_d : float, # Factor between diffusion coefficient of various states.
) :
'''
Given information of the anomalous exponents (alphas), diffusion coefficients (Ds), the function
samples these from a bounded Gaussian distribution with the indicated constraints (epsilon_a,
gamma_d). Outputs the list of demanded alphas and Ds.
Parameters
----------
alphas : list
List containing the parameters to sample anomalous exponent in state (adapt to sampling function).
Ds : list
List containing the parameters to sample the diffusion coefficient in state (adapt to sampling function).
num_states : int
Number of diffusive states.
epsilon_a : float
Minimum distance between anomalous exponents of various states.
epsilon workflow: we check val[i] - val[i-1] < epsilon
if you want that val[i] > val[i-1]: epsilon has to be positive
if you want that val[i] < val[i-1]: epsilon has to be negative
if you don't care: epsilon = 0
gamma_d : float
Factor between diffusion coefficient of various states.
gamma workflow:
for gamma < 1: val[i] < val[i-1]*gamma
for gamma > 1: val[i] > val[i-1]*gamma
for gamma = 1: no check
Returns
-------
:alphas_traj (list): list of anomalous exponents
:Ds_traj (list): list of diffusion coefficients
'''
alphas_traj = []
Ds_traj = []
for i in range(num_states):
# for the first state we just sample normally
if i == 0:
alphas_traj.append(float(gaussian(alphas[i], bound = models_phenom().bound_alpha)))
Ds_traj.append(float(gaussian(Ds[i], bound = models_phenom().bound_D)))
# For next states we take into account epsilon distance between diffusion
# parameter
else:
## Checking alpha
alpha_state = float(gaussian(alphas[i], bound = models_phenom().bound_alpha))
D_state = float(gaussian(Ds[i], bound = models_phenom().bound_D))
if epsilon_a[i-1] != 0:
idx_while = 0
while models_phenom()._constraint_alpha(alphas_traj[-1], alpha_state, epsilon_a[i-1]):
#alphas_traj[-1] - alpha_state < epsilon_a[i-1]:
alpha_state = float(gaussian(alphas[i], bound = models_phenom().bound_alpha))
idx_while += 1
if idx_while > 100: # check that we are not stuck forever in the while loop
raise FileNotFoundError(f'Could not find correct alpha for state {i} in 100 steps. State distributions probably too close.')
alphas_traj.append(alpha_state)
## Checking D
if gamma_d[i-1] != 1:
idx_while = 0
while models_phenom()._constraint_d(Ds_traj[-1], D_state, gamma_d[i-1]):
D_state = float(gaussian(Ds[i], bound = models_phenom().bound_D))
idx_while += 1
if idx_while > 100: # check that we are not stuck forever in the while loop
raise FileNotFoundError(f'Could not find correct D for state {i} in 100 steps. State distributions probably too close.')
Ds_traj.append(D_state)
return alphas_traj, Ds_traj
# %% ../source_nbs/lib_nbs/models_phenom.ipynb 23
class models_phenom(models_phenom):
@staticmethod
def _single_state_traj(T :int = 200,
D : float = 1,
alpha : float = 1,
L : float = None,
deltaT : int = 1):
'''
Generates a single state trajectory with given parameters.
Parameters
----------
T : int
Length of the trajectory
D : float
Diffusion coefficient
alpha : float
Anomalous exponent
L : float
Length of the box acting as the environment
deltaT : int, optional
Sampling time
Returns
-------
tuple
- pos: position of the particle
            - labels: anomalous exponent, D and state at each timestep (free, or directed when alpha >= 1.9).
'''
# Trajectory displacements
dispx, dispy = models_phenom().disp_fbm(alpha, D, T), models_phenom().disp_fbm(alpha, D, T)
# Labels
lab_diff_state = np.ones(T)*models_phenom().lab_state.index('f') if alpha < models_phenom().alpha_directed else np.ones(T)*models_phenom().lab_state.index('d')
labels = np.vstack((np.ones(T)*alpha,
np.ones(T)*D,
lab_diff_state
)).transpose()
# If there are no boundaries
if not L:
posx, posy = np.cumsum(dispx) - dispx[0], np.cumsum(dispy) - dispy[0]
return np.vstack((posx, posy)).transpose(), labels
# If there are, apply reflecting boundary conditions
else:
pos = np.zeros((T, 2))
# Initialize the particle in a random position of the box
pos[0, :] = np.random.rand(2)*L
for t in range(1, T):
pos[t, :] = [pos[t-1, 0]+dispx[t], pos[t-1, 1]+dispy[t]]
# Reflecting boundary conditions
while np.max(pos[t, :])>L or np.min(pos[t, :])< 0:
pos[t, pos[t, :] > L] = pos[t, pos[t, :] > L] - 2*(pos[t, pos[t, :] > L] - L)
pos[t, pos[t, :] < 0] = - pos[t, pos[t, :] < 0]
return pos, labels
# %% ../source_nbs/lib_nbs/models_phenom.ipynb 27
class models_phenom(models_phenom):
def single_state(self,
N:int = 10,
T:int = 200,
Ds:list = [1, 0],
alphas:list = [1, 0],
L:float = None):
'''
Generates a dataset made of single state trajectories with given parameters.
Parameters
----------
        N : int
Number of trajectories in the dataset
T : int
Length of the trajectory
        Ds : float, list
If list, mean and variance from which to sample the diffusion coefficient. If float, we consider variance = 0.
        alphas : float, list
If list, mean and variance from which to sample the anomalous exponent. If float, we consider variance = 0.
L : float
Length of the box acting as the environment
Returns
-------
tuple
- positions: position of the N trajectories.
            - labels: anomalous exponent, D and state at each timestep (free, or directed when alpha >= 1.9).
'''
positions = np.zeros((T, N, 2))
labels = np.zeros((T, N, 3))
for n in range(N):
alpha_traj = gaussian(alphas, bound = self.bound_alpha)
D_traj = gaussian(Ds, bound = self.bound_D)
# Get trajectory from single traj function
pos, lab = self._single_state_traj(T = T,
D = D_traj,
alpha = alpha_traj,
L = L)
positions[:, n, :] = pos
labels[:, n, :] = lab
return positions, labels
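# --- Usage sketch (not part of the original file, commented out for the same
# reason): 50 free trajectories with alpha ~ N(1, 0.1) and D ~ N(1, 0.1) in a
# box of side 10.
#
# MP = models_phenom()
# trajs, labels = MP.single_state(N=50, T=200, Ds=[1, 0.1],
#                                 alphas=[1, 0.1], L=10)
# print(trajs.shape, labels.shape)  # (200, 50, 2) (200, 50, 3)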
# %% ../source_nbs/lib_nbs/models_phenom.ipynb 33
class models_phenom(models_phenom):
@staticmethod
def _multiple_state_traj(T = 200,
M = [[0.95 , 0.05],[0.05 ,0.95]],
Ds = [1, 0.1],
alphas = [1, 1],
L = None,
deltaT = 1,
return_state_num = False,
init_state = None
):
'''
Generates a 2D multi state trajectory with given parameters.
Parameters
----------
T : int
Length of the trajectory
M : list, array
Transition matrix between diffusive states.
Ds : list
Diffusion coefficients of the diffusive states. Must have as many Ds as states defined by M.
alphas : list
Anomalous exponents of the diffusive states. Must have as many alphas as states defined by M.
L : float
Length of the box acting as the environment
deltaT : int, optional
Sampling time
return_state_num : bool
If True, returns as label the number assigned to the state at each time step.
        init_state : int, optional
            If given, index of the initial diffusive state; if None, the initial state is sampled at random.
Returns
-------
tuple
- pos: position of the particle
- alphas_t: anomalous exponent at each step
- Ds_t: diffusion coefficient at each step.
- label_diff_state: particle's state (can be either free or directed for alpha ~ 2) at each step.
- state (optional): state label at each step.
'''
# transform lists to numpy if needed
if isinstance(M, list):
M = np.array(M)
if isinstance(Ds, list):
Ds = np.array(Ds)
if isinstance(alphas, list):
alphas = np.array(alphas)
pos = np.zeros((T, 2))
if L: pos[0,:] = np.random.rand(2)*L
# Diffusing state of the particle
state = np.zeros(T).astype(int)
if init_state is None:
state[0] = np.random.randint(M.shape[0])
else: state[0] = init_state
# Init alphas, Ds
alphas_t = np.array(alphas[state[0]]).repeat(T)
Ds_t = np.array(Ds[state[0]]).repeat(T)
# Trajectory displacements
dispx, dispy = [models_phenom().disp_fbm(alphas_t[0], Ds_t[0], T),
models_phenom().disp_fbm(alphas_t[0], Ds_t[0], T)]
for t in range(1, T):
pos[t, :] = [pos[t-1, 0]+dispx[t], pos[t-1, 1]+dispy[t]]
# at each time, check new state
state[t] = np.random.choice(np.arange(M.shape[0]), p = M[state[t-1], :])
if state[t] != state[t-1]:
alphas_t[t:] = np.array(alphas[state[t]]).repeat(T-t)
Ds_t[t:] = np.array(Ds[state[t]]).repeat(T-t)
# Recalculate new displacements for next steps
if len(dispx[t:]) > 1:
dispx[t:], dispy[t:] = [models_phenom().disp_fbm(alphas_t[t], Ds_t[t], T-t),
models_phenom().disp_fbm(alphas_t[t], Ds_t[t], T-t)]
else:
dispx[t:], dispy[t:] = [np.sqrt(2*Ds[state[t]]*deltaT)*np.random.randn(),
np.sqrt(2*Ds[state[t]]*deltaT)*np.random.randn()]
if L is not None:
# Reflecting boundary conditions
while np.max(pos[t, :])>L or np.min(pos[t, :])< 0:
pos[t, pos[t, :] > L] = pos[t, pos[t, :] > L] - 2*(pos[t, pos[t, :] > L] - L)
pos[t, pos[t, :] < 0] = - pos[t, pos[t, :] < 0]
# Define state of particles based on values of alphas: either free or directed
label_diff_state = np.zeros_like(alphas_t)
label_diff_state[alphas_t < models_phenom().alpha_directed] = models_phenom().lab_state.index('f')
label_diff_state[alphas_t >= models_phenom().alpha_directed] = models_phenom().lab_state.index('d')
if return_state_num:
return pos, np.array((alphas_t,
Ds_t,
label_diff_state,
state)).transpose()
else:
return pos, np.array((alphas_t,
Ds_t,
label_diff_state)).transpose()
# %% ../source_nbs/lib_nbs/models_phenom.ipynb 37
class models_phenom(models_phenom):
def multi_state(self,
N = 10,
T = 200,
M: np.array = [[0.9 , 0.1],[0.1 ,0.9]],
Ds: np.array = [[1, 0], [0.1, 0]],
alphas: np.array = [[1, 0], [1, 0]],
gamma_d = [1],
epsilon_a = [0],
L = None,
return_state_num = False,
init_state = None):
'''
Generates a dataset of 2D multi state trajectory with given parameters.
Parameters
----------
N : int
Number of trajectories
T : int
Length of the trajectory
M : list, array
Transition matrix between diffusive states
Ds : list
List of means and variances from which to sample the diffusion coefficient of each state. If element size is one, we consider variance = 0.
alphas : float
List of means and variances from which to sample the anomalous exponent of each state. If element size is one, we consider variance = 0.
        gamma_d : list
            Minimum factor between D of diffusive states (see ._sample_diff_parameters)
        epsilon_a : list
            Distance between alpha of diffusive states (see ._sample_diff_parameters)
L : float
Length of the box acting as the environment
deltaT : int, optional
Sampling time
return_state_num : bool
If True, returns as label the number assigned to the state at each time step.
        init_state : int, optional
            If given, index of the initial diffusive state; if None, the initial state is sampled at random.
Returns
-------
tuple
            - trajs (array TxNx2): particles' positions
            - labels (array TxNx3, or TxNx4 if return_state_num): particles' labels (see ._multiple_state_traj for details)
'''
# transform lists to numpy if needed
if isinstance(M, list):
M = np.array(M)
if isinstance(Ds, list):
Ds = np.array(Ds)
if isinstance(alphas, list):
alphas = np.array(alphas)
trajs = np.zeros((T, N, 2))
if return_state_num:
labels = np.zeros((T, N, 4))
else:
labels = np.zeros((T, N, 3))
for n in range(N):
### Sampling diffusion parameters for each state
alphas_traj = []
Ds_traj = []
alphas_traj, Ds_traj = self._sample_diff_parameters(alphas = alphas,
Ds = Ds,
num_states = M.shape[0],
epsilon_a = epsilon_a,
gamma_d = gamma_d)
#### Get trajectory from single traj function
traj, lab = self._multiple_state_traj(T = T,
L = L,
M = M,
alphas = alphas_traj,
Ds = Ds_traj,
return_state_num = return_state_num,
init_state = init_state
)
trajs[:, n, :] = traj
labels[:, n, :] = lab
return trajs, labels
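# --- Usage sketch (not part of the original file, commented out for the same
# reason): two-state trajectories switching between a fast free state and a
# slower one, using the constraint defaults (gamma_d=[1], epsilon_a=[0]).
#
# MP = models_phenom()
# trajs, labels = MP.multi_state(N=20, T=200,
#                                M=[[0.98, 0.02], [0.02, 0.98]],
#                                Ds=[[1, 0], [0.05, 0]],
#                                alphas=[[1, 0], [0.7, 0]])
# print(trajs.shape, labels.shape)  # (200, 20, 2) (200, 20, 3)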
# %% ../source_nbs/lib_nbs/models_phenom.ipynb 46
class models_phenom(models_phenom):
@staticmethod
def _get_distance(x):
'''
Given a matrix of size Nx2, calculates the distance between the N particles.
Parameters
----------
x : array
Particles' positions
Returns
-------
array
Distance between particles
'''
M = np.reshape(np.repeat(x[ :, :], x.shape[0], axis = 0), (x.shape[0], x.shape[0], 2))
Mtrans = M.transpose(1,0,2)
distance = np.sqrt(np.square(M[:,:, 0]-Mtrans[:,:, 0])
+ np.square(M[:,:, 1]-Mtrans[:,:, 1]))
return distance
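# --- Quick check (not part of the original file, commented out for the same
# reason): pairwise distances for three points form a symmetric matrix with
# zeros on the diagonal.
#
# x = np.array([[0., 0.], [3., 4.], [0., 8.]])
# print(models_phenom._get_distance(x))  # d(p0,p1) = 5, d(p1,p2) = 5, d(p0,p2) = 8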
# %% ../source_nbs/lib_nbs/models_phenom.ipynb 49
class models_phenom(models_phenom):
@staticmethod
def _make_escape(Pu, label, diff_state):
'''
        Given an unbinding probability (Pu), the current labeling of particles (label)
        and the current state of particles (diff_state, either bound, 1, or unbound, 0), simulates a
        stochastic unbinding mechanism.
Parameters
----------
Pu : float
            Unbinding probability
label : array
Current labeling of the particles (i.e. to which condensate they belong)
diff_state : array
Current state of the particles
Returns
-------
tuple
New labeling and diffusive state of the particles
'''
# if unbinding probability is zero
if Pu == 0:
return label, diff_state
label = label.copy()
diff_state = diff_state.copy()
label_dimers = np.unique(label[np.argwhere(diff_state == 1)])
for l in label_dimers:
if np.random.rand() < Pu:
# give new label to escaping particles
diff_state[label == l] = 0
label[label == l] = np.max(label)+np.arange(2)+1
return label, diff_state
# %% ../source_nbs/lib_nbs/models_phenom.ipynb 52
class models_phenom(models_phenom):
@staticmethod
def _make_condensates(Pb, label, diff_state, r, distance, max_label):
'''
Given a binding probability Pb, the current label of particles (label),
their current diffusive state (diff_state), the particle size (r), their
distances (distance) and the label from which binding is not possible
(max_label), simulates a binding mechanism.
Parameters
----------
Pb : float
Binding probablity.
label : array
Current labeling of the particles (i.e. to which condensate they belong)
diff_state : array
Current state of the particles
r : float
Particle size.
distance : array
Distance between particles
max_label : int
            Particles with a label greater than this value are not considered for binding
Returns
-------
tuple
New labeling and diffusive state of the particles
'''
label = label.copy()
diff_state = diff_state.copy()
# Keeping track of the ones that will dimerize
already_dimer = []
for n, l in enumerate(label):
# Consider conditions in which particles do not dimerize
if n in already_dimer or diff_state[n] == 1 or l > max_label:
continue
# Extract distances to current particle
distance_to_current = distance[n,:]
            distance_to_current[n] = 0 # exclude the particle itself
close_particles = np.argwhere((distance_to_current < 2*r) & (distance_to_current > 0)).flatten()
            # Loop over all possible dimerizing candidates
for chosen in close_particles:
# Consider conditions in which particles do not dimerize
if chosen in already_dimer or diff_state[chosen] == 1 or label[chosen] > max_label:
continue
# Draw coin to see if particle dimerizes
if np.random.rand() < Pb:
# Add dimerized particles to the new dimer counter
already_dimer.append(chosen)
already_dimer.append(n)
# Update their diffusive state
diff_state[n] = 1
diff_state[chosen] = 1
# dimerize particles
label[chosen] = l
                    # once a particle dimerizes, no more clustering!
break
return label, diff_state
# %% ../source_nbs/lib_nbs/models_phenom.ipynb 57
class models_phenom(models_phenom):
@staticmethod
def _stokes(D):
'''
Applies a Stokes-Einstein-like transformation to two diffusion coefficients.
Parameters
----------
D : tuple
Diffusion coefficients of the two binding particles.
Returns
-------
float
Resulting diffusion coefficient.
'''
D1 = D[0]; D2 = D[1]
return 1/((1/D1)+(1/D2))
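# Worked example (editor's sketch, not part of the original file): the rule
# D = 1 / (1/D1 + 1/D2) is a harmonic-mean-like combination, so D1 = 1 and
# D2 = 0.1 give D = 1/11.
# >>> models_phenom._stokes((1.0, 0.1))
# 0.09090909090909091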
# %% ../source_nbs/lib_nbs/models_phenom.ipynb 60
class models_phenom(models_phenom):
def dimerization(self,
N = 10,
T = 200,
L = 100,
r = 1,
Pu = 0.1,
Pb = 0.01,
Ds: np.array = [[1, 0], [0.1, 0]],
alphas: np.array = [[1, 0], [1, 0]],
epsilon_a = 0, stokes = False,
return_state_num = False,
deltaT = 1
):
'''
        Generates a dataset of 2D trajectories of particles performing stochastic dimerization.
Parameters
----------
N : int
Number of trajectories
T : int
Length of the trajectory
L : float
Length of the box acting as the environment
r : float
Radius of particles.
Pu : float in [0,1]
Unbinding probability.
        Pb : float in [0,1]
Binding probability.
Ds : array
List of means and variances from which to sample the diffusion coefficient of each state. If element size is one, we consider variance = 0.
alphas : array
List of means and variances from which to sample the anomalous exponent of each state. If element size is one, we consider variance = 0.
epsilon_a : float
            Distance between alpha of diffusive states (see ._sample_diff_parameters)
stokes : bool
If True, applies a Stokes-Einstein like coefficient to calculate the diffusion coefficient of dimerized particles.
            If False, the D of the dimer is the D assigned to the dimerized state of one of the two particles.
deltaT : int
Sampling time
return_state_num : bool
If True, returns as label the number assigned to the state at each time step.
Returns
-------
tuple
- trajs (array TxNx2): particles' position
            - labels (array TxNx3, or TxNx4 if return_state_num): particles' labels (see ._multi_state for details on labels)
'''
# transform lists to numpy if needed
if isinstance(Ds, list):
Ds = np.array(Ds)
if isinstance(alphas, list):
alphas = np.array(alphas)
# Info to save
pos = np.zeros((T, N, 2)) # position over time
label = np.zeros((T, N)).astype(int)
diff_state = np.zeros((T, N)).astype(int)
# Init position, labels
pos[0, :, :] = np.random.rand(N, 2)*L
label[0, :] = np.arange(pos.shape[1])
# Init alphas, Ds
# Calculate alpha/D for each particle in each state
alphas_N = np.array([gaussian(alphas[0], size = N, bound = self.bound_alpha),
gaussian(alphas[1], size = N, bound = self.bound_alpha)])
Ds_N = np.array([gaussian(Ds[0], size = N, bound = self.bound_D),
gaussian(Ds[1], size = N, bound = self.bound_D)])
# define labels over time by means of state 0
alphas_t = alphas_N[0,:].repeat(T).reshape(N,T).transpose()
Ds_t = Ds_N[0,:].repeat(T).reshape(N,T).transpose()
# initial displacements (all free particles)
disps = np.zeros((T, N, 2))
for n in range(N):
disps[:, n, 0] = models_phenom().disp_fbm(alphas_t[0, n], Ds_t[0, n], T, deltaT = deltaT)
disps[:, n, 1] = models_phenom().disp_fbm(alphas_t[0, n], Ds_t[0, n], T, deltaT = deltaT)
for t in (range(1, T)):
# Find max label to account later for escaped
max_label = np.max(label[t-1, :])
# Make particles escape
label[t, :], diff_state[t, :] = self._make_escape(Pu,
label[t-1, :],
diff_state[t-1, :])
            # Get the distance between all the particles at the previous time step
distance = self._get_distance(pos[t-1, :, :])
# Merge particles in condensates
label[t, :], diff_state[t, :] = self._make_condensates(Pb,
label[t, :],
diff_state[t, :],
r, distance, max_label)
# Find particles which changed state
label_changed, counts = np.unique(label[t, np.not_equal(diff_state[t-1,:], diff_state[t,:])],
return_counts = True)
# Calculate new displacements for particles which changed state
for l, count in zip(label_changed, counts):
index = int(np.argwhere(label[t,:] == l)[0])
state = diff_state[t, index]
### Calculating new diffusion parameters
# anomalous exponent
if epsilon_a != 0 and state == 1:
new_alpha = gaussian(alphas[1], size = 1, bound = self.bound_alpha)
idx_while = 0
while models_phenom()._constraint_alpha(alphas_N[0, label[t, :] == l].min(), new_alpha, epsilon_a):
new_alpha = gaussian(alphas[1], size = 1, bound = self.bound_alpha)
idx_while += 1
                        if idx_while > 100: # check that we are not stuck forever in the while loop
                            raise RuntimeError('Could not find a valid alpha in 100 attempts. The state distributions are probably too close.')
alphas_t[t:, label[t, :] == l] = new_alpha
else:
                    # if no epsilon is given, use the alphas sampled per particle. Although
                    # the values of both dimer members are stored here, the displacement loop
                    # below only samples with the value of the first particle.
alphas_t[t:, label[t, :] == l] = alphas_N[state, label[t, :] == l].repeat(T-t).reshape(count, T-t).transpose()
# diffusion coefficient
if stokes and state == 1:
Ds_t[t:, label[t, :] == l] = models_phenom()._stokes(Ds_t[t-1, label[t, :] == l])
                else: # if no stokes is given, use the D assigned to the dimerized state of the first particle
Ds_t[t:, label[t, :] == l] = Ds_N[state, label[t, :] == l].repeat(T-t).reshape(count, T-t).transpose()
for idx, i in enumerate(np.argwhere(label[t,:] == l)):
# We first calculate the displacements so dimers have same motion
if idx == 0:
if T-t > 1:
disp_current_x = models_phenom().disp_fbm(float(alphas_t[t, i]), float(Ds_t[t, i]), T-t, deltaT = deltaT).reshape(T-t, 1)
disp_current_y = models_phenom().disp_fbm(float(alphas_t[t, i]), float(Ds_t[t, i]), T-t, deltaT = deltaT).reshape(T-t, 1)
else:
disp_current_x = np.sqrt(2*float(Ds_t[t, i])*deltaT)*np.random.randn(1)
disp_current_y = np.sqrt(2*float(Ds_t[t, i])*deltaT)*np.random.randn(1)
disps[t:, i, 0] = disp_current_x
disps[t:, i, 1] = disp_current_y
# Update position
pos[t, :, :] = pos[t-1,:,:]+disps[t, :, :]
# Consider boundary conditions
if L is not None:
while np.max(pos[t,:, :])>L or np.min(pos[t,:, :])< 0:
pos[t, pos[t,:, :] > L] = pos[t, pos[t,:, :] > L] - 2*(pos[t, pos[t,:, :] > L] - L)
pos[t, pos[t,:, :] < 0] = - pos[t, pos[t,:, :] < 0]
# Define state of particles based on values of alphas: either free or directed
label_diff_state = np.zeros_like(alphas_t)
label_diff_state[alphas_t < self.alpha_directed] = self.lab_state.index('f')
label_diff_state[alphas_t >= self.alpha_directed] = self.lab_state.index('d')
if return_state_num:
return pos, np.array((alphas_t,
Ds_t,
label_diff_state,
diff_state)).transpose(1,2,0)
else:
return pos, np.array((alphas_t,
Ds_t,
label_diff_state
)).transpose(1,2,0)
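# Minimal usage sketch (editor's addition; parameter values are arbitrary):
# trajs, labels = models_phenom().dimerization(N = 5, T = 100, L = 50,
#                                              Pu = 0.1, Pb = 0.05)
# trajs.shape    # (100, 5, 2): time x particles x (x, y)
# labels.shape   # (100, 5, 3): alpha, D and diffusive-state label per step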
# %% ../source_nbs/lib_nbs/models_phenom.ipynb 67
class models_phenom(models_phenom):
@staticmethod
def _update_bound(mask, # Current binding array
N, # Number of particles
pos, # Position of particles
Nt, # Number of traps
traps_pos, # Position of traps
Pb, # Binding probability
Pu, # Unbinding probability
r, # Trap radius
): # Updated binding array
'''
Binds and unbinds particles to traps based on their position and binding and unbinding probabilities
Parameters
----------
mask : array
Current binding array
N : int
Number of particles
pos : array
Position of particles
Nt : int
Number of traps
traps_pos : array
Position of traps
Pb : float in [0,1]
Binding probability
Pu : float in [0,1]
Unbinding probability
r : float
Trap radius
Returns
-------
array
Updated binding array
'''
        # from the ones that are bound, get the ones that unbind. These are discarded for binding in the same time step
mask_new_free = np.array(1-(np.random.rand(N) < Pu)*mask).astype(bool)
# calculate the distance between traps and particles
d = models_phenom._get_distance(np.vstack((traps_pos, pos)))[Nt:, :Nt]
mask_close = (d < r).sum(1).astype(bool)
# get mask for binding
mask_new_bind = np.random.rand(N) < Pb
# update the bound vector with the previous conditions:
# first, the ones that unbind
mask *= mask_new_free
# then, the ones that are close + bind. Mask_new_free is added to avoid binding
# of the ones that just unbound
mask += mask_close*mask_new_bind*mask_new_free
return mask
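# Deterministic corner case (editor's sketch, not part of the original file):
# with Pu = 0 and Pb = 1, exactly the particles within r of a trap end up bound.
# >>> mask = np.zeros(2).astype(bool)
# >>> pos = np.array([[0., 0.], [10., 10.]]); traps = np.array([[0.5, 0.]])
# >>> models_phenom._update_bound(mask, 2, pos, 1, traps, Pb = 1, Pu = 0, r = 1)
# array([ True, False])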
# %% ../source_nbs/lib_nbs/models_phenom.ipynb 69
class models_phenom(models_phenom):
def immobile_traps(self,
N = 10,
T = 200,
L = 100,
r = 1,
Pu = 0.1,
Pb = 0.01,
Ds = [1, 0],
alphas = [1, 0],
Nt = 10,
traps_pos: np.array = None,
deltaT = 1
):
'''
Generates a dataset of 2D trajectories of particles diffusing in an environment with immobilizing traps.
Parameters
----------
N : int
Number of trajectories
T : int
Length of the trajectory
L : float
Length of the box acting as the environment
r : float
Radius of particles.
Pu : float in [0,1]
Unbinding probability.
        Pb : float in [0,1]
Binding probability.
Ds : list, float
Mean and variance from which to sample the diffusion coefficient of the free state. If float, we consider variance = 0
alphas : list, float
Mean and variance from which to sample the anomalous exponent of the free state. If float, we consider variance = 0
Nt : int
Number of traps
traps_pos : array, None
Positions of the traps. Can be given by array or sampled randomly if None.
deltaT : int
Sampling time.
Returns
-------
tuple
- trajs (array TxNx2): particles' position
            - labels (array TxNx3): particles' labels (see ._multi_state for details on labels)
'''
# Info to output
pos = np.zeros((T, N, 2)) # position over time
output_label = np.zeros((T, N, 3))
disps = np.zeros((T, N, 2))
diff_state = np.zeros((T, N)).astype(int)
mask_bound = diff_state[0, :].astype(bool)
# Init position, labels
pos[0, :, :] = np.random.rand(N, 2)*L
# Init alphas, Ds
        # Calculate alpha/D for each particle in the free state
alphas_N = gaussian(alphas, size = N, bound = self.bound_alpha)
Ds_N = gaussian(Ds, size = N, bound = self.bound_D)
# Single particle case
if N == 1:
alphas_N = [alphas_N]
Ds_N = [Ds_N]
# Traps positions
if traps_pos is None:
traps_pos = np.random.rand(Nt, 2)*L
# Get displacement for every particle
for n in range(N):
disps[:, n, 0] = models_phenom().disp_fbm(alphas_N[n], Ds_N[n], T, deltaT = deltaT)
disps[:, n, 1] = models_phenom().disp_fbm(alphas_N[n], Ds_N[n], T, deltaT = deltaT)
# Set initial values of labels
output_label[0, :, 0] = alphas_N
output_label[0, :, 1] = Ds_N
for t in (range(1, T)):
mask_bound = self._update_bound(mask = mask_bound, # current bind vector
N = N, # number of particles
pos = pos[t-1, :, :], # position of particles
Nt = Nt, # number of traps
traps_pos = traps_pos, # position of traps
Pb = Pb, # binding probability
Pu = Pu, # unbinding probability
r = r, # trap radius
)
# Update the diffusive state
diff_state[t,:] = mask_bound
# Regenerate trajectories for untrapped particles
untrapped = np.argwhere((diff_state[t,:] - diff_state[t-1,:]) == -1).flatten()
for un_part in untrapped:
if T-t > 1:
# Recalculate new displacements for next steps
disps[t:, un_part, 0] = models_phenom().disp_fbm(alphas_N[un_part], Ds_N[un_part], T-t, deltaT = deltaT)
disps[t:, un_part, 1] = models_phenom().disp_fbm(alphas_N[un_part], Ds_N[un_part], T-t, deltaT = deltaT)
else:
disps[t:, un_part, 0] = np.sqrt(2*Ds_N[un_part]*deltaT)*np.random.randn()
disps[t:, un_part, 1] = np.sqrt(2*Ds_N[un_part]*deltaT)*np.random.randn()
# Update the position
pos[t, :, :] = pos[t-1, :, :] + (1-mask_bound).reshape(N,1)*disps[t, :, :]
# Update labels
output_label[t, :, 0] = alphas_N*(1-mask_bound)
output_label[t, :, 1] = Ds_N*(1-mask_bound)
# Consider boundary conditions
if L is not None:
while np.max(pos[t,:, :])>L or np.min(pos[t,:, :])< 0:
pos[t, pos[t,:, :] > L] = pos[t, pos[t,:, :] > L] - 2*(pos[t, pos[t,:, :] > L] - L)
pos[t, pos[t,:, :] < 0] = - pos[t, pos[t,:, :] < 0]
# Define state of particles based on values of Ds and alphas. Here, we use the fact
# that alpha = 0 for immobilization
output_label[output_label[:,:,0] == 0, -1] = self.lab_state.index('i')
output_label[(output_label[:,:,0] > 0) & (output_label[:,:,0] < self.alpha_directed), -1] = self.lab_state.index('f')
output_label[output_label[:,:,0] > self.alpha_directed, -1] = self.lab_state.index('d')
return pos, output_label
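# Minimal usage sketch (editor's addition; parameter values are arbitrary):
# trajs, labels = models_phenom().immobile_traps(N = 3, T = 50, L = 20,
#                                                Nt = 5, Pb = 0.2, Pu = 0.05)
# trajs.shape    # (50, 3, 2)
# labels.shape   # (50, 3, 3); alpha = D = 0 flags the trapped (immobile) steps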
# %% ../source_nbs/lib_nbs/models_phenom.ipynb 76
class models_phenom(models_phenom):
@staticmethod
def _distribute_circular_compartments(Nc, r, L):
'''
Distributes circular compartments over an environment without overlapping. Raises a warning and stops when no more compartments can be inserted.
Parameters
----------
Nc : float
Number of compartments
r : float
Size of the compartments
L : float
Side length of the squared environment.
Returns
-------
array
Position of the centers of the compartments
'''
comp_center = np.random.rand(1, 2)*(L - 2*r) + r
hardness = 0
while comp_center.shape[0] < Nc:
new_pos = np.random.rand(2)*(L - 2*r) + r
distance = np.linalg.norm(comp_center - new_pos, axis = 1)
if min(distance) > 2*r:
comp_center = np.vstack((comp_center, new_pos.reshape(1,2)))
hardness += 1
if hardness > Nc*100:
                warn_str = f'Could only accommodate {comp_center.shape[0]} circles of the {Nc} requested. Increase the size of the environment or decrease the radius of the compartments.'
warnings.warn(warn_str)
break
return comp_center
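# Illustrative check (editor's sketch, not part of the original file): the
# returned centers are pairwise more than 2*r apart and at least r away from
# every wall of the box.
# >>> centers = models_phenom._distribute_circular_compartments(Nc = 5, r = 1, L = 20)
# >>> centers.shape
# (5, 2)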
# %% ../source_nbs/lib_nbs/models_phenom.ipynb 80
from .utils_trajectories import trigo
class models_phenom(models_phenom):
@staticmethod
def _reflected_position(circle_center,
circle_radius,
beg,
end,
precision_boundary = 1e-4):
'''
        Given the beginning and end of a segment crossing the boundary of a circle, calculates the new position considering that boundaries are fully reflective.
Parameters
----------
circle_center : float
Center of the circle
circle_radius : float
Radius of the circle
        beg : tuple
            Position (in 2D) of the beginning of the segment
        end : tuple
            Position (in 2D) of the end of the segment
precision_boundary : float
            Small margin around the true boundary which is also treated as boundary, for numerical stability
Returns
-------
tuple
- Reflected position
- Intersection point
'''
        # If the beginning of the segment lies exactly on the boundary, no intersection is found.
        # In that case, we bring the point closer to the center of the circle so that it sits
        # at a distance 'precision_boundary' from the border
if np.linalg.norm(circle_center - beg) > circle_radius - precision_boundary:
vec = trigo.seg_to_vec([circle_center, beg])
beg = np.array(circle_center)+(circle_radius-precision_boundary)*(-np.array(vec)/np.dot(vec, vec)**0.5)
# find the intersection between the line drawn by the displacement and the circle
intersect = trigo.circle_line_segment_intersection(circle_center = circle_center,
circle_radius = circle_radius,
pt1 = beg,
pt2 = end)[-1]
# Draw lines and calculate angles between radius and begining-intersection
line1 = [circle_center, intersect]
line2 = [beg, intersect]
angle = trigo.ang_line(line1, line2)
# Calculate distance between intersection and end of displacement
dist_int_end = np.linalg.norm(np.array(intersect) - end)
# Create radius vector and calculate the tangent vector
vec_radius = trigo.seg_to_vec([circle_center, intersect])
tangent = trigo.rotate_vec(vec_radius, np.pi/2)
# Calculate the angle between the tangent and the displacement vector
angle_tan = trigo.ang_vec(tangent, trigo.seg_to_vec([beg, intersect]))
        # Change sign to correctly get the reflection
if angle_tan < np.pi/2: angle = - angle
# Rotate the radius vector with the reflection angle and normalize by magnitude
vec_bounce = trigo.rotate_vec(vec_radius, angle)
vec_bounce /= np.dot(vec_bounce, vec_bounce)**0.5
# Final point is the previous vector times the distance starting at the intersect point
return np.array(intersect)+dist_int_end*np.array(vec_bounce), intersect
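# Geometric sanity check (editor's sketch, assuming the trigo helpers behave as
# used above): a step from the center of the unit circle at (0, 0) to (2, 0)
# hits the boundary head-on at (1, 0) and is reflected straight back, i.e.
# new_pos, intersect = models_phenom._reflected_position((0, 0), 1,
#                                                        beg = (0, 0), end = (2, 0))
# should give new_pos ~ (0, 0) and intersect ~ (1, 0).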
# %% ../source_nbs/lib_nbs/models_phenom.ipynb 84
class models_phenom(models_phenom):
@staticmethod
def _confinement_traj(T = 200,
L = 100,
Ds = [1, 0.1],
alphas = [1, 1],
r = 1,
comp_center = None,
Nc = 10,
trans = 0.1,
deltaT = 1):
'''
Generates a 2D trajectory of particles diffusing in an environment with partially transmitting circular compartments.
Parameters
----------
T : int
Length of the trajectory
L : float
Length of the box acting as the environment
Ds : list
            Diffusion coefficients of the two diffusive states (first free, then confined). Size must be 2.
alphas : list
            Anomalous exponents of the two diffusive states (first free, then confined). Size must be 2.
r : float
Radius of the compartments.
comp_center : array, None
If given, center of the compartments. If None, centers are uniformly sampled.
Nc : int
Number of compartments
trans : float
Transmittance of the boundaries
deltaT : int
Sampling time.
Returns
-------
tuple
- pos (array Tx2): particle's position
            - labels (array Tx3): particle's labels (see ._multi_state for details on labels)
'''
# transform lists to numpy if needed
if isinstance(Ds, list):
Ds = np.array(Ds)
if isinstance(alphas, list):
alphas = np.array(alphas)
# Traps positions
if comp_center is None:
comp_center = models_phenom._distribute_circular_compartments(Nc = Nc,
r = r,
L = L)
# Particle's properties
pos = np.zeros((T, 2))
pos[0,:] = np.random.rand(2)*L
state = np.zeros(T).astype(int)
        # Check if the particle starts inside a compartment
distance_centers = np.linalg.norm(comp_center - pos[0, :], axis = 1)
if distance_centers.min() < r:
            # we assign the state to the compartment the particle is in
compartment = distance_centers.argmin()
state[0] = 1
# Output labels
labels = np.zeros((T, 3))
labels[0, 0] = alphas[state[0]]
labels[0, 1] = Ds[state[0]]
# Trajectory
dispx = models_phenom().disp_fbm(alphas[state[0]], Ds[state[0]], T, deltaT = deltaT)
dispy = models_phenom().disp_fbm(alphas[state[0]], Ds[state[0]], T, deltaT = deltaT)
disp_t = 0
for t in range(1, T):
pos[t, :] = [pos[t-1, 0]+dispx[disp_t], pos[t-1, 1]+dispy[disp_t]]
# if the particle was inside a compartment
if state[t-1] == 1:
                # check if it exited the compartment
current_distance = np.linalg.norm(comp_center[compartment, :] - pos[t, :])
if current_distance > r:
coin = np.random.rand()
# particle escaping
if coin < trans:
                        # check if we entered a different compartment
distance_centers = np.linalg.norm(comp_center - pos[t, :], axis = 1)
if distance_centers.min() < r:
                            # we assign the state to the compartment the particle is in
compartment = distance_centers.argmin()
state[t] = 1
else: state[t] = 0
# particle reflecting
else:
beg = pos[t-1, :]
while current_distance > r:
pos[t, :], intersect = models_phenom._reflected_position(circle_center = comp_center[compartment, :],
circle_radius = r,
beg = beg,
end = pos[t, :])
beg = intersect
distance_beg = np.linalg.norm(comp_center[compartment, :] - beg)
current_distance = np.linalg.norm(comp_center[compartment, :] - pos[t,:])
state[t] = 1
# if the particle stayed inside the compartment
else: state[t] = 1
# If particle was outside of the compartment
elif state[t-1] == 0:
# Check if particle entered a new compartment
distance_centers = np.linalg.norm(comp_center - pos[t, :], axis = 1)
if distance_centers.min() < r:
                    # we assign the state to the compartment the particle is in
compartment = distance_centers.argmin()
state[t] = 1
                # if the particle stayed outside the compartments
else: state[t] = 0
# If the state changed
if state[t] != state[t-1]:
if T-t > 1:
dispx = models_phenom().disp_fbm(alphas[state[t]], Ds[state[t]], T-t, deltaT = deltaT)
dispy = models_phenom().disp_fbm(alphas[state[t]], Ds[state[t]], T-t, deltaT = deltaT)
else:
dispx, dispy = [np.sqrt(2*Ds[state[t]]*deltaT)*np.random.randn(),
np.sqrt(2*Ds[state[t]]*deltaT)*np.random.randn()]
disp_t = 0
# If the state did not change:
else: disp_t += 1
# Boundary conditions
if L is not None:
# Reflecting boundary conditions
while np.max(pos[t, :])>L or np.min(pos[t, :])< 0:
pos[t, pos[t, :] > L] = pos[t, pos[t, :] > L] - 2*(pos[t, pos[t, :] > L] - L)
pos[t, pos[t, :] < 0] = - pos[t, pos[t, :] < 0]
labels[t, 0] = alphas[state[t]]
labels[t, 1] = Ds[state[t]]
# Define state of particles based on the state array. First free/directed
if alphas[0] < models_phenom().alpha_directed:
labels[state == 0, -1] = models_phenom().lab_state.index('f')
else:
labels[state == 0, -1] = models_phenom().lab_state.index('d')
# Then confined
labels[state == 1, -1] = models_phenom().lab_state.index('c')
return pos, labels
# %% ../source_nbs/lib_nbs/models_phenom.ipynb 88
class models_phenom(models_phenom):
def confinement(self,
N = 10,
T = 200,
L = 100,
Ds = [[1, 0], [0.1, 0]],
alphas = [[1, 0], [1, 0]],
gamma_d = [1],
epsilon_a = [0],
r = 1,
comp_center = None,
Nc = 10,
trans = 0.1,
deltaT = 1):
'''
Generates a dataset of 2D trajectories of particles diffusing in an environment with partially transmitting circular compartments.
Parameters
----------
N : int
Number of trajectories
T : int
Length of the trajectory
L : float
Length of the box acting as the environment
Ds : list
List of means and variances from which to sample the diffusion coefficient of each state. If element size is one, we consider variance = 0.
        alphas : list
List of means and variances from which to sample the anomalous exponent of each state. If element size is one, we consider variance = 0.
gamma_d : list
            Minimum factor between D of diffusive states (see ._sample_diff_parameters). Size is number of states -1 (in this case size 1)
        epsilon_a : list
            Distance between alpha of diffusive states (see ._sample_diff_parameters). Size is number of states -1 (in this case size 1)
r : float
Radius of the compartments.
comp_center : array, None
If given, center of the compartments. If None, centers are uniformly sampled.
Nc : int
Number of compartments
trans : float
Transmittance of the boundaries
deltaT : int
Sampling time.
Returns
-------
tuple
            - trajs (array TxNx2): particles' position
            - labels (array TxNx3): particles' labels (see ._multi_state for details on labels)
'''
if isinstance(Ds, list):
Ds = np.array(Ds)
if isinstance(alphas, list):
alphas = np.array(alphas)
data = np.zeros((T, N, 2))
labels = np.zeros((T, N, 3))
for n in range(N):
# Defined physical parameters for each trajectory
alphas_traj, Ds_traj = self._sample_diff_parameters(alphas = alphas,
Ds = Ds,
num_states = 2,
epsilon_a = epsilon_a,
gamma_d = gamma_d)
# Get trajectory from single traj function
pos, lab = self._confinement_traj(T = T,
Ds = Ds_traj,
alphas = alphas_traj,
L = L,
deltaT = deltaT,
r = r,
comp_center = comp_center,
Nc = Nc,
trans = trans)
data[:, n, :] = pos
labels[:, n, :] = lab
return data, labels
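# Minimal usage sketch (editor's addition; parameter values are arbitrary):
# trajs, labels = models_phenom().confinement(N = 4, T = 100, L = 100,
#                                             Nc = 10, r = 5, trans = 0.1)
# trajs.shape    # (100, 4, 2)
# labels.shape   # (100, 4, 3); last channel encodes free/directed vs confined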
| 57,628 | 40.10485 | 171 |
py
|
andi_datasets
|
andi_datasets-master/andi_datasets/models_theory.py
|
# AUTOGENERATED! DO NOT EDIT! File to edit: ../source_nbs/lib_nbs/models_theory.ipynb.
# %% auto 0
__all__ = ['models_theory']
# %% ../source_nbs/lib_nbs/models_theory.ipynb 3
import numpy as np
from stochastic.processes.continuous import FractionalBrownianMotion as fbm
from math import pi as pi
from scipy.special import erfcinv
from .utils_trajectories import regularize, bm1D, sample_sphere
# %% ../source_nbs/lib_nbs/models_theory.ipynb 5
class models_theory(object):
def __init__(self):
'''Constructor of the class'''
def attm(self, T, alpha, D = 1):
if D == 1: return self._oneD().attm(T, alpha)
elif D == 2: return self._twoD().attm(T, alpha)
elif D == 3: return self._threeD().attm(T, alpha)
else:
raise ValueError('Incorrect walk dimension')
def sbm(self, T, alpha, D = 1):
if D == 1: return self._oneD().sbm(T, alpha)
elif D == 2: return self._twoD().sbm(T, alpha)
elif D == 3: return self._threeD().sbm(T, alpha)
else:
raise ValueError('Incorrect walk dimension')
def ctrw(self, T, alpha, D = 1):
if D == 1: return self._oneD().ctrw(T, alpha)
elif D == 2: return self._twoD().ctrw(T, alpha)
elif D == 3: return self._threeD().ctrw(T, alpha)
else:
raise ValueError('Incorrect walk dimension')
def fbm(self, T, alpha, D = 1):
if D == 1: return self._oneD().fbm(T, alpha)
elif D == 2: return self._twoD().fbm(T, alpha)
elif D == 3: return self._threeD().fbm(T, alpha)
else:
raise ValueError('Incorrect walk dimension')
def lw(self, T, alpha, D = 1):
if D == 1: return self._oneD().lw(T, alpha)
elif D == 2: return self._twoD().lw(T, alpha)
elif D == 3: return self._threeD().lw(T, alpha)
else:
raise ValueError('Incorrect walk dimension')
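# Minimal usage sketch (editor's addition): every generator returns the D
# spatial coordinates concatenated into a single 1D array of length D*T, e.g.
# traj = models_theory().fbm(T = 100, alpha = 0.8, D = 2)
# traj.shape     # (200,): first 100 entries are x(t), last 100 are y(t)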
# %% ../source_nbs/lib_nbs/models_theory.ipynb 7
class models_theory(models_theory):
class _oneD():
        '''Class containing one-dimensional diffusion models'''
def fbm(self, T, alpha):
''' Creates a 1D fractional brownian motion trajectory'''
H = alpha*0.5
return fbm(hurst=H).sample(int(T-1))
def ctrw(self, T, alpha, regular_time = True):
            ''' Creates a 1D continuous time random walk trajectory
Optional parameters:
:regular_time (bool):
- True if to transform the trajectory to regular time. '''
if alpha > 1:
raise ValueError('Continuous random walks only allow for anomalous exponents <= 1.')
# Generate the waiting times from power-law distribution
times = np.cumsum((1-np.random.rand(T))**(-1/alpha))
times = times[:np.argmax(times>T)+1]
# Generate the positions of the walk
positions = np.cumsum(np.random.randn(len(times)))
positions -= positions[0]
# Output
if regular_time:
return regularize(positions, times, T)
else:
return np.stack((times, positions))
def lw(self, T, alpha):
''' Creates a 1D Levy walk trajectory '''
if alpha < 1:
raise ValueError('Levy walks only allow for anomalous exponents > 1.')
# Define exponents for the distribution of flight times
if alpha == 2:
sigma = np.random.rand()
else:
sigma = 3-alpha
dt = (1-np.random.rand(T))**(-1/sigma)
dt[dt > T] = T+1
# Define the velocity
v = 10*np.random.rand()
# Generate the trajectory
positions = np.empty(0)
for t in dt:
positions = np.append(positions, v*np.ones(int(t))*(2*np.random.randint(0,2)-1))
if len(positions) > T:
break
return np.cumsum(positions[:int(T)]) - positions[0]
def attm(self, T, alpha, regime = 1):
'''Creates a 1D trajectory following the annealed transient time model
Optional parameters:
:regime (int):
- Defines the ATTM regime. Accepts three values: 0,1,2.'''
if regime not in [0,1,2]:
raise ValueError('ATTM has only three regimes: 0, 1 or 2.')
if alpha > 1:
raise ValueError('ATTM only allows for anomalous exponents <= 1.')
# Gamma and sigma selection
if regime == 0:
sigma = 3*np.random.rand()
gamma = np.random.uniform(low = -5, high = sigma)
if alpha < 1:
raise ValueError('ATTM regime 0 only allows for anomalous exponents = 1.')
elif regime == 1:
sigma = 3*np.random.uniform(low = 1e-2, high = 1.1)
gamma = sigma/alpha
while sigma > gamma or gamma > sigma + 1:
sigma = 3*np.random.uniform(low = 1e-2, high = 1.1)
gamma = sigma/alpha
elif regime == 2:
gamma = 1/(1-alpha)
sigma = np.random.uniform(low = 1e-2, high = gamma-1)
# Generate the trajectory
positions = np.array([0])
while len(positions) < T:
Ds =(1-np.random.uniform(low=0.1, high=0.99))**(1/sigma)
ts = Ds**(-gamma)
if ts > T:
ts = T
positions = np.append(positions, positions[-1]+bm1D(ts, Ds))
return positions[:T]-positions[0]
def sbm(self, T, alpha, sigma = 1):
'''Creates a scaled brownian motion trajectory'''
msd = (sigma**2)*np.arange(T+1)**alpha
dx = np.sqrt(msd[1:]-msd[:-1])
dx = np.sqrt(2)*dx*erfcinv(2-2*np.random.rand(len(dx)))
return np.cumsum(dx)-dx[0]
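# Quick checks (editor's sketch, not part of the original file): the 1D
# generators return arrays of length T, and invalid anomalous exponents raise
# ValueError, e.g.
# models_theory._oneD().sbm(T = 100, alpha = 0.5).shape   # (100,)
# models_theory._oneD().ctrw(T = 100, alpha = 1.5)        # raises ValueError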
# %% ../source_nbs/lib_nbs/models_theory.ipynb 9
class models_theory(models_theory):
class _twoD():
def ctrw(self, T, alpha, regular_time = True):
            ''' Creates a 2D continuous time random walk trajectory
Optional parameters:
:regular_time (bool):
- True if to transform the trajectory to regular time. '''
if alpha > 1:
raise ValueError('Continuous random walks only allow for anomalous exponents <= 1.')
# Generate the waiting times from power-law distribution
times = np.cumsum((1-np.random.rand(T))**(-1/alpha))
times = times[:np.argmax(times>T)+1]
# Generate the positions of the walk
posX = np.cumsum(np.random.randn(len(times)))
posY = np.cumsum(np.random.randn(len(times)))
posX -= posX[0]
posY -= posY[0]
# Regularize and output
if regular_time:
regX = regularize(posX, times, T)
regY = regularize(posY, times, T)
return np.concatenate((regX, regY))
else:
return np.stack((times, posX, posY))
def fbm(self, T, alpha):
''' Creates a 2D fractional brownian motion trajectory'''
            # Define Hurst exponent
H = alpha*0.5
return np.concatenate((fbm(hurst=H).sample(int(T-1)), fbm(hurst=H).sample(int(T-1))))
def lw(self, T, alpha):
''' Creates a 2D Levy walk trajectory '''
if alpha < 1:
raise ValueError('Levy walks only allow for anomalous exponents > 1.')
# Define exponents for the distribution of times
if alpha == 2:
sigma = np.random.rand()
else:
sigma = 3-alpha
dt = (1-np.random.rand(T))**(-1/sigma)
dt[dt > T] = T+1
# Define the velocity
v = 10*np.random.rand()
# Define the array where we save step length
d= np.empty(0)
# Define the array where we save the angle of the step
angles = np.empty(0)
# Generate trajectory
for t in dt:
d = np.append(d, v*np.ones(int(t))*(2*np.random.randint(0,2)-1))
angles = np.append(angles, np.random.uniform(low = 0, high = 2*pi)*np.ones(int(t)))
if len(d) > T:
break
d = d[:int(T)]
angles = angles[:int(T)]
posX, posY = [d*np.cos(angles), d*np.sin(angles)]
return np.concatenate((np.cumsum(posX)-posX[0], np.cumsum(posY)-posY[0]))
def attm(self, T, alpha, regime = 1):
'''Creates a 2D trajectory following the annealed transient time model
Optional parameters:
:regime (int):
- Defines the ATTM regime. Accepts three values: 0,1,2.'''
if regime not in [0,1,2]:
raise ValueError('ATTM has only three regimes: 0, 1 or 2.')
if alpha > 1:
raise ValueError('ATTM only allows for anomalous exponents <= 1.')
# Gamma and sigma selection
if regime == 0:
sigma = 3*np.random.rand()
gamma = np.random.uniform(low = -5, high = sigma)
if alpha < 1:
raise ValueError('ATTM regime 0 only allows for anomalous exponents = 1.')
elif regime == 1:
sigma = 3*np.random.uniform(low = 1e-2, high = 1.1)
gamma = sigma/alpha
while sigma > gamma or gamma > sigma + 1:
sigma = 3*np.random.uniform(low = 1e-2, high = 1.1)
gamma = sigma/alpha
elif regime == 2:
gamma = 1/(1-alpha)
sigma = np.random.uniform(low = 1e-2, high = gamma-1)
# Generate the trajectory
posX = np.array([0])
posY = np.array([0])
while len(posX) < T:
Ds =(1-np.random.uniform(low=0.1, high=0.99))**(1/sigma)
ts = Ds**(-gamma)
if ts > T:
ts = T
posX = np.append(posX, posX[-1]+bm1D(ts, Ds))
posY = np.append(posY, posY[-1]+bm1D(ts, Ds))
return np.concatenate((posX[:T]-posX[0], posY[:T]-posY[0]))
def sbm(self, T, alpha, sigma = 1):
'''Creates a scaled brownian motion trajectory'''
msd = (sigma**2)*np.arange(T+1)**alpha
deltas = np.sqrt(msd[1:]-msd[:-1])
dx = np.sqrt(2)*deltas*erfcinv(2-2*np.random.rand(len(deltas)))
dy = np.sqrt(2)*deltas*erfcinv(2-2*np.random.rand(len(deltas)))
return np.concatenate((np.cumsum(dx)-dx[0], np.cumsum(dy)-dy[0]))
# %% ../source_nbs/lib_nbs/models_theory.ipynb 11
class models_theory(models_theory):
class _threeD():
def ctrw(self, T, alpha, regular_time = True):
            ''' Creates a 3D continuous time random walk trajectory
Optional parameters:
:regular_time (bool):
- True if to transform the trajectory to regular time. '''
if alpha > 1:
raise ValueError('Continuous random walks only allow for anomalous exponents <= 1.')
# Generate the waiting times from power-law distribution
times = np.cumsum((1-np.random.rand(T))**(-1/alpha))
times = np.append(0, times)
times = times[:np.argmax(times>T)+1]
# Generate the positions of the walk
lengths = np.random.randn(len(times))
posX, posY, posZ = np.cumsum(sample_sphere(len(times), lengths), axis=1)
posX = posX - posX[0]
posY = posY - posY[0]
posZ = posZ - posZ[0]
# Regularize and output
if regular_time:
regX = regularize(posX, times, T)
regY = regularize(posY, times, T)
regZ = regularize(posZ, times, T)
return np.concatenate((regX, regY, regZ))
else:
return np.stack((times, posX, posY, posZ))
def fbm(self, T, alpha):
''' Creates a 3D fractional brownian motion trajectory'''
# Define Hurst exponent
H = alpha*0.5
return np.concatenate((fbm(hurst=H).sample(int(T-1)), fbm(hurst=H).sample(int(T-1)), fbm(hurst=H).sample(int(T-1))))
def lw(self, T, alpha, regular_time = True):
''' Creates a 3D Levy walk trajectory '''
if alpha < 1:
raise ValueError('Levy walks only allow for anomalous exponents > 1.')
# Define exponents for the distribution of times
if alpha == 2:
sigma = np.random.rand()
else:
sigma = 3-alpha
dt = (1-np.random.rand(T))**(-1/sigma)
dt[dt>T] = T+1
# Define the velocity
v = 10*np.random.rand()
# Create the trajectory
posX = np.empty(0)
posY = np.empty(0)
posZ = np.empty(0)
for t in dt:
distX, distY, distZ = sample_sphere(1, v)
posX = np.append(posX, distX*np.ones(int(t)))
posY = np.append(posY, distY*np.ones(int(t)))
posZ = np.append(posZ, distZ*np.ones(int(t)))
if len(posX) > T:
break
return np.concatenate((np.cumsum(posX[:T])-posX[0],
np.cumsum(posY[:T])-posY[0],
np.cumsum(posZ[:T])-posZ[0]))
def attm(self, T, alpha, regime = 1):
'''Creates a 3D trajectory following the annealed transient time model
Optional parameters:
:regime (int):
- Defines the ATTM regime. Accepts three values: 0,1,2.'''
if regime not in [0,1,2]:
raise ValueError('ATTM has only three regimes: 0, 1 or 2.')
if alpha > 1:
raise ValueError('ATTM only allows for anomalous exponents <= 1.')
# Parameter selection
if regime == 0:
sigma = 3*np.random.rand()
gamma = np.random.uniform(low = -5, high = sigma)
if alpha < 1:
raise ValueError('ATTM Regime 0 can only produce trajectories with anomalous exponents = 1')
elif regime == 1:
sigma = 3*np.random.uniform(low = 1e-2, high = 1.1)
gamma = sigma/alpha
while sigma > gamma or gamma > sigma + 1:
sigma = 3*np.random.uniform(low = 1e-2, high = 1.1)
gamma = sigma/alpha
elif regime == 2:
gamma = 1/(1-alpha)
sigma = np.random.uniform(low = 1e-2, high = gamma-1)
# Create the trajectory
posX = np.array([0])
posY = np.array([0])
posZ = np.array([0])
while len(posX) < T:
Ds =(1-np.random.uniform(low=0.1, high=0.99))**(1/sigma)
ts = Ds**(-gamma)
if ts > T:
ts = T
steps = np.sqrt(2*Ds)*np.random.randn(int(ts))
distX, distY, distZ = sample_sphere(len(steps), steps)
posX = np.append(posX, posX[-1]+distX)
posY = np.append(posY, posY[-1]+distY)
posZ = np.append(posZ, posZ[-1]+distZ)
return np.concatenate((posX[:T]-posX[0], posY[:T]-posY[0], posZ[:T]-posZ[0]))
def sbm(self, T, alpha, sigma = 1):
'''Creates a scaled brownian motion trajectory'''
msd = (sigma**2)*np.arange(T+1)**alpha
deltas = np.sqrt(msd[1:]-msd[:-1])
dx = np.sqrt(2)*deltas*erfcinv(2-2*np.random.rand(len(deltas)))
dy = np.sqrt(2)*deltas*erfcinv(2-2*np.random.rand(len(deltas)))
dz = np.sqrt(2)*deltas*erfcinv(2-2*np.random.rand(len(deltas)))
return np.concatenate((np.cumsum(dx)-dx[0], np.cumsum(dy)-dy[0], np.cumsum(dz)-dz[0]))
| 17,039 | 44.198939 | 128 |
py
|
andi_datasets
|
andi_datasets-master/andi_datasets/_modidx.py
|
# Autogenerated by nbdev
d = { 'settings': { 'branch': 'master',
'doc_baseurl': '/andi_datasets',
'doc_host': 'https://andichallenge.github.io',
'git_url': 'https://github.com/andichallenge/andi_datasets',
'lib_path': 'andi_datasets'},
'syms': { 'andi_datasets.analysis': { 'andi_datasets.analysis.CH_changepoints': ( 'lib_nbs/analysis.html#ch_changepoints',
'andi_datasets/analysis.py'),
'andi_datasets.analysis.dataset_angles': ( 'lib_nbs/analysis.html#dataset_angles',
'andi_datasets/analysis.py'),
'andi_datasets.analysis.get_angle': ( 'lib_nbs/analysis.html#get_angle',
'andi_datasets/analysis.py'),
'andi_datasets.analysis.msd_analysis': ( 'lib_nbs/analysis.html#msd_analysis',
'andi_datasets/analysis.py'),
'andi_datasets.analysis.msd_analysis.__init__': ( 'lib_nbs/analysis.html#msd_analysis.__init__',
'andi_datasets/analysis.py'),
'andi_datasets.analysis.msd_analysis.get_diff_coeff': ( 'lib_nbs/analysis.html#msd_analysis.get_diff_coeff',
'andi_datasets/analysis.py'),
'andi_datasets.analysis.msd_analysis.get_exponent': ( 'lib_nbs/analysis.html#msd_analysis.get_exponent',
'andi_datasets/analysis.py'),
'andi_datasets.analysis.msd_analysis.tamsd': ( 'lib_nbs/analysis.html#msd_analysis.tamsd',
'andi_datasets/analysis.py'),
'andi_datasets.analysis.vacf': ('lib_nbs/analysis.html#vacf', 'andi_datasets/analysis.py')},
'andi_datasets.datasets_challenge': { 'andi_datasets.datasets_challenge._defaults_andi2': ( 'lib_nbs/datasets_challenge.html#_defaults_andi2',
'andi_datasets/datasets_challenge.py'),
'andi_datasets.datasets_challenge._defaults_andi2.__init__': ( 'lib_nbs/datasets_challenge.html#_defaults_andi2.__init__',
'andi_datasets/datasets_challenge.py'),
'andi_datasets.datasets_challenge._get_dic_andi2': ( 'lib_nbs/datasets_challenge.html#_get_dic_andi2',
'andi_datasets/datasets_challenge.py'),
'andi_datasets.datasets_challenge.challenge_phenom_dataset': ( 'lib_nbs/datasets_challenge.html#challenge_phenom_dataset',
'andi_datasets/datasets_challenge.py'),
'andi_datasets.datasets_challenge.challenge_theory_dataset': ( 'lib_nbs/datasets_challenge.html#challenge_theory_dataset',
'andi_datasets/datasets_challenge.py')},
'andi_datasets.datasets_phenom': { 'andi_datasets.datasets_phenom.datasets_phenom': ( 'lib_nbs/datasets_phenom.html#datasets_phenom',
'andi_datasets/datasets_phenom.py'),
'andi_datasets.datasets_phenom.datasets_phenom.__init__': ( 'lib_nbs/datasets_phenom.html#datasets_phenom.__init__',
'andi_datasets/datasets_phenom.py'),
'andi_datasets.datasets_phenom.datasets_phenom._create_trajectories': ( 'lib_nbs/datasets_phenom.html#datasets_phenom._create_trajectories',
'andi_datasets/datasets_phenom.py'),
'andi_datasets.datasets_phenom.datasets_phenom._get_args': ( 'lib_nbs/datasets_phenom.html#datasets_phenom._get_args',
'andi_datasets/datasets_phenom.py'),
'andi_datasets.datasets_phenom.datasets_phenom._get_inputs_models': ( 'lib_nbs/datasets_phenom.html#datasets_phenom._get_inputs_models',
'andi_datasets/datasets_phenom.py'),
'andi_datasets.datasets_phenom.datasets_phenom._get_models': ( 'lib_nbs/datasets_phenom.html#datasets_phenom._get_models',
'andi_datasets/datasets_phenom.py'),
'andi_datasets.datasets_phenom.datasets_phenom._get_states': ( 'lib_nbs/datasets_phenom.html#datasets_phenom._get_states',
'andi_datasets/datasets_phenom.py'),
'andi_datasets.datasets_phenom.datasets_phenom._inspect_dic': ( 'lib_nbs/datasets_phenom.html#datasets_phenom._inspect_dic',
'andi_datasets/datasets_phenom.py'),
'andi_datasets.datasets_phenom.datasets_phenom._load_trajectories': ( 'lib_nbs/datasets_phenom.html#datasets_phenom._load_trajectories',
'andi_datasets/datasets_phenom.py'),
'andi_datasets.datasets_phenom.datasets_phenom._save_trajectories': ( 'lib_nbs/datasets_phenom.html#datasets_phenom._save_trajectories',
'andi_datasets/datasets_phenom.py'),
'andi_datasets.datasets_phenom.datasets_phenom.create_dataset': ( 'lib_nbs/datasets_phenom.html#datasets_phenom.create_dataset',
'andi_datasets/datasets_phenom.py')},
'andi_datasets.datasets_theory': { 'andi_datasets.datasets_theory.datasets_theory': ( 'lib_nbs/datasets_theory.html#datasets_theory',
'andi_datasets/datasets_theory.py'),
'andi_datasets.datasets_theory.datasets_theory.__init__': ( 'lib_nbs/datasets_theory.html#datasets_theory.__init__',
'andi_datasets/datasets_theory.py'),
'andi_datasets.datasets_theory.datasets_theory._add_noisy_diffusion': ( 'lib_nbs/datasets_theory.html#datasets_theory._add_noisy_diffusion',
'andi_datasets/datasets_theory.py'),
'andi_datasets.datasets_theory.datasets_theory._add_noisy_localization': ( 'lib_nbs/datasets_theory.html#datasets_theory._add_noisy_localization',
'andi_datasets/datasets_theory.py'),
'andi_datasets.datasets_theory.datasets_theory._create_trajectories': ( 'lib_nbs/datasets_theory.html#datasets_theory._create_trajectories',
'andi_datasets/datasets_theory.py'),
'andi_datasets.datasets_theory.datasets_theory._cut_trajectory': ( 'lib_nbs/datasets_theory.html#datasets_theory._cut_trajectory',
'andi_datasets/datasets_theory.py'),
'andi_datasets.datasets_theory.datasets_theory._get_models': ( 'lib_nbs/datasets_theory.html#datasets_theory._get_models',
'andi_datasets/datasets_theory.py'),
'andi_datasets.datasets_theory.datasets_theory._label_trajectories': ( 'lib_nbs/datasets_theory.html#datasets_theory._label_trajectories',
'andi_datasets/datasets_theory.py'),
'andi_datasets.datasets_theory.datasets_theory._load_trajectories': ( 'lib_nbs/datasets_theory.html#datasets_theory._load_trajectories',
'andi_datasets/datasets_theory.py'),
'andi_datasets.datasets_theory.datasets_theory._save_row': ( 'lib_nbs/datasets_theory.html#datasets_theory._save_row',
'andi_datasets/datasets_theory.py'),
'andi_datasets.datasets_theory.datasets_theory._save_trajectories': ( 'lib_nbs/datasets_theory.html#datasets_theory._save_trajectories',
'andi_datasets/datasets_theory.py'),
'andi_datasets.datasets_theory.datasets_theory.create_dataset': ( 'lib_nbs/datasets_theory.html#datasets_theory.create_dataset',
'andi_datasets/datasets_theory.py'),
'andi_datasets.datasets_theory.datasets_theory.create_noisy_diffusion_dataset': ( 'lib_nbs/datasets_theory.html#datasets_theory.create_noisy_diffusion_dataset',
'andi_datasets/datasets_theory.py'),
'andi_datasets.datasets_theory.datasets_theory.create_noisy_localization_dataset': ( 'lib_nbs/datasets_theory.html#datasets_theory.create_noisy_localization_dataset',
'andi_datasets/datasets_theory.py'),
'andi_datasets.datasets_theory.datasets_theory.create_segmented_dataset': ( 'lib_nbs/datasets_theory.html#datasets_theory.create_segmented_dataset',
'andi_datasets/datasets_theory.py')},
'andi_datasets.models_phenom': { 'andi_datasets.models_phenom.models_phenom': ( 'lib_nbs/models_phenom.html#models_phenom',
'andi_datasets/models_phenom.py'),
'andi_datasets.models_phenom.models_phenom.__init__': ( 'lib_nbs/models_phenom.html#models_phenom.__init__',
'andi_datasets/models_phenom.py'),
'andi_datasets.models_phenom.models_phenom._confinement_traj': ( 'lib_nbs/models_phenom.html#models_phenom._confinement_traj',
'andi_datasets/models_phenom.py'),
'andi_datasets.models_phenom.models_phenom._constraint_alpha': ( 'lib_nbs/models_phenom.html#models_phenom._constraint_alpha',
'andi_datasets/models_phenom.py'),
'andi_datasets.models_phenom.models_phenom._constraint_d': ( 'lib_nbs/models_phenom.html#models_phenom._constraint_d',
'andi_datasets/models_phenom.py'),
'andi_datasets.models_phenom.models_phenom._distribute_circular_compartments': ( 'lib_nbs/models_phenom.html#models_phenom._distribute_circular_compartments',
'andi_datasets/models_phenom.py'),
'andi_datasets.models_phenom.models_phenom._get_distance': ( 'lib_nbs/models_phenom.html#models_phenom._get_distance',
'andi_datasets/models_phenom.py'),
'andi_datasets.models_phenom.models_phenom._make_condensates': ( 'lib_nbs/models_phenom.html#models_phenom._make_condensates',
'andi_datasets/models_phenom.py'),
'andi_datasets.models_phenom.models_phenom._make_escape': ( 'lib_nbs/models_phenom.html#models_phenom._make_escape',
'andi_datasets/models_phenom.py'),
'andi_datasets.models_phenom.models_phenom._multiple_state_traj': ( 'lib_nbs/models_phenom.html#models_phenom._multiple_state_traj',
'andi_datasets/models_phenom.py'),
'andi_datasets.models_phenom.models_phenom._reflected_position': ( 'lib_nbs/models_phenom.html#models_phenom._reflected_position',
'andi_datasets/models_phenom.py'),
'andi_datasets.models_phenom.models_phenom._sample_diff_parameters': ( 'lib_nbs/models_phenom.html#models_phenom._sample_diff_parameters',
'andi_datasets/models_phenom.py'),
'andi_datasets.models_phenom.models_phenom._single_state_traj': ( 'lib_nbs/models_phenom.html#models_phenom._single_state_traj',
'andi_datasets/models_phenom.py'),
'andi_datasets.models_phenom.models_phenom._stokes': ( 'lib_nbs/models_phenom.html#models_phenom._stokes',
'andi_datasets/models_phenom.py'),
'andi_datasets.models_phenom.models_phenom._update_bound': ( 'lib_nbs/models_phenom.html#models_phenom._update_bound',
'andi_datasets/models_phenom.py'),
'andi_datasets.models_phenom.models_phenom.confinement': ( 'lib_nbs/models_phenom.html#models_phenom.confinement',
'andi_datasets/models_phenom.py'),
'andi_datasets.models_phenom.models_phenom.dimerization': ( 'lib_nbs/models_phenom.html#models_phenom.dimerization',
'andi_datasets/models_phenom.py'),
'andi_datasets.models_phenom.models_phenom.disp_fbm': ( 'lib_nbs/models_phenom.html#models_phenom.disp_fbm',
'andi_datasets/models_phenom.py'),
'andi_datasets.models_phenom.models_phenom.immobile_traps': ( 'lib_nbs/models_phenom.html#models_phenom.immobile_traps',
'andi_datasets/models_phenom.py'),
'andi_datasets.models_phenom.models_phenom.multi_state': ( 'lib_nbs/models_phenom.html#models_phenom.multi_state',
'andi_datasets/models_phenom.py'),
'andi_datasets.models_phenom.models_phenom.single_state': ( 'lib_nbs/models_phenom.html#models_phenom.single_state',
'andi_datasets/models_phenom.py')},
'andi_datasets.models_theory': { 'andi_datasets.models_theory.models_theory': ( 'lib_nbs/models_theory.html#models_theory',
'andi_datasets/models_theory.py'),
'andi_datasets.models_theory.models_theory.__init__': ( 'lib_nbs/models_theory.html#models_theory.__init__',
'andi_datasets/models_theory.py'),
'andi_datasets.models_theory.models_theory._oneD': ( 'lib_nbs/models_theory.html#models_theory._oned',
'andi_datasets/models_theory.py'),
'andi_datasets.models_theory.models_theory._threeD': ( 'lib_nbs/models_theory.html#models_theory._threed',
'andi_datasets/models_theory.py'),
'andi_datasets.models_theory.models_theory._twoD': ( 'lib_nbs/models_theory.html#models_theory._twod',
'andi_datasets/models_theory.py'),
'andi_datasets.models_theory.models_theory.attm': ( 'lib_nbs/models_theory.html#models_theory.attm',
'andi_datasets/models_theory.py'),
'andi_datasets.models_theory.models_theory.ctrw': ( 'lib_nbs/models_theory.html#models_theory.ctrw',
'andi_datasets/models_theory.py'),
'andi_datasets.models_theory.models_theory.fbm': ( 'lib_nbs/models_theory.html#models_theory.fbm',
'andi_datasets/models_theory.py'),
'andi_datasets.models_theory.models_theory.lw': ( 'lib_nbs/models_theory.html#models_theory.lw',
'andi_datasets/models_theory.py'),
'andi_datasets.models_theory.models_theory.sbm': ( 'lib_nbs/models_theory.html#models_theory.sbm',
'andi_datasets/models_theory.py')},
'andi_datasets.utils_challenge': { 'andi_datasets.utils_challenge._get_error_bounds': ( 'lib_nbs/utils_challenge.html#_get_error_bounds',
'andi_datasets/utils_challenge.py'),
'andi_datasets.utils_challenge.array_to_df': ( 'lib_nbs/utils_challenge.html#array_to_df',
'andi_datasets/utils_challenge.py'),
'andi_datasets.utils_challenge.changepoint_alpha_beta': ( 'lib_nbs/utils_challenge.html#changepoint_alpha_beta',
'andi_datasets/utils_challenge.py'),
'andi_datasets.utils_challenge.changepoint_assignment': ( 'lib_nbs/utils_challenge.html#changepoint_assignment',
'andi_datasets/utils_challenge.py'),
'andi_datasets.utils_challenge.check_no_changepoints': ( 'lib_nbs/utils_challenge.html#check_no_changepoints',
'andi_datasets/utils_challenge.py'),
'andi_datasets.utils_challenge.check_prediction_length': ( 'lib_nbs/utils_challenge.html#check_prediction_length',
'andi_datasets/utils_challenge.py'),
'andi_datasets.utils_challenge.codalab_scoring': ( 'lib_nbs/utils_challenge.html#codalab_scoring',
'andi_datasets/utils_challenge.py'),
'andi_datasets.utils_challenge.create_binary_segment': ( 'lib_nbs/utils_challenge.html#create_binary_segment',
'andi_datasets/utils_challenge.py'),
'andi_datasets.utils_challenge.df_to_array': ( 'lib_nbs/utils_challenge.html#df_to_array',
'andi_datasets/utils_challenge.py'),
'andi_datasets.utils_challenge.distribution_distance': ( 'lib_nbs/utils_challenge.html#distribution_distance',
'andi_datasets/utils_challenge.py'),
'andi_datasets.utils_challenge.ensemble_changepoint_error': ( 'lib_nbs/utils_challenge.html#ensemble_changepoint_error',
'andi_datasets/utils_challenge.py'),
'andi_datasets.utils_challenge.error_Ensemble_dataset': ( 'lib_nbs/utils_challenge.html#error_ensemble_dataset',
'andi_datasets/utils_challenge.py'),
'andi_datasets.utils_challenge.error_SingleTraj_dataset': ( 'lib_nbs/utils_challenge.html#error_singletraj_dataset',
'andi_datasets/utils_challenge.py'),
'andi_datasets.utils_challenge.extract_ensemble': ( 'lib_nbs/utils_challenge.html#extract_ensemble',
'andi_datasets/utils_challenge.py'),
'andi_datasets.utils_challenge.get_VIP': ( 'lib_nbs/utils_challenge.html#get_vip',
'andi_datasets/utils_challenge.py'),
'andi_datasets.utils_challenge.jaccard_between_segments': ( 'lib_nbs/utils_challenge.html#jaccard_between_segments',
'andi_datasets/utils_challenge.py'),
'andi_datasets.utils_challenge.jaccard_index': ( 'lib_nbs/utils_challenge.html#jaccard_index',
'andi_datasets/utils_challenge.py'),
'andi_datasets.utils_challenge.label_continuous_to_list': ( 'lib_nbs/utils_challenge.html#label_continuous_to_list',
'andi_datasets/utils_challenge.py'),
'andi_datasets.utils_challenge.label_filter': ( 'lib_nbs/utils_challenge.html#label_filter',
'andi_datasets/utils_challenge.py'),
'andi_datasets.utils_challenge.label_list_to_continuous': ( 'lib_nbs/utils_challenge.html#label_list_to_continuous',
'andi_datasets/utils_challenge.py'),
'andi_datasets.utils_challenge.listdir_nohidden': ( 'lib_nbs/utils_challenge.html#listdir_nohidden',
'andi_datasets/utils_challenge.py'),
'andi_datasets.utils_challenge.load_file_to_df': ( 'lib_nbs/utils_challenge.html#load_file_to_df',
'andi_datasets/utils_challenge.py'),
'andi_datasets.utils_challenge.majority_filter': ( 'lib_nbs/utils_challenge.html#majority_filter',
'andi_datasets/utils_challenge.py'),
'andi_datasets.utils_challenge.metric_anomalous_exponent': ( 'lib_nbs/utils_challenge.html#metric_anomalous_exponent',
'andi_datasets/utils_challenge.py'),
'andi_datasets.utils_challenge.metric_diffusion_coefficient': ( 'lib_nbs/utils_challenge.html#metric_diffusion_coefficient',
'andi_datasets/utils_challenge.py'),
'andi_datasets.utils_challenge.metric_diffusive_state': ( 'lib_nbs/utils_challenge.html#metric_diffusive_state',
'andi_datasets/utils_challenge.py'),
'andi_datasets.utils_challenge.multimode_dist': ( 'lib_nbs/utils_challenge.html#multimode_dist',
'andi_datasets/utils_challenge.py'),
'andi_datasets.utils_challenge.segment_assignment': ( 'lib_nbs/utils_challenge.html#segment_assignment',
'andi_datasets/utils_challenge.py'),
'andi_datasets.utils_challenge.segment_property_errors': ( 'lib_nbs/utils_challenge.html#segment_property_errors',
'andi_datasets/utils_challenge.py'),
'andi_datasets.utils_challenge.separate_prediction_values': ( 'lib_nbs/utils_challenge.html#separate_prediction_values',
'andi_datasets/utils_challenge.py'),
'andi_datasets.utils_challenge.single_changepoint_error': ( 'lib_nbs/utils_challenge.html#single_changepoint_error',
'andi_datasets/utils_challenge.py')},
'andi_datasets.utils_trajectories': { 'andi_datasets.utils_trajectories.bm1D': ( 'lib_nbs/utils_trajectories.html#bm1d',
'andi_datasets/utils_trajectories.py'),
'andi_datasets.utils_trajectories.find_nan_segments': ( 'lib_nbs/utils_trajectories.html#find_nan_segments',
'andi_datasets/utils_trajectories.py'),
'andi_datasets.utils_trajectories.gaussian': ( 'lib_nbs/utils_trajectories.html#gaussian',
'andi_datasets/utils_trajectories.py'),
'andi_datasets.utils_trajectories.inside_fov_dataset': ( 'lib_nbs/utils_trajectories.html#inside_fov_dataset',
'andi_datasets/utils_trajectories.py'),
'andi_datasets.utils_trajectories.normalize': ( 'lib_nbs/utils_trajectories.html#normalize',
'andi_datasets/utils_trajectories.py'),
'andi_datasets.utils_trajectories.normalize_fGN': ( 'lib_nbs/utils_trajectories.html#normalize_fgn',
'andi_datasets/utils_trajectories.py'),
'andi_datasets.utils_trajectories.pert': ( 'lib_nbs/utils_trajectories.html#pert',
'andi_datasets/utils_trajectories.py'),
'andi_datasets.utils_trajectories.plot_trajs': ( 'lib_nbs/utils_trajectories.html#plot_trajs',
'andi_datasets/utils_trajectories.py'),
'andi_datasets.utils_trajectories.regularize': ( 'lib_nbs/utils_trajectories.html#regularize',
'andi_datasets/utils_trajectories.py'),
'andi_datasets.utils_trajectories.sample_sphere': ( 'lib_nbs/utils_trajectories.html#sample_sphere',
'andi_datasets/utils_trajectories.py'),
'andi_datasets.utils_trajectories.segs_inside_fov': ( 'lib_nbs/utils_trajectories.html#segs_inside_fov',
'andi_datasets/utils_trajectories.py'),
'andi_datasets.utils_trajectories.trigo': ( 'lib_nbs/utils_trajectories.html#trigo',
'andi_datasets/utils_trajectories.py'),
'andi_datasets.utils_trajectories.trigo.ang_line': ( 'lib_nbs/utils_trajectories.html#trigo.ang_line',
'andi_datasets/utils_trajectories.py'),
'andi_datasets.utils_trajectories.trigo.ang_vec': ( 'lib_nbs/utils_trajectories.html#trigo.ang_vec',
'andi_datasets/utils_trajectories.py'),
'andi_datasets.utils_trajectories.trigo.circle_line_segment_intersection': ( 'lib_nbs/utils_trajectories.html#trigo.circle_line_segment_intersection',
'andi_datasets/utils_trajectories.py'),
'andi_datasets.utils_trajectories.trigo.rotate_vec': ( 'lib_nbs/utils_trajectories.html#trigo.rotate_vec',
'andi_datasets/utils_trajectories.py'),
'andi_datasets.utils_trajectories.trigo.seg_to_vec': ( 'lib_nbs/utils_trajectories.html#trigo.seg_to_vec',
'andi_datasets/utils_trajectories.py')},
'andi_datasets.utils_videos': { 'andi_datasets.utils_videos.convert_uint8': ( 'lib_nbs/utils_videos.html#convert_uint8',
'andi_datasets/utils_videos.py'),
'andi_datasets.utils_videos.func_poisson_noise': ( 'lib_nbs/utils_videos.html#func_poisson_noise',
'andi_datasets/utils_videos.py'),
'andi_datasets.utils_videos.mask': ( 'lib_nbs/utils_videos.html#mask',
'andi_datasets/utils_videos.py'),
'andi_datasets.utils_videos.play_video': ( 'lib_nbs/utils_videos.html#play_video',
'andi_datasets/utils_videos.py'),
'andi_datasets.utils_videos.psf_width': ( 'lib_nbs/utils_videos.html#psf_width',
'andi_datasets/utils_videos.py'),
'andi_datasets.utils_videos.transform_to_video': ( 'lib_nbs/utils_videos.html#transform_to_video',
'andi_datasets/utils_videos.py')}}}
| 37,462 | 144.770428 | 213 |
py
|
andi_datasets
|
andi_datasets-master/andi_datasets/datasets_phenom.py
|
# AUTOGENERATED! DO NOT EDIT! File to edit: ../source_nbs/lib_nbs/datasets_phenom.ipynb.
# %% auto 0
__all__ = ['datasets_phenom']
# %% ../source_nbs/lib_nbs/datasets_phenom.ipynb 2
from .models_phenom import models_phenom
import inspect
import numpy as np
import pandas as pd
import csv
from tqdm.auto import tqdm
import copy
import os
import warnings
# %% ../source_nbs/lib_nbs/datasets_phenom.ipynb 5
class datasets_phenom():
def __init__(self,
models_class = models_phenom()):
'''
This class generates, saves and loads datasets of trajectories simulated from various phenomenological diffusion models (available at andi_datasets.models_phenom).
'''
self.models_class = models_class
self._get_models()
def _get_models(self):
'''Loads the available models from the subclass'''
available_models = inspect.getmembers(self.models_class, inspect.ismethod)
available_models = available_models[1:][::-1] # we need this to get rid of the init
self.avail_models_name = [x[0] for x in available_models]
self.avail_models_func = [x[1] for x in available_models]
def _get_inputs_models(self, model, get_default_values = False):
model_f = self.avail_models_func[self.avail_models_name.index(model)]
defaults = inspect.getfullargspec(model_f).defaults
params = inspect.getfullargspec(model_f).args[1:]
if get_default_values:
return params, defaults
else:
return params
def _get_states(self):
''' Definition of the possible states found in the ANDI 2 challenge (phenom) and their
assigned label:
0: immobile; 1: confined; 2: brownian; 3: anomalous '''
self._states = ['immobile', 'confined', 'brownian', 'anomalous']
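# Illustrative introspection (editor's sketch, not part of the original file):
# the class discovers the models defined in models_phenom and their keyword
# arguments, e.g.
# dp = datasets_phenom()
# dp.avail_models_name                   # model names, e.g. 'single_state', ... (order may vary)
# dp._get_inputs_models('single_state')  # parameter names of that model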
# %% ../source_nbs/lib_nbs/datasets_phenom.ipynb 7
class datasets_phenom(datasets_phenom):
def create_dataset(self,
dics: list|dict|bool = False,
T: None|int = None,
N_model: None|int = None,
path: str = '',
save: bool = False,
load: bool = False):
'''
Given a list of dictionaries, generates trajectories of the demanded properties.
The only compulsory input for every dictionary is 'model', i.e. the model from which
trajectories must be generated. The rest of inputs are optional.
You can see the input parameters of the different models in andi_datasets.models_phenom,
        This function checks and handles the input dictionaries and manages the creation,
        loading and saving of trajectories.
Parameters
----------
dics : list, dictionary, bool
- if list or dictionary: the function generates trajectories with the properties stated in each dictionary.
- if bool: the function generates trajectories with default parameters set for the ANDI 2 challenge (phenom) for every available diffusion model.
T : int, None
- if int: overrides the values of trajectory length in the dictionaries.
- if None: uses the trajectory length values in the dictionaries.
            Caution: the minimum T over all dictionaries will be considered!
N_model : int, None
- if int: overrides the values of number of trajectories in the dictionaries.
- if None: uses the number of trajectories in the dictionaries
save : bool
If True, saves the generated dataset (see self._save_trajectories).
load : bool
If True, loads a dataset from path (see self._load_trajectories).
path : str
Path from where to save or load the dataset.
Returns
-------
tuple
- trajs (array TxNx2): particles' position. N considers here the sum of all trajectories generated from the input dictionaries.
- labels (array TxNx2): particles' labels (see ._multi_state for details on labels)
'''
self.T = T
self.N_model = N_model
self.path = path
self.dics = dics
'''Managing dictionaries'''
# If the input is a single dictionary, transform it to list
if isinstance(self.dics, dict): self.dics = [self.dics]
# if dics is False, we select trajectories from all models with default values
if self.dics is False: self.dics = [{'model': model} for model in self.avail_models_name]
'''Managing folders of the datasets'''
self.save = save
self.load = load
if self.save or self.load:
if self.load:
self.save = False
if not os.path.exists(self.path) and self.load:
raise FileNotFoundError('The directory from where you want to load the dataset does not exist')
if not os.path.exists(self.path) and self.save:
os.makedirs(self.path)
'''Create trajectories'''
trajs, labels = self._create_trajectories()
return trajs, labels
# %% ../source_nbs/lib_nbs/datasets_phenom.ipynb 12
class datasets_phenom(datasets_phenom):
def _create_trajectories(self):
'''
Given a list of dictionaries, generates trajectories of the demanded properties.
First checks in the .csv of each demanded model if a dataset of similar properties exists.
If it does, it loads it from the corresponding file.
Returns
----------
tuple
data_t array containing the generated trajectories
data_l array containing the corresponding labels.
'''
for dic in copy.deepcopy(self.dics):
df, dataset_idx = self._inspect_dic(dic)
# If the dataset does not yet exists
if dataset_idx is False:
# Retrieve name and function of diffusion model
model_f = self.avail_models_func[self.avail_models_name.index(dic['model'])]
# Create dictionary with only arguments
dic_args = dict(dic); dic_args.pop('model')
trajs, labels = model_f(**dic_args)
# Save the trajectories if asked
if self.save:
self._save_trajectories(trajs = trajs,
labels = labels,
dic = dic,
df = df,
dataset_idx = dataset_idx,
path = self.path)
else:
trajs, labels = self._load_trajectories(model_name = dic['model'],
dataset_idx = dataset_idx,
path = self.path)
# Stack dataset
try:
data_t = np.hstack((data_t, trajs))
data_l = np.hstack((data_l, labels))
except:
data_t = trajs
data_l = labels
return data_t, data_l
def _save_trajectories(self, trajs, labels, dic, df, dataset_idx, path):
'''
Given a set of trajectories and labels, saves two things:
        - In the .csv corresponding to the demanded model, all the input parameters of the generated dataset. This allows to keep track of what was created before.
- In a .npy file, the trajectories and labels generated.
'''
file_name = path+dic['model']+'_'+str(df.shape[0])+'.npy'
# Save information in CSV handler
        # DataFrame.append was removed in pandas 2.0, so we concatenate a one-row frame instead
        df = pd.concat([df, pd.DataFrame([dic])], ignore_index = True)
df.to_csv(path+dic['model']+'.csv')
# Save trajectories and labels
data = np.dstack((trajs, labels))
np.save(file_name, data)
def _load_trajectories(self, model_name, dataset_idx, path):
'''
Given the path for a dataset, loads the trajectories and labels
'''
file_name = path+model_name+'_'+str(dataset_idx)+'.npy'
data = np.load(file_name)
return data[:, :, :2], data[:, : , 2:]
# %% ../source_nbs/lib_nbs/datasets_phenom.ipynb 21
class datasets_phenom(datasets_phenom):
def _inspect_dic(self, dic):
'''
        Checks the information of the input dictionaries so that they fulfil the constraints of the program, completes missing information
with default values and then decides about loading/saving depending on parameters.
Parameters
----------
dic : dict
Dictionary with the information of the trajectories we want to generate
Returns
-----------
tuple
df: dataframe collecting the information of the dataset to load.
dataset_idx: location in the previous dataframe of the particular dataset we want to generate.
'''
# Add time and number of trajectories information
if self.N_model is not None:
dic['N'] = self.N_model
if self.T is not None:
dic['T'] = self.T
# Check if CSV with information of dataset exists. If not, create it
model_m = dic['model']
model_f = self.avail_models_func[self.avail_models_name.index(model_m)]
# Check arguments and defaults from model's function
args = inspect.getfullargspec(model_f).args[1:]
defaults = inspect.getfullargspec(model_f).defaults
        try:
            df = pd.read_csv(self.path+model_m+'.csv', index_col=0)
        except FileNotFoundError:
# convert to dataframe and add model
df = pd.DataFrame(columns = args+['model'])
# Assign missing keys in dic with default values
for arg, default in zip(args, defaults):
if arg not in dic.keys():
dic[arg] = default
# Check if updated keys of dic equal keys of csv.
if set(list(df.keys())) != set(list(dic.keys())):
raise ValueError('Input model dictionary does not match models properties')
# Check if the dataset already exists:
df_conditions = df.copy()
# Nones in dataframes are transformed into Nans. We change back this here
# but instead of putting None, we put False.
df_conditions = df_conditions.where(pd.notnull(df_conditions), False)
for key in dic:
# Transforming Nones to False in variables dictionaries (see problem with df just above)
if dic[key] is None: dic[key] = False
# We need to transform it to str to do a fair comparison between matrices (e.g. transition matrix, Ds, alphas,...)
df_conditions = df_conditions.loc[(df_conditions[key].astype(str) == str(dic[key]))]
if len(df_conditions.index) == 0:
break
# If dataset exists
if len(df_conditions.index) > 0:
# if the dataset exists and save was True, do not save but load
if self.save:
wrn_str = f'The dataset you want to save already exists (file: {model_m}_{df_conditions.index[0]}.npy). Switching to Load mode.'
warnings.warn(wrn_str)
dataset_idx = df_conditions.index[0]
elif self.load:
dataset_idx = df_conditions.index[0]
else:
dataset_idx = False
        # If the dataset does not exist
else:
if self.load:
raise ValueError('The dataset you want to load does not exist.')
            else: # If the dataset does not exist, mark it as not found (dataset_idx = False).
                # This allows to mix saving and loading.
dataset_idx = False
return df, dataset_idx
# %% ../source_nbs/lib_nbs/datasets_phenom.ipynb 34
class datasets_phenom(datasets_phenom):
def _get_args(self, model, return_defaults = False):
'''
Given the name of a diffusion model, return its inputs arguments.
Parameters
----------
model : str
Name of the diffusion model (see self.available_models_name)
return_defaults : bool
If True, the function will also return the default values of each input argument.
Returns
-------
tuple
args (list): list of input arguments.
defaults (optional, list): list of default value for the input arguments.
'''
model_f = self.avail_models_func[self.avail_models_name.index(model)]
# Check arguments and defaults from model's function
args = inspect.getfullargspec(model_f).args[1:]
defaults = inspect.getfullargspec(model_f).defaults
if return_defaults:
return args, defaults
else:
return args
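# %% Usage sketch
if __name__ == "__main__":
    # A minimal, hedged example of create_dataset: the model name is read
    # from the class itself rather than hard-coded, and all other model
    # parameters fall back to their defaults. Shapes follow the docstring
    # above (trajs: T x N x 2).
    dp = datasets_phenom()
    dic = {'model': dp.avail_models_name[0], 'N': 10, 'T': 200}
    trajs, labels = dp.create_dataset(dics=dic)
    print(trajs.shape, labels.shape)  # expected: (200, 10, 2) and (200, 10, n_labels)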
| 13,682 | 41.626168 | 176 |
py
|
andi_datasets
|
andi_datasets-master/andi_datasets/analysis.py
|
# AUTOGENERATED! DO NOT EDIT! File to edit: ../source_nbs/lib_nbs/analysis.ipynb.
# %% auto 0
__all__ = ['get_angle', 'dataset_angles', 'msd_analysis', 'vacf', 'CH_changepoints']
# %% ../source_nbs/lib_nbs/analysis.ipynb 2
import numpy as np
import math
# %% ../source_nbs/lib_nbs/analysis.ipynb 5
def get_angle(a:tuple, # 2d position point A
b:tuple, # 2d position point B
c:tuple # 2d position point C
) -> tuple: # angle between segments AB and BC points
''' Calculates the angle between the segments generate by three points '''
ang = math.degrees(math.atan2(c[1]-b[1], c[0]-b[0]) - math.atan2(a[1]-b[1], a[0]-b[0]))
return ang + 360 if ang < 0 else ang
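# For example, with A = (0, 0), B = (1, 0) and C = (1, 1):
#   get_angle((0, 0), (1, 0), (1, 1)) -> 270.0
# Angles are returned in degrees, measured counter-clockwise, in [0, 360).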
def dataset_angles(trajs:list, # set of trajectories from which to calculate angles
) -> list: # list of angles between displacements
'''Given a set of trajectories, calculate all angles between displacements'''
angles = []
for traj in trajs:
for a, b, c in zip(traj[:, :-2].transpose(), traj[:, 1:-1].transpose(), traj[:, 2:].transpose()):
angles.append(get_angle(a, b, c))
return angles
# %% ../source_nbs/lib_nbs/analysis.ipynb 7
class msd_analysis():
def __init__(self):
''' Contains mean squared displacement (MSD) based methods to analyze trajectories. '''
def tamsd(self,
traj:np.ndarray,
t_lags:np.ndarray):
'''
        Calculates the time average mean squared displacement (TA-MSD) of a trajectory at various time lags.
Parameters
----------
traj : np.array
            Trajectory from which to calculate the TA-MSD.
t_lags : list | np.array
Time lags used for the TA-MSD
Returns
-------
np.array
            TA-MSD of the given trajectory
'''
tamsd = np.zeros_like(t_lags, dtype= float)
for idx, t in enumerate(t_lags):
for p in range(len(traj)-t):
tamsd[idx] += (traj[p]-traj[p+t])**2
tamsd[idx] /= len(traj)-t
return tamsd
def get_diff_coeff(self,
traj:np.ndarray,
t_lags:list = None):
'''
Calculates the diffusion coefficient of a trajectory by means of the linear
fitting of the TA-MSD.
Parameters
----------
traj : np.array
            1D trajectory from which to calculate the TA-MSD.
        t_lags : list | None
Time lags used for the TA-MSD.
Returns
-------
np.array
Diffusion coefficient of the given trajectory.
'''
if not t_lags:
N_t_lags = max(4, int(len(traj)*0.1))
t_lags = np.arange(1, N_t_lags)
        tamsd_vals = self.tamsd(traj, t_lags)
        return np.polyfit(t_lags, tamsd_vals, deg = 1)[0]/2
def get_exponent(self,
traj,
t_lags:list = None):
'''
        Calculates the anomalous exponent of a trajectory by means of a linear
        fit of the logarithm of the TA-MSD.
Parameters
----------
traj : np.array
            1D trajectory from which to calculate the TA-MSD.
        t_lags : list | None
Time lags used for the TA-MSD.
Returns
-------
np.array
Anomalous exponent of the given trajectory.
'''
if not t_lags:
N_t_lags = max(4, int(len(traj)*0.1))
t_lags = np.arange(1, N_t_lags)
        tamsd_vals = self.tamsd(traj, t_lags)
        return np.polyfit(np.log(t_lags), np.log(tamsd_vals), deg = 1)[0]
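# %% Usage sketch
if __name__ == "__main__":
    # A hedged sanity check: a 1D Brownian walk with unit-variance steps has
    # MSD(t) = t, so the fitted diffusion coefficient should be close to 0.5
    # (slope / 2) and the anomalous exponent close to 1.
    rng = np.random.default_rng(0)
    traj = np.cumsum(rng.standard_normal(1000))
    msd = msd_analysis()
    print(msd.get_diff_coeff(traj))  # ~0.5
    print(msd.get_exponent(traj))    # ~1.0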
# %% ../source_nbs/lib_nbs/analysis.ipynb 16
def vacf(trajs,
delta_t:int | list | np.ndarray = 1,
taus:bool | list | np.ndarray = None):
'''
Calculates the velocity autocorrelation function for
the given set of trajectories.
Parameters
----------
trajs : np.array
NxT matrix containing N trajectories of length T.
    delta_t : int | list | array
        Time lag(s) used to compute the velocities from the positions.
    taus : bool | list | array
        Time lags at which the velocity autocorrelation is calculated.
Returns
-------
np.array
VACF of the given trajectories and the given time windows.
'''
if isinstance(delta_t, int): delta_t = [delta_t]
if taus is None: taus = np.arange(1, trajs.shape[1]).astype(int)
V = np.zeros((len(delta_t), len(taus)))
for idx_d, delta in enumerate(delta_t):
# Calculate the velocity
        velocity = trajs[:, delta:] - trajs[:, :-delta]
        velocity /= delta
for idx_t, tau in enumerate(taus):
if tau == 0:
V[idx_d, idx_t] = (velocity**2).mean()
else:
V[idx_d, idx_t] = (velocity[:, :-tau]*velocity[:, tau:]).mean()
V[idx_d, :] /= V[idx_d, 0]
return V
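# %% Usage sketch
if __name__ == "__main__":
    # A hedged sanity check: for Brownian trajectories the displacements are
    # uncorrelated, so the normalized VACF should be ~1 at tau = 0 and ~0 for
    # all tau >= 1.
    rng = np.random.default_rng(0)
    trajs = np.cumsum(rng.standard_normal((100, 500)), axis=1)
    V = vacf(trajs, delta_t=1, taus=np.arange(0, 10))
    print(V[0])  # expected: [1., ~0, ~0, ...]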
# %% ../source_nbs/lib_nbs/analysis.ipynb 20
from scipy.spatial import ConvexHull
def CH_changepoints(trajs,
tau:int = 10,
metric:{'volume', 'area'} = 'volume'):
'''
    Computes the change points of a multi-state trajectory based on the convex hull approach proposed in PRE 96 (022144), 2017.
Parameters
----------
trajs : np.array
NxT matrix containing N trajectories of length T.
tau : int
Time window over which the CH is calculated.
metric : {'volume', 'area'}
Calculate change points w.r.t. area or volume of CH.
Returns
-------
list
Change points of the given trajectory.
'''
CPs = []
for traj in trajs:
traj = np.array(traj)
Sd = np.zeros(traj.shape[0]-2*tau)
for k in range(traj.shape[0]-2*tau):
if metric == 'volume':
Sd[k] = ConvexHull(traj[k:(k+2*tau)]).volume
elif metric == 'area':
Sd[k] = ConvexHull(traj[k:(k+2*tau)]).area
below_mean = Sd < Sd.mean()
cp_traj = np.argwhere(below_mean[1:] != below_mean[:-1])+1
CPs.append(cp_traj+tau)
return CPs
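# %% Usage sketch
if __name__ == "__main__":
    # A hedged example: a single 2D trajectory that switches from slow to
    # fast diffusion at frame 100; the detected change points should cluster
    # around that frame.
    rng = np.random.default_rng(0)
    steps = np.concatenate([0.1 * rng.standard_normal((100, 2)),
                            2.0 * rng.standard_normal((100, 2))])
    traj = np.cumsum(steps, axis=0)
    print(CH_changepoints([traj], tau=10))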
| 6,405 | 29.94686 | 124 |
py
|
andi_datasets
|
andi_datasets-master/andi_datasets/datasets_challenge.py
|
# AUTOGENERATED! DO NOT EDIT! File to edit: ../source_nbs/lib_nbs/datasets_challenge.ipynb.
# %% auto 0
__all__ = ['challenge_theory_dataset', 'challenge_phenom_dataset']
# %% ../source_nbs/lib_nbs/datasets_challenge.ipynb 2
import numpy as np
from tqdm.auto import tqdm
import pandas as pd
import os
import csv
from .utils_challenge import segs_inside_fov, label_continuous_to_list, extract_ensemble, label_filter, df_to_array, get_VIP
from .datasets_phenom import datasets_phenom
from .datasets_theory import datasets_theory
from .utils_trajectories import normalize
from .utils_videos import transform_to_video, psf_width
# %% ../source_nbs/lib_nbs/datasets_challenge.ipynb 5
def challenge_theory_dataset(N:np.ndarray|int = 1000,
max_T:int = 1000,
min_T:int = 10,
tasks:list|int = [1, 2, 3],
dimensions:list|int = [1, 2, 3],
load_dataset:{'True', 'False'} = False,
save_dataset:{'True', 'False'} = False,
path_datasets:str = '',
load_labels:{'True', 'False'} = True,
load_trajectories:{'True', 'False'} = False,
save_trajectories:{'True', 'False'} = False,
path_trajectories:str = 'datasets/',
N_save:int = 1000,
t_save:int = 1000,
return_noise:{'True', 'False'} = False):
'''
    Creates a dataset similar to the one given in the ANDI 1 challenge.
Check the webpage of the challenge for more details. The default values
are similar to the ones used to generate the available dataset.
The function returns 6 variables, three variables for the trajectories and three
for the corresponding labels. Each variable is a list of three lists. Each of the
three lists corresponds to a given dimension, in ascending order. If one of the
tasks/dimensions was not calculated, the given list will be empty.
See the tutorials in our Github repository to learn about this function.
Parameters
----------
N :
        Number of trajectories per class (i.e. size = # models x # classes). If int, all classes have the same number.
max_T :
Maximum length of the trajectories in the dataset.
min_T :
Minimum length of the trajectories in the dataset.
tasks :
Task(s) of the ANDI challenge I for which datasets will be generated.
dimensions :
Dimension(s) for which trajectories will be generated. Three possible values: 1, 2 and 3.
load_dataset :
If True, the module loads existing datasets from the files task{}.txt and ref{}.txt.
save_dataset :
If True, the module saves the datasets in a .txt following the competition format.
path_datasets :
Path from where to load the dataset.
load_labels :
If False, only loads trajectories and avoids the files refX.txt.
load_trajectories :
If True, the module loads the trajectories of an .h5 file.
save_trajectories :
If True, the module saves a .h5 file for each model considered, with N_save trajectories and T = T_save.
path_trajectories :
Path from where to load trajectories.
N_save :
        Number of trajectories to save for each exponent/model. Advice: at the beginning,
        save a big dataset (i.e. with the default t_save and N_save), which allows you
        to later load any other combination of T and N.
t_save :
Length of the trajectories to be saved. See comments on N_save.
return_noise :
If True, returns the amplitudes of the noises added to the trajectories.
Returns
-------
x : multiple
Xn (lists): trajectories
Yn (lists): labels
loc_noise_tn (lists): localization noise amplitudes
diff_noise_tn (lists): variance of the diffusion noise
'''
print(f'Creating a dataset for task(s) {tasks} and dimension(s) {dimensions}.')
# Checking inputs for errors
if isinstance(dimensions, int) or isinstance(dimensions, float):
dimensions = [dimensions]
if isinstance(tasks, int) or isinstance(tasks, float):
tasks = [tasks]
# Define return datasets
X1 = [[],[],[]]; X2 = [[],[],[]]; X3 = [[],[],[]]
Y1 = [[],[],[]]; Y2 = [[],[],[]]; Y3 = [[],[],[]]
if return_noise:
loc_noise_t1 = [[],[],[]]; loc_noise_t2 = [[],[],[]]; loc_noise_t3 = [[],[],[]]
diff_noise_t1 = [[],[],[]]; diff_noise_t2 = [[],[],[]]; diff_noise_t3 = [[],[],[]]
if load_dataset or save_dataset:
# Define name of result files, if needed
task1 = path_datasets+'task1.txt'; ref1 = path_datasets+'ref1.txt'
task2 = path_datasets+'task2.txt'; ref2 = path_datasets+'ref2.txt'
task3 = path_datasets+'task3.txt'; ref3 = path_datasets+'ref3.txt'
# Loading the datasets if chosen.
if load_dataset:
for idx, (task, lab) in enumerate(zip([task1, task2, task3], [ref1, ref2, ref3])):
if idx+1 in tasks:
try:
t = csv.reader(open(task,'r'), delimiter=';',
lineterminator='\n',quoting=csv.QUOTE_NONNUMERIC)
if load_labels:
l = csv.reader(open(lab,'r'), delimiter=';',
lineterminator='\n',quoting=csv.QUOTE_NONNUMERIC)
except:
raise FileNotFoundError(f'File for task {idx+1} not found.')
if load_labels:
for trajs, labels in zip(t, l):
if task == task1:
X1[int(trajs[0])-1].append(trajs[1:])
Y1[int(trajs[0])-1].append(labels[1])
if task == task2:
X2[int(trajs[0])-1].append(trajs[1:])
Y2[int(trajs[0])-1].append(labels[1])
if task == task3:
X3[int(trajs[0])-1].append(trajs[1:])
Y3[int(trajs[0])-1].append(labels[1:])
else:
for trajs in t:
if task == task1:
X1[int(trajs[0])-1].append(trajs[1:])
if task == task2:
X2[int(trajs[0])-1].append(trajs[1:])
if task == task3:
X3[int(trajs[0])-1].append(trajs[1:])
# Checking that the dataset exists in the files
for dim in dimensions:
if task == task1 and X1[dim-1] == []:
raise FileNotFoundError('Dataset for dimension '+str(dim)+' not contained in file task1.txt.')
if task == task2 and X2[dim-1] == []:
raise FileNotFoundError('Dataset for dimension '+str(dim)+' not contained in file task2.txt.')
if task == task3 and X3[dim-1] == []:
raise FileNotFoundError('Dataset for dimension '+str(dim)+' not contained in file task3.txt.')
if load_labels:
return X1, Y1, X2, Y2, X3, Y3
else:
return X1, X2, X3
exponents = np.arange(0.05, 2.01, 0.05)
n_exp = len(exponents)
    # Trajectories per model and exponent. Arbitrarily chosen to obtain balanced classes
n_per_model = np.ceil(1.6*N/5)
subdif, superdif = n_exp//2, n_exp//2+1
n_per_class = np.zeros((len(datasets_theory().avail_models_name), n_exp))
# ctrw, attm
n_per_class[:2, :subdif] = np.ceil(n_per_model/subdif)
# fbm
n_per_class[2, :] = np.ceil(n_per_model/(n_exp-1))
n_per_class[2, exponents == 2] = 0 # FBM can't be ballistic
# lw
n_per_class[3, subdif:] = np.ceil((n_per_model/superdif)*0.8)
# sbm
n_per_class[4, :] = np.ceil(n_per_model/n_exp)
# Define return datasets
X1 = [[],[],[]]; X2 = [[],[],[]]; X3 = [[],[],[]]
Y1 = [[],[],[]]; Y2 = [[],[],[]]; Y3 = [[],[],[]]
# Initialize the files
if save_dataset:
if 1 in tasks:
csv.writer(open(task1,'w'), delimiter=';', lineterminator='\n',)
csv.writer(open(ref1,'w'), delimiter=';', lineterminator='\n',)
elif 2 in tasks:
csv.writer(open(task2,'w'), delimiter=';', lineterminator='\n',)
csv.writer(open(ref2,'w'), delimiter=';',lineterminator='\n',)
elif 3 in tasks:
csv.writer(open(task3,'w'), delimiter=';', lineterminator='\n',)
csv.writer(open(ref3,'w'), delimiter=';',lineterminator='\n',)
for dim in dimensions:
# Generate the dataset of the given dimension
print(f'Generating dataset for dimension {dim}.')
dataset = datasets_theory().create_dataset(T = max_T, N_models = n_per_class, exponents = exponents,
dimension = dim, models = np.arange(len(datasets_theory().avail_models_name)),
load_trajectories = False, save_trajectories = False, N_save = 100,
path = path_trajectories)
# Normalize trajectories
n_traj = dataset.shape[0]
norm_trajs = normalize(dataset[:, 2:].reshape(n_traj*dim, max_T))
dataset[:, 2:] = norm_trajs.reshape(dataset[:, 2:].shape)
# Save unnoisy dataset for task3
dataset_t3 = dataset.copy()
# Add localization error, Gaussian noise with sigma = [0.1, 0.5, 1]
loc_error_amplitude = np.random.choice(np.array([0.1, 0.5, 1]), size = n_traj).repeat(dim)
loc_error = (np.random.randn(n_traj*dim, int(max_T)).transpose()*loc_error_amplitude).transpose()
dataset = datasets_theory().create_noisy_localization_dataset(dataset, dimension = dim, T = max_T, noise_func = loc_error)
# Add random diffusion coefficients
trajs = dataset[:, 2:].reshape(n_traj*dim, max_T)
displacements = trajs[:, 1:] - trajs[:, :-1]
# Get new diffusion coefficients and displacements
diffusion_coefficients = np.random.randn(n_traj).repeat(dim)
new_displacements = (displacements.transpose()*diffusion_coefficients).transpose()
# Generate new trajectories and add to dataset
new_trajs = np.cumsum(new_displacements, axis = 1)
new_trajs = np.concatenate((np.zeros((new_trajs.shape[0], 1)), new_trajs), axis = 1)
dataset[:, 2:] = new_trajs.reshape(dataset[:, 2:].shape)
# Task 1 - Anomalous exponent
if 1 in tasks:
# Creating semi-balanced datasets
n_exp_max = int(np.ceil(1.1*N/n_exp))
for exponent in exponents:
dataset_exp = dataset[dataset[:, 1] == exponent].copy()
dataset_exp = dataset_exp[:n_exp_max, :]
try:
dataset_1 = np.concatenate((dataset_1, dataset_exp), axis = 0)
except:
dataset_1 = dataset_exp
# Shuffle trajectories and noise
p = np.random.permutation(dataset_1.shape[0])
diffusion_coefficients_t1 = diffusion_coefficients[p].copy()
loc_error_amplitude_t1 = loc_error_amplitude[p].copy()
dataset_1 = dataset_1[p]
# Saving noise with correct number of elements
if return_noise:
loc_noise_t1[dim-1] = loc_error_amplitude_t1[:N]
diff_noise_t1[dim-1] = diffusion_coefficients_t1[:N]
for traj in dataset_1[:N, :]:
# Cutting trajectories
cut_T = np.random.randint(min_T, max_T)
traj_cut = datasets_theory()._cut_trajectory(traj[2:], cut_T, dim=dim).tolist()
# Saving dataset
X1[dim-1].append(traj_cut)
Y1[dim-1].append(np.around(traj[1], 2))
if save_dataset:
datasets_theory()._save_row(np.append(dim, traj_cut), task1)
datasets_theory()._save_row(np.append(dim, np.around([traj[1]], 2)), ref1)
# Task 2 - Diffusion model
if 2 in tasks:
# Creating semi-balanced datasets
            # If the number of trajectories N is too small, consider at least
# one trajectory per model
n_per_model = max(1, int(1.1*N/5))
for model in range(5):
dataset_mod = dataset[dataset[:, 0] == model].copy()
dataset_mod = dataset_mod[:n_per_model, :]
try:
dataset_2 = np.concatenate((dataset_2, dataset_mod), axis = 0)
except:
dataset_2 = dataset_mod
# Shuffle trajectories and noise
p = np.random.permutation(dataset_2.shape[0])
diffusion_coefficients_t2 = diffusion_coefficients[p].copy()
loc_error_amplitude_t2 = loc_error_amplitude[p].copy()
dataset_2 = dataset_2[p]
            # Saving noise with the correct number of elements
if return_noise:
loc_noise_t2[dim-1] = loc_error_amplitude_t2[:N]
diff_noise_t2[dim-1] = diffusion_coefficients_t2[:N]
for traj in dataset_2[:N, :]:
# Cutting trajectories
cut_T = np.random.randint(min_T, max_T)
traj_cut = datasets_theory()._cut_trajectory(traj[2:], cut_T, dim=dim).tolist()
# Saving dataset
X2[dim-1].append(traj_cut)
Y2[dim-1].append(np.around(traj[0], 2))
if save_dataset:
datasets_theory()._save_row(np.append(dim, traj_cut), task2)
datasets_theory()._save_row(np.append(dim, traj[0]), ref2)
        # Task 3 - Segmented trajectories
if 3 in tasks:
# Create a copy of the dataset and use it to create the
# segmented dataset
dataset_copy1 = dataset_t3.copy()
dataset_copy2 = dataset_t3.copy()
# Shuffling the hard way
order_dataset1 = np.random.choice(np.arange(n_traj), n_traj, replace = False)
order_dataset2 = np.random.choice(np.arange(n_traj), n_traj, replace = False)
dataset_copy1 = dataset_copy1[order_dataset1]
            dataset_copy2 = dataset_copy2[order_dataset2]
seg_dataset = datasets_theory().create_segmented_dataset(dataset_copy1, dataset_copy2, dimension = dim)
seg_dataset = np.c_[np.ones(n_traj)*dim, seg_dataset]
# Checking that there are no segmented trajectories with same exponent and model
# in each segment. First we compute the difference between labels
diff = np.abs(seg_dataset[:, 2]-seg_dataset[:, 4]) + np.abs(seg_dataset[:, 3]-seg_dataset[:, 5])
# Then, if there are repeated labels, we eliminate those trajectories
while len(np.argwhere(diff == 0)) > 0:
seg_dataset = np.delete(seg_dataset, np.argwhere(diff == 0), axis = 0)
# If the size of the dataset is too small, we generate new segmented trajectories
# and add them to the dataset
if seg_dataset.shape[0] < N:
# Shuffling the hard way
new_order_dataset1 = np.random.choice(np.arange(n_traj), n_traj, replace = False)
new_order_dataset2 = np.random.choice(np.arange(n_traj), n_traj, replace = False)
dataset_copy1 = dataset_copy1[new_order_dataset1]
                    dataset_copy2 = dataset_copy2[new_order_dataset2]
order_dataset1 = np.concatenate((order_dataset1, new_order_dataset1))
order_dataset2 = np.concatenate((order_dataset2, new_order_dataset2))
aux_seg_dataset = datasets_theory().create_segmented_dataset(dataset_copy1, dataset_copy2, dimension = dim)
aux_seg_dataset = np.c_[np.ones(aux_seg_dataset.shape[0])*dim, aux_seg_dataset]
seg_dataset = np.concatenate((seg_dataset, aux_seg_dataset), axis = 0)
diff = np.abs(seg_dataset[:, 2]-seg_dataset[:, 4]) + np.abs(seg_dataset[:, 3]-seg_dataset[:, 5])
else:
break
# Add localization error, Gaussian noise with sigma = [0.1, 0.5, 1]
loc_error_amplitude_t3 = np.random.choice(np.array([0.1, 0.5, 1]), size = seg_dataset.shape[0]).repeat(dim)
loc_error_t3 = (np.random.randn(seg_dataset.shape[0]*dim, 200).transpose()*loc_error_amplitude_t3).transpose()
seg_dataset[:, 4:] = datasets_theory().create_noisy_localization_dataset(seg_dataset[:, 4:],
dimension = dim, T = 200,
noise_func = loc_error_t3)
# Add random diffusion coefficients
trajs = seg_dataset[:, 6:].reshape(seg_dataset.shape[0]*dim, 200)
displacements = trajs[:, 1:] - trajs[:, :-1]
# Get new diffusion coefficients and displacements
diffusion_coefficients_t3 = np.random.randn(seg_dataset.shape[0]).repeat(dim)
new_displacements = (displacements.transpose()*diffusion_coefficients_t3).transpose()
# Generate new trajectories and add to dataset
new_trajs = np.cumsum(new_displacements, axis = 1)
new_trajs = np.concatenate((np.zeros((new_trajs.shape[0], 1)), new_trajs), axis = 1)
seg_dataset[:, 6:] = new_trajs.reshape(seg_dataset[:, 6:].shape)
if return_noise:
loc_noise_t3[dim-1] = loc_error_amplitude_t3[:N].tolist()
diff_noise_t3[dim-1] = diffusion_coefficients_t3[:N].tolist()
X3[dim-1] = seg_dataset[:N, 6:]
Y3[dim-1] = seg_dataset[:N, :6]
if save_dataset:
for label, traj in zip(seg_dataset[:N, :6], seg_dataset[:N, 6:]):
datasets_theory()._save_row(np.append(dim, traj), task3)
datasets_theory()._save_row(np.around(label, 2), ref3)
if return_noise:
return [X1, Y1, loc_noise_t1, diff_noise_t1,
X2, Y2, loc_noise_t2, diff_noise_t2,
X3, Y3, loc_noise_t3, diff_noise_t3]
else:
return X1, Y1, X2, Y2, X3, Y3
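# %% Usage sketch
if __name__ == "__main__":
    # A small, hedged example: generate a 2D dataset for task 1 (anomalous
    # exponent regression) only. All other tasks/dimensions come back as
    # empty lists, as described in the docstring.
    X1, Y1, X2, Y2, X3, Y3 = challenge_theory_dataset(
        N=20, max_T=100, min_T=20, tasks=1, dimensions=2)
    print(len(X1[1]), Y1[1][:5])  # 20 trajectories and their first 5 exponents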
# %% ../source_nbs/lib_nbs/datasets_challenge.ipynb 7
class _defaults_andi2:
'''
This class defines the default values set for the ANDI 2 challenge.
'''
def __init__(self):
# General parameters
self.T = 500 # Length of simulated trajectories
self._min_T = 20 # Minimal length of output trajectories
self.FOV_L = 128 # Length side of the FOV (px)
self.L = 1.8*self.FOV_L # Length of the simulated environment
self.D = 1 # Baseline diffusion coefficient (px^2/frame)
self.density = 2 # Particle density
self.N = 50 # Number of particle in the whole experiment
self.sigma_noise = 0.12 # Variance of the localization noise
self.label_filter = lambda x: label_filter(x, window_size = 5, min_seg = 3)
# %% ../source_nbs/lib_nbs/datasets_challenge.ipynb 9
def _get_dic_andi2(model):
'''
Given the number label of diffusion model, returns a default
dictionary of the model's parameters to be fed to create_dataset
The numeration is as follow:
1: single state
2: N-state
3: immobilization
4: dimerization
5: confinement
Parameters
----------
        model : int in [1,5]
Number of the diffusion model
Returns
-------
dic : dictionary
Dictionary containing the default parameters for ANDI 2 of the indicated model.
'''
dic = {'N': _defaults_andi2().N,
'T': _defaults_andi2().T,
'L': _defaults_andi2().L}
# alpha and D for single-state and immobilization
if model == 1 or model == 3:
        dic.update({'Ds': [_defaults_andi2().D, _defaults_andi2().D*0.01], # mean and variance for D
'alphas': np.array([np.random.rand()*(1.5-0.5)+0.5, 0.01])})
# alphas and Ds for 2-state, confinement and dimerization
if model == 2 or model == 4 or model == 5:
fast_D = _defaults_andi2().D + np.random.randn()*_defaults_andi2().D*0.01
        slow_D = fast_D*(np.random.rand()*(0.1-0.01)+0.01) # slow state: between 1% and 10% of the fast D
alpha1 = np.random.rand()*(1.2-0.8)+0.8
# The second state will be at least 0.2 afar. We make sure not being
# outside [0,2]
alpha2 = alpha1 - (np.random.rand()*(0.6-0.2)+0.2)
dic.update({'Ds': np.array([[fast_D, 0.01],
[slow_D, 0.01]]),
'alphas': np.array([[alpha1, 0.01],
[alpha2, 0.01]])})
    # Particle/trap radius and binding and unbinding probs for dimerization and immobilization
if model == 3 or model == 4:
dic.update({'Pu': 0.01, # Unbinding probability
                    'Pb': 1}) # Binding probability
if model == 1:
dic.update({'model': datasets_phenom().avail_models_name[0]})
if model == 2:
dic.update({'model': datasets_phenom().avail_models_name[1],
'M': np.array([[0.99, 0.01], # Transition Matrix
[0.01, 0.99]]),
                    'return_state_num': True # To get the state numeration back, hence labels.shape = TxNx4
})
if model == 3:
dic.update({'model': datasets_phenom().avail_models_name[2],
'Nt': 300, # Number of traps (density = 1 currently)
'r': 0.4} # Size of trap
)
if model == 4:
dic.update({'model': datasets_phenom().avail_models_name[3],
'r': 0.6, # Size of particles
'return_state_num': True # To get the state numeration back, hence labels.shape = TxNx4
})
if model == 5:
dic.update({'model': datasets_phenom().avail_models_name[4],
'r': 5,
'Nc': 30,
'trans': 0.1})
return dic
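# %% Usage sketch
if __name__ == "__main__":
    # A hedged example: inspect the default ANDI 2 dictionary for the
    # single-state model (model 1). Note that some entries (e.g. alphas)
    # are random draws, so repeated calls give different values.
    print(_get_dic_andi2(1))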
# %% ../source_nbs/lib_nbs/datasets_challenge.ipynb 11
def challenge_phenom_dataset(
experiments = 5,
dics = None,
repeat_exp = True,
num_fovs = 20,
return_timestep_labs = False,
save_data = False,
path = 'data/',
prefix = '',
get_video = False, num_vip = None, get_video_masks = False
):
'''
    Creates a dataset with the same structure as the ones given in the ANDI 2 challenge.
    Default values for the various diffusion models have been set so as to be in the same ranges as the ones expected for the
    challenge.
For details, check the ANDI 2 challenge webpage (soon).
    This function will generate as many experiments (each associated to one of the diffusion models) as demanded.
    There are two ways of defining that:
        - Give the number of experiments (and optional parameters such as repeat_exp) to create. The diffusion
        parameters are then taken from the default values in datasets_phenom._defaults_andi2.
        - Feed a list of dictionaries (dics) from which data will be generated.
    For each experiment, as many fields of view as wanted can be generated.
Parameters
----------
experiments : int, list
- if int: Number of experiments to generate. Each experiment is
generated from one of the available diffusion models.
        - if list: diffusion models to generate (numbering starts at 1!)
dics : dictionary, list of dics
If given, uses this to set the parameters of the experiments.
Must be of length equal to experiments.
This overrides any info about chosen models, as the model is set by the dictionary.
repeat_exp : bool, list
        -> Does not come into play if experiments is a list.
        If True: picks the diffusion models at random from the pool.
        If False: picks the diffusion models in an ordered way from the pool.
num_fovs : int
Number of field of views to get trajectories from in each experiment.
return_timestep_labs : bool
        If True, the output trajectory dataframes also contain the labels alpha, D and state at each time step.
save_data : bool
If True, saves all pertinent data.
path : str
Path where to store the data.
prefix : str
Extra prefix that can be added in front of the files' names.
get_video : bool
        If True, also outputs the videos generated with Deeptrack for the generated datasets (see utils_videos for details).
num_vip : int
Number of VIP highlighted in the videos.
get_video_masks : bool
If True, get masks of videos
Returns
-------
tuple
        - trajs_out:
            List of length (experiments x num_fovs). Each element is a dataframe
            containing the trajectories of a particular experiment/fov, in order of
            generation (i.e. [exp1_fov1, exp1_fov2, ..., exp2_fov1 ....]).
If return_timestep_labs = True, the dataframes also contain the labels
at each time step.
- labels_traj_out:
list of same length of trajs_out containing the labels of the
corresponding trajectories. Each element contains a list with the
labels of each trajectory, following the scheme:
[idx_traj, D_1, alpha_1, state_1, CP_1, D_2, alpha_2, .... state_N]
- labels_ens_out:
list of same length of trajs_out containing the ensemble labels of
given experiment. See description of output matrix in
utils_challenge._extract_ensemble()
'''
# Set prefixes for saved files
if save_data:
if not os.path.exists(path):
os.makedirs(path)
pf_labs_traj = path+prefix+'traj_labs'
pf_labs_ens = path+prefix+'ens_labs'
pf_trajs = path+prefix+'trajs'
pf_videos = path+prefix+'videos'
if return_timestep_labs:
df_list = []
# Sets the models of the experiments that will be output by the function
if dics is None:
if isinstance(experiments, int):
if repeat_exp: # If experiments can be repeated, we just sample randomly
model_exp = np.random.randint(len(datasets_phenom().avail_models_name), size = experiments)
            else: # If not, we sample them in an ordered way
                if experiments >= len(datasets_phenom().avail_models_name):
                    num_repeats = int(np.ceil(experiments / len(datasets_phenom().avail_models_name)))
else:
num_repeats = 1
model_exp = np.tile(np.arange(len(datasets_phenom().avail_models_name)), num_repeats)[:experiments]
# We add one to get into non-Python numeration
model_exp += 1
else:
model_exp = experiments
# If list of dics is given, then just create a list of length = len(dics)
else:
model_exp = [0]*len(dics)
# Output lists
trajs_out, labels_traj_out, labels_ens_out = [], [], []
for idx_experiment, model in enumerate(tqdm(model_exp)):
''' Generate the trajectories '''
if dics is None:
dic = _get_dic_andi2(model)
else:
dic = dics[idx_experiment]
            # Override the info about the model
model = datasets_phenom().avail_models_name.index(dic['model'])+1
trajs, labels = datasets_phenom().create_dataset(dics = dic)
''' Apply the FOV '''
for fov in range(num_fovs):
            # Checking if files exist and raising an error if so
if save_data:
if os.path.exists(pf_labs_traj+f'_exp_{idx_experiment}_fov_{fov}.txt') or os.path.exists(pf_labs_ens+f'_exp_{idx_experiment}_fov_{fov}.txt'):
                    raise FileExistsError(f'Target files for experiment {idx_experiment} and FOV {fov} already exist. Delete the files or change path/prefix.')
            # We take as min/max for the FOV origin a 5% margin of L
dist = 0.05
min_fov = int(dist*_defaults_andi2().L)
max_fov = int((1-dist)*_defaults_andi2().L)-_defaults_andi2().FOV_L
# sample the position of the FOV
fov_origin = (np.random.randint(min_fov, max_fov), np.random.randint(min_fov, max_fov))
''' Go over trajectories in FOV (copied from utils_trajectories for efficiency) '''
trajs_fov, array_labels_fov, list_labels_fov, idx_segs_fov, frames_fov = [], [], [], [], []
idx_seg = -1
# Total frames
frames = np.arange(trajs.shape[0])
            # We save the correspondence between idx in FOV and idx in trajs dataset
for idx, (traj, label) in enumerate(zip(trajs[:, :, :].transpose(1,0,2),
labels[:, :, :].transpose(1,0,2))):
nan_segms = segs_inside_fov(traj,
fov_origin = fov_origin,
fov_length = _defaults_andi2().FOV_L,
cutoff_length = _defaults_andi2()._min_T)
if nan_segms is not None:
for idx_nan in nan_segms:
idx_seg+= 1
seg_x = traj[idx_nan[0]:idx_nan[1], 0]
seg_y = traj[idx_nan[0]:idx_nan[1], 1]
trajs_fov.append(np.vstack((seg_x, seg_y)).transpose())
frames_fov.append(frames[idx_nan[0]:idx_nan[1]])
lab_seg = []
for idx_lab in range(labels.shape[-1]):
lab_seg.append(_defaults_andi2().label_filter(label[idx_nan[0]:idx_nan[1], idx_lab]))
lab_seg = np.vstack(lab_seg).transpose()
array_labels_fov.append(lab_seg)
                        # Transform continuous labels to list for correct output
if model == 2 or model == 4:
# if multi-state or dimerization, we get rid of the label of state numbering
CP, alphas, Ds, states = label_continuous_to_list(lab_seg[:, :-1])
else:
CP, alphas, Ds, states = label_continuous_to_list(lab_seg)
# Extract final point of trajectory
T = CP[-1]
CP = CP[:-1]
list_gt = [idx_seg, Ds[0], alphas[0], states[0]]
for gtc, gta, gtd, gts in zip(CP, alphas[1:], Ds[1:], states[1:]):
list_gt += [gtc, gtd, gta, gts]
# Add end point of trajectory
list_gt.append(T)
list_labels_fov.append(list_gt)
if save_data:
with open(pf_labs_traj+f'_exp_{idx_experiment}_fov_{fov}.txt', 'a') as f:
writer = csv.writer(f, delimiter=',', lineterminator='\n',)
writer.writerow(list_gt)
                        # Save index of segment with its length to later append in the dataframe
idx_segs_fov.append(np.ones_like(seg_x)*idx_seg)
'''Extract ensemble trajectories'''
ensemble_fov = extract_ensemble(np.concatenate(array_labels_fov)[:, -1], dic)
df_data = np.hstack((np.expand_dims(np.concatenate(idx_segs_fov), axis=1),
np.expand_dims(np.concatenate(frames_fov), axis=1).astype(int),
np.concatenate(trajs_fov)))
df_traj = pd.DataFrame(df_data, columns = ['traj_idx', 'frame', 'x', 'y'])
if get_video:
print(f'Generating video for EXP {idx_experiment} FOV {fov}')
                pad = -20 # padding has to be far enough from the FOV so that the PSF
                          # of particles outside the FOV does not enter the videos
array_traj_fov = df_to_array(df_traj.copy(), pad = pad)
min_distance = psf_width()
idx_vip = get_VIP(array_traj_fov, num_vip = num_vip, min_distance = min_distance, pad = pad)
if not save_data:
pf_videos = ''
                video_fov = transform_to_video(array_traj_fov, # note that we insert the trajectories without noise!
optics_props={
"output_region":[fov_origin[0], fov_origin[1],
fov_origin[0] + _defaults_andi2().FOV_L, fov_origin[1] + _defaults_andi2().FOV_L]
},
get_vip_particles=idx_vip,
with_masks = get_video_masks,
save_video = save_data, path = pf_videos+f'_exp_{idx_experiment}_fov_{fov}.tiff')
try:
videos_out.append(video_fov)
except:
videos_out = [video_fov]
            # Add noise to the trajectories (note that this has to be done
            # after the videos, so these are not affected by the noise).
df_traj.x += np.random.randn(df_traj.shape[0])*_defaults_andi2().sigma_noise
df_traj.y += np.random.randn(df_traj.shape[0])*_defaults_andi2().sigma_noise
if return_timestep_labs:
array_labels_fov = np.concatenate(array_labels_fov)
df_traj['alpha'] = array_labels_fov[:, 0]
df_traj['D'] = array_labels_fov[:, 1]
df_traj['state'] = array_labels_fov[:, 2]
if save_data:
# Trajectories
df_traj.to_csv(pf_trajs+f'_exp_{idx_experiment}_fov_{fov}.csv', index = False)
# Ensemble labels
with open(pf_labs_ens+f'_exp_{idx_experiment}_fov_{fov}.txt', 'a') as f:
if model == 2: num_states = dic['alphas'].shape[0]
elif model == 1: num_states = 1
else: num_states = 2
model_n = dic['model']
f.write(f'model: {model_n}; num_state: {num_states} \n')
np.savetxt(f, ensemble_fov, delimiter = ';')
# Add data to main lists (trajectories and lists with labels)
trajs_out.append(df_traj)
labels_traj_out.append(list_labels_fov)
labels_ens_out.append(ensemble_fov)
if get_video:
return trajs_out, videos_out, labels_traj_out, labels_ens_out
else:
return trajs_out, labels_traj_out, labels_ens_out
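# %% Usage sketch
if __name__ == "__main__":
    # A hedged example: two experiments with one FOV each, no videos and no
    # saving. With the default particle densities, each FOV should contain at
    # least a few trajectories.
    trajs, labs_traj, labs_ens = challenge_phenom_dataset(experiments=2, num_fovs=1)
    print(trajs[0].head())  # first rows of the first experiment/FOV dataframe
    print(labs_traj[0][0])  # label list of its first trajectory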
| 36,775 | 47.969374 | 157 |
py
|
andi_datasets
|
andi_datasets-master/andi_datasets/utils_videos.py
|
# AUTOGENERATED! DO NOT EDIT! File to edit: ../source_nbs/lib_nbs/utils_videos.ipynb.
# %% auto 0
__all__ = ['play_video', 'convert_uint8', 'psf_width', 'func_poisson_noise', 'mask', 'transform_to_video']
# %% ../source_nbs/lib_nbs/utils_videos.ipynb 2
import matplotlib.animation as animation
import matplotlib.pyplot as plt
from IPython.display import HTML
import numpy as np
import imageio
# Deeptrack is not automatically installed with andi_datasets
# because it is a heavy dependency.
import warnings
try:
import deeptrack as dt
except:
warnings.warn('Deeptrack is currently not installed. Install if needed using pip install deeptrack.')
# %% ../source_nbs/lib_nbs/utils_videos.ipynb 4
def play_video(video, figsize=(5, 5), fps=10):
"""
    Displays a stack of images as a video inside Jupyter notebooks.
Parameters
----------
video : ndarray
Stack of images.
figsize : tuple, optional
Canvas size of the video.
fps : int, optional
Video frame rate.
Returns
-------
Video object
Returns a video player with input stack of images.
"""
fig = plt.figure(figsize=figsize)
images = []
plt.axis("off")
for image in video:
images.append([plt.imshow(image[:, :, 0], cmap="gray")])
anim = animation.ArtistAnimation(
fig, images, interval=1e3 / fps, blit=True, repeat_delay=0
)
html = HTML(anim.to_jshtml())
display(html)
plt.close()
# %% ../source_nbs/lib_nbs/utils_videos.ipynb 5
def convert_uint8(vid, with_vips = False):
"""
Converts a stack of images in to 8bit pixel format.
This is a helper function for `transform_to_video`
Parameters
----------
vid : ndarray
Stack of images.
with_vips: bool, optional
Appends a mask of vip particles in the first frame to the converted video.
Returns
-------
ndarray
Image stack in 8bit.
"""
new_vid = []
for idx_im, im in enumerate(vid):
if idx_im == 0 and with_vips:
im[im == -1] = 255
new_vid.append(im.astype(np.uint8))
else:
im = im[:,:,0]
im = im / im.max()
im = im * 255
im = im.astype(np.uint8)
new_vid.append(im)
return new_vid
# %% ../source_nbs/lib_nbs/utils_videos.ipynb 6
def psf_width(NA = 1.46, wavelength = 500e-9, resolution = 100e-9):
"""
Computes the PSF width.
This is a helper function for `transform_to_video`
Parameters
----------
NA : float
Numerical aperture.
wavelength : float
Wavelength.
resolution : float
Resolution of the camera.
Returns
-------
int
PSF width in pixels.
"""
_psf = 1.22 * wavelength / (2 * NA)
return int(_psf / resolution)
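# With the default values: 1.22 * 500e-9 / (2 * 1.46) ~= 2.09e-7 m, so
# psf_width() == int(2.09e-7 / 100e-9) == 2 pixels.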
# %% ../source_nbs/lib_nbs/utils_videos.ipynb 7
def func_poisson_noise():
"""
    Applies Poisson noise to an image.
This is a custom DeepTrack feature, and a helper function for `transform_to_video`
"""
def inner(image):
image[image<0] = 0
rescale = 1
noisy_image = np.random.poisson(image * rescale) / rescale
return noisy_image
return inner
# %% ../source_nbs/lib_nbs/utils_videos.ipynb 8
def mask(circle_radius, particle_list=[]):
"""
Computes binary masks for particles in microscopy videos.
This is a custom DeepTrack feature, and a helper function for `transform_to_video`.
Parameters
----------
particle_list: list of int
List of particles whose masks need to be created
"""
def inner(image):
X, Y = np.mgrid[:2*circle_radius, :2*circle_radius]
CIRCLE = (X - circle_radius+0.5)**2 + (Y- circle_radius+0.5)**2 < circle_radius**2
CIRCLE = np.expand_dims(CIRCLE, axis=-1)
_index = image.get_property("replicate_index")[0]
if particle_list:
if _index in particle_list:
pix_val = (_index + 1) * CIRCLE
else:
pix_val = 0 * CIRCLE
else:
pix_val = (_index + 1) * CIRCLE
return pix_val
return inner
# %% ../source_nbs/lib_nbs/utils_videos.ipynb 10
def transform_to_video(
trajectory_data,
particle_props={},
optics_props={},
background_props={},
get_vip_particles=[],
with_masks=False,
save_video=False,
path="",
):
"""
Transforms trajectory data into microscopy imagery data.
    Trajectories generated through phenomenological models in andi-datasets are imaged under a fluorescence microscope to generate 2D timelapse videos.
Parameters
----------
trajectory_data : ndarray
Generated through models_phenom. Array of the shape (T, N, 2) containing the trajectories.
particle_props : dict
Dictionary containing the properties of particles to be simulated as keyword arguments. Valid keys are:
'`particle_intensity`' : array_like[int, int]
Intensity distribution of particles within a frame given as mean and standard deviations.
'`intensity_variation`' : int
Intensity variation of particles in subsequent frames given as standard deviation.
'`z`' : float
            Particle positions with respect to the focal plane in pixel units defined by the pixel size in **optics_props**. For example, particles will be in focus when `z=0`.
'`refractive_index`' : float
Refractive index of particle.
optics_props : dict
Dictionary containing the properties of microscope as keyword arguments. Valid keys are:
'`NA`': float
Numerical aperture of the microscope.
'`wavelength`' : float
Wavelength of light in meters.
'`resolution`' : float
Effective pixel size of the camera in meters.
'`magnification`' : float
Magnification of the optical system.
'`refractive_index_medium`' : float
            Refractive index of the medium surrounding the particles.
'`output_region`': array_like[int, int, int, int]
ROI of the image to output.
            Given in the format: [x, y, x + width, y + height].
background_props : dict
Dictionary containing properties related to background intensity as keyword arguments. Valid keys are:
'`background_mean`' : int
Mean background intensity.
        '`background_std`' : int
            Standard deviation of the background intensity between subsequent frames of a video.
get_vip_particles : list of int
List of particles for which the masks are needed in the output.
with_masks : bool
If True, particle masks are returned in the output along with the video.
If False (default), only the video is returned in the output.
save_video : bool
If True, the generated video will be saved at the given path.
path : str
File path for saving the video, the path should be given along the video format.
For example: 'path' = './video.mp4' will save the video in the current folder.
Returns
-------
tuple | ndarray
Output type I
If `with_masks = True`,
The function returns a tuple containing:
masks : ndarray
video : ndarray
Note: If `get_vip_particles` is a non-empty list, the masks will contain only the vip particle masks.
Output type II
If `with_masks = False`,
The function returns:
video : ndarray
Note: If `get_vip_particles` is a non-empty list, the first frame in the output will be the masks of the given vip particles in the first frame, else (default) the output will be a ndarray of just the video.
"""
_particle_dict = {
"particle_intensity": [
500,
20,
], # Mean and standard deviation of the particle intensity
"intensity": lambda particle_intensity: particle_intensity[0]
+ np.random.randn() * particle_intensity[1],
"intensity_variation": 0, # Intensity variation of particle (in standard deviation)
"z": 0, # Particles are always at focus
"refractive_index": 1.45, # Refractive index of the particle
"position_unit": "pixel",
}
_optics_dict = {
"NA": 1.46, # Numerical aperture
"wavelength": 500e-9, # Wavelength
"resolution": 100e-9, # Camera resolution or effective resolution
"magnification": 1,
"refractive_index_medium": 1.33,
"output_region": [0, 0, 128, 128],
}
# Background offset
_background_dict = {
"background_mean": 100, # Mean background intensity
"background_std": 0, # Standard deviation of background intensity within a video
}
# Update the dictionaries with the user-defined values
_particle_dict.update(particle_props)
_optics_dict.update(optics_props)
_background_dict.update(background_props)
# Reshape the trajectory
trajectory_data = np.moveaxis(trajectory_data, 0, 1)
# Generate point particles
particle = dt.PointParticle(
trajectories=trajectory_data,
replicate_index=lambda _ID: _ID,
trajectory=lambda replicate_index, trajectories: dt.units.pixel
* trajectories[replicate_index[-1]],
number_of_particles=trajectory_data.shape[0],
traj_length=trajectory_data.shape[1],
position=lambda trajectory: trajectory[0],
**_particle_dict,
)
# Intensity variation of particles - controlled by "intensity_variation"
def intensity_noise(previous_values, previous_value):
return (previous_values or [previous_value])[0] + _particle_dict[
"intensity_variation"
] * np.random.randn()
# Make it sequential
sequential_particle = dt.Sequential(
particle,
position=lambda trajectory, sequence_step: trajectory[sequence_step],
intensity=intensity_noise,
)
# Adding background offset
background = dt.Add(
value=_background_dict["background_mean"]
+ np.random.randn() * _background_dict["background_std"]
)
def background_variation(previous_values, previous_value):
return (previous_values or [previous_value])[
0
] + np.random.randn() * _background_dict["background_std"]
## This will change the background offset within a sequence with a given standard deviation
sequential_background = dt.Sequential(background, value=background_variation)
# Define optical setup
optics = dt.Fluorescence(**_optics_dict)
# Normalising image plane particle intensity
scale_factor = (
(
optics.magnification()
* optics.wavelength()
/ (optics.NA() * optics.resolution())
)
** 2
) * (1 / np.pi)
# Poisson noise
poisson_noise = dt.Lambda(func_poisson_noise)
# Sample
sample = (
optics(sequential_particle ^ sequential_particle.number_of_particles)
>> dt.Multiply(scale_factor)
>> sequential_background
>> poisson_noise
)
# Masks
get_masks = dt.SampleToMasks(
lambda: mask(circle_radius=1, particle_list=get_vip_particles),
output_region=optics.output_region,
merge_method="add",
)
masks = sample >> get_masks >> dt.Add(-1)
# Sequential sample
sequential_sample = dt.Sequence(
(sample & masks),
trajectory=particle.trajectories,
sequence_length=particle.traj_length,
)
# Resolve the sample
_video, _masks = sequential_sample.update().resolve()
    if with_masks:
final_output = (_video, _masks)
elif get_vip_particles:
final_output = (_masks[0], *_video)
else:
final_output = _video
if save_video:
if len(final_output) == 2:
video_8bit = convert_uint8(final_output[0])
else:
video_8bit = convert_uint8(final_output, with_vips=get_vip_particles)
imageio.mimwrite(path, video_8bit)
return final_output
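# %% Usage sketch
if __name__ == "__main__":
    # A hedged example (requires deeptrack): image 10 short random walks,
    # centered in the default 128 x 128 output region. The output should be
    # a sequence of 20 frames.
    demo_trajs = np.cumsum(np.random.randn(20, 10, 2), axis=0) * 2 + 64
    video = transform_to_video(demo_trajs)
    print(len(video), video[0].shape)  # expected: 20 frames of 128 x 128 pixels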
| 12,246 | 30.564433 | 215 |
py
|
andi_datasets
|
andi_datasets-master/andi_datasets/__init__.py
|
__version__ = "2.0.0"
| 22 | 10.5 | 21 |
py
|
andi_datasets
|
andi_datasets-master/andi_datasets/utils_challenge.py
|
# AUTOGENERATED! DO NOT EDIT! File to edit: ../source_nbs/lib_nbs/utils_challenge.ipynb.
# %% auto 0
__all__ = ['majority_filter', 'label_filter', 'label_continuous_to_list', 'label_list_to_continuous', 'array_to_df',
'df_to_array', 'get_VIP', 'changepoint_assignment', 'changepoint_alpha_beta', 'jaccard_index',
'single_changepoint_error', 'ensemble_changepoint_error', 'create_binary_segment',
'jaccard_between_segments', 'segment_assignment', 'metric_anomalous_exponent',
'metric_diffusion_coefficient', 'metric_diffusive_state', 'check_no_changepoints', 'segment_property_errors',
'extract_ensemble', 'multimode_dist', 'distribution_distance', 'error_Ensemble_dataset',
'check_prediction_length', 'separate_prediction_values', 'load_file_to_df', 'error_SingleTraj_dataset',
'listdir_nohidden', 'codalab_scoring']
# %% ../source_nbs/lib_nbs/utils_challenge.ipynb 2
import numpy as np
from scipy.optimize import linear_sum_assignment
import pandas
from tqdm.auto import tqdm
import warnings
# %% ../source_nbs/lib_nbs/utils_challenge.ipynb 6
def majority_filter(seq, width):
'''
Given a vector, applies a majority filter of given width.
Parameters
----------
seq : list
Vector to filter.
width : int
Size of the window in which the filter is applied.
Returns
-------
list
Filtered vector
'''
offset = width // 2
seq = [0] * offset + seq
return [max(set(a), key=a.count)
for a in (seq[i:i+width] for i in range(len(seq) - offset))]
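# For example, a single-frame spike is removed with a window of width 3:
#   majority_filter([0, 0, 1, 0, 0], 3) -> [0, 0, 0, 0, 0]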
def label_filter(label,
window_size = 5,
min_seg = 3):
'''
Given a vector of changing labels, applies a majority filter such that the minimum segment of a particular label is
bigger than the minimum set segment.
Parameters
----------
label : list
label vector to filter.
window_size : int
Size of the window in which the majority filter is applied.
min_seg : int
Minimum segment size after filtering.
Returns
-------
np.array
Filtered label vector
'''
if np.min(label) < 0:
raise ValueError('This function only works with positive labels')
# if there are no changes:
if np.sum(label[1:] != label[:-1]) == 0:
return label
# define dummy vector with all zeros and ones
values, dummy = np.unique(label, return_inverse = True)
    # check if there are segments smaller than the minimal segment (min_seg)
cp = np.argwhere(dummy[1:] != dummy[:-1])
cp = np.append(0, cp)
current_min = (cp[1:]-cp[:-1]).flatten().min()
while (current_min < min_seg):
filt = majority_filter(dummy.tolist(), width = window_size)
filt = np.array(filt)
        # check if there are segments smaller than the minimal segment (min_seg)
cp = np.argwhere(filt[1:] != filt[:-1])
# If all changepoints were eliminated
        if cp.size == 0:
            dummy = filt # keep the fully smoothed labels
            break
cp = np.append(0, cp)
current_min = (cp[1:]-cp[:-1]).flatten().min()
if (dummy == filt).all():
# If all failed and still have segments smaller than min_seg
seg_lengths = (cp[1:]-cp[:-1]).flatten().astype(int)
seg_smaller = np.argwhere(seg_lengths < min_seg).flatten()
            # We go over each segment and we assign the values 'by hand'
for idxsegs in seg_smaller:
if seg_lengths[idxsegs] == 1:
filt[(cp[idxsegs]+1)] = filt[cp[idxsegs]]
elif seg_lengths[idxsegs] == 2:
filt[(cp[idxsegs]+1)] = filt[cp[idxsegs]]
filt[(cp[idxsegs]+2)] = filt[cp[idxsegs]+3]
dummy = filt
break
dummy = filt
# Check boundaries
if dummy[0] != dummy[1] or dummy[1] != dummy[2]:
dummy[:2] = dummy[2]
if dummy[-2] != dummy[-3] or dummy[-1] != dummy[-2]:
dummy[-3:] = dummy[-3]
# reset to label values
dummy_ret = np.zeros_like(dummy).astype(float)
for idx, v in enumerate(values):
dummy_ret[dummy == idx] = v
return dummy_ret
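# %% Usage sketch
if __name__ == "__main__":
    # A hedged example: a one-frame spike of label 2 is shorter than the
    # default minimum segment (min_seg = 3), so the filter removes it.
    print(label_filter(np.array([1, 1, 1, 2, 1, 1, 1, 1])))
    # expected: [1. 1. 1. 1. 1. 1. 1. 1.]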
# %% ../source_nbs/lib_nbs/utils_challenge.ipynb 16
def label_continuous_to_list(labs):
'''
Given an array of T x 2 labels containing the anomalous exponent and diffusion
coefficient at each timestep, returns 3 arrays, each containing the changepoints,
exponents and coefficient, respectively.
If labs is size T x 3, then we consider that diffusive states are given and also
return those.
Parameters
----------
labs : array
T x 2 or T x 3 labels containing the anomalous exponent, diffusion
and diffusive state.
Returns
-------
tuple
- First element is the list of change points
- The rest are corresponding segment properties (order: alpha, Ds and states)
'''
# Check if states were given
are_states = False
if labs.shape[1] == 3:
are_states = True
    # Check in which variables there are changes
CP = np.argwhere((labs[:-1, :] != labs[1:, :]).sum(1) != 0).flatten()+1
T = labs.shape[0]
alphas = np.zeros(len(CP)+1)
Ds = np.zeros(len(CP)+1)
if are_states: states = np.zeros(len(CP)+1)
for idx, cp in enumerate(np.append(CP, T)):
alphas[idx] = labs[cp-1, 0]
Ds[idx] = labs[cp-1, 1]
if are_states: states[idx] = labs[cp-1, 2]
CP = np.append(CP, T)
if are_states:
return CP, alphas, Ds, states
else:
return CP, alphas, Ds
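# %% Usage sketch
if __name__ == "__main__":
    # A hedged example: a 6-frame label array (T x 2) with one change point
    # at t = 3. The last returned change point marks the trajectory length.
    labs = np.array([[1.0, 0.1]] * 3 + [[0.5, 1.0]] * 3)
    CP, alphas, Ds = label_continuous_to_list(labs)
    print(CP, alphas, Ds)  # expected: [3 6] [1.  0.5] [0.1 1. ]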
# %% ../source_nbs/lib_nbs/utils_challenge.ipynb 20
def label_list_to_continuous(CP, label):
'''
Given a list of change points and the labels of the diffusion properties of the
resulting segments, generates and array of continuous labels. The last change point
indicates the array length.
Parameters
----------
CP : array, list
list of change points. Last change point indicates label length.
label : array, list
list of segment properties
Returns
-------
array
Continuous label created from the given change points and segment properties
'''
if isinstance(label, list):
label = np.array(label)
segs = create_binary_segment(CP[:-1], CP[-1])
return (segs.transpose()*label).sum(1)
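# Round-trip example (inverse of label_continuous_to_list), a sketch assuming
# create_binary_segment (defined later in this module) builds one binary mask
# per segment:
#   label_list_to_continuous([3, 6], [1.0, 0.5]) -> [1., 1., 1., 0.5, 0.5, 0.5]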
# %% ../source_nbs/lib_nbs/utils_challenge.ipynb 24
from .utils_trajectories import segs_inside_fov
def array_to_df(trajs,
labels,
min_length = 10,
fov_origin = [0,0], fov_length= 100.0, cutoff_length = 10):
'''
Given arrays for the position and labels of trajectories, creates a dataframe with that
    data. The function also applies the demanded FOV. If you don't want a field of view, choose a
    FOV length bigger (smaller) than your maximum (minimum) trajectory position.
Parameters
----------
trajs : array
Trajectories to store in the df (dimension: T x N x 3)
labels : array
Labels to store in the df (dimension: T x N x 3)
fov_origin : tuple
Bottom left point of the square defining the FOV.
fov_length : float
Size of the box defining the FOV.
cutoff_length : int
Minimum length of a trajectory inside the FOV to be considered in the output dataset.
Returns
-------
tuple
- df_in (dataframe): dataframe with trajectories
- df_out (dataframe): dataframe with labels
'''
xs = []
ys = []
idxs = []
df_out = pandas.DataFrame(columns = ['traj_idx', 'Ds', 'alphas', 'states', 'changepoints'])
idx_t = 0
for traj, l_alpha, l_D, l_s in zip(tqdm(trajs), labels[:, :, 0], labels[:, :, 1], labels[:, :, 2]):
# Check FOV and extract the segments inside it
idx_inside_segments = segs_inside_fov(traj, fov_origin, fov_length, cutoff_length)
if idx_inside_segments is not None:
for idx_in in idx_inside_segments:
seg_x = traj[idx_in[0]:idx_in[1], 0]
seg_y = traj[idx_in[0]:idx_in[1], 1]
seg_alpha = l_alpha[idx_in[0]:idx_in[1]]
seg_D = l_D[idx_in[0]:idx_in[1]]
seg_state = l_s[idx_in[0]:idx_in[1]]
# Filtering
seg_alpha = label_filter(seg_alpha)
seg_D = label_filter(seg_D)
seg_state = label_filter(seg_state)
# Stacking data of input dataframe
xs += seg_x.tolist()
ys += seg_y.tolist()
idxs += (np.ones(len(seg_x))*idx_t).tolist()
# Transforming to list of changepoints and physical properties
merge = np.hstack((seg_alpha.reshape(seg_alpha.shape[0], 1),
seg_D.reshape(seg_D.shape[0], 1),
seg_state.reshape(seg_state.shape[0], 1)))
CP, alphas, Ds, states = label_continuous_to_list(merge)
# Saving each segment info in output dataframe
df_out.loc[df_out.shape[0]] = [idx_t, Ds, alphas, states, CP]
# Updating segment index
idx_t += 1
# Saving trajectories in Dataframe
tr_to_df = np.vstack((idxs,
xs,
ys)).transpose()
df_in = pandas.DataFrame(tr_to_df, columns = ['traj_idx', 'x', 'y'])
return df_in, df_out
# %% ../source_nbs/lib_nbs/utils_challenge.ipynb 29
def df_to_array(df, pad = -1):
'''
Transforms a dataframe like the ones given in the ANDI 2 challenge (i.e. 4 columns:
traj_idx, frame, x, y) into a numpy array. To deal with irregular temporal supports,
we pad the array whenever the trajectory is not present.
The output array has the typical shape of ANDI datasets: TxNx2
Parameters
----------
df : dataframe
Dataframe with four columns 'traj_idx': the trajectory index, 'frame' the time frame and
'x' and 'y' the positions of the particle.
pad : int
Number to use as padding.
Returns
-------
array
Array containing the trajectories from the dataframe, with usual ANDI shape (TxNx2).
'''
max_T = int(df.frame.max()+1)
num_part = int(df.iloc[-1].traj_idx)
array_trajs = np.ones((max_T, num_part+1, 2))*pad
for idx in np.unique(df.traj_idx).astype(int):
df_part = df.loc[df.traj_idx == idx]
array_trajs[df_part.frame.values.astype(int), idx, 0] = df_part.x.values
array_trajs[df_part.frame.values.astype(int), idx, 1] = df_part.y.values
return array_trajs
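# Illustrative sketch (not part of the original module): a tiny two-particle
# dataframe where particle 1 leaves after frame 1, so its frame 2 is padded with -1.
#     df = pandas.DataFrame({'traj_idx': [0., 0., 0., 1., 1.],
#                            'frame': [0., 1., 2., 0., 1.],
#                            'x': [0., .5, .7, 1., 1.1],
#                            'y': [0., .2, .3, 2., 2.1]})
#     arr = df_to_array(df)
#     arr.shape, arr[2, 1]
#     # -> ((3, 2, 2), array([-1., -1.]))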
# %% ../source_nbs/lib_nbs/utils_challenge.ipynb 31
from scipy.spatial import distance
def get_VIP(array_trajs, num_vip = 5, min_distance = 2, pad = -1):
'''
Given an array of trajectories, finds the VIP particles that participants will
need to characterize in the video track.
The function first finds the particles that exist at frame 0 (i.e. whose first value
is different from pad). Then, it iterates over these particles to find num_vip that are at
a distance greater than min_distance in the first frame.
Parameters
----------
array_trajs : array
Position of the trajectories that will be considered for the VIP search.
num_vip : int
Number of VIP particles to flag.
min_distance : float
Minimum distance between two VIP particles.
pad : int
Number used to indicate in the temporal support that the particle is outside of the FOV.
Returns
-------
list
List of indices of the chosen VIP particles
'''
candidates_vip = np.argwhere(array_trajs[0,:,0] != pad).flatten()
if len(candidates_vip) < num_vip:
raise ValueError('Number of VIP demanded is bigger than available particles.')
elected = []
count_while = 0
while len(elected) < num_vip:
elected = [np.random.choice(candidates_vip)]
for c_idx in candidates_vip:
if c_idx == elected[0]:
continue
if len(array_trajs[0, elected,:].shape) < 2:
all_rest = np.expand_dims(array_trajs[0, elected,:], 0)
else:
all_rest = array_trajs[0, elected,:]
dist = distance.cdist(np.expand_dims(array_trajs[0,c_idx,:], 0), all_rest, metric='euclidean').transpose()
if dist.min() > min_distance: # use the given threshold instead of a hardcoded value
elected.append(c_idx)
if len(elected) == num_vip:
break
count_while += 1
if count_while > 100:
raise ValueError('Could not find suitable VIP particles. This is due to either having too few particles or them being too close.')
return elected
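# Illustrative sketch (not part of the original module): 10 particles, all inside
# the FOV at frame 0 and spaced 5 units apart along x, so any subset satisfies the
# min_distance constraint and the while loop terminates on the first pass.
#     trajs = np.random.rand(20, 10, 2)        # T x N x 2, no value equals pad = -1
#     trajs[0, :, 0] = np.arange(10) * 5.0
#     get_VIP(trajs, num_vip=3)
#     # -> list of 3 distinct particle indices, e.g. [7, 0, 4] (random)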
# %% ../source_nbs/lib_nbs/utils_challenge.ipynb 34
def changepoint_assignment(GT, preds):
'''
Given a list of groundtruth and predicted changepoints, solves the assignment problem via
the Munkres algorithm (aka Hungarian algorithm) and returns two arrays containing the indices of the
paired groundtruth and predicted changepoints, respectively.
The distance between two changepoints is their absolute difference.
Parameters
----------
GT : list
List of groundtruth change points.
preds : list
List of predicted change points.
Returns
-------
tuple
- tuple of two arrays, each corresponding to the assigned GT and pred changepoints
- Cost matrix
'''
cost_matrix = np.zeros((len(GT), len(preds)))
for idxg, gt in enumerate(GT):
for idxp, pred in enumerate(preds):
cost_matrix[idxg, idxp] = np.abs(gt-pred)
return linear_sum_assignment(cost_matrix), cost_matrix
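# Illustrative sketch (not part of the original module): one groundtruth
# changepoint (80) is left unpaired because there are only two predictions.
#     GT, preds = np.array([20, 50, 80]), np.array([18, 55])
#     (rows, cols), cost = changepoint_assignment(GT, preds)
#     rows, cols    # GT[0] <-> preds[0] and GT[1] <-> preds[1]
#     # -> (array([0, 1]), array([0, 1]))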
# %% ../source_nbs/lib_nbs/utils_challenge.ipynb 36
def changepoint_alpha_beta(GT, preds, threshold = 10):
'''
Calculate the alpha and beta measure of paired changepoints.
Inspired by Supplementary Note 3 in https://www.nature.com/articles/nmeth.2808
Parameters
----------
GT : list
List of groundtruth change points.
preds : list
List of predicted change points.
threshold : float
Distance from which predictions are considered to have failed. They are then assigned this number.
Returns
-------
tuple
alpha, beta
'''
assignment, _ = changepoint_assignment(GT, preds)
assignment = np.array(assignment)
distance = np.abs(GT[assignment[0]] - preds[assignment[1]])
distance[distance > threshold] = threshold
distance = np.sum(distance)
d_x_phi = threshold*len(GT)
d_ybar_phi = max([0, (len(preds)-len(GT))*threshold])
alpha = 1-distance/d_x_phi
beta = (d_x_phi-distance)/(d_x_phi+d_ybar_phi)
return alpha, beta
# %% ../source_nbs/lib_nbs/utils_challenge.ipynb 38
def jaccard_index(TP: int, # true positive
FP: int, # false positive
FN: int # false negative
)-> float: # Jaccard Index
'''
Given the true positive, false positive and false negative rates, calculates the Jaccard Index
'''
return TP/(TP+FP+FN)
# %% ../source_nbs/lib_nbs/utils_challenge.ipynb 39
def single_changepoint_error(GT, preds, threshold = 5):
'''
Given the groundtruth and predicted changepoints for a single trajectory, first solves the assignment problem between changepoints,
then calculates the RMSE of the true positive pairs and the Jaccard index.
Parameters
----------
GT : list
List of groundtruth change points.
preds : list
List of predicted change points.
threshold : float
Distance from which predictions are considered to have failed. They are then assigned this number.
Returns
-------
tuple
- TP_rmse: root mean square error of the true positive change points.
- Jaccard Index of the ensemble predictions
'''
assignment, _ = changepoint_assignment(GT, preds)
assignment = np.array(assignment)
TP, FP, FN = 0, 0, 0
TP_rmse = []
for p in assignment.transpose():
if np.abs(GT[p[0]] - preds[p[1]]) < threshold:
TP += 1
TP_rmse.append((GT[p[0]] - preds[p[1]])**2)
else:
FP += 1
FN += 1
# Calculating RMSE
TP_rmse = np.sqrt(np.mean(TP_rmse))
# Checking false positive and missed events
if len(preds) > len(GT):
FP += len(preds) - len(GT)
elif len(preds) < len(GT):
FN += len(GT) - len(preds)
return TP_rmse, jaccard_index(TP, FP, FN)
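# Illustrative sketch (not part of the original module): with threshold = 5 only
# the pair (20, 18) is a true positive; (50, 55) and (80, 90) fail the strict
# |GT - pred| < 5 test and each produces one FP and one FN.
#     single_changepoint_error(np.array([20, 50, 80]), np.array([18, 55, 90]), threshold=5)
#     # -> (2.0, 0.2), i.e. RMSE = sqrt(2**2) and JI = 1/(1+2+2)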
# %% ../source_nbs/lib_nbs/utils_challenge.ipynb 40
def ensemble_changepoint_error(GT_ensemble, pred_ensemble, threshold = 5):
'''
Given an ensemble of groundtruth and predicted change points, iterates
over each trajectory's changepoints. For each, it solves the assignment problem
between changepoints. Then, calculates the RMSE of the true positive pairs and
the Jaccard index over the ensemble of changepoints (i.e. not their mean
w.r.t. the trajectories).
Parameters
----------
GT_ensemble : list, array
Ensemble of groundtruth change points.
pred_ensemble : list
Ensemble of predicted change points.
threshold : float
Distance from which predictions are considered to have failed. They are then assigned this number.
Returns
-------
tuple
- TP_rmse: root mean square error of the true positive change points.
- Jaccard Index of the ensemble predictions
'''
TP, FP, FN = 0, 0, 0
TP_rmse = []
for gt_traj, pred_traj in zip(GT_ensemble, pred_ensemble):
assignment, _ = changepoint_assignment(gt_traj, pred_traj)
assignment = np.array(assignment)
for p in assignment.transpose():
if np.abs(gt_traj[p[0]] - pred_traj[p[1]]) < threshold:
TP += 1
TP_rmse.append((gt_traj[p[0]] - pred_traj[p[1]])**2)
else:
FP += 1
FN += 1
# Checking false positive and missed events
if len(pred_traj) > len(gt_traj):
FP += len(pred_traj) - len(gt_traj)
elif len(pred_traj) < len(gt_traj):
FN += len(gt_traj) - len(pred_traj)
if TP+FP+FN == 0:
wrn_str = 'No segments found in this dataset.'
warnings.warn(wrn_str)
return threshold, 0
# Calculating RMSE
TP_rmse = np.sqrt(np.mean(TP_rmse))
return TP_rmse, jaccard_index(TP, FP, FN)
# %% ../source_nbs/lib_nbs/utils_challenge.ipynb 43
def create_binary_segment(CP: list, # list of changepoints
T: int # length of the trajectory
)-> list: # list of arrays with value 1 in the temporal support of the current segment.
'''
Given a set of changepoints and the length of the trajectory, creates segments which are equal to one
if the segment takes place at that position and zero otherwise.
'''
segments = np.zeros((len(CP)+1, T))
CP = np.append(0, CP)
for idx, (cp1, cp2) in enumerate(zip(CP[:-1], CP[1:])):
segments[idx, cp1+1:cp2+1] = 1
segments[-1, CP[-1]+1:] = 1
segments[0, 0] = 1
return segments
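# Illustrative sketch (not part of the original module): two changepoints in a
# 9-frame trajectory give three binary masks covering frames 0-3, 4-6 and 7-8.
#     create_binary_segment([3, 6], 9)
#     # -> array([[1., 1., 1., 1., 0., 0., 0., 0., 0.],
#     #           [0., 0., 0., 0., 1., 1., 1., 0., 0.],
#     #           [0., 0., 0., 0., 0., 0., 0., 1., 1.]])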
# %% ../source_nbs/lib_nbs/utils_challenge.ipynb 45
def jaccard_between_segments(gt, pred):
'''
Given two segments, calculates the Jaccard index between them by considering TP as correct labeling,
FN as missed events and FP as leftover predictions.
Parameters
----------
gt : array
groundtruth segment, equal to one in the temporal support of the given segment, zero otherwise.
pred : array
predicted segment, equal to one in the temporal support of the given segment, zero otherwise.
Returns
-------
float
Jaccard index between the given segments.
'''
if len(gt) > len(pred):
pred = np.append(pred, np.zeros(len(gt) - len(pred)))
elif len(pred) > len(gt):
gt = np.append(gt, np.zeros(len(pred) - len(gt)))
tp = np.sum(np.logical_and(pred == 1, gt == 1))
fp = np.sum(np.logical_and(pred == 1, gt == 0))
fn = np.sum(np.logical_and(pred == 0, gt == 1))
# special case for absence of changepoint
if tp+fp+fn == 0: return 0
else: return jaccard_index(tp, fp, fn)
# %% ../source_nbs/lib_nbs/utils_challenge.ipynb 46
def segment_assignment(GT, preds, T:int = None):
'''
Given a list of groundtruth and predicted changepoints, generates a set of segments. Then constructs
a cost matrix by calculating the Jaccard index between segments. From this cost matrix, we solve the
assignment problem via the Munkres algorithm (aka Hungarian algorithm) and return two arrays
containing the indices of the groundtruth and predicted segments, respectively.
If T = None, then we consider that GT and preds may have different lengths. In that case, the end
of the segments is the last CP of each set of CPs.
Parameters
----------
GT : list
List of groundtruth change points.
preds : list
List of predicted change points.
T : int, None
Length of the trajectory. If None, considers different GT and preds length.
Returns
-------
tuple
- tuple of two arrays, each corresponding to the assigned GT and pred changepoints
- Cost matrix calculated via JI of segments
'''
if T is not None:
T_gt = T_pred = T
# Check if the GT or predictions are a single integer or an empty array
if isinstance(GT, int): GT = [GT]
elif len(GT) == 0: GT = [T-1]
if isinstance(preds, int): preds = [preds]
elif len(preds) == 0: preds = [T-1]
else:
T_gt = GT[-1]
if len(GT) > 1:
GT = GT[:-1]
T_pred = preds[-1]
if len(preds) > 1:
preds = preds[:-1]
seg_GT = create_binary_segment(GT, T_gt)
seg_preds = create_binary_segment(preds, T_pred)
cost_matrix = np.zeros((seg_GT.shape[0], seg_preds.shape[0]))
for idxg, gt in enumerate(seg_GT):
for idxp, pred in enumerate(seg_preds):
cost_matrix[idxg, idxp] = 1-jaccard_between_segments(gt, pred)
return linear_sum_assignment(cost_matrix), cost_matrix
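# Illustrative sketch (not part of the original module): with T = None the last
# entry of each list is taken as the trajectory end, so a single groundtruth
# changepoint at 30 is matched against a single prediction at 35.
#     (rows, cols), cost = segment_assignment([30, 100], [35, 100])
#     rows, cols
#     # -> (array([0, 1]), array([0, 1]))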
# %% ../source_nbs/lib_nbs/utils_challenge.ipynb 56
from sklearn.metrics import mean_squared_log_error as msle, f1_score
from .models_phenom import models_phenom
def metric_anomalous_exponent(gt = None,
pred = None,
max_error = np.abs(models_phenom().bound_alpha[0]-models_phenom().bound_alpha[1])):
'''
Compute the mean absolute error (mae) between anomalous exponents.
Checks the current bounds of anomalous exponents from models_phenom to calculate the maximum error.
'''
error = np.mean(np.abs(gt-pred))
if error > max_error:
return max_error
else:
return error
def metric_diffusion_coefficient(gt = None, pred = None,
threshold_min = models_phenom().bound_D[0],
max_error = msle([models_phenom().bound_D[0]],
[models_phenom().bound_D[1]])):
'''
Compute the mean squared log error (msle) between diffusion coefficients.
Checks the current bounds of diffusion from models_phenom to calculate the maximum error.
'''
# considering the presence of zeros and negatives
pred = np.array(pred).copy(); gt = np.array(gt).copy()
pred[pred <= threshold_min] = threshold_min
gt[gt <= threshold_min] = threshold_min
# mean squared log error
error = msle(gt, pred)
if error > max_error:
return max_error
else:
return error
def metric_diffusive_state(gt = None, pred = None, max_error = False):
'''
Compute the F1 score between diffusive states.
'''
return f1_score(gt.astype(int), pred.astype(int), average = 'micro')
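# Illustrative sketch (not part of the original module):
#     metric_anomalous_exponent(np.array([0.5, 1.2]), np.array([0.6, 1.0]))
#     # -> ~0.15 (mean absolute error)
#     metric_diffusive_state(np.array([0., 1., 1.]), np.array([0., 1., 0.]))
#     # -> ~0.667 (micro-averaged F1, which here equals the accuracy)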
# %% ../source_nbs/lib_nbs/utils_challenge.ipynb 60
def check_no_changepoints(GT_cp, GT_alpha, GT_D, GT_s,
preds_cp, preds_alpha, preds_D, preds_s,
T:bool|int = None):
'''
Given predictions over changepoints and variables, checks whether GT and/or preds show an
absence of change points. If so, takes that into account to pair the variables.
Parameters
----------
GT_cp : list, int, float
Groundtruth change points
GT_alpha : list, float
Groundtruth anomalous exponent
GT_D : list, float
Groundtruth diffusion coefficient
GT_s : list, float
Groundtruth diffusive state
preds_cp : list, int, float
Predicted change points
preds_alpha : list, float
Predicted anomalous exponent
preds_D : list, float
Predicted diffusion coefficient
preds_s : list, float
Predicted diffusive state
T : bool,int
(optional) Length of the trajectories. If none, last change point is length.
Returns
-------
tuple
- False if there are change points. True if there were missing change points.
- Next three are either all Nones if change points were detected, or paired exponents,
coefficient and states if some change points were missing.
'''
if isinstance(GT_cp, int) or isinstance(GT_cp, float):
GT_cp = [GT_cp]
if isinstance(preds_cp, int) or isinstance(preds_cp, float):
preds_cp = [preds_cp]
no_GT_cp = False; no_preds_cp = False
# CP always contains the final point of the trajectory, hence the minimal length is one
if len(GT_cp) == 1: no_GT_cp = True
if len(preds_cp) == 1: no_preds_cp = True
if no_GT_cp + no_preds_cp == 0:
return False, None, None, None
else:
[row_ind, col_ind], _ = segment_assignment(GT_cp, preds_cp, T)
if no_GT_cp and not no_preds_cp:
paired_alpha = np.array([[GT_alpha[0], preds_alpha[col_ind[0]]]])
paired_D = np.array([[GT_D[0], preds_D[col_ind[0]]]])
paired_s = np.array([[GT_s[0], preds_s[col_ind[0]]]])
if no_preds_cp and not no_GT_cp:
row_position = np.argwhere(col_ind == 0).flatten()[0]
paired_alpha = np.array([[GT_alpha[row_position], preds_alpha[col_ind[row_position]]]])
paired_D = np.array([[GT_D[row_position], preds_D[col_ind[row_position]]]])
paired_s = np.array([[GT_s[row_position], preds_s[col_ind[row_position]]]])
if no_preds_cp and no_GT_cp:
paired_alpha = np.array([[GT_alpha[0], preds_alpha[0]]])
paired_D = np.array([[GT_D[0], preds_D[0]]])
paired_s = np.array([[GT_s[0], preds_s[0]]])
return True, paired_alpha, paired_D, paired_s
# %% ../source_nbs/lib_nbs/utils_challenge.ipynb 61
def segment_property_errors(GT_cp, GT_alpha, GT_D, GT_s,
preds_cp, preds_alpha, preds_D, preds_s,
return_pairs = False,
T = None):
'''
Given predictions over change points and the values of the diffusion parameters in the
resulting segments, computes the defined metrics.
Parameters
----------
GT_cp : list, int, float
Groundtruth change points
GT_alpha : list, float
Groundtruth anomalous exponent
GT_D : list, float
Groundtruth diffusion coefficient
GT_s : list, float
Groundtruth diffusive state
preds_cp : list, int, float
Predicted change points
preds_alpha : list, float
Predicted anomalous exponent
preds_D : list, float
Predicted diffusion coefficient
preds_s : list, float
Predicted diffusive state
return_pairs : bool
If True, returns the assigment pairs for each diffusive property.
T : bool,int
(optional) Length of the trajectories. If none, last change point is length.
Returns
-------
tuple
- if return_pairs = True, returns the assigned pairs of diffusive properties
- if return_pairs = False, returns the errors for each diffusive property
'''
# Check cases in which changepoints were not detected or there were none in the groundtruth
no_change_point_case, paired_alpha, paired_D, paired_s = check_no_changepoints(GT_cp, GT_alpha, GT_D, GT_s,
preds_cp, preds_alpha, preds_D, preds_s, T)
if not no_change_point_case:
# Solve the assignment problem
[row_ind, col_ind], _ = segment_assignment(GT_cp, preds_cp, T)
# iterate over the groundtruth segments
paired_alpha, paired_D, paired_s = [], [], []
for idx_seg, (gt_alpha, gt_D) in enumerate(zip(GT_alpha, GT_D)):
row_position = np.argwhere(row_ind == idx_seg).flatten()
# if the GT segment was associated to a prediction
if len(row_position) > 0:
row_position = int(row_position)
# alpha
gt_a_seg = GT_alpha[idx_seg]
pred_a_seg = preds_alpha[col_ind[row_position]]
# d
gt_d_seg = GT_D[idx_seg]
pred_d_seg = preds_D[col_ind[row_position]]
# state
gt_s_seg = GT_s[idx_seg]
pred_s_seg = preds_s[col_ind[row_position]]
paired_alpha.append([gt_a_seg, pred_a_seg])
paired_D.append([gt_d_seg, pred_d_seg])
paired_s.append([gt_s_seg, pred_s_seg])
paired_alpha, paired_D, paired_s = np.array(paired_alpha), np.array(paired_D), np.array(paired_s)
if return_pairs:
return paired_alpha, paired_D, paired_s
else:
error_alpha = metric_anomalous_exponent(paired_alpha[:,0], paired_alpha[:,1])
error_D = metric_diffusion_coefficient(paired_D[:,0], paired_D[:,1])
error_s = metric_diffusive_state(paired_s[:,0], paired_s[:,1])
return error_alpha, error_D, error_s
# %% ../source_nbs/lib_nbs/utils_challenge.ipynb 69
from .models_phenom import models_phenom
def extract_ensemble(state_label, dic):
'''
Given an array of the diffusive state and a dictionary with the diffusion information,
returns a summary of the ensemble properties for the current dataset.
Parameters
----------
state_label : array
Array containing the diffusive state of the particles in the dataset.
For multi-state and dimerization, this must be the number associated to the
state (for dimerization, 0 is free, 1 is dimerized). For the rest, we follow
the numeration of models_phenom().lab_state.
dic : dict
Dictionary containing the information of the input dataset.
Returns
-------
array
Matrix containing the ensemble information of the input dataset. It has the following shape:
|mu_alpha1 mu_alpha2 ... |
|sigma_alpha1 sigma_alpha2 ... |
|mu_D1           mu_D2         ... |
|sigma_D1 sigma_D2 ... |
|counts_state1 counts_state2 ... |
'''
# Single state
if dic['model'] == 'single_state':
ensemble = np.vstack((dic['alphas'][0],
dic['alphas'][1],
dic['Ds'][0],
dic['Ds'][1],
len(state_label)
))
# Multi-state
if dic['model'] == 'multi_state':
states, counts = np.unique(state_label, return_counts=True)
# If the number of visited states is not equal to the expected number of states
if len(states) != dic['alphas'].shape[0]:
states_corrected = np.ones(dic['alphas'].shape[0])
counts_corrected = np.ones(dic['alphas'].shape[0])
for s, c in zip(states, counts):
counts_corrected[int(s)] = c
else:
counts_corrected = counts
ensemble = np.vstack((dic['alphas'][:, 0],
dic['alphas'][:, 1],
dic['Ds'][:, 0],
dic['Ds'][:, 1],
counts_corrected
))
# Immobile
if dic['model'] == 'immobile_traps':
counts = [len(state_label[state_label == models_phenom().lab_state.index('i')]),
len(state_label[state_label == models_phenom().lab_state.index('f')])]
ensemble = np.vstack(([0, dic['alphas'][0]],
[0, dic['alphas'][1]],
[0, dic['Ds'][0]],
[0, dic['Ds'][1]],
counts
))
# dimerization
if dic['model'] == 'dimerization':
counts = [len(state_label[state_label == 0]),
len(state_label[state_label == 1])]
ensemble = np.vstack((dic['alphas'][:, 0],
dic['alphas'][:, 1],
dic['Ds'][:, 0],
dic['Ds'][:, 1],
counts
))
if dic['model'] == 'confinement':
counts = [len(state_label[state_label == models_phenom().lab_state.index('f')]),
len(state_label[state_label == models_phenom().lab_state.index('c')])]
ensemble = np.vstack((dic['alphas'][:, 0],
dic['alphas'][:, 1],
dic['Ds'][:, 0],
dic['Ds'][:, 1],
counts
))
return ensemble
# %% ../source_nbs/lib_nbs/utils_challenge.ipynb 70
import scipy.stats
def multimode_dist(params, weights, bound, x, normalized = False):
'''
Generates a multimodal distribution with given parameters.
Also accounts for a single mode if weights is a float or an int.
Parameters
----------
params : list
Mean and variances of every mode.
weights : list, float
Weight of every mode. If float, we consider a single mode.
bound : tuple
Bounds (min, max) of the function's support.
x : array
Support upon which the distribution is created.
normalized : bool
If True, returns the normalized distribution.
Returns
-------
array
Value of the distribution in each point of the given support
'''
func = scipy.stats.truncnorm
dist = np.zeros_like(x)
lower, upper = bound
# If we have single state, change values to list to still
# have a loop:
if isinstance(weights, float) or isinstance(weights, int):
params = [params]
weights = [weights]
for param, w in zip(params, weights):
mean, var = param
# introduce a cutoff to avoid nan when var = 0
if var == 0: var = 1e-9
unimodal = func.pdf(x,
(lower-mean)/np.sqrt(var),
(upper-mean)/np.sqrt(var),
loc = mean,
scale = np.sqrt(var))
dist += w*unimodal
if normalized:
dist /= np.sum(dist)
return dist
# %% ../source_nbs/lib_nbs/utils_challenge.ipynb 71
def distribution_distance(p:np.array, # distribution 1
q:np.array # distribution 2
)-> float: # distance between distributions
''' Calculates mean absolute error between two distributions. '''
# return np.sum(np.where(p != 0, p * np.log(p / q), 0))
return np.abs(p-q).mean()
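# Illustrative sketch (not part of the original module): mean absolute error
# between two bimodal alpha distributions that differ only in the first mode's mean.
#     x = np.linspace(0, 2, 1000)
#     p = multimode_dist([[0.5, 0.01], [1.5, 0.01]], [0.5, 0.5], (0, 2), x, normalized=True)
#     q = multimode_dist([[0.6, 0.01], [1.5, 0.01]], [0.5, 0.5], (0, 2), x, normalized=True)
#     distribution_distance(p, q)
#     # -> small positive float; exactly 0 only when p == q everywhere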
# %% ../source_nbs/lib_nbs/utils_challenge.ipynb 74
from .models_phenom import models_phenom
def error_Ensemble_dataset(true_data, pred_data, return_distributions = False):
'''
Calculates the ensemble metrics for the ANDI 2 challenge. The inputs are matrices of the shape:
| col1 (state 1) | col2 (state 2) | col3 (state 3) | ... |
|:--------------:|:--------------:|:--------------:|:---:|
| $\mu_a^1$ | $\mu_a^2$ | $\mu_a^3$ | ... |
| $\sigma_a^1$ | $\sigma_a^2$ | $\sigma_a^3$ | ... |
| $\mu_D^1$ | $\mu_D^2$ | $\mu_D^3$ | ... |
| $\sigma_D^1$ | $\sigma_D^2$ | $\sigma_D^3$ | ... |
| $N_1$ | $N_2$ | $N_3$ | ... |
Parameters
----------
true_data : array
Matrix containing the groundtruth data.
pred_data : array
Matrix containing the predicted data.
return_distributions : bool
If True, the function also outputs the generated distributions.
Returns
-------
tuple
- distance_alpha: distance between anomalous exponents
- distance_D: distance between diffusion coefficients
- dists (if asked): distributions of both groundtruth and predicted data.
'''
dists = []
for data in [true_data, pred_data]:
if len(data.shape) > 1: # If we have more than one state
alpha_info = np.delete(data, [2,3, -1], 0)
d_info = data[2:-1,:]
weights = data[-1,:]
if weights.sum() > 1: weights /= weights.sum()
else: # If single state
alpha_info = data[:2]
d_info = data[2:-1]
weights = 1
for idx, (var, bound) in enumerate(zip([alpha_info, d_info],
[models_phenom().bound_alpha, models_phenom().bound_D])):
if idx == 0: x = np.linspace(bound[0], bound[1], 1000)
else: x = np.logspace(np.log10(bound[0]), np.log10(bound[1]), 1000)
dists.append(multimode_dist(var.T, weights, bound, x))
# Distance between alpha dists
distance_alpha = distribution_distance(dists[0], dists[2])
distance_D = distribution_distance(dists[1], dists[3])
if return_distributions:
return distance_alpha, distance_D, dists
else:
return distance_alpha, distance_D
# %% ../source_nbs/lib_nbs/utils_challenge.ipynb 76
def check_prediction_length(pred):
'''
Given a trajectory segments prediction, checks whether it has C changepoints and C+1 segment property values.
As it must also contain the index of the trajectory, this is summarized by the length being a multiple of 4.
In some cases, the user also needs to predict the final point of the trajectory. In that case,
we will have a remainder of 1.
'''
if len(pred) % 4 == 0 or len(pred) % 4 == 1 :
return True
else:
return False
# %% ../source_nbs/lib_nbs/utils_challenge.ipynb 77
def separate_prediction_values(pred):
'''
Given a prediction over trajectory segments, extracts the predictions for each segment property
as well as the changepoint values.
'''
Ds = pred[1::4]
alphas = pred[2::4]
states = pred[3::4]
cp = pred[4::4]
return Ds, alphas, states, cp
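# Illustrative sketch (not part of the original module): a prediction line for a
# two-segment trajectory follows the pattern
# traj_idx, D_1, alpha_1, state_1, CP_1, D_2, alpha_2, state_2, T.
#     pred = [7, 0.1, 0.5, 2, 50, 1.0, 1.2, 2, 100]
#     check_prediction_length(pred)      # -> True (9 % 4 == 1)
#     separate_prediction_values(pred)
#     # -> ([0.1, 1.0], [0.5, 1.2], [2, 2], [50, 100])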
# %% ../source_nbs/lib_nbs/utils_challenge.ipynb 78
def load_file_to_df(path_file,
columns = ['traj_idx', 'Ds', 'alphas', 'states', 'changepoints']):
'''
Given the path of a .txt file, extracts the segmentation predictions based on
the rules of the ANDI 2 challenge.
'''
with open(path_file) as f:
lines_pred = f.read().splitlines()
df = pandas.DataFrame(columns = columns)
for line in lines_pred:
# Extract values with comma separator and transform to float
pred_traj = line.split(',')
pred = [float(i) for i in pred_traj]
# Check that prediction has the correct shape
pred_correct = check_prediction_length(pred)
# If correct size, then extract parameters and add it to dataframe
if pred_correct:
preds_D, preds_a, preds_s, preds_cp = separate_prediction_values(pred)
current_row = df.shape[0]
for param, pred_param in zip(columns, [pred[0], preds_D, preds_a, preds_s, preds_cp]):
df.loc[current_row, param] = pred_param
return df
# %% ../source_nbs/lib_nbs/utils_challenge.ipynb 83
def _get_error_bounds():
'''
Sets the current maximum errors we can do in the different diffusive properties.
'''
threshold_error_alpha = 2
threshold_error_D = 1e5
threshold_error_s = -1
threshold_cp = 10
return threshold_error_alpha, threshold_error_D, threshold_error_s, threshold_cp
# %% ../source_nbs/lib_nbs/utils_challenge.ipynb 84
def error_SingleTraj_dataset(df_pred, df_true,
threshold_error_alpha = 2, max_val_alpha = 2, min_val_alpha = 0,
threshold_error_D = 1e5, max_val_D = 1e6, min_val_D = 1e-6,
threshold_error_s = -1,
threshold_cp = 10,
prints = True, disable_tqdm = False
):
'''
Given two dataframes, corresponding to the predictions and true labels of a set
of trajectories from the ANDI 2 challenge, calculates the corresponding metrics.
Columns must be for both (no order needed):
traj_idx | alphas | Ds | changepoints | states
df_true must also contain a column 'T'.
Parameters
----------
df_pred : dataframe
Predictions
df_true : dataframe
Groundtruth
threshold_error_alpha : float
(same for D, s, cp) Maximum possible error allowed. If bigger, it is substituted by this error.
max_val_alpha : float
(same for D, s, cp) Maximum value of the parameter.
min_val_alpha : float
(same for D, s, cp) Minimum value of the parameter.
prints : bool
If True, prints the results.
disable_tqdm : bool
If True, disables the progress bar.
Returns
-------
tuple
- rmse_CP: root mean squared error of the change points
- JI: Jaccard index of the change points
- error_alpha: mean absolute error of the anomalous exponents
- error_D: mean squared log error of the diffusion coefficients
- error_s: F1 score of the diffusive states
'''
# Initiate counter of missing trajectories
missing_traj = 0
# Delete stacking variables, just in case...
try: del paired_alpha, paired_D, paired_s
except: pass
# for every trajectory, we stack paired segment properties. We also store changepoints info
ensemble_pred_cp, ensemble_true_cp = [], []
for t_idx in tqdm(df_true['traj_idx'].values, disable = disable_tqdm):
traj_trues = df_true.loc[df_true.traj_idx == t_idx]
traj_preds = df_pred.loc[df_pred.traj_idx == t_idx]
if traj_preds.shape[0] == 0:
# If there is no trajectory, we give maximum error. To do so, we redefine predictions
# and trues so that they give maximum error
missing_traj += 1
preds_cp, preds_alpha, preds_D, preds_s = [[10],
[0],
[1],
[0]]
trues_cp, trues_alpha, trues_D, trues_s = [[10+threshold_cp],
[threshold_error_alpha],
[1+threshold_error_D],
[10]]
else:
preds_cp, preds_alpha, preds_D, preds_s = [np.array(traj_preds.changepoints.values[0]).astype(int),
traj_preds.alphas.values[0],
traj_preds.Ds.values[0],
traj_preds.states.values[0]]
trues_cp, trues_alpha, trues_D, trues_s = [np.array(traj_trues.changepoints.values[0]).astype(int),
traj_trues.alphas.values[0],
traj_trues.Ds.values[0],
traj_trues.states.values[0]]
# Collecting changepoints for metric
# In this metric, we don't want to enter the final point of the trajectory
ensemble_pred_cp.append(preds_cp[:-1])
ensemble_true_cp.append(trues_cp[:-1])
# collecting segment properties error after segment assignment
pair_a, pair_d, pair_s = segment_property_errors(trues_cp, trues_alpha, trues_D, trues_s,
preds_cp, preds_alpha, preds_D, preds_s,
return_pairs = True)
try:
paired_alpha = np.vstack((paired_alpha, pair_a))
paired_D = np.vstack((paired_D, pair_d))
paired_s = np.vstack((paired_s, pair_s))
except:
paired_alpha = pair_a
paired_D = pair_d
paired_s = pair_s
#### Calculate metrics from assembled properties
# checking for nans and problems in predictions
wrong_alphas = np.argwhere(np.isnan(paired_alpha[:, 1]) | (paired_alpha[:, 1] > 2) | (paired_alpha[:, 1] < 0)).flatten()
paired_alpha[wrong_alphas, 1] = paired_alpha[wrong_alphas, 0] + threshold_error_alpha
wrong_ds = np.argwhere(np.isnan(paired_D[:, 1])).flatten()
paired_D = np.abs(paired_D)
paired_D[wrong_ds, 1] = paired_D[wrong_ds, 0] + threshold_error_D
wrong_s = np.argwhere((paired_s[:, 1] > 4) | (paired_s[:, 1]<0))
paired_s[wrong_s, 1] = threshold_error_s
# Changepoints
rmse_CP, JI = ensemble_changepoint_error(ensemble_true_cp, ensemble_pred_cp, threshold = threshold_cp)
# Segment properties
error_alpha = metric_anomalous_exponent(paired_alpha[:,0], paired_alpha[:,1])
error_D = metric_diffusion_coefficient(paired_D[:,0], paired_D[:,1])
error_s = metric_diffusive_state(paired_s[:,0], paired_s[:,1])
if prints:
print('Summary of metrics assessment:')
if missing_traj > 0:
print(f'\n{missing_traj} missing trajectory/ies. ')
if rmse_CP == threshold_cp:
print(f'No change points found. RMSE set to max ({threshold_cp})')
print(f'\nChangepoint Metrics \nRMSE: {round(rmse_CP, 3)} \nJaccard Index: {round(JI, 3)}',
f'\n\nDiffusion property metrics \nMetric anomalous exponent: {error_alpha} \nMetric diffusion coefficient: {error_D} \nMetric diffusive state: {error_s}')
return rmse_CP, JI, error_alpha, error_D, error_s
# %% ../source_nbs/lib_nbs/utils_challenge.ipynb 96
import re
import sys
import os
# %% ../source_nbs/lib_nbs/utils_challenge.ipynb 97
def listdir_nohidden(path):
for f in os.listdir(path):
if not f.startswith(('.','_')):
yield f
# %% ../source_nbs/lib_nbs/utils_challenge.ipynb 98
def codalab_scoring(input_dir , output_dir):
'''
Given an input directory where the predictions and groundtruths for the ANDI 2 challenge can be found,
calculates metrics and outputs the results in a file in the given output directory.
This code is prepared to be run in Codalab.
'''
# Error bounds
threshold_error_alpha, threshold_error_D, threshold_error_s, threshold_cp = _get_error_bounds()
### Saving variables
# Track 1 - Videos
t1_ens = {'alpha': [],
'D': []}
t1_st = {'RMSE': [],
'JI': [],
'alpha': [],
'D': [],
'state': [],
'num_traj': [],
'num_traj_CP': []} # this last one takes into account no changepoint from single state
# Track 2 - Trajectories
t2_ens = {'alpha': [],
'D': []}
t2_st = {'RMSE': [],
'JI': [],
'alpha': [],
'D': [],
'state': [],
'num_traj': [],
'num_traj_CP': []} # this last one takes into account no changepoint from single state
# Handling paths of input files
submit_dir = os.path.join(input_dir, 'pred')
truth_dir = os.path.join(input_dir, 'true')
if not os.path.isdir(submit_dir):
print( "%s doesn't exist", truth_dir)
# Calculate metrics if directories exist
if os.path.isdir(submit_dir) and os.path.isdir(truth_dir):
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# Extracts all files in reference directory
true_files_list = sorted(list(listdir_nohidden(truth_dir)))#os.listdir(truth_dir)
# Run over all files
missing_tracks = []
for filename in tqdm(true_files_list):
task = re.search('_(.+?)_labs', filename).group(1)
exp = re.search('exp_(.+?)_', filename).group(1)
fov = re.search(r'fov_(.+?)\.', filename).group(1) # escape the dot so multi-digit FOV numbers are captured
# check track and save found tracks
track = int(filename[1])
true_file = os.path.join(truth_dir, filename)
corresponding_submission_file = os.path.join(submit_dir, filename)
if not os.path.isfile(corresponding_submission_file):
if track not in missing_tracks:
missing_tracks.append(track)
if len(missing_tracks) == 2:
raise FileNotFoundError(f'Failed to find prediction files.')
else:
continue
# if not os.path.isfile(corresponding_submission_file) and missing_tracks == 1:
# raise FileNotFoundError(f'Failed to find prediction files.')
# raise FileNotFoundError(f'Prediction file for: track {track}, task {task}, experiment {exp} and FOV {fov} not found.')
# extract model
if task == 'ens':
model = np.genfromtxt(true_file, dtype='str', skip_footer=5)[1][:-1]
else:
file_ens = os.path.join(truth_dir, f't{track}_ens_labs_exp_{exp}_fov_{fov}.txt')
model = np.genfromtxt(file_ens, dtype='str', skip_footer=5)[1][:-1]
# Ensemble
if task == 'ens':
true = np.loadtxt(true_file, skiprows=1, delimiter = ';')
pred = np.loadtxt(corresponding_submission_file, skiprows=1, delimiter = ';')
mae_alpha, mae_D = error_Ensemble_dataset(true_data = true,
pred_data = pred)
if track == 1:
t1_ens['alpha'].append(mae_alpha)
t1_ens['D'].append(mae_D)
if track == 2:
t2_ens['alpha'].append(mae_alpha)
t2_ens['D'].append(mae_D)
# Single trajectory
if task == 'traj':
df_true = load_file_to_df(true_file)
df_pred = load_file_to_df(corresponding_submission_file)
rmse_CP, JI, error_alpha, error_D, error_s = error_SingleTraj_dataset(df_true = df_true, df_pred = df_pred,
threshold_error_alpha = threshold_error_alpha,
threshold_error_D = threshold_error_D,
threshold_error_s = threshold_error_s,
threshold_cp = threshold_cp,
prints = False, disable_tqdm = True)
if track == 1:
# to avoid single state entering in CP metrics
if model != 'single_state':
t1_st['RMSE'].append(rmse_CP)
t1_st['JI'].append(JI)
t1_st['num_traj_CP'].append(df_true.shape[0])
t1_st['alpha'].append(error_alpha)
t1_st['D'].append(error_D)
t1_st['state'].append(error_s)
t1_st['num_traj'].append(df_true.shape[0])
if track == 2:
# to avoid single state entering in CP metrics
if model != 'single_state':
t2_st['RMSE'].append(rmse_CP)
t2_st['JI'].append(JI)
t2_st['num_traj_CP'].append(df_true.shape[0])
t2_st['alpha'].append(error_alpha)
t2_st['D'].append(error_D)
t2_st['state'].append(error_s)
t2_st['num_traj'].append(df_true.shape[0])
# print(f'Track {track}, Task {task}, Exp {exp}, FOV {fov}: OK!')
### Saving data
# TODO: check how to do the mean
# Define output file
output_filename = os.path.join(output_dir, 'scores.txt')
output_file = open(output_filename, 'w')
# Single trajectory data
# We define a variable that gives the worst values for each metric. This is applied
# separately for every FOV
worst_value_st = {'RMSE': threshold_cp,
'JI': 0,
'alpha': threshold_error_alpha,
'D': threshold_error_D,
'state': 0}
# Run over all keys
for key in t1_st:
# Compare results with the worst-case value of each metric
if key in ['RMSE', 'alpha', 'D']:
if key == 'RMSE': avg_against = 'num_traj_CP'
else: avg_against = 'num_traj'
if 1 not in missing_tracks:
save_t1 = np.nanmin(np.vstack([t1_st[key],
np.ones_like(t1_st[key])*worst_value_st[key]]),
axis = 0)
save_t1 = np.average(save_t1, axis = 0, weights = t1_st[avg_against])
if 2 not in missing_tracks:
save_t2 = np.nanmin(np.vstack([t2_st[key],
np.ones_like(t2_st[key])*worst_value_st[key]]),
axis = 0)
save_t2 = np.average(save_t2, axis = 0, weights = t2_st[avg_against])
elif key in ['JI', 'state']:
if key == 'JI': avg_against = 'num_traj_CP'
else: avg_against = 'num_traj'
if 1 not in missing_tracks:
save_t1 = np.nanmax(np.vstack([t1_st[key],
np.ones_like(t1_st[key])*worst_value_st[key]]),
axis = 0)
save_t1 = np.average(save_t1, axis = 0, weights = t1_st[avg_against])
if 2 not in missing_tracks:
save_t2 = np.nanmax(np.vstack([t2_st[key],
np.ones_like(t2_st[key])*worst_value_st[key]]),
axis = 0)
save_t2 = np.average(save_t2, axis = 0, weights = t2_st[avg_against])
if 1 not in missing_tracks:
output_file.write('T1_st_'+ key +f' : {save_t1}\n')
if 2 not in missing_tracks:
output_file.write('T2_st_'+ key +f' : {save_t2}\n')
### Saving ensemble data
# TODO: what are the thresholds for this?
worst_value_ens = {'alpha': 100,
'D': 100}
for key in t1_ens:
if key == 'num_traj': continue
if 1 not in missing_tracks:
save_t1 = np.nanmin(np.vstack([t1_ens[key],
np.ones_like(t1_ens[key])*worst_value_ens[key]]),
axis = 0).mean()
if 2 not in missing_tracks:
save_t2 = np.nanmin(np.vstack([t2_ens[key],
np.ones_like(t2_ens[key])*worst_value_ens[key]]),
axis = 0).mean()
if 1 not in missing_tracks:
output_file.write('T1_ens_'+ key +f' : {save_t1}\n')
if 2 not in missing_tracks:
output_file.write('T2_ens_'+ key +f' : {save_t2}\n')
output_file.close()
| 58,236 | 37.288626 | 169 |
py
|
andi_datasets
|
andi_datasets-master/andi_datasets/utils_trajectories.py
|
# AUTOGENERATED! DO NOT EDIT! File to edit: ../source_nbs/lib_nbs/utils_trajectories.ipynb.
# %% auto 0
__all__ = ['pert', 'gaussian', 'bm1D', 'regularize', 'sample_sphere', 'normalize', 'normalize_fGN', 'trigo', 'find_nan_segments',
'segs_inside_fov', 'inside_fov_dataset', 'plot_trajs']
# %% ../source_nbs/lib_nbs/utils_trajectories.ipynb 2
import numpy as np
# %% ../source_nbs/lib_nbs/utils_trajectories.ipynb 5
def pert(params:list, # Pert parameters a, b, c
size:int = 1, # number of samples to get
lamb = 4 # lambda Pert parameter
)-> np.array: # samples from the given Pert distribution
'''
Samples from a Pert distribution of given parameters
'''
if isinstance(params, float) or isinstance(params, int):
if size == 1:
return params
else:
return np.array(params).repeat(size)
a, b, c = params
# if all parameters are the same, we consider it a delta distribution
if a == b == c:
return np.array([a]*size)
r = c - a
alpha = 1 + lamb * (b - a) / r
beta = 1 + lamb * (c - b) / r
return a + np.random.beta(alpha, beta, size=size) * r
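# Illustrative sketch (not part of the original module): 1000 samples from a Pert
# distribution with minimum 0.2, mode 0.5 and maximum 1.0; all samples fall in [a, c].
#     samples = pert([0.2, 0.5, 1.0], size=1000)
#     samples.min() >= 0.2 and samples.max() <= 1.0   # -> True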
# %% ../source_nbs/lib_nbs/utils_trajectories.ipynb 7
import scipy.stats
def gaussian(params:list|int, # If list, mean and variance of the Gaussian. If int, we consider variance = 0
size = 1, # Number of samples to get.
bound = None # Bound of the Gaussian, if any.
)-> np.array: # Samples from the given Gaussian distribution
'''
Samples from a Gaussian distribution of given parameters.
'''
# if we are given a single number, we consider it the mean, with variance = 0
if isinstance(params, float) or isinstance(params, int):
if size == 1:
return params
else:
return np.array(params).repeat(size)
else:
mean, var = params
if bound is None:
val = np.random.normal(mean, np.sqrt(var), size)
if bound is not None:
lower, upper = bound
if var == 0:
if mean > upper or mean < lower:
raise ValueError('Demanded value outside of range.')
val = np.ones(size)*mean
else:
val = scipy.stats.truncnorm.rvs((lower-mean)/np.sqrt(var),
(upper-mean)/np.sqrt(var),
loc = mean,
scale = np.sqrt(var),
size = size)
if size == 1:
return val[0]
else:
return val
# %% ../source_nbs/lib_nbs/utils_trajectories.ipynb 11
def bm1D(T:int, # Length of the trajectory
D:float, # Diffusion coefficient
deltaT = False # Sampling time
)-> np.array: # Brownian motion trajectory
'''Creates a 1D Brownian motion trajectory'''
if D < 0:
raise ValueError('Only positive diffusion coefficients allowed.')
if not deltaT:
deltaT = 1
return np.cumsum(np.sqrt(2*D*deltaT)*np.random.randn(int(T)))
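# Illustrative sketch (not part of the original module): with D = 0.5 and the
# default deltaT = 1, the increments are i.i.d. Gaussian with variance 2*D*deltaT = 1.
#     traj = bm1D(T=100, D=0.5)
#     np.diff(traj).var()   # -> roughly 1.0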
# %% ../source_nbs/lib_nbs/utils_trajectories.ipynb 13
def regularize(positions:np.array, # Positions of the trajectory to regularize
times:np.array, # Times at which previous positions were recorded
T:int # Length of the output trajectory
)->np.array: # Regularized trajectory.
'''
Regularizes a trajectory with irregular sampling times.
'''
times = np.append(0, times)
pos_r = np.zeros(T)
for idx in range(len(times)-1):
pos_r[int(times[idx]):int(times[idx+1])] = positions[idx]
pos_r -= pos_r[0]
return pos_r
# %% ../source_nbs/lib_nbs/utils_trajectories.ipynb 15
def sample_sphere(N:int, # Number of points to generate.
# Radius of the sphere. If int, all points have
# the same radius; if numpy.array, each point has a different radius.
R:int|list,
)->np.array: # Sampled numbers
'''
Samples random points that lie on the surface of a 3D sphere centered at
zero with radius R.
'''
vecs = np.random.randn(3, N)
vecs /= np.linalg.norm(vecs, axis=0)
return R*vecs
# %% ../source_nbs/lib_nbs/utils_trajectories.ipynb 17
def normalize(trajs):
'''
Normalizes trajectories by subtracting their average and dividing the
displacements by their standard deviation.
Parameters
----------
trajs : np.array
Array of length N x T or just T containing the ensemble or single trajectory to normalize.
'''
# Checking and saving initial shape
initial_shape = trajs.shape
if len(trajs.shape) == 1: # single one d trajectory
trajs = trajs.reshape(1, trajs.shape[0], 1)
if len(trajs.shape) == 2: # ensemble of one d trajectories
trajs = trajs.reshape(trajs.shape[0], trajs.shape[1], 1)
trajs = trajs - trajs.mean(axis=1, keepdims=True)
displacements = (trajs[:,1:,:] - trajs[:,:-1,:]).copy()
variance = np.std(displacements, axis=1) # actually the standard deviation of the displacements
variance[variance == 0] = 1
new_trajs = np.cumsum((displacements/np.expand_dims(variance, axis = 1)), axis = 1)
initial_zeros = np.expand_dims(np.zeros((new_trajs.shape[0], new_trajs.shape[-1])), axis = 1)
return np.concatenate((initial_zeros, new_trajs), axis = 1).reshape(initial_shape)
# %% ../source_nbs/lib_nbs/utils_trajectories.ipynb 18
def normalize_fGN(disp, alpha, D, T: int, deltaT : int = 1):
'''
Normalizes fractional Gaussian Noise created with `stochastic` library.
Parameters
----------
disp : Array-like of shape N x T or just T containing the displacements to normalize.
alpha : float in [0,2] or array-like of length N x 1
Anomalous exponent
D : float or array-like of shape N x 1
Diffusion coefficient
T : int
Number of timesteps the displacements were generated with
deltaT : int, optional
Sampling time
Returns
-------
Array-like containing T displacements of given parameters
'''
return disp*np.sqrt(T)**(alpha)*np.sqrt(2*D*deltaT)
# %% ../source_nbs/lib_nbs/utils_trajectories.ipynb 22
import math
class trigo():
'''
This class gathers multiple useful trigonometric relations.
Inspired from:
https://stackoverflow.com/questions/30844482/what-is-most-efficient-way-to-find-the-intersection-of-a-line-and-a-circle-in-py
and http://mathworld.wolfram.com/Circle-LineIntersection.html'''
def circle_line_segment_intersection(circle_center, circle_radius,
pt1, pt2,
full_line=False, tangent_tol=1e-9):
"""
Find the points at which a circle intersects a line-segment. This can happen at 0, 1, or 2 points.
Parameters
----------
circle_center : tuple
The (x, y) location of the circle center
circle_radius : float
The radius of the circle
pt1 : tuple
The (x, y) location of the first point of the segment
pt2 : tuple
The (x, y) location of the second point of the segment
full_line : bool
True to find intersections along full line - not just in the segment.
False will just return intersections within the segment.
tangent_tol : float
Numerical tolerance at which we decide the intersections are close enough to consider it a tangent
Returns
-------
Sequence[Tuple[float, float]]
A list of length 0, 1, or 2, where each element is a point at which the circle intercepts a line segment.
"""
(p1x, p1y), (p2x, p2y), (cx, cy) = pt1, pt2, circle_center
(x1, y1), (x2, y2) = (p1x - cx, p1y - cy), (p2x - cx, p2y - cy)
dx, dy = (x2 - x1), (y2 - y1)
dr = (dx ** 2 + dy ** 2)**.5
big_d = x1 * y2 - x2 * y1
discriminant = circle_radius ** 2 * dr ** 2 - big_d ** 2
if discriminant < 0: # No intersection between circle and line
return []
else: # There may be 0, 1, or 2 intersections with the segment
intersections = [
(cx + (big_d * dy + sign * (-1 if dy < 0 else 1) * dx * discriminant**.5) / dr ** 2,
cy + (-big_d * dx + sign * abs(dy) * discriminant**.5) / dr ** 2)
for sign in ((1, -1) if dy < 0 else (-1, 1))] # This makes sure the order along the segment is correct
if not full_line: # If only considering the segment, filter out intersections that do not fall within the segment
fraction_along_segment = [(xi - p1x) / dx if abs(dx) > abs(dy) else (yi - p1y) / dy for xi, yi in intersections]
intersections = [pt for pt, frac in zip(intersections, fraction_along_segment) if 0 <= frac <= 1]
if len(intersections) == 2 and abs(discriminant) <= tangent_tol: # If line is tangent to circle, return just one point (as both intersections have same location)
return [intersections[0]]
else:
return intersections
def seg_to_vec(seg):
''' Find the vector given a segment created by two 2D points'''
return [(seg[0][0]-seg[1][0]), (seg[0][1]-seg[1][1])]
def ang_line(lineA, lineB):
''' Calculates the angle between two lines/segments'''
# Get vector form
vA = trigo.seg_to_vec(lineA)
vB = trigo.seg_to_vec(lineB)
return trigo.ang_vec(vA, vB)
def ang_vec(vA, vB):
''' Calculates the angle between two vectors'''
# Get dot prod
dot_prod = np.dot(vA, vB)
# Get magnitudes
magA = np.dot(vA, vA)**0.5
magB = np.dot(vB, vB)**0.5
# Get cosine value
cos_ = dot_prod/magA/magB
# Get angle in radians
return math.acos(cos_)
def rotate_vec(vec, angle):
return (vec[0]*np.cos(angle) + vec[1]*np.sin(angle), -vec[0]*np.sin(angle) + vec[1]*np.cos(angle))
# %% ../source_nbs/lib_nbs/utils_trajectories.ipynb 25
def find_nan_segments(a, cutoff_length):
''' Extracts all segments of nans longer than the set cutoff_length. If no segments are found, returns None.
For each segment, returns its beginning and end index.
Output: array of size (number of segments) x 2.
'''
mask = np.concatenate(([False],np.isnan(a),[False]))
if ~mask.any():
return None
else:
idx = np.nonzero(mask[1:] != mask[:-1])[0]
seg_length = (idx[1::2] - idx[::2])
idx_seg_long = np.argwhere(seg_length >= cutoff_length).flatten()
if idx_seg_long.shape[0] == 0:
return None
else:
return np.array([idx[::2][idx_seg_long], idx[1::2][idx_seg_long]]).transpose()
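# Illustrative sketch (not part of the original module): only the 3-frame nan run
# survives the cutoff; indices are returned as (start, end-exclusive) pairs.
#     a = np.array([1., np.nan, np.nan, np.nan, 2., np.nan, 3.])
#     find_nan_segments(a, cutoff_length=2)
#     # -> array([[1, 4]])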
# %% ../source_nbs/lib_nbs/utils_trajectories.ipynb 26
def segs_inside_fov(traj, fov_origin, fov_length, cutoff_length):
'''
Given a trajectory, finds the segments inside the field of view (FOV).
Parameters
----------
traj : array
Single trajectory of size T x 2 (T: length).
fov_origin : tuple
Bottom left point of the square defining the FOV.
fov_length : float
Size of the box defining the FOV.
cutoff_length : float
Minimum length of a trajectory inside the FOV to be considered in the output dataset.
Returns
-------
array
Begin and end indices (end exclusive) of the segments inside the FOV, or None if there are none.
'''
import warnings
warnings.filterwarnings('ignore') # nanmin gives an undesired warning..
fov_min_x, fov_min_y = fov_origin
fov_max_x, fov_max_y = np.array(fov_origin)+fov_length
# extract components
x, y = traj[:, 0].copy(), traj[:, 1].copy()
# Interior points to the FOV are set to nans
x[np.argwhere((x > fov_min_x) & (x < fov_max_x))] = np.nan
y[np.argwhere((y > fov_min_y) & (y < fov_max_y))] = np.nan
# Compare the minimums of each array. This way, if at least one dimension
# is outside (i.e. not nan), the whole segment will be considered outside
merge_dims = np.nanmin(np.vstack((x, y)), axis = 0)
# Find nan segments bigger than cutoff_length
nan_segms = find_nan_segments(merge_dims, cutoff_length = cutoff_length)
return nan_segms
# %% ../source_nbs/lib_nbs/utils_trajectories.ipynb 27
def inside_fov_dataset(trajs, labels,
fov_origin, fov_length,
cutoff_length = 10,
func_labels = None,
return_frames = False):
''' Given a dataset of trajectories with labels and FOV parameters, returns a list of
trajectories with the corresponding labels inside the FOV.
Parameters
----------
trajs : array
Set of trajectories with shape T x N x 2.
labels : array
Set of labels with shape T x N x 2.
fov_origin : tuple
Bottom left point of the square defining the FOV.
fov_length : float
Size of the box defining the FOV.
cutoff_length : float
Minimum length of a trajectory inside the FOV to be considered in the output dataset.
func_labels : func
(optional) Function to be applied to the labels to take advantage of the loop.
Returns
-------
tuple
- trajs_fov (list): list 2D arrays containing the trajectories inside the field of view.
- labels_fov (list): corresponding labels of the trajectories.
'''
trajs_fov, labels_fov = [], []
frames = np.arange(trajs.shape[0])
for idx, (traj, label) in enumerate(zip(trajs[:, :, :].transpose(1,0,2),
labels[:, :, :].transpose(1,0,2))):
nan_segms = segs_inside_fov(traj, fov_origin, fov_length, cutoff_length)
if nan_segms is not None:
for idx_nan in nan_segms:
traj_x = traj[idx_nan[0]:idx_nan[1], 0]
traj_y = traj[idx_nan[0]:idx_nan[1], 1]
if return_frames:
frames_cut = frames[idx_nan[0]:idx_nan[1]]
trajs_fov.append(np.vstack((frames_cut, traj_x, traj_y)))
else:
trajs_fov.append(np.vstack((traj_x, traj_y)))
lab_list = []
for idx_lab in range(label.shape[-1]):
if func_labels is not None: # If provided, apply func_labels (mostly for smoothing)
lab = func_labels(label[idx_nan[0]:idx_nan[1], idx_lab])
else:
lab = label[idx_nan[0]:idx_nan[1], idx_lab]
lab_list.append(lab)
labels_fov.append(np.vstack(lab_list))
return trajs_fov, labels_fov
# %% ../source_nbs/lib_nbs/utils_trajectories.ipynb 33
import matplotlib.pyplot as plt
def plot_trajs(trajs, L , N,
num_to_plot = 3,
labels = None,
plot_labels = False,
traps_positions = None,
comp_center = None, r_cercle = None
):
if plot_labels:
fig, axs = plt.subplots(3, num_to_plot, figsize = (num_to_plot*3, 3*3), tight_layout = True)
else:
fig, axs = plt.subplots(2, num_to_plot, figsize = (num_to_plot*3, 2*3), tight_layout = True)
for ax in axs.transpose():
if traps_positions is not None:
ax[0].scatter(traps_positions[:,0], traps_positions[:,1], c = 'C1')
if comp_center is not None:
for c in comp_center:
circle = plt.Circle((c[0], c[1]), r_cercle, facecolor = 'None', edgecolor = 'C1', zorder = 10)
ax[0].add_patch(circle)
part = np.random.randint(N)
ax[0].set_title(f'Particle # {part}')
ax[0].plot(trajs[:, part, 0], trajs[:, part, 1], alpha = 0.8)
ax[0].axhline(L, ls = '--', alpha = 0.3, c = 'k', label = 'Boundary')
ax[0].axhline(0, ls = '--', alpha = 0.3, c = 'k')
ax[0].axvline(L, ls = '--', alpha = 0.3, c = 'k')
ax[0].axvline(0, ls = '--', alpha = 0.3, c = 'k')
ax[1].plot(trajs[:, part, 0], 'o-', label = 'X', ms = 3, lw = 0.1)
ax[1].plot(trajs[:, part, 1], 'o-', label = 'Y', ms = 3, lw = 0.1)
ax[1].axhline(L, ls = '--', alpha = 0.3, c = 'k')
ax[1].axhline(0, ls = '--', alpha = 0.3, c = 'k')
if plot_labels:
ax[2].plot(labels[:, part, 0], 'o-', label = r'$\alpha$', ms = 3, lw = 0.1)
ax[2].plot(labels[:, part, 1], 'o-', label = r'$D$', ms = 3, lw = 0.1)
plt.setp(axs[0, :], xlabel = 'X', ylabel = 'Y')
axs[0, 0].legend()
axs[1, 0].legend()
plt.setp(axs[1, 0], ylabel = 'Position')
plt.setp(axs[1:-1, :], xticklabels = '')
if plot_labels:
axs[2, 0].legend()
plt.setp(axs[2, 0], ylabel = 'Labels')
plt.setp(axs[2, :], xlabel = 'Time');
else:
plt.setp(axs[1, :], xlabel = 'Time');
| 17,485 | 39.01373 | 174 |
py
|
inFairness
|
inFairness-main/setup.py
|
from setuptools import setup, find_packages
with open("README.md", "r") as f:
long_description = f.read()
setup(
name="inFairness",
packages=[
"inFairness",
*["inFairness." + p for p in find_packages(where="./inFairness")],
],
package_dir={"": ".",},
install_requires=[
"numpy>=1.21.6",
"pandas>=1.3.5",
"POT>=0.8.0",
"scikit-learn>=0.24.2",
"scipy>=1.5.4",
"torch>=1.13.0"
],
description="inFairness is a Python package to train and audit individually fair PyTorch models",
long_description=long_description,
long_description_content_type="text/markdown",
version="0.2.3",
url="https://github.com/IBM/inFairness",
author="IBM Research",
author_email="[email protected], [email protected], [email protected], [email protected]",
keywords=[
"individual fairness",
"ai fairness",
"trustworthy ai",
"machine learning",
],
python_requires=">=3.7",
)
| 1,031 | 27.666667 | 113 |
py
|
inFairness
|
inFairness-main/examples/postprocess-sentiment-analysis/data.py
|
import torch
import re
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import seaborn as sns
plt.rcParams['pdf.fonttype'] = 42
plt.rcParams['ps.fonttype'] = 42
sns.set_context(rc={'figure.figsize': (9, 9)}, font_scale=2.)
TOKEN_RE = re.compile(r"\w.*?\b")
def load_embeddings(filename):
"""
Load a DataFrame from the generalized text format used by word2vec, GloVe,
fastText, and ConceptNet Numberbatch. The main point where they differ is
whether there is an initial line with the dimensions of the matrix.
"""
labels = []
rows = []
with open(filename, encoding='utf-8') as infile:
for i, line in enumerate(infile):
items = line.rstrip().split(' ')
if len(items) == 2:
# This is a header row giving the shape of the matrix
continue
labels.append(items[0])
values = np.array([float(x) for x in items[1:]], 'f')
rows.append(values)
arr = np.vstack(rows)
return pd.DataFrame(arr, index=labels, dtype='f')
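# Illustrative sketch (not part of the original module; the file path and the
# embedding dimension are hypothetical):
#     embeddings = load_embeddings('data/glove.42B.300d.txt')
#     embeddings.loc['good'].values   # -> one float vector per word, e.g. 300-dimensional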
def load_lexicon(filepath):
"""
Load a file from Bing Liu's sentiment lexicon containing
English words in Latin-1 encoding.
One file contains a list of positive words, and the other
contains a list of negative words. The files contain comment
lines starting with ';' and blank lines, which should be skipped.
"""
lexicon = []
with open(filepath, encoding='latin-1') as infile:
for line in infile:
line = line.rstrip()
if line and not line.startswith(';'):
lexicon.append(line)
return lexicon
def load_data(data_path, embeddings_path, state=0):
pos_words = load_lexicon(data_path + '/positive-words.txt')
neg_words = load_lexicon(data_path + '/negative-words.txt')
embeddings = load_embeddings(embeddings_path)
# filter words that do not appear in the embedding index
pos_words = [word for word in pos_words if word in embeddings.index]
neg_words = [word for word in neg_words if word in embeddings.index]
pos_vectors = embeddings.loc[pos_words].dropna()
neg_vectors = embeddings.loc[neg_words].dropna()
vectors = pd.concat([pos_vectors, neg_vectors])
targets = np.array([1 for entry in pos_vectors.index] + [-1 for entry in neg_vectors.index])
labels = list(pos_vectors.index) + list(neg_vectors.index)
train_vectors, test_vectors, train_targets, test_targets, train_vocab, test_vocab = \
train_test_split(vectors, targets, labels, test_size=0.1, random_state=state)
## Data
X_train = train_vectors.values
X_test = test_vectors.values
y_train = train_targets
y_train[y_train == -1] = 0
y_test = test_targets
y_test[y_test == -1] = 0
return embeddings, X_train, X_test, y_train, y_test, train_vocab, test_vocab
def load_test_names(embeddings):
NAMES_BY_ETHNICITY = {
# The first two lists are from the Caliskan et al. appendix describing the
# Word Embedding Association Test.
'White': [
'Adam', 'Chip', 'Harry', 'Josh', 'Roger', 'Alan', 'Frank', 'Ian', 'Justin',
'Ryan', 'Andrew', 'Fred', 'Jack', 'Matthew', 'Stephen', 'Brad', 'Greg', 'Jed',
'Paul', 'Todd', 'Brandon', 'Hank', 'Jonathan', 'Peter', 'Wilbur', 'Amanda',
'Courtney', 'Heather', 'Melanie', 'Sara', 'Amber', 'Crystal', 'Katie',
'Meredith', 'Shannon', 'Betsy', 'Donna', 'Kristin', 'Nancy', 'Stephanie',
'Bobbie-Sue', 'Ellen', 'Lauren', 'Peggy', 'Sue-Ellen', 'Colleen', 'Emily',
'Megan', 'Rachel', 'Wendy'
],
'Black': [
'Alonzo', 'Jamel', 'Lerone', 'Percell', 'Theo', 'Alphonse', 'Jerome',
'Leroy', 'Rasaan', 'Torrance', 'Darnell', 'Lamar', 'Lionel', 'Rashaun',
'Tyree', 'Deion', 'Lamont', 'Malik', 'Terrence', 'Tyrone', 'Everol',
'Lavon', 'Marcellus', 'Terryl', 'Wardell', 'Aiesha', 'Lashelle', 'Nichelle',
'Shereen', 'Temeka', 'Ebony', 'Latisha', 'Shaniqua', 'Tameisha', 'Teretha',
'Jasmine', 'Latonya', 'Shanise', 'Tanisha', 'Tia', 'Lakisha', 'Latoya',
'Sharise', 'Tashika', 'Yolanda', 'Lashandra', 'Malika', 'Shavonn',
'Tawanda', 'Yvette'
]
}
NAMES_BY_ETHNICITY['White'] = [n.lower() for n in NAMES_BY_ETHNICITY['White'] if n.lower() in embeddings.index]
NAMES_BY_ETHNICITY['Black'] = [n.lower() for n in NAMES_BY_ETHNICITY['Black'] if n.lower() in embeddings.index]
white_female_start = NAMES_BY_ETHNICITY['White'].index('amanda')
black_female_start = NAMES_BY_ETHNICITY['Black'].index('aiesha')
test_gender = white_female_start*['Male'] + (len(NAMES_BY_ETHNICITY['White']) - white_female_start)*['Female']
test_gender += black_female_start*['Male'] + (len(NAMES_BY_ETHNICITY['Black']) - black_female_start)*['Female']
test_df = pd.DataFrame({'name':NAMES_BY_ETHNICITY['White'] + NAMES_BY_ETHNICITY['Black'],
'race':len(NAMES_BY_ETHNICITY['White'])*['White'] + len(NAMES_BY_ETHNICITY['Black'])*['Black'],
'gender':test_gender})
test_names_embed = embeddings.loc[test_df['name']].values
return test_df, test_names_embed
def load_nyc_names(names_path, embeddings):
names_df = pd.read_csv(names_path)
ethnicity_fixed = []
    for n in names_df['Ethnicity']:
        if n.startswith('BLACK'):
            ethnicity_fixed.append('Black')
        elif n.startswith('WHITE'):
            ethnicity_fixed.append('White')
        elif n.startswith('ASIAN'):
            ethnicity_fixed.append('Asian')
        elif n.startswith('HISPANIC'):
            ethnicity_fixed.append('Hispanic')
        else:
            # Keep unmatched labels so the list stays aligned with the
            # DataFrame; the Black/White filter below discards them anyway.
            ethnicity_fixed.append(n)
names_df['Ethnicity'] = ethnicity_fixed
names_df = names_df[np.logical_or(names_df['Ethnicity']=='Black', names_df['Ethnicity']=='White')]
names_df['Child\'s First Name'] = [n.lower() for n in names_df['Child\'s First Name']]
names_from_df = names_df['Child\'s First Name'].values.tolist()
idx_keep = []
for i, n in enumerate(names_from_df):
if n in embeddings.index:
idx_keep.append(i)
names_df = names_df.iloc[idx_keep]
names_from_df = names_df['Child\'s First Name'].values.tolist()
names_embed = embeddings.loc[names_from_df].values
return names_embed
def print_summary(test_df, method_name, test_accuracy):
print(method_name + ' test accuracy %f' % test_accuracy)
mean_sentiments_race = []
for r in ['Black', 'White']:
mean_sent = test_df[method_name + '_logits'][test_df['race']==r].mean()
mean_sentiments_race.append(mean_sent)
print(method_name + ' %s mean sentiment is %f' %(r, mean_sent))
print(method_name + ' race mean sentiment difference is %f\n' % np.abs(mean_sentiments_race[0] - mean_sentiments_race[1]))
mean_sentiments_gender = []
for g in ['Female', 'Male']:
mean_sent = test_df[method_name + '_logits'][test_df['gender']==g].mean()
mean_sentiments_gender.append(mean_sent)
print(method_name + ' %s mean sentiment is %f' %(g, mean_sent))
print(method_name + ' gender mean sentiment difference is %f\n' % np.abs(mean_sentiments_gender[0] - mean_sentiments_gender[1]))
fig, axs = plt.subplots(nrows=1, ncols=2, sharey=True, figsize=(6*2, 6))
sns.boxplot(x='race', y=method_name + '_logits', data=test_df, ax=axs[0]).set_title(method_name, fontsize=20)
sns.boxplot(x='gender', y=method_name + '_logits', data=test_df, ax=axs[1]).set_title(method_name, fontsize=20)
axs[0].set_ylim([-0.1, 1.1])
axs[0].set_xlabel('Race', size=18)
axs[0].set_ylabel('Sentiment', size=18, labelpad=-5)
axs[1].set_ylim([-0.1, 1.1])
axs[1].set_xlabel('Gender', size=18)
axs[1].set_ylabel('Sentiment', size=18, labelpad=-5)
plt.tick_params(axis='both', which='major', labelsize=16)
plt.show()
return
def embed_sentence(text, embedding):
    tokens = [token.casefold() for token in TOKEN_RE.findall(text)]
    with torch.no_grad():
        sentence_embeddings = []
        for token in tokens:
            # Skip out-of-vocabulary tokens rather than raising a KeyError.
            if token not in embedding.index:
                continue
            vec = embedding.loc[token].dropna()
            sentence_embeddings.append(torch.Tensor(vec).view(1, -1))
        sentence_embeddings = torch.cat(sentence_embeddings, dim=0).mean(dim=0, keepdim=True)
    return sentence_embeddings
def text_to_sentiment(text, network, embedding):
    with torch.no_grad():
        # Reuse the shared sentence-embedding routine defined above instead
        # of duplicating the tokenize-and-average logic.
        sentence_embeddings = embed_sentence(text, embedding)
        sentiment = network(sentence_embeddings)
        sentiment = torch.nn.functional.softmax(sentiment.mean(dim=0, keepdim=True), dim=-1)
        mean_sentiment = sentiment.data.numpy()[0]
    return mean_sentiment
def format_sentiment_score(score):
    if score[0] > score[1]:
        # score[0] is the probability of the negative class.
        return 'Negative with score ' + '{:.2f}%'.format(score[0]*100)
    elif score[1] > score[0]:
        return 'Positive with score ' + '{:.2f}%'.format(score[1]*100)
    return 'Neutral with score ' + '{:.2f}%'.format(score[1]*100)
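# Hedged usage sketch: `network` is assumed to be any trained torch module
# mapping a (1, embedding_dim) tensor to two logits (negative, positive);
# `embeddings` is the DataFrame returned by load_embeddings above.
#
# score = text_to_sentiment('this movie was great', network, embeddings)
# print(format_sentiment_score(score))  # e.g. "Positive with score 97.12%"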
def get_positive_negative_sents(template=None):
if template is None:
template= "This movie is "
pos_words = "remarkable magnificent wondrous amazing astounding incredible stunning astonishing awe-inspiring breathtaking grand majestic spectacular splendid stupendous tremendous wonderful extraordinary impressive jaw-dropping marvellousUK mind-blowing overwhelming staggering striking beautiful brilliant eye-opening eye-popping fabulous glorious humbling imposing intense marvelousUS mind-boggling phenomenal startling stupefying amazeballs confounding dramatic miraculous monumental moving out of this world portentous prodigious sublime unbelievable something else surprising awful sensational fantastic fab great terrific unreal utmost exceptional unusual preternatural stellar heavy outstanding bad fantabulous flabbergasting exciting fascinating out-of-this-world embarrassing state-of-the-art mortifying superb shaming discomfiting awe-striking sobering dazzling super chastening uncommon inspiring inspirational notable noteworthy overcoming thrilling all that and a bag of chips stirring formidable magical excellent enthralling fantastical theatrical exhilarating superior gee-whizz royal dynamite fat large smashing considerable radical titantic surpassing belief too much first-rate heart-stopping first-class"
pos_words = pos_words.split(" ")
neg_words = "ordinary boring mediocre unremarkable lackluster mundane plain unimpressive uninteresting vapid average drab dreary dull insipid mind-numbing monotonous run-of-the-mill standard tame trite trivial unamazing unexceptional unexciting uninspired uninspiring vanilla aweless common commonplace customary flat humdrum lifeless normal prosaic routine simple stale typical unmemorable unnoteworthy usual wearisome everyday indifferent pedestrian undistinguished regular traditional familiar conventional household insignificant unpretentious generic characterless bland stereotypical uneventful unstimulating discreet inconspicuous habitual minor predictable quotidian wonted workaday unimportant inferior modest fixed general stock mainstream fair nondescript humble stereotyped cut-and-dry cut-and-dried not special banal day-to-day garden variety OK tedious unmoving tiresome staid quiet discouraging depressing upsetting"
neg_words = neg_words.split(" ")
pos_sentences = [template + word for word in pos_words]
neg_sentences = [template + word for word in neg_words]
return pos_sentences, neg_sentences
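# Hedged sketch of probing a sentiment model with the templated sentences
# (`network` and `embeddings` are assumptions, as above); a large gap
# between the two means indicates the model separates the word lists.
#
# pos_sents, neg_sents = get_positive_negative_sents()
# pos_scores = [text_to_sentiment(s, network, embeddings)[1] for s in pos_sents]
# neg_scores = [text_to_sentiment(s, network, embeddings)[1] for s in neg_sents]
# print('mean score, positive templates:', np.mean(pos_scores))
# print('mean score, negative templates:', np.mean(neg_scores))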
| 11,968 | 44.858238 | 1,229 |
py
|
inFairness
|
inFairness-main/examples/fair-ranking-synthetic-data/trainer.py
|
class Trainer(object):
"""Main trainer class that orchestrates the entire learning routine
Use this class to start training a model using individual fairness routines
Args:
        dataloader (torch.utils.data.DataLoader): training data loader
        model (inFairness.fairalgo): Individual fairness algorithm
        optimizer (torch.optim): Model optimizer
        max_iterations (int): Number of training steps
        print_loss_period (int): if non-zero, print the loss every
            `print_loss_period` steps (0 disables loss printing)
    """
def __init__(self, dataloader, model, optimizer, max_iterations, print_loss_period=0):
self.dataloader = dataloader
self.model = model
self.optimizer = optimizer
self.max_iterations = max_iterations
self._dataloader_iter = iter(self.dataloader)
        self.print_loss_period = print_loss_period
        # Initialized here so run_step() can be called before train()
        # sets the step counter.
        self.step_count = 0
def run_step(self):
try:
data = next(self._dataloader_iter)
except StopIteration:
self._dataloader_iter = iter(self.dataloader)
data = next(self._dataloader_iter)
if isinstance(data, list) or isinstance(data, tuple):
model_output = self.model(*data)
elif isinstance(data, dict):
model_output = self.model(**data)
else:
raise AttributeError(
"Data format not recognized. Only `list`, `tuple`, and `dict` are recognized."
)
if self.print_loss_period:
if self.step_count % self.print_loss_period == 0:
print(f'loss {self.step_count}', model_output.loss)
self.optimizer.zero_grad()
model_output.loss.backward()
self.optimizer.step()
def train(self):
self.model.train(True)
for self.step_count in range(self.max_iterations):
self.run_step()
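# Hedged usage sketch: `fairalgo` stands in for an inFairness fair
# algorithm; any module whose forward() returns an object exposing a
# `.loss` attribute satisfies the contract run_step() assumes. X and Y
# are placeholder tensors.
#
# dataset = torch.utils.data.TensorDataset(X, Y)
# loader = torch.utils.data.DataLoader(dataset, batch_size=64, shuffle=True)
# optimizer = torch.optim.Adam(fairalgo.parameters(), lr=1e-3)
# trainer = Trainer(loader, fairalgo, optimizer, max_iterations=1000,
#                   print_loss_period=100)
# trainer.train()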
| 1,760 | 31.018182 | 94 |
py
|
inFairness
|
inFairness-main/examples/adult-income-prediction/data.py
|
import os
import requests
import pandas as pd
import numpy as np
import torch
from sklearn.preprocessing import StandardScaler
from sklearn.utils.random import sample_without_replacement
def _download_data_(rootdir=None):
URLS = {
'train': 'https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data',
'test': 'https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.test'
}
dirpaths = {}
if rootdir is None:
rootdir = "./dataset"
os.makedirs(rootdir, exist_ok=True)
for fname, url in URLS.items():
fout = os.path.join(rootdir, f'{fname}.csv')
r = requests.get(url)
with open(fout, 'w') as f:
f.write(r.content.decode('utf-8'))
dirpaths[fname] = fout
return dirpaths
def _read_data_(fpath, train_or_test):
names = [
'age', 'workclass', 'fnlwgt', 'education',
'education-num', 'marital-status', 'occupation',
'relationship', 'race', 'sex', 'capital-gain',
'capital-loss', 'hours-per-week', 'native-country',
'annual-income'
]
if train_or_test == 'train':
data = pd.read_csv(
fpath, sep=',', header=None, names=names,
na_values=['?'], skipinitialspace=True
)
    elif train_or_test == 'test':
        data = pd.read_csv(
            fpath, sep=',', header=None, names=names,
            na_values=['?'], skiprows=1, skipinitialspace=True
        )
    else:
        # Fail loudly instead of hitting an UnboundLocalError below.
        raise ValueError("train_or_test must be 'train' or 'test'")
data['annual-income'] = data['annual-income'].str.rstrip('.')
data['annual-income'] = data['annual-income'].replace({'<=50K': 0, '>50K': 1})
return data
def load_data(rootdir=None):
# download data from UCI repository
dirpaths = _download_data_(rootdir=rootdir)
train_data = _read_data_(dirpaths['train'], 'train')
test_data = _read_data_(dirpaths['test'], 'test')
data = pd.concat([train_data, test_data], ignore_index=True)
# remove rows with NaNs
data.dropna(inplace=True)
categorical_vars = [
'workclass', 'marital-status', 'occupation',
'relationship', 'race', 'sex', 'native-country'
]
data = pd.get_dummies(data, columns=categorical_vars)
cols_to_drop = [
'race_Amer-Indian-Eskimo', 'race_Asian-Pac-Islander', 'race_Black',
'race_Other', 'sex_Female', 'native-country_Cambodia', 'native-country_Canada',
'native-country_China', 'native-country_Columbia', 'native-country_Cuba',
'native-country_Dominican-Republic', 'native-country_Ecuador',
'native-country_El-Salvador', 'native-country_England', 'native-country_France',
'native-country_Germany', 'native-country_Greece', 'native-country_Guatemala',
'native-country_Haiti', 'native-country_Holand-Netherlands', 'native-country_Honduras',
'native-country_Hong', 'native-country_Hungary', 'native-country_India', 'native-country_Iran',
'native-country_Ireland', 'native-country_Italy', 'native-country_Jamaica', 'native-country_Japan',
'native-country_Laos', 'native-country_Mexico', 'native-country_Nicaragua',
'native-country_Outlying-US(Guam-USVI-etc)', 'native-country_Peru', 'native-country_Philippines',
'native-country_Poland', 'native-country_Portugal', 'native-country_Puerto-Rico', 'native-country_Scotland',
'native-country_South', 'native-country_Taiwan', 'native-country_Thailand', 'native-country_Trinadad&Tobago',
'native-country_United-States', 'native-country_Vietnam', 'native-country_Yugoslavia',
'fnlwgt', 'education'
]
data.drop(cols_to_drop, axis=1, inplace=True)
# Split into train/test splits
train_data = data.sample(frac=0.8, random_state=123)
test_data = data.drop(train_data.index).reset_index(drop=True)
train_data = train_data.reset_index(drop=True)
# Standardize continuous columns
continuous_vars = [
'age', 'education-num', 'capital-gain',
'capital-loss', 'hours-per-week'
]
scaler = StandardScaler().fit(train_data[continuous_vars])
train_data[continuous_vars] = scaler.transform(train_data[continuous_vars])
test_data[continuous_vars] = scaler.transform(test_data[continuous_vars])
train_data = get_input_output_df(train_data)
test_data = get_input_output_df(test_data)
return train_data, test_data
def get_input_output_df(data):
cols = sorted(data.columns)
output_col = 'annual-income'
    # `!=`, not `not in`: `in` on a string does a substring test and would
    # wrongly drop any column whose name is contained in 'annual-income'.
    input_cols = [col for col in cols if col != output_col]
df_X = data[input_cols]
df_Y = data[output_col]
return df_X, df_Y
def convert_df_to_tensor(data_X_df, data_Y_df):
data_X = torch.tensor(data_X_df.values).float()
data_Y = torch.tensor(data_Y_df.values)
return data_X, data_Y
def generate_pairs(len1, len2, n_pairs=100):
"""
vanilla sampler of random pairs (might sample same pair up to permutation)
n_pairs > len1*len2 should be satisfied
"""
idx = sample_without_replacement(len1*len2, n_pairs)
return np.vstack(np.unravel_index(idx, (len1, len2)))
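# Tiny concrete example (output is one possible draw; sampling is random):
#
# generate_pairs(2, 4, n_pairs=3)
# -> array([[1, 0, 1],
#           [2, 3, 0]])   # row of first indices over row of second indices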
def create_data_pairs(X_train, Y_train, Y_gender_train, n_comparable=10000, n_incomparable=10000):
y_gender_train_np = Y_gender_train.detach().numpy()
y_train_np = Y_train.detach().numpy()
X_train_np = X_train.detach().numpy()
# Create comparable pairs
comparable_X1 = None
comparable_X2 = None
K = 2
for i in range(K):
        # Indices of class-i examples, split by the binary gender label.
        c0_idx = np.where((y_gender_train_np == 0) & (y_train_np == i))[0]
        c1_idx = np.where((y_gender_train_np == 1) & (y_train_np == i))[0]
pairs_idx = generate_pairs(len(c0_idx), len(c1_idx), n_pairs=n_comparable // K)
if comparable_X1 is None:
comparable_X1 = X_train_np[c0_idx[pairs_idx[0]]]
comparable_X2 = X_train_np[c1_idx[pairs_idx[1]]]
else:
comparable_X1 = np.vstack((comparable_X1, X_train_np[c0_idx[pairs_idx[0]]]))
comparable_X2 = np.vstack((comparable_X2, X_train_np[c1_idx[pairs_idx[1]]]))
# Create incomparable pairs
c0_idx = np.where(y_train_np==0)[0]
c1_idx = np.where(y_train_np==1)[0]
pairs_idx = generate_pairs(len(c0_idx), len(c1_idx), n_pairs=n_incomparable)
incomparable_X1 = X_train_np[c0_idx[pairs_idx[0]]]
incomparable_X2 = X_train_np[c1_idx[pairs_idx[1]]]
# Join the two sets (comparable and incomparable) to create X and Y
X1 = np.vstack((comparable_X1, incomparable_X1))
X2 = np.vstack((comparable_X2, incomparable_X2))
Y_pairs = np.zeros(n_comparable + n_incomparable)
Y_pairs[:n_comparable] = 1
X1 = torch.from_numpy(X1)
X2 = torch.from_numpy(X2)
Y_pairs = torch.from_numpy(Y_pairs)
return X1, X2, Y_pairs
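# Hedged end-to-end sketch tying the helpers together ('sex_Male' is the
# one-hot gender column left after load_data drops 'sex_Female'):
#
# (X_tr_df, y_tr_df), (X_te_df, y_te_df) = load_data()
# X_train, Y_train = convert_df_to_tensor(X_tr_df, y_tr_df)
# Y_gender = torch.tensor(X_tr_df['sex_Male'].values)
# X1, X2, Y_pairs = create_data_pairs(X_train, Y_train, Y_gender)
# print(X1.shape, X2.shape, Y_pairs.shape)  # 20000 pairs by default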
| 6,759 | 33.666667 | 118 |
py
|
inFairness
|
inFairness-main/examples/adult-income-prediction/metrics.py
|
import torch
import numpy as np
from sklearn.metrics import confusion_matrix
def accuracy(model, test_dl, device):
    model.eval()
    corr, total = 0, 0
    with torch.no_grad():
        for x, y in test_dl:
            x, y = x.to(device), y.to(device)
            y_pred = model(x)
            _, y_pred = torch.max(y_pred, dim=1)
            total += y.shape[0]
            corr += torch.sum(y_pred == y).item()
    # Plain float, consistent with the other metrics below.
    score = corr / float(total)
    return score
def balanced_accuracy(model, test_dl, device):
model.eval()
Y_gold, Y_predicted = [], []
for x, y in test_dl:
x, y = x.to(device), y.to(device)
y_pred = model(x)
_, y_pred = torch.max(y_pred, dim=-1)
y_pred = y_pred.squeeze().detach().cpu().tolist()
Y_predicted.extend(y_pred)
Y_gold.extend(y.detach().cpu().tolist())
conf_matrix = confusion_matrix(Y_gold, Y_predicted)
true_neg = conf_matrix[0][0]
false_neg = conf_matrix[1][0]
true_pos = conf_matrix[1][1]
false_pos = conf_matrix[0][1]
TPR = true_pos / float(true_pos + false_neg)
TNR = true_neg / float(true_neg + false_pos)
acc = 0.5 * (TPR + TNR)
return acc
def spouse_consistency(model, test_dl, test_dl_flipped, device):
model.eval()
predictions_original = []
for x, _ in test_dl:
x = x.to(device)
y_pred = model(x)
_, y_pred = torch.max(y_pred, dim=-1)
y_pred = y_pred.squeeze().detach().cpu().tolist()
predictions_original.extend(y_pred)
predictions_flipped = []
for x, _ in test_dl_flipped:
x = x.to(device)
y_pred = model(x)
_, y_pred = torch.max(y_pred, dim=-1)
y_pred = y_pred.squeeze().detach().cpu().tolist()
predictions_flipped.extend(y_pred)
predictions_original = np.array(predictions_original)
predictions_flipped = np.array(predictions_flipped)
score = np.mean(predictions_original == predictions_flipped)
return score
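# Hedged usage sketch: `model`, `test_dl`, and `test_dl_flipped` are
# assumptions -- a two-logit torch classifier plus DataLoaders over the
# original and spouse-attribute-flipped test sets.
#
# device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# print('accuracy:', accuracy(model, test_dl, device))
# print('balanced accuracy:', balanced_accuracy(model, test_dl, device))
# print('consistency:', spouse_consistency(model, test_dl, test_dl_flipped, device))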
| 1,950 | 24.012821 | 64 |
py
|