repo (stringlengths 2–99) | file (stringlengths 13–225) | code (stringlengths 0–18.3M) | file_length (int64 0–18.3M) | avg_line_length (float64 0–1.36M) | max_line_length (int64 0–4.26M) | extension_type (stringclasses, 1 value)
---|---|---|---|---|---|---|
MICO | MICO-main/training/train_purchase100.py | import os
import argparse
import warnings
import git
import csv
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torchcsprng import create_mt19937_generator, create_random_device_generator
from torch.utils.data import DataLoader
from opacus import PrivacyEngine
from opacus.validators import ModuleValidator
from opacus.utils.batch_memory_manager import BatchMemoryManager
from prv_accountant.dpsgd import find_noise_multiplier
from accountant import PRVAccountant
from mico_competition import ChallengeDataset, MLP, load_purchase100
from tqdm import tqdm, trange
from datetime import datetime
from typing import Callable, Optional
def accuracy(preds: np.ndarray, labels: np.ndarray) -> float:
    # preds and labels are numpy arrays at the call sites in train() and test()
    return float((preds == labels).mean())
def train(args: argparse.Namespace,
model: nn.Module,
device: torch.device,
train_loader: DataLoader,
criterion,
optimizer: optim.Optimizer,
epoch: int,
compute_epsilon: Optional[Callable[[int], float]] = None):
model.train()
losses = []
top1_acc = []
with BatchMemoryManager(
data_loader=train_loader,
max_physical_batch_size=args.max_physical_batch_size,
optimizer=optimizer
) as memory_safe_data_loader:
if args.disable_dp:
data_loader = train_loader
else:
data_loader = memory_safe_data_loader
# BatchSplittingSampler.__len__() approximates (badly) the length in physical batches
# See https://github.com/pytorch/opacus/issues/516
# We instead heuristically keep track of logical batches processed
pbar = tqdm(data_loader, desc="Batch", unit="batch", position=1, leave=True, total=len(train_loader), disable=None)
logical_batch_len = 0
for i, (inputs, target) in enumerate(data_loader):
inputs = inputs.to(device)
target = target.to(device)
logical_batch_len += len(target)
if logical_batch_len >= args.batch_size:
pbar.update(1)
logical_batch_len = logical_batch_len % args.max_physical_batch_size
optimizer.zero_grad()
output = model(inputs)
loss = criterion(output, target)
preds = np.argmax(output.detach().cpu().numpy(), axis=1)
labels = target.detach().cpu().numpy()
acc = accuracy(preds, labels)
losses.append(loss.item())
top1_acc.append(acc)
loss.backward()
optimizer.step()
if (pbar.n + 1) % args.logging_steps == 0 or (pbar.n + 1) == pbar.total:
if not args.disable_dp:
epsilon = compute_epsilon(delta=args.target_delta)
pbar.set_postfix(
epoch=f"{epoch:02}",
train_loss=f"{np.mean(losses):.3f}",
accuracy=f"{np.mean(top1_acc) * 100:.3f}",
dp=f"(ε={epsilon:.2f}, δ={args.target_delta})"
)
else:
pbar.set_postfix(
epoch=f"{epoch:02}",
train_loss=f"{np.mean(losses):.3f}",
accuracy=f"{np.mean(top1_acc) * 100:.3f}",
dp="(ε = ∞, δ = 0)"
)
pbar.update(pbar.total - pbar.n)
def test(args: argparse.Namespace,
model: nn.Module,
device: torch.device,
test_loader: DataLoader,
criterion):
model.eval()
losses = []
top1_acc = []
with torch.no_grad():
for inputs, target in tqdm(test_loader, desc="Test ", unit="batch", disable=None):
inputs = inputs.to(device)
target = target.to(device)
output = model(inputs)
loss = criterion(output, target)
preds = np.argmax(output.detach().cpu().numpy(), axis=1)
labels = target.detach().cpu().numpy()
acc = accuracy(preds, labels)
losses.append(loss.item())
top1_acc.append(acc)
top1_avg = np.mean(top1_acc)
loss_avg = np.mean(losses)
print(
f"Test Loss : {loss_avg:.6f}\n"
f"Test Accuracy: {top1_avg * 100:.6f}"
)
return np.mean(top1_acc)
def main(args: argparse.Namespace):
noise_generator = None
if not args.secure_mode and args.train_seed is not None:
# Following the advice on https://pytorch.org/docs/1.8.1/notes/randomness.html
if torch.cuda.is_available():
os.environ['CUBLAS_WORKSPACE_CONFIG'] = ":4096:8"
torch.use_deterministic_algorithms(True)
torch.cuda.manual_seed(args.train_seed)
torch.cuda.manual_seed_all(args.train_seed)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
import random
random.seed(args.train_seed)
os.environ['PYTHONHASHSEED'] = str(args.train_seed)
# Required to get deterministic batches because Opacus uses secure_rng as a generator for
# train_loader when poisson_sampling = True even though secure_mode = False, which sets secure_rng = None
# https://github.com/pytorch/opacus/blob/5e632cdb8d497aade29e5555ad79921c239c78f7/opacus/privacy_engine.py#L206
torch.manual_seed(args.train_seed)
np.random.seed(args.train_seed)
noise_generator = create_mt19937_generator(args.train_seed)
if (args.seed_challenge is None or args.seed_training is None or args.seed_membership is None):
if args.split_seed is None:
seed_generator = create_random_device_generator()
else:
seed_generator = create_mt19937_generator(args.split_seed)
args.seed_challenge, args.seed_training, args.seed_membership = torch.empty(
3, dtype=torch.int64).random_(0, to=None, generator=seed_generator)
print("Using generated seeds\n"
f" seed_challenge = {args.seed_challenge}\n"
f" seed_training = {args.seed_training}\n"
f" seed_membership = {args.seed_membership}\n")
else:
print("Using specified seeds")
full_dataset = load_purchase100(dataset_dir=args.dataset_dir)
challenge_dataset = ChallengeDataset(
full_dataset,
len_challenge=args.len_challenge,
len_training=args.len_training,
seed_challenge=args.seed_challenge,
seed_training=args.seed_training,
seed_membership=args.seed_membership)
train_dataset = challenge_dataset.get_train_dataset()
test_dataset = challenge_dataset.get_eval_dataset()
train_loader = DataLoader(
train_dataset,
batch_size=args.batch_size,
num_workers=args.dataloader_num_workers,
pin_memory=True,
)
test_loader = DataLoader(
test_dataset,
batch_size=args.max_physical_batch_size,
num_workers=args.dataloader_num_workers
)
    # Suppress warnings
warnings.filterwarnings(action="ignore", module="opacus", message=".*Secure RNG turned off")
warnings.filterwarnings(action="ignore", module="torch", message=".*Using a non-full backward hook")
model = MLP()
assert ModuleValidator.is_valid(model)
device = 'cuda' if torch.cuda.is_available() else 'cpu'
model.to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=args.learning_rate)
    # Not the same as args.batch_size / len(train_dataset):
    # len(train_loader) == ceil(len(train_dataset) / args.batch_size) since drop_last=False,
    # so the expected Poisson batch size is len(train_dataset) / len(train_loader) <= args.batch_size
args.sample_rate = 1 / len(train_loader)
num_steps = int(len(train_loader) * args.num_epochs)
if not args.disable_dp:
args.noise_multiplier = find_noise_multiplier(
sampling_probability=args.sample_rate,
num_steps=num_steps,
target_epsilon=args.target_epsilon,
target_delta=args.target_delta,
eps_error=0.1
)
privacy_engine = PrivacyEngine(secure_mode=args.secure_mode)
# Override Opacus accountant
# Revise if https://github.com/pytorch/opacus/pull/493 is merged
privacy_engine.accountant = PRVAccountant(
noise_multiplier=args.noise_multiplier,
sample_rate=args.sample_rate,
max_steps=num_steps,
eps_error=0.1,
delta_error=1e-9)
model, optimizer, train_loader = privacy_engine.make_private(
module=model,
optimizer=optimizer,
data_loader=train_loader,
noise_multiplier=args.noise_multiplier,
max_grad_norm=args.max_grad_norm,
poisson_sampling=True,
noise_generator=noise_generator
)
print(f"Training using DP-SGD with {optimizer.original_optimizer.__class__.__name__} optimizer\n"
f" noise multiplier σ = {optimizer.noise_multiplier},\n"
f" clipping norm C = {optimizer.max_grad_norm:},\n"
f" average batch size L = {args.batch_size},\n"
f" sample rate = {args.sample_rate},\n"
f" for {args.num_epochs} epochs ({num_steps} steps)\n"
f" to target ε = {args.target_epsilon}, δ = {args.target_delta}")
compute_epsilon: Optional[Callable[[float], float]] = lambda delta: privacy_engine.get_epsilon(delta=delta)
else:
print(f"Training using SGD with {optimizer.__class__.__name__} optimizer\n"
f" batch size L = {args.batch_size},\n"
f" for {args.num_epochs} epochs ({num_steps} steps)")
compute_epsilon = None
# Must be initialized after attaching the privacy engine.
# See https://discuss.pytorch.org/t/how-to-use-lr-scheduler-in-opacus/111718
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=args.lr_scheduler_step, gamma=args.lr_scheduler_gamma)
pbar = trange(args.num_epochs, desc="Epoch", unit="epoch", position=0, leave=True, disable=None)
for epoch in pbar:
pbar.set_postfix(lr=f"{scheduler.get_last_lr()}")
train(args, model, device, train_loader, criterion, optimizer, epoch + 1, compute_epsilon=compute_epsilon)
scheduler.step()
acc = test(args, model, device, test_loader, criterion)
with open(os.path.join(args.output_dir, "accuracy"), "w") as f:
print(f"{acc:.3f}", file=f)
if not args.disable_dp:
final_epsilon = compute_epsilon(args.target_delta)
print(f"The trained model is (ε = {final_epsilon}, δ = {args.target_delta})-DP")
with open(os.path.join(args.output_dir, "epsilon"), "w") as f:
print(f"{final_epsilon:.3f}", file=f)
with open(os.path.join(args.output_dir, "seed_challenge"), "w") as f:
print(f"{args.seed_challenge}", file=f)
with open(os.path.join(args.output_dir, "seed_training"), "w") as f:
print(f"{args.seed_training}", file=f)
with open(os.path.join(args.output_dir, "seed_membership"), "w") as f:
print(f"{args.seed_membership}", file=f)
with open(os.path.join(args.output_dir, "solution.csv"), "w") as f:
solution = challenge_dataset.get_solutions()
csv.writer(f).writerow(solution)
torch.save(model.state_dict(), os.path.join(args.output_dir, "model.pt"))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--model_id", type=int, metavar='ID',
help="an identifier for the trained model")
# Seeds
parser.add_argument("--train_seed", type=int, metavar='TS',
help="seed for reproducibility")
parser.add_argument("--split_seed", type=int, metavar='SS',
help="seed to deterministically generate the 3 seeds for creating splits "
"(--seed_challenge, --seed_trainig, seed_membership)")
parser.add_argument("--seed_challenge", type=int, metavar='SC',
help="seed to select challenge examples")
parser.add_argument("--seed_training", type=int, metavar='ST',
help="seed to select non-challenge training examples")
parser.add_argument("--seed_membership", type=int, metavar='SM',
help="seed to split challenge examples into members/non-members")
# Split lengths
parser.add_argument("--len_training", type=int, metavar="N", required=True,
help="(required) number of examples used for training")
parser.add_argument("--len_challenge", type=int, metavar="m", required=True,
help="(required) number of member and non-member challenge examples "
"(i.e., m members and m non-members)")
# General
parser.add_argument("--secure_mode", action="store_true", default=False,
help="whether to use Opacus secure mode for training (default=True)")
parser.add_argument("--disable_dp", action="store_true", default=False,
help="whether to disable differentially private training altogether (default=False)")
parser.add_argument("--dataloader_num_workers", type=int, metavar='W', default=2,
help="number of workers for data loading. 0 means that the data will be loaded in the main process (default=2). "
"See torch.utils.data.DataLoader")
parser.add_argument("--logging_steps", type=int, metavar='k', default=10,
help="prints accuracy, loss, and privacy accounting information during training every k logical batches "
"(default=10)")
parser.add_argument("--dataset_dir", type=str, metavar="DATA", default=".",
help="root directory for cached dataset (default='.')")
parser.add_argument("--output_dir", type=str, metavar="OUT",
help="output directory. If none given, will pick one based on hyperparameters")
# Training hyperparameters
parser.add_argument("--target_epsilon", type=float, metavar="EPSILON",
help="target DP epsilon. Required unless specifying --disable_dp")
parser.add_argument("--target_delta", type=float, metavar="DELTA",
help="target DP delta. Will use 1/N if unspecified")
parser.add_argument("--batch_size", type=int, metavar="L",
help="expected logical batch size; determines the sample rate of DP-SGD. "
"Actual batch size varies because batches are constructed using Poisson sampling")
parser.add_argument("--max_physical_batch_size", type=int, metavar="B",
help="maximum physical batch size. Use to simulate logical batches larger than available memory and "
"to safeguard against unusually large batches produces by Poisson sampling. "
"See opacus.utils.batch_memory_manager.BatchMemoryManager")
parser.add_argument("--num_epochs", metavar='E', type=int, default=10,
help="number of training epochs (default=10)")
parser.add_argument("--max_grad_norm", type=float, metavar='C', default=1.0,
help="clipping norm for per-sample gradients in DP-SGD (default=1.0)")
parser.add_argument("--learning_rate", type=float, metavar="LR", default=1.0,
help="initial learning rate (default=1.0)")
parser.add_argument("--lr_scheduler_gamma", type=float, metavar="GAMMA", default=1.0,
help="gamma parameter for exponential learning rate scheduler")
parser.add_argument("--lr_scheduler_step", type=int, metavar="S", default=1,
help="step size for exponential learning rate scheduler")
args = parser.parse_args()
if args.len_training is None:
raise ValueError("Please specify --len_training")
if args.len_challenge is None:
raise ValueError("Please specify --len_challenge")
# Parameter validation
if args.secure_mode and args.train_seed is not None:
raise ValueError("Specify either secure mode or a seed for reproducibility, but not both")
if args.target_delta is None:
args.target_delta = 1 / args.len_training
if args.split_seed is not None and (args.seed_challenge is not None or args.seed_training is not None or args.seed_membership is not None):
raise ValueError("A --split_seed was given to generate seeds to construct splits but at least one explicit seed was specified. Bailing out.")
if args.output_dir is None:
now = datetime.now().strftime("%Y_%m_%d-%H_%M_%S")
if args.disable_dp:
args.output_dir = f"{now}-nodp-lr{args.learning_rate}-gamma{args.lr_scheduler_gamma}-S{args.lr_scheduler_step}-L{args.batch_size}-" + \
f"E{args.num_epochs}"
else:
args.output_dir = f"{now}-eps{args.target_epsilon}-delta{args.target_delta}-lr{args.learning_rate}-" + \
f"gamma{args.lr_scheduler_gamma}-S{args.lr_scheduler_step}-L{args.batch_size}-E{args.num_epochs}-C{args.max_grad_norm}" + \
f"{'-secure' if args.secure_mode else ''}"
print(f"No --output_dir specified. Will use {args.output_dir}")
if args.model_id is not None:
args.output_dir = args.output_dir + f"_{args.model_id}"
os.makedirs(args.output_dir, exist_ok=True)
with open(os.path.join(args.output_dir, "arguments"), "w") as argfile:
try:
commit_hash = git.Repo(".", search_parent_directories=True).git.rev_parse("HEAD")
except git.exc.InvalidGitRepositoryError:
commit_hash = "unknown"
print(f"Commit hash: {commit_hash}")
print(f"# Commit hash: {commit_hash}", file=argfile)
for arg in vars(args):
print(f"--{arg} {getattr(args, arg)}")
print(f"--{arg} {getattr(args, arg)}", file=argfile)
main(args)
| 17,940 | 41.921053 | 149 | py |
MICO | MICO-main/training/accountant.py | from typing import List, Optional
from prv_accountant.dpsgd import DPSGDAccountant
from opacus.accountants.accountant import IAccountant
class PRVAccountant(IAccountant):
def __init__(self, noise_multiplier, sample_rate, max_steps, eps_error = 0.1, delta_error = 1e-9):
super().__init__()
self.noise_multiplier = noise_multiplier
self.sample_rate = sample_rate
self.max_steps = max_steps
self.eps_error = eps_error
self.delta_error = delta_error
self.accountant = DPSGDAccountant(
noise_multiplier=noise_multiplier,
sampling_probability=sample_rate,
max_steps=max_steps,
eps_error=eps_error,
delta_error=delta_error)
def step(self, *, noise_multiplier: float, sample_rate: float):
if not (noise_multiplier == self.noise_multiplier and sample_rate == self.sample_rate):
raise ValueError("Noise multplier and sample rate must be constant for DPSGDAccountant")
if len(self.history) > 0:
_, _, num_steps = self.history.pop()
self.history.append((noise_multiplier, sample_rate, num_steps + 1))
else:
self.history.append((noise_multiplier, sample_rate, 1))
def get_epsilon(self, delta: float, *, eps_error: float = 0.1, delta_error: float = 1e-9) -> float:
"""
Compute upper bound for epsilon
:param float delta: Target delta
:return: Returns upper bound for $\varepsilon$
:rtype: float
"""
if not (eps_error == self.eps_error and delta_error == self.delta_error):
raise ValueError("Attempted to override eps_error and delta_error which are fixed at initialization")
if len(self.history) == 0:
return 0
_, _, num_steps = self.history[-1]
_, _, eps = self.accountant.compute_epsilon(delta=delta, num_steps=num_steps)
return eps
@classmethod
def mechanism(cls) -> str:
return "PRV"
def __len__(self):
return len(self.history) | 2,074 | 37.425926 | 113 | py |
MICO | MICO-main/training/train_sst2.py | import numpy as np
import pandas as pd
import os
import torch
import sys
import csv
import yaml
import warnings
import datasets
from opacus import PrivacyEngine
from dp_transformers import TrainingArguments, PrivacyArguments, PrivacyEngineCallback
from prv_accountant.dpsgd import find_noise_multiplier, DPSGDAccountant
from torchcsprng import create_mt19937_generator, create_random_device_generator
from transformers import (
HfArgumentParser, AutoTokenizer, AutoModelForSequenceClassification,
Trainer, EvalPrediction, PreTrainedTokenizerBase
)
from dataclasses import dataclass
from pathlib import Path
from mico_competition import ChallengeDataset, load_sst2
from typing import Optional
@dataclass
class ModelArguments:
model_name: str
@dataclass
class DataArguments:
model_index: int
len_training: int = 67349
len_challenge: int = 100
seed_challenge: Optional[int] = None
seed_training: Optional[int] = None
seed_membership: Optional[int] = None
split_seed: Optional[int] = None
@dataclass
class SecurePrivacyArguments(PrivacyArguments):
delta: float = None
use_secure_prng: bool = False
@dataclass
class Arguments:
training: TrainingArguments
model: ModelArguments
privacy: SecurePrivacyArguments
data: DataArguments
def preprocess_text(D: datasets.DatasetDict, tokenizer: PreTrainedTokenizerBase,
max_sequence_length: int = None) -> datasets.DatasetDict:
processed_data = D.map(
lambda batch: tokenizer(batch["sentence"], padding="max_length", max_length=max_sequence_length),
batched=True
)
return processed_data.remove_columns(["sentence"])
def load_dataset() -> datasets.DatasetDict:
if (args.data.seed_challenge is None or args.data.seed_training is None or args.data.seed_membership is None):
if args.data.split_seed is None:
seed_generator = create_random_device_generator()
else:
            seed_generator = create_mt19937_generator(args.data.split_seed)
args.data.seed_challenge, args.data.seed_training, args.data.seed_membership = torch.empty(
3, dtype=torch.int64).random_(0, to=None, generator=seed_generator)
print("Using generated seeds\n"
f" seed_challenge = {args.data.seed_challenge}\n"
f" seed_training = {args.data.seed_training}\n"
f" seed_membership = {args.data.seed_membership}\n")
else:
print("Using specified seeds")
full_dataset = load_sst2()
challenge_dataset = ChallengeDataset(
full_dataset,
len_challenge=args.data.len_challenge,
len_training=args.data.len_training,
seed_challenge=args.data.seed_challenge,
seed_training=args.data.seed_training,
seed_membership=args.data.seed_membership)
with open(os.path.join(args.training.output_dir, "challenge", "seed_challenge"), "w") as f:
print(f"{args.data.seed_challenge}", file=f)
with open(os.path.join(args.training.output_dir, "challenge", "seed_training"), "w") as f:
print(f"{args.data.seed_training}", file=f)
with open(os.path.join(args.training.output_dir, "challenge", "seed_membership"), "w") as f:
print(f"{args.data.seed_membership}", file=f)
with open(os.path.join(args.training.output_dir, "challenge", "solution.csv"), "w") as f:
solution = challenge_dataset.get_solutions()
csv.writer(f).writerow(solution)
ds_train = pd.DataFrame.from_records(challenge_dataset.get_train_dataset())
ds_test = pd.DataFrame.from_records(challenge_dataset.get_eval_dataset())
return datasets.DatasetDict({
"train": datasets.Dataset.from_pandas(ds_train),
"test": datasets.Dataset.from_pandas(ds_test)
}).remove_columns("idx")
def main(args: Arguments):
output_dir = Path(args.training.output_dir)
output_dir.mkdir(parents=True, exist_ok=True)
with open(os.path.join(args.training.output_dir, "arguments.yml"), "w") as f:
yaml.dump(args, f)
print(yaml.dump(args))
os.mkdir(output_dir/"challenge")
ds = load_dataset()
if args.privacy.use_secure_prng:
import torchcsprng as csprng
mt19937_gen = csprng.create_mt19937_generator()
ds['train'] = ds['train'].select(torch.randperm(len(ds['train']), generator=mt19937_gen).tolist())
os.environ['TOKENIZERS_PARALLELISM'] = 'false'
warnings.filterwarnings(action="ignore", module="torch", message=".*Using a non-full backward hook")
model = AutoModelForSequenceClassification.from_pretrained(args.model.model_name, num_labels=2)
tokenizer = AutoTokenizer.from_pretrained(args.model.model_name)
ds = preprocess_text(ds, tokenizer=tokenizer, max_sequence_length=67)
model.train()
model = model.to(args.training.device)
if (not args.training.no_cuda) and (not torch.cuda.is_available()):
raise RuntimeError("CUDA is not available. Please use --no-cuda to run this script.")
callbacks = []
if not args.privacy.disable_dp:
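        # Probability that any given example is drawn into a logical batch;
        # its reciprocal is approximately the number of optimizer steps per epoch.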
sampling_probability = training_args.train_batch_size * training_args.gradient_accumulation_steps / len(ds["train"])
num_steps = int(np.ceil(1 / sampling_probability) * training_args.num_train_epochs)
noise_multiplier = find_noise_multiplier(
sampling_probability=sampling_probability, num_steps=num_steps, target_epsilon=args.privacy.target_epsilon,
target_delta=args.privacy.delta,
eps_error=0.1
)
engine = PrivacyEngine(
module=model,
batch_size=training_args.per_device_train_batch_size*training_args.gradient_accumulation_steps,
sample_size=len(ds['train']),
noise_multiplier=noise_multiplier,
max_grad_norm=args.privacy.per_sample_max_grad_norm,
secure_rng=args.privacy.use_secure_prng,
)
accountant = DPSGDAccountant(
noise_multiplier=noise_multiplier, sampling_probability=sampling_probability, max_steps=num_steps,
eps_error=0.2
)
privacy_callback = PrivacyEngineCallback(
engine,
compute_epsilon=lambda s: accountant.compute_epsilon(num_steps=s, delta=args.privacy.delta)[2]
)
callbacks.append(privacy_callback)
def compute_metrics(p: EvalPrediction):
preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
preds = np.argmax(preds, axis=1)
return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()}
trainer = Trainer(
args=training_args,
train_dataset=ds["train"],
eval_dataset=ds["test"],
model=model,
tokenizer=tokenizer,
compute_metrics=compute_metrics,
callbacks=callbacks
)
try:
trainer.train()
finally:
trainer.save_model(output_dir/"challenge")
if args.privacy.disable_dp:
epsilon_final = float('inf')
else:
epsilon_final = accountant.compute_epsilon(num_steps=engine.steps, delta=args.privacy.delta)[2]
trainer.log({"epsilon_final": epsilon_final})
assert np.isclose(epsilon_final, args.privacy.target_epsilon, atol=0.2, rtol=0.0)
print("Training successful. Exiting...")
return 0
if __name__ == "__main__":
parser = HfArgumentParser((TrainingArguments, ModelArguments, SecurePrivacyArguments, DataArguments))
training_args, model_args, privacy_args, data_args = parser.parse_args_into_dataclasses()
args = Arguments(training=training_args, model=model_args, privacy=privacy_args, data=data_args)
sys.exit(main(args))
| 7,676 | 35.042254 | 124 | py |
MICO | MICO-main/training/train_cifar10.py | import os
import argparse
import warnings
import git
import csv
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torchcsprng import create_mt19937_generator, create_random_device_generator
from torch.utils.data import DataLoader
from opacus import PrivacyEngine
from opacus.validators import ModuleValidator
from opacus.utils.batch_memory_manager import BatchMemoryManager
from prv_accountant.dpsgd import find_noise_multiplier
from accountant import PRVAccountant
from mico_competition import ChallengeDataset, CNN, load_cifar10
from tqdm import tqdm, trange
from datetime import datetime
from typing import Callable, Optional
def accuracy(preds: np.ndarray, labels: np.ndarray) -> float:
    # preds and labels are numpy arrays at the call sites in train() and test()
    return float((preds == labels).mean())
def train(args: argparse.Namespace,
model: nn.Module,
device: torch.device,
train_loader: DataLoader,
criterion,
optimizer: optim.Optimizer,
epoch: int,
compute_epsilon: Optional[Callable[[int], float]] = None):
model.train()
losses = []
top1_acc = []
with BatchMemoryManager(
data_loader=train_loader,
max_physical_batch_size=args.max_physical_batch_size,
optimizer=optimizer
) as memory_safe_data_loader:
if args.disable_dp:
data_loader = train_loader
else:
data_loader = memory_safe_data_loader
# BatchSplittingSampler.__len__() approximates (badly) the length in physical batches
# See https://github.com/pytorch/opacus/issues/516
# We instead heuristically keep track of logical batches processed
pbar = tqdm(data_loader, desc="Batch", unit="batch", position=1, leave=True, total=len(train_loader), disable=None)
logical_batch_len = 0
for i, (inputs, target) in enumerate(data_loader):
inputs = inputs.to(device)
target = target.to(device)
logical_batch_len += len(target)
if logical_batch_len >= args.batch_size:
pbar.update(1)
logical_batch_len = logical_batch_len % args.max_physical_batch_size
optimizer.zero_grad()
output = model(inputs)
loss = criterion(output, target)
preds = np.argmax(output.detach().cpu().numpy(), axis=1)
labels = target.detach().cpu().numpy()
acc = accuracy(preds, labels)
losses.append(loss.item())
top1_acc.append(acc)
loss.backward()
optimizer.step()
if (pbar.n + 1) % args.logging_steps == 0 or (pbar.n + 1) == pbar.total:
if not args.disable_dp:
epsilon = compute_epsilon(delta=args.target_delta)
pbar.set_postfix(
epoch=f"{epoch:02}",
train_loss=f"{np.mean(losses):.3f}",
accuracy=f"{np.mean(top1_acc) * 100:.3f}",
dp=f"(ε={epsilon:.2f}, δ={args.target_delta})"
)
else:
pbar.set_postfix(
epoch=f"{epoch:02}",
train_loss=f"{np.mean(losses):.3f}",
accuracy=f"{np.mean(top1_acc) * 100:.3f}",
dp="(ε = ∞, δ = 0)"
)
pbar.update(pbar.total - pbar.n)
def test(args: argparse.Namespace,
model: nn.Module,
device: torch.device,
test_loader: DataLoader,
criterion):
model.eval()
losses = []
top1_acc = []
with torch.no_grad():
for inputs, target in tqdm(test_loader, desc="Test ", unit="batch", disable=None):
inputs = inputs.to(device)
target = target.to(device)
output = model(inputs)
loss = criterion(output, target)
preds = np.argmax(output.detach().cpu().numpy(), axis=1)
labels = target.detach().cpu().numpy()
acc = accuracy(preds, labels)
losses.append(loss.item())
top1_acc.append(acc)
top1_avg = np.mean(top1_acc)
loss_avg = np.mean(losses)
print(
f"Test Loss : {loss_avg:.6f}\n"
f"Test Accuracy: {top1_avg * 100:.6f}"
)
return np.mean(top1_acc)
def main(args: argparse.Namespace):
noise_generator = None
if not args.secure_mode and args.train_seed is not None:
# Following the advice on https://pytorch.org/docs/1.8.1/notes/randomness.html
if torch.cuda.is_available():
os.environ['CUBLAS_WORKSPACE_CONFIG'] = ":4096:8"
torch.use_deterministic_algorithms(True)
torch.cuda.manual_seed(args.train_seed)
torch.cuda.manual_seed_all(args.train_seed)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
import random
random.seed(args.train_seed)
os.environ['PYTHONHASHSEED'] = str(args.train_seed)
# Required to get deterministic batches because Opacus uses secure_rng as a generator for
# train_loader when poisson_sampling = True even though secure_mode = False, which sets secure_rng = None
# https://github.com/pytorch/opacus/blob/5e632cdb8d497aade29e5555ad79921c239c78f7/opacus/privacy_engine.py#L206
torch.manual_seed(args.train_seed)
np.random.seed(args.train_seed)
noise_generator = create_mt19937_generator(args.train_seed)
if (args.seed_challenge is None or args.seed_training is None or args.seed_membership is None):
if args.split_seed is None:
seed_generator = create_random_device_generator()
else:
seed_generator = create_mt19937_generator(args.split_seed)
args.seed_challenge, args.seed_training, args.seed_membership = torch.empty(
3, dtype=torch.int64).random_(0, to=None, generator=seed_generator)
print("Using generated seeds\n"
f" seed_challenge = {args.seed_challenge}\n"
f" seed_training = {args.seed_training}\n"
f" seed_membership = {args.seed_membership}\n")
else:
print("Using specified seeds")
full_dataset = load_cifar10(dataset_dir=args.dataset_dir, download=False)
challenge_dataset = ChallengeDataset(
full_dataset,
len_challenge=args.len_challenge,
len_training=args.len_training,
seed_challenge=args.seed_challenge,
seed_training=args.seed_training,
seed_membership=args.seed_membership)
train_dataset = challenge_dataset.get_train_dataset()
test_dataset = challenge_dataset.get_eval_dataset()
train_loader = DataLoader(
train_dataset,
batch_size=args.batch_size,
num_workers=args.dataloader_num_workers,
pin_memory=True,
)
test_loader = DataLoader(
test_dataset,
batch_size=args.max_physical_batch_size,
num_workers=args.dataloader_num_workers
)
    # Suppress warnings
warnings.filterwarnings(action="ignore", module="opacus", message=".*Secure RNG turned off")
warnings.filterwarnings(action="ignore", module="torch", message=".*Using a non-full backward hook")
model = CNN()
assert ModuleValidator.is_valid(model)
device = 'cuda' if torch.cuda.is_available() else 'cpu'
model.to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=args.learning_rate, momentum=0)
    # Not the same as args.batch_size / len(train_dataset):
    # len(train_loader) == ceil(len(train_dataset) / args.batch_size) since drop_last=False,
    # so the expected Poisson batch size is len(train_dataset) / len(train_loader) <= args.batch_size
args.sample_rate = 1 / len(train_loader)
num_steps = int(len(train_loader) * args.num_epochs)
if not args.disable_dp:
args.noise_multiplier = find_noise_multiplier(
sampling_probability=args.sample_rate,
num_steps=num_steps,
target_epsilon=args.target_epsilon,
target_delta=args.target_delta,
eps_error=0.1
)
privacy_engine = PrivacyEngine(secure_mode=args.secure_mode)
# Override Opacus accountant
# Revise if https://github.com/pytorch/opacus/pull/493 is merged
privacy_engine.accountant = PRVAccountant(
noise_multiplier=args.noise_multiplier,
sample_rate=args.sample_rate,
max_steps=num_steps,
eps_error=0.1,
delta_error=1e-9)
model, optimizer, train_loader = privacy_engine.make_private(
module=model,
optimizer=optimizer,
data_loader=train_loader,
noise_multiplier=args.noise_multiplier,
max_grad_norm=args.max_grad_norm,
poisson_sampling=True,
noise_generator=noise_generator
)
print(f"Training using DP-SGD with {optimizer.original_optimizer.__class__.__name__} optimizer\n"
f" noise multiplier σ = {optimizer.noise_multiplier},\n"
f" clipping norm C = {optimizer.max_grad_norm:},\n"
f" average batch size L = {args.batch_size},\n"
f" sample rate = {args.sample_rate},\n"
f" for {args.num_epochs} epochs ({num_steps} steps)\n"
f" to target ε = {args.target_epsilon}, δ = {args.target_delta}")
compute_epsilon: Optional[Callable[[float], float]] = lambda delta: privacy_engine.get_epsilon(delta=delta)
else:
print(f"Training using SGD with {optimizer.__class__.__name__} optimizer\n"
f" batch size L = {args.batch_size},\n"
f" for {args.num_epochs} epochs ({num_steps} steps)")
compute_epsilon = None
# Must be initialized after attaching the privacy engine.
# See https://discuss.pytorch.org/t/how-to-use-lr-scheduler-in-opacus/111718
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=args.lr_scheduler_step, gamma=args.lr_scheduler_gamma)
pbar = trange(args.num_epochs, desc="Epoch", unit="epoch", position=0, leave=True, disable=None)
for epoch in pbar:
pbar.set_postfix(lr=f"{scheduler.get_last_lr()}")
train(args, model, device, train_loader, criterion, optimizer, epoch + 1, compute_epsilon=compute_epsilon)
scheduler.step()
acc = test(args, model, device, test_loader, criterion)
with open(os.path.join(args.output_dir, "accuracy"), "w") as f:
print(f"{acc:.3f}", file=f)
if not args.disable_dp:
final_epsilon = compute_epsilon(args.target_delta)
print(f"The trained model is (ε = {final_epsilon}, δ = {args.target_delta})-DP")
with open(os.path.join(args.output_dir, "epsilon"), "w") as f:
print(f"{final_epsilon:.3f}", file=f)
with open(os.path.join(args.output_dir, "seed_challenge"), "w") as f:
print(f"{args.seed_challenge}", file=f)
with open(os.path.join(args.output_dir, "seed_training"), "w") as f:
print(f"{args.seed_training}", file=f)
with open(os.path.join(args.output_dir, "seed_membership"), "w") as f:
print(f"{args.seed_membership}", file=f)
with open(os.path.join(args.output_dir, "solution.csv"), "w") as f:
solution = challenge_dataset.get_solutions()
csv.writer(f).writerow(solution)
torch.save(model.state_dict(), os.path.join(args.output_dir, "model.pt"))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--model_id", type=int, metavar='ID',
help="an identifier for the trained model")
# Seeds
parser.add_argument("--train_seed", type=int, metavar='TS',
help="seed for reproducibility")
parser.add_argument("--split_seed", type=int, metavar='SS',
help="seed to deterministically generate the 3 seeds for creating splits "
"(--seed_challenge, --seed_trainig, seed_membership)")
parser.add_argument("--seed_challenge", type=int, metavar='SC',
help="seed to select challenge examples")
parser.add_argument("--seed_training", type=int, metavar='ST',
help="seed to select non-challenge training examples")
parser.add_argument("--seed_membership", type=int, metavar='SM',
help="seed to split challenge examples into members/non-members")
# Split lengths
parser.add_argument("--len_training", type=int, metavar="N", required=True,
help="(required) number of examples used for training")
parser.add_argument("--len_challenge", type=int, metavar="m", required=True,
help="(required) number of member and non-member challenge examples "
"(i.e., m members and m non-members)")
# General
parser.add_argument("--secure_mode", action="store_true", default=False,
help="whether to use Opacus secure mode for training (default=True)")
parser.add_argument("--disable_dp", action="store_true", default=False,
help="whether to disable differentially private training altogether (default=False)")
parser.add_argument("--dataloader_num_workers", type=int, metavar='W', default=2,
help="number of workers for data loading. 0 means that the data will be loaded in the main process (default=2). "
"See torch.utils.data.DataLoader")
parser.add_argument("--logging_steps", type=int, metavar='k', default=10,
help="prints accuracy, loss, and privacy accounting information during training every k logical batches "
"(default=10)")
parser.add_argument("--dataset_dir", type=str, metavar="DATA", default=".",
help="root directory for cached dataset (default='.')")
parser.add_argument("--output_dir", type=str, metavar="OUT",
help="output directory. If none given, will pick one based on hyperparameters")
# Training hyperparameters
parser.add_argument("--target_epsilon", type=float, metavar="EPSILON",
help="target DP epsilon. Required unless specifying --disable_dp")
parser.add_argument("--target_delta", type=float, metavar="DELTA",
help="target DP delta. Will use 1/N if unspecified")
parser.add_argument("--batch_size", type=int, metavar="L",
help="expected logical batch size; determines the sample rate of DP-SGD. "
"Actual batch size varies because batches are constructed using Poisson sampling")
parser.add_argument("--max_physical_batch_size", type=int, metavar="B",
help="maximum physical batch size. Use to simulate logical batches larger than available memory and "
"to safeguard against unusually large batches produces by Poisson sampling. "
"See opacus.utils.batch_memory_manager.BatchMemoryManager")
parser.add_argument("--num_epochs", metavar='E', type=int, default=10,
help="number of training epochs (default=10)")
parser.add_argument("--max_grad_norm", type=float, metavar='C', default=1.0,
help="clipping norm for per-sample gradients in DP-SGD (default=1.0)")
parser.add_argument("--learning_rate", type=float, metavar="LR", default=1.0,
help="initial learning rate (default=1.0)")
parser.add_argument("--lr_scheduler_gamma", type=float, metavar="GAMMA", default=1.0,
help="gamma parameter for exponential learning rate scheduler")
parser.add_argument("--lr_scheduler_step", type=int, metavar="S", default=1,
help="step size for exponential learning rate scheduler")
args = parser.parse_args()
if args.len_training is None:
raise ValueError("Please specify --len_training")
if args.len_challenge is None:
raise ValueError("Please specify --len_challenge")
# Parameter validation
if args.secure_mode and args.train_seed is not None:
raise ValueError("Specify either secure mode or a seed for reproducibility, but not both")
if args.target_delta is None:
args.target_delta = 1 / args.len_training
if args.split_seed is not None and (args.seed_challenge is not None or args.seed_training is not None or args.seed_membership is not None):
raise ValueError("A --split_seed was given to generate seeds to construct splits but at least one explicit seed was specified. Bailing out.")
if args.output_dir is None:
now = datetime.now().strftime("%Y_%m_%d-%H_%M_%S")
if args.disable_dp:
args.output_dir = f"{now}-nodp-lr{args.learning_rate}-gamma{args.lr_scheduler_gamma}-S{args.lr_scheduler_step}-L{args.batch_size}-" + \
f"E{args.num_epochs}"
else:
args.output_dir = f"{now}-eps{args.target_epsilon}-delta{args.target_delta}-lr{args.learning_rate}-" + \
f"gamma{args.lr_scheduler_gamma}-S{args.lr_scheduler_step}-L{args.batch_size}-E{args.num_epochs}-C{args.max_grad_norm}" + \
f"{'-secure' if args.secure_mode else ''}"
print(f"No --output_dir specified. Will use {args.output_dir}")
if args.model_id is not None:
args.output_dir = args.output_dir + f"_{args.model_id}"
os.makedirs(args.output_dir, exist_ok=True)
with open(os.path.join(args.output_dir, "arguments"), "w") as argfile:
try:
commit_hash = git.Repo(".", search_parent_directories=True).git.rev_parse("HEAD")
except git.exc.InvalidGitRepositoryError:
commit_hash = "unknown"
print(f"Commit hash: {commit_hash}")
print(f"# Commit hash: {commit_hash}", file=argfile)
for arg in vars(args):
print(f"--{arg} {getattr(args, arg)}")
print(f"--{arg} {getattr(args, arg)}", file=argfile)
main(args)
| 17,963 | 41.976077 | 149 | py |
MICO | MICO-main/src/mico-competition/mico.py | from __future__ import annotations
import os
import torch
import torch.nn as nn
from collections import OrderedDict
from typing import List, Optional, Union, Type, TypeVar
from torch.utils.data import Dataset, ConcatDataset, random_split
D = TypeVar("D", bound="ChallengeDataset")
LEN_CHALLENGE = 100
class ChallengeDataset:
"""Reconstructs the data splits associated with a model from stored seeds.
Given a `torch.utils.Dataset`, the desired length of the training dataset `n`,
and the desired number of members/non-member challenge examples `m`, it uses
`torch.utils.data.random_split` with the stored seeds to produce:
- `challenge` : `2m` challenge examples
- `nonmember` : `m` non-members challenge examples from `challenge`
- `member` : `m` member challenge examples, from `challenge`
- `training` : non-challenge examples to use for model training
- `evaluation`: non-challenge examples to use for model evaluation
Use `get_training_dataset` to construct the full training dataset
(the concatenation of `member` and `training`) to train a model.
Use `get_eval_dataset` to retrieve `evaluation`. Importantly, do not
attempt to use `nonmember` for model evaluation, as releasing the
evaluation results would leak membership information.
The diagram below details the process, where arrows denote calls to
`torch.utils.data.random_split` and `N = len(dataset)`:
┌────────────────────────────────────────────────────────────┐
│ dataset │
└──────────────────────────────┬─────────────────────────────┘
│N
seed_challenge │
┌────────────────────┴────────┐
│2m │N - 2m
▼ ▼
┌───────────────────┬────────────────────────────────────────┐
│ challenge │ rest │
└─────────┬─────────┴───────────────────┬────────────────────┘
│2m │N - 2m
seed_membership │ seed_training │
┌────┴────┐ ┌─────────┴────────┐
│m │m │n - m │N - n - m
▼ ▼ ▼ ▼
┌─────────┬─────────┬───────────────────┬────────────────────┐
│nonmember│ member │ training │ evaluation │
└─────────┴─────────┴───────────────────┴────────────────────┘
- Models are trained on `member + training` and evaluated on `evaluation`
- Standard scenarios disclose `challenge` (equivalently, `seed_challenge`)
- DP distinguisher scenarios also disclose `training` and `evaluation` (equivalently, `seed_training`)
- To disclose ground truth, disclose `nonmember` and `member` (equivalently, `seed_membership`)
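
    Example (an illustrative sketch; the directory path, dataset, and `len_training`
    value below are placeholders rather than values required by the competition):

        from mico_competition import ChallengeDataset, load_cifar10

        full_dataset = load_cifar10(dataset_dir=".")
        challenge_ds = ChallengeDataset.from_path(
            "path/to/model_dir",   # must contain at least a `seed_challenge` file
            dataset=full_dataset,
            len_training=50000,    # assumed split size, for illustration only
        )
        challenges = challenge_ds.get_challenges()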
"""
def __init__(self, dataset: Dataset, len_training: int, len_challenge: int,
seed_challenge: int, seed_training: Optional[int], seed_membership: Optional[int]) -> None:
"""Pseudorandomly select examples for `challenge`, `non-member`, `member`, `training`, and `evaluation`
splits from given seeds. Only the seed for `challenge` is mandatory.
Args:
dataset (Dataset): Dataset to select examples from.
len_training (int): Length of the training dataset.
len_challenge (int): Number of challenge examples (`len_challenge` members and `len_challenge` non-members).
seed_challenge (int): Seed to select challenge examples.
seed_training (Optional[int]): Seed to select non-challenge training examples.
seed_membership (Optional[int]): Seed to split challenge examples into members/non-members.
"""
from torchcsprng import create_mt19937_generator
challenge_gen = create_mt19937_generator(seed_challenge)
self.challenge, self.rest = random_split(
dataset,
[2 * len_challenge, len(dataset) - 2 * len_challenge],
generator = challenge_gen)
        # Default the optional splits to None so the getters below can detect missing seeds.
        self.training = self.evaluation = None
        self.nonmember = self.member = None
        if seed_training is not None:
training_gen = create_mt19937_generator(seed_training)
self.training, self.evaluation = random_split(
self.rest,
[len_training - len_challenge, len(dataset) - len_training - len_challenge],
generator = training_gen)
if seed_membership is not None:
membership_gen = create_mt19937_generator(seed_membership)
self.nonmember, self.member = random_split(
self.challenge,
[len_challenge, len_challenge],
generator = membership_gen)
def get_challenges(self) -> Dataset:
"""Returns the challenge dataset.
Returns:
Dataset: The challenge examples.
"""
return self.challenge
def get_train_dataset(self) -> Dataset:
"""Returns the training dataset.
Raises:
ValueError: If the seed to select non-challenge training examples has not been set.
ValueError: If the seed to split challenges into members/non-members has not been set.
Returns:
Dataset: The training dataset.
"""
if self.training is None:
raise ValueError("The seed to generate the training dataset has not been set.")
if self.member is None:
raise ValueError("The seed to split challenges into members/non-members has not been set.")
return ConcatDataset([self.member, self.training])
def get_eval_dataset(self) -> Dataset:
"""Returns the evaluation dataset.
Raises:
ValueError: If the seed to generate the evaluation dataset has not been set.
Returns:
Dataset: The evaluation dataset.
"""
if self.evaluation is None:
raise ValueError("The seed to generate the evaluation dataset has not been set.")
return self.evaluation
def get_solutions(self) -> List:
"""Returns the membership labels of the challenges.
Raises:
ValueError: If the seed to generate the evaluation dataset has not been set.
Returns:
List: The list of membership labels for challenges, indexed as in the
Dataset returned by `get_challenges()`.
"""
if self.member is None:
raise ValueError("The seed to split challenges into members/non-members has not been set.")
member_indices = set(self.challenge.indices[i] for i in self.member.indices)
labels = [1 if i in member_indices else 0 for i in self.challenge.indices]
return labels
@classmethod
def from_path(cls: Type[D], path: Union[str, os.PathLike], dataset: Dataset, len_training: int, len_challenge: int=LEN_CHALLENGE) -> D:
"""Loads a ChallengeDataset from a directory `path`.
The directory must contain, at a minimum, the file `seed_challenge`.
Args:
path (str): Path to the folder containing the dataset.
Returns:
ChallengeDataset: The loaded ChallengeDataset.
"""
# Load the seeds.
if os.path.exists(os.path.join(path, "seed_challenge")):
with open(os.path.join(path, "seed_challenge"), "r") as f:
seed_challenge = int(f.read())
else:
raise Exception(f"`seed_challenge` was not found in {path}")
seed_training = None
if os.path.exists(os.path.join(path, "seed_training")):
with open(os.path.join(path, "seed_training"), "r") as f:
seed_training = int(f.read())
seed_membership = None
if os.path.exists(os.path.join(path, "seed_membership")):
with open(os.path.join(path, "seed_membership"), "r") as f:
seed_membership = int(f.read())
return cls(
dataset=dataset,
len_training=len_training,
len_challenge=len_challenge,
seed_challenge=seed_challenge,
seed_training=seed_training,
seed_membership=seed_membership
)
X = TypeVar("X", bound="CNN")
class CNN(nn.Module):
def __init__(self):
super().__init__()
self.cnn = nn.Sequential(
nn.Conv2d(3, 128, kernel_size=8, stride=2, padding=3), nn.Tanh(),
nn.MaxPool2d(kernel_size=3, stride=1),
nn.Conv2d(128, 256, kernel_size=3), nn.Tanh(),
nn.Conv2d(256, 256, kernel_size=3), nn.Tanh(),
nn.AvgPool2d(kernel_size=2, stride=2),
nn.Flatten(),
nn.Linear(in_features=6400, out_features=10)
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
# shape of x is [B, 3, 32, 32] for CIFAR10
logits = self.cnn(x)
return logits
@classmethod
def load(cls: Type[X], path: Union[str, os.PathLike]) -> X:
model = cls()
state_dict = torch.load(path)
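        # Opacus wraps trained models in GradSampleModule, which prefixes parameter names
        # with '_module.'; strip the prefix so the plain module can load the weights.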
new_state_dict = OrderedDict((k.replace('_module.', ''), v) for k, v in state_dict.items())
model.load_state_dict(new_state_dict)
model.eval()
return model
Y = TypeVar("Y", bound="MLP")
class MLP(nn.Module):
"""
The fully-connected network architecture from Bao et al. (2022).
"""
def __init__(self):
super().__init__()
self.mlp = nn.Sequential(
nn.Linear(600, 128), nn.Tanh(),
nn.Linear(128, 100)
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
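        # shape of x is [B, 600] for Purchase100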
return self.mlp(x)
@classmethod
def load(cls: Type[Y], path: Union[str, os.PathLike]) -> Y:
model = cls()
state_dict = torch.load(path)
new_state_dict = OrderedDict((k.replace('_module.', ''), v) for k, v in state_dict.items())
model.load_state_dict(new_state_dict)
model.eval()
return model
def load_model(task: str, path: Union[str, os.PathLike]) -> nn.Module:
if task == 'cifar10':
return CNN.load(os.path.join(path, 'model.pt'))
elif task == 'purchase100':
return MLP.load(os.path.join(path, 'model.pt'))
elif task == 'sst2':
from transformers import AutoModelForSequenceClassification
# tokenizer = AutoTokenizer.from_pretrained('roberta-base')
model = AutoModelForSequenceClassification.from_pretrained(path, num_labels=2)
model.eval()
return model
else:
raise ValueError("`task` must be one of {'cifar10', 'purchase100', 'sst2'}")
| 10,705 | 39.55303 | 139 | py |
MICO | MICO-main/src/mico-competition/challenge_datasets.py | import os
import numpy as np
import torch
from torch.utils.data import Dataset, ConcatDataset
def load_cifar10(dataset_dir: str = ".", download=True) -> Dataset:
"""Loads the CIFAR10 dataset.
"""
from torchvision.datasets import CIFAR10
import torchvision.transforms as transforms
# Precomputed statistics of CIFAR10 dataset
# Exact values are assumed to be known, but can be estimated with a modest privacy budget
# Opacus wrongly uses CIFAR10_STD = (0.2023, 0.1994, 0.2010)
# This is the _average_ std across all images (see https://github.com/kuangliu/pytorch-cifar/issues/8)
CIFAR10_MEAN = (0.49139968, 0.48215841, 0.44653091)
CIFAR10_STD = (0.24703223, 0.24348513, 0.26158784)
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(CIFAR10_MEAN, CIFAR10_STD)
])
# NB: torchvision checks the integrity of downloaded files
train_dataset = CIFAR10(
root=f"{dataset_dir}/cifar10",
train=True,
download=download,
transform=transform
)
test_dataset = CIFAR10(
root=f"{dataset_dir}/cifar10",
train=False,
download=download,
transform=transform
)
return ConcatDataset([train_dataset, test_dataset])
def load_sst2() -> Dataset:
"""Loads the SST2 dataset.
"""
import datasets
# Specify cache_dir as argument?
ds = datasets.load_dataset("glue", "sst2")
return ConcatDataset([ds['train'], ds['validation']])
class Purchase100(Dataset):
"""
Purchase100 dataset pre-processed by Shokri et al.
(https://github.com/privacytrustlab/datasets/blob/master/dataset_purchase.tgz).
We save the dataset in a .pickle version because it is much faster to load
than the original file.
"""
def __init__(self, dataset_dir: str) -> None:
import pickle
dataset_path = os.path.join(dataset_dir, 'purchase100', 'dataset_purchase')
# Saving the dataset in pickle format because it is quicker to load.
dataset_path_pickle = dataset_path + '.pickle'
if not os.path.exists(dataset_path) and not os.path.exists(dataset_path_pickle):
raise ValueError("Purchase-100 dataset not found.\n"
"You may download the dataset from https://www.comp.nus.edu.sg/~reza/files/datasets.html\n"
f"and unzip it in the {dataset_dir}/purchase100 directory")
if not os.path.exists(dataset_path_pickle):
print('Found the dataset. Saving it in a pickle file that takes less time to load...')
purchase = np.loadtxt(dataset_path, dtype=int, delimiter=',')
with open(dataset_path_pickle, 'wb') as f:
pickle.dump({'dataset': purchase}, f)
with open(dataset_path_pickle, 'rb') as f:
dataset = pickle.load(f)['dataset']
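        # The first column is the class label (1-indexed in the raw file, shifted to 0..99);
        # the remaining 600 columns are the binary purchase features.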
self.labels = list(dataset[:, 0] - 1)
self.records = torch.FloatTensor(dataset[:, 1:])
assert len(self.labels) == len(self.records), f'ERROR: {len(self.labels)} and {len(self.records)}'
print('Successfully loaded the Purchase-100 dataset consisting of',
f'{len(self.records)} records and {len(self.records[0])}', 'attributes.')
def __len__(self) -> int:
return len(self.records)
def __getitem__(self, idx: int):
return self.records[idx], self.labels[idx]
def load_purchase100(dataset_dir: str = ".") -> Dataset:
"""Loads the Purchase-100 dataset.
"""
return Purchase100(dataset_dir)
| 3,560 | 34.61 | 120 | py |
MICO | MICO-main/src/mico-competition/__init__.py | from .mico import ChallengeDataset, CNN, MLP, load_model
from .challenge_datasets import load_cifar10, load_purchase100, load_sst2
__all__ = [
"ChallengeDataset",
"load_model",
"load_cifar10",
"load_purchase100",
"load_sst2",
"CNN",
"MLP"
] | 269 | 21.5 | 73 | py |
MICO | MICO-main/src/mico-competition/scoring/score.py | """Scoring program for the CodaLab competition platform.
Usage:
score.py <input directory> <output directory>
This program expects the following directory structure for <input directory>:
- <input directory>/ref/: Contains the solutions directories
(e.g., cifar10/cifar10_lo, cifar10/cifar10_hi, cifar10/cifar10_inf) for either the dev or the final phase.
- <input directory>/res/: Contains the predictions directories (containing prediction.csv) for both
phases.
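
For example, an illustrative layout (the phase and model directory names here are
placeholders) would be:

- <input directory>/ref/cifar10/cifar10_lo/dev/model_0/solution.csv
- <input directory>/res/cifar10/cifar10_lo/dev/model_0/prediction.csv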
"""
import os
import numpy as np
from sklearn.metrics import roc_curve, roc_auc_score
from typing import List, Dict
FPR_THRESHOLD = 0.1
FPR_THRESHOLD_LIST = [0.001, 0.01, 0.05, 0.1, 0.15, 0.2]
def tpr_at_fpr(true_membership: List, predictions: List, max_fpr=FPR_THRESHOLD) -> float:
"""Calculates the best True Positive Rate when the False Positive Rate is
at most `max_fpr`.
Args:
true_membership (List): A list of values in {0,1} indicating the membership of a
challenge point. 0: "non-member", 1: "member".
predictions (List): A list of values in the range [0,1] indicating the confidence
that a challenge point is a member. The closer the value to 1, the more
confident the predictor is about the hypothesis that the challenge point is
a member.
max_fpr (float, optional): Threshold on the FPR. Defaults to 0.1.
Returns:
float: The TPR @ `max_fpr` FPR.
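
    Example (illustrative): a perfect predictor such as
    `tpr_at_fpr([0, 0, 1, 1], [0.0, 0.0, 1.0, 1.0])` returns 1.0, whereas a constant
    predictor such as `tpr_at_fpr([0, 0, 1, 1], [0.5, 0.5, 0.5, 0.5])` returns 0.0
    for any `max_fpr` below 1.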
"""
fpr, tpr, _ = roc_curve(true_membership, predictions)
return max(tpr[fpr < max_fpr])
def score(solutions: List, predictions: List) -> Dict:
scores = {}
for max_fpr in FPR_THRESHOLD_LIST:
scores[f"TPR_FPR_{int(1e4 * max_fpr)}"] = tpr_at_fpr(solutions, predictions, max_fpr=max_fpr)
fpr, tpr, _ = roc_curve(solutions, predictions)
scores["fpr"] = fpr
scores["tpr"] = tpr
scores["AUC"] = roc_auc_score(solutions, predictions)
scores["MIA"] = np.max(tpr - fpr)
# This is the balanced accuracy, which coincides with accuracy for balanced classes
scores["accuracy"] = np.max(1 - (fpr + (1 - tpr)) / 2)
return scores
if __name__ == "__main__":
from score_html import generate_html
# Parse arguments.
assert len(os.sys.argv) == 3, "Usage: score.py <predictions and solutions directory> <output directory>"
solutions_dir = os.path.join(os.sys.argv[1], "ref")
predictions_dir = os.path.join(os.sys.argv[1], "res")
output_dir = os.sys.argv[2]
current_phase = None
# Which competition?
dataset = os.listdir(solutions_dir)
assert len(dataset) == 1, f"Wrong content: {solutions_dir}: {dataset}"
dataset = dataset[0]
print(f"[*] Competition: {dataset}")
# Update solutions and predictions directories.
solutions_dir = os.path.join(solutions_dir, dataset)
    assert os.path.exists(solutions_dir), f"Couldn't find solutions directory: {solutions_dir}"
predictions_dir = os.path.join(predictions_dir, dataset)
assert os.path.exists(predictions_dir), f"Couldn't find predictions directory: {predictions_dir}"
scenarios = sorted(os.listdir(solutions_dir))
assert len(scenarios) == 3, f"Found spurious directories in solutions directory: {solutions_dir}: {scenarios}"
found_scenarios = sorted(os.listdir(predictions_dir))
assert scenarios == found_scenarios, f"Found spurious directories in predictions directory {solutions_dir}: {found_scenarios}"
# Compute the scores for each scenario
all_scores = {}
for scenario in scenarios:
print(f"[*] Processing {scenario}...")
# What phase are we in?
phase = os.listdir(os.path.join(solutions_dir, scenario))
assert len(phase) == 1, "Corrupted solutions directory"
assert phase[0] in ["dev", "final"], "Corrupted solutions directory"
current_phase = phase[0]
print(f"[**] Scoring `{current_phase}` phase...")
# We compute the scores globally, across the models. This is somewhat equivalent to having
# one attack (threshold) for all the attacks.
# Load the predictions.
predictions = []
solutions = []
for model_id in os.listdir(os.path.join(solutions_dir, scenario, current_phase)):
basedir = os.path.join(scenario, current_phase, model_id)
solutions.append(np.loadtxt(os.path.join(solutions_dir, basedir, "solution.csv"), delimiter=","))
predictions.append(np.loadtxt(os.path.join(predictions_dir, basedir, "prediction.csv"), delimiter=","))
solutions = np.concatenate(solutions)
predictions = np.concatenate(predictions)
# Verify that the predictions are valid.
assert len(predictions) == len(solutions)
assert np.all(predictions >= 0), "Some predictions are < 0"
assert np.all(predictions <= 1), "Some predictions are > 1"
scores = score(solutions, predictions)
print(f"[*] Scores: {scores}")
all_scores[scenario] = scores
# Store the scores.
os.makedirs(output_dir, exist_ok=True)
with open(os.path.join(output_dir, "scores.txt"), "w") as f:
for i, scenario in enumerate(scenarios):
assert scenario in all_scores, f"Score for scenario {scenario} not found. Corrupted ref/?"
for score in {"AUC", "MIA", "accuracy"}:
f.write(f"scenario{i+1}_{score}: {all_scores[scenario][score]}\n")
for max_fpr in FPR_THRESHOLD_LIST:
score = f"TPR_FPR_{int(1e4 * max_fpr)}"
f.write(f"scenario{i+1}_{score}: {all_scores[scenario][score]}\n")
# Average [email protected] (used for ranking)
avg = np.mean([all_scores[scenario]["TPR_FPR_1000"] for scenario in scenarios])
f.write(f"average_TPR_FPR_1000: {avg}")
# Detailed scoring (HTML)
html = generate_html(all_scores)
with open(os.path.join(output_dir, "scores.html"), "w") as f:
f.write(html)
| 5,923 | 41.014184 | 130 | py |
MICO | MICO-main/src/mico-competition/scoring/score_html.py | import io
import matplotlib
import pandas as pd
import matplotlib.pyplot as plt
def image_to_html(fig):
"""Converts a matplotlib plot to SVG"""
iostring = io.StringIO()
fig.savefig(iostring, format="svg", bbox_inches=0, dpi=300)
iostring.seek(0)
return iostring.read()
def generate_roc(fpr, tpr):
fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(8,3.5))
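    # Left panel (ax1): ROC curve on linear axes; right panel (ax2): the same curve on log-log axes.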
ax2.semilogx()
ax2.semilogy()
ax2.set_xlim(1e-5,1)
ax2.set_ylim(1e-5,1)
ax2.set_xlabel("False Positive Rate")
#ax2.set_ylabel("True Positive Rate")
ax2.plot([0, 1], [0, 1], ls=':', color='grey')
ax1.set_xlim(0,1)
ax1.set_ylim(0,1)
ax1.set_xlabel("False Positive Rate")
ax1.set_ylabel("True Positive Rate")
ax1.plot([0,1], [0,1], ls=':', color='grey')
ax1.plot(fpr, tpr)
ax2.plot(fpr, tpr)
return fig
def generate_table(scores):
table = pd.DataFrame(scores).T
table.drop(["fpr", "tpr"], axis=1, inplace=True)
# replace = {
# "inf": "No DP",
# "hi": "High ε",
# "lo": "Low ε",
# }
# table.index = [replace[i] for i in table.index]
replace_column = {
"accuracy": "Accuracy",
"AUC": "AUC-ROC",
"MIA": "MIA",
"TPR_FPR_10": "TPR @ 0.001 FPR",
"TPR_FPR_100": "TPR @ 0.01 FPR",
"TPR_FPR_500": "TPR @ 0.05 FPR",
"TPR_FPR_1000": "TPR @ 0.1 FPR",
"TPR_FPR_1500": "TPR @ 0.15 FPR",
"TPR_FPR_2000": "TPR @ 0.2 FPR",
}
table.columns = [replace_column[c] for c in table.columns]
return table
def generate_html(scores):
"""Generates the HTML document as a string, containing the various detailed scores"""
matplotlib.use('Agg')
img = {}
for scenario in scores:
fpr = scores[scenario]["fpr"]
tpr = scores[scenario]["tpr"]
fig = generate_roc(fpr, tpr)
fig.tight_layout(pad=1.0)
img[scenario] = f"<h2>{scenario}</h2><div>{image_to_html(fig)}</div>"
table = generate_table(scores)
# Generate the HTML document.
css = '''
body {
background-color: #ffffff;
}
h1 {
text-align: center;
}
h2 {
text-align: center;
}
div {
white-space: normal;
text-align: center;
}
table {
border-collapse: collapse;
margin: auto;
}
table > :is(thead, tbody) > tr > :is(th, td) {
padding: 5px;
}
table > thead > tr > :is(th, td) {
border-top: 2px solid; /* \toprule */
border-bottom: 1px solid; /* \midrule */
}
table > tbody > tr:last-child > :is(th, td) {
border-bottom: 2px solid; /* \bottomrule */
}'''
html = f'''<!DOCTYPE html>
<html>
<head>
<title>MICO - Detailed scores</title>
<style>
{css}
</style>
</head>
<body>
<div>
{table.to_html(border=0, float_format='{:0.4f}'.format, escape=False)}
</div>'''
for scenario in scores:
html += img[scenario]
html += "</body></html>"
return html | 3,052 | 22.851563 | 89 | py |
MICO | MICO-main/src/mico-competition/scoring/__init__.py | from .score import tpr_at_fpr, score
from .score_html import generate_roc, generate_table, generate_html
__all__ = [
"tpr_at_fpr",
"score",
"generate_roc",
"generate_table",
"generate_html",
] | 213 | 20.4 | 67 | py |
MKIDGen3 | MKIDGen3-master/setup.py | import setuptools
from setuptools.command.install import install
from setuptools.command.develop import develop
import subprocess
import numpy
from setuptools.extension import Extension
#pip install -e git+http://github.com/mazinlab/mkiggen3.git@develop#egg=mkidgen3
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="mkidgen3",
version="0.0.1",
author="MazinLab, J. Bailey et ak.",
author_email="[email protected]",
description="An UVOIR MKID Dector package for the ZCU111",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/MazinLab/MKIDGen3",
packages=setuptools.find_packages(),
classifiers=(
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: POSIX",
"Development Status :: 1 - Planning",
"Intended Audience :: Science/Research"),
) | 969 | 31.333333 | 80 | py |
MKIDGen3 | MKIDGen3-master/mkidgen3/hlsinputgen_dds.py |
"""
Generate a file containing tone to bin center offsets for 2048 resonators and a file containing IQ values
for the resonators over some number of cycles
IQ values are complex numbers on the unit circle
"""
import numpy as np
from daclutgen2gen3 import SweepFile
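# Worked example of the 1 MHz-grid offset computed in test_data() below (illustrative
# numbers): a tone at 4567.3 MHz sits 0.3 MHz above the nearest 1 MHz bin center
# (4567 MHz), so offset_hz = 4567.3e6 - 1e6 * round(4567.3e6 / 1e6) = +300 kHz.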
class Testdata:
def __init__(self, iq=None, offset_hz=None, bincount=None, freqs=None):
self.iq=iq
self.bincount=bincount
self.offset_hz=offset_hz
self.freqs=freqs
def test_data(ncycles=10, MIN_FREQ_HZ=4096e6, MAX_FREQ_HZ =8192e6, NUM_RES = 2048, MAX_PER_BIN = 4):
mec_freqfilea='/Users/one/Box Sync/ucsb/mec/psfreq/psfreqs_FL9a_clip_new_atten_plus_freq_shift.txt'
mec_freqfileb='/Users/one/Box Sync/ucsb/mec/psfreq/psfreqs_FL9b_clip_new_atten_plus_freq_shift.txt'
freqfilea = SweepFile(mec_freqfilea)
freqfileb = SweepFile(mec_freqfileb)
freqs=np.concatenate((freqfilea.freq,freqfileb.freq))
freqs-=freqs.min()
freqs+=MIN_FREQ_HZ
bincount, binedges = np.histogram(np.round(freqs/1e6), bins=np.arange(4096,8192+1))
print('Most resonators in a 1MHz bin in loaded files: {}'.format(max(bincount)))
if freqs.size<NUM_RES:
print("Inserting {} resonators at the end".format(NUM_RES-freqs.size))
addable = (MAX_PER_BIN-bincount[::-1]).clip(0, MAX_PER_BIN)
total_added = np.cumsum(addable)
last_add_ndx = np.where(total_added >= NUM_RES-freqs.size)[0][0]
addable[last_add_ndx] += NUM_RES-(total_added[last_add_ndx]+freqs.size) # in case we would add a few too many
addable[last_add_ndx+1:]=0
bincount[::-1] += addable
#TODO properly add frequencies
freqs = np.concatenate((freqs, [MAX_FREQ_HZ]*(NUM_RES-freqs.size)))
offset_hz=(freqs-1e6*np.round(freqs/1e6)) # on a 1MHz grid
iq=np.random.uniform(low=0, high=2*np.pi, size=ncycles*NUM_RES)
iq=np.array((np.sin(iq),np.cos(iq))).T
return Testdata(iq=iq, freqs=freqs, offset_hz=offset_hz, bincount=bincount)
if __name__=='__main__':
td=test_data()
with open('/Users/one/Desktop/toneoffsets.dat','w') as f:
f.writelines(['{}\n'.format(oset) for oset in td.offset_hz])
with open('/Users/one/Desktop/resiqs.dat','w') as f:
f.writelines(['{} {}\n'.format(i,q) for i,q in td.iq])
with open('/Users/one/Desktop/res_in_bin.dat','w') as f:
f.writelines(['{}\n'.format(n) for n in td.bincount])
| 2,419 | 35.119403 | 118 | py |
MKIDGen3 | MKIDGen3-master/mkidgen3/gen2.py | from logging import getLogger
import numpy as np
ISGOOD = 0b1
ISREVIEWED = 0b10
ISBAD = 0
MAX_ML_SCORE = 1
MAX_ATTEN = 100
LOCUT = 1e9
A_RANGE_CUTOFF = 6e9
def parse_lo(lofreq, frequencies=None, sample_rate=2.048e9):
""" Sets the attribute LOFreq (in Hz) """
lo = round(lofreq / (2.0 ** -16) / 1e6) * (2.0 ** -16) * 1e6
try:
delta = np.abs(frequencies - lo)
    except (AttributeError, TypeError):  # frequencies may be None or not yet an array
getLogger(__name__).warning('No frequency list yet loaded. Unable to check if LO is reasonable.')
return lo
tofar = delta > sample_rate / 2
if tofar.all():
getLogger(__name__).warning('All frequencies more than half a sample rate from '
'the LO. LO: {} Delta min: {} Halfsamp: {} )'.format(lo, delta.min(),
sample_rate / 2))
raise ValueError('LO out of bounds')
elif tofar.any():
getLogger(__name__).warning('Frequencies more than half a sample rate from the LO')
return lo
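# Illustrative sketch (made-up values): parse_lo() snaps the requested LO to a multiple
# of 2**-16 MHz (~15.26 Hz) and checks it against the tone list, e.g.
#   lo = parse_lo(4.2e9, frequencies=np.array([4.3e9, 4.9e9]), sample_rate=2.048e9)
#   # -> 4.2e9 (already on the grid; both tones are within sample_rate/2 of the LO)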
class SweepFile(object):
def __init__(self, file):
self.file = file
self.feedline = None
self.resIDs = None
self.wsfreq = None
self.flag = None
self.wsatten = None
self.mlatten = None
self.mlfreq = None
self.ml_isgood_score = None
self.ml_isbad_score = None
self.phases = None
self.iqRatios = None
self.freq = None
self.atten = None
self._load()
self._vet()
@property
def goodmlfreq(self):
        return self.mlfreq[(self.flag & ISGOOD).astype(bool)]
def sort(self):
s = np.argsort(self.resIDs)
self.resIDs = self.resIDs[s]
self.wsfreq = self.wsfreq[s]
self.flag = self.flag[s]
self.mlfreq = self.mlfreq[s]
self.mlatten = self.mlatten[s]
self.atten = self.atten[s]
self.ml_isgood_score = self.ml_isgood_score[s]
self.ml_isbad_score = self.ml_isbad_score[s]
self.freq = self.freq[s]
def toarray(self):
return np.array([self.resIDs, self.flag, self.wsfreq, self.mlfreq, self.mlatten, self.freq,
self.atten, self.ml_isgood_score, self.ml_isbad_score, self.phases, self.iqRatios])
def lomask(self, lo):
return ((self.flag & ISGOOD) & (~np.isnan(self.freq)) & (np.abs(self.freq - lo) < LOCUT) & (
self.atten > 0)).astype(bool)
def vet(self):
if (np.abs(self.atten[~np.isnan(self.atten)]) > MAX_ATTEN).any():
getLogger(__name__).warning('odd attens')
if (np.abs(self.ml_isgood_score[~np.isnan(self.ml_isgood_score)]) > MAX_ML_SCORE).any():
getLogger(__name__).warning('bad ml good score')
if (np.abs(self.ml_isbad_score[~np.isnan(self.ml_isbad_score)]) > MAX_ML_SCORE).any():
getLogger(__name__).warning('bad ml bad scores')
assert self.resIDs.size == np.unique(self.resIDs).size, "Resonator IDs must be unique."
assert (self.resIDs.size == self.wsfreq.size == self.flag.size ==
self.atten.size == self.mlfreq.size == self.ml_isgood_score.size ==
self.ml_isbad_score.size)
def genheader(self, useSBSup=False):
if useSBSup:
header = ('feedline={}\n'
'wsatten={}\n'
'rID\trFlag\twsFreq\tmlFreq\tmlatten\tfreq\tatten\tmlGood\tmlBad\tphases\tiqratios')
else:
header = ('feedline={}\n'
'wsatten={}\n'
'rID\trFlag\twsFreq\tmlFreq\tmlatten\tfreq\tatten\tmlGood\tmlBad')
return header.format(self.feedline, self.wsatten)
def save(self, file='', saveSBSupData=False):
sf = file.format(feedline=self.feedline) if file else self.file.format(feedline=self.feedline)
self.vet()
if saveSBSupData:
np.savetxt(sf, self.toarray().T, fmt="%8d %1u %16.7f %16.7f %5.1f %16.7f %5.1f %6.4f %6.4f %6.4f %6.4f",
header=self.genheader(True))
else:
np.savetxt(sf, self.toarray().T[:, :-2], fmt="%8d %1u %16.7f %16.7f %5.1f %16.7f %5.1f %6.4f %6.4f",
header=self.genheader(False))
def _vet(self):
assert (self.resIDs.size == self.wsfreq.size == self.flag.size == self.atten.size == self.freq.size ==
self.mlatten.size == self.mlfreq.size == self.ml_isgood_score.size == self.ml_isbad_score.size)
for x in (self.freq, self.mlfreq, self.wsfreq):
use = ~np.isnan(x)
if x[use].size != np.unique(x[use]).size:
getLogger(__name__).warning("Found non-unique frequencies")
self.flag = self.flag.astype(int)
self.resIDs = self.resIDs.astype(int)
self.feedline = int(self.resIDs[0] / 10000)
def _load(self):
d = np.loadtxt(self.file.format(feedline=self.feedline), unpack=True)
if d.ndim == 1: # allows files with single res
d = np.expand_dims(d, axis=1)
try:
if d.shape[0] == 11:
self.resIDs, self.flag, self.wsfreq, self.mlfreq, self.mlatten, \
self.freq, self.atten, self.ml_isgood_score, self.ml_isbad_score, self.phases, self.iqRatios = d
            elif d.shape[0] == 9:
self.resIDs, self.flag, self.wsfreq, self.mlfreq, self.mlatten, \
self.freq, self.atten, self.ml_isgood_score, self.ml_isbad_score = d
self.phases = np.full_like(self.resIDs, 0, dtype=float)
self.iqRatios = np.full_like(self.resIDs, 1, dtype=float)
elif d.shape[0] == 7:
self.resIDs, self.flag, self.wsfreq, self.mlfreq, self.mlatten, \
self.ml_isgood_score, self.ml_isbad_score = d
self.freq = self.mlfreq.copy()
self.atten = self.mlatten.copy()
self.phases = np.full_like(self.resIDs, 0, dtype=float)
self.iqRatios = np.full_like(self.resIDs, 1, dtype=float)
elif d.shape[0] == 5:
self.resIDs, self.freq, self.atten, self.phases, self.iqRatios = d
self.wsfreq = self.freq.copy()
self.mlfreq = self.freq.copy()
self.mlatten = self.atten.copy()
self.flag = np.full_like(self.resIDs, ISGOOD, dtype=int)
self.ml_isgood_score = np.full_like(self.resIDs, np.nan, dtype=float)
self.ml_isbad_score = np.full_like(self.resIDs, np.nan, dtype=float)
else:
self.resIDs, self.freq, self.atten = d
self.wsfreq = self.freq.copy()
self.mlfreq = self.freq.copy()
self.mlatten = self.atten.copy()
self.flag = np.full_like(self.resIDs, ISGOOD, dtype=int)
self.ml_isgood_score = np.full_like(self.resIDs, np.nan, dtype=float)
self.ml_isbad_score = np.full_like(self.resIDs, np.nan, dtype=float)
self.phases = np.full_like(self.resIDs, 0, dtype=float)
self.iqRatios = np.full_like(self.resIDs, 1, dtype=float)
        except Exception as exc:
            raise ValueError('Unknown number of columns') from exc
self.freq[np.isnan(self.freq)] = self.mlfreq[np.isnan(self.freq)]
self.freq[np.isnan(self.freq)] = self.wsfreq[np.isnan(self.freq)]
self.atten[np.isnan(self.atten)] = self.mlatten[np.isnan(self.atten)]
self.flag = self.flag.astype(int)
        bad = ~(self.flag & ISGOOD).astype(bool)  # resonators without the ISGOOD bit set
        self.mlfreq[bad] = self.wsfreq[bad]
        self.ml_isgood_score[bad] = 0
        self.ml_isbad_score[bad] = 1
self._vet()
| 7,724 | 41.679558 | 116 | py |
MKIDGen3 | MKIDGen3-master/mkidgen3/daccomb.py | import numpy as np
import scipy.special
from logging import getLogger
import logging
from .gen2 import SweepFile, parse_lo
nDacSamplesPerCycle = 8
nLutRowsToUse = 2 ** 15
dacSampleRate = 2.048e9
nBitsPerSamplePair = 32
nChannels = 1024
def generateTones(frequencies, nSamples, sampleRate, amplitudes=None, phases=None, iq_ratios=None,
phase_offsets=None, return_merged=True):
"""
Generate a list of complex signals with amplitudes and phases specified and frequencies quantized
All specified inputs must be of the same shape
INPUTS:
        frequencies - list of resonator frequencies
        nSamples - Number of time samples
        sampleRate - Used to quantize the frequencies
        amplitudes - list of amplitudes. If None, use 1.
        phases - list of phases. If None, use random phases
        iq_ratios - list of I/Q amplitude ratios. If None, use 1.
        phase_offsets - list of IQ phase offsets (degrees). If None, use 0.
        return_merged - if set to False, use frequencies.size times more memory and return an unmerged frequency comb
OUTPUTS:
dictionary with keywords
I - each element is a list of I(t) values for specific freq if not return_merged else the summed I(t)
Q - Same as I but for Q(t)
frequencies - list of frequencies after digital quantization
phases - list of phases for each frequency
"""
if amplitudes is None:
amplitudes = np.ones_like(frequencies)
if phases is None:
phases = np.random.uniform(0., 2. * np.pi, len(frequencies))
if iq_ratios is None:
iq_ratios = np.ones_like(frequencies)
if phase_offsets is None:
phase_offsets = np.zeros_like(frequencies)
# Quantize the frequencies to their closest digital value
freq_res = sampleRate / nSamples
quantized_freqs = np.round(frequencies / freq_res) * freq_res
phase_offsets_radians = np.deg2rad(phase_offsets)
if return_merged:
ivals = np.zeros(nSamples)
qvals = np.zeros(nSamples)
else:
ivals = np.zeros((frequencies.size, nSamples))
qvals = np.zeros((frequencies.size, nSamples))
# generate each signal
t = 2 * np.pi * np.arange(nSamples)/sampleRate
for i in range(frequencies.size):
phi = t * quantized_freqs[i]
exp = amplitudes[i] * np.exp(1j * (phi + phases[i]))
iScale = np.sqrt(2) * iq_ratios[i] / np.sqrt(1. + iq_ratios[i] ** 2)
qScale = np.sqrt(2) / np.sqrt(1 + iq_ratios[i] ** 2)
if return_merged:
ivals += iScale * (np.cos(phase_offsets_radians[i]) * np.real(exp) +
np.sin(phase_offsets_radians[i]) * np.imag(exp))
            qvals += qScale * np.imag(exp)
else:
ivals[i] = iScale * (np.cos(phase_offsets_radians[i]) * np.real(exp) +
np.sin(phase_offsets_radians[i]) * np.imag(exp))
            qvals[i] = qScale * np.imag(exp)
return {'I': ivals, 'Q': qvals, 'frequencies': quantized_freqs, 'phases': phases}
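# Illustrative sketch of generateTones() (made-up tones, not from a real frequency list):
#   tones = generateTones(frequencies=np.array([4.3217e9, 4.6008e9]),
#                         nSamples=nDacSamplesPerCycle * nLutRowsToUse,
#                         sampleRate=dacSampleRate,
#                         amplitudes=np.array([1000.0, 500.0]))
#   tones['I'], tones['Q']   # summed I(t)/Q(t) waveforms (return_merged=True by default)
#   tones['frequencies']     # the tones snapped to the sampleRate/nSamples (7812.5 Hz) grid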
def generate(frequencies, attenuations, phases=None, iq_ratios=None, phase_offsets=None, spike_percentile_limit=.9,
globalDacAtten=None, lo=None, return_full=True, MAX_CHAN=2048):
"""
Creates DAC frequency comb by adding many complex frequencies together with specified amplitudes and phases.
The attenuations holds the absolute attenuation for each resonator signal coming out of the DAC.
Zero attenuation means that the tone amplitude is set to the full dynamic range of the DAC and the
DAC attenuator(s) are set to 0. Thus, all values in attenuations must be larger than globalDacAtten.
If you decrease the globalDacAtten, the amplitude in the DAC LUT decreases so that the total
attenuation of the signal is the same.
    Note: The frequencies need not be unique. If there are repeated values in frequencies then
    they are completely ignored when making the comb along with their corresponding attenuation, phase, etc...
INPUTS:
frequencies - list of all resonator frequencies.
attenuations - list of absolute attenuation values (dB) for each resonator.
phases - list of phases for each complex signal. If None, generates random phases.
        iq_ratios - list of I/Q amplitude ratios for each tone. If None, use 1.
        phase_offsets - list of IQ phase offsets (degrees) for each tone. If None, use 0.
spike_percentile_limit - loop generateTones() function with random phases to avoid spikes greater than the
specified percentile in the output comb. Set to >=1 to disable.
OUTPUTS:
        if return_full, a dictionary with keywords
        i - I(t) values for the frequency comb [signed integers]
        q - Q(t) values for the frequency comb [signed integers]
        frequencies - list of frequencies after digital quantization
        attenuation - the global DAC hardware attenuation in dB that should be set
        comb - the complex comb i + 1j*q
        phases - the phase used for each tone
        otherwise only the complex comb is returned
"""
spike_percentile_limit=max(spike_percentile_limit, .01)
if len(frequencies) != len(attenuations):
raise ValueError("Need exactly one attenuation value for each resonant frequency!")
if phases is not None and len(frequencies) != len(phases):
raise ValueError("Need exactly one phase value for each resonant frequency!")
if iq_ratios is not None and len(frequencies) != len(iq_ratios):
raise ValueError("Need exactly one iqRatio value for each resonant frequency!")
if phase_offsets is not None and len(frequencies) != len(phase_offsets):
raise ValueError("Need exactly one iqPhaseOffs value for each resonant frequency!")
    if frequencies.size > MAX_CHAN:
        getLogger(__name__).warning(f"Clipping the last {frequencies.size-MAX_CHAN} tones. MAX_CHAN={MAX_CHAN}.")
frequencies = frequencies[:MAX_CHAN]
attenuations = attenuations[:MAX_CHAN]
if phase_offsets is not None:
phase_offsets=phase_offsets[:MAX_CHAN]
if iq_ratios is not None:
iq_ratios=iq_ratios[:MAX_CHAN]
if phases is not None:
phases=phases[:MAX_CHAN]
getLogger(__name__).debug('Generating DAC comb...')
autoDacAtten = globalDacAtten is None
if autoDacAtten:
globalDacAtten = np.amin(attenuations)
# Calculate relative amplitudes for DAC LUT
nBitsPerSampleComponent = nBitsPerSamplePair / 2
maxAmp = int(np.round(2 ** (nBitsPerSampleComponent - 1) - 1)) # 1 bit for sign
amplitudes = maxAmp * 10 ** (-(attenuations - globalDacAtten) / 20)
# Calculate nSamples and sampleRate
nSamples = nDacSamplesPerCycle * nLutRowsToUse
sampleRate = dacSampleRate
# Calculate resonator frequencies for DAC
    LOFreq = parse_lo(lo, frequencies=frequencies)
dacFreqList = frequencies - LOFreq
dacFreqList[dacFreqList < 0.] += dacSampleRate # For +/- freq
# Make sure dac tones are unique
dacFreqList, args, args_inv = np.unique(dacFreqList, return_index=True, return_inverse=True)
rstate = np.random.get_state()
np.random.seed(0)
# Generate and add up individual tone time series.
toneDict = generateTones(frequencies= dacFreqList, nSamples=nSamples, sampleRate=sampleRate,
amplitudes=amplitudes[args],
phases=None if phases is None else phases[args],
iq_ratios=None if iq_ratios is None else iq_ratios[args],
phase_offsets=None if phase_offsets is None else phase_offsets[args],
return_merged=True)
# This part takes the longest
iValues = toneDict['I']
qValues = toneDict['Q']
# check that we are utilizing the dynamic range of the DAC correctly
sig_i = iValues.std()
sig_q = qValues.std()
# 10% of the time there should be a point this many sigmas higher than average
expectedHighestVal_sig = scipy.special.erfinv((len(iValues) + spike_percentile_limit -1)/ len(iValues)) * np.sqrt(2)
if spike_percentile_limit<1 and sig_i > 0 and sig_q > 0:
while max(np.abs(iValues).max() / sig_i, np.abs(qValues).max() / sig_q) >= expectedHighestVal_sig:
getLogger(__name__).warning("The freq comb's relative phases may have added up sub-optimally. "
"Calculating with new random phases")
toneDict = generateTones(frequencies=dacFreqList, nSamples=nSamples, sampleRate=sampleRate,
amplitudes=amplitudes[args], phases=None,
iq_ratios=None if iq_ratios is None else iq_ratios[args],
phase_offsets=None if phase_offsets is None else phase_offsets[args],
return_merged=True)
iValues = toneDict['I']
qValues = toneDict['Q']
np.random.set_state(rstate)
dacQuantizedFreqList = (toneDict['frequencies'])[args_inv]
dacPhaseList = (toneDict['phases'])[args_inv]
if autoDacAtten:
highestVal = max(np.abs(iValues).max(), np.abs(qValues).max())
dBexcess = 20 * np.log10(highestVal / maxAmp)
dBexcess = np.ceil(4 * dBexcess) / 4 # rounded up to nearest 1/4 dB
# reduce to fit into DAC dynamic range and quantize to integer
        iValues_new = np.round(iValues / 10 ** (dBexcess / 20)).astype(int)
        qValues_new = np.round(qValues / 10 ** (dBexcess / 20)).astype(int)
        if np.max((np.abs(iValues_new).max(), np.abs(qValues_new).max())) > maxAmp:
            dBexcess += 0.25  # Since there's some rounding there's a small chance we need to decrease by another atten step
            iValues_new = np.round(iValues / 10 ** (dBexcess / 20)).astype(int)
            qValues_new = np.round(qValues / 10 ** (dBexcess / 20)).astype(int)
globalDacAtten -= dBexcess
if globalDacAtten > 31.75 * 2:
dB_reduce = globalDacAtten - 31.75 * 2
getLogger(__name__).warning(f"Unable to fully utilize DAC dynamic range by {dB_reduce} dB")
globalDacAtten -= dB_reduce
dBexcess += dB_reduce
            iValues_new = np.round(iValues / 10 ** (dBexcess / 20)).astype(int)
            qValues_new = np.round(qValues / 10 ** (dBexcess / 20)).astype(int)
iValues = iValues_new
qValues = qValues_new
else:
        iValues = np.round(iValues).astype(int)
        qValues = np.round(qValues).astype(int)
highestVal = max(np.abs(iValues).max(), np.abs(qValues).max())
dacFreqComb = iValues + 1j * qValues
msg = ('\tGlobal DAC atten: {} dB'.format(globalDacAtten) +
'\tUsing {} percent of DAC dynamic range\n'.format(highestVal / maxAmp * 100) +
'\thighest: {} out of {}\n'.format(highestVal, maxAmp) +
'\tsigma_I: {} sigma_Q:{}\n'.format(np.std(iValues), np.std(qValues)) +
'\tLargest val_I: {} sigma. '.format(np.abs(iValues).max() / np.std(iValues)) +
'val_Q: {} sigma.\n'.format(np.abs(qValues).max() / np.std(qValues)) +
'\tExpected val: {} sigmas\n'.format(expectedHighestVal_sig))
getLogger(__name__).debug(msg)
if globalDacAtten < 0:
raise ValueError("Desired resonator powers are unacheivable. "
f"Increase resonator attens by {-1 * globalDacAtten} dB")
if return_full:
return {'i': iValues, 'q': qValues, 'frequencies': dacQuantizedFreqList, 'attenuation': globalDacAtten,
'comb': dacFreqComb, 'phases': dacPhaseList}
else:
return dacFreqComb
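# Illustrative sketch of generate() with made-up frequencies/attenuations (a real call is
# shown in generate_from_MEC() and in the __main__ block below):
#   comb = generate(frequencies=np.array([4.32e9, 4.60e9, 5.11e9]),
#                   attenuations=np.array([45.0, 47.5, 50.0]),
#                   lo=4.8e9)
#   comb['i'], comb['q']     # integer DAC LUT samples
#   comb['attenuation']      # global DAC attenuation (dB) to program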
def generate_from_MEC(mec_freqfile, lo):
freqfile = SweepFile(mec_freqfile)
combdata = generate(frequencies=freqfile.freq, attenuations=freqfile.atten, phases=freqfile.phases,
iq_ratios=freqfile.iqRatios, globalDacAtten=None, lo=lo)
return combdata
if __name__ == '__main__':
mec_freqfile='/Users/one/Desktop/untitled folder/psfreqs_FL8a_clip.txt'
lo=4428029278.278099
freqfile = SweepFile(mec_freqfile)
logging.basicConfig()
combdata = generate(frequencies=freqfile.freq, attenuations=freqfile.atten, phases=freqfile.phases,
iq_ratios=freqfile.iqRatios, globalDacAtten=None, lo=lo)
np.savez('mec_fl8a_dec19_62dB.npz', combdata['comb']) | 12,020 | 44.707224 | 124 | py |
pineko | pineko-main/benchmarks/bench_checks.py | import eko
import numpy as np
import pineappl
import pytest
import pineko.check
def benchmark_check_grid_and_eko_compatible(test_files, tmp_path):
grid = pineappl.grid.Grid.read(
test_files / "data/grids/400/HERA_NC_225GEV_EP_SIGMARED.pineappl.lz4"
)
wrong_grid = pineappl.grid.Grid.read(
test_files / "data/grids/208/NUTEV_CC_NU_FE_SIGMARED.pineappl.lz4"
)
with eko.EKO.edit(
test_files / "data/ekos/400/HERA_NC_225GEV_EP_SIGMARED.tar"
) as ekoop:
with pytest.raises(ValueError):
pineko.check.check_grid_and_eko_compatible(wrong_grid, ekoop, 1.0, 3, 3)
pineko.check.check_grid_and_eko_compatible(grid, ekoop, 1.0, 3, 3)
eko.io.manipulate.xgrid_reshape(
ekoop, targetgrid=eko.interpolation.XGrid([0.0001, 0.001, 0.1, 0.5, 1.0])
)
with pytest.raises(ValueError):
pineko.check.check_grid_and_eko_compatible(grid, ekoop, 1.0, 10, 10)
eko.io.manipulate.xgrid_reshape(ekoop, targetgrid=ekoop.xgrid)
| 1,029 | 35.785714 | 85 | py |
pineko | pineko-main/benchmarks/bench_kfactor.py | import lhapdf
import numpy as np
import pineappl
from pineko import kfactor
def benchmark_kfactor_inclusion(test_files, tmp_path, test_pdf, lhapdf_path):
fake_yaml_path = test_files / "data" / "yamldb" / "ATLAS_TTB_FAKE.yaml"
max_as = 3
pdf_name = "NNPDF40_nnlo_as_01180"
kfactor.compute_k_factor_grid(
test_files / "data" / "grids" / "400",
test_files / "data" / "kfactors",
fake_yaml_path,
max_as,
target_folder=tmp_path,
)
pluskfactor_grid_path = tmp_path / "ATLAS_TTB_8TEV_LJ_TRAP.pineappl.lz4"
with lhapdf_path(test_pdf):
pdf = lhapdf.mkPDF(pdf_name)
pluskfactor_grid = pineappl.grid.Grid.read(pluskfactor_grid_path)
sv_list = [(1.0, 1.0)] # Only ren sv have to be tested
bin_number = pluskfactor_grid.bins()
order_mask_nloQCD = pineappl.grid.Order.create_mask(
pluskfactor_grid.orders(), 2, 0, True
)
order_mask_nnloQCD = pineappl.grid.Order.create_mask(
pluskfactor_grid.orders(), 3, 0, True
)
to_test_res_nlo = pluskfactor_grid.convolute_with_one(
2212,
pdf.xfxQ2,
pdf.alphasQ2,
order_mask_nloQCD,
np.array([], dtype=np.uint64),
np.array([], dtype=bool),
sv_list,
).reshape(bin_number, len(sv_list))
to_test_res_nnlo = pluskfactor_grid.convolute_with_one(
2212,
pdf.xfxQ2,
pdf.alphasQ2,
order_mask_nnloQCD,
np.array([], dtype=np.uint64),
np.array([], dtype=bool),
sv_list,
).reshape(bin_number, len(sv_list))
centrals_kfactor, _ = kfactor.read_kfactor(
test_files / "data" / "kfactors" / "CF_QCD_ATLAS_TTB_8TEV_LJ_TRAP.dat"
)
rtol = 1.0e-15
for pred_ratio, kf in zip(
to_test_res_nnlo.transpose()[0] / to_test_res_nlo.transpose()[0],
centrals_kfactor,
):
np.testing.assert_allclose(kf, pred_ratio, rtol=rtol)
| 1,921 | 32.137931 | 78 | py |
pineko | pineko-main/benchmarks/conftest.py | import pathlib
import shutil
from contextlib import contextmanager
import pytest
import pineko
import pineko.configs
@pytest.fixture
def test_files():
return pathlib.Path(__file__).parents[0] / "data_files/"
@pytest.fixture
def test_empty_proj(test_files):
path = test_files / "empty_proj/"
yield path
# Let's clean
shutil.rmtree(path / "data")
shutil.rmtree(path / "logs")
@pytest.fixture
def test_configs(test_files):
config_path = pineko.configs.detect(test_files)
base_configs = pineko.configs.load(config_path)
pineko.configs.configs = pineko.configs.defaults(base_configs)
return pineko.configs.configs
@pytest.fixture
def test_pdf():
return pathlib.Path(__file__).parents[0] / "fakepdfs/"
@pytest.fixture
def lhapdf_path():
@contextmanager
def wrapped(newdir):
import lhapdf # pylint: disable=import-error, import-outside-toplevel
paths = lhapdf.paths()
lhapdf.pathsPrepend(str(newdir))
try:
yield
finally:
lhapdf.setPaths(paths)
return wrapped
| 1,087 | 19.923077 | 78 | py |
pineko | pineko-main/benchmarks/bench_cli.py | import pathlib
import shutil
import lhapdf
from click.testing import CliRunner
from pineko.cli._base import command
def benchmark_check_cli(test_files):
grid_path = pathlib.Path(
test_files / "data/grids/400/HERA_NC_225GEV_EP_SIGMARED.pineappl.lz4"
)
wrong_grid_path = pathlib.Path(
test_files / "data/grids/208/HERA_CC_318GEV_EM_SIGMARED.pineappl.lz4"
)
eko_path = pathlib.Path(test_files / "data/ekos/400/HERA_NC_225GEV_EP_SIGMARED.tar")
runner = CliRunner()
result = runner.invoke(
command, ["check", "compatibility", str(grid_path), str(eko_path)]
)
assert "Success: grids are compatible" in result.output
wrong_result = runner.invoke(
command, ["check", "compatibility", str(wrong_grid_path), str(eko_path)]
)
assert (
"Error: Q2 grid in pineappl grid and eko operator are NOT compatible!"
in wrong_result.output
)
wrong_scvar_res = runner.invoke(
command, ["check", "scvar", str(grid_path), "wrong_string", "2", "0"]
)
assert "Invalid value for 'SCALE'" in wrong_scvar_res.output
ren_res = runner.invoke(
command, ["check", "scvar", str(grid_path), "ren", "3", "0"]
)
assert (
"Success: grids contain renormalization scale variations for as"
in ren_res.output
)
fact_res = runner.invoke(
command, ["check", "scvar", str(grid_path), "fact", "3", "0"]
)
assert (
"Success: grids contain factorization scale variations for as"
in fact_res.output
)
def benchmark_opcard_cli(tmp_path, test_files):
grid_path = pathlib.Path(
test_files / "data/grids/400/HERA_NC_225GEV_EP_SIGMARED.pineappl.lz4"
)
default_card_path = pathlib.Path(
test_files / "data/operator_cards/400/_template.yaml"
)
thcard_path = pathlib.Path(test_files / "data" / "theory_cards" / "400.yaml")
target_path = pathlib.Path(tmp_path / "test_ope_card.yaml")
runner = CliRunner()
result = runner.invoke(
command,
[
"opcard",
str(grid_path),
str(default_card_path),
str(thcard_path),
str(target_path),
],
)
assert "Success" in result.output
def benchmark_compare_cli(lhapdf_path, test_files, test_pdf):
grid_path = pathlib.Path(
test_files / "data/grids/208/LHCB_DY_13TEV_DIMUON.pineappl.lz4"
)
fk_path = pathlib.Path(
test_files / "data/fktables/208/LHCB_DY_13TEV_DIMUON.pineappl.lz4"
)
runner = CliRunner()
with lhapdf_path(test_pdf):
result = runner.invoke(
command,
["compare", str(grid_path), str(fk_path), "2", "0", "NNPDF40_nlo_as_01180"],
)
assert "yll left" in result.output
def benchmark_convolute_cli(test_files, tmp_path):
grid_path = pathlib.Path(
test_files / "data/grids/400/HERA_NC_225GEV_EP_SIGMARED.pineappl.lz4"
)
eko_path = pathlib.Path(test_files / "data/ekos/400/HERA_NC_225GEV_EP_SIGMARED.tar")
fk_path = tmp_path / "testfk.pineappl.lz4"
runner = CliRunner()
result = runner.invoke(
command,
["convolute", str(grid_path), str(eko_path), str(fk_path), "2", "0"],
)
assert "Optimizing for Nf6Ind" in result.output
def benchmark_scaffold_cli(test_empty_proj):
runner = CliRunner()
conf_file = test_empty_proj / "pineko.toml"
# empty project is not correctly configured
res = runner.invoke(command, ["scaffold", "-c", str(conf_file), "check"])
assert "Error: Project is not correctly configured." in res.output
# so we need to create all the folders
res = runner.invoke(command, ["scaffold", "-c", str(conf_file), "new"])
# and then I can check again
res = runner.invoke(command, ["scaffold", "-c", str(conf_file), "check"])
assert "Success: All the folders are correctly configured" in res.output
def benchmark_gen_sv_cli(test_files, tmp_path, test_pdf, lhapdf_path):
runner = CliRunner()
pdf_name = "NNPDF40_nlo_as_01180"
max_as = "2"
nf = "5"
name_grid = "ATLAS_TTB_8TEV_LJ_TRAP_norensv_fixed.pineappl.lz4"
grid_path = test_files / "data" / "grids" / "400" / name_grid
new_grid_path = tmp_path / name_grid
target_path = tmp_path
shutil.copy(grid_path, new_grid_path)
with lhapdf_path(test_pdf):
pdf = lhapdf.mkPDF(pdf_name)
res = runner.invoke(
command,
["ren_sv_grid", str(new_grid_path), str(target_path), max_as, nf, "False"],
)
assert "ReturnState.SUCCESS" in res.output
def benchmark_kfactor_cli(test_files, tmp_path):
runner = CliRunner()
grid_folder = test_files / "data" / "grids" / "400"
kfolder = test_files / "data" / "kfactors"
fake_yaml_path = test_files / "data" / "yamldb" / "ATLAS_TTB_FAKE.yaml"
max_as = "3"
target_path = tmp_path
res = runner.invoke(
command,
[
"kfactor",
str(grid_folder),
str(kfolder),
str(fake_yaml_path),
str(target_path),
max_as,
"False",
],
)
assert "The number of bins match the lenght of the k-factor" in res.output
| 5,202 | 32.567742 | 88 | py |
pineko | pineko-main/benchmarks/bench_evolve.py | import pathlib
import eko
import eko.io.legacy
import numpy as np
import pineappl
import pytest
import yaml
from eko import couplings as sc
import pineko
import pineko.evolve
import pineko.theory_card
def benchmark_write_operator_card_from_file(tmp_path, test_files, test_configs):
pine_path = test_files / "data/grids/400/HERA_NC_225GEV_EP_SIGMARED.pineappl.lz4"
default_path = test_files / "data/operator_cards/400/_template.yaml"
target_path = pathlib.Path(tmp_path / "test_operator.yaml")
tcard = pineko.theory_card.load(400)
x_grid, _q2_grid = pineko.evolve.write_operator_card_from_file(
pine_path, default_path, target_path, tcard
)
# Load the operator card
myopcard = yaml.safe_load(target_path.read_text(encoding="utf-8"))
# Check if it contains all the information for eko
assert np.allclose(myopcard["xgrid"], x_grid)
wrong_pine_path = test_files / "data/grids/208/HERA_CC_318GEV_EM_wrong.pineappl.lz4"
with pytest.raises(FileNotFoundError):
_ = pineko.evolve.write_operator_card_from_file(
wrong_pine_path, default_path, target_path, 1.0
)
def benchmark_dglap(tmp_path, test_files, test_configs):
pine_path = test_files / "data/grids/400/HERA_NC_225GEV_EP_SIGMARED.pineappl.lz4"
default_path = test_files / "data/operator_cards/400/_template.yaml"
target_path = pathlib.Path(tmp_path / "test_operator.yaml")
theory_id = 400
tcard = pineko.theory_card.load(theory_id)
# In order to check if the operator card is enough for eko, let's compute the eko
pineko.evolve.write_operator_card_from_file(
pine_path, default_path, target_path, tcard
)
# Load the opcard
myopcard = yaml.safe_load(target_path.read_text(encoding="utf-8"))
# I need smaller x and q grids in order to compute a small eko
small_x_grid = np.geomspace(1e-3, 1.0, 5)
target = (10.0, 5)
myopcard["xgrid"] = small_x_grid
myopcard["mugrid"] = [target]
legacy_class = eko.io.runcards.Legacy(tcard, myopcard)
new_theory = legacy_class.new_theory
new_op = eko.io.runcards.OperatorCard.from_dict(myopcard)
eko_path = pathlib.Path(tmp_path / "test_eko.tar")
_ = eko.runner.solve(new_theory, new_op, eko_path)
def benchmark_evolve_grid(tmp_path, lhapdf_path, test_files, test_pdf):
pine_path = test_files / "data/grids/400/HERA_NC_225GEV_EP_SIGMARED.pineappl.lz4"
pinegrid = pineappl.grid.Grid.read(pine_path)
eko_path = test_files / "data/ekos/400/HERA_NC_225GEV_EP_SIGMARED.tar"
target_path = pathlib.Path(tmp_path / "test_fktable.pineappl.lz4")
max_as = 3
max_al = 0
base_configs = pineko.configs.load(test_files)
pineko.configs.configs = pineko.configs.defaults(base_configs)
tcard = pineko.theory_card.load(400)
assumptions = pineko.theory_card.construct_assumptions(tcard)
with eko.EKO.edit(eko_path) as eko_op:
with lhapdf_path(test_pdf):
pineko.evolve.evolve_grid(
pinegrid,
eko_op,
target_path,
max_as,
max_al,
1.0,
1.0,
assumptions=assumptions,
comparison_pdf="NNPDF40_nnlo_as_01180",
)
# check metadata is there - fixes https://github.com/NNPDF/pineko/issues/70
fk = pineappl.fk_table.FkTable.read(target_path)
assert "results_fk" in fk.key_values()
| 3,470 | 36.322581 | 88 | py |
pineko | pineko-main/benchmarks/bench_theory_card.py | import pineko
def benchmark_load(test_files):
base_configs = pineko.configs.load(test_files)
pineko.configs.configs = pineko.configs.defaults(base_configs)
tcard = pineko.theory_card.load(208)
assert tcard["MP"] == 0.938
assert tcard["PTO"] == 1
def benchmark_construct_assumption(test_files):
base_configs = pineko.configs.load(test_files)
pineko.configs.configs = pineko.configs.defaults(base_configs)
tcard = pineko.theory_card.load(208)
ass_ash = pineko.theory_card.construct_assumptions(tcard)
assert ass_ash == "Nf4Ind"
| 570 | 30.722222 | 66 | py |
pineko | pineko-main/benchmarks/bench_theory.py | import os
import pathlib
import pineko
import pineko.configs
import pineko.theory
import pineko.theory_card
theory_obj = pineko.theory.TheoryBuilder(208, ["LHCB_Z_13TEV_DIMUON"])
theory_obj_hera = pineko.theory.TheoryBuilder(400, ["HERACOMBNCEP460"])
theory_obj_test = pineko.theory.TheoryBuilder(208, ["HERACOMBCCEM"], silent=True)
def benchmark_operators_cards_path(test_files, test_configs):
pineko.configs.configs = pineko.configs.defaults(test_configs)
path = theory_obj.operator_cards_path
assert path == pathlib.Path(test_files / "data/operator_cards/208/")
def benchmark_ekos_path(test_files):
path = theory_obj.ekos_path()
assert path == pathlib.Path(test_files / "data/ekos/208/")
def benchmark_fks_path(test_files):
path = theory_obj.fks_path
assert path == pathlib.Path(test_files / "data/fktables/208/")
def benchmark_grids_path(test_files):
path = theory_obj.grids_path()
assert path == pathlib.Path(test_files / "data/grids/208/")
def benchmark_load_grids(test_files):
dataset_name = "LHCB_Z_13TEV_DIMUON"
grids = theory_obj.load_grids(dataset_name)
assert grids["LHCB_DY_13TEV_DIMUON"] == pathlib.Path(
test_files / "data/grids/208/LHCB_DY_13TEV_DIMUON.pineappl.lz4"
)
def benchmark_inherit_grid(tmp_path):
from_grid = theory_obj.grids_path()
theory_obj.inherit_grid("TestGrid", from_grid, tmp_path)
def benchmark_inherit_grids(test_files):
new_theory_ID = 2081
theory_obj.inherit_grids(new_theory_ID)
folder_path = pathlib.Path(test_files / "data" / "grids" / str(new_theory_ID))
assert folder_path.is_dir()
assert (folder_path / "LHCB_DY_13TEV_DIMUON.pineappl.lz4").is_file()
theory_obj.inherit_grids(new_theory_ID)
for item in folder_path.iterdir():
item.unlink()
folder_path.rmdir()
def benchmark_inherit_eko(tmp_path):
from_eko = theory_obj.ekos_path()
theory_obj.inherit_eko("TestEko", from_eko, tmp_path)
def benchmark_inherit_ekos(test_files):
new_theory_ID = 2081
theory_obj.inherit_ekos(new_theory_ID)
folder_path = pathlib.Path(test_files / "data" / "ekos" / str(new_theory_ID))
assert folder_path.is_dir()
assert (folder_path / "LHCB_DY_13TEV_DIMUON.tar").is_file()
theory_obj.inherit_ekos(new_theory_ID)
for item in folder_path.iterdir():
item.unlink()
folder_path.rmdir()
def benchmark_opcard(test_files, test_configs):
tcard = pineko.theory_card.load(400)
grid_name = "HERA_NC_225GEV_EP_SIGMARED"
theory_obj_hera.opcard(
grid_name,
pathlib.Path(
test_files / "data/grids/400/HERA_NC_225GEV_EP_SIGMARED.pineappl.lz4"
),
tcard,
)
op_path = pathlib.Path(
test_files
/ theory_obj_hera.operator_cards_path
/ "HERA_NC_225GEV_EP_SIGMARED.yaml"
)
theory_obj_hera.opcard(
grid_name,
pathlib.Path(
test_files / "data/grids/400/HERA_NC_225GEV_EP_SIGMARED.pineappl.lz4"
),
tcard,
)
if os.path.exists(op_path):
os.remove(op_path)
else:
raise ValueError("operator card not found")
def benchmark_eko(test_files, test_configs):
tcard = pineko.theory_card.load(400)
grid_name = "HERA_NC_225GEV_EP_SIGMARED"
grid_path = pathlib.Path(
theory_obj_hera.grids_path() / (grid_name + ".pineappl.lz4")
)
base_configs = pineko.configs.load(test_files)
pineko.configs.configs = pineko.configs.defaults(base_configs)
theory_obj_hera.activate_logging(
pathlib.Path(test_files / "logs/eko/"),
"400-HERA_NC_225GEV_EP_SIGMARED.log",
["400-HERA_NC_225GEV_EP_SIGMARED.log"],
)
theory_obj_hera.opcard(grid_name, pathlib.Path(test_files / grid_path), tcard)
theory_obj_hera.eko(grid_name, grid_path, tcard)
log_path = pathlib.Path(test_files / "logs/eko/400-HERA_NC_225GEV_EP_SIGMARED.log")
if os.path.exists(log_path):
os.remove(log_path)
else:
raise ValueError("log file not found")
op_path = pathlib.Path(
test_files
/ theory_obj_hera.operator_cards_path
/ "HERA_NC_225GEV_EP_SIGMARED.yaml"
)
if os.path.exists(op_path):
os.remove(op_path)
else:
raise ValueError("operator card not found")
def benchmark_activate_logging(test_files):
assert not theory_obj_test.activate_logging(
pathlib.Path(test_files / "logs/fk/"), "test_log.log", ["test_log.log"]
)
theory_obj.activate_logging(
pathlib.Path(test_files / "logs/fk/"), "test_log.log", ["test_log.log"]
)
log_path = pathlib.Path(test_files / "logs/fk/test_log.log")
if os.path.exists(log_path):
os.remove(log_path)
else:
raise ValueError("log file not found")
def benchmark_fk(test_files, test_configs):
tcard = pineko.theory_card.load(400)
grid_name = "HERA_NC_225GEV_EP_SIGMARED"
grid_path = pathlib.Path(
theory_obj_hera.grids_path() / (grid_name + ".pineappl.lz4")
)
base_configs = pineko.configs.load(test_files)
pineko.configs.configs = pineko.configs.defaults(base_configs)
theory_obj_hera.activate_logging(
pathlib.Path(test_files / "logs/fk/"),
"400-HERA_NC_225GEV_EP_SIGMARED.log",
["400-HERA_NC_225GEV_EP_SIGMARED.log"],
)
theory_obj_hera.opcard(grid_name, pathlib.Path(test_files / grid_path), tcard)
theory_obj_hera.fk(grid_name, grid_path, tcard, pdf=None)
# test overwrite function
theory_obj_hera.fk(grid_name, grid_path, tcard, pdf=None)
log_path = pathlib.Path(test_files / "logs/fk/400-HERA_NC_225GEV_EP_SIGMARED.log")
if os.path.exists(log_path):
os.remove(log_path)
else:
raise ValueError("log file not found")
op_path = pathlib.Path(
test_files
/ theory_obj_hera.operator_cards_path
/ "HERA_NC_225GEV_EP_SIGMARED.yaml"
)
if os.path.exists(op_path):
os.remove(op_path)
else:
raise ValueError("operator card not found")
fk_path = pathlib.Path(
test_files
/ theory_obj_hera.fks_path
/ "HERA_NC_225GEV_EP_SIGMARED.pineappl.lz4"
)
if os.path.exists(fk_path):
os.remove(fk_path)
else:
raise ValueError("fktable not found")
| 6,282 | 31.056122 | 87 | py |
pineko | pineko-main/benchmarks/bench_comparator.py | import pineappl
import pineko
def benchmark_compare(lhapdf_path, test_files, test_pdf):
pine_path = test_files / "data/grids/208/LHCB_DY_13TEV_DIMUON.pineappl.lz4"
grid = pineappl.grid.Grid.read(pine_path)
fk_path = test_files / "data/fktables/208/LHCB_DY_13TEV_DIMUON.pineappl.lz4"
fk = pineappl.fk_table.FkTable.read(fk_path)
pdf = "NNPDF40_nlo_as_01180"
with lhapdf_path(test_pdf):
comp_table = pineko.comparator.compare(grid, fk, 2, 0, pdf, 1.0, 1.0)
errors = comp_table["permille_error"].values
assertions = [er < 5.0 for er in errors]
assert False not in assertions
| 619 | 35.470588 | 80 | py |
pineko | pineko-main/benchmarks/bench_autosv.py | import shutil
import lhapdf
import numpy as np
import pineappl
from pineko import scale_variations
def benchmark_compute_ren_sv_grid(test_files, tmp_path, test_pdf, lhapdf_path):
to_test_grid_path = (
test_files
/ "data"
/ "grids"
/ "400"
/ "ATLAS_TTB_8TEV_LJ_TRAP_totest.pineappl.lz4"
)
name_grid = "ATLAS_TTB_8TEV_LJ_TRAP_norensv_fixed.pineappl.lz4"
grid_path = test_files / "data" / "grids" / "400" / name_grid
new_grid_path = tmp_path / name_grid
shutil.copy(grid_path, new_grid_path)
max_as = 2
nf = 5
pdf_name = "NNPDF40_nlo_as_01180"
already_there_res = scale_variations.compute_ren_sv_grid(
new_grid_path, max_as - 1, nf
)
assert already_there_res == scale_variations.ReturnState.ALREADY_THERE
order_exist_res = scale_variations.compute_ren_sv_grid(
new_grid_path, max_as, nf, order_exists=True
)
assert order_exist_res == scale_variations.ReturnState.ORDER_EXISTS_FAILURE
result_state = scale_variations.compute_ren_sv_grid(new_grid_path, max_as, nf)
assert result_state == scale_variations.ReturnState.SUCCESS
# We are saving the new grid with the same name of the original
plusrensv_grid_path = tmp_path / name_grid
with lhapdf_path(test_pdf):
pdf = lhapdf.mkPDF(pdf_name)
to_test_grid = pineappl.grid.Grid.read(to_test_grid_path)
plusrensv_grid = pineappl.grid.Grid.read(plusrensv_grid_path)
sv_list = [(0.5, 1.0), (2.0, 1.0)] # Only ren sv have to be tested
bin_number = to_test_grid.bins()
to_test_res = to_test_grid.convolute_with_one(
2212,
pdf.xfxQ2,
pdf.alphasQ2,
np.array([], dtype=bool),
np.array([], dtype=np.uint64),
np.array([], dtype=bool),
sv_list,
).reshape(bin_number, len(sv_list))
plusrensv_res = plusrensv_grid.convolute_with_one(
2212,
pdf.xfxQ2,
pdf.alphasQ2,
np.array([], dtype=bool),
np.array([], dtype=np.uint64),
np.array([], dtype=bool),
sv_list,
).reshape(bin_number, len(sv_list))
rtol = 1.0e-14
for sv in sv_list:
for n_res, old_res in zip(
to_test_res.transpose()[sv_list.index(sv)],
plusrensv_res.transpose()[sv_list.index(sv)],
):
np.testing.assert_allclose(n_res, old_res, rtol=rtol)
| 2,381 | 34.029412 | 82 | py |
pineko | pineko-main/benchmarks/bench_configs.py | import pytest
import pineko
def benchmark_detect(test_files):
with pytest.raises(FileNotFoundError):
pineko.configs.detect()
conf_file = pineko.configs.detect(test_files)
def benchmark_load(test_files):
conf_file = pineko.configs.load(test_files)
assert conf_file["paths"]["root"] == test_files
assert conf_file["paths"]["grids"] == "data/grids/"
| 380 | 22.8125 | 55 | py |
pineko | pineko-main/src/pineko/parser.py | """Interface to ymldb."""
# ATTENTION: this is a partial copy from
# https://github.com/NNPDF/nnpdf/blob/7cb96fc05ca2a2914bc1ccc864865e0ca4e66983/validphys2/src/validphys/pineparser.py
import yaml
EXT = "pineappl.lz4"
class YamlFileNotFound(FileNotFoundError):
"""ymldb file for dataset not found."""
class GridFileNotFound(FileNotFoundError):
"""PineAPPL file for FK table not found."""
def _load_yaml(yaml_file):
"""Load a dataset.yaml file.
Parameters
----------
yaml_file : Path
path of the yaml file for the given dataset
Returns
-------
dict :
        normalized parsed file content
"""
if not yaml_file.exists():
raise YamlFileNotFound(yaml_file)
ret = yaml.safe_load(yaml_file.read_text())
# Make sure the operations are upper-cased for compound-compatibility
ret["operation"] = "NULL" if ret["operation"] is None else ret["operation"].upper()
return ret
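# Illustrative shape of a dataset yaml handled above (grid names are made up):
#   operation: RATIO
#   operands:
#   - [GRID_NUM, GRID_NUM_BIS]
#   - [GRID_DEN]
# `operation` may also be null (normalized to "NULL"); each member of `operands` is later
# resolved to "<member>.pineappl.lz4" in get_yaml_information().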
def get_yaml_information(yaml_file, grids_folder):
"""Given a yaml_file, returns the corresponding dictionary and grids.
The dictionary contains all information and we return an extra field
with all the grids to be loaded for the given dataset.
Parameters
----------
yaml_file : pathlib.Path
path of the yaml file for the given dataset
grids_folder : pathlib.Path
path of the grids folder
Returns
-------
yaml_content: dict
Metadata prepared for the FKTables
paths: list(list(path))
List (of lists) with all the grids that will need to be loaded
"""
yaml_content = _load_yaml(yaml_file)
# Turn the operands and the members into paths (and check all of them exist)
ret = []
for operand in yaml_content["operands"]:
tmp = []
for member in operand:
p = grids_folder / f"{member}.{EXT}"
if not p.exists():
raise GridFileNotFound(f"Failed to find {p}")
tmp.append(p)
ret.append(tmp)
return yaml_content, ret
| 2,035 | 26.890411 | 117 | py |
pineko | pineko-main/src/pineko/theory.py | """Tools related to generation of a list of FK tables.
The typical use case of pineko is the generation of a list of FK tables,
all with common theory parameters. The collective list of this FK tables
together with other theory ingredients (such as C-factors) are often
commonly referred to as 'theory'.
"""
import logging
import time
import eko
import eko.io.legacy
import numpy as np
import pineappl
import rich
import yaml
from . import check, configs, evolve, parser, scale_variations, theory_card
logger = logging.getLogger(__name__)
def check_scvar_evolve(grid, max_as, max_al, kind: check.Scale):
"""Check scale variations and central orders consistency."""
available, max_as_effective = check.contains_sv(grid, max_as, max_al, kind)
if max_as == max_as_effective:
if available is check.AvailableAtMax.SCVAR:
raise ValueError("Central order is not available but sv order is.")
if max_as < max_as_effective and available is not check.AvailableAtMax.BOTH:
raise ValueError("No available central order or sv order.")
class TheoryBuilder:
"""Common builder application to create the ingredients for a theory.
Parameters
----------
theory_id : int
theory identifier
    datasets : list(str)
list of datasets
silent : bool
suppress logs
clear_logs : bool
        erase previous logs (instead of appending)
overwrite : bool
allow files to be overwritten instead of skipping
"""
def __init__(
self, theory_id, datasets, silent=False, clear_logs=False, overwrite=False
):
"""Initialize theory object."""
self.theory_id = theory_id
self.datasets = datasets
self.silent = silent
self.clear_logs = clear_logs
self.overwrite = overwrite
@property
def operator_cards_path(self):
"""Suffix paths.operator_cards with theory id."""
return configs.configs["paths"]["operator_cards"] / str(self.theory_id)
def ekos_path(self, tid=None):
"""Suffix paths.ekos with theory id.
Parameters
----------
tid : int
theory id, defaults to my theory id
Returns
-------
pathlib.Path :
true path
"""
if tid is None:
tid = self.theory_id
return configs.configs["paths"]["ekos"] / str(tid)
@property
def fks_path(self):
"""Suffix paths.fktables with theory id."""
return configs.configs["paths"]["fktables"] / str(self.theory_id)
def grids_path(self, tid=None):
"""Suffix paths.grids with theory id.
Parameters
----------
tid : int
theory id, defaults to my theory id
Returns
-------
pathlib.Path :
true path
"""
if tid is None:
tid = self.theory_id
return configs.configs["paths"]["grids"] / str(tid)
def load_grids(self, ds):
"""Load all grids (i.e. process scale) of a dataset.
Parameters
----------
ds : str
dataset name
Returns
-------
grids : dict
mapping basename to path
"""
paths = configs.configs["paths"]
_info, grids = parser.get_yaml_information(
paths["ymldb"] / f"{ds}.yaml", self.grids_path()
)
# the list is still nested, so flatten
grids = [grid for opgrids in grids for grid in opgrids]
# then turn into a map name -> path
grids = {grid.stem.rsplit(".", 1)[0]: grid for grid in grids}
return grids
def inherit_grid(self, name, grid, other):
"""Inherit a grid to a new theory.
Parameters
----------
name : str
            grid name, i.e. its true stem
grid : pathlib.Path
path to grid
other : pathlib.Path
new folder
"""
new = other / f"{name}.{parser.EXT}"
if new.exists():
if not self.overwrite:
rich.print(f"Skipping existing grid {new}")
return
new.unlink()
# link
new.symlink_to(grid)
if new.exists():
rich.print(f"[green]Success:[/] Created link at {new}")
def inherit_grids(self, target_theory_id):
"""Inherit grids to a new theory.
Parameters
----------
target_theory_id : int
target theory id
"""
other = self.grids_path(target_theory_id)
other.mkdir(exist_ok=True)
self.iterate(self.inherit_grid, other=other)
def inherit_eko(self, name, _grid, other):
"""Inherit a EKO to a new theory.
Parameters
----------
name : str
grid name, i.e. it's true stem
grid : pathlib.Path
path to grid
other : pathlib.Path
new folder
"""
eko_path = self.ekos_path() / f"{name}.tar"
new = other / f"{name}.tar"
if new.exists():
if not self.overwrite:
rich.print(f"Skipping existing eko {new}")
return
new.unlink()
# link
new.symlink_to(eko_path)
if new.exists():
rich.print(f"[green]Success:[/] Created link at {new}")
def inherit_ekos(self, target_theory_id):
"""Inherit ekos to a new theory.
Parameters
----------
target_theory_id : int
target theory id
"""
other = self.ekos_path(target_theory_id)
other.mkdir(exist_ok=True)
self.iterate(self.inherit_eko, other=other)
def iterate(self, f, **kwargs):
"""Iterate grids in datasets.
Additional keyword arguments are simply passed down.
Parameters
----------
f : callable
            iterated callable receiving name and grid as argument
"""
for ds in self.datasets:
rich.print(f"Analyze {ds}")
grids = self.load_grids(ds)
for name, grid in grids.items():
f(name, grid, **kwargs)
rich.print()
def opcard(self, name, grid, tcard):
"""Write a single operator card.
Parameters
----------
name : str
            grid name, i.e. its true stem
grid : pathlib.Path
path to grid
tcard : dict
theory card
"""
opcard_path = self.operator_cards_path / f"{name}.yaml"
if opcard_path.exists():
if not self.overwrite:
rich.print(f"Skipping existing operator card {opcard_path}")
return
_x_grid, q2_grid = evolve.write_operator_card_from_file(
grid,
self.operator_cards_path
/ configs.configs["paths"]["operator_card_template_name"],
opcard_path,
tcard,
)
if opcard_path.exists():
rich.print(
f"[green]Success:[/] Wrote card with {len(q2_grid)} Q2 points to {opcard_path}"
)
def opcards(self):
"""Write operator cards."""
tcard = theory_card.load(self.theory_id)
self.operator_cards_path.mkdir(exist_ok=True)
self.iterate(self.opcard, tcard=tcard)
def load_operator_card(self, name):
"""Read current operator card.
Parameters
----------
name : str
            grid name, i.e. its true stem
Returns
-------
ocard : dict
operator card
"""
opcard_path = self.operator_cards_path / f"{name}.yaml"
with open(opcard_path, encoding="utf-8") as f:
ocard = yaml.safe_load(f)
return ocard
def activate_logging(self, path, filename, activated_loggers=()):
"""Activate the logging facilities.
Parameters
----------
path : pathlib.Path
source directory
filename : str
log file name
activated_loggers : list(str)
list of loggers that get registered
"""
# nothing to do?
if self.silent or not path:
return False
        # eventually remove old stuff?
log_path = path / filename
if self.clear_logs:
log_path.write_text("")
# register everything
log_file = logging.FileHandler(log_path)
log_file.setLevel(logging.INFO)
log_file.setFormatter(
logging.Formatter("%(asctime)s %(name)s/%(levelname)s: %(message)s")
)
for logger_ in (logger, *[logging.getLogger(n) for n in activated_loggers]):
logger_.handlers = []
logger_.addHandler(log_file)
logger_.setLevel(logging.INFO)
return True
def eko(self, name, _grid, tcard):
"""Compute a single eko.
Parameters
----------
name : str
            grid name, i.e. its true stem
grid : pathlib.Path
path to grid
tcard : dict
theory card
"""
paths = configs.configs["paths"]
# activate logging
self.activate_logging(
paths["logs"]["eko"], f"{self.theory_id}-{name}.log", ("eko",)
)
# setup data
ocard = self.load_operator_card(name)
# The operator card has been already generated in the correct format
# The theory card needs to be converted to a format that eko can use
legacy_class = eko.io.runcards.Legacy(tcard, ocard)
new_theory = legacy_class.new_theory
new_op = eko.io.runcards.OperatorCard.from_dict(ocard)
eko_filename = self.ekos_path() / f"{name}.tar"
if eko_filename.exists():
if not self.overwrite:
rich.print(f"Skipping existing operator {eko_filename}")
return
else:
eko_filename.unlink()
# do it!
logger.info("Start computation of %s", name)
start_time = time.perf_counter()
# Actual computation of the EKO
eko.runner.solve(new_theory, new_op, eko_filename)
logger.info(
"Finished computation of %s - took %f s",
name,
time.perf_counter() - start_time,
)
if eko_filename.exists():
rich.print(f"[green]Success:[/] Wrote EKO to {eko_filename}")
def ekos(self):
"""Compute all ekos."""
tcard = theory_card.load(self.theory_id)
self.ekos_path().mkdir(exist_ok=True)
self.iterate(self.eko, tcard=tcard)
def fk(self, name, grid_path, tcard, pdf):
"""Compute a single FK table.
Parameters
----------
name : str
            grid name, i.e. its true stem
grid_path : pathlib.Path
path to grid
tcard : dict
theory card
pdf : str
comparison PDF
"""
# activate logging
paths = configs.configs["paths"]
do_log = self.activate_logging(
paths["logs"]["fk"], f"{self.theory_id}-{name}-{pdf}.log"
)
# check if grid contains SV if theory is requesting them (in particular
# if theory is requesting scheme A or C)
sv_method = evolve.sv_scheme(tcard)
xir = tcard["XIR"]
xif = tcard["XIF"]
# loading grid
grid = pineappl.grid.Grid.read(grid_path)
# remove zero subgrid
grid.optimize()
# setup data
eko_filename = self.ekos_path() / f"{name}.tar"
fk_filename = self.fks_path / f"{name}.{parser.EXT}"
if fk_filename.exists():
if not self.overwrite:
rich.print(f"Skipping existing FK Table {fk_filename}")
return
max_as = 1 + int(tcard["PTO"])
# Check if we are computing FONLL-B fktable and eventually change max_as
if check.is_fonll_b(
tcard["FNS"],
grid.lumi(),
):
max_as += 1
max_al = 0
# check for sv
if not np.isclose(xir, 1.0):
check_scvar_evolve(grid, max_as, max_al, check.Scale.REN)
if sv_method is None:
if not np.isclose(xif, 1.0):
check_scvar_evolve(grid, max_as, max_al, check.Scale.FACT)
# loading ekos
with eko.EKO.edit(eko_filename) as operators:
# Obtain the assumptions hash
assumptions = theory_card.construct_assumptions(tcard)
# do it!
logger.info("Start computation of %s", name)
logger.info(
"max_as=%d, max_al=%d, xir=%f, xif=%f",
max_as,
max_al,
xir,
xif,
)
start_time = time.perf_counter()
rich.print(
rich.panel.Panel.fit(
"Computing ...", style="magenta", box=rich.box.SQUARE
),
f" {grid_path}\n",
f"+ {eko_filename}\n",
f"= {fk_filename}\n",
f"with max_as={max_as}, max_al={max_al}, xir={xir}, xif={xif}",
)
_grid, _fk, comparison = evolve.evolve_grid(
grid,
operators,
fk_filename,
max_as,
max_al,
xir=xir,
xif=xif,
assumptions=assumptions,
comparison_pdf=pdf,
)
logger.info(
"Finished computation of %s - took %f s",
name,
time.perf_counter() - start_time,
)
if do_log and comparison is not None:
logger.info("Comparison with %s:\n %s", pdf, comparison.to_string())
if fk_filename.exists():
rich.print(f"[green]Success:[/] Wrote FK table to {fk_filename}")
def fks(self, pdf):
"""Compute all FK tables.
Parameters
----------
pdf : str
comparison PDF
"""
tcard = theory_card.load(self.theory_id)
self.fks_path.mkdir(exist_ok=True)
self.iterate(self.fk, tcard=tcard, pdf=pdf)
def construct_ren_sv_grids(self, flavors):
"""Construct renormalization scale variations terms for all the grids in a dataset."""
tcard = theory_card.load(self.theory_id)
self.iterate(self.construct_ren_sv_grid, tcard=tcard, flavors=flavors)
def construct_ren_sv_grid(self, name, grid_path, tcard, flavors):
"""Construct renormalization scale variations terms for a grid."""
max_as = int(tcard["PTO"])
rich.print(f"Computing renormalization scale variations for {name}")
scale_variations.compute_ren_sv_grid(grid_path, max_as, flavors)
| 14,799 | 30.827957 | 95 | py |
pineko | pineko-main/src/pineko/check.py | """Tools to check compatibility of EKO and grid."""
from dataclasses import dataclass
from enum import Enum, auto
from typing import Tuple
import numpy as np
import pineappl
@dataclass
class ScaleValue:
"""Contain the information of a kind of scale variations and its index in the orders of a pineappl grid."""
description: str
index: int
class Scale(Enum):
"""Auxiliary class to list the possible scale variations."""
REN = ScaleValue("renormalization scale variations", -2)
FACT = ScaleValue("factorization scale variations", -1)
class AvailableAtMax(Enum):
"""Hold the information of a scale variation check.
BOTH means that both the central order and the scale variation order are contained in the grid.
CENTRAL means that only the central order is present.
SCVAR means that only the scale variation order is present.
"""
BOTH = auto()
CENTRAL = auto()
SCVAR = auto()
def islepton(el):
"""Return True if el is a lepton PID, otherwise return False."""
if 10 < abs(el) < 17:
return True
return False
def in1d(a, b, rtol=1e-05, atol=1e-08):
"""Improved version of np.in1d.
Thanks: https://github.com/numpy/numpy/issues/7784#issuecomment-848036186
Parameters
----------
a : list
needle
b : list
haystack
rtol : float
allowed relative error
atol : float
allowed absolute error
Returns
-------
list
mask of found elements
"""
if len(a) == 1:
for be in b:
if np.isclose(be, a[0], rtol=rtol, atol=atol):
return [True]
return [False]
ss = np.searchsorted(a[1:-1], b, side="left")
return np.isclose(a[ss], b, rtol=rtol, atol=atol) | np.isclose(
a[ss + 1], b, rtol=rtol, atol=atol
)
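# A minimal usage sketch (the values below are made up for illustration): the
# returned mask has one entry per element of ``b`` and flags which of them are
# found in the (sorted) ``a`` within the given tolerances, e.g.
#     in1d(np.array([1.0, 2.0, 3.0]), np.array([2.0000001, 5.0]))
#     # -> array([ True, False])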
def check_grid_and_eko_compatible(pineappl_grid, operators, xif, max_as, max_al):
"""Check whether the EKO operators and the PineAPPL grid are compatible.
Parameters
----------
pineappl_grid : pineappl.grid.Grid
grid
operators : eko.EKO
operators
xif : float
factorization scale variation
max_as: int
max order of alpa_s
max_al: int
max order of alpha
Raises
------
ValueError
If the operators and the grid are not compatible.
"""
order_mask = pineappl.grid.Order.create_mask(
pineappl_grid.orders(), max_as, max_al, True
)
evol_info = pineappl_grid.evolve_info(order_mask)
x_grid = evol_info.x1
muf2_grid = evol_info.fac1
# Q2 grid
if not np.all(
in1d(np.unique(list(operators.mu2grid)), xif * xif * np.array(muf2_grid))
):
raise ValueError(
"Q2 grid in pineappl grid and eko operator are NOT compatible!"
)
# x-grid
if not np.all(
in1d(np.unique(operators.bases.targetgrid.tolist()), np.array(x_grid))
):
raise ValueError("x grid in pineappl grid and eko operator are NOT compatible!")
def is_fonll_b(fns, lumi):
"""Check if the fktable we are computing is a DIS FONLL-B fktable.
Parameters
----------
fns : str
flavor number scheme (from the theory card)
lumi : list(list(tuple))
luminosity info
Returns
-------
bool
true if the fktable is a FONLL-B DIS fktable
"""
for lists in lumi:
for el in lists:
if (not islepton(el[0])) and (not islepton(el[1])):
# in this case we are sure it is not DIS so for sure it is not FONLL-B
return False
if fns == "FONLL-B":
return True
return False
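# Illustrative sketch (the luminosity entries are hypothetical; only the first
# two members of each tuple, the PIDs, matter for the check above):
#     is_fonll_b("FONLL-B", [[(11, 2, 1.0)]])  # DIS-like lumi    -> True
#     is_fonll_b("FONLL-B", [[(2, 2, 1.0)]])   # purely hadronic  -> False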
def orders(grid, max_as, max_al) -> list:
"""Select the relevant orders.
The orders in the grid are filtered according to `max_as` and `max_al`.
"""
order_array = np.array([order.as_tuple() for order in grid.orders()])
order_mask = pineappl.grid.Order.create_mask(grid.orders(), max_as, max_al, True)
order_list = order_array[order_mask]
return order_list
def pure_qcd(orders):
"""Select the QCD LO and pure QCD corrections to it."""
min_al = min(ord[1] for ord in orders)
return [ord for ord in orders if ord[1] == min_al]
def contains_sv(
grid: pineappl.grid.Grid, max_as: int, max_al: int, sv_type: Scale
) -> Tuple[AvailableAtMax, int]:
"""Check whether renormalization scale-variations are available in the pineappl grid."""
svindex = sv_type.value.index
ords = pure_qcd(orders(grid, max_as, max_al))
max_as = max(ord[0] for ord in ords)
min_as = min(ord[0] for ord in ords)
max_as_cen = max(ord[0] for ord in ords if ord[svindex] == 0)
max_as_sv = max((ord[0] for ord in ords if ord[svindex] != 0), default=0)
if max_as_cen == max_as:
if max_as_sv == max_as:
checkres = AvailableAtMax.BOTH
# This is the LO case so for both FACT and REN we do not expect sv orders at all
elif max_as == min_as:
checkres = AvailableAtMax.BOTH
# For renormalization scale variations, the NLO sv order is not present if the first non zero order is at alpha^0
elif max_as == 1 and sv_type is Scale.REN and min_as == 0:
checkres = AvailableAtMax.BOTH
else:
checkres = AvailableAtMax.CENTRAL
else:
checkres = AvailableAtMax.SCVAR
# Since max_as_effective will be compared to max_as and we are using different conventions for the two, here we sum 1 to max_as_effective and make it relative to the first non zero order
return checkres, max_as - min_as + 1
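# Sketch of the intended use (``grid`` stands for an already loaded
# ``pineappl.grid.Grid``; the orders are illustrative):
#     checkres, max_as_effective = contains_sv(grid, max_as=2, max_al=0, sv_type=Scale.REN)
#     if checkres is AvailableAtMax.CENTRAL:
#         ...  # the scale variation orders still have to be generated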
| 5,596 | 28.930481 | 190 | py |
pineko | pineko-main/src/pineko/scale_variations.py | """Module to generate scale variations."""
import pathlib
from enum import Enum
from typing import Dict, List, Optional, Tuple
import numpy as np
import pineappl
import rich
from eko import beta
from . import check
AS_NORM = 1.0 / (4.0 * np.pi)
OrderTuple = Tuple[int, int, int, int]
"""Tuple representing a PineAPPL order."""
class ReturnState(Enum):
"""Auxiliary class to list the possible return states."""
    ALREADY_THERE = "[green]Renormalization scale variations are already in the grid"
ORDER_EXISTS_FAILURE = (
"Order_exists is True but the order does not appear to be in the grid"
)
MISSING_CENTRAL = "Central order is not high enough to compute requested sv orders"
    SUCCESS = "[green]Success: scale variation orders included!"
def qcd(order: OrderTuple) -> int:
"""Extract the QCD order from an OrderTuple."""
return order[0]
def ren_sv_coeffs(m, max_as, logpart, which_part, nf):
"""Ren_sv coefficient for the requested part.
Parameters
----------
m : int
first non zero perturbative order
max_as : int
max order of alpha_s
logpart : int
power of the renormalization scale log asked
which_part : int
asked perturbative order contribution to be rescaled
nf : int
number of active flavors
Returns
-------
float
renormalization scale variation contribution
"""
bcoeff = beta.beta_qcd((max_as - logpart - which_part + 2, 0), nf)
as_normalization = AS_NORM ** (max_as - which_part)
if max_as == 0:
return 0.0
if max_as == 2:
if which_part > 0:
m += 1
elif logpart > 1:
m = 0.5 * m * (m + 1)
return m * as_normalization * bcoeff
def requirements(m: int, max_as: int, al: int) -> Dict[OrderTuple, List[OrderTuple]]:
"""Compute a dictionary with all the necessary orders to compute to have the full renormalization scale variation.
`m` is the first non-zero perturbative order of the grid, and `al` is the QED order of the "QCD" leading order.
"""
return {
(m + max_as, al, delt + 1, 0): [
(m + de, al, 0, 0) for de in range(max_as - delt)
]
for delt in range(max_as)
}
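# Worked example of the mapping above for a grid whose first non-zero order is
# the pure-QCD LO (m=0, al=0) with max_as=2; the tuples follow the PineAPPL
# convention (as, al, log_xir, log_xif):
#     requirements(0, 2, 0)
#     # -> {(2, 0, 1, 0): [(0, 0, 0, 0), (1, 0, 0, 0)],
#     #     (2, 0, 2, 0): [(0, 0, 0, 0)]}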
def initialize_new_grid(grid, new_order):
"""Initialize a new grid only containing one order and with the same setting of an original grid."""
# Retrieve parameters to create new grid
bin_limits = [
float(bin) for bin in range(grid.bins() + 1)
] # The +1 explanation is that n bins have n+1 bin limits, and range generates numbers from a half-open interval (range(n) generates n numbers).
lumi_grid = [pineappl.lumi.LumiEntry(mylum) for mylum in grid.lumi()]
subgrid_params = pineappl.subgrid.SubgridParams()
new_order = [pineappl.grid.Order(*new_order)]
# create new_grid with same lumi and bin_limits of the original grid but with new_order
new_grid = pineappl.grid.Grid.create(
lumi_grid, new_order, bin_limits, subgrid_params
)
return new_grid
def create_svonly(grid, order, new_order, scalefactor):
"""Create a grid containing only the renormalization scale variations at a given order for a grid."""
new_grid = initialize_new_grid(grid, new_order)
# extract the relevant order to rescale from the grid for each lumi and bin
grid_orders = [order.as_tuple() for order in grid.orders()]
order_index = grid_orders.index(order)
for lumi_index in range(len(new_grid.lumi())):
for bin_index in range(grid.bins()):
extracted_subgrid = grid.subgrid(order_index, bin_index, lumi_index)
extracted_subgrid.scale(scalefactor)
# Set this subgrid inside the new grid
new_grid.set_subgrid(0, bin_index, lumi_index, extracted_subgrid)
# Fixing bin_limits and normalizations
bin_dimension = grid.bin_dimensions()
limits = []
for num_bin in range(grid.bins()):
for dim in range(bin_dimension):
limits.append((grid.bin_left(dim)[num_bin], grid.bin_right(dim)[num_bin]))
norma = grid.bin_normalizations()
remap_obj = pineappl.bin.BinRemapper(norma, limits)
new_grid.set_remapper(remap_obj)
return new_grid
def create_grids(gridpath, max_as, nf):
"""Create all the necessary scale variations grids for a certain starting grid."""
grid = pineappl.grid.Grid.read(gridpath)
grid_orders = [orde.as_tuple() for orde in grid.orders()]
order_mask = pineappl.grid.Order.create_mask(grid.orders(), max_as, 0, True)
grid_orders_filtered = list(np.array(grid_orders)[order_mask])
grid_orders_filtered.sort(key=qcd)
first_nonzero_order = grid_orders_filtered[0]
min_al = first_nonzero_order[1]
m_value = first_nonzero_order[0]
nec_orders = requirements(m_value, max_as, min_al)
grid_list = {}
for to_construct_order in nec_orders:
list_grid_order = []
for nec_order in nec_orders[to_construct_order]:
# The logpart of the coefficient I am asking is just the [2] entry of to_construct_order
# The QCD order of the part I am rescaling is just nec_order[0] but I need to rescale it with respect to the first non-zero order
scalefactor = ren_sv_coeffs(
m_value, max_as, to_construct_order[2], nec_order[0] - m_value, nf
)
list_grid_order.append(
create_svonly(grid, nec_order, to_construct_order, scalefactor)
)
grid_list[to_construct_order] = list_grid_order
return grid_list, nec_orders
def write_grids(gridpath, grid_list):
"""Write the single grids."""
base_name = gridpath.stem.split(".pineappl")[0]
final_part = ".pineappl.lz4"
grid_paths = []
for order in grid_list:
# For each scale variation order, if more than one grid contributes, merge them all together in a single one
if len(grid_list[order]) > 1:
for grid in grid_list[order][1:]:
tmp_path = gridpath.parent / ("tmp" + final_part)
grid.write_lz4(tmp_path)
grid_list[order][0].merge_from_file(tmp_path)
tmp_path.unlink()
new_grid_path = gridpath.parent / (
base_name + "_" + str(order[2]) + final_part
) # order[2] is the ren_sv order
grid_paths.append(new_grid_path)
grid_list[order][0].write_lz4(new_grid_path)
return grid_paths
def merge_grids(
gridpath, grid_list_path, target_path=None, nec_orders={}, order_exists=False
):
"""Merge the single grids in the original."""
grid = pineappl.grid.Grid.read(gridpath)
if target_path is None:
target_path = gridpath.parent / gridpath.name
else:
target_path = target_path / gridpath.name
if order_exists:
grid = construct_and_dump_order_exists_grid(grid, list(nec_orders.keys())[0])
for grid_path in grid_list_path:
grid.merge_from_file(grid_path)
grid_path.unlink()
grid.write_lz4(target_path)
def construct_and_dump_order_exists_grid(ori_grid, to_construct_order):
"""Remove the order that has to be substituted from the grid."""
bin_limits = [float(bin) for bin in range(ori_grid.bins() + 1)]
lumi_grid = [pineappl.lumi.LumiEntry(mylum) for mylum in ori_grid.lumi()]
subgrid_params = pineappl.subgrid.SubgridParams()
ori_grid_orders = [order.as_tuple() for order in ori_grid.orders()]
new_orders = [
pineappl.grid.Order(*ord)
for ord in ori_grid_orders
if ord != to_construct_order
]
new_grid = pineappl.grid.Grid.create(
lumi_grid, new_orders, bin_limits, subgrid_params
)
orders_indeces = [ori_grid_orders.index(order.as_tuple()) for order in new_orders]
for order_index in orders_indeces:
for lumi_index in range(len(lumi_grid)):
for bin_index in range(ori_grid.bins()):
extr_subgrid = ori_grid.subgrid(order_index, bin_index, lumi_index)
new_grid.set_subgrid(
orders_indeces.index(order_index),
bin_index,
lumi_index,
extr_subgrid,
)
bin_dimension = ori_grid.bin_dimensions()
limits = []
for num_bin in range(ori_grid.bins()):
for dim in range(bin_dimension):
limits.append(
(
ori_grid.bin_left(dim)[num_bin],
ori_grid.bin_right(dim)[num_bin],
)
)
norma = ori_grid.bin_normalizations()
remap_obj = pineappl.bin.BinRemapper(norma, limits)
new_grid.set_remapper(remap_obj)
new_grid.set_key_value("initial_state_2", ori_grid.key_values()["initial_state_2"])
return new_grid
def compute_ren_sv_grid(
grid_path: pathlib.Path,
max_as: int,
nf: int,
target_path: Optional[pathlib.Path] = None,
order_exists: bool = False,
):
"""Generate renormalization scale variation terms for the given grid, according to the max_as."""
# First let's check if the ren_sv are already there
checkres, max_as_effective = check.contains_sv(
pineappl.grid.Grid.read(grid_path), max_as, 0, check.Scale.REN
)
# Usual different convention with max_as
if max_as_effective == max_as and (checkres is not check.AvailableAtMax.CENTRAL):
if not order_exists:
return ReturnState.ALREADY_THERE
elif order_exists:
return ReturnState.ORDER_EXISTS_FAILURE
if max_as_effective < max_as and checkres is check.AvailableAtMax.SCVAR:
return ReturnState.MISSING_CENTRAL
# With respect to the usual convention here max_as is max_as-1
max_as -= 1
# Creating all the necessary grids
grid_list, nec_orders = create_grids(grid_path, max_as, nf)
# Writing the sv grids
sv_grids_paths = write_grids(gridpath=grid_path, grid_list=grid_list)
# Merging all together
merge_grids(
gridpath=grid_path,
grid_list_path=sv_grids_paths,
target_path=target_path,
nec_orders=nec_orders,
order_exists=order_exists,
)
return ReturnState.SUCCESS
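# Typical invocation sketch (the path is illustrative). Note that when no
# target_path is given the original grid is overwritten in place.
#     state = compute_ren_sv_grid(pathlib.Path("grids/mygrid.pineappl.lz4"), max_as=2, nf=5)
#     if state is not ReturnState.SUCCESS:
#         ...  # nothing was written, inspect the returned state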
| 10,171 | 37.530303 | 149 | py |
pineko | pineko-main/src/pineko/ekompatibility.py | """Compatibility layer for EKO migration."""
from typing import Any, Dict
from eko import EKO, basis_rotation
def pineappl_layout(operator: EKO) -> Dict[str, Any]:
"""Extract information required by :func:`pineappl.grid.Grid.convolute_eko`.
Parameters
----------
operator: eko.EKO
evolution operator in the new layout
Returns
-------
dict
a minimal object, with all and only the information consumed by PineAPPL
"""
oldgrid = {}
oldgrid["Q2grid"] = {}
for q2, op in operator.items():
oldop = dict(operators=op.operator)
oldgrid["Q2grid"][q2[0]] = oldop
oldgrid["q2_ref"] = operator.mu20
oldgrid["targetpids"] = operator.bases.targetpids
oldgrid["targetgrid"] = operator.bases.targetgrid.raw
# The EKO contains the rotation matrix but we pass the list of
# evol basis pids to pineappl.
oldgrid["inputpids"] = basis_rotation.evol_basis_pids
oldgrid["inputgrid"] = operator.bases.inputgrid.raw
return oldgrid
| 1,022 | 27.416667 | 80 | py |
pineko | pineko-main/src/pineko/theory_card.py | """Tools related to theory cards."""
import pathlib
from typing import Any, Dict
import yaml
from . import configs
def path(theory_id: int) -> pathlib.Path:
"""Determine path to theory card.
Parameters
----------
theory_id : int
theory id
Returns
-------
pathlib.Path
theory card path
"""
return configs.configs["paths"]["theory_cards"] / f"{theory_id}.yaml"
def load(theory_id: int) -> Dict[str, Any]:
"""Load a theory card.
Parameters
----------
theory_id : int
theory id
Returns
-------
theory_card : dict
theory card
"""
with open(path(theory_id), encoding="utf-8") as f:
theory_card = yaml.safe_load(f)
return theory_card
def construct_assumptions(tcard):
"""Compute the assumptions hash from the theory settings.
    The information used is the scale :math:`Q_0` of the FK table,
    the matching scales of the heavy quarks, and whether an intrinsic charm
    component is allowed.
Parameters
----------
tcard : dict
theory card
Returns
-------
str
assumptions hash
"""
# retrieve the relevant info from theory card
Q0 = tcard["Q0"]
match_scales = {
"c": tcard["kcThr"] * tcard["mc"],
"b": tcard["kbThr"] * tcard["mb"],
"t": tcard["ktThr"] * tcard["mt"],
}
ic = tcard["IC"]
hash_ = "Nf"
act_flav = 6
mod = "Ind"
if Q0 < match_scales["t"]:
act_flav = 5
if Q0 < match_scales["b"]:
act_flav = 4
if Q0 < match_scales["c"]:
act_flav = 3
if ic:
act_flav += 1
mod = "Sym"
hash_ += str(act_flav) + mod
return hash_
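# Worked example (the theory-card values are illustrative): with Q0=1.65,
# unit matching thresholds, mc=2.0, mb=3.0, mt=50.0 and IC=1 the starting scale
# sits below the charm matching scale and intrinsic charm is on, hence
#     construct_assumptions(tcard)  # -> "Nf4Sym"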
| 1,734 | 19.411765 | 94 | py |
pineko | pineko-main/src/pineko/comparator.py | """Tools to compare grids and FK tables."""
import numpy as np
import pandas as pd
import pineappl
def compare(pine, fktable, max_as, max_al, pdf, xir, xif):
"""Build comparison table.
Parameters
----------
pine : pineappl.grid.Grid
        unconvoluted grid
    fktable : pineappl.fk_table.FkTable
        convoluted grid
max_as : int
maximum power of strong coupling
max_al : int
maximum power of electro-weak coupling
pdf : str
PDF set name
xir : float
renormalization scale variation
xif : float
factorization scale variation
Returns
-------
df : pd.DataFrame
comparison table
"""
import lhapdf # pylint: disable=import-error
pdfset = lhapdf.mkPDF(pdf, 0)
pdgid = int(pdfset.set().get_entry("Particle"))
order_mask = pineappl.grid.Order.create_mask(pine.orders(), max_as, max_al, True)
before = np.array(
pine.convolute_with_one(
pdgid,
pdfset.xfxQ2,
pdfset.alphasQ2,
order_mask=order_mask,
xi=((xir, xif),),
)
)
after = np.array(fktable.convolute_with_one(pdgid, pdfset.xfxQ2))
df = pd.DataFrame()
# add bin info
for d in range(pine.bin_dimensions()):
try:
label = pine.key_values()[f"x{d+1}_label"]
except KeyError:
label = f"O{d+1}"
df[f"{label} left"] = pine.bin_left(d)
df[f"{label} right"] = pine.bin_right(d)
# add data
df["PineAPPL"] = before
df["FkTable"] = after
df["permille_error"] = (after / before - 1.0) * 1000.0
return df
| 1,639 | 25.451613 | 85 | py |
pineko | pineko-main/src/pineko/version.py | """Version information."""
__version__ = "0.0.0"
| 49 | 15.666667 | 26 | py |
pineko | pineko-main/src/pineko/evolve.py | """Tools related to evolution/eko."""
import copy
import os
import pathlib
import eko
import eko.basis_rotation as br
import numpy as np
import pineappl
import rich
import rich.box
import rich.panel
import yaml
from eko.io.types import ScaleVariationsMethod
from eko.matchings import Atlas, nf_default
from eko.quantities import heavy_quarks
from . import check, comparator, ekompatibility, version
def sv_scheme(tcard):
"""Infere the factorization scale_variation scheme to be used from the theory card.
Parameters
----------
tcard : dict
theory card
"""
modsv_list = {a.value for a in ScaleVariationsMethod}
xif = tcard["XIF"]
modsv = tcard["ModSV"]
if np.isclose(xif, 1.0):
if modsv in modsv_list:
raise ValueError("ModSv is not None but xif is 1.0")
return None
# scheme C case
if modsv not in modsv_list:
return None
return modsv
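# Behaviour sketch (the theory-card values are illustrative):
#     XIF = 1.0, ModSV not an EKO method              -> None
#     XIF = 2.0, ModSV not an EKO method              -> None (scheme C: xif handled at grid level)
#     XIF = 2.0, ModSV a ScaleVariationsMethod value  -> that method
# A recognized ModSV together with XIF = 1.0 is inconsistent and raises ValueError.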
def write_operator_card_from_file(
pineappl_path: os.PathLike,
default_card_path: os.PathLike,
card_path: os.PathLike,
tcard,
):
"""Generate operator card for a grid.
Parameters
----------
pineappl_path : str or os.PathLike
path to grid to evolve
default_card : str or os.PathLike
base operator card
card_path : str or os.PathLike
target path
tcard: dict
theory card for the run
Returns
-------
x_grid : np.ndarray
written x grid
q2_grid : np.ndarray
written Q2 grid
"""
# raise in python rather then rust
if not pathlib.Path(pineappl_path).exists():
raise FileNotFoundError(pineappl_path)
pineappl_grid = pineappl.grid.Grid.read(pineappl_path)
default_card = yaml.safe_load(
pathlib.Path(default_card_path).read_text(encoding="utf-8")
)
return write_operator_card(pineappl_grid, default_card, card_path, tcard)
def write_operator_card(pineappl_grid, default_card, card_path, tcard):
"""Generate operator card for this grid.
Parameters
----------
pineappl_grid : pineappl.grid.Grid
grid to evolve
default_card : dict
base operator card
card_path : str or os.PathLike
target path
tcard: dict
theory card for the run, since some information in EKO is now required
in operator card, but before was in the theory card
Returns
-------
x_grid : np.ndarray
written x grid
q2_grid : np.ndarray
written Q2 grid
"""
# Add a +1 to the orders for the difference in convention between nnpdf and pineappl
is_fns = int(check.is_fonll_b(tcard["FNS"], pineappl_grid.lumi()))
max_as = 1 + tcard["PTO"] + is_fns
max_al = 1 + tcard["QED"]
# ... in order to create a mask ...
order_mask = pineappl.grid.Order.create_mask(
pineappl_grid.orders(), max_as, max_al, True
)
# ... to get the x and muF grids for the eko
evol_info = pineappl_grid.evolve_info(order_mask)
muf2_grid = evol_info.fac1
operators_card = copy.deepcopy(default_card)
sv_method = sv_scheme(tcard)
xif = 1.0 if sv_method is not None else tcard["XIF"]
operators_card["configs"]["scvar_method"] = sv_method
q2_grid = (xif * xif * muf2_grid).tolist()
masses = np.array([tcard["mc"], tcard["mb"], tcard["mt"]]) ** 2
thresholds_ratios = np.array([tcard["kcThr"], tcard["kbThr"], tcard["ktThr"]]) ** 2
for q in range(tcard["MaxNfPdf"] + 1, 6 + 1):
thresholds_ratios[q - 4] = np.inf
atlas = Atlas(
matching_scales=heavy_quarks.MatchingScales(masses * thresholds_ratios),
origin=(tcard["Q0"] ** 2, tcard["nf0"]),
)
operators_card["mugrid"] = [
(float(np.sqrt(q2)), int(nf_default(q2, atlas))) for q2 in q2_grid
]
if "integrability_version" in pineappl_grid.key_values():
x_grid = evol_info.x1
x_grid = np.append(x_grid, 1.0)
operators_card["configs"]["interpolation_polynomial_degree"] = 1
operators_card["xgrid"] = x_grid.tolist()
with open(card_path, "w", encoding="UTF-8") as f:
yaml.safe_dump(operators_card, f)
return operators_card["xgrid"], q2_grid
def evolve_grid(
grid,
operators,
fktable_path,
max_as,
max_al,
xir,
xif,
assumptions="Nf6Ind",
comparison_pdf=None,
):
"""Convolute grid with EKO from file paths.
Parameters
----------
grid : pineappl.grid.Grid
unconvoluted grid
operators : eko.EKO
evolution operator
fktable_path : str
target path for convoluted grid
max_as : int
maximum power of strong coupling
max_al : int
maximum power of electro-weak coupling
xir : float
renormalization scale variation
xif : float
factorization scale variation
assumptions : str
assumptions on the flavor dimension
comparison_pdf : None or str
if given, a comparison table (with / without evolution) will be printed
"""
order_mask = pineappl.grid.Order.create_mask(grid.orders(), max_as, max_al, True)
evol_info = grid.evolve_info(order_mask)
x_grid = evol_info.x1
mur2_grid = evol_info.ren1
xif = 1.0 if operators.operator_card.configs.scvar_method is not None else xif
tcard = operators.theory_card
opcard = operators.operator_card
# rotate the targetgrid
if "integrability_version" in grid.key_values():
x_grid = np.append(x_grid, 1.0)
eko.io.manipulate.xgrid_reshape(
operators, targetgrid=eko.interpolation.XGrid(x_grid)
)
check.check_grid_and_eko_compatible(grid, operators, xif, max_as, max_al)
# rotate to evolution (if doable and necessary)
if np.allclose(operators.bases.inputpids, br.flavor_basis_pids):
eko.io.manipulate.to_evol(operators)
# Here we are checking if the EKO contains the rotation matrix (flavor to evol)
elif not np.allclose(operators.bases.inputpids, br.rotate_flavor_to_evolution):
raise ValueError("The EKO is neither in flavor nor in evolution basis.")
# PineAPPL wants alpha_s = 4*pi*a_s
# remember that we already accounted for xif in the opcard generation
evmod = eko.couplings.couplings_mod_ev(opcard.configs.evolution_method)
# Couplings ask for the square of the masses
thresholds_ratios = np.power(tcard.heavy.matching_ratios, 2.0)
for q in range(tcard.couplings.max_num_flavs + 1, 6 + 1):
thresholds_ratios[q - 4] = np.inf
sc = eko.couplings.Couplings(
tcard.couplings,
tcard.order,
evmod,
masses=[(x.value) ** 2 for x in tcard.heavy.masses],
hqm_scheme=tcard.heavy.masses_scheme,
thresholds_ratios=thresholds_ratios.tolist(),
)
# To compute the alphas values we are first reverting the factorization scale shift
# and then obtaining the renormalization scale using xir.
alphas_values = [
4.0
* np.pi
* sc.a_s(
xir * xir * mur2,
)
for mur2 in mur2_grid
]
# We need to use ekompatibility in order to pass a dictionary to pineappl
fktable = grid.evolve(
ekompatibility.pineappl_layout(operators),
xir * xir * mur2_grid,
alphas_values,
"evol",
order_mask=order_mask,
xi=(xir, xif),
)
rich.print(f"Optimizing for {assumptions}")
fktable.optimize(assumptions)
fktable.set_key_value("eko_version", operators.metadata.version)
fktable.set_key_value("pineko_version", version.__version__)
# compare before/after
comparison = None
if comparison_pdf is not None:
comparison = comparator.compare(
grid, fktable, max_as, max_al, comparison_pdf, xir, xif
)
fktable.set_key_value("results_fk", comparison.to_string())
fktable.set_key_value("results_fk_pdfset", comparison_pdf)
# write
fktable.write_lz4(str(fktable_path))
return grid, fktable, comparison
| 7,957 | 31.614754 | 88 | py |
pineko | pineko-main/src/pineko/__init__.py | """pineko = PineAPPL + EKO."""
from .cli import command
| 56 | 18 | 30 | py |
pineko | pineko-main/src/pineko/kfactor.py | """Module to include QCD K-factors in grids."""
import io
import numpy as np
import pineappl
import rich
import yaml
from pineappl import import_only_subgrid
from . import scale_variations
DEFAULT_PDF_SET = "NNPDF40_nnlo_as_01180"
def factgrid(subgrid):
"""Return the array of the factorization scales squared from a subgrid."""
return np.array([mu2.fac for mu2 in subgrid.mu2_grid()])
def rengrid(subgrid):
"""Return the array of the renormalization scales squared from a subgrid."""
return np.array([mu2.ren for mu2 in subgrid.mu2_grid()])
def read_kfactor(kfactor_path):
"""Read the k-factor and returns the central values and the pdfset used to compute it."""
with open(kfactor_path, encoding="utf-8") as f:
stars = f.readline()
if not stars.startswith("*"):
raise TypeError("First line should start with '*'.")
descstring = io.StringIO()
for line in f:
if line.startswith("*"):
break
descstring.write(line)
description = descstring.getvalue()
try:
data = np.loadtxt(f)
except Exception as e:
raise TypeError(e) from e
data = data.reshape(-1, 2)
central_value = data[:, 0]
pdf_set = description.split(sep="PDFset:")[-1].split(sep="\n")[0].strip()
# If there is no PDF set in the k-factor, a default PDF set will be used
# If the PDF set written in the file is not an actual lhapdf PDF, it will
# raise an error.
if len(pdf_set) == 0:
pdf_set = DEFAULT_PDF_SET
return central_value, pdf_set
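# Layout expected by the parser above (a sketch: the numbers are made up and
# the PDF set shown is just the module default). Only the first data column is
# returned; if no "PDFset:" entry is found, DEFAULT_PDF_SET is used.
#     ********************
#     PDFset: NNPDF40_nnlo_as_01180
#     ********************
#     1.0123  0.0031
#     1.0456  0.0035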
def construct_scales_array(
mu2_ren_grid,
m_value,
order,
new_order,
central_k_factor,
bin_index,
alphas,
order_exists,
):
"""Construct the array that will rescale the subgrid array taking into account the different renormalization scales."""
scales_array = []
for mu2 in mu2_ren_grid:
scales_array.append(
compute_scale_factor(
m_value,
order,
new_order,
mu2,
central_k_factor,
bin_index,
alphas,
order_exists,
)
)
return scales_array
def compute_scale_factor(
m,
nec_order,
to_construct_order,
Q2,
central_k_factor,
bin_index,
alphas,
order_exists,
):
"""Compute the factor to be multiplied to the given nec_order.
Parameters
----------
m : int
first non zero perturbative order
nec_order : tuple(int)
tuple of the order that has to be rescaled to get the final order
    to_construct_order : tuple(int)
tuple of the scale varied order to be constructed
Q2: float
energy scale squared of the bin
central_k_factor: list(float)
list of the centrals k-factors
bin_index: int
index of the bin
alphas: lhapdf.AlphaS
alpha_s object
Returns
-------
float
full contribution factor
"""
max_as = to_construct_order[0] - m
alpha_val = alphas.alphasQ2(Q2)
alpha_term = 1.0 / pow(alpha_val, max_as - (nec_order[0] - m))
k_term = central_k_factor[bin_index] - 1.0
if order_exists and (max_as - (nec_order[0] - m)) == 0:
k_term = central_k_factor[bin_index]
return k_term * alpha_term
def scale_subgrid(extracted_subgrid, scales_array):
"""Rescales the array contained in the subgrid using scales_array and returns a new subgrid constructed with the scaled array."""
original_array = extracted_subgrid.to_array3()
if len(original_array) != len(scales_array):
raise ValueError("The original and the scales arrays have different shapes.")
scaled_array = []
for scale_value, arr_to_scale in zip(scales_array, original_array):
scaled_array_nest = []
for arr in arr_to_scale:
scaled_array_nest.append(list(arr * scale_value))
scaled_array.append(scaled_array_nest)
x1grid = extracted_subgrid.x1_grid()
x2grid = extracted_subgrid.x2_grid()
if len(scales_array) == 0:
scaled_array = np.zeros(shape=(0, 0, 0), dtype=float)
else:
scaled_array = np.array(scaled_array, dtype=float)
mu2_grid = [tuple([mu2.ren, mu2.fac]) for mu2 in extracted_subgrid.mu2_grid()]
scaled_subgrid = import_only_subgrid.ImportOnlySubgridV2(
scaled_array, mu2_grid, x1grid, x2grid
)
return scaled_subgrid
def compute_orders_map(m, max_as, min_al, order_exists):
"""Compute a dictionary with all the necessary orders to compute the requested order.
Parameters
----------
m : int
first non zero perturbative order of the grid
max_as : int
max alpha_s order
min_al : int
al order of leading order
Returns
-------
dict(tuple(int))
description of all the needed orders
"""
add = 0
if order_exists:
add = 1
orders = {}
orders[(m + max_as, min_al, 0, 0)] = [
(m + de, min_al, 0, 0) for de in range(max_as + add)
]
return orders
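# Worked example for a pure-QCD grid with LO at alpha_s^0 (m=0, min_al=0):
#     compute_orders_map(0, 2, 0, order_exists=False)
#     # -> {(2, 0, 0, 0): [(0, 0, 0, 0), (1, 0, 0, 0)]}
# i.e. the requested order is built by rescaling the LO and NLO entries; with
# order_exists=True the (2, 0, 0, 0) entry itself is included in the rescaling.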
def create_singlegridonly(
grid, m_value, order, new_order, central_k_factor, alphas, order_exists
):
"""Create a grid containing only the contribution given by new_order."""
new_grid = scale_variations.initialize_new_grid(grid, new_order)
# extract the relevant order to rescale from the grid for each lumi and bin
grid_orders = [order.as_tuple() for order in grid.orders()]
order_index = grid_orders.index(order)
for lumi_index in range(len(new_grid.lumi())):
for bin_index in range(grid.bins()):
extracted_subgrid = grid.subgrid(order_index, bin_index, lumi_index)
scales_array = construct_scales_array(
rengrid(extracted_subgrid),
m_value,
order,
new_order,
central_k_factor,
bin_index,
alphas,
order_exists,
)
scaled_subgrid = scale_subgrid(extracted_subgrid, scales_array)
# Set this subgrid inside the new grid
new_grid.set_subgrid(0, bin_index, lumi_index, scaled_subgrid)
# Fixing bin_limits and normalizations
bin_dimension = grid.raw.bin_dimensions()
limits = []
for num_bin in range(grid.raw.bins()):
for dim in range(bin_dimension):
limits.append(
(grid.raw.bin_left(dim)[num_bin], grid.raw.bin_right(dim)[num_bin])
)
norma = grid.raw.bin_normalizations()
remap_obj = pineappl.bin.BinRemapper(norma, limits)
new_grid.set_remapper(remap_obj)
return new_grid
def create_grids(
gridpath,
max_as,
first_non_zero_as_order,
min_al,
centrals_k_factor,
alphas,
order_exists,
):
"""Create all the necessary grids for a certain starting grid."""
grid = pineappl.grid.Grid.read(gridpath)
m_value = first_non_zero_as_order
nec_orders = compute_orders_map(m_value, max_as, min_al, order_exists)
grid_list = {}
for to_construct_order in nec_orders:
list_grid_order = []
for nec_order in nec_orders[to_construct_order]:
list_grid_order.append(
create_singlegridonly(
grid,
m_value,
nec_order,
to_construct_order,
centrals_k_factor,
alphas,
order_exists,
)
)
grid_list[to_construct_order] = list_grid_order
return grid_list, nec_orders
def is_already_in(to_check, list_orders):
"""Check if the requested order is already in the grid."""
for order in list_orders:
if (
order[-2] == 0
and order[-1] == 0
and (order[0] == to_check[0])
and (order[1] == to_check[1])
):
return True
return False
def construct_and_merge_grids(
grid_path,
max_as,
first_nonzero_order,
min_al,
centrals_kfactor,
alphas,
target_folder,
order_exists,
):
"""Create, write and merge all the grids."""
# Creating all the necessary grids
grid_list, nec_orders = create_grids(
grid_path,
max_as,
first_nonzero_order[0],
min_al,
centrals_kfactor,
alphas,
order_exists,
)
# Writing the sv grids
grids_paths = scale_variations.write_grids(gridpath=grid_path, grid_list=grid_list)
# Merging all together
scale_variations.merge_grids(
gridpath=grid_path,
grid_list_path=grids_paths,
target_path=target_folder,
nec_orders=nec_orders,
order_exists=order_exists,
)
def do_it(
centrals_kfactor,
alphas,
grid_path,
grid,
max_as,
max_as_test,
target_folder,
order_exists,
):
"""Apply the centrals_kfactor to the grid if the order is not already there."""
grid_orders = [orde.as_tuple() for orde in grid.orders()]
order_mask = pineappl.grid.Order.create_mask(grid.orders(), max_as, 0, True)
grid_orders_filtered = list(np.array(grid_orders)[order_mask])
grid_orders_filtered.sort(key=scale_variations.qcd)
first_nonzero_order = grid_orders_filtered[0]
min_al = first_nonzero_order[1]
is_in = is_already_in(
(first_nonzero_order[0] + max_as_test, min_al, 0, 0), grid_orders_filtered
)
if is_in and not order_exists:
rich.print(f"[green] Success: Requested order already in the grid.")
return
elif not is_in and order_exists:
rich.print(f"[red] Abort: order exists is True but order not in the grid.")
return
construct_and_merge_grids(
grid_path,
max_as_test,
first_nonzero_order,
min_al,
centrals_kfactor,
alphas,
target_folder,
order_exists,
)
def filter_k_factors(pigrid, centrals_kfactor):
"""Filter the centrals k-factors according to their lenght compared to the number of bins of the grid."""
centrals_kfactor_filtered = np.array([])
if pigrid.bins() == len(centrals_kfactor):
rich.print(f"[orange] The number of bins match the lenght of the k-factor.")
centrals_kfactor_filtered = centrals_kfactor
elif pigrid.bins() < len(centrals_kfactor):
rich.print(
f"[yellow] The number of bins is less than the lenght of the k-factor."
)
if not all(elem == centrals_kfactor[0] for elem in centrals_kfactor):
            # More k-factor values than bins and they are not all equal: there is no unambiguous assignment.
raise ValueError("KFactor contains too many different values.")
centrals_kfactor_filtered = centrals_kfactor
else:
rich.print(
f"[yellow] The number of bins is more than the lenght of the k-factor."
)
# This is the last case in which grid.bins() > len(centrals_kfactor)
# Note that sometimes there are more bins in the grid than in the cfactor file -
# this is not a problem because in those cases either all cfactor values are the
# same (thus there is no doubt about whether we have the correct one) or the
        # non-existing cfactors would be multiplied by bins corresponding to all '0' in the
# grid.
# Let's check if we are in the first or second case
if len(np.unique(centrals_kfactor)) == 1:
# In this case I just need to add more elements to the kfactor
for _num in range(pigrid.bins()):
centrals_kfactor_filtered = np.append(
centrals_kfactor_filtered, centrals_kfactor[0]
)
else:
            # In this case the missing entries will multiply zero subgrids, so we can just add 0s
for _num in range(pigrid.bins()):
centrals_kfactor_filtered = np.append(centrals_kfactor_filtered, 0.0)
return centrals_kfactor_filtered
def compute_k_factor_grid(
grids_folder,
kfactor_folder,
yamldb_path,
max_as,
target_folder=None,
order_exists=False,
):
"""Include the k-factor in the grid in order to have its associated order in the grid itself.
Parameters
----------
grids_folder : pathlib.Path()
pineappl grids folder
kfactor_folder : pathlib.Path()
kfactors folder
yamldb_path : pathlib.Path()
path to the yaml file describing the dataset
max_as : int
max as order
target_folder: pathlib.Path
        path where to store the new grid (optional)
"""
import lhapdf # pylint: disable=import-error
# With respect to the usual convention here max_as is max_as-1
max_as_test = max_as - 1
# Extracting info from yaml file
with open(yamldb_path, encoding="utf-8") as f:
yamldict = yaml.safe_load(f)
for grid_list in yamldict["operands"]:
for grid in grid_list:
cfac_path = kfactor_folder / f"CF_QCD_{grid}.dat"
if "ATLASDY2D8TEV" in grid:
cfac_path = kfactor_folder / f"CF_QCDEWK_{grid}.dat"
centrals_kfactor, pdf_set = read_kfactor(cfac_path)
alphas = lhapdf.mkAlphaS(pdf_set)
grid_path = grids_folder / (f"{grid}.pineappl.lz4")
pigrid = pineappl.grid.Grid.read(grid_path)
centrals_kfactor_filtered = filter_k_factors(pigrid, centrals_kfactor)
do_it(
centrals_kfactor_filtered,
alphas,
grid_path,
pigrid,
max_as,
max_as_test,
target_folder,
order_exists,
)
| 13,736 | 31.863636 | 133 | py |
pineko | pineko-main/src/pineko/scaffold.py | """Tools related to generation and managing of a pineko project."""
import dataclasses
import pathlib
from .configs import NEEDED_FILES, NEEDED_KEYS
@dataclasses.dataclass
class CheckResult:
"""The results of a scaffold check.
In particular it contains a bool that is True if the check has been
successful, a list of missing entries in the configuration file and a
dictionary containing all the folders that should exist but that could not
be found.
"""
confs: list
folders: dict
@property
def success(self):
"""Whether the check was overall successful."""
return len(self.confs) == 0 and list(self.folders.keys()) == ["logs"]
def set_up_project(configs):
"""Set up all the folders spelled out in the configs dictionary.
Parameters
----------
configs : dict
configs dictionary containing all the paths to be set up
"""
for path_key, path in configs["paths"].items():
if path_key == "root" or path_key in NEEDED_FILES:
continue
if isinstance(path, pathlib.Path):
path.mkdir(parents=True, exist_ok=True)
elif isinstance(path, dict):
for log_path in path:
if isinstance(path[log_path], pathlib.Path):
path[log_path].mkdir(parents=True, exist_ok=True)
else:
raise TypeError(f"Not recognized entry {log_path} in configs")
else:
raise TypeError(f"Not recognized entry {path} in configs")
def check_folders(configs):
"""Check if all the folders spelled out in configs exist.
Parameters
----------
configs : dict
configs dictionary containing all the paths to be checked
Returns
-------
: CheckResult
object containing the result of the check
"""
wrong_confs = []
wrong_folders = {}
for key in NEEDED_KEYS:
if key not in configs["paths"]:
wrong_confs.append(key)
else:
if key in NEEDED_FILES:
continue
if not configs["paths"][key].exists():
wrong_folders[key] = configs["paths"][key]
if "logs" not in configs["paths"]:
print("WARNING: logs folder is not spelled out in the config file")
else:
wrong_folders["logs"] = {}
for key, folder in configs["paths"]["logs"].items():
if not folder.exists():
wrong_folders["logs"][key] = folder
return CheckResult(wrong_confs, wrong_folders)
| 2,534 | 29.914634 | 82 | py |
pineko | pineko-main/src/pineko/configs.py | """Tools related to the configuration file handling."""
import copy
import pathlib
import tomli
name = "pineko.toml"
"Name of the config file (wherever it is placed)"
# better to declare immediately the correct type
configs = {}
"Holds loaded configurations"
NEEDED_KEYS = [
"ymldb",
"operator_cards",
"grids",
"operator_card_template_name",
"theory_cards",
"fktables",
"ekos",
]
NEEDED_FILES = ["operator_card_template_name"]
def defaults(base_configs):
"""Provide additional defaults.
Parameters
----------
    base_configs : dict
user provided configuration
Returns
-------
configs_ : dict
enhanced configuration
Note
----
The general rule is to never replace user provided input.
"""
configs_ = copy.deepcopy(base_configs)
enhance_paths(configs_)
return configs_
def enhance_paths(configs_):
"""Check required path and enhance them with root path.
The changes are done inplace.
Parameters
----------
configs_ : dict
configuration
"""
# required keys without default
for key in NEEDED_KEYS:
if key not in configs_["paths"]:
raise ValueError(f"Configuration is missing a 'paths.{key}' key")
if key in NEEDED_FILES:
continue
if pathlib.Path(configs_["paths"][key]).anchor == "":
configs_["paths"][key] = configs_["paths"]["root"] / configs_["paths"][key]
else:
configs_["paths"][key] = pathlib.Path(configs_["paths"][key])
# optional keys which are by default None
if "logs" not in configs_["paths"]:
configs_["paths"]["logs"] = {}
for key in ["eko", "fk"]:
if key not in configs_["paths"]["logs"]:
configs_["paths"]["logs"][key] = None
elif pathlib.Path(configs_["paths"]["logs"][key]).anchor == "":
configs_["paths"]["logs"][key] = (
configs_["paths"]["root"] / configs_["paths"]["logs"][key]
)
else:
configs_["paths"]["logs"][key] = pathlib.Path(
configs_["paths"]["logs"][key]
)
def detect(path=None):
"""Autodetect configuration file path.
Parameters
----------
path : str or os.PathLike
user provided guess
Returns
-------
pathlib.Path :
file path
"""
# If a path is provided then we only look for the pineko.toml file there.
if path is not None:
path = pathlib.Path(path)
configs_file = path / name if path.is_dir() else path
if configs_file.is_file():
return configs_file
raise ValueError(
"Provided path is not pointing to (or does not contain) the pineko.toml file"
)
# If no path is provided we need to look after the file.
# We want to check cwd and all its parent folders (without their subfolders
# of course) up to root.
cwd_path = pathlib.Path.cwd().absolute()
paths = [cwd_path] + list(cwd_path.parents)
for p in paths:
configs_file = p / name
if configs_file.is_file():
return configs_file
raise FileNotFoundError("No configurations file detected.")
def load(path=None):
"""Load config file.
Parameters
----------
path : str or os.PathLike
file path
Returns
-------
loaded : dict
configuration dictionary
"""
path = detect(path)
with open(path, "rb") as fd:
loaded = tomli.load(fd)
if "paths" not in loaded:
loaded["paths"] = {}
if "root" not in loaded["paths"]:
loaded["paths"]["root"] = pathlib.Path(path).parent
return loaded
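# Minimal sketch of a configuration file consumed by this module (all folder
# and file names are illustrative; relative paths are anchored to the directory
# containing pineko.toml, which becomes ``paths.root``):
#     [paths]
#     ymldb = "data/yamldb"
#     grids = "data/grids"
#     operator_cards = "data/operator_cards"
#     operator_card_template_name = "template.yaml"
#     theory_cards = "data/theory_cards"
#     fktables = "data/fktables"
#     ekos = "data/ekos"
#     [paths.logs]
#     eko = "logs/eko"
#     fk = "logs/fk"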
| 3,700 | 23.509934 | 89 | py |
pineko | pineko-main/src/pineko/cli/gen_sv.py | """CLI entry point to generation of scale variations from central grid."""
import pathlib
import click
import pineappl
import rich
from .. import scale_variations
from ._base import command
@command.command("ren_sv_grid")
@click.argument("pineappl_path", type=click.Path(exists=True))
@click.argument("target_path", type=click.Path(exists=False))
@click.argument("max_as", type=int)
@click.argument("nf", type=int)
@click.argument("order_exists", type=bool)
def ren_sv_grid(pineappl_path, target_path, max_as, nf, order_exists):
"""Construct new grid with renormalization scale variations included."""
return_state = scale_variations.compute_ren_sv_grid(
pathlib.Path(pineappl_path),
max_as,
nf,
target_path=pathlib.Path(target_path),
order_exists=order_exists,
)
rich.print(return_state)
| 850 | 28.344828 | 76 | py |
pineko | pineko-main/src/pineko/cli/check.py | """CLI entry point to check compatibility."""
from dataclasses import dataclass
from enum import Enum
import click
import eko
import pineappl
import rich
from .. import check
from ._base import command
@command.group("check")
def subcommand():
"""Check grid and operator properties."""
@subcommand.command("compatibility")
@click.argument("grid_path", metavar="PINEAPPL", type=click.Path(exists=True))
@click.argument("operator_path", metavar="EKO", type=click.Path(exists=True))
@click.option("--xif", default=1.0, help="factorization scale variation")
@click.option("--max_as", type=int, default=5, help="Maximum order of alpha_s to check")
@click.option("--max_al", type=int, default=5, help="Maximum order of alpha to check")
def sub_compatibility(grid_path, operator_path, xif, max_as, max_al):
"""Check PineAPPL grid and EKO compatibility.
In order to be compatible, the grid provided in PINEAPPL and the operator
provided in EKO, have to expose the same x grid and Q2 grid.
XIF is the factorization scale variation.
max_as and max_al default to a very high value so the comparison is done at the level
of the entire grid, with no orders masked.
If only some orders are required the user must use the MAX_AS and MAX_AL flags.
"""
pineappl_grid = pineappl.grid.Grid.read(grid_path)
with eko.EKO.read(operator_path) as operators:
try:
check.check_grid_and_eko_compatible(
pineappl_grid, operators, xif, max_as, max_al
)
rich.print("[green]Success:[/] grids are compatible")
except ValueError as e:
rich.print("[red]Error:[/]", e)
@dataclass
class CouplingInfo:
"""Coupling known attributes, used to describe it."""
descr: str
theory: str
class Coupling(Enum):
"""Auxiliary class to list the possible couplings."""
AS = CouplingInfo("strong", "QCD")
AL = CouplingInfo("electromagnetic", "QED")
SCVAR_ERROR = "[red]Error:[/] grids do not contain"
SCVAR_MESSAGES = {
check.AvailableAtMax.BOTH: "[green]Success:[/] grids contain",
check.AvailableAtMax.CENTRAL: "[orange]Warning:[/] grids do not contain central order for requested",
check.AvailableAtMax.SCVAR: SCVAR_ERROR,
}
@subcommand.command("scvar")
@click.argument("grid_path", metavar="PINEAPPL", type=click.Path(exists=True))
@click.argument(
"scale",
metavar="SCALE",
type=click.Choice(list(el.name for el in check.Scale), case_sensitive=False),
)
@click.argument("max_as_order", metavar="AS_ORDER", type=int)
@click.argument("max_al_order", metavar="AL_ORDER", type=int)
def sub_scvar(grid_path, scale, max_as_order, max_al_order):
"""Check if PineAPPL grid contains requested scale variations for the requested order."""
grid = pineappl.grid.Grid.read(grid_path)
grid.optimize()
# Call the function
scaleobj = check.Scale[scale]
checkres, max_as_effective = check.contains_sv(
grid, max_as_order, max_al_order, scaleobj
)
# Communicate result
message = SCVAR_MESSAGES[checkres]
if not max_as_effective == max_as_order:
message = SCVAR_ERROR
descr = check.Scale[scale].value.description
cname = Coupling.AS.name.lower()
rich.print(f"{message} {descr} for {cname}")
| 3,289 | 31.574257 | 105 | py |
pineko | pineko-main/src/pineko/cli/opcard.py | """CLI entry point to the operator card generation."""
import pathlib
import click
import rich
import yaml
from .. import evolve
from ._base import command
@command.command("opcard")
@click.argument("pineappl-path", metavar="PINEAPPL", type=click.Path(exists=True))
@click.argument(
"default-card-path", metavar="DEFAULT_CARD", type=click.Path(exists=True)
)
@click.argument("thcard-path", metavar="THCARD", type=click.Path())
@click.argument("opcard-path", metavar="OPCARD", type=click.Path())
def subcommand(pineappl_path, default_card_path, thcard_path, opcard_path):
"""Write EKO card for PineAPPL grid.
Writes a copy of the default card from DEFAULT_CARD to OPCARD
with the adjusted x grid and Q2 grid read from PINEAPPL.
    A THCARD is required, since some of the EKO's OPCARD information comes from
the NNPDF theory entries (e.g. :math:`Q0`).
"""
tcard = yaml.safe_load(pathlib.Path(thcard_path).read_text(encoding="utf-8"))
_x_grid, q2_grid = evolve.write_operator_card_from_file(
pineappl_path, default_card_path, opcard_path, tcard
)
rich.print(
f"[green]Success:[/] Wrote card with {len(q2_grid)} Q2 points to {opcard_path}"
)
| 1,205 | 32.5 | 87 | py |
pineko | pineko-main/src/pineko/cli/convolute.py | """CLI entry point to convolution."""
import click
import eko
import pineappl
import rich
from .. import evolve
from ._base import command
@command.command("convolute")
@click.argument("grid_path", type=click.Path(exists=True))
@click.argument("op_path", type=click.Path(exists=True))
@click.argument("fktable", type=click.Path())
@click.argument("max_as", type=int)
@click.argument("max_al", type=int)
@click.option("--xir", default=1.0, help="renormalization scale variation")
@click.option("--xif", default=1.0, help="factorization scale variation")
@click.option(
"--pdf", default=None, help="if given, print comparison table", show_default=True
)
@click.option(
"--assumptions",
default="Nf6Ind",
help="the flavor assumptions to be used",
show_default=True,
)
def subcommand(grid_path, op_path, fktable, max_as, max_al, xir, xif, pdf, assumptions):
"""Convolute PineAPPL grid and EKO into an FK table.
    GRID_PATH and OP_PATH are the paths to the respective elements to convolute, and
FKTABLE is the path where to dump the output.
MAX_AS and MAX_AL are used to specify the order in QCD and QED
couplings (i.e. the maximum power allowed for each correction).
XIR and XIF represent the renormalization and factorization scale in the grid respectively.
ASSUMPTIONS represent the assumptions on the flavor dimension.
PDF is an optional PDF set compatible with the EKO to compare grid and FK table.
"""
grid = pineappl.grid.Grid.read(grid_path)
with eko.EKO.edit(op_path) as operators:
rich.print(
rich.panel.Panel.fit("Computing ...", style="magenta", box=rich.box.SQUARE),
f" {grid_path}\n",
f"+ {op_path}\n",
f"= {fktable}\n",
f"with max_as={max_as}, max_al={max_al}, xir={xir}, xif={xif}",
)
_grid, _fk, comp = evolve.evolve_grid(
grid,
operators,
fktable,
max_as,
max_al,
xir,
xif,
assumptions=assumptions,
comparison_pdf=pdf,
)
if comp:
print(comp.to_string())
| 2,158 | 32.215385 | 95 | py |
pineko | pineko-main/src/pineko/cli/compare.py | """CLI entry point to comparison grid vs. FK Table."""
import click
import pineappl
import rich
from .. import comparator
from ._base import command
@command.command("compare")
@click.argument("pineappl_path", type=click.Path(exists=True))
@click.argument("fktable_path", type=click.Path())
@click.argument("max_as", type=int)
@click.argument("max_al", type=int)
@click.argument("pdf", type=str)
@click.option("--xir", default=1.0, help="renormalization scale variation")
@click.option("--xif", default=1.0, help="factorization scale variation")
def subcommand(pineappl_path, fktable_path, max_as, max_al, pdf, xir, xif):
"""Compare process level PineAPPL grid and derived FK Table.
The comparison between the grid stored at PINEAPPL_PATH, and the FK table
stored at FKTABLE_PATH, is performed by convoluting both the grids with the PDF
set, evaluating its interpolation grid at the two different scales (thus
comparing the EKO evolution, with the one stored inside LHAPDF grid).
The comparison involves the orders in QCD and QED up to the maximum power
of the coupling corresponding respectively to MAX_AS and MAX_AL.
XIR and XIF represent the renormalization and factorization scale in the grid respectively.
"""
pine = pineappl.grid.Grid.read(pineappl_path)
fk = pineappl.fk_table.FkTable.read(fktable_path)
# Note that we need to cast to string before printing to avoid ellipsis ...
rich.print(comparator.compare(pine, fk, max_as, max_al, pdf, xir, xif).to_string())
| 1,529 | 42.714286 | 95 | py |
pineko | pineko-main/src/pineko/cli/_base.py | """Adds global CLI options."""
import click
CONTEXT_SETTINGS = dict(help_option_names=["-h", "--help"])
@click.group(context_settings=CONTEXT_SETTINGS)
def command():
"""pineko: Combine PineAPPL grids and EKOs into FK tables."""
| 237 | 20.636364 | 65 | py |
pineko | pineko-main/src/pineko/cli/__init__.py | """CLI entry point."""
from . import check, compare, convolute, gen_sv, kfactor, opcard, scaffold, theory_
from ._base import command
| 134 | 32.75 | 83 | py |
pineko | pineko-main/src/pineko/cli/theory_.py | """'theory' mode of CLI."""
import pathlib
import click
from .. import configs, theory
from ._base import command
@command.group("theory")
@click.option(
"-c",
"--configs",
"cfg",
default=None,
type=click.Path(resolve_path=True, path_type=pathlib.Path),
help="Explicitly specify config file (it has to be a valid TOML file).",
)
def theory_(cfg):
"""Iterate a subcommand on a given theory and list of datasets."""
path = configs.detect(cfg)
base_configs = configs.load(path)
configs.configs = configs.defaults(base_configs)
if cfg is not None:
print(f"Configurations loaded from '{path}'")
@theory_.command()
@click.argument("source_theory_id", type=click.INT)
@click.argument("target_theory_id", type=click.INT)
@click.argument("datasets", type=click.STRING, nargs=-1)
@click.option("--overwrite", is_flag=True, help="Allow files to be overwritten")
def inherit_grids(source_theory_id, target_theory_id, datasets, overwrite):
"""Inherit grids for datasets from one theory to another."""
theory.TheoryBuilder(source_theory_id, datasets, overwrite=overwrite).inherit_grids(
target_theory_id
)
@theory_.command()
@click.argument("theory_id", type=click.INT)
@click.argument("datasets", type=click.STRING, nargs=-1)
@click.option("--overwrite", is_flag=True, help="Allow files to be overwritten")
def opcards(theory_id, datasets, overwrite):
"""Write EKO card for all FK tables in all datasets."""
theory.TheoryBuilder(theory_id, datasets, overwrite=overwrite).opcards()
@theory_.command()
@click.argument("theory_id", type=click.INT)
@click.argument("datasets", type=click.STRING, nargs=-1)
@click.option("--silent", is_flag=True, help="Suppress logs")
@click.option(
"-cl",
"--clear-logs",
is_flag=True,
help="Erease previos logs (instead of appending)",
)
@click.option("--overwrite", is_flag=True, help="Allow files to be overwritten")
def ekos(theory_id, datasets, silent, clear_logs, overwrite):
"""Compute EKOs for all FK tables in all datasets."""
theory.TheoryBuilder(
theory_id, datasets, silent=silent, clear_logs=clear_logs, overwrite=overwrite
).ekos()
@theory_.command()
@click.argument("source_theory_id", type=click.INT)
@click.argument("target_theory_id", type=click.INT)
@click.argument("datasets", type=click.STRING, nargs=-1)
@click.option("--overwrite", is_flag=True, help="Allow files to be overwritten")
def inherit_ekos(source_theory_id, target_theory_id, datasets, overwrite):
"""Inherit ekos from one theory to another."""
theory.TheoryBuilder(source_theory_id, datasets, overwrite=overwrite).inherit_ekos(
target_theory_id
)
@theory_.command()
@click.argument("theory_id", type=click.INT)
@click.argument("datasets", type=click.STRING, nargs=-1)
@click.option("--pdf", "-p", default=None, help="PDF set used for comparison")
@click.option("--silent", is_flag=True, help="Suppress logs with comparison")
@click.option(
"-cl",
"--clear-logs",
is_flag=True,
help="Erease previos logs (instead of appending)",
)
@click.option("--overwrite", is_flag=True, help="Allow files to be overwritten")
def fks(theory_id, datasets, pdf, silent, clear_logs, overwrite):
"""Compute FK tables in all datasets."""
theory.TheoryBuilder(
theory_id, datasets, silent=silent, clear_logs=clear_logs, overwrite=overwrite
).fks(pdf)
@theory_.command()
@click.argument("theory_id", type=click.INT)
@click.argument("datasets", type=click.STRING, nargs=-1)
@click.option("--flavors", "-f", default=5, help="Number of active flavors")
def ren_sv_grids(theory_id, datasets, flavors):
"""Construct new grids with renormalization scale variations included."""
theory.TheoryBuilder(theory_id, datasets).construct_ren_sv_grids(flavors)
| 3,822 | 35.409524 | 88 | py |
pineko | pineko-main/src/pineko/cli/kfactor.py | """CLI entry point to generation of the inclusion of kfactor in a grid."""
import pathlib
import click
import rich
from .. import kfactor
from ._base import command
@command.command("kfactor")
@click.argument("grids_folder", type=click.Path(exists=True))
@click.argument("kfactor_folder", type=click.Path(exists=True))
@click.argument("yamldb_path", type=click.Path(exists=True))
@click.argument("target_folder", type=click.Path(exists=True))
@click.argument("max_as", type=int)
@click.argument("order_exists", type=bool)
def k_factor_inclusion(
grids_folder,
kfactor_folder,
yamldb_path,
target_folder,
max_as,
order_exists,
):
"""Construct new grid with k_factor included."""
grids_folder = pathlib.Path(grids_folder)
kfactor_folder = pathlib.Path(kfactor_folder)
yamldb_path = pathlib.Path(yamldb_path)
target_folder = pathlib.Path(target_folder)
kfactor.compute_k_factor_grid(
grids_folder,
kfactor_folder,
yamldb_path,
max_as,
target_folder=target_folder,
order_exists=order_exists,
)
| 1,095 | 26.4 | 74 | py |
pineko | pineko-main/src/pineko/cli/scaffold.py | """'scaffold' mode of CLI."""
import pathlib
import click
import rich
from .. import configs, scaffold
from ._base import command
@command.group("scaffold")
@click.option(
"-c",
"--configs",
"cfg",
default=None,
type=click.Path(resolve_path=True, path_type=pathlib.Path),
help="Explicitly specify config file (it has to be a valid TOML file).",
)
def scaffold_(cfg):
"""Manage folders needed for the project as spelled out in the configuration file."""
path = configs.detect(cfg)
base_configs = configs.load(path)
configs.configs = configs.defaults(base_configs)
if cfg is not None:
print(f"Configurations loaded from '{path}'")
@scaffold_.command()
def new():
"""Create all the folders to set up a new project."""
scaffold.set_up_project(configs.configs)
@scaffold_.command()
def check():
"""Check if all the configurations are correct."""
check_res = scaffold.check_folders(configs.configs)
if check_res.success:
rich.print("[green]Success:[/] All the folders are correctly configured.")
else:
rich.print("[red]Error:[/] Project is not correctly configured.")
for conf in check_res.confs:
rich.print(f"[red]Missing entry in conf file: '{conf}'")
for folder in check_res.folders.values():
if not isinstance(folder, dict):
rich.print(f"[red]Missing folder:\n{folder}")
else:
for log_key in folder:
rich.print(f"[red]Missing folder: \n{folder[log_key]}")
| 1,558 | 29.568627 | 89 | py |
pineko | pineko-main/tests/test_kfactor.py | import numpy as np
import pytest
from pineko import kfactor
class FakeAlpha:
def __init__(self, const_value):
self.const_value = const_value
def alphasQ2(self, q2):
return self.const_value
class FakeGrid:
def __init__(self, nbins):
self.nbins = nbins
def bins(self):
return self.nbins
def test_compute_scale_factor():
const_value = 0.01180
myfakealpha = FakeAlpha(const_value)
fake_kfactor = [1.1, 1.2, 1.3]
bin_index = 1
np.testing.assert_allclose(
kfactor.compute_scale_factor(
0,
[0, 0, 0, 0],
[1, 0, 0, 0],
5.0**2,
fake_kfactor,
bin_index,
myfakealpha,
False,
),
(1.0 / const_value) * (fake_kfactor[bin_index] - 1.0),
)
np.testing.assert_allclose(
kfactor.compute_scale_factor(
0,
[0, 0, 0, 0],
[2, 0, 0, 0],
5.0**2,
fake_kfactor,
bin_index,
myfakealpha,
False,
),
(1.0 / (const_value**2)) * (fake_kfactor[bin_index] - 1.0),
)
def test_filter_k_factors():
fakegrid = FakeGrid(3)
    # This is the case in which the kfactor length matches the number of bins
np.testing.assert_allclose(
kfactor.filter_k_factors(fakegrid, [1.0, 1.2, 1.3]), [1.0, 1.2, 1.3]
)
    # This is the case in which kfactor length > number of bins and kfactors are all the same
np.testing.assert_allclose(
kfactor.filter_k_factors(fakegrid, [1.1, 1.1, 1.1, 1.1, 1.1]),
[1.1, 1.1, 1.1, 1.1, 1.1],
)
    # This is the case in which kfactor length < number of bins and kfactors are all the same
np.testing.assert_allclose(
kfactor.filter_k_factors(fakegrid, [1.1, 1.1]), [1.1, 1.1, 1.1]
)
    # This is the case in which kfactor length < number of bins and kfactors are not all the same
np.testing.assert_allclose(
kfactor.filter_k_factors(fakegrid, [1.1, 1.3]), [0.0, 0.0, 0.0]
)
with pytest.raises(ValueError):
        # This is the case in which kfactor length > number of bins and kfactors are not all the same
kfactor.filter_k_factors(fakegrid, [1.1, 1.2, 1.1, 1.7, 1.1])
| 2,266 | 28.064103 | 101 | py |
pineko | pineko-main/tests/test_theory_card.py | import pineko.theory_card
def test_construct_assumptions():
fake_t_card = {
"Q0": 1.65,
"kcThr": 1.0,
"kbThr": 1.0,
"ktThr": 1.0,
"mc": 2.0,
"mb": 3.0,
"mt": 50.0,
"IC": 1,
}
assert pineko.theory_card.construct_assumptions(fake_t_card) == "Nf4Sym"
| 326 | 19.4375 | 76 | py |
pineko | pineko-main/tests/test_regression.py | """
Suite of tests that go through the entire process of creating a new fktable
from an empty folder.
The target theory is 400 and the relevant `.toml`, theory runcard and eko template
are downloaded from https://github.com/NNPDF/theories during this test, so this test
has the double effect of ensuring compatibility between both repositories.
"""
import itertools
from pathlib import Path
from subprocess import run
from urllib.request import urlretrieve
import numpy as np
import pytest
from eko.interpolation import XGrid
from eko.io.runcards import OperatorCard
from pineappl.fk_table import FkTable
from yaml import dump, safe_load
THEORIES_REPO = "https://raw.githubusercontent.com/NNPDF/theories/main"
THEORYID = 400
REGRESSION_ROOT = Path(__file__).parent / "regression_data"
def _download_resources(filename, tmp_path):
"""Download resources (filename) from the theories repo and put it in the same path
relative to THEORIES_REPO
"""
output_file = tmp_path / filename
output_file.parent.mkdir(exist_ok=True)
urlretrieve(f"{THEORIES_REPO}/{filename}", output_file)
return output_file
def _download_dataset(dataset, theoryid, tmp_path):
"""Download both the yaml file and all grids for a given dataset for a given theory"""
yaml_file = f"data/yamldb/{theoryid}/{dataset}.yaml"
_download_resources(yaml_file, tmp_path)
# However, the yaml file goes into the "root" of the yaml directory, move it there!
right_yaml = tmp_path / "data" / "yamldb" / f"{dataset}.yaml"
(tmp_path / yaml_file).rename(right_yaml)
# Download the relevant grids for this dataset
res = safe_load(right_yaml.read_text(encoding="utf-8"))
grids = list(itertools.chain(*res["operands"]))
for grid_name in grids:
_download_resources(f"data/grids/400/{grid_name}.pineappl.lz4", tmp_path)
return grids
class _FakePDF:
"""A Fake lhapdf-like PDF to evolve the grids"""
def __init__(self):
pids = np.arange(-6, 8)
pids[6] = 21
pids[-1] = 22
alphas = np.linspace(1.2, 1.8, len(pids))
betas = np.linspace(1.2, 3.8, len(pids))
self._alphas = dict(zip(pids, alphas))
self._betas = dict(zip(pids, betas))
def xfxQ2(self, pid, x, q2):
"""Compute x^alpha*(1-x)^beta"""
alpha = self._alphas[pid]
beta = self._betas[pid]
return np.power(x, alpha) * np.power(1 - x, beta)
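# Quick sanity check of the fake PDF above (a sketch, not part of the test run):
#   pdf = _FakePDF()
#   pdf.xfxQ2(21, 0.5, 10.0)   # gluon: 0.5**alpha_g * 0.5**beta_g, q2 is unused
# Any smooth deterministic x-shape would do: the regression only needs a fixed
# function to convolute the freshly produced FK tables with.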
def _trim_template(template_card, take_points=10):
"""Trim the template card so that the number of x-values to compute is much smaller"""
card_info = OperatorCard.from_dict(
safe_load(template_card.read_text(encoding="utf-8"))
)
original_x = card_info.xgrid
size = len(original_x.raw)
skip = int(size / take_points)
card_info.xgrid = XGrid(original_x.raw[:size:skip])
template_card.write_text(dump(card_info.raw), encoding="utf-8")
@pytest.mark.parametrize("dataset", ["LHCBWZMU8TEV", "INTEGXT3"])
def test_regression(tmp_path, dataset):
"""Run pineko through subprocess to ensure that the shell scripts are working exactly
as intended.
If the data does not exist, create it and fail the test, i.e., in order to regenerate
the data just remove the previous dataset.npy file
"""
# We start by downloading pineko.toml in order to generate the folder structure
_download_resources("pineko.toml", tmp_path)
# Which we create... now!
run(["pineko", "scaffold", "new"], cwd=tmp_path, check=True)
# Now download other necessary objects
_download_resources(f"data/theory_cards/{THEORYID}.yaml", tmp_path)
template_card = _download_resources(
f"data/operator_cards/{THEORYID}/_template.yaml", tmp_path
)
_trim_template(template_card)
# And use some small (but not trivial!) dataset to test
gridnames = _download_dataset(dataset, THEORYID, tmp_path)
# Now go, first with eko creation
run(
["pineko", "theory", "opcards", str(THEORYID), dataset],
cwd=tmp_path,
check=True,
)
run(["pineko", "theory", "ekos", str(THEORYID), dataset], cwd=tmp_path, check=True)
# Then FK Table production!
run(["pineko", "theory", "fks", str(THEORYID), dataset], cwd=tmp_path, check=True)
# Now loop over the grids and check the results of the convolution with the PDF
pdf = _FakePDF()
regression_path = REGRESSION_ROOT / f"{dataset}.npy"
result = []
for grid_name in gridnames:
fkt = FkTable.read(
tmp_path / "data" / "fktables" / str(THEORYID) / f"{grid_name}.pineappl.lz4"
)
result.append(fkt.convolute_with_one(2212, pdf.xfxQ2))
result = np.concatenate(result)
if not regression_path.exists():
np.save(regression_path, result)
raise FileNotFoundError("Regression did not exist and has been regenerated")
regression_data = np.load(regression_path)
np.testing.assert_allclose(regression_data, result)
| 4,979 | 35.617647 | 90 | py |
pineko | pineko-main/tests/test_check.py | import numpy as np
from pineappl.pineappl import PyOrder
import pineko.check
def test_islepton():
el = 21
assert pineko.check.islepton(el) == False
el = -13
assert pineko.check.islepton(el) == True
def test_in1d():
to_check = np.array([0.3])
against_this = np.array(
[1, 2, 0.3, 90, 67, 10.0e-10, 0.00002, 12567, 1729291, 10.0e-7]
)
checked = pineko.check.in1d(to_check, against_this)
assert checked == np.array([True])
def test_is_fonll_b():
fns = "FONLL-B"
lumi_first = [[(-12, 1, 2.0), (-13, 1, 5.0)]]
lumi_second = [[(1, 11, 1.0), (3, 11, 5.0)]]
assert pineko.check.is_fonll_b(fns, lumi_first) is True
assert pineko.check.is_fonll_b(fns, lumi_second) is True
lumi_crazy = [[(1, 1, 4.0), (2, 11, 3.0)]]
assert pineko.check.is_fonll_b(fns, lumi_crazy) is False
fns = "FONLL-C"
assert pineko.check.is_fonll_b(fns, lumi_first) is False
assert pineko.check.is_fonll_b(fns, lumi_second) is False
assert pineko.check.is_fonll_b(fns, lumi_crazy) is False
class Fake_grid:
def __init__(self, order_list):
self.orderlist = order_list
def orders(self):
return self.orderlist
class Order:
def __init__(self, order_tup):
self.orders = order_tup
self._raw = PyOrder(order_tup[0], order_tup[1], order_tup[2], order_tup[3])
def as_tuple(self):
return self.orders
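# The 4-tuples fed to Order/PyOrder follow pineappl's ordering -- presumably
# (alpha_s power, alpha power, log(xi_r) power, log(xi_f) power) -- which is at
# least how the tests below read them: a non-zero fourth entry marks a
# factorisation-scale-variation term, a non-zero third entry a
# renormalisation-scale-variation term.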
def test_contains_fact():
max_as = 2
max_al = 1
first_order = Order((0, 0, 0, 0))
second_order = Order((1, 0, 0, 0))
third_order = Order((1, 0, 0, 1))
order_list = [first_order, second_order, third_order]
mygrid = Fake_grid(order_list)
checkres, max_as_effective = pineko.check.contains_sv(
mygrid, max_as, max_al, pineko.check.Scale.FACT
)
assert checkres is pineko.check.AvailableAtMax.BOTH
assert max_as_effective == max_as
order_list.pop(-1)
mygrid_nofact = Fake_grid(order_list)
checkres, max_as_effective = pineko.check.contains_sv(
mygrid_nofact, max_as, max_al, pineko.check.Scale.FACT
)
assert checkres is pineko.check.AvailableAtMax.CENTRAL
assert max_as_effective == max_as
checkres, max_as_effective = pineko.check.contains_sv(
mygrid_nofact, max_as - 1, max_al, pineko.check.Scale.FACT
)
assert checkres is pineko.check.AvailableAtMax.BOTH
assert max_as_effective == max_as - 1
order_list.pop(-1)
mygrid_LO = Fake_grid(order_list)
checkres, max_as_effective = pineko.check.contains_sv(
mygrid_LO, max_as, max_al, pineko.check.Scale.FACT
)
assert checkres is pineko.check.AvailableAtMax.BOTH
assert max_as_effective == max_as - 1
order_list = [first_order, third_order]
mygrid = Fake_grid(order_list)
checkres, max_as_effective = pineko.check.contains_sv(
mygrid, max_as, max_al, pineko.check.Scale.FACT
)
assert checkres is pineko.check.AvailableAtMax.SCVAR
assert max_as_effective == max_as
def test_contains_ren():
max_as = 3
max_al = 0
first_order = Order((0, 0, 0, 0))
second_order = Order((1, 0, 0, 0))
third_order = Order((2, 0, 1, 0))
order_list = [first_order, second_order, third_order]
mygrid = Fake_grid(order_list)
checkres, max_as_effective = pineko.check.contains_sv(
mygrid, max_as, max_al, pineko.check.Scale.REN
)
assert checkres is pineko.check.AvailableAtMax.SCVAR
assert max_as_effective == max_as
order_list.pop(-1)
mygrid_new = Fake_grid(order_list)
checkres, max_as_effective = pineko.check.contains_sv(
mygrid_new, max_as, max_al, pineko.check.Scale.REN
)
assert checkres is pineko.check.AvailableAtMax.BOTH
assert max_as_effective == max_as - 1
order_list.append(Order((2, 0, 0, 0)))
mygrid_noren = Fake_grid(order_list)
checkres, max_as_effective = pineko.check.contains_sv(
mygrid_noren, max_as, max_al, pineko.check.Scale.REN
)
assert checkres is pineko.check.AvailableAtMax.CENTRAL
assert max_as_effective == max_as
checkres, max_as_effective = pineko.check.contains_sv(
mygrid_noren, max_as - 1, max_al, pineko.check.Scale.REN
)
assert checkres is pineko.check.AvailableAtMax.BOTH
assert max_as_effective == max_as - 1
order_list.pop(0)
mygrid_noren = Fake_grid(order_list)
checkres, max_as_effective = pineko.check.contains_sv(
mygrid_noren, max_as, max_al, pineko.check.Scale.REN
)
assert checkres is pineko.check.AvailableAtMax.CENTRAL
assert max_as_effective == max_as - 1
| 4,576 | 33.156716 | 83 | py |
pineko | pineko-main/tests/test_scaffold.py | import pytest
from pineko import configs, scaffold
def test_set_up_project(tmp_path, wrong_fake_configs, fake_configs_incomplete):
with pytest.raises(TypeError):
scaffold.set_up_project(wrong_fake_configs)
scaffold.set_up_project(fake_configs_incomplete)
assert (tmp_path / "data/ymldb").exists()
assert (tmp_path / "logs/eko").exists()
def test_check_folder(fake_configs_incomplete, fake_configs):
# we may fail because we use a wrong config ...
scaffold.set_up_project(fake_configs_incomplete)
incomplete_check = scaffold.check_folders(fake_configs_incomplete)
assert incomplete_check.success == False
assert [
"operator_cards",
"grids",
"operator_card_template_name",
"theory_cards",
"fktables",
"ekos",
] == incomplete_check.confs
    # or because we didn't set things up properly ...
fake_check = scaffold.check_folders(fake_configs)
assert fake_check.success == False
assert len(fake_check.confs) == 0
for key in fake_check.folders:
if not isinstance(fake_check.folders[key], dict):
assert key in configs.NEEDED_KEYS
    # but if we use our own set-up function, the check has to succeed.
scaffold.set_up_project(fake_configs)
assert scaffold.check_folders(fake_configs).success == True
| 1,314 | 34.540541 | 79 | py |
pineko | pineko-main/tests/conftest.py | import pytest
@pytest.fixture
def wrong_fake_configs(tmp_path):
"""This configs are wrong because under logs/fk there is a list and not a string."""
wrong_fake_configs = {
"paths": {
"ymldb": tmp_path / "data" / "ymldb",
"logs": {"eko": tmp_path / "logs" / "eko", "fk": ["something", "wrong"]},
},
"root": tmp_path,
}
return wrong_fake_configs
@pytest.fixture
def fake_configs_incomplete(tmp_path):
"This configs are incomplete because we are missing for instance the ekos and fktables keys."
fake_configs_incomplete = {
"paths": {
"ymldb": tmp_path / "data" / "ymldb",
"logs": {"eko": tmp_path / "logs" / "eko"},
},
"root": tmp_path,
}
return fake_configs_incomplete
@pytest.fixture
def fake_configs(tmp_path):
fake_configs = {
"paths": {
"ymldb": tmp_path / "data" / "ymldb",
"operator_cards": tmp_path / "data" / "operator_cards",
"grids": tmp_path / "data" / "grids",
"operator_card_template_name": "_template.yaml",
"theory_cards": tmp_path / "data" / "theory_cards",
"fktables": tmp_path / "data" / "fktables",
"ekos": tmp_path / "data" / "ekos",
"logs": {"eko": tmp_path / "logs" / "eko"},
},
"root": tmp_path,
}
return fake_configs
| 1,406 | 29.586957 | 97 | py |
pineko | pineko-main/tests/test_evolve.py | import pytest
import pineko.evolve
def test_sv_scheme():
wrong_tcard = {"XIF": 1.0, "ModSV": "expanded"}
schemeA_tcard = {
"XIF": 2.0,
"ModSV": "exponentiated",
}
schemeB_tcard = {"XIF": 0.5, "ModSV": "expanded"}
schemeC_tcard = {"XIF": 2.0, "ModSV": None}
with pytest.raises(ValueError):
pineko.evolve.sv_scheme(wrong_tcard)
assert pineko.evolve.sv_scheme(schemeA_tcard) == "exponentiated"
assert pineko.evolve.sv_scheme(schemeB_tcard) == "expanded"
assert pineko.evolve.sv_scheme(schemeC_tcard) is None
| 568 | 28.947368 | 68 | py |
pineko | pineko-main/tests/test_scale_variations.py | import numpy as np
from eko.beta import beta_qcd
from pineko import scale_variations
def test_ren_sv_coeffs():
np.testing.assert_allclose(
scale_variations.ren_sv_coeffs(m=0, max_as=0, logpart=0, which_part=0, nf=5), 0
)
np.testing.assert_allclose(
scale_variations.ren_sv_coeffs(m=0, max_as=1, logpart=1, which_part=0, nf=5), 0
)
res_nf5 = scale_variations.ren_sv_coeffs(
m=1, max_as=1, logpart=1, which_part=0, nf=5
)
res_nf4 = scale_variations.ren_sv_coeffs(
m=1, max_as=1, logpart=1, which_part=0, nf=4
)
np.testing.assert_allclose(
res_nf5 / res_nf4, beta_qcd((2, 0), 5) / beta_qcd((2, 0), 4)
)
exp_res = beta_qcd((2, 0), 5) * (1.0 / (4.0 * np.pi))
np.testing.assert_allclose(
scale_variations.ren_sv_coeffs(m=0, max_as=2, logpart=1, which_part=1, nf=5),
exp_res,
)
np.testing.assert_allclose(
scale_variations.ren_sv_coeffs(m=0, max_as=2, logpart=2, which_part=0, nf=5),
0.0,
)
def test_requirements():
m = 0
max_as = 1
# In this case we expect only one necessary order
exp_to_compute_ord = (1, 0, 1, 0)
exp_nec_order = (0, 0, 0, 0)
assert scale_variations.requirements(m, max_as, 0)[exp_to_compute_ord] == [
exp_nec_order
]
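# In words (as encoded by the assertion above): to build the O(alpha_s)
# renormalisation-log order (1, 0, 1, 0), the only grid order that already has
# to exist is the central LO one, (0, 0, 0, 0).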
| 1,304 | 29.348837 | 87 | py |
pineko | pineko-main/tests/test_configs.py | import pathlib
import pytest
import pineko
def test_enhance_paths():
# Testing with one missing key
test_configs = {
"paths": {
"ymldb": pathlib.Path(""),
"grids": pathlib.Path(""),
"theory_cards": pathlib.Path(""),
"fktables": pathlib.Path(""),
"ekos": pathlib.Path(""),
"root": pathlib.Path("/my/root/path/"),
"logs": {"fk": pathlib.Path("my/fk/logs/")},
},
}
with pytest.raises(ValueError):
pineko.configs.enhance_paths(test_configs)
test_configs["paths"]["operator_cards"] = pathlib.Path("my/ope/cards/")
test_configs["paths"]["operator_card_template_name"] = "_template.yaml"
pineko.configs.enhance_paths(test_configs)
assert test_configs["paths"]["operator_cards"] == pathlib.Path(
"/my/root/path/my/ope/cards/"
)
assert test_configs["paths"]["logs"]["eko"] is None
assert test_configs["paths"]["logs"]["fk"] == pathlib.Path(
"/my/root/path/my/fk/logs/"
)
assert test_configs["paths"]["operator_card_template_name"] == "_template.yaml"
def test_default():
test_configs = {
"paths": {
"ymldb": pathlib.Path(""),
"grids": pathlib.Path(""),
"operator_cards": pathlib.Path("my/ope/cards/"),
"operator_card_template_name": "_template.yaml",
"theory_cards": pathlib.Path(""),
"fktables": pathlib.Path(""),
"ekos": pathlib.Path(""),
"root": pathlib.Path("/my/root/path/"),
"logs": {"fk": pathlib.Path("my/fk/logs/")},
},
}
configs = pineko.configs.defaults(test_configs)
assert configs["paths"]["ymldb"] == pathlib.Path("/my/root/path")
| 1,758 | 32.826923 | 83 | py |
pineko | pineko-main/docs/source/conf.py | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
import pathlib
import pineko
here = pathlib.Path(__file__).absolute().parent
# -- Project information -----------------------------------------------------
project = "pineko"
copyright = "2023, the PineLine team"
author = "the PineLine team"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.doctest",
"sphinx.ext.intersphinx",
"sphinx.ext.todo",
"sphinx.ext.coverage",
"sphinx.ext.mathjax",
"sphinx.ext.ifconfig",
"sphinx.ext.viewcode",
"sphinx.ext.autosectionlabel",
"sphinxcontrib.bibtex",
"sphinx.ext.napoleon",
"sphinx.ext.graphviz",
"sphinx.ext.extlinks",
]
autosectionlabel_prefix_document = True
# autosectionlabel_maxdepth = 10
# Allow to embed rst syntax in markdown files.
enable_eval_rst = True
# The master toctree document.
master_doc = "index"
bibtex_bibfiles = ["refs.bib"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = {
".rst": "restructuredtext",
".txt": "restructuredtext",
}
use_index = True
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["shared/*"]
# A string to be included at the beginning of all files
shared = here / "shared"
rst_prolog = "\n".join(
[x.read_text(encoding="utf-8") for x in pathlib.Path(shared).glob("*.rst")]
)
extlinks = {
"yadism": ("https://n3pdf.github.io/yadism/%s", "yadism"),
"banana": ("https://n3pdf.github.io/banana/%s", "banana"),
"pineappl": ("https://n3pdf.github.io/pineappl/%s", "pineappl"),
"eko": ("https://github.com/N3PDF/eko/%s", "eko"),
}
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
html_css_files = [
"site.css",
]
| 3,189 | 30.27451 | 79 | py |
L0Learn | L0Learn-master/vignettes/profile/L0Learn_Profile_Run.py | import os
import pandas as pd
from mprof import read_mprofile_file
CMD_BASE = "mprof run -o {o}.dat Rscript L0Learn_Profile.R --n {n} --p {p} --k {k} --s {s} --t {t} --w {w} --m {m} --f {f}"
file_name = 'test_run3'
run = {"n":1000, "p":10000, "k":10, "s":1, "t":2.1, "w":4, "m":1, "f":file_name, "o":file_name}
cmd = CMD_BASE.format(**run)
os.system(cmd) # Creates <file_name>.dat and <file_name>.csv files in same directory as file.
# This cmd will often error out for no reason.
# https://github.com/pythonprofilers/memory_profiler/issues/240
memory_usage = read_mprofile_file(file_name + ".dat")
timing = pd.read_csv(file_name + ".csv")
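# Minimal follow-up sketch (not part of the profiling run itself): `timing` is a
# pandas DataFrame holding whatever L0Learn_Profile.R wrote to the csv, and
# `memory_usage` is the trace parsed from the .dat file, so e.g.
#   print(timing.head())
# is enough to confirm that both artifacts were actually produced.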
| 648 | 31.45 | 123 | py |
Latent-Spectral-Models | Latent-Spectral-Models-main/exp_elas.py | import torch.nn.functional as F
import matplotlib.pyplot as plt
from timeit import default_timer
from utils.utilities3 import *
from utils.adam import Adam
from utils.params import get_args
from model_dict import get_model
import math
import os
torch.manual_seed(0)
np.random.seed(0)
torch.cuda.manual_seed(0)
torch.backends.cudnn.deterministic = True
################################################################
# configs
################################################################
args = get_args()
PATH_Sigma = os.path.join(args.data_path, './Meshes/Random_UnitCell_sigma_10.npy')
PATH_XY = os.path.join(args.data_path, './Meshes/Random_UnitCell_XY_10.npy')
PATH_rr = os.path.join(args.data_path, './Meshes/Random_UnitCell_rr_10.npy')
ntrain = args.ntrain
ntest = args.ntest
N = args.ntotal
in_channels = args.in_dim
out_channels = args.out_dim
batch_size = args.batch_size
learning_rate = args.learning_rate
epochs = args.epochs
step_size = args.step_size
gamma = args.gamma
model_save_path = args.model_save_path
model_save_name = args.model_save_name
################################################################
# models
################################################################
model, model_iphi = get_model(args)
print(count_params(model), count_params(model_iphi))
params = list(model.parameters()) + list(model_iphi.parameters())
################################################################
# load data and data normalization
################################################################
input_rr = np.load(PATH_rr)
input_rr = torch.tensor(input_rr, dtype=torch.float).permute(1, 0)
input_s = np.load(PATH_Sigma)
input_s = torch.tensor(input_s, dtype=torch.float).permute(1, 0).unsqueeze(-1)
input_xy = np.load(PATH_XY)
input_xy = torch.tensor(input_xy, dtype=torch.float).permute(2, 0, 1)
train_rr = input_rr[:ntrain]
test_rr = input_rr[-ntest:]
train_s = input_s[:ntrain]
test_s = input_s[-ntest:]
train_xy = input_xy[:ntrain]
test_xy = input_xy[-ntest:]
print(train_rr.shape, train_s.shape, train_xy.shape)
train_loader = torch.utils.data.DataLoader(torch.utils.data.TensorDataset(train_rr, train_s, train_xy),
batch_size=batch_size,
shuffle=True)
test_loader = torch.utils.data.DataLoader(torch.utils.data.TensorDataset(test_rr, test_s, test_xy),
batch_size=batch_size,
shuffle=False)
################################################################
# training and evaluation
################################################################
optimizer = Adam(params, lr=learning_rate, weight_decay=1e-4)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=step_size, gamma=gamma)
myloss = LpLoss(size_average=False)
N_sample = 1000
for ep in range(epochs):
model.train()
t1 = default_timer()
train_l2 = 0
for rr, sigma, mesh in train_loader:
rr, sigma, mesh = rr.cuda(), sigma.cuda(), mesh.cuda()
samples_x = torch.rand(batch_size, N_sample, 2).cuda() * 3 - 1
optimizer.zero_grad()
out = model(mesh, code=rr, iphi=model_iphi)
samples_xi = model_iphi(samples_x, code=rr)
loss_data = myloss(out.view(batch_size, -1), sigma.view(batch_size, -1))
loss = loss_data
loss.backward()
optimizer.step()
train_l2 += loss_data.item()
scheduler.step()
model.eval()
test_l2 = 0.0
with torch.no_grad():
for rr, sigma, mesh in test_loader:
rr, sigma, mesh = rr.cuda(), sigma.cuda(), mesh.cuda()
out = model(mesh, code=rr, iphi=model_iphi)
test_l2 += myloss(out.view(batch_size, -1), sigma.view(batch_size, -1)).item()
train_l2 /= ntrain
test_l2 /= ntest
t2 = default_timer()
print(ep, t2 - t1, train_l2, test_l2)
if ep % step_size == 0:
if not os.path.exists(model_save_path):
os.makedirs(model_save_path)
print('save model')
torch.save(model.state_dict(), os.path.join(model_save_path, model_save_name)) | 4,143 | 33.823529 | 103 | py |
Latent-Spectral-Models | Latent-Spectral-Models-main/exp_airfoils.py | import torch.nn.functional as F
import matplotlib.pyplot as plt
from timeit import default_timer
from utils.utilities3 import *
from utils.adam import Adam
from utils.params import get_args
from model_dict import get_model
import math
import os
torch.manual_seed(0)
np.random.seed(0)
torch.cuda.manual_seed(0)
torch.backends.cudnn.deterministic = True
################################################################
# configs
################################################################
args = get_args()
INPUT_X = os.path.join(args.data_path, './naca/NACA_Cylinder_X.npy')
INPUT_Y = os.path.join(args.data_path, './naca/NACA_Cylinder_Y.npy')
OUTPUT_Sigma = os.path.join(args.data_path, './naca/NACA_Cylinder_Q.npy')
ntrain = args.ntrain
ntest = args.ntest
N = args.ntotal
in_channels = args.in_dim
out_channels = args.out_dim
r1 = args.h_down
r2 = args.w_down
s1 = int(((args.h - 1) / r1) + 1)
s2 = int(((args.w - 1) / r2) + 1)
batch_size = args.batch_size
learning_rate = args.learning_rate
epochs = args.epochs
step_size = args.step_size
gamma = args.gamma
model_save_path = args.model_save_path
model_save_name = args.model_save_name
################################################################
# models
################################################################
model = get_model(args)
print(count_params(model))
################################################################
# load data and data normalization
################################################################
inputX = np.load(INPUT_X)
inputX = torch.tensor(inputX, dtype=torch.float)
inputY = np.load(INPUT_Y)
inputY = torch.tensor(inputY, dtype=torch.float)
input = torch.stack([inputX, inputY], dim=-1)
output = np.load(OUTPUT_Sigma)[:, 4]
output = torch.tensor(output, dtype=torch.float)
print(input.shape, output.shape)
x_train = input[:ntrain, ::r1, ::r2][:, :s1, :s2]
y_train = output[:ntrain, ::r1, ::r2][:, :s1, :s2]
x_test = input[ntrain:ntrain + ntest, ::r1, ::r2][:, :s1, :s2]
y_test = output[ntrain:ntrain + ntest, ::r1, ::r2][:, :s1, :s2]
x_train = x_train.reshape(ntrain, s1, s2, 2)
x_test = x_test.reshape(ntest, s1, s2, 2)
train_loader = torch.utils.data.DataLoader(torch.utils.data.TensorDataset(x_train, y_train), batch_size=batch_size,
shuffle=True)
test_loader = torch.utils.data.DataLoader(torch.utils.data.TensorDataset(x_test, y_test), batch_size=batch_size,
shuffle=False)
################################################################
# training and evaluation
################################################################
optimizer = Adam(model.parameters(), lr=learning_rate, weight_decay=1e-4)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=step_size, gamma=gamma)
myloss = LpLoss(size_average=False)
for ep in range(epochs):
model.train()
t1 = default_timer()
train_l2 = 0
for x, y in train_loader:
x, y = x.cuda(), y.cuda()
optimizer.zero_grad()
out = model(x)
loss = myloss(out.view(batch_size, -1), y.view(batch_size, -1))
loss.backward()
optimizer.step()
train_l2 += loss.item()
scheduler.step()
model.eval()
test_l2 = 0.0
with torch.no_grad():
for x, y in test_loader:
x, y = x.cuda(), y.cuda()
out = model(x)
test_l2 += myloss(out.view(batch_size, -1), y.view(batch_size, -1)).item()
train_l2 /= ntrain
test_l2 /= ntest
t2 = default_timer()
print(ep, t2 - t1, train_l2, test_l2)
# plot
if ep % step_size == 0:
if not os.path.exists(model_save_path):
os.makedirs(model_save_path)
print('save model')
torch.save(model.state_dict(), os.path.join(model_save_path, model_save_name))
ind = -1
X = x[ind, :, :, 0].squeeze().detach().cpu().numpy()
Y = x[ind, :, :, 1].squeeze().detach().cpu().numpy()
truth = y[ind].squeeze().detach().cpu().numpy()
pred = out[ind].squeeze().detach().cpu().numpy()
nx = 40 // r1
ny = 20 // r2
X_small = X[nx:-nx, :ny]
Y_small = Y[nx:-nx, :ny]
truth_small = truth[nx:-nx, :ny]
pred_small = pred[nx:-nx, :ny]
fig, ax = plt.subplots(nrows=3, ncols=2, figsize=(16, 16))
ax[0, 0].pcolormesh(X, Y, truth, shading='gouraud')
ax[1, 0].pcolormesh(X, Y, pred, shading='gouraud')
ax[2, 0].pcolormesh(X, Y, pred - truth, shading='gouraud')
ax[0, 1].pcolormesh(X_small, Y_small, truth_small, shading='gouraud')
ax[1, 1].pcolormesh(X_small, Y_small, pred_small, shading='gouraud')
ax[2, 1].pcolormesh(X_small, Y_small, np.abs(pred_small - truth_small), shading='gouraud')
fig.show()
| 4,794 | 32.767606 | 115 | py |
Latent-Spectral-Models | Latent-Spectral-Models-main/exp_elas_interp.py | import torch.nn.functional as F
import matplotlib.pyplot as plt
from timeit import default_timer
from utils.utilities3 import *
from utils.adam import Adam
from utils.params import get_args
from model_dict import get_model
import math
import os
torch.manual_seed(0)
np.random.seed(0)
torch.cuda.manual_seed(0)
torch.backends.cudnn.deterministic = True
################################################################
# configs
################################################################
args = get_args()
INPUT_PATH = os.path.join(args.data_path, './Interp/Random_UnitCell_mask_10_interp.npy')
OUTPUT_PATH = os.path.join(args.data_path, './Interp/Random_UnitCell_sigma_10_interp.npy')
ntrain = args.ntrain
ntest = args.ntest
N = args.ntotal
in_channels = args.in_dim
out_channels = args.out_dim
r1 = args.h_down
r2 = args.w_down
s1 = int(((args.h - 1) / r1) + 1)
s2 = int(((args.w - 1) / r2) + 1)
batch_size = args.batch_size
learning_rate = args.learning_rate
epochs = args.epochs
step_size = args.step_size
gamma = args.gamma
model_save_path = args.model_save_path
model_save_name = args.model_save_name
################################################################
# models
################################################################
model = get_model(args)
print(count_params(model))
################################################################
# load data and data normalization
################################################################
input = np.load(INPUT_PATH)
input = torch.tensor(input, dtype=torch.float).permute(2, 0, 1)
output = np.load(OUTPUT_PATH)
output = torch.tensor(output, dtype=torch.float).permute(2, 0, 1)
x_train = input[:N][:ntrain, ::r1, ::r2][:, :s1, :s2]
y_train = output[:N][:ntrain, ::r1, ::r2][:, :s1, :s2]
x_test = input[:N][-ntest:, ::r1, ::r2][:, :s1, :s2]
y_test = output[:N][-ntest:, ::r1, ::r2][:, :s1, :s2]
x_train = x_train.reshape(ntrain, s1, s2, 1)
x_test = x_test.reshape(ntest, s1, s2, 1)
train_loader = torch.utils.data.DataLoader(torch.utils.data.TensorDataset(x_train, y_train), batch_size=batch_size,
shuffle=True)
test_loader = torch.utils.data.DataLoader(torch.utils.data.TensorDataset(x_test, y_test), batch_size=batch_size,
shuffle=False)
################################################################
# training and evaluation
################################################################
optimizer = Adam(model.parameters(), lr=learning_rate, weight_decay=1e-4)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=step_size, gamma=gamma)
myloss = LpLoss(size_average=False)
for ep in range(epochs):
model.train()
t1 = default_timer()
train_l2 = 0
for x, y in train_loader:
x, y = x.cuda(), y.cuda()
mask = x.clone()
optimizer.zero_grad()
out = model(x)
out = out * mask
loss = myloss(out.view(batch_size, -1), y.view(batch_size, -1))
loss.backward()
optimizer.step()
train_l2 += loss.item()
scheduler.step()
model.eval()
test_l2 = 0.0
with torch.no_grad():
for x, y in test_loader:
x, y = x.cuda(), y.cuda()
mask = x.clone()
out = model(x)
out2 = out * mask
test_l2 += myloss(out2.view(batch_size, -1), y.view(batch_size, -1)).item()
train_l2 /= ntrain
test_l2 /= ntest
t2 = default_timer()
print(ep, t2 - t1, train_l2, test_l2)
if ep % step_size == 0:
if not os.path.exists(model_save_path):
os.makedirs(model_save_path)
print('save model')
torch.save(model.state_dict(), os.path.join(model_save_path, model_save_name)) | 3,753 | 30.283333 | 115 | py |
Latent-Spectral-Models | Latent-Spectral-Models-main/exp_pipe.py | import torch.nn.functional as F
import matplotlib.pyplot as plt
from timeit import default_timer
from utils.utilities3 import *
from utils.adam import Adam
from utils.params import get_args
from model_dict import get_model
import math
import os
torch.manual_seed(0)
np.random.seed(0)
torch.cuda.manual_seed(0)
torch.backends.cudnn.deterministic = True
################################################################
# configs
################################################################
args = get_args()
INPUT_X = os.path.join(args.data_path, 'Pipe_X.npy')
INPUT_Y = os.path.join(args.data_path, 'Pipe_Y.npy')
OUTPUT_Sigma = os.path.join(args.data_path, 'Pipe_Q.npy')
ntrain = args.ntrain
ntest = args.ntest
N = args.ntotal
in_channels = args.in_dim
out_channels = args.out_dim
r1 = args.h_down
r2 = args.w_down
s1 = int(((args.h - 1) / r1) + 1)
s2 = int(((args.w - 1) / r2) + 1)
batch_size = args.batch_size
learning_rate = args.learning_rate
epochs = args.epochs
step_size = args.step_size
gamma = args.gamma
model_save_path = args.model_save_path
model_save_name = args.model_save_name
################################################################
# models
################################################################
model = get_model(args)
print(count_params(model))
################################################################
# load data and data normalization
################################################################
inputX = np.load(INPUT_X)
inputX = torch.tensor(inputX, dtype=torch.float)
inputY = np.load(INPUT_Y)
inputY = torch.tensor(inputY, dtype=torch.float)
input = torch.stack([inputX, inputY], dim=-1)
output = np.load(OUTPUT_Sigma)[:, 0]
output = torch.tensor(output, dtype=torch.float)
x_train = input[:N][:ntrain, ::r1, ::r2][:, :s1, :s2]
y_train = output[:N][:ntrain, ::r1, ::r2][:, :s1, :s2]
x_test = input[:N][-ntest:, ::r1, ::r2][:, :s1, :s2]
y_test = output[:N][-ntest:, ::r1, ::r2][:, :s1, :s2]
x_train = x_train.reshape(ntrain, s1, s2, 2)
x_test = x_test.reshape(ntest, s1, s2, 2)
train_loader = torch.utils.data.DataLoader(torch.utils.data.TensorDataset(x_train, y_train), batch_size=batch_size,
shuffle=True)
test_loader = torch.utils.data.DataLoader(torch.utils.data.TensorDataset(x_test, y_test), batch_size=batch_size,
shuffle=False)
################################################################
# training and evaluation
################################################################
optimizer = Adam(model.parameters(), lr=learning_rate, weight_decay=1e-4)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=step_size, gamma=gamma)
myloss = LpLoss(size_average=False)
for ep in range(epochs):
model.train()
t1 = default_timer()
train_l2 = 0
for x, y in train_loader:
x, y = x.cuda(), y.cuda()
optimizer.zero_grad()
out = model(x)
loss = myloss(out.view(batch_size, -1), y.view(batch_size, -1))
loss.backward()
optimizer.step()
train_l2 += loss.item()
scheduler.step()
model.eval()
test_l2 = 0.0
with torch.no_grad():
for x, y in test_loader:
x, y = x.cuda(), y.cuda()
out = model(x)
test_l2 += myloss(out.view(batch_size, -1), y.view(batch_size, -1)).item()
train_l2 /= ntrain
test_l2 /= ntest
t2 = default_timer()
print(ep, t2 - t1, train_l2, test_l2)
if ep % step_size == 0:
if not os.path.exists(model_save_path):
os.makedirs(model_save_path)
print('save model')
torch.save(model, os.path.join(model_save_path, model_save_name))
X = x[0, :, :, 0].squeeze().detach().cpu().numpy()
Y = x[0, :, :, 1].squeeze().detach().cpu().numpy()
truth = y[0].squeeze().detach().cpu().numpy()
pred = out[0].squeeze().detach().cpu().numpy()
fig, ax = plt.subplots(nrows=3, figsize=(16, 16))
ax[0].pcolormesh(X, Y, truth, shading='gouraud')
ax[1].pcolormesh(X, Y, pred, shading='gouraud')
ax[2].pcolormesh(X, Y, pred - truth, shading='gouraud')
fig.show()
| 4,190 | 31.238462 | 115 | py |
Latent-Spectral-Models | Latent-Spectral-Models-main/exp_darcy.py | import torch.nn.functional as F
import matplotlib.pyplot as plt
from timeit import default_timer
from utils.utilities3 import *
from utils.adam import Adam
from utils.params import get_args
from model_dict import get_model
import math
import os
torch.manual_seed(0)
np.random.seed(0)
torch.cuda.manual_seed(0)
torch.backends.cudnn.deterministic = True
################################################################
# configs
################################################################
args = get_args()
TRAIN_PATH = os.path.join(args.data_path, './piececonst_r421_N1024_smooth1.mat')
TEST_PATH = os.path.join(args.data_path, './piececonst_r421_N1024_smooth2.mat')
ntrain = args.ntrain
ntest = args.ntest
N = args.ntotal
in_channels = args.in_dim
out_channels = args.out_dim
r1 = args.h_down
r2 = args.w_down
s1 = int(((args.h - 1) / r1) + 1)
s2 = int(((args.w - 1) / r2) + 1)
batch_size = args.batch_size
learning_rate = args.learning_rate
epochs = args.epochs
step_size = args.step_size
gamma = args.gamma
model_save_path = args.model_save_path
model_save_name = args.model_save_name
################################################################
# models
################################################################
model = get_model(args)
print(count_params(model))
################################################################
# load data and data normalization
################################################################
reader = MatReader(TRAIN_PATH)
x_train = reader.read_field('coeff')[:ntrain, ::r1, ::r2][:, :s1, :s2]
y_train = reader.read_field('sol')[:ntrain, ::r1, ::r2][:, :s1, :s2]
reader.load_file(TEST_PATH)
x_test = reader.read_field('coeff')[:ntest, ::r1, ::r2][:, :s1, :s2]
y_test = reader.read_field('sol')[:ntest, ::r1, ::r2][:, :s1, :s2]
x_normalizer = UnitGaussianNormalizer(x_train)
x_train = x_normalizer.encode(x_train)
x_test = x_normalizer.encode(x_test)
y_normalizer = UnitGaussianNormalizer(y_train)
y_train = y_normalizer.encode(y_train)
y_normalizer.cuda()
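# Normalisation convention used in these scripts: inputs are encoded for both
# splits, targets are encoded only for training, and predictions are decoded
# back before the relative L2 loss, so train and test errors are both reported
# in the original units of 'sol'.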
x_train = x_train.reshape(ntrain, s1, s2, 1)
x_test = x_test.reshape(ntest, s1, s2, 1)
train_loader = torch.utils.data.DataLoader(torch.utils.data.TensorDataset(x_train, y_train), batch_size=batch_size,
shuffle=True)
test_loader = torch.utils.data.DataLoader(torch.utils.data.TensorDataset(x_test, y_test), batch_size=batch_size,
shuffle=False)
################################################################
# training and evaluation
################################################################
optimizer = Adam(model.parameters(), lr=learning_rate, weight_decay=1e-4)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=step_size, gamma=gamma)
myloss = LpLoss(size_average=False)
for ep in range(epochs):
model.train()
t1 = default_timer()
train_l2 = 0
for x, y in train_loader:
x, y = x.cuda(), y.cuda()
optimizer.zero_grad()
out = model(x).reshape(batch_size, s1, s2)
out = y_normalizer.decode(out)
y = y_normalizer.decode(y)
loss = myloss(out.view(batch_size, -1), y.view(batch_size, -1))
loss.backward()
optimizer.step()
train_l2 += loss.item()
scheduler.step()
model.eval()
test_l2 = 0.0
with torch.no_grad():
for x, y in test_loader:
x, y = x.cuda(), y.cuda()
out = model(x).reshape(batch_size, s1, s2)
out = y_normalizer.decode(out)
test_l2 += myloss(out.view(batch_size, -1), y.view(batch_size, -1)).item()
train_l2 /= ntrain
test_l2 /= ntest
t2 = default_timer()
print(ep, t2 - t1, train_l2, test_l2)
if ep % step_size == 0:
if not os.path.exists(model_save_path):
os.makedirs(model_save_path)
print('save model')
torch.save(model.state_dict(), os.path.join(model_save_path, model_save_name))
| 3,958 | 30.927419 | 115 | py |
Latent-Spectral-Models | Latent-Spectral-Models-main/model_dict.py | from models import LSM_2D, LSM_3D, LSM_Irregular_Geo, FNO_2D, FNO_3D, FNO_Irregular_Geo
def get_model(args):
model_dict = {
'FNO_2D': FNO_2D,
'FNO_3D': FNO_3D,
'FNO_Irregular_Geo': FNO_Irregular_Geo,
'LSM_2D': LSM_2D,
'LSM_3D': LSM_3D,
'LSM_Irregular_Geo': LSM_Irregular_Geo,
}
if args.model == 'LSM_Irregular_Geo' or args.model == 'FNO_Irregular_Geo':
return model_dict[args.model].Model(args).cuda(), model_dict[args.model].IPHI().cuda()
else:
return model_dict[args.model].Model(args).cuda()
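# Usage sketch (the fields of `args` come from utils.params.get_args):
#   get_model(args) returns a single cuda model for the regular-grid variants,
#   but a (model, model_iphi) pair for 'LSM_Irregular_Geo'/'FNO_Irregular_Geo' --
#   which is why exp_elas.py unpacks two objects while the other scripts take one.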
| 576 | 35.0625 | 94 | py |
Latent-Spectral-Models | Latent-Spectral-Models-main/exp_ns.py | import torch.nn.functional as F
import matplotlib.pyplot as plt
from timeit import default_timer
from utils.utilities3 import *
from utils.params import get_args
from model_dict import get_model
from utils.adam import Adam
import math
import os
torch.manual_seed(0)
np.random.seed(0)
torch.cuda.manual_seed(0)
torch.backends.cudnn.deterministic = True
################################################################
# configs
################################################################
args = get_args()
TRAIN_PATH = os.path.join(args.data_path, './NavierStokes_V1e-5_N1200_T20.mat')
TEST_PATH = os.path.join(args.data_path, './NavierStokes_V1e-5_N1200_T20.mat')
ntrain = args.ntrain
ntest = args.ntest
N = args.ntotal
in_channels = args.in_dim
out_channels = args.out_dim
r1 = args.h_down
r2 = args.w_down
s1 = int(((args.h - 1) / r1) + 1)
s2 = int(((args.w - 1) / r2) + 1)
T_in = args.T_in
T_out = args.T_out
batch_size = args.batch_size
learning_rate = args.learning_rate
epochs = args.epochs
step_size = args.step_size
gamma = args.gamma
model_save_path = args.model_save_path
model_save_name = args.model_save_name
################################################################
# models
################################################################
model = get_model(args)
print(count_params(model))
################################################################
# load data and data normalization
################################################################
reader = MatReader(TRAIN_PATH)
train_a = reader.read_field('u')[:ntrain, ::r1, ::r2, :T_in]
train_u = reader.read_field('u')[:ntrain, ::r1, ::r2, T_in:T_in + T_out]
test_a = reader.read_field('u')[-ntest:, ::r1, ::r2, :T_in]
test_u = reader.read_field('u')[-ntest:, ::r1, ::r2, T_in:T_in + T_out]
print(train_u.shape)
print(test_u.shape)
train_a = train_a.reshape(ntrain, s1, s2, T_in)
test_a = test_a.reshape(ntest, s1, s2, T_in)
train_loader = torch.utils.data.DataLoader(torch.utils.data.TensorDataset(train_a, train_u), batch_size=batch_size,
shuffle=True)
test_loader = torch.utils.data.DataLoader(torch.utils.data.TensorDataset(test_a, test_u), batch_size=batch_size,
shuffle=False)
################################################################
# training and evaluation
################################################################
optimizer = Adam(model.parameters(), lr=learning_rate, weight_decay=1e-4)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=step_size, gamma=gamma)
myloss = LpLoss(size_average=False)
step = 1
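# Rollout scheme used below (read off the loop itself): the model sees a window
# of T_in input frames, predicts `step` (= 1) frame ahead, the prediction is
# appended to the window and the oldest frame dropped, so the T_out target
# frames are produced fully autoregressively from the model's own outputs.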
for ep in range(epochs):
model.train()
t1 = default_timer()
train_l2_step = 0
train_l2_full = 0
for xx, yy in train_loader:
loss = 0
xx = xx.to(device)
yy = yy.to(device)
for t in range(0, T_out, step):
y = yy[..., t:t + step]
im = model(xx)
loss += myloss(im.reshape(batch_size, -1), y.reshape(batch_size, -1))
if t == 0:
pred = im
else:
pred = torch.cat((pred, im), -1)
xx = torch.cat((xx[..., step:], im), dim=-1)
train_l2_step += loss.item()
l2_full = myloss(pred.reshape(batch_size, -1), yy.reshape(batch_size, -1))
train_l2_full += l2_full.item()
optimizer.zero_grad()
loss.backward()
optimizer.step()
test_l2_step = 0
test_l2_full = 0
with torch.no_grad():
for xx, yy in test_loader:
loss = 0
xx = xx.to(device)
yy = yy.to(device)
for t in range(0, T_out, step):
y = yy[..., t:t + step]
im = model(xx)
loss += myloss(im.reshape(batch_size, -1), y.reshape(batch_size, -1))
if t == 0:
pred = im
else:
pred = torch.cat((pred, im), -1)
xx = torch.cat((xx[..., step:], im), dim=-1)
test_l2_step += loss.item()
test_l2_full += myloss(pred.reshape(batch_size, -1), yy.reshape(batch_size, -1)).item()
t2 = default_timer()
scheduler.step()
print(ep, t2 - t1, train_l2_step / ntrain / (T_out / step), train_l2_full / ntrain,
test_l2_step / ntest / (T_out / step),
test_l2_full / ntest)
if ep % step_size == 0:
if not os.path.exists(model_save_path):
os.makedirs(model_save_path)
print('save model')
torch.save(model.state_dict(), os.path.join(model_save_path, model_save_name)) | 4,624 | 31.118056 | 115 | py |
Latent-Spectral-Models | Latent-Spectral-Models-main/exp_plas.py | import torch.nn.functional as F
import matplotlib.pyplot as plt
from timeit import default_timer
from utils.utilities3 import *
from utils.adam import Adam
from utils.params import get_args
from model_dict import get_model
import math
import os
torch.manual_seed(0)
np.random.seed(0)
torch.cuda.manual_seed(0)
torch.backends.cudnn.deterministic = True
################################################################
# configs
################################################################
args = get_args()
DATA_PATH = os.path.join(args.data_path, './plas_N987_T20.mat')
ntrain = args.ntrain
ntest = args.ntest
N = args.ntotal
in_channels = args.in_dim
out_channels = args.out_dim
r1 = args.h_down
r2 = args.w_down
s1 = int(((args.h - 1) / r1) + 1)
s2 = int(((args.w - 1) / r2) + 1)
t = args.T_in
batch_size = args.batch_size
learning_rate = args.learning_rate
epochs = args.epochs
step_size = args.step_size
gamma = args.gamma
model_save_path = args.model_save_path
model_save_name = args.model_save_name
################################################################
# models
################################################################
model = get_model(args)
print(count_params(model))
################################################################
# load data and data normalization
################################################################
reader = MatReader(DATA_PATH)
x_train = reader.read_field('input')[:ntrain, ::r1][:, :s1].reshape(ntrain, s1, 1, 1, 1).repeat(1, 1, s2, t, 1)
y_train = reader.read_field('output')[:ntrain, ::r1, ::r2][:, :s1, :s2]
reader.load_file(DATA_PATH)
x_test = reader.read_field('input')[-ntest:, ::r1][:, :s1].reshape(ntest, s1, 1, 1, 1).repeat(1, 1, s2, t, 1)
y_test = reader.read_field('output')[-ntest:, ::r1, ::r2][:, :s1, :s2]
print(x_train.shape, y_train.shape)
x_normalizer = UnitGaussianNormalizer(x_train)
x_train = x_normalizer.encode(x_train)
x_test = x_normalizer.encode(x_test)
y_normalizer = UnitGaussianNormalizer(y_train)
y_train = y_normalizer.encode(y_train)
y_normalizer.cuda()
train_loader = torch.utils.data.DataLoader(torch.utils.data.TensorDataset(x_train, y_train), batch_size=batch_size,
shuffle=True)
test_loader = torch.utils.data.DataLoader(torch.utils.data.TensorDataset(x_test, y_test), batch_size=batch_size,
shuffle=False)
################################################################
# training and evaluation
################################################################
optimizer = Adam(model.parameters(), lr=learning_rate, weight_decay=1e-4)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=step_size, gamma=gamma)
myloss = LpLoss(size_average=False, p=2)
for ep in range(epochs):
model.train()
t1 = default_timer()
train_l2 = 0
train_reg = 0
for x, y in train_loader:
x, y = x.cuda(), y.cuda()
optimizer.zero_grad()
out = model(x).reshape(batch_size, s1, s2, t, out_channels)
out = y_normalizer.decode(out)
y = y_normalizer.decode(y)
loss = myloss(out.view(batch_size, -1), y.view(batch_size, -1))
loss.backward()
optimizer.step()
train_l2 += loss.item()
scheduler.step()
model.eval()
test_l2 = 0.0
with torch.no_grad():
for x, y in test_loader:
x, y = x.cuda(), y.cuda()
out = model(x).reshape(batch_size, s1, s2, t, out_channels)
out = y_normalizer.decode(out)
test_l2 += myloss(out.view(batch_size, -1), y.view(batch_size, -1)).item()
train_l2 /= ntrain
train_reg /= ntrain
test_l2 /= ntest
t2 = default_timer()
print(ep, t2 - t1, train_l2, train_reg, test_l2)
if ep % step_size == 0:
if not os.path.exists(model_save_path):
os.makedirs(model_save_path)
print('save model')
torch.save(model.state_dict(), os.path.join(model_save_path, model_save_name))
truth = y[0].squeeze().detach().cpu().numpy()
pred = out[0].squeeze().detach().cpu().numpy()
ZERO = torch.zeros(s1, s2)
truth_du = np.linalg.norm(truth[:, :, :, 2:], axis=-1)
pred_du = np.linalg.norm(pred[:, :, :, 2:], axis=-1)
lims = dict(cmap='RdBu_r', vmin=truth_du.min(), vmax=truth_du.max())
fig, ax = plt.subplots(nrows=2, ncols=5, figsize=(20, 6))
t0, t1, t2, t3, t4 = 0, 4, 9, 14, 19
ax[0, 0].scatter(truth[:, :, 0, 0], truth[:, :, 0, 1], 10, truth_du[:, :, 0], **lims)
ax[1, 0].scatter(pred[:, :, 0, 0], pred[:, :, 0, 1], 10, pred_du[:, :, 0], **lims)
ax[0, 1].scatter(truth[:, :, 4, 0], truth[:, :, 4, 1], 10, truth_du[:, :, 4], **lims)
ax[1, 1].scatter(pred[:, :, 4, 0], pred[:, :, 4, 1], 10, pred_du[:, :, 4], **lims)
ax[0, 2].scatter(truth[:, :, 9, 0], truth[:, :, 9, 1], 10, truth_du[:, :, 9], **lims)
ax[1, 2].scatter(pred[:, :, 9, 0], pred[:, :, 9, 1], 10, pred_du[:, :, 9], **lims)
ax[0, 3].scatter(truth[:, :, 14, 0], truth[:, :, 14, 1], 10, truth_du[:, :, 14], **lims)
ax[1, 3].scatter(pred[:, :, 14, 0], pred[:, :, 14, 1], 10, pred_du[:, :, 14], **lims)
ax[0, 4].scatter(truth[:, :, 19, 0], truth[:, :, 19, 1], 10, truth_du[:, :, 19], **lims)
ax[1, 4].scatter(pred[:, :, 19, 0], pred[:, :, 19, 1], 10, pred_du[:, :, 19], **lims)
fig.show()
| 5,411 | 37.935252 | 115 | py |
Latent-Spectral-Models | Latent-Spectral-Models-main/models/LSM_Irregular_Geo.py | """
@author: Haixu Wu
"""
import torch.nn.functional as F
import torch.nn as nn
import torch
import numpy as np
import math
################################################################
# Multiscale modules 2D
################################################################
class DoubleConv(nn.Module):
"""(convolution => [BN] => ReLU) * 2"""
def __init__(self, in_channels, out_channels, mid_channels=None):
super().__init__()
if not mid_channels:
mid_channels = out_channels
self.double_conv = nn.Sequential(
nn.Conv2d(in_channels, mid_channels, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(mid_channels),
nn.ReLU(inplace=True),
nn.Conv2d(mid_channels, out_channels, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True)
)
def forward(self, x):
return self.double_conv(x)
class Down(nn.Module):
"""Downscaling with maxpool then double conv"""
def __init__(self, in_channels, out_channels):
super().__init__()
self.maxpool_conv = nn.Sequential(
nn.MaxPool2d(2),
DoubleConv(in_channels, out_channels)
)
def forward(self, x):
return self.maxpool_conv(x)
class Up(nn.Module):
"""Upscaling then double conv"""
def __init__(self, in_channels, out_channels, bilinear=True):
super().__init__()
# if bilinear, use the normal convolutions to reduce the number of channels
if bilinear:
self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
self.conv = DoubleConv(in_channels, out_channels, in_channels // 2)
else:
self.up = nn.ConvTranspose2d(in_channels, in_channels // 2, kernel_size=2, stride=2)
self.conv = DoubleConv(in_channels, out_channels)
def forward(self, x1, x2):
x1 = self.up(x1)
# input is CHW
diffY = x2.size()[2] - x1.size()[2]
diffX = x2.size()[3] - x1.size()[3]
x1 = F.pad(x1, [diffX // 2, diffX - diffX // 2,
diffY // 2, diffY - diffY // 2])
# if you have padding issues, see
# https://github.com/HaiyongJiang/U-Net-Pytorch-Unstructured-Buggy/commit/0e854509c2cea854e247a9c615f175f76fbb2e3a
# https://github.com/xiaopeng-liao/Pytorch-UNet/commit/8ebac70e633bac59fc22bb5195e513d5832fb3bd
x = torch.cat([x2, x1], dim=1)
return self.conv(x)
class OutConv(nn.Module):
def __init__(self, in_channels, out_channels):
super(OutConv, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1)
def forward(self, x):
return self.conv(x)
################################################################
# geo projection
################################################################
class SpectralConv2d(nn.Module):
def __init__(self, in_channels, out_channels, modes1, modes2, s1=32, s2=32):
super(SpectralConv2d, self).__init__()
"""
from geoFNO
"""
self.in_channels = in_channels
self.out_channels = out_channels
self.modes1 = modes1 # Number of Fourier modes to multiply, at most floor(N/2) + 1
self.modes2 = modes2
self.s1 = s1
self.s2 = s2
self.scale = (1 / (in_channels * out_channels))
self.weights1 = nn.Parameter(
self.scale * torch.rand(in_channels, out_channels, self.modes1, self.modes2, dtype=torch.cfloat))
self.weights2 = nn.Parameter(
self.scale * torch.rand(in_channels, out_channels, self.modes1, self.modes2, dtype=torch.cfloat))
# Complex multiplication
def compl_mul2d(self, input, weights):
# (batch, in_channel, x,y ), (in_channel, out_channel, x,y) -> (batch, out_channel, x,y)
return torch.einsum("bixy,ioxy->boxy", input, weights)
def forward(self, u, x_in=None, x_out=None, iphi=None, code=None):
batchsize = u.shape[0]
        # Compute Fourier coefficients up to a factor of e^(- something constant)
if x_in == None:
u_ft = torch.fft.rfft2(u)
s1 = u.size(-2)
s2 = u.size(-1)
else:
u_ft = self.fft2d(u, x_in, iphi, code)
s1 = self.s1
s2 = self.s2
# Multiply relevant Fourier modes
# print(u.shape, u_ft.shape)
factor1 = self.compl_mul2d(u_ft[:, :, :self.modes1, :self.modes2], self.weights1)
factor2 = self.compl_mul2d(u_ft[:, :, -self.modes1:, :self.modes2], self.weights2)
# Return to physical space
if x_out == None:
out_ft = torch.zeros(batchsize, self.out_channels, s1, s2 // 2 + 1, dtype=torch.cfloat, device=u.device)
out_ft[:, :, :self.modes1, :self.modes2] = factor1
out_ft[:, :, -self.modes1:, :self.modes2] = factor2
u = torch.fft.irfft2(out_ft, s=(s1, s2))
else:
out_ft = torch.cat([factor1, factor2], dim=-2)
u = self.ifft2d(out_ft, x_out, iphi, code)
return u
def fft2d(self, u, x_in, iphi=None, code=None):
# u (batch, channels, n)
# x_in (batch, n, 2) locations in [0,1]*[0,1]
# iphi: function: x_in -> x_c
batchsize = x_in.shape[0]
N = x_in.shape[1]
device = x_in.device
m1 = 2 * self.modes1
m2 = 2 * self.modes2 - 1
# wavenumber (m1, m2)
k_x1 = torch.cat((torch.arange(start=0, end=self.modes1, step=1), \
torch.arange(start=-(self.modes1), end=0, step=1)), 0).reshape(m1, 1).repeat(1, m2).to(device)
k_x2 = torch.cat((torch.arange(start=0, end=self.modes2, step=1), \
torch.arange(start=-(self.modes2 - 1), end=0, step=1)), 0).reshape(1, m2).repeat(m1, 1).to(
device)
if iphi == None:
x = x_in
else:
x = iphi(x_in, code)
# K = <y, k_x>, (batch, N, m1, m2)
K1 = torch.outer(x[..., 0].view(-1), k_x1.view(-1)).reshape(batchsize, N, m1, m2)
K2 = torch.outer(x[..., 1].view(-1), k_x2.view(-1)).reshape(batchsize, N, m1, m2)
K = K1 + K2
# basis (batch, N, m1, m2)
basis = torch.exp(-1j * 2 * np.pi * K).to(device)
# Y (batch, channels, N)
u = u + 0j
Y = torch.einsum("bcn,bnxy->bcxy", u, basis)
return Y
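    # Note on fft2d/ifft2d: for mesh points that do not sit on a regular grid an
    # FFT cannot be used, so the Fourier basis exp(+/- 2*pi*i * k.x) is built
    # explicitly at the (possibly iphi-deformed) locations and contracted with
    # einsum -- a direct O(N * m1 * m2) sum instead of an FFT.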
def ifft2d(self, u_ft, x_out, iphi=None, code=None):
# u_ft (batch, channels, kmax, kmax)
# x_out (batch, N, 2) locations in [0,1]*[0,1]
# iphi: function: x_out -> x_c
batchsize = x_out.shape[0]
N = x_out.shape[1]
device = x_out.device
m1 = 2 * self.modes1
m2 = 2 * self.modes2 - 1
# wavenumber (m1, m2)
k_x1 = torch.cat((torch.arange(start=0, end=self.modes1, step=1), \
torch.arange(start=-(self.modes1), end=0, step=1)), 0).reshape(m1, 1).repeat(1, m2).to(device)
k_x2 = torch.cat((torch.arange(start=0, end=self.modes2, step=1), \
torch.arange(start=-(self.modes2 - 1), end=0, step=1)), 0).reshape(1, m2).repeat(m1, 1).to(
device)
if iphi == None:
x = x_out
else:
x = iphi(x_out, code)
# K = <y, k_x>, (batch, N, m1, m2)
K1 = torch.outer(x[:, :, 0].view(-1), k_x1.view(-1)).reshape(batchsize, N, m1, m2)
K2 = torch.outer(x[:, :, 1].view(-1), k_x2.view(-1)).reshape(batchsize, N, m1, m2)
K = K1 + K2
# basis (batch, N, m1, m2)
basis = torch.exp(1j * 2 * np.pi * K).to(device)
# coeff (batch, channels, m1, m2)
u_ft2 = u_ft[..., 1:].flip(-1, -2).conj()
u_ft = torch.cat([u_ft, u_ft2], dim=-1)
# Y (batch, channels, N)
Y = torch.einsum("bcxy,bnxy->bcn", u_ft, basis)
Y = Y.real
return Y
class IPHI(nn.Module):
def __init__(self, width=32):
super(IPHI, self).__init__()
"""
inverse phi: x -> xi
"""
self.width = width
self.fc0 = nn.Linear(4, self.width)
self.fc_code = nn.Linear(42, self.width)
self.fc_no_code = nn.Linear(3 * self.width, 4 * self.width)
self.fc1 = nn.Linear(4 * self.width, 4 * self.width)
self.fc2 = nn.Linear(4 * self.width, 4 * self.width)
self.fc3 = nn.Linear(4 * self.width, 4 * self.width)
self.fc4 = nn.Linear(4 * self.width, 2)
self.activation = torch.tanh
self.center = torch.tensor([0.0001, 0.0001], device="cuda").reshape(1, 1, 2)
self.B = np.pi * torch.pow(2, torch.arange(0, self.width // 4, dtype=torch.float, device="cuda")).reshape(1, 1,
1,
self.width // 4)
def forward(self, x, code=None):
# x (batch, N_grid, 2)
# code (batch, N_features)
# some feature engineering
angle = torch.atan2(x[:, :, 1] - self.center[:, :, 1], x[:, :, 0] - self.center[:, :, 0])
radius = torch.norm(x - self.center, dim=-1, p=2)
xd = torch.stack([x[:, :, 0], x[:, :, 1], angle, radius], dim=-1)
# sin features from NeRF
b, n, d = xd.shape[0], xd.shape[1], xd.shape[2]
x_sin = torch.sin(self.B * xd.view(b, n, d, 1)).view(b, n, d * self.width // 4)
x_cos = torch.cos(self.B * xd.view(b, n, d, 1)).view(b, n, d * self.width // 4)
xd = self.fc0(xd)
xd = torch.cat([xd, x_sin, x_cos], dim=-1).reshape(b, n, 3 * self.width)
if code != None:
cd = self.fc_code(code)
cd = cd.unsqueeze(1).repeat(1, xd.shape[1], 1)
xd = torch.cat([cd, xd], dim=-1)
else:
xd = self.fc_no_code(xd)
xd = self.fc1(xd)
xd = self.activation(xd)
xd = self.fc2(xd)
xd = self.activation(xd)
xd = self.fc3(xd)
xd = self.activation(xd)
xd = self.fc4(xd)
return x + x * xd
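    # Summary (as implemented above): IPHI predicts a multiplicative residual,
    # xi = x + x * NN(features), where the features are the raw coordinates plus
    # the polar angle/radius around a fixed centre, expanded into NeRF-style
    # sin/cos positional encodings and optionally concatenated with a 42-dim
    # geometry code; with small network outputs the map stays close to identity.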
################################################################
# Patchify and Neural Spectral Block
################################################################
class NeuralSpectralBlock2d(nn.Module):
def __init__(self, width, num_basis, patch_size=[3, 3], num_token=4):
super(NeuralSpectralBlock2d, self).__init__()
self.patch_size = patch_size
self.width = width
self.num_basis = num_basis
# basis
self.modes_list = (1.0 / float(num_basis)) * torch.tensor([i for i in range(num_basis)],
dtype=torch.float).cuda()
self.weights = nn.Parameter(
(1 / (width)) * torch.rand(width, self.num_basis * 2, dtype=torch.float))
# latent
self.head = 8
self.num_token = num_token
self.latent = nn.Parameter(
(1 / (width)) * torch.rand(self.head, self.num_token, width // self.head, dtype=torch.float))
self.encoder_attn = nn.Conv2d(self.width, self.width * 2, kernel_size=1, stride=1)
self.decoder_attn = nn.Conv2d(self.width, self.width, kernel_size=1, stride=1)
self.softmax = nn.Softmax(dim=-1)
def self_attn(self, q, k, v):
# q,k,v: B H L C/H
attn = self.softmax(torch.einsum("bhlc,bhsc->bhls", q, k))
return torch.einsum("bhls,bhsc->bhlc", attn, v)
def latent_encoder_attn(self, x):
# x: B C H W
B, C, H, W = x.shape
L = H * W
latent_token = self.latent[None, :, :, :].repeat(B, 1, 1, 1)
x_tmp = self.encoder_attn(x).view(B, C * 2, -1).permute(0, 2, 1).contiguous() \
.view(B, L, self.head, C // self.head, 2).permute(4, 0, 2, 1, 3).contiguous()
latent_token = self.self_attn(latent_token, x_tmp[0], x_tmp[1]) + latent_token
latent_token = latent_token.permute(0, 1, 3, 2).contiguous().view(B, C, self.num_token)
return latent_token
def latent_decoder_attn(self, x, latent_token):
        # x: B C H W, latent_token: B C num_token
x_init = x
B, C, H, W = x.shape
L = H * W
latent_token = latent_token.view(B, self.head, C // self.head, self.num_token).permute(0, 1, 3, 2).contiguous()
x_tmp = self.decoder_attn(x).view(B, C, -1).permute(0, 2, 1).contiguous() \
.view(B, L, self.head, C // self.head).permute(0, 2, 1, 3).contiguous()
x = self.self_attn(x_tmp, latent_token, latent_token)
        x = x.permute(0, 1, 3, 2).contiguous().view(B, C, H, W) + x_init # back to B C H W
return x
def get_basis(self, x):
# x: B C N
x_sin = torch.sin(self.modes_list[None, None, None, :] * x[:, :, :, None] * math.pi)
x_cos = torch.cos(self.modes_list[None, None, None, :] * x[:, :, :, None] * math.pi)
return torch.cat([x_sin, x_cos], dim=-1)
def compl_mul2d(self, input, weights):
return torch.einsum("bilm,im->bil", input, weights)
def forward(self, x):
B, C, H, W = x.shape
# patchify
x = x.view(x.shape[0], x.shape[1],
x.shape[2] // self.patch_size[0], self.patch_size[0], x.shape[3] // self.patch_size[1],
self.patch_size[1]).contiguous() \
.permute(0, 2, 4, 1, 3, 5).contiguous() \
.view(x.shape[0] * (x.shape[2] // self.patch_size[0]) * (x.shape[3] // self.patch_size[1]), x.shape[1],
self.patch_size[0],
self.patch_size[1])
# Neural Spectral
# (1) encoder
latent_token = self.latent_encoder_attn(x)
# (2) transition
latent_token_modes = self.get_basis(latent_token)
latent_token = self.compl_mul2d(latent_token_modes, self.weights) + latent_token
# (3) decoder
x = self.latent_decoder_attn(x, latent_token)
# de-patchify
x = x.view(B, (H // self.patch_size[0]), (W // self.patch_size[1]), C, self.patch_size[0],
self.patch_size[1]).permute(0, 3, 1, 4, 2, 5).contiguous() \
.view(B, C, H, W).contiguous()
return x
class Model(nn.Module):
def __init__(self, args, bilinear=True, modes1=12, modes2=12, s1=96, s2=96):
super(Model, self).__init__()
in_channels = args.in_dim
out_channels = args.out_dim
width = args.d_model
num_token = args.num_token
num_basis = args.num_basis
patch_size = [int(x) for x in args.patch_size.split(',')]
padding = [int(x) for x in args.padding.split(',')]
# multiscale modules
self.inc = DoubleConv(width, width)
self.down1 = Down(width, width * 2)
self.down2 = Down(width * 2, width * 4)
self.down3 = Down(width * 4, width * 8)
factor = 2 if bilinear else 1
self.down4 = Down(width * 8, width * 16 // factor)
self.up1 = Up(width * 16, width * 8 // factor, bilinear)
self.up2 = Up(width * 8, width * 4 // factor, bilinear)
self.up3 = Up(width * 4, width * 2 // factor, bilinear)
self.up4 = Up(width * 2, width, bilinear)
self.outc = OutConv(width, width)
# Patchified Neural Spectral Blocks
self.process1 = NeuralSpectralBlock2d(width, num_basis, patch_size, num_token)
self.process2 = NeuralSpectralBlock2d(width * 2, num_basis, patch_size, num_token)
self.process3 = NeuralSpectralBlock2d(width * 4, num_basis, patch_size, num_token)
self.process4 = NeuralSpectralBlock2d(width * 8, num_basis, patch_size, num_token)
self.process5 = NeuralSpectralBlock2d(width * 16 // factor, num_basis, patch_size, num_token)
# geo projectors
self.s1 = s1
self.s2 = s2
self.fc0 = nn.Linear(in_channels, width)
self.fftproject_in = SpectralConv2d(width, width, modes1, modes2, s1, s2)
self.fftproject_out = SpectralConv2d(width, width, modes1, modes2, s1, s2)
self.convproject_in = nn.Conv2d(2, width, 1)
self.convproject_out = nn.Conv1d(2, width, 1)
# dim projectors
self.fc1 = nn.Linear(width, 128)
self.fc2 = nn.Linear(128, out_channels)
def forward(self, x, code=None, x_in=None, x_out=None, iphi=None):
        if x_in is None:
            x_in = x
        if x_out is None:
            x_out = x
grid = self.get_grid([x.shape[0], self.s1, self.s2], x.device).permute(0, 3, 1, 2)
u = self.fc0(x)
u = u.permute(0, 2, 1)
uc1 = self.fftproject_in(u, x_in=x_in, iphi=iphi, code=code)
uc2 = self.convproject_in(grid)
uc = uc1 + uc2
uc = F.gelu(uc)
x1 = self.inc(uc)
x2 = self.down1(x1)
x3 = self.down2(x2)
x4 = self.down3(x3)
x5 = self.down4(x4)
x = self.up1(self.process5(x5), self.process4(x4))
x = self.up2(x, self.process3(x3))
x = self.up3(x, self.process2(x2))
x = self.up4(x, self.process1(x1))
uc = self.outc(x)
u = self.fftproject_out(uc, x_out=x_out, iphi=iphi, code=code)
u1 = self.convproject_out(x_out.permute(0, 2, 1))
u = u + u1
u = u.permute(0, 2, 1)
u = self.fc1(u)
u = F.gelu(u)
u = self.fc2(u)
return u
def get_grid(self, shape, device):
batchsize, size_x, size_y = shape[0], shape[1], shape[2]
gridx = torch.tensor(np.linspace(0, 1, size_x), dtype=torch.float)
gridx = gridx.reshape(1, size_x, 1, 1).repeat([batchsize, 1, size_y, 1])
gridy = torch.tensor(np.linspace(0, 1, size_y), dtype=torch.float)
gridy = gridy.reshape(1, 1, size_y, 1).repeat([batchsize, size_x, 1, 1])
return torch.cat((gridx, gridy), dim=-1).to(device)
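if __name__ == "__main__":
    # Hedged usage sketch (not part of the original file): a quick smoke test of the
    # geometry-aware LSM model defined above. It assumes a CUDA device, because IPHI and
    # NeuralSpectralBlock2d allocate their buffers on "cuda", and an `args` namespace
    # carrying the fields read in Model.__init__; the sizes below are illustrative only,
    # not necessarily the paper's configuration.
    from types import SimpleNamespace
    args = SimpleNamespace(in_dim=2, out_dim=1, d_model=32, num_token=4,
                           num_basis=12, patch_size='3,3', padding='0,0')
    model, iphi = Model(args).cuda(), IPHI().cuda()
    xy = torch.rand(4, 972, 2).cuda()   # a point cloud in [0, 1]^2
    out = model(xy, iphi=iphi)          # maps every point to an output value
    print(out.shape)                    # expected: torch.Size([4, 972, 1])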
| 17,899 | 39.134529 | 130 | py |
Latent-Spectral-Models | Latent-Spectral-Models-main/models/FNO_Irregular_Geo.py | """
@author: Zongyi Li
modified by Haixu Wu to adapt to this code base
"""
import torch.nn.functional as F
import torch.nn as nn
import torch
import numpy as np
import math
################################################################
# fourier layer
################################################################
class SpectralConv2d(nn.Module):
def __init__(self, in_channels, out_channels, modes1, modes2, s1=32, s2=32):
super(SpectralConv2d, self).__init__()
"""
2D Fourier layer. It does FFT, linear transform, and Inverse FFT.
"""
self.in_channels = in_channels
self.out_channels = out_channels
self.modes1 = modes1 # Number of Fourier modes to multiply, at most floor(N/2) + 1
self.modes2 = modes2
self.s1 = s1
self.s2 = s2
self.scale = (1 / (in_channels * out_channels))
self.weights1 = nn.Parameter(
self.scale * torch.rand(in_channels, out_channels, self.modes1, self.modes2, dtype=torch.cfloat))
self.weights2 = nn.Parameter(
self.scale * torch.rand(in_channels, out_channels, self.modes1, self.modes2, dtype=torch.cfloat))
# Complex multiplication
def compl_mul2d(self, input, weights):
# (batch, in_channel, x,y ), (in_channel, out_channel, x,y) -> (batch, out_channel, x,y)
return torch.einsum("bixy,ioxy->boxy", input, weights)
def forward(self, u, x_in=None, x_out=None, iphi=None, code=None):
batchsize = u.shape[0]
        # Compute Fourier coefficients up to factor of e^(- something constant)
        if x_in is None:
u_ft = torch.fft.rfft2(u)
s1 = u.size(-2)
s2 = u.size(-1)
else:
u_ft = self.fft2d(u, x_in, iphi, code)
s1 = self.s1
s2 = self.s2
# Multiply relevant Fourier modes
# print(u.shape, u_ft.shape)
factor1 = self.compl_mul2d(u_ft[:, :, :self.modes1, :self.modes2], self.weights1)
factor2 = self.compl_mul2d(u_ft[:, :, -self.modes1:, :self.modes2], self.weights2)
# Return to physical space
        if x_out is None:
out_ft = torch.zeros(batchsize, self.out_channels, s1, s2 // 2 + 1, dtype=torch.cfloat, device=u.device)
out_ft[:, :, :self.modes1, :self.modes2] = factor1
out_ft[:, :, -self.modes1:, :self.modes2] = factor2
u = torch.fft.irfft2(out_ft, s=(s1, s2))
else:
out_ft = torch.cat([factor1, factor2], dim=-2)
u = self.ifft2d(out_ft, x_out, iphi, code)
return u
def fft2d(self, u, x_in, iphi=None, code=None):
# u (batch, channels, n)
# x_in (batch, n, 2) locations in [0,1]*[0,1]
# iphi: function: x_in -> x_c
batchsize = x_in.shape[0]
N = x_in.shape[1]
device = x_in.device
m1 = 2 * self.modes1
m2 = 2 * self.modes2 - 1
# wavenumber (m1, m2)
k_x1 = torch.cat((torch.arange(start=0, end=self.modes1, step=1), \
torch.arange(start=-(self.modes1), end=0, step=1)), 0).reshape(m1, 1).repeat(1, m2).to(device)
k_x2 = torch.cat((torch.arange(start=0, end=self.modes2, step=1), \
torch.arange(start=-(self.modes2 - 1), end=0, step=1)), 0).reshape(1, m2).repeat(m1, 1).to(
device)
# print(x_in.shape)
        if iphi is None:
            x = x_in
        else:
            x = iphi(x_in, code)
# print(x.shape)
# K = <y, k_x>, (batch, N, m1, m2)
K1 = torch.outer(x[..., 0].view(-1), k_x1.view(-1)).reshape(batchsize, N, m1, m2)
K2 = torch.outer(x[..., 1].view(-1), k_x2.view(-1)).reshape(batchsize, N, m1, m2)
K = K1 + K2
# basis (batch, N, m1, m2)
basis = torch.exp(-1j * 2 * np.pi * K).to(device)
# Y (batch, channels, N)
u = u + 0j
Y = torch.einsum("bcn,bnxy->bcxy", u, basis)
return Y
def ifft2d(self, u_ft, x_out, iphi=None, code=None):
# u_ft (batch, channels, kmax, kmax)
# x_out (batch, N, 2) locations in [0,1]*[0,1]
# iphi: function: x_out -> x_c
batchsize = x_out.shape[0]
N = x_out.shape[1]
device = x_out.device
m1 = 2 * self.modes1
m2 = 2 * self.modes2 - 1
# wavenumber (m1, m2)
k_x1 = torch.cat((torch.arange(start=0, end=self.modes1, step=1), \
torch.arange(start=-(self.modes1), end=0, step=1)), 0).reshape(m1, 1).repeat(1, m2).to(device)
k_x2 = torch.cat((torch.arange(start=0, end=self.modes2, step=1), \
torch.arange(start=-(self.modes2 - 1), end=0, step=1)), 0).reshape(1, m2).repeat(m1, 1).to(
device)
        if iphi is None:
            x = x_out
        else:
            x = iphi(x_out, code)
# K = <y, k_x>, (batch, N, m1, m2)
K1 = torch.outer(x[:, :, 0].view(-1), k_x1.view(-1)).reshape(batchsize, N, m1, m2)
K2 = torch.outer(x[:, :, 1].view(-1), k_x2.view(-1)).reshape(batchsize, N, m1, m2)
K = K1 + K2
# basis (batch, N, m1, m2)
basis = torch.exp(1j * 2 * np.pi * K).to(device)
# coeff (batch, channels, m1, m2)
u_ft2 = u_ft[..., 1:].flip(-1, -2).conj()
u_ft = torch.cat([u_ft, u_ft2], dim=-1)
# Y (batch, channels, N)
Y = torch.einsum("bcxy,bnxy->bcn", u_ft, basis)
Y = Y.real
return Y
class IPHI(nn.Module):
def __init__(self, width=32):
super(IPHI, self).__init__()
"""
inverse phi: x -> xi
"""
self.width = width
self.fc0 = nn.Linear(4, self.width)
self.fc_code = nn.Linear(42, self.width)
self.fc_no_code = nn.Linear(3 * self.width, 4 * self.width)
self.fc1 = nn.Linear(4 * self.width, 4 * self.width)
self.fc2 = nn.Linear(4 * self.width, 4 * self.width)
self.fc3 = nn.Linear(4 * self.width, 4 * self.width)
self.fc4 = nn.Linear(4 * self.width, 2)
self.activation = torch.tanh
self.center = torch.tensor([0.0001, 0.0001], device="cuda").reshape(1, 1, 2)
self.B = np.pi * torch.pow(2, torch.arange(0, self.width // 4, dtype=torch.float, device="cuda")).reshape(1, 1,
1,
self.width // 4)
def forward(self, x, code=None):
# x (batch, N_grid, 2)
# code (batch, N_features)
# some feature engineering
angle = torch.atan2(x[:, :, 1] - self.center[:, :, 1], x[:, :, 0] - self.center[:, :, 0])
radius = torch.norm(x - self.center, dim=-1, p=2)
xd = torch.stack([x[:, :, 0], x[:, :, 1], angle, radius], dim=-1)
# sin features from NeRF
b, n, d = xd.shape[0], xd.shape[1], xd.shape[2]
x_sin = torch.sin(self.B * xd.view(b, n, d, 1)).view(b, n, d * self.width // 4)
x_cos = torch.cos(self.B * xd.view(b, n, d, 1)).view(b, n, d * self.width // 4)
xd = self.fc0(xd)
xd = torch.cat([xd, x_sin, x_cos], dim=-1).reshape(b, n, 3 * self.width)
        if code is not None:
cd = self.fc_code(code)
cd = cd.unsqueeze(1).repeat(1, xd.shape[1], 1)
xd = torch.cat([cd, xd], dim=-1)
else:
xd = self.fc_no_code(xd)
xd = self.fc1(xd)
xd = self.activation(xd)
xd = self.fc2(xd)
xd = self.activation(xd)
xd = self.fc3(xd)
xd = self.activation(xd)
xd = self.fc4(xd)
return x + x * xd
class Model(nn.Module):
def __init__(self, args, is_mesh=True, modes1=12, modes2=12, s1=96, s2=96):
super(Model, self).__init__()
in_channels = args.in_dim
out_channels = args.out_dim
width = args.d_model
self.modes1 = modes1
self.modes2 = modes2
self.width = width
self.is_mesh = is_mesh
self.s1 = s1
self.s2 = s2
        self.fc0 = nn.Linear(in_channels, self.width) # lift each mesh point's in_channels features to the hidden width
self.conv0 = SpectralConv2d(self.width, self.width, self.modes1, self.modes2, s1, s2)
self.conv1 = SpectralConv2d(self.width, self.width, self.modes1, self.modes2)
self.conv2 = SpectralConv2d(self.width, self.width, self.modes1, self.modes2)
self.conv3 = SpectralConv2d(self.width, self.width, self.modes1, self.modes2)
self.conv4 = SpectralConv2d(self.width, self.width, self.modes1, self.modes2, s1, s2)
self.w1 = nn.Conv2d(self.width, self.width, 1)
self.w2 = nn.Conv2d(self.width, self.width, 1)
self.w3 = nn.Conv2d(self.width, self.width, 1)
self.b0 = nn.Conv2d(2, self.width, 1)
self.b1 = nn.Conv2d(2, self.width, 1)
self.b2 = nn.Conv2d(2, self.width, 1)
self.b3 = nn.Conv2d(2, self.width, 1)
self.b4 = nn.Conv1d(2, self.width, 1)
self.fc1 = nn.Linear(self.width, 128)
self.fc2 = nn.Linear(128, out_channels)
def forward(self, u, code=None, x_in=None, x_out=None, iphi=None):
# u (batch, Nx, d) the input value
# code (batch, Nx, d) the input features
# x_in (batch, Nx, 2) the input mesh (sampling mesh)
# xi (batch, xi1, xi2, 2) the computational mesh (uniform)
        # x_out (batch, Nx, 2) the output mesh (query mesh)
        if self.is_mesh and x_in is None:
            x_in = u
        if self.is_mesh and x_out is None:
            x_out = u
grid = self.get_grid([u.shape[0], self.s1, self.s2], u.device).permute(0, 3, 1, 2)
u = self.fc0(u)
u = u.permute(0, 2, 1)
uc1 = self.conv0(u, x_in=x_in, iphi=iphi, code=code)
uc3 = self.b0(grid)
uc = uc1 + uc3
uc = F.gelu(uc)
uc1 = self.conv1(uc)
uc2 = self.w1(uc)
uc3 = self.b1(grid)
uc = uc1 + uc2 + uc3
uc = F.gelu(uc)
uc1 = self.conv2(uc)
uc2 = self.w2(uc)
uc3 = self.b2(grid)
uc = uc1 + uc2 + uc3
uc = F.gelu(uc)
uc1 = self.conv3(uc)
uc2 = self.w3(uc)
uc3 = self.b3(grid)
uc = uc1 + uc2 + uc3
uc = F.gelu(uc)
u = self.conv4(uc, x_out=x_out, iphi=iphi, code=code)
u3 = self.b4(x_out.permute(0, 2, 1))
u = u + u3
u = u.permute(0, 2, 1)
u = self.fc1(u)
u = F.gelu(u)
u = self.fc2(u)
return u
def get_grid(self, shape, device):
batchsize, size_x, size_y = shape[0], shape[1], shape[2]
gridx = torch.tensor(np.linspace(0, 1, size_x), dtype=torch.float)
gridx = gridx.reshape(1, size_x, 1, 1).repeat([batchsize, 1, size_y, 1])
gridy = torch.tensor(np.linspace(0, 1, size_y), dtype=torch.float)
gridy = gridy.reshape(1, 1, size_y, 1).repeat([batchsize, size_x, 1, 1])
return torch.cat((gridx, gridy), dim=-1).to(device)
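if __name__ == "__main__":
    # Hedged usage sketch (not part of the original file): Geo-FNO can be queried on an
    # output mesh that differs from the input sampling mesh. A CUDA device is assumed
    # (IPHI allocates its buffers on "cuda"); the args and shapes below are illustrative only.
    from types import SimpleNamespace
    args = SimpleNamespace(in_dim=2, out_dim=1, d_model=32)
    model, iphi = Model(args).cuda(), IPHI().cuda()
    mesh_in = torch.rand(2, 972, 2).cuda()    # sampling mesh (also the input values here)
    mesh_out = torch.rand(2, 500, 2).cuda()   # query mesh
    out = model(mesh_in, x_in=mesh_in, x_out=mesh_out, iphi=iphi)
    print(out.shape)                          # expected: torch.Size([2, 500, 1])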
| 11,055 | 36.733788 | 130 | py |
Latent-Spectral-Models | Latent-Spectral-Models-main/models/FNO_3D.py | """
@author: Zongyi Li
modified by Haixu Wu to adapt to this code base
"""
import torch.nn.functional as F
import torch.nn as nn
import torch
import numpy as np
import math
################################################################
# 3d fourier layers
################################################################
class SpectralConv3d(nn.Module):
def __init__(self, in_channels, out_channels, modes1, modes2, modes3):
super(SpectralConv3d, self).__init__()
"""
3D Fourier layer. It does FFT, linear transform, and Inverse FFT.
"""
self.in_channels = in_channels
self.out_channels = out_channels
self.modes1 = modes1 # Number of Fourier modes to multiply, at most floor(N/2) + 1
self.modes2 = modes2
self.modes3 = modes3
self.scale = (1 / (in_channels * out_channels))
self.weights1 = nn.Parameter(
self.scale * torch.rand(in_channels, out_channels, self.modes1, self.modes2, self.modes3,
dtype=torch.cfloat))
self.weights2 = nn.Parameter(
self.scale * torch.rand(in_channels, out_channels, self.modes1, self.modes2, self.modes3,
dtype=torch.cfloat))
self.weights3 = nn.Parameter(
self.scale * torch.rand(in_channels, out_channels, self.modes1, self.modes2, self.modes3,
dtype=torch.cfloat))
self.weights4 = nn.Parameter(
self.scale * torch.rand(in_channels, out_channels, self.modes1, self.modes2, self.modes3,
dtype=torch.cfloat))
# Complex multiplication
def compl_mul3d(self, input, weights):
# (batch, in_channel, x,y,t ), (in_channel, out_channel, x,y,t) -> (batch, out_channel, x,y,t)
return torch.einsum("bixyz,ioxyz->boxyz", input, weights)
def forward(self, x):
batchsize = x.shape[0]
        # Compute Fourier coefficients up to factor of e^(- something constant)
x_ft = torch.fft.rfftn(x, dim=[-3, -2, -1])
# Multiply relevant Fourier modes
out_ft = torch.zeros(batchsize, self.out_channels, x.size(-3), x.size(-2), x.size(-1) // 2 + 1,
dtype=torch.cfloat, device=x.device)
out_ft[:, :, :self.modes1, :self.modes2, :self.modes3] = \
self.compl_mul3d(x_ft[:, :, :self.modes1, :self.modes2, :self.modes3], self.weights1)
out_ft[:, :, -self.modes1:, :self.modes2, :self.modes3] = \
self.compl_mul3d(x_ft[:, :, -self.modes1:, :self.modes2, :self.modes3], self.weights2)
out_ft[:, :, :self.modes1, -self.modes2:, :self.modes3] = \
self.compl_mul3d(x_ft[:, :, :self.modes1, -self.modes2:, :self.modes3], self.weights3)
out_ft[:, :, -self.modes1:, -self.modes2:, :self.modes3] = \
self.compl_mul3d(x_ft[:, :, -self.modes1:, -self.modes2:, :self.modes3], self.weights4)
# Return to physical space
x = torch.fft.irfftn(out_ft, s=(x.size(-3), x.size(-2), x.size(-1)))
return x
class Model(nn.Module):
def __init__(self, args):
super(Model, self).__init__()
in_channels = args.in_dim
out_channels = args.out_dim
self.modes1 = args.num_basis
self.modes2 = args.num_basis
self.modes3 = args.num_basis // 2
self.width = args.d_model
self.padding = [int(x) for x in args.padding.split(',')]
self.conv0 = SpectralConv3d(self.width, self.width, self.modes1, self.modes2, self.modes3)
self.conv1 = SpectralConv3d(self.width, self.width, self.modes1, self.modes2, self.modes3)
self.conv2 = SpectralConv3d(self.width, self.width, self.modes1, self.modes2, self.modes3)
self.conv3 = SpectralConv3d(self.width, self.width, self.modes1, self.modes2, self.modes3)
self.w0 = nn.Conv3d(self.width, self.width, 1)
self.w1 = nn.Conv3d(self.width, self.width, 1)
self.w2 = nn.Conv3d(self.width, self.width, 1)
self.w3 = nn.Conv3d(self.width, self.width, 1)
self.bn0 = torch.nn.BatchNorm3d(self.width)
self.bn1 = torch.nn.BatchNorm3d(self.width)
self.bn2 = torch.nn.BatchNorm3d(self.width)
self.bn3 = torch.nn.BatchNorm3d(self.width)
self.fc0 = nn.Linear(in_channels + 3, self.width)
self.fc1 = nn.Linear(self.width, 128)
self.fc2 = nn.Linear(128, out_channels)
def forward(self, x):
grid = self.get_grid(x.shape, x.device)
x = torch.cat((x, grid), dim=-1)
x = self.fc0(x)
x = x.permute(0, 4, 1, 2, 3)
if not all(item == 0 for item in self.padding):
x = F.pad(x, [0, self.padding[0], 0, self.padding[1], 0, self.padding[2]])
x1 = self.conv0(x)
x2 = self.w0(x)
x = x1 + x2
x = F.gelu(x)
x1 = self.conv1(x)
x2 = self.w1(x)
x = x1 + x2
x = F.gelu(x)
x1 = self.conv2(x)
x2 = self.w2(x)
x = x1 + x2
x = F.gelu(x)
x1 = self.conv3(x)
x2 = self.w3(x)
x = x1 + x2
if not all(item == 0 for item in self.padding):
x = x[..., :-self.padding[2], :-self.padding[1], :-self.padding[0]]
        x = x.permute(0, 2, 3, 4, 1) # back to (batch, x, y, t, channels)
x = self.fc1(x)
x = F.gelu(x)
x = self.fc2(x)
return x
def get_grid(self, shape, device):
batchsize, size_x, size_y, size_z = shape[0], shape[1], shape[2], shape[3]
gridx = torch.tensor(np.linspace(0, 1, size_x), dtype=torch.float)
gridx = gridx.reshape(1, size_x, 1, 1, 1).repeat([batchsize, 1, size_y, size_z, 1])
gridy = torch.tensor(np.linspace(0, 1, size_y), dtype=torch.float)
gridy = gridy.reshape(1, 1, size_y, 1, 1).repeat([batchsize, size_x, 1, size_z, 1])
gridz = torch.tensor(np.linspace(0, 1, size_z), dtype=torch.float)
gridz = gridz.reshape(1, 1, 1, size_z, 1).repeat([batchsize, size_x, size_y, 1, 1])
return torch.cat((gridx, gridy, gridz), dim=-1).to(device)
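if __name__ == "__main__":
    # Hedged usage sketch (not part of the original file): the 3D FNO maps a
    # (batch, x, y, t, in_dim) tensor -- e.g. the first T_in Navier-Stokes frames stacked
    # along the channel dimension -- to a field on the same grid. Runs on CPU; the sizes
    # below are illustrative only.
    from types import SimpleNamespace
    args = SimpleNamespace(in_dim=10, out_dim=1, num_basis=12, d_model=20, padding='0,0,0')
    model = Model(args)
    x = torch.rand(2, 32, 32, 16, 10)
    print(model(x).shape)   # expected: torch.Size([2, 32, 32, 16, 1])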
| 6,128 | 41.86014 | 103 | py |
Latent-Spectral-Models | Latent-Spectral-Models-main/models/FNO_2D.py | """
@author: Zongyi Li
modified by Haixu Wu to adapt to this code base
"""
import torch.nn.functional as F
import torch.nn as nn
import torch
import numpy as np
import math
################################################################
# fourier layer
################################################################
class SpectralConv2d(nn.Module):
def __init__(self, in_channels, out_channels, modes1, modes2):
super(SpectralConv2d, self).__init__()
"""
2D Fourier layer. It does FFT, linear transform, and Inverse FFT.
"""
self.in_channels = in_channels
self.out_channels = out_channels
self.modes1 = modes1 # Number of Fourier modes to multiply, at most floor(N/2) + 1
self.modes2 = modes2
self.scale = (1 / (in_channels * out_channels))
self.weights1 = nn.Parameter(
self.scale * torch.rand(in_channels, out_channels, self.modes1, self.modes2, dtype=torch.cfloat))
self.weights2 = nn.Parameter(
self.scale * torch.rand(in_channels, out_channels, self.modes1, self.modes2, dtype=torch.cfloat))
# Complex multiplication
def compl_mul2d(self, input, weights):
# (batch, in_channel, x,y ), (in_channel, out_channel, x,y) -> (batch, out_channel, x,y)
return torch.einsum("bixy,ioxy->boxy", input, weights)
def forward(self, x):
batchsize = x.shape[0]
        # Compute Fourier coefficients up to factor of e^(- something constant)
x_ft = torch.fft.rfft2(x)
# Multiply relevant Fourier modes
out_ft = torch.zeros(batchsize, self.out_channels, x.size(-2), x.size(-1) // 2 + 1, dtype=torch.cfloat,
device=x.device)
out_ft[:, :, :self.modes1, :self.modes2] = \
self.compl_mul2d(x_ft[:, :, :self.modes1, :self.modes2], self.weights1)
out_ft[:, :, -self.modes1:, :self.modes2] = \
self.compl_mul2d(x_ft[:, :, -self.modes1:, :self.modes2], self.weights2)
# Return to physical space
x = torch.fft.irfft2(out_ft, s=(x.size(-2), x.size(-1)))
return x
class Model(nn.Module):
def __init__(self, args):
super(Model, self).__init__()
in_channels = args.in_dim
out_channels = args.out_dim
self.modes1 = args.num_basis
self.modes2 = args.num_basis
self.width = args.d_model
self.padding = [int(x) for x in args.padding.split(',')]
self.conv0 = SpectralConv2d(self.width, self.width, self.modes1, self.modes2)
self.conv1 = SpectralConv2d(self.width, self.width, self.modes1, self.modes2)
self.conv2 = SpectralConv2d(self.width, self.width, self.modes1, self.modes2)
self.conv3 = SpectralConv2d(self.width, self.width, self.modes1, self.modes2)
self.w0 = nn.Conv2d(self.width, self.width, 1)
self.w1 = nn.Conv2d(self.width, self.width, 1)
self.w2 = nn.Conv2d(self.width, self.width, 1)
self.w3 = nn.Conv2d(self.width, self.width, 1)
        self.fc0 = nn.Linear(in_channels + 2, self.width) # input channel is in_dim + 2: (a(x, y), x, y)
self.fc1 = nn.Linear(self.width, 128)
self.fc2 = nn.Linear(128, out_channels)
def forward(self, x):
grid = self.get_grid(x.shape, x.device)
x = torch.cat((x, grid), dim=-1)
x = self.fc0(x)
x = x.permute(0, 3, 1, 2)
if not all(item == 0 for item in self.padding):
x = F.pad(x, [0, self.padding[0], 0, self.padding[1]])
x1 = self.conv0(x)
x2 = self.w0(x)
x = x1 + x2
x = F.gelu(x)
x1 = self.conv1(x)
x2 = self.w1(x)
x = x1 + x2
x = F.gelu(x)
x1 = self.conv2(x)
x2 = self.w2(x)
x = x1 + x2
x = F.gelu(x)
x1 = self.conv3(x)
x2 = self.w3(x)
x = x1 + x2
if not all(item == 0 for item in self.padding):
x = x[..., :-self.padding[1], :-self.padding[0]]
x = x.permute(0, 2, 3, 1)
x = self.fc1(x)
x = F.gelu(x)
x = self.fc2(x)
return x
def get_grid(self, shape, device):
batchsize, size_x, size_y = shape[0], shape[1], shape[2]
gridx = torch.tensor(np.linspace(0, 1, size_x), dtype=torch.float)
gridx = gridx.reshape(1, size_x, 1, 1).repeat([batchsize, 1, size_y, 1])
gridy = torch.tensor(np.linspace(0, 1, size_y), dtype=torch.float)
gridy = gridy.reshape(1, 1, size_y, 1).repeat([batchsize, size_x, 1, 1])
return torch.cat((gridx, gridy), dim=-1).to(device)
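if __name__ == "__main__":
    # Hedged usage sketch (not part of the original file): a Darcy-flow-style input, i.e.
    # a scalar coefficient field a(x, y) on a regular grid, mapped to a scalar output
    # field of the same resolution. Runs on CPU; sizes are illustrative only.
    from types import SimpleNamespace
    args = SimpleNamespace(in_dim=1, out_dim=1, num_basis=12, d_model=32, padding='0,0')
    model = Model(args)
    x = torch.rand(4, 64, 64, 1)
    print(model(x).shape)   # expected: torch.Size([4, 64, 64, 1])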
| 4,586 | 36.598361 | 111 | py |
Latent-Spectral-Models | Latent-Spectral-Models-main/models/LSM_2D.py | """
@author: Haixu Wu
"""
import torch.nn.functional as F
import torch.nn as nn
import torch
import numpy as np
import math
################################################################
# Multiscale modules 2D
################################################################
class DoubleConv(nn.Module):
"""(convolution => [BN] => ReLU) * 2"""
def __init__(self, in_channels, out_channels, mid_channels=None):
super().__init__()
if not mid_channels:
mid_channels = out_channels
self.double_conv = nn.Sequential(
nn.Conv2d(in_channels, mid_channels, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(mid_channels),
nn.ReLU(inplace=True),
nn.Conv2d(mid_channels, out_channels, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True)
)
def forward(self, x):
return self.double_conv(x)
class Down(nn.Module):
"""Downscaling with maxpool then double conv"""
def __init__(self, in_channels, out_channels):
super().__init__()
self.maxpool_conv = nn.Sequential(
nn.MaxPool2d(2),
DoubleConv(in_channels, out_channels)
)
def forward(self, x):
return self.maxpool_conv(x)
class Up(nn.Module):
"""Upscaling then double conv"""
def __init__(self, in_channels, out_channels, bilinear=True):
super().__init__()
# if bilinear, use the normal convolutions to reduce the number of channels
if bilinear:
self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
self.conv = DoubleConv(in_channels, out_channels, in_channels // 2)
else:
self.up = nn.ConvTranspose2d(in_channels, in_channels // 2, kernel_size=2, stride=2)
self.conv = DoubleConv(in_channels, out_channels)
def forward(self, x1, x2):
x1 = self.up(x1)
# input is CHW
diffY = x2.size()[2] - x1.size()[2]
diffX = x2.size()[3] - x1.size()[3]
x1 = F.pad(x1, [diffX // 2, diffX - diffX // 2,
diffY // 2, diffY - diffY // 2])
# if you have padding issues, see
# https://github.com/HaiyongJiang/U-Net-Pytorch-Unstructured-Buggy/commit/0e854509c2cea854e247a9c615f175f76fbb2e3a
# https://github.com/xiaopeng-liao/Pytorch-UNet/commit/8ebac70e633bac59fc22bb5195e513d5832fb3bd
x = torch.cat([x2, x1], dim=1)
return self.conv(x)
class OutConv(nn.Module):
def __init__(self, in_channels, out_channels):
super(OutConv, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1)
def forward(self, x):
return self.conv(x)
################################################################
# Patchify and Neural Spectral Block
################################################################
class NeuralSpectralBlock2d(nn.Module):
def __init__(self, width, num_basis, patch_size=[3, 3], num_token=4):
super(NeuralSpectralBlock2d, self).__init__()
self.patch_size = patch_size
self.width = width
self.num_basis = num_basis
# basis
self.modes_list = (1.0 / float(num_basis)) * torch.tensor([i for i in range(num_basis)],
dtype=torch.float).cuda()
self.weights = nn.Parameter(
(1 / (width)) * torch.rand(width, self.num_basis * 2, dtype=torch.float))
# latent
self.head = 8
self.num_token = num_token
self.latent = nn.Parameter(
(1 / (width)) * torch.rand(self.head, self.num_token, width // self.head, dtype=torch.float))
self.encoder_attn = nn.Conv2d(self.width, self.width * 2, kernel_size=1, stride=1)
self.decoder_attn = nn.Conv2d(self.width, self.width, kernel_size=1, stride=1)
self.softmax = nn.Softmax(dim=-1)
def self_attn(self, q, k, v):
# q,k,v: B H L C/H
attn = self.softmax(torch.einsum("bhlc,bhsc->bhls", q, k))
return torch.einsum("bhls,bhsc->bhlc", attn, v)
def latent_encoder_attn(self, x):
# x: B C H W
B, C, H, W = x.shape
L = H * W
latent_token = self.latent[None, :, :, :].repeat(B, 1, 1, 1)
x_tmp = self.encoder_attn(x).view(B, C * 2, -1).permute(0, 2, 1).contiguous() \
.view(B, L, self.head, C // self.head, 2).permute(4, 0, 2, 1, 3).contiguous()
latent_token = self.self_attn(latent_token, x_tmp[0], x_tmp[1]) + latent_token
latent_token = latent_token.permute(0, 1, 3, 2).contiguous().view(B, C, self.num_token)
return latent_token
def latent_decoder_attn(self, x, latent_token):
        # x: B C H W, latent_token: B C num_token
x_init = x
B, C, H, W = x.shape
L = H * W
latent_token = latent_token.view(B, self.head, C // self.head, self.num_token).permute(0, 1, 3, 2).contiguous()
x_tmp = self.decoder_attn(x).view(B, C, -1).permute(0, 2, 1).contiguous() \
.view(B, L, self.head, C // self.head).permute(0, 2, 1, 3).contiguous()
x = self.self_attn(x_tmp, latent_token, latent_token)
        x = x.permute(0, 1, 3, 2).contiguous().view(B, C, H, W) + x_init # back to B C H W
return x
def get_basis(self, x):
# x: B C N
x_sin = torch.sin(self.modes_list[None, None, None, :] * x[:, :, :, None] * math.pi)
x_cos = torch.cos(self.modes_list[None, None, None, :] * x[:, :, :, None] * math.pi)
return torch.cat([x_sin, x_cos], dim=-1)
def compl_mul2d(self, input, weights):
return torch.einsum("bilm,im->bil", input, weights)
def forward(self, x):
B, C, H, W = x.shape
# patchify
x = x.view(x.shape[0], x.shape[1],
x.shape[2] // self.patch_size[0], self.patch_size[0], x.shape[3] // self.patch_size[1],
self.patch_size[1]).contiguous() \
.permute(0, 2, 4, 1, 3, 5).contiguous() \
.view(x.shape[0] * (x.shape[2] // self.patch_size[0]) * (x.shape[3] // self.patch_size[1]), x.shape[1],
self.patch_size[0],
self.patch_size[1])
# Neural Spectral
# (1) encoder
latent_token = self.latent_encoder_attn(x)
# (2) transition
latent_token_modes = self.get_basis(latent_token)
latent_token = self.compl_mul2d(latent_token_modes, self.weights) + latent_token
# (3) decoder
x = self.latent_decoder_attn(x, latent_token)
# de-patchify
x = x.view(B, (H // self.patch_size[0]), (W // self.patch_size[1]), C, self.patch_size[0],
self.patch_size[1]).permute(0, 3, 1, 4, 2, 5).contiguous() \
.view(B, C, H, W).contiguous()
return x
class Model(nn.Module):
def __init__(self, args, bilinear=True):
super(Model, self).__init__()
in_channels = args.in_dim
out_channels = args.out_dim
width = args.d_model
num_token = args.num_token
num_basis = args.num_basis
patch_size = [int(x) for x in args.patch_size.split(',')]
padding = [int(x) for x in args.padding.split(',')]
# multiscale modules
self.inc = DoubleConv(width, width)
self.down1 = Down(width, width * 2)
self.down2 = Down(width * 2, width * 4)
self.down3 = Down(width * 4, width * 8)
factor = 2 if bilinear else 1
self.down4 = Down(width * 8, width * 16 // factor)
self.up1 = Up(width * 16, width * 8 // factor, bilinear)
self.up2 = Up(width * 8, width * 4 // factor, bilinear)
self.up3 = Up(width * 4, width * 2 // factor, bilinear)
self.up4 = Up(width * 2, width, bilinear)
self.outc = OutConv(width, width)
# Patchified Neural Spectral Blocks
self.process1 = NeuralSpectralBlock2d(width, num_basis, patch_size, num_token)
self.process2 = NeuralSpectralBlock2d(width * 2, num_basis, patch_size, num_token)
self.process3 = NeuralSpectralBlock2d(width * 4, num_basis, patch_size, num_token)
self.process4 = NeuralSpectralBlock2d(width * 8, num_basis, patch_size, num_token)
self.process5 = NeuralSpectralBlock2d(width * 16 // factor, num_basis, patch_size, num_token)
# projectors
self.padding = padding
self.fc0 = nn.Linear(in_channels + 2, width)
self.fc1 = nn.Linear(width, 128)
self.fc2 = nn.Linear(128, out_channels)
def forward(self, x):
grid = self.get_grid(x.shape, x.device)
x = torch.cat((x, grid), dim=-1)
x = self.fc0(x)
x = x.permute(0, 3, 1, 2)
if not all(item == 0 for item in self.padding):
x = F.pad(x, [0, self.padding[0], 0, self.padding[1]])
x1 = self.inc(x)
x2 = self.down1(x1)
x3 = self.down2(x2)
x4 = self.down3(x3)
x5 = self.down4(x4)
x = self.up1(self.process5(x5), self.process4(x4))
x = self.up2(x, self.process3(x3))
x = self.up3(x, self.process2(x2))
x = self.up4(x, self.process1(x1))
x = self.outc(x)
if not all(item == 0 for item in self.padding):
x = x[..., :-self.padding[1], :-self.padding[0]]
x = x.permute(0, 2, 3, 1)
x = self.fc1(x)
x = F.gelu(x)
x = self.fc2(x)
return x
def get_grid(self, shape, device):
batchsize, size_x, size_y = shape[0], shape[1], shape[2]
gridx = torch.tensor(np.linspace(0, 1, size_x), dtype=torch.float)
gridx = gridx.reshape(1, size_x, 1, 1).repeat([batchsize, 1, size_y, 1])
gridy = torch.tensor(np.linspace(0, 1, size_y), dtype=torch.float)
gridy = gridy.reshape(1, 1, size_y, 1).repeat([batchsize, size_x, 1, 1])
return torch.cat((gridx, gridy), dim=-1).to(device)
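if __name__ == "__main__":
    # Hedged usage sketch (not part of the original file): assumes a CUDA device, since
    # NeuralSpectralBlock2d builds its basis on "cuda". The padded grid has to stay
    # divisible by patch_size through the four U-Net downsamplings:
    # 85 + 11 = 96 -> 48 -> 24 -> 12 -> 6, all multiples of 3. Sizes are illustrative,
    # not necessarily the paper's configuration.
    from types import SimpleNamespace
    args = SimpleNamespace(in_dim=1, out_dim=1, d_model=32, num_token=4,
                           num_basis=12, patch_size='3,3', padding='11,11')
    model = Model(args).cuda()
    x = torch.rand(2, 85, 85, 1).cuda()
    print(model(x).shape)   # expected: torch.Size([2, 85, 85, 1])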
| 9,905 | 40.103734 | 122 | py |
Latent-Spectral-Models | Latent-Spectral-Models-main/models/LSM_3D.py | """
@author: Haixu Wu
"""
import torch.nn.functional as F
import torch.nn as nn
import torch
import numpy as np
import math
################################################################
# Multiscale modules 3D
################################################################
class DoubleConv(nn.Module):
"""(convolution => [BN] => ReLU) * 2"""
def __init__(self, in_channels, out_channels, mid_channels=None):
super().__init__()
if not mid_channels:
mid_channels = out_channels
self.double_conv = nn.Sequential(
nn.Conv3d(in_channels, mid_channels, kernel_size=3, padding=1, bias=False),
nn.BatchNorm3d(mid_channels),
nn.ReLU(inplace=True),
nn.Conv3d(mid_channels, out_channels, kernel_size=3, padding=1, bias=False),
nn.BatchNorm3d(out_channels),
nn.ReLU(inplace=True)
)
def forward(self, x):
return self.double_conv(x)
class Down(nn.Module):
"""Downscaling with maxpool then double conv"""
def __init__(self, in_channels, out_channels):
super().__init__()
self.maxpool_conv = nn.Sequential(
nn.MaxPool3d(2),
DoubleConv(in_channels, out_channels)
)
def forward(self, x):
return self.maxpool_conv(x)
class Up(nn.Module):
"""Upscaling then double conv"""
def __init__(self, in_channels, out_channels, bilinear=True):
super().__init__()
# if bilinear, use the normal convolutions to reduce the number of channels
if bilinear:
self.up = nn.Upsample(scale_factor=2, mode='trilinear', align_corners=True)
self.conv = DoubleConv(in_channels, out_channels, in_channels // 2)
else:
self.up = nn.ConvTranspose3d(in_channels, in_channels // 2, kernel_size=2, stride=2)
self.conv = DoubleConv(in_channels, out_channels)
def forward(self, x1, x2):
x1 = self.up(x1)
x = torch.cat([x2, x1], dim=1)
return self.conv(x)
class OutConv(nn.Module):
def __init__(self, in_channels, out_channels):
super(OutConv, self).__init__()
self.conv = nn.Conv3d(in_channels, out_channels, kernel_size=1)
def forward(self, x):
return self.conv(x)
################################################################
# Patchify and Neural Spectral Block
################################################################
class NeuralSpectralBlock2d(nn.Module):
def __init__(self, width, num_basis, patch_size=[8, 8, 4], num_token=4):
super(NeuralSpectralBlock2d, self).__init__()
self.patch_size = patch_size
self.width = width
self.num_basis = num_basis
# basis
self.modes_list = (1.0 / float(num_basis)) * torch.tensor([i for i in range(num_basis)],
dtype=torch.float).cuda()
self.weights = nn.Parameter(
(1 / (width)) * torch.rand(width, self.num_basis * 2, dtype=torch.float))
# latent
self.head = 8
self.num_token = num_token
self.latent = nn.Parameter(
(1 / (width)) * torch.rand(self.head, self.num_token, width // self.head, dtype=torch.float))
self.encoder_attn = nn.Conv3d(self.width, self.width * 2, kernel_size=1, stride=1)
self.decoder_attn = nn.Conv3d(self.width, self.width, kernel_size=1, stride=1)
self.softmax = nn.Softmax(dim=-1)
def self_attn(self, q, k, v):
# q,k,v: B H L C/H
attn = self.softmax(torch.einsum("bhlc,bhsc->bhls", q, k))
return torch.einsum("bhls,bhsc->bhlc", attn, v)
def latent_encoder_attn(self, x):
        # x: B C H W T
B, C, H, W, T = x.shape
L = H * W * T
latent_token = self.latent[None, :, :, :].repeat(B, 1, 1, 1)
x_tmp = self.encoder_attn(x).view(B, C * 2, -1).permute(0, 2, 1).contiguous() \
.view(B, L, self.head, C // self.head, 2).permute(4, 0, 2, 1, 3).contiguous()
latent_token = self.self_attn(latent_token, x_tmp[0], x_tmp[1]) + latent_token
latent_token = latent_token.permute(0, 1, 3, 2).contiguous().view(B, C, self.num_token)
return latent_token
def latent_decoder_attn(self, x, latent_token):
        # x: B C H W T, latent_token: B C num_token
x_init = x
B, C, H, W, T = x.shape
L = H * W * T
latent_token = latent_token.view(B, self.head, C // self.head, self.num_token).permute(0, 1, 3, 2).contiguous()
x_tmp = self.decoder_attn(x).view(B, C, -1).permute(0, 2, 1).contiguous() \
.view(B, L, self.head, C // self.head).permute(0, 2, 1, 3).contiguous()
x = self.self_attn(x_tmp, latent_token, latent_token)
        x = x.permute(0, 1, 3, 2).contiguous().view(B, C, H, W, T) + x_init # back to B C H W T
return x
def get_basis(self, x):
# x: B C N
x_sin = torch.sin(self.modes_list[None, None, None, :] * x[:, :, :, None] * math.pi)
x_cos = torch.cos(self.modes_list[None, None, None, :] * x[:, :, :, None] * math.pi)
return torch.cat([x_sin, x_cos], dim=-1)
def compl_mul2d(self, input, weights):
return torch.einsum("bilm,im->bil", input, weights)
def forward(self, x):
B, C, H, W, T = x.shape
# patchify
x = x.view(x.shape[0], x.shape[1],
x.shape[2] // self.patch_size[0], self.patch_size[0], x.shape[3] // self.patch_size[1], self.patch_size[1],
x.shape[4] // self.patch_size[2], self.patch_size[2]).contiguous() \
.permute(0, 2, 4, 6, 1, 3, 5, 7).contiguous() \
.view(x.shape[0] * (x.shape[2] // self.patch_size[0]) * (x.shape[3] // self.patch_size[1]) * (
x.shape[4] // self.patch_size[2]), x.shape[1], self.patch_size[0], self.patch_size[1], self.patch_size[2])
# Neural Spectral
# (1) encoder
latent_token = self.latent_encoder_attn(x)
# (2) transition
latent_token_modes = self.get_basis(latent_token)
latent_token = self.compl_mul2d(latent_token_modes, self.weights) + latent_token
# (3) decoder
x = self.latent_decoder_attn(x, latent_token)
# de-patchify
x = x.view(B, (H // self.patch_size[0]), (W // self.patch_size[1]), (T // self.patch_size[2]), C,
self.patch_size[0], self.patch_size[1], self.patch_size[2]).permute(0, 4, 1, 5, 2, 6, 3, 7).contiguous() \
.view(B, C, H, W, T).contiguous()
return x
class Model(nn.Module):
def __init__(self, args, bilinear=True):
super(Model, self).__init__()
in_channels = args.in_dim
out_channels = args.out_dim
width = args.d_model
num_token = args.num_token
num_basis = args.num_basis
patch_size = [int(x) for x in args.patch_size.split(',')]
padding = [int(x) for x in args.padding.split(',')]
# multiscale modules
self.inc = DoubleConv(width, width)
self.down1 = Down(width, width * 2)
self.down2 = Down(width * 2, width * 4)
self.down3 = Down(width * 4, width * 8)
factor = 2 if bilinear else 1
self.down4 = Down(width * 8, width * 16 // factor)
self.up1 = Up(width * 16, width * 8 // factor, bilinear)
self.up2 = Up(width * 8, width * 4 // factor, bilinear)
self.up3 = Up(width * 4, width * 2 // factor, bilinear)
self.up4 = Up(width * 2, width, bilinear)
self.outc = OutConv(width, width)
# Patchified Neural Spectral Blocks
self.process1 = NeuralSpectralBlock2d(width, num_basis, patch_size, num_token)
self.process2 = NeuralSpectralBlock2d(width * 2, num_basis, patch_size, num_token)
self.process3 = NeuralSpectralBlock2d(width * 4, num_basis, patch_size, num_token)
self.process4 = NeuralSpectralBlock2d(width * 8, num_basis, patch_size, num_token)
self.process5 = NeuralSpectralBlock2d(width * 16 // factor, num_basis, patch_size, num_token)
# projectors
self.padding = padding
self.fc0 = nn.Linear(in_channels + 3, width)
self.fc1 = nn.Linear(width, 128)
self.fc2 = nn.Linear(128, out_channels)
def forward(self, x):
grid = self.get_grid(x.shape, x.device)
x = torch.cat((x, grid), dim=-1)
x = self.fc0(x)
x = x.permute(0, 4, 1, 2, 3)
if not all(item == 0 for item in self.padding):
x = F.pad(x, [0, self.padding[0], 0, self.padding[1], 0, self.padding[2]])
x1 = self.inc(x)
x2 = self.down1(x1)
x3 = self.down2(x2)
x4 = self.down3(x3)
x5 = self.down4(x4)
x = self.up1(self.process5(x5), self.process4(x4))
x = self.up2(x, self.process3(x3))
x = self.up3(x, self.process2(x2))
x = self.up4(x, self.process1(x1))
x = self.outc(x)
if not all(item == 0 for item in self.padding):
x = x[..., :-self.padding[2], :-self.padding[1], :-self.padding[0]]
x = x.permute(0, 2, 3, 4, 1)
x = self.fc1(x)
x = F.gelu(x)
x = self.fc2(x)
return x
def get_grid(self, shape, device):
batchsize, size_x, size_y, size_z = shape[0], shape[1], shape[2], shape[3]
gridx = torch.tensor(np.linspace(0, 1, size_x), dtype=torch.float)
gridx = gridx.reshape(1, size_x, 1, 1, 1).repeat([batchsize, 1, size_y, size_z, 1])
gridy = torch.tensor(np.linspace(0, 1, size_y), dtype=torch.float)
gridy = gridy.reshape(1, 1, size_y, 1, 1).repeat([batchsize, size_x, 1, size_z, 1])
gridz = torch.tensor(np.linspace(0, 1, size_z), dtype=torch.float)
gridz = gridz.reshape(1, 1, 1, size_z, 1).repeat([batchsize, size_x, size_y, 1, 1])
return torch.cat((gridx, gridy, gridz), dim=-1).to(device)
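if __name__ == "__main__":
    # Hedged usage sketch (not part of the original file): assumes a CUDA device. Every
    # spatial/temporal size must stay divisible by the patch size through the four 3D
    # downsamplings, so the shapes below use powers of two with a 2x2x2 patch; they are
    # illustrative only, not the paper's configuration.
    from types import SimpleNamespace
    args = SimpleNamespace(in_dim=10, out_dim=1, d_model=16, num_token=4,
                           num_basis=12, patch_size='2,2,2', padding='0,0,0')
    model = Model(args).cuda()
    x = torch.rand(1, 64, 64, 32, 10).cuda()   # (batch, x, y, t, in_dim)
    print(model(x).shape)                      # expected: torch.Size([1, 64, 64, 32, 1])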
| 9,849 | 41.094017 | 126 | py |
Latent-Spectral-Models | Latent-Spectral-Models-main/utils/adam.py | import math
import torch
from torch import Tensor
from typing import List, Optional
from torch.optim.optimizer import Optimizer
def adam(params: List[Tensor],
grads: List[Tensor],
exp_avgs: List[Tensor],
exp_avg_sqs: List[Tensor],
max_exp_avg_sqs: List[Tensor],
state_steps: List[int],
*,
amsgrad: bool,
beta1: float,
beta2: float,
lr: float,
weight_decay: float,
eps: float):
r"""Functional API that performs Adam algorithm computation.
See :class:`~torch.optim.Adam` for details.
"""
for i, param in enumerate(params):
grad = grads[i]
exp_avg = exp_avgs[i]
exp_avg_sq = exp_avg_sqs[i]
step = state_steps[i]
bias_correction1 = 1 - beta1 ** step
bias_correction2 = 1 - beta2 ** step
if weight_decay != 0:
grad = grad.add(param, alpha=weight_decay)
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
exp_avg_sq.mul_(beta2).addcmul_(grad, grad.conj(), value=1 - beta2)
if amsgrad:
# Maintains the maximum of all 2nd moment running avg. till now
torch.maximum(max_exp_avg_sqs[i], exp_avg_sq, out=max_exp_avg_sqs[i])
# Use the max. for normalizing running avg. of gradient
denom = (max_exp_avg_sqs[i].sqrt() / math.sqrt(bias_correction2)).add_(eps)
else:
denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(eps)
step_size = lr / bias_correction1
param.addcdiv_(exp_avg, denom, value=-step_size)
class Adam(Optimizer):
r"""Implements Adam algorithm.
It has been proposed in `Adam: A Method for Stochastic Optimization`_.
The implementation of the L2 penalty follows changes proposed in
`Decoupled Weight Decay Regularization`_.
Args:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
(default: False)
.. _Adam\: A Method for Stochastic Optimization:
https://arxiv.org/abs/1412.6980
.. _Decoupled Weight Decay Regularization:
https://arxiv.org/abs/1711.05101
.. _On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
"""
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
weight_decay=0, amsgrad=False):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
if not 0.0 <= weight_decay:
raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
defaults = dict(lr=lr, betas=betas, eps=eps,
weight_decay=weight_decay, amsgrad=amsgrad)
super(Adam, self).__init__(params, defaults)
def __setstate__(self, state):
super(Adam, self).__setstate__(state)
for group in self.param_groups:
group.setdefault('amsgrad', False)
@torch.no_grad()
def step(self, closure=None):
"""Performs a single optimization step.
Args:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
params_with_grad = []
grads = []
exp_avgs = []
exp_avg_sqs = []
max_exp_avg_sqs = []
state_steps = []
beta1, beta2 = group['betas']
for p in group['params']:
if p.grad is not None:
params_with_grad.append(p)
if p.grad.is_sparse:
raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
grads.append(p.grad)
state = self.state[p]
# Lazy state initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
if group['amsgrad']:
# Maintains max of all exp. moving avg. of sq. grad. values
state['max_exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
exp_avgs.append(state['exp_avg'])
exp_avg_sqs.append(state['exp_avg_sq'])
if group['amsgrad']:
max_exp_avg_sqs.append(state['max_exp_avg_sq'])
# update the steps for each param group update
state['step'] += 1
# record the step after step update
state_steps.append(state['step'])
adam(params_with_grad,
grads,
exp_avgs,
exp_avg_sqs,
max_exp_avg_sqs,
state_steps,
amsgrad=group['amsgrad'],
beta1=beta1,
beta2=beta2,
lr=group['lr'],
weight_decay=group['weight_decay'],
eps=group['eps'])
return loss
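if __name__ == "__main__":
    # Hedged usage sketch (not part of the original file): this vendored Adam is a
    # drop-in replacement for torch.optim.Adam; note the grad.conj() in the second-moment
    # update, which makes it safe for complex parameters such as the cfloat spectral
    # weights defined elsewhere in this repo.
    net = torch.nn.Linear(8, 1)
    opt = Adam(net.parameters(), lr=1e-3, weight_decay=1e-4)
    x, y = torch.randn(32, 8), torch.randn(32, 1)
    for _ in range(3):
        opt.zero_grad()
        loss = ((net(x) - y) ** 2).mean()
        loss.backward()
        opt.step()
    print(loss.item())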
| 6,563 | 39.02439 | 120 | py |
Latent-Spectral-Models | Latent-Spectral-Models-main/utils/utilities3.py | import torch
import numpy as np
import scipy.io
import h5py
import torch.nn as nn
import operator
from functools import reduce
#################################################
# Utilities
#################################################
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# reading data
class MatReader(object):
def __init__(self, file_path, to_torch=True, to_cuda=False, to_float=True):
super(MatReader, self).__init__()
self.to_torch = to_torch
self.to_cuda = to_cuda
self.to_float = to_float
self.file_path = file_path
self.data = None
self.old_mat = True
self.h5 = False
self._load_file()
def _load_file(self):
if self.file_path[-3:] == '.h5':
self.data = h5py.File(self.file_path, 'r')
self.h5 = True
else:
try:
self.data = scipy.io.loadmat(self.file_path)
except:
self.data = h5py.File(self.file_path, 'r')
self.old_mat = False
def load_file(self, file_path):
self.file_path = file_path
self._load_file()
def read_field(self, field):
x = self.data[field]
if self.h5:
x = x[()]
if not self.old_mat:
x = x[()]
x = np.transpose(x, axes=range(len(x.shape) - 1, -1, -1))
if self.to_float:
x = x.astype(np.float32)
if self.to_torch:
x = torch.from_numpy(x)
if self.to_cuda:
x = x.cuda()
return x
def set_cuda(self, to_cuda):
self.to_cuda = to_cuda
def set_torch(self, to_torch):
self.to_torch = to_torch
def set_float(self, to_float):
self.to_float = to_float
# normalization, pointwise gaussian
class UnitGaussianNormalizer(object):
def __init__(self, x, eps=0.00001):
super(UnitGaussianNormalizer, self).__init__()
# x could be in shape of ntrain*n or ntrain*T*n or ntrain*n*T
self.mean = torch.mean(x, 0)
self.std = torch.std(x, 0)
self.eps = eps
def encode(self, x):
x = (x - self.mean) / (self.std + self.eps)
return x
def decode(self, x, sample_idx=None):
if sample_idx is None:
std = self.std + self.eps # n
mean = self.mean
else:
if len(self.mean.shape) == len(sample_idx[0].shape):
std = self.std[sample_idx] + self.eps # batch*n
mean = self.mean[sample_idx]
if len(self.mean.shape) > len(sample_idx[0].shape):
std = self.std[:, sample_idx] + self.eps # T*batch*n
mean = self.mean[:, sample_idx]
# x is in shape of batch*n or T*batch*n
x = (x * std) + mean
return x
def cuda(self):
self.mean = self.mean.cuda()
self.std = self.std.cuda()
def cpu(self):
self.mean = self.mean.cpu()
self.std = self.std.cpu()
# normalization, Gaussian
class GaussianNormalizer(object):
def __init__(self, x, eps=0.00001):
super(GaussianNormalizer, self).__init__()
self.mean = torch.mean(x)
self.std = torch.std(x)
self.eps = eps
def encode(self, x):
x = (x - self.mean) / (self.std + self.eps)
return x
def decode(self, x, sample_idx=None):
x = (x * (self.std + self.eps)) + self.mean
return x
def cuda(self):
self.mean = self.mean.cuda()
self.std = self.std.cuda()
def cpu(self):
self.mean = self.mean.cpu()
self.std = self.std.cpu()
# normalization, scaling by range
class RangeNormalizer(object):
def __init__(self, x, low=0.0, high=1.0):
super(RangeNormalizer, self).__init__()
mymin = torch.min(x, 0)[0].view(-1)
mymax = torch.max(x, 0)[0].view(-1)
self.a = (high - low) / (mymax - mymin)
self.b = -self.a * mymax + high
def encode(self, x):
s = x.size()
x = x.view(s[0], -1)
x = self.a * x + self.b
x = x.view(s)
return x
def decode(self, x):
s = x.size()
x = x.view(s[0], -1)
x = (x - self.b) / self.a
x = x.view(s)
return x
# loss function with rel/abs Lp loss
class LpLoss(object):
def __init__(self, d=2, p=2, size_average=True, reduction=True):
super(LpLoss, self).__init__()
        # Dimension and Lp-norm type are positive
assert d > 0 and p > 0
self.d = d
self.p = p
self.reduction = reduction
self.size_average = size_average
def abs(self, x, y):
num_examples = x.size()[0]
# Assume uniform mesh
h = 1.0 / (x.size()[1] - 1.0)
all_norms = (h ** (self.d / self.p)) * torch.norm(x.reshape(num_examples, -1) - y.reshape(num_examples, -1),
self.p, 1)
if self.reduction:
if self.size_average:
return torch.mean(all_norms)
else:
return torch.sum(all_norms)
return all_norms
def rel(self, x, y):
num_examples = x.size()[0]
diff_norms = torch.norm(x.reshape(num_examples, -1) - y.reshape(num_examples, -1), self.p, 1)
y_norms = torch.norm(y.reshape(num_examples, -1), self.p, 1)
if self.reduction:
if self.size_average:
return torch.mean(diff_norms / y_norms)
else:
return torch.sum(diff_norms / y_norms)
return diff_norms / y_norms
def __call__(self, x, y):
return self.rel(x, y)
# A simple feedforward neural network
class DenseNet(torch.nn.Module):
def __init__(self, layers, nonlinearity, out_nonlinearity=None, normalize=False):
super(DenseNet, self).__init__()
self.n_layers = len(layers) - 1
assert self.n_layers >= 1
self.layers = nn.ModuleList()
for j in range(self.n_layers):
self.layers.append(nn.Linear(layers[j], layers[j + 1]))
if j != self.n_layers - 1:
if normalize:
self.layers.append(nn.BatchNorm1d(layers[j + 1]))
self.layers.append(nonlinearity())
if out_nonlinearity is not None:
self.layers.append(out_nonlinearity())
def forward(self, x):
for _, l in enumerate(self.layers):
x = l(x)
return x
def pdist(sample_1, sample_2, norm=2, eps=1e-5):
r"""Compute the matrix of all squared pairwise distances.
Arguments
---------
sample_1 : torch.Tensor or Variable
The first sample, should be of shape ``(n_1, d)``.
sample_2 : torch.Tensor or Variable
The second sample, should be of shape ``(n_2, d)``.
norm : float
The l_p norm to be used.
Returns
-------
torch.Tensor or Variable
Matrix of shape (n_1, n_2). The [i, j]-th entry is equal to
``|| sample_1[i, :] - sample_2[j, :] ||_p``."""
n_1, n_2 = sample_1.size(0), sample_2.size(0)
norm = float(norm)
if norm == 2.:
norms_1 = torch.sum(sample_1 ** 2, dim=1, keepdim=True)
norms_2 = torch.sum(sample_2 ** 2, dim=1, keepdim=True)
norms = (norms_1.expand(n_1, n_2) +
norms_2.transpose(0, 1).expand(n_1, n_2))
distances_squared = norms - 2 * sample_1.mm(sample_2.t())
return torch.sqrt(eps + torch.abs(distances_squared))
else:
dim = sample_1.size(1)
expanded_1 = sample_1.unsqueeze(1).expand(n_1, n_2, dim)
expanded_2 = sample_2.unsqueeze(0).expand(n_1, n_2, dim)
differences = torch.abs(expanded_1 - expanded_2) ** norm
inner = torch.sum(differences, dim=2, keepdim=False)
return (eps + inner) ** (1. / norm)
class MMDStatistic:
r"""The *unbiased* MMD test of :cite:`gretton2012kernel`.
The kernel used is equal to:
.. math ::
k(x, x') = \sum_{j=1}^k e^{-\alpha_j\|x - x'\|^2},
    for the :math:`\alpha_j` provided in :py:meth:`~.MMDStatistic.__call__`.
Arguments
---------
n_1: int
The number of points in the first sample.
n_2: int
The number of points in the second sample."""
def __init__(self, n_1, n_2):
self.n_1 = n_1
self.n_2 = n_2
# The three constants used in the test.
self.a00 = 1. / (n_1 * (n_1 - 1))
self.a11 = 1. / (n_2 * (n_2 - 1))
self.a01 = - 1. / (n_1 * n_2)
def __call__(self, sample_1, sample_2, alphas, ret_matrix=False):
r"""Evaluate the statistic.
The kernel used is
.. math::
k(x, x') = \sum_{j=1}^k e^{-\alpha_j \|x - x'\|^2},
for the provided ``alphas``.
Arguments
---------
sample_1: :class:`torch:torch.autograd.Variable`
The first sample, of size ``(n_1, d)``.
sample_2: variable of shape (n_2, d)
The second sample, of size ``(n_2, d)``.
alphas : list of :class:`float`
The kernel parameters.
ret_matrix: bool
            If set, the call will also return a second variable.
            This variable can then be used to compute a p-value using
:py:meth:`~.MMDStatistic.pval`.
Returns
-------
:class:`float`
The test statistic.
:class:`torch:torch.autograd.Variable`
Returned only if ``ret_matrix`` was set to true."""
sample_12 = torch.cat((sample_1, sample_2), 0)
distances = pdist(sample_12, sample_12, norm=2)
kernels = None
for alpha in alphas:
kernels_a = torch.exp(- alpha * distances ** 2)
if kernels is None:
kernels = kernels_a
else:
kernels = kernels + kernels_a
k_1 = kernels[:self.n_1, :self.n_1]
k_2 = kernels[self.n_1:, self.n_1:]
k_12 = kernels[:self.n_1, self.n_1:]
mmd = (2 * self.a01 * k_12.sum() +
self.a00 * (k_1.sum() - torch.trace(k_1)) +
self.a11 * (k_2.sum() - torch.trace(k_2)))
if ret_matrix:
return mmd, kernels
else:
return mmd
# print the number of parameters
def count_params(model):
c = 0
for p in list(model.parameters()):
c += reduce(operator.mul, list(p.size()))
return c
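if __name__ == "__main__":
    # Hedged smoke test (not part of the original file) for two of the helpers above:
    # pointwise Gaussian normalization and the relative Lp loss.
    x = torch.rand(100, 64, 64)
    y = torch.rand(100, 64, 64)
    normalizer = UnitGaussianNormalizer(x)
    x_enc = normalizer.encode(x)
    assert torch.allclose(normalizer.decode(x_enc), x, atol=1e-5)
    myloss = LpLoss(size_average=False)
    print(myloss(x.view(100, -1), y.view(100, -1)).item())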
| 10,440 | 28.246499 | 116 | py |
Latent-Spectral-Models | Latent-Spectral-Models-main/utils/params.py | import argparse
def get_args():
parser = argparse.ArgumentParser('Latent Spectral Models', add_help=False)
# dataset
parser.add_argument('--data-path', default='./dataset', type=str, help='dataset folder')
parser.add_argument('--ntotal', default=1200, type=int, help='number of overall data')
parser.add_argument('--ntrain', default=1000, type=int, help='number of train set')
parser.add_argument('--ntest', default=200, type=int, help='number of test set')
parser.add_argument('--in_dim', default=1, type=int, help='input data dimension')
parser.add_argument('--out_dim', default=1, type=int, help='output data dimension')
parser.add_argument('--h', default=1, type=int, help='input data height')
parser.add_argument('--w', default=1, type=int, help='input data width')
parser.add_argument('--T-in', default=10, type=int,
help='input data time points (only for temporal related experiments)')
parser.add_argument('--T-out', default=10, type=int,
help='predict data time points (only for temporal related experiments)')
    parser.add_argument('--h-down', default=1, type=int, help='height downsample rate of input')
    parser.add_argument('--w-down', default=1, type=int, help='width downsample rate of input')
# optimization
parser.add_argument('--batch-size', default=20, type=int, help='batch size of training')
parser.add_argument('--learning-rate', default=0.001, type=float, help='learning rate')
parser.add_argument('--epochs', default=501, type=int, help='training epochs')
parser.add_argument('--step-size', default=100, type=int, help='interval of model save')
parser.add_argument('--gamma', default=0.5, type=float, help='parameter of learning rate scheduler')
# Model parameters
parser.add_argument('--model', default='lsm', type=str, help='model name')
parser.add_argument('--d-model', default=32, type=int, help='channels of hidden variates')
parser.add_argument('--num-basis', default=12, type=int, help='number of basis operators')
parser.add_argument('--num-token', default=4, type=int, help='number of latent tokens')
parser.add_argument('--patch-size', default='3,3', type=str, help='patch size of different dimensions')
parser.add_argument('--padding', default='3,3', type=str, help='padding size of different dimensions')
# save
parser.add_argument('--model-save-path', default='./checkpoints/', type=str, help='model save path')
parser.add_argument('--model-save-name', default='lsm.pt', type=str, help='model name')
return parser.parse_args() | 2,633 | 66.538462 | 107 | py |
FaceChat | FaceChat-main/app.py | async_mode = None
if async_mode is None:
try:
import eventlet
async_mode = "eventlet"
except ImportError:
pass
if async_mode is None:
try:
from gevent import monkey
async_mode = "gevent"
except ImportError:
pass
if async_mode is None:
async_mode = "threading"
print("async_mode is " + async_mode)
# monkey patching is necessary because this application uses a background
# thread
if async_mode == "eventlet":
import eventlet
eventlet.monkey_patch()
elif async_mode == "gevent":
from gevent import monkey
monkey.patch_all()
# The Session instance is not used for direct access, you should always use flask.session
from flask_session import Session
import os
import random
import torch
import time
from threading import Thread
import collections
import queue
from flask import Flask, render_template, request, session, redirect
from flask_socketio import SocketIO
import numpy as np
import threading
#DEEMA#
from pydub import AudioSegment
sem = threading.Semaphore(1)
# audio processing
# from transformers import AutoProcessor, WhisperForConditionalGeneration
import scipy.signal as sps
import webrtcvad
from transformers import AutoProcessor, WhisperForConditionalGeneration
from TTS.api import TTS
import openai
from flask_session import Session
# image processing
import base64, cv2
import io
from PIL import Image
from engineio.payload import Payload
from deepface import DeepFace
import tensorflow as tf
physical_devices = tf.config.list_physical_devices("GPU")
try:
tf.config.experimental.set_memory_growth(physical_devices[0], True)
except:
# Invalid device or cannot modify virtual devices once initialized.
pass
Payload.max_decode_packets = 2048
openai.api_key = os.environ["OPENAI_API_KEY"]
app = Flask(__name__)
app.debug = True
socketio = SocketIO(app, cors_allowed_origins="*")
app.config["SESSION_PERMANENT"] = False
app.config["SESSION_TYPE"] = "filesystem"
Session(app)
def isEnglish(s):
try:
s.encode(encoding="utf-8").decode("ascii")
except UnicodeDecodeError:
return False
else:
return True
def genWaveHeader(sampleRate, bitsPerSample, channels, samples):
datasize = samples * channels * bitsPerSample // 8
o = bytes("RIFF", "ascii") # (4byte) Marks file as RIFF
o += (datasize + 36).to_bytes(4, "little") # (4byte) File size in bytes excluding this and RIFF marker
o += bytes("WAVE", "ascii") # (4byte) File type
o += bytes("fmt ", "ascii") # (4byte) Format Chunk Marker
o += (16).to_bytes(4, "little") # (4byte) Length of above format data
o += (1).to_bytes(2, "little") # (2byte) Format type (1 - PCM)
o += (channels).to_bytes(2, "little") # (2byte)
o += (sampleRate).to_bytes(4, "little") # (4byte)
o += (sampleRate * channels * bitsPerSample // 8).to_bytes(4, "little") # (4byte)
o += (channels * bitsPerSample // 8).to_bytes(2, "little") # (2byte)
o += (bitsPerSample).to_bytes(2, "little") # (2byte)
o += bytes("data", "ascii") # (4byte) Data Chunk Marker
o += (datasize).to_bytes(4, "little") # (4byte) Data size in bytes
return o
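# Worked example (added comment): for one second of 22.05 kHz, 16-bit, mono
# audio, samples = 22050, so datasize = 22050 * 1 * 16 // 8 = 44100 bytes,
# the RIFF size field is 44100 + 36 = 44136, the byte-rate field is also
# 44100 bytes/s, the block align is 2 bytes, and the header itself is always
# 44 bytes long.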
def Int2Float(sound):
    _sound = np.copy(sound)
abs_max = np.abs(_sound).max()
_sound = _sound.astype("float32")
if abs_max > 0:
_sound *= 1 / abs_max
audio_float32 = torch.from_numpy(_sound.squeeze())
return audio_float32
class ASR:
def __init__(self) -> None:
#**BENCHMARK**#
# it was model_name
#self.model_name = "openai/whisper-tiny" #1
#self.model_name = "openai/whisper-base" #2
#self.model_name = "openai/whisper-small" #3
self.model_name = "openai/whisper-medium" #4
#self.model_name = "openai/whisper-large" #5
self.processor = AutoProcessor.from_pretrained(self.model_name)
self.model = WhisperForConditionalGeneration.from_pretrained(self.model_name)
self.model_sample_rate = 16000
self.device = torch.device("cpu")
def __call__(self, data, sample_rate=16000) -> str:
"""
Args:
data: PCM float32 format
sample_rate: the sample rate of data
"""
is_valid = True
# first, resample the data to the model's sample_rate
if sample_rate != self.model_sample_rate:
number_of_samples = round(len(data) * float(self.model_sample_rate) / sample_rate)
data = sps.resample(data, number_of_samples)
        # generate text
inputs = self.processor(data, return_tensors="pt", sampling_rate=self.model_sample_rate)
input_features = inputs.input_features.to(self.device)
generated_ids = self.model.generate(inputs=input_features)
text = self.processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()
if len(generated_ids[0]) < 4:
is_valid = False
if not isEnglish(text):
is_valid = False
return text, is_valid
def to(self, device):
self.model = self.model.to(device)
self.device = device
return self
class TTSModel:
def __init__(self) -> None:
self.model = TTS("tts_models/en/vctk/vits", gpu=True)
def __call__(self, text) -> np.float32:
wav = self.model.tts(text, speaker=self.model.speakers[33])
return wav
class AudioContext:
"""Streams raw audio from web microphone. Data is received in a separate thread, and stored in a buffer, to be read from.
"""
MIC_RATE = 16000
CHANNELS = 1
BLOCKS_PER_SECOND = 50
def __init__(self):
self.audio_buffer = queue.Queue()
def read(self):
"""Return a block of audio data, blocking if necessary."""
return self.audio_buffer.get()
def update(self, audioData):
"""Update the audio buffer."""
self.audio_buffer.put(audioData)
class GPT3Chatbot:
def __init__(self, model_name="text-davinci-003"):
self.model_name = model_name
self.instruct_prompt = \
"""Agent: Hello, How can I help you today?
"""
# """Background: Sony Life Insurance Co., Ltd. is a Japanese insurance company founded in 1979 as a joint venture between Sony Corporation and Prudential Financial and headquartered in Tokyo.
# Instruction: Be an insurance agent that works for Sony Insurance Company and chat with a customer. The response should be short and informative.
#Agent: Hello, I am an insurance agent from Sony Insurance Company. How can I help you today?
# """
self.bot_prompt = "Agent: "
self.user_prompt = "User: "
self.context = self.instruct_prompt
self.emotion_prompt = "(User is in neutral emotion)"
if "davinci-003" in model_name:
self.use_emotion_prompt = True
else:
self.use_emotion_prompt = False
def get_response(self, user_input):
if "restart" in user_input.lower() or "reset" in user_input.lower():
self.reset()
return "Hello, How can I help you today?" #"Hello, I am an insurance agent from Sony Insurance Company. How can I help you today?"
error_responses = ["Let me think...", "Give me some seconds...", "Wait a second"]
user_input = self.user_prompt + user_input + "\n"
if self.use_emotion_prompt:
user_input += self.emotion_prompt + "\n"
completion_prompt = self.context + user_input + self.bot_prompt
request_success = False
while not request_success:
try:
response = openai.Completion.create(
model=self.model_name, prompt=completion_prompt, temperature=0.95, max_tokens=128, top_p=0.95,
)
request_success = True
except Exception as e:
print(e)
error_response = random.choice(error_responses)
audio_speak(error_response)
print("Request failed, retrying...")
response = response["choices"][0]["text"].strip()
self.context += user_input + self.bot_prompt + response + "\n"
return response
def reset(self):
self.context = self.instruct_prompt
reset_audio_buffer()
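# Prompt-growth sketch (added comment, not in the original app): after two
# turns the completion prompt sent to the API looks roughly like
#   Agent: Hello, how can I help you today?
#   User: <ASR transcript of turn 1>
#   (User is in happy emotion)   <- appended only when use_emotion_prompt is set
#   Agent: <model reply 1>
#   User: <ASR transcript of turn 2>
#   Agent:
# i.e. self.context accumulates "User: ... / Agent: ..." pairs, and the emotion
# prompt produced by the video thread is injected after each user turn.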
# GPT models
# to be used for the BENCHMARK
chatbot = GPT3Chatbot("text-davinci-002")
#chatbot = GPT3Chatbot("text-davinci-003")
#chatbot = GPT3Chatbot("text-curie-001")
#chatbot = GPT3Chatbot("text-babbage-001")
#chatbot = GPT3Chatbot("text-ada-001")
asr_model = ASR()
tts_model = TTSModel()
# specify the running device
# device = torch.device("cuda")
# force cuda
device = torch.device('cuda')
asr_model = asr_model.to(device)
audio_buffer = queue.Queue()
audio_buffer_lock = False
def reset_audio_buffer():
global audio_buffer
audio_buffer.queue.clear()
@socketio.on("audio_listen")
def audio_listen(audioData):
global audio_context
global audio_buffer
global audio_buffer_lock
if not audio_buffer_lock:
audio_buffer.put(audioData)
@socketio.on("start_chat")
def start_chat(data):
global audio_buffer_lock
audio_buffer_lock = False
@socketio.on("stop_chat")
def stop_chat(data):
print("stopping...")
global audio_buffer_lock
audio_buffer_lock = True
session["name"] = None
sem.release()
socketio.emit('logout', "login");
# to be used for the BENCHMARK
f.close()
@socketio.on("system_init")
def system_init(audioData):
# speak
#audio_speak("Hello, I am an insurance agent from Sony Insurance Company. How can I help you today?")
audio_speak("Hello, How can I help you today?")
# # delay the next request
@app.route("/update-text", methods=["POST"])
def update_text():
text = chatbot.context
return text
@app.route("/", methods=["POST", "GET"])
def index():
print("intialized")
if not session.get("name"):
return redirect("/login")
if sem._value == 0:
return render_template("login.html", msg="Please wait, the agent is busy with another client...")
sem.acquire()
# reset the chatbot and buffer queue
chatbot.reset()
reset_audio_buffer()
return render_template("index.html")
@app.route("/login", methods=["POST", "GET"])
def login():
if request.method=='POST':
session["name"] = request.form.get('name')
return redirect("/")
return render_template("login.html")
class VADAudio:
"""Filter & segment audio with voice activity detection."""
def __init__(self, input_rate, audio_context):
self.input_rate = input_rate
self.audio_context = audio_context
self.RATE_PROCESS = 16000
self.block_size = 743
self.frame_duration_ms = 1000 * self.block_size // self.input_rate
self.sample_rate = 16000
self.silence_duration_ms = 500
self.vad = webrtcvad.Vad(mode=3)
def vad_collector(self, padding_ms=300, ratio=0.75, frames=None):
"""Generator that yields series of consecutive audio frames comprising each utterence, separated by yielding a single None.
Determines voice activity by ratio of frames in padding_ms. Uses a buffer to include padding_ms prior to being triggered.
Example: (frame, ..., frame, None, frame, ..., frame, None, ...)
|---utterence---| |---utterence---|
"""
global vad_model
global vad_iterator
global audio_buffer
global audio_buffer_lock
num_padding_frames = padding_ms // self.frame_duration_ms
ring_buffer = collections.deque(maxlen=num_padding_frames)
triggered = False
empty_frame_count = 0
max_empty_frame_count = self.silence_duration_ms // self.frame_duration_ms
while True:
if audio_buffer_lock:
continue
frame = audio_buffer.get()
is_speech = self.vad.is_speech(frame[-960:], self.sample_rate)
if not triggered:
ring_buffer.append((frame, is_speech))
# if speaking
num_voiced = len([f for f, is_speech in ring_buffer if is_speech])
if num_voiced > ratio * ring_buffer.maxlen:
triggered = True
for frame, is_speech in ring_buffer:
yield frame
ring_buffer.clear()
else:
yield frame
ring_buffer.append((frame, is_speech))
                # if not speaking
num_unvoiced = len([f for f, is_speech in ring_buffer if not is_speech])
if num_unvoiced > ratio * ring_buffer.maxlen:
                    # stop only after max_empty_frame_count consecutive silent frames (~500 ms of silence)
if empty_frame_count > max_empty_frame_count:
triggered = False
yield None
ring_buffer.clear()
empty_frame_count = 0
else:
empty_frame_count += 1
else:
                    # reset empty_frame_count if speech is detected
empty_frame_count = 0
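# Frame-size arithmetic (added comment): with input_rate = 16 kHz and
# block_size = 743 samples, each frame spans 1000 * 743 // 16000 = 46 ms, so
# padding_ms = 300 keeps a ring buffer of about 6 frames and
# silence_duration_ms = 500 tolerates roughly 10 quiet frames before the
# generator yields None to mark the end of an utterance.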
# to be used for the BENCHMARK
f = open(str(chatbot.model_name)+"_"+str(asr_model.model_name[asr_model.model_name.index("/")+1:])+".txt", "a")
class EngagementDetector(Thread):
def __init__(self, audio_context):
Thread.__init__(self)
self.audio_context = audio_context
self.vad_audio = VADAudio(input_rate=16000, audio_context=self.audio_context)
self.vad_model, vad_utils = torch.hub.load(repo_or_dir="snakers4/silero-vad", model="silero_vad")
(self.get_speech_ts, save_audio, read_audio, VADIterator, collect_chunks) = vad_utils
self.count = 0
def run(self):
frames = self.vad_audio.vad_collector()
wav_data = bytearray()
vad_model, vad_utils = torch.hub.load(repo_or_dir="snakers4/silero-vad", model="silero_vad")
(get_speech_ts, save_audio, read_audio, VADIterator, collect_chunks) = vad_utils
print("Listening...")
for frame in frames:
if frame is not None:
wav_data.extend(frame)
else:
data = np.frombuffer(wav_data, np.int16)
data = Int2Float(data)
# two-stage VAD
time_stamps = get_speech_ts(data, vad_model)
if len(time_stamps) > 0:
print("Speaking:", end="")
# to be used for the BENCHMARK
s_time = time.time()
text, is_asr_valid = asr_model(data, sample_rate=16000)
print(text)
if is_asr_valid:
chatbot_response = chatbot.get_response(text)
# to be used for the BENCHMARK
f.write(str(time.time()-s_time))
f.write("\n")
# speak
audio_speak(chatbot_response)
# clear buffer if speech detected
wav_data = bytearray()
def audio_speak(text):
global audio_buffer_lock
print(text)
audio_buffer_lock = True
# reset audio buffer to avoid interference
reset_audio_buffer()
audio_float32 = tts_model(text)
audio_int16 = (np.array(audio_float32, dtype=np.float32) * 32768).astype(np.int16)
wav_header = genWaveHeader(sampleRate=22050, bitsPerSample=16, channels=1, samples=len(audio_int16))
speak_data = wav_header + audio_int16.tobytes()
now = len(text.split(" "))
audio_io = io.BytesIO(speak_data)
# Create AudioSegment object from the in-memory file object
# Get duration of audio in milliseconds
duration_ms = AudioSegment.from_file(audio_io, format="wav")
duration_ms = len(duration_ms)
# Convert duration to seconds
now = duration_ms/1000.0
# we need the size of the text
print(f"TTS Duration: {now}")
socketio.emit("audio_speak", {"voice": speak_data, "words": now});
print(f"sending data! {text}")
time.sleep(len(audio_int16) / 22050)
audio_buffer_lock = False
def read_image_b64(base64_string):
"decode base64"
idx = base64_string.find("base64,")
base64_string = base64_string[idx + 7 :]
sbuf = io.BytesIO()
sbuf.write(base64.b64decode(base64_string, " /"))
pimg = Image.open(sbuf)
return cv2.cvtColor(np.array(pimg), cv2.COLOR_RGB2BGR)
def moving_average(x):
return np.mean(x)
# given 20 fps, control the image buffer
image_buffer = queue.Queue(maxsize=5)
@socketio.on("image_observe")
def image_observe(data_image):
global image_buffer
frame = read_image_b64(data_image)
image_buffer.put(frame)
class VideoProcessor(Thread):
def __init__(self, image_buffer):
Thread.__init__(self)
self.image_buffer = image_buffer
self._fps_array = [0]
def frame_generator(self):
while True:
frame = self.image_buffer.get()
yield frame
def run(self):
frames = self.frame_generator()
prev_recv_time = time.time()
fps = 0
cnt = 0
prev_box_pos = np.array([0, 0, 0, 0])
prev_scores = np.zeros(7)
emotion_beta = 0.99
box_beta = 0.2
for frame in frames:
try:
obj = DeepFace.analyze(
frame, actions=["emotion"], enforce_detection=False, silent=True, detector_backend="ssd"
)
if isinstance(obj, list):
obj = obj[0]
except Exception as e:
print(e)
continue
emotions, scores = zip(*obj["emotion"].items())
scores = list(scores)
# emotions = ['angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral']
# mask out disgust
scores[1] = scores[1] - 1000
# mask out fear
scores[2] = scores[2] - 1000
# mask out surprise
scores[5] = scores[5] - 1000
# give more weight to happy
# scores[3] = scores[3] * 2.0
scores = prev_scores * emotion_beta + np.array(scores) * (1 - emotion_beta)
# apply softmax
scores = np.exp(scores) / np.sum(np.exp(scores))
prev_scores = scores
index = np.argmax(scores)
pred_emotion = emotions[index]
# x, y, w, h
box_pos = np.array(list(obj["region"].values()))
if (
pred_emotion in emotions
and (box_pos[0] > 0 and box_pos[1] > 0)
and (box_pos[0] < 400 and box_pos[1] < 300)
):
box_pos = prev_box_pos * box_beta + box_pos * (1 - box_beta)
box_pos = np.rint(box_pos).astype(int)
x, y, w, h = box_pos
cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
cv2.putText(
frame,
pred_emotion,
(x - 10, y - 10),
fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=0.9,
color=(255, 0, 0),
thickness=2,
)
# old_pred_emotion = pred_emotion
prev_box_pos = box_pos
if pred_emotion == "happy":
chatbot.emotion_prompt = "(User is in happy emotion)"
elif pred_emotion == "sad":
chatbot.emotion_prompt = "(User is in sad emotion)"
elif pred_emotion == "angry":
chatbot.emotion_prompt = "(User is in angry emotion)"
elif pred_emotion == "neutral":
chatbot.emotion_prompt = "(User is in neutral emotion)"
else:
pred_emotion = "neutral"
chatbot.emotion_prompt = "(User is in neutral emotion)"
recv_time = time.time()
cv2.putText(
frame,
"fps " + str(fps),
(10, 20),
fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=0.5,
color=(255, 0, 0),
thickness=1,
)
# encode it into jpeg
imgencode = cv2.imencode(".jpeg", frame, [cv2.IMWRITE_JPEG_QUALITY, 40])[1]
# base64 encode
stringData = base64.b64encode(imgencode).decode("utf-8")
b64_src = "data:image/jpeg;base64,"
stringData = b64_src + stringData
# emit the frame back
socketio.emit("image_show", stringData)
fps = 1 / (recv_time - prev_recv_time)
self._fps_array.append(fps)
fps = round(moving_average(np.array(self._fps_array)), 1)
prev_recv_time = recv_time
# print(fps_array)
cnt += 1
if cnt == 30:
self._fps_array = [fps]
cnt = 0
# Globals
audio_context = AudioContext()
engagement_detector = EngagementDetector(audio_context)
engagement_detector.start()
video_process = VideoProcessor(image_buffer)
video_process.start()
if __name__ == "__main__":
socketio.run(app, host="0.0.0.0", port=55009, debug=False, keyfile="key.pem", certfile="cert.pem")
# app.run(host='0.0.0.0', debug=True, threaded=True, port=9900, ssl_context=("cert.pem", "key.pem"))
| 21,580 | 31.748103 | 194 | py |
GraphCAD | GraphCAD-main/gin_conv_weight.py | from typing import Callable, Optional, Union
import torch
from torch import Tensor
from torch_sparse import SparseTensor, matmul
from torch_geometric.nn.conv import MessagePassing
from torch_geometric.nn.dense.linear import Linear
from torch_geometric.typing import Adj, OptPairTensor, OptTensor, Size
from ..inits import reset
class GINConv_w(MessagePassing):
r"""The graph isomorphism operator from the `"How Powerful are
Graph Neural Networks?" <https://arxiv.org/abs/1810.00826>`_ paper
.. math::
\mathbf{x}^{\prime}_i = h_{\mathbf{\Theta}} \left( (1 + \epsilon) \cdot
\mathbf{x}_i + \sum_{j \in \mathcal{N}(i)} \mathbf{x}_j \right)
or
.. math::
\mathbf{X}^{\prime} = h_{\mathbf{\Theta}} \left( \left( \mathbf{A} +
(1 + \epsilon) \cdot \mathbf{I} \right) \cdot \mathbf{X} \right),
    here :math:`h_{\mathbf{\Theta}}` denotes a neural network, *i.e.* an MLP.
Args:
nn (torch.nn.Module): A neural network :math:`h_{\mathbf{\Theta}}` that
maps node features :obj:`x` of shape :obj:`[-1, in_channels]` to
shape :obj:`[-1, out_channels]`, *e.g.*, defined by
:class:`torch.nn.Sequential`.
eps (float, optional): (Initial) :math:`\epsilon`-value.
(default: :obj:`0.`)
train_eps (bool, optional): If set to :obj:`True`, :math:`\epsilon`
will be a trainable parameter. (default: :obj:`False`)
**kwargs (optional): Additional arguments of
:class:`torch_geometric.nn.conv.MessagePassing`.
Shapes:
- **input:**
node features :math:`(|\mathcal{V}|, F_{in})` or
:math:`((|\mathcal{V_s}|, F_{s}), (|\mathcal{V_t}|, F_{t}))`
if bipartite,
edge indices :math:`(2, |\mathcal{E}|)`
- **output:** node features :math:`(|\mathcal{V}|, F_{out})` or
:math:`(|\mathcal{V}_t|, F_{out})` if bipartite
"""
def __init__(self, nn: Callable, eps: float = 0., train_eps: bool = False,
**kwargs):
kwargs.setdefault('aggr', 'add')
super().__init__(**kwargs)
self.nn = nn
self.initial_eps = eps
if train_eps:
self.eps = torch.nn.Parameter(torch.Tensor([eps]))
else:
self.register_buffer('eps', torch.Tensor([eps]))
self.reset_parameters()
def reset_parameters(self):
reset(self.nn)
self.eps.data.fill_(self.initial_eps)
def forward(self, x: Union[Tensor, OptPairTensor], edge_index: Adj, edge_weight: OptTensor = None,
size: Size = None) -> Tensor:
""""""
if isinstance(x, Tensor):
x: OptPairTensor = (x, x)
# propagate_type: (x: OptPairTensor)
out = self.propagate(edge_index, x=x, edge_weight=edge_weight, size=size)
x_r = x[1]
if x_r is not None:
out += (1 + self.eps) * x_r
return self.nn(out)
# def message(self, x_j: Tensor) -> Tensor:
# return x_j
def message(self, x_j: Tensor, edge_weight: OptTensor) -> Tensor:
return x_j if edge_weight is None else edge_weight.view(-1, 1) * x_j
def message_and_aggregate(self, adj_t: SparseTensor,
x: OptPairTensor) -> Tensor:
adj_t = adj_t.set_value(None, layout=None)
return matmul(adj_t, x[0], reduce=self.aggr)
def __repr__(self) -> str:
return f'{self.__class__.__name__}(nn={self.nn})'
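# Minimal usage sketch (added, not part of the original file): wiring
# GINConv_w with a small MLP and explicit per-edge weights; the toy graph and
# sizes below are illustrative only.
def _demo_gin_conv_w():
    import torch.nn as nn
    mlp = nn.Sequential(nn.Linear(16, 32), nn.ReLU(), nn.Linear(32, 32))
    conv = GINConv_w(mlp, train_eps=True)
    x = torch.randn(4, 16)                     # 4 nodes with 16 features each
    edge_index = torch.tensor([[0, 1, 2, 3],
                               [1, 0, 3, 2]])  # two undirected edges
    edge_weight = torch.tensor([0.5, 0.5, 1.0, 1.0])
    return conv(x, edge_index, edge_weight)    # -> [4, 32] node embeddings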
| 3,471 | 35.166667 | 102 | py |
GraphCAD | GraphCAD-main/MAG/main.py | import os
import argparse
import numpy as np
import torch
import torch.nn as nn
from tqdm import tqdm
import random
import json
import pickle
from collections import defaultdict
from operator import itemgetter
import logging
from torch_geometric.data import Data, DataLoader
from torch.optim.lr_scheduler import _LRScheduler
from models import GraphCAD, outlierLoss
from utils import *
torch.backends.cudnn.benchmark = True
torch.autograd.set_detect_anomaly(True)
os.environ["TOKENIZERS_PARALLELISM"] = "false"
# CUDA_VISIBLE_DEVICES=1 python main.py --train_dir '/raid/chenbo/outlier_detection/release_data/mag_train.pkl' --test_dir '/raid/chenbo/outlier_detection/release_data/mag_test.pkl'
def add_arguments(args):
# essential paras
args.add_argument('--train_dir', type=str, help="train_dir", required = True)
args.add_argument('--test_dir', type=str, help="test_dir", required = True)
    args.add_argument('--saved_dir', type=str, help="directory to save models", default= "saved_model")
args.add_argument('--log_name', type=str, help="log_name", default = "log")
# training paras.
args.add_argument('--epochs', type=int, help="training #epochs", default=100)
args.add_argument('--seed', type=int, help="seed", default=1)
args.add_argument('--lr', type=float, help="learning rate", default=1e-3)
args.add_argument('--min_lr', type=float, help="min lr", default=5e-4)
args.add_argument('--bs', type=int, help="batch size", default=4)
args.add_argument('--input_dim', type=int, help="input dimension", default=768)
args.add_argument('--out_dim', type=int, help="output dimension", default=768)
    args.add_argument('--verbose', type=int, help="evaluate every #verbose epochs", default=1)
# model paras.
args.add_argument('--outer_layer', type=int, help="#layers of GraphCAD", default = 2)
args.add_argument('--inner_layer', type=int, help="#layers of node_update", default = 1)
args.add_argument('--is_global', help="whether to add global information", action = "store_false")
args.add_argument('--is_edge', help="whether to use edge update", action = "store_false")
    args.add_argument('--pooling', type=str, help="pooling_type", choices=['memory', 'avg', 'min', 'max'], default = "memory")
args.add_argument('--is_lp', help="whether to use link prediction loss", action = "store_false")
args.add_argument("--lp_weight", type = float, help="the weight of link prediction loss", default=0.1)
args = args.parse_args()
return args
def logging_builder(args):
logger = logging.getLogger(__file__)
logger.setLevel(logging.DEBUG)
consoleHandler = logging.StreamHandler()
consoleHandler.setLevel(logging.DEBUG)
fileHandler = logging.FileHandler(os.path.join(os.getcwd(), args.log_name), mode='w')
fileHandler.setLevel(logging.DEBUG)
formatter = logging.Formatter('[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
consoleHandler.setFormatter(formatter)
fileHandler.setFormatter(formatter)
logger.addHandler(consoleHandler)
logger.addHandler(fileHandler)
return logger
class WarmupLinearLR(_LRScheduler):
def __init__(self, optimizer, step_size, min_lr, peak_percentage=0.1, last_epoch=-1):
self.step_size = step_size
self.peak_step = peak_percentage * step_size
self.min_lr = min_lr
super(WarmupLinearLR, self).__init__(optimizer, last_epoch)
def get_lr(self):
ret = []
for tmp_min_lr, tmp_base_lr in zip(self.min_lr, self.base_lrs):
if self._step_count <= self.peak_step:
ret.append(tmp_min_lr + (tmp_base_lr - tmp_min_lr) * self._step_count / self.peak_step)
else:
ret.append(tmp_min_lr + max(0, (tmp_base_lr - tmp_min_lr) * (self.step_size - self._step_count) / (self.step_size - self.peak_step)))
return ret
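# Worked example (added comment): with step_size = 1000, peak_percentage = 0.1
# (so peak_step = 100), base lr = 1e-3 and min_lr = 5e-4, the learning rate
# ramps linearly from 5e-4 to 1e-3 over the first 100 scheduler steps
# (e.g. step 50 -> 5e-4 + 5e-4 * 50 / 100 = 7.5e-4) and then decays linearly
# back to 5e-4, which it reaches at step 1000 and keeps afterwards.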
if __name__ == "__main__":
args = argparse.ArgumentParser()
args = add_arguments(args)
setup_seed(args.seed)
logger = logging_builder(args)
print(args)
os.makedirs(os.path.join(os.getcwd(), args.saved_dir), exist_ok = True)
encoder = GraphCAD(logger, args, args.input_dim, args.out_dim, args.outer_layer, args.inner_layer, is_global = args.is_global, is_edge = args.is_edge, pooling= args.pooling).cuda()
criterion = outlierLoss(args, logger, is_lp = args.is_lp, lp_weight = args.lp_weight).cuda()
with open(args.train_dir, 'rb') as files:
train_data = pickle.load(files)
with open(args.test_dir, 'rb') as files:
test_data = pickle.load(files)
logger.info("# Batch: {} - {}".format(len(train_data), len(train_data) / args.bs))
optimizer = torch.optim.Adam([{'params': encoder.parameters(), 'lr': args.lr}])
optimizer.zero_grad()
max_step = int(len(train_data) / args.bs * 10)
logger.info("max_step: %d, %d, %d, %d"%(max_step, len(train_data), args.bs, args.epochs))
scheduler = WarmupLinearLR(optimizer, max_step, min_lr=[args.min_lr])
encoder.train()
epoch_num = 0
max_map = -1
max_auc = -1
max_epoch = -1
for epoch_num in range(args.epochs):
batch_loss = []
batch_contras_loss = []
batch_lp_loss = []
batch_edge_score = []
batch_labels = []
batch_index = 0
random.shuffle(train_data)
for tmp_train in tqdm(train_data):
batch_index += 1
batch_data, edge_labels = tmp_train
node_outputs, adj_matrix, adj_weight, labels, batch_item = batch_data.x, batch_data.edge_index, batch_data.edge_attr.squeeze(-1), batch_data.y, batch_data.batch
node_outputs, adj_weight, centroid, output_loss, centroid_loss, edge_prob = encoder(node_outputs, adj_matrix, adj_weight, batch_item, 1)
overall_loss, _, contras_loss, lp_loss = criterion(output_loss, centroid_loss, edge_prob, edge_labels, adj_matrix, batch_item, labels, node_outputs, centroid)
# overall_loss.backward()
overall_loss = overall_loss / args.bs
overall_loss.backward()
batch_loss.append(overall_loss.item())
batch_contras_loss.append(contras_loss.item())
batch_lp_loss.append(lp_loss.item())
if (batch_index + 1) % args.bs == 0:
optimizer.step()
scheduler.step()
optimizer.zero_grad()
avg_batch_loss = np.mean(np.array(batch_loss))
avg_batch_contras_loss = np.mean(np.array(batch_contras_loss))
avg_batch_lp_loss = np.mean(np.array(batch_lp_loss))
logger.info("Epoch:{} Overall loss: {:.6f} Contrastive loss: {:.6f} LP_loss: {:.6f}".format(epoch_num, avg_batch_loss, avg_batch_contras_loss, avg_batch_lp_loss))
if (epoch_num + 1) % args.verbose == 0:
encoder.eval()
test_loss = []
test_contras_loss = []
test_lp_loss = []
test_gt = []
labels_list = []
scores_list = []
with torch.no_grad():
for tmp_test in tqdm(test_data):
each_sub, edge_labels = tmp_test
node_outputs, adj_matrix, adj_weight, labels, batch_item = each_sub.x, each_sub.edge_index, each_sub.edge_attr.squeeze(-1), each_sub.y, each_sub.batch
node_outputs, adj_weight, centroid, output_loss, centroid_loss, edge_prob = encoder(node_outputs, adj_matrix, adj_weight, batch_item, 1)
centroid = centroid.squeeze(0)
centroid_loss = centroid_loss.squeeze(0)
test_each_overall_loss, scores, test_each_contras_loss, test_each_lp_loss = criterion(output_loss, centroid_loss, edge_prob, edge_labels, adj_matrix, batch_item, labels, node_outputs, centroid)
scores = scores.detach().cpu().numpy()
scores_list.append(scores)
labels = labels.detach().cpu().numpy()
test_gt.append(labels)
test_loss.append(test_each_overall_loss.item())
test_contras_loss.append(test_each_contras_loss.item())
test_lp_loss.append(test_each_lp_loss.item())
avg_test_loss = np.mean(np.array(test_loss))
avg_test_contras_loss = np.mean(np.array(test_contras_loss))
avg_test_lp_loss = np.mean(np.array(test_lp_loss))
auc, maps = MAPs(test_gt, scores_list)
logger.info("Epoch: {} Auc: {:.6f} Maps: {:.6f} Max-Auc: {:.6f} Max-Maps: {:.6f}".format(epoch_num, auc, maps, max_auc, max_map))
if maps > max_map or auc > max_auc:
max_epoch = epoch_num
max_map = maps if maps > max_map else max_map
max_auc = auc if auc > max_auc else max_auc
# state = {'encoder': encoder.state_dict()}
# torch.save(state, saved_file + "model_" + str(epoch_num))
logger.info("***************** Epoch: {} Max Auc: {:.6f} Maps: {:.6f} *******************".format(epoch_num, max_auc, max_map))
encoder.train()
optimizer.zero_grad()
logger.info("***************** Max_Epoch: {} Max Auc: {:.6f} Maps: {:.6f}*******************".format(max_epoch, max_auc, max_map)) | 9,336 | 46.637755 | 213 | py |
GraphCAD | GraphCAD-main/MAG/utils.py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import math, copy, time
from torch.autograd import Variable
import multiprocessing
from sklearn.metrics import roc_auc_score, auc, roc_curve
from torch_geometric.utils import add_self_loops, degree, softmax, to_dense_adj, dense_to_sparse
from operator import itemgetter
from scipy import sparse
import random
def MAPs(label_lists, score_lists):
assert len(label_lists) == len(score_lists)
maps = []
mean_auc = []
total_count = 0
# print(np.array(score_lists).shape)
total_nan = 0
for sub_labels, sub_scores in zip(label_lists, score_lists):
assert len(sub_labels) == len(sub_scores)
combine = [each for each in zip(sub_scores, sub_labels)]
sorted_combine = sorted(combine, key=itemgetter(0))
# print(sorted_combine)
rights = 0
ps = []
tmp_scores = []
tmp_labels = []
for index in range(len(sorted_combine)):
ins_scores, ins_labels = sorted_combine[index]
tmp_scores.append(ins_scores)
tmp_labels.append(ins_labels)
if(ins_labels == 0):
rights += 1
ps.append(rights/(index+1))
tmp_scores = np.array(tmp_scores)
nan_num = len(tmp_scores[np.isnan(tmp_scores)])
total_nan += nan_num
tmp_scores = np.nan_to_num(tmp_scores)
tmp_labels = np.array(tmp_labels)
auc = roc_auc_score(1-tmp_labels, -1 * tmp_scores)
ap = np.mean(np.array(ps))
maps.append((ap, len(sub_labels)))
mean_auc.append(auc)
total_count += len(sub_labels)
assert len(maps) == len(mean_auc) == len(label_lists)
maps_scores = 0
maps_weight = 0
for each in maps:
ap, count = each
each_w = total_count / count
maps_scores += ap * each_w
maps_weight += each_w
norm_maps = maps_scores/maps_weight
mean_auc = np.mean(np.array(mean_auc))
return mean_auc, norm_maps
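# Worked weighting example (added comment): for two test graphs with 10 and 40
# nodes and per-graph APs of 0.9 and 0.6, the weights are 50/10 = 5 and
# 50/40 = 1.25, so the size-weighted MAP is
# (5 * 0.9 + 1.25 * 0.6) / (5 + 1.25) = 5.25 / 6.25 = 0.84, i.e. smaller
# graphs contribute more than they would under a plain instance-level average.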
def setup_seed(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
| 2,237 | 27.329114 | 96 | py |
GraphCAD | GraphCAD-main/MAG/models.py | from random import sample
import torch
import torch.nn as nn
import torch.nn.functional as F
from sklearn.metrics.pairwise import euclidean_distances, cosine_similarity
import pickle
from torch_geometric.nn import GCNConv, MessagePassing, GINConv, GATConv
from torch_geometric.utils import add_self_loops, degree, softmax, to_dense_adj, dense_to_sparse
from torch_scatter import scatter_add
import math
import numpy as np
import pyro
class outlierLoss(nn.Module):
def __init__(self, args, logger, is_lp = True, lp_weight = 0.1):
super(outlierLoss, self).__init__()
self.cross_entropy = nn.CrossEntropyLoss().cuda()
self.dim = args.out_dim
self.temp = 0.1
self.margin_1 = 0.5
self.lamb = 0.5
self.thres = torch.tensor(0.0).cuda()
self.lp_weight = lp_weight
self.is_lp = is_lp
logger.info("is_lp: %s, lp_weight: %f"%(self.is_lp, self.lp_weight))
def cal_lp_loss(self, edge_prob, edge_labels):
# feat_all_trans: [(b x N) x C]
non_zero = torch.nonzero(edge_labels)
zero = torch.nonzero(edge_labels == 0)
pos_prob = edge_prob[non_zero][:8192]
neg_prob = edge_prob[zero]
        logits = torch.cat((pos_prob, neg_prob.view(1, neg_prob.size(0)).repeat(pos_prob.size(0), 1)), dim = 1)
logits_labels = torch.zeros([pos_prob.size(0)]).cuda().long()
lp_loss = self.cross_entropy(logits/self.temp, logits_labels)
return lp_loss
def forward(self, feat_all, q, edge_prob, edge_labels, adj_mat, batch_item, labels, raw_feat_all, raw_centroid):
# q2all: [N, 1]
# q = F.normalize(q.view(C, 1), p=2, dim=0)
# feat_all = F.normalize(feat_all, p=2, dim=1)
q2all = torch.mm(feat_all, q.view(self.dim, 1)).squeeze(-1)
pos_len = torch.sum(labels, dim = 0)
neg_len = q2all.size(0) - pos_len
# pos [P]; neg [Neg]
q2all_pos, q2all_neg = torch.split(q2all, [pos_len, neg_len], dim = 0)
q2all_each_logits = torch.cat([q2all_pos.unsqueeze(-1), q2all_neg.view(1, neg_len).repeat(pos_len, 1)], dim = -1)
q2all_each_logits = q2all_each_logits.view(pos_len, neg_len + 1)
# pos: [b x P, 1]
# neg: [b x p, Neg + (b - 1) * N]
# pos_score, neg_score = torch.split(q2all_each_logits, [1, Neg + (b-1) * N], dim = -1)
logits_labels = torch.zeros([pos_len]).cuda().long()
contras_loss = self.cross_entropy(q2all_each_logits/self.temp, logits_labels)
if self.is_lp:
lp_loss = self.cal_lp_loss(edge_prob, edge_labels)
else:
lp_loss = torch.tensor(0.0).cuda()
# outlier_loss = contras_loss + consist_loss
outlier_loss = contras_loss + self.lp_weight * lp_loss
raw_feat_all = F.normalize(raw_feat_all, p=2, dim=1)
raw_centroid = F.normalize(raw_centroid.view(self.dim, 1), p=2, dim=0)
scores = torch.mm(raw_feat_all, raw_centroid.view(self.dim, 1)).squeeze(-1)
return outlier_loss, scores, contras_loss, lp_loss
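# Shape note (added comment): assuming labels are 1 for normal nodes and 0 for
# outliers (as the split by pos_len suggests), q2all_each_logits is
# [P, Neg + 1] with column 0 holding each normal node's similarity to the
# graph centroid q and the remaining columns the shared outlier similarities,
# so the cross-entropy with all-zero targets pulls normal nodes toward q and
# pushes outliers away from it.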
class GraphCAD(nn.Module):
def __init__(self, logger, args,in_dim, out_dim, total_layer_num, ins_layer_num, is_norm = True, is_edge = True, is_node = True, is_system = True, is_global = True, pooling = "memory"):
super(GraphCAD, self).__init__()
self.total_layer_num = total_layer_num
self.is_edge = is_edge
self.is_node = is_node
self.is_system = is_system
self.in_dim = in_dim
# edge_model
# self.edgemodel = None
if is_edge:
logger.info("EdgeUpdate")
self.edgemodel = EdgeUpdate(is_global, out_dim, 1)
# conv_model
if is_node:
logger.info("NodeUpdate")
self.node_updates = nn.ModuleList()
for _ in range(self.total_layer_num):
self.node_updates.append(NodeUpdate(out_dim, out_dim, is_norm, ins_layer_num))
# sys_model
if is_system:
logger.info("SystemUpdate")
self.sys_updates = nn.ModuleList()
for _ in range(self.total_layer_num):
self.sys_updates.append(SystemUpdate(out_dim, out_dim, pooling))
self.mlp_head = nn.Sequential(
nn.Linear(out_dim, out_dim),
nn.ReLU(),
nn.Linear(out_dim, out_dim)
)
self.drop_layer = nn.Dropout(0.5)
self.relu = nn.ReLU()
logger.info("is_edge: %r, is_global: %r pooling: %r"%(is_edge, is_global, pooling))
def forward(self, x, edge_index, edge_weight, batch_item, bs):
init_lens = edge_index
# x_pos, x_neg = torch.split(x.view(b, N, C), [P, Neg], dim = 1)
centroid = torch.mean(x.view(bs, -1, self.in_dim), dim = 1)
edge_prob = edge_index
x_trans_loss = x
for index in range(self.total_layer_num):
# edge update
if self.is_edge:
edge_index, edge_weight, edge_prob, x_trans_loss = self.edgemodel(x, edge_index, edge_weight, centroid, batch_item, bs)
# node update
if self.is_node:
x, saved_x = self.node_updates[index](x, edge_index, edge_weight)
# system update
if self.is_system:
centroid = self.sys_updates[index](saved_x, centroid, bs)
x_loss = self.mlp_head(x)
centroid_loss = self.mlp_head(centroid)
final_len = edge_index
return x, edge_weight, centroid, x_loss, centroid_loss, edge_prob
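# Shape walk-through (added comment): for a single graph (bs = 1) with N nodes
# and E directed edges, x is [N, in_dim], edge_index is [2, E], edge_weight is
# [E] and batch_item is [N]. Each outer layer first re-weights edges via
# EdgeUpdate, then refines node states via NodeUpdate (GCNConv + ReLU +
# dropout), and finally updates the graph-level centroid via SystemUpdate, so
# the returned centroid is [bs, out_dim] and x_loss is [N, out_dim].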
class edgePredictor(nn.Module):
def __init__(self, dim, is_global):
super(edgePredictor, self).__init__()
self.is_global = is_global
self.dim = dim
if is_global:
self.l2r = nn.Sequential(
nn.Linear(3 * dim, dim),
nn.ReLU(),
nn.Dropout(0.5),
nn.Linear(dim, 1)
# nn.Sigmoid()
)
else:
self.l2r = nn.Sequential(
nn.Linear(dim, dim),
nn.ReLU(),
nn.Dropout(0.5),
nn.Linear(dim, 1)
# nn.Sigmoid()
)
def forward(self, node_features, edge_index, centroid, bs):
node_features = node_features.view(-1, self.dim)
node_j = node_features[edge_index[0]]
node_i = node_features[edge_index[1]]
if self.is_global:
residual_node_features = (node_features.view(bs, -1, self.dim) - centroid.view(bs, 1, self.dim)).view(-1, self.dim)
residual_node_j = residual_node_features[edge_index[0]]
residual_node_i = residual_node_features[edge_index[1]]
sim_vec = torch.cat((torch.abs(node_i - node_j), residual_node_i, residual_node_j), dim = 1)
else:
sim_vec = torch.abs(node_i - node_j)
prob_score = self.l2r(sim_vec)
return prob_score
class EdgeUpdate(nn.Module):
def __init__(self, is_global, feature_dim, edge_dim, load_dir = None):
super(EdgeUpdate, self).__init__()
self.feature_dim = feature_dim
self.edge_dim = edge_dim
self.temp = 0.6
self.thres_1 = torch.nn.Threshold(0.5, 0)
self.thres_2 = torch.nn.Threshold(-0.49, 1)
self.mins = torch.tensor(1e-10).cuda()
self.relu_fuc = nn.ReLU()
self.edge_skip_alpha = nn.Parameter(torch.rand(1))
self.ep_net = edgePredictor(feature_dim, is_global)
def forward(self, x, edge_index, edge_weight, centroid, batch_item, bs):
pre_prob = self.ep_net(x, edge_index, centroid, bs).squeeze(-1)
pre_adj = torch.sigmoid(pre_prob)
sampled_edge = torch.ones([pre_adj.size(0)]).cuda()
sampled_edge = pyro.distributions.RelaxedBernoulliStraightThrough(temperature=self.temp, probs = pre_adj).rsample()
combine_weight = self.edge_skip_alpha * (sampled_edge * edge_weight) + (1-self.edge_skip_alpha) * (sampled_edge * pre_adj)
return edge_index, combine_weight, pre_adj, x
class NodeUpdate(torch.nn.Module):
def __init__(self, in_channel, out_channel, is_norm, layer_num):
super(NodeUpdate, self).__init__()
self.conv1 = GCNConv(in_channel, out_channel)
self.drop_layer = nn.Dropout(0.5)
def forward(self, x, edge_index, edge_weight = None):
his_x = []
x = F.relu(self.conv1(x, edge_index, edge_weight))
x = self.drop_layer(x)
his_x.append(x)
return x, his_x
class SystemUpdate(nn.Module):
def __init__(self, in_channel, out_channel, pooling):
super(SystemUpdate, self).__init__()
self.in_dim = in_channel
self.out_dim = out_channel
self.pooling = pooling
def forward(self, his_x, init_c, bs):
mem_lens = len(his_x)
if self.pooling == "memory":
for index in range(mem_lens):
tmp_mem = his_x[index].view(bs, -1, self.in_dim)
tmp_score = torch.bmm(tmp_mem, init_c.view(bs, self.in_dim, 1)).view(bs, -1)
tmp_att = F.softmax(tmp_score, dim = 1)
tmp_read = torch.sum(tmp_att.view(bs, -1, 1) * tmp_mem, dim = 1)
init_c = tmp_read
elif self.pooling == "avg":
x = his_x[-1].view(bs, -1, self.in_dim)
init_c = torch.mean(x, dim = 1)
elif self.pooling == "sum":
x = his_x[-1].view(bs, -1, self.in_dim)
init_c = torch.sum(x, dim = 1)
elif self.pooling == "max":
x = his_x[-1].view(bs, -1, self.in_dim)
init_c = torch.max(x, dim = 1).values
else:
raise ValueError("No such pooling type!")
return init_c
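# Added note: the "memory" branch is an attention read repeated once per
# stored layer, att = softmax(H c) and c <- sum_i att_i * H_i, with H the
# [N, d] node memory of that layer and c the running centroid, so the pooled
# representation is iteratively re-focused on the nodes most similar to the
# current centroid.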
| 9,780 | 37.507874 | 189 | py |
GraphCAD | GraphCAD-main/AMiner/main.py | import os
import argparse
import numpy as np
import torch
import torch.nn as nn
from tqdm import tqdm
import random
import json
import pickle
from collections import defaultdict
from operator import itemgetter
import logging
from torch_geometric.data import Data, DataLoader
from torch.optim.lr_scheduler import _LRScheduler
from models import GraphCAD, outlierLoss
from utils import *
torch.backends.cudnn.benchmark = True
torch.autograd.set_detect_anomaly(True)
os.environ["TOKENIZERS_PARALLELISM"] = "false"
# CUDA_VISIBLE_DEVICES=1 python main.py --train_dir '/raid/chenbo/outlier_detection/release_data/aminer_train.pkl' --test_dir '/raid/chenbo/outlier_detection/release_data/aminer_test.pkl'
def add_arguments(args):
# essential paras
args.add_argument('--train_dir', type=str, help="train_dir", required = True)
args.add_argument('--test_dir', type=str, help="test_dir", required = True)
    args.add_argument('--saved_dir', type=str, help="directory to save models", default= "saved_model")
args.add_argument('--log_name', type=str, help="log_name", default = "log")
# training paras.
args.add_argument('--epochs', type=int, help="training #epochs", default=100)
args.add_argument('--seed', type=int, help="seed", default=1)
args.add_argument('--lr', type=float, help="learning rate", default=5e-4)
args.add_argument('--min_lr', type=float, help="min lr", default=1e-4)
args.add_argument('--bs', type=int, help="batch size", default=2)
args.add_argument('--input_dim', type=int, help="input dimension", default=768)
args.add_argument('--out_dim', type=int, help="output dimension", default=768)
    args.add_argument('--verbose', type=int, help="evaluate every #verbose epochs", default=1)
# model paras.
args.add_argument('--outer_layer', type=int, help="#layers of GraphCAD", default = 2)
args.add_argument('--inner_layer', type=int, help="#layers of node_update", default = 1)
args.add_argument('--is_global', help="whether to add global information", action = "store_false")
args.add_argument('--is_edge', help="whether to use edge update", action = "store_false")
    args.add_argument('--pooling', type=str, help="pooling_type", choices=['memory', 'avg', 'min', 'max'], default = "memory")
args.add_argument('--is_lp', help="whether to use link prediction loss", action = "store_false")
args.add_argument("--lp_weight", type = float, help="the weight of link prediction loss", default=0.1)
args = args.parse_args()
return args
def logging_builder(args):
logger = logging.getLogger(__file__)
logger.setLevel(logging.DEBUG)
consoleHandler = logging.StreamHandler()
consoleHandler.setLevel(logging.DEBUG)
fileHandler = logging.FileHandler(os.path.join(os.getcwd(), args.log_name), mode='w')
fileHandler.setLevel(logging.DEBUG)
formatter = logging.Formatter('[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
consoleHandler.setFormatter(formatter)
fileHandler.setFormatter(formatter)
logger.addHandler(consoleHandler)
logger.addHandler(fileHandler)
return logger
class WarmupLinearLR(_LRScheduler):
def __init__(self, optimizer, step_size, min_lr, peak_percentage=0.1, last_epoch=-1):
self.step_size = step_size
self.peak_step = peak_percentage * step_size
self.min_lr = min_lr
super(WarmupLinearLR, self).__init__(optimizer, last_epoch)
def get_lr(self):
ret = []
for tmp_min_lr, tmp_base_lr in zip(self.min_lr, self.base_lrs):
if self._step_count <= self.peak_step:
ret.append(tmp_min_lr + (tmp_base_lr - tmp_min_lr) * self._step_count / self.peak_step)
else:
ret.append(tmp_min_lr + max(0, (tmp_base_lr - tmp_min_lr) * (self.step_size - self._step_count) / (self.step_size - self.peak_step)))
return ret
if __name__ == "__main__":
args = argparse.ArgumentParser()
args = add_arguments(args)
setup_seed(args.seed)
logger = logging_builder(args)
print(args)
os.makedirs(os.path.join(os.getcwd(), args.saved_dir), exist_ok = True)
encoder = GraphCAD(logger, args, args.input_dim, args.out_dim, args.outer_layer, args.inner_layer, is_global = args.is_global, is_edge = args.is_edge, pooling= args.pooling).cuda()
criterion = outlierLoss(args, logger, is_lp = args.is_lp, lp_weight = args.lp_weight).cuda()
with open(args.train_dir, 'rb') as files:
train_data = pickle.load(files)
with open(args.test_dir, 'rb') as files:
test_data = pickle.load(files)
logger.info("# Batch: {} - {}".format(len(train_data), len(train_data) / args.bs))
optimizer = torch.optim.Adam([{'params': encoder.parameters(), 'lr': args.lr}])
optimizer.zero_grad()
max_step = int(len(train_data) / args.bs * 10)
logger.info("max_step: %d, %d, %d, %d"%(max_step, len(train_data), args.bs, args.epochs))
scheduler = WarmupLinearLR(optimizer, max_step, min_lr=[args.min_lr])
encoder.train()
epoch_num = 0
max_map = -1
max_auc = -1
max_epoch = -1
for epoch_num in range(args.epochs):
batch_loss = []
batch_contras_loss = []
batch_lp_loss = []
batch_edge_score = []
batch_labels = []
batch_index = 0
random.shuffle(train_data)
for tmp_train in tqdm(train_data):
batch_index += 1
batch_data, edge_labels = tmp_train
node_outputs, adj_matrix, adj_weight, labels, batch_item = batch_data.x, batch_data.edge_index, batch_data.edge_attr.squeeze(-1), batch_data.y, batch_data.batch
node_outputs, adj_weight, centroid, output_loss, centroid_loss, edge_prob = encoder(node_outputs, adj_matrix, adj_weight, batch_item, 1)
overall_loss, _, contras_loss, lp_loss = criterion(output_loss, centroid_loss, edge_prob, edge_labels, adj_matrix, batch_item, labels, node_outputs, centroid)
# overall_loss.backward()
overall_loss = overall_loss / args.bs
overall_loss.backward()
batch_loss.append(overall_loss.item())
batch_contras_loss.append(contras_loss.item())
batch_lp_loss.append(lp_loss.item())
if (batch_index + 1) % args.bs == 0:
optimizer.step()
scheduler.step()
optimizer.zero_grad()
avg_batch_loss = np.mean(np.array(batch_loss))
avg_batch_contras_loss = np.mean(np.array(batch_contras_loss))
avg_batch_lp_loss = np.mean(np.array(batch_lp_loss))
logger.info("Epoch:{} Overall loss: {:.6f} Contrastive loss: {:.6f} LP_loss: {:.6f}".format(epoch_num, avg_batch_loss, avg_batch_contras_loss, avg_batch_lp_loss))
if (epoch_num + 1) % args.verbose == 0:
encoder.eval()
test_loss = []
test_contras_loss = []
test_lp_loss = []
test_gt = []
labels_list = []
scores_list = []
with torch.no_grad():
for tmp_test in tqdm(test_data):
each_sub, edge_labels = tmp_test
node_outputs, adj_matrix, adj_weight, labels, batch_item = each_sub.x, each_sub.edge_index, each_sub.edge_attr.squeeze(-1), each_sub.y, each_sub.batch
node_outputs, adj_weight, centroid, output_loss, centroid_loss, edge_prob = encoder(node_outputs, adj_matrix, adj_weight, batch_item, 1)
centroid = centroid.squeeze(0)
centroid_loss = centroid_loss.squeeze(0)
test_each_overall_loss, scores, test_each_contras_loss, test_each_lp_loss = criterion(output_loss, centroid_loss, edge_prob, edge_labels, adj_matrix, batch_item, labels, node_outputs, centroid)
scores = scores.detach().cpu().numpy()
scores_list.append(scores)
labels = labels.detach().cpu().numpy()
test_gt.append(labels)
test_loss.append(test_each_overall_loss.item())
test_contras_loss.append(test_each_contras_loss.item())
test_lp_loss.append(test_each_lp_loss.item())
avg_test_loss = np.mean(np.array(test_loss))
avg_test_contras_loss = np.mean(np.array(test_contras_loss))
avg_test_lp_loss = np.mean(np.array(test_lp_loss))
auc, maps = MAPs(test_gt, scores_list)
logger.info("Epoch: {} Auc: {:.6f} Maps: {:.6f} Max-Auc: {:.6f} Max-Maps: {:.6f}".format(epoch_num, auc, maps, max_auc, max_map))
if maps > max_map or auc > max_auc:
max_epoch = epoch_num
max_map = maps if maps > max_map else max_map
max_auc = auc if auc > max_auc else max_auc
# state = {'encoder': encoder.state_dict()}
# torch.save(state, saved_file + "model_" + str(epoch_num))
logger.info("***************** Epoch: {} Max Auc: {:.6f} Maps: {:.6f} *******************".format(epoch_num, max_auc, max_map))
encoder.train()
optimizer.zero_grad()
logger.info("***************** Max_Epoch: {} Max Auc: {:.6f} Maps: {:.6f}*******************".format(max_epoch, max_auc, max_map)) | 9,342 | 46.668367 | 213 | py |
GraphCAD | GraphCAD-main/AMiner/utils.py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import math, copy, time
from torch.autograd import Variable
import multiprocessing
from sklearn.metrics import roc_auc_score, auc, roc_curve
from torch_geometric.utils import add_self_loops, degree, softmax, to_dense_adj, dense_to_sparse
from operator import itemgetter
from scipy import sparse
import random
def MAPs(label_lists, score_lists):
assert len(label_lists) == len(score_lists)
maps = []
mean_auc = []
total_count = 0
# print(np.array(score_lists).shape)
total_nan = 0
for sub_labels, sub_scores in zip(label_lists, score_lists):
assert len(sub_labels) == len(sub_scores)
combine = [each for each in zip(sub_scores, sub_labels)]
sorted_combine = sorted(combine, key=itemgetter(0))
# print(sorted_combine)
rights = 0
ps = []
tmp_scores = []
tmp_labels = []
for index in range(len(sorted_combine)):
ins_scores, ins_labels = sorted_combine[index]
tmp_scores.append(ins_scores)
tmp_labels.append(ins_labels)
if(ins_labels == 0):
rights += 1
ps.append(rights/(index+1))
tmp_scores = np.array(tmp_scores)
nan_num = len(tmp_scores[np.isnan(tmp_scores)])
total_nan += nan_num
tmp_scores = np.nan_to_num(tmp_scores)
tmp_labels = np.array(tmp_labels)
auc = roc_auc_score(1-tmp_labels, -1 * tmp_scores)
ap = np.mean(np.array(ps))
maps.append((ap, len(sub_labels)))
mean_auc.append(auc)
total_count += len(sub_labels)
assert len(maps) == len(mean_auc) == len(label_lists)
maps_scores = 0
maps_weight = 0
for each in maps:
ap, count = each
each_w = total_count / count
maps_scores += ap * each_w
maps_weight += each_w
norm_maps = maps_scores/maps_weight
mean_auc = np.mean(np.array(mean_auc))
return mean_auc, norm_maps
def setup_seed(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
| 2,237 | 27.329114 | 96 | py |
GraphCAD | GraphCAD-main/AMiner/models.py | from random import sample
import torch
import torch.nn as nn
import torch.nn.functional as F
from sklearn.metrics.pairwise import euclidean_distances, cosine_similarity
import pickle
from torch_geometric.nn import GCNConv, MessagePassing, GINConv, GATConv
from torch_geometric.utils import add_self_loops, degree, softmax, to_dense_adj, dense_to_sparse
from torch_scatter import scatter_add
import math
import numpy as np
import pyro
class outlierLoss(nn.Module):
def __init__(self, args, logger, is_lp = True, lp_weight = 0.1):
super(outlierLoss, self).__init__()
self.cross_entropy = nn.CrossEntropyLoss().cuda()
self.dim = args.out_dim
self.temp = 0.1
self.margin_1 = 0.5
self.lamb = 0.5
self.thres = torch.tensor(0.0).cuda()
self.lp_weight = lp_weight
self.is_lp = is_lp
logger.info("is_lp: %s, lp_weight: %f"%(self.is_lp, self.lp_weight))
def cal_lp_loss(self, edge_prob, edge_labels):
# feat_all_trans: [(b x N) x C]
non_zero = torch.nonzero(edge_labels)
zero = torch.nonzero(edge_labels == 0)
pos_prob = edge_prob[non_zero][:8192]
neg_prob = edge_prob[zero]
        logits = torch.cat((pos_prob, neg_prob.view(1, neg_prob.size(0)).repeat(pos_prob.size(0), 1)), dim = 1)
logits_labels = torch.zeros([pos_prob.size(0)]).cuda().long()
lp_loss = self.cross_entropy(logits/self.temp, logits_labels)
return lp_loss
def forward(self, feat_all, q, edge_prob, edge_labels, adj_mat, batch_item, labels, raw_feat_all, raw_centroid):
# q2all: [N, 1]
# q = F.normalize(q.view(C, 1), p=2, dim=0)
# feat_all = F.normalize(feat_all, p=2, dim=1)
q2all = torch.mm(feat_all, q.view(self.dim, 1)).squeeze(-1)
pos_len = torch.sum(labels, dim = 0)
neg_len = q2all.size(0) - pos_len
# pos [P]; neg [Neg]
q2all_pos, q2all_neg = torch.split(q2all, [pos_len, neg_len], dim = 0)
q2all_each_logits = torch.cat([q2all_pos.unsqueeze(-1), q2all_neg.view(1, neg_len).repeat(pos_len, 1)], dim = -1)
q2all_each_logits = q2all_each_logits.view(pos_len, neg_len + 1)
# pos: [b x P, 1]
# neg: [b x p, Neg + (b - 1) * N]
# pos_score, neg_score = torch.split(q2all_each_logits, [1, Neg + (b-1) * N], dim = -1)
logits_labels = torch.zeros([pos_len]).cuda().long()
contras_loss = self.cross_entropy(q2all_each_logits/self.temp, logits_labels)
if self.is_lp:
lp_loss = self.cal_lp_loss(edge_prob, edge_labels)
else:
lp_loss = torch.tensor(0.0).cuda()
# outlier_loss = contras_loss + consist_loss
outlier_loss = contras_loss + self.lp_weight * lp_loss
raw_feat_all = F.normalize(raw_feat_all, p=2, dim=1)
raw_centroid = F.normalize(raw_centroid.view(self.dim, 1), p=2, dim=0)
scores = torch.mm(raw_feat_all, raw_centroid.view(self.dim, 1)).squeeze(-1)
return outlier_loss, scores, contras_loss, lp_loss
class GraphCAD(nn.Module):
def __init__(self, logger, args,in_dim, out_dim, total_layer_num, ins_layer_num, is_norm = True, is_edge = True, is_node = True, is_system = True, is_global = True, pooling = "memory"):
super(GraphCAD, self).__init__()
self.total_layer_num = total_layer_num
self.is_edge = is_edge
self.is_node = is_node
self.is_system = is_system
self.in_dim = in_dim
# edge_model
# self.edgemodel = None
if is_edge:
logger.info("EdgeUpdate")
self.edgemodel = EdgeUpdate(is_global, out_dim, 1)
# conv_model
if is_node:
logger.info("NodeUpdate")
self.node_updates = nn.ModuleList()
for _ in range(self.total_layer_num):
self.node_updates.append(NodeUpdate(out_dim, out_dim, is_norm, ins_layer_num))
# sys_model
if is_system:
logger.info("SystemUpdate")
self.sys_updates = nn.ModuleList()
for _ in range(self.total_layer_num):
self.sys_updates.append(SystemUpdate(out_dim, out_dim, pooling))
self.mlp_head = nn.Sequential(
nn.Linear(out_dim, out_dim),
nn.ReLU(),
nn.Linear(out_dim, out_dim)
)
self.drop_layer = nn.Dropout(0.5)
self.relu = nn.ReLU()
logger.info("is_edge: %r, is_global: %r pooling: %r"%(is_edge, is_global, pooling))
def forward(self, x, edge_index, edge_weight, batch_item, bs):
init_lens = edge_index
# x_pos, x_neg = torch.split(x.view(b, N, C), [P, Neg], dim = 1)
centroid = torch.mean(x.view(bs, -1, self.in_dim), dim = 1)
edge_prob = edge_index
x_trans_loss = x
for index in range(self.total_layer_num):
# edge update
if self.is_edge:
edge_index, edge_weight, edge_prob, x_trans_loss = self.edgemodel(x, edge_index, edge_weight, centroid, batch_item, bs)
# node update
if self.is_node:
x, saved_x = self.node_updates[index](x, edge_index, edge_weight)
# system update
if self.is_system:
centroid = self.sys_updates[index](saved_x, centroid, bs)
x_loss = self.mlp_head(x)
centroid_loss = self.mlp_head(centroid)
final_len = edge_index
return x, edge_weight, centroid, x_loss, centroid_loss, edge_prob
class edgePredictor(nn.Module):
def __init__(self, dim, is_global):
super(edgePredictor, self).__init__()
self.is_global = is_global
self.dim = dim
if is_global:
self.l2r = nn.Sequential(
nn.Linear(3 * dim, dim),
nn.ReLU(),
nn.Dropout(0.5),
nn.Linear(dim, 1)
# nn.Sigmoid()
)
else:
self.l2r = nn.Sequential(
nn.Linear(dim, dim),
nn.ReLU(),
nn.Dropout(0.5),
nn.Linear(dim, 1)
# nn.Sigmoid()
)
def forward(self, node_features, edge_index, centroid, bs):
node_features = node_features.view(-1, self.dim)
node_j = node_features[edge_index[0]]
node_i = node_features[edge_index[1]]
if self.is_global:
residual_node_features = (node_features.view(bs, -1, self.dim) - centroid.view(bs, 1, self.dim)).view(-1, self.dim)
residual_node_j = residual_node_features[edge_index[0]]
residual_node_i = residual_node_features[edge_index[1]]
sim_vec = torch.cat((torch.abs(node_i - node_j), residual_node_i, residual_node_j), dim = 1)
else:
sim_vec = torch.abs(node_i - node_j)
prob_score = self.l2r(sim_vec)
return prob_score
class EdgeUpdate(nn.Module):
def __init__(self, is_global, feature_dim, edge_dim, load_dir = None):
super(EdgeUpdate, self).__init__()
self.feature_dim = feature_dim
self.edge_dim = edge_dim
self.temp = 0.6
self.thres_1 = torch.nn.Threshold(0.5, 0)
self.thres_2 = torch.nn.Threshold(-0.49, 1)
self.mins = torch.tensor(1e-10).cuda()
self.relu_fuc = nn.ReLU()
self.edge_skip_alpha = nn.Parameter(torch.rand(1))
self.ep_net = edgePredictor(feature_dim, is_global)
def forward(self, x, edge_index, edge_weight, centroid, batch_item, bs):
pre_prob = self.ep_net(x, edge_index, centroid, bs).squeeze(-1)
pre_adj = torch.sigmoid(pre_prob)
sampled_edge = torch.ones([pre_adj.size(0)]).cuda()
sampled_edge = pyro.distributions.RelaxedBernoulliStraightThrough(temperature=self.temp, probs = pre_adj).rsample()
combine_weight = self.edge_skip_alpha * (sampled_edge * edge_weight) + (1-self.edge_skip_alpha) * (sampled_edge * pre_adj)
return edge_index, combine_weight, pre_adj, x
class NodeUpdate(torch.nn.Module):
def __init__(self, in_channel, out_channel, is_norm, layer_num):
super(NodeUpdate, self).__init__()
self.conv1 = GCNConv(in_channel, out_channel)
self.drop_layer = nn.Dropout(0.5)
def forward(self, x, edge_index, edge_weight = None):
his_x = []
x = F.relu(self.conv1(x, edge_index, edge_weight))
x = self.drop_layer(x)
his_x.append(x)
return x, his_x
class SystemUpdate(nn.Module):
def __init__(self, in_channel, out_channel, pooling):
super(SystemUpdate, self).__init__()
self.in_dim = in_channel
self.out_dim = out_channel
self.pooling = pooling
def forward(self, his_x, init_c, bs):
mem_lens = len(his_x)
if self.pooling == "memory":
for index in range(mem_lens):
tmp_mem = his_x[index].view(bs, -1, self.in_dim)
tmp_score = torch.bmm(tmp_mem, init_c.view(bs, self.in_dim, 1)).view(bs, -1)
tmp_att = F.softmax(tmp_score, dim = 1)
tmp_read = torch.sum(tmp_att.view(bs, -1, 1) * tmp_mem, dim = 1)
init_c = tmp_read
elif self.pooling == "avg":
x = his_x[-1].view(bs, -1, self.in_dim)
init_c = torch.mean(x, dim = 1)
elif self.pooling == "sum":
x = his_x[-1].view(bs, -1, self.in_dim)
init_c = torch.sum(x, dim = 1)
elif self.pooling == "max":
x = his_x[-1].view(bs, -1, self.in_dim)
init_c = torch.max(x, dim = 1).values
else:
raise ValueError("No such pooling type!")
return init_c
| 9,780 | 37.507874 | 189 | py |
GraphCAD | GraphCAD-main/Yelp/main.py | import os
import argparse
import numpy as np
import torch
import torch.nn as nn
from tqdm import tqdm
import random
import json
import pickle
from collections import defaultdict
from operator import itemgetter
import logging
from torch_geometric.data import Data, DataLoader
from torch.optim.lr_scheduler import _LRScheduler
from models import GraphCAD, outlierLoss
from utils import *
torch.backends.cudnn.benchmark = True
torch.autograd.set_detect_anomaly(True)
os.environ["TOKENIZERS_PARALLELISM"] = "false"
# CUDA_VISIBLE_DEVICES=1 python main.py --data_dir '/raid/chenbo/outlier_detection/release_data/yelp_data.pkl'
def add_arguments(args):
# essential paras
args.add_argument('--data_dir', type=str, help="data_dir", required = True)
    args.add_argument('--saved_dir', type=str, help="directory to save models", default= "saved_model")
args.add_argument('--log_name', type=str, help="log_name", default = "log")
# training paras.
args.add_argument('--epochs', type=int, help="training #epochs", default=2000)
args.add_argument('--seed', type=int, help="seed", default=1)
args.add_argument('--lr', type=float, help="learning rate", default=5e-4)
args.add_argument('--min_lr', type=float, help="min lr", default=1e-4)
args.add_argument('--bs', type=int, help="batch size", default=1)
args.add_argument('--input_dim', type=int, help="input dimension", default=100)
args.add_argument('--out_dim', type=int, help="output dimension", default=100)
args.add_argument('--verbose', type=int, help="eval", default=1)
# model paras.
args.add_argument('--outer_layer', type=int, help="#layers of GraphCAD", default = 2)
args.add_argument('--inner_layer', type=int, help="#layers of node_update", default = 1)
args.add_argument('--is_global', help="whether to add global information", action = "store_false")
args.add_argument('--is_edge', help="whether to use edge update", action = "store_false")
    args.add_argument('--pooling', type=str, help="pooling_type", choices=['memory', 'avg', 'sum', 'max'], default = "memory")
args.add_argument('--is_lp', help="whether to use link prediction loss", action = "store_false")
args.add_argument("--lp_weight", type = float, help="the weight of link prediction loss", default=0.1)
args = args.parse_args()
return args
def logging_builder(args):
logger = logging.getLogger(__file__)
logger.setLevel(logging.DEBUG)
consoleHandler = logging.StreamHandler()
consoleHandler.setLevel(logging.DEBUG)
fileHandler = logging.FileHandler(os.path.join(os.getcwd(), args.log_name), mode='w')
fileHandler.setLevel(logging.DEBUG)
formatter = logging.Formatter('[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
consoleHandler.setFormatter(formatter)
fileHandler.setFormatter(formatter)
logger.addHandler(consoleHandler)
logger.addHandler(fileHandler)
return logger
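# WarmupLinearLR: linear warm-up from min_lr to the base LR over the first
# peak_percentage of steps, then linear decay back toward min_lr by step_size.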
class WarmupLinearLR(_LRScheduler):
def __init__(self, optimizer, step_size, min_lr, peak_percentage=0.1, last_epoch=-1):
self.step_size = step_size
self.peak_step = peak_percentage * step_size
self.min_lr = min_lr
super(WarmupLinearLR, self).__init__(optimizer, last_epoch)
def get_lr(self):
ret = []
for tmp_min_lr, tmp_base_lr in zip(self.min_lr, self.base_lrs):
if self._step_count <= self.peak_step:
ret.append(tmp_min_lr + (tmp_base_lr - tmp_min_lr) * self._step_count / self.peak_step)
else:
ret.append(tmp_min_lr + max(0, (tmp_base_lr - tmp_min_lr) * (self.step_size - self._step_count) / (self.step_size - self.peak_step)))
return ret
if __name__ == "__main__":
args = argparse.ArgumentParser()
args = add_arguments(args)
setup_seed(args.seed)
logger = logging_builder(args)
print(args)
os.makedirs(os.path.join(os.getcwd(), args.saved_dir), exist_ok = True)
encoder = GraphCAD(logger, args, args.input_dim, args.out_dim, args.outer_layer, args.inner_layer, is_global = args.is_global, is_edge = args.is_edge, pooling= args.pooling).cuda()
criterion = outlierLoss(args, logger, is_lp = args.is_lp, lp_weight = args.lp_weight).cuda()
with open(args.data_dir, 'rb') as files:
data_collection = pickle.load(files)
data, y, train_mask, train_label_index, train_edge_ids, train_edge_labels, test_mask, test_label_index,test_edge_ids, test_edge_labels = data_collection
# for older version of pyg
# data = Data(**data.__dict__)
edges_attrs = torch.ones(data.edge_index.size(0))
data_set = DataLoader([Data(x = data.x.cuda(), edge_index = data.edge_index.cuda().t(), y = y.cuda(), edge_attr = edges_attrs.cuda().unsqueeze(-1))], batch_size=1, shuffle = True)
optimizer = torch.optim.Adam([{'params': encoder.parameters(), 'lr': args.lr}])
optimizer.zero_grad()
logger.info(f"Warm up schedular: {args.epochs}")
scheduler = WarmupLinearLR(optimizer, args.epochs, min_lr=[args.min_lr])
encoder.train()
epoch_num = 0
max_map = -1
max_auc = -1
max_epoch = -1
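    # Gradient accumulation: each loss is scaled by 1/args.bs and the optimizer
    # (and scheduler) only steps once every args.bs logical batches.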
for epoch_num in range(args.epochs):
batch_loss = []
batch_contras_loss = []
batch_lp_loss = []
batch_edge_score = []
batch_labels = []
batch_index = 0
for batch_data in tqdm(data_set):
batch_index += 1
node_outputs, adj_matrix, adj_weight, labels, batch_item = batch_data.x, batch_data.edge_index, batch_data.edge_attr.squeeze(-1), batch_data.y, batch_data.batch
node_outputs, adj_weight, centroid, output_loss, centroid_loss, edge_prob = encoder(node_outputs, adj_matrix, adj_weight, batch_item, 1)
# training index
node_outputs = node_outputs[train_mask][train_label_index]
output_loss = output_loss[train_mask][train_label_index]
edge_prob = edge_prob[train_edge_ids]
edge_labels = train_edge_labels.cuda()
labels = labels[train_mask][train_label_index]
overall_loss, _, contras_loss, lp_loss = criterion(output_loss, centroid_loss, edge_prob, edge_labels, adj_matrix, batch_item, labels, node_outputs, centroid)
# overall_loss.backward()
overall_loss = overall_loss / args.bs
overall_loss.backward()
batch_loss.append(overall_loss.item())
batch_contras_loss.append(contras_loss.item())
batch_lp_loss.append(lp_loss.item())
if (batch_index + 1) % args.bs == 0:
optimizer.step()
scheduler.step()
optimizer.zero_grad()
avg_batch_loss = np.mean(np.array(batch_loss))
avg_batch_contras_loss = np.mean(np.array(batch_contras_loss))
avg_batch_lp_loss = np.mean(np.array(batch_lp_loss))
logger.info("Epoch:{} Overall loss: {:.6f} Contrastive loss: {:.6f} LP_loss: {:.6f}".format(epoch_num, avg_batch_loss, avg_batch_contras_loss, avg_batch_lp_loss))
if (epoch_num + 1) % args.verbose == 0:
encoder.eval()
test_loss = []
test_contras_loss = []
test_lp_loss = []
test_gt = []
labels_list = []
scores_list = []
with torch.no_grad():
for batch_test in tqdm(data_set):
node_outputs, adj_matrix, adj_weight, labels, batch_item = batch_test.x, batch_test.edge_index, batch_test.edge_attr.squeeze(-1), batch_test.y, batch_test.batch
node_outputs, adj_weight, centroid, output_loss, centroid_loss, edge_prob = encoder(node_outputs, adj_matrix, adj_weight, batch_item, 1)
centroid = centroid.squeeze(0)
centroid_loss = centroid_loss.squeeze(0)
# test index
node_outputs = node_outputs[test_mask][test_label_index]
output_loss = output_loss[test_mask][test_label_index]
edge_prob = edge_prob[test_edge_ids]
edge_labels = test_edge_labels.cuda()
labels = labels[test_mask][test_label_index]
test_each_overall_loss, scores, test_each_contras_loss, test_each_lp_loss = criterion(output_loss, centroid_loss, edge_prob, edge_labels, adj_matrix, batch_item, labels, node_outputs, centroid)
scores = scores.detach().cpu().numpy()
scores_list.append(scores)
labels = labels.detach().cpu().numpy()
test_gt.append(labels)
test_loss.append(test_each_overall_loss.item())
test_contras_loss.append(test_each_contras_loss.item())
test_lp_loss.append(test_each_lp_loss.item())
avg_test_loss = np.mean(np.array(test_loss))
avg_test_contras_loss = np.mean(np.array(test_contras_loss))
avg_test_lp_loss = np.mean(np.array(test_lp_loss))
auc, maps = MAPs(test_gt, scores_list)
logger.info("Epoch: {} Auc: {:.6f} Maps: {:.6f} Max-Auc: {:.6f} Max-Maps: {:.6f}".format(epoch_num, auc, maps, max_auc, max_map))
if maps > max_map or auc > max_auc:
max_epoch = epoch_num
max_map = maps if maps > max_map else max_map
max_auc = auc if auc > max_auc else max_auc
# state = {'encoder': encoder.state_dict()}
# torch.save(state, saved_file + "model_" + str(epoch_num))
logger.info("***************** Epoch: {} Max Auc: {:.6f} Maps: {:.6f} *******************".format(epoch_num, max_auc, max_map))
encoder.train()
optimizer.zero_grad()
logger.info("***************** Max_Epoch: {} Max Auc: {:.6f} Maps: {:.6f}*******************".format(max_epoch, max_auc, max_map)) | 9,996 | 46.379147 | 213 | py |
GraphCAD | GraphCAD-main/Yelp/utils.py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import math, copy, time
from torch.autograd import Variable
import multiprocessing
from sklearn.metrics import roc_auc_score, auc, roc_curve
from torch_geometric.utils import add_self_loops, degree, softmax, to_dense_adj, dense_to_sparse
from operator import itemgetter
from scipy import sparse
import random
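# MAPs: per-graph ranking metrics. Nodes are sorted by ascending score
# (similarity to the centroid), label 0 is treated as the target class for
# average precision, NaN scores are zeroed, and the function returns the mean
# ROC-AUC together with an average precision weighted inversely by the number
# of labelled nodes in each graph.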
def MAPs(label_lists, score_lists):
assert len(label_lists) == len(score_lists)
maps = []
mean_auc = []
total_count = 0
# print(np.array(score_lists).shape)
total_nan = 0
for sub_labels, sub_scores in zip(label_lists, score_lists):
assert len(sub_labels) == len(sub_scores)
combine = [each for each in zip(sub_scores, sub_labels)]
sorted_combine = sorted(combine, key=itemgetter(0))
# print(sorted_combine)
rights = 0
ps = []
tmp_scores = []
tmp_labels = []
for index in range(len(sorted_combine)):
ins_scores, ins_labels = sorted_combine[index]
tmp_scores.append(ins_scores)
tmp_labels.append(ins_labels)
if(ins_labels == 0):
rights += 1
ps.append(rights/(index+1))
tmp_scores = np.array(tmp_scores)
nan_num = len(tmp_scores[np.isnan(tmp_scores)])
total_nan += nan_num
tmp_scores = np.nan_to_num(tmp_scores)
tmp_labels = np.array(tmp_labels)
auc = roc_auc_score(1-tmp_labels, -1 * tmp_scores)
ap = np.mean(np.array(ps))
maps.append((ap, len(sub_labels)))
mean_auc.append(auc)
total_count += len(sub_labels)
assert len(maps) == len(mean_auc) == len(label_lists)
maps_scores = 0
maps_weight = 0
for each in maps:
ap, count = each
each_w = total_count / count
maps_scores += ap * each_w
maps_weight += each_w
norm_maps = maps_scores/maps_weight
mean_auc = np.mean(np.array(mean_auc))
return mean_auc, norm_maps
def setup_seed(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
| 2,237 | 27.329114 | 96 | py |
GraphCAD | GraphCAD-main/Yelp/models.py | from random import sample
import torch
import torch.nn as nn
import torch.nn.functional as F
from sklearn.metrics.pairwise import euclidean_distances, cosine_similarity
import pickle
from torch_geometric.nn import GINConv_w as GINConv
from torch_geometric.utils import add_self_loops, degree, softmax, to_dense_adj, dense_to_sparse
from torch_scatter import scatter_add
import math
import numpy as np
import pyro
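# outlierLoss: InfoNCE-style contrastive loss between L2-normalised node
# embeddings and the graph centroid (every positive node against all
# negatives), optionally combined with a link-prediction loss on the predicted
# edge probabilities, weighted by lp_weight.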
class outlierLoss(nn.Module):
def __init__(self, args, logger, is_lp = True, lp_weight = 0.1):
super(outlierLoss, self).__init__()
self.cross_entropy = nn.CrossEntropyLoss().cuda()
self.dim = args.out_dim
self.temp = 0.1
self.margin_1 = 0.5
self.lamb = 0.5
self.thres = torch.tensor(0.0).cuda()
self.lp_weight = lp_weight
self.is_lp = is_lp
logger.info("is_lp: %s, lp_weight: %f"%(self.is_lp, self.lp_weight))
def cal_lp_loss(self, edge_prob, edge_labels):
# feat_all_trans: [(b x N) x C]
non_zero = torch.nonzero(edge_labels)
zero = torch.nonzero(edge_labels == 0)
if non_zero.size(0) != 0 and zero.size(0) != 0:
pos_prob = edge_prob[non_zero][:8192]
neg_prob = edge_prob[zero][:8192]
            logits = torch.cat((pos_prob, neg_prob.view(1, neg_prob.size(0)).repeat(pos_prob.size(0), 1)), dim = 1)
logits_labels = torch.zeros([pos_prob.size(0)]).cuda().long()
lp_loss = self.cross_entropy(logits/self.temp, logits_labels)
else:
lp_loss = torch.tensor(0.0).cuda()
return lp_loss
def forward(self, feat_all, q, edge_prob, edge_labels, adj_mat, batch_item, labels, raw_feat_all, raw_centroid):
# q2all: [N, 1]
q = F.normalize(q.view(self.dim, 1), p=2, dim=0)
feat_all = F.normalize(feat_all, p=2, dim=1)
q2all = torch.mm(feat_all, q.view(self.dim, 1)).squeeze(-1)
pos_len = torch.sum(labels, dim = 0)
neg_len = q2all.size(0) - pos_len
# pos [P]; neg [Neg]
q2all_pos, q2all_neg = torch.split(q2all, [pos_len, neg_len], dim = 0)
q2all_each_logits = torch.cat([q2all_pos.unsqueeze(-1), q2all_neg.view(1, neg_len).repeat(pos_len, 1)], dim = -1)
# print(q2all_each_logits.size())
q2all_each_logits = q2all_each_logits.view(pos_len, neg_len + 1)
# pos: [b x P, 1]
        # neg: [b x p, Neg + (b - 1) * N]
logits_labels = torch.zeros([pos_len]).cuda().long()
contras_loss = self.cross_entropy(q2all_each_logits/self.temp, logits_labels)
if self.is_lp:
lp_loss = self.cal_lp_loss(edge_prob, edge_labels)
else:
lp_loss = torch.tensor(0.0).cuda()
outlier_loss = contras_loss + self.lp_weight * lp_loss
scores = torch.mm(feat_all, q.view(self.dim, 1)).squeeze(-1)
return outlier_loss, scores, contras_loss, lp_loss
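# GraphCAD stacks total_layer_num rounds of edge update -> node update ->
# system (centroid) update, then projects nodes and centroid through a shared
# MLP head; it returns node states, edge weights, the centroid, both
# projections, and the predicted edge probabilities.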
class GraphCAD(nn.Module):
def __init__(self, logger, args,in_dim, out_dim, total_layer_num, ins_layer_num, is_norm = True, is_edge = True, is_node = True, is_system = True, is_global = True, pooling = "memory"):
super(GraphCAD, self).__init__()
self.total_layer_num = total_layer_num
self.is_edge = is_edge
self.is_node = is_node
self.is_system = is_system
self.in_dim = in_dim
# edge_model
# self.edgemodel = None
if is_edge:
logger.info("EdgeUpdate")
self.edgemodel = EdgeUpdate(is_global, out_dim, 1)
# conv_model
if is_node:
logger.info("NodeUpdate")
self.node_updates = nn.ModuleList()
for _ in range(self.total_layer_num):
self.node_updates.append(NodeUpdate(out_dim, out_dim, is_norm, ins_layer_num))
# sys_model
if is_system:
logger.info("SystemUpdate")
self.sys_updates = nn.ModuleList()
for _ in range(self.total_layer_num):
self.sys_updates.append(SystemUpdate(out_dim, out_dim, pooling))
self.mlp_head = nn.Sequential(
nn.Linear(out_dim, out_dim),
nn.ReLU(),
nn.Linear(out_dim, out_dim)
)
self.drop_layer = nn.Dropout(0.5)
self.relu = nn.ReLU()
logger.info("is_edge: %r, is_global: %r pooling: %r"%(is_edge, is_global, pooling))
def forward(self, x, edge_index, edge_weight, batch_item, bs):
init_lens = edge_index
# x_pos, x_neg = torch.split(x.view(b, N, C), [P, Neg], dim = 1)
centroid = torch.mean(x.view(bs, -1, self.in_dim), dim = 1)
edge_prob = edge_index
x_trans_loss = x
for index in range(self.total_layer_num):
# edge update
if self.is_edge:
edge_index, edge_weight, edge_prob, x_trans_loss = self.edgemodel(x, edge_index, edge_weight, centroid, batch_item, bs)
# node update
if self.is_node:
x, saved_x = self.node_updates[index](x, edge_index, edge_weight)
# system update
if self.is_system:
centroid = self.sys_updates[index](saved_x, centroid, bs)
x_loss = self.mlp_head(x)
centroid_loss = self.mlp_head(centroid)
final_len = edge_index
return x, edge_weight, centroid, x_loss, centroid_loss, edge_prob
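# edgePredictor scores an edge from |h_i - h_j|; with is_global it also
# concatenates both endpoints' residuals to the graph centroid before the MLP.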
class edgePredictor(nn.Module):
def __init__(self, dim, is_global):
super(edgePredictor, self).__init__()
self.is_global = is_global
self.dim = dim
if is_global:
self.l2r = nn.Sequential(
nn.Linear(3 * dim, dim),
nn.ReLU(),
nn.Dropout(0.5),
nn.Linear(dim, 1)
# nn.Sigmoid()
)
else:
self.l2r = nn.Sequential(
nn.Linear(dim, dim),
nn.ReLU(),
nn.Dropout(0.5),
nn.Linear(dim, 1)
# nn.Sigmoid()
)
def forward(self, node_features, edge_index, centroid, bs):
node_features = node_features.view(-1, self.dim)
node_j = node_features[edge_index[0]]
node_i = node_features[edge_index[1]]
if self.is_global:
residual_node_features = (node_features.view(bs, -1, self.dim) - centroid.view(bs, 1, self.dim)).view(-1, self.dim)
residual_node_j = residual_node_features[edge_index[0]]
residual_node_i = residual_node_features[edge_index[1]]
sim_vec = torch.cat((torch.abs(node_i - node_j), residual_node_i, residual_node_j), dim = 1)
else:
sim_vec = torch.abs(node_i - node_j)
prob_score = self.l2r(sim_vec)
return prob_score
class EdgeUpdate(nn.Module):
def __init__(self, is_global, feature_dim, edge_dim, load_dir = None):
super(EdgeUpdate, self).__init__()
self.feature_dim = feature_dim
self.edge_dim = edge_dim
self.temp = 0.6
self.thres_1 = torch.nn.Threshold(0.5, 0)
self.thres_2 = torch.nn.Threshold(-0.49, 1)
self.mins = torch.tensor(1e-10).cuda()
self.relu_fuc = nn.ReLU()
self.edge_skip_alpha = nn.Parameter(torch.rand(1))
self.ep_net = edgePredictor(feature_dim, is_global)
def forward(self, x, edge_index, edge_weight, centroid, batch_item, bs):
pre_prob = self.ep_net(x, edge_index, centroid, bs).squeeze(-1)
pre_adj = torch.sigmoid(pre_prob)
sampled_edge = torch.ones([pre_adj.size(0)]).cuda()
sampled_edge = pyro.distributions.RelaxedBernoulliStraightThrough(temperature=self.temp, probs = pre_adj).rsample()
combine_weight = self.edge_skip_alpha * (sampled_edge * edge_weight) + (1-self.edge_skip_alpha) * (sampled_edge * pre_adj)
return edge_index, combine_weight, pre_adj, x
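# NodeUpdate here uses GINConv_w, a GINConv variant that accepts edge weights
# (not part of stock PyTorch Geometric), followed by ReLU, BatchNorm and dropout.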
class NodeUpdate(torch.nn.Module):
def __init__(self, in_channel, out_channel, is_norm, layer_num):
super(NodeUpdate, self).__init__()
self.bn1 = torch.nn.BatchNorm1d(in_channel)
self.conv1 = GINConv(
nn.Sequential(nn.Linear(out_channel, out_channel), nn.ReLU()))
self.drop_layer = nn.Dropout(0.5)
def forward(self, x, edge_index, edge_weight = None):
his_x = []
x = F.relu(self.conv1(x, edge_index, edge_weight))
x = self.bn1(x)
x = self.drop_layer(x)
his_x.append(x)
return x, his_x
class SystemUpdate(nn.Module):
def __init__(self, in_channel, out_channel, pooling):
super(SystemUpdate, self).__init__()
self.in_dim = in_channel
self.out_dim = out_channel
self.pooling = pooling
def forward(self, his_x, init_c, bs):
mem_lens = len(his_x)
if self.pooling == "memory":
for index in range(mem_lens):
tmp_mem = his_x[index].view(bs, -1, self.in_dim)
tmp_score = torch.bmm(tmp_mem, init_c.view(bs, self.in_dim, 1)).view(bs, -1)
tmp_att = F.softmax(tmp_score, dim = 1)
tmp_read = torch.sum(tmp_att.view(bs, -1, 1) * tmp_mem, dim = 1)
init_c = tmp_read
elif self.pooling == "avg":
x = his_x[-1].view(bs, -1, self.in_dim)
init_c = torch.mean(x, dim = 1)
elif self.pooling == "sum":
x = his_x[-1].view(bs, -1, self.in_dim)
init_c = torch.sum(x, dim = 1)
elif self.pooling == "max":
x = his_x[-1].view(bs, -1, self.in_dim)
init_c = torch.max(x, dim = 1).values
else:
raise ValueError("No such pooling type!")
return init_c
| 9,770 | 37.317647 | 189 | py |
GraphCAD | GraphCAD-main/Alpha/main.py | import os
import argparse
import numpy as np
import torch
import torch.nn as nn
from tqdm import tqdm
import random
import json
import pickle
from collections import defaultdict
from operator import itemgetter
import logging
from torch_geometric.data import Data, DataLoader
from torch.optim.lr_scheduler import _LRScheduler
from models import GraphCAD, outlierLoss
from utils import *
torch.backends.cudnn.benchmark = True
torch.autograd.set_detect_anomaly(True)
os.environ["TOKENIZERS_PARALLELISM"] = "false"
# CUDA_VISIBLE_DEVICES=1 python main.py --data_dir '/raid/chenbo/outlier_detection/release_data/alpha_data.pkl'
def add_arguments(args):
# essential paras
args.add_argument('--data_dir', type=str, help="data_dir", required = True)
    args.add_argument('--saved_dir', type=str, help="directory for saved models", default= "saved_model")
args.add_argument('--log_name', type=str, help="log_name", default = "log")
# training paras.
args.add_argument('--epochs', type=int, help="training #epochs", default=1000)
args.add_argument('--seed', type=int, help="seed", default=1)
args.add_argument('--lr', type=float, help="learning rate", default=5e-4)
args.add_argument('--min_lr', type=float, help="min lr", default=1e-4)
args.add_argument('--bs', type=int, help="batch size", default=1)
args.add_argument('--input_dim', type=int, help="input dimension", default=256)
args.add_argument('--out_dim', type=int, help="output dimension", default=256)
args.add_argument('--verbose', type=int, help="eval", default=1)
# model paras.
args.add_argument('--outer_layer', type=int, help="#layers of GraphCAD", default = 2)
args.add_argument('--inner_layer', type=int, help="#layers of node_update", default = 1)
args.add_argument('--is_global', help="whether to add global information", action = "store_false")
args.add_argument('--is_edge', help="whether to use edge update", action = "store_false")
    args.add_argument('--pooling', type=str, help="pooling_type", choices=['memory', 'avg', 'sum', 'max'], default = "memory")
args.add_argument('--is_lp', help="whether to use link prediction loss", action = "store_false")
args.add_argument("--lp_weight", type = float, help="the weight of link prediction loss", default=0.1)
args = args.parse_args()
return args
def logging_builder(args):
logger = logging.getLogger(__file__)
logger.setLevel(logging.DEBUG)
consoleHandler = logging.StreamHandler()
consoleHandler.setLevel(logging.DEBUG)
fileHandler = logging.FileHandler(os.path.join(os.getcwd(), args.log_name), mode='w')
fileHandler.setLevel(logging.DEBUG)
formatter = logging.Formatter('[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
consoleHandler.setFormatter(formatter)
fileHandler.setFormatter(formatter)
logger.addHandler(consoleHandler)
logger.addHandler(fileHandler)
return logger
class WarmupLinearLR(_LRScheduler):
def __init__(self, optimizer, step_size, min_lr, peak_percentage=0.1, last_epoch=-1):
self.step_size = step_size
self.peak_step = peak_percentage * step_size
self.min_lr = min_lr
super(WarmupLinearLR, self).__init__(optimizer, last_epoch)
def get_lr(self):
ret = []
for tmp_min_lr, tmp_base_lr in zip(self.min_lr, self.base_lrs):
if self._step_count <= self.peak_step:
ret.append(tmp_min_lr + (tmp_base_lr - tmp_min_lr) * self._step_count / self.peak_step)
else:
ret.append(tmp_min_lr + max(0, (tmp_base_lr - tmp_min_lr) * (self.step_size - self._step_count) / (self.step_size - self.peak_step)))
# print(self._step_count, self.step_size, ret)
return ret
if __name__ == "__main__":
args = argparse.ArgumentParser()
args = add_arguments(args)
setup_seed(args.seed)
logger = logging_builder(args)
print(args)
os.makedirs(os.path.join(os.getcwd(), args.saved_dir), exist_ok = True)
encoder = GraphCAD(logger, args, args.input_dim, args.out_dim, args.outer_layer, args.inner_layer, is_global = args.is_global, is_edge = args.is_edge, pooling= args.pooling).cuda()
criterion = outlierLoss(args, logger, is_lp = args.is_lp, lp_weight = args.lp_weight).cuda()
with open(args.data_dir, 'rb') as files:
data_collection = pickle.load(files)
data, y, train_mask, train_label_index, train_edge_ids, train_edge_labels, test_mask, test_label_index,test_edge_ids, test_edge_labels = data_collection
# for older version of pyg
data = Data(**data.__dict__)
edges_attrs = torch.ones(data.edge_index.size(0))
data_set = DataLoader([Data(x = data.x.cuda(), edge_index = data.edge_index.cuda().t(), y = y.cuda(), edge_attr = edges_attrs.cuda().unsqueeze(-1))], batch_size=1, shuffle = True)
optimizer = torch.optim.Adam([{'params': encoder.parameters(), 'lr': args.lr}])
optimizer.zero_grad()
logger.info(f"Warm up schedular: {args.epochs}")
scheduler = WarmupLinearLR(optimizer, args.epochs, min_lr=[args.min_lr])
encoder.train()
epoch_num = 0
max_map = -1
max_auc = -1
max_epoch = -1
for epoch_num in range(args.epochs):
batch_loss = []
batch_contras_loss = []
batch_lp_loss = []
batch_edge_score = []
batch_labels = []
batch_index = 0
for batch_data in tqdm(data_set):
batch_index += 1
node_outputs, adj_matrix, adj_weight, labels, batch_item = batch_data.x, batch_data.edge_index, batch_data.edge_attr.squeeze(-1), batch_data.y, batch_data.batch
node_outputs, adj_weight, centroid, output_loss, centroid_loss, edge_prob = encoder(node_outputs, adj_matrix, adj_weight, batch_item, 1)
# training index
node_outputs = node_outputs[train_mask][train_label_index]
output_loss = output_loss[train_mask][train_label_index]
edge_prob = edge_prob[train_edge_ids]
edge_labels = train_edge_labels.cuda()
labels = labels[train_mask][train_label_index]
overall_loss, _, contras_loss, lp_loss = criterion(output_loss, centroid_loss, edge_prob, edge_labels, adj_matrix, batch_item, labels, node_outputs, centroid)
# overall_loss.backward()
overall_loss = overall_loss / args.bs
overall_loss.backward()
batch_loss.append(overall_loss.item())
batch_contras_loss.append(contras_loss.item())
batch_lp_loss.append(lp_loss.item())
if (batch_index + 1) % args.bs == 0:
optimizer.step()
# scheduler.step()
optimizer.zero_grad()
avg_batch_loss = np.mean(np.array(batch_loss))
avg_batch_contras_loss = np.mean(np.array(batch_contras_loss))
avg_batch_lp_loss = np.mean(np.array(batch_lp_loss))
logger.info("Epoch:{} Overall loss: {:.6f} Contrastive loss: {:.6f} LP_loss: {:.6f}".format(epoch_num, avg_batch_loss, avg_batch_contras_loss, avg_batch_lp_loss))
if (epoch_num + 1) % args.verbose == 0:
encoder.eval()
test_loss = []
test_contras_loss = []
test_lp_loss = []
test_gt = []
labels_list = []
scores_list = []
with torch.no_grad():
for batch_test in tqdm(data_set):
node_outputs, adj_matrix, adj_weight, labels, batch_item = batch_test.x, batch_test.edge_index, batch_test.edge_attr.squeeze(-1), batch_test.y, batch_test.batch
node_outputs, adj_weight, centroid, output_loss, centroid_loss, edge_prob = encoder(node_outputs, adj_matrix, adj_weight, batch_item, 1)
centroid = centroid.squeeze(0)
centroid_loss = centroid_loss.squeeze(0)
# test index
node_outputs = node_outputs[test_mask][test_label_index]
output_loss = output_loss[test_mask][test_label_index]
edge_prob = edge_prob[test_edge_ids]
edge_labels = test_edge_labels.cuda()
labels = labels[test_mask][test_label_index]
test_each_overall_loss, scores, test_each_contras_loss, test_each_lp_loss = criterion(output_loss, centroid_loss, edge_prob, edge_labels, adj_matrix, batch_item, labels, node_outputs, centroid)
scores = scores.detach().cpu().numpy()
scores_list.append(scores)
labels = labels.detach().cpu().numpy()
test_gt.append(labels)
test_loss.append(test_each_overall_loss.item())
test_contras_loss.append(test_each_contras_loss.item())
test_lp_loss.append(test_each_lp_loss.item())
avg_test_loss = np.mean(np.array(test_loss))
avg_test_contras_loss = np.mean(np.array(test_contras_loss))
avg_test_lp_loss = np.mean(np.array(test_lp_loss))
auc, maps = MAPs(test_gt, scores_list)
logger.info("Epoch: {} Auc: {:.6f} Maps: {:.6f} Max-Auc: {:.6f} Max-Maps: {:.6f}".format(epoch_num, auc, maps, max_auc, max_map))
if maps > max_map or auc > max_auc:
max_epoch = epoch_num
max_map = maps if maps > max_map else max_map
max_auc = auc if auc > max_auc else max_auc
# state = {'encoder': encoder.state_dict()}
# torch.save(state, saved_file + "model_" + str(epoch_num))
logger.info("***************** Epoch: {} Max Auc: {:.6f} Maps: {:.6f} *******************".format(epoch_num, max_auc, max_map))
encoder.train()
optimizer.zero_grad()
logger.info("***************** Max_Epoch: {} Max Auc: {:.6f} Maps: {:.6f}*******************".format(max_epoch, max_auc, max_map)) | 10,052 | 46.419811 | 213 | py |
GraphCAD | GraphCAD-main/Alpha/utils.py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import math, copy, time
from torch.autograd import Variable
import multiprocessing
from sklearn.metrics import roc_auc_score, auc, roc_curve
from torch_geometric.utils import add_self_loops, degree, softmax, to_dense_adj, dense_to_sparse
from operator import itemgetter
from scipy import sparse
import random
def MAPs(label_lists, score_lists):
assert len(label_lists) == len(score_lists)
maps = []
mean_auc = []
total_count = 0
# print(np.array(score_lists).shape)
total_nan = 0
for sub_labels, sub_scores in zip(label_lists, score_lists):
assert len(sub_labels) == len(sub_scores)
combine = [each for each in zip(sub_scores, sub_labels)]
sorted_combine = sorted(combine, key=itemgetter(0))
# print(sorted_combine)
rights = 0
ps = []
tmp_scores = []
tmp_labels = []
for index in range(len(sorted_combine)):
ins_scores, ins_labels = sorted_combine[index]
tmp_scores.append(ins_scores)
tmp_labels.append(ins_labels)
if(ins_labels == 0):
rights += 1
ps.append(rights/(index+1))
tmp_scores = np.array(tmp_scores)
nan_num = len(tmp_scores[np.isnan(tmp_scores)])
total_nan += nan_num
tmp_scores = np.nan_to_num(tmp_scores)
tmp_labels = np.array(tmp_labels)
auc = roc_auc_score(1-tmp_labels, -1 * tmp_scores)
ap = np.mean(np.array(ps))
maps.append((ap, len(sub_labels)))
mean_auc.append(auc)
total_count += len(sub_labels)
assert len(maps) == len(mean_auc) == len(label_lists)
maps_scores = 0
maps_weight = 0
for each in maps:
ap, count = each
each_w = total_count / count
maps_scores += ap * each_w
maps_weight += each_w
norm_maps = maps_scores/maps_weight
mean_auc = np.mean(np.array(mean_auc))
return mean_auc, norm_maps
def setup_seed(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
| 2,237 | 27.329114 | 96 | py |
GraphCAD | GraphCAD-main/Alpha/models.py | from random import sample
import torch
import torch.nn as nn
import torch.nn.functional as F
from sklearn.metrics.pairwise import euclidean_distances, cosine_similarity
import pickle
from torch_geometric.nn import GINConv_w as GINConv
from torch_geometric.utils import add_self_loops, degree, softmax, to_dense_adj, dense_to_sparse
from torch_scatter import scatter_add
import math
import numpy as np
import pyro
class outlierLoss(nn.Module):
def __init__(self, args, logger, is_lp = True, lp_weight = 0.1):
super(outlierLoss, self).__init__()
self.cross_entropy = nn.CrossEntropyLoss().cuda()
self.dim = args.out_dim
self.temp = 0.1
self.margin_1 = 0.5
self.lamb = 0.5
self.thres = torch.tensor(0.0).cuda()
self.lp_weight = lp_weight
self.is_lp = is_lp
logger.info("is_lp: %s, lp_weight: %f"%(self.is_lp, self.lp_weight))
def cal_lp_loss(self, edge_prob, edge_labels):
# feat_all_trans: [(b x N) x C]
non_zero = torch.nonzero(edge_labels)
zero = torch.nonzero(edge_labels == 0)
if non_zero.size(0) != 0 and zero.size(0) != 0:
pos_prob = edge_prob[non_zero][:8192]
neg_prob = edge_prob[zero]
            logits = torch.cat((pos_prob, neg_prob.view(1, neg_prob.size(0)).repeat(pos_prob.size(0), 1)), dim = 1)
logits_labels = torch.zeros([pos_prob.size(0)]).cuda().long()
lp_loss = self.cross_entropy(logits/self.temp, logits_labels)
else:
lp_loss = torch.tensor(0.0).cuda()
return lp_loss
def forward(self, feat_all, q, edge_prob, edge_labels, adj_mat, batch_item, labels, raw_feat_all, raw_centroid):
# q2all: [N, 1]
q = F.normalize(q.view(self.dim, 1), p=2, dim=0)
feat_all = F.normalize(feat_all, p=2, dim=1)
q2all = torch.mm(feat_all, q.view(self.dim, 1)).squeeze(-1)
pos_len = torch.sum(labels, dim = 0)
neg_len = q2all.size(0) - pos_len
# pos [P]; neg [Neg]
q2all_pos, q2all_neg = torch.split(q2all, [pos_len, neg_len], dim = 0)
q2all_each_logits = torch.cat([q2all_pos.unsqueeze(-1), q2all_neg.view(1, neg_len).repeat(pos_len, 1)], dim = -1)
# print(q2all_each_logits.size())
q2all_each_logits = q2all_each_logits.view(pos_len, neg_len + 1)
# pos: [b x P, 1]
        # neg: [b x p, Neg + (b - 1) * N]
logits_labels = torch.zeros([pos_len]).cuda().long()
contras_loss = self.cross_entropy(q2all_each_logits/self.temp, logits_labels)
if self.is_lp:
lp_loss = self.cal_lp_loss(edge_prob, edge_labels)
else:
lp_loss = torch.tensor(0.0).cuda()
outlier_loss = contras_loss + self.lp_weight * lp_loss
scores = torch.mm(feat_all, q.view(self.dim, 1)).squeeze(-1)
return outlier_loss, scores, contras_loss, lp_loss
class GraphCAD(nn.Module):
def __init__(self, logger, args,in_dim, out_dim, total_layer_num, ins_layer_num, is_norm = True, is_edge = True, is_node = True, is_system = True, is_global = True, pooling = "memory"):
super(GraphCAD, self).__init__()
self.total_layer_num = total_layer_num
self.is_edge = is_edge
self.is_node = is_node
self.is_system = is_system
self.in_dim = in_dim
# edge_model
# self.edgemodel = None
if is_edge:
logger.info("EdgeUpdate")
self.edgemodel = EdgeUpdate(is_global, out_dim, 1)
# conv_model
if is_node:
logger.info("NodeUpdate")
self.node_updates = nn.ModuleList()
for _ in range(self.total_layer_num):
self.node_updates.append(NodeUpdate(out_dim, out_dim, is_norm, ins_layer_num))
# sys_model
if is_system:
logger.info("SystemUpdate")
self.sys_updates = nn.ModuleList()
for _ in range(self.total_layer_num):
self.sys_updates.append(SystemUpdate(out_dim, out_dim, pooling))
self.mlp_head = nn.Sequential(
nn.Linear(out_dim, out_dim),
nn.ReLU(),
nn.Linear(out_dim, out_dim)
)
self.drop_layer = nn.Dropout(0.5)
self.relu = nn.ReLU()
logger.info("is_edge: %r, is_global: %r pooling: %r"%(is_edge, is_global, pooling))
def forward(self, x, edge_index, edge_weight, batch_item, bs):
init_lens = edge_index
# x_pos, x_neg = torch.split(x.view(b, N, C), [P, Neg], dim = 1)
centroid = torch.mean(x.view(bs, -1, self.in_dim), dim = 1)
edge_prob = edge_index
x_trans_loss = x
for index in range(self.total_layer_num):
# edge update
if self.is_edge:
edge_index, edge_weight, edge_prob, x_trans_loss = self.edgemodel(x, edge_index, edge_weight, centroid, batch_item, bs)
# node update
if self.is_node:
x, saved_x = self.node_updates[index](x, edge_index, edge_weight)
# system update
if self.is_system:
centroid = self.sys_updates[index](saved_x, centroid, bs)
x_loss = self.mlp_head(x)
centroid_loss = self.mlp_head(centroid)
final_len = edge_index
return x, edge_weight, centroid, x_loss, centroid_loss, edge_prob
class edgePredictor(nn.Module):
def __init__(self, dim, is_global):
super(edgePredictor, self).__init__()
self.is_global = is_global
self.dim = dim
if is_global:
self.l2r = nn.Sequential(
nn.Linear(3 * dim, dim),
nn.ReLU(),
nn.Dropout(0.5),
nn.Linear(dim, 1)
# nn.Sigmoid()
)
else:
self.l2r = nn.Sequential(
nn.Linear(dim, dim),
nn.ReLU(),
nn.Dropout(0.5),
nn.Linear(dim, 1)
# nn.Sigmoid()
)
def forward(self, node_features, edge_index, centroid, bs):
node_features = node_features.view(-1, self.dim)
node_j = node_features[edge_index[0]]
node_i = node_features[edge_index[1]]
if self.is_global:
residual_node_features = (node_features.view(bs, -1, self.dim) - centroid.view(bs, 1, self.dim)).view(-1, self.dim)
residual_node_j = residual_node_features[edge_index[0]]
residual_node_i = residual_node_features[edge_index[1]]
sim_vec = torch.cat((torch.abs(node_i - node_j), residual_node_i, residual_node_j), dim = 1)
else:
sim_vec = torch.abs(node_i - node_j)
prob_score = self.l2r(sim_vec)
return prob_score
class EdgeUpdate(nn.Module):
def __init__(self, is_global, feature_dim, edge_dim, load_dir = None):
super(EdgeUpdate, self).__init__()
self.feature_dim = feature_dim
self.edge_dim = edge_dim
self.temp = 0.6
self.thres_1 = torch.nn.Threshold(0.5, 0)
self.thres_2 = torch.nn.Threshold(-0.49, 1)
self.mins = torch.tensor(1e-10).cuda()
self.relu_fuc = nn.ReLU()
self.edge_skip_alpha = nn.Parameter(torch.rand(1))
self.ep_net = edgePredictor(feature_dim, is_global)
def forward(self, x, edge_index, edge_weight, centroid, batch_item, bs):
pre_prob = self.ep_net(x, edge_index, centroid, bs).squeeze(-1)
pre_adj = torch.sigmoid(pre_prob)
sampled_edge = torch.ones([pre_adj.size(0)]).cuda()
sampled_edge = pyro.distributions.RelaxedBernoulliStraightThrough(temperature=self.temp, probs = pre_adj).rsample()
combine_weight = self.edge_skip_alpha * (sampled_edge * edge_weight) + (1-self.edge_skip_alpha) * (sampled_edge * pre_adj)
return edge_index, combine_weight, pre_adj, x
class NodeUpdate(torch.nn.Module):
def __init__(self, in_channel, out_channel, is_norm, layer_num):
super(NodeUpdate, self).__init__()
self.bn1 = torch.nn.BatchNorm1d(in_channel)
self.conv1 = GINConv(
nn.Sequential(nn.Linear(out_channel, out_channel), nn.ReLU()))
self.drop_layer = nn.Dropout(0.5)
def forward(self, x, edge_index, edge_weight = None):
his_x = []
x = F.relu(self.conv1(x, edge_index, edge_weight))
x = self.bn1(x)
x = self.drop_layer(x)
his_x.append(x)
return x, his_x
class SystemUpdate(nn.Module):
def __init__(self, in_channel, out_channel, pooling):
super(SystemUpdate, self).__init__()
self.in_dim = in_channel
self.out_dim = out_channel
self.pooling = pooling
def forward(self, his_x, init_c, bs):
mem_lens = len(his_x)
if self.pooling == "memory":
for index in range(mem_lens):
tmp_mem = his_x[index].view(bs, -1, self.in_dim)
tmp_score = torch.bmm(tmp_mem, init_c.view(bs, self.in_dim, 1)).view(bs, -1)
tmp_att = F.softmax(tmp_score, dim = 1)
tmp_read = torch.sum(tmp_att.view(bs, -1, 1) * tmp_mem, dim = 1)
init_c = tmp_read
elif self.pooling == "avg":
x = his_x[-1].view(bs, -1, self.in_dim)
init_c = torch.mean(x, dim = 1)
elif self.pooling == "sum":
x = his_x[-1].view(bs, -1, self.in_dim)
init_c = torch.sum(x, dim = 1)
elif self.pooling == "max":
x = his_x[-1].view(bs, -1, self.in_dim)
init_c = torch.max(x, dim = 1).values
else:
raise ValueError("No such pooling type!")
return init_c
| 9,763 | 37.290196 | 189 | py |
CoordFill | CoordFill-master/test.py | import argparse
import os
import math
from functools import partial
import yaml
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
import datasets
import models
import utils
from PIL import Image
from torchvision import transforms
from torchsummary import summary
import numpy as np
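# batched_predict queries the decoder in chunks of bsize coordinates so that
# large coordinate grids do not exhaust GPU memory.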
def batched_predict(model, inp, coord, bsize):
with torch.no_grad():
model.gen_feat(inp)
n = coord.shape[1]
ql = 0
preds = []
while ql < n:
qr = min(ql + bsize, n)
pred = model.query_rgb(coord[:, ql: qr, :])
preds.append(pred)
ql = qr
pred = torch.cat(preds, dim=1)
return pred, preds
def tensor2PIL(tensor):
# img = tensor.cpu().clone()
# img = img.squeeze(0)
# img = unloader(img)
toPIL = transforms.ToPILImage()
return toPIL(tensor)
def eval_psnr(loader, model, data_norm=None, eval_type=None, eval_bsize=None,
verbose=False):
model.eval()
if data_norm is None:
data_norm = {
'inp': {'sub': [0], 'div': [1]},
            'gt_rgb': {'sub': [0], 'div': [1]}
}
t = data_norm['inp']
inp_sub = torch.FloatTensor(t['sub']).view(1, -1, 1, 1).cuda()
inp_div = torch.FloatTensor(t['div']).view(1, -1, 1, 1).cuda()
t = data_norm['gt_rgb']
gt_rgb_sub = torch.FloatTensor(t['sub']).view(1, 1, -1).cuda()
gt_rgb_div = torch.FloatTensor(t['div']).view(1, 1, -1).cuda()
if eval_type is None:
metric_fn = utils.calc_psnr
elif eval_type.startswith('div2k'):
scale = int(eval_type.split('-')[1])
metric_fn = partial(utils.calc_psnr, dataset='div2k', scale=scale)
elif eval_type.startswith('benchmark'):
scale = int(eval_type.split('-')[1])
metric_fn = partial(utils.calc_psnr, dataset='benchmark', scale=scale)
else:
raise NotImplementedError
# val_res = utils.Averager()
val_psnr = utils.Averager()
val_ssim = utils.Averager()
val_l1 = utils.Averager()
pbar = tqdm(loader, leave=False, desc='val')
for batch in pbar:
for k, v in batch.items():
batch[k] = v.cuda()
inp = (batch['inp'] - inp_sub) / inp_div
gt = (batch['gt_rgb'] - gt_rgb_sub) / gt_rgb_div
if eval_bsize is None:
with torch.no_grad():
# pred = model.encoder.mask_predict([inp, batch['mask']])
pred = model.encoder([inp, batch['mask']])
else:
pred = batched_predict(model, inp, batch['coord'], eval_bsize)
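        # Composite: keep ground-truth pixels where mask == 1 (known region),
        # take the prediction inside the holes, then undo the normalisation.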
pred = (pred * (1 - batch['mask']) + gt * batch['mask']) * gt_rgb_div + gt_rgb_sub
pred.clamp_(0, 1)
if eval_type is not None: # reshape for shaving-eval
ih, iw = batch['inp'].shape[-2:]
s = math.sqrt(batch['coord'].shape[1] / (ih * iw))
shape = [batch['inp'].shape[0], round(ih * s), round(iw * s), 3]
pred = pred.view(*shape) \
.permute(0, 3, 1, 2).contiguous()
batch['gt'] = batch['gt'].view(*shape) \
.permute(0, 3, 1, 2).contiguous()
psnr, ssim, l1 = metric_fn(model, pred, batch['gt_rgb'])
val_psnr.add(psnr.item(), inp.shape[0])
val_ssim.add(ssim.item(), inp.shape[0])
val_l1.add(l1.item(), inp.shape[0])
        if verbose:
            pbar.set_description('val psnr {:.4f} ssim {:.4f} lpips {:.4f}'.format(
                val_psnr.item(), val_ssim.item(), val_l1.item()))
return val_psnr.item(), val_ssim.item(), val_l1.item()
from collections import OrderedDict
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--config')
parser.add_argument('--model')
parser.add_argument('--gpu', default='0')
args = parser.parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
with open(args.config, 'r') as f:
config = yaml.load(f, Loader=yaml.FullLoader)
spec = config['test_dataset']
dataset = datasets.make(spec['dataset'])
dataset = datasets.make(spec['wrapper'], args={'dataset': dataset})
loader = DataLoader(dataset, batch_size=spec['batch_size'],
num_workers=8, pin_memory=True)
model = models.make(config['model']).cuda()
model.encoder.load_state_dict(torch.load(args.model, map_location='cuda:0'))
res = eval_psnr(loader, model,
data_norm=config.get('data_norm'),
eval_type=config.get('eval_type'),
eval_bsize=config.get('eval_bsize'),
verbose=True)
print('result psnr: {:.6f}'.format(res[0]))
print('result ssim: {:.6f}'.format(res[1]))
print('result lpips: {:.6f}'.format(res[2]))
| 4,752 | 31.333333 | 90 | py |
CoordFill | CoordFill-master/utils.py | import os
import time
import shutil
import math
import torch
import numpy as np
from torch.optim import SGD, Adam
from tensorboardX import SummaryWriter
from skimage.measure import compare_ssim
from skimage.measure import compare_psnr
class Averager():
def __init__(self):
self.n = 0.0
self.v = 0.0
def add(self, v, n=1.0):
self.v = (self.v * self.n + v * n) / (self.n + n)
self.n = self.n + n
def item(self):
return self.v
class Timer():
def __init__(self):
self.v = time.time()
def s(self):
self.v = time.time()
def t(self):
return time.time() - self.v
def time_text(t):
if t >= 3600:
return '{:.1f}h'.format(t / 3600)
elif t >= 60:
return '{:.1f}m'.format(t / 60)
else:
return '{:.1f}s'.format(t)
_log_path = None
def set_log_path(path):
global _log_path
_log_path = path
def log(obj, filename='log.txt'):
print(obj)
if _log_path is not None:
with open(os.path.join(_log_path, filename), 'a') as f:
print(obj, file=f)
def ensure_path(path, remove=True):
basename = os.path.basename(path.rstrip('/'))
if os.path.exists(path):
if remove:
shutil.rmtree(path)
os.makedirs(path)
else:
os.makedirs(path)
def set_save_path(save_path, remove=True):
ensure_path(save_path, remove=remove)
set_log_path(save_path)
writer = SummaryWriter(os.path.join(save_path, 'tensorboard'))
return log, writer
def compute_num_params(model, text=False):
tot = int(sum([np.prod(p.shape) for p in model.parameters()]))
if text:
if tot >= 1e6:
return '{:.1f}M'.format(tot / 1e6)
else:
return '{:.1f}K'.format(tot / 1e3)
else:
return tot
def make_optimizer(param_list, optimizer_spec, load_sd=False):
Optimizer = {
'sgd': SGD,
'adam': Adam
}[optimizer_spec['name']]
optimizer = Optimizer(param_list, **optimizer_spec['args'])
if load_sd:
optimizer.load_state_dict(optimizer_spec['sd'])
return optimizer
def make_coord(shape, ranges=None, flatten=True):
""" Make coordinates at grid centers.
"""
coord_seqs = []
for i, n in enumerate(shape):
if ranges is None:
v0, v1 = -1, 1
else:
v0, v1 = ranges[i]
r = (v1 - v0) / (2 * n)
seq = v0 + r + (2 * r) * torch.arange(n).float()
coord_seqs.append(seq)
ret = torch.stack(torch.meshgrid(*coord_seqs), dim=-1)
# if flatten:
# ret = ret.view(-1, ret.shape[-1])
return ret
def to_pixel_samples(img):
""" Convert the image to coord-RGB pairs.
img: Tensor, (3, H, W)
"""
coord = make_coord(img.shape[-2:])
rgb = img.view(3, -1).permute(1, 0)
return coord, rgb
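# calc_psnr: batched image metrics on [0, 1] tensors. Returns per-batch means
# of PSNR and SSIM (skimage) and LPIPS (via model.model_LPIPS); the l1 term is
# computed but not returned.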
def calc_psnr(model, sr, hr, dataset=None, scale=1, rgb_range=1):
pred_batch = (sr.detach().permute(0, 2, 3, 1).cpu().numpy() * 255).astype(np.uint8)
gt_batch = (hr.detach().permute(0, 2, 3, 1).cpu().numpy() * 255).astype(np.uint8)
psnr = 0
ssim = 0
l1 = 0
lpips = 0
batch_size = sr.size(0)
for i in range(batch_size):
gt, pred = gt_batch[i], pred_batch[i]
psnr += compare_psnr(pred, gt, data_range=255)
ssim += compare_ssim(pred, gt, data_range=255, multichannel=True, win_size=11)
l1 += np.mean(np.abs((np.mean(pred, 2) - np.mean(gt, 2))/255)) * 100
lpips += model.model_LPIPS.forward(im2tensor(pred), im2tensor(gt))
return psnr/batch_size, ssim/batch_size, lpips/batch_size
def im2tensor(image, imtype=np.uint8, cent=1., factor=255./2.):
return torch.Tensor((image / factor - cent)
[:, :, :, np.newaxis].transpose((3, 2, 0, 1)))
| 3,801 | 24.346667 | 87 | py |
CoordFill | CoordFill-master/train_parallel.py | import argparse
import os
import yaml
import torch
import torch.nn as nn
from tqdm import tqdm
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
from torch.optim.lr_scheduler import MultiStepLR, LambdaLR
from torchvision import transforms
import random
import datasets
import models
import utils
from test import eval_psnr, batched_predict
import numpy as np
from collections import OrderedDict
from PIL import Image
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
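# Distributed training entry point: one process per GPU, launched so that
# --local_rank is supplied (e.g. torch.distributed.launch). make_data_loader
# wraps each split in a DistributedSampler so every process sees its own shard.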
def make_data_loader(spec, tag=''):
if spec is None:
return None
dataset = datasets.make(spec['dataset'])
dataset = datasets.make(spec['wrapper'], args={'dataset': dataset})
log('{} dataset: size={}'.format(tag, len(dataset)))
for k, v in dataset[0].items():
log(' {}: shape={}'.format(k, tuple(v.shape)))
# loader = DataLoader(dataset, batch_size=spec['batch_size'],
# shuffle=(tag == 'train'), num_workers=8, pin_memory=True)
sampler = DistributedSampler(dataset, shuffle=(tag == 'train'))
loader = DataLoader(dataset, batch_size=spec['batch_size'], sampler=sampler, num_workers=8, pin_memory=True)
return loader
def make_data_loaders():
train_loader = make_data_loader(config.get('train_dataset'), tag='train')
val_loader = make_data_loader(config.get('val_dataset'), tag='val')
return train_loader, val_loader
def prepare_training():
if config.get('resume') is not None:
sv_file = torch.load(config['resume'])
model = models.make(sv_file['model'], load_sd=True).cuda()
optimizer = utils.make_optimizer(
model.parameters(), sv_file['optimizer'], load_sd=True)
epoch_start = sv_file['epoch'] + 1
else:
model = models.make(config['model']).cuda()
optimizer = utils.make_optimizer(
model.parameters(), config['optimizer'])
epoch_start = 1
max_epoch = config.get('epoch_max')
# lr_scheduler = LambdaLR(optimizer, lr_lambda= lambda epoch: (1-(epoch/max_epoch))**0.9)
lr_scheduler = None
# log('model: #params={}'.format(utils.compute_num_params(model.encoder, text=True)))
log('model: #params={}'.format(utils.compute_num_params(model, text=True)))
return model, optimizer, epoch_start, lr_scheduler
def train(train_loader, model, optimizer):
model.train()
train_loss_G = utils.Averager()
train_loss_D = utils.Averager()
data_norm = config['data_norm']
t = data_norm['inp']
inp_sub = torch.FloatTensor(t['sub']).view(1, -1, 1, 1).cuda()
inp_div = torch.FloatTensor(t['div']).view(1, -1, 1, 1).cuda()
t = data_norm['gt_rgb']
gt_rgb_sub = torch.FloatTensor(t['sub']).view(1, -1, 1, 1).cuda()
gt_rgb_div = torch.FloatTensor(t['div']).view(1, -1, 1, 1).cuda()
for batch in tqdm(train_loader, leave=False, desc='train'):
for k, v in batch.items():
batch[k] = v.cuda()
inp = (batch['inp'] - inp_sub) / inp_div
gt_rgb = (batch['gt_rgb'] - gt_rgb_sub) / gt_rgb_div
model.set_input(inp, gt_rgb, batch['mask'])
model.optimize_parameters()
train_loss_G.add(model.loss_G.item())
# if model.discriminator != None:
train_loss_D.add(model.loss_D.item())
return train_loss_G.item(), train_loss_D.item()
def main(config_, save_path):
global config, log, writer
config = config_
log, writer = utils.set_save_path(save_path, remove=False)
with open(os.path.join(save_path, 'config.yaml'), 'w') as f:
yaml.dump(config, f, sort_keys=False)
train_loader, val_loader = make_data_loaders()
if config.get('data_norm') is None:
config['data_norm'] = {
'inp': {'sub': [0], 'div': [1]},
            'gt_rgb': {'sub': [0], 'div': [1]}
}
model, optimizer, epoch_start, lr_scheduler = prepare_training()
n_gpus = 8
if n_gpus > 1:
# model = nn.parallel.DataParallel(model)
model = nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True) # device_ids will include all GPU devices by default
epoch_max = config['epoch_max']
epoch_val = config.get('epoch_val')
epoch_save = config.get('epoch_save')
max_val_v = -1e18
timer = utils.Timer()
for epoch in range(epoch_start, epoch_max + 1):
t_epoch_start = timer.t()
log_info = ['epoch {}/{}'.format(epoch, epoch_max)]
writer.add_scalar('lr', optimizer.param_groups[0]['lr'], epoch)
# train_loss_G, train_loss_D = train(train_loader, model, optimizer)
if n_gpus > 1:
train_loss_G, train_loss_D = train(train_loader, model.module, optimizer)
else:
train_loss_G, train_loss_D = train(train_loader, model, optimizer)
if lr_scheduler is not None:
lr_scheduler.step()
log_info.append('train G: loss={:.4f}'.format(train_loss_G))
writer.add_scalars('loss', {'train G': train_loss_G}, epoch)
log_info.append('train D: loss={:.4f}'.format(train_loss_D))
writer.add_scalars('loss', {'train D': train_loss_D}, epoch)
if n_gpus > 1:
model_ = model.module
else:
model_ = model
model_spec = config['model']
model_spec['sd'] = model_.state_dict()
optimizer_spec = config['optimizer']
optimizer_spec['sd'] = optimizer.state_dict()
torch.save(model_.encoder.state_dict(), os.path.join(save_path, 'encoder-epoch-last.pth'))
if (epoch_val is not None) and (epoch % epoch_val == 0):
# if n_gpus > 1 and (config.get('eval_bsize') is not None):
if n_gpus > 1:
model_ = model.module
else:
model_ = model
val_psnr, val_ssim, val_lpips = eval_psnr(val_loader, model_,
data_norm=config['data_norm'],
eval_type=config.get('eval_type'),
eval_bsize=config.get('eval_bsize'))
log_info.append('val: psnr={:.4f}'.format(val_psnr))
writer.add_scalars('psnr', {'val': val_psnr}, epoch)
log_info.append('val: ssim={:.4f}'.format(val_ssim))
writer.add_scalars('ssim', {'val': val_ssim}, epoch)
log_info.append('val: lpips={:.4f}'.format(val_lpips))
writer.add_scalars('lpips', {'val': val_lpips}, epoch)
if val_psnr > max_val_v:
max_val_v = val_psnr
torch.save(model_.encoder.state_dict(), os.path.join(save_path, 'encoder-epoch-best.pth'))
t = timer.t()
prog = (epoch - epoch_start + 1) / (epoch_max - epoch_start + 1)
t_epoch = utils.time_text(t - t_epoch_start)
t_elapsed, t_all = utils.time_text(t), utils.time_text(t / prog)
log_info.append('{} {}/{}'.format(t_epoch, t_elapsed, t_all))
log(', '.join(log_info))
writer.flush()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--config')
parser.add_argument('--name', default=None)
parser.add_argument('--tag', default=None)
parser.add_argument('--local_rank', default=0, type=int,
help='node rank for distributed training')
args = parser.parse_args()
torch.cuda.set_device(args.local_rank)
torch.distributed.init_process_group(backend="nccl")
with open(args.config, 'r') as f:
config = yaml.load(f, Loader=yaml.FullLoader)
print('config loaded.')
save_name = args.name
if save_name is None:
save_name = '_' + args.config.split('/')[-1][:-len('.yaml')]
if args.tag is not None:
save_name = save_name + '_' + args.tag
save_path = os.path.join('./save', save_name)
main(config, save_path)
| 7,851 | 35.52093 | 202 | py |
CoordFill | CoordFill-master/demo.py | import argparse
import os
from PIL import Image
import torch
from torchvision import transforms
import models
def resize_fn(img, size):
return transforms.ToTensor()(
transforms.Resize(size)(
transforms.ToPILImage()(img)))
def to_mask(mask):
return transforms.ToTensor()(
transforms.Grayscale(num_output_channels=1)(
transforms.ToPILImage()(mask)))
import yaml
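# Usage sketch (paths are placeholders):
# python demo.py --input <image> --mask <mask> --config <config.yaml> \
# --model <encoder.pth> --resolution <H,W> [--output out.png] [--gpu 0]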
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--input')
parser.add_argument('--mask')
parser.add_argument('--config')
parser.add_argument('--model')
parser.add_argument('--resolution')
parser.add_argument('--output', default='output.png')
parser.add_argument('--gpu', default='0')
args = parser.parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
with open(args.config, 'r') as f:
config = yaml.load(f, Loader=yaml.FullLoader)
img = transforms.ToTensor()(Image.open(args.input).convert('RGB'))
model = models.make(config['model']).cuda()
model.encoder.load_state_dict(torch.load(args.model, map_location='cuda:0'))
h, w = list(map(int, args.resolution.split(',')))
mask = transforms.ToTensor()(Image.open(args.mask).convert('RGB'))
img = resize_fn(img, (h, w))
img = (img - 0.5) / 0.5
mask = resize_fn(mask, (h, w))
mask = to_mask(mask)
mask[mask > 0] = 1
mask = 1 - mask
with torch.no_grad():
pred = model.encoder.mask_predict([img.unsqueeze(0).cuda(), mask.unsqueeze(0).cuda()])
pred = (pred * 0.5 + 0.5).clamp(0, 1).view(3, h, w).cpu()
transforms.ToPILImage()(pred).save(args.output) | 1,668 | 28.280702 | 94 | py |
CoordFill | CoordFill-master/train.py | import argparse
import os
import yaml
import torch
import torch.nn as nn
from tqdm import tqdm
from torch.utils.data import DataLoader
import datasets
import models
import utils
from test import eval_psnr, batched_predict
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def make_data_loader(spec, tag=''):
if spec is None:
return None
dataset = datasets.make(spec['dataset'])
dataset = datasets.make(spec['wrapper'], args={'dataset': dataset})
log('{} dataset: size={}'.format(tag, len(dataset)))
for k, v in dataset[0].items():
log(' {}: shape={}'.format(k, tuple(v.shape)))
loader = DataLoader(dataset, batch_size=spec['batch_size'],
shuffle=(tag == 'train'), num_workers=8, pin_memory=True)
return loader
def make_data_loaders():
train_loader = make_data_loader(config.get('train_dataset'), tag='train')
val_loader = make_data_loader(config.get('val_dataset'), tag='val')
return train_loader, val_loader
def prepare_training():
if config.get('resume') is not None:
sv_file = torch.load(config['resume'])
model = models.make(sv_file['model'], load_sd=True).cuda()
optimizer = utils.make_optimizer(
model.parameters(), sv_file['optimizer'], load_sd=True)
epoch_start = sv_file['epoch'] + 1
else:
model = models.make(config['model']).cuda()
optimizer = utils.make_optimizer(
model.parameters(), config['optimizer'])
epoch_start = 1
max_epoch = config.get('epoch_max')
lr_scheduler = None
log('model: #params={}'.format(utils.compute_num_params(model, text=True)))
return model, optimizer, epoch_start, lr_scheduler
def train(train_loader, model, optimizer):
model.train()
train_loss_G = utils.Averager()
train_loss_D = utils.Averager()
data_norm = config['data_norm']
t = data_norm['inp']
inp_sub = torch.FloatTensor(t['sub']).view(1, -1, 1, 1).cuda()
inp_div = torch.FloatTensor(t['div']).view(1, -1, 1, 1).cuda()
t = data_norm['gt_rgb']
gt_rgb_sub = torch.FloatTensor(t['sub']).view(1, -1, 1, 1).cuda()
gt_rgb_div = torch.FloatTensor(t['div']).view(1, -1, 1, 1).cuda()
for batch in tqdm(train_loader, leave=False, desc='train'):
for k, v in batch.items():
batch[k] = v.cuda()
inp = (batch['inp'] - inp_sub) / inp_div
gt_rgb = (batch['gt_rgb'] - gt_rgb_sub) / gt_rgb_div
model.set_input(inp, gt_rgb, batch['mask'])
model.optimize_parameters()
train_loss_G.add(model.loss_G.item())
train_loss_D.add(model.loss_D.item())
return train_loss_G.item(), train_loss_D.item()
def main(config_, save_path):
global config, log, writer
config = config_
log, writer = utils.set_save_path(save_path)
with open(os.path.join(save_path, 'config.yaml'), 'w') as f:
yaml.dump(config, f, sort_keys=False)
train_loader, val_loader = make_data_loaders()
if config.get('data_norm') is None:
config['data_norm'] = {
'inp': {'sub': [0], 'div': [1]},
            'gt_rgb': {'sub': [0], 'div': [1]}
}
model, optimizer, epoch_start, lr_scheduler = prepare_training()
model.optimizer_G = optimizer
model.optimizer_D = optimizer
epoch_max = config['epoch_max']
epoch_val = config.get('epoch_val')
epoch_save = config.get('epoch_save')
max_val_v = -1e18
timer = utils.Timer()
for epoch in range(epoch_start, epoch_max + 1):
t_epoch_start = timer.t()
log_info = ['epoch {}/{}'.format(epoch, epoch_max)]
writer.add_scalar('lr', optimizer.param_groups[0]['lr'], epoch)
train_loss_G, train_loss_D = train(train_loader, model, optimizer)
if lr_scheduler is not None:
lr_scheduler.step()
log_info.append('train G: loss={:.4f}'.format(train_loss_G))
writer.add_scalars('loss', {'train G': train_loss_G}, epoch)
log_info.append('train D: loss={:.4f}'.format(train_loss_D))
writer.add_scalars('loss', {'train D': train_loss_D}, epoch)
model_ = model
model_spec = config['model']
model_spec['sd'] = model_.state_dict()
optimizer_spec = config['optimizer']
optimizer_spec['sd'] = optimizer.state_dict()
torch.save(model.encoder.state_dict(), os.path.join(save_path, 'encoder-epoch-last.pth'))
if (epoch_val is not None) and (epoch % epoch_val == 0):
val_psnr, val_ssim, val_lpips = eval_psnr(val_loader, model_,
data_norm=config['data_norm'],
eval_type=config.get('eval_type'),
eval_bsize=config.get('eval_bsize'))
log_info.append('val: psnr={:.4f}'.format(val_psnr))
writer.add_scalars('psnr', {'val': val_psnr}, epoch)
log_info.append('val: ssim={:.4f}'.format(val_ssim))
writer.add_scalars('ssim', {'val': val_ssim}, epoch)
log_info.append('val: lpips={:.4f}'.format(val_lpips))
writer.add_scalars('lpips', {'val': val_lpips}, epoch)
if val_psnr > max_val_v:
max_val_v = val_psnr
torch.save(model.encoder.state_dict(), os.path.join(save_path, 'encoder-epoch-best.pth'))
t = timer.t()
prog = (epoch - epoch_start + 1) / (epoch_max - epoch_start + 1)
t_epoch = utils.time_text(t - t_epoch_start)
t_elapsed, t_all = utils.time_text(t), utils.time_text(t / prog)
log_info.append('{} {}/{}'.format(t_epoch, t_elapsed, t_all))
log(', '.join(log_info))
writer.flush()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--config')
parser.add_argument('--name', default=None)
parser.add_argument('--tag', default=None)
parser.add_argument('--gpu', default='0')
args = parser.parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
with open(args.config, 'r') as f:
config = yaml.load(f, Loader=yaml.FullLoader)
print('config loaded.')
save_name = args.name
if save_name is None:
save_name = '_' + args.config.split('/')[-1][:-len('.yaml')]
if args.tag is not None:
save_name = save_name + '_' + args.tag
save_path = os.path.join('./save', save_name)
main(config, save_path)
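    # Illustrative launch sketch (an assumption, not part of the original script): the
    # config path, name and GPU id below are placeholders for whatever you actually use.
    #   python train.py --config configs/train_coordfill.yaml --name coordfill --gpu 0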
| 6,360 | 33.570652 | 105 | py |
CoordFill | CoordFill-master/models/replicate.py | # -*- coding: utf-8 -*-
# File : replicate.py
# Author : Jiayuan Mao
# Email : [email protected]
# Date : 27/01/2018
#
# This file is part of Synchronized-BatchNorm-PyTorch.
# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
# Distributed under MIT License.
import functools
from torch.nn.parallel.data_parallel import DataParallel
__all__ = [
'CallbackContext',
'execute_replication_callbacks',
'DataParallelWithCallback',
'patch_replication_callback'
]
class CallbackContext(object):
pass
def execute_replication_callbacks(modules):
"""
    Execute a replication callback `__data_parallel_replicate__` on each module created by the original replication.
The callback will be invoked with arguments `__data_parallel_replicate__(ctx, copy_id)`
    Note that, as all modules are isomorphic, we assign each sub-module a context
(shared among multiple copies of this module on different devices).
Through this context, different copies can share some information.
    We guarantee that the callback on the master copy (the first copy) will be called before the callback
    of any slave copy.
"""
master_copy = modules[0]
nr_modules = len(list(master_copy.modules()))
ctxs = [CallbackContext() for _ in range(nr_modules)]
for i, module in enumerate(modules):
for j, m in enumerate(module.modules()):
if hasattr(m, '__data_parallel_replicate__'):
m.__data_parallel_replicate__(ctxs[j], i)
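# Illustrative sketch (not part of the original file): a sub-module opts into the
# replication callback simply by defining `__data_parallel_replicate__`. The `ctx`
# object is shared between all device copies of that sub-module, so state stored on
# it by the master copy (copy_id == 0) is visible to every slave copy.
#
#   class _ExampleSyncedModule(torch.nn.Module):
#       def __data_parallel_replicate__(self, ctx, copy_id):
#           if copy_id == 0:        # master copy initialises the shared context
#               ctx.shared_state = {}
#           self._replicate_ctx = ctx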
class DataParallelWithCallback(DataParallel):
"""
Data Parallel with a replication callback.
    A replication callback `__data_parallel_replicate__` of each module will be invoked after the module is created by
    the original `replicate` function.
The callback will be invoked with arguments `__data_parallel_replicate__(ctx, copy_id)`
Examples:
> sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
> sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1])
# sync_bn.__data_parallel_replicate__ will be invoked.
"""
def replicate(self, module, device_ids):
modules = super(DataParallelWithCallback, self).replicate(module, device_ids)
execute_replication_callbacks(modules)
return modules
def patch_replication_callback(data_parallel):
"""
Monkey-patch an existing `DataParallel` object. Add the replication callback.
    Useful when you have a customized `DataParallel` implementation.
Examples:
> sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
> sync_bn = DataParallel(sync_bn, device_ids=[0, 1])
> patch_replication_callback(sync_bn)
# this is equivalent to
> sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
> sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1])
"""
assert isinstance(data_parallel, DataParallel)
old_replicate = data_parallel.replicate
@functools.wraps(old_replicate)
def new_replicate(module, device_ids):
modules = old_replicate(module, device_ids)
execute_replication_callbacks(modules)
return modules
data_parallel.replicate = new_replicate | 3,218 | 35.579545 | 115 | py |
CoordFill | CoordFill-master/models/comm.py | # -*- coding: utf-8 -*-
# File : comm.py
# Author : Jiayuan Mao
# Email : [email protected]
# Date : 27/01/2018
#
# This file is part of Synchronized-BatchNorm-PyTorch.
# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
# Distributed under MIT License.
import queue
import collections
import threading
__all__ = ['FutureResult', 'SlavePipe', 'SyncMaster']
class FutureResult(object):
"""A thread-safe future implementation. Used only as one-to-one pipe."""
def __init__(self):
self._result = None
self._lock = threading.Lock()
self._cond = threading.Condition(self._lock)
def put(self, result):
with self._lock:
            assert self._result is None, 'Previous result hasn\'t been fetched.'
self._result = result
self._cond.notify()
def get(self):
with self._lock:
if self._result is None:
self._cond.wait()
res = self._result
self._result = None
return res
_MasterRegistry = collections.namedtuple('MasterRegistry', ['result'])
_SlavePipeBase = collections.namedtuple('_SlavePipeBase', ['identifier', 'queue', 'result'])
class SlavePipe(_SlavePipeBase):
"""Pipe for master-slave communication."""
def run_slave(self, msg):
self.queue.put((self.identifier, msg))
ret = self.result.get()
self.queue.put(True)
return ret
class SyncMaster(object):
"""An abstract `SyncMaster` object.
    - During replication, as data parallel will trigger a callback for each module, all slave devices should
    call `register_slave(id)` and obtain a `SlavePipe` to communicate with the master.
    - During the forward pass, the master device invokes `run_master`; all messages from the slave devices are collected
    and passed to a registered callback.
    - After receiving the messages, the master device gathers the information and determines the message to be passed
    back to each slave device.
"""
def __init__(self, master_callback):
"""
Args:
master_callback: a callback to be invoked after having collected messages from slave devices.
"""
self._master_callback = master_callback
self._queue = queue.Queue()
self._registry = collections.OrderedDict()
self._activated = False
def __getstate__(self):
return {'master_callback': self._master_callback}
def __setstate__(self, state):
self.__init__(state['master_callback'])
def register_slave(self, identifier):
"""
        Register a slave device.
Args:
identifier: an identifier, usually is the device id.
Returns: a `SlavePipe` object which can be used to communicate with the master device.
"""
if self._activated:
assert self._queue.empty(), 'Queue is not clean before next initialization.'
self._activated = False
self._registry.clear()
future = FutureResult()
self._registry[identifier] = _MasterRegistry(future)
return SlavePipe(identifier, self._queue, future)
def run_master(self, master_msg):
"""
Main entry for the master device in each forward pass.
        The messages are first collected from each device (including the master device), and then
        a callback is invoked to compute the message to be sent back to each device
(including the master device).
Args:
            master_msg: the message that the master wants to send to itself. This will be placed as the first
message when calling `master_callback`. For detailed usage, see `_SynchronizedBatchNorm` for an example.
Returns: the message to be sent back to the master device.
"""
self._activated = True
intermediates = [(0, master_msg)]
for i in range(self.nr_slaves):
intermediates.append(self._queue.get())
results = self._master_callback(intermediates)
        assert results[0][0] == 0, 'The first result should belong to the master.'
for i, res in results:
if i == 0:
continue
self._registry[i].result.put(res)
for i in range(self.nr_slaves):
assert self._queue.get() is True
return results[0][1]
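    # Illustrative sketch of the callback contract (an assumption added for clarity,
    # not original code): `master_callback` receives a list of (copy_id, msg) pairs,
    # with the master's entry first, and must return a list of (copy_id, result)
    # pairs whose first entry also belongs to the master.
    #
    #   def _sum_callback(intermediates):
    #       total = sum(msg for _, msg in intermediates)
    #       return [(copy_id, total) for copy_id, _ in intermediates]
    #
    #   master = SyncMaster(_sum_callback)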
@property
def nr_slaves(self):
return len(self._registry) | 4,439 | 33.418605 | 117 | py |
CoordFill | CoordFill-master/models/modules.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from .networks import BaseNetwork
from .networks import get_nonspade_norm_layer
from .networks import MySeparableBilinearDownsample as BilinearDownsample
import torch.nn.utils.spectral_norm as spectral_norm
import torch as th
from math import pi
from math import log2
import time
import math
class CoordFillGenerator(BaseNetwork):
@staticmethod
def modify_commandline_options(parser, is_train):
parser.set_defaults(norm_G='instanceaffine')
parser.set_defaults(lr_instance=True)
parser.set_defaults(no_instance_dist=True)
parser.set_defaults(hr_coor="cosine")
return parser
def __init__(self, opt, hr_stream=None, lr_stream=None, fast=False):
super(CoordFillGenerator, self).__init__()
if lr_stream is None or hr_stream is None:
lr_stream = dict()
hr_stream = dict()
self.num_inputs = opt.label_nc + (1 if opt.contain_dontcare_label else 0) + (0 if (opt.no_instance_edge & opt.no_instance_dist) else 1)
self.lr_instance = opt.lr_instance
self.learned_ds_factor = opt.learned_ds_factor #(S2 in sec. 3.2)
self.gpu_ids = opt.gpu_ids
self.downsampling = opt.crop_size // opt.ds_scale
self.highres_stream = PixelQueryNet(self.downsampling, num_inputs=self.num_inputs,
num_outputs=opt.output_nc, width=opt.hr_width,
depth=opt.hr_depth,
no_one_hot=opt.no_one_hot, lr_instance=opt.lr_instance,
**hr_stream)
num_params = self.highres_stream.num_params
self.lowres_stream = ParaGenNet(num_params, scale_injection=opt.scale_injection)
def use_gpu(self):
return len(self.gpu_ids) > 0
def get_lowres(self, im):
"""Creates a lowres version of the input."""
device = self.use_gpu()
if(self.learned_ds_factor != self.downsampling):
myds = BilinearDownsample(int(self.downsampling//self.learned_ds_factor), self.num_inputs,device)
return myds(im)
else:
return im
def forward(self, highres):
lowres = self.get_lowres(highres)
lr_features = self.lowres_stream(lowres)
output = self.highres_stream(highres, lr_features)
return output, lr_features#, lowres
def _get_coords(bs, h, w, device, ds):
"""Creates the position encoding for the pixel-wise MLPs"""
x = th.arange(0, w).float()
y = th.arange(0, h).float()
scale = 7 / 8
x_cos = th.remainder(x, ds).float() / ds
x_sin = th.remainder(x, ds).float() / ds
y_cos = th.remainder(y, ds).float() / ds
y_sin = th.remainder(y, ds).float() / ds
x_cos = x_cos / (max(x_cos) / scale)
x_sin = x_sin / (max(x_sin) / scale)
    y_cos = y_cos / (max(y_cos) / scale)
    y_sin = y_sin / (max(y_sin) / scale)
xcos = th.cos((2 * pi * x_cos).float())
xsin = th.sin((2 * pi * x_sin).float())
ycos = th.cos((2 * pi * y_cos).float())
ysin = th.sin((2 * pi * y_sin).float())
xcos = xcos.view(1, 1, 1, w).repeat(bs, 1, h, 1)
xsin = xsin.view(1, 1, 1, w).repeat(bs, 1, h, 1)
ycos = ycos.view(1, 1, h, 1).repeat(bs, 1, 1, w)
ysin = ysin.view(1, 1, h, 1).repeat(bs, 1, 1, w)
coords = th.cat([xcos, xsin, ycos, ysin], 1).to(device)
return coords.to(device)
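# Illustrative shape sketch (an assumption added for clarity, not original code): the
# encoding stacks [cos(x), sin(x), cos(y), sin(y)] channels, so for a batch of 2 and an
# 8x8 grid with a downsampling factor of 4:
#   coords = _get_coords(bs=2, h=8, w=8, device='cpu', ds=4)
#   coords.shape  # -> torch.Size([2, 4, 8, 8])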
def spectral_norm(module, mode=True):
if mode:
return nn.utils.spectral_norm(module)
return module
class ParaGenNet(th.nn.Module):
"""Convolutional LR stream to estimate the pixel-wise MLPs parameters"""
def __init__(self, num_out, scale_injection=False):
super(ParaGenNet, self).__init__()
self.num_out = num_out
self.scale_injection = scale_injection
ngf = 64
if self.scale_injection:
self.out_para = nn.Sequential(
th.nn.Linear(ngf * 8 + 1, self.num_out)
)
else:
self.out_para = nn.Sequential(
th.nn.Linear(ngf * 8, self.num_out)
)
def forward(self, model, x, x_hr):
structure = model(x)
if self.scale_injection:
scale = (torch.ones(x_hr.size(0), 1, 1, 1) * (structure.size(3) / x_hr.size(3))) \
.to(structure.device)
scale = scale.repeat(1, structure.size(2), structure.size(3), 1)
structure = torch.cat([structure.permute(0, 2, 3, 1), scale], dim=-1)
para = self.out_para(structure).permute(0, 3, 1, 2)
else:
para = self.out_para(structure.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
return para
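    # Shape sketch (illustrative; `model` is assumed to be a convolutional backbone that
    # returns a (bs, ngf * 8, h_lr, w_lr) feature map): the linear head maps every
    # low-res location to `num_out` MLP parameters, so `para` has shape
    # (bs, num_out, h_lr, w_lr).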
def mask_predict(self, model, x, x_hr, mask):
structure = model(x)
if self.scale_injection:
scale = (torch.ones(x_hr.size(0), 1, 1, 1) * (structure.size(3) / x_hr.size(3))) \
.to(structure.device)
scale = scale.repeat(1, structure.size(2), structure.size(3), 1)
structure = torch.cat([structure.permute(0, 2, 3, 1), scale], dim=-1)
else:
structure = structure.permute(0, 2, 3, 1)
bs, h, w, c = structure.size()
k = mask.size(2) // h
mask = mask.unfold(2, k, k).unfold(3, k, k)
mask = mask.permute(0, 2, 3, 4, 5, 1).contiguous().view(
bs, h, w, int(k * k))
lr_mask = torch.mean(mask, dim=-1).view(h * w)
structure = structure.view(bs, h * w, c)
index = torch.nonzero(1 - lr_mask).squeeze(1)
structure = structure[:, index, :]
para = self.out_para(structure).permute(0, 2, 1)
return para, mask
class PixelQueryNet(th.nn.Module):
"""Addaptive pixel-wise MLPs"""
def __init__(self, downsampling,
num_inputs=13, num_outputs=3, width=64, depth=5, coordinates="cosine",
no_one_hot=False, lr_instance=False):
super(PixelQueryNet, self).__init__()
self.lr_instance = lr_instance
self.downsampling = downsampling
self.num_inputs = num_inputs - (1 if self.lr_instance else 0)
self.num_outputs = num_outputs
self.width = width
self.depth = depth
self.coordinates = coordinates
self.xy_coords = None
self.no_one_hot = no_one_hot
self.channels = []
self._set_channels()
self.num_params = 0
self.splits = {}
self._set_num_params()
@property # for backward compatibility
def ds(self):
return self.downsampling
def _set_channels(self):
"""Compute and store the hr-stream layer dimensions."""
in_ch = self.num_inputs
in_ch = in_ch + int(4)
self.channels = [in_ch]
        for _ in range(self.depth - 1):  # intermediate layers -> constant width
self.channels.append(self.width)
# output layer
self.channels.append(self.num_outputs)
def _set_num_params(self):
nparams = 0
self.splits = {
"biases": [],
"weights": [],
}
        # go over input/output channels for each layer
idx = 0
for layer, nci in enumerate(self.channels[:-1]):
nco = self.channels[layer + 1]
nparams = nparams + nco # FC biases
self.splits["biases"].append((idx, idx + nco))
idx = idx + nco
nparams = nparams + nci * nco # FC weights
self.splits["weights"].append((idx, idx + nco * nci))
idx = idx + nco * nci
self.num_params = nparams
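    # Worked example (illustrative, using a tiny hypothetical channel list rather than
    # the real configuration): with self.channels == [4, 8, 3],
    #   layer 0: biases -> (0, 8),   weights -> (8, 40)    (4 * 8 values)
    #   layer 1: biases -> (40, 43), weights -> (43, 67)   (8 * 3 values)
    # giving self.num_params == 67.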
def _get_weight_indices(self, idx):
return self.splits["weights"][idx]
def _get_bias_indices(self, idx):
return self.splits["biases"][idx]
def forward(self, highres, lr_params):
assert lr_params.shape[1] == self.num_params, "incorrect input params"
if self.lr_instance:
highres = highres[:, :-1, :, :]
# Fetch sizes
bs, _, h, w = highres.shape
bs, _, h_lr, w_lr = lr_params.shape
k = h // h_lr
self.xy_coords = _get_coords(1, h, w, highres.device, h // h_lr)
highres = torch.repeat_interleave(self.xy_coords, repeats=bs, dim=0)
        # Split the input into k x k tiles according to the NN interpolation factor (the total downsampling factor),
        # with channels last (for matmul).
        # All pixels within a k x k tile are processed by the same MLP parameters.
nci = highres.shape[1]
tiles = highres.unfold(2, k, k).unfold(3, k, k)
tiles = tiles.permute(0, 2, 3, 4, 5, 1).contiguous().view(
bs, h_lr, w_lr, int(k * k), nci)
out = tiles
num_layers = len(self.channels) - 1
for idx, nci in enumerate(self.channels[:-1]):
nco = self.channels[idx + 1]
# Select params in lowres buffer
bstart, bstop = self._get_bias_indices(idx)
wstart, wstop = self._get_weight_indices(idx)
w_ = lr_params[:, wstart:wstop]
b_ = lr_params[:, bstart:bstop]
w_ = w_.permute(0, 2, 3, 1).view(bs, h_lr, w_lr, nci, nco)
b_ = b_.permute(0, 2, 3, 1).view(bs, h_lr, w_lr, 1, nco)
out = th.matmul(out, w_) + b_
            # Apply leaky ReLU non-linearity in all but the last layer, and tanh in the last
# out = th.nn.functional.leaky_relu(out, 0.01)
if idx < num_layers - 1:
out = th.nn.functional.leaky_relu(out, 0.01)
else:
out = torch.tanh(out)
#
# reorder the tiles in their correct position, and put channels first
out = out.view(bs, h_lr, w_lr, k, k, self.num_outputs).permute(
0, 5, 1, 3, 2, 4)
out = out.contiguous().view(bs, self.num_outputs, h, w)
return out
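    # Shape flow of forward() (illustrative summary, not original code): with
    # highres of shape (bs, C, h, w) and lr_params of shape (bs, num_params, h_lr, w_lr),
    # the tile size is k = h // h_lr; the 4-channel positional encoding is unfolded into
    # (bs, h_lr, w_lr, k * k, 4) tiles, passed through the per-tile MLPs, and folded
    # back into an output of shape (bs, num_outputs, h, w).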
def mask_predict(self, highres, lr_params, hr_mask, lr_mask):
assert lr_params.shape[1] == self.num_params, "incorrect input params"
if self.lr_instance:
highres = highres[:, :-1, :, :]
bs, _, h, w = highres.shape
bs, h_lr, w_lr, _ = lr_mask.shape
k = h // h_lr
self.xy_coords = _get_coords(1, h, w, highres.device, h // h_lr)
pe = torch.repeat_interleave(self.xy_coords, repeats=bs, dim=0)
        # Split the input into k x k tiles according to the NN interpolation factor (the total downsampling factor),
        # with channels last (for matmul).
        # All pixels within a k x k tile are processed by the same MLP parameters.
nci = pe.shape[1]
# bs, 5 rgbxy, h//k=h_lr, w//k=w_lr, k, k
tiles = pe.unfold(2, k, k).unfold(3, k, k)
tiles = tiles.permute(0, 2, 3, 4, 5, 1).contiguous().view(
bs, h_lr, w_lr, int(k * k), nci)
mask = torch.mean(lr_mask, dim=-1).view(h_lr * w_lr)
index = torch.nonzero(1 - mask).squeeze(1)
out = tiles
num_layers = len(self.channels) - 1
out = out.view(bs, h_lr * w_lr, int(k * k), nci)[:, index, :, :]
num = out.size(1)
for idx, nci in enumerate(self.channels[:-1]):
nco = self.channels[idx + 1]
# Select params in lowres buffer
bstart, bstop = self._get_bias_indices(idx)
wstart, wstop = self._get_weight_indices(idx)
w_ = lr_params[:, wstart:wstop]
b_ = lr_params[:, bstart:bstop]
w_ = w_.permute(0, 2, 1).view(bs, num, nci, nco)
b_ = b_.permute(0, 2, 1).view(bs, num, 1, nco)
out = th.matmul(out, w_) + b_
            # Apply leaky ReLU non-linearity in all but the last layer, and tanh in the last
if idx < num_layers - 1:
out = th.nn.functional.leaky_relu(out, 0.01)
else:
out = torch.tanh(out)
highres = highres.unfold(2, k, k).unfold(3, k, k)
highres = highres.permute(0, 2, 3, 4, 5, 1).contiguous().view(
bs, h_lr, w_lr, int(k * k), 3).view(bs, h_lr * w_lr, int(k * k), 3)
highres[:, index, :, :] = out
out = highres.view(bs, h_lr, w_lr, k, k, self.num_outputs).permute(
0, 5, 1, 3, 2, 4)
out = out.contiguous().view(bs, self.num_outputs, h, w)
return out | 12,294 | 36.257576 | 143 | py |
CoordFill | CoordFill-master/models/misc.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import models
from models import register
from utils import make_coord
@register('metasr')
class MetaSR(nn.Module):
def __init__(self, encoder_spec):
super().__init__()
self.encoder = models.make(encoder_spec)
imnet_spec = {
'name': 'mlp',
'args': {
'in_dim': 3,
'out_dim': self.encoder.out_dim * 9 * 3,
'hidden_list': [256]
}
}
self.imnet = models.make(imnet_spec)
def gen_feat(self, inp):
self.feat = self.encoder(inp)
return self.feat
def query_rgb(self, coord, cell=None):
feat = self.feat
feat = F.unfold(feat, 3, padding=1).view(
feat.shape[0], feat.shape[1] * 9, feat.shape[2], feat.shape[3])
feat_coord = make_coord(feat.shape[-2:], flatten=False).cuda()
feat_coord[:, :, 0] -= (2 / feat.shape[-2]) / 2
feat_coord[:, :, 1] -= (2 / feat.shape[-1]) / 2
feat_coord = feat_coord.permute(2, 0, 1) \
.unsqueeze(0).expand(feat.shape[0], 2, *feat.shape[-2:])
coord_ = coord.clone()
coord_[:, :, 0] -= cell[:, :, 0] / 2
coord_[:, :, 1] -= cell[:, :, 1] / 2
coord_q = (coord_ + 1e-6).clamp(-1 + 1e-6, 1 - 1e-6)
q_feat = F.grid_sample(
feat, coord_q.flip(-1).unsqueeze(1),
mode='nearest', align_corners=False)[:, :, 0, :] \
.permute(0, 2, 1)
q_coord = F.grid_sample(
feat_coord, coord_q.flip(-1).unsqueeze(1),
mode='nearest', align_corners=False)[:, :, 0, :] \
.permute(0, 2, 1)
rel_coord = coord_ - q_coord
rel_coord[:, :, 0] *= feat.shape[-2] / 2
rel_coord[:, :, 1] *= feat.shape[-1] / 2
r_rev = cell[:, :, 0] * (feat.shape[-2] / 2)
inp = torch.cat([rel_coord, r_rev.unsqueeze(-1)], dim=-1)
bs, q = coord.shape[:2]
pred = self.imnet(inp.view(bs * q, -1)).view(bs * q, feat.shape[1], 3)
pred = torch.bmm(q_feat.contiguous().view(bs * q, 1, -1), pred)
pred = pred.view(bs, q, 3)
return pred
def forward(self, inp, coord, cell):
self.gen_feat(inp)
return self.query_rgb(coord, cell)
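# Illustrative usage sketch (an assumption based on the LIIF-style interface, not
# original code): `coord` holds the target pixel centres in [-1, 1] and `cell` the
# per-pixel cell sizes, both shaped (bs, q, 2); the output is (bs, q, 3) RGB values.
#   model = MetaSR(encoder_spec={'name': 'edsr-baseline', 'args': {'no_upsampling': True}})
#   pred = model(inp, coord, cell)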
| 2,303 | 31.450704 | 78 | py |
CoordFill | CoordFill-master/models/gan.py | import random
import torch
import torch.nn as nn
import torch.nn.functional as F
import models
from models import register
import math
import numpy as np
from torch.autograd import Variable
import os
import logging
logger = logging.getLogger(__name__)
from .coordfill import CoordFill
from .ffc_baseline import FFC
from .adv_loss import AdversarialLoss
from collections import OrderedDict
from .LPIPS.models import dist_model as dm
import random
class D_Net(nn.Module):
def __init__(self, in_channels=3, use_sigmoid=True, use_spectral_norm=True):
super(D_Net, self).__init__()
self.use_sigmoid = use_sigmoid
self.conv1 = nn.Sequential(
spectral_norm(nn.Conv2d(in_channels=in_channels, out_channels=64, kernel_size=4, stride=2, padding=1, bias=not use_spectral_norm), use_spectral_norm),
nn.LeakyReLU(0.2),
)
self.conv2 = nn.Sequential(
spectral_norm(nn.Conv2d(in_channels=64, out_channels=128, kernel_size=4, stride=2, padding=1, bias=not use_spectral_norm), use_spectral_norm),
nn.LeakyReLU(0.2),
)
self.conv3 = nn.Sequential(
spectral_norm(nn.Conv2d(in_channels=128, out_channels=256, kernel_size=4, stride=2, padding=1, bias=not use_spectral_norm), use_spectral_norm),
nn.LeakyReLU(0.2),
)
self.conv4 = nn.Sequential(
spectral_norm(nn.Conv2d(in_channels=256, out_channels=512, kernel_size=4, stride=1, padding=1, bias=not use_spectral_norm), use_spectral_norm),
nn.LeakyReLU(0.2),
)
self.conv5 = nn.Sequential(
spectral_norm(nn.Conv2d(in_channels=512, out_channels=1, kernel_size=4, stride=1, padding=1, bias=not use_spectral_norm), use_spectral_norm),
)
def forward(self, x):
conv1 = self.conv1(x)
conv2 = self.conv2(conv1)
conv3 = self.conv3(conv2)
conv4 = self.conv4(conv3)
conv5 = self.conv5(conv4)
outputs = conv5
if self.use_sigmoid:
outputs = torch.sigmoid(conv5)
return outputs, [conv1, conv2, conv3, conv4]
def spectral_norm(module, mode=True):
if mode:
return nn.utils.spectral_norm(module)
return module
@register('gan')
class GAN(nn.Module):
def __init__(self, encoder_spec=None):
super().__init__()
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
from argparse import Namespace
args = Namespace()
args.n_channels = 3
args.n_classes = 3
args.no_upsampling = True
self.mode = encoder_spec['name']
if encoder_spec['name'] == 'baseline':
self.encoder = Baseline(args)
elif encoder_spec['name'] == 'ffc' or encoder_spec['name'] == 'mlp':
self.encoder = FFC(args, encoder_spec['name'], encoder_spec['mask_prediction'])
else:
self.encoder = CoordFill(args, encoder_spec['name'],
encoder_spec['mask_prediction'], encoder_spec['attffc'],
encoder_spec['scale_injection'])
self.model_LPIPS = dm.DistModel()
self.model_LPIPS.initialize(model='net-lin', net='alex', use_gpu=True)
self.fm_loss = torch.nn.L1Loss()
self.discriminator = D_Net(use_sigmoid=True)
self.criterionGAN = AdversarialLoss('nsgan')
self.lambda_D = 1
self.lambda_perceptual = 10
self.lambda_fm = 100
self.multi_res_training = encoder_spec['multi_res_training']
self.optimizer_G = torch.optim.Adam(self.encoder.parameters(), lr=1e-4)
self.optimizer_D = torch.optim.Adam(self.discriminator.parameters(), lr=1e-4)
def set_input(self, inp, gt, input_mask):
self.input = inp.to(self.device)
self.gt = gt.to(self.device)
self.input_mask = input_mask.to(self.device)
if self.multi_res_training:
ratio = random.randint(0, 8)
size = 256 + 32 * ratio
self.input = F.interpolate(self.input, size=(size, size), mode='bilinear')
self.gt = F.interpolate(self.gt, size=(size, size), mode='bilinear')
self.input_mask = F.interpolate(self.input_mask, size=(size, size), mode='nearest')
def forward(self):
self.pred = self.encoder([self.input, self.input_mask])
def backward_D(self):
"""Calculate GAN loss for the discriminator"""
pred_fake, _ = self.discriminator(self.pred.detach())
self.loss_D_fake = self.criterionGAN(pred_fake, False, True)
pred_real, _ = self.discriminator(self.gt)
self.loss_D_real = self.criterionGAN(pred_real, True, True)
self.loss_D = self.loss_D_fake + self.loss_D_real
# combine loss and calculate gradients
self.loss_D.backward()
def backward_G(self):
"""Calculate GAN and L1 loss for the generator"""
pred_fake, feat_fake = self.discriminator(self.pred)
self.loss_GAN = self.criterionGAN(pred_fake, True, False) * self.lambda_D
self.feat_match_loss = 0
pred_real_hr, feat_real = self.discriminator(self.gt)
for i in range(len(feat_fake)):
self.feat_match_loss += self.fm_loss(feat_fake[i], feat_real[i].detach())
self.feat_match_loss = self.feat_match_loss * self.lambda_fm
self.loss_LPIPS, _ = self.model_LPIPS.forward_pair(self.pred, self.gt)
self.loss_perceptual = torch.mean(self.loss_LPIPS) * self.lambda_perceptual
self.loss_G = self.loss_perceptual + self.loss_GAN + self.feat_match_loss
self.loss_G.backward()
def optimize_parameters(self):
self.forward() # compute fake images: G(A)
self.set_requires_grad(self.discriminator, False) # D requires no gradients when optimizing G
self.optimizer_G.zero_grad() # set G's gradients to zero
        self.backward_G()  # calculate gradients for G
        self.optimizer_G.step()  # update G's weights
self.set_requires_grad(self.discriminator, True) # enable backprop for D
self.optimizer_D.zero_grad() # set D's gradients to zero
self.backward_D() # calculate gradients for D
self.optimizer_D.step() # update D's weights
def set_requires_grad(self, nets, requires_grad=False):
"""Set requies_grad=Fasle for all the networks to avoid unnecessary computations
Parameters:
nets (network list) -- a list of networks
requires_grad (bool) -- whether the networks require gradients or not
"""
if not isinstance(nets, list):
nets = [nets]
for net in nets:
if net is not None:
for param in net.parameters():
param.requires_grad = requires_grad
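# Illustrative training-step sketch (an assumption about intended usage, not original
# code; the encoder_spec keys mirror the ones read in __init__ above):
#   spec = {'name': 'coordfill', 'mask_prediction': True, 'attffc': False,
#           'scale_injection': True, 'multi_res_training': True}
#   model = GAN(encoder_spec=spec)
#   model.set_input(masked_img, gt_img, mask)   # image / mask tensors for one batch
#   model.optimize_parameters()                 # one generator step, then one discriminator step
#   g_loss, d_loss = model.loss_G.item(), model.loss_D.item()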
| 6,804 | 36.185792 | 162 | py |
CoordFill | CoordFill-master/models/networks.py | import torch.nn as nn
from torch.nn import init
import torch.nn.utils.spectral_norm as spectral_norm
import torch
import torch.nn.functional as F
import functools
import numpy as np
class MySeparableBilinearDownsample(torch.nn.Module):
def __init__(self, stride, channels, use_gpu):
super().__init__()
self.stride = stride
self.channels = channels
# create tent kernel
kernel = np.arange(1,2*stride+1,2) # ramp up
kernel = np.concatenate((kernel,kernel[::-1])) # reflect it and concatenate
if use_gpu:
kernel = torch.Tensor(kernel/np.sum(kernel)).to(device='cuda') # normalize
else:
kernel = torch.Tensor(kernel / np.sum(kernel))
self.register_buffer('kernel_horz', kernel[None,None,None,:].repeat((self.channels,1,1,1)))
self.register_buffer('kernel_vert', kernel[None,None,:,None].repeat((self.channels,1,1,1)))
        self.refl = nn.ReflectionPad2d(int(stride / 2))
def forward(self, input):
return F.conv2d(F.conv2d(self.refl(input), self.kernel_horz, stride=(1,self.stride), groups=self.channels),
self.kernel_vert, stride=(self.stride,1), groups=self.channels)
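# Illustrative usage sketch (an assumption added for clarity, not original code): the
# separable tent filter halves each spatial dimension per application when stride=2.
#   down = MySeparableBilinearDownsample(stride=2, channels=3, use_gpu=False)
#   y = down(x)   # x: (bs, 3, H, W) -> y: (bs, 3, H // 2, W // 2)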
class ASAPNetsBlock(nn.Module):
def __init__(self, dim, norm_layer, activation=nn.ReLU(), kernel_size=3, reflection_pad=False, replicate_pad=False):
super().__init__()
padw = 1
if reflection_pad:
self.conv_block = nn.Sequential(nn.ReflectionPad2d(padw),
norm_layer(nn.Conv2d(dim, dim, kernel_size=kernel_size, padding=0)),
activation
)
elif replicate_pad:
self.conv_block = nn.Sequential(nn.ReplicationPad2d(padw),
norm_layer(nn.Conv2d(dim, dim, kernel_size=kernel_size, padding=0)),
activation
)
else:
self.conv_block = nn.Sequential(norm_layer(nn.Conv2d(dim, dim, kernel_size=kernel_size, padding=padw)),
activation
)
def forward(self, x):
out = self.conv_block(x)
return out
def get_nonspade_norm_layer(opt, norm_type='instance'):
# helper function to get # output channels of the previous layer
def get_out_channel(layer):
if hasattr(layer, 'out_channels'):
return getattr(layer, 'out_channels')
return layer.weight.size(0)
# this function will be returned
def add_norm_layer(layer):
nonlocal norm_type
if norm_type.startswith('spectral'):
layer = spectral_norm(layer)
subnorm_type = norm_type[len('spectral'):]
else:
subnorm_type = norm_type
if subnorm_type == 'none' or len(subnorm_type) == 0:
return layer
# remove bias in the previous layer, which is meaningless
# since it has no effect after normalization
if getattr(layer, 'bias', None) is not None:
delattr(layer, 'bias')
layer.register_parameter('bias', None)
if subnorm_type == 'batch':
norm_layer = nn.BatchNorm2d(get_out_channel(layer), affine=True)
elif subnorm_type == 'spectral':
norm_layer = torch.nn.utils.spectral_norm(get_out_channel(layer))
# elif subnorm_type == 'sync_batch':
# norm_layer = SynchronizedBatchNorm2d(get_out_channel(layer), affine=True)
elif subnorm_type == 'instance':
norm_layer = nn.InstanceNorm2d(get_out_channel(layer), affine=False)
elif subnorm_type == 'instanceaffine':
norm_layer = nn.InstanceNorm2d(get_out_channel(layer), affine=True)
else:
raise ValueError('normalization layer %s is not recognized' % subnorm_type)
return nn.Sequential(layer, norm_layer)
return add_norm_layer
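# Illustrative usage sketch (an assumption added for clarity, not original code): the
# returned closure wraps a layer with the requested (optionally spectral) normalisation.
#   norm_layer = get_nonspade_norm_layer(None, 'spectralinstance')
#   block = norm_layer(nn.Conv2d(3, 64, kernel_size=3, padding=1))
#   # -> Sequential(spectral_norm(Conv2d(...)), InstanceNorm2d(64, affine=False))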
class BaseNetwork(nn.Module):
def __init__(self):
super(BaseNetwork, self).__init__()
@staticmethod
def modify_commandline_options(parser, is_train):
return parser
def print_network(self):
if isinstance(self, list):
self = self[0]
num_params = 0
for param in self.parameters():
num_params = num_params + param.numel()
print('Network [%s] was created. Total number of parameters: %.1f million. '
'To see the architecture, do print(network).'
% (type(self).__name__, num_params / 1000000))
def init_weights(self, init_type='normal', gain=0.02):
def init_func(m):
classname = m.__class__.__name__
if classname.find('BatchNorm2d') != -1:
if hasattr(m, 'weight') and m.weight is not None:
init.normal_(m.weight.data, 1.0, gain)
if hasattr(m, 'bias') and m.bias is not None:
init.constant_(m.bias.data, 0.0)
elif hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
if init_type == 'normal':
init.normal_(m.weight.data, 0.0, gain)
elif init_type == 'xavier':
init.xavier_normal_(m.weight.data, gain=gain)
elif init_type == 'xavier_uniform':
init.xavier_uniform_(m.weight.data, gain=1.0)
elif init_type == 'kaiming':
init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
elif init_type == 'orthogonal':
init.orthogonal_(m.weight.data, gain=gain)
elif init_type == 'none': # uses pytorch's default init method
m.reset_parameters()
else:
raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
if hasattr(m, 'bias') and m.bias is not None:
init.constant_(m.bias.data, 0.0)
'''
for name, param in m.named_parameters():
if (name == "lowres_stream.params_pred.weight"):
print("%s_init" % name)
init.zeros_(param.data[0:13])
init.normal_(param.data[13:13 + 64 * 64], 0.0, 0.02)
for i in range(1,6):
init.zeros_(param.data[13+i*64*64+(i-1)*64:13+64*64+i*64])
init.normal_(param.data[13+i*64*64+i*64:13+i*64+(i+1)*64*64], 0.0, 0.02)
init.zeros_(param.data[13 + i * 64 * 64 + (i - 1) * 64:13 + 64 * 64 + i * 64 + 3])
init.normal_(param.data[13 + i * 64 * 64 + i * 64 + 3 :13 + i * 64 + i * 64 * 64 +64*3], 0.0, 0.02)
if (name == "lowres_stream.params_pred.bias"):
print("%s_init" % name)
init.zeros_(param.data)
'''
self.apply(init_func)
# propagate to children
for m in self.children():
if hasattr(m, 'init_weights'):
m.init_weights(init_type, gain) | 7,259 | 43 | 120 | py |