the-stack_106_27090
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Simple demo of solution of race conditions with locks.
Note that the race conditions in this demo are also amplified by fuzzing
technique
In order to be away with race conditions, we need to
1. Ensure an explicit ordering of the operations (on the shared resources)
All operations (on the shared resources) must be executed in the same order
they are received.
2. Restrict access to the shared resource
Only one operation can access the shared resource at the same time. During
the period of access, no other operations can read or change its value.
Specifically for solution with locks,
2. All accesses to the shared resource shall be done using its own lock.
"""
import random
import time
from threading import Condition, Thread
##### Fuzzing technique #####
FUZZ = False
def fuzz() -> None:
"""
Fuzzes the program for a random amount of time, if instructed.
:return: None
"""
    if FUZZ:
time.sleep(random.random())
##### Locks for print()-access & "counter"-access #####
# All accesses to the shared resource shall be done using its own lock. (=> 2)
print_lock = Condition() # Lock for print()-access
counter = 0
counter_lock = Condition() # Lock for "counter"-access
def worker() -> None:
"""
Thread function that increments "counter" by 1.
:return: None
"""
global counter
# Lock on the "counter"-access lock
with counter_lock:
fuzz()
old_val = counter
fuzz()
counter = old_val + 1
# Lock on the print()-access lock
with print_lock:
fuzz()
print(f'The counter value is {counter}')
fuzz()
print('----------')
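# A minimal contrast sketch (not part of the original demo): without acquiring
# counter_lock, the read-modify-write below can interleave across threads and lose
# increments, which is exactly the race that the locks above prevent.
# def unsafe_worker() -> None:
#     global counter
#     old_val = counter      # two threads may read the same value here
#     counter = old_val + 1  # ...and one of the two increments is then lost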
# Lock on the print()-access lock
with print_lock:
print('Starting up')
# Create and start 10 worker threads
worker_threads = []
for _ in range(10):
worker_thread = Thread(target=worker)
worker_threads.append(worker_thread)
worker_thread.start()
fuzz()
# Join the 10 worker threads
for worker_thread in worker_threads:
worker_thread.join()
fuzz()
# Lock on the print()-access lock
with print_lock:
print('Finishing up')
# Output:
# Starting up
# The counter value is 1
# ----------
# The counter value is 2
# ----------
# The counter value is 3
# ----------
# The counter value is 4
# ----------
# The counter value is 5
# ----------
# The counter value is 6
# ----------
# The counter value is 7
# ----------
# The counter value is 8
# ----------
# The counter value is 9
# ----------
# The counter value is 10
# ----------
# Finishing up
the-stack_106_27091
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from pyrigidbody3d import quaternion
import unittest
class QuaternionTest(unittest.TestCase):
def test_identity(self):
q = quaternion.identity()
self.assertLess(
np.linalg.norm(q.xyzw - np.array([0.0, 0.0, 0.0, 1.0])), 1e-6)
def test_rotate(self):
q = quaternion.from_axis_angle(np.array([0.0, 0.0, 1.0]), np.pi / 2)
v = np.array([1.0, 0.0, 0.0])
v_rot = quaternion.rotate(q, v)
self.assertLess(np.linalg.norm(v_rot - np.array([0.0, 1.0, 0.0])), 1e-6)
def test_cross(self):
res = np.cross(np.array([1.0, 0.0, 0.0]), np.array([0.0, 1.0, 0.0]))
self.assertLess(np.linalg.norm(res - np.array([0.0, 0.0, 1.0])), 1e-6)
def test_normalize(self):
q = quaternion.Quaternion(0.0, 0.0, 0.0, 2.0)
q.normalize()
self.assertLess(
np.linalg.norm(q.xyzw - np.array([0.0, 0.0, 0.0, 1.0])), 1e-6)
if __name__ == '__main__':
unittest.main()
the-stack_106_27092
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
PyTorch utils
"""
import datetime
import logging
import math
import os
import platform
import subprocess
import time
from contextlib import contextmanager
from copy import deepcopy
from pathlib import Path
import torch
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
import torchvision
try:
import thop # for FLOPs computation
except ImportError:
thop = None
LOGGER = logging.getLogger(__name__)
@contextmanager
def torch_distributed_zero_first(local_rank: int):
"""
Decorator to make all processes in distributed training wait for each local_master to do something.
"""
if local_rank not in [-1, 0]:
dist.barrier(device_ids=[local_rank])
yield
if local_rank == 0:
dist.barrier(device_ids=[0])
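# Hedged usage sketch: in DDP training this context manager is typically used so that only
# the rank-0 process does one-time work (e.g. caching a dataset) while the other ranks wait.
# `create_dataset` is a hypothetical helper, not defined in this file.
# with torch_distributed_zero_first(local_rank):
#     dataset = create_dataset(path)  # rank 0 runs this first, the other ranks afterwards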
def init_torch_seeds(seed=0):
# Speed-reproducibility tradeoff https://pytorch.org/docs/stable/notes/randomness.html
torch.manual_seed(seed)
if seed == 0: # slower, more reproducible
cudnn.benchmark, cudnn.deterministic = False, True
else: # faster, less reproducible
cudnn.benchmark, cudnn.deterministic = True, False
def date_modified(path=__file__):
# return human-readable file modification date, i.e. '2021-3-26'
t = datetime.datetime.fromtimestamp(Path(path).stat().st_mtime)
return f'{t.year}-{t.month}-{t.day}'
def git_describe(path=Path(__file__).parent): # path must be a directory
# return human-readable git description, i.e. v5.0-5-g3e25f1e https://git-scm.com/docs/git-describe
s = f'git -C {path} describe --tags --long --always'
try:
return subprocess.check_output(s, shell=True, stderr=subprocess.STDOUT).decode()[:-1]
except subprocess.CalledProcessError as e:
return '' # not a git repository
def select_device(device='', batch_size=None):
# device = 'cpu' or '0' or '0,1,2,3'
s = f'YOLOv5 🚀 {git_describe() or date_modified()} torch {torch.__version__} ' # string
device = str(device).strip().lower().replace('cuda:', '') # to string, 'cuda:0' to '0'
cpu = device == 'cpu'
if cpu:
os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False
elif device: # non-cpu device requested
os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable
assert torch.cuda.is_available(), f'CUDA unavailable, invalid device {device} requested' # check availability
cuda = not cpu and torch.cuda.is_available()
if cuda:
devices = device.split(',') if device else '0' # range(torch.cuda.device_count()) # i.e. 0,1,6,7
n = len(devices) # device count
if n > 1 and batch_size: # check batch_size is divisible by device_count
assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}'
space = ' ' * (len(s) + 1)
for i, d in enumerate(devices):
p = torch.cuda.get_device_properties(i)
s += f"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / 1024 ** 2}MB)\n" # bytes to MB
else:
s += 'CPU\n'
LOGGER.info(s.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else s) # emoji-safe
return torch.device('cuda:0' if cuda else 'cpu')
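# Usage sketch (assumes at least two visible GPUs); the call logs the selected devices and
# returns a torch.device:
# device = select_device('0,1', batch_size=16)  # -> device(type='cuda', index=0)
# device = select_device('cpu')                 # -> device(type='cpu')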
def time_sync():
# pytorch-accurate time
if torch.cuda.is_available():
torch.cuda.synchronize()
return time.time()
def profile(input, ops, n=10, device=None):
# YOLOv5 speed/memory/FLOPs profiler
#
# Usage:
# input = torch.randn(16, 3, 640, 640)
# m1 = lambda x: x * torch.sigmoid(x)
# m2 = nn.SiLU()
# profile(input, [m1, m2], n=100) # profile over 100 iterations
results = []
logging.basicConfig(format="%(message)s", level=logging.INFO)
device = device or select_device()
print(f"{'Params':>12s}{'GFLOPs':>12s}{'GPU_mem (GB)':>14s}{'forward (ms)':>14s}{'backward (ms)':>14s}"
f"{'input':>24s}{'output':>24s}")
for x in input if isinstance(input, list) else [input]:
x = x.to(device)
x.requires_grad = True
for m in ops if isinstance(ops, list) else [ops]:
m = m.to(device) if hasattr(m, 'to') else m # device
m = m.half() if hasattr(m, 'half') and isinstance(x, torch.Tensor) and x.dtype is torch.float16 else m
tf, tb, t = 0., 0., [0., 0., 0.] # dt forward, backward
try:
flops = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 # GFLOPs
            except Exception:
flops = 0
try:
for _ in range(n):
t[0] = time_sync()
y = m(x)
t[1] = time_sync()
try:
_ = (sum([yi.sum() for yi in y]) if isinstance(y, list) else y).sum().backward()
t[2] = time_sync()
except Exception as e: # no backward method
print(e)
t[2] = float('nan')
tf += (t[1] - t[0]) * 1000 / n # ms per op forward
tb += (t[2] - t[1]) * 1000 / n # ms per op backward
mem = torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0 # (GB)
s_in = tuple(x.shape) if isinstance(x, torch.Tensor) else 'list'
s_out = tuple(y.shape) if isinstance(y, torch.Tensor) else 'list'
p = sum(list(x.numel() for x in m.parameters())) if isinstance(m, nn.Module) else 0 # parameters
print(f'{p:12}{flops:12.4g}{mem:>14.3f}{tf:14.4g}{tb:14.4g}{str(s_in):>24s}{str(s_out):>24s}')
results.append([p, flops, mem, tf, tb, s_in, s_out])
except Exception as e:
print(e)
results.append(None)
torch.cuda.empty_cache()
return results
def is_parallel(model):
# Returns True if model is of type DP or DDP
return type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel)
def de_parallel(model):
# De-parallelize a model: returns single-GPU model if model is of type DP or DDP
return model.module if is_parallel(model) else model
def intersect_dicts(da, db, exclude=()):
# Dictionary intersection of matching keys and shapes, omitting 'exclude' keys, using da values
return {k: v for k, v in da.items() if k in db and not any(x in k for x in exclude) and v.shape == db[k].shape}
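# Hedged example of the checkpoint-loading pattern this helper supports; `ckpt` and `model`
# are hypothetical objects, not defined in this file.
# csd = intersect_dicts(ckpt['model'].float().state_dict(), model.state_dict(), exclude=['anchor'])
# model.load_state_dict(csd, strict=False)  # only keys with matching names and shapes are loaded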
def initialize_weights(model):
for m in model.modules():
t = type(m)
if t is nn.Conv2d:
pass # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif t is nn.BatchNorm2d:
m.eps = 1e-3
m.momentum = 0.03
elif t in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6]:
m.inplace = True
def find_modules(model, mclass=nn.Conv2d):
# Finds layer indices matching module class 'mclass'
return [i for i, m in enumerate(model.module_list) if isinstance(m, mclass)]
def sparsity(model):
# Return global model sparsity
a, b = 0., 0.
for p in model.parameters():
a += p.numel()
b += (p == 0).sum()
return b / a
def prune(model, amount=0.3):
# Prune model to requested global sparsity
import torch.nn.utils.prune as prune
print('Pruning model... ', end='')
for name, m in model.named_modules():
if isinstance(m, nn.Conv2d):
prune.l1_unstructured(m, name='weight', amount=amount) # prune
prune.remove(m, 'weight') # make permanent
print(' %.3g global sparsity' % sparsity(model))
def fuse_conv_and_bn(conv, bn):
# Fuse convolution and batchnorm layers https://tehnokv.com/posts/fusing-batchnorm-and-conv/
fusedconv = nn.Conv2d(conv.in_channels,
conv.out_channels,
kernel_size=conv.kernel_size,
stride=conv.stride,
padding=conv.padding,
groups=conv.groups,
bias=True).requires_grad_(False).to(conv.weight.device)
# prepare filters
w_conv = conv.weight.clone().view(conv.out_channels, -1)
w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var)))
fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.shape))
# prepare spatial bias
b_conv = torch.zeros(conv.weight.size(0), device=conv.weight.device) if conv.bias is None else conv.bias
b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps))
fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn)
return fusedconv
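# Minimal fusion sketch (assumes `model` pairs each Conv2d `m.conv` with a BatchNorm2d
# `m.bn`, as YOLOv5 Conv blocks do); after fusing, the BN module must be skipped in forward.
# for m in model.modules():
#     if hasattr(m, 'conv') and hasattr(m, 'bn'):
#         m.conv = fuse_conv_and_bn(m.conv, m.bn)  # replace conv with the fused layer
#         delattr(m, 'bn')                         # drop BN; forward must not call it anymore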
def model_info(model, verbose=False, img_size=640):
# Model information. img_size may be int or list, i.e. img_size=640 or img_size=[640, 320]
n_p = sum(x.numel() for x in model.parameters()) # number parameters
n_g = sum(x.numel() for x in model.parameters() if x.requires_grad) # number gradients
if verbose:
print('%5s %40s %9s %12s %20s %10s %10s' % ('layer', 'name', 'gradient', 'parameters', 'shape', 'mu', 'sigma'))
for i, (name, p) in enumerate(model.named_parameters()):
name = name.replace('module_list.', '')
print('%5g %40s %9s %12g %20s %10.3g %10.3g' %
(i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std()))
try: # FLOPs
from thop import profile
stride = max(int(model.stride.max()), 32) if hasattr(model, 'stride') else 32
img = torch.zeros((1, model.yaml.get('ch', 3), stride, stride), device=next(model.parameters()).device) # input
flops = profile(deepcopy(model), inputs=(img,), verbose=False)[0] / 1E9 * 2 # stride GFLOPs
img_size = img_size if isinstance(img_size, list) else [img_size, img_size] # expand if int/float
fs = ', %.1f GFLOPs' % (flops * img_size[0] / stride * img_size[1] / stride) # 640x640 GFLOPs
except (ImportError, Exception):
fs = ''
LOGGER.info(f"Model Summary: {len(list(model.modules()))} layers, {n_p} parameters, {n_g} gradients{fs}")
def load_classifier(name='resnet101', n=2):
# Loads a pretrained model reshaped to n-class output
model = torchvision.models.__dict__[name](pretrained=True)
# ResNet model properties
# input_size = [3, 224, 224]
# input_space = 'RGB'
# input_range = [0, 1]
# mean = [0.485, 0.456, 0.406]
# std = [0.229, 0.224, 0.225]
# Reshape output to n classes
filters = model.fc.weight.shape[1]
model.fc.bias = nn.Parameter(torch.zeros(n), requires_grad=True)
model.fc.weight = nn.Parameter(torch.zeros(n, filters), requires_grad=True)
model.fc.out_features = n
return model
def scale_img(img, ratio=1.0, same_shape=False, gs=32): # img(16,3,256,416)
# scales img(bs,3,y,x) by ratio constrained to gs-multiple
if ratio == 1.0:
return img
else:
h, w = img.shape[2:]
s = (int(h * ratio), int(w * ratio)) # new size
img = F.interpolate(img, size=s, mode='bilinear', align_corners=False) # resize
if not same_shape: # pad/crop img
h, w = [math.ceil(x * ratio / gs) * gs for x in (h, w)]
return F.pad(img, [0, w - s[1], 0, h - s[0]], value=0.447) # value = imagenet mean
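# Worked example (illustration only): downscale a 640x640 batch while keeping the output a
# multiple of gs=32, padding with the ImageNet mean value.
# x = torch.zeros(16, 3, 640, 640)
# y = scale_img(x, ratio=0.65)  # -> shape (16, 3, 416, 416)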
def copy_attr(a, b, include=(), exclude=()):
# Copy attributes from b to a, options to only include [...] and to exclude [...]
for k, v in b.__dict__.items():
if (len(include) and k not in include) or k.startswith('_') or k in exclude:
continue
else:
setattr(a, k, v)
class EarlyStopping:
# YOLOv5 simple early stopper
def __init__(self, patience=30):
self.best_fitness = 0.0 # i.e. mAP
self.best_epoch = 0
self.patience = patience or float('inf') # epochs to wait after fitness stops improving to stop
self.possible_stop = False # possible stop may occur next epoch
def __call__(self, epoch, fitness):
if fitness >= self.best_fitness: # >= 0 to allow for early zero-fitness stage of training
self.best_epoch = epoch
self.best_fitness = fitness
delta = epoch - self.best_epoch # epochs without improvement
self.possible_stop = delta >= (self.patience - 1) # possible stop may occur next epoch
stop = delta >= self.patience # stop training if patience exceeded
if stop:
LOGGER.info(f'EarlyStopping patience {self.patience} exceeded, stopping training.')
return stop
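# Hedged usage sketch: how the stopper is typically driven from a training loop.
# `train_one_epoch` and `validate` are hypothetical helpers, not defined in this file.
# stopper = EarlyStopping(patience=30)
# for epoch in range(epochs):
#     train_one_epoch(model)
#     fitness = validate(model)    # e.g. mAP
#     if stopper(epoch, fitness):  # True once `patience` epochs pass without improvement
#         break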
class ModelEMA:
""" Model Exponential Moving Average from https://github.com/rwightman/pytorch-image-models
Keep a moving average of everything in the model state_dict (parameters and buffers).
This is intended to allow functionality like
https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage
A smoothed version of the weights is necessary for some training schemes to perform well.
This class is sensitive where it is initialized in the sequence of model init,
GPU assignment and distributed training wrappers.
"""
def __init__(self, model, decay=0.9999, updates=0):
# Create EMA
self.ema = deepcopy(model.module if is_parallel(model) else model).eval() # FP32 EMA
# if next(model.parameters()).device.type != 'cpu':
# self.ema.half() # FP16 EMA
self.updates = updates # number of EMA updates
self.decay = lambda x: decay * (1 - math.exp(-x / 2000)) # decay exponential ramp (to help early epochs)
for p in self.ema.parameters():
p.requires_grad_(False)
def update(self, model):
# Update EMA parameters
with torch.no_grad():
self.updates += 1
d = self.decay(self.updates)
msd = model.module.state_dict() if is_parallel(model) else model.state_dict() # model state_dict
for k, v in self.ema.state_dict().items():
if v.dtype.is_floating_point:
v *= d
v += (1. - d) * msd[k].detach()
def update_attr(self, model, include=(), exclude=('process_group', 'reducer')):
# Update EMA attributes
copy_attr(self.ema, model, include, exclude)
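# Hedged usage sketch: keep an EMA copy alongside the live model and update it after every
# optimizer step; the EMA weights are then the ones used for validation or export.
# `loader` and `compute_loss` are hypothetical, not defined in this file.
# ema = ModelEMA(model)
# for imgs, targets in loader:
#     loss = compute_loss(model(imgs), targets)
#     loss.backward()
#     optimizer.step()
#     optimizer.zero_grad()
#     ema.update(model)  # blend the current weights into the moving average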
the-stack_106_27097
#! python
"""MS²ReScore: Sensitive PSM rescoring with predicted MS² peak intensities and RTs."""
import logging
import os
import subprocess
import tempfile
from multiprocessing import cpu_count
from typing import Dict, Optional, Union
from ms2rescore import id_file_parser, rescore_core, setup_logging
from ms2rescore._exceptions import MS2ReScoreError
from ms2rescore._version import __version__
from ms2rescore.config_parser import parse_config
from ms2rescore.retention_time import RetentionTimeIntegration
from ms2rescore import plotting
logger = logging.getLogger(__name__)
class MS2ReScore:
"""
MS²ReScore: Sensitive PSM rescoring with predicted MS² peak intensities and RTs.
Parameters
----------
parse_cli_args : bool, optional
parse command line arguments, default True
configuration : dict, optional
dict containing general ms2rescore configuration; should at least contain
`identification_file`; required if `parse_cli_args` is False
set_logger : bool, optional
set custom logger or not, default False
"""
def __init__(
self,
parse_cli_args: bool = True,
configuration: Optional[Dict] = None,
set_logger: bool = False,
) -> None:
"""Initialize MS2ReScore object."""
self.config = parse_config(
parse_cli_args=parse_cli_args, config_class=configuration
)
if set_logger:
setup_logging.setup_logging(self.config["general"]["log_level"])
self._validate_cli_dependency("percolator -h")
self._validate_cli_dependency("ms2pip -h")
logger.debug(
"Using %i of %i available CPUs.",
self.config["general"]["num_cpu"],
cpu_count(),
)
if not self.config["general"]["tmp_path"]:
self.tmp_path = tempfile.mkdtemp()
self.config["general"]["tmp_path"] = self.tmp_path
else:
self.tmp_path = self.config["general"]["tmp_path"]
os.makedirs(self.tmp_path, exist_ok=True)
self.tmpfile_basepath = os.path.join(
self.tmp_path,
os.path.basename(
os.path.splitext(self.config["general"]["identification_file"])[0]
),
)
selected_pipeline = self._select_pipeline()
self.pipeline = selected_pipeline(self.config, self.tmpfile_basepath)
logger.info("Using %s.", selected_pipeline.__name__)
@staticmethod
def _validate_cli_dependency(command):
"""Validate that command returns zero exit status."""
if subprocess.getstatusoutput(command)[0] != 0:
logger.critical(
"`%s` returned non-zero exit status. Please verify installation.",
command,
)
exit(1)
@staticmethod
def _infer_pipeline(identification_file: str):
"""Infer pipeline from identification file."""
logger.debug("Inferring pipeline from identification filename...")
if identification_file.lower().endswith(".pin"):
pipeline = id_file_parser.PinPipeline
elif identification_file.lower().endswith(".t.xml"):
pipeline = id_file_parser.TandemPipeline
elif identification_file.endswith("msms.txt"):
pipeline = id_file_parser.MaxQuantPipeline
elif identification_file.lower().endswith(".mzid"):
pipeline = id_file_parser.MSGFPipeline
else:
raise MS2ReScoreError(
"Could not infer pipeline from identification filename. Please specify "
"`general` > `pipeline` in your configuration file."
)
return pipeline
def _select_pipeline(self):
"""Select specific rescoring pipeline."""
if self.config["general"]["pipeline"] == "infer":
pipeline = self._infer_pipeline(
self.config["general"]["identification_file"]
)
elif self.config["general"]["pipeline"] == "pin":
pipeline = id_file_parser.PinPipeline
elif self.config["general"]["pipeline"] == "maxquant":
pipeline = id_file_parser.MaxQuantPipeline
elif self.config["general"]["pipeline"] == "msgfplus":
pipeline = id_file_parser.MSGFPipeline
elif self.config["general"]["pipeline"] == "tandem":
pipeline = id_file_parser.TandemPipeline
elif self.config["general"]["pipeline"] == "peptideshaker":
pipeline = id_file_parser.PeptideShakerPipeline
else:
raise NotImplementedError(self.config["general"]["pipeline"])
return pipeline
@staticmethod
def get_ms2pip_features(
ms2pip_config: Dict,
peprec_filename: Union[str, os.PathLike],
mgf_filename: Union[str, os.PathLike],
output_filename: Union[str, os.PathLike],
num_cpu: int,
):
"""Get predicted MS² peak intensities from MS2PIP."""
logger.info("Adding MS2 peak intensity features with MS²PIP.")
ms2pip_config_filename = output_filename + "_ms2pip_config.txt"
rescore_core.make_ms2pip_config(ms2pip_config, filename=ms2pip_config_filename)
# Check if input files exist
for f in [peprec_filename, mgf_filename]:
if not os.path.isfile(f):
raise FileNotFoundError(f)
ms2pip_command = "ms2pip {} -c {} -s {} -n {}".format(
peprec_filename,
ms2pip_config_filename,
mgf_filename,
num_cpu,
)
logger.debug("Running MS2PIP: %s", ms2pip_command)
subprocess.run(ms2pip_command, shell=True, check=True)
logger.info("Calculating features from predicted spectra")
preds_filename = (
peprec_filename.replace(".peprec", "")
+ "_"
+ ms2pip_config["model"]
+ "_pred_and_emp.csv"
)
rescore_core.calculate_features(
preds_filename,
output_filename + "_ms2pipfeatures.csv",
num_cpu,
)
@staticmethod
def get_rt_features(
peprec_filename: Union[str, os.PathLike],
output_filename: Union[str, os.PathLike],
num_cpu: int,
):
"""Get retention time features with DeepLC."""
logger.info("Adding retention time features with DeepLC.")
rt_int = RetentionTimeIntegration(
peprec_filename,
output_filename + "_rtfeatures.csv",
num_cpu=num_cpu,
)
rt_int.run()
def _run_percolator(self):
"""Run Percolator with different feature subsets."""
for subset in self.config["general"]["feature_sets"]:
subname = (
self.config["general"]["output_filename"]
+ "_"
+ "_".join(subset)
+ "_features"
)
percolator_cmd = "percolator "
for op in self.config["percolator"].keys():
percolator_cmd = percolator_cmd + "--{} {} ".format(
op, self.config["percolator"][op]
)
percolator_cmd = (
percolator_cmd
+ "{} -m {} -M {} -w {} -v 0 -U --post-processing-tdc\n".format(
subname + ".pin",
subname + ".pout",
subname + ".pout_dec",
subname + ".weights",
)
)
logger.info("Running Percolator: %s", percolator_cmd)
subprocess.run(percolator_cmd, shell=True)
if not os.path.isfile(subname + ".pout"):
logger.error("Error running Percolator")
def run(self):
"""Run MS²ReScore."""
peprec = self.pipeline.get_peprec()
peprec_filename = self.tmpfile_basepath + ".peprec"
peprec.to_csv(peprec_filename)
search_engine_features = self.pipeline.get_search_engine_features()
search_engine_features_filename = (
self.tmpfile_basepath + "_search_engine_features.csv"
)
search_engine_features.to_csv(search_engine_features_filename, index=False)
if any("ms2pip" in fst for fst in self.config["general"]["feature_sets"]):
self.get_ms2pip_features(
self.config["ms2pip"],
peprec_filename,
self.pipeline.path_to_mgf_file,
self.tmpfile_basepath,
self.config["general"]["num_cpu"],
)
if any("rt" in fst for fst in self.config["general"]["feature_sets"]):
self.get_rt_features(
peprec_filename,
self.tmpfile_basepath,
self.config["general"]["num_cpu"],
)
logger.info("Generating PIN files")
rescore_core.write_pin_files(
peprec_filename,
self.config["general"]["output_filename"],
searchengine_features_path=search_engine_features_filename,
ms2pip_features_path=self.tmpfile_basepath + "_ms2pipfeatures.csv",
rt_features_path=self.tmpfile_basepath + "_rtfeatures.csv",
feature_sets=self.config["general"]["feature_sets"],
)
if self.config["general"]["run_percolator"]:
self._run_percolator()
logger.info("Generating Rescore plots")
if self.config["general"]["plotting"]:
plotting.PIN(
peprec_filename, self.config["general"]["output_filename"]
)
for fset in self.config["general"]["feature_sets"]:
pout_file = (
self.config["general"]["output_filename"]
+ "_"
+ "_".join(fset)
+ "_features.pout"
)
pout_decoy_file = (
self.config["general"]["output_filename"]
+ "_"
+ "_".join(fset)
+ "_features.pout_dec"
)
plotting.POUT(
pout_file,
pout_decoy_file,
self.config["general"]["output_filename"],
" ".join(fset)
)
plotting.RescoreRecord.save_plots_to_pdf(
self.config["general"]["output_filename"] + "_plots.pdf",
FDR_thresholds=[0.01, 0.001],
)
logger.info("MS²ReScore finished!")
the-stack_106_27099
# coding=utf-8
__author__ = 'jamon'
import copy
from obespoir.share.ob_log import logger
from share.message_ids import *
from service.mahjong.models.playeract.base_player_act import BasePlayerAct
from service.mahjong.constants.gamedefine import Act, SettleType
from service.mahjong.models.actrecord import ActRecord
from service.mahjong.models.timermanager import timer_manager_ins
class DianHu(BasePlayerAct):
def __init__(self, game_data):
super(DianHu, self).__init__(game_data=game_data)
self.step_handlers = {
"param_check": self.param_check, # 参数验证
"clear_other_act": self.clear_other_act, # 清除该玩家其他动作
"set_data": self.set_data, # 设置相应数据
"record": self.record, # 记录玩家动作
"notify_other_player": self.notify_other_player, # 记录玩家动作
}
        self.seat_id = -1  # seat of the player who declares the win
        self.hook_seat_id = -1  # seat the win is robbed from, e.g. robbing a kong for the win (-1 if none)
        self.hand_card = None
        self.dian_hu_card = 0  # the discarded tile that is claimed for the win
def execute(self, act_params={}):
"""
执行点炮胡牌
:param act_params:
:return:
"""
logger.debug(u"点炮胡牌: %s", str(act_params))
for seat_id, params in act_params.items():
for step in self.game_config.player_act_step.get(Act.DIAN_HU):
for name, cfg in step.items():
ret = self.step_handlers.get(name)(seat_id=seat_id, params=params, config_params=cfg)
if not ret:
logger.error("step:%s", step)
return
if not self.game_config.has_tong_pao:
                # if tong pao (several players winning off the same discard) is not allowed,
                # only take the first winning player
break
self.settle(settle_type_list=[SettleType.HU])
if self.game_config.is_hu_end:
            # end the current round once a win occurs
self.end_game()
return 1
    def param_check(self, seat_id, params, config_params):  # parameter validation
hook_seat_id = params.get("hook_seat_id", -1)
if not self.game_data.last_chu_card_val:
logger.error("dian_hu params error: %s", str([seat_id, params]))
return
hand_card_vals = self.players[seat_id].hand_card.hand_card_vals
# if 1 != len(hand_card_vals) % 3 or not self.players[seat_id].can_hu_result:
if 1 != len(hand_card_vals) % 3:
logger.error("dian_hu params error: %s", str([seat_id, params]))
return
self.seat_id = seat_id
self.hook_seat_id = hook_seat_id
self.hand_card = self.game_data.players[seat_id].hand_card
self.dian_hu_card = self.game_data.last_chu_card_val
return 1
    def clear_other_act(self, seat_id, params, config_params):  # clear this player's other pending actions
timer_manager_ins.kill_timer(self.desk_id, self.seat_id, is_force=True)
return 1
    def set_data(self, seat_id, params, config_params):  # set the relevant data
        self.players[self.seat_id].hook_hu_seat_id = self.hook_seat_id  # seat id the robbed win comes from
        self.players[self.seat_id].hand_card.qiang_gang_hu_seat_id = self.hook_seat_id  # seat id the robbed win comes from
        # save the hand info into hand_card_for_settle_show
        self.game_data.players[self.seat_id].hand_card.hand_card_for_settle_show[-1] = [self.game_data.last_chu_card_val]
        # store the winning tile value
        self.game_data.players[self.seat_id].hand_card.hu_card_val = self.game_data.last_chu_card_val
        # remove the claimed tile from the discarding player's already-played tiles
self.players[self.game_data.last_chu_card_seat_id].hand_card.out_card_vals.remove(
self.game_data.last_chu_card_val)
        # combine each player's hand cards
for i in range(self.game_data.max_player_num):
self.game_data.players[i].hand_card.union_hand_card()
        # compute settlement-related data, used for message 101006
type_list = self.game_data.hu_manager.check_hu_result(self.hand_card, self.dian_hu_card)
self.game_data.hu_player_static[self.seat_id] = {
"type_list": type_list,
"is_zi_mo": 0,
"source_seat_id": self.game_data.last_chu_card_seat_id,
"guo_hu_count": self.game_data.players[self.seat_id].hand_card.guo_hu_num,
"settle_hand_card": self.game_data.players[self.seat_id].hand_card.hand_card_for_settle_show
}
        # check whether this is a win on the very first discard (ren hu)
if self._is_first_chu_card():
self.players[self.seat_id].hand_card.is_ren_hu = 1
return 1
    def record(self, seat_id, params, config_params):  # record the player's action
act_record = ActRecord(self.seat_id, Act.DIAN_HU, [self.game_data.last_chu_card_val])
self.game_data.act_record_list.append(act_record)
self.players[self.seat_id].hand_card.record_dian_hu_card(self.game_data.last_chu_card_seat_id,
self.game_data.last_chu_card_val)
return 1
    def notify_other_player(self, **kwargs):  # notify the other players that the action has been executed
act_info = {"seat_id": self.seat_id,
"act_type": Act.DIAN_HU,
"card_list": self.dian_hu_card}
self.notify_other_player_act_executed(self.seat_id,
act_info=act_info,
max_player_num=self.game_data.max_player_num)
return 1
def _is_first_chu_card(self):
        if len(self.game_data.act_record_list) > 1:
return False
if self.game_data.act_record_list[0].act_type == 10:
return True
return False
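    # Hedged usage sketch: the action is normally driven by the game engine, roughly like the
    # call below; the seat id and params are made-up values for illustration only.
    # dian_hu = DianHu(game_data)
    # dian_hu.execute(act_params={2: {"hook_seat_id": -1}})  # seat 2 wins on the last discard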
the-stack_106_27100
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-argument, ungrouped-imports
"""Namespace for supporting Relay operators on VTA."""
from __future__ import absolute_import as _abs
import tvm
import topi
from tvm.relay.op import op as reg
from tvm.relay.op import strategy as _strategy
from tvm.relay.op.op import OpPattern, OpStrategy
from .util import is_packed_layout
from .vta_conv2d import conv2d_packed, schedule_conv2d_packed
from .vta_conv2d_transpose import conv2d_transpose_packed, schedule_conv2d_transpose_packed
from .vta_group_conv2d import group_conv2d_packed, schedule_group_conv2d_packed
from .vta_dense import dense_packed, schedule_dense_packed
from ..environment import get_env
# override to force partition at copy
reg.register_pattern("copy", OpPattern.INJECTIVE, level=15)
# add clip vta strategy
def compute_clip_vta(attrs, inputs, output_type):
""" Clip operator. """
x = inputs[0]
a_min = attrs.a_min
a_max = attrs.a_max
const_min = tvm.const(a_min, x.dtype)
const_max = tvm.const(a_max, x.dtype)
with tvm.tag_scope(topi.tag.ELEMWISE):
x = tvm.compute(
x.shape, lambda *i: tvm.min(x(*i), const_max), name="clipA")
x = tvm.compute(
x.shape, lambda *i: tvm.max(x(*i), const_min), name="clipB")
return [x]
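# Worked example (illustration only): with a_min=0 and a_max=6 the two elementwise stages
# reproduce ReLU6, e.g. x = [-2, 3, 9] -> min(x, 6) = [-2, 3, 6] -> max(., 0) = [0, 3, 6].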
def clip_strategy_vta(attrs, inputs, out_type, target):
strategy = OpStrategy()
strategy.add_implementation(
compute_clip_vta,
_strategy.wrap_topi_schedule(topi.generic.schedule_injective),
name="clip.vta")
return strategy
reg.get("clip").get_attr("FTVMStrategy").register(clip_strategy_vta, "vta")
@_strategy.conv2d_strategy.register("vta")
def conv2d_strategy_vta(attrs, inputs, out_type, target):
"""conv2d vta strategy"""
strategy = OpStrategy()
kernel = inputs[1]
dilation = topi.util.get_const_tuple(attrs.dilation)
groups = attrs.groups
layout = attrs.data_layout
assert dilation == (1, 1), "support for dilation limited to (1, 1)"
if is_packed_layout(layout):
if groups == 1:
env = get_env()
assert env.LOG_INP_WIDTH == 3, "only support 8bit inp for now"
assert env.LOG_WGT_WIDTH == 3, "only support 8bit wgt for now"
assert kernel.dtype == "int8"
strategy.add_implementation(
_strategy.wrap_compute_conv2d(conv2d_packed, True),
_strategy.wrap_topi_schedule(schedule_conv2d_packed),
name="conv2d_packed.vta")
else: # group_conv2d
strategy.add_implementation(
_strategy.wrap_compute_conv2d(group_conv2d_packed, has_groups=True),
_strategy.wrap_topi_schedule(schedule_group_conv2d_packed),
name="group_conv2d_packed.vta")
return strategy
# If it's not packed, run on ARM CPU
arm_tgt = tvm.target.arm_cpu(target.model)
return _strategy.arm_cpu.conv2d_strategy_arm_cpu(attrs, inputs, out_type, arm_tgt)
@_strategy.conv2d_transpose_strategy.register("vta")
def conv2d_transpose_strategy_vta(attrs, inputs, out_type, target):
"""conv2d_transpose vta strategy"""
dilation = topi.util.get_const_tuple(attrs.dilation)
layout = attrs.data_layout
assert dilation == (1, 1), "support for dilation limited to (1, 1)"
if is_packed_layout(layout):
strategy = OpStrategy()
strategy.add_implementation(
_strategy.wrap_compute_conv2d_transpose(conv2d_transpose_packed),
_strategy.wrap_topi_schedule(schedule_conv2d_transpose_packed),
name="conv2d_transpose_packed.vta")
return strategy
# If it's not packed, run on ARM CPU
arm_tgt = tvm.target.arm_cpu(target.model)
return _strategy.arm_cpu.conv2d_transpose_strategy_arm_cpu(attrs, inputs, out_type, arm_tgt)
@_strategy.dense_strategy.register("vta")
def dense_strategy_vta(attrs, inputs, out_type, target):
"""dense vta strategy"""
    if len(inputs[0].shape) == 4:  # this implies the layout is packed
strategy = OpStrategy()
strategy.add_implementation(
_strategy.wrap_compute_dense(dense_packed),
_strategy.wrap_topi_schedule(schedule_dense_packed),
name="dense_packed.vta")
return strategy
# If it's not packed, run on ARM CPU
arm_tgt = tvm.target.arm_cpu(target.model)
return _strategy.x86.dense_strategy_cpu(attrs, inputs, out_type, arm_tgt)
the-stack_106_27101
# those modules are all taken from ParlAI
import numpy as np
import numbers
import warnings
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from typing import Dict, Tuple, Optional
from core.modules.treesearch import GreedySearch, BeamSearch, TopKSampling, \
NucleusSampling, DelayedBeamSearch
from torch.nn.parameter import Parameter
LAYER_NORM_EPS = 1e-5
def neginf(dtype: torch.dtype) -> float:
"""
Return a representable finite number near -inf for a dtype.
"""
return -1e20
def _normalize(tensor, norm_layer):
"""
Broadcast layer norm.
"""
is_cpu = tensor.device == 'cpu' or tensor.device.type == 'cpu'
return norm_layer(tensor)
def gelu(tensor):
"""
Compute gelu function.
c.f. https://arxiv.org/abs/1606.08415
"""
return 0.5 * tensor * (1.0 + torch.erf(tensor / math.sqrt(2.0)))
def create_position_codes(n_pos, dim, out):
"""
Create positional codes and store them in ``out``.
"""
position_enc = np.array(
[
[pos / np.power(10000, 2 * j / dim) for j in range(dim // 2)]
for pos in range(n_pos)
]
)
out[:, 0::2] = torch.FloatTensor(np.sin(position_enc)).type_as(out)
out[:, 1::2] = torch.FloatTensor(np.cos(position_enc)).type_as(out)
out.detach_()
out.requires_grad = False
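# The codes above follow the sinusoidal scheme of Vaswani et al. (2017):
#   PE(pos, 2j)   = sin(pos / 10000^(2j / dim))
#   PE(pos, 2j+1) = cos(pos / 10000^(2j / dim))
# i.e. even feature indices receive sines and odd indices cosines of the same frequencies.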
class TransformerEncodeDecoderVaswani(nn.Module):
def __init__(self, opt, dictionary, embedding_size, embedding_weights=None,
pad_idx=None, start_idx=None, end_idx=None, device='cpu'):
super(TransformerEncodeDecoderVaswani, self).__init__()
self.pad_idx = pad_idx
self.end_idx = end_idx
self.start_idx = start_idx
self.device = device
if embedding_weights is None and embedding_size is None:
            raise IOError('Provide pretrained embeddings or an embedding size')
        if embedding_weights is not None:
            assert self.pad_idx is not None, "pad_idx is None"
print("Embeddings init with pretrained!")
self.embedding = nn.Embedding(len(dictionary), embedding_size,
padding_idx=self.pad_idx)
self.embedding.weight = nn.Parameter(torch.from_numpy(
embedding_weights),
requires_grad=opt.learn_embeddings)
else:
self.embedding = nn.Embedding(len(dictionary), embedding_size,
padding_idx=self.pad_idx)
print("Embeddings init with normal distr!")
nn.init.normal_(self.embedding.weight, 0, embedding_size ** -0.5)
self.embedding.weight.requires_grad = opt.learn_embeddings
        # TODO: fix embeddings if it's None!!
self.encoder = TransformerEncoder(
n_heads=opt.n_heads,
n_layers=opt.n_layers,
embedding_size=embedding_size,
ffn_size=opt.ffn_size,
vocabulary_size=len(dictionary),
embedding=self.embedding,
dropout=opt.dropout,
attention_dropout=opt.attention_dropout,
relu_dropout=opt.relu_dropout,
padding_idx=self.pad_idx,
learn_positional_embeddings=opt.learn_positional_embeddings,
embeddings_scale=opt.embeddings_scale,
reduction_type=None,
n_positions=opt.n_positions,
n_segments=opt.n_segments,
activation=opt.activation,
variant=opt.variant,
output_scaling=opt.output_scaling)
self.decoder = TransformerDecoder(
n_heads=opt.n_heads,
n_layers=opt.n_layers,
embedding_size=embedding_size,
ffn_size=opt.ffn_size,
vocabulary_size=len(dictionary),
embedding=self.embedding,
dropout=opt.dropout,
attention_dropout=opt.attention_dropout,
relu_dropout=opt.relu_dropout,
padding_idx=self.pad_idx,
learn_positional_embeddings=opt.learn_positional_embeddings,
embeddings_scale=opt.embeddings_scale,
n_positions=opt.n_positions,
activation=opt.activation,
variant=opt.variant,
n_segments=opt.n_segments)
def reorder_encoder_states(self, encoder_states, indices):
"""
Reorder the encoder states.
See ``TorchGeneratorModel.reorder_encoder_states`` for a description.
"""
enc, mask = encoder_states
if not torch.is_tensor(indices):
indices = torch.LongTensor(indices).to(enc.device)
enc = torch.index_select(enc, 0, indices)
mask = torch.index_select(mask, 0, indices)
return enc, mask
def reorder_decoder_incremental_state(
self, incremental_state: Dict[int, dict], inds: torch.Tensor
) -> Dict[int, dict]:
"""
Reorder the decoder incremental state.
See ``TorchGeneratorModel.reorder_decoder_incremental_state`` for a description.
Here, incremental_state is a dict whose keys are layer indices and whose values
are dicts containing the incremental state for that layer.
"""
return {
idx: layer.reorder_incremental_state(incremental_state[idx], inds)
for idx, layer in enumerate(self.decoder.layers)
}
def output(self, tensor):
"""
Compute output logits.
"""
# project back to vocabulary
output = F.linear(tensor, self.embedding.weight)
# compatibility with fairseq: fairseq sometimes reuses BOS tokens and
# we need to force their probability of generation to be 0.
#output[:, :, self.start_idx] = neginf(output.dtype)
return output
def decode_forced(self, encoder_states, targets):
"""
Decode with a fixed, true sequence, computing loss.
Useful for training, or ranking fixed candidates.
:param targets:
the prediction targets. Contains both the start and end tokens.
:type targets:
LongTensor[bsz, time]
:param encoder_states:
Output of the encoder. Model specific types.
:type encoder_states:
model specific
:return:
pair (logits, choices) containing the logits and MLE predictions
:rtype:
(FloatTensor[bsz, targets, vocab], LongTensor[bsz, targets])
"""
bsz = targets.size(0)
seqlen = targets.size(1)
inputs = targets.narrow(1, 0, seqlen - 1)
start_idxs = torch.LongTensor([self.start_idx]).expand(bsz, 1)
start_idxs = start_idxs.to(self.device)
inputs = torch.cat([start_idxs, inputs], dim=1)
latent, _ = self.decoder(inputs, encoder_states)
logits = self.output(latent)
_, preds = logits.max(dim=2)
return logits, preds
def forward(self, *xs, ys=None, prev_enc=None, maxlen=None, bsz=None):
"""
Get output predictions from the model.
:param xs:
input to the encoder
:type xs:
LongTensor[bsz, seqlen]
:param ys:
Expected output from the decoder. Used
for teacher forcing to calculate loss.
:type ys:
LongTensor[bsz, outlen]
:param prev_enc:
if you know you'll pass in the same xs multiple times, you can pass
in the encoder output from the last forward pass to skip
recalcuating the same encoder output.
:param maxlen:
max number of tokens to decode. if not set, will use the length of
the longest label this model has seen. ignored when ys is not None.
:param bsz:
if ys is not provided, then you must specify the bsz for greedy
decoding.
:return:
(scores, candidate_scores, encoder_states) tuple
- scores contains the model's predicted token scores.
(FloatTensor[bsz, seqlen, num_features])
- candidate_scores are the score the model assigned to each candidate.
(FloatTensor[bsz, num_cands])
- encoder_states are the output of model.encoder. Model specific types.
Feed this back in to skip encoding on the next call.
"""
assert ys is not None, "Greedy decoding in TGModel.forward no longer supported."
# TODO: get rid of longest_label
# keep track of longest label we've ever seen
# we'll never produce longer ones than that during prediction
#self.longest_label = max(self.longest_label, ys.size(1))
# TODO: longest_label how to get rid of it?
self.longest_label = ys.size(1)
# use cached encoding if available
encoder_states = prev_enc if prev_enc is not None else self.encoder(*xs)
# use teacher forcing
scores, preds = self.decode_forced(encoder_states, ys)
return scores, preds, encoder_states
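    # Hedged training sketch: teacher forcing with the forward pass above; `xs` and `ys` are
    # hypothetical padded source/target id tensors and `pad_idx` the padding index.
    # scores, preds, _ = model(xs, ys=ys)  # scores: [bsz, tgt_len, vocab] logits
    # loss = F.cross_entropy(scores.view(-1, scores.size(-1)),
    #                        ys.view(-1), ignore_index=pad_idx)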
def generate(self,*inputs, beam, max_ts,options):
"""
Generate an output with beam search.
Depending on the options, this may perform greedy/topk/nucleus generation.
:param Batch batch:
Batch structure with input and labels
:param int beam_size:
Size of each beam during the search
:param int max_ts:
the maximum length of the decoded sequence
:return:
tuple (beam_pred_scores, beams)
- beam_preds_scores: list of (prediction, score) pairs for each sample in
Batch
- beams :list of Beam instances defined in Beam class, can be used for any
following postprocessing, e.g. dot logging.
"""
bsz = inputs[0].shape[0]
encoder_states = self.encoder(*inputs)
        beams = [self._treesearch_factory(options) for _ in range(bsz)]
def _treesearch_factory(self, opt):
method = opt.method
beam_size = opt.beam_size
if method == 'greedy':
return GreedySearch(
beam_size,
min_length=0,
block_ngram=opt.beam_block_ngram,
context_block_ngram=opt.beam_context_block_ngram,
length_penalty=opt.beam_length_penalty,
padding_token=self.NULL_IDX,
bos_token=self.START_IDX,
eos_token=self.END_IDX,
device=self.device,
)
elif method == 'beam':
return BeamSearch(
beam_size,
min_length=opt.beam_min_length,
block_ngram=opt.beam_block_ngram,
context_block_ngram=opt.beam_context_block_ngram,
length_penalty=opt.beam_length_penalty,
padding_token=self.NULL_IDX,
bos_token=self.START_IDX,
eos_token=self.END_IDX,
device=self.device,
)
elif method == 'delayedbeam':
return DelayedBeamSearch(
opt.topk,
opt.beam_delay,
beam_size,
min_length=opt.beam_min_length,
block_ngram=opt.beam_block_ngram,
context_block_ngram=opt.beam_context_block_ngram,
length_penalty=opt.beam_length_penalty,
padding_token=self.NULL_IDX,
bos_token=self.START_IDX,
eos_token=self.END_IDX,
device=self.device,
)
elif method == 'topk':
return TopKSampling(
opt.topk,
beam_size,
min_length=opt.beam_min_length,
block_ngram=opt.beam_block_ngram,
context_block_ngram=opt.beam_context_block_ngram,
length_penalty=opt.beam_length_penalty,
padding_token=self.NULL_IDX,
bos_token=self.START_IDX,
eos_token=self.END_IDX,
device=self.device,
)
elif method == 'nucleus':
return NucleusSampling(
opt.topp,
beam_size,
min_length=opt.beam_min_length,
block_ngram=opt.beam_block_ngram,
context_block_ngram=opt.beam_context_block_ngram,
length_penalty=opt.beam_length_penalty,
padding_token=self.NULL_IDX,
bos_token=self.START_IDX,
eos_token=self.END_IDX,
device=self.device,
)
else:
raise ValueError(f"Can't use inference method {method}")
class TransformerEncoder(nn.Module):
"""
Transformer encoder module.
:param int n_heads: the number of multihead attention heads.
:param int n_layers: number of transformer layers.
:param int embedding_size: the embedding sizes. Must be a multiple of n_heads.
:param int ffn_size: the size of the hidden layer in the FFN
:param embedding: an embedding matrix for the bottom layer of the transformer.
If none, one is created for this encoder.
:param float dropout: Dropout used around embeddings and before layer
layer normalizations. This is used in Vaswani 2017 and works well on
large datasets.
    :param float attention_dropout: Dropout performed after the multihead attention
        softmax. This is not used in Vaswani 2017.
    :param float relu_dropout: Dropout used after the ReLU in the FFN. Not used
in Vaswani 2017, but used in Tensor2Tensor.
:param int padding_idx: Reserved padding index in the embeddings matrix.
:param bool learn_positional_embeddings: If off, sinusoidal embeddings are
used. If on, position embeddings are learned from scratch.
:param bool embeddings_scale: Scale embeddings relative to their dimensionality.
Found useful in fairseq.
:param bool reduction: If true, returns the mean vector for the entire encoding
sequence.
:param int n_positions:
Size of the position embeddings matrix.
:param int n_segments:
Number of segments/lang/sentence embeddings.
:param activation:
Type of nonlinear activation. Can be relu or gelu.
:param variant:
Which transformer architecture to use. Could be AIAYN or XLM.
Future versions may support things like GPT-2, ...
:param output_scaling:
Scale the outputs by a given scalar
"""
def __init__(
self,
n_heads,
n_layers,
embedding_size,
ffn_size,
vocabulary_size,
embedding=None,
dropout=0.0,
attention_dropout=0.0,
relu_dropout=0.0,
padding_idx=0,
learn_positional_embeddings=False,
embeddings_scale=False,
reduction_type='mean',
n_positions=1024,
activation='relu',
variant='aiayn',
n_segments=0,
output_scaling=1.0,
):
super(TransformerEncoder, self).__init__()
self.embedding_size = embedding_size
self.ffn_size = ffn_size
self.n_layers = n_layers
self.n_heads = n_heads
self.dim = embedding_size
self.embeddings_scale = embeddings_scale
self.reduction_type = reduction_type
self.padding_idx = padding_idx
# this is --dropout, not --relu-dropout or --attention-dropout
self.dropout_frac = dropout
self.dropout = nn.Dropout(p=self.dropout_frac)
self.variant = variant
self.n_segments = n_segments
self.n_positions = n_positions #used for positional embeddings!
self.out_dim = embedding_size
assert (
embedding_size % n_heads == 0
), 'Transformer embedding size must be a multiple of n_heads'
# check input formats:
if embedding is not None:
assert (
embedding_size is None or embedding_size == embedding.weight.shape[1]
), "Embedding dim must match the embedding size."
if embedding is not None:
self.embeddings = embedding
else:
raise AssertionError(
"This code should not execute. Left here in case we want to enable it."
)
assert padding_idx is not None
self.embeddings = nn.Embedding(
vocabulary_size, embedding_size, padding_idx=padding_idx
)
nn.init.normal_(self.embeddings.weight, 0, embedding_size ** -0.5)
# create the positional embeddings
self.position_embeddings = nn.Embedding(n_positions, embedding_size)
if not learn_positional_embeddings:
create_position_codes(
n_positions, embedding_size, out=self.position_embeddings.weight
)
else:
nn.init.normal_(self.position_embeddings.weight, 0, embedding_size ** -0.5)
# embedding normalization
if self.variant == 'xlm' or self.variant == 'prelayernorm':
self.norm_embeddings = nn.LayerNorm(self.dim, eps=LAYER_NORM_EPS)
elif self.variant == 'aiayn':
pass
else:
raise ValueError("Can't handle --variant {}".format(self.variant))
if self.n_segments >= 1:
self.segment_embeddings = nn.Embedding(self.n_segments, self.dim)
# build the model
self.layers = nn.ModuleList()
for _ in range(self.n_layers):
self.layers.append(
TransformerEncoderLayer(
n_heads,
embedding_size,
ffn_size,
attention_dropout=attention_dropout,
relu_dropout=relu_dropout,
dropout=dropout,
variant=variant,
activation=activation,
)
)
self.output_scaling = output_scaling
def forward(self, input, positions=None, segments=None):
"""
Forward pass.
:param LongTensor[batch,seqlen] input:
The input IDs
:param BoolTensor[batch,seqlen] mask:
The attention mask; 1 means attend, 0 means ignore.
:param LongTensor[batch,seqlen]:
If provided, additionally adds ``segments`` as extra embedding features.
"""
mask = input != self.padding_idx
if positions is None:
positions = (mask.cumsum(dim=1, dtype=torch.int64) - 1).clamp_(min=0)
tensor = self.embeddings(input)
if self.embeddings_scale:
tensor = tensor * np.sqrt(self.dim)
if positions.max().item() > self.n_positions:
warnings.warn(
'You are inputting a sequence of {x} length, but only have '
'--n-positions {y}. Set --truncate or increase --n-positions'.format(
x=positions.max().item(), y=self.n_positions
)
)
position_embs = self.position_embeddings(positions).expand_as(tensor)
tensor = tensor + position_embs
if self.n_segments >= 1:
if segments is None:
segments = torch.zeros_like(input)
tensor = tensor + self.segment_embeddings(segments)
if self.variant == 'xlm':
tensor = _normalize(tensor, self.norm_embeddings)
# --dropout on the embeddings
tensor = self.dropout(tensor)
tensor *= mask.unsqueeze(-1).type_as(tensor)
if getattr(self.layers, 'is_model_parallel', False):
# factored out for readability. It is equivalent to the other
# condition
tensor = self._apply_model_parallel(tensor, mask)
else:
for i in range(self.n_layers):
tensor = self.layers[i](tensor, mask)
if self.variant == 'prelayernorm':
tensor = _normalize(tensor, self.norm_embeddings)
tensor *= self.output_scaling
if self.reduction_type == 'first':
return tensor[:, 0, :]
elif self.reduction_type == 'max':
return tensor.max(dim=1)[0]
elif self.reduction_type == 'mean':
divisor = mask.float().sum(dim=1).unsqueeze(-1).clamp(min=1).type_as(tensor)
output = tensor.sum(dim=1) / divisor
return output
elif self.reduction_type is None or 'none' in self.reduction_type:
return tensor, mask
else:
raise ValueError(
"Can't handle --reduction-type {}".format(self.reduction_type)
)
# def _apply_model_parallel(self, tensor, mask):
# """
# Pipeline application of model parallelism.
# """
# chunks = PipelineHelper.split((tensor, mask))
# work_items = PipelineHelper.schedule_work_items(self.layers, chunks)
#
# for chunk_idx, layer_nos, next_device in work_items:
# s_tensor, s_mask = chunks[chunk_idx]
# for layer_no in layer_nos:
# s_tensor = self.layers[layer_no](s_tensor, s_mask)
# chunks[chunk_idx] = PipelineHelper.chunk_to((s_tensor, s_mask), next_device)
#
# tensor_out, mask_out = PipelineHelper.join(chunks)
# return tensor_out
class TransformerEncoderLayer(nn.Module):
"""
Implements a single Transformer encoder layer.
"""
def __init__(
self,
n_heads,
embedding_size,
ffn_size,
attention_dropout=0.0,
relu_dropout=0.0,
dropout=0.0,
activation='relu',
variant=None,
):
super().__init__()
self.dim = embedding_size
self.ffn_dim = ffn_size
self.activation = activation
self.variant = variant
self.attention = MultiHeadAttention(
n_heads, embedding_size, dropout=attention_dropout # --attention-dropout
)
self.norm1 = nn.LayerNorm(embedding_size, eps=LAYER_NORM_EPS)
self.ffn = TransformerFFN(
embedding_size,
ffn_size,
relu_dropout=relu_dropout,
activation=self.activation,
)
self.norm2 = nn.LayerNorm(embedding_size, eps=LAYER_NORM_EPS)
self.dropout = nn.Dropout(p=dropout)
def forward(self, tensor, mask):
"""
Forward pass.
"""
residual = tensor
if self.variant == 'prelayernorm':
tensor = _normalize(tensor, self.norm1)
attended_tensor, _ = self.attention(tensor, mask=mask)
tensor = residual + self.dropout(attended_tensor)
if self.variant == 'aiayn' or self.variant == 'xlm':
tensor = _normalize(tensor, self.norm1)
residual = tensor
if self.variant == 'prelayernorm':
tensor = _normalize(tensor, self.norm2)
tensor = residual + self.dropout(self.ffn(tensor))
if self.variant == 'aiayn' or self.variant == 'xlm':
tensor = _normalize(tensor, self.norm2)
tensor *= mask.unsqueeze(-1).type_as(tensor)
return tensor
class TransformerDecoder(nn.Module):
"""
Transformer Decoder layer.
:param int n_heads: the number of multihead attention heads.
:param int n_layers: number of transformer layers.
:param int embedding_size: the embedding sizes. Must be a multiple of n_heads.
:param int ffn_size: the size of the hidden layer in the FFN
:param embedding: an embedding matrix for the bottom layer of the transformer.
If none, one is created for this encoder.
:param float dropout: Dropout used around embeddings and before layer
layer normalizations. This is used in Vaswani 2017 and works well on
large datasets.
    :param float attention_dropout: Dropout performed after the multihead attention
        softmax. This is not used in Vaswani 2017.
    :param float relu_dropout: Dropout used after the ReLU in the FFN. Not used
in Vaswani 2017, but used in Tensor2Tensor.
:param int padding_idx: Reserved padding index in the embeddings matrix.
:param bool learn_positional_embeddings: If off, sinusoidal embeddings are
used. If on, position embeddings are learned from scratch.
:param bool embeddings_scale: Scale embeddings relative to their dimensionality.
Found useful in fairseq.
:param int n_positions: Size of the position embeddings matrix.
"""
def __init__(
self,
n_heads,
n_layers,
embedding_size,
ffn_size,
vocabulary_size,
embedding=None,
dropout=0.0,
attention_dropout=0.0,
relu_dropout=0.0,
embeddings_scale=True,
learn_positional_embeddings=False,
padding_idx=None,
n_positions=1024,
n_segments=0,
variant='aiayn',
activation='relu',
):
super().__init__()
self.embedding_size = embedding_size
self.ffn_size = ffn_size
self.n_layers = n_layers
self.n_heads = n_heads
self.dim = embedding_size
self.activation = activation
self.variant = variant
self.embeddings_scale = embeddings_scale
self.dropout = nn.Dropout(p=dropout) # --dropout
self.n_positions = n_positions
self.out_dim = embedding_size
assert (
embedding_size % n_heads == 0
), 'Transformer embedding size must be a multiple of n_heads'
self.embeddings = embedding
if self.variant == 'xlm' or self.variant == 'prelayernorm':
self.norm_embeddings = nn.LayerNorm(self.dim, eps=LAYER_NORM_EPS)
elif self.variant == 'aiayn':
pass
else:
raise ValueError("Can't handle --variant {}".format(self.variant))
# create the positional embeddings
self.position_embeddings = nn.Embedding(n_positions, embedding_size)
if not learn_positional_embeddings:
create_position_codes(
n_positions, embedding_size, out=self.position_embeddings.weight
)
else:
nn.init.normal_(self.position_embeddings.weight, 0, embedding_size ** -0.5)
# build the model
self.layers = nn.ModuleList()
for _ in range(self.n_layers):
self.layers.append(
TransformerDecoderLayer(
n_heads,
embedding_size,
ffn_size,
attention_dropout=attention_dropout,
relu_dropout=relu_dropout,
dropout=dropout,
activation=activation,
variant=variant,
)
)
def forward(self, input, encoder_state, incr_state=None):
"""
Forward pass.
:param LongTensor[batch,seqlen] input:
The decoder inputs (partial or full decoded token IDs).
:param encoder_state:
Output from the encoder module forward pass.
:param incr_state:
The incremental state: a dictionary whose keys index the layers and whose
values contain the incremental state for each layer.
"""
encoder_output, encoder_mask = encoder_state
seq_len = input.size(1)
positions = input.new(seq_len).long()
positions = torch.arange(seq_len, out=positions).unsqueeze(0)
if incr_state is not None:
# We're doing incremental decoding, so select only the most recent position
input = input[:, -1:]
if positions is not None:
positions = positions[:, -1:]
else:
incr_state = {}
tensor = self.embeddings(input)
if self.embeddings_scale:
tensor = tensor * np.sqrt(self.dim)
if self.variant == 'xlm':
tensor = _normalize(tensor, self.norm_embeddings)
if positions.max().item() > self.n_positions:
warnings.warn(
'You are inputting a sequence of {x} length, but only have '
'--n-positions {y}. Set --truncate or increase --n-positions'.format(
x=positions.max().item(), y=self.n_positions
)
)
tensor = tensor + self.position_embeddings(positions).expand_as(tensor)
tensor = self.dropout(tensor) # --dropout
new_incr_state = {}
if getattr(self.layers, 'is_model_parallel', False):
tensor, new_incr_state = self._apply_model_parallel(
tensor, encoder_output, encoder_mask, incr_state
)
else:
for idx, layer in enumerate(self.layers):
tensor, new_incr_state[idx] = layer(
x=tensor,
encoder_output=encoder_output,
encoder_mask=encoder_mask,
incr_state=incr_state.get(idx),
)
if self.variant == 'prelayernorm':
tensor = _normalize(tensor, self.norm_embeddings)
return tensor, new_incr_state
# def _apply_model_parallel(self, tensor, encoder_output, encoder_mask, incr_state):
# """
# Pipeline application of model parallelism.
# """
# chunks = PipelineHelper.split(
# (tensor, encoder_output, encoder_mask, incr_state)
# )
# work_items = PipelineHelper.schedule_work_items(self.layers, chunks)
#
# new_incr_state = [{} for _ in chunks]
#
# for chunk_idx, layer_nos, next_device in work_items:
# s_tensor, s_enc_out, s_enc_mask, s_incr_state = chunks[chunk_idx]
# for layer_no in layer_nos:
# s_tensor, new_incr_state[chunk_idx][layer_no] = self.layers[layer_no](
# x=s_tensor,
# encoder_output=s_enc_out,
# encoder_mask=s_enc_mask,
# incr_state=s_incr_state.get(layer_no),
# )
# chunks[chunk_idx] = PipelineHelper.chunk_to(
# (s_tensor, s_enc_out, s_enc_mask, s_incr_state), next_device
# )
#
# tensor_out = PipelineHelper.join([c[0] for c in chunks])
# new_incr_state = PipelineHelper.join(new_incr_state)
#
# return tensor_out, new_incr_state
class TransformerDecoderLayer(nn.Module):
"""
Implements a single Transformer decoder layer.
Decoder layers are similar to encoder layers but:
1. Self-attention is limited in a casaul (auto-regressive) manner.
2. Attend over all of the encoder states.
"""
def __init__(
self,
n_heads,
embedding_size,
ffn_size,
attention_dropout=0.0,
relu_dropout=0.0,
dropout=0.0,
activation='relu',
variant='aiayn',
):
super().__init__()
self.dim = embedding_size
self.ffn_dim = ffn_size
self.variant = variant
self.activation = activation
self.dropout = nn.Dropout(p=dropout)
self.self_attention = MultiHeadAttention(
n_heads, embedding_size, dropout=attention_dropout
)
self.norm1 = nn.LayerNorm(embedding_size, eps=LAYER_NORM_EPS)
self.encoder_attention = MultiHeadAttention(
n_heads, embedding_size, dropout=attention_dropout
)
self.norm2 = nn.LayerNorm(embedding_size, eps=LAYER_NORM_EPS)
self.ffn = TransformerFFN(
embedding_size, ffn_size, relu_dropout=relu_dropout, activation=activation
)
self.norm3 = nn.LayerNorm(embedding_size, eps=LAYER_NORM_EPS)
def forward(self, x, encoder_output, encoder_mask, incr_state=None):
"""
Forward pass.
The incremental state is a dict with values for self- and encoder-attention
states.
"""
if incr_state is None:
incr_state = {}
decoder_mask = self._create_selfattn_mask(x)
# first self attn
residual = x
if self.variant == 'prelayernorm':
x = _normalize(x, self.norm1)
        # don't peek into the future!
x, final_self_attn_incr_state = self.self_attention(
query=x,
mask=decoder_mask,
incr_state=incr_state.get('self_attn'),
static_kv=False,
)
x = self.dropout(x) # --dropout
x = x + residual
if self.variant == 'aiayn' or self.variant == 'xlm':
x = _normalize(x, self.norm1)
residual = x
# encoder_attn_layer_norm norm 2
if self.variant == 'prelayernorm':
x = _normalize(x, self.norm2)
x, final_encoder_attn_incr_state = self.encoder_attention(
query=x,
key=encoder_output,
value=encoder_output,
mask=encoder_mask,
incr_state=incr_state.get('encoder_attn'),
static_kv=True,
)
x = self.dropout(x) # --dropout
x = residual + x
if self.variant == 'aiayn' or self.variant == 'xlm':
x = _normalize(x, self.norm2)
# finally the ffn
residual = x
if self.variant == 'prelayernorm':
x = _normalize(x, self.norm3)
x = self.ffn(x)
x = self.dropout(x) # --dropout
x = residual + x
if self.variant == 'aiayn' or self.variant == 'xlm':
x = _normalize(x, self.norm3)
new_incr_state = {
'self_attn': final_self_attn_incr_state,
'encoder_attn': final_encoder_attn_incr_state,
}
return x, new_incr_state
def _create_selfattn_mask(self, x):
        # figure out how many timesteps we need
bsz = x.size(0)
time = x.size(1)
# make sure that we don't look into the future
mask = torch.tril(x.new(time, time).fill_(1))
# broadcast across batch
mask = mask.unsqueeze(0).expand(bsz, -1, -1)
return mask
def reorder_incremental_state(
self, incremental_state: Dict[str, dict], inds: torch.Tensor
) -> Dict[str, dict]:
"""
Reorder all incremental-state tensors for this layer.
"""
attn_types = {
'self_attn': self.self_attention,
'encoder_attn': self.encoder_attention,
}
return {
attn_type: attn.reorder_incremental_state(
incremental_state[attn_type], inds
)
for attn_type, attn in attn_types.items()
}
class TransformerFFN(nn.Module):
"""
Implements the FFN part of the transformer.
"""
def __init__(self, dim, dim_hidden, relu_dropout=0, activation='relu'):
super(TransformerFFN, self).__init__()
self.relu_dropout = nn.Dropout(p=relu_dropout)
if activation == 'relu':
self.nonlinear = F.relu
elif activation == 'gelu':
self.nonlinear = gelu
else:
raise ValueError(
"Don't know how to handle --activation {}".format(activation)
)
self.lin1 = nn.Linear(dim, dim_hidden)
self.lin2 = nn.Linear(dim_hidden, dim)
nn.init.xavier_uniform_(self.lin1.weight)
nn.init.xavier_uniform_(self.lin2.weight)
# TODO: initialize biases to 0
def forward(self, x):
"""
Forward pass.
"""
x = self.nonlinear(self.lin1(x))
x = self.relu_dropout(x) # --relu-dropout
x = self.lin2(x)
return x
class BasicAttention(nn.Module):
"""
Implements simple/classical attention.
"""
def __init__(self, dim=1, attn='cosine', residual=False, get_weights=True):
super().__init__()
if attn == 'cosine':
self.cosine = nn.CosineSimilarity(dim=dim)
self.attn = attn
self.dim = dim
self.get_weights = get_weights
self.residual = residual
def forward(self, xs, ys, mask_ys=None, values=None):
"""
Compute attention.
Attend over ys with query xs to obtain weights, then apply weights to
        values (ys if values is None)
Args:
xs: B x query_len x dim (queries)
ys: B x key_len x dim (keys)
mask_ys: B x key_len (mask)
values: B x value_len x dim (values); if None, default to ys
"""
bsz = xs.size(0)
y_len = ys.size(1)
x_len = xs.size(1)
if self.attn == 'cosine':
l1 = self.cosine(xs, ys).unsqueeze(self.dim - 1)
else:
l1 = torch.bmm(xs, ys.transpose(1, 2))
if self.attn == 'sqrt':
d_k = ys.size(-1)
l1 = l1 / math.sqrt(d_k)
if mask_ys is not None:
attn_mask = (mask_ys == 0).view(bsz, 1, y_len)
attn_mask = attn_mask.repeat(1, x_len, 1)
            l1.masked_fill_(attn_mask, neginf(l1.dtype))
l2 = F.softmax(l1, dim=self.dim, dtype=torch.float).type_as(l1)
if values is None:
values = ys
lhs_emb = torch.bmm(l2, values)
        # add back the query
if self.residual:
lhs_emb = lhs_emb.add(xs)
if self.get_weights:
return lhs_emb.squeeze(self.dim - 1), l2
else:
return lhs_emb.squeeze(self.dim - 1)
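# Illustrative usage sketch (not part of the original module): exercises BasicAttention
# with the shape conventions from its docstring. The batch, sequence, and hidden sizes
# below are arbitrary assumptions chosen only for demonstration.
def _example_basic_attention():
    attn = BasicAttention(dim=2, attn='sqrt', get_weights=True)
    xs = torch.randn(4, 3, 16)  # B x query_len x dim (queries)
    ys = torch.randn(4, 5, 16)  # B x key_len x dim (keys, reused as values)
    mask_ys = torch.ones(4, 5)  # B x key_len; nonzero keeps a position, zero masks it
    attended, weights = attn(xs, ys, mask_ys=mask_ys)
    # attended: B x query_len x dim, weights: B x query_len x key_len
    return attended, weights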
class MultiHeadAttention(nn.Module):
"""
Implements MultiHeadAttention; this is the core workhorse of the Transformer.
See Vaswani (2017) for an extensive description.
"""
def __init__(self, n_heads, dim, dropout=0):
super(MultiHeadAttention, self).__init__()
self.n_heads = n_heads
self.dim = dim
self.attn_dropout = nn.Dropout(p=dropout) # --attention-dropout
self.q_lin = nn.Linear(dim, dim)
self.k_lin = nn.Linear(dim, dim)
self.v_lin = nn.Linear(dim, dim)
# TODO: merge for the initialization step
nn.init.xavier_normal_(self.q_lin.weight)
nn.init.xavier_normal_(self.k_lin.weight)
nn.init.xavier_normal_(self.v_lin.weight)
# and set biases to 0
self.out_lin = nn.Linear(dim, dim)
nn.init.xavier_normal_(self.out_lin.weight)
def forward( # type: ignore
# TODO: remove type ignore with pytorch 1.5:
# https://github.com/pytorch/pytorch/pull/31057
self,
query: torch.Tensor,
key: Optional[torch.Tensor] = None,
value: Optional[torch.Tensor] = None,
        mask: Optional[torch.Tensor] = None,
incr_state: Optional[Dict[str, torch.Tensor]] = None,
static_kv: bool = False,
) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]:
"""
Forward pass.
:param query: attention query
:param key: attention key
:param value: attention value
:param mask: tensor in which True means that we are allowing attention and False
means we are blocking it. Mask is:
- [B, key_len] (encoder self-attn and decoder enc/dec attn)
- [B, query_len, key_len] (decoder self-attn)
- [B, 1, 1] (decoder self-attn with incr_state caching)
:param incr_state: dictionary with values representing the previous states of
the key, value, and mask
:param static_kv: True if the key and value are held constant during decoding
(as in encoder/decoder attention)
:return: (final attended tensor, new incremental state)
"""
batch_size, query_len, dim = query.size()
assert (
dim == self.dim
), 'Dimensions do not match: {} query vs {} configured'.format(dim, self.dim)
assert mask is not None, 'Mask is None, please specify a mask'
n_heads = self.n_heads
dim_per_head = dim // n_heads
scale = math.sqrt(dim_per_head)
def prepare_head(tensor):
# input is [batch_size, seq_len, n_heads * dim_per_head]
# output is [batch_size * n_heads, seq_len, dim_per_head]
bsz, seq_len, _ = tensor.size()
            tensor = tensor.view(bsz, seq_len, n_heads, dim_per_head)
tensor = (
tensor.transpose(1, 2)
.contiguous()
.view(batch_size * n_heads, seq_len, dim_per_head)
)
return tensor
# q, k, v are the transformed values
if key is None and value is None:
# self attention
key = value = query
_, _key_len, dim = query.size()
elif value is None:
# key and value are the same, but query differs
# self attention
value = key
assert key is not None # let mypy know we sorted this
_, _key_len, dim = key.size()
q = prepare_head(self.q_lin(query))
k = prepare_head(self.k_lin(key))
v = prepare_head(self.v_lin(value))
# Prepend incremental states. For each of the key, value, and mask, see if
# a previous incremental state exists, and if so, reshape it to match the shape
# of the new state. Concatenate the previous and new states to match what the
# full state would have been if we had not cached. (If we are using static_kv,
# these three states are unchanging, so just re-use the cached states.)
if incr_state is None:
incr_state = {}
if 'prev_key' in incr_state:
prev_key = incr_state['prev_key'].view(
batch_size * n_heads, -1, dim_per_head
)
if static_kv:
k = prev_key
else:
k = torch.cat([prev_key, k], dim=1)
if 'prev_value' in incr_state:
prev_value = incr_state['prev_value'].view(
batch_size * n_heads, -1, dim_per_head
)
if static_kv:
v = prev_value
else:
v = torch.cat([prev_value, v], dim=1)
if 'prev_mask' in incr_state:
if static_kv:
mask = incr_state['prev_mask']
else:
mask = torch.cat([incr_state['prev_mask'], mask], dim=2)
# Prepend along the key_len dimension (analogous to
# incr_state['prev_key'])
# Save new incremental states. We reshape to allow for reordering along batch
# dimension.
new_incr_state = {
'prev_key': k.view(batch_size, n_heads, -1, dim_per_head),
'prev_value': v.view(batch_size, n_heads, -1, dim_per_head),
'prev_mask': mask,
}
full_key_len = k.size(1)
dot_prod = q.div_(scale).bmm(k.transpose(1, 2))
# [B * n_heads, query_len, key_len]
attn_mask = (
(mask == 0)
.view(batch_size, 1, -1, full_key_len)
.repeat(1, n_heads, 1, 1)
.expand(batch_size, n_heads, query_len, full_key_len)
.view(batch_size * n_heads, query_len, full_key_len)
)
assert attn_mask.shape == dot_prod.shape
dot_prod.masked_fill_(attn_mask, neginf(dot_prod.dtype))
attn_weights = F.softmax(dot_prod, dim=-1, dtype=torch.float).type_as(query)
attn_weights = self.attn_dropout(attn_weights) # --attention-dropout
attentioned = attn_weights.bmm(v)
attentioned = (
attentioned.type_as(query)
.view(batch_size, n_heads, query_len, dim_per_head)
.transpose(1, 2)
.contiguous()
.view(batch_size, query_len, dim)
)
out = self.out_lin(attentioned)
return out, new_incr_state
def reorder_incremental_state(
self, incremental_state: Dict[str, torch.Tensor], inds: torch.Tensor
) -> Dict[str, torch.Tensor]:
"""
Reorder the input incremental-state tensors.
"""
return {
key: torch.index_select(val, 0, inds.to(val.device)).contiguous()
for key, val in incremental_state.items()
}
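# Illustrative usage sketch (not part of the original module): drives MultiHeadAttention
# in self-attention mode one token at a time, threading the incremental state between
# steps the way the decoder does during generation. All sizes are arbitrary assumptions.
def _example_multi_head_attention_incremental():
    attn = MultiHeadAttention(n_heads=2, dim=8)
    bsz, full_len = 2, 4
    sequence = torch.randn(bsz, full_len, 8)
    incr_state = None
    outputs = []
    for t in range(full_len):
        step = sequence[:, t : t + 1, :]  # [bsz, 1, dim]: only the newest token
        mask = torch.ones(bsz, 1, 1)  # [B, 1, 1]: allow attention to the new position
        out, incr_state = attn(
            query=step, mask=mask, incr_state=incr_state, static_kv=False
        )
        outputs.append(out)
    return torch.cat(outputs, dim=1)  # [bsz, full_len, dim]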
|
the-stack_106_27102 | """Manages Git."""
from __future__ import unicode_literals
import os
import logging
from dvc.utils.compat import str, open
from dvc.utils import fix_env
from dvc.scm.base import (
Base,
SCMError,
FileNotInRepoError,
FileNotInTargetSubdirError,
)
from dvc.scm.git.tree import GitTree
logger = logging.getLogger(__name__)
DIFF_A_TREE = "a_tree"
DIFF_B_TREE = "b_tree"
DIFF_A_REF = "a_ref"
DIFF_B_REF = "b_ref"
DIFF_EQUAL = "equal"
class Git(Base):
"""Class for managing Git."""
GITIGNORE = ".gitignore"
GIT_DIR = ".git"
def __init__(self, root_dir=os.curdir, repo=None):
"""Git class constructor.
Requires `Repo` class from `git` module (from gitpython package).
"""
super(Git, self).__init__(root_dir, repo=repo)
import git
from git.exc import InvalidGitRepositoryError
try:
self.git = git.Repo(root_dir)
except InvalidGitRepositoryError:
msg = "{} is not a git repository"
raise SCMError(msg.format(root_dir))
# NOTE: fixing LD_LIBRARY_PATH for binary built by PyInstaller.
# http://pyinstaller.readthedocs.io/en/stable/runtime-information.html
env = fix_env(None)
libpath = env.get("LD_LIBRARY_PATH", None)
self.git.git.update_environment(LD_LIBRARY_PATH=libpath)
self.ignored_paths = []
self.files_to_track = []
@staticmethod
def is_repo(root_dir):
return os.path.isdir(Git._get_git_dir(root_dir))
@staticmethod
def is_submodule(root_dir):
return os.path.isfile(Git._get_git_dir(root_dir))
@staticmethod
def _get_git_dir(root_dir):
return os.path.join(root_dir, Git.GIT_DIR)
@property
def dir(self):
return self.git.git_dir
@property
def ignore_file(self):
return self.GITIGNORE
def _get_gitignore(self, path, ignore_file_dir=None):
if not ignore_file_dir:
ignore_file_dir = os.path.dirname(os.path.realpath(path))
assert os.path.isabs(path)
assert os.path.isabs(ignore_file_dir)
if not path.startswith(ignore_file_dir):
msg = (
"{} file has to be located in one of '{}' subdirectories"
", not outside '{}'"
)
raise FileNotInTargetSubdirError(
msg.format(self.GITIGNORE, path, ignore_file_dir)
)
entry = os.path.relpath(path, ignore_file_dir).replace(os.sep, "/")
# NOTE: using '/' prefix to make path unambiguous
if len(entry) > 0 and entry[0] != "/":
entry = "/" + entry
gitignore = os.path.join(ignore_file_dir, self.GITIGNORE)
if not gitignore.startswith(os.path.realpath(self.root_dir)):
raise FileNotInRepoError(path)
return entry, gitignore
def ignore(self, path, in_curr_dir=False):
base_dir = (
os.path.realpath(os.curdir)
if in_curr_dir
else os.path.dirname(path)
)
entry, gitignore = self._get_gitignore(path, base_dir)
ignore_list = []
if os.path.exists(gitignore):
with open(gitignore, "r") as f:
ignore_list = f.readlines()
if any(filter(lambda x: x.strip() == entry.strip(), ignore_list)):
return
msg = "Adding '{}' to '{}'.".format(
os.path.relpath(path), os.path.relpath(gitignore)
)
logger.info(msg)
self._add_entry_to_gitignore(entry, gitignore, ignore_list)
self.track_file(os.path.relpath(gitignore))
self.ignored_paths.append(path)
@staticmethod
def _add_entry_to_gitignore(entry, gitignore, ignore_list):
content = entry
if ignore_list:
content = "\n" + content
with open(gitignore, "a", encoding="utf-8") as fobj:
fobj.write(content)
def ignore_remove(self, path):
entry, gitignore = self._get_gitignore(path)
if not os.path.exists(gitignore):
return
with open(gitignore, "r") as fobj:
lines = fobj.readlines()
filtered = list(filter(lambda x: x.strip() != entry.strip(), lines))
with open(gitignore, "w") as fobj:
fobj.writelines(filtered)
self.track_file(os.path.relpath(gitignore))
def add(self, paths):
# NOTE: GitPython is not currently able to handle index version >= 3.
# See https://github.com/iterative/dvc/issues/610 for more details.
try:
self.git.index.add(paths)
except AssertionError:
msg = (
"failed to add '{}' to git. You can add those files"
" manually using 'git add'."
" See 'https://github.com/iterative/dvc/issues/610'"
" for more details.".format(str(paths))
)
logger.exception(msg)
def commit(self, msg):
self.git.index.commit(msg)
def checkout(self, branch, create_new=False):
if create_new:
self.git.git.checkout("HEAD", b=branch)
else:
self.git.git.checkout(branch)
def branch(self, branch):
self.git.git.branch(branch)
def tag(self, tag):
self.git.git.tag(tag)
def untracked_files(self):
files = self.git.untracked_files
return [os.path.join(self.git.working_dir, fname) for fname in files]
def is_tracked(self, path):
# it is equivalent to `bool(self.git.git.ls_files(path))` by
# functionality, but ls_files fails on unicode filenames
path = os.path.relpath(path, self.root_dir)
return path in [i[0] for i in self.git.index.entries]
def is_dirty(self):
return self.git.is_dirty()
def active_branch(self):
return self.git.active_branch.name
def list_branches(self):
return [h.name for h in self.git.heads]
def list_tags(self):
return [t.name for t in self.git.tags]
def _install_hook(self, name, cmd):
command = "dvc {}".format(cmd)
hook = os.path.join(self.root_dir, self.GIT_DIR, "hooks", name)
if os.path.isfile(hook):
with open(hook, "r+") as fobj:
if command not in fobj.read():
fobj.write("exec {command}\n".format(command=command))
else:
with open(hook, "w+") as fobj:
fobj.write(
"#!/bin/sh\n" "exec {command}\n".format(command=command)
)
os.chmod(hook, 0o777)
def install(self):
self._install_hook("post-checkout", "checkout")
self._install_hook("pre-commit", "status")
self._install_hook("pre-push", "push")
def cleanup_ignores(self):
for path in self.ignored_paths:
self.ignore_remove(path)
self.reset_ignores()
def reset_ignores(self):
self.ignored_paths = []
def remind_to_track(self):
if not self.files_to_track:
return
logger.info(
"\n"
"To track the changes with git run:\n"
"\n"
"\tgit add {files}".format(files=" ".join(self.files_to_track))
)
def track_file(self, path):
self.files_to_track.append(path)
def belongs_to_scm(self, path):
basename = os.path.basename(path)
path_parts = os.path.normpath(path).split(os.path.sep)
return basename == self.ignore_file or Git.GIT_DIR in path_parts
def get_tree(self, rev):
return GitTree(self.git, rev)
def _get_diff_trees(self, a_ref, b_ref):
"""Private method for getting the trees and commit hashes of 2 git
references.
Requires `gitdb` module (from gitpython package).
Args:
a_ref(str) - git reference
b_ref(str) - second git reference. If None, uses HEAD
Returns:
tuple - tuple with elements: (trees, commits)
"""
from gitdb.exc import BadObject, BadName
trees = {DIFF_A_TREE: None, DIFF_B_TREE: None}
commits = []
if b_ref is None:
b_ref = self.git.head.commit
try:
a_commit = self.git.git.rev_parse(a_ref, short=True)
b_commit = self.git.git.rev_parse(b_ref, short=True)
# See https://gitpython.readthedocs.io
# /en/2.1.11/reference.html#git.objects.base.Object.__str__
commits.append(a_commit)
commits.append(b_commit)
trees[DIFF_A_TREE] = self.get_tree(commits[0])
trees[DIFF_B_TREE] = self.get_tree(commits[1])
except (BadName, BadObject) as e:
raise SCMError("git problem", cause=e)
return trees, commits
def get_diff_trees(self, a_ref, b_ref=None):
"""Method for getting two repo trees between two git tag commits.
Returns the dvc hash names of changed file/directory
Args:
a_ref(str) - git reference
b_ref(str) - optional second git reference, default None
Returns:
dict - dictionary with keys: (a_tree, b_tree, a_ref, b_ref, equal)
"""
diff_dct = {DIFF_EQUAL: False}
trees, commits = self._get_diff_trees(a_ref, b_ref)
diff_dct[DIFF_A_REF] = commits[0]
diff_dct[DIFF_B_REF] = commits[1]
if commits[0] == commits[1]:
diff_dct[DIFF_EQUAL] = True
return diff_dct
diff_dct[DIFF_A_TREE] = trees[DIFF_A_TREE]
diff_dct[DIFF_B_TREE] = trees[DIFF_B_TREE]
return diff_dct
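# Illustrative usage sketch (not part of the original module): how the diff helpers
# above are typically called. The repository path and tag names are assumptions; this
# only works inside a real git repository that contains the referenced revisions.
def _example_get_diff_trees(repo_root=".", a_ref="v1.0", b_ref="v2.0"):
    scm = Git(root_dir=repo_root)
    diff = scm.get_diff_trees(a_ref, b_ref)
    if diff[DIFF_EQUAL]:
        return None
    # The trees can be walked to compare file contents between the two revisions.
    return diff[DIFF_A_REF], diff[DIFF_B_REF], diff[DIFF_A_TREE], diff[DIFF_B_TREE]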
|
the-stack_106_27104 |
import tensorflow as tf
import os
import numpy as np
from model.postprocess import postprocess
from tensorflow.keras import callbacks
from utils.coco_eval import CocoEvalidation
from model.nms import yolov4_nms
from tqdm import tqdm
class CocoMapCallback(callbacks.Callback):
    def __init__(self, pred_generator, model, args, mAP_writer):
self.args = args
self.pred_generator = pred_generator
self.model = model
self.mAP_writer = mAP_writer
self.max_coco_map = -1
self.max_coco_map_epoch = -1
self.best_weight_path = ''
groundtruth_boxes = []
groundtruth_classes = []
groundtruth_valids = []
print("loading dataset...")
with open(os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))),os.path.join('dataset',args.class_names))) as f:
class_names = [name.strip() for name in f.readlines()]
pred_generator_tqdm = tqdm(self.pred_generator, total=len(self.pred_generator))
for batch_img, batch_boxes, batch_valids in pred_generator_tqdm:
groundtruth_boxes.append(batch_boxes[..., 0:4])
groundtruth_classes.append(batch_boxes[..., 4])
groundtruth_valids.append(batch_valids)
groundtruth_boxes = np.concatenate(groundtruth_boxes, axis=0)
groundtruth_classes = np.concatenate(groundtruth_classes, axis=0)
groundtruth_valids = np.concatenate(groundtruth_valids, axis=0)
self.coco = CocoEvalidation(groundtruth_boxes,groundtruth_classes,groundtruth_valids,class_names)
def on_train_begin(self, logs={}):
pass
def on_epoch_end(self, epoch, logs=None):
if epoch < self.args.start_eval_epoch or epoch % self.args.eval_epoch_interval != 0:
return
detection_boxes = []
detection_scores = []
detection_classes = []
detection_valids = []
pred_generator_tqdm = tqdm(self.pred_generator, total=len(self.pred_generator))
for batch_img, _, _ in pred_generator_tqdm:
model_outputs = self.model.predict(batch_img)
pre_nms_decoded_boxes, pre_nms_scores = postprocess(model_outputs, self.args)
pre_nms_decoded_boxes = pre_nms_decoded_boxes.numpy()
pre_nms_scores = pre_nms_scores.numpy()
boxes, scores, classes, valid_detections = yolov4_nms(self.args)(pre_nms_decoded_boxes, pre_nms_scores,
self.args)
detection_boxes.append(boxes)
detection_scores.append(scores)
detection_classes.append(classes)
detection_valids.append(valid_detections)
pred_generator_tqdm.set_description("Evaluation...")
detection_boxes = np.concatenate(detection_boxes, axis=0)
detection_scores = np.concatenate(detection_scores, axis=0)
detection_classes = np.concatenate(detection_classes, axis=0)
detection_valids = np.concatenate(detection_valids, axis=0)
summary_metrics = self.coco.get_coco_mAP(detection_boxes, detection_scores, detection_classes, detection_valids)
if summary_metrics['Precision/[email protected]'] > self.max_coco_map:
self.max_coco_map = summary_metrics['Precision/[email protected]']
self.max_coco_map_epoch = epoch
self.best_weight_path = os.path.join(self.args.checkpoints_dir, 'best_weight_{}_{}_{:.3f}'.format(self.args.model_type,self.max_coco_map_epoch, self.max_coco_map))
self.model.save_weights(self.best_weight_path)
print("max_coco_map:{},epoch:{}".format(self.max_coco_map, self.max_coco_map_epoch))
with self.mAP_writer.as_default():
tf.summary.scalar("[email protected]", summary_metrics['Precision/[email protected]'], step=epoch)
self.mAP_writer.flush() |
the-stack_106_27105 | import transaction
from datetime import timedelta
from pyramid.exceptions import ConfigurationConflictError
from ptah.testing import PtahTestCase
class TestTokenType(PtahTestCase):
_init_ptah = False
_auto_commit = False
def test_token(self):
from ptah import token
tt = token.TokenType('unique-id', timedelta(minutes=20))
self.init_ptah()
t = token.service.generate(tt, 'data')
transaction.commit()
self.assertEqual(token.service.get(t), 'data')
self.assertEqual(token.service.get_bydata(tt, 'data'), t)
token.service.remove(t)
self.assertEqual(token.service.get(t), None)
def test_token_type(self):
from ptah import token
token.TokenType('unique-id', timedelta(minutes=20))
token.TokenType('unique-id', timedelta(minutes=20))
self.assertRaises(ConfigurationConflictError, self.init_ptah)
def test_token_remove_expired(self):
pass
|
the-stack_106_27106 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import pickle
from fvcore.common.checkpoint import Checkpointer
from fvcore.common.file_io import PathManager
import detectron2.utils.comm as comm
from .c2_model_loading import align_and_update_state_dicts
class DetectionCheckpointer(Checkpointer):
"""
Same as :class:`Checkpointer`, but is able to handle models in detectron & detectron2
model zoo, and apply conversions for legacy models.
"""
def __init__(self, model, save_dir="", is_base_model=False, *, save_to_disk=None, **checkpointables):
is_main_process = comm.is_main_process()
super().__init__(
model,
save_dir,
save_to_disk=is_main_process if save_to_disk is None else save_to_disk,
**checkpointables,
)
self.is_base_model = is_base_model
def _load_file(self, filename):
if filename.endswith(".pkl"):
with PathManager.open(filename, "rb") as f:
data = pickle.load(f, encoding="latin1")
if "model" in data and "__author__" in data:
# file is in Detectron2 model zoo format
self.logger.info("Reading a file from '{}'".format(data["__author__"]))
return data
else:
# assume file is from Caffe2 / Detectron1 model zoo
if "blobs" in data:
# Detection models have "blobs", but ImageNet models don't
data = data["blobs"]
data = {k: v for k, v in data.items() if not k.endswith("_momentum")}
return {"model": data, "__author__": "Caffe2", "matching_heuristics": True}
loaded = super()._load_file(filename) # load native pth checkpoint
if "model" not in loaded:
loaded = {"model": loaded}
return loaded
def _load_model(self, checkpoint):
if checkpoint.get("matching_heuristics", False):
self._convert_ndarray_to_tensor(checkpoint["model"])
# convert weights by name-matching heuristics
model_state_dict = self.model.state_dict()
align_and_update_state_dicts(
model_state_dict,
checkpoint["model"],
c2_conversion=checkpoint.get("__author__", None) == "Caffe2", is_base_model=self.is_base_model
)
checkpoint["model"] = model_state_dict
# for non-caffe2 models, use standard ways to load it
super()._load_model(checkpoint)
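# Illustrative usage sketch (not part of the original module): loading weights into a
# detectron2 model with the checkpointer above. The model object and checkpoint path
# are assumptions for demonstration; `load` is inherited from fvcore's Checkpointer.
def _example_load_checkpoint(model, path="model_final.pkl"):
    checkpointer = DetectionCheckpointer(model, save_dir="./output")
    checkpointer.load(path)  # dispatches to _load_file/_load_model defined above
    return model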
|
the-stack_106_27107 | #!/usr/bin/python3
import json
from argparse import ArgumentParser
def get_args():
p = ArgumentParser(description='Merge CLOSURE xdconf.ini files')
p.add_argument('-f', '--files', required=True, type=str, help='Input files')
p.add_argument('-o', '--outfile', required=False, type=str, default='xdconf.ini', help='Output file')
return p.parse_args()
def main():
args = get_args()
print('Options selected:')
for x in vars(args).items(): print(' %s: %s' % x)
files=args.files.split(' ')
if len(files) < 1:
print('Require at least one file to merge')
return
data = {'enclaves': []}
for f in files:
with open(f,'r') as inf:
cur = json.load(inf)
enc = cur['enclaves']
for e in enc:
# find matching enclave e1 in data['enclaves']
                found = False
for e1 in data['enclaves']:
if e['enclave'] == e1['enclave']:
                        found = True
                        break
# if e not in data['enclaves'], simply add enclave to data['enclaves']
if not found:
data['enclaves'].append(e)
else:
if e['inuri'] != e1['inuri'] or e['outuri'] != e1['outuri']:
                        print('URIs do not match; merge not possible')
                        exit(1)
# XXX: need to check for duplicates
print("Warning: Not checking for duplicate halmaps")
e1['halmaps'].extend(e['halmaps'])
with open(args.outfile, 'w') as outf:
json.dump(data,outf,indent=2)
if __name__ == '__main__':
main()
|
the-stack_106_27108 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
from setuptools import find_packages, setup
from url_filter import __author__, __version__
def read(fname):
with open(os.path.join(os.path.dirname(__file__), fname), "rb") as fid:
return fid.read().decode("utf-8")
authors = read("AUTHORS.rst")
history = read("HISTORY.rst").replace(".. :changelog:", "")
licence = read("LICENSE.rst")
readme = read("README.rst")
req = read("requirements.txt").splitlines()
dev_req = read("requirements-dev.txt").splitlines()[2:]
requirements = req + ["setuptools"]
test_requirements = req + dev_req
setup(
name="django-url-filter",
version=__version__,
author=__author__,
description="Django URL Filter provides a safe way to filter data via human-friendly URLs.",
long_description="\n\n".join([readme, history, authors, licence]),
url="https://github.com/miki725/django-url-filter",
license="MIT",
packages=find_packages(exclude=["test_project*", "tests*"]),
install_requires=requirements,
test_suite="tests",
tests_require=test_requirements,
keywords=" ".join(["django django-rest-framework"]),
classifiers=[
"Intended Audience :: Developers",
"Natural Language :: English",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Development Status :: 2 - Pre-Alpha",
],
)
|
the-stack_106_27109 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=line-too-long
r"""Creating the task and start trainer.
Supported modes: train, eval, train_and_eval, continuous_eval
The ProgressiveMaskedLM class is a subclass of ProgressivePolicy. This means
that a progressive trainer instead of a base trainer.
"""
# pylint: enable=line-too-long
# Lint as: python3
from absl import app
from absl import flags
import dataclasses
import gin
from grow_bert.progressive import masked_lm
from grow_bert.progressive import utils
from official.common import flags as tfm_flags
from official.modeling import optimization
from official.modeling.hyperparams import config_definitions as cfg
from official.modeling.progressive import train_lib
from official.modeling.progressive import trainer as prog_trainer_lib
from official.nlp.data import pretrain_dataloader
from official.utils.misc import distribution_utils
FLAGS = flags.FLAGS
AdamWeightDecay = optimization.AdamWeightDecayConfig
PolynomialLr = optimization.PolynomialLrConfig
PolynomialWarmupConfig = optimization.PolynomialWarmupConfig
@dataclasses.dataclass
class BertOptimizationConfig(optimization.OptimizationConfig):
"""Bert optimization config."""
optimizer: optimization.OptimizerConfig = optimization.OptimizerConfig(
type='adamw',
adamw=AdamWeightDecay(
weight_decay_rate=0.01,
exclude_from_weight_decay=['LayerNorm', 'layer_norm', 'bias']))
learning_rate: optimization.LrConfig = optimization.LrConfig(
type='polynomial',
polynomial=PolynomialLr(
initial_learning_rate=1e-4,
decay_steps=1000000,
end_learning_rate=0.0))
warmup: optimization.WarmupConfig = optimization.WarmupConfig(
type='polynomial', polynomial=PolynomialWarmupConfig(warmup_steps=10000))
def get_exp_config():
"""Get ExperimentConfig."""
params = cfg.ExperimentConfig(
task=masked_lm.MaskedLMConfig(
train_data=pretrain_dataloader.BertPretrainDataConfig(),
small_train_data=pretrain_dataloader.BertPretrainDataConfig(),
validation_data=pretrain_dataloader.BertPretrainDataConfig(
is_training=False)),
trainer=prog_trainer_lib.ProgressiveTrainerConfig(
progressive=masked_lm.ProgStackingConfig(),
optimizer_config=BertOptimizationConfig(),
train_steps=1000000),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None'
])
return utils.config_override(params, FLAGS)
def main(_):
gin.parse_config_files_and_bindings(FLAGS.gin_file, FLAGS.gin_params)
params = get_exp_config()
distribution_strategy = distribution_utils.get_distribution_strategy(
distribution_strategy=params.runtime.distribution_strategy,
all_reduce_alg=params.runtime.all_reduce_alg,
num_gpus=params.runtime.num_gpus,
tpu_address=params.runtime.tpu)
with distribution_strategy.scope():
task = masked_lm.ProgressiveMaskedLM(
strategy=distribution_strategy,
progressive_config=params.trainer.progressive,
optimizer_config=params.trainer.optimizer_config,
train_data_config=params.task.train_data,
small_train_data_config=params.task.small_train_data,
task_config=params.task)
train_lib.run_experiment(
distribution_strategy=distribution_strategy,
task=task,
mode=FLAGS.mode,
params=params,
model_dir=FLAGS.model_dir)
if __name__ == '__main__':
tfm_flags.define_flags()
app.run(main)
|
the-stack_106_27110 | """Tools for helping with ANSI color codes."""
import re
import sys
import warnings
import builtins
import typing as tp
from xonsh.platform import HAS_PYGMENTS
from xonsh.lazyasd import LazyDict, lazyobject
from xonsh.color_tools import (
RE_XONSH_COLOR,
BASE_XONSH_COLORS,
make_palette,
find_closest_color,
rgb2short,
rgb_to_256,
short_to_ints,
iscolor,
warn_deprecated_no_color,
)
from xonsh.tools import FORMATTER
# pygments modifier to ANSI escape code mapping
_PART_STYLE_CODE_MAPPING = {
"bold": "1",
"nobold": "21",
"italic": "3",
"noitalic": "23",
"underline": "4",
"nounderline": "24",
"blink": "5",
"noblink": "25",
"reverse": "7",
"noreverse": "27",
"hidden": "8",
"nohidden": "28",
}
def _ensure_color_map(style="default", cmap=None):
if cmap is not None:
pass
elif style in ANSI_STYLES:
cmap = ANSI_STYLES[style]
else:
try: # dynamically loading the style
cmap = ansi_style_by_name(style)
except Exception:
msg = "Could not find color style {0!r}, using default."
print(msg.format(style), file=sys.stderr)
builtins.__xonsh__.env["XONSH_COLOR_STYLE"] = "default"
cmap = ANSI_STYLES["default"]
return cmap
@lazyobject
def ANSI_ESCAPE_MODIFIERS():
return {
"BOLD": "1",
"FAINT": "2",
"ITALIC": "3",
"UNDERLINE": "4",
"SLOWBLINK": "5",
"FASTBLINK": "6",
"INVERT": "7",
"CONCEAL": "8",
"STRIKETHROUGH": "9",
"BOLDOFF": "21",
"FAINTOFF": "22",
"ITALICOFF": "23",
"UNDERLINEOFF": "24",
"BLINKOFF": "25",
"INVERTOFF": "27",
"REVEALOFF": "28",
"STRIKETHROUGHOFF": "29",
}
def ansi_color_name_to_escape_code(name, style="default", cmap=None):
"""Converts a color name to the inner part of an ANSI escape code"""
cmap = _ensure_color_map(style=style, cmap=cmap)
if name in cmap:
return cmap[name]
m = RE_XONSH_COLOR.match(name)
if m is None:
raise ValueError("{!r} is not a color!".format(name))
parts = m.groupdict()
# convert regex match into actual ANSI colors
if parts["reset"] is not None:
if parts["reset"] == "NO_COLOR":
warn_deprecated_no_color()
res = "0"
elif parts["bghex"] is not None:
res = "48;5;" + rgb_to_256(parts["bghex"][3:])[0]
elif parts["background"] is not None:
color = parts["color"]
if "#" in color:
res = "48;5;" + rgb_to_256(color[1:])[0]
else:
fgcolor = cmap[color]
if fgcolor.isdecimal():
res = str(int(fgcolor) + 10)
elif fgcolor.startswith("38;"):
res = "4" + fgcolor[1:]
elif fgcolor == "DEFAULT":
res = "39"
else:
msg = (
"when converting {!r}, did not recognize {!r} within "
"the following color map as a valid color:\n\n{!r}"
)
raise ValueError(msg.format(name, fgcolor, cmap))
else:
# have regular, non-background color
mods = parts["modifiers"]
if mods is None:
mods = []
else:
mods = mods.strip("_").split("_")
mods = [ANSI_ESCAPE_MODIFIERS[mod] for mod in mods]
color = parts["color"]
if "#" in color:
mods.append("38;5;" + rgb_to_256(color[1:])[0])
elif color == "DEFAULT":
res = "39"
else:
mods.append(cmap[color])
res = ";".join(mods)
cmap[name] = res
return res
def ansi_partial_color_format(template, style="default", cmap=None, hide=False):
"""Formats a template string but only with respect to the colors.
Another template string is returned, with the color values filled in.
Parameters
----------
template : str
The template string, potentially with color names.
style : str, optional
Style name to look up color map from.
cmap : dict, optional
A color map to use, this will prevent the color map from being
looked up via the style name.
hide : bool, optional
Whether to wrap the color codes in the \\001 and \\002 escape
codes, so that the color codes are not counted against line
length.
Returns
-------
A template string with the color values filled in.
"""
try:
return _ansi_partial_color_format_main(
template, style=style, cmap=cmap, hide=hide
)
except Exception:
return template
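# Illustrative usage sketch (not part of the original module): fills in only the color
# fields of a template while leaving ordinary format fields untouched. The prompt text
# and the '{user}' field are arbitrary assumptions for demonstration.
def _example_partial_color_format():
    template = "{GREEN}{user}{RESET} $ "
    colored = ansi_partial_color_format(template, style="default")
    # '{user}' survives for a later .format() call; the color names are replaced with
    # ANSI escape sequences (or the template is returned unchanged if formatting fails).
    return colored.format(user="snail")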
def _ansi_partial_color_format_main(template, style="default", cmap=None, hide=False):
cmap = _ensure_color_map(style=style, cmap=cmap)
overrides = builtins.__xonsh__.env["XONSH_STYLE_OVERRIDES"]
if overrides:
cmap.update(_style_dict_to_ansi(overrides))
esc = ("\001" if hide else "") + "\033["
m = "m" + ("\002" if hide else "")
bopen = "{"
bclose = "}"
colon = ":"
expl = "!"
toks = []
for literal, field, spec, conv in FORMATTER.parse(template):
toks.append(literal)
if field is None:
pass
elif field in cmap:
toks.extend([esc, cmap[field], m])
elif iscolor(field):
color = ansi_color_name_to_escape_code(field, cmap=cmap)
cmap[field] = color
toks.extend([esc, color, m])
elif field is not None:
toks.append(bopen)
toks.append(field)
if conv is not None and len(conv) > 0:
toks.append(expl)
toks.append(conv)
if spec is not None and len(spec) > 0:
toks.append(colon)
toks.append(spec)
toks.append(bclose)
return "".join(toks)
def ansi_color_style_names():
"""Returns an iterable of all ANSI color style names."""
return ANSI_STYLES.keys()
def ansi_color_style(style="default"):
"""Returns the current color map."""
if style in ANSI_STYLES:
cmap = ANSI_STYLES[style]
else:
msg = "Could not find color style {0!r}, using default.".format(style)
warnings.warn(msg, RuntimeWarning)
cmap = ANSI_STYLES["default"]
return cmap
def ansi_reverse_style(style="default", return_style=False):
"""Reverses an ANSI color style mapping so that escape codes map to
    colors. Style may either be a string or a mapping. May also return
the style it looked up.
"""
style = ansi_style_by_name(style) if isinstance(style, str) else style
reversed_style = {v: k for k, v in style.items()}
# add keys to make this more useful
updates = {
"1": "BOLD_",
"2": "FAINT_",
"3": "ITALIC_",
"4": "UNDERLINE_",
"5": "SLOWBLINK_",
"6": "FASTBLINK_",
"7": "INVERT_",
"8": "CONCEAL_",
"9": "STRIKETHROUGH_",
"21": "BOLDOFF_",
"22": "FAINTOFF_",
"23": "ITALICOFF_",
"24": "UNDERLINEOFF_",
"25": "BLINKOFF_",
"27": "INVERTOFF_",
"28": "REVEALOFF_",
"29": "STRIKETHROUGHOFF_",
"38": "SET_FOREGROUND_",
"48": "SET_BACKGROUND_",
"38;2": "SET_FOREGROUND_FAINT_",
"48;2": "SET_BACKGROUND_FAINT_",
"38;5": "SET_FOREGROUND_SLOWBLINK_",
"48;5": "SET_BACKGROUND_SLOWBLINK_",
}
for ec, name in reversed_style.items():
no_left_zero = ec.lstrip("0")
if no_left_zero.startswith(";"):
updates[no_left_zero[1:]] = name
elif no_left_zero != ec:
updates[no_left_zero] = name
reversed_style.update(updates)
# return results
if return_style:
return style, reversed_style
else:
return reversed_style
@lazyobject
def ANSI_ESCAPE_CODE_RE():
return re.compile(r"\001?(\033\[)?([0-9;]+)m?\002?")
@lazyobject
def ANSI_COLOR_NAME_SET_3INTS_RE():
return re.compile(r"(\w+_)?SET_(FORE|BACK)GROUND_FAINT_(\d+)_(\d+)_(\d+)")
@lazyobject
def ANSI_COLOR_NAME_SET_SHORT_RE():
return re.compile(r"(\w+_)?SET_(FORE|BACK)GROUND_SLOWBLINK_(\d+)")
def _color_name_from_ints(ints, background=False, prefix=None):
name = find_closest_color(ints, BASE_XONSH_COLORS)
if background:
name = "BACKGROUND_" + name
name = name if prefix is None else prefix + name
return name
_ANSI_COLOR_ESCAPE_CODE_TO_NAME_CACHE: tp.Dict[str, tp.Tuple[str, ...]] = {}
def ansi_color_escape_code_to_name(escape_code, style, reversed_style=None):
"""Converts an ANSI color code escape sequence to a tuple of color names
    in the provided style ('default' should almost always be the style). For example,
'0' becomes ('RESET',) and '32;41' becomes ('GREEN', 'BACKGROUND_RED').
The style keyword may either be a string, in which the style is looked up,
or an actual style dict. You can also provide a reversed style mapping,
too, which is just the keys/values of the style dict swapped. If reversed
style is not provided, it is computed.
"""
key = (escape_code, style)
    # TODO: is this cache ever used?
if key in _ANSI_COLOR_ESCAPE_CODE_TO_NAME_CACHE:
return _ANSI_COLOR_ESCAPE_CODE_TO_NAME_CACHE[key]
if reversed_style is None:
style, reversed_style = ansi_reverse_style(style, return_style=True)
# strip some actual escape codes, if needed.
match = ANSI_ESCAPE_CODE_RE.match(escape_code)
if not match:
msg = 'Invalid ANSI color sequence "{0}", using "RESET" instead.'.format(
escape_code
)
warnings.warn(msg, RuntimeWarning)
return ("RESET",)
ec = match.group(2)
names = []
n_ints = 0
seen_set_foreback = False
for e in ec.split(";"):
no_left_zero = e.lstrip("0") if len(e) > 1 else e
if seen_set_foreback and n_ints > 0:
names.append(e)
n_ints -= 1
if n_ints == 0:
seen_set_foreback = False
continue
else:
names.append(reversed_style.get(no_left_zero, no_left_zero))
# set the flags for next time
if "38" == e or "48" == e:
seen_set_foreback = True
elif seen_set_foreback and "2" == e:
n_ints = 3
elif seen_set_foreback and "5" == e:
n_ints = 1
# normalize names
n = ""
norm_names = []
prefixes = ""
for name in names:
if name in ("RESET", "NO_COLOR"):
# skip most '0' entries
continue
elif "BACKGROUND_" in name and n:
prefixes += n
n = ""
n = n + name if n else name
if n.endswith("_"):
continue
elif ANSI_COLOR_NAME_SET_SHORT_RE.match(n) is not None:
pre, fore_back, short = ANSI_COLOR_NAME_SET_SHORT_RE.match(n).groups()
n = _color_name_from_ints(
short_to_ints(short), background=(fore_back == "BACK"), prefix=pre
)
elif ANSI_COLOR_NAME_SET_3INTS_RE.match(n) is not None:
pre, fore_back, r, g, b = ANSI_COLOR_NAME_SET_3INTS_RE.match(n).groups()
n = _color_name_from_ints(
(int(r), int(g), int(b)), background=(fore_back == "BACK"), prefix=pre
)
elif "GROUND_FAINT_" in n:
# have 1 or 2, but not 3 ints
n += "_"
continue
# error check
if not iscolor(n):
msg = (
"Could not translate ANSI color code {escape_code!r} "
"into a known color in the palette. Specifically, the {n!r} "
"portion of {name!r} in {names!r} seems to missing."
)
raise ValueError(
msg.format(escape_code=escape_code, names=names, name=name, n=n)
)
norm_names.append(n)
n = ""
# check if we have pre- & post-fixes to apply to the last, non-background element
prefixes += n
if prefixes.endswith("_"):
for i in range(-1, -len(norm_names) - 1, -1):
if "BACKGROUND_" not in norm_names[i]:
norm_names[i] = prefixes + norm_names[i]
break
else:
# only have background colors, so select WHITE as default color
norm_names.append(prefixes + "WHITE")
# return
if len(norm_names) == 0:
return ("RESET",)
else:
return tuple(norm_names)
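# Illustrative usage sketch (not part of the original module): round-trips the raw
# escape sequences from the docstring examples through the reverse lookup above.
def _example_escape_code_to_name():
    assert ansi_color_escape_code_to_name("0", "default") == ("RESET",)
    assert ansi_color_escape_code_to_name("32;41", "default") == (
        "GREEN",
        "BACKGROUND_RED",
    )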
def _bw_style():
style = {
"RESET": "0",
"BLACK": "0;30",
"BLUE": "0;37",
"CYAN": "0;37",
"GREEN": "0;37",
"PURPLE": "0;37",
"RED": "0;37",
"WHITE": "0;37",
"YELLOW": "0;37",
"BACKGROUND_BLACK": "40",
"BACKGROUND_RED": "47",
"BACKGROUND_GREEN": "47",
"BACKGROUND_YELLOW": "47",
"BACKGROUND_BLUE": "47",
"BACKGROUND_PURPLE": "47",
"BACKGROUND_CYAN": "47",
"BACKGROUND_WHITE": "47",
"INTENSE_BLACK": "0;90",
"INTENSE_BLUE": "0;97",
"INTENSE_CYAN": "0;97",
"INTENSE_GREEN": "0;97",
"INTENSE_PURPLE": "0;97",
"INTENSE_RED": "0;97",
"INTENSE_WHITE": "0;97",
"INTENSE_YELLOW": "0;97",
}
return style
def _default_style():
style = {
# Reset
"RESET": "0", # Text Reset
# Regular Colors
"BLACK": "30", # BLACK
"RED": "31", # RED
"GREEN": "32", # GREEN
"YELLOW": "33", # YELLOW
"BLUE": "34", # BLUE
"PURPLE": "35", # PURPLE
"CYAN": "36", # CYAN
"WHITE": "37", # WHITE
# Background
"BACKGROUND_BLACK": "40", # BLACK
"BACKGROUND_RED": "41", # RED
"BACKGROUND_GREEN": "42", # GREEN
"BACKGROUND_YELLOW": "43", # YELLOW
"BACKGROUND_BLUE": "44", # BLUE
"BACKGROUND_PURPLE": "45", # PURPLE
"BACKGROUND_CYAN": "46", # CYAN
"BACKGROUND_WHITE": "47", # WHITE
# High Intensity
"INTENSE_BLACK": "90", # BLACK
"INTENSE_RED": "91", # RED
"INTENSE_GREEN": "92", # GREEN
"INTENSE_YELLOW": "93", # YELLOW
"INTENSE_BLUE": "94", # BLUE
"INTENSE_PURPLE": "95", # PURPLE
"INTENSE_CYAN": "96", # CYAN
"INTENSE_WHITE": "97", # WHITE
# High Intensity backgrounds
"BACKGROUND_INTENSE_BLACK": "100", # BLACK
"BACKGROUND_INTENSE_RED": "101", # RED
"BACKGROUND_INTENSE_GREEN": "102", # GREEN
"BACKGROUND_INTENSE_YELLOW": "103", # YELLOW
"BACKGROUND_INTENSE_BLUE": "104", # BLUE
"BACKGROUND_INTENSE_PURPLE": "105", # PURPLE
"BACKGROUND_INTENSE_CYAN": "106", # CYAN
"BACKGROUND_INTENSE_WHITE": "107", # WHITE
}
return style
def _monokai_style():
style = {
"RESET": "0",
"BLACK": "38;5;16",
"BLUE": "38;5;63",
"CYAN": "38;5;81",
"GREEN": "38;5;40",
"PURPLE": "38;5;89",
"RED": "38;5;124",
"WHITE": "38;5;188",
"YELLOW": "38;5;184",
"INTENSE_BLACK": "38;5;59",
"INTENSE_BLUE": "38;5;20",
"INTENSE_CYAN": "38;5;44",
"INTENSE_GREEN": "38;5;148",
"INTENSE_PURPLE": "38;5;141",
"INTENSE_RED": "38;5;197",
"INTENSE_WHITE": "38;5;15",
"INTENSE_YELLOW": "38;5;186",
}
return style
####################################
# Auto-generated below this line #
####################################
def _algol_style():
style = {
"BLACK": "38;5;59",
"BLUE": "38;5;59",
"CYAN": "38;5;59",
"GREEN": "38;5;59",
"INTENSE_BLACK": "38;5;59",
"INTENSE_BLUE": "38;5;102",
"INTENSE_CYAN": "38;5;102",
"INTENSE_GREEN": "38;5;102",
"INTENSE_PURPLE": "38;5;102",
"INTENSE_RED": "38;5;09",
"INTENSE_WHITE": "38;5;102",
"INTENSE_YELLOW": "38;5;102",
"RESET": "0",
"PURPLE": "38;5;59",
"RED": "38;5;09",
"WHITE": "38;5;102",
"YELLOW": "38;5;09",
}
return style
def _algol_nu_style():
style = {
"BLACK": "38;5;59",
"BLUE": "38;5;59",
"CYAN": "38;5;59",
"GREEN": "38;5;59",
"INTENSE_BLACK": "38;5;59",
"INTENSE_BLUE": "38;5;102",
"INTENSE_CYAN": "38;5;102",
"INTENSE_GREEN": "38;5;102",
"INTENSE_PURPLE": "38;5;102",
"INTENSE_RED": "38;5;09",
"INTENSE_WHITE": "38;5;102",
"INTENSE_YELLOW": "38;5;102",
"RESET": "0",
"PURPLE": "38;5;59",
"RED": "38;5;09",
"WHITE": "38;5;102",
"YELLOW": "38;5;09",
}
return style
def _autumn_style():
style = {
"BLACK": "38;5;18",
"BLUE": "38;5;19",
"CYAN": "38;5;37",
"GREEN": "38;5;34",
"INTENSE_BLACK": "38;5;59",
"INTENSE_BLUE": "38;5;33",
"INTENSE_CYAN": "38;5;33",
"INTENSE_GREEN": "38;5;64",
"INTENSE_PURPLE": "38;5;217",
"INTENSE_RED": "38;5;130",
"INTENSE_WHITE": "38;5;145",
"INTENSE_YELLOW": "38;5;217",
"RESET": "0",
"PURPLE": "38;5;90",
"RED": "38;5;124",
"WHITE": "38;5;145",
"YELLOW": "38;5;130",
}
return style
def _borland_style():
style = {
"BLACK": "38;5;16",
"BLUE": "38;5;18",
"CYAN": "38;5;30",
"GREEN": "38;5;28",
"INTENSE_BLACK": "38;5;59",
"INTENSE_BLUE": "38;5;21",
"INTENSE_CYAN": "38;5;194",
"INTENSE_GREEN": "38;5;102",
"INTENSE_PURPLE": "38;5;188",
"INTENSE_RED": "38;5;09",
"INTENSE_WHITE": "38;5;224",
"INTENSE_YELLOW": "38;5;188",
"RESET": "0",
"PURPLE": "38;5;90",
"RED": "38;5;124",
"WHITE": "38;5;145",
"YELLOW": "38;5;124",
}
return style
def _colorful_style():
style = {
"BLACK": "38;5;16",
"BLUE": "38;5;20",
"CYAN": "38;5;31",
"GREEN": "38;5;34",
"INTENSE_BLACK": "38;5;59",
"INTENSE_BLUE": "38;5;61",
"INTENSE_CYAN": "38;5;145",
"INTENSE_GREEN": "38;5;102",
"INTENSE_PURPLE": "38;5;217",
"INTENSE_RED": "38;5;166",
"INTENSE_WHITE": "38;5;15",
"INTENSE_YELLOW": "38;5;217",
"RESET": "0",
"PURPLE": "38;5;90",
"RED": "38;5;124",
"WHITE": "38;5;145",
"YELLOW": "38;5;130",
}
return style
def _emacs_style():
style = {
"BLACK": "38;5;28",
"BLUE": "38;5;18",
"CYAN": "38;5;26",
"GREEN": "38;5;34",
"INTENSE_BLACK": "38;5;59",
"INTENSE_BLUE": "38;5;26",
"INTENSE_CYAN": "38;5;145",
"INTENSE_GREEN": "38;5;34",
"INTENSE_PURPLE": "38;5;129",
"INTENSE_RED": "38;5;167",
"INTENSE_WHITE": "38;5;145",
"INTENSE_YELLOW": "38;5;145",
"RESET": "0",
"PURPLE": "38;5;90",
"RED": "38;5;124",
"WHITE": "38;5;145",
"YELLOW": "38;5;130",
}
return style
def _friendly_style():
style = {
"BLACK": "38;5;22",
"BLUE": "38;5;18",
"CYAN": "38;5;31",
"GREEN": "38;5;34",
"INTENSE_BLACK": "38;5;59",
"INTENSE_BLUE": "38;5;74",
"INTENSE_CYAN": "38;5;74",
"INTENSE_GREEN": "38;5;71",
"INTENSE_PURPLE": "38;5;134",
"INTENSE_RED": "38;5;167",
"INTENSE_WHITE": "38;5;15",
"INTENSE_YELLOW": "38;5;145",
"RESET": "0",
"PURPLE": "38;5;90",
"RED": "38;5;124",
"WHITE": "38;5;145",
"YELLOW": "38;5;166",
}
return style
def _fruity_style():
style = {
"BLACK": "38;5;16",
"BLUE": "38;5;32",
"CYAN": "38;5;32",
"GREEN": "38;5;28",
"INTENSE_BLACK": "38;5;59",
"INTENSE_BLUE": "38;5;33",
"INTENSE_CYAN": "38;5;33",
"INTENSE_GREEN": "38;5;102",
"INTENSE_PURPLE": "38;5;198",
"INTENSE_RED": "38;5;202",
"INTENSE_WHITE": "38;5;15",
"INTENSE_YELLOW": "38;5;187",
"RESET": "0",
"PURPLE": "38;5;198",
"RED": "38;5;09",
"WHITE": "38;5;187",
"YELLOW": "38;5;202",
}
return style
def _igor_style():
style = {
"BLACK": "38;5;34",
"BLUE": "38;5;21",
"CYAN": "38;5;30",
"GREEN": "38;5;34",
"INTENSE_BLACK": "38;5;30",
"INTENSE_BLUE": "38;5;21",
"INTENSE_CYAN": "38;5;30",
"INTENSE_GREEN": "38;5;34",
"INTENSE_PURPLE": "38;5;163",
"INTENSE_RED": "38;5;166",
"INTENSE_WHITE": "38;5;163",
"INTENSE_YELLOW": "38;5;166",
"RESET": "0",
"PURPLE": "38;5;163",
"RED": "38;5;166",
"WHITE": "38;5;163",
"YELLOW": "38;5;166",
}
return style
def _lovelace_style():
style = {
"BLACK": "38;5;59",
"BLUE": "38;5;25",
"CYAN": "38;5;29",
"GREEN": "38;5;65",
"INTENSE_BLACK": "38;5;59",
"INTENSE_BLUE": "38;5;25",
"INTENSE_CYAN": "38;5;102",
"INTENSE_GREEN": "38;5;29",
"INTENSE_PURPLE": "38;5;133",
"INTENSE_RED": "38;5;131",
"INTENSE_WHITE": "38;5;102",
"INTENSE_YELLOW": "38;5;136",
"RESET": "0",
"PURPLE": "38;5;133",
"RED": "38;5;124",
"WHITE": "38;5;102",
"YELLOW": "38;5;130",
}
return style
def _manni_style():
style = {
"BLACK": "38;5;16",
"BLUE": "38;5;18",
"CYAN": "38;5;30",
"GREEN": "38;5;40",
"INTENSE_BLACK": "38;5;59",
"INTENSE_BLUE": "38;5;105",
"INTENSE_CYAN": "38;5;45",
"INTENSE_GREEN": "38;5;113",
"INTENSE_PURPLE": "38;5;165",
"INTENSE_RED": "38;5;202",
"INTENSE_WHITE": "38;5;224",
"INTENSE_YELLOW": "38;5;221",
"RESET": "0",
"PURPLE": "38;5;165",
"RED": "38;5;124",
"WHITE": "38;5;145",
"YELLOW": "38;5;166",
}
return style
def _murphy_style():
style = {
"BLACK": "38;5;16",
"BLUE": "38;5;18",
"CYAN": "38;5;31",
"GREEN": "38;5;34",
"INTENSE_BLACK": "38;5;59",
"INTENSE_BLUE": "38;5;63",
"INTENSE_CYAN": "38;5;86",
"INTENSE_GREEN": "38;5;86",
"INTENSE_PURPLE": "38;5;213",
"INTENSE_RED": "38;5;209",
"INTENSE_WHITE": "38;5;15",
"INTENSE_YELLOW": "38;5;222",
"RESET": "0",
"PURPLE": "38;5;90",
"RED": "38;5;124",
"WHITE": "38;5;145",
"YELLOW": "38;5;166",
}
return style
def _native_style():
style = {
"BLACK": "38;5;52",
"BLUE": "38;5;67",
"CYAN": "38;5;31",
"GREEN": "38;5;64",
"INTENSE_BLACK": "38;5;59",
"INTENSE_BLUE": "38;5;68",
"INTENSE_CYAN": "38;5;87",
"INTENSE_GREEN": "38;5;70",
"INTENSE_PURPLE": "38;5;188",
"INTENSE_RED": "38;5;160",
"INTENSE_WHITE": "38;5;15",
"INTENSE_YELLOW": "38;5;214",
"RESET": "0",
"PURPLE": "38;5;59",
"RED": "38;5;124",
"WHITE": "38;5;145",
"YELLOW": "38;5;124",
}
return style
def _paraiso_dark_style():
style = {
"BLACK": "38;5;95",
"BLUE": "38;5;97",
"CYAN": "38;5;39",
"GREEN": "38;5;72",
"INTENSE_BLACK": "38;5;95",
"INTENSE_BLUE": "38;5;97",
"INTENSE_CYAN": "38;5;79",
"INTENSE_GREEN": "38;5;72",
"INTENSE_PURPLE": "38;5;188",
"INTENSE_RED": "38;5;203",
"INTENSE_WHITE": "38;5;188",
"INTENSE_YELLOW": "38;5;220",
"RESET": "0",
"PURPLE": "38;5;97",
"RED": "38;5;203",
"WHITE": "38;5;79",
"YELLOW": "38;5;214",
}
return style
def _paraiso_light_style():
style = {
"BLACK": "38;5;16",
"BLUE": "38;5;16",
"CYAN": "38;5;39",
"GREEN": "38;5;72",
"INTENSE_BLACK": "38;5;16",
"INTENSE_BLUE": "38;5;97",
"INTENSE_CYAN": "38;5;79",
"INTENSE_GREEN": "38;5;72",
"INTENSE_PURPLE": "38;5;97",
"INTENSE_RED": "38;5;203",
"INTENSE_WHITE": "38;5;79",
"INTENSE_YELLOW": "38;5;220",
"RESET": "0",
"PURPLE": "38;5;97",
"RED": "38;5;16",
"WHITE": "38;5;102",
"YELLOW": "38;5;214",
}
return style
def _pastie_style():
style = {
"BLACK": "38;5;16",
"BLUE": "38;5;20",
"CYAN": "38;5;25",
"GREEN": "38;5;28",
"INTENSE_BLACK": "38;5;59",
"INTENSE_BLUE": "38;5;61",
"INTENSE_CYAN": "38;5;194",
"INTENSE_GREEN": "38;5;34",
"INTENSE_PURPLE": "38;5;188",
"INTENSE_RED": "38;5;172",
"INTENSE_WHITE": "38;5;15",
"INTENSE_YELLOW": "38;5;188",
"RESET": "0",
"PURPLE": "38;5;125",
"RED": "38;5;124",
"WHITE": "38;5;145",
"YELLOW": "38;5;130",
}
return style
def _perldoc_style():
style = {
"BLACK": "38;5;18",
"BLUE": "38;5;18",
"CYAN": "38;5;31",
"GREEN": "38;5;34",
"INTENSE_BLACK": "38;5;59",
"INTENSE_BLUE": "38;5;134",
"INTENSE_CYAN": "38;5;145",
"INTENSE_GREEN": "38;5;28",
"INTENSE_PURPLE": "38;5;134",
"INTENSE_RED": "38;5;167",
"INTENSE_WHITE": "38;5;188",
"INTENSE_YELLOW": "38;5;188",
"RESET": "0",
"PURPLE": "38;5;90",
"RED": "38;5;124",
"WHITE": "38;5;145",
"YELLOW": "38;5;166",
}
return style
def _rrt_style():
style = {
"BLACK": "38;5;09",
"BLUE": "38;5;117",
"CYAN": "38;5;117",
"GREEN": "38;5;46",
"INTENSE_BLACK": "38;5;117",
"INTENSE_BLUE": "38;5;117",
"INTENSE_CYAN": "38;5;122",
"INTENSE_GREEN": "38;5;46",
"INTENSE_PURPLE": "38;5;213",
"INTENSE_RED": "38;5;09",
"INTENSE_WHITE": "38;5;188",
"INTENSE_YELLOW": "38;5;222",
"RESET": "0",
"PURPLE": "38;5;213",
"RED": "38;5;09",
"WHITE": "38;5;117",
"YELLOW": "38;5;09",
}
return style
def _tango_style():
style = {
"BLACK": "38;5;16",
"BLUE": "38;5;20",
"CYAN": "38;5;61",
"GREEN": "38;5;34",
"INTENSE_BLACK": "38;5;24",
"INTENSE_BLUE": "38;5;62",
"INTENSE_CYAN": "38;5;15",
"INTENSE_GREEN": "38;5;64",
"INTENSE_PURPLE": "38;5;15",
"INTENSE_RED": "38;5;09",
"INTENSE_WHITE": "38;5;15",
"INTENSE_YELLOW": "38;5;178",
"RESET": "0",
"PURPLE": "38;5;90",
"RED": "38;5;124",
"WHITE": "38;5;15",
"YELLOW": "38;5;94",
}
return style
def _trac_style():
style = {
"BLACK": "38;5;16",
"BLUE": "38;5;18",
"CYAN": "38;5;30",
"GREEN": "38;5;100",
"INTENSE_BLACK": "38;5;59",
"INTENSE_BLUE": "38;5;60",
"INTENSE_CYAN": "38;5;194",
"INTENSE_GREEN": "38;5;102",
"INTENSE_PURPLE": "38;5;188",
"INTENSE_RED": "38;5;137",
"INTENSE_WHITE": "38;5;224",
"INTENSE_YELLOW": "38;5;188",
"RESET": "0",
"PURPLE": "38;5;90",
"RED": "38;5;124",
"WHITE": "38;5;145",
"YELLOW": "38;5;100",
}
return style
def _vim_style():
style = {
"BLACK": "38;5;18",
"BLUE": "38;5;18",
"CYAN": "38;5;44",
"GREEN": "38;5;40",
"INTENSE_BLACK": "38;5;60",
"INTENSE_BLUE": "38;5;68",
"INTENSE_CYAN": "38;5;44",
"INTENSE_GREEN": "38;5;40",
"INTENSE_PURPLE": "38;5;164",
"INTENSE_RED": "38;5;09",
"INTENSE_WHITE": "38;5;188",
"INTENSE_YELLOW": "38;5;184",
"RESET": "0",
"PURPLE": "38;5;164",
"RED": "38;5;160",
"WHITE": "38;5;188",
"YELLOW": "38;5;160",
}
return style
def _vs_style():
style = {
"BLACK": "38;5;28",
"BLUE": "38;5;21",
"CYAN": "38;5;31",
"GREEN": "38;5;28",
"INTENSE_BLACK": "38;5;31",
"INTENSE_BLUE": "38;5;31",
"INTENSE_CYAN": "38;5;31",
"INTENSE_GREEN": "38;5;31",
"INTENSE_PURPLE": "38;5;31",
"INTENSE_RED": "38;5;09",
"INTENSE_WHITE": "38;5;31",
"INTENSE_YELLOW": "38;5;31",
"RESET": "0",
"PURPLE": "38;5;124",
"RED": "38;5;124",
"WHITE": "38;5;31",
"YELLOW": "38;5;124",
}
return style
def _xcode_style():
style = {
"BLACK": "38;5;16",
"BLUE": "38;5;20",
"CYAN": "38;5;60",
"GREEN": "38;5;28",
"INTENSE_BLACK": "38;5;60",
"INTENSE_BLUE": "38;5;20",
"INTENSE_CYAN": "38;5;60",
"INTENSE_GREEN": "38;5;60",
"INTENSE_PURPLE": "38;5;126",
"INTENSE_RED": "38;5;160",
"INTENSE_WHITE": "38;5;60",
"INTENSE_YELLOW": "38;5;94",
"RESET": "0",
"PURPLE": "38;5;126",
"RED": "38;5;160",
"WHITE": "38;5;60",
"YELLOW": "38;5;94",
}
return style
ANSI_STYLES = LazyDict(
{
"algol": _algol_style,
"algol_nu": _algol_nu_style,
"autumn": _autumn_style,
"borland": _borland_style,
"bw": _bw_style,
"colorful": _colorful_style,
"default": _default_style,
"emacs": _emacs_style,
"friendly": _friendly_style,
"fruity": _fruity_style,
"igor": _igor_style,
"lovelace": _lovelace_style,
"manni": _manni_style,
"monokai": _monokai_style,
"murphy": _murphy_style,
"native": _native_style,
"paraiso-dark": _paraiso_dark_style,
"paraiso-light": _paraiso_light_style,
"pastie": _pastie_style,
"perldoc": _perldoc_style,
"rrt": _rrt_style,
"tango": _tango_style,
"trac": _trac_style,
"vim": _vim_style,
"vs": _vs_style,
"xcode": _xcode_style,
},
globals(),
"ANSI_STYLES",
)
del (
_algol_style,
_algol_nu_style,
_autumn_style,
_borland_style,
_bw_style,
_colorful_style,
_default_style,
_emacs_style,
_friendly_style,
_fruity_style,
_igor_style,
_lovelace_style,
_manni_style,
_monokai_style,
_murphy_style,
_native_style,
_paraiso_dark_style,
_paraiso_light_style,
_pastie_style,
_perldoc_style,
_rrt_style,
_tango_style,
_trac_style,
_vim_style,
_vs_style,
_xcode_style,
)
#
# Dynamically generated styles
#
def make_ansi_style(palette):
"""Makes an ANSI color style from a color palette"""
style = {"RESET": "0"}
for name, t in BASE_XONSH_COLORS.items():
closest = find_closest_color(t, palette)
if len(closest) == 3:
closest = "".join([a * 2 for a in closest])
short = rgb2short(closest)[0]
style[name] = "38;5;" + short
return style
def _pygments_to_ansi_style(style):
"""Tries to convert the given pygments style to ANSI style.
Parameters
----------
style : pygments style value
Returns
-------
ANSI style
"""
ansi_style_list = []
parts = style.split(" ")
for part in parts:
if part in _PART_STYLE_CODE_MAPPING:
ansi_style_list.append(_PART_STYLE_CODE_MAPPING[part])
elif part[:3] == "bg:":
ansi_style_list.append("48;5;" + rgb2short(part[3:])[0])
else:
ansi_style_list.append("38;5;" + rgb2short(part)[0])
return ";".join(ansi_style_list)
def _style_dict_to_ansi(styles):
"""Converts pygments like style dict to ANSI rules"""
ansi_style = {}
for token, style in styles.items():
token = str(token) # convert pygments token to str
parts = token.split(".")
if len(parts) == 1 or parts[-2] == "Color":
ansi_style[parts[-1]] = _pygments_to_ansi_style(style)
return ansi_style
def register_custom_ansi_style(name, styles, base="default"):
"""Register custom ANSI style.
Parameters
----------
name : str
Style name.
styles : dict
Token (or str) -> style mapping.
base : str, optional
Base style to use as default.
"""
base_style = ANSI_STYLES[base].copy()
base_style.update(_style_dict_to_ansi(styles))
ANSI_STYLES[name] = base_style
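# Illustrative usage sketch (not part of the original module): registers a small custom
# style on top of the default palette. The style name and the single override are
# arbitrary assumptions; "Color.RED" is treated like a pygments token name by the
# helpers above, so it restyles the RED field.
def _example_register_custom_style():
    register_custom_ansi_style("demo-style", {"Color.RED": "bold #ff0000"})
    return ANSI_STYLES["demo-style"]["RED"]  # e.g. "1;38;5;196"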
def ansi_style_by_name(name):
"""Gets or makes an ANSI color style by name. If the styles does not
exist, it will look for a style using the pygments name.
"""
if name in ANSI_STYLES:
return ANSI_STYLES[name]
elif not HAS_PYGMENTS:
raise KeyError("could not find style {0!r}".format(name))
from xonsh.pygments_cache import get_style_by_name
pstyle = get_style_by_name(name)
palette = make_palette(pstyle.styles.values())
astyle = make_ansi_style(palette)
ANSI_STYLES[name] = astyle
return astyle
|
the-stack_106_27111 | #!/usr/bin/env python
"""
Created on Thu Jun 18 10:42:49 2015
Author: Oren Freifeld
Email: [email protected]
"""
from of.utils import ObsoleteError
raise ObsoleteError("Moved to Tessellation.py")
import numpy as np
from itertools import product # since we have an unknown number of lists
from numpy import binary_repr
from of.utils import ipshell
def create_cells(nCs,nC,XMINS,XMAXS,tess='II'):
N = len(nCs)
if len(XMINS)!=N:
raise ValueError(XMINS)
if len(XMAXS)!=N:
raise ValueError(XMAXS)
if tess != 'II':
raise ValueError(tess)
if np.prod(nCs) != nC:
raise ValueError(tess,np.prod(nCs), nCs)
nCs = map(int,nCs)
Vs = [np.linspace(m,M,nc+1) for (m,M,nc) in zip(XMINS,XMAXS,nCs)]
cells_verts=[]
cells_multiidx=[]
lists = map(range,nCs)
lists = lists[::-1]
Vs=Vs[::-1]
brs = [binary_repr(i,N) for i in range(2**N)]
for idx, items in enumerate(product(*lists)):
# print idx,'---',items
items=np.asarray(items)
verts_of_this_cell =[]
for i in range(2**N):
inc = np.asarray(map(int,brs[i]))
indices = items+inc
tmp = [Vs[j][indices[j]] for j in range(N)][::-1]
verts_of_this_cell.append(tmp+[1])
cells_multiidx.append( tuple(items.tolist()))
        verts_of_this_cell = [tuple(v) for v in verts_of_this_cell]
verts_of_this_cell = tuple(verts_of_this_cell)
cells_verts.append(verts_of_this_cell)
if len(cells_multiidx) != nC:
raise ValueError( len(cells_multiidx) , nC)
if len(cells_verts) != nC:
raise ValueError( len(cells_verts) , nC)
if tess == 'II':
# every cell should be made of 8 vertices (cube)
        if not all(len(set(v)) == 2**N for v in cells_verts):
            raise ValueError('every type-II cell must have 2**N distinct vertices')
else:
raise ValueError(tess)
    return cells_multiidx,cells_verts
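# Usage sketch (hypothetical, since importing this module raises
# ObsoleteError; see Tessellation.py for the live version): a 2x3 type-II
# grid over the unit square would have been created like this.
#     cells_multiidx, cells_verts = create_cells(
#         nCs=[2, 3], nC=6, XMINS=[0.0, 0.0], XMAXS=[1.0, 1.0], tess='II')
#     # Each cells_verts entry holds the 2**N homogeneous corner coordinates
#     # of one rectangular cell.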
the-stack_106_27113 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import msrest.serialization
class AccessPolicyResponse(msrest.serialization.Model):
"""Get Data Plane read only token response definition.
:param policy: The user access policy.
:type policy: ~dfaz_management_client.models.UserAccessPolicy
:param access_token: Data Plane read only access token.
:type access_token: str
:param data_plane_url: Data Plane service base URL.
:type data_plane_url: str
"""
_attribute_map = {
'policy': {'key': 'policy', 'type': 'UserAccessPolicy'},
'access_token': {'key': 'accessToken', 'type': 'str'},
'data_plane_url': {'key': 'dataPlaneUrl', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AccessPolicyResponse, self).__init__(**kwargs)
self.policy = kwargs.get('policy', None)
self.access_token = kwargs.get('access_token', None)
self.data_plane_url = kwargs.get('data_plane_url', None)
class Trigger(msrest.serialization.Model):
    """Azure data factory nested object which contains information about creating a pipeline run.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: ChainingTrigger, MultiplePipelineTrigger, RerunTumblingWindowTrigger, TumblingWindowTrigger.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param type: Required. Trigger type.Constant filled by server.
:type type: str
:param description: Trigger description.
:type description: str
:ivar runtime_state: Indicates if trigger is running or not. Updated when Start/Stop APIs are
called on the Trigger. Possible values include: "Started", "Stopped", "Disabled".
:vartype runtime_state: str or ~dfaz_management_client.models.TriggerRuntimeState
:param annotations: List of tags that can be used for describing the trigger.
:type annotations: list[object]
"""
_validation = {
'type': {'required': True},
'runtime_state': {'readonly': True},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'type': {'key': 'type', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'runtime_state': {'key': 'runtimeState', 'type': 'str'},
'annotations': {'key': 'annotations', 'type': '[object]'},
}
_subtype_map = {
'type': {'ChainingTrigger': 'ChainingTrigger', 'MultiplePipelineTrigger': 'MultiplePipelineTrigger', 'RerunTumblingWindowTrigger': 'RerunTumblingWindowTrigger', 'TumblingWindowTrigger': 'TumblingWindowTrigger'}
}
def __init__(
self,
**kwargs
):
super(Trigger, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.type = 'Trigger' # type: str
self.description = kwargs.get('description', None)
self.runtime_state = None
self.annotations = kwargs.get('annotations', None)
class MultiplePipelineTrigger(Trigger):
"""Base class for all triggers that support one to many model for trigger to pipeline.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: BlobEventsTrigger, BlobTrigger, ScheduleTrigger.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param type: Required. Trigger type.Constant filled by server.
:type type: str
:param description: Trigger description.
:type description: str
:ivar runtime_state: Indicates if trigger is running or not. Updated when Start/Stop APIs are
called on the Trigger. Possible values include: "Started", "Stopped", "Disabled".
:vartype runtime_state: str or ~dfaz_management_client.models.TriggerRuntimeState
:param annotations: List of tags that can be used for describing the trigger.
:type annotations: list[object]
:param pipelines: Pipelines that need to be started.
:type pipelines: list[~dfaz_management_client.models.TriggerPipelineReference]
"""
_validation = {
'type': {'required': True},
'runtime_state': {'readonly': True},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'type': {'key': 'type', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'runtime_state': {'key': 'runtimeState', 'type': 'str'},
'annotations': {'key': 'annotations', 'type': '[object]'},
'pipelines': {'key': 'pipelines', 'type': '[TriggerPipelineReference]'},
}
_subtype_map = {
'type': {'BlobEventsTrigger': 'BlobEventsTrigger', 'BlobTrigger': 'BlobTrigger', 'ScheduleTrigger': 'ScheduleTrigger'}
}
def __init__(
self,
**kwargs
):
super(MultiplePipelineTrigger, self).__init__(**kwargs)
self.type = 'MultiplePipelineTrigger' # type: str
self.pipelines = kwargs.get('pipelines', None)
class BlobEventsTrigger(MultiplePipelineTrigger):
"""Trigger that runs every time a Blob event occurs.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param type: Required. Trigger type.Constant filled by server.
:type type: str
:param description: Trigger description.
:type description: str
:ivar runtime_state: Indicates if trigger is running or not. Updated when Start/Stop APIs are
called on the Trigger. Possible values include: "Started", "Stopped", "Disabled".
:vartype runtime_state: str or ~dfaz_management_client.models.TriggerRuntimeState
:param annotations: List of tags that can be used for describing the trigger.
:type annotations: list[object]
:param pipelines: Pipelines that need to be started.
:type pipelines: list[~dfaz_management_client.models.TriggerPipelineReference]
:param blob_path_begins_with: The blob path must begin with the pattern provided for trigger to
fire. For example, '/records/blobs/december/' will only fire the trigger for blobs in the
december folder under the records container. At least one of these must be provided:
blobPathBeginsWith, blobPathEndsWith.
:type blob_path_begins_with: str
:param blob_path_ends_with: The blob path must end with the pattern provided for trigger to
fire. For example, 'december/boxes.csv' will only fire the trigger for blobs named boxes in a
december folder. At least one of these must be provided: blobPathBeginsWith, blobPathEndsWith.
:type blob_path_ends_with: str
:param ignore_empty_blobs: If set to true, blobs with zero bytes will be ignored.
:type ignore_empty_blobs: bool
:param events: Required. The type of events that cause this trigger to fire.
:type events: list[str or ~dfaz_management_client.models.BlobEventTypes]
:param scope: Required. The ARM resource ID of the Storage Account.
:type scope: str
"""
_validation = {
'type': {'required': True},
'runtime_state': {'readonly': True},
'events': {'required': True},
'scope': {'required': True},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'type': {'key': 'type', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'runtime_state': {'key': 'runtimeState', 'type': 'str'},
'annotations': {'key': 'annotations', 'type': '[object]'},
'pipelines': {'key': 'pipelines', 'type': '[TriggerPipelineReference]'},
'blob_path_begins_with': {'key': 'typeProperties.blobPathBeginsWith', 'type': 'str'},
'blob_path_ends_with': {'key': 'typeProperties.blobPathEndsWith', 'type': 'str'},
'ignore_empty_blobs': {'key': 'typeProperties.ignoreEmptyBlobs', 'type': 'bool'},
'events': {'key': 'typeProperties.events', 'type': '[str]'},
'scope': {'key': 'typeProperties.scope', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(BlobEventsTrigger, self).__init__(**kwargs)
self.type = 'BlobEventsTrigger' # type: str
self.blob_path_begins_with = kwargs.get('blob_path_begins_with', None)
self.blob_path_ends_with = kwargs.get('blob_path_ends_with', None)
self.ignore_empty_blobs = kwargs.get('ignore_empty_blobs', None)
self.events = kwargs['events']
self.scope = kwargs['scope']
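# Construction sketch (subscription/path values are placeholders): 'events'
# and 'scope' are mandatory and are read with kwargs[...] above, so omitting
# either raises a KeyError at construction time.
#     blob_events_trigger = BlobEventsTrigger(
#         events=["Microsoft.Storage.BlobCreated"],
#         scope="/subscriptions/<sub-id>/resourceGroups/<rg>/providers/"
#               "Microsoft.Storage/storageAccounts/<account>",
#         blob_path_begins_with="/records/blobs/december/",
#     )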
class BlobTrigger(MultiplePipelineTrigger):
"""Trigger that runs every time the selected Blob container changes.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param type: Required. Trigger type.Constant filled by server.
:type type: str
:param description: Trigger description.
:type description: str
:ivar runtime_state: Indicates if trigger is running or not. Updated when Start/Stop APIs are
called on the Trigger. Possible values include: "Started", "Stopped", "Disabled".
:vartype runtime_state: str or ~dfaz_management_client.models.TriggerRuntimeState
:param annotations: List of tags that can be used for describing the trigger.
:type annotations: list[object]
:param pipelines: Pipelines that need to be started.
:type pipelines: list[~dfaz_management_client.models.TriggerPipelineReference]
:param folder_path: Required. The path of the container/folder that will trigger the pipeline.
:type folder_path: str
:param max_concurrency: Required. The max number of parallel files to handle when it is
triggered.
:type max_concurrency: int
:param linked_service: Required. The Azure Storage linked service reference.
:type linked_service: ~dfaz_management_client.models.LinkedServiceReference
"""
_validation = {
'type': {'required': True},
'runtime_state': {'readonly': True},
'folder_path': {'required': True},
'max_concurrency': {'required': True},
'linked_service': {'required': True},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'type': {'key': 'type', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'runtime_state': {'key': 'runtimeState', 'type': 'str'},
'annotations': {'key': 'annotations', 'type': '[object]'},
'pipelines': {'key': 'pipelines', 'type': '[TriggerPipelineReference]'},
'folder_path': {'key': 'typeProperties.folderPath', 'type': 'str'},
'max_concurrency': {'key': 'typeProperties.maxConcurrency', 'type': 'int'},
'linked_service': {'key': 'typeProperties.linkedService', 'type': 'LinkedServiceReference'},
}
def __init__(
self,
**kwargs
):
super(BlobTrigger, self).__init__(**kwargs)
self.type = 'BlobTrigger' # type: str
self.folder_path = kwargs['folder_path']
self.max_concurrency = kwargs['max_concurrency']
self.linked_service = kwargs['linked_service']
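# Construction sketch (values are placeholders; assumes LinkedServiceReference
# elsewhere in this module accepts a reference_name kwarg, as in the standard
# Data Factory models):
#     blob_trigger = BlobTrigger(
#         folder_path="mycontainer/incoming/",
#         max_concurrency=4,
#         linked_service=LinkedServiceReference(reference_name="AzureBlobLS"),
#     )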
class ChainingTrigger(Trigger):
    """Trigger that allows the referenced pipeline to depend on other pipeline runs based on
    runDimension Name/Value pairs. Upstream pipelines should declare the same runDimension Name and
    their runs should have the values for those runDimensions. The referenced pipeline run would be
    triggered if the values for the runDimension match for all upstream pipeline runs.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param type: Required. Trigger type.Constant filled by server.
:type type: str
:param description: Trigger description.
:type description: str
:ivar runtime_state: Indicates if trigger is running or not. Updated when Start/Stop APIs are
called on the Trigger. Possible values include: "Started", "Stopped", "Disabled".
:vartype runtime_state: str or ~dfaz_management_client.models.TriggerRuntimeState
:param annotations: List of tags that can be used for describing the trigger.
:type annotations: list[object]
:param pipeline: Required. Pipeline for which runs are created when all upstream pipelines
complete successfully.
:type pipeline: ~dfaz_management_client.models.TriggerPipelineReference
:param depends_on: Required. Upstream Pipelines.
:type depends_on: list[~dfaz_management_client.models.PipelineReference]
:param run_dimension: Required. Run Dimension property that needs to be emitted by upstream
pipelines.
:type run_dimension: str
"""
_validation = {
'type': {'required': True},
'runtime_state': {'readonly': True},
'pipeline': {'required': True},
'depends_on': {'required': True},
'run_dimension': {'required': True},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'type': {'key': 'type', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'runtime_state': {'key': 'runtimeState', 'type': 'str'},
'annotations': {'key': 'annotations', 'type': '[object]'},
'pipeline': {'key': 'pipeline', 'type': 'TriggerPipelineReference'},
'depends_on': {'key': 'typeProperties.dependsOn', 'type': '[PipelineReference]'},
'run_dimension': {'key': 'typeProperties.runDimension', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ChainingTrigger, self).__init__(**kwargs)
self.type = 'ChainingTrigger' # type: str
self.pipeline = kwargs['pipeline']
self.depends_on = kwargs['depends_on']
self.run_dimension = kwargs['run_dimension']
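# Construction sketch (pipeline names are placeholders; assumes the usual
# PipelineReference/TriggerPipelineReference kwargs from the Data Factory
# models): the downstream pipeline only runs once every upstream run emits
# the same value for run_dimension.
#     chaining_trigger = ChainingTrigger(
#         pipeline=TriggerPipelineReference(
#             pipeline_reference=PipelineReference(reference_name="Downstream")),
#         depends_on=[PipelineReference(reference_name="UpstreamA")],
#         run_dimension="RunDate",
#     )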
class CloudError(msrest.serialization.Model):
"""The object that defines the structure of an Azure Data Factory error response.
All required parameters must be populated in order to send to Azure.
:param code: Required. Error code.
:type code: str
:param message: Required. Error message.
:type message: str
:param target: Property name/path in request associated with error.
:type target: str
:param details: Array with additional error details.
:type details: list[~dfaz_management_client.models.CloudError]
"""
_validation = {
'code': {'required': True},
'message': {'required': True},
}
_attribute_map = {
'code': {'key': 'error.code', 'type': 'str'},
'message': {'key': 'error.message', 'type': 'str'},
'target': {'key': 'error.target', 'type': 'str'},
'details': {'key': 'error.details', 'type': '[CloudError]'},
}
def __init__(
self,
**kwargs
):
super(CloudError, self).__init__(**kwargs)
self.code = kwargs['code']
self.message = kwargs['message']
self.target = kwargs.get('target', None)
self.details = kwargs.get('details', None)
class CustomSetupBase(msrest.serialization.Model):
"""The base definition of the custom setup.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: CmdkeySetup, ComponentSetup, EnvironmentVariableSetup.
All required parameters must be populated in order to send to Azure.
:param type: Required. The type of custom setup.Constant filled by server.
:type type: str
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
}
_subtype_map = {
'type': {'CmdkeySetup': 'CmdkeySetup', 'ComponentSetup': 'ComponentSetup', 'EnvironmentVariableSetup': 'EnvironmentVariableSetup'}
}
def __init__(
self,
**kwargs
):
super(CustomSetupBase, self).__init__(**kwargs)
self.type = None # type: Optional[str]
class CmdkeySetup(CustomSetupBase):
"""The custom setup of running cmdkey commands.
All required parameters must be populated in order to send to Azure.
:param type: Required. The type of custom setup.Constant filled by server.
:type type: str
:param target_name: Required. The server name of data source access.
:type target_name: object
:param user_name: Required. The user name of data source access.
:type user_name: object
:param password: Required. The password of data source access.
:type password: ~dfaz_management_client.models.SecretBase
"""
_validation = {
'type': {'required': True},
'target_name': {'required': True},
'user_name': {'required': True},
'password': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'target_name': {'key': 'typeProperties.targetName', 'type': 'object'},
'user_name': {'key': 'typeProperties.userName', 'type': 'object'},
'password': {'key': 'typeProperties.password', 'type': 'SecretBase'},
}
def __init__(
self,
**kwargs
):
super(CmdkeySetup, self).__init__(**kwargs)
self.type = 'CmdkeySetup' # type: str
self.target_name = kwargs['target_name']
self.user_name = kwargs['user_name']
self.password = kwargs['password']
class ComponentSetup(CustomSetupBase):
"""The custom setup of installing 3rd party components.
All required parameters must be populated in order to send to Azure.
:param type: Required. The type of custom setup.Constant filled by server.
:type type: str
:param component_name: Required. The name of the 3rd party component.
:type component_name: str
:param license_key: The license key to activate the component.
:type license_key: ~dfaz_management_client.models.SecretBase
"""
_validation = {
'type': {'required': True},
'component_name': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'component_name': {'key': 'typeProperties.componentName', 'type': 'str'},
'license_key': {'key': 'typeProperties.licenseKey', 'type': 'SecretBase'},
}
def __init__(
self,
**kwargs
):
super(ComponentSetup, self).__init__(**kwargs)
self.type = 'ComponentSetup' # type: str
self.component_name = kwargs['component_name']
self.license_key = kwargs.get('license_key', None)
class CreateLinkedIntegrationRuntimeRequest(msrest.serialization.Model):
"""The linked integration runtime information.
:param name: The name of the linked integration runtime.
:type name: str
:param subscription_id: The ID of the subscription that the linked integration runtime belongs
to.
:type subscription_id: str
:param data_factory_name: The name of the data factory that the linked integration runtime
belongs to.
:type data_factory_name: str
:param data_factory_location: The location of the data factory that the linked integration
runtime belongs to.
:type data_factory_location: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'subscription_id': {'key': 'subscriptionId', 'type': 'str'},
'data_factory_name': {'key': 'dataFactoryName', 'type': 'str'},
'data_factory_location': {'key': 'dataFactoryLocation', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CreateLinkedIntegrationRuntimeRequest, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.subscription_id = kwargs.get('subscription_id', None)
self.data_factory_name = kwargs.get('data_factory_name', None)
self.data_factory_location = kwargs.get('data_factory_location', None)
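# Construction sketch (IDs are placeholders): every field is optional and
# simply identifies the factory the linked runtime will be shared with.
#     linked_ir_request = CreateLinkedIntegrationRuntimeRequest(
#         name="linked-ir",
#         subscription_id="<subscription-id>",
#         data_factory_name="targetFactory",
#         data_factory_location="westus2",
#     )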
class DependencyReference(msrest.serialization.Model):
"""Referenced dependency.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: SelfDependencyTumblingWindowTriggerReference, TriggerDependencyReference.
All required parameters must be populated in order to send to Azure.
:param type: Required. The type of dependency reference.Constant filled by server.
:type type: str
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
}
_subtype_map = {
'type': {'SelfDependencyTumblingWindowTriggerReference': 'SelfDependencyTumblingWindowTriggerReference', 'TriggerDependencyReference': 'TriggerDependencyReference'}
}
def __init__(
self,
**kwargs
):
super(DependencyReference, self).__init__(**kwargs)
self.type = None # type: Optional[str]
class EntityReference(msrest.serialization.Model):
"""The entity reference.
:param type: The type of this referenced entity. Possible values include:
"IntegrationRuntimeReference", "LinkedServiceReference".
:type type: str or ~dfaz_management_client.models.IntegrationRuntimeEntityReferenceType
:param reference_name: The name of this referenced entity.
:type reference_name: str
"""
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'reference_name': {'key': 'referenceName', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(EntityReference, self).__init__(**kwargs)
self.type = kwargs.get('type', None)
self.reference_name = kwargs.get('reference_name', None)
class EnvironmentVariableSetup(CustomSetupBase):
"""The custom setup of setting environment variable.
All required parameters must be populated in order to send to Azure.
:param type: Required. The type of custom setup.Constant filled by server.
:type type: str
:param variable_name: Required. The name of the environment variable.
:type variable_name: str
:param variable_value: Required. The value of the environment variable.
:type variable_value: str
"""
_validation = {
'type': {'required': True},
'variable_name': {'required': True},
'variable_value': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'variable_name': {'key': 'typeProperties.variableName', 'type': 'str'},
'variable_value': {'key': 'typeProperties.variableValue', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(EnvironmentVariableSetup, self).__init__(**kwargs)
self.type = 'EnvironmentVariableSetup' # type: str
self.variable_name = kwargs['variable_name']
self.variable_value = kwargs['variable_value']
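# Construction sketch (variable name/value are made up): both fields are
# required plain strings, making this the simplest custom-setup type.
#     env_setup = EnvironmentVariableSetup(
#         variable_name="SSIS_LOG_LEVEL", variable_value="verbose")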
class Resource(msrest.serialization.Model):
"""Azure Data Factory top-level resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The resource identifier.
:vartype id: str
:ivar name: The resource name.
:vartype name: str
:ivar type: The resource type.
:vartype type: str
:param location: The resource location.
:type location: str
:param tags: A set of tags. The resource tags.
:type tags: dict[str, str]
:ivar e_tag: Etag identifies change in the resource.
:vartype e_tag: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'e_tag': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'e_tag': {'key': 'eTag', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Resource, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.location = kwargs.get('location', None)
self.tags = kwargs.get('tags', None)
self.e_tag = None
class Factory(Resource):
"""Factory resource type.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The resource identifier.
:vartype id: str
:ivar name: The resource name.
:vartype name: str
:ivar type: The resource type.
:vartype type: str
:param location: The resource location.
:type location: str
:param tags: A set of tags. The resource tags.
:type tags: dict[str, str]
:ivar e_tag: Etag identifies change in the resource.
:vartype e_tag: str
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:ivar provisioning_state: Factory provisioning state, example Succeeded.
:vartype provisioning_state: str
:ivar create_time: Time the factory was created in ISO8601 format.
:vartype create_time: ~datetime.datetime
:ivar version: Version of the factory.
:vartype version: str
:param repo_configuration: Git repo information of the factory.
:type repo_configuration: ~dfaz_management_client.models.FactoryRepoConfiguration
:param fake_identity: This is only for az test.
:type fake_identity: ~dfaz_management_client.models.FakeFactoryIdentity
:param zones: This is only for az test.
:type zones: list[str]
:param type_identity_type: The identity type. Currently the only supported type is
'SystemAssigned'. Possible values include: "SystemAssigned".
:type type_identity_type: str or ~dfaz_management_client.models.FactoryIdentityType
:ivar principal_id: The principal id of the identity.
:vartype principal_id: str
:ivar tenant_id: The client tenant id of the identity.
:vartype tenant_id: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'e_tag': {'readonly': True},
'provisioning_state': {'readonly': True},
'create_time': {'readonly': True},
'version': {'readonly': True},
'principal_id': {'readonly': True},
'tenant_id': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'e_tag': {'key': 'eTag', 'type': 'str'},
'additional_properties': {'key': '', 'type': '{object}'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'create_time': {'key': 'properties.createTime', 'type': 'iso-8601'},
'version': {'key': 'properties.version', 'type': 'str'},
'repo_configuration': {'key': 'properties.repoConfiguration', 'type': 'FactoryRepoConfiguration'},
'fake_identity': {'key': 'properties.fakeIdentity', 'type': 'FakeFactoryIdentity'},
'zones': {'key': 'properties.zones', 'type': '[str]'},
'type_identity_type': {'key': 'identity.type', 'type': 'str'},
'principal_id': {'key': 'identity.principalId', 'type': 'str'},
'tenant_id': {'key': 'identity.tenantId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Factory, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.provisioning_state = None
self.create_time = None
self.version = None
self.repo_configuration = kwargs.get('repo_configuration', None)
self.fake_identity = kwargs.get('fake_identity', None)
self.zones = kwargs.get('zones', None)
self.type_identity_type = kwargs.get('type_identity_type', None)
self.principal_id = None
self.tenant_id = None
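# Construction sketch (location/tags are placeholders): only writable fields
# are passed; provisioning_state, create_time, version, principal_id and
# tenant_id are read-only and stay None until the service populates them.
#     factory = Factory(
#         location="eastus",
#         tags={"env": "dev"},
#         zones=["1", "2"],
#         type_identity_type="SystemAssigned",
#     )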
class FactoryRepoConfiguration(msrest.serialization.Model):
"""Factory's git repo information.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: FactoryGitHubConfiguration, FactoryVstsConfiguration.
All required parameters must be populated in order to send to Azure.
:param type: Required. Type of repo configuration.Constant filled by server.
:type type: str
:param account_name: Required. Account name.
:type account_name: str
:param repository_name: Required. Repository name.
:type repository_name: str
:param collaboration_branch: Required. Collaboration branch.
:type collaboration_branch: str
:param root_folder: Required. Root folder.
:type root_folder: str
:param last_commit_id: Last commit id.
:type last_commit_id: str
"""
_validation = {
'type': {'required': True},
'account_name': {'required': True},
'repository_name': {'required': True},
'collaboration_branch': {'required': True},
'root_folder': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'account_name': {'key': 'accountName', 'type': 'str'},
'repository_name': {'key': 'repositoryName', 'type': 'str'},
'collaboration_branch': {'key': 'collaborationBranch', 'type': 'str'},
'root_folder': {'key': 'rootFolder', 'type': 'str'},
'last_commit_id': {'key': 'lastCommitId', 'type': 'str'},
}
_subtype_map = {
'type': {'FactoryGitHubConfiguration': 'FactoryGitHubConfiguration', 'FactoryVSTSConfiguration': 'FactoryVstsConfiguration'}
}
def __init__(
self,
**kwargs
):
super(FactoryRepoConfiguration, self).__init__(**kwargs)
self.type = None # type: Optional[str]
self.account_name = kwargs['account_name']
self.repository_name = kwargs['repository_name']
self.collaboration_branch = kwargs['collaboration_branch']
self.root_folder = kwargs['root_folder']
self.last_commit_id = kwargs.get('last_commit_id', None)
class FactoryGitHubConfiguration(FactoryRepoConfiguration):
"""Factory's GitHub repo information.
All required parameters must be populated in order to send to Azure.
:param type: Required. Type of repo configuration.Constant filled by server.
:type type: str
:param account_name: Required. Account name.
:type account_name: str
:param repository_name: Required. Repository name.
:type repository_name: str
:param collaboration_branch: Required. Collaboration branch.
:type collaboration_branch: str
:param root_folder: Required. Root folder.
:type root_folder: str
:param last_commit_id: Last commit id.
:type last_commit_id: str
:param host_name: GitHub Enterprise host name. For example: https://github.mydomain.com.
:type host_name: str
"""
_validation = {
'type': {'required': True},
'account_name': {'required': True},
'repository_name': {'required': True},
'collaboration_branch': {'required': True},
'root_folder': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'account_name': {'key': 'accountName', 'type': 'str'},
'repository_name': {'key': 'repositoryName', 'type': 'str'},
'collaboration_branch': {'key': 'collaborationBranch', 'type': 'str'},
'root_folder': {'key': 'rootFolder', 'type': 'str'},
'last_commit_id': {'key': 'lastCommitId', 'type': 'str'},
'host_name': {'key': 'hostName', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(FactoryGitHubConfiguration, self).__init__(**kwargs)
self.type = 'FactoryGitHubConfiguration' # type: str
self.host_name = kwargs.get('host_name', None)
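# Construction sketch (repo details are placeholders): the four fields
# required by FactoryRepoConfiguration must be supplied; host_name is only
# needed for GitHub Enterprise.
#     repo_config = FactoryGitHubConfiguration(
#         account_name="contoso",
#         repository_name="adf-pipelines",
#         collaboration_branch="main",
#         root_folder="/",
#     )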
class FactoryListResponse(msrest.serialization.Model):
"""A list of factory resources.
All required parameters must be populated in order to send to Azure.
:param value: Required. List of factories.
:type value: list[~dfaz_management_client.models.Factory]
:param next_link: The link to the next page of results, if any remaining results exist.
:type next_link: str
"""
_validation = {
'value': {'required': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[Factory]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(FactoryListResponse, self).__init__(**kwargs)
self.value = kwargs['value']
self.next_link = kwargs.get('next_link', None)
class FactoryRepoUpdate(msrest.serialization.Model):
"""Factory's git repo information.
:param factory_resource_id: The factory resource id.
:type factory_resource_id: str
:param repo_configuration: Git repo information of the factory.
:type repo_configuration: ~dfaz_management_client.models.FactoryRepoConfiguration
"""
_attribute_map = {
'factory_resource_id': {'key': 'factoryResourceId', 'type': 'str'},
'repo_configuration': {'key': 'repoConfiguration', 'type': 'FactoryRepoConfiguration'},
}
def __init__(
self,
**kwargs
):
super(FactoryRepoUpdate, self).__init__(**kwargs)
self.factory_resource_id = kwargs.get('factory_resource_id', None)
self.repo_configuration = kwargs.get('repo_configuration', None)
class FactoryUpdateParameters(msrest.serialization.Model):
"""Parameters for updating a factory resource.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param tags: A set of tags. The resource tags.
:type tags: dict[str, str]
:param type: Required. The identity type. Currently the only supported type is
'SystemAssigned'. Possible values include: "SystemAssigned".
:type type: str or ~dfaz_management_client.models.FactoryIdentityType
:ivar principal_id: The principal id of the identity.
:vartype principal_id: str
:ivar tenant_id: The client tenant id of the identity.
:vartype tenant_id: str
"""
_validation = {
'type': {'required': True},
'principal_id': {'readonly': True},
'tenant_id': {'readonly': True},
}
_attribute_map = {
'tags': {'key': 'tags', 'type': '{str}'},
'type': {'key': 'identity.type', 'type': 'str'},
'principal_id': {'key': 'identity.principalId', 'type': 'str'},
'tenant_id': {'key': 'identity.tenantId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(FactoryUpdateParameters, self).__init__(**kwargs)
self.tags = kwargs.get('tags', None)
self.type = kwargs['type']
self.principal_id = None
self.tenant_id = None
class FactoryVstsConfiguration(FactoryRepoConfiguration):
"""Factory's VSTS repo information.
All required parameters must be populated in order to send to Azure.
:param type: Required. Type of repo configuration.Constant filled by server.
:type type: str
:param account_name: Required. Account name.
:type account_name: str
:param repository_name: Required. Repository name.
:type repository_name: str
:param collaboration_branch: Required. Collaboration branch.
:type collaboration_branch: str
:param root_folder: Required. Root folder.
:type root_folder: str
:param last_commit_id: Last commit id.
:type last_commit_id: str
:param project_name: Required. VSTS project name.
:type project_name: str
:param tenant_id: VSTS tenant id.
:type tenant_id: str
"""
_validation = {
'type': {'required': True},
'account_name': {'required': True},
'repository_name': {'required': True},
'collaboration_branch': {'required': True},
'root_folder': {'required': True},
'project_name': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'account_name': {'key': 'accountName', 'type': 'str'},
'repository_name': {'key': 'repositoryName', 'type': 'str'},
'collaboration_branch': {'key': 'collaborationBranch', 'type': 'str'},
'root_folder': {'key': 'rootFolder', 'type': 'str'},
'last_commit_id': {'key': 'lastCommitId', 'type': 'str'},
'project_name': {'key': 'projectName', 'type': 'str'},
'tenant_id': {'key': 'tenantId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(FactoryVstsConfiguration, self).__init__(**kwargs)
self.type = 'FactoryVSTSConfiguration' # type: str
self.project_name = kwargs['project_name']
self.tenant_id = kwargs.get('tenant_id', None)
class FakeFactoryIdentity(msrest.serialization.Model):
"""This is only for az test.
All required parameters must be populated in order to send to Azure.
:param name: Required. ..
:type name: str
:param zones_inside: sample of simple array.
:type zones_inside: list[str]
"""
_validation = {
'name': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'zones_inside': {'key': 'zonesInside', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
super(FakeFactoryIdentity, self).__init__(**kwargs)
self.name = kwargs['name']
self.zones_inside = kwargs.get('zones_inside', None)
class GitHubAccessTokenRequest(msrest.serialization.Model):
"""Get GitHub access token request definition.
All required parameters must be populated in order to send to Azure.
:param git_hub_access_code: Required. GitHub access code.
:type git_hub_access_code: str
:param git_hub_client_id: GitHub application client ID.
:type git_hub_client_id: str
:param git_hub_access_token_base_url: Required. GitHub access token base URL.
:type git_hub_access_token_base_url: str
"""
_validation = {
'git_hub_access_code': {'required': True},
'git_hub_access_token_base_url': {'required': True},
}
_attribute_map = {
'git_hub_access_code': {'key': 'gitHubAccessCode', 'type': 'str'},
'git_hub_client_id': {'key': 'gitHubClientId', 'type': 'str'},
'git_hub_access_token_base_url': {'key': 'gitHubAccessTokenBaseUrl', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(GitHubAccessTokenRequest, self).__init__(**kwargs)
self.git_hub_access_code = kwargs['git_hub_access_code']
self.git_hub_client_id = kwargs.get('git_hub_client_id', None)
self.git_hub_access_token_base_url = kwargs['git_hub_access_token_base_url']
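# Construction sketch (the access code is a placeholder; the token URL shown
# is the usual GitHub OAuth endpoint, adjust for GitHub Enterprise):
#     token_request = GitHubAccessTokenRequest(
#         git_hub_access_code="<oauth-code>",
#         git_hub_access_token_base_url="https://github.com/login/oauth/access_token",
#     )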
class GitHubAccessTokenResponse(msrest.serialization.Model):
"""Get GitHub access token response definition.
:param git_hub_access_token: GitHub access token.
:type git_hub_access_token: str
"""
_attribute_map = {
'git_hub_access_token': {'key': 'gitHubAccessToken', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(GitHubAccessTokenResponse, self).__init__(**kwargs)
self.git_hub_access_token = kwargs.get('git_hub_access_token', None)
class IntegrationRuntime(msrest.serialization.Model):
"""Azure Data Factory nested object which serves as a compute resource for activities.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: ManagedIntegrationRuntime, SelfHostedIntegrationRuntime.
All required parameters must be populated in order to send to Azure.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param type: Required. Type of integration runtime.Constant filled by server. Possible values
include: "Managed", "SelfHosted".
:type type: str or ~dfaz_management_client.models.IntegrationRuntimeType
:param description: Integration runtime description.
:type description: str
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'type': {'key': 'type', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
}
_subtype_map = {
'type': {'Managed': 'ManagedIntegrationRuntime', 'SelfHosted': 'SelfHostedIntegrationRuntime'}
}
def __init__(
self,
**kwargs
):
super(IntegrationRuntime, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.type = 'IntegrationRuntime' # type: str
self.description = kwargs.get('description', None)
class IntegrationRuntimeAuthKeys(msrest.serialization.Model):
"""The integration runtime authentication keys.
:param auth_key1: The primary integration runtime authentication key.
:type auth_key1: str
:param auth_key2: The secondary integration runtime authentication key.
:type auth_key2: str
"""
_attribute_map = {
'auth_key1': {'key': 'authKey1', 'type': 'str'},
'auth_key2': {'key': 'authKey2', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(IntegrationRuntimeAuthKeys, self).__init__(**kwargs)
self.auth_key1 = kwargs.get('auth_key1', None)
self.auth_key2 = kwargs.get('auth_key2', None)
class IntegrationRuntimeComputeProperties(msrest.serialization.Model):
"""The compute resource properties for managed integration runtime.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
    :param location: The location for managed integration runtime. The supported regions could be
     found on
     https://docs.microsoft.com/en-us/azure/data-factory/data-factory-data-movement-activities.
:type location: str
:param node_size: The node size requirement to managed integration runtime.
:type node_size: str
:param number_of_nodes: The required number of nodes for managed integration runtime.
:type number_of_nodes: int
:param max_parallel_executions_per_node: Maximum parallel executions count per node for managed
integration runtime.
:type max_parallel_executions_per_node: int
:param data_flow_properties: Data flow properties for managed integration runtime.
:type data_flow_properties: ~dfaz_management_client.models.IntegrationRuntimeDataFlowProperties
:param v_net_properties: VNet properties for managed integration runtime.
:type v_net_properties: ~dfaz_management_client.models.IntegrationRuntimeVNetProperties
"""
_validation = {
'number_of_nodes': {'minimum': 1},
'max_parallel_executions_per_node': {'minimum': 1},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'location': {'key': 'location', 'type': 'str'},
'node_size': {'key': 'nodeSize', 'type': 'str'},
'number_of_nodes': {'key': 'numberOfNodes', 'type': 'int'},
'max_parallel_executions_per_node': {'key': 'maxParallelExecutionsPerNode', 'type': 'int'},
'data_flow_properties': {'key': 'dataFlowProperties', 'type': 'IntegrationRuntimeDataFlowProperties'},
'v_net_properties': {'key': 'vNetProperties', 'type': 'IntegrationRuntimeVNetProperties'},
}
def __init__(
self,
**kwargs
):
super(IntegrationRuntimeComputeProperties, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.location = kwargs.get('location', None)
self.node_size = kwargs.get('node_size', None)
self.number_of_nodes = kwargs.get('number_of_nodes', None)
self.max_parallel_executions_per_node = kwargs.get('max_parallel_executions_per_node', None)
self.data_flow_properties = kwargs.get('data_flow_properties', None)
self.v_net_properties = kwargs.get('v_net_properties', None)
class IntegrationRuntimeConnectionInfo(msrest.serialization.Model):
"""Connection information for encrypting the on-premises data source credentials.
Variables are only populated by the server, and will be ignored when sending a request.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:ivar service_token: The token generated in service. Callers use this token to authenticate to
integration runtime.
:vartype service_token: str
:ivar identity_cert_thumbprint: The integration runtime SSL certificate thumbprint. Click-Once
application uses it to do server validation.
:vartype identity_cert_thumbprint: str
:ivar host_service_uri: The on-premises integration runtime host URL.
:vartype host_service_uri: str
:ivar version: The integration runtime version.
:vartype version: str
:ivar public_key: The public key for encrypting a credential when transferring the credential
to the integration runtime.
:vartype public_key: str
:ivar is_identity_cert_exprired: Whether the identity certificate is expired.
:vartype is_identity_cert_exprired: bool
"""
_validation = {
'service_token': {'readonly': True},
'identity_cert_thumbprint': {'readonly': True},
'host_service_uri': {'readonly': True},
'version': {'readonly': True},
'public_key': {'readonly': True},
'is_identity_cert_exprired': {'readonly': True},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'service_token': {'key': 'serviceToken', 'type': 'str'},
'identity_cert_thumbprint': {'key': 'identityCertThumbprint', 'type': 'str'},
'host_service_uri': {'key': 'hostServiceUri', 'type': 'str'},
'version': {'key': 'version', 'type': 'str'},
'public_key': {'key': 'publicKey', 'type': 'str'},
'is_identity_cert_exprired': {'key': 'isIdentityCertExprired', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
super(IntegrationRuntimeConnectionInfo, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.service_token = None
self.identity_cert_thumbprint = None
self.host_service_uri = None
self.version = None
self.public_key = None
self.is_identity_cert_exprired = None
class IntegrationRuntimeCustomSetupScriptProperties(msrest.serialization.Model):
"""Custom setup script properties for a managed dedicated integration runtime.
:param blob_container_uri: The URI of the Azure blob container that contains the custom setup
script.
:type blob_container_uri: str
:param sas_token: The SAS token of the Azure blob container.
:type sas_token: ~dfaz_management_client.models.SecureString
"""
_attribute_map = {
'blob_container_uri': {'key': 'blobContainerUri', 'type': 'str'},
'sas_token': {'key': 'sasToken', 'type': 'SecureString'},
}
def __init__(
self,
**kwargs
):
super(IntegrationRuntimeCustomSetupScriptProperties, self).__init__(**kwargs)
self.blob_container_uri = kwargs.get('blob_container_uri', None)
self.sas_token = kwargs.get('sas_token', None)
class IntegrationRuntimeDataFlowProperties(msrest.serialization.Model):
"""Data flow properties for managed integration runtime.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param compute_type: Compute type of the cluster which will execute data flow job. Possible
values include: "General", "MemoryOptimized", "ComputeOptimized".
:type compute_type: str or ~dfaz_management_client.models.DataFlowComputeType
:param core_count: Core count of the cluster which will execute data flow job. Supported values
are: 8, 16, 32, 48, 80, 144 and 272.
:type core_count: int
:param time_to_live: Time to live (in minutes) setting of the cluster which will execute data
flow job.
:type time_to_live: int
"""
_validation = {
'time_to_live': {'minimum': 0},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'compute_type': {'key': 'computeType', 'type': 'str'},
'core_count': {'key': 'coreCount', 'type': 'int'},
'time_to_live': {'key': 'timeToLive', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(IntegrationRuntimeDataFlowProperties, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.compute_type = kwargs.get('compute_type', None)
self.core_count = kwargs.get('core_count', None)
self.time_to_live = kwargs.get('time_to_live', None)
class IntegrationRuntimeDataProxyProperties(msrest.serialization.Model):
"""Data proxy properties for a managed dedicated integration runtime.
:param connect_via: The self-hosted integration runtime reference.
:type connect_via: ~dfaz_management_client.models.EntityReference
:param staging_linked_service: The staging linked service reference.
:type staging_linked_service: ~dfaz_management_client.models.EntityReference
:param path: The path to contain the staged data in the Blob storage.
:type path: str
"""
_attribute_map = {
'connect_via': {'key': 'connectVia', 'type': 'EntityReference'},
'staging_linked_service': {'key': 'stagingLinkedService', 'type': 'EntityReference'},
'path': {'key': 'path', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(IntegrationRuntimeDataProxyProperties, self).__init__(**kwargs)
self.connect_via = kwargs.get('connect_via', None)
self.staging_linked_service = kwargs.get('staging_linked_service', None)
self.path = kwargs.get('path', None)
class IntegrationRuntimeListResponse(msrest.serialization.Model):
"""A list of integration runtime resources.
All required parameters must be populated in order to send to Azure.
:param value: Required. List of integration runtimes.
:type value: list[~dfaz_management_client.models.IntegrationRuntimeResource]
:param next_link: The link to the next page of results, if any remaining results exist.
:type next_link: str
"""
_validation = {
'value': {'required': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[IntegrationRuntimeResource]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(IntegrationRuntimeListResponse, self).__init__(**kwargs)
self.value = kwargs['value']
self.next_link = kwargs.get('next_link', None)
class IntegrationRuntimeMonitoringData(msrest.serialization.Model):
"""Get monitoring data response.
:param name: Integration runtime name.
:type name: str
:param nodes: Integration runtime node monitoring data.
:type nodes: list[~dfaz_management_client.models.IntegrationRuntimeNodeMonitoringData]
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'nodes': {'key': 'nodes', 'type': '[IntegrationRuntimeNodeMonitoringData]'},
}
def __init__(
self,
**kwargs
):
super(IntegrationRuntimeMonitoringData, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.nodes = kwargs.get('nodes', None)
class IntegrationRuntimeNodeIpAddress(msrest.serialization.Model):
"""The IP address of self-hosted integration runtime node.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar ip_address: The IP address of self-hosted integration runtime node.
:vartype ip_address: str
"""
_validation = {
'ip_address': {'readonly': True},
}
_attribute_map = {
'ip_address': {'key': 'ipAddress', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(IntegrationRuntimeNodeIpAddress, self).__init__(**kwargs)
self.ip_address = None
class IntegrationRuntimeNodeMonitoringData(msrest.serialization.Model):
"""Monitoring data for integration runtime node.
Variables are only populated by the server, and will be ignored when sending a request.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:ivar node_name: Name of the integration runtime node.
:vartype node_name: str
:ivar available_memory_in_mb: Available memory (MB) on the integration runtime node.
:vartype available_memory_in_mb: int
:ivar cpu_utilization: CPU percentage on the integration runtime node.
:vartype cpu_utilization: int
:ivar concurrent_jobs_limit: Maximum concurrent jobs on the integration runtime node.
:vartype concurrent_jobs_limit: int
:ivar concurrent_jobs_running: The number of jobs currently running on the integration runtime
node.
:vartype concurrent_jobs_running: int
:ivar max_concurrent_jobs: The maximum concurrent jobs in this integration runtime.
:vartype max_concurrent_jobs: int
:ivar sent_bytes: Sent bytes on the integration runtime node.
:vartype sent_bytes: float
:ivar received_bytes: Received bytes on the integration runtime node.
:vartype received_bytes: float
"""
_validation = {
'node_name': {'readonly': True},
'available_memory_in_mb': {'readonly': True},
'cpu_utilization': {'readonly': True},
'concurrent_jobs_limit': {'readonly': True},
'concurrent_jobs_running': {'readonly': True},
'max_concurrent_jobs': {'readonly': True},
'sent_bytes': {'readonly': True},
'received_bytes': {'readonly': True},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'node_name': {'key': 'nodeName', 'type': 'str'},
'available_memory_in_mb': {'key': 'availableMemoryInMB', 'type': 'int'},
'cpu_utilization': {'key': 'cpuUtilization', 'type': 'int'},
'concurrent_jobs_limit': {'key': 'concurrentJobsLimit', 'type': 'int'},
'concurrent_jobs_running': {'key': 'concurrentJobsRunning', 'type': 'int'},
'max_concurrent_jobs': {'key': 'maxConcurrentJobs', 'type': 'int'},
'sent_bytes': {'key': 'sentBytes', 'type': 'float'},
'received_bytes': {'key': 'receivedBytes', 'type': 'float'},
}
def __init__(
self,
**kwargs
):
super(IntegrationRuntimeNodeMonitoringData, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.node_name = None
self.available_memory_in_mb = None
self.cpu_utilization = None
self.concurrent_jobs_limit = None
self.concurrent_jobs_running = None
self.max_concurrent_jobs = None
self.sent_bytes = None
self.received_bytes = None
class IntegrationRuntimeReference(msrest.serialization.Model):
"""Integration runtime reference type.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar type: Required. Type of integration runtime. Default value:
"IntegrationRuntimeReference".
:vartype type: str
:param reference_name: Required. Reference integration runtime name.
:type reference_name: str
:param parameters: Arguments for integration runtime.
:type parameters: dict[str, object]
"""
_validation = {
'type': {'required': True, 'constant': True},
'reference_name': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'reference_name': {'key': 'referenceName', 'type': 'str'},
'parameters': {'key': 'parameters', 'type': '{object}'},
}
type = "IntegrationRuntimeReference"
def __init__(
self,
**kwargs
):
super(IntegrationRuntimeReference, self).__init__(**kwargs)
self.reference_name = kwargs['reference_name']
self.parameters = kwargs.get('parameters', None)
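# Construction sketch (runtime name and parameters are illustrative): 'type'
# is a class-level constant, so only the reference name and optional
# parameters are passed in.
#     ir_reference = IntegrationRuntimeReference(
#         reference_name="AutoResolveIntegrationRuntime",
#         parameters={"maxNodes": 2},
#     )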
class IntegrationRuntimeRegenerateKeyParameters(msrest.serialization.Model):
"""Parameters to regenerate the authentication key.
:param key_name: The name of the authentication key to regenerate. Possible values include:
"authKey1", "authKey2".
:type key_name: str or ~dfaz_management_client.models.IntegrationRuntimeAuthKeyName
"""
_attribute_map = {
'key_name': {'key': 'keyName', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(IntegrationRuntimeRegenerateKeyParameters, self).__init__(**kwargs)
self.key_name = kwargs.get('key_name', None)
class SubResource(msrest.serialization.Model):
"""Azure Data Factory nested resource, which belongs to a factory.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The resource identifier.
:vartype id: str
:ivar name: The resource name.
:vartype name: str
:ivar type: The resource type.
:vartype type: str
:ivar etag: Etag identifies change in the resource.
:vartype etag: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'etag': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SubResource, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.etag = None
class IntegrationRuntimeResource(SubResource):
"""Integration runtime resource type.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The resource identifier.
:vartype id: str
:ivar name: The resource name.
:vartype name: str
:ivar type: The resource type.
:vartype type: str
:ivar etag: Etag identifies change in the resource.
:vartype etag: str
:param properties: Required. Integration runtime properties.
:type properties: ~dfaz_management_client.models.IntegrationRuntime
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'etag': {'readonly': True},
'properties': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'IntegrationRuntime'},
}
def __init__(
self,
**kwargs
):
super(IntegrationRuntimeResource, self).__init__(**kwargs)
self.properties = kwargs['properties']
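# Construction sketch (assumes a SelfHostedIntegrationRuntime model exists
# elsewhere in this module, as the _subtype_map on IntegrationRuntime
# suggests): the resource wrapper just carries the typed runtime.
#     ir_resource = IntegrationRuntimeResource(
#         properties=SelfHostedIntegrationRuntime(description="on-prem IR"))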
class IntegrationRuntimeSsisCatalogInfo(msrest.serialization.Model):
"""Catalog information for managed dedicated integration runtime.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param catalog_server_endpoint: The catalog database server URL.
:type catalog_server_endpoint: str
:param catalog_admin_user_name: The administrator user name of catalog database.
:type catalog_admin_user_name: str
:param catalog_admin_password: The password of the administrator user account of the catalog
database.
:type catalog_admin_password: ~dfaz_management_client.models.SecureString
:param catalog_pricing_tier: The pricing tier for the catalog database. The valid values could
be found in https://azure.microsoft.com/en-us/pricing/details/sql-database/. Possible values
include: "Basic", "Standard", "Premium", "PremiumRS".
:type catalog_pricing_tier: str or
~dfaz_management_client.models.IntegrationRuntimeSsisCatalogPricingTier
"""
_validation = {
'catalog_admin_user_name': {'max_length': 128, 'min_length': 1},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'catalog_server_endpoint': {'key': 'catalogServerEndpoint', 'type': 'str'},
'catalog_admin_user_name': {'key': 'catalogAdminUserName', 'type': 'str'},
'catalog_admin_password': {'key': 'catalogAdminPassword', 'type': 'SecureString'},
'catalog_pricing_tier': {'key': 'catalogPricingTier', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(IntegrationRuntimeSsisCatalogInfo, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.catalog_server_endpoint = kwargs.get('catalog_server_endpoint', None)
self.catalog_admin_user_name = kwargs.get('catalog_admin_user_name', None)
self.catalog_admin_password = kwargs.get('catalog_admin_password', None)
self.catalog_pricing_tier = kwargs.get('catalog_pricing_tier', None)
class IntegrationRuntimeSsisProperties(msrest.serialization.Model):
"""SSIS properties for managed integration runtime.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param catalog_info: Catalog information for managed dedicated integration runtime.
:type catalog_info: ~dfaz_management_client.models.IntegrationRuntimeSsisCatalogInfo
:param license_type: License type for bringing your own license scenario. Possible values
include: "BasePrice", "LicenseIncluded".
:type license_type: str or ~dfaz_management_client.models.IntegrationRuntimeLicenseType
:param custom_setup_script_properties: Custom setup script properties for a managed dedicated
integration runtime.
:type custom_setup_script_properties:
~dfaz_management_client.models.IntegrationRuntimeCustomSetupScriptProperties
:param data_proxy_properties: Data proxy properties for a managed dedicated integration
runtime.
:type data_proxy_properties:
~dfaz_management_client.models.IntegrationRuntimeDataProxyProperties
:param edition: The edition for the SSIS Integration Runtime. Possible values include:
"Standard", "Enterprise".
:type edition: str or ~dfaz_management_client.models.IntegrationRuntimeEdition
:param express_custom_setup_properties: Custom setup without script properties for a SSIS
integration runtime.
:type express_custom_setup_properties: list[~dfaz_management_client.models.CustomSetupBase]
:param package_stores: Package stores for the SSIS Integration Runtime.
:type package_stores: list[~dfaz_management_client.models.PackageStore]
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'catalog_info': {'key': 'catalogInfo', 'type': 'IntegrationRuntimeSsisCatalogInfo'},
'license_type': {'key': 'licenseType', 'type': 'str'},
'custom_setup_script_properties': {'key': 'customSetupScriptProperties', 'type': 'IntegrationRuntimeCustomSetupScriptProperties'},
'data_proxy_properties': {'key': 'dataProxyProperties', 'type': 'IntegrationRuntimeDataProxyProperties'},
'edition': {'key': 'edition', 'type': 'str'},
'express_custom_setup_properties': {'key': 'expressCustomSetupProperties', 'type': '[CustomSetupBase]'},
'package_stores': {'key': 'packageStores', 'type': '[PackageStore]'},
}
def __init__(
self,
**kwargs
):
super(IntegrationRuntimeSsisProperties, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.catalog_info = kwargs.get('catalog_info', None)
self.license_type = kwargs.get('license_type', None)
self.custom_setup_script_properties = kwargs.get('custom_setup_script_properties', None)
self.data_proxy_properties = kwargs.get('data_proxy_properties', None)
self.edition = kwargs.get('edition', None)
self.express_custom_setup_properties = kwargs.get('express_custom_setup_properties', None)
self.package_stores = kwargs.get('package_stores', None)
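# Example (illustrative sketch building on the hypothetical catalog_info
# above; all values are placeholders):
#
#   ssis_properties = IntegrationRuntimeSsisProperties(
#       catalog_info=catalog_info,
#       license_type="LicenseIncluded",
#       edition="Standard",
#   )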
class IntegrationRuntimeStatus(msrest.serialization.Model):
"""Integration runtime status.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: ManagedIntegrationRuntimeStatus, SelfHostedIntegrationRuntimeStatus.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param type: Required. Type of integration runtime. Constant filled by server. Possible values
include: "Managed", "SelfHosted".
:type type: str or ~dfaz_management_client.models.IntegrationRuntimeType
:ivar data_factory_name: The name of the data factory that the integration runtime belongs to.
:vartype data_factory_name: str
:ivar state: The state of integration runtime. Possible values include: "Initial", "Stopped",
"Started", "Starting", "Stopping", "NeedRegistration", "Online", "Limited", "Offline",
"AccessDenied".
:vartype state: str or ~dfaz_management_client.models.IntegrationRuntimeState
"""
_validation = {
'type': {'required': True},
'data_factory_name': {'readonly': True},
'state': {'readonly': True},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'type': {'key': 'type', 'type': 'str'},
'data_factory_name': {'key': 'dataFactoryName', 'type': 'str'},
'state': {'key': 'state', 'type': 'str'},
}
_subtype_map = {
'type': {'Managed': 'ManagedIntegrationRuntimeStatus', 'SelfHosted': 'SelfHostedIntegrationRuntimeStatus'}
}
def __init__(
self,
**kwargs
):
super(IntegrationRuntimeStatus, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.type = 'IntegrationRuntimeStatus' # type: str
self.data_factory_name = None
self.state = None
class IntegrationRuntimeStatusListResponse(msrest.serialization.Model):
"""A list of integration runtime status.
All required parameters must be populated in order to send to Azure.
:param value: Required. List of integration runtime status.
:type value: list[~dfaz_management_client.models.IntegrationRuntimeStatusResponse]
:param next_link: The link to the next page of results, if any remaining results exist.
:type next_link: str
"""
_validation = {
'value': {'required': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[IntegrationRuntimeStatusResponse]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(IntegrationRuntimeStatusListResponse, self).__init__(**kwargs)
self.value = kwargs['value']
self.next_link = kwargs.get('next_link', None)
class IntegrationRuntimeStatusResponse(msrest.serialization.Model):
"""Integration runtime status response.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar name: The integration runtime name.
:vartype name: str
:param properties: Required. Integration runtime properties.
:type properties: ~dfaz_management_client.models.IntegrationRuntimeStatus
"""
_validation = {
'name': {'readonly': True},
'properties': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'IntegrationRuntimeStatus'},
}
def __init__(
self,
**kwargs
):
super(IntegrationRuntimeStatusResponse, self).__init__(**kwargs)
self.name = None
self.properties = kwargs['properties']
class IntegrationRuntimeVNetProperties(msrest.serialization.Model):
"""VNet properties for managed integration runtime.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param v_net_id: The ID of the VNet that this integration runtime will join.
:type v_net_id: str
:param subnet: The name of the subnet this integration runtime will join.
:type subnet: str
:param public_i_ps: Resource IDs of the public IP addresses that this integration runtime will
use.
:type public_i_ps: list[str]
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'v_net_id': {'key': 'vNetId', 'type': 'str'},
'subnet': {'key': 'subnet', 'type': 'str'},
'public_i_ps': {'key': 'publicIPs', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
super(IntegrationRuntimeVNetProperties, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.v_net_id = kwargs.get('v_net_id', None)
self.subnet = kwargs.get('subnet', None)
self.public_i_ps = kwargs.get('public_i_ps', None)
class LinkedIntegrationRuntime(msrest.serialization.Model):
"""The linked integration runtime information.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: The name of the linked integration runtime.
:vartype name: str
:ivar subscription_id: The subscription ID that the linked integration runtime belongs to.
:vartype subscription_id: str
:ivar data_factory_name: The name of the data factory that the linked integration runtime
belongs to.
:vartype data_factory_name: str
:ivar data_factory_location: The location of the data factory that the linked integration
runtime belongs to.
:vartype data_factory_location: str
:ivar create_time: The time at which the linked integration runtime was created.
:vartype create_time: ~datetime.datetime
"""
_validation = {
'name': {'readonly': True},
'subscription_id': {'readonly': True},
'data_factory_name': {'readonly': True},
'data_factory_location': {'readonly': True},
'create_time': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'subscription_id': {'key': 'subscriptionId', 'type': 'str'},
'data_factory_name': {'key': 'dataFactoryName', 'type': 'str'},
'data_factory_location': {'key': 'dataFactoryLocation', 'type': 'str'},
'create_time': {'key': 'createTime', 'type': 'iso-8601'},
}
def __init__(
self,
**kwargs
):
super(LinkedIntegrationRuntime, self).__init__(**kwargs)
self.name = None
self.subscription_id = None
self.data_factory_name = None
self.data_factory_location = None
self.create_time = None
class LinkedIntegrationRuntimeType(msrest.serialization.Model):
"""The base definition of a linked integration runtime.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: LinkedIntegrationRuntimeKeyAuthorization, LinkedIntegrationRuntimeRbacAuthorization.
All required parameters must be populated in order to send to Azure.
:param authorization_type: Required. The authorization type for integration runtime
sharing. Constant filled by server.
:type authorization_type: str
"""
_validation = {
'authorization_type': {'required': True},
}
_attribute_map = {
'authorization_type': {'key': 'authorizationType', 'type': 'str'},
}
_subtype_map = {
'authorization_type': {'Key': 'LinkedIntegrationRuntimeKeyAuthorization', 'RBAC': 'LinkedIntegrationRuntimeRbacAuthorization'}
}
def __init__(
self,
**kwargs
):
super(LinkedIntegrationRuntimeType, self).__init__(**kwargs)
self.authorization_type = None # type: Optional[str]
class LinkedIntegrationRuntimeKeyAuthorization(LinkedIntegrationRuntimeType):
"""The key authorization type integration runtime.
All required parameters must be populated in order to send to Azure.
:param authorization_type: Required. The authorization type for integration runtime
sharing. Constant filled by server.
:type authorization_type: str
:param key: Required. The key used for authorization.
:type key: ~dfaz_management_client.models.SecureString
"""
_validation = {
'authorization_type': {'required': True},
'key': {'required': True},
}
_attribute_map = {
'authorization_type': {'key': 'authorizationType', 'type': 'str'},
'key': {'key': 'key', 'type': 'SecureString'},
}
def __init__(
self,
**kwargs
):
super(LinkedIntegrationRuntimeKeyAuthorization, self).__init__(**kwargs)
self.authorization_type = 'Key' # type: str
self.key = kwargs['key']
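# Example (illustrative sketch; the authorization key is a placeholder that
# would normally come from the shared integration runtime):
#
#   key_auth = LinkedIntegrationRuntimeKeyAuthorization(
#       key=SecureString(value="<integration-runtime-auth-key>"),
#   )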
class LinkedIntegrationRuntimeRbacAuthorization(LinkedIntegrationRuntimeType):
"""The role based access control (RBAC) authorization type integration runtime.
All required parameters must be populated in order to send to Azure.
:param authorization_type: Required. The authorization type for integration runtime
sharing. Constant filled by server.
:type authorization_type: str
:param resource_id: Required. The resource identifier of the integration runtime to be shared.
:type resource_id: str
"""
_validation = {
'authorization_type': {'required': True},
'resource_id': {'required': True},
}
_attribute_map = {
'authorization_type': {'key': 'authorizationType', 'type': 'str'},
'resource_id': {'key': 'resourceId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(LinkedIntegrationRuntimeRbacAuthorization, self).__init__(**kwargs)
self.authorization_type = 'RBAC' # type: str
self.resource_id = kwargs['resource_id']
class LinkedIntegrationRuntimeRequest(msrest.serialization.Model):
"""Data factory name for linked integration runtime request.
All required parameters must be populated in order to send to Azure.
:param linked_factory_name: Required. The data factory name for linked integration runtime.
:type linked_factory_name: str
"""
_validation = {
'linked_factory_name': {'required': True},
}
_attribute_map = {
'linked_factory_name': {'key': 'factoryName', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(LinkedIntegrationRuntimeRequest, self).__init__(**kwargs)
self.linked_factory_name = kwargs['linked_factory_name']
class LinkedServiceReference(msrest.serialization.Model):
"""Linked service reference type.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar type: Required. Linked service reference type. Default value: "LinkedServiceReference".
:vartype type: str
:param reference_name: Required. Reference LinkedService name.
:type reference_name: str
:param parameters: Arguments for LinkedService.
:type parameters: dict[str, object]
"""
_validation = {
'type': {'required': True, 'constant': True},
'reference_name': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'reference_name': {'key': 'referenceName', 'type': 'str'},
'parameters': {'key': 'parameters', 'type': '{object}'},
}
type = "LinkedServiceReference"
def __init__(
self,
**kwargs
):
super(LinkedServiceReference, self).__init__(**kwargs)
self.reference_name = kwargs['reference_name']
self.parameters = kwargs.get('parameters', None)
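# Example (illustrative sketch; the linked service name and parameter values
# are placeholders):
#
#   ls_ref = LinkedServiceReference(
#       reference_name="MyAzureSqlLinkedService",
#       parameters={"DatabaseName": "SalesDb"},
#   )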
class ManagedIntegrationRuntime(IntegrationRuntime):
"""Managed integration runtime, including managed elastic and managed dedicated integration runtimes.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param type: Required. Type of integration runtime. Constant filled by server. Possible values
include: "Managed", "SelfHosted".
:type type: str or ~dfaz_management_client.models.IntegrationRuntimeType
:param description: Integration runtime description.
:type description: str
:ivar state: Integration runtime state, only valid for managed dedicated integration runtime.
Possible values include: "Initial", "Stopped", "Started", "Starting", "Stopping",
"NeedRegistration", "Online", "Limited", "Offline", "AccessDenied".
:vartype state: str or ~dfaz_management_client.models.IntegrationRuntimeState
:param repo_configuration: Git repo information of the factory.
:type repo_configuration: ~dfaz_management_client.models.FactoryRepoConfiguration
:param fake_identity: This is only for az test.
:type fake_identity: ~dfaz_management_client.models.FakeFactoryIdentity
:param zones: This is only for az test.
:type zones: list[str]
:param compute_properties: The compute resource for managed integration runtime.
:type compute_properties: ~dfaz_management_client.models.IntegrationRuntimeComputeProperties
:param ssis_properties: SSIS properties for managed integration runtime.
:type ssis_properties: ~dfaz_management_client.models.IntegrationRuntimeSsisProperties
"""
_validation = {
'type': {'required': True},
'state': {'readonly': True},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'type': {'key': 'type', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'state': {'key': 'state', 'type': 'str'},
'repo_configuration': {'key': 'repoConfiguration', 'type': 'FactoryRepoConfiguration'},
'fake_identity': {'key': 'fakeIdentity', 'type': 'FakeFactoryIdentity'},
'zones': {'key': 'zones', 'type': '[str]'},
'compute_properties': {'key': 'typeProperties.computeProperties', 'type': 'IntegrationRuntimeComputeProperties'},
'ssis_properties': {'key': 'typeProperties.ssisProperties', 'type': 'IntegrationRuntimeSsisProperties'},
}
def __init__(
self,
**kwargs
):
super(ManagedIntegrationRuntime, self).__init__(**kwargs)
self.type = 'Managed' # type: str
self.state = None
self.repo_configuration = kwargs.get('repo_configuration', None)
self.fake_identity = kwargs.get('fake_identity', None)
self.zones = kwargs.get('zones', None)
self.compute_properties = kwargs.get('compute_properties', None)
self.ssis_properties = kwargs.get('ssis_properties', None)
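# Example (illustrative sketch; reuses the hypothetical ssis_properties from
# the sketch above, and 'compute' stands in for an
# IntegrationRuntimeComputeProperties instance built elsewhere):
#
#   managed_ir = ManagedIntegrationRuntime(
#       description="Managed SSIS integration runtime",
#       compute_properties=compute,
#       ssis_properties=ssis_properties,
#   )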
class ManagedIntegrationRuntimeError(msrest.serialization.Model):
"""Error definition for managed integration runtime.
Variables are only populated by the server, and will be ignored when sending a request.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:ivar time: The time when the error occurred.
:vartype time: ~datetime.datetime
:ivar code: Error code.
:vartype code: str
:ivar parameters: Managed integration runtime error parameters.
:vartype parameters: list[str]
:ivar message: Error message.
:vartype message: str
"""
_validation = {
'time': {'readonly': True},
'code': {'readonly': True},
'parameters': {'readonly': True},
'message': {'readonly': True},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'time': {'key': 'time', 'type': 'iso-8601'},
'code': {'key': 'code', 'type': 'str'},
'parameters': {'key': 'parameters', 'type': '[str]'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ManagedIntegrationRuntimeError, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.time = None
self.code = None
self.parameters = None
self.message = None
class ManagedIntegrationRuntimeNode(msrest.serialization.Model):
"""Properties of integration runtime node.
Variables are only populated by the server, and will be ignored when sending a request.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:ivar node_id: The managed integration runtime node id.
:vartype node_id: str
:ivar status: The managed integration runtime node status. Possible values include: "Starting",
"Available", "Recycling", "Unavailable".
:vartype status: str or ~dfaz_management_client.models.ManagedIntegrationRuntimeNodeStatus
:param errors: The errors that occurred on this integration runtime node.
:type errors: list[~dfaz_management_client.models.ManagedIntegrationRuntimeError]
"""
_validation = {
'node_id': {'readonly': True},
'status': {'readonly': True},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'node_id': {'key': 'nodeId', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'errors': {'key': 'errors', 'type': '[ManagedIntegrationRuntimeError]'},
}
def __init__(
self,
**kwargs
):
super(ManagedIntegrationRuntimeNode, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.node_id = None
self.status = None
self.errors = kwargs.get('errors', None)
class ManagedIntegrationRuntimeOperationResult(msrest.serialization.Model):
"""Properties of managed integration runtime operation result.
Variables are only populated by the server, and will be ignored when sending a request.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:ivar type: The operation type. Could be start or stop.
:vartype type: str
:ivar start_time: The start time of the operation.
:vartype start_time: ~datetime.datetime
:ivar result: The operation result.
:vartype result: str
:ivar error_code: The error code.
:vartype error_code: str
:ivar parameters: Managed integration runtime error parameters.
:vartype parameters: list[str]
:ivar activity_id: The activity id for the operation request.
:vartype activity_id: str
"""
_validation = {
'type': {'readonly': True},
'start_time': {'readonly': True},
'result': {'readonly': True},
'error_code': {'readonly': True},
'parameters': {'readonly': True},
'activity_id': {'readonly': True},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'type': {'key': 'type', 'type': 'str'},
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'result': {'key': 'result', 'type': 'str'},
'error_code': {'key': 'errorCode', 'type': 'str'},
'parameters': {'key': 'parameters', 'type': '[str]'},
'activity_id': {'key': 'activityId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ManagedIntegrationRuntimeOperationResult, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.type = None
self.start_time = None
self.result = None
self.error_code = None
self.parameters = None
self.activity_id = None
class ManagedIntegrationRuntimeStatus(IntegrationRuntimeStatus):
"""Managed integration runtime status.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param type: Required. Type of integration runtime. Constant filled by server. Possible values
include: "Managed", "SelfHosted".
:type type: str or ~dfaz_management_client.models.IntegrationRuntimeType
:ivar data_factory_name: The name of the data factory that the integration runtime belongs to.
:vartype data_factory_name: str
:ivar state: The state of integration runtime. Possible values include: "Initial", "Stopped",
"Started", "Starting", "Stopping", "NeedRegistration", "Online", "Limited", "Offline",
"AccessDenied".
:vartype state: str or ~dfaz_management_client.models.IntegrationRuntimeState
:ivar create_time: The time at which the integration runtime was created, in ISO8601 format.
:vartype create_time: ~datetime.datetime
:ivar nodes: The list of nodes for managed integration runtime.
:vartype nodes: list[~dfaz_management_client.models.ManagedIntegrationRuntimeNode]
:ivar other_errors: The errors that occurred on this integration runtime.
:vartype other_errors: list[~dfaz_management_client.models.ManagedIntegrationRuntimeError]
:ivar last_operation: The last operation result that occurred on this integration runtime.
:vartype last_operation:
~dfaz_management_client.models.ManagedIntegrationRuntimeOperationResult
"""
_validation = {
'type': {'required': True},
'data_factory_name': {'readonly': True},
'state': {'readonly': True},
'create_time': {'readonly': True},
'nodes': {'readonly': True},
'other_errors': {'readonly': True},
'last_operation': {'readonly': True},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'type': {'key': 'type', 'type': 'str'},
'data_factory_name': {'key': 'dataFactoryName', 'type': 'str'},
'state': {'key': 'state', 'type': 'str'},
'create_time': {'key': 'typeProperties.createTime', 'type': 'iso-8601'},
'nodes': {'key': 'typeProperties.nodes', 'type': '[ManagedIntegrationRuntimeNode]'},
'other_errors': {'key': 'typeProperties.otherErrors', 'type': '[ManagedIntegrationRuntimeError]'},
'last_operation': {'key': 'typeProperties.lastOperation', 'type': 'ManagedIntegrationRuntimeOperationResult'},
}
def __init__(
self,
**kwargs
):
super(ManagedIntegrationRuntimeStatus, self).__init__(**kwargs)
self.type = 'Managed' # type: str
self.create_time = None
self.nodes = None
self.other_errors = None
self.last_operation = None
class PackageStore(msrest.serialization.Model):
"""Package store for the SSIS integration runtime.
All required parameters must be populated in order to send to Azure.
:param name: Required. The name of the package store.
:type name: str
:param package_store_linked_service: Required. The package store linked service reference.
:type package_store_linked_service: ~dfaz_management_client.models.EntityReference
"""
_validation = {
'name': {'required': True},
'package_store_linked_service': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'package_store_linked_service': {'key': 'packageStoreLinkedService', 'type': 'EntityReference'},
}
def __init__(
self,
**kwargs
):
super(PackageStore, self).__init__(**kwargs)
self.name = kwargs['name']
self.package_store_linked_service = kwargs['package_store_linked_service']
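# Example (illustrative sketch; 'linked_service_ref' stands in for an
# EntityReference pointing at the package store's linked service):
#
#   package_store = PackageStore(
#       name="MyPackageStore",
#       package_store_linked_service=linked_service_ref,
#   )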
class PipelineReference(msrest.serialization.Model):
"""Pipeline reference type.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar type: Required. Pipeline reference type. Default value: "PipelineReference".
:vartype type: str
:param reference_name: Required. Reference pipeline name.
:type reference_name: str
:param name: Reference name.
:type name: str
"""
_validation = {
'type': {'required': True, 'constant': True},
'reference_name': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'reference_name': {'key': 'referenceName', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
}
type = "PipelineReference"
def __init__(
self,
**kwargs
):
super(PipelineReference, self).__init__(**kwargs)
self.reference_name = kwargs['reference_name']
self.name = kwargs.get('name', None)
class RecurrenceSchedule(msrest.serialization.Model):
"""The recurrence schedule.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param minutes: The minutes.
:type minutes: list[int]
:param hours: The hours.
:type hours: list[int]
:param week_days: The days of the week.
:type week_days: list[str or ~dfaz_management_client.models.DaysOfWeek]
:param month_days: The month days.
:type month_days: list[int]
:param monthly_occurrences: The monthly occurrences.
:type monthly_occurrences: list[~dfaz_management_client.models.RecurrenceScheduleOccurrence]
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'minutes': {'key': 'minutes', 'type': '[int]'},
'hours': {'key': 'hours', 'type': '[int]'},
'week_days': {'key': 'weekDays', 'type': '[str]'},
'month_days': {'key': 'monthDays', 'type': '[int]'},
'monthly_occurrences': {'key': 'monthlyOccurrences', 'type': '[RecurrenceScheduleOccurrence]'},
}
def __init__(
self,
**kwargs
):
super(RecurrenceSchedule, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.minutes = kwargs.get('minutes', None)
self.hours = kwargs.get('hours', None)
self.week_days = kwargs.get('week_days', None)
self.month_days = kwargs.get('month_days', None)
self.monthly_occurrences = kwargs.get('monthly_occurrences', None)
class RecurrenceScheduleOccurrence(msrest.serialization.Model):
"""The recurrence schedule occurrence.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param day: The day of the week. Possible values include: "Sunday", "Monday", "Tuesday",
"Wednesday", "Thursday", "Friday", "Saturday".
:type day: str or ~dfaz_management_client.models.DayOfWeek
:param occurrence: The occurrence.
:type occurrence: int
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'day': {'key': 'day', 'type': 'str'},
'occurrence': {'key': 'occurrence', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(RecurrenceScheduleOccurrence, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.day = kwargs.get('day', None)
self.occurrence = kwargs.get('occurrence', None)
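# Example (illustrative sketch; values are placeholders): a schedule that
# fires on the first Monday of each month at 08:00.
#
#   schedule = RecurrenceSchedule(
#       hours=[8],
#       minutes=[0],
#       monthly_occurrences=[RecurrenceScheduleOccurrence(day="Monday", occurrence=1)],
#   )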
class RerunTumblingWindowTrigger(Trigger):
"""Trigger that schedules pipeline reruns for all fixed time interval windows from a requested start time to requested end time.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param type: Required. Trigger type. Constant filled by server.
:type type: str
:param description: Trigger description.
:type description: str
:ivar runtime_state: Indicates if trigger is running or not. Updated when Start/Stop APIs are
called on the Trigger. Possible values include: "Started", "Stopped", "Disabled".
:vartype runtime_state: str or ~dfaz_management_client.models.TriggerRuntimeState
:param annotations: List of tags that can be used for describing the trigger.
:type annotations: list[object]
:param parent_trigger: Required. The parent trigger reference.
:type parent_trigger: object
:param requested_start_time: Required. The start time for the time period for which restatement
is initiated. Only UTC time is currently supported.
:type requested_start_time: ~datetime.datetime
:param requested_end_time: Required. The end time for the time period for which restatement is
initiated. Only UTC time is currently supported.
:type requested_end_time: ~datetime.datetime
:param rerun_concurrency: Required. The max number of parallel time windows (ready for
execution) for which a rerun is triggered.
:type rerun_concurrency: int
"""
_validation = {
'type': {'required': True},
'runtime_state': {'readonly': True},
'parent_trigger': {'required': True},
'requested_start_time': {'required': True},
'requested_end_time': {'required': True},
'rerun_concurrency': {'required': True, 'maximum': 50, 'minimum': 1},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'type': {'key': 'type', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'runtime_state': {'key': 'runtimeState', 'type': 'str'},
'annotations': {'key': 'annotations', 'type': '[object]'},
'parent_trigger': {'key': 'typeProperties.parentTrigger', 'type': 'object'},
'requested_start_time': {'key': 'typeProperties.requestedStartTime', 'type': 'iso-8601'},
'requested_end_time': {'key': 'typeProperties.requestedEndTime', 'type': 'iso-8601'},
'rerun_concurrency': {'key': 'typeProperties.rerunConcurrency', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(RerunTumblingWindowTrigger, self).__init__(**kwargs)
self.type = 'RerunTumblingWindowTrigger' # type: str
self.parent_trigger = kwargs['parent_trigger']
self.requested_start_time = kwargs['requested_start_time']
self.requested_end_time = kwargs['requested_end_time']
self.rerun_concurrency = kwargs['rerun_concurrency']
class RetryPolicy(msrest.serialization.Model):
"""Execution policy for an activity.
:param count: Maximum ordinary retry attempts. Default is 0. Type: integer (or Expression with
resultType integer), minimum: 0.
:type count: object
:param interval_in_seconds: Interval between retries in seconds. Default is 30.
:type interval_in_seconds: int
"""
_validation = {
'interval_in_seconds': {'maximum': 86400, 'minimum': 30},
}
_attribute_map = {
'count': {'key': 'count', 'type': 'object'},
'interval_in_seconds': {'key': 'intervalInSeconds', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(RetryPolicy, self).__init__(**kwargs)
self.count = kwargs.get('count', None)
self.interval_in_seconds = kwargs.get('interval_in_seconds', None)
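# Example (illustrative sketch): retry up to 3 times, waiting 60 seconds
# between attempts. Note that interval_in_seconds is validated to lie
# between 30 and 86400.
#
#   retry_policy = RetryPolicy(count=3, interval_in_seconds=60)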
class ScheduleTrigger(MultiplePipelineTrigger):
"""Trigger that creates pipeline runs periodically, on schedule.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param type: Required. Trigger type. Constant filled by server.
:type type: str
:param description: Trigger description.
:type description: str
:ivar runtime_state: Indicates if trigger is running or not. Updated when Start/Stop APIs are
called on the Trigger. Possible values include: "Started", "Stopped", "Disabled".
:vartype runtime_state: str or ~dfaz_management_client.models.TriggerRuntimeState
:param annotations: List of tags that can be used for describing the trigger.
:type annotations: list[object]
:param pipelines: Pipelines that need to be started.
:type pipelines: list[~dfaz_management_client.models.TriggerPipelineReference]
:param recurrence: Required. Recurrence schedule configuration.
:type recurrence: ~dfaz_management_client.models.ScheduleTriggerRecurrence
"""
_validation = {
'type': {'required': True},
'runtime_state': {'readonly': True},
'recurrence': {'required': True},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'type': {'key': 'type', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'runtime_state': {'key': 'runtimeState', 'type': 'str'},
'annotations': {'key': 'annotations', 'type': '[object]'},
'pipelines': {'key': 'pipelines', 'type': '[TriggerPipelineReference]'},
'recurrence': {'key': 'typeProperties.recurrence', 'type': 'ScheduleTriggerRecurrence'},
}
def __init__(
self,
**kwargs
):
super(ScheduleTrigger, self).__init__(**kwargs)
self.type = 'ScheduleTrigger' # type: str
self.recurrence = kwargs['recurrence']
class ScheduleTriggerRecurrence(msrest.serialization.Model):
"""The workflow trigger recurrence.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param frequency: The frequency. Possible values include: "NotSpecified", "Minute", "Hour",
"Day", "Week", "Month", "Year".
:type frequency: str or ~dfaz_management_client.models.RecurrenceFrequency
:param interval: The interval.
:type interval: int
:param start_time: The start time.
:type start_time: ~datetime.datetime
:param end_time: The end time.
:type end_time: ~datetime.datetime
:param time_zone: The time zone.
:type time_zone: str
:param schedule: The recurrence schedule.
:type schedule: ~dfaz_management_client.models.RecurrenceSchedule
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'frequency': {'key': 'frequency', 'type': 'str'},
'interval': {'key': 'interval', 'type': 'int'},
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'end_time': {'key': 'endTime', 'type': 'iso-8601'},
'time_zone': {'key': 'timeZone', 'type': 'str'},
'schedule': {'key': 'schedule', 'type': 'RecurrenceSchedule'},
}
def __init__(
self,
**kwargs
):
super(ScheduleTriggerRecurrence, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.frequency = kwargs.get('frequency', None)
self.interval = kwargs.get('interval', None)
self.start_time = kwargs.get('start_time', None)
self.end_time = kwargs.get('end_time', None)
self.time_zone = kwargs.get('time_zone', None)
self.schedule = kwargs.get('schedule', None)
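# Example (illustrative sketch building on the hypothetical 'schedule' above;
# the start time is a placeholder):
#
#   import datetime
#   recurrence = ScheduleTriggerRecurrence(
#       frequency="Month",
#       interval=1,
#       start_time=datetime.datetime(2021, 1, 1, tzinfo=datetime.timezone.utc),
#       time_zone="UTC",
#       schedule=schedule,
#   )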
class SecretBase(msrest.serialization.Model):
"""The base definition of a secret type.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: SecureString.
All required parameters must be populated in order to send to Azure.
:param type: Required. Type of the secret. Constant filled by server.
:type type: str
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
}
_subtype_map = {
'type': {'SecureString': 'SecureString'}
}
def __init__(
self,
**kwargs
):
super(SecretBase, self).__init__(**kwargs)
self.type = None # type: Optional[str]
class SecureString(SecretBase):
"""Azure Data Factory secure string definition. The string value will be masked with asterisks '*' during Get or List API calls.
All required parameters must be populated in order to send to Azure.
:param type: Required. Type of the secret. Constant filled by server.
:type type: str
:param value: Required. Value of secure string.
:type value: str
"""
_validation = {
'type': {'required': True},
'value': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'value': {'key': 'value', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SecureString, self).__init__(**kwargs)
self.type = 'SecureString' # type: str
self.value = kwargs['value']
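# Example (illustrative sketch; the value is a placeholder and would be
# masked with asterisks when returned by Get or List API calls):
#
#   secret = SecureString(value="<my-secret-value>")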
class SelfDependencyTumblingWindowTriggerReference(DependencyReference):
"""Self referenced tumbling window trigger dependency.
All required parameters must be populated in order to send to Azure.
:param type: Required. The type of dependency reference. Constant filled by server.
:type type: str
:param offset: Required. Timespan applied to the start time of a tumbling window when
evaluating dependency.
:type offset: str
:param size: The size of the window when evaluating the dependency. If undefined, the frequency
of the tumbling window will be used.
:type size: str
"""
_validation = {
'type': {'required': True},
'offset': {'required': True, 'max_length': 15, 'min_length': 8, 'pattern': r'-((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9]))'},
'size': {'max_length': 15, 'min_length': 8, 'pattern': r'((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9]))'},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'offset': {'key': 'offset', 'type': 'str'},
'size': {'key': 'size', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SelfDependencyTumblingWindowTriggerReference, self).__init__(**kwargs)
self.type = 'SelfDependencyTumblingWindowTriggerReference' # type: str
self.offset = kwargs['offset']
self.size = kwargs.get('size', None)
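# Example (illustrative sketch): depend on the same trigger's window shifted
# back by one day; the offset must match the validated timespan pattern.
#
#   self_dependency = SelfDependencyTumblingWindowTriggerReference(
#       offset="-1.00:00:00",
#       size="01:00:00",
#   )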
class SelfHostedIntegrationRuntime(IntegrationRuntime):
"""Self-hosted integration runtime.
All required parameters must be populated in order to send to Azure.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param type: Required. Type of integration runtime. Constant filled by server. Possible values
include: "Managed", "SelfHosted".
:type type: str or ~dfaz_management_client.models.IntegrationRuntimeType
:param description: Integration runtime description.
:type description: str
:param linked_info: The base definition of a linked integration runtime.
:type linked_info: ~dfaz_management_client.models.LinkedIntegrationRuntimeType
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'type': {'key': 'type', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'linked_info': {'key': 'typeProperties.linkedInfo', 'type': 'LinkedIntegrationRuntimeType'},
}
def __init__(
self,
**kwargs
):
super(SelfHostedIntegrationRuntime, self).__init__(**kwargs)
self.type = 'SelfHosted' # type: str
self.linked_info = kwargs.get('linked_info', None)
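# Example (illustrative sketch; the shared resource id is a placeholder):
#
#   shared_ir = SelfHostedIntegrationRuntime(
#       description="Self-hosted IR linked to a shared runtime",
#       linked_info=LinkedIntegrationRuntimeRbacAuthorization(
#           resource_id="/subscriptions/<sub-id>/.../integrationRuntimes/<name>",
#       ),
#   )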
class SelfHostedIntegrationRuntimeNode(msrest.serialization.Model):
"""Properties of Self-hosted integration runtime node.
Variables are only populated by the server, and will be ignored when sending a request.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:ivar node_name: Name of the integration runtime node.
:vartype node_name: str
:ivar machine_name: Machine name of the integration runtime node.
:vartype machine_name: str
:ivar host_service_uri: URI for the host machine of the integration runtime.
:vartype host_service_uri: str
:ivar status: Status of the integration runtime node. Possible values include:
"NeedRegistration", "Online", "Limited", "Offline", "Upgrading", "Initializing",
"InitializeFailed".
:vartype status: str or ~dfaz_management_client.models.SelfHostedIntegrationRuntimeNodeStatus
:ivar capabilities: The integration runtime capabilities dictionary.
:vartype capabilities: dict[str, str]
:ivar version_status: Status of the integration runtime node version.
:vartype version_status: str
:ivar version: Version of the integration runtime node.
:vartype version: str
:ivar register_time: The time at which the integration runtime node was registered in ISO8601
format.
:vartype register_time: ~datetime.datetime
:ivar last_connect_time: The most recent time at which the integration runtime was connected in
ISO8601 format.
:vartype last_connect_time: ~datetime.datetime
:ivar expiry_time: The time at which the integration runtime will expire in ISO8601 format.
:vartype expiry_time: ~datetime.datetime
:ivar last_start_time: The time the node last started up.
:vartype last_start_time: ~datetime.datetime
:ivar last_stop_time: The integration runtime node last stop time.
:vartype last_stop_time: ~datetime.datetime
:ivar last_update_result: The result of the last integration runtime node update. Possible
values include: "None", "Succeed", "Fail".
:vartype last_update_result: str or
~dfaz_management_client.models.IntegrationRuntimeUpdateResult
:ivar last_start_update_time: The last time for the integration runtime node update start.
:vartype last_start_update_time: ~datetime.datetime
:ivar last_end_update_time: The last time for the integration runtime node update end.
:vartype last_end_update_time: ~datetime.datetime
:ivar is_active_dispatcher: Indicates whether this node is the active dispatcher for
integration runtime requests.
:vartype is_active_dispatcher: bool
:ivar concurrent_jobs_limit: The maximum number of concurrent jobs that can run on the integration runtime node.
:vartype concurrent_jobs_limit: int
:ivar max_concurrent_jobs: The maximum number of concurrent jobs in this integration runtime.
:vartype max_concurrent_jobs: int
"""
_validation = {
'node_name': {'readonly': True},
'machine_name': {'readonly': True},
'host_service_uri': {'readonly': True},
'status': {'readonly': True},
'capabilities': {'readonly': True},
'version_status': {'readonly': True},
'version': {'readonly': True},
'register_time': {'readonly': True},
'last_connect_time': {'readonly': True},
'expiry_time': {'readonly': True},
'last_start_time': {'readonly': True},
'last_stop_time': {'readonly': True},
'last_update_result': {'readonly': True},
'last_start_update_time': {'readonly': True},
'last_end_update_time': {'readonly': True},
'is_active_dispatcher': {'readonly': True},
'concurrent_jobs_limit': {'readonly': True},
'max_concurrent_jobs': {'readonly': True},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'node_name': {'key': 'nodeName', 'type': 'str'},
'machine_name': {'key': 'machineName', 'type': 'str'},
'host_service_uri': {'key': 'hostServiceUri', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'capabilities': {'key': 'capabilities', 'type': '{str}'},
'version_status': {'key': 'versionStatus', 'type': 'str'},
'version': {'key': 'version', 'type': 'str'},
'register_time': {'key': 'registerTime', 'type': 'iso-8601'},
'last_connect_time': {'key': 'lastConnectTime', 'type': 'iso-8601'},
'expiry_time': {'key': 'expiryTime', 'type': 'iso-8601'},
'last_start_time': {'key': 'lastStartTime', 'type': 'iso-8601'},
'last_stop_time': {'key': 'lastStopTime', 'type': 'iso-8601'},
'last_update_result': {'key': 'lastUpdateResult', 'type': 'str'},
'last_start_update_time': {'key': 'lastStartUpdateTime', 'type': 'iso-8601'},
'last_end_update_time': {'key': 'lastEndUpdateTime', 'type': 'iso-8601'},
'is_active_dispatcher': {'key': 'isActiveDispatcher', 'type': 'bool'},
'concurrent_jobs_limit': {'key': 'concurrentJobsLimit', 'type': 'int'},
'max_concurrent_jobs': {'key': 'maxConcurrentJobs', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(SelfHostedIntegrationRuntimeNode, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.node_name = None
self.machine_name = None
self.host_service_uri = None
self.status = None
self.capabilities = None
self.version_status = None
self.version = None
self.register_time = None
self.last_connect_time = None
self.expiry_time = None
self.last_start_time = None
self.last_stop_time = None
self.last_update_result = None
self.last_start_update_time = None
self.last_end_update_time = None
self.is_active_dispatcher = None
self.concurrent_jobs_limit = None
self.max_concurrent_jobs = None
class SelfHostedIntegrationRuntimeStatus(IntegrationRuntimeStatus):
"""Self-hosted integration runtime status.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param type: Required. Type of integration runtime. Constant filled by server. Possible values
include: "Managed", "SelfHosted".
:type type: str or ~dfaz_management_client.models.IntegrationRuntimeType
:ivar data_factory_name: The name of the data factory that the integration runtime belongs to.
:vartype data_factory_name: str
:ivar state: The state of integration runtime. Possible values include: "Initial", "Stopped",
"Started", "Starting", "Stopping", "NeedRegistration", "Online", "Limited", "Offline",
"AccessDenied".
:vartype state: str or ~dfaz_management_client.models.IntegrationRuntimeState
:ivar create_time: The time at which the integration runtime was created, in ISO8601 format.
:vartype create_time: ~datetime.datetime
:ivar task_queue_id: The task queue id of the integration runtime.
:vartype task_queue_id: str
:ivar internal_channel_encryption: The encryption mode used for the node-to-node
communication channel (when more than two self-hosted integration runtime nodes exist). Possible
values include: "NotSet", "SslEncrypted", "NotEncrypted".
:vartype internal_channel_encryption: str or
~dfaz_management_client.models.IntegrationRuntimeInternalChannelEncryptionMode
:ivar version: Version of the integration runtime.
:vartype version: str
:param nodes: The list of nodes for this integration runtime.
:type nodes: list[~dfaz_management_client.models.SelfHostedIntegrationRuntimeNode]
:ivar scheduled_update_date: The date at which the integration runtime will be scheduled to
update, in ISO8601 format.
:vartype scheduled_update_date: ~datetime.datetime
:ivar update_delay_offset: The time of day at which the service is scheduled to update the
integration runtime, e.g., PT03H is 3 hours.
:vartype update_delay_offset: str
:ivar local_time_zone_offset: The local time zone offset in hours.
:vartype local_time_zone_offset: str
:ivar capabilities: Object with additional information about integration runtime capabilities.
:vartype capabilities: dict[str, str]
:ivar service_urls: The URLs for the services used in integration runtime backend service.
:vartype service_urls: list[str]
:ivar auto_update: Whether Self-hosted integration runtime auto update has been turned on.
Possible values include: "On", "Off".
:vartype auto_update: str or ~dfaz_management_client.models.IntegrationRuntimeAutoUpdate
:ivar version_status: Status of the integration runtime version.
:vartype version_status: str
:param links: The list of linked integration runtimes that are created to share with this
integration runtime.
:type links: list[~dfaz_management_client.models.LinkedIntegrationRuntime]
:ivar pushed_version: The version that the integration runtime is going to update to.
:vartype pushed_version: str
:ivar latest_version: The latest version on download center.
:vartype latest_version: str
:ivar auto_update_eta: The estimated time when the self-hosted integration runtime will be
updated.
:vartype auto_update_eta: ~datetime.datetime
"""
_validation = {
'type': {'required': True},
'data_factory_name': {'readonly': True},
'state': {'readonly': True},
'create_time': {'readonly': True},
'task_queue_id': {'readonly': True},
'internal_channel_encryption': {'readonly': True},
'version': {'readonly': True},
'scheduled_update_date': {'readonly': True},
'update_delay_offset': {'readonly': True},
'local_time_zone_offset': {'readonly': True},
'capabilities': {'readonly': True},
'service_urls': {'readonly': True},
'auto_update': {'readonly': True},
'version_status': {'readonly': True},
'pushed_version': {'readonly': True},
'latest_version': {'readonly': True},
'auto_update_eta': {'readonly': True},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'type': {'key': 'type', 'type': 'str'},
'data_factory_name': {'key': 'dataFactoryName', 'type': 'str'},
'state': {'key': 'state', 'type': 'str'},
'create_time': {'key': 'typeProperties.createTime', 'type': 'iso-8601'},
'task_queue_id': {'key': 'typeProperties.taskQueueId', 'type': 'str'},
'internal_channel_encryption': {'key': 'typeProperties.internalChannelEncryption', 'type': 'str'},
'version': {'key': 'typeProperties.version', 'type': 'str'},
'nodes': {'key': 'typeProperties.nodes', 'type': '[SelfHostedIntegrationRuntimeNode]'},
'scheduled_update_date': {'key': 'typeProperties.scheduledUpdateDate', 'type': 'iso-8601'},
'update_delay_offset': {'key': 'typeProperties.updateDelayOffset', 'type': 'str'},
'local_time_zone_offset': {'key': 'typeProperties.localTimeZoneOffset', 'type': 'str'},
'capabilities': {'key': 'typeProperties.capabilities', 'type': '{str}'},
'service_urls': {'key': 'typeProperties.serviceUrls', 'type': '[str]'},
'auto_update': {'key': 'typeProperties.autoUpdate', 'type': 'str'},
'version_status': {'key': 'typeProperties.versionStatus', 'type': 'str'},
'links': {'key': 'typeProperties.links', 'type': '[LinkedIntegrationRuntime]'},
'pushed_version': {'key': 'typeProperties.pushedVersion', 'type': 'str'},
'latest_version': {'key': 'typeProperties.latestVersion', 'type': 'str'},
'auto_update_eta': {'key': 'typeProperties.autoUpdateETA', 'type': 'iso-8601'},
}
def __init__(
self,
**kwargs
):
super(SelfHostedIntegrationRuntimeStatus, self).__init__(**kwargs)
self.type = 'SelfHosted' # type: str
self.create_time = None
self.task_queue_id = None
self.internal_channel_encryption = None
self.version = None
self.nodes = kwargs.get('nodes', None)
self.scheduled_update_date = None
self.update_delay_offset = None
self.local_time_zone_offset = None
self.capabilities = None
self.service_urls = None
self.auto_update = None
self.version_status = None
self.links = kwargs.get('links', None)
self.pushed_version = None
self.latest_version = None
self.auto_update_eta = None
class SsisObjectMetadata(msrest.serialization.Model):
"""SSIS object metadata.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: SsisEnvironment, SsisFolder, SsisPackage, SsisProject.
All required parameters must be populated in order to send to Azure.
:param type: Required. Type of metadata. Constant filled by server. Possible values include:
"Folder", "Project", "Package", "Environment".
:type type: str or ~dfaz_management_client.models.SsisObjectMetadataType
:param id: Metadata id.
:type id: long
:param name: Metadata name.
:type name: str
:param description: Metadata description.
:type description: str
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'id': {'key': 'id', 'type': 'long'},
'name': {'key': 'name', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
}
_subtype_map = {
'type': {'Environment': 'SsisEnvironment', 'Folder': 'SsisFolder', 'Package': 'SsisPackage', 'Project': 'SsisProject'}
}
def __init__(
self,
**kwargs
):
super(SsisObjectMetadata, self).__init__(**kwargs)
self.type = None # type: Optional[str]
self.id = kwargs.get('id', None)
self.name = kwargs.get('name', None)
self.description = kwargs.get('description', None)
class SsisEnvironment(SsisObjectMetadata):
"""Ssis environment.
All required parameters must be populated in order to send to Azure.
:param type: Required. Type of metadata. Constant filled by server. Possible values include:
"Folder", "Project", "Package", "Environment".
:type type: str or ~dfaz_management_client.models.SsisObjectMetadataType
:param id: Metadata id.
:type id: long
:param name: Metadata name.
:type name: str
:param description: Metadata description.
:type description: str
:param folder_id: The id of the folder that contains the environment.
:type folder_id: long
:param variables: Variables in the environment.
:type variables: list[~dfaz_management_client.models.SsisVariable]
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'id': {'key': 'id', 'type': 'long'},
'name': {'key': 'name', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'folder_id': {'key': 'folderId', 'type': 'long'},
'variables': {'key': 'variables', 'type': '[SsisVariable]'},
}
def __init__(
self,
**kwargs
):
super(SsisEnvironment, self).__init__(**kwargs)
self.type = 'Environment' # type: str
self.folder_id = kwargs.get('folder_id', None)
self.variables = kwargs.get('variables', None)
class SsisEnvironmentReference(msrest.serialization.Model):
"""Ssis environment reference.
:param id: Environment reference id.
:type id: long
:param environment_folder_name: Environment folder name.
:type environment_folder_name: str
:param environment_name: Environment name.
:type environment_name: str
:param reference_type: Reference type.
:type reference_type: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'long'},
'environment_folder_name': {'key': 'environmentFolderName', 'type': 'str'},
'environment_name': {'key': 'environmentName', 'type': 'str'},
'reference_type': {'key': 'referenceType', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SsisEnvironmentReference, self).__init__(**kwargs)
self.id = kwargs.get('id', None)
self.environment_folder_name = kwargs.get('environment_folder_name', None)
self.environment_name = kwargs.get('environment_name', None)
self.reference_type = kwargs.get('reference_type', None)
class SsisFolder(SsisObjectMetadata):
"""Ssis folder.
All required parameters must be populated in order to send to Azure.
:param type: Required. Type of metadata. Constant filled by server. Possible values include:
"Folder", "Project", "Package", "Environment".
:type type: str or ~dfaz_management_client.models.SsisObjectMetadataType
:param id: Metadata id.
:type id: long
:param name: Metadata name.
:type name: str
:param description: Metadata description.
:type description: str
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'id': {'key': 'id', 'type': 'long'},
'name': {'key': 'name', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SsisFolder, self).__init__(**kwargs)
self.type = 'Folder' # type: str
class SsisObjectMetadataListResponse(msrest.serialization.Model):
"""A list of SSIS object metadata.
:param value: List of SSIS object metadata.
:type value: list[~dfaz_management_client.models.SsisObjectMetadata]
:param next_link: The link to the next page of results, if any remaining results exist.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[SsisObjectMetadata]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SsisObjectMetadataListResponse, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = kwargs.get('next_link', None)
class SsisPackage(SsisObjectMetadata):
"""Ssis Package.
All required parameters must be populated in order to send to Azure.
:param type: Required. Type of metadata. Constant filled by server. Possible values include:
"Folder", "Project", "Package", "Environment".
:type type: str or ~dfaz_management_client.models.SsisObjectMetadataType
:param id: Metadata id.
:type id: long
:param name: Metadata name.
:type name: str
:param description: Metadata description.
:type description: str
:param folder_id: The id of the folder that contains the package.
:type folder_id: long
:param project_version: The version of the project that contains the package.
:type project_version: long
:param project_id: The id of the project that contains the package.
:type project_id: long
:param parameters: Parameters in package.
:type parameters: list[~dfaz_management_client.models.SsisParameter]
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'id': {'key': 'id', 'type': 'long'},
'name': {'key': 'name', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'folder_id': {'key': 'folderId', 'type': 'long'},
'project_version': {'key': 'projectVersion', 'type': 'long'},
'project_id': {'key': 'projectId', 'type': 'long'},
'parameters': {'key': 'parameters', 'type': '[SsisParameter]'},
}
def __init__(
self,
**kwargs
):
super(SsisPackage, self).__init__(**kwargs)
self.type = 'Package' # type: str
self.folder_id = kwargs.get('folder_id', None)
self.project_version = kwargs.get('project_version', None)
self.project_id = kwargs.get('project_id', None)
self.parameters = kwargs.get('parameters', None)
class SsisParameter(msrest.serialization.Model):
"""Ssis parameter.
:param id: Parameter id.
:type id: long
:param name: Parameter name.
:type name: str
:param description: Parameter description.
:type description: str
:param data_type: Parameter type.
:type data_type: str
:param required: Whether parameter is required.
:type required: bool
:param sensitive: Whether parameter is sensitive.
:type sensitive: bool
:param design_default_value: Design default value of parameter.
:type design_default_value: str
:param default_value: Default value of parameter.
:type default_value: str
:param sensitive_default_value: Default sensitive value of parameter.
:type sensitive_default_value: str
:param value_type: Parameter value type.
:type value_type: str
:param value_set: Parameter value set.
:type value_set: bool
:param variable: Parameter reference variable.
:type variable: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'long'},
'name': {'key': 'name', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'data_type': {'key': 'dataType', 'type': 'str'},
'required': {'key': 'required', 'type': 'bool'},
'sensitive': {'key': 'sensitive', 'type': 'bool'},
'design_default_value': {'key': 'designDefaultValue', 'type': 'str'},
'default_value': {'key': 'defaultValue', 'type': 'str'},
'sensitive_default_value': {'key': 'sensitiveDefaultValue', 'type': 'str'},
'value_type': {'key': 'valueType', 'type': 'str'},
'value_set': {'key': 'valueSet', 'type': 'bool'},
'variable': {'key': 'variable', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SsisParameter, self).__init__(**kwargs)
self.id = kwargs.get('id', None)
self.name = kwargs.get('name', None)
self.description = kwargs.get('description', None)
self.data_type = kwargs.get('data_type', None)
self.required = kwargs.get('required', None)
self.sensitive = kwargs.get('sensitive', None)
self.design_default_value = kwargs.get('design_default_value', None)
self.default_value = kwargs.get('default_value', None)
self.sensitive_default_value = kwargs.get('sensitive_default_value', None)
self.value_type = kwargs.get('value_type', None)
self.value_set = kwargs.get('value_set', None)
self.variable = kwargs.get('variable', None)
class SsisProject(SsisObjectMetadata):
"""Ssis project.
All required parameters must be populated in order to send to Azure.
    :param type: Required. Type of metadata. Constant filled by server. Possible values include:
"Folder", "Project", "Package", "Environment".
:type type: str or ~dfaz_management_client.models.SsisObjectMetadataType
:param id: Metadata id.
:type id: long
:param name: Metadata name.
:type name: str
:param description: Metadata description.
:type description: str
:param folder_id: Folder id which contains project.
:type folder_id: long
:param version: Project version.
:type version: long
:param environment_refs: Environment reference in project.
:type environment_refs: list[~dfaz_management_client.models.SsisEnvironmentReference]
:param parameters: Parameters in project.
:type parameters: list[~dfaz_management_client.models.SsisParameter]
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'id': {'key': 'id', 'type': 'long'},
'name': {'key': 'name', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'folder_id': {'key': 'folderId', 'type': 'long'},
'version': {'key': 'version', 'type': 'long'},
'environment_refs': {'key': 'environmentRefs', 'type': '[SsisEnvironmentReference]'},
'parameters': {'key': 'parameters', 'type': '[SsisParameter]'},
}
def __init__(
self,
**kwargs
):
super(SsisProject, self).__init__(**kwargs)
self.type = 'Project' # type: str
self.folder_id = kwargs.get('folder_id', None)
self.version = kwargs.get('version', None)
self.environment_refs = kwargs.get('environment_refs', None)
self.parameters = kwargs.get('parameters', None)
class SsisVariable(msrest.serialization.Model):
"""Ssis variable.
:param id: Variable id.
:type id: long
:param name: Variable name.
:type name: str
:param description: Variable description.
:type description: str
:param data_type: Variable type.
:type data_type: str
:param sensitive: Whether variable is sensitive.
:type sensitive: bool
:param value: Variable value.
:type value: str
:param sensitive_value: Variable sensitive value.
:type sensitive_value: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'long'},
'name': {'key': 'name', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'data_type': {'key': 'dataType', 'type': 'str'},
'sensitive': {'key': 'sensitive', 'type': 'bool'},
'value': {'key': 'value', 'type': 'str'},
'sensitive_value': {'key': 'sensitiveValue', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SsisVariable, self).__init__(**kwargs)
self.id = kwargs.get('id', None)
self.name = kwargs.get('name', None)
self.description = kwargs.get('description', None)
self.data_type = kwargs.get('data_type', None)
self.sensitive = kwargs.get('sensitive', None)
self.value = kwargs.get('value', None)
self.sensitive_value = kwargs.get('sensitive_value', None)
class TriggerDependencyReference(DependencyReference):
"""Trigger referenced dependency.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: TumblingWindowTriggerDependencyReference.
All required parameters must be populated in order to send to Azure.
    :param type: Required. The type of dependency reference. Constant filled by server.
:type type: str
:param reference_trigger: Required. Referenced trigger.
:type reference_trigger: ~dfaz_management_client.models.TriggerReference
"""
_validation = {
'type': {'required': True},
'reference_trigger': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'reference_trigger': {'key': 'referenceTrigger', 'type': 'TriggerReference'},
}
_subtype_map = {
'type': {'TumblingWindowTriggerDependencyReference': 'TumblingWindowTriggerDependencyReference'}
}
def __init__(
self,
**kwargs
):
super(TriggerDependencyReference, self).__init__(**kwargs)
self.type = 'TriggerDependencyReference' # type: str
self.reference_trigger = kwargs['reference_trigger']
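# ``_subtype_map`` above is what lets msrest resolve the abstract ``DependencyReference`` types
# polymorphically: the wire value of ``type`` selects the concrete class on deserialization.
# A minimal sketch of the discriminator when the concrete subclass is built directly (the
# referenced trigger name and offsets are hypothetical):
def _example_trigger_dependency_discriminator():
    """Illustrative only: the constructor fills in the ``type`` discriminator."""
    dependency = TumblingWindowTriggerDependencyReference(
        reference_trigger=TriggerReference(reference_name="DailyWindowTrigger"),
        offset="-02:00:00",
        size="02:00:00",
    )
    assert dependency.type == "TumblingWindowTriggerDependencyReference"
    return dependency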
class TriggerFilterParameters(msrest.serialization.Model):
"""Query parameters for triggers.
:param continuation_token: The continuation token for getting the next page of results. Null
for first page.
:type continuation_token: str
:param parent_trigger_name: The name of the parent TumblingWindowTrigger to get the child rerun
triggers.
:type parent_trigger_name: str
"""
_attribute_map = {
'continuation_token': {'key': 'continuationToken', 'type': 'str'},
'parent_trigger_name': {'key': 'parentTriggerName', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(TriggerFilterParameters, self).__init__(**kwargs)
self.continuation_token = kwargs.get('continuation_token', None)
self.parent_trigger_name = kwargs.get('parent_trigger_name', None)
class TriggerListResponse(msrest.serialization.Model):
"""A list of trigger resources.
All required parameters must be populated in order to send to Azure.
:param value: Required. List of triggers.
:type value: list[~dfaz_management_client.models.TriggerResource]
:param next_link: The link to the next page of results, if any remaining results exist.
:type next_link: str
"""
_validation = {
'value': {'required': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[TriggerResource]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(TriggerListResponse, self).__init__(**kwargs)
self.value = kwargs['value']
self.next_link = kwargs.get('next_link', None)
class TriggerPipelineReference(msrest.serialization.Model):
"""Pipeline that needs to be triggered with the given parameters.
:param pipeline_reference: Pipeline reference.
:type pipeline_reference: ~dfaz_management_client.models.PipelineReference
:param parameters: Pipeline parameters.
:type parameters: dict[str, object]
"""
_attribute_map = {
'pipeline_reference': {'key': 'pipelineReference', 'type': 'PipelineReference'},
'parameters': {'key': 'parameters', 'type': '{object}'},
}
def __init__(
self,
**kwargs
):
super(TriggerPipelineReference, self).__init__(**kwargs)
self.pipeline_reference = kwargs.get('pipeline_reference', None)
self.parameters = kwargs.get('parameters', None)
class TriggerQueryResponse(msrest.serialization.Model):
"""A query of triggers.
All required parameters must be populated in order to send to Azure.
:param value: Required. List of triggers.
:type value: list[~dfaz_management_client.models.TriggerResource]
:param continuation_token: The continuation token for getting the next page of results, if any
remaining results exist, null otherwise.
:type continuation_token: str
"""
_validation = {
'value': {'required': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[TriggerResource]'},
'continuation_token': {'key': 'continuationToken', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(TriggerQueryResponse, self).__init__(**kwargs)
self.value = kwargs['value']
self.continuation_token = kwargs.get('continuation_token', None)
class TriggerReference(msrest.serialization.Model):
"""Trigger reference type.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar type: Required. Trigger reference type. Default value: "TriggerReference".
:vartype type: str
:param reference_name: Required. Reference trigger name.
:type reference_name: str
"""
_validation = {
'type': {'required': True, 'constant': True},
'reference_name': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'reference_name': {'key': 'referenceName', 'type': 'str'},
}
type = "TriggerReference"
def __init__(
self,
**kwargs
):
super(TriggerReference, self).__init__(**kwargs)
self.reference_name = kwargs['reference_name']
class TriggerResource(SubResource):
"""Trigger resource type.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The resource identifier.
:vartype id: str
:ivar name: The resource name.
:vartype name: str
:ivar type: The resource type.
:vartype type: str
:ivar etag: Etag identifies change in the resource.
:vartype etag: str
:param properties: Required. Properties of the trigger.
:type properties: ~dfaz_management_client.models.Trigger
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'etag': {'readonly': True},
'properties': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'Trigger'},
}
def __init__(
self,
**kwargs
):
super(TriggerResource, self).__init__(**kwargs)
self.properties = kwargs['properties']
class TriggerSubscriptionOperationStatus(msrest.serialization.Model):
"""Defines the response of a trigger subscription operation.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar trigger_name: Trigger name.
:vartype trigger_name: str
:ivar status: Event Subscription Status. Possible values include: "Enabled", "Provisioning",
"Deprovisioning", "Disabled", "Unknown".
:vartype status: str or ~dfaz_management_client.models.EventSubscriptionStatus
"""
_validation = {
'trigger_name': {'readonly': True},
'status': {'readonly': True},
}
_attribute_map = {
'trigger_name': {'key': 'triggerName', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(TriggerSubscriptionOperationStatus, self).__init__(**kwargs)
self.trigger_name = None
self.status = None
class TumblingWindowTrigger(Trigger):
"""Trigger that schedules pipeline runs for all fixed time interval windows from a start time without gaps and also supports backfill scenarios (when start time is in the past).
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
    :param type: Required. Trigger type. Constant filled by server.
:type type: str
:param description: Trigger description.
:type description: str
:ivar runtime_state: Indicates if trigger is running or not. Updated when Start/Stop APIs are
called on the Trigger. Possible values include: "Started", "Stopped", "Disabled".
:vartype runtime_state: str or ~dfaz_management_client.models.TriggerRuntimeState
:param annotations: List of tags that can be used for describing the trigger.
:type annotations: list[object]
:param pipeline: Required. Pipeline for which runs are created when an event is fired for
trigger window that is ready.
:type pipeline: ~dfaz_management_client.models.TriggerPipelineReference
:param frequency: Required. The frequency of the time windows. Possible values include:
"Minute", "Hour".
:type frequency: str or ~dfaz_management_client.models.TumblingWindowFrequency
:param interval: Required. The interval of the time windows. The minimum interval allowed is 15
Minutes.
:type interval: int
:param start_time: Required. The start time for the time period for the trigger during which
events are fired for windows that are ready. Only UTC time is currently supported.
:type start_time: ~datetime.datetime
:param end_time: The end time for the time period for the trigger during which events are fired
for windows that are ready. Only UTC time is currently supported.
:type end_time: ~datetime.datetime
    :param delay: Specifies how long the trigger waits past due time before triggering a new run. It
doesn't alter window start and end time. The default is 0. Type: string (or Expression with
resultType string), pattern: ((\d+).)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
:type delay: object
:param max_concurrency: Required. The max number of parallel time windows (ready for execution)
for which a new run is triggered.
:type max_concurrency: int
:param retry_policy: Retry policy that will be applied for failed pipeline runs.
:type retry_policy: ~dfaz_management_client.models.RetryPolicy
:param depends_on: Triggers that this trigger depends on. Only tumbling window triggers are
supported.
:type depends_on: list[~dfaz_management_client.models.DependencyReference]
"""
_validation = {
'type': {'required': True},
'runtime_state': {'readonly': True},
'pipeline': {'required': True},
'frequency': {'required': True},
'interval': {'required': True},
'start_time': {'required': True},
'max_concurrency': {'required': True, 'maximum': 50, 'minimum': 1},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'type': {'key': 'type', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'runtime_state': {'key': 'runtimeState', 'type': 'str'},
'annotations': {'key': 'annotations', 'type': '[object]'},
'pipeline': {'key': 'pipeline', 'type': 'TriggerPipelineReference'},
'frequency': {'key': 'typeProperties.frequency', 'type': 'str'},
'interval': {'key': 'typeProperties.interval', 'type': 'int'},
'start_time': {'key': 'typeProperties.startTime', 'type': 'iso-8601'},
'end_time': {'key': 'typeProperties.endTime', 'type': 'iso-8601'},
'delay': {'key': 'typeProperties.delay', 'type': 'object'},
'max_concurrency': {'key': 'typeProperties.maxConcurrency', 'type': 'int'},
'retry_policy': {'key': 'typeProperties.retryPolicy', 'type': 'RetryPolicy'},
'depends_on': {'key': 'typeProperties.dependsOn', 'type': '[DependencyReference]'},
}
def __init__(
self,
**kwargs
):
super(TumblingWindowTrigger, self).__init__(**kwargs)
self.type = 'TumblingWindowTrigger' # type: str
self.pipeline = kwargs['pipeline']
self.frequency = kwargs['frequency']
self.interval = kwargs['interval']
self.start_time = kwargs['start_time']
self.end_time = kwargs.get('end_time', None)
self.delay = kwargs.get('delay', None)
self.max_concurrency = kwargs['max_concurrency']
self.retry_policy = kwargs.get('retry_policy', None)
self.depends_on = kwargs.get('depends_on', None)
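# The ``typeProperties.*`` keys in ``_attribute_map`` flatten the trigger's nested JSON payload
# into flat Python attributes. A minimal sketch of a fully specified trigger; the pipeline name,
# dates, and intervals are hypothetical, and ``PipelineReference`` (defined earlier in this
# module) is assumed to take a ``reference_name`` keyword:
def _example_build_tumbling_window_trigger():
    """Illustrative only: construct a TumblingWindowTrigger with its required fields."""
    import datetime
    return TumblingWindowTrigger(
        description="Hourly backfill trigger",
        pipeline=TriggerPipelineReference(
            pipeline_reference=PipelineReference(reference_name="CopyPipeline"),
            parameters={"windowStart": "@trigger().outputs.windowStartTime"},
        ),
        frequency="Hour",
        interval=1,
        start_time=datetime.datetime(2020, 1, 1, tzinfo=datetime.timezone.utc),
        max_concurrency=10,
    )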
class TumblingWindowTriggerDependencyReference(TriggerDependencyReference):
"""Referenced tumbling window trigger dependency.
All required parameters must be populated in order to send to Azure.
    :param type: Required. The type of dependency reference. Constant filled by server.
:type type: str
:param reference_trigger: Required. Referenced trigger.
:type reference_trigger: ~dfaz_management_client.models.TriggerReference
:param offset: Timespan applied to the start time of a tumbling window when evaluating
dependency.
:type offset: str
:param size: The size of the window when evaluating the dependency. If undefined the frequency
of the tumbling window will be used.
:type size: str
"""
_validation = {
'type': {'required': True},
'reference_trigger': {'required': True},
'offset': {'max_length': 15, 'min_length': 8, 'pattern': r'-?((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9]))'},
'size': {'max_length': 15, 'min_length': 8, 'pattern': r'((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9]))'},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'reference_trigger': {'key': 'referenceTrigger', 'type': 'TriggerReference'},
'offset': {'key': 'offset', 'type': 'str'},
'size': {'key': 'size', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(TumblingWindowTriggerDependencyReference, self).__init__(**kwargs)
self.type = 'TumblingWindowTriggerDependencyReference' # type: str
self.offset = kwargs.get('offset', None)
self.size = kwargs.get('size', None)
class UpdateIntegrationRuntimeRequest(msrest.serialization.Model):
"""Update integration runtime request.
:param auto_update: Enables or disables the auto-update feature of the self-hosted integration
runtime. See https://go.microsoft.com/fwlink/?linkid=854189. Possible values include: "On",
"Off".
:type auto_update: str or ~dfaz_management_client.models.IntegrationRuntimeAutoUpdate
:param update_delay_offset: The time offset (in hours) in the day, e.g., PT03H is 3 hours. The
integration runtime auto update will happen on that time.
:type update_delay_offset: str
"""
_attribute_map = {
'auto_update': {'key': 'autoUpdate', 'type': 'str'},
'update_delay_offset': {'key': 'updateDelayOffset', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(UpdateIntegrationRuntimeRequest, self).__init__(**kwargs)
self.auto_update = kwargs.get('auto_update', None)
self.update_delay_offset = kwargs.get('update_delay_offset', None)
class UserAccessPolicy(msrest.serialization.Model):
"""Get Data Plane read only token request definition.
:param permissions: The string with permissions for Data Plane access. Currently only 'r' is
supported which grants read only access.
:type permissions: str
:param access_resource_path: The resource path to get access relative to factory. Currently
only empty string is supported which corresponds to the factory resource.
:type access_resource_path: str
:param profile_name: The name of the profile. Currently only the default is supported. The
default value is DefaultProfile.
:type profile_name: str
:param start_time: Start time for the token. If not specified the current time will be used.
:type start_time: str
:param expire_time: Expiration time for the token. Maximum duration for the token is eight
hours and by default the token will expire in eight hours.
:type expire_time: str
"""
_attribute_map = {
'permissions': {'key': 'permissions', 'type': 'str'},
'access_resource_path': {'key': 'accessResourcePath', 'type': 'str'},
'profile_name': {'key': 'profileName', 'type': 'str'},
'start_time': {'key': 'startTime', 'type': 'str'},
'expire_time': {'key': 'expireTime', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(UserAccessPolicy, self).__init__(**kwargs)
self.permissions = kwargs.get('permissions', None)
self.access_resource_path = kwargs.get('access_resource_path', None)
self.profile_name = kwargs.get('profile_name', None)
self.start_time = kwargs.get('start_time', None)
self.expire_time = kwargs.get('expire_time', None)
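# A minimal sketch of the data plane token request payload described above (the expiry time is
# hypothetical; only 'r' permissions and the empty factory-root path are currently supported):
def _example_user_access_policy():
    """Illustrative only: build the access policy for a read-only data plane token request."""
    return UserAccessPolicy(
        permissions="r",
        access_resource_path="",
        profile_name="DefaultProfile",
        expire_time="2020-01-01T08:00:00Z",
    )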
the-stack_106_27116 | #!/usr/bin/env python
""" Plots related to the response matrix.
.. codeauthor:: Raymond Ehlers <[email protected]>, Yale University
"""
import logging
import ctypes
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import os
import seaborn as sns
from typing import Any, cast, Dict, Iterator, Mapping, Sequence, Tuple, TYPE_CHECKING, Union
import pachyderm.fit
from pachyderm import histogram
from pachyderm import utils
from jet_hadron.base import analysis_objects
from jet_hadron.base import labels
from jet_hadron.base import params
from jet_hadron.base.typing_helpers import Hist
from jet_hadron.plot import base as plot_base
import ROOT
if TYPE_CHECKING:
from jet_hadron.analysis import pt_hard_analysis # noqa: F401
from jet_hadron.analysis import response_matrix
logger = logging.getLogger(__name__)
Analyses = Dict[Any, "response_matrix.ResponseMatrix"]
AnalysesBase = Dict[Any, "response_matrix.ResponseMatrixBase"]
def plot_particle_level_spectra(ep_analyses_iter: Iterator[Tuple[Any, "response_matrix.ResponseMatrixBase"]],
output_info: analysis_objects.PlottingOutputWrapper,
plot_with_ROOT: bool = False) -> None:
""" Plot the particle level spectra associated with the response.
Args:
        ep_analyses_iter: The event plane dependent final response matrices.
output_info: Output information.
plot_with_ROOT: True if the plot should be done via ROOT. Default: False
Returns:
None. The spectra are plotted and saved.
"""
# Pull out the dict because we need to grab individual analyses for some labeling information, which doesn't
# play well with generators (the generator will be exhausted).
ep_analyses = dict(ep_analyses_iter)
# Determine the general and plot labels
# First, we need some variables to define the general labels, so we retrieve the inclusive analysis.
# All of the parameters retrieved here are shared by all analyses.
inclusive = next(iter(ep_analyses.values()))
# Then we define some additional helper variables
particle_level_spectra_bin = inclusive.task_config["particle_level_spectra"]["particle_level_spectra_bin"]
embedded_additional_label = inclusive.event_activity.display_str()
# General labels
general_labels = {
"alice_and_collision_energy":
rf"{inclusive.alice_label.display_str()}\:{inclusive.collision_energy.display_str()}",
"collision_system_and_event_activity":
rf"{inclusive.collision_system.display_str(embedded_additional_label = embedded_additional_label)}",
"detector_pt_range": labels.pt_range_string(
particle_level_spectra_bin,
lower_label = "T,jet",
upper_label = "det",
),
"constituent_cuts": labels.constituent_cuts(additional_label = "det"),
"leading_hadron_bias": inclusive.leading_hadron_bias.display_str(additional_label = "det"),
"jet_finding": labels.jet_finding(),
}
# Ensure that each line is a valid latex line.
# The heuristic is roughly that full statements (such as jet_finding) are already wrapped in "$",
# while partial statements, such as the leading hadron bias, event activity, etc are not wrapped in "$".
# This is due to the potential for such "$" to interfere with including those partial statements in other
# statements. As an example, it would be impossible to use the ``embedded_additional_label`` above if the
# ``event_activity`` included "$".
for k, v in general_labels.items():
general_labels[k] = labels.make_valid_latex_string(v)
# Plot labels
y_label = r"\mathrm{d}N/\mathrm{d}p_{\mathrm{T}}"
if inclusive.task_config["particle_level_spectra"]["normalize_by_n_jets"]:
y_label = r"(1/N_{\mathrm{jets}})" + y_label
if inclusive.task_config["particle_level_spectra"]["normalize_at_selected_jet_pt_bin"]:
# Assumes that we'll never set an upper bound.
values = inclusive.task_config["particle_level_spectra"]["normalize_at_selected_jet_pt_values"]
y_label = r"(1/N_{\text{jets}}^{p_{\text{T}} > " + fr"{values.min}\:{labels.momentum_units_label_gev()}" + r"})" + y_label
# Add y_label units
y_label += fr"\:({labels.momentum_units_label_gev()})^{{-1}}"
plot_labels = plot_base.PlotLabels(
title = "",
x_label = fr"${labels.jet_pt_display_label(upper_label = 'part')}\:({labels.momentum_units_label_gev()})$",
y_label = labels.make_valid_latex_string(y_label),
)
# Finally, we collect our arguments for the plotting functions.
kwargs: Dict[str, Any] = {
"ep_analyses": ep_analyses,
"output_name": "particle_level_spectra",
"output_info": output_info,
"general_labels": general_labels,
"plot_labels": plot_labels,
}
if plot_with_ROOT:
_plot_particle_level_spectra_with_ROOT(**kwargs)
else:
_plot_particle_level_spectra_with_matplotlib(**kwargs)
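# A minimal sketch of how the entry point above is typically driven; the event plane dependent
# analysis objects are assumed to already be constructed by the response_matrix module:
def _example_plot_particle_level_spectra(ep_analyses: AnalysesBase,
                                         output_info: analysis_objects.PlottingOutputWrapper) -> None:
    """ Illustrative only: plot both the matplotlib and ROOT versions of the spectra. """
    for with_root in (False, True):
        plot_particle_level_spectra(
            ep_analyses_iter = iter(ep_analyses.items()),
            output_info = output_info,
            plot_with_ROOT = with_root,
        )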
def _plot_particle_level_spectra_with_matplotlib(ep_analyses: AnalysesBase,
output_name: str,
output_info: analysis_objects.PlottingOutputWrapper,
general_labels: Dict[str, str],
plot_labels: plot_base.PlotLabels) -> None:
""" Plot the particle level spectra with matplotlib.
Args:
ep_analyses: The final event plane dependent response matrix analysis objects.
output_name: Name of the output plot.
output_info: Output information.
general_labels: General informational labels for the plot (ALICE, collision system, etc).
plot_labels: plot and axis titles.
Returns:
None. The created canvas is plotted and saved.
"""
# Setup
fig, ax = plt.subplots(figsize = (8, 6))
# Diamond, square, up triangle, circle
markers = ["D", "s", "^", "o"]
colors = ["black", "blue", "green", "red"]
# Label axes
plot_labels.apply_labels(ax)
# Plot the actual hists. The inclusive orientation will be plotted first.
particle_level_max_pt = 0
for analysis, color, marker in zip(ep_analyses.values(), colors, markers):
# Store this value for convenience. It is the same for all orientations.
particle_level_max_pt = analysis.task_config["particle_level_spectra"]["particle_level_max_pt"]
# For inclusive, use open markers that are plotted on top of all points.
additional_args = {}
if analysis.reaction_plane_orientation == params.ReactionPlaneOrientation.inclusive:
additional_args.update({
"fillstyle": "none",
"zorder": 10,
})
# Convert and plot hist
h = histogram.Histogram1D.from_existing_hist(analysis.particle_level_spectra)
ax.errorbar(
h.x, h.y,
xerr = (h.bin_edges[1:] - h.bin_edges[:-1]) / 2,
yerr = h.errors,
label = analysis.reaction_plane_orientation.display_str(),
color = color,
marker = marker,
linestyle = "",
**additional_args
)
# Final presentation settings
# Axis ticks
ax.xaxis.set_major_locator(matplotlib.ticker.MultipleLocator(base = 10))
ax.xaxis.set_minor_locator(matplotlib.ticker.MultipleLocator(base = 2))
tick_shared_args = {
"axis": "both",
"bottom": True,
"left": True,
}
ax.tick_params(
which = "major",
# Size of the axis mark labels
labelsize = 15,
length = 8,
**tick_shared_args,
)
ax.tick_params(
which = "minor",
length = 4,
**tick_shared_args,
)
# Limits
ax.set_xlim(0, particle_level_max_pt)
    # Unfortunately, MPL doesn't calculate restricted log limits very nicely, so we
    # have to set the values by hand.
# We grab the value from the last analysis object - the value will be the same for all of them.
y_limits = analysis.task_config["particle_level_spectra"]["y_limits"]
ax.set_ylim(y_limits[0], y_limits[1])
ax.set_yscale("log")
# Legend
ax.legend(
loc = "lower left",
frameon = False,
fontsize = 18,
)
# Combine the general labels and then plot them.
label = "\n".join(general_labels.values())
# The label is anchored in the upper right corner.
ax.text(0.95, 0.95, s = label,
horizontalalignment = "right",
verticalalignment = "top",
multialignment = "right",
fontsize = 18,
transform = ax.transAxes)
fig.tight_layout()
# Finally, save and cleanup
output_name += "_mpl"
plot_base.save_plot(output_info, fig, output_name)
plt.close(fig)
def _plot_particle_level_spectra_with_ROOT(ep_analyses: AnalysesBase,
output_name: str,
output_info: analysis_objects.PlottingOutputWrapper,
general_labels: Dict[str, str],
plot_labels: plot_base.PlotLabels) -> None:
""" Plot the particle level spectra with ROOT.
Args:
ep_analyses: The final event plane dependent response matrix analysis objects.
output_name: Name of the output plot.
output_info: Output information.
general_labels: General informational labels for the plot (ALICE, collision system, etc).
plot_labels: plot and axis titles.
Returns:
None. The created canvas is plotted and saved.
"""
# Setup
# Aesthetics
# Colors and markers are from Joel's plots.
colors = [ROOT.kBlack, ROOT.kBlue - 7, 8, ROOT.kRed - 4]
markers = [ROOT.kFullDiamond, ROOT.kFullSquare, ROOT.kFullTriangleUp, ROOT.kFullCircle]
# Diamond: 1.6
# Triangle: 1.2
marker_sizes = [1.6, 1.1, 1.2, 1.1]
# Canvas
canvas = ROOT.TCanvas("canvas", "canvas")
canvas.SetTopMargin(0.04)
canvas.SetLeftMargin(0.14)
canvas.SetRightMargin(0.04)
canvas.SetBottomMargin(0.15)
# These are spectra, so it makes sense to draw it in a log scale.
canvas.SetLogy(True)
# Legend
legend = ROOT.TLegend(0.14, 0.17, 0.42, 0.47)
# Remove border
legend.SetBorderSize(0)
# Increase text size
legend.SetTextSize(0.06)
# Make the legend transparent
legend.SetFillStyle(0)
# Main labeling
latex_labels = []
# ALICE + collision energy
latex_labels.append(ROOT.TLatex(
0.5675, 0.90,
labels.use_label_with_root(general_labels["alice_and_collision_energy"])
))
# Collision system + event activity
# We want the centrality to appear between the cross symbol and Pb--Pb
# NOTE: The y value is minimally adjusted down from the constant 0.06 decrease because the sqrt extends far down.
latex_labels.append(ROOT.TLatex(
0.5375, 0.825,
labels.use_label_with_root(general_labels["collision_system_and_event_activity"]),
))
# Particle level spectra range in detector pt.
latex_labels.append(ROOT.TLatex(
0.605, 0.75,
labels.use_label_with_root(general_labels["detector_pt_range"]),
))
# Constituent cuts
latex_labels.append(ROOT.TLatex(
0.5675, 0.675,
labels.use_label_with_root(general_labels["constituent_cuts"]),
))
# Leading hadron bias
# The x position of this label depends on the value.
# We need some additional parameters to determine the position, so we retrieve the inclusive analysis.
# All of the parameters retrieved here are shared by all analyses.
inclusive = next(iter(ep_analyses.values()))
# We start we a semi-reasonable position with the expectation that we will usually overwrite it.
leading_hadron_bias_label_x_position = 0.6
# Track bias
if inclusive.leading_hadron_bias.type == params.LeadingHadronBiasType.track:
leading_hadron_bias_label_x_position = 0.6025
# Cluster bias
if inclusive.leading_hadron_bias.type == params.LeadingHadronBiasType.cluster and \
inclusive.leading_hadron_bias.value < 10:
leading_hadron_bias_label_x_position = 0.633
latex_labels.append(ROOT.TLatex(
leading_hadron_bias_label_x_position, 0.60,
# Replace necessary because ROOT LaTeX support sux...
# Includes "d" in finding the space because there is another space that's rendered properly
# later in the string...
labels.use_label_with_root(general_labels["leading_hadron_bias"]).replace(r"d\:", "d "),
))
# Jet finding
latex_labels.append(ROOT.TLatex(0.71, 0.525, labels.use_label_with_root(general_labels["jet_finding"])))
# Plot the actual hists. The inclusive orientation will be plotted first.
for i, (analysis, color, marker, marker_size) in enumerate(zip(ep_analyses.values(), colors, markers, marker_sizes)):
# The hist to be plotted. We explicitly retrieve it for convenience.
hist = analysis.particle_level_spectra
# Set the titles
plot_labels.apply_labels(hist)
# Style each individual hist. In principle, we could do this for only one hist and then set the
        # axis labels to empty for the rest, but then we would have to empty out the labels. This is just
        # as easy, and then we don't have to deal with changing the labels.
# Enlarge axis title size
hist.GetXaxis().SetTitleSize(0.055)
hist.GetYaxis().SetTitleSize(0.055)
# Ensure there is enough space
hist.GetXaxis().SetTitleOffset(1.15)
hist.GetYaxis().SetTitleOffset(1.22)
# Enlarge axis label size
hist.GetXaxis().SetLabelSize(0.06)
hist.GetYaxis().SetLabelSize(0.06)
# Center axis title
hist.GetXaxis().CenterTitle(True)
hist.GetYaxis().CenterTitle(True)
# View the interesting range
# Note that this must be set after removing any bins that we might want to remove,
# so we set it when plotting.
hist.GetXaxis().SetRangeUser(0, analysis.task_config["particle_level_spectra"]["particle_level_max_pt"])
# Set histogram aesthetics
hist.SetLineColor(color)
hist.SetMarkerColor(color)
hist.SetMarkerStyle(marker)
# Increase marker size slightly
hist.SetMarkerSize(marker_size)
# Could increase the line width if the inclusive angle was closed, but
# the open marker doesn't look very good...
#hist.SetLineWidth(2)
# Offset points
# See: https://root.cern.ch/root/roottalk/roottalk03/2765.html
if analysis.task_config["particle_level_spectra"]["plot_points_with_offset"]:
shift = i * 0.1 * hist.GetBinWidth(1)
xAxis = hist.GetXaxis()
xAxis.SetLimits(xAxis.GetXmin() + shift, xAxis.GetXmax() + shift)
# Store hist in legend
# Remap "inclusive" -> "all" for prior consistency.
label = analysis.reaction_plane_orientation.display_str()
if analysis.reaction_plane_orientation == params.ReactionPlaneOrientation.inclusive:
label = "All"
legend.AddEntry(hist, label)
# Last, we draw the actual hist.
hist.Draw("same")
# Redraw the inclusive hist so that it's on top.
inclusive.particle_level_spectra.Draw("same")
# Draw all of the labels and the legend.
for tex in latex_labels:
tex.SetNDC(True)
tex.Draw()
legend.Draw()
# Finally, save the plot
output_name += "_ROOT"
plot_base.save_plot(output_info, canvas, output_name)
# Also save the plot as a c macro
canvas.SaveAs(os.path.join(output_info.output_prefix, output_name + ".C"))
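# The point offsetting above works by shifting the histogram's own x axis rather than its contents.
# A minimal standalone sketch of the same trick (the histogram and shift index are arbitrary):
def _example_offset_root_hist_points(hist: Hist, index: int) -> None:
    """ Illustrative only: horizontally offset the drawn points of a ROOT hist by shifting its axis limits. """
    shift = index * 0.1 * hist.GetBinWidth(1)
    x_axis = hist.GetXaxis()
    # SetLimits moves the axis underneath the stored bin contents, which displaces the drawn points.
    x_axis.SetLimits(x_axis.GetXmin() + shift, x_axis.GetXmax() + shift)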
def particle_level_spectra_ratios(ep_analyses_iter: Iterator[Tuple[Any, "response_matrix.ResponseMatrixBase"]],
output_info: analysis_objects.PlottingOutputWrapper) -> None:
""" Create ratios relative to the particle level spectra and plot them.
Args:
        ep_analyses_iter: The event plane dependent final response matrices.
output_info: Output information.
Returns:
None. The spectra are plotted and saved.
"""
# Setup
fig, ax = plt.subplots(figsize = (8, 6))
# Diamond, square, up triangle, circle
markers = ["D", "s", "^", "o"]
colors = ["black", "blue", "green", "red"]
# Pull out the dict because we need to grab individual analyses for some labeling information, which doesn't
# play well with generators (the generator will be exhausted).
ep_analyses = dict(ep_analyses_iter)
# First, we need the inclusive analysis spectra to define the ratio.
inclusive = next(iter(ep_analyses.values()))
inclusive_hist = histogram.Histogram1D.from_existing_hist(inclusive.particle_level_spectra)
    # Set up a degree-1 polynomial fit (not really the right place, but it's quick and fine
    # for these purposes).
def degree_1_polynomial(x: float, const: float, slope: float) -> float:
""" Degree 1 polynomial.
Args:
x: Independent variable.
const: Constant offset.
slope: Coefficient for 1st degree term.
Returns:
Calculated first degree polynomial.
"""
return const + x * slope
class Polynomial(pachyderm.fit.Fit):
""" Fit a degree-1 to the background dominated region of a delta eta hist.
The initial value of the fit will be determined by the minimum y value of the histogram.
Attributes:
fit_range: Range used for fitting the data. Values inside of this range will be used.
user_arguments: User arguments for the fit. Default: None.
fit_function: Function to be fit.
fit_result: Result of the fit. Only valid after the fit has been performed.
"""
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
# Finally, setup the fit function
self.fit_function = degree_1_polynomial
def _post_init_validation(self) -> None:
""" Validate that the fit object was setup properly.
This can be any method that the user devises to ensure that
all of the information needed for the fit is available.
Args:
None.
Returns:
None.
"""
fit_range = self.fit_options.get("range", None)
# Check that the fit range is specified
if fit_range is None:
raise ValueError("Fit range must be provided in the fit options.")
# Check that the fit range is a SelectedRange (this isn't really suitable for duck typing)
if not isinstance(fit_range, params.SelectedRange):
raise ValueError("Must provide fit range with a selected range or a set of two values")
def _setup(self, h: histogram.Histogram1D) -> Tuple[histogram.Histogram1D, pachyderm.fit.T_FitArguments]:
""" Setup the histogram and arguments for the fit.
Args:
                h: Histogram (here, the ratio to the inclusive spectra) to be fit.
Returns:
Histogram to use for the fit, default arguments for the fit. Note that the histogram may be range
restricted or otherwise modified here.
"""
fit_range = self.fit_options["range"]
# Restrict the range so that we only fit within the desired input.
restricted_range = (h.x > fit_range.min) & (h.x < fit_range.max)
restricted_hist = histogram.Histogram1D(
# We need the bin edges to be inclusive.
# Need the +/- epsilons here to be extra safe, because apparently some of the <= and >= can fail
# (as might be guessed with floats, but I hadn't observed until now). We don't do this above
# because we don't want to be inclusive on the edges.
bin_edges = h.bin_edges[(h.bin_edges >= (fit_range.min - utils.epsilon)) & (h.bin_edges <= (fit_range.max + utils.epsilon))],
y = h.y[restricted_range],
errors_squared = h.errors_squared[restricted_range]
)
            # Default arguments
            # Start from a flat ratio at unity (const = 1, slope = 0).
arguments: pachyderm.fit.T_FitArguments = {
"slope": 0, "error_slope": 0.005,
"const": 1, "error_const": 0.005,
"limit_slope": (-100, 100),
"limit_const": (-10, 10),
}
return restricted_hist, arguments
for analysis, color, marker in zip(ep_analyses.values(), colors, markers):
# For inclusive, use open markers that are plotted on top of all points.
additional_args = {}
if analysis.reaction_plane_orientation == params.ReactionPlaneOrientation.inclusive:
additional_args.update({
"fillstyle": "none",
"zorder": 10,
})
# Convert and plot hist
h = histogram.Histogram1D.from_existing_hist(analysis.particle_level_spectra)
h /= inclusive_hist
ax.errorbar(
h.x, h.y,
xerr = (h.bin_edges[1:] - h.bin_edges[:-1]) / 2,
yerr = h.errors,
label = analysis.reaction_plane_orientation.display_str(),
color = color,
marker = marker,
linestyle = "",
**additional_args
)
# Fit to a degree-1 polynomial and plot
fit_object = Polynomial(
use_log_likelihood = False,
fit_options = {"range": analysis.task_config["particle_level_spectra"]["normalize_at_selected_jet_pt_values"]}
)
fit_result = fit_object.fit(h = h)
fit_object.fit_result = fit_result
values = fit_object(fit_result.x, *fit_result.values_at_minimum.values())
# Plot the values
ax.plot(
fit_result.x, values,
label = rf"Fit, {fit_result.values_at_minimum['const']:.2f} $\pm$ {fit_result.errors_on_parameters['const']:.2f} + "
+ rf"{fit_result.values_at_minimum['slope']:.1e} $\pm$ {fit_result.errors_on_parameters['slope']:.1e} "
+ rf"* ${labels.jet_pt_display_label()}$",
color = color,
)
# And the error bands
ax.fill_between(
fit_result.x, values - fit_result.errors,
values + fit_result.errors,
facecolor = color, alpha = 0.5,
)
# Final presentation settings
# Legend
ax.legend(
# Here, we specify the location of the upper right corner of the legend box.
loc = "upper right",
bbox_to_anchor = (0.99, 0.99),
borderaxespad = 0,
fontsize = 14,
ncol = 2,
)
plot_labels = plot_base.PlotLabels(
title = "",
x_label = fr"${labels.jet_pt_display_label(upper_label = 'part')}\:({labels.momentum_units_label_gev()})$",
y_label = "Ratio to inclusive",
)
plot_labels.apply_labels(ax)
# Use the same xlimits as for the particle level spectra
ax.set_xlim(0, inclusive.task_config["particle_level_spectra"]["particle_level_max_pt"])
# Should be centered around 1.
ax.set_ylim(0.5, 1.5)
fig.tight_layout()
# Finally, save and cleanup
output_name = "particle_level_ratios"
plot_base.save_plot(output_info, fig, output_name)
plt.close(fig)
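# The fit setup above needs the selected bin edges to be inclusive while the bin centers stay
# strictly inside the fit range. A minimal numpy-only sketch of that selection with made-up values:
def _example_restrict_histogram_to_fit_range() -> histogram.Histogram1D:
    """ Illustrative only: restrict a Histogram1D to a fit range, keeping edges and contents consistent. """
    h = histogram.Histogram1D(
        bin_edges = np.linspace(0, 10, 11),
        y = np.ones(10),
        errors_squared = np.ones(10),
    )
    fit_min, fit_max = 2.0, 6.0
    # Bin centers strictly inside the range...
    selected = (h.x > fit_min) & (h.x < fit_max)
    # ... but bin edges inclusively, padded by epsilon to protect against float comparisons.
    edges = h.bin_edges[(h.bin_edges >= fit_min - utils.epsilon) & (h.bin_edges <= fit_max + utils.epsilon)]
    return histogram.Histogram1D(bin_edges = edges, y = h.y[selected], errors_squared = h.errors_squared[selected])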
def compare_STAR_and_ALICE(star_final_response_task: "response_matrix.ResponseMatrixBase",
alice_particle_level_spectra: Dict[params.CollisionEnergy, Hist],
output_info: analysis_objects.PlottingOutputWrapper) -> None:
""" Compare STAR and ALICE particle level spectra.
Args:
star_final_response_task: STAR response matrix.
alice_particle_level_spectra: The ALICE particle level spectra to plot.
output_info: Output information.
Returns:
None. The comparison is plotted.
"""
# Setup
fig, ax = plt.subplots(figsize = (8, 6))
# First, plot the STAR points
star_centrality_map = {
params.EventActivity.semi_central: r"20 \textendash 50 \%",
}
star_hist = histogram.Histogram1D.from_existing_hist(star_final_response_task.particle_level_spectra)
star_label = f"STAR ${star_final_response_task.collision_energy.display_str()}$ hard-core jets"
star_label += "\n" + f"PYTHIA with ${star_centrality_map[star_final_response_task.event_activity]}$ ${star_final_response_task.collision_system.display_str()}$ det. conditions"
ax.errorbar(
star_hist.x, star_hist.y,
xerr = (star_hist.bin_edges[1:] - star_hist.bin_edges[:-1]) / 2,
yerr = star_hist.errors,
label = star_label,
color = "blue",
marker = "s",
linestyle = "",
)
# Convert and plot hist
# Markers are for 2.76, 5.02 TeV
markers = ["D", "o"]
fill_styles = ["none", "full"]
for (collision_energy, part_level_hist), marker, fill_style in zip(alice_particle_level_spectra.items(),
markers, fill_styles):
alice_label = f"ALICE ${collision_energy.display_str()}$ biased jets"
alice_label += "\n" + f"${params.CollisionSystem.embedPythia.display_str(embedded_additional_label = star_final_response_task.event_activity.display_str())}$"
h = histogram.Histogram1D.from_existing_hist(part_level_hist)
ax.errorbar(
h.x, h.y,
xerr = (h.bin_edges[1:] - h.bin_edges[:-1]) / 2,
yerr = h.errors,
label = alice_label,
color = "red",
marker = marker, fillstyle = fill_style,
linestyle = "",
)
# Label axes
y_label = r"\text{d}N/\text{d}p_{\text{T}}"
if star_final_response_task.task_config["particle_level_spectra"]["normalize_by_n_jets"]:
y_label = r"(1/N_{\text{jets}})" + y_label
if star_final_response_task.task_config["particle_level_spectra"]["normalize_at_selected_jet_pt_bin"]:
# Assumes that we'll never set an upper bound.
values = star_final_response_task.task_config["particle_level_spectra"]["normalize_at_selected_jet_pt_values"]
y_label = r"(1/N_{\text{jets}}^{p_{\text{T}} > " + fr"{values.min}\:{labels.momentum_units_label_gev()}" + r"})" + y_label
# Add y_label units
y_label += fr"\:({labels.momentum_units_label_gev()})^{{-1}}"
plot_labels = plot_base.PlotLabels(
title = "",
x_label = fr"${labels.jet_pt_display_label(upper_label = 'part')}\:({labels.momentum_units_label_gev()})$",
y_label = labels.make_valid_latex_string(y_label),
)
# Apply labels individually so we can increase the font size...
ax.set_xlabel(plot_labels.x_label, fontsize = 16)
ax.set_ylabel(plot_labels.y_label, fontsize = 16)
ax.set_title("")
# Final presentation settings
# Axis ticks
ax.xaxis.set_major_locator(matplotlib.ticker.MultipleLocator(base = 10))
ax.xaxis.set_minor_locator(matplotlib.ticker.MultipleLocator(base = 2))
tick_shared_args = {
"axis": "both",
"bottom": True,
"left": True,
}
ax.tick_params(
which = "major",
# Size of the axis mark labels
labelsize = 15,
length = 8,
**tick_shared_args,
)
ax.tick_params(
which = "minor",
length = 4,
**tick_shared_args,
)
# Limits
ax.set_xlim(0, star_final_response_task.task_config["particle_level_spectra"]["particle_level_max_pt"])
    # Unfortunately, MPL doesn't calculate restricted log limits very nicely, so we have to set the values by hand.
# We grab the value from the configuration
y_limits = star_final_response_task.task_config["particle_level_spectra"]["y_limits"]
ax.set_ylim(y_limits[0], y_limits[1])
ax.set_yscale("log")
# Legend
ax.legend(
# Here, we specify the location of the upper right corner of the legend box.
loc = "upper right",
bbox_to_anchor = (0.99, 0.99),
borderaxespad = 0,
frameon = True,
fontsize = 13.5,
)
ax.text(0.99, 0.66, s = "Inclusive event plane orientation",
horizontalalignment = "right",
verticalalignment = "top",
multialignment = "right",
fontsize = 13.5,
transform = ax.transAxes)
fig.tight_layout()
# Finally, save and cleanup
output_name = "particle_level_comparison_STAR_ALICE"
output_name += "_mpl"
plot_base.save_plot(output_info, fig, output_name)
plt.close(fig)
def compare_min_constituent_cut(obj: "response_matrix.ResponseMatrixBase",
min_constituent_hist: Hist,
output_info: analysis_objects.PlottingOutputWrapper) -> None:
""" Compare the constituent cut against the minimum constituent cut.
Args:
obj: The response matrix analysis object.
min_constituent_hist: Particle level spectra generated with a minimum constituent cut.
output_info: Output information.
Returns:
None. The comparison is plotted.
"""
# Setup
fig, ax = plt.subplots(figsize = (8, 6))
# Plot the standard constituent cut.
particle_level_hist = histogram.Histogram1D.from_existing_hist(obj.particle_level_spectra)
label = f"Jets, {labels.constituent_cuts(additional_label = ' part,det')}"
ax.errorbar(
particle_level_hist.x, particle_level_hist.y,
xerr = particle_level_hist.bin_widths / 2,
yerr = particle_level_hist.errors,
label = label,
marker = "s",
linestyle = "",
)
# Plot the min constituent cut spectra
label = f"Jets, {labels.constituent_cuts(min_track_pt = 0.15, min_cluster_pt = 0.30, additional_label = ' part,det')}"
h = histogram.Histogram1D.from_existing_hist(min_constituent_hist)
ax.errorbar(
h.x, h.y,
xerr = h.bin_widths / 2,
yerr = h.errors,
label = label,
marker = "s",
linestyle = "",
)
# Label axes
y_label = r"\text{d}N/\text{d}p_{\text{T}}"
if obj.task_config["particle_level_spectra"]["normalize_by_n_jets"]:
y_label = r"(1/N_{\text{jets}})" + y_label
if obj.task_config["particle_level_spectra"]["normalize_at_selected_jet_pt_bin"]:
# Assumes that we'll never set an upper bound.
values = obj.task_config["particle_level_spectra"]["normalize_at_selected_jet_pt_values"]
y_label = r"(1/N_{\text{jets}}^{p_{\text{T}} > " + fr"{values.min}\:{labels.momentum_units_label_gev()}" + r"})" + y_label
# Add y_label units
y_label += fr"\:({labels.momentum_units_label_gev()})^{{-1}}"
plot_labels = plot_base.PlotLabels(
title = "",
x_label = fr"${labels.jet_pt_display_label(upper_label = 'part')}\:({labels.momentum_units_label_gev()})$",
y_label = labels.make_valid_latex_string(y_label),
)
# Apply labels individually so we can increase the font size...
ax.set_xlabel(plot_labels.x_label, fontsize = 16)
ax.set_ylabel(plot_labels.y_label, fontsize = 16)
ax.set_title("")
# Final presentation settings
# Axis ticks
ax.xaxis.set_major_locator(matplotlib.ticker.MultipleLocator(base = 10))
ax.xaxis.set_minor_locator(matplotlib.ticker.MultipleLocator(base = 2))
tick_shared_args = {
"axis": "both",
"bottom": True,
"left": True,
}
ax.tick_params(
which = "major",
# Size of the axis mark labels
labelsize = 15,
length = 8,
**tick_shared_args,
)
ax.tick_params(
which = "minor",
length = 4,
**tick_shared_args,
)
# Limits
ax.set_xlim(0, obj.task_config["particle_level_spectra"]["particle_level_max_pt"])
    # Unfortunately, MPL doesn't calculate restricted log limits very nicely, so we
    # have to set the values by hand.
# We grab the value from the last analysis object - the value will be the same for all of them.
y_limits = obj.task_config["particle_level_spectra"]["y_limits"]
ax.set_ylim(y_limits[0], y_limits[1])
ax.set_yscale("log")
# Legend
ax.legend(
loc = "lower left",
fontsize = 16,
frameon = False,
)
# General labels
label = f"${obj.alice_label.display_str()}$, ${obj.collision_energy.display_str()}$"
label += "\n" + f"${params.CollisionSystem.embedPythia.display_str(embedded_additional_label = obj.event_activity.display_str())}$"
label += "\n" + "Inclusive event plane orientation"
ax.text(0.99, 0.99, s = label,
horizontalalignment = "right",
verticalalignment = "top",
multialignment = "right",
fontsize = 16,
transform = ax.transAxes)
fig.tight_layout()
# Finally, save and cleanup
output_name = "particle_level_comparison_constituent_cut"
plot_base.save_plot(output_info, fig, output_name)
plt.close(fig)
def plot_response_matrix_and_errors(obj: "response_matrix.ResponseMatrixBase",
plot_with_ROOT: bool = False) -> None:
""" Plot the 2D response matrix and response matrix errors hists using ROOT.
Args:
obj: The response matrix analysis object.
plot_with_ROOT: True if the plot should be done via ROOT. Default: False
Returns:
None. The comparison is plotted.
"""
for hist, plot_errors_hist in [(obj.response_matrix, False),
(obj.response_matrix_errors, True)]:
# Plot response matrix
_plot_response_matrix(
hist = hist,
plot_errors_hist = plot_errors_hist,
output_info = obj.output_info,
plot_with_ROOT = plot_with_ROOT,
reaction_plane_orientation = obj.reaction_plane_orientation,
)
# We also want to plot a final response matrix, with final labeling and rebinning.
_plot_final_response_matrix_with_matplotlib(
obj = obj,
hist = obj.response_matrix,
output_info = obj.output_info,
)
def _plot_response_matrix(hist: Hist,
plot_errors_hist: bool,
output_info: analysis_objects.PlottingOutputWrapper,
plot_with_ROOT: bool,
reaction_plane_orientation: params.ReactionPlaneOrientation) -> None:
""" Plot the given response matrix related 2D hist.
Args:
hist: The response matrix related 2D hist.
        plot_errors_hist: True if the hist is the response matrix errors hist.
output_info: Output information.
plot_with_ROOT: True if the plot should be done via ROOT.
reaction_plane_orientation: Reaction plane orientation of the plot.
Returns:
None
"""
# Determine parameters
name = "Response Matrix"
if plot_errors_hist:
name += " Errors"
name += f", {reaction_plane_orientation.display_str()} event plane orientation"
output_name = "response_matrix"
if plot_errors_hist:
output_name += "_errors"
x_label = fr"{labels.jet_pt_display_label(upper_label = 'hybrid')}\:({labels.momentum_units_label_gev()})"
y_label = fr"{labels.jet_pt_display_label(upper_label = 'part')}\:({labels.momentum_units_label_gev()})"
# Determine args and call
args = {
"name": name,
"x_label": labels.make_valid_latex_string(x_label),
"y_label": labels.make_valid_latex_string(y_label),
"output_name": output_name,
"hist": hist,
"plot_errors_hist": plot_errors_hist,
"output_info": output_info,
}
if plot_with_ROOT:
_plot_response_matrix_with_ROOT(**args)
else:
_plot_response_matrix_with_matplotlib(**args)
def _plot_response_matrix_with_matplotlib(name: str, x_label: str, y_label: str, output_name: str,
hist: Hist,
plot_errors_hist: bool,
output_info: analysis_objects.PlottingOutputWrapper) -> None:
""" Underlying function to actually plot a response matrix with matplotlib.
Args:
name: Name of the histogram.
x_label: X axis label.
y_label: Y axis label.
output_name: Output name of the histogram.
hist: The response matrix related 2D hist.
        plot_errors_hist: True if the hist is the response matrix errors hist.
output_info: Output information.
Returns:
None
"""
# Setup
fig, ax = plt.subplots(figsize = (8, 6))
# Convert the histogram
X, Y, hist_array = histogram.get_array_from_hist2D(
hist = hist,
set_zero_to_NaN = True,
return_bin_edges = True,
)
# Determine and fill args
kwargs = {}
# Create a log z axis heat map.
kwargs["norm"] = matplotlib.colors.LogNorm(vmin = np.nanmin(hist_array), vmax = np.nanmax(hist_array))
logger.debug(f"min: {np.nanmin(hist_array)}, max: {np.nanmax(hist_array)}")
# The colormap that we use is the default from sns.heatmap
kwargs["cmap"] = plot_base.prepare_colormap(sns.cm.rocket)
# Label is included so we could use a legend if we want
kwargs["label"] = name
logger.debug("kwargs: {}".format(kwargs))
# Determine the edges
extent = [
np.amin(X), np.amax(X),
np.amin(Y), np.amax(Y)
]
# Finally, create the plot
ax_from_imshow = ax.imshow(
hist_array.T, extent = extent,
interpolation = "nearest", aspect = "auto", origin = "lower",
**kwargs
)
# Add colorbar
# It needs to be defined on the figure because it is stored in a separate axis.
fig.colorbar(ax_from_imshow, ax = ax)
# Final styling
ax.set_title(name)
ax.set_xlabel(x_label)
ax.set_ylabel(y_label)
fig.tight_layout()
# Save and cleanup
output_name += "_mpl"
plot_base.save_plot(output_info, fig, output_name)
plt.close(fig)
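# Empty bins are converted to NaN before plotting so the log color normalization can skip them.
# A minimal standalone sketch of that pattern; the input array is assumed to already contain NaN
# in place of empty bins:
def _example_log_norm_heatmap(hist_array: np.ndarray) -> None:
    """ Illustrative only: draw a log-z heat map that ignores empty (NaN) bins. """
    fig, ax = plt.subplots(figsize = (8, 6))
    norm = matplotlib.colors.LogNorm(vmin = np.nanmin(hist_array), vmax = np.nanmax(hist_array))
    image = ax.imshow(hist_array.T, norm = norm, interpolation = "nearest", aspect = "auto", origin = "lower")
    fig.colorbar(image, ax = ax)
    plt.close(fig)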
def _plot_response_matrix_with_ROOT(name: str, x_label: str, y_label: str, output_name: str,
hist: Hist,
plot_errors_hist: bool,
output_info: analysis_objects.PlottingOutputWrapper) -> None:
""" Underlying function to actually plot a response matrix with ROOT.
Args:
name: Name of the histogram.
x_label: X axis label.
y_label: Y axis label.
output_name: Output name of the histogram.
hist: The response matrix related 2D hist.
        plot_errors_hist: True if the hist is the response matrix errors hist.
output_info: Output information.
Returns:
None
"""
# Setup
canvas = ROOT.TCanvas("canvas", "canvas")
canvas.SetLogz(True)
# Plot the histogram
hist.SetTitle(name)
hist.GetXaxis().SetTitle(labels.use_label_with_root(x_label))
hist.GetYaxis().SetTitle(labels.use_label_with_root(y_label))
hist.Draw("colz")
# Set the final axis ranges.
# Z axis
min_val = ctypes.c_double(0)
max_val = ctypes.c_double(0)
hist.GetMinimumAndMaximum(min_val, max_val)
# * 1.1 to put it slightly above the max value
# min_val doesn't work here, because there are some entries at 0
hist.GetZaxis().SetRangeUser(10e-7, max_val.value * 1.1)
# Save
output_name += "_ROOT"
plot_base.save_plot(output_info, canvas, output_name)
def _plot_final_response_matrix_with_matplotlib(obj: "response_matrix.ResponseMatrixBase",
hist: Hist,
output_info: analysis_objects.PlottingOutputWrapper) -> None:
""" Underlying function to actually plot the final response matrix with matplotlib.
It also rebins a clone of the histogram so it's presentable.
Args:
obj: Response matrix object.
hist: The response matrix related 2D hist.
output_info: Output information.
Returns:
None
"""
# Setup
fig, ax = plt.subplots(figsize = (8, 6))
# Convert the histogram
# First, we clone and rebin for presentation purposes
response = hist.Clone(f"{hist.GetName()}_rebin_temp")
response.Rebin2D(5, 5)
X, Y, hist_array = histogram.get_array_from_hist2D(
hist = response,
set_zero_to_NaN = True,
return_bin_edges = True,
)
# Determine and fill args
kwargs = {}
# Create a log z axis heat map.
kwargs["norm"] = matplotlib.colors.LogNorm(vmin = np.nanmin(hist_array), vmax = np.nanmax(hist_array))
logger.debug(f"min: {np.nanmin(hist_array)}, max: {np.nanmax(hist_array)}")
# The colormap that we use is the default from sns.heatmap
kwargs["cmap"] = "viridis"
# Label is included so we could use a legend if we want
kwargs["label"] = "response"
logger.debug("kwargs: {}".format(kwargs))
# Determine the edges
extent = [
np.amin(X), np.amax(X),
np.amin(Y), np.amax(Y)
]
# Finally, create the plot
ax_from_imshow = ax.imshow(
hist_array.T, extent = extent,
interpolation = "nearest", aspect = "auto", origin = "lower",
**kwargs
)
# Add colorbar
# It needs to be defined on the figure because it is stored in a separate axis.
fig.colorbar(ax_from_imshow, ax = ax)
# Add labeling
embedded_additional_label = obj.event_activity.display_str()
general_labels = {
"alice_and_collision_energy":
rf"{obj.alice_label.display_str()}\:{obj.collision_energy.display_str()}",
"collision_system_and_event_activity":
rf"{obj.collision_system.display_str(embedded_additional_label = embedded_additional_label)}",
"constituent_cuts": labels.constituent_cuts(additional_label = "det"),
"leading_hadron_bias": obj.leading_hadron_bias.display_str(additional_label = "det"),
"jet_finding": labels.jet_finding(),
}
# Ensure that each line is a valid latex line.
# The heuristic is roughly that full statements (such as jet_finding) are already wrapped in "$",
# while partial statements, such as the leading hadron bias, event activity, etc are not wrapped in "$".
# This is due to the potential for such "$" to interfere with including those partial statements in other
# statements. As an example, it would be impossible to use the ``embedded_additional_label`` above if the
# ``event_activity`` included "$".
for k, v in general_labels.items():
general_labels[k] = labels.make_valid_latex_string(v)
label = "\n".join(reversed(list(general_labels.values())))
ax.text(0.99, 0.01, s = label,
horizontalalignment = "right",
verticalalignment = "bottom",
multialignment = "right",
transform = ax.transAxes,
# We need a slightly smaller font size to fit everything...
fontsize = 15)
# Axis labels
x_label = labels.make_valid_latex_string(
fr"{labels.jet_pt_display_label(upper_label = 'hybrid')}\:({labels.momentum_units_label_gev()})"
)
y_label = labels.make_valid_latex_string(
fr"{labels.jet_pt_display_label(upper_label = 'part')}\:({labels.momentum_units_label_gev()})"
)
ax.set_title("")
ax.set_xlabel(x_label)
ax.set_ylabel(y_label)
# Final styling
fig.tight_layout()
# Save and cleanup
output_name = "response_matrix_final_mpl"
plot_base.save_plot(output_info, fig, output_name)
plt.close(fig)
def plot_response_spectra(plot_labels: plot_base.PlotLabels,
output_name: str,
merged_analysis: analysis_objects.JetHBase,
pt_hard_analyses: Mapping[Any, Union["pt_hard_analysis.PtHardAnalysis", "response_matrix.ResponseMatrixBase"]],
hist_attribute_name: str,
plot_with_ROOT: bool = False) -> None:
""" Plot 1D response spectra.
Args:
plot_labels: Labels for the plot.
output_name: Name under which the plot should be stored.
merged_analysis: Full merged together analysis object.
pt_hard_analyses: Pt hard dependent analysis objects to be plotted.
hist_attribute_name: Name of the attribute under which the histogram is stored.
plot_with_ROOT: True if the plot should be done via ROOT.
"""
# Setup
# NOTE: "husl" is also a good option.
colors = sns.color_palette(
palette = "Blues_d", n_colors = len(pt_hard_analyses)
)
# Update the plot labels as appropriate using the reaction plane orientation information
# Help out mypy....
assert plot_labels.title is not None
# The pt hard spectra doesn't have a reaction plane orientation, so add it to the title if the
# attribute is available
if hasattr(merged_analysis, "reaction_plane_orientation"):
# Help out mypy
merged_analysis = cast(analysis_objects.JetHReactionPlane, merged_analysis)
plot_labels.title = plot_labels.title + f", {merged_analysis.reaction_plane_orientation.display_str()} reaction plane orientation"
kwargs = {
"plot_labels": plot_labels,
"output_name": output_name,
"merged_analysis": merged_analysis,
"pt_hard_analyses": pt_hard_analyses,
"hist_attribute_name": hist_attribute_name,
"colors": colors,
}
if plot_with_ROOT:
_plot_response_spectra_with_ROOT(**kwargs)
else:
_plot_response_spectra_with_matplotlib(**kwargs)
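# Illustrative call of ``plot_response_spectra`` (sketch only; ``labels_for_plot``, ``merged``, and
# ``analyses`` are hypothetical objects, and the attribute path is likewise just an example):
#
#     plot_response_spectra(
#         plot_labels = labels_for_plot,
#         output_name = "response_spectra",
#         merged_analysis = merged,
#         pt_hard_analyses = analyses,
#         hist_attribute_name = "part_level_hists.total_spectra",
#         plot_with_ROOT = False,
#     )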
def _plot_response_spectra_with_matplotlib(plot_labels: plot_base.PlotLabels,
output_name: str,
merged_analysis: analysis_objects.JetHBase,
pt_hard_analyses: Mapping[Any, Union["pt_hard_analysis.PtHardAnalysis", "response_matrix.ResponseMatrixBase"]],
hist_attribute_name: str,
colors: Sequence[Tuple[float, float, float]]) -> None:
""" Plot 1D response spectra with matplotlib.
Args:
plot_labels: Labels for the plot.
output_name: Name under which the plot should be stored.
merged_analysis: Full merged together analysis object.
pt_hard_analyses: Pt hard dependent analysis objects to be plotted.
hist_attribute_name: Name of the attribute under which the histogram is stored.
colors: List of colors to be used for plotting the pt hard spectra.
"""
# Setup
fig, ax = plt.subplots(figsize = (8, 6))
plot_labels.apply_labels(ax)
# First, we plot the merged analysis. This is the sum of the various pt hard bin contributions.
merged_hist = utils.recursive_getattr(merged_analysis, hist_attribute_name)
merged_hist = histogram.Histogram1D.from_existing_hist(merged_hist)
ax.errorbar(
merged_hist.x, merged_hist.y,
yerr = merged_hist.errors,
label = "Merged",
color = "black",
marker = ".",
linestyle = "",
)
# Now, we plot the pt hard dependent hists
for (key_index, analysis), color in zip(pt_hard_analyses.items(), colors):
# Determine the proper label.
label = labels.pt_range_string(
pt_bin = key_index.pt_hard_bin,
lower_label = "T",
upper_label = "hard",
only_show_lower_value_for_last_bin = True,
)
# Plot the histogram.
hist = utils.recursive_getattr(analysis, hist_attribute_name)
h = histogram.Histogram1D.from_existing_hist(hist)
ax.errorbar(
h.x, h.y,
yerr = h.errors,
label = label,
color = color,
marker = ".",
linestyle = "",
)
# Final presentation settings
# Ensure that the max is never beyond 300 for better presentation.
max_limit = np.max(merged_hist.x)
if max_limit > 300:
max_limit = 300
ax.set_xlim(0, max_limit)
ax.set_yscale("log")
ax.legend(loc = "best", frameon = False, ncol = 2, fontsize = 11)
fig.tight_layout()
# Save and cleanup
output_name += "_mpl"
plot_base.save_plot(merged_analysis.output_info, fig, output_name)
plt.close(fig)
def _plot_response_spectra_with_ROOT(plot_labels: plot_base.PlotLabels,
output_name: str,
merged_analysis: analysis_objects.JetHBase,
pt_hard_analyses: Mapping[Any, Union["pt_hard_analysis.PtHardAnalysis", "response_matrix.ResponseMatrixBase"]],
hist_attribute_name: str,
colors: Sequence[Tuple[float, float, float]]) -> None:
""" Plot 1D response spectra with ROOT.
Args:
plot_labels: Labels for the plot.
output_name: Name under which the plot should be stored.
merged_analysis: Full merged together analysis object.
pt_hard_analyses: Pt hard dependent analysis objects to be plotted.
hist_attribute_name: Name of the attribute under which the histogram is stored.
colors: List of colors to be used for plotting the pt hard spectra.
"""
# Setup
canvas = ROOT.TCanvas("canvas", "canvas")
canvas.SetLogy(True)
# Legend
legend = ROOT.TLegend(0.37, 0.55, 0.9, 0.9)
legend.SetHeader(r"p_{\mathrm{T}}\:\mathrm{bins}", "C")
# Increase text size
legend.SetTextSize(0.025)
# Use two columns because we have a lot of entries.
legend.SetNColumns(2)
# Remove the legend border
legend.SetBorderSize(0)
# Make the legend transparent
legend.SetFillStyle(0)
# First, we plot the merged analysis. This is the sum of the various pt hard bin contributions.
merged_hist = utils.recursive_getattr(merged_analysis, hist_attribute_name)
# Apply axis labels (which must be set on the hist)
plot_labels.apply_labels(merged_hist)
# Style the merged hist to ensure that it is possible to see the points
merged_hist.SetMarkerStyle(ROOT.kFullCircle)
merged_hist.SetMarkerSize(1)
merged_hist.SetMarkerColor(ROOT.kBlack)
merged_hist.SetLineColor(ROOT.kBlack)
# Ensure that the max is never beyond 300 for better presentation.
max_limit = merged_hist.GetXaxis().GetXmax()
if max_limit > 300:
max_limit = 300
merged_hist.GetXaxis().SetRangeUser(0, max_limit)
# Label and draw
legend.AddEntry(merged_hist, "Merged")
merged_hist.Draw("same")
# Now, we plot the pt hard dependent hists
for i, ((key_index, analysis), color) in enumerate(zip(pt_hard_analyses.items(), colors)):
# Setup
color = ROOT.TColor.GetColor(*color)
# Determine the proper label.
label = labels.pt_range_string(
pt_bin = key_index.pt_hard_bin,
lower_label = "T",
upper_label = "hard",
only_show_lower_value_for_last_bin = True,
)
# Retrieve and style the hist
hist = utils.recursive_getattr(analysis, hist_attribute_name)
hist.SetMarkerStyle(ROOT.kFullCircle + i)
hist.SetMarkerSize(1)
hist.SetMarkerColor(color)
hist.SetLineColor(color)
# Label and draw
legend.AddEntry(hist, labels.use_label_with_root(label))
hist.Draw("same")
# Final presentation settings
legend.Draw()
# Save and cleanup
output_name += "_ROOT"
plot_base.save_plot(merged_analysis.output_info, canvas, output_name)
def plot_particle_level_spectra_agreement(difference: Hist, absolute_value_of_difference: Hist,
output_info: analysis_objects.PlottingOutputWrapper) -> None:
""" Plot the agreement of the particle level spectra between the inclusive and sum of all EP orientations.
Args:
difference: Hist of the sum of the EP orientations spectra minus the inclusive spectra.
absolute_value_of_difference: Same as the difference hist, but having taken the absolute value.
This allows us to plot the difference on a log scale (which is useful if the differences
are small).
output_info: Output information.
Returns:
None.
"""
# Setup
output_name = "difference_of_sum_EP_orientations_vs_inclusive"
canvas = ROOT.TCanvas("canvas", "canvas")
# Labeling
x_label = labels.use_label_with_root(
fr"{labels.jet_pt_display_label(upper_label = 'part')}\:({labels.momentum_units_label_gev()})"
)
y_label = r"\mathrm{d}N/\mathrm{d}p_{\mathrm{T}}"
# Apply settings to hists
for h in [difference, absolute_value_of_difference]:
# Labeling
h.GetXaxis().SetTitle(x_label)
h.GetYaxis().SetTitle(y_label)
# Center axis title
h.GetXaxis().CenterTitle(True)
h.GetYaxis().CenterTitle(True)
# Draw and save the difference histogram.
difference.Draw()
plot_base.save_plot(output_info, canvas, output_name)
# Draw and save the absolute value of the difference histogram.
absolute_value_of_difference.Draw()
canvas.SetLogy(True)
output_name += "_abs"
plot_base.save_plot(output_info, canvas, output_name)
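# Illustrative usage (sketch only; ``diff``, ``abs_diff``, and ``output_info`` are hypothetical
# objects produced elsewhere in the analysis):
#
#     plot_particle_level_spectra_agreement(
#         difference = diff,
#         absolute_value_of_difference = abs_diff,
#         output_info = output_info,
#     )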
def matched_jet_energy_scale(plot_labels: plot_base.PlotLabels, output_name: str,
output_info: analysis_objects.PlottingOutputWrapper,
obj: "response_matrix.ResponseMatrixBase") -> None:
# Setup
canvas = ROOT.TCanvas("canvas", "canvas")
canvas.SetLogz(True)
hist = obj.matched_jet_pt_difference
logger.debug(f"hist: {hist}")
# Plot the histogram
plot_labels.apply_labels(hist)
hist.Draw("colz")
# Axis ranges
hist.GetXaxis().SetRangeUser(0, 150)
# Scale Z axis. Otherwise, we won't see much.
min_val = ctypes.c_double(0)
max_val = ctypes.c_double(0)
hist.GetMinimumAndMaximum(min_val, max_val)
# * 1.1 to put it slightly above the max value
# min_val doesn't work here, because there are some entries at 0
hist.GetZaxis().SetRangeUser(10e-7, max_val.value * 1.1)
# Save
plot_base.save_plot(output_info, canvas, output_name)
|
the-stack_106_27117 | import enum
import logging
from typing import Optional, Union, Tuple, Iterable
import numpy as np
from .azimuthalcurve import AzimuthalCurve
from .curve import Curve
from .header import Header
from ..algorithms.matrixaverager import ErrorPropagationMethod, MatrixAverager
from ..algorithms.radavg import radavg, autoq, azimavg
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class QRangeMethod(enum.Enum):
"""Methods for spacing q-bins"""
Linear = 1
Logarithmic = 0
Square = 2
Square_root = 3
class Exposure:
intensity: np.ndarray
mask: np.ndarray # 1: valid: 0: invalid
uncertainty: np.ndarray
header: Header
def __init__(self, intensity: np.ndarray, header: Header, uncertainty: Optional[np.ndarray] = None,
mask: Optional[np.ndarray] = None):
self.intensity = intensity
if uncertainty is None:
uncertainty = np.zeros_like(self.intensity)
if mask is None:
mask = np.ones(self.intensity.shape, np.uint8)
else:
mask = mask.astype(np.uint8)
if (intensity.shape != uncertainty.shape) or (intensity.shape != mask.shape):
raise ValueError('Shape mismatch')
self.mask = mask
self.uncertainty = uncertainty
self.header = header
def radial_average(self, qbincenters: Optional[Union[np.ndarray, int, Tuple[QRangeMethod, int]]] = None,
errorprop: ErrorPropagationMethod = ErrorPropagationMethod.Gaussian,
qerrorprop: ErrorPropagationMethod = ErrorPropagationMethod.Gaussian) -> Curve:
if (qbincenters is None) or isinstance(qbincenters, int):
qbincenters = autoq(
self.mask, self.header.wavelength[0], self.header.distance[0], self.header.pixelsize[0],
self.header.beamposrow[0], self.header.beamposcol[0], linspacing=1,
N=-1 if qbincenters is None else qbincenters)
elif isinstance(qbincenters, tuple) and (len(qbincenters) == 2) and isinstance(qbincenters[0], QRangeMethod):
qbincenters = autoq(
self.mask, self.header.wavelength[0], self.header.distance[0], self.header.pixelsize[0],
self.header.beamposrow[0], self.header.beamposcol[0], linspacing=qbincenters[0].value,
                N=-1 if (qbincenters[1] < 1) else qbincenters[1])
elif not isinstance(qbincenters, np.ndarray):
raise TypeError(f'Invalid type for parameter `qbincenters`: {type(qbincenters)}')
q, intensity, uncertainty, quncertainty, binarea, pixel = radavg(
self.intensity, self.uncertainty, self.mask,
self.header.wavelength[0], self.header.wavelength[1],
self.header.distance[0], self.header.distance[1],
self.header.pixelsize[0], self.header.pixelsize[1],
self.header.beamposrow[0], self.header.beamposrow[1],
self.header.beamposcol[0], self.header.beamposcol[1],
qbincenters,
errorprop.value, qerrorprop.value
)
return Curve.fromVectors(q, intensity, uncertainty, quncertainty, binarea, pixel)
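    # Illustrative ways to call ``radial_average`` (sketch; ``ex`` is a hypothetical Exposure instance):
    #
    #     curve = ex.radial_average()                                  # automatic q-bins from mask/geometry
    #     curve = ex.radial_average(200)                               # 200 automatically spaced q-bins
    #     curve = ex.radial_average((QRangeMethod.Logarithmic, 150))   # 150 logarithmically spaced q-bins
    #     curve = ex.radial_average(np.linspace(0.01, 5.0, 100))       # explicit q-bin centers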
def azim_average(self, count=100,
errorprop: ErrorPropagationMethod = ErrorPropagationMethod.Conservative,
qerrorprop: ErrorPropagationMethod = ErrorPropagationMethod.Conservative) -> AzimuthalCurve:
phi, intensity, uncertainty, phiuncertainty, binarea, qmean, qstd = azimavg(
self.intensity, self.uncertainty, self.mask,
self.header.wavelength[0], # self.header.wavelength[1],
self.header.distance[0], # self.header.distance[1],
self.header.pixelsize[0], # self.header.pixelsize[1],
self.header.beamposrow[0], self.header.beamposrow[1],
self.header.beamposcol[0], self.header.beamposcol[1],
count,
errorprop.value, qerrorprop.value
)
return AzimuthalCurve.fromVectors(phi, intensity, uncertainty, phiuncertainty, binarea, qmean, qstd)
@property
def size(self) -> int:
return self.intensity.size
@property
def shape(self) -> Tuple[int, ...]:
return self.intensity.shape
def radius_pixel(self) -> Tuple[np.ndarray, np.ndarray]:
row = np.arange(self.intensity.shape[0])[:, np.newaxis] - self.header.beamposrow[0]
drow = self.header.beamposrow[1]
col = np.arange(self.intensity.shape[1])[np.newaxis, :] - self.header.beamposcol[0]
dcol = self.header.beamposcol[1]
radius = (row ** 2 + col ** 2) ** 0.5
dradius = (row ** 2 * drow ** 2 + col ** 2 * dcol ** 2) ** 0.5 / radius
return radius, dradius
def radius_distance(self) -> Tuple[np.ndarray, np.ndarray]:
r, dr = self.radius_pixel()
return (r * self.header.pixelsize[0],
(dr ** 2 * self.header.pixelsize[0] ** 2 + r ** 2 * self.header.pixelsize[1] ** 2) ** 0.5)
def twotheta(self) -> Tuple[np.ndarray, np.ndarray]:
r, dr = self.radius_distance()
tan2th = (
r / self.header.distance[0],
(r ** 2 * self.header.distance[1] ** 2 / self.header.distance[0] ** 4 + dr ** 2 / self.header.distance[
0] ** 2) ** 0.5
)
return (np.arctan(tan2th[0]),
np.abs(tan2th[1] / (1 + tan2th[0] ** 2)))
def q(self) -> Tuple[np.ndarray, np.ndarray]:
tth, dtth = self.twotheta()
th, dth = 0.5 * tth, 0.5 * dtth
sinth = np.sin(th), np.abs(dth * np.cos(th))
        # Gaussian propagation of q = 4*pi*sin(theta)/lambda:
        # dq^2 = (4*pi)^2 * (dsin(theta)^2 / lambda^2 + sin(theta)^2 * dlambda^2 / lambda^4)
        return (4 * np.pi * sinth[0] / self.header.wavelength[0],
                4 * np.pi * (
                    sinth[1] ** 2 / self.header.wavelength[0] ** 2
                    + sinth[0] ** 2 * self.header.wavelength[1] ** 2 / self.header.wavelength[0] ** 4) ** 0.5)
def save(self, filename: str):
np.savez_compressed(filename, Intensity=self.intensity, Error=self.uncertainty, mask=self.mask)
@classmethod
def average(cls, exposures: Iterable["Exposure"], errorpropagation: ErrorPropagationMethod) -> "Exposure":
avgintensity = MatrixAverager(errorpropagation)
mask = None
headers = []
for ex in exposures:
assert ex.intensity is not None
assert ex.uncertainty is not None
avgintensity.add(ex.intensity, ex.uncertainty)
if mask is None:
mask = ex.mask
else:
mask = np.logical_and(mask > 0, ex.mask > 0)
headers.append(ex.header)
intensity, uncertainty = avgintensity.get()
return Exposure(intensity, Header.average(*headers), uncertainty, mask)
def qtopixel(self, q: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
return np.tan(2 * np.arcsin(q / 4 / np.pi * self.header.wavelength[0])) * self.header.distance[0] / \
self.header.pixelsize[0]
def pixeltoq(self, pixel: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
return 4 * np.pi * np.sin(0.5 * np.arctan(pixel * self.header.pixelsize[0] / self.header.distance[0])) / \
self.header.wavelength[0]
|
the-stack_106_27119 | """
Build statically rendered files.
SPDX-FileCopyrightText: 2021 Birger Schacht <[email protected]>, Mikk Margus Möll <[email protected]>, Sebastian Wagner <[email protected]>
SPDX-License-Identifier: AGPL-3.0-or-later
"""
import argparse
import pathlib
import shutil
from mako.lookup import TemplateLookup
def render_page(pagename: str, **template_args) -> str:
template_dir = pathlib.Path(__file__).parent / 'templates'
template_lookup = TemplateLookup(directories=[template_dir], default_filters=["h"], input_encoding='utf8')
template = template_lookup.get_template(f'{pagename}.mako')
return template.render(pagename=pagename, **template_args)
def buildhtml(outputdir: pathlib.Path = pathlib.Path('html')):
outputdir.mkdir(parents=True, exist_ok=True)
htmlfiles = ["configs", "management", "monitor", "check", "about", "index"]
for filename in htmlfiles:
print(f"Rendering {filename}.html")
html = render_page(filename)
outputdir.joinpath(f"{filename}.html").write_text(html)
staticfiles = ["css", "images", "js", "plugins", "less"]
for filename in staticfiles:
print(f"Copying {filename} recursively")
src = pathlib.Path(__file__).parent / 'static' / filename
dst = outputdir / filename
if dst.exists():
shutil.rmtree(dst)
shutil.copytree(src, dst)
print('rendering dynvar.js')
rendered = render_page('dynvar', allowed_path='/opt/intelmq/var/lib/bots/', controller_cmd='intelmq')
outputdir.joinpath('js/dynvar.js').write_text(rendered)
def main():
parser = argparse.ArgumentParser(
prog='intelmq-manager-build',
description='Build statically rendered files for intelmq-manager.',
epilog='This command renders and saves all files required for IntelMQ Manager at the given directory, which can be served by Webservers statically',
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument('--output-dir', '-o', default='html',
type=pathlib.Path,
help='The destination directory, will be created if needed.')
args = parser.parse_args()
buildhtml(outputdir=args.output_dir)
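    # Typical invocations (assuming the ``intelmq-manager-build`` console entry point is installed):
    #
    #     intelmq-manager-build                              # render into ./html
    #     intelmq-manager-build -o /srv/www/intelmq-manager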
if __name__ == '__main__':
main()
|
the-stack_106_27120 | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from six import moves
from tempest_lib import exceptions as lib_exc
from tempest.api.identity import base
from tempest.common.utils import data_utils
from tempest import test
class ServicesTestJSON(base.BaseIdentityV2AdminTest):
def _del_service(self, service_id):
# Deleting the service created in this method
self.client.delete_service(service_id)
# Checking whether service is deleted successfully
self.assertRaises(lib_exc.NotFound, self.client.show_service,
service_id)
@test.idempotent_id('84521085-c6e6-491c-9a08-ec9f70f90110')
def test_create_get_delete_service(self):
# GET Service
# Creating a Service
name = data_utils.rand_name('service')
type = data_utils.rand_name('type')
description = data_utils.rand_name('description')
service_data = self.client.create_service(
name, type, description=description)['OS-KSADM:service']
        self.assertIsNotNone(service_data['id'])
self.addCleanup(self._del_service, service_data['id'])
# Verifying response body of create service
self.assertIn('id', service_data)
self.assertIn('name', service_data)
self.assertEqual(name, service_data['name'])
self.assertIn('type', service_data)
self.assertEqual(type, service_data['type'])
self.assertIn('description', service_data)
self.assertEqual(description, service_data['description'])
# Get service
fetched_service = (self.client.show_service(service_data['id'])
['OS-KSADM:service'])
# verifying the existence of service created
self.assertIn('id', fetched_service)
self.assertEqual(fetched_service['id'], service_data['id'])
self.assertIn('name', fetched_service)
self.assertEqual(fetched_service['name'], service_data['name'])
self.assertIn('type', fetched_service)
self.assertEqual(fetched_service['type'], service_data['type'])
self.assertIn('description', fetched_service)
self.assertEqual(fetched_service['description'],
service_data['description'])
@test.idempotent_id('5d3252c8-e555-494b-a6c8-e11d7335da42')
def test_create_service_without_description(self):
# Create a service only with name and type
name = data_utils.rand_name('service')
type = data_utils.rand_name('type')
service = self.client.create_service(name, type)['OS-KSADM:service']
self.assertIn('id', service)
self.addCleanup(self._del_service, service['id'])
self.assertIn('name', service)
self.assertEqual(name, service['name'])
self.assertIn('type', service)
self.assertEqual(type, service['type'])
@test.attr(type='smoke')
@test.idempotent_id('34ea6489-012d-4a86-9038-1287cadd5eca')
def test_list_services(self):
# Create, List, Verify and Delete Services
services = []
for _ in moves.xrange(3):
name = data_utils.rand_name('service')
type = data_utils.rand_name('type')
description = data_utils.rand_name('description')
service = self.client.create_service(
name, type, description=description)['OS-KSADM:service']
services.append(service)
service_ids = map(lambda x: x['id'], services)
def delete_services():
for service_id in service_ids:
self.client.delete_service(service_id)
self.addCleanup(delete_services)
# List and Verify Services
body = self.client.list_services()['OS-KSADM:services']
found = [serv for serv in body if serv['id'] in service_ids]
self.assertEqual(len(found), len(services), 'Services not found')
|
the-stack_106_27122 | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'MyMoney'
copyright = '2018, Yannick Chabbert'
author = 'Yannick Chabbert'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = '0.1'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'MyMoneydoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'MyMoney.tex', 'MyMoney Documentation',
'Yannick Chabbert', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'mymoney', 'MyMoney Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'MyMoney', 'MyMoney Documentation',
author, 'MyMoney', 'One line description of project.',
'Miscellaneous'),
]
locale_dirs = ['locale/']
gettext_compact = False
|
the-stack_106_27124 | import torch
import torch.nn as nn
from torchstat import ModelHook
from collections import OrderedDict
from torchstat import StatTree, StatNode, report_format
def get_parent_node(root_node, stat_node_name):
assert isinstance(root_node, StatNode)
node = root_node
names = stat_node_name.split('.')
for i in range(len(names) - 1):
node_name = '.'.join(names[0:i+1])
child_index = node.find_child_index(node_name)
assert child_index != -1
node = node.children[child_index]
return node
def convert_leaf_modules_to_stat_tree(leaf_modules):
assert isinstance(leaf_modules, OrderedDict)
create_index = 1
root_node = StatNode(name='root', parent=None)
for leaf_module_name, leaf_module in leaf_modules.items():
names = leaf_module_name.split('.')
for i in range(len(names)):
create_index += 1
stat_node_name = '.'.join(names[0:i+1])
parent_node = get_parent_node(root_node, stat_node_name)
node = StatNode(name=stat_node_name, parent=parent_node)
parent_node.add_child(node)
if i == len(names) - 1: # leaf module itself
input_shape = leaf_module.input_shape.numpy().tolist()
output_shape = leaf_module.output_shape.numpy().tolist()
node.input_shape = input_shape
node.output_shape = output_shape
node.parameter_quantity = leaf_module.parameter_quantity.numpy()[0]
node.inference_memory = leaf_module.inference_memory.numpy()[0]
node.MAdd = leaf_module.MAdd.numpy()[0]
node.Flops = leaf_module.Flops.numpy()[0]
node.duration = leaf_module.duration.numpy()[0]
node.Memory = leaf_module.Memory.numpy().tolist()
return StatTree(root_node)
class ModelStat(object):
def __init__(self, model, input_size, query_granularity=1):
assert isinstance(model, nn.Module)
assert isinstance(input_size, (tuple, list)) and len(input_size) == 3
self._model = model
self._input_size = input_size
self._query_granularity = query_granularity
def _analyze_model(self):
model_hook = ModelHook(self._model, self._input_size)
leaf_modules = model_hook.retrieve_leaf_modules()
stat_tree = convert_leaf_modules_to_stat_tree(leaf_modules)
collected_nodes = stat_tree.get_collected_stat_nodes(self._query_granularity)
return collected_nodes
def show_report(self):
collected_nodes = self._analyze_model()
report = report_format(collected_nodes)
print(report)
def stat(model, input_size, query_granularity=1):
ms = ModelStat(model, input_size, query_granularity)
ms.show_report()
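# Illustrative usage (sketch; assumes torchvision is installed, which is not a dependency of this module):
#
#     import torchvision.models as models
#     stat(models.resnet18(), (3, 224, 224))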
|
the-stack_106_27125 | # Copyright 2020,2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
from abc import ABCMeta, abstractmethod
from dataclasses import dataclass
from enum import Enum
from typing import Dict, Optional, Sequence, Tuple, Union
import numpy as np
import nnabla as nn
from nnabla_rl.configuration import Configuration
from nnabla_rl.environments.environment_info import EnvironmentInfo
from nnabla_rl.models import Model
from nnabla_rl.utils.data import convert_to_list_if_not_list
from nnabla_rl.utils.misc import retrieve_internal_states
@contextlib.contextmanager
def rnn_support(model: Model,
prev_rnn_states: Dict[str, Dict[str, nn.Variable]],
train_rnn_states: Dict[str, Dict[str, nn.Variable]],
training_variables: 'TrainingVariables',
config: 'TrainerConfig'):
def stop_backprop(rnn_states):
for value in rnn_states.values():
value.need_grad = False
try:
if model.is_recurrent():
scope_name = model.scope_name
internal_states = retrieve_internal_states(
scope_name, prev_rnn_states, train_rnn_states, training_variables, config.reset_on_terminal)
model.set_internal_states(internal_states)
yield
finally:
if model.is_recurrent():
rnn_states = model.get_internal_states()
if training_variables.step_index() < config.burn_in_steps:
stop_backprop(rnn_states)
prev_rnn_states[model.scope_name] = rnn_states
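# Illustrative use of ``rnn_support`` when building a training graph over unrolled timesteps
# (sketch only; ``model``, ``prev_rnn_states``, ``train_rnn_states``, and ``config`` are objects
# owned by the surrounding trainer):
#
#     for variables in training_variables:
#         with rnn_support(model, prev_rnn_states, train_rnn_states, variables, config):
#             ...  # build this timestep's part of the graph; the model sees the correct internal state
#         # on exit, the rnn state (gradient-stopped during burn-in) is saved to prev_rnn_states[model.scope_name]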
class LossIntegration(Enum):
ALL_TIMESTEPS = 1, 'Computed loss is summed over all timesteps'
LAST_TIMESTEP_ONLY = 2, 'Only the last timestep\'s loss is used.'
@dataclass
class TrainerConfig(Configuration):
"""Configuration class for ModelTrainer
"""
unroll_steps: int = 1
burn_in_steps: int = 0
reset_on_terminal: bool = True # Reset internal rnn state to given state if previous state is terminal.
loss_integration: LossIntegration = LossIntegration.ALL_TIMESTEPS
def __post_init__(self):
super(TrainerConfig, self).__post_init__()
self._assert_positive(self.unroll_steps, 'unroll_steps')
self._assert_positive_or_zero(self.burn_in_steps, 'burn_in_steps')
class TrainingBatch():
"""Mini-Batch class for train
Args:
batch_size (int): the size of mini-batch
s_current (Optional[np.ndarray]): the current state array
a_current (Optional[np.ndarray]): the current action array
reward (Optional[np.ndarray]): the reward value array
gamma (Optional[float]): gamma value
non_terminal (Optional[np.ndarray]): the non_terminal flag array
s_next (Optional[np.ndarray]): the next state array
weight (Optional[np.ndarray]): the weight of loss array
extra (Dict[str, np.ndarray]): the extra information
next_step_batch (Optional[:py:class:`TrainingBatch <nnabla_rl.model_trainers.model_trainer.TrainingBatch>`]):\
the mini-batch for next step (used in n-step learning)
rnn_states (Dict[str, Dict[str, np.array]]): the rnn internal state values
"""
batch_size: int
s_current: Union[np.ndarray, Tuple[np.ndarray, ...]]
a_current: np.ndarray
reward: np.ndarray
gamma: float
non_terminal: np.ndarray
s_next: Union[np.ndarray, Tuple[np.ndarray, ...]]
weight: np.ndarray
extra: Dict[str, np.ndarray]
# Used in n-step/rnn learning
next_step_batch: Optional['TrainingBatch']
rnn_states: Dict[str, Dict[str, np.ndarray]]
def __init__(self,
batch_size: int,
s_current: Optional[Union[np.ndarray, Tuple[np.ndarray, ...]]] = None,
a_current: Optional[np.ndarray] = None,
reward: Optional[np.ndarray] = None,
gamma: Optional[float] = None,
non_terminal: Optional[np.ndarray] = None,
s_next: Optional[Union[np.ndarray, Tuple[np.ndarray, ...]]] = None,
weight: Optional[np.ndarray] = None,
extra: Dict[str, np.ndarray] = {},
next_step_batch: Optional['TrainingBatch'] = None,
rnn_states: Dict[str, Dict[str, np.ndarray]] = {}):
assert 0 < batch_size
self.batch_size = batch_size
if s_current is not None:
self.s_current = s_current
if a_current is not None:
self.a_current = a_current
if reward is not None:
self.reward = reward
if gamma is not None:
self.gamma = gamma
if non_terminal is not None:
self.non_terminal = non_terminal
if s_next is not None:
self.s_next = s_next
if weight is not None:
self.weight = weight
self.extra: Dict[str, np.ndarray] = extra
self.next_step_batch = next_step_batch
self.rnn_states = rnn_states
def __getitem__(self, index):
num_steps = len(self)
if num_steps <= index:
raise IndexError
batch = self
for _ in range(index):
batch = batch.next_step_batch
return batch
def __iter__(self):
batch = self
while batch is not None:
yield batch
batch = batch.next_step_batch
def __len__(self):
num_steps = 1
batch = self.next_step_batch
while batch is not None:
num_steps += 1
batch = batch.next_step_batch
return num_steps
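# Illustrative construction of a 2-step batch chain for n-step learning (sketch; the arrays are dummy data):
#
#     step2 = TrainingBatch(batch_size=32, s_current=s2, a_current=a2, reward=r2,
#                           gamma=0.99, non_terminal=nt2, s_next=s3)
#     step1 = TrainingBatch(batch_size=32, s_current=s1, a_current=a1, reward=r1,
#                           gamma=0.99, non_terminal=nt1, s_next=s2,
#                           next_step_batch=step2)
#     len(step1)    # -> 2
#     step1[1]      # -> step2
#     for step in step1: ...  # iterates over step1, then step2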
class TrainingVariables():
batch_size: int
s_current: Union[nn.Variable, Tuple[nn.Variable, ...]]
a_current: nn.Variable
reward: nn.Variable
gamma: nn.Variable
non_terminal: nn.Variable
s_next: Union[nn.Variable, Tuple[nn.Variable, ...]]
weight: nn.Variable
extra: Dict[str, nn.Variable]
rnn_states: Dict[str, Dict[str, nn.Variable]]
# Used in rnn learning
_next_step_variables: Optional['TrainingVariables']
_prev_step_variables: Optional['TrainingVariables']
def __init__(self,
batch_size: int,
s_current: Optional[Union[nn.Variable, Tuple[nn.Variable, ...]]] = None,
a_current: Optional[nn.Variable] = None,
reward: Optional[nn.Variable] = None,
gamma: Optional[nn.Variable] = None,
non_terminal: Optional[nn.Variable] = None,
s_next: Optional[Union[nn.Variable, Tuple[nn.Variable, ...]]] = None,
weight: Optional[nn.Variable] = None,
extra: Dict[str, nn.Variable] = {},
next_step_variables: Optional[nn.Variable] = None,
rnn_states: Dict[str, Dict[str, nn.Variable]] = {}):
assert 0 < batch_size
self.batch_size = batch_size
if s_current is not None:
self.s_current = s_current
if a_current is not None:
self.a_current = a_current
if reward is not None:
self.reward = reward
if gamma is not None:
self.gamma = gamma
if non_terminal is not None:
self.non_terminal = non_terminal
if s_next is not None:
self.s_next = s_next
if weight is not None:
self.weight = weight
self.extra: Dict[str, nn.Variable] = extra
self.next_step_variables = next_step_variables
self.prev_step_variables = None
self.rnn_states = rnn_states
@property
def next_step_variables(self):
return self._next_step_variables
@next_step_variables.setter
def next_step_variables(self, value):
self._next_step_variables = value
if self._next_step_variables is None:
return
if self._next_step_variables.prev_step_variables is not self:
self._next_step_variables.prev_step_variables = self
@property
def prev_step_variables(self):
return self._prev_step_variables
@prev_step_variables.setter
def prev_step_variables(self, value):
self._prev_step_variables = value
if self._prev_step_variables is None:
return
if self._prev_step_variables.next_step_variables is not self:
self._prev_step_variables.next_step_variables = self
def __getitem__(self, item):
num_steps = len(self)
if num_steps <= item:
raise IndexError
variable = self
for _ in range(item):
variable = variable.next_step_variables
return variable
def __iter__(self):
variable = self
while variable is not None:
yield variable
variable = variable.next_step_variables
def __len__(self):
num_steps = 1
variable = self.next_step_variables
while variable is not None:
num_steps += 1
variable = variable.next_step_variables
return num_steps
def is_initial_step(self) -> bool:
return self.prev_step_variables is None
def step_index(self):
if self._prev_step_variables is None:
return 0
else:
return 1 + self._prev_step_variables.step_index()
class ModelTrainer(metaclass=ABCMeta):
# type declarations to type check with mypy
# NOTE: declared variables are instance variable and NOT class variable, unless it is marked with ClassVar
# See https://mypy.readthedocs.io/en/stable/class_basics.html for details
_env_info: EnvironmentInfo
_config: TrainerConfig
_models: Sequence[Model]
_solvers: Dict[str, nn.solver.Solver]
_train_count: int
_training_variables: TrainingVariables
def __init__(self,
models: Union[Model, Sequence[Model]],
solvers: Dict[str, nn.solver.Solver],
env_info: EnvironmentInfo,
config: TrainerConfig):
self._env_info = env_info
self._config = config
self._train_count = 0
self._models = convert_to_list_if_not_list(models)
self._assert_no_duplicate_model(self._models)
if self._need_rnn_support(self._models) and not self.support_rnn():
raise NotImplementedError(f'{self.__name__} does not support RNN models!')
self._solvers = solvers
# Initially create training variables with batch_size 1.
# The batch_size will be updated later depending on the given experience data
# This procedure is a workaround to initialize model parameters (it it is not created).
total_timesteps = self._config.unroll_steps + self._config.burn_in_steps
next_step_variables = None
for _ in range(total_timesteps):
training_variables = self._setup_training_variables(1)
training_variables.next_step_variables = next_step_variables
next_step_variables = training_variables
self._training_variables = training_variables
self._assert_variable_length_equals_total_timesteps()
self._build_training_graph(self._models, self._training_variables)
self._setup_solver()
@property
def __name__(self):
return self.__class__.__name__
def train(self, batch: TrainingBatch, **kwargs) -> Dict[str, np.ndarray]:
if self._models is None:
raise RuntimeError('Call setup_training() first. Model is not set!')
self._train_count += 1
batch = self._setup_batch(batch)
new_batch_size = batch.batch_size
prev_batch_size = self._training_variables.batch_size
if new_batch_size != prev_batch_size:
total_timesteps = self._config.unroll_steps + self._config.burn_in_steps
assert 0 < total_timesteps
next_step_variables = None
for _ in range(total_timesteps):
training_variables = self._setup_training_variables(new_batch_size)
training_variables.next_step_variables = next_step_variables
next_step_variables = training_variables
self._training_variables = training_variables
self._assert_variable_length_equals_total_timesteps()
self._build_training_graph(self._models, self._training_variables)
trainer_state = self._update_model(self._models, self._solvers, batch, self._training_variables, **kwargs)
return trainer_state
def set_learning_rate(self, new_learning_rate):
for solver in self._solvers.values():
solver.set_learning_rate(new_learning_rate)
def support_rnn(self) -> bool:
return False
def _setup_batch(self, training_batch: TrainingBatch) -> TrainingBatch:
return training_batch
@abstractmethod
def _update_model(self,
models: Sequence[Model],
solvers: Dict[str, nn.solver.Solver],
batch: TrainingBatch,
training_variables: TrainingVariables,
**kwargs) -> Dict[str, np.ndarray]:
raise NotImplementedError
@abstractmethod
def _build_training_graph(self,
models: Sequence[Model],
training_variables: TrainingVariables):
raise NotImplementedError
@abstractmethod
def _setup_training_variables(self, batch_size) -> TrainingVariables:
raise NotImplementedError
def _setup_solver(self):
for model in self._models:
if model.scope_name in self._solvers.keys():
solver = self._solvers[model.scope_name]
# Set retain_state = True and prevent overwriting loaded state (If it is loaded)
solver.set_parameters(model.get_parameters(), reset=False, retain_state=True)
def _assert_variable_length_equals_total_timesteps(self):
        total_timesteps = self._config.unroll_steps + self._config.burn_in_steps
        if len(self._training_variables) != total_timesteps:
            raise RuntimeError(f'Training variables length and rnn unroll + burn-in steps do not match! \
                {len(self._training_variables)} != {total_timesteps}. \
Check that the training method supports recurrent networks.')
@classmethod
def _assert_no_duplicate_model(cls, models):
scope_names = set()
for model in models:
scope_name = model.scope_name
assert scope_name not in scope_names
scope_names.add(scope_name)
def _need_rnn_support(self, models: Sequence[Model]):
for model in models:
if model.is_recurrent():
return True
return False
|
the-stack_106_27126 | '''
Take picture
============
.. author:: Mathieu Virbel <[email protected]>
Little example to demonstrate how to start an Intent, and get the result.
When you use the Android.startActivityForResult(), the result will be dispatched
into onActivityResult. You can catch the event with the android.activity API
from python-for-android project.
If you want to compile it, don't forget to add the CAMERA permission::
./build.py --name 'TakePicture' --package org.test.takepicture \
--permission CAMERA --version 1 \
--private ~/code/kivy/examples/android/takepicture \
debug installd
'''
__version__ = '0.1'
from kivy.app import App
from os.path import exists
from jnius import autoclass, cast
from android import activity
from functools import partial
from kivy.clock import Clock
from kivy.uix.scatter import Scatter
from kivy.properties import StringProperty
Intent = autoclass('android.content.Intent')
PythonActivity = autoclass('org.renpy.android.PythonActivity')
MediaStore = autoclass('android.provider.MediaStore')
Uri = autoclass('android.net.Uri')
class Picture(Scatter):
source = StringProperty(None)
class TakePictureApp(App):
def build(self):
self.index = 0
activity.bind(on_activity_result=self.on_activity_result)
def get_filename(self):
while True:
self.index += 1
fn = '/sdcard/takepicture{}.png'.format(self.index)
if not exists(fn):
return fn
def take_picture(self):
intent = Intent(MediaStore.ACTION_IMAGE_CAPTURE)
self.last_fn = self.get_filename()
self.uri = Uri.parse('file://' + self.last_fn)
self.uri = cast('android.os.Parcelable', self.uri)
intent.putExtra(MediaStore.EXTRA_OUTPUT, self.uri)
PythonActivity.mActivity.startActivityForResult(intent, 0x123)
def on_activity_result(self, requestCode, resultCode, intent):
if requestCode == 0x123:
Clock.schedule_once(partial(self.add_picture, self.last_fn), 0)
def add_picture(self, fn, *args):
self.root.add_widget(Picture(source=fn, center=self.root.center))
def on_pause(self):
return True
TakePictureApp().run()
|
the-stack_106_27127 | # Copyright 2014 - Rackspace Hosting
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from solum.i18n import _
from solum.tests import base
from solum.tests import utils
from solum.tests.worker.handlers import test_shell
from solum.worker.handlers import noop as noop_handler
class HandlerTest(base.BaseTestCase):
def setUp(self):
super(HandlerTest, self).setUp()
self.ctx = utils.dummy_context()
@mock.patch('solum.worker.handlers.noop.LOG')
def test_echo(self, fake_LOG):
noop_handler.Handler().echo({}, 'foo')
fake_LOG.debug.assert_called_once_with(_('%s') % 'foo')
@mock.patch('solum.worker.handlers.noop.LOG')
def test_build(self, fake_LOG):
git_info = test_shell.mock_git_info()
args = [5, git_info, 'new_app', '1-2-3-4', 'heroku', 'docker',
44, None, None]
noop_handler.Handler().build(self.ctx, *args)
message = 'Build ' + ', '.join([str(a) for a in args])
fake_LOG.debug.assert_called_once_with(_("%s") % message)
@mock.patch('solum.worker.handlers.noop.LOG')
def test_unittest(self, fake_LOG):
git_info = test_shell.mock_git_info()
args = [5, git_info, 'new_app',
'1-2-3-4', 'heroku', 'docker', 44, 'pep8']
noop_handler.Handler().unittest(self.ctx, *args)
message = 'Unittest ' + ', '.join([str(a) for a in args])
fake_LOG.debug.assert_called_once_with(_("%s") % message)
|
the-stack_106_27128 | import setuptools
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name="update_camera_offset-SamGoodwin", # Replace with your own username
version="0.0.1",
author="Sam Goodwin + Bob Dimmock",
author_email="[email protected]",
description="Snap alignment of prop object attached to a calibrated SDI camera in Shogun Live 1.6",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/Sam-Goods/update_camera_offset.git",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.9.1',
) |
the-stack_106_27129 | """
The MIT License (MIT)
Copyright (c) 2017-2020 TwitchIO
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import asyncio
import enum
import logging
import sanic
from sanic import request
from sanic import response
from twitchio.ext.webhooks.utils import Topic, StreamChangedNotification, UserChangedNotification, \
UserFollowsNotification
log = logging.getLogger(__name__)
NOTIFICATION_TYPE_BY_TOPIC = {
Topic.stream_changed: StreamChangedNotification,
Topic.user_changed: UserChangedNotification,
Topic.user_follows: UserFollowsNotification
}
class WebhookEventDispatcher:
__instances = set()
__dispatcher = None
def __init__(self, loop: asyncio.AbstractEventLoop = None):
self.__instances.add(self)
self.loop = loop or asyncio.get_event_loop()
def __init_subclass__(cls, **kwargs):
cls._registered_dispatcher(cls)
@classmethod
def _registered_dispatcher(cls, new_cls=None):
if new_cls:
WebhookEventDispatcher.__dispatcher = new_cls
return WebhookEventDispatcher.__dispatcher
@staticmethod
def accept_subscription(request: request.Request, topic: enum.Enum):
"""Handle Twitch challenge requests.
        Accept Twitch subscriptions by responding to the request with the provided challenge string.
Parameters
----------
request: sanic.request.Request
The challenge request received from Twitch
topic: enum.Enum
The topic being subscribed to
Returns
-------
response.HTTPResponse
status code: 200 if the request has correctly been processed
status code: 400 otherwise
"""
try:
mode = request.args['hub.mode'][0]
if mode == 'subscribe' or mode == 'unsubscribe':
return response.HTTPResponse(body=request.args['hub.challenge'][0], status=200)
elif mode == 'denied':
reason = request.args.get('hub.reason', 'no reason')
                log.warning(f'{topic.name} webhook subscribe request denied ({request.args}), reason: {reason}.')
return response.HTTPResponse(status=200)
except KeyError:
return response.HTTPResponse(status=400)
@classmethod
async def bulk_process_notification(cls, request: request.Request, topic: enum.Enum):
"""Process the received notification.
- Check if the related topic is supported.
- Pass the notification info to the dispatchers.
Parameters
----------
request: sanic.request.Request
            The notification request received from Twitch
topic: enum.Enum
Topic whose notification is being processed
Returns
-------
response.HTTPResponse
status code: 202 if the request has correctly been processed
status code: 400 otherwise
"""
if topic not in NOTIFICATION_TYPE_BY_TOPIC:
log.error(f'Invalid topic "{topic.name}", the notification has been ignored')
            return response.HTTPResponse(status=400)
try:
params = {param: request.args.get(param) for param in NOTIFICATION_TYPE_BY_TOPIC[topic].valid_params}
data = request.json['data'][0] if request.json['data'] else {}
for instance in cls.__instances:
await instance.process_notification(data, topic, params)
return response.HTTPResponse(status=202)
except KeyError:
return response.HTTPResponse(status=400)
async def process_notification(self, data: dict, topic: enum.Enum, params: dict):
"""Filter the notification and call the related callback.
Parameters
----------
data: dict
Notification content
topic: enum.Enum
Topic whose notification is being processed
params: dict
Topic parameters
"""
try:
cls = NOTIFICATION_TYPE_BY_TOPIC[topic]
notification = cls(**data)
if cls == StreamChangedNotification:
if data:
await self.event_stream_updated(params, notification)
else:
await self.event_stream_offline(params, notification)
elif cls == UserChangedNotification:
await self.event_user_updated(params, notification)
elif cls == UserFollowsNotification:
if not params['from_id']:
await self.event_following_user(params, notification)
else:
await self.event_followed_by_user(params, notification)
except Exception as error:
await self.webhook_notification_error(topic, data, params, error)
async def webhook_notification_error(self, topic: enum.Enum, data: dict, params: dict, error: Exception):
"""Handle the error raised during the notification processing
Parameters
----------
topic: enum.Enum
Topic whose notification is being processed
data: dict
Notification content
params: dict
Topic parameters
error: Exception
The error being raised
"""
log.error(f"Exception '{type(error).__name__}' raised for topic '{topic.name}' (params={params})",
exc_info=(type(error), error, error.__traceback__))
async def event_stream_updated(self, params: dict, notification: StreamChangedNotification):
"""Callback called when a user starts or updates a stream.
Parameters
----------
params: dict
Topic parameters
notification: StreamChangedNotification
Topic data object
"""
async def event_stream_offline(self, params: dict, notification: StreamChangedNotification):
"""Callback called when a user stops a stream.
Parameters
----------
params: dict
Topic parameters
notification: StreamChangedNotification
Topic data object
"""
async def event_user_updated(self, params: dict, notification: UserChangedNotification):
"""Callback called when a user's data is updated.
Parameters
----------
params: dict
Topic parameters
notification: UserChangedNotification
Topic data object
"""
async def event_following_user(self, params: dict, notification: UserFollowsNotification):
"""Callback called when a user is being followed by someone
Parameters
----------
params: dict
Topic parameters
notification: UserFollowsNotification
Topic data object
"""
async def event_followed_by_user(self, params: dict, notification: UserFollowsNotification):
"""Callback called when a user is following someone
Parameters
----------
params: dict
Topic parameters
notification: UserFollowsNotification
Topic data object
"""
|
the-stack_106_27130 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: bigip_gtm_monitor_bigip
short_description: Manages F5 BIG-IP GTM BIG-IP monitors
description:
- Manages F5 BIG-IP GTM (now BIG-IP DNS) BIG-IP monitors. This monitor is used by GTM to monitor
BIG-IPs themselves.
version_added: "1.0.0"
options:
name:
description:
- Name of the monitor.
type: str
required: True
parent:
description:
- The parent template of this monitor template. Once this value has
been set, it cannot be changed. By default, this value is the C(bigip)
parent on the C(Common) partition.
type: str
default: "/Common/bigip"
ip:
description:
- IP address part of the IP/port definition. If this parameter is not
provided when creating a new monitor, the default value will be
'*'.
type: str
port:
description:
- Port address part of the IP/port definition. If this parameter is not
provided when creating a new monitor, the default value will be
'*'. Note that if specifying an IP address, you must use a value between 1 and 65535.
type: str
interval:
description:
- Specifies, in seconds, the frequency at which the system issues the monitor
check when either the resource is down or the status of the resource is unknown.
- When creating a new monitor, if this parameter is not provided, the
default value will be C(30). This value B(must) be less than the C(timeout) value.
type: int
timeout:
description:
- Specifies the number of seconds the target has in which to respond to the
monitor request.
- If the target responds within the set time period, it is considered up.
- If the target does not respond within the set time period, it is considered down.
- When this value is set to 0 (zero), the system uses the interval from the parent monitor.
- When creating a new monitor, if this parameter is not provided,
the default value will be C(90).
type: int
ignore_down_response:
description:
- Specifies the monitor allows more than one probe attempt per interval.
- When C(yes), specifies the monitor ignores down responses for the duration of
the monitor timeout. Once the monitor timeout is reached without the system receiving
an up response, the system marks the object down.
- When C(no), specifies the monitor immediately marks an object down when it
receives a down response.
- When creating a new monitor, if this parameter is not provided, the default
value will be C(no).
type: bool
aggregate_dynamic_ratios:
description:
- Specifies how the system combines the module values to create the proportion
(score) for the load balancing operation.
- The score represents the module's estimated capacity for handing traffic.
- Averaged values are appropriate for downstream Web Accelerator or Application
Security Manager (ASM) virtual servers.
- When creating a new monitor, if this parameter is not specified, the default
of C(none) is used, meaning the system does not use the scores in the load
balancing operation.
- When C(none), specifies the monitor ignores the nodes and pool member scores.
- When C(average-nodes), specifies the system averages the dynamic ratios
on the nodes associated with the monitor's target virtual servers and returns
that average as the virtual servers' score.
- When C(sum-nodes), specifies the system adds together the scores of the
nodes associated with the monitor's target virtual servers and uses that value
in the load balancing operation.
- When C(average-members), specifies the system averages the dynamic ratios
on the pool members associated with the monitor's target virtual servers and
returns that average as the virtual servers' score.
- When C(sum-members), specifies the system adds together the scores of the
pool members associated with the monitor's target virtual servers and uses
that value in the load balancing operation.
type: str
choices:
- none
- average-nodes
- sum-nodes
- average-members
- sum-members
partition:
description:
- Device partition to manage resources on.
type: str
default: Common
state:
description:
- When C(present), ensures the monitor exists.
- When C(absent), ensures the monitor is removed.
type: str
choices:
- present
- absent
default: present
notes:
- Requires BIG-IP software version >= 12
extends_documentation_fragment: f5networks.f5_modules.f5
author:
- Tim Rupp (@caphrim007)
- Wojciech Wypior (@wojtek0806)
'''
EXAMPLES = r'''
- name: Create BIG-IP Monitor
bigip_gtm_monitor_bigip:
state: present
ip: 10.10.10.10
name: my_monitor
provider:
user: admin
password: secret
server: lb.mydomain.com
delegate_to: localhost
- name: Remove BIG-IP Monitor
bigip_gtm_monitor_bigip:
state: absent
name: my_monitor
provider:
user: admin
password: secret
server: lb.mydomain.com
delegate_to: localhost
- name: Add BIG-IP monitor for all addresses, port 514
bigip_gtm_monitor_bigip:
port: 514
name: my_monitor
provider:
user: admin
password: secret
server: lb.mydomain.com
delegate_to: localhost
'''
RETURN = r'''
parent:
description: New parent template of the monitor.
returned: changed
type: str
sample: bigip
ip:
description: The new IP of IP/port definition.
returned: changed
type: str
sample: 10.12.13.14
interval:
description: The new interval at which to run the monitor check.
returned: changed
type: int
sample: 2
timeout:
description: The new timeout in which the remote system must respond to the monitor.
returned: changed
type: int
sample: 10
aggregate_dynamic_ratios:
description: The new aggregate of to the monitor.
returned: changed
type: str
sample: sum-members
ignore_down_response:
description: Whether to ignore the down response or not.
returned: changed
type: bool
sample: True
'''
import os
from datetime import datetime
from ansible.module_utils.basic import (
AnsibleModule, env_fallback
)
from ..module_utils.bigip import F5RestClient
from ..module_utils.common import (
F5ModuleError, AnsibleF5Parameters, transform_name, f5_argument_spec
)
from ..module_utils.icontrol import (
module_provisioned, tmos_version
)
from ..module_utils.ipaddress import is_valid_ip
from ..module_utils.teem import send_teem
class Parameters(AnsibleF5Parameters):
api_map = {
'defaultsFrom': 'parent',
'ignoreDownResponse': 'ignore_down_response',
'aggregateDynamicRatios': 'aggregate_dynamic_ratios',
}
api_attributes = [
'defaultsFrom',
'interval',
'timeout',
'destination',
'ignoreDownResponse',
'aggregateDynamicRatios',
]
returnables = [
'parent',
'ip',
'port',
'interval',
'timeout',
'ignore_down_response',
'aggregate_dynamic_ratios',
]
updatables = [
'destination',
'interval',
'timeout',
'ignore_down_response',
'aggregate_dynamic_ratios',
]
@property
def interval(self):
if self._values['interval'] is None:
return None
        if not 1 <= int(self._values['interval']) <= 86400:
raise F5ModuleError(
"Interval value must be between 1 and 86400"
)
return int(self._values['interval'])
@property
def timeout(self):
if self._values['timeout'] is None:
return None
return int(self._values['timeout'])
@property
def type(self):
return 'bigip'
class ApiParameters(Parameters):
@property
def ip(self):
ip, port = self._values['destination'].split(':')
return ip
@property
def port(self):
ip, port = self._values['destination'].split(':')
return int(port)
@property
def ignore_down_response(self):
if self._values['ignore_down_response'] is None:
return None
if self._values['ignore_down_response'] == 'disabled':
return False
return True
class ModuleParameters(Parameters):
@property
def destination(self):
if self.ip is None and self.port is None:
return None
destination = '{0}:{1}'.format(self.ip, self.port)
return destination
@property
def parent(self):
if self._values['parent'] is None:
return None
if self._values['parent'].startswith('/'):
parent = os.path.basename(self._values['parent'])
result = '/{0}/{1}'.format(self.partition, parent)
else:
result = '/{0}/{1}'.format(self.partition, self._values['parent'])
return result
@property
def ip(self):
if self._values['ip'] is None:
return None
if self._values['ip'] in ['*', '0.0.0.0']:
return '*'
elif is_valid_ip(self._values['ip']):
return self._values['ip']
else:
raise F5ModuleError(
"The provided 'ip' parameter is not an IP address."
)
@property
def port(self):
if self._values['port'] is None:
return None
elif self._values['port'] == '*':
return '*'
return int(self._values['port'])
class Changes(Parameters):
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
except Exception:
raise
return result
class UsableChanges(Changes):
@property
def ignore_down_response(self):
if self._values['ignore_down_response']:
return 'enabled'
return 'disabled'
class ReportableChanges(Changes):
pass
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
result = self.__default(param)
return result
@property
def parent(self):
if self.want.parent != self.have.parent:
raise F5ModuleError(
"The parent monitor cannot be changed"
)
@property
def destination(self):
if self.want.ip is None and self.want.port is None:
return None
if self.want.port is None:
self.want.update({'port': self.have.port})
if self.want.ip is None:
self.want.update({'ip': self.have.ip})
if self.want.port in [None, '*'] and self.want.ip != '*':
raise F5ModuleError(
"Specifying an IP address requires that a port number be specified"
)
if self.want.destination != self.have.destination:
return self.want.destination
@property
def interval(self):
if self.want.timeout is not None and self.want.interval is not None:
if self.want.interval >= self.want.timeout:
raise F5ModuleError(
"Parameter 'interval' must be less than 'timeout'."
)
elif self.want.timeout is not None:
if self.have.interval >= self.want.timeout:
raise F5ModuleError(
"Parameter 'interval' must be less than 'timeout'."
)
elif self.want.interval is not None:
if self.want.interval >= self.have.timeout:
raise F5ModuleError(
"Parameter 'interval' must be less than 'timeout'."
)
if self.want.interval != self.have.interval:
return self.want.interval
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = F5RestClient(**self.module.params)
self.have = None
self.want = ModuleParameters(params=self.module.params)
self.changes = UsableChanges()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = UsableChanges(params=changed)
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
changed[k] = change
if changed:
self.changes = UsableChanges(params=changed)
return True
return False
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def _set_default_creation_values(self):
if self.want.timeout is None:
self.want.update({'timeout': 120})
if self.want.interval is None:
self.want.update({'interval': 30})
if self.want.ip is None:
self.want.update({'ip': '*'})
if self.want.port is None:
self.want.update({'port': '*'})
if self.want.ignore_down_response is None:
self.want.update({'ignore_down_response': False})
if self.want.aggregate_dynamic_ratios is None:
self.want.update({'aggregate_dynamic_ratios': 'none'})
def exec_module(self):
start = datetime.now().isoformat()
version = tmos_version(self.client)
if not module_provisioned(self.client, 'gtm'):
raise F5ModuleError(
"GTM must be provisioned to use this module."
)
changed = False
result = dict()
state = self.want.state
if state in ["present", "disabled"]:
changed = self.present()
elif state == "absent":
changed = self.absent()
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
send_teem(start, self.client, self.module, version)
return result
def present(self):
if self.exists():
return self.update()
else:
return self.create()
def create(self):
self._set_changed_options()
self._set_default_creation_values()
if self.module.check_mode:
return True
self.create_on_device()
return True
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.module.check_mode:
return True
self.update_on_device()
return True
def absent(self):
if self.exists():
return self.remove()
return False
def remove(self):
if self.module.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the monitor.")
return True
def exists(self):
uri = "https://{0}:{1}/mgmt/tm/gtm/monitor/bigip/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status == 404 or 'code' in response and response['code'] == 404:
return False
if resp.status in [200, 201] or 'code' in response and response['code'] in [200, 201]:
return True
errors = [401, 403, 409, 500, 501, 502, 503, 504]
if resp.status in errors or 'code' in response and response['code'] in errors:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def create_on_device(self):
params = self.changes.api_params()
params['name'] = self.want.name
params['partition'] = self.want.partition
uri = "https://{0}:{1}/mgmt/tm/gtm/monitor/bigip/".format(
self.client.provider['server'],
self.client.provider['server_port']
)
resp = self.client.api.post(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status in [200, 201] or 'code' in response and response['code'] in [200, 201]:
return True
raise F5ModuleError(resp.content)
def update_on_device(self):
params = self.changes.api_params()
uri = "https://{0}:{1}/mgmt/tm/gtm/monitor/bigip/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.patch(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status in [200, 201] or 'code' in response and response['code'] in [200, 201]:
return True
raise F5ModuleError(resp.content)
def remove_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/gtm/monitor/bigip/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.delete(uri)
if resp.status == 200:
return True
def read_current_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/gtm/monitor/bigip/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status in [200, 201] or 'code' in response and response['code'] in [200, 201]:
return ApiParameters(params=response)
raise F5ModuleError(resp.content)
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
name=dict(required=True),
parent=dict(default='/Common/bigip'),
ip=dict(),
port=dict(),
interval=dict(type='int'),
timeout=dict(type='int'),
ignore_down_response=dict(type='bool'),
aggregate_dynamic_ratios=dict(
choices=[
'none', 'average-nodes', 'sum-nodes', 'average-members', 'sum-members'
]
),
state=dict(
default='present',
choices=['present', 'absent']
),
partition=dict(
default='Common',
fallback=(env_fallback, ['F5_PARTITION'])
)
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode,
)
try:
mm = ModuleManager(module=module)
results = mm.exec_module()
module.exit_json(**results)
except F5ModuleError as ex:
module.fail_json(msg=str(ex))
if __name__ == '__main__':
main()
|
the-stack_106_27131 | from PIL import Image
import os
import requests
import time
# pads the image to a square on a white background; the result stays RGBA (conversion to RGB happens later in resize_image)
def make_square(im, min_size=0):
x, y = im.size
size = max(min_size, x, y)
new_im = Image.new('RGBA', (size, size), "WHITE")
new_im.paste(im, (int((size - x) / 2), int((size - y) / 2)), im)
return new_im
# uses remove.bg call to remove background from image. Return RGBA image.
def remove_background(image_path):
response = requests.post(
'https://api.remove.bg/v1.0/removebg',
files={'image_file': open(image_path, 'rb')},
data={'size': 'auto'},
headers={'X-Api-Key': 'hcM54vWhAtKudf4pXvyhCvRR'},
)
if response.status_code == requests.codes.ok:
with open(image_path, 'wb') as out:
out.write(response.content)
else:
print("Error:", response.status_code, response.text)
# resizes an image and converts RGBA to RGB; optional flags pad it to a square and remove the background first
def resize_image(dir_path, image_name, image_size=(127, 127), square=False, remove_bg=False):
image_path = os.path.join(dir_path, image_name)
if remove_bg:
remove_background(image_path)
im = Image.open(image_path)
if square:
im = make_square(im)
im = im.convert("RGB", palette=Image.ADAPTIVE)
im = im.resize(image_size)
im.save(image_path)
# main function to execute
def inference(dir_path):
imagelist = [file for file in os.listdir(dir_path) if file.endswith('.png')]
# resize and remove background for all images
for image in imagelist:
resize_image(dir_path, image, square=True, remove_bg=True)
time.sleep(0.5) |
the-stack_106_27132 | import os
import sys
import glob
import argparse
from astropy.config import paths
import pandas as pd
import numpy as np
from astropy.wcs import WCS
from astropy.io import fits, ascii
from astropy.visualization import ImageNormalize, ZScaleInterval
import matplotlib.pyplot as plt
import datetime as dt
import time
from tqdm import tqdm
from ensemble import proc_time
def point_flag_color(x):
if x <= 1:
return 'red', 'Flag <= 1'
elif x <= 5:
return 'green', '2 <= Flag <= 5'
else:
return None, None # 'yellow', 'Flag > 5'
def segment_flag_color(x):
if x <= 1:
return 'blue', 'Flag <= 1'
elif x <= 5:
return 'green', '2 <= Flag <= 5'
else:
return None, None # 'yellow', 'Flag > 5'
def draw_catalogs(cfile, catalog):
cat, fcolor_, fcolor = None, None, None
if os.path.exists(cfile):
cat = ascii.read(cfile).to_pandas()
else:
cat = ''
if len(cat) > 0:
if 'Flags' in cat.columns:
flagcols = cat['Flags']
else:
flagcols = [c for c in cat.columns if 'Flags' in c]
if len(flagcols) > 0:
flags = cat.loc[:,flagcols].fillna(100, axis=0, inplace=False).apply(min, axis=1)
if catalog == 'point':
fcolor_ = flags.apply(point_flag_color)
elif catalog == 'segment':
fcolor_ = flags.apply(segment_flag_color)
fcolor = fcolor_.apply(lambda x: x[0]).values
return cat, fcolor_, fcolor
def create_image_name(name, dataset, P, S, G, crpt, outpath):
if P == 1 and S == 1:
catstr = '_source'
elif P == 1 and S == 0:
catstr = '_point'
elif P == 0 and S == 1:
catstr = '_segment'
elif G == 1:
catstr = '_gaia'
else:
catstr = ''
if crpt:
sfx = '_'.join(dataset.split('_')[1:])
name = f"{name}_{sfx}"
outpath = f'{outpath}/{name}'
os.makedirs(outpath, exist_ok=True)
imgpath = os.path.join(outpath, f'{name}{catstr}')
return imgpath
def draw_total_images(input_path, outpath, dataset, P=0, S=0, G=0, figsize=(24,24), crpt=0):
"""
Opens fits files from local directory path to generate total detection drizzled images
aligned to WCS with point/segment/gaia catalog overlay options. Saves figure as png.
**args**
input_path: path to dataset subdirectories containing total or filter fits files
dataset: name of subdirectory containing .fits and .ecsv files
**kwargs**
output_img: where to save the pngs (path) default='./img'
P: draw point catalog references (0=off, 1=on) default is 0
S: draw segment catalog references (0=off, 1=on) default is 0
G: draw GAIA catalog references (0=off, 1=on) default is 0
figsize: size to make the figures (default=24 sets figsize=(24,24))
    crpt: corrupted-dataset flag (0 or 1) that determines the png file naming convention
    PNG naming convention is based on the fits file name unless crpt=1:
./input_path/dataset/filename.fits >> ./img_path/dataset/filename.png
catalog overlay pngs have an additional suffix:
P=1: _point.png
S=1: _segment.png
P=1, S=1: _source.png
G=1: _gaia.png
Normal SVM data (dataset=ib1f0a):
./{input_path}/ib1f0a/hst_11570_0a_wfc3_uvis_total_ib1f0a_drc.fits
saves as >> ./{imgdir}/hst_11570_0a_wfc3_uvis_total_ib1f0a/hst_11570_0a_wfc3_uvis_total_ib1f0a.png
    Corruption SVM data (dataset=ia0m04_f110w_all_stoc):
    ./{input_path}/ia0m04_f110w_all_stoc/hst_11099_04_wfc3_ir_total_ia0m04_drz.fits
    saves as >> ./{imgdir}/hst_11099_04_wfc3_ir_total_ia0m04_f110w_all_stoc/hst_11099_04_wfc3_ir_total_ia0m04_f110w_all_stoc.png
"""
# allows for corruption subdir names e.g. ia0m04_f110w_all_stat and ia0m04
subdir, dname = f"{input_path}/{dataset}", dataset.split('_')[0]
hfiles = glob.glob(f"{subdir}/*total_{dname}_dr?.fits")
if len(hfiles) > 0:
for hfile in hfiles:
name = os.path.basename(hfile).split('.')[0][:-4]
detector = name.split('_')[4]
ras, decs = np.ndarray((0,)), np.ndarray((0,))
with fits.open(hfile) as ff:
hdu = ff[1]
wcs = WCS(hdu.header)
footprint = wcs.calc_footprint(hdu.header)
ras = np.append(ras, footprint[:, 0])
decs = np.append(decs, footprint[:, 1])
ralim = [np.max(ras), np.min(ras)]
declim = [np.max(decs), np.min(decs)]
radeclim = np.stack([ralim, declim], axis=1)
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(111, projection=wcs, frameon=False)
plt.axis(False)
interval = ZScaleInterval()
_, vmax = interval.get_limits(hdu.data)
norm = ImageNormalize(hdu.data, vmin=0, vmax=vmax*2,
clip=True)
ax.imshow(hdu.data, origin='lower', norm=norm, cmap='gray')
if P:
p_cat = glob.glob(f"{subdir}/{name}_point-cat.ecsv")
if len(p_cat) > 0:
point, pfcolor_, pfcolor = draw_catalogs(p_cat[0], 'point')
if pfcolor_ is not None:
for fcol in pfcolor_.unique():
if fcol is not None:
q = pfcolor == fcol[0]
ax.scatter(point[q]['RA'], point[q]['DEC'],
edgecolor=fcol[0], facecolor='none',
transform=ax.get_transform('fk5'),
marker='o', s=15, alpha=0.5)
# else:
# print("Point cat not found: ", dataset)
if S:
s_cat = glob.glob(f"{subdir}/{name}_segment-cat.ecsv")
if len(s_cat) > 0:
seg, sfcolor_, sfcolor = draw_catalogs(s_cat[0], 'segment')
if sfcolor_ is not None:
for fcol in sfcolor_.unique():
if fcol is not None:
q = sfcolor == fcol[0]
ax.scatter(seg[q]['RA'], seg[q]['DEC'],
edgecolor=fcol[0], facecolor='none',
transform=ax.get_transform('fk5'),
marker='o', s=15, alpha=0.5)
# else:
# print("Segment cat not found: ", dataset)
if G:
g_cat = glob.glob(f"{subdir}/*_{detector}_*GAIAeDR3_ref_cat.ecsv")
if len(g_cat) > 0:
if os.path.exists(g_cat[0]):
gaia = ascii.read(g_cat[0]).to_pandas()
ax.scatter(gaia['RA'], gaia['DEC'],
edgecolor='cyan', facecolor='none',
transform=ax.get_transform('fk5'),
marker='o', s=15)
# else:
# print("GAIA cat not found: ", dataset)
xlim, ylim = wcs.wcs_world2pix(radeclim, 1).T
ax.set_xlim(xlim)
ax.set_ylim(ylim)
imgpath = create_image_name(name, dataset, P, S, G, crpt, outpath)
plt.savefig(imgpath, bbox_inches='tight')
plt.close(fig)
#print(f"\t{imgpath}.png")
else:
print(f"{dataset} fits file could not be found")
return
def list_visits(dataset, outpath):
df = pd.read_csv(dataset, index_col="index")
idx = list(df.index)
datasets = []
skip = 0
for i in idx:
impath = os.path.join(outpath, i)
visit = i.split('_')[6]
if os.path.exists(impath):
num = len(glob.glob(f"{impath}/*"))
if num < 3:
datasets.append(visit)
else:
skip += 1
else:
datasets.append(visit)
if skip > 0:
print("Skipping pre-existing images: ", skip)
return list(set(datasets))
def generate_total_images(input_path, outpath, dataset=None, figsize=(24,24), crpt=0, gen=3):
if dataset is not None:
if dataset.endswith(".csv"):
datasets = list_visits(dataset, outpath)
else:
datasets = [dataset]
else:
if crpt == 0:
paths = glob.glob(f"{input_path}/??????")
else:
paths = glob.glob(f"{input_path}/??????_*_???_st??")
datasets = [p.split('/')[-1] for p in paths]
print(f"\nFound {len(datasets)} datasets.")
if len(datasets) == 0:
print("Exiting.")
sys.exit(1)
t_start = time.time()
start = dt.datetime.fromtimestamp(t_start).strftime("%m/%d/%Y - %I:%M:%S %p")
print(f"\n[i] DRAWING IMAGES ***{start}***")
print(f"Generating images for {len(datasets)} datasets.")
for dataset in tqdm(datasets):
#print(dataset)
if gen == 3: # original, point-segment, and GAIA
draw_total_images(input_path, outpath, dataset, figsize=figsize, crpt=crpt)
draw_total_images(input_path, outpath, dataset, P=1, S=1, figsize=figsize, crpt=crpt)
draw_total_images(input_path, outpath, dataset, G=1, figsize=figsize, crpt=crpt)
elif gen == 2: # GAIA
draw_total_images(input_path, outpath, dataset, G=1, figsize=figsize, crpt=crpt)
elif gen == 1: # point-segment
draw_total_images(input_path, outpath, dataset, P=1, S=1, figsize=figsize, crpt=crpt)
else: # original (0)
draw_total_images(input_path, outpath, dataset, figsize=figsize, crpt=crpt)
t_end = time.time()
end = dt.datetime.fromtimestamp(t_end).strftime("%m/%d/%Y - %I:%M:%S %p")
print(f"\n[i] IMAGE GENERATION COMPLETE ***{end}***")
proc_time(t_start, t_end)
def draw_filter_images(input_path, outpath, dataset, figsize=(24,24), crpt=0):
subdir, dname = f"{input_path}/{dataset}", dataset.split('_')[0]
filter_files = glob.glob(f"{subdir}/*[!total]_{dname}_dr?.fits")
if len(filter_files) > 0:
outpath = os.path.join(outpath, dname)
os.makedirs(outpath, exist_ok=True)
else:
print("Filter images missing: ", dataset)
return
for hfile in filter_files:
ras, decs = np.ndarray((0,)), np.ndarray((0,))
with fits.open(hfile) as ff:
hdu = ff[1]
wcs = WCS(hdu.header)
footprint = wcs.calc_footprint(hdu.header)
ras = np.append(ras, footprint[:, 0])
decs = np.append(decs, footprint[:, 1])
ralim = [np.max(ras), np.min(ras)]
declim = [np.max(decs), np.min(decs)]
radeclim = np.stack([ralim, declim], axis=1)
fig = plt.figure(figsize=figsize, edgecolor='k', frameon=False)
ax = fig.add_subplot(111, projection=wcs, frameon=False)
plt.axis(False)
interval = ZScaleInterval()
vmin, vmax = interval.get_limits(hdu.data)
norm = ImageNormalize(hdu.data, vmin=vmin, vmax=vmax*2,
clip=True)
xlim, ylim = wcs.wcs_world2pix(radeclim, 1)
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.imshow(hdu.data, origin='lower', norm=norm, cmap='gray')
name = os.path.basename(hfile).split('.')[0][:-4]
if crpt:
pfx, sfx = '_'.join(dataset.split('_')[1:]), '_'.join(name.split('_')[4:])
name = f"hst_{pfx}_{sfx}"
imgpath = os.path.join(outpath, name)
plt.savefig(imgpath, bbox_inches='tight')
plt.close(fig)
print(f"\t{imgpath}.png")
def generate_filter_images(input_path, outpath, dataset=None, figsize=(24,24), crpt=0):
if dataset is not None:
datasets = [dataset]
else:
if os.path.exists(f'{input_path}/.DS_Store'): # only happens on Mac
os.remove(f'{input_path}/.DS_Store')
datasets = os.listdir(input_path)
for dataset in datasets:
print(dataset)
draw_filter_images(input_path, outpath, dataset, figsize=figsize, crpt=crpt)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("input_path", type=str, help="path to datasets directory")
parser.add_argument("outpath", type=str, help="directory path to save png images")
parser.add_argument("-t", "--imgtype", type=str, choices=['total', 'filter'], default='total', help="draw total detection or filter level images")
parser.add_argument("-g", "--generator", type=int, choices=[0, 1, 2, 3], default=3, help="0: generate original only; 1: point-segment, 2: gaia; 3: original, point-segment, and gaia (3 separate images)")
parser.add_argument("-s", "--size", type=int, default=24, help="figsize")
parser.add_argument("-c", "--corruptions", type=int, default=0, help="corrupted datasets (used to format png names)")
parser.add_argument("-d", "--dataset", type=str, default=None, help="specify single dataset (default is None to generate images for all dataset subdirectories in input_path")
args = parser.parse_args()
input_path = args.input_path
outpath = args.outpath
img_type = args.imgtype
gen = args.generator
size = (args.size, args.size)
crpt = args.corruptions
dataset = args.dataset
if img_type == 'total':
generate_total_images(input_path, outpath, dataset=dataset, figsize=size, crpt=crpt, gen=gen)
else:
generate_filter_images(input_path, outpath, dataset=dataset, figsize=size, crpt=crpt)
|
the-stack_106_27133 | """Database backed models for the asset store."""
import json
import re
import six
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import Column, Integer, JSON, String
from sqlalchemy.exc import IntegrityError
from sqlalchemy_utils import ChoiceType
from asset_store.utils import get_choice_list, ResourceConflictError, ValidationError, validate_choice
db = SQLAlchemy()
class Asset(db.Model):
"""A model for tracking satellite and antenna assets."""
# TODO: consider using enums
# types
ANTENNA = 'antenna'
SATELLITE = 'satellite'
ASSET_TYPES = [(SATELLITE, 'satellite'),
(ANTENNA, 'antenna')]
# type specific classes
DISH = 'dish'
YAGI = 'yagi'
ANTENNA_CLASSES = [(DISH, 'dish'),
(YAGI, 'yagi')]
DOVE = 'dove'
RAPIDEYE = 'rapideye'
SATELLITE_CLASSES = [(DOVE, 'dove'),
(RAPIDEYE, 'rapideye')]
# all classes
ASSET_CLASSES = ANTENNA_CLASSES + SATELLITE_CLASSES
# supported keys in the asset_details json blob for dish assets
DIAMETER = 'diameter'
RADOME = 'radome'
DISH_DETAILS = [DIAMETER, RADOME]
# supported keys in the asset_details json blob for yagi assets
GAIN = 'gain'
YAGI_DETAILS = [GAIN]
# model fields
id = Column(Integer, primary_key=True)
asset_name = Column(String(64), nullable=False, unique=True)
asset_type = Column(ChoiceType(ASSET_TYPES), nullable=False)
asset_class = Column(ChoiceType(ASSET_CLASSES), nullable=False)
# store details as a string for now -- consider using a JSON column (requires a sqlite extension)
asset_details_json = Column(String)
@property
def asset_details(self):
"""Dict of asset details."""
if self.asset_details_json:
return json.loads(self.asset_details_json)
else:
return {}
def update_details(self, new_details):
"""Validate and update asset_details for an asset."""
if not isinstance(new_details, dict):
raise ValidationError('Asset details should be a dict.')
self._validate_asset_details_for_asset_class(new_details, self.asset_class.value)
self.asset_details_json = json.dumps(new_details)
db.session.add(self)
db.session.commit()
@classmethod
def create_asset(cls, asset_name, asset_type, asset_class, asset_details=None):
"""Create a new instance of an Asset.
Args:
asset_name (string): name of the new asset. Must meet the following constraints:
1. must start with an alphanumeric character
2. can only contain alphanumeric characters, dashses, and underscores
3. can be no longer than 64 characters
4. can be no shorter than 4 characters
5. can not already be used as an existing asset's asset_name
asset_type (string): the type of asset. Valid types are 'satellite' and 'antenna'
asset_class (string): the class of the asset. Valid classes depend on the asset_type.
- Valid classes for 'satellite' asset_type are 'dove' and 'rapideye'
- Valid classes for 'antenna' asset_type are 'dish' and 'yagi'
Returns:
asset: a newly created Asset instance
Raises:
ValidationError: the provided arguments do not meet validation constraints
            ResourceConflictError: an asset with the provided asset_name already exists
"""
from run import app
cls._validate_asset_name(asset_name)
cls._validate_asset_type(asset_type)
cls._validate_asset_class(asset_class)
cls._validate_asset_class_with_asset_type(asset_class, asset_type)
if not asset_details:
asset_details = {}
else:
cls._validate_asset_details_for_asset_class(asset_details, asset_class)
with app.app_context():
try:
asset = Asset(asset_name=asset_name,
asset_type=asset_type,
asset_class=asset_class,
asset_details_json=json.dumps(asset_details))
db.session.add(asset)
db.session.commit()
return asset
except IntegrityError as err:
if 'UNIQUE constraint failed: asset.asset_name' in '{}'.format(err):
raise ResourceConflictError('There is already an asset with asset_name {}'.format(asset_name))
# The following methods are for validating asset fields.
# To better enforce some of the business rules, additional database constraints could be added in the future.
@classmethod
def _validate_asset_type(cls, asset_type):
"""Check if an asset_type value is valid.
Args:
asset_type (str): value representing an asset_type to be validated
Returns:
valid (bool): True if validation passed
Raises:
ValidationError: if provided asset_type value is not a valid choice
"""
return validate_choice('asset_type', asset_type, cls.ASSET_TYPES)
@classmethod
def _validate_asset_class(cls, asset_class):
"""Check if an asset_type value is valid.
Args:
asset_class (str): value representing an asset_class to be validated
Returns:
valid (bool): True if validation passed
Raises:
ValidationError: if provided asset_class value is not a valid choice
"""
return validate_choice('asset_class', asset_class, cls.ASSET_CLASSES)
@classmethod
def _validate_asset_class_with_asset_type(cls, asset_class, asset_type):
"""Check if an asset_class value is valid for a given asset_type.
Args:
asset_type (str): value representing an asset_type to be validated
asset_class (str): value representing an asset_class to be validated
Returns:
valid (bool): True if validation passed
Raises:
ValidationError: if provided asset_class value is not a valid choice given the provided asset_type
"""
error_msg = 'Invalid asset_class. For the {} asset_type, valid asset_class values are: {}'
if asset_type == cls.ANTENNA:
choices = get_choice_list(cls.ANTENNA_CLASSES)
error_msg = error_msg.format(cls.ANTENNA, choices)
return validate_choice('asset_class', asset_class, cls.ANTENNA_CLASSES,
custom_error_msg=error_msg)
elif asset_type == cls.SATELLITE:
choices = get_choice_list(cls.SATELLITE_CLASSES)
error_msg = error_msg.format(cls.SATELLITE, choices)
return validate_choice('asset_class', asset_class, cls.SATELLITE_CLASSES,
custom_error_msg=error_msg)
else:
raise ValidationError('Unrecognized asset_type {}'.format(asset_type))
@classmethod
def _validate_asset_name(cls, asset_name):
"""Check if an asset_name value is valid.
Args:
asset_name (str): value representing an asset_name to be validated
Returns:
valid (bool): True if validation passed
Raises:
ValidationError: if provided asset_name has one of the following issues:
- is shorter than 4 characters
- is longer than 64 characters
- has already used by another asset
- starts with a '-' or '_'
"""
if not isinstance(asset_name, six.string_types):
raise ValidationError('asset_name must be a string.')
length = len(asset_name)
if length < 4:
raise ValidationError('asset_name must be at least 4 characters in length.')
if length > 64:
raise ValidationError('asset_name must be at most 64 characters in length.')
first_char = asset_name[0]
if first_char in ['-', '_']:
raise ValidationError('asset_name cannot begin with an underscore or dash.')
# should start with an alphanum and all subsequent characters should be alphanum or dashes
if re.match('^[0-9a-zA-Z]+[0-9a-zA-Z_-]*$', asset_name) is None:
raise ValidationError('asset_name may only contain alphanumeric ascii characters, underscores, and dashes.')
return True
@classmethod
    def _get_asset_details_dict(cls, asset_details):
        """Parse an asset_details json string and return it as a dict."""
        try:
            details = json.loads(asset_details)
        except (TypeError, ValueError):
            raise ValidationError('asset_details should be a json object.')
        if not isinstance(details, dict):
            raise ValidationError('asset_details should be a json object.')
        return details
@classmethod
def _check_for_unknown_asset_details_keys(cls, asset_details, asset_class):
"""Make sure details keys are supported."""
allowed_keys = []
if asset_class == cls.DISH:
allowed_keys = cls.DISH_DETAILS
if asset_class == cls.YAGI:
allowed_keys = cls.YAGI_DETAILS
keys = asset_details.keys()
for key in keys:
if key not in allowed_keys:
key_error_msg = 'key {} in asset_details is not supported for asset_class {}. allowed keys are: {}'
raise ValidationError(key_error_msg.format(key, asset_class, allowed_keys))
@classmethod
def _validate_float_key(cls, name, value):
try:
float(value)
except ValueError:
raise ValidationError('{} in asset_details should have a float value'.format(name))
@classmethod
def _validate_asset_details_for_asset_class(cls, asset_details, asset_class):
"""Make sure that the asset_details follow business roles for the provided asset_class.
- asset_class dish can have diameter and radome details
- asset_class yagi can have gain details
"""
cls._check_for_unknown_asset_details_keys(asset_details, asset_class)
for key, value in asset_details.items():
if key == cls.DIAMETER:
cls._validate_float_key(key, value)
            elif key == cls.RADOME:
                if not isinstance(value, bool):
                    raise ValidationError('{} in asset_details should have a boolean value'.format(cls.RADOME))
elif key == cls.GAIN:
cls._validate_float_key(key, value)
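# A minimal usage sketch (illustrative only, not part of the original model code):
# it shows how the validation rules above behave. The asset names and detail
# values are hypothetical, and create_asset needs the Flask app from run.py.
def _example_create_assets():
    """Create a valid dish antenna and demonstrate a failing name check."""
    # A 'dish' antenna may carry 'diameter' (float) and 'radome' (bool) details.
    dish = Asset.create_asset('dish-01', Asset.ANTENNA, Asset.DISH,
                              asset_details={'diameter': 3.2, 'radome': True})
    try:
        # asset_name may not start with an underscore, so this raises.
        Asset.create_asset('_bad-name', Asset.SATELLITE, Asset.DOVE)
    except ValidationError as err:
        print('rejected:', err)
    return dish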
|
the-stack_106_27136 | import os
import string
import numpy as np
import pandas as pd
from .auth import get_config_file
from .exceptions import CufflinksError
def scattergeo():
"""
    Returns a DataFrame with the required format for a scattergeo plot
"""
path=os.path.join(os.path.dirname(__file__), '../data/scattergeo.csv')
df=pd.read_csv(path)
del df['Unnamed: 0']
df['text'] = df['airport'] + ' ' + df['city'] + ', ' + df['state'] + ' ' + 'Arrivals: ' + df['cnt'].astype(str)
df=df.rename(columns={'cnt':'z','long':'lon'})
return df
def choropleth():
"""
    Returns a DataFrame with the required format for a choropleth plot
"""
path=os.path.join(os.path.dirname(__file__), '../data/choropleth.csv')
df=pd.read_csv(path)
del df['Unnamed: 0']
df['z']=[np.random.randint(0,100) for _ in range(len(df))]
return df
def scatter3d(n_categories=5,n=10,prefix='category',mode=None):
"""
Returns a DataFrame with the required format for
a scatter3d plot
Parameters:
-----------
n_categories : int
Number of categories
n : int
Number of points for each trace
prefix : string
Name for each trace
mode : string
Format for each item
'abc' for alphabet columns
'stocks' for random stock names
"""
categories=[]
for i in range(n_categories):
categories.extend([prefix+str(i+1)]*n)
return pd.DataFrame({'x':np.random.randn(n*n_categories),
'y':np.random.randn(n*n_categories),
'z':np.random.randn(n*n_categories),
'text':getName(n*n_categories,mode=mode),
'categories':categories})
def bubble3d(n_categories=5,n=10,prefix='category',mode=None):
"""
Returns a DataFrame with the required format for
a bubble3d plot
Parameters:
-----------
n_categories : int
Number of categories
n : int
Number of points for each trace
prefix : string
Name for each trace
mode : string
Format for each item
'abc' for alphabet columns
'stocks' for random stock names
"""
categories=[]
for i in range(n_categories):
categories.extend([prefix+str(i+1)]*n)
return pd.DataFrame({'x':np.random.randn(n*n_categories),
'y':np.random.randn(n*n_categories),
'z':np.random.randn(n*n_categories),
'size':np.random.randint(1,100,n*n_categories),
'text':getName(n*n_categories,mode=mode),
'categories':categories})
def bubble(n_categories=5,n=10,prefix='category',mode=None):
"""
Returns a DataFrame with the required format for
a bubble plot
Parameters:
-----------
n_categories : int
Number of categories
n : int
Number of points for each category
prefix : string
Name for each category
mode : string
Format for each item
'abc' for alphabet columns
'stocks' for random stock names
"""
categories=[]
for i in range(n_categories):
categories.extend([prefix+str(i+1)]*n)
return pd.DataFrame({'x':np.random.randn(n*n_categories),
'y':np.random.randn(n*n_categories),
'size':np.random.randint(1,100,n*n_categories),
'text':getName(n*n_categories,mode=mode),
'categories':categories})
def pie(n_labels=5,mode=None):
"""
Returns a DataFrame with the required format for
a pie plot
Parameters:
-----------
n_labels : int
Number of labels
mode : string
Format for each item
'abc' for alphabet columns
'stocks' for random stock names
"""
return pd.DataFrame({'values':np.random.randint(1,100,n_labels),
'labels':getName(n_labels,mode=mode)})
def scatter(n_categories=5,n=10,prefix='category',mode=None):
"""
Returns a DataFrame with the required format for
a scatter plot
Parameters:
-----------
n_categories : int
Number of categories
n : int
Number of points for each category
prefix : string
Name for each category
mode : string
Format for each item
'abc' for alphabet columns
'stocks' for random stock names
"""
categories=[]
for i in range(n_categories):
categories.extend([prefix+str(i+1)]*n)
return pd.DataFrame({'x':np.random.randn(n*n_categories),
'y':np.random.randn(n*n_categories),
'text':getName(n*n_categories,mode=mode),
'categories':categories})
def heatmap(n_x=5,n_y=10):
"""
Returns a DataFrame with the required format for
a heatmap plot
Parameters:
-----------
n_x : int
Number of x categories
n_y : int
Number of y categories
"""
x=['x_'+str(_) for _ in range(n_x)]
y=['y_'+str(_) for _ in range(n_y)]
return pd.DataFrame(surface(n_x-1,n_y-1).values,index=x,columns=y)
def lines(n_traces=5,n=100,columns=None,dateIndex=True,mode=None):
"""
Returns a DataFrame with the required format for
a scatter (lines) plot
Parameters:
-----------
n_traces : int
Number of traces
n : int
Number of points for each trace
columns : [str]
List of column names
dateIndex : bool
If True it will return a datetime index
if False it will return a enumerated index
mode : string
Format for each item
'abc' for alphabet columns
'stocks' for random stock names
"""
index=pd.date_range('1/1/15',periods=n) if dateIndex else list(range(n))
df=pd.DataFrame(np.random.randn(n,n_traces),index=index,
columns=getName(n_traces,columns=columns,mode=mode))
return df.cumsum()
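# A minimal usage sketch (illustrative only): build a small random-walk frame
# with stock-style column names using lines() above. The trace count and length
# below are arbitrary choices, not defaults of the library.
def _example_lines():
    """Return a demo time-series DataFrame of five random walks."""
    df = lines(n_traces=5, n=100, mode='stocks')  # date-indexed cumulative sums
    return df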
def bars(n=3,n_categories=3,prefix='category',columns=None,mode='abc'):
"""
Returns a DataFrame with the required format for
a bar plot
Parameters:
-----------
n : int
Number of points for each trace
n_categories : int
Number of categories for each point
prefix : string
Name for each category
columns : [str]
List of column names
mode : string
Format for each item
'abc' for alphabet columns
'stocks' for random stock names
"""
categories=[]
if not columns:
columns=getName(n,mode=mode)
for i in range(n_categories):
categories.extend([prefix+str(i+1)])
data=dict([(x,np.random.randint(1,100,n_categories)) for x in columns])
return pd.DataFrame(data,index=categories)
def ohlc(n=100):
"""
Returns a DataFrame with the required format for
a candlestick or ohlc plot
df[['open','high','low','close']]
Parameters:
-----------
n : int
Number of ohlc points
"""
index=pd.date_range('1/1/15',periods=n*288,freq='5min',tz='utc')
data=np.random.randn(n*288)
data[0]=np.array([100])
df=pd.DataFrame(data,index=index,
columns=['a'])
df=df.cumsum()
df=df.resample('1d').ohlc()
df.index=df.index.date
df.index=pd.to_datetime(df.index)
return df['a']
def ohlcv(n=100):
"""
Returns a DataFrame with the required format for
a candlestick or ohlc plot
df[['open','high','low','close','volume']
Parameters:
-----------
n : int
Number of ohlc points
"""
df=ohlc(n=n)
df['volume']=[np.random.randint(1000,10000) for _ in range(len(df))]
return df
def box(n_traces=5,n=100,mode=None):
"""
Returns a DataFrame with the required format for
a box plot
Parameters:
-----------
n_traces : int
Number of traces
n : int
Number of points for each trace
mode : string
Format for each item
'abc' for alphabet columns
'stocks' for random stock names
"""
df=pd.DataFrame([np.random.chisquare(np.random.randint(2,10),n_traces) for _ in range(n)],
columns=getName(n_traces,mode=mode))
return df
def histogram(n_traces=1,n=500,dispersion=2,mode=None):
"""
Returns a DataFrame with the required format for
a histogram plot
Parameters:
-----------
n_traces : int
Number of traces
n : int
Number of points for each trace
mode : string
Format for each item
'abc' for alphabet columns
'stocks' for random stock names
"""
df=pd.DataFrame(np.transpose([np.random.randn(n)+np.random.randint(-1*dispersion,dispersion) for _ in range(n_traces)]),
columns=getName(n_traces,mode=mode))
return df
def distplot(n_traces=1,n=500,dispersion=3,mode=None):
"""
Returns a DataFrame with the required format for
a distribution plot (distplot)
Parameters:
-----------
n_traces : int
Number of traces
n : int
Number of points for each trace
mode : string
Format for each item
'abc' for alphabet columns
'stocks' for random stock names
"""
return histogram(n_traces,n,dispersion,mode)
def violin(n=500,dispersion=3,categories=True,n_categories=5):
"""
Returns a DataFrame with the required format for
    a violin plot
Parameters:
-----------
n : int
Number of points
categories : bool or int
If True, then a column with categories is added
n_categories : int
Number of categories
"""
df = histogram(1,n,dispersion,'abc')
df=df.rename(columns={'a':'data'})
if categories:
df['categories']=['category_{0}'.format(np.random.randint(n_categories)) for _ in range(n)]
return df
def surface(n_x=20,n_y=20):
"""
Returns a DataFrame with the required format for
a surface plot
Parameters:
-----------
n_x : int
Number of points along the X axis
n_y : int
Number of points along the Y axis
"""
x=[float(np.random.randint(0,100))]
for i in range(n_x):
x.append(x[:1][0]+np.random.randn()*np.random.randint(1,10))
df=pd.DataFrame(x)
for i in range(n_y):
df[i+1]=df[i].map(lambda x:x+np.random.randn()*np.random.randint(1,10))
return df
def sinwave(n=4,inc=.25):
"""
Returns a DataFrame with the required format for
a surface (sine wave) plot
Parameters:
-----------
n : int
Ranges for X and Y axis (-n,n)
    inc : float
        Size of the increment along each axis
"""
x=np.arange(-n,n,inc)
y=np.arange(-n,n,inc)
X,Y=np.meshgrid(x,y)
R = np.sqrt(X**2 + Y**2)
Z = np.sin(R)/(.5*R)
return pd.DataFrame(Z,index=x,columns=y)
def getName(n=1,name=3,exchange=2,columns=None,mode='abc'):
if columns:
if isinstance(columns,str):
columns=[columns]
if n != len(columns):
raise CufflinksError("Length of column names needs to be the \n"
"same length of traces")
else:
if mode is None:
mode=get_config_file()['datagen_mode']
if mode=='abc':
def get_abc(n):
def _w(n,base=2):
_n=1
st=[]
while base**_n<=n:
_n+=1
for _ in range(_n-1,0,-1):
n_st=n//(base**_)
st.append(n_st)
n=n-n_st*(base**_)
st.append(n+1)
return st
st=_w(n,len(string.ascii_lowercase))
_st=''
for _ in st:
_st+=string.ascii_lowercase[_-1]
return _st
columns=[get_abc(_) for _ in range(n)]
elif mode=='stocks':
columns=[''.join(np.random.choice(list(string.ascii_uppercase),name)) + '.' + ''.join(np.random.choice(list(string.ascii_uppercase),exchange)) for _ in range(n)]
else:
raise CufflinksError("Unknown mode: {0}".format(mode))
return columns
|
the-stack_106_27138 | from copy import deepcopy
import random
def find_path(scanner, memory):
DIR_reverse = {"N": 'S', "S": 'N', "W": 'E', "E": 'W'}
DIR = {"N": (-1, 0), "S": (1, 0), "W": (0, -1), "E": (0, 1)}
memory_binary = bin(memory)[2:]
memory_binary = '0'*(100-len(memory_binary)) + memory_binary
memory_list = [[j for j in memory_binary[i*10:(i+1)*10]] for i in range(10)]
memory_list[0][0] = '1'
path = []
pool = []
for (direction, distance) in scanner.items():
if distance and memory_list[DIR[direction][0]][DIR[direction][1]] == '0':
pool.append([direction,1])
if pool:
        if len(pool) > 1:
            random.seed()
        [final_direction, final_distance] = random.choice(pool)
else:
final_direction = random.choice([i for i in scanner.keys() if scanner[i]])
final_distance = 1
if final_direction in 'WE':
for i in range(1,10):
memory_list[0][i] = '0'
else:
for i in range(1,10):
memory_list[i][0] = '0'
path += [final_direction]*final_distance
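    # shift the 10x10 visited grid after each step so it stays centered on the player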
for i in range(final_distance):
for j, l in enumerate(memory_list):
memory_list[j] = l[DIR[final_direction][1]%10:] + l[:DIR[final_direction][1]%10]
memory_list = memory_list[DIR[final_direction][0]%10:] + memory_list[:DIR[final_direction][0]%10]
memory_binary = ''.join(k for k in [''.join(j) for j in memory_list])
return ''.join(path), int('0b'+memory_binary,2)
    # for debugging
raise ValueError
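# Worked illustration (not part of the original solution): the `memory` integer
# packs a 10x10 grid of visited-cell bits, row-major, into 100 binary digits,
# exactly as decoded at the top of find_path. This helper just prints the grid.
def _show_memory_grid(memory):
    """Print the 10x10 bit grid encoded in the 100-bit memory integer."""
    bits = bin(memory)[2:].zfill(100)         # left-pad to exactly 100 bits
    for row in range(10):
        print(bits[row * 10:(row + 1) * 10])  # one grid row per line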
if __name__ == '__main__':
# These "asserts" using only for self-checking and not necessary for auto-testing
DIR = {"N": (-1, 0), "S": (1, 0), "W": (0, -1), "E": (0, 1)}
WALL = "X"
EXIT = "E"
EMPTY = "."
MAX_STEP = 300
def get_visible(maze, player):
result = {}
for direction, (dr, dc) in DIR.items():
cr, cc = player
distance = -1
while maze[cr][cc] != WALL:
cr += dr
cc += dc
distance += 1
result[direction] = distance
return result
def checker(func, player, maze):
step = 0
memory = 0
while True:
result, memory = func(get_visible(maze, player), memory)
if not isinstance(result, str) or any(ch not in DIR.keys() for ch in result):
print("The function should return a string with directions.")
return False
if not isinstance(memory, int) or memory < 0 or memory >= 2 ** 100:
print("The memory number should be an integer from 0 to 2**100.")
return False
for act in result:
if step >= MAX_STEP:
print("You are tired and your scanner is off. Bye bye.")
return False
r, c = player[0] + DIR[act][0], player[1] + DIR[act][1]
if maze[r][c] == WALL:
print("BAM! You in the wall at {}, {}.".format(r, c))
return False
elif maze[r][c] == EXIT:
print("GRATZ!")
return True
else:
player = r, c
step += 1
assert checker(find_path, (1, 1), [
"XXXXXXXXXXXX",
"X..........X",
"X.XXXXXXXX.X",
"X.X......X.X",
"X.X......X.X",
"X.X......X.X",
"X.X......X.X",
"X.X......X.X",
"X.X......X.X",
"X.XXXXXXXX.X",
"X.........EX",
"XXXXXXXXXXXX",
]), "Simple"
assert checker(find_path, (1, 4), [
"XXXXXXXXXXXX",
"XX...X.....X",
"X..X.X.X.X.X",
"X.XX.X.X.X.X",
"X..X.X.X.X.X",
"XX.X.X.X.X.X",
"X..X.X.X.X.X",
"X.XX.X.X.X.X",
"X..X.X.X.X.X",
"XX.X.X.X.X.X",
"XE.X.....X.X",
"XXXXXXXXXXXX",
]), "Up Down"
assert checker(find_path, (10, 10), [
"XXXXXXXXXXXX",
"X..........X",
"X.XXXXXXXX.X",
"X.X......X.X",
"X.X.XX.X.X.X",
"X.X......X.X",
"X.X......X.X",
"X.X..E...X.X",
"X.X......X.X",
"X.XXXX.XXX.X",
"X..........X",
"XXXXXXXXXXXX",
]), "Left"
|
the-stack_106_27139 | # -*- coding: utf-8 -*-
''' Data Handler Module
This module contains a class for managing a data processing pipeline
'''
from time import time
from datetime import timedelta
import numpy as np
import pandas as pd
from scipy.stats import mode, skew
from scipy.interpolate import interp1d
from sklearn.cluster import DBSCAN
import cvxpy as cvx
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
from solardatatools.time_axis_manipulation import make_time_series,\
standardize_time_axis
from solardatatools.matrix_embedding import make_2d
from solardatatools.data_quality import daily_missing_data_advanced
from solardatatools.data_filling import zero_nighttime, interp_missing
from solardatatools.clear_day_detection import find_clear_days
from solardatatools.plotting import plot_2d
from solardatatools.clear_time_labeling import find_clear_times
from solardatatools.solar_noon import avg_sunrise_sunset
from solardatatools.algorithms import CapacityChange, TimeShift, SunriseSunset
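# A minimal usage sketch (illustrative only): it assumes a pandas DataFrame with
# a DatetimeIndex and a power column named 'ac_power' -- both the data source and
# the column name are hypothetical. Typical flow: wrap the table, run the
# pipeline, print the report.
def _example_pipeline(df):
    """Illustrative only: run the standard pipeline on a power DataFrame."""
    dh = DataHandler(data_frame=df)        # wrap the time series table
    dh.run_pipeline(power_col='ac_power')  # clean, score, and fix time shifts
    dh.report()                            # print the data set summary
    return dh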
class DataHandler():
def __init__(self, data_frame=None, raw_data_matrix=None, datetime_col=None,
convert_to_ts=False, aggregate=None, how=lambda x: x.mean()):
if data_frame is not None:
if convert_to_ts:
data_frame, keys = make_time_series(data_frame)
self.keys = keys
else:
self.keys = list(data_frame.columns)
self.data_frame_raw = data_frame.copy()
if not isinstance(self.data_frame_raw.index, pd.DatetimeIndex):
if datetime_col is not None:
df = self.data_frame_raw
df[datetime_col] = pd.to_datetime(df[datetime_col])
df.set_index(datetime_col, inplace=True)
else:
e = "Data frame must have a DatetimeIndex or"
e += "the user must set the datetime_col kwarg."
raise Exception(e)
df_index = self.data_frame_raw.index
if df_index.tz is not None:
df_index = df_index.tz_localize(None)
self.data_frame = None
if aggregate is not None:
new_data = how(self.data_frame_raw.resample(aggregate))
self.data_frame_raw = new_data
else:
self.data_frame_raw = None
self.data_frame = None
self.keys = None
self.raw_data_matrix = raw_data_matrix
if self.raw_data_matrix is not None:
self.num_days = self.raw_data_matrix.shape[1]
if self.raw_data_matrix.shape[0] <= 1400:
self.data_sampling = int(24 * 60 / self.raw_data_matrix.shape[0])
else:
self.data_sampling = 24 * 60 / self.raw_data_matrix.shape[0]
else:
self.num_days = None
self.data_sampling = None
self.filled_data_matrix = None
self.use_column = None
self.capacity_estimate = None
self.start_doy = None
self.day_index = None
self.power_units = None
# "Extra" data, i.e. additional columns to process from the table
self.extra_matrices = {} # Matrix views of extra columns
self.extra_quality_scores = {} # Relative quality: fraction of non-NaN values in column during daylight time periods, as defined by the main power columns
# Scores for the entire data set
self.data_quality_score = None # Fraction of days without data acquisition errors
self.data_clearness_score = None # Fraction of days that are approximately clear/sunny
# Flags for the entire data set
self.inverter_clipping = None # True if there is inverter clipping, false otherwise
self.num_clip_points = None # If clipping, the number of clipping set points
self.capacity_changes = None # True if the apparent capacity seems to change over the data set
self.normal_quality_scores = None # True if clustering of data quality scores are within decision boundaries
self.time_shifts = None # True if time shifts detected and corrected in data set
self.tz_correction = 0 # TZ correction factor (determined during pipeline run)
# Daily scores (floats), flags (booleans), and boolean masks
self.daily_scores = DailyScores() # 1D arrays of floats
self.daily_flags = DailyFlags() # 1D arrays of Booleans
self.boolean_masks = BooleanMasks() # 2D arrays of Booleans
# Useful daily signals defined by the data set
self.daily_signals = DailySignals()
# Algorithm objects
self.scsf = None
self.capacity_analysis = None
self.time_shift_analysis = None
self.daytime_analysis = None
# Private attributes
self._ran_pipeline = False
self._error_msg = ''
self.__density_lower_threshold = None
self.__density_upper_threshold = None
self.__linearity_threshold = None
self.__recursion_depth = 0
self.__initial_time = None
self.__fix_dst_ran = False
def run_pipeline(self, power_col=None, min_val=-5, max_val=None,
zero_night=True, interp_day=True, fix_shifts=True,
density_lower_threshold=0.6, density_upper_threshold=1.05,
linearity_threshold=0.1, clear_day_smoothness_param=0.9,
clear_day_energy_param=0.8, verbose=True,
start_day_ix=None, end_day_ix=None, c1=None, c2=500.,
solar_noon_estimator='com', correct_tz=True, extra_cols=None,
daytime_threshold=0.1, units='W'):
self.daily_scores = DailyScores()
self.daily_flags = DailyFlags()
self.capacity_analysis = None
self.time_shift_analysis = None
self.extra_matrices = {} # Matrix views of extra columns
self.extra_quality_scores = {}
self.power_units = units
if self.__recursion_depth == 0:
self.tz_correction = 0
t = np.zeros(6)
######################################################################
# Preprocessing
######################################################################
t[0] = time()
if self.data_frame_raw is not None:
self.data_frame = standardize_time_axis(self.data_frame_raw,
timeindex=True,
verbose=verbose)
if self.data_frame is not None:
self.make_data_matrix(power_col, start_day_ix=start_day_ix,
end_day_ix=end_day_ix)
if max_val is not None:
mat_copy = np.copy(self.raw_data_matrix)
mat_copy[np.isnan(mat_copy)] = -9999
slct = mat_copy > max_val
if np.sum(slct) > 0:
self.raw_data_matrix[slct] = np.nan
if min_val is not None:
mat_copy = np.copy(self.raw_data_matrix)
mat_copy[np.isnan(mat_copy)] = 9999
slct = mat_copy < min_val
if np.sum(slct) > 0:
self.raw_data_matrix[slct] = np.nan
self.capacity_estimate = np.nanquantile(self.raw_data_matrix, 0.95)
if self.capacity_estimate <= 500 and self.power_units == 'W':
self.power_units = 'kW'
self.boolean_masks.missing_values = np.isnan(self.raw_data_matrix)
ss = SunriseSunset()
ss.run_optimizer(self.raw_data_matrix, plot=False)
self.boolean_masks.daytime = ss.sunup_mask_estimated
self.daytime_analysis = ss
### TZ offset detection and correction ###
# (1) Determine if there exists a "large" timezone offset error
if power_col is None:
power_col = self.data_frame.columns[0]
if correct_tz:
average_day = np.zeros(self.raw_data_matrix.shape[0])
            all_nans = np.all(np.isnan(self.raw_data_matrix), axis=1)
average_day[~all_nans] = np.nanmean(
self.raw_data_matrix[~all_nans, :], axis=1
)
average_day -= np.min(average_day)
average_day /= np.max(average_day)
### Troubleshooting code
# plt.plot(average_day)
# plt.axhline(0.02, color='red', ls='--', linewidth=1)
# plt.show()
            meas_per_hour = int(60 / self.data_sampling)
cond1 = np.any(average_day[:meas_per_hour] > 0.02)
cond2 = np.any(average_day[-meas_per_hour:] > 0.02)
cond3 = self.__recursion_depth <= 2
if (cond1 or cond2) and cond3:
if verbose:
print(
'Warning: power generation at midnight. Attempting to correct...')
# Catch values that are more than 4 hours from noon and make a
# correction to the time axis (rough correction to avoid days
# rolling over)
rough_noon_est = np.nanmean(
self.data_frame.groupby(pd.Grouper(freq='D')) \
.idxmax()[power_col].dt.time \
.apply(lambda x: 60 * x.hour + x.minute)
) / 60
self.tz_correction = 12 - np.round(rough_noon_est)
self.data_frame.index = self.data_frame.index.shift(
self.tz_correction, freq='H'
)
if verbose:
print('Done.\nRestarting the pipeline...')
self.__recursion_depth += 1
if self.__initial_time is not None:
self.__initial_time = t[0]
self.run_pipeline(
power_col=power_col, min_val=min_val,
max_val=max_val, zero_night=zero_night,
interp_day=interp_day, fix_shifts=fix_shifts,
density_lower_threshold=density_lower_threshold,
density_upper_threshold=density_upper_threshold,
linearity_threshold=linearity_threshold,
clear_day_smoothness_param=clear_day_smoothness_param,
clear_day_energy_param=clear_day_energy_param,
verbose=verbose, start_day_ix=start_day_ix,
end_day_ix=end_day_ix, c1=c1, c2=c2,
solar_noon_estimator=solar_noon_estimator,
correct_tz=correct_tz, extra_cols=extra_cols,
daytime_threshold=daytime_threshold, units=units
)
return
######################################################################
# Cleaning
######################################################################
t[1] = time()
self.make_filled_data_matrix(zero_night=zero_night, interp_day=interp_day)
num_raw_measurements = np.count_nonzero(
np.nan_to_num(self.raw_data_matrix,
copy=True,
nan=0.)[self.boolean_masks.daytime]
)
num_filled_measurements = np.count_nonzero(
np.nan_to_num(self.filled_data_matrix,
copy=True,
nan=0.)[self.boolean_masks.daytime]
)
if num_raw_measurements > 0:
ratio = num_filled_measurements / num_raw_measurements
else:
msg = 'Error: data set contains no non-zero values!'
self._error_msg += '\n' + msg
if verbose:
print(msg)
self.daily_scores = None
self.daily_flags = None
self.data_quality_score = 0.0
self.data_clearness_score = 0.0
self._ran_pipeline = True
return
if ratio < 0.9:
msg = 'Error: data was lost during NaN filling procedure. '
msg += 'This typically occurs when\nthe time stamps are in the '
msg += 'wrong timezone. Please double check your data table.\n'
self._error_msg += '\n' + msg
if verbose:
print(msg)
self.daily_scores = None
self.daily_flags = None
self.data_quality_score = None
self.data_clearness_score = None
self._ran_pipeline = True
return
### TZ offset detection and correction ###
# (2) Determine if there is a "small" timezone offset error
if correct_tz:
average_noon = np.nanmean(
avg_sunrise_sunset(self.filled_data_matrix, threshold=0.01)
)
tz_offset = int(np.round(12 - average_noon))
if tz_offset != 0:
self.tz_correction += tz_offset
# Related to this bug fix:
# https://github.com/slacgismo/solar-data-tools/commit/ae0037771c09ace08bff5a4904475da606e934da
old_index = self.data_frame.index.copy()
self.data_frame.index = self.data_frame.index.shift(
tz_offset, freq='H'
)
self.data_frame = self.data_frame.reindex(index=old_index,
method='nearest',
limit=1).fillna(0)
meas_per_hour = self.filled_data_matrix.shape[0] / 24
roll_by = int(meas_per_hour * tz_offset)
self.filled_data_matrix = np.nan_to_num(
np.roll(self.filled_data_matrix, roll_by, axis=0),
0
)
self.raw_data_matrix = np.roll(
self.raw_data_matrix, roll_by, axis=0
)
self.boolean_masks.daytime = np.roll(
self.boolean_masks.daytime, roll_by, axis=0
)
######################################################################
# Scoring
######################################################################
t[2] = time()
t_clean = np.zeros(6)
t_clean[0] = time()
try:
self.get_daily_scores(threshold=0.2)
except:
msg = 'Daily quality scoring failed.'
self._error_msg += '\n' + msg
if verbose:
print(msg)
self.daily_scores = None
try:
self.get_daily_flags(density_lower_threshold=density_lower_threshold,
density_upper_threshold=density_upper_threshold,
linearity_threshold=linearity_threshold)
except:
msg = 'Daily quality flagging failed.'
self._error_msg += '\n' + msg
if verbose:
print(msg)
self.daily_flags = None
t_clean[1] = time()
try:
self.detect_clear_days(smoothness_threshold=clear_day_smoothness_param,
energy_threshold=clear_day_energy_param)
except:
msg = 'Clear day detection failed.'
self._error_msg += '\n' + msg
if verbose:
print(msg)
t_clean[2] = time()
try:
self.clipping_check()
except:
msg = 'Clipping check failed.'
self._error_msg += '\n' + msg
if verbose:
print(msg)
self.inverter_clipping = None
t_clean[3] = time()
try:
self.score_data_set()
except:
msg = 'Data set summary scoring failed.'
self._error_msg += '\n' + msg
if verbose:
print(msg)
self.data_quality_score = None
self.data_clearness_score = None
t_clean[4] = time()
try:
self.capacity_clustering()
except TypeError:
self.capacity_changes = None
t_clean[5] = time()
######################################################################
# Fix Time Shifts
######################################################################
t[3] = time()
if fix_shifts:
try:
self.auto_fix_time_shifts(c1=c1, c2=c2,
estimator=solar_noon_estimator,
threshold=daytime_threshold,
periodic_detector=False)
except Exception as e:
msg = 'Fix time shift algorithm failed.'
self._error_msg += '\n' + msg
if verbose:
print(msg)
print('Error message:', e)
print('\n')
self.time_shifts = None
######################################################################
# Update daytime detection based on cleaned up data
######################################################################
# self.daytime_analysis.run_optimizer(self.filled_data_matrix, plot=False)
self.daytime_analysis.calculate_times(self.filled_data_matrix)
self.boolean_masks.daytime = self.daytime_analysis.sunup_mask_estimated
######################################################################
# Process Extra columns
######################################################################
t[4] = time()
if extra_cols is not None:
freq = int(self.data_sampling * 60)
new_index = pd.date_range(start=self.day_index[0].date(),
end=self.day_index[-1].date() + timedelta(
days=1),
freq='{}s'.format(freq))[:-1]
if isinstance(extra_cols, str):
extra_cols = np.atleast_1d(extra_cols)
elif isinstance(extra_cols, tuple):
extra_cols = [extra_cols]
for col in extra_cols:
self.generate_extra_matrix(col, new_index=new_index)
t[5] = time()
times = np.diff(t, n=1)
cleaning_times = np.diff(t_clean, n=1)
total_time = t[-1] - t[0]
# Cleanup
self.__recursion_depth = 0
if verbose:
if self.__initial_time is not None:
restart_msg = '{:.2f} seconds spent automatically localizing the time zone\n'
restart_msg += 'Info for last pipeline run below:\n'
restart_msg = restart_msg.format(t[0] - self.__initial_time)
print(restart_msg)
out = 'total time: {:.2f} seconds\n'
out += '--------------------------------\n'
out += 'Breakdown\n'
out += '--------------------------------\n'
out += 'Preprocessing {:.2f}s\n'
out += 'Cleaning {:.2f}s\n'
out += 'Filtering/Summarizing {:.2f}s\n'
out += ' Data quality {:.2f}s\n'
out += ' Clear day detect {:.2f}s\n'
out += ' Clipping detect {:.2f}s\n'
out += ' Capacity change detect {:.2f}s\n'
if extra_cols is not None:
out += 'Extra Column Processing {:.2f}s'
print(out.format(
total_time,
times[0],
times[1] + times[3],
times[2],
cleaning_times[0],
cleaning_times[1],
cleaning_times[2],
cleaning_times[4],
times[4]
))
self._ran_pipeline = True
return
def report(self):
try:
if self.num_days >= 365:
l1 = 'Length: {:.2f} years\n'.format(self.num_days / 365)
else:
l1 = 'Length: {} days\n'.format(self.num_days)
if self.power_units == 'W':
l1_a = 'Capacity estimate: {:.2f} kW\n'.format(self.capacity_estimate / 1000)
elif self.power_units == 'kW':
l1_a = 'Capacity estimate: {:.2f} kW\n'.format(self.capacity_estimate)
else:
l1_a = 'Capacity estimate: {:.2f} '.format(self.capacity_estimate)
l1_a += self.power_units + '\n'
if self.raw_data_matrix.shape[0] <= 1440:
l2 = 'Data sampling: {} minute\n'.format(self.data_sampling)
else:
l2 = 'Data sampling: {} second\n'.format(int(self.data_sampling * 60))
l3 = 'Data quality score: {:.1f}%\n'.format(self.data_quality_score * 100)
l4 = 'Data clearness score: {:.1f}%\n'.format(self.data_clearness_score * 100)
l5 = 'Inverter clipping: {}\n'.format(self.inverter_clipping)
l6 = 'Time shifts corrected: {}\n'.format(self.time_shifts)
if self.tz_correction != 0:
l7 = 'Time zone correction: {} hours'.format(int(self.tz_correction))
else:
l7 = 'Time zone correction: None'
p_out = l1 + l1_a + l2 + l3 + l4 + l5 + l6 + l7
if self.capacity_changes:
p_out += '\nWARNING: Changes in system capacity detected!'
if self.num_clip_points > 1:
p_out += '\nWARNING: {} clipping set points detected!'.format(
self.num_clip_points
)
if not self.normal_quality_scores:
p_out += '\nWARNING: Abnormal clustering of data quality scores!'
print(p_out)
return
except TypeError:
if self._ran_pipeline:
m1 = 'Pipeline failed, please check data set.\n'
m2 = "Try running: self.plot_heatmap(matrix='raw')\n\n"
if self.num_days >= 365:
l1 = 'Length: {:.2f} years\n'.format(
self.num_days / 365)
else:
l1 = 'Length: {} days\n'.format(
self.num_days)
if self.power_units == 'W':
l1_a = 'Capacity estimate: {:.2f} kW\n'.format(self.capacity_estimate / 1000)
elif self.power_units == 'kW':
l1_a = 'Capacity estimate: {:.2f} kW\n'.format(self.capacity_estimate)
else:
l1_a = 'Capacity estimate: {:.2f} '.format(self.capacity_estimate)
l1_a += self.power_units + '\n'
if self.raw_data_matrix.shape[0] <= 1440:
l2 = 'Data sampling: {} minute\n'.format(
self.data_sampling)
else:
l2 = 'Data sampling: {} second\n'.format(
int(self.data_sampling * 60))
p_out = m1 + m2 + l1 + l1_a + l2
print(p_out)
print('\nError messages captured from pipeline:' + self._error_msg)
else:
print('Please run the pipeline first!')
return
def augment_data_frame(self, boolean_index, column_name):
"""
Add a column to the data frame (tabular) representation of the data,
containing True/False values at each time stamp.
Boolean index is a 1-D or 2-D numpy array of True/False values. If 1-D,
array should be of length N, where N is the number of days in the data
set. If 2-D, the array should be of size M X N where M is the number
of measurements each day and N is the number of days.
:param boolean_index: Length N or size M X N numpy arrays of booleans
:param column_name: Name for column
:return:
"""
if self.data_frame is None:
print('This DataHandler object does not contain a data frame.')
return
if boolean_index is None:
print('No mask available for ' + column_name)
return
m, n = self.raw_data_matrix.shape
index_shape = boolean_index.shape
cond1 = index_shape == (m, n)
        cond2 = index_shape == (n,)
if not cond1 and not cond2:
print('Boolean index shape does not match the data.')
elif cond1:
if self.time_shifts:
ts = self.time_shift_analysis
boolean_index = ts.invert_corrections(boolean_index)
start = self.day_index[0]
freq = '{}min'.format(self.data_sampling)
periods = self.filled_data_matrix.size
tindex = pd.date_range(start=start, freq=freq, periods=periods)
series = pd.Series(data=boolean_index.ravel(order='F'), index=tindex)
series.name = column_name
if column_name in self.data_frame.columns:
del self.data_frame[column_name]
self.data_frame = self.data_frame.join(series)
self.data_frame[column_name] = self.data_frame[column_name].fillna(False)
elif cond2:
slct_dates = self.day_index[boolean_index].date
bix = np.isin(self.data_frame.index.date, slct_dates)
self.data_frame[column_name] = False
self.data_frame.loc[bix, column_name] = True
if column_name in self.data_frame_raw.columns:
del self.data_frame_raw[column_name]
self.data_frame_raw = self.data_frame_raw.join(self.data_frame[column_name])
def fix_dst(self):
"""
Helper function for fixing data sets with known DST shift. This function
works for data recorded anywhere in the United States. The choice of
timezone (e.g. 'US/Pacific') does not matter, as long as the dates
of the clock changes are the same.
:return:
"""
if not self.__fix_dst_ran:
df = self.data_frame_raw
df_localized = df.tz_localize('US/Pacific', ambiguous='NaT',
nonexistent='NaT')
df_localized = df_localized[df_localized.index == df_localized.index]
df_localized = df_localized.tz_convert('Etc/GMT+8')
df_localized = df_localized.tz_localize(None)
self.data_frame_raw = df_localized
self.__fix_dst_ran = True
return
else:
print('DST correction already performed on this data set.')
return
def make_data_matrix(self, use_col=None, start_day_ix=None, end_day_ix=None):
df = self.data_frame
if use_col is None:
use_col = df.columns[0]
self.raw_data_matrix, day_index = make_2d(df, key=use_col, return_day_axis=True)
self.raw_data_matrix = self.raw_data_matrix[:, start_day_ix:end_day_ix]
self.num_days = self.raw_data_matrix.shape[1]
        if self.raw_data_matrix.shape[0] <= 1440:
self.data_sampling = int(24 * 60 / self.raw_data_matrix.shape[0])
else:
self.data_sampling = 24 * 60 / self.raw_data_matrix.shape[0]
self.use_column = use_col
self.day_index = day_index[start_day_ix:end_day_ix]
self.start_doy = self.day_index.dayofyear[0]
return
def make_filled_data_matrix(self, zero_night=True, interp_day=True):
self.filled_data_matrix = np.copy(self.raw_data_matrix)
if zero_night:
self.filled_data_matrix = zero_nighttime(self.raw_data_matrix,
night_mask=~self.boolean_masks.daytime)
if interp_day:
self.filled_data_matrix = interp_missing(self.filled_data_matrix)
else:
msk = np.isnan(self.filled_data_matrix)
self.filled_data_matrix[msk] = 0
        self.daily_signals.energy = np.sum(self.filled_data_matrix, axis=0) *\
                                    24 / self.filled_data_matrix.shape[0]
return
def generate_extra_matrix(self, column, new_index=None, key=None):
if new_index is None:
freq = self.data_sampling * 60
end = self.day_index[-1].date() + timedelta(days=1)
new_index = pd.date_range(start=self.day_index[0].date(),
end=end,
freq='{}s'.format(freq))[:-1]
num_meas = self.filled_data_matrix.shape[0]
new_view = self.data_frame[column].loc[new_index[0]:new_index[-1]]
new_view = new_view.values.reshape(num_meas, -1, order='F')
if self.time_shifts:
ts = self.time_shift_analysis
new_view = ts.apply_corrections(new_view)
if key is None:
key = column
self.extra_matrices[key] = new_view
self.extra_quality_scores[key] = (
1 - np.sum(np.isnan(new_view[self.boolean_masks.daytime]))
/ np.sum(self.boolean_masks.daytime)
)
return
def get_daily_scores(self, threshold=0.2):
self.get_density_scores(threshold=threshold)
self.get_linearity_scores()
return
def get_daily_flags(self, density_lower_threshold=0.6,
density_upper_threshold=1.05, linearity_threshold=0.1):
self.daily_flags.density = np.logical_and(
self.daily_scores.density > density_lower_threshold,
self.daily_scores.density < density_upper_threshold
)
self.daily_flags.linearity = self.daily_scores.linearity < linearity_threshold
self.daily_flags.flag_no_errors()
scores = np.c_[self.daily_scores.density, self.daily_scores.linearity]
db = DBSCAN(eps=.03,
min_samples=max(0.01 * scores.shape[0], 3)).fit(scores)
# Count the number of days that cluster to the main group but fall
# outside the decision boundaries
day_counts = [np.logical_or(
self.daily_scores.linearity[db.labels_ == lb] > linearity_threshold,
np.logical_or(
self.daily_scores.density[db.labels_ == lb] < density_lower_threshold,
self.daily_scores.density[db.labels_ == lb] > density_upper_threshold
)
) for lb in set(db.labels_)]
self.normal_quality_scores = np.any([
np.sum(day_count) <= max(5e-3 * self.num_days, 1)
for day_count in day_counts
])
self.__density_lower_threshold = density_lower_threshold
self.__density_upper_threshold = density_upper_threshold
self.__linearity_threshold = linearity_threshold
self.daily_scores.quality_clustering = db.labels_
def get_density_scores(self, threshold=0.2):
if self.raw_data_matrix is None:
print('Generate a raw data matrix first.')
return
self.daily_scores.density, self.daily_signals.density, self.daily_signals.seasonal_density_fit\
= daily_missing_data_advanced(
self.raw_data_matrix,
threshold=threshold,
return_density_signal=True,
return_fit=True
)
return
def get_linearity_scores(self):
if self.capacity_estimate is None:
self.capacity_estimate = np.quantile(self.filled_data_matrix, 0.95)
if self.daily_signals.seasonal_density_fit is None:
print('Run the density check first')
return
temp_mat = np.copy(self.filled_data_matrix)
temp_mat[temp_mat < 0.005 * self.capacity_estimate] = np.nan
difference_mat = np.round(temp_mat[1:] - temp_mat[:-1], 4)
modes, counts = mode(difference_mat, axis=0, nan_policy='omit')
n = self.filled_data_matrix.shape[0] - 1
self.daily_scores.linearity = counts.data.squeeze() / (n * self.daily_signals.seasonal_density_fit)
# Label detected infill points with a boolean mask
        infill = np.zeros_like(self.raw_data_matrix, dtype=bool)
slct = self.daily_scores.linearity >= 0.1
reference_diffs = np.tile(modes[0][slct],
(self.filled_data_matrix.shape[0], 1))
found_infill = np.logical_or(
np.isclose(
np.r_[np.zeros(self.num_days).reshape((1, -1)),
                      difference_mat][:, slct],
reference_diffs),
np.isclose(
np.r_[difference_mat,
np.zeros(self.num_days).reshape((1, -1))][:, slct],
reference_diffs),
)
infill[:, slct] = found_infill
self.boolean_masks.infill = infill
return
def score_data_set(self):
num_days = self.raw_data_matrix.shape[1]
try:
self.data_quality_score = np.sum(self.daily_flags.no_errors) / num_days
except TypeError:
self.data_quality_score = None
try:
self.data_clearness_score = np.sum(self.daily_flags.clear) / num_days
except TypeError:
self.data_clearness_score = None
return
def clipping_check(self):
max_value = np.max(self.filled_data_matrix)
daily_max_val = np.max(self.filled_data_matrix, axis=0)
# 1st clipping statistic: ratio of the max value on each day to overall max value
clip_stat_1 = daily_max_val / max_value
# 2nd clipping statistic: fraction of energy generated each day at or
# near that day's max value
with np.errstate(divide='ignore', invalid='ignore'):
temp = self.filled_data_matrix / daily_max_val
temp_msk = temp > 0.995
temp2 = np.zeros_like(temp)
temp2[temp_msk] = temp[temp_msk]
clip_stat_2 = np.sum(temp2, axis=0) / np.sum(temp, axis=0)
clip_stat_2[np.isnan(clip_stat_2)] = 0
# Identify which days have clipping
clipped_days = np.logical_and(
clip_stat_1 > 0.05,
clip_stat_2 > 0.1
)
clipped_days = np.logical_and(
self.daily_flags.no_errors,
clipped_days
)
# clipped days must also be near a peak in the distribution of the
# 1st clipping statistic that shows the characteristic, strongly skewed
# peak shape
point_masses = self.__analyze_distribution(clip_stat_1)
try:
if len(point_masses) == 0:
clipped_days[:] = False
else:
clipped_days[clipped_days] = np.any(
np.array([np.abs(clip_stat_1[clipped_days] - x0) < .02 for x0 in
point_masses]), axis=0
)
except IndexError:
self.inverter_clipping = False
self.num_clip_points = 0
return
self.daily_scores.clipping_1 = clip_stat_1
self.daily_scores.clipping_2 = clip_stat_2
self.daily_flags.inverter_clipped = clipped_days
if np.sum(clipped_days) > 0.01 * self.num_days:
self.inverter_clipping = True
self.num_clip_points = len(point_masses)
else:
self.inverter_clipping = False
self.num_clip_points = 0
return
def find_clipped_times(self):
if self.inverter_clipping:
max_value = np.max(self.filled_data_matrix)
clip_stat_1 = self.daily_scores.clipping_1 #daily_max_val / max_value
point_masses = self.__analyze_distribution(clip_stat_1)
mat_normed = self.filled_data_matrix / max_value
masks = np.stack([np.abs(mat_normed - x0) < 0.01
for x0 in point_masses])
clipped_time_mask = np.any(masks, axis=0)
daily_max_val = np.max(self.filled_data_matrix, axis=0)
mat_normed = np.zeros_like(self.filled_data_matrix)
msk = daily_max_val != 0
mat_normed[:, msk] = self.filled_data_matrix[:, msk] / daily_max_val[msk]
clipped_time_mask = np.logical_and(
clipped_time_mask,
mat_normed >= 0.98
)
# clipped_days = self.daily_flags.inverter_clipped
# clipped_time_mask[:, ~clipped_days] = False
self.boolean_masks.clipped_times = clipped_time_mask
else:
self.boolean_masks.clipped_times = np.zeros_like(
                self.filled_data_matrix, dtype=bool
)
def capacity_clustering(self, plot=False, figsize=(8, 6),
show_clusters=True):
if self.capacity_analysis is None:
self.capacity_analysis = CapacityChange()
self.capacity_analysis.run(
self.filled_data_matrix, filter=self.daily_flags.no_errors,
quantile=1.00, c1=15, c2=100, c3=300, reweight_eps=0.5,
reweight_niter=5, dbscan_eps=.02, dbscan_min_samples='auto'
)
if len(set(self.capacity_analysis.labels)) > 1: #np.max(db.labels_) > 0:
self.capacity_changes = True
self.daily_flags.capacity_cluster = self.capacity_analysis.labels
else:
self.capacity_changes = False
if plot:
metric = self.capacity_analysis.metric
s1 = self.capacity_analysis.s1
s2 = self.capacity_analysis.s2
labels = self.capacity_analysis.labels
try:
xs = self.day_index.to_pydatetime()
except AttributeError:
xs = np.arange(self.num_days)
if show_clusters:
fig, ax = plt.subplots(nrows=2, figsize=figsize, sharex=True,
gridspec_kw={'height_ratios': [4, 1]})
ax[0].plot(xs, s1, label='capacity change detector')
ax[0].plot(xs, s2 + s1, label='signal model')
ax[0].plot(xs, metric, alpha=0.3,
label='measured signal')
ax[0].legend()
ax[0].set_title('Detection of system capacity changes')
ax[1].set_xlabel('date')
ax[0].set_ylabel('normalized daily max power')
ax[1].plot(xs, labels, ls='none', marker='.')
ax[1].set_ylabel('Capacity clusters')
else:
fig, ax = plt.subplots(nrows=1, figsize=figsize)
ax.plot(xs, s1, label='capacity change detector')
ax.plot(xs, s2 + s1, label='signal model')
ax.plot(xs, metric, alpha=0.3,
label='measured signal')
ax.legend()
ax.set_title('Detection of system capacity changes')
ax.set_ylabel('normalized daily maximum power')
ax.set_xlabel('date')
return fig
def auto_fix_time_shifts(self, c1=5., c2=500., estimator='com',
threshold=0.1, periodic_detector=False):
self.time_shift_analysis = TimeShift()
if self.data_clearness_score > 0.1 and self.num_days > 365 * 2:
use_ixs = self.daily_flags.clear
else:
use_ixs = self.daily_flags.no_errors
self.time_shift_analysis.run(
self.filled_data_matrix, use_ixs=use_ixs,
c1=c1, c2=c2, solar_noon_estimator=estimator, threshold=threshold,
periodic_detector=periodic_detector
)
self.filled_data_matrix = self.time_shift_analysis.corrected_data
if len(self.time_shift_analysis.index_set) == 0:
self.time_shifts = False
else:
self.time_shifts = True
# self.filled_data_matrix, shift_ixs = fix_time_shifts(
# self.filled_data_matrix, solar_noon_estimator=estimator, c1=c1, c2=c2,
# return_ixs=True, verbose=False, use_ixs=None, threshold=threshold
# )
# if len(shift_ixs) == 0:
# self.time_shifts = False
# else:
# self.time_shifts = True
def detect_clear_days(self, smoothness_threshold=0.9, energy_threshold=0.8):
if self.filled_data_matrix is None:
print('Generate a filled data matrix first.')
return
clear_days = find_clear_days(self.filled_data_matrix,
smoothness_threshold=smoothness_threshold,
energy_threshold=energy_threshold)
### Remove days that are marginally low density, but otherwise pass
        # the clearness test. Occasionally, we find an early morning or late
# afternoon inverter outage on a clear day is still detected as clear.
# Added July 2020 --BM
clear_days = np.logical_and(
clear_days,
self.daily_scores.density > 0.9
)
self.daily_flags.flag_clear_cloudy(clear_days)
return
def find_clear_times(self, power_hyperparam=0.1,
smoothness_hyperparam=0.05, min_length=3):
if self.scsf is None:
print('No SCSF model detected. Fitting now...')
self.fit_statistical_clear_sky_model()
clear = self.scsf.estimated_power_matrix
clear_times = find_clear_times(self.filled_data_matrix, clear,
self.capacity_estimate,
th_relative_power=power_hyperparam,
th_relative_smoothness=smoothness_hyperparam,
min_length=min_length)
self.boolean_masks.clear_times = clear_times
def fit_statistical_clear_sky_model(self, rank=6, mu_l=None, mu_r=None,
tau=None, exit_criterion_epsilon=1e-3,
solver_type='MOSEK', max_iteration=10,
calculate_degradation=True,
max_degradation=None,
min_degradation=None,
non_neg_constraints=False,
verbose=True, bootstraps=None):
try:
from statistical_clear_sky import SCSF
except ImportError:
print('Please install statistical-clear-sky package')
return
scsf = SCSF(data_handler_obj=self, rank_k=rank, solver_type=solver_type)
scsf.execute(mu_l=mu_l, mu_r=mu_r, tau=tau,
exit_criterion_epsilon=exit_criterion_epsilon,
max_iteration=max_iteration,
is_degradation_calculated=calculate_degradation,
max_degradation=max_degradation,
min_degradation=min_degradation,
non_neg_constraints=non_neg_constraints,
verbose=verbose, bootstraps=bootstraps
)
self.scsf = scsf
def calculate_scsf_performance_index(self):
if self.scsf is None:
print('No SCSF model detected. Fitting now...')
self.fit_statistical_clear_sky_model()
clear = self.scsf.estimated_power_matrix
clear_energy = np.sum(clear, axis=0)
measured_energy = np.sum(self.filled_data_matrix, axis=0)
pi = np.divide(measured_energy, clear_energy)
return pi
def plot_heatmap(self, matrix='raw', flag=None, figsize=(12, 6),
scale_to_kw=True, year_lines=True, units=None):
if matrix == 'raw':
mat = np.copy(self.raw_data_matrix)
elif matrix == 'filled':
mat = np.copy(self.filled_data_matrix)
elif matrix in self.extra_matrices.keys():
mat = self.extra_matrices[matrix]
else:
return
if units is None:
if scale_to_kw and self.power_units == 'W':
mat /= 1000
units = 'kW'
else:
units = self.power_units
if flag is None:
return plot_2d(mat, figsize=figsize,
dates=self.day_index, year_lines=year_lines,
units=units)
elif flag == 'good':
fig = plot_2d(mat, figsize=figsize,
clear_days=self.daily_flags.no_errors,
dates=self.day_index, year_lines=year_lines,
units=units)
plt.title('Measured power, good days flagged')
return fig
elif flag == 'bad':
fig = plot_2d(mat, figsize=figsize,
clear_days=~self.daily_flags.no_errors,
dates=self.day_index, year_lines=year_lines,
units=units)
plt.title('Measured power, bad days flagged')
return fig
elif flag in ['clear', 'sunny']:
fig = plot_2d(mat, figsize=figsize,
clear_days=self.daily_flags.clear,
dates=self.day_index, year_lines=year_lines,
units=units)
plt.title('Measured power, clear days flagged')
return fig
elif flag == 'cloudy':
fig = plot_2d(mat, figsize=figsize,
clear_days=self.daily_flags.cloudy,
dates=self.day_index, year_lines=year_lines,
units=units)
plt.title('Measured power, cloudy days flagged')
return fig
elif flag == 'clipping':
fig = plot_2d(mat, figsize=figsize,
clear_days=self.daily_flags.inverter_clipped,
dates=self.day_index, year_lines=year_lines,
units=units)
plt.title('Measured power, days with inverter clipping flagged')
return fig
else:
print('Unknown daily flag. Please use one of the following:')
print('good, bad, sunny, cloudy, clipping')
return
def plot_daily_signals(self, boolean_index=None, start_day=0, num_days=5,
filled=True, ravel=True, figsize=(12, 6),
color=None, alpha=None, label=None,
boolean_mask=None, mask_label=None,
show_clear_model=True, show_legend=False,
marker=None):
if type(start_day) is not int:
try:
loc = self.day_index == start_day
start_day = np.arange(self.num_days)[loc][0]
except IndexError:
print("Please use an integer or a date string for 'start_day'")
return
if boolean_index is None:
boolean_index = np.s_[:]
i = start_day
j = start_day + num_days
slct = np.s_[np.arange(self.num_days)[boolean_index][i:j]]
if filled:
plot_data = self.filled_data_matrix[:, slct]
else:
plot_data = self.raw_data_matrix[:, slct]
if ravel:
plot_data = plot_data.ravel(order='F')
fig = plt.figure(figsize=figsize)
kwargs = {}
if color is not None:
kwargs['color'] = color
if alpha is not None:
kwargs['alpha'] = alpha
if marker is not None:
kwargs['marker'] = marker
if self.day_index is not None:
start = self.day_index[start_day]
freq = '{}min'.format(self.data_sampling)
periods = len(plot_data)
xs = pd.date_range(start=start, freq=freq, periods=periods)
else:
xs = np.arange(len(plot_data))
if label is None:
label = 'measured power'
plt.plot(xs, plot_data, linewidth=1, **kwargs, label=label)
if boolean_mask is not None:
if mask_label is None:
mask_label = 'boolean mask'
m, n = self.raw_data_matrix.shape
index_shape = boolean_mask.shape
cond1 = index_shape == (m, n)
cond2 = index_shape == (n,)
if cond1:
plot_flags = boolean_mask[:, slct].ravel(order='F')
elif cond2:
temp_bool = np.tile(boolean_mask, (m, 1))
plot_flags = temp_bool[:, slct].ravel(order='F')
plt.plot(xs[plot_flags], plot_data[plot_flags], ls='none',
marker='.', color='red', label=mask_label)
if show_clear_model and self.scsf is not None:
plot_model = self.scsf.estimated_power_matrix[:, slct].ravel(order='F')
plt.plot(xs, plot_model, color='orange', linewidth=1,
label='clear sky model')
if show_legend:
plt.legend()
return fig
def plot_density_signal(self, flag=None, show_fit=False, figsize=(8, 6)):
if self.daily_signals.density is None:
return
fig = plt.figure(figsize=figsize)
try:
xs = self.day_index.to_pydatetime()
except AttributeError:
xs = np.arange(len(self.daily_signals.density))
plt.plot(xs, self.daily_signals.density, linewidth=1)
title = 'Daily signal density'
if flag == 'density':
plt.plot(xs[~self.daily_flags.density],
self.daily_signals.density[~self.daily_flags.density],
ls='none', marker='.', color='red')
title += ', density outlier days flagged'
if flag == 'good':
plt.plot(xs[self.daily_flags.no_errors],
self.daily_signals.density[self.daily_flags.no_errors],
ls='none', marker='.', color='red')
title += ', good days flagged'
elif flag == 'bad':
plt.plot(xs[~self.daily_flags.no_errors],
self.daily_signals.density[~self.daily_flags.no_errors],
ls='none', marker='.', color='red')
title += ', bad days flagged'
elif flag in ['clear', 'sunny']:
plt.plot(xs[self.daily_flags.clear],
self.daily_signals.density[self.daily_flags.clear],
ls='none', marker='.', color='red')
title += ', clear days flagged'
elif flag == 'cloudy':
plt.plot(xs[self.daily_flags.cloudy],
self.daily_signals.density[self.daily_flags.cloudy],
ls='none', marker='.', color='red')
title += ', cloudy days flagged'
if np.logical_and(show_fit,
self.daily_signals.seasonal_density_fit is not None):
plt.plot(xs, self.daily_signals.seasonal_density_fit, color='orange')
plt.plot(xs, 0.6 * self.daily_signals.seasonal_density_fit,
color='green', linewidth=1,
ls='--')
plt.plot(xs, 1.05 * self.daily_signals.seasonal_density_fit,
color='green', linewidth=1,
ls='--')
plt.title(title)
plt.gcf().autofmt_xdate()
plt.ylabel('Fraction non-zero values')
plt.xlabel('Date')
return fig
def plot_data_quality_scatter(self, figsize=(6,5)):
fig = plt.figure(figsize=figsize)
labels = self.daily_scores.quality_clustering
for lb in set(labels):
plt.scatter(self.daily_scores.density[labels == lb],
self.daily_scores.linearity[labels == lb],
marker='.', label=lb)
plt.xlabel('density score')
plt.ylabel('linearity score')
plt.axhline(self.__linearity_threshold, linewidth=1, color='red',
ls=':', label='decision boundary')
plt.axvline(self.__density_upper_threshold, linewidth=1, color='red',
ls=':')
plt.axvline(self.__density_lower_threshold, linewidth=1, color='red',
ls=':')
plt.legend()
return fig
def plot_daily_energy(self, flag=None, figsize=(8, 6), units='Wh'):
if self.filled_data_matrix is None:
return
fig = plt.figure(figsize=figsize)
energy = np.copy(self.daily_signals.energy)
if np.max(energy) > 1000:
energy /= 1000
units = 'kWh'
try:
xs = self.day_index.to_pydatetime()
except AttributeError:
xs = np.arange(len(self.daily_signals.density))
plt.plot(xs, energy, linewidth=1)
title = 'Daily energy production'
if flag == 'good':
plt.plot(xs[self.daily_flags.no_errors],
energy[self.daily_flags.no_errors],
ls='none', marker='.', color='red')
title += ', good days flagged'
elif flag == 'bad':
plt.plot(xs[~self.daily_flags.no_errors],
energy[~self.daily_flags.no_errors],
ls='none', marker='.', color='red')
title += ', bad days flagged'
elif flag in ['clear', 'sunny']:
plt.plot(xs[self.daily_flags.clear],
energy[self.daily_flags.clear],
ls='none', marker='.', color='red')
title += ', clear days flagged'
elif flag == 'cloudy':
plt.plot(xs[self.daily_flags.cloudy],
energy[self.daily_flags.cloudy],
ls='none', marker='.', color='red')
title += ', cloudy days flagged'
plt.title(title)
plt.gcf().autofmt_xdate()
plt.xlabel('Date')
plt.ylabel('Energy ({})'.format(units))
return fig
def plot_clipping(self, figsize=(10, 8)):
if self.daily_scores is None:
return
if self.daily_scores.clipping_1 is None:
return
fig, ax = plt.subplots(nrows=2, figsize=figsize, sharex=True)
clip_stat_1 = self.daily_scores.clipping_1
clip_stat_2 = self.daily_scores.clipping_2
clipped_days = self.daily_flags.inverter_clipped
try:
xs = self.day_index.to_pydatetime()
except AttributeError:
xs = np.arange(len(self.daily_signals.density))
ax[0].plot(xs, clip_stat_1)
ax[1].plot(xs, clip_stat_2)
if self.inverter_clipping:
ax[0].plot(xs[clipped_days],
clip_stat_1[clipped_days], ls='none', marker='.',
color='red', label='days with inverter clipping')
ax[1].plot(xs[clipped_days],
clip_stat_2[clipped_days], ls='none', marker='.',
color='red')
ax[0].legend()
        ax[0].set_title('Clipping Score 1: ratio of daily max to overall max')
ax[1].set_title('Clipping Score 2: fraction of daily energy generated at daily max power')
ax[1].set_xlabel('Date')
plt.gcf().autofmt_xdate()
return fig
def plot_daily_max_pdf(self, figsize=(8, 6)):
fig = self.__analyze_distribution(self.daily_scores.clipping_1,
plot='pdf', figsize=figsize)
plt.title('Distribution of normalized daily maximum values')
plt.xlabel('Normalized daily max power')
plt.ylabel('Count')
plt.legend()
return fig
def plot_daily_max_cdf(self, figsize=(10, 6)):
fig = self.__analyze_distribution(self.daily_scores.clipping_1,
plot='cdf', figsize=figsize)
plt.title('Cumulative density function of\nnormalized daily maximum values')
plt.xlabel('Normalized daily max power')
        plt.ylabel('Cumulative occurrence probability')
plt.legend()
ax = plt.gca()
ax.set_aspect('equal')
return fig
def plot_daily_max_cdf_and_pdf(self, figsize=(10, 6)):
fig = self.__analyze_distribution(self.daily_scores.clipping_1,
plot='both', figsize=figsize)
return fig
def plot_cdf_analysis(self, figsize=(12, 6)):
fig = self.__analyze_distribution(self.daily_scores.clipping_1,
plot='diffs', figsize=figsize)
return fig
def plot_capacity_change_analysis(self, figsize=(8, 6), show_clusters=True):
fig = self.capacity_clustering(plot=True, figsize=figsize,
show_clusters=show_clusters)
return fig
def plot_time_shift_analysis_results(self, figsize=(8, 6)):
if self.time_shift_analysis is not None:
use_ixs = self.time_shift_analysis.use_ixs
plt.figure(figsize=figsize)
plt.plot(self.day_index, self.time_shift_analysis.metric,
linewidth=1, alpha=0.6,
label='daily solar noon')
plt.plot(self.day_index[use_ixs],
self.time_shift_analysis.metric[use_ixs],
linewidth=1, alpha=0.6, color='orange', marker='.',
ls='none',
label='filtered days')
plt.plot(self.day_index, self.time_shift_analysis.s1, color='green',
label='shift detector')
plt.plot(self.day_index,
self.time_shift_analysis.s1 + self.time_shift_analysis.s2,
color='red', label='signal model', ls='--')
# plt.ylim(11, 13)
plt.legend()
fig = plt.gcf()
return fig
else:
print('Please run pipeline first.')
def plot_circ_dist(self, flag='good', num_bins=12*4, figsize=(8,8)):
title = 'Calendar distribution of '
if flag == 'good':
slct = self.daily_flags.no_errors
title += 'good days'
elif flag == 'bad':
slct = ~self.daily_flags.no_errors
title += 'bad days'
elif flag in ['clear', 'sunny']:
slct = self.daily_flags.clear
title += 'clear days'
elif flag == 'cloudy':
slct = self.daily_flags.cloudy
title += 'cloudy days'
circ_data = (self.start_doy + np.arange(self.num_days)[slct]) % 365 \
* 2 * np.pi / 365
circ_hist = np.histogram(circ_data, bins=num_bins)
fig = plt.figure(figsize=figsize)
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], polar=True)
start = (circ_hist[1][0] + circ_hist[1][1]) / 2
end = (circ_hist[1][-1] + circ_hist[1][-2]) / 2
theta = np.linspace(start, end, num_bins)
radii = circ_hist[0]
width = 2 * np.pi / num_bins
bars = ax.bar(theta, radii, width=width, bottom=0.0, edgecolor='none')
for r, bar in zip(radii, bars):
bar.set_facecolor(cm.magma(r / np.max(circ_hist[0])))
bar.set_alpha(0.75)
ax.set_theta_zero_location('N')
ax.set_theta_direction(-1)
ax.set_rorigin(-2.)
ax.set_rlabel_position(0)
ax.set_xticks(np.linspace(0, 2 * np.pi, 12, endpoint=False))
ax.set_xticklabels(
['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep',
'Oct', 'Nov', 'Dec']
)
ax.set_title(title)
# print(np.sum(circ_hist[0] <= 1))
return fig
def __analyze_distribution(self, data, plot=None, figsize=(8, 6)):
# Calculate empirical CDF
x = np.sort(np.copy(data))
x = x[x > 0]
x = np.concatenate([[0.], x, [1.]])
y = np.linspace(0, 1, len(x))
# Resample the CDF to get an even spacing of points along the x-axis
f = interp1d(x, y)
x_rs = np.linspace(0, 1, 5000)
y_rs = f(x_rs)
# Fit statistical model to resampled CDF that has sparse 2nd order difference
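        # The fit below is a sketch of the convex program
        #     minimize  ||y_rs - y_hat||_2^2 + mu * ||D2 y_hat||_1
        # where D2 is the second-difference operator, so the estimated CDF is
        # piecewise linear and point masses in the data appear as isolated spikes
        # in its second difference.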
y_hat = cvx.Variable(len(y_rs))
mu = cvx.Parameter(nonneg=True)
mu.value = 1e1
error = cvx.sum_squares(y_rs - y_hat)
reg = cvx.norm(cvx.diff(y_hat, k=2), p=1)
objective = cvx.Minimize(error + mu * reg)
constraints = [
y_rs[0] == y_hat[0],
y[-1] == y_hat[-1]
]
problem = cvx.Problem(objective, constraints)
problem.solve(solver='MOSEK')
# Look for outliers in the 2nd order difference to identify point masses from clipping
local_curv = cvx.diff(y_hat, k=2).value
ref_slope = cvx.diff(y_hat, k=1).value[:-1]
threshold = -0.5
# metric = local_curv / ref_slope
metric = np.min([
local_curv / ref_slope,
np.concatenate([
(local_curv[:-1] + local_curv[1:]) / ref_slope[:-1],
[local_curv[-1] / ref_slope[-1]]
]),
np.concatenate([
(local_curv[:-2] + local_curv[1:-1] + local_curv[2:]) / ref_slope[:-2],
[local_curv[-2:] / ref_slope[-2:]]
], axis=None)
], axis=0)
point_masses = np.concatenate(
[[False], np.logical_and(metric <= threshold, ref_slope > 3e-4), # looking for drops of more than 65%
[False]])
# Catch if the PDF ends in a point mass at the high value
if np.logical_or(cvx.diff(y_hat, k=1).value[-1] > 1e-3,
np.allclose(cvx.diff(y_hat, k=1).value[-1],
np.max(cvx.diff(y_hat, k=1).value))):
point_masses[-2] = True
# Reduce clusters of detected points to single points
        pm_reduce = np.zeros_like(point_masses, dtype=bool)
for ix in range(len(point_masses) - 1):
if ~point_masses[ix] and point_masses[ix + 1]:
begin_cluster = ix + 1
elif point_masses[ix] and ~point_masses[ix + 1]:
end_cluster = ix
try:
ix_select = np.argmax(metric[begin_cluster:end_cluster + 1])
except ValueError:
pm_reduce[begin_cluster] = True
else:
pm_reduce[begin_cluster + ix_select] = True
point_masses = pm_reduce
point_mass_values = x_rs[point_masses]
if plot is None:
return point_mass_values
elif plot == 'pdf':
fig = plt.figure(figsize=figsize)
plt.hist(data[data > 0], bins=100, alpha=0.5, label='histogram')
scale = np.histogram(data[data > 0], bins=100)[0].max() \
/ cvx.diff(y_hat, k=1).value.max()
plt.plot(x_rs[:-1], scale * cvx.diff(y_hat, k=1).value,
color='orange', linewidth=1, label='piecewise constant PDF estimate')
for count, val in enumerate(point_mass_values):
if count == 0:
plt.axvline(val, linewidth=1, linestyle=':',
color='green', label='detected point mass')
else:
plt.axvline(val, linewidth=1, linestyle=':',
color='green')
return fig
elif plot == 'cdf':
fig = plt.figure(figsize=figsize)
plt.plot(x_rs, y_rs, linewidth=1, label='empirical CDF')
plt.plot(x_rs, y_hat.value, linewidth=3, color='orange', alpha=0.57,
label='estimated CDF')
if len(point_mass_values) > 0:
plt.scatter(x_rs[point_masses], y_rs[point_masses],
color='red', marker='o',
label='detected point mass')
return fig
elif plot == 'diffs':
fig, ax = plt.subplots(nrows=2, sharex=True, figsize=figsize)
y1 = cvx.diff(y_hat, k=1).value
y2 = metric
ax[0].plot(x_rs[:-1], y1)
ax[1].plot(x_rs[1:-1], y2)
ax[1].axhline(threshold, linewidth=1, color='r', ls=':',
label='decision boundary')
if len(point_mass_values) > 0:
ax[0].scatter(x_rs[point_masses],
y1[point_masses[1:]],
color='red', marker='o',
label='detected point mass')
ax[1].scatter(x_rs[point_masses],
y2[point_masses[1:-1]],
color='red', marker='o',
label='detected point mass')
ax[0].set_title('1st order difference of CDF fit')
ax[1].set_title('2nd order difference of CDF fit')
ax[1].legend()
plt.tight_layout()
return fig
elif plot == 'both':
fig = plt.figure(figsize=figsize)
gs = fig.add_gridspec(nrows=1, ncols=2, width_ratios=[2, 1])
ax1 = fig.add_subplot(gs[0, 0])
ax1.plot(y_rs, x_rs, linewidth=1, label='empirical CDF')
ax1.plot(y_hat.value, x_rs, linewidth=3, color='orange', alpha=0.57,
label='estimated CDF')
if len(point_mass_values) > 0:
ax1.scatter(y_rs[point_masses], x_rs[point_masses],
color='red', marker='o',
label='detected point mass')
ax1.set_title(
'Cumulative density function of\nnormalized daily maximum values')
ax1.set_ylabel('Normalized daily max power')
            ax1.set_xlabel('Cumulative occurrence probability')
ax1.legend()
ax2 = fig.add_subplot(gs[0, 1])
ax2.hist(data[data > 0], bins=100, alpha=0.5, label='histogram',
orientation='horizontal')
scale = np.histogram(data[data > 0], bins=100)[0].max() \
/ cvx.diff(y_hat, k=1).value.max()
ax2.plot(scale * cvx.diff(y_hat, k=1).value, x_rs[:-1],
color='orange', linewidth=1, label='piecewise constant fit')
for count, val in enumerate(point_mass_values):
if count == 0:
plt.axhline(val, linewidth=1, linestyle=':',
color='green', label='detected point mass')
else:
plt.axhline(val, linewidth=1, linestyle=':',
color='green')
ax2.set_title('Distribution of normalized\ndaily maximum values')
# ax2.set_ylabel('Normalized daily max power')
ax2.set_xlabel('Count')
ax2.legend(loc=(.15, .02)) #-0.4
return fig
class DailyScores():
def __init__(self):
self.density = None
self.linearity = None
self.clipping_1 = None
self.clipping_2 = None
self.quality_clustering = None
class DailyFlags():
def __init__(self):
self.density = None
self.linearity = None
self.no_errors = None
self.clear = None
self.cloudy = None
self.inverter_clipped = None
self.capacity_cluster = None
def flag_no_errors(self):
self.no_errors = np.logical_and(self.density, self.linearity)
def flag_clear_cloudy(self, clear_days):
self.clear = np.logical_and(clear_days, self.no_errors)
self.cloudy = np.logical_and(~self.clear, self.no_errors)
class DailySignals():
def __init__(self):
self.density = None
self.seasonal_density_fit = None
self.energy = None
class BooleanMasks():
"""
Boolean masks are used to identify time periods corresponding to elements
in the data matrix. The masks have the same shape as the data matrix. The
masks can be used to select data according to certain rules, generate the
    associated time stamp values, or perform other data manipulation. See,
for example:
https://jakevdp.github.io/PythonDataScienceHandbook/02.06-boolean-arrays-and-masks.html
"""
def __init__(self):
self.clear_times = None
self.clipped_times = None
self.daytime = None
self.missing_values = None
self.infill = None |
the-stack_106_27141 | # -*- coding: utf-8
from __future__ import unicode_literals, absolute_import
import django
DEBUG = True
USE_TZ = True
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = "ok!d=o_mv%g^s+!y65w32dtc2-mz#j-@kxk)hfm*)y5-)^l=q)"
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": ":memory:",
}
}
ROOT_URLCONF = ""
INSTALLED_APPS = [
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sites",
"awesome_django_timezones",
]
SITE_ID = 1
if django.VERSION >= (1, 10):
MIDDLEWARE = ()
else:
MIDDLEWARE_CLASSES = ()
|
the-stack_106_27142 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, models, modules
class Users(models.Model):
_name = 'res.users'
_inherit = ['res.users']
@api.model
def systray_get_activities(self):
""" Update the systray icon of res.partner activities to use the
contact application one instead of base icon. """
activities = super(Users, self).systray_get_activities()
for activity in activities:
if activity['model'] != 'res.partner':
continue
activity['icon'] = modules.module.get_module_icon('contacts')
return activities
|
the-stack_106_27143 | import os
import pickle
osp = os.path
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
tfk = tf.keras
from rlfd import memory, normalizer, policies
from rlfd.agents import agent, sac, sac_networks
class CQL(sac.SAC):
"""Default online training algorithm of CQL is SAC.
"""
def __init__(
self,
# environment configuration
dims,
max_u,
eps_length,
gamma,
# training
offline_batch_size,
online_batch_size,
online_sample_ratio,
fix_T,
# normalize
norm_obs_online,
norm_obs_offline,
norm_eps,
norm_clip,
# networks
layer_sizes,
q_lr,
pi_lr,
action_l2,
# sac specific
auto_alpha,
alpha,
# cql specific
cql_tau,
auto_cql_alpha,
cql_log_alpha,
cql_alpha_lr,
cql_weight_decay_factor,
# double q
soft_target_tau,
target_update_freq,
# online training plus offline data
use_pretrained_actor,
use_pretrained_critic,
use_pretrained_alpha,
online_data_strategy,
# replay buffer
buffer_size,
info):
agent.Agent.__init__(self, locals())
self.dims = dims
self.dimo = self.dims["o"]
self.dimu = self.dims["u"]
self.max_u = max_u
self.fix_T = fix_T
self.eps_length = eps_length
self.gamma = gamma
self.offline_batch_size = offline_batch_size
self.online_batch_size = online_batch_size
self.online_sample_ratio = online_sample_ratio
self.buffer_size = buffer_size
self.auto_alpha = auto_alpha
self.alpha = tf.constant(alpha, dtype=tf.float32)
self.alpha_lr = 3e-4
self.auto_cql_alpha = auto_cql_alpha
self.cql_log_alpha = tf.constant(cql_log_alpha, dtype=tf.float32)
self.cql_alpha_lr = cql_alpha_lr
self.cql_tau = cql_tau
self.cql_weight = tf.Variable(1.0, dtype=tf.float32, trainable=False)
self.cql_weight_decay_factor = cql_weight_decay_factor
self.layer_sizes = layer_sizes
self.q_lr = q_lr
self.pi_lr = pi_lr
self.action_l2 = action_l2
self.soft_target_tau = soft_target_tau
self.target_update_freq = target_update_freq
self.norm_obs_online = norm_obs_online
self.norm_obs_offline = norm_obs_offline
self.norm_eps = norm_eps
self.norm_clip = norm_clip
self.use_pretrained_actor = use_pretrained_actor
self.use_pretrained_critic = use_pretrained_critic
self.use_pretrained_alpha = use_pretrained_alpha
self.online_data_strategy = online_data_strategy
assert self.online_data_strategy in ["None", "BC", "Shaping"]
self.info = info
self._create_memory()
self._create_model()
self._initialize_training_steps()
def _create_model(self):
self._initialize_actor()
self._initialize_critic()
# For BC initialization
self._bc_optimizer = tfk.optimizers.Adam(learning_rate=self.pi_lr)
# Losses
self._huber_loss = tfk.losses.Huber(delta=10.0,
reduction=tfk.losses.Reduction.NONE)
# Entropy regularizer
if self.auto_alpha:
self.log_alpha = tf.Variable(0., dtype=tf.float32)
self.alpha = tf.Variable(0., dtype=tf.float32)
self.alpha.assign(tf.exp(self.log_alpha))
self.target_alpha = -np.prod(self.dimu)
self._alpha_optimizer = tfk.optimizers.Adam(learning_rate=self.alpha_lr)
self.save_var({"alpha": self.alpha, "log_alpha": self.log_alpha})
if self.auto_cql_alpha:
self.cql_log_alpha = tf.Variable(0.0, dtype=tf.float32)
self._cql_alpha_optimizer = tfk.optimizers.Adam(
learning_rate=self.cql_alpha_lr)
# Generate policies
def process_observation_expl(o):
return self._actor_o_norm(o)
self._expl_policy = policies.Policy(
self.dimo,
self.dimu,
get_action=lambda o: self._actor([o], sample=True)[0],
process_observation=process_observation_expl)
def process_observation_eval(o):
self._policy_inspect_graph(o)
return self._actor_o_norm(o)
self._eval_policy = policies.Policy(
self.dimo,
self.dimu,
get_action=lambda o: self._actor([o], sample=False)[0],
process_observation=process_observation_eval)
def _cql_criticq_loss_graph(self, o, o_2, u, r, done, step):
pi_2, logprob_pi_2 = self._actor([self._actor_o_norm(o_2)])
# Immediate reward
target_q = r
# Shaping reward
if self.online_data_strategy == "Shaping":
potential_curr = self.shaping.potential(o=o, u=u)
potential_next = self.shaping.potential(o=o_2, u=pi_2)
target_q += (1.0 - done) * self.gamma * potential_next - potential_curr
# Q value from next state
target_next_q1 = self._criticq1_target([self._critic_o_norm(o_2), pi_2])
target_next_q2 = self._criticq2_target([self._critic_o_norm(o_2), pi_2])
target_next_min_q = tf.minimum(target_next_q1, target_next_q2)
target_q += ((1.0 - done) * self.gamma *
(target_next_min_q - self.alpha * logprob_pi_2))
target_q = tf.stop_gradient(target_q)
td_loss_q1 = self._huber_loss(target_q,
self._criticq1([self._critic_o_norm(o), u]))
td_loss_q2 = self._huber_loss(target_q,
self._criticq2([self._critic_o_norm(o), u]))
td_loss = td_loss_q1 + td_loss_q2
# Being Conservative (Eqn.4)
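    # Sketch of the penalty assembled below (conservative term with a tau offset):
    #     E_s[ log sum_a exp(Q(s, a)) - Q(s, a_data) - tau ]
    # where the log-sum-exp is approximated with importance-weighted uniform and
    # policy action samples, and the result is scaled by exp(cql_log_alpha).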
critic_o = self._critic_o_norm(o)
# second term
max_term_q1 = self._criticq1([critic_o, u])
max_term_q2 = self._criticq2([critic_o, u])
# first term (uniform)
num_samples = 10
tiled_critic_o = tf.tile(tf.expand_dims(critic_o, axis=1),
[1, num_samples] + [1] * len(self.dimo))
uni_u_dist = tfd.Uniform(low=-self.max_u * tf.ones(self.dimu),
high=self.max_u * tf.ones(self.dimu))
uni_u = uni_u_dist.sample((tf.shape(u)[0], num_samples))
logprob_uni_u = tf.reduce_sum(uni_u_dist.log_prob(uni_u),
axis=list(range(2, 2 + len(self.dimu))),
keepdims=True)
uni_q1 = self._criticq1([tiled_critic_o, uni_u])
uni_q2 = self._criticq2([tiled_critic_o, uni_u])
uni_q1_logprob_uni_u = uni_q1 - logprob_uni_u
uni_q2_logprob_uni_u = uni_q2 - logprob_uni_u
# first term (policy)
actor_o = self._actor_o_norm(o)
tiled_actor_o = tf.tile(tf.expand_dims(actor_o, axis=1),
[1, num_samples] + [1] * len(self.dimo))
pi, logprob_pi = self._actor([tiled_actor_o])
pi_q1 = self._criticq1([tiled_critic_o, pi])
pi_q2 = self._criticq2([tiled_critic_o, pi])
pi_q1_logprob_pi = pi_q1 - logprob_pi
pi_q2_logprob_pi = pi_q2 - logprob_pi
# Note: log(2N) not included in this case since it is constant.
log_sum_exp_q1 = tf.math.reduce_logsumexp(tf.concat(
(uni_q1_logprob_uni_u, pi_q1_logprob_pi), axis=1),
axis=1)
log_sum_exp_q2 = tf.math.reduce_logsumexp(tf.concat(
(uni_q2_logprob_uni_u, pi_q2_logprob_pi), axis=1),
axis=1)
cql_loss_q1 = (tf.exp(self.cql_log_alpha) *
(log_sum_exp_q1 - max_term_q1 - self.cql_tau))
cql_loss_q2 = (tf.exp(self.cql_log_alpha) *
(log_sum_exp_q2 - max_term_q2 - self.cql_tau))
cql_loss = cql_loss_q1 + cql_loss_q2
criticq_loss = (tf.reduce_mean(td_loss) +
self.cql_weight * tf.reduce_mean(cql_loss))
tf.summary.scalar(name='criticq_loss vs {}'.format(step.name),
data=criticq_loss,
step=step)
return criticq_loss
@tf.function
def _train_offline_graph(self, o, o_2, u, r, done):
# Train critic q
criticq_trainable_weights = (self._criticq1.trainable_weights +
self._criticq2.trainable_weights)
with tf.GradientTape(watch_accessed_variables=False,
persistent=True) as tape:
tape.watch(criticq_trainable_weights)
if self.auto_cql_alpha:
tape.watch([self.cql_log_alpha])
with tf.name_scope('OfflineLosses/'):
criticq_loss = self._cql_criticq_loss_graph(o, o_2, u, r, done,
self.offline_training_step)
cql_alpha_loss = -criticq_loss
criticq_grads = tape.gradient(criticq_loss, criticq_trainable_weights)
self._criticq_optimizer.apply_gradients(
zip(criticq_grads, criticq_trainable_weights))
if self.auto_cql_alpha:
cql_alpha_grads = tape.gradient(cql_alpha_loss, [self.cql_log_alpha])
self._cql_alpha_optimizer.apply_gradients(
zip(cql_alpha_grads, [self.cql_log_alpha]))
# clip for numerical stability
self.cql_log_alpha.assign(tf.clip_by_value(self.cql_log_alpha, -20., 40.))
with tf.name_scope('OfflineLosses/'):
tf.summary.scalar(name='cql alpha vs {}'.format(
self.offline_training_step.name),
data=self.cql_log_alpha,
step=self.offline_training_step)
# Train actor
actor_trainable_weights = self._actor.trainable_weights
with tf.GradientTape(watch_accessed_variables=False) as tape:
tape.watch(actor_trainable_weights)
with tf.name_scope('OfflineLosses/'):
actor_loss = self._sac_actor_loss_graph(o, u,
self.offline_training_step)
actor_grads = tape.gradient(actor_loss, actor_trainable_weights)
self._actor_optimizer.apply_gradients(
zip(actor_grads, actor_trainable_weights))
# Train alpha (entropy weight)
if self.auto_alpha:
with tf.GradientTape(watch_accessed_variables=False) as tape:
tape.watch(self.log_alpha)
with tf.name_scope('OfflineLosses/'):
alpha_loss = self._alpha_loss_graph(o, self.offline_training_step)
alpha_grad = tape.gradient(alpha_loss, [self.log_alpha])
self._alpha_optimizer.apply_gradients(zip(alpha_grad, [self.log_alpha]))
self.alpha.assign(tf.exp(self.log_alpha))
self.offline_training_step.assign_add(1)
def train_offline(self):
with tf.summary.record_if(lambda: self.offline_training_step % 1000 == 0):
batch = self.offline_buffer.sample(self.offline_batch_size)
o_tf = tf.convert_to_tensor(batch["o"], dtype=tf.float32)
o_2_tf = tf.convert_to_tensor(batch["o_2"], dtype=tf.float32)
u_tf = tf.convert_to_tensor(batch["u"], dtype=tf.float32)
r_tf = tf.convert_to_tensor(batch["r"], dtype=tf.float32)
done_tf = tf.convert_to_tensor(batch["done"], dtype=tf.float32)
self._train_offline_graph(o_tf, o_2_tf, u_tf, r_tf, done_tf)
if self.offline_training_step % self.target_update_freq == 0:
self._copy_weights(self._criticq1, self._criticq1_target)
self._copy_weights(self._criticq2, self._criticq2_target) |
the-stack_106_27145 | # Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from monai.data import list_data_collate, worker_init_fn
class DataLoader(torch.utils.data.DataLoader):
"""Generates images/labels for train/validation/testing from dataset.
It inherits from PyTorch DataLoader and adds callbacks for `collate` and `worker_fn`.
Args:
dataset (Dataset): dataset from which to load the data.
batch_size (int, optional): how many samples per batch to load
(default: ``1``).
shuffle (bool, optional): set to ``True`` to have the data reshuffled
at every epoch (default: ``False``).
sampler (Sampler, optional): defines the strategy to draw samples from
the dataset. If specified, :attr:`shuffle` must be ``False``.
batch_sampler (Sampler, optional): like :attr:`sampler`, but returns a batch of
indices at a time. Mutually exclusive with :attr:`batch_size`,
:attr:`shuffle`, :attr:`sampler`, and :attr:`drop_last`.
num_workers (int, optional): how many subprocesses to use for data
loading. ``0`` means that the data will be loaded in the main process.
(default: ``0``)
pin_memory (bool, optional): If ``True``, the data loader will copy Tensors
into CUDA pinned memory before returning them. If your data elements
are a custom type, or your :attr:`collate_fn` returns a batch that is a custom type,
see the example below.
drop_last (bool, optional): set to ``True`` to drop the last incomplete batch,
if the dataset size is not divisible by the batch size. If ``False`` and
the size of dataset is not divisible by the batch size, then the last batch
will be smaller. (default: ``False``)
timeout (numeric, optional): if positive, the timeout value for collecting a batch
from workers. Should always be non-negative. (default: ``0``)
multiprocessing_context (callable, optional): specify a valid start method for multi-processing.
"""
def __init__(
self,
dataset,
batch_size=1,
shuffle=False,
sampler=None,
batch_sampler=None,
num_workers=0,
pin_memory=False,
drop_last=False,
timeout=0,
multiprocessing_context=None,
):
super().__init__(
dataset=dataset,
batch_size=batch_size,
shuffle=shuffle,
sampler=sampler,
batch_sampler=batch_sampler,
num_workers=num_workers,
collate_fn=list_data_collate,
pin_memory=pin_memory,
drop_last=drop_last,
timeout=timeout,
worker_init_fn=worker_init_fn,
multiprocessing_context=multiprocessing_context,
)
|
the-stack_106_27146 | from exetera.core.utils import Timer
from exetera.core import fields as fld
def merge_daily_assessments_v1(s, src_group, dest_group, overrides=None):
"""
Organize the assessment dataset to group record of patients in each day.
:param s: The Exetera session instance.
:param src_group: The source dataframe that contains the dataset.
:param dest_group: The destination dataframe to write the result to.
:param overrides: The group function to apply to different columns, e.g. latest datetime for 'updated_at'
column, or concat for text columns.
"""
print("generate daily assessments")
patient_ids_ = s.get(src_group['patient_id']).data[:]
created_at_days_ = s.get(src_group['created_at_day']).data[:]
with Timer("calculating spans", new_line=True):
patient_id_index_spans = s.get_spans(fields=(patient_ids_,
created_at_days_))
with Timer("applying spans", new_line=True):
if overrides is None:
overrides = {
'id': s.apply_spans_last,
'patient_id': s.apply_spans_last,
'patient_index': s.apply_spans_last,
'created_at': s.apply_spans_last,
'created_at_day': s.apply_spans_last,
'updated_at': s.apply_spans_last,
'updated_at_day': s.apply_spans_last,
'version': s.apply_spans_max,
'country_code': s.apply_spans_first,
'date_test_occurred': None,
'date_test_occurred_guess': None,
'date_test_occurred_day': None,
'date_test_occurred_set': None,
}
for k in src_group.keys():
with Timer("merging '{}'".format(k), new_line=True):
reader = s.get(src_group[k])
if k in overrides:
fn = overrides[k]
else:
if isinstance(reader, fld.CategoricalField):
fn = s.apply_spans_max
elif isinstance(reader, fld.IndexedStringField):
fn = s.apply_spans_concat
elif isinstance(reader, fld.NumericField):
fn = s.apply_spans_max
else:
fn = None
if fn is None:
print(" Skipping field '{k'}")
else:
with Timer(" Merging field '{k}"):
fn(patient_id_index_spans, reader, reader.create_like(dest_group, k))
|
the-stack_106_27148 | #!/usr/bin/env python
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example creates an OAuth 2.0 refresh token for the Google Ads API.
This illustrates how to step through the OAuth 2.0 native / installed
application flow.
It is intended to be run from the command line and requires user input.
"""
import argparse
from google_auth_oauthlib.flow import InstalledAppFlow
SCOPE = "https://www.googleapis.com/auth/adwords"
def main(client_secrets_path, scopes):
flow = InstalledAppFlow.from_client_secrets_file(
client_secrets_path, scopes=scopes
)
flow.run_console()
print("Access token: %s" % flow.credentials.token)
print("Refresh token: %s" % flow.credentials.refresh_token)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Generates OAuth 2.0 credentials with the specified "
"client secrets file."
)
# The following argument(s) should be provided to run the example.
parser.add_argument(
"--client_secrets_path",
required=True,
help=(
"Path to the client secrets JSON file from the "
"Google Developers Console that contains your "
"client ID and client secret."
),
)
parser.add_argument(
"--additional_scopes",
default=None,
help=(
"Additional scopes to apply when generating the "
"refresh token. Each scope should be separated "
"by a comma."
),
)
args = parser.parse_args()
configured_scopes = [SCOPE]
if args.additional_scopes:
configured_scopes.extend(
args.additional_scopes.replace(" ", "").split(",")
)
main(args.client_secrets_path, configured_scopes)
|
the-stack_106_27152 | """
Problem 46: https://projecteuler.net/problem=46
It was proposed by Christian Goldbach that every odd composite number can be
written as the sum of a prime and twice a square.
9 = 7 + 2 × 1²
15 = 7 + 2 × 2²
21 = 3 + 2 × 3²
25 = 7 + 2 × 3²
27 = 19 + 2 × 2²
33 = 31 + 2 × 1²
It turns out that the conjecture was false.
What is the smallest odd composite that cannot be written as the sum of a
prime and twice a square?
"""
from __future__ import annotations
seive = [True] * 100001
i = 2
while i * i <= 100000:
if seive[i]:
for j in range(i * i, 100001, i):
seive[j] = False
i += 1
def is_prime(n: int) -> bool:
"""
Returns True if n is prime,
False otherwise, for 2 <= n <= 100000
>>> is_prime(87)
False
>>> is_prime(23)
True
>>> is_prime(25363)
False
"""
return seive[n]
odd_composites = [num for num in range(3, len(seive), 2) if not is_prime(num)]
def compute_nums(n: int) -> list[int]:
"""
Returns a list of first n odd composite numbers which do
not follow the conjecture.
>>> compute_nums(1)
[5777]
>>> compute_nums(2)
[5777, 5993]
>>> compute_nums(0)
Traceback (most recent call last):
...
ValueError: n must be >= 0
>>> compute_nums("a")
Traceback (most recent call last):
...
ValueError: n must be an integer
>>> compute_nums(1.1)
Traceback (most recent call last):
...
ValueError: n must be an integer
"""
if not isinstance(n, int):
raise ValueError("n must be an integer")
if n <= 0:
raise ValueError("n must be >= 0")
list_nums = []
for num in range(len(odd_composites)):
i = 0
while 2 * i * i <= odd_composites[num]:
rem = odd_composites[num] - 2 * i * i
if is_prime(rem):
break
i += 1
else:
list_nums.append(odd_composites[num])
if len(list_nums) == n:
return list_nums
return []
def solution() -> int:
"""Return the solution to the problem"""
return compute_nums(1)[0]
if __name__ == "__main__":
print(f"{solution() = }")
|
the-stack_106_27153 | from rest_framework import viewsets, permissions, status
from rest_framework.decorators import action
from rest_framework.response import Response
from longclaw.contrib.productrequests.serializers import ProductRequestSerializer
from longclaw.contrib.productrequests.models import ProductRequest
from longclaw.utils import ProductVariant, maybe_get_product_model
class ProductRequestViewSet(viewsets.ModelViewSet):
"""create/list/get product requests
"""
serializer_class = ProductRequestSerializer
permission_classes = (permissions.AllowAny,)
queryset = ProductRequest.objects.all()
def create(self, request):
"""Create a new product request
"""
variant_id = request.data.get("variant_id", None)
if variant_id is not None:
variant = ProductVariant.objects.get(id=variant_id)
product_request = ProductRequest(variant=variant)
product_request.save()
serializer = self.serializer_class(product_request)
response = Response(data=serializer.data, status=status.HTTP_201_CREATED)
else:
response = Response(
{"message": "Missing 'variant_id'"},
status=status.HTTP_400_BAD_REQUEST)
return response
@action(detail=False, methods=['get'])
def requests_for_variant(self, request, variant_id=None):
"""Get all the requests for a single variant
"""
requests = ProductRequest.objects.filter(variant__id=variant_id)
serializer = self.serializer_class(requests, many=True)
return Response(data=serializer.data, status=status.HTTP_200_OK)
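# Example interaction (a sketch; the URL prefix depends on how this viewset is
# registered with a DRF router, so the path below is an assumption):
#   POST /productrequests/ with body {"variant_id": 42}
#   -> 201 Created with the serialized ProductRequest, or 400 if the id is missing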
|
the-stack_106_27154 | """
Astra-Viso star camera module.
"""
from __future__ import division
import numpy as np
from astraviso import worldobject
from astraviso import starmap
from astraviso import imageutils
from astraviso import projectionutils
class StarCam(worldobject.WorldObject):
"""
Star camera class.
"""
def __init__(self):
"""
StarCam initialization.
Parameters
----------
None
Returns
-------
starcam : StarCam
Default StarCam object.
"""
# Internal settings
self.__settings = {}
self.__settings["resolution"] = 1024
self.__settings["max_angle_step"] = 1e-4
self.__settings["integration_steps"] = 1000
# Set psf size
self.setpsf(7, 1) # To be removed...
# Internal function variables
self.sensitivity_fcn = None
self.projection_fcn = None
self.quantum_efficiency_fcn = None
self.noise_fcn = None
self.saturation_fcn = None
# Set star catalog defaults
self.star_catalog = starmap.StarMap()
self.star_catalog.load_preset("random", 10000)
# Set sensor pointing default
worldobject.WorldObject.__init__(self)
self.set_pointing_preset("kinematic", initial_quaternion=np.array([0, 0, 0, 1]), \
initial_angular_rate=np.array([0, 0, 0]))
# Set position model default
self.set_position_preset("kinematic", initial_position=np.array([0, 0, 0]), \
initial_velocity=np.array([0, 0, 0]))
# Projection model defaults
self.set_projection_preset("pinhole", focal_len=93, pixel_size=0.016, resolution=1024)
# Set CCD defaults
self.set_saturation_preset("no_bleed", bit_depth=16)
self.set_quantum_efficiency_preset("constant", quantum_efficiency=0.22)
self.set_noise_preset("poisson", dark_current=1200, read_noise=200)
self.set_sensitivity_preset("default", aperture=1087, mv0_flux=19000)
# External objects
self.external_objects = []
def setpsf(self, size, sigma):
"""
Set PSF to Gaussian kernel.
In the future should have a separate function to handle explicit
PSF definitions.
"""
# Enforce odd dimensions
if size % 2 == 0:
size = size + 1
# Allocate variables
halfwidth = (size-1)/2
kernel = np.zeros((size, size))
# Create kernel
for row in range(size):
for col in range(size):
kernel[row, col] = np.exp(-0.5 * ((row-halfwidth)**2 + \
(col-halfwidth)**2) / sigma**2)
# Normalize and return
self.psf = kernel / np.sum(kernel)
def add_worldobject(self, obj=None):
"""
Add new or existing WorldObject to the external object catalog.
Parameters
----------
obj : WorldObject, optional
An existing WorldObject instance. If obj is not defined, a new
WorldObject instance will be created.
Returns
-------
None
Notes
-----
The user can manage elements in the external object catalog directly
through the external_objects property of the StarCam class.
Examples
--------
>>> cam = StarCam()
>>> obj1 = WorldObject()
>>> cam.add_worldobject(obj1)
>>> cam.external_objects
[<astraviso.worldobject.WorldObject object at 0x000001A4C8AA6438>]
>>> cam.external_objects[0].set_pointing_preset("kinematic", \
np.array([0,0,0,1,0,0,0]))
"""
# Append input WorldObject
if isinstance(obj, worldobject.WorldObject):
self.external_objects.append(obj)
# Append new WorldObject
elif obj is None:
self.external_objects.append(worldobject.WorldObject())
# Handle invalid input
else:
raise ValueError("Input must either be an existing WorldObject or None.")
def delete_worldobject(self, index):
"""
Clear a WorldObject from the external object catalog.
Parameters
----------
index : int
Index of the WorldObject catalog element to remove.
Returns
-------
None
Notes
-----
The user can manage elements in the external object catalog directly
through the external_objects property of the StarCam class.
Examples
--------
>>> cam = StarCam()
>>> obj1 = WorldObject()
>>> cam.add_worldobject(obj1)
>>> cam.external_objects
[<astraviso.worldobject.WorldObject object at 0x000001A4C8AA6438>]
>>> cam.delete_worldobject(0)
>>> cam.external_objects
[]
"""
# Delete object
del self.external_objects[index]
def integrate(self, time, delta_t):
"""
Compute CCD pixel values after set exposure time.
Parameters
----------
time : float
Time to begin exposure. Measured in seconds from epoch.
delta_t : float
Desired exposure time. Measured in seconds.
Returns
-------
img : ndarray
Resulting CCD array values. Each element contains a photon count.
Examples
--------
>>> cam = StarCam()
        >>> cam.integrate(0, 1)
array([[ 0., 0., 0., ..., 0., 0., 0.],
[ 0., 0., 0., ..., 0., 0., 0.],
[ 0., 0., 0., ..., 0., 0., 0.],
...,
[ 0., 0., 0., ..., 0., 0., 0.],
[ 0., 0., 0., ..., 0., 0., 0.],
[ 0., 0., 0., ..., 0., 0., 0.]])
"""
# Determine step size
# Temporary solution...
steps = self.__settings["integration_steps"]
step_size = delta_t / steps
angle = 0
# Extract subset of stars from catalog
# Also a temporary solution...
field_of_view = 45
boresight = np.dot([0, 0, 1], self.get_pointing(time, mode="dcm").T)
stars = self.star_catalog.get_region(boresight, np.rad2deg(angle)+field_of_view/2)
# Extract and scale magnitudes
mag = self.get_photons(stars["magnitude"], delta_t) / steps
# Allocate image
img = np.zeros((self.__settings["resolution"], self.__settings["resolution"]))
# Integrate star signals
for step in range(steps):
# Apply sensor rotation
dcm = self.get_pointing(time+step_size*step, mode="dcm")
vis = np.dot(stars["catalog"], dcm)
# Project stars
img_x, img_y = self.get_projection(vis)
# Shift y-axis origin to upper left corner
img_y = self.__settings["resolution"] - img_y - 1
# Check for stars in image bounds
in_img = [idx for idx in range(len(img_x)) if (img_x[idx] > 0 and
img_x[idx] < self.__settings["resolution"]-1 and
img_y[idx] > 0 and
img_y[idx] < self.__settings["resolution"]-1)]
# Create image
# *** This will eventually be replaced by self.psf_fcn
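            # Bilinear spreading: each star's photon count is split across the
            # four pixels surrounding its sub-pixel coordinate, weighted by
            # proximity, so point sources land smoothly on the pixel grid.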
for idx in in_img:
xidx = img_x[idx] - np.floor(img_x[idx])
yidx = img_y[idx] - np.floor(img_y[idx])
img[int(np.ceil(img_y[idx])), int(np.ceil(img_x[idx]))] += mag[idx]*xidx*yidx
img[int(np.floor(img_y[idx])), int(np.ceil(img_x[idx]))] += mag[idx]*xidx*(1-yidx)
img[int(np.ceil(img_y[idx])), int(np.floor(img_x[idx]))] += mag[idx]*(1-xidx)*yidx
img[int(np.floor(img_y[idx])), int(np.floor(img_x[idx]))] += \
mag[idx]*(1-xidx)*(1-yidx)
# Integrate external object signals
for object in self.external_objects:
for step in range(steps):
# Compute current time
current_time = time + step_size*step
# Compute relative position in camera frame
vis = object.in_frame_of(self, current_time)
                # Skip this step if the object lies in the camera plane (zero boresight component)
if np.isclose(vis[2], 0):
continue
# Project object
img_x, img_y = self.get_projection(vis)
# Shift y-axis origin to upper left corner
img_y = self.__settings["resolution"] - img_y - 1
# If object is in image frame, add to image
if img_x > 0 and img_y > 0 and img_x < self.__settings["resolution"]-1 \
and img_y < self.__settings["resolution"]-1:
# Get photon count
mag = self.get_photons(object.get_vismag(current_time, \
self.get_position(current_time)), step_size)
# Add to image
xidx = img_x - np.floor(img_x)
yidx = img_y - np.floor(img_y)
img[int(np.ceil(img_y)), int(np.ceil(img_x))] += mag*xidx*yidx
img[int(np.floor(img_y)), int(np.ceil(img_x))] += mag*xidx*(1-yidx)
img[int(np.ceil(img_y)), int(np.floor(img_x))] += mag*(1-xidx)*yidx
img[int(np.floor(img_y)), int(np.floor(img_x))] += mag*(1-xidx)*(1-yidx)
# Return result
return img
def snap(self, time, delta_t):
"""
Create finished image with specified exposure time.
Parameters
----------
time : float
Time to begin exposure. Measured in seconds from epoch.
delta_t : float
Desired exposure time. Measured in seconds.
Returns
-------
image : ndarray
Resulting image array. Each pixel contains an integer value.
Examples
--------
>>> cam = StarCam()
        >>> cam.snap(0, 1)
array([[1427, 1408, 1429, ..., 1381, 1414, 1404],
[1418, 1370, 1400, ..., 1389, 1395, 1445],
[1390, 1445, 1323, ..., 1369, 1408, 1417],
...,
[1372, 1469, 1393, ..., 1356, 1468, 1412],
[1324, 1437, 1496, ..., 1419, 1399, 1360],
[1412, 1450, 1371, ..., 1376, 1367, 1421]])
"""
# Integrate photons
image = self.integrate(time, delta_t)
# Defocus image
image = imageutils.conv2(image, self.psf)
# Convert to photoelectrons
image = self.get_photoelectrons(image)
# Add noise
image = self.add_noise(image, delta_t)
# Saturate
image = self.get_saturation(image)
# Return
return image
def set_noise_fcn(self, fcn):
"""
Set internal noise function.
Parameters
----------
fcn : function
Input noise function. Output image must be the same size as input.
See notes for details about the required function format.
Returns
-------
None
See Also
--------
StarCam.set_noise_preset, StarCam.add_noise
Notes
-----
Function must be of the form noisy_image = f(image, delta_t).
Below are two valid function definition templates.
def user_fcn(image, delta_t):
...
return noisy_image
user_fcn = lambda image, delta_t: ...
Examples
--------
>>> cam = StarCam()
>>> fcn = lambda image, delta_t: image+np.random.rand(*image.shape)
>>> cam.set_noise_fcn(fcn)
"""
# Validate input
if not callable(fcn):
raise ValueError("Must provide callable function.")
if fcn(np.zeros(16), 0).shape != (16,):
raise ValueError("Function output must be the same size as input.")
# Set function
self.noise_fcn = fcn
def set_noise_preset(self, preset, **kwargs):
"""
Choose preset noise model & assign noise values. Current options are:
"poisson" -- Poisson-distributed noise.
"gaussian" -- Gaussian approximation to poisson noise.
"off" -- Turn image noise off.
Parameters
----------
preset : str
Name of chosen preset.
dark_current : float, optional
Sensor dark current noise level. Measured in photoelectrons per
second. Required for "gaussian" and "poisson" presets.
read_noise : float, optional
Sensor read noise. Measured in photoelectrons. Required for
"gaussian" and "poisson" presets.
Returns
-------
None
See Also
--------
StarCam.set_noise_fcn, StarCam.add_noise
Notes
-----
The default noise for the StarCam object is poisson noise with
dark_current=1200 and read_noise=200.
Examples
--------
>>> cam = StarCam()
>>> cam.set_noise_preset("poisson", dark_current=1200, read_noise=200)
"""
# Poisson model
if preset.lower() == "poisson":
# Check input
if "dark_current" not in kwargs or "read_noise" not in kwargs:
raise ValueError("Must provide the following keyword arguments for poisson- \
type noise: 'dark_current', 'read_noise'")
# Set function
noise_fcn = lambda image, delta_t: imageutils.poisson_noise(image, delta_t, \
kwargs["dark_current"], kwargs["read_noise"])
self.set_noise_fcn(noise_fcn)
# Gaussian model
elif preset.lower() == "gaussian":
if "dark_current" not in kwargs or "read_noise" not in kwargs:
raise ValueError("Must provide the following keyword arguments for poisson- \
type noise: 'dark_current', 'read_noise'")
# Set function
noise_fcn = lambda image, delta_t: imageutils.gaussian_noise(image, delta_t, \
kwargs["dark_current"], kwargs["read_noise"])
self.set_noise_fcn(noise_fcn)
elif preset.lower() == "off":
# Set function
self.set_noise_fcn(lambda image, delta_t: image)
# Invalid input
else:
raise NotImplementedError("Invalid noise preset. Available options are: poisson, \
gaussian.")
def add_noise(self, image, delta_t):
"""
Add noise to image using internal noise model.
Parameters
----------
image : ndarray
Input image array. All values should be measured in photoelectrons.
delta_t : float
Exposure time in seconds.
Returns
-------
None
See Also
--------
StarCam.set_noise_fcn, StarCam.set_noise_preset
Examples
--------
>>> cam = StarCam()
>>> cam.set_noise_preset("poisson", dark_current=1200, read_noise=200)
>>> cam.add_noise(np.zeros((4,4)), 1)
array([[1398, 1459, 1369, 1466],
[1375, 1302, 1416, 1465],
[1370, 1434, 1375, 1463],
[1491, 1400, 1384, 1381]])
"""
        # Apply the internal noise model to the image
return self.noise_fcn(image, delta_t)
def set_sensitivity_fcn(self, fcn):
"""
Set internal conversion between visible magnitudes and photon counts.
Parameters
----------
fcn : function
Input photon function. Output must be the same size as input. See
notes for details about the required function format.
Returns
-------
None
See Also
--------
StarCam.set_sensitivity_preset, StarCam.get_photons
Notes
-----
Function must be of the form photon_count = f(magnitude, delta_t).
Below are two valid function definition templates.
def user_fcn(magnitude, delta_t):
...
return photon_count
user_fcn = lambda magnitude, delta_t: ...
Examples
--------
>>> cam = StarCam()
>>> fcn = lambda vismags, delta_t: 100*vismags
>>> cam.set_sensitivity_fcn(fcn)
"""
# Check for valid input
if not callable(fcn):
raise ValueError("Must provide callable function.")
# Check that input function supports multiple inputs
if len(fcn([1, 2], 1)) != 2:
raise ValueError("Input function must support multiple inputs and return an equivalent \
number of values.")
# Set function
self.sensitivity_fcn = fcn
def set_sensitivity_preset(self, preset, **kwargs):
"""
Choose preset sensitivity model & assign values. This model defines the
internal conversion between visible magnitudes and photon counts Current
options are:
"default" -- A log-scale magnitude to photon conversion.
Parameters
----------
preset : str
Name of chosen preset.
aperture : float, optional
Aperture area in mm^2. Required for "default" preset.
mv0_flux : float, optional
Photoelectrons per second per mm^2 of aperture area. Required for
"default" preset.
Returns
-------
None
See Also
--------
StarCam.set_sensitivity_fcn, StarCam.get_photons,
imageutils.vismag2photon
Notes
-----
The default values for the StarCam object are 1087 mm^2 aperture area
and 19,000 photons per mm^2 of aperture area per second.
Examples
--------
>>> cam = StarCam()
>>> cam.set_sensitivity_preset("default", aperture=1087, mv0_flux=19000)
"""
# Set default option
if preset.lower() == "default":
# Check input
if "aperture" not in kwargs or "mv0_flux" not in kwargs:
raise ValueError("Must provide the following keyword arguments for this preset: \
'aperture', 'mv0_flux'")
# Build function & set
sensitivity_fcn = lambda vismags, delta_t: imageutils.vismag2photon(vismags, delta_t, \
kwargs["aperture"], kwargs["mv0_flux"])
self.set_sensitivity_fcn(sensitivity_fcn)
# Handle invalid option
else:
raise NotImplementedError("Invalid preset option.")
def get_photons(self, magnitudes, delta_t):
"""
Convert array of visible magnitudes to photoelectron counts using the
internally-defined sensitivity model.
Parameters
----------
magnitudes : ndarray
Array of visible magnitudes to be converted.
delta_t : float
Sensor exposure time in seconds.
Returns
-------
photon_count : ndarray
Total photon count for each input visible magnitude.
See Also
--------
StarCam.set_sensitivity_fcn, StarCam.set_sensitivity_preset
Examples
--------
>>> cam = StarCam()
>>> cam.set_sensitivity_preset("default", aperture=1087, mv0_flux=19000)
>>> cam.get_photons(7, 0.1)
3383.7875200000003
"""
# Compute photon count
return self.sensitivity_fcn(magnitudes, delta_t)
def set_projection_fcn(self, fcn, resolution):
"""
Set internal projection model for incoming photons.
Parameters
----------
fcn : function
Input projection function. Output must be the same size as input.
See notes for details about the required function format.
resolution : int
Resolution of the sensor.
Returns
-------
None
See Also
--------
StarCam.set_projection_preset, StarCam.get_projection
Notes
-----
Function must be of the form img_x, img_y = f(vectors) where vectors
is an Nx3 array of unit vectors describing visible objects. Function
must return image-plane (x,y) coordinate in two separate vectors. Below
is a valid function definition templates.
def user_fcn(vectors):
...
return img_x, img_y
Examples
--------
>>> cam = StarCam()
>>> def proj_fcn(vectors):
... img_x = np.divide(vectors[:, 0], vectors[:, 2])
... img_y = np.divide(vectors[:, 1], vectors[:, 2])
... return img_x, img_y
...
>>> cam.set_projection_fcn(proj_fcn, resolution=1024)
>>> cam.projection_fcn(np.array([[0, 0, 1]]))
(array([ 0.]), array([ 0.]))
"""
# Check for valid resolution
if resolution <= 0 or not isinstance(resolution, int):
raise ValueError("Resolution must be integer-valued and positive.")
# Check for valid function
if not callable(fcn):
raise ValueError("Must provide callable function.")
# Set function
self.__settings["resolution"] = resolution
self.projection_fcn = fcn
def set_projection_preset(self, preset, **kwargs):
"""
Choose preset projection model & assign values. Current options are:
"pinhole" -- Pinhole projection model.
Parameters
----------
preset : str
Name of chosen preset.
focal_len : float, optional
Focal length of the sensor in mm. Required as keyword argument for
"pinhole" preset.
pixel_size : float, optional
Physical pixel size in mm. Pixels are assume square. Required as
keyword argument for "pinhole" preset.
resolution : int, optional
Resolution of the sensor. Default is a square 1024x1024 image.
Returns
-------
None
See Also
--------
StarCam.set_projection_fcn, StarCam.get_projection,
Notes
-----
The default setting for the StarCam object is the "pinhole" model with
        a focal length of 93 mm, 0.016 mm pixels, and a resolution of 1024x1024.
Examples
--------
>>> cam = StarCam()
>>> cam.set_projection_preset("pinhole", focal_len=93, pixel_size=0.016)
>>> cam.projection_fcn(np.array([[0, 0, 1]]))
(array([ 512.5]), array([ 512.5]))
"""
# Set default resolution
if "resolution" not in kwargs:
kwargs["resolution"] = 1024
# Handle pinhole option
if preset.lower() == "pinhole":
# Check input
if "focal_len" not in kwargs or "pixel_size" not in kwargs:
raise ValueError("Must provide the following keyword arguments for this preset: \
'focal_len', 'pixel_size'")
# Build function & set
proj_fcn = lambda vectors: projectionutils.pinhole_project(vectors, \
kwargs["focal_len"], kwargs["pixel_size"], kwargs["resolution"])
self.set_projection_fcn(proj_fcn, kwargs["resolution"])
# Handle invalid option
else:
raise NotImplementedError("Invalid preset option.")
def get_projection(self, vectors):
"""
Get projected image-plane coordinates for an input vector using the
internal projection model.
Parameters
----------
vectors : ndarray
Body vectors to be projected into the image plane. Array should be
Nx3 where N is the number of vectors.
Returns
-------
img_x : ndarray
Array of x-coordinates (N elements).
img_y : ndarray
Array of y-coordinates (N elements).
See Also
--------
StarCam.set_projection_fcn, StarCam.set_projection_preset
Examples
--------
>>> cam = StarCam()
>>> cam.set_projection_preset("pinhole", focal_len=93, pixel_size=0.016)
>>> cam.get_projection(np.array([[0, 0, 1]]))
(array([ 512.5]), array([ 512.5]))
"""
# Compute projection
return self.projection_fcn(vectors)
def set_quantum_efficiency_fcn(self, fcn):
"""
Set function to simulate CCD quantum efficiency.
Parameters
----------
fcn : function
Input quantum efficiency function. Function should convert from
            a continuous photon count to a discrete photoelectron count. See
notes for details about the required function format.
Returns
-------
None
See Also
--------
StarCam.set_quantum_efficiency_preset, StarCam.get_photoelectrons
Notes
-----
Function must be of the form photoelectron_count = f(image).
Below are two valid function definition templates.
def user_fcn(image):
...
return photoelectron_count
user_fcn = lambda image: ...
Examples
--------
>>> cam = StarCam()
>>> fcn = lambda image: np.floor(image * 0.22)
>>> cam.set_quantum_efficiency_fcn(fcn)
"""
# Check function validity
if not callable(fcn):
raise ValueError("Must provide callable function.")
if fcn(np.zeros((16, 32))).shape != (16, 32):
raise ValueError("Saturation function output size must be equal to input.")
# Set function
self.quantum_efficiency_fcn = fcn
def set_quantum_efficiency_preset(self, preset, **kwargs):
"""
Choose preset quantum efficiency model & assign values. Current options are:
"constant" -- Equal quantum efficiency for every pixel.
"gaussian" -- Gaussian-distributed quantum efficiency values for each
pixel.
Parameters
----------
preset : str
Name of chosen preset.
quantum_efficiency : float, optional
Relationship between photons and photoelectrons. Measured as the
number of photoelectrons per photon. Required for "constant" preset.
sigma : float, optional
Desired standard deviation of random values. Required for "gaussian"
preset.
seed : float, optional
Random number generator seed. Optional for "gaussian" preset.
Returns
-------
None
See Also
--------
StarCam.set_quantum_efficiency_fcn, StarCam.get_photoelectrons
Notes
-----
The StarCam object uses the 'constant' preset by default with a quantum
efficiency parameter of 0.22.
Examples
--------
>>> cam = StarCam()
>>> cam.set_quantum_efficiency_preset("constant", 0.22)
"""
# Set default option
if preset.lower() == "constant":
# Check input
if "quantum_efficiency" not in kwargs:
raise ValueError("Must provide the following keyword arguments for this preset: \
'quantum_efficiency'")
# Build function & set
qe_fcn = lambda image: imageutils.apply_constant_quantum_efficiency(image, \
kwargs["quantum_efficiency"])
self.set_quantum_efficiency_fcn(qe_fcn)
# Set gaussian option
elif preset.lower() == "gaussian":
# Check input
if "quantum_efficiency" not in kwargs or "sigma" not in kwargs:
raise ValueError("Must provide the following keyword arguments for this preset: \
'quantum_efficiency', 'sigma'")
# Set seed, if necessary
if "seed" not in kwargs:
kwargs["seed"] = np.random.rand()
# Build function & set
qe_fcn = lambda image: imageutils.apply_gaussian_quantum_efficiency(image, \
kwargs["quantum_efficiency"], kwargs["sigma"], kwargs["seed"])
self.set_quantum_efficiency_fcn(qe_fcn)
# Handle invalid option
else:
raise NotImplementedError("Invalid preset option.")
def get_photoelectrons(self, photon_image):
"""
Get photoelectron count from photon count with internal quantum
efficiency model.
Parameters
----------
photon_image : ndarray
Input image where each pixel contains a total photon count.
Returns
-------
photoelectron_image : ndarray
Scaled, discrete-valued image where each pixel contains a photo-
electron count.
See Also
--------
StarCam.set_quantum_efficiency_fcn, StarCam.set_quantum_efficiency_preset
Examples
--------
>>> cam = StarCam()
>>> cam.set_quantum_efficiency_preset("constant", quantum_efficiency=0.2)
        >>> cam.get_photoelectrons(5*np.ones((4,4)))
array([[ 1., 1., 1., 1.],
[ 1., 1., 1., 1.],
[ 1., 1., 1., 1.],
[ 1., 1., 1., 1.]])
"""
# Compute photoelectron count
return self.quantum_efficiency_fcn(photon_image)
def set_saturation_fcn(self, fcn):
"""
Set function to simulate sensor-level saturation thresholding.
Parameters
----------
fcn : function
Input saturation function. Must be of the form f(image). Output
must be the same size as input.
Returns
-------
None
See Also
--------
StarCam.set_saturation_preset, StarCam.get_saturation
Notes
-----
Function must be of the form saturated_image = f(image).
Below are two valid function definition templates.
def user_fcn(image):
...
return saturated_image
user_fcn = lambda image: ...
Examples
--------
>>> cam = StarCam()
>>> fcn = lambda image: np.floor(image)
>>> cam.set_saturation_fcn(fcn)
"""
# Check function validity
if not callable(fcn):
raise ValueError("Must provide callable function.")
if fcn(np.zeros((16, 32))).shape != (16, 32):
raise ValueError("Saturation function output size must be equal to input.")
# Set function
self.saturation_fcn = fcn
def set_saturation_preset(self, preset, **kwargs):
"""
Choose preset pixel saturation model & assign values. Current options
are:
"no_bleed" -- Saturation with no cross-pixel bleed.
"off" -- No saturation.
Parameters
----------
preset : str
Name of chosen preset.
bit_depth : int, optional
Number of bits used to store each pixel. Required for "no_bleed"
preset. Maximum value for a pixel is 2**bit_depth - 1.
Returns
-------
None
See Also
--------
StarCam.set_saturation_fcn, StarCam.get_saturation
Notes
-----
The StarCam object uses the 'no_bleed' preset by default with a bit
depth of 16.
Examples
--------
>>> cam = StarCam()
>>> cam.set_saturation_preset("no_bleed", bit_depth=16)
"""
# Set default option
if preset.lower() == "no_bleed":
# Check input
if "bit_depth" not in kwargs:
raise ValueError("Must provide the following keyword arguments for this preset: \
'bit_depth'")
# Build function & set
saturation_fcn = lambda image: imageutils.saturate(image, kwargs["bit_depth"])
self.set_saturation_fcn(saturation_fcn)
elif preset.lower() == "off":
# Set function
self.set_saturation_fcn(lambda image: image)
# Handle invalid option
else:
raise NotImplementedError("Invalid preset option.")
def get_saturation(self, image):
"""
Saturate image input with internal pixel saturation model.
Parameters
----------
image : ndarray
Input image where each pixel contains a total photoelectron count.
Returns
-------
saturated_image : ndarray
Saturated image. Output image is the same size as the input.
See Also
--------
StarCam.set_saturation_fcn, StarCam.set_saturation_preset
Examples
--------
>>> cam = StarCam()
>>> cam.set_saturation_preset("no_bleed", bit_depth=2)
>>> cam.get_saturation(16*np.ones((4,4)))
array([[ 3., 3., 3., 3.],
[ 3., 3., 3., 3.],
[ 3., 3., 3., 3.],
[ 3., 3., 3., 3.]])
"""
# Saturate image
return self.saturation_fcn(image)
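# Typical end-to-end use (a sketch mirroring the doctests above): build a StarCam
# with its default presets and render a single exposure.
#   cam = StarCam()
#   image = cam.snap(time=0, delta_t=1)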
|
the-stack_106_27155 | #!/usr/bin/env python
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2010 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Installation script for Trove's development virtualenv
"""
from __future__ import print_function
import os
import subprocess
import sys
ROOT = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
VENV = os.path.join(ROOT, '.venv')
PIP_REQUIRES = os.path.join(ROOT, 'requirements.txt')
TEST_REQUIRES = os.path.join(ROOT, 'test-requirements.txt')
PY_VERSION = "python%s.%s" % (sys.version_info[0], sys.version_info[1])
def die(message, *args):
print(message % args, file=sys.stderr)
sys.exit(1)
def check_python_version():
if sys.version_info < (2, 7):
die("Need Python Version >= 2.7")
def run_command(cmd, redirect_output=True, check_exit_code=True):
"""
Runs a command in an out-of-process shell, returning the
output of that command. Working directory is ROOT.
"""
if redirect_output:
stdout = subprocess.PIPE
else:
stdout = None
proc = subprocess.Popen(cmd, cwd=ROOT, stdout=stdout)
output = proc.communicate()[0]
if check_exit_code and proc.returncode != 0:
die('Command "%s" failed.\n%s', ' '.join(cmd), output)
return output
HAS_EASY_INSTALL = bool(run_command(['which', 'easy_install'],
check_exit_code=False).strip())
HAS_VIRTUALENV = bool(run_command(['which', 'virtualenv'],
check_exit_code=False).strip())
def check_dependencies():
"""Make sure virtualenv is in the path."""
if not HAS_VIRTUALENV:
print('not found.')
# Try installing it via easy_install...
if HAS_EASY_INSTALL:
print('Installing virtualenv via easy_install...'),
if not (run_command(['which', 'easy_install']) and
run_command(['easy_install', 'virtualenv'])):
                die('ERROR: virtualenv not found.\n\nTrove development'
' requires virtualenv, please install it using your'
' favorite package management tool')
print('done.')
print('done.')
def create_virtualenv(venv=VENV):
"""Creates the virtual environment and installs PIP only into the
virtual environment
"""
print('Creating venv...'),
run_command(['virtualenv', '-q', '--no-site-packages', VENV])
print('done.')
print('Installing pip in virtualenv...'),
if not run_command(['tools/with_venv.sh', 'easy_install', 'pip']).strip():
die("Failed to install pip.")
print('done.')
def install_dependencies(venv=VENV):
print('Installing dependencies with pip (this can take a while)...')
# Install greenlet by hand - just listing it in the requires file does not
    # get it installed in the right order
run_command(['tools/with_venv.sh', '-E', venv, 'pip', 'install',
'greenlet'], redirect_output=False)
for requires in (PIP_REQUIRES, TEST_REQUIRES):
run_command(['tools/with_venv.sh', '-E', venv, 'pip', 'install', '-r',
requires], redirect_output=False)
# Tell the virtual env how to "import trove"
pthfile = os.path.join(venv, "lib", PY_VERSION, "site-packages",
"trove.pth")
f = open(pthfile, 'w')
f.write("%s\n" % ROOT)
def print_help():
help = """
Trove development environment setup is complete.
Trove development uses virtualenv to track and manage Python
dependencies while in development and testing.
To activate the Trove virtualenv for the extent of your current shell
session you can run:
$ source .venv/bin/activate
Or, if you prefer, you can run commands in the virtualenv on a case by case
basis by running:
$ tools/with_venv.sh <your command>
Also, make test will automatically use the virtualenv.
"""
print(help)
def main(argv):
check_python_version()
check_dependencies()
create_virtualenv()
install_dependencies()
print_help()
if __name__ == '__main__':
main(sys.argv)
|
the-stack_106_27156 | import requests
import json
from bs4 import BeautifulSoup
class RAWG:
def __init__(self):
self.base_url = 'https://api.rawg.io/api'
self.session = requests.Session()
def get_game(self, name):
game = {}
id = self.__request_id(name)
if not id:
return game
res = self.__request_game(id)
game['Name'] = res['name']
game['RawgID'] = res['id']
game['Metacritic'] = res['metacritic']
game['Presence'] = self.__parse_precense(res)
game['Presence'] = self.__score_precense(game['Presence'])
game['Platform'] = self.__parse_platforms(res['platforms'])
game['RatingsBreakdown'] = self.__parse_ratings(res['ratings'])
game['ReleaseDate'] = res['released']
game['ESRB'] = self.__parse_esrb(res['esrb_rating'])
game['Achievements'] = res['achievements_count']
game['CreatorsCount'] = res['creators_count']
game['Description'] = self.__parse_description(res['description'])
return game
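    # Example usage (a sketch; assumes network access to the public RAWG API and
    # a title present in its catalog -- the game name below is arbitrary):
    #   client = RAWG()
    #   info = client.get_game("Portal 2")
    #   print(info.get("Name"), info.get("Metacritic"))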
def __request_id(self, name):
params = { 'search': name }
try:
res = self.session.get(f'{self.base_url}/games', params=params)
if res.content:
res = json.loads(res.content)
if len(res['results']):
return res['results'][0]['id']
except:
return None
def __request_game(self, id):
res = self.session.get(f'{self.base_url}/games/{id}')
res = json.loads(res.content)
return res
def __parse_description(self, description):
        soup = BeautifulSoup(description, 'html.parser')
return soup.get_text()
def __parse_precense(self, social_media):
keys = [ 'reddit_count', 'twitch_count', 'youtube_count',
'reviews_text_count', 'ratings_count', 'suggestions_count']
vals = []
for key in keys:
vals.append(social_media[key])
return dict(zip(keys, vals))
def __score_precense(self, precense):
score = 0
for site in precense:
score += precense[site]
return score
def __parse_platforms(self, platforms):
platforms = [ platform['platform']['name'] for platform in platforms ]
return ', '.join(platforms )
def __parse_ratings(self, ratings):
shortened_ratings = [ f"{rating['title']}: {rating['count']}" for rating in ratings ]
return ', '.join(shortened_ratings)
def __parse_esrb(self, esrb_rating):
if esrb_rating:
return esrb_rating['name'] |
the-stack_106_27157 | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from dora import Explorer
import treetable as tt
class MyExplorer(Explorer):
def get_grid_metrics(self):
return [
tt.leaf("train", ".3f"),
tt.leaf("test", ".3f"),
tt.leaf("correct", ".1f"),
]
@MyExplorer
def explorer(launcher):
for bs in [32, 64, 128]:
launcher(batch_size=bs)
for hidden_dim in [512, 1024]:
# here we get a sub launcher with `bind()`. All XPs scheduled with it
# will retain the bound params but it won't impact the parent launcher.
sub = launcher.bind({"model.hidden_dim": hidden_dim})
# Or, the two are equivalent
# sub = launcher.bind([f"model.hidden_dim={hidden_dim}"])
sub()
sub(gamma=0.6)
sub({'+new_param': 'whatever'}) # you can define extra keys with '+' if required
launcher.bind_(gamma=0.6)
launcher.slurm_(mem_per_gpu=20)
launcher()
launcher(lr=0.01)
with launcher.job_array():
for seed in range(1234, 1234 + 8):
launcher(seed=seed)
|
the-stack_106_27158 | import asyncio
async def foo():
for i in range(5):
print("foo")
await asyncio.sleep(1)
async def bar():
for i in range(10):
print("bar")
await asyncio.sleep(1)
loop = asyncio.get_event_loop()
# Run each coroutine to completion, one after the other (not concurrently).
loop.run_until_complete(foo())
loop.run_until_complete(bar())
loop.close()
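# To interleave the two coroutines instead of running them back to back, they
# could be awaited together (a sketch):
#   loop.run_until_complete(asyncio.gather(foo(), bar()))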
|
the-stack_106_27159 | import os
import os.path as osp
import sys
import pdb
import argparse
import librosa
import natsort
import numpy as np
import mmcv
import random
from mir_eval.separation import bss_eval_sources
from glob import glob
from tqdm import tqdm
import h5py
from PIL import Image
import subprocess
from options.test_options import TestOptions
import torchvision.transforms as transforms
import torch
import torchvision
from data.sep_dataset import generate_spectrogram
from models.networks import VisualNet, VisualNetDilated, AudioNet, AssoConv, APNet, weights_init, Rearrange
def audio_empty(wav):
flag = np.sum(np.abs(wav)) < 1e-3
return flag
def audio_normalize(samples, desired_rms = 0.1, eps = 1e-4):
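    # Rescale the clip to the target RMS level and return the applied ratio
    # (rms / desired_rms) so predictions can later be mapped back to the
    # original scale.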
rms = np.maximum(eps, np.sqrt(np.mean(samples**2)))
samples = samples * (desired_rms / rms)
return rms / desired_rms, samples
def separation_metrics(pred_left, pred_right, gt_left, gt_right, mix):
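    # BSS-eval metrics: SDR/SIR/SAR of the predicted left/right sources against
    # the ground-truth pair, plus the SDR obtained when the mixture itself is
    # used as the estimate (a common baseline).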
if audio_empty(gt_left) or audio_empty(gt_right) or audio_empty(pred_right) or audio_empty(pred_left) or audio_empty(mix):
print("----------- Empty -----------")
return None
sdr, sir, sar, _ = bss_eval_sources(np.asarray([gt_left, gt_right]), np.asarray([pred_left, pred_right]), False)
sdr_mix, _, _, _ = bss_eval_sources(np.asarray([gt_left, gt_right]), np.asarray([mix, mix]), False)
return sdr.mean(), sir.mean(), sar.mean(), sdr_mix.mean()
def main():
#load test arguments
opt = TestOptions().parse()
opt.device = torch.device("cuda")
# visual net
original_resnet = torchvision.models.resnet18(pretrained=True)
if opt.visual_model == 'VisualNet':
net_visual = VisualNet(original_resnet)
elif opt.visual_model == 'VisualNetDilated':
net_visual = VisualNetDilated(original_resnet)
else:
raise TypeError("please input correct visual model type")
if len(opt.weights_visual) > 0:
print('Loading weights for visual stream')
net_visual.load_state_dict(torch.load(opt.weights_visual), strict=True)
# audio net
net_audio = AudioNet(
ngf=opt.unet_ngf,
input_nc=opt.unet_input_nc,
output_nc=opt.unet_output_nc,
)
net_audio.apply(weights_init)
if len(opt.weights_audio) > 0:
print('Loading weights for audio stream')
net_audio.load_state_dict(torch.load(opt.weights_audio), strict=True)
# fusion net
if opt.fusion_model == 'none':
net_fusion = None
elif opt.fusion_model == 'AssoConv':
net_fusion = AssoConv()
elif opt.fusion_model == 'APNet':
net_fusion = APNet()
else:
raise TypeError("Please input correct fusion model type")
if net_fusion is not None and len(opt.weights_fusion) > 0:
print('Loading weights for fusion stream')
net_fusion.load_state_dict(torch.load(opt.weights_fusion), strict=True)
net_visual.to(opt.device)
net_audio.to(opt.device)
net_visual.eval()
net_audio.eval()
if net_fusion is not None:
net_fusion.to(opt.device)
net_fusion.eval()
# rearrange module
net_rearrange = Rearrange()
net_rearrange.to(opt.device)
net_rearrange.eval()
val_list_file = 'data/dummy_MUSIC_split/val.csv'
sample_list = mmcv.list_from_file(val_list_file)
# ensure output dir
if not osp.exists(opt.output_dir_root):
os.mkdir(opt.output_dir_root)
#define the transformation to perform on visual frames
vision_transform_list = [transforms.Resize((224,448)), transforms.ToTensor()]
vision_transform_list.append(transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]))
vision_transform = transforms.Compose(vision_transform_list)
chosen_audio_len = opt.audio_sampling_rate * 6
total_metrics = {'sdr':[], 'sir':[], 'sar':[], 'sdr_m':[]}
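    # Evaluation loop: each validation sample is paired with one other randomly
    # chosen sample, their mono tracks are mixed, and the network's two-channel
    # output is scored against the original sources.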
for global_idx, sample in enumerate(sample_list):
N = 2
chosen_samples = [sample]
# avoid repeat sample
for i in range(1, N):
while True:
new_sample = random.choice(sample_list)
if new_sample not in chosen_samples:
chosen_samples.append(new_sample)
break
audio_margin = 6
audio_list = []
frame_idx_list = []
frame_list = []
cur_output_dir_root = []
for idx, chosen_sample in enumerate(chosen_samples):
input_audio_path, img_folder, _, cate = chosen_sample.split(',')
cur_output_dir_root.append('_'.join([cate, img_folder[-4:]]))
#load the audio to perform separation
audio, audio_rate = librosa.load(input_audio_path, sr=opt.audio_sampling_rate, mono=True)
#randomly get a start time for 6s audio segment
audio_len = len(audio) / audio_rate
audio_start_time = random.uniform(audio_margin, audio_len - 6 - audio_margin)
audio_end_time = audio_start_time + 6
audio_start = int(audio_start_time * opt.audio_sampling_rate)
audio_end = audio_start + chosen_audio_len
audio = audio[audio_start:audio_end]
audio_list.append(audio)
#lock the frame idx range
frame_list.append(natsort.natsorted(glob(osp.join(img_folder, '*.jpg'))))
frame_idx_list.append(int((audio_start_time + audio_end_time) / 2 * 10))
#perform spatialization over the whole audio using a sliding window approach
overlap_count = np.zeros(chosen_audio_len) #count the number of times a data point is calculated
pred_left = np.zeros(chosen_audio_len)
pred_right = np.zeros(chosen_audio_len)
#perform spatialization over the whole spectrogram in a siliding-window fashion
sliding_window_start = 0
sliding_idx = 0
data = {}
samples_per_window = int(opt.audio_length * opt.audio_sampling_rate)
while sliding_window_start + samples_per_window < chosen_audio_len:
sliding_window_end = sliding_window_start + samples_per_window
normalizer1, audio_segment1 = audio_normalize(audio_list[0][sliding_window_start:sliding_window_end])
normalizer2, audio_segment2 = audio_normalize(audio_list[1][sliding_window_start:sliding_window_end])
audio_segment_channel1 = audio_segment1
audio_segment_channel2 = audio_segment2
audio_segment_mix = audio_segment_channel1 + audio_segment_channel2
audio_diff = torch.FloatTensor(generate_spectrogram(audio_segment_channel1 - audio_segment_channel2)).unsqueeze(0) #unsqueeze to add a batch dimension
audio_mix = torch.FloatTensor(generate_spectrogram(audio_segment_channel1 + audio_segment_channel2)).unsqueeze(0) #unsqueeze to add a batch dimension
#get the frame index for current window
frame_index1 = int(np.clip(frame_idx_list[0] + sliding_idx, 0, len(frame_list[0]) - 1))
frame_index2 = int(np.clip(frame_idx_list[1] + sliding_idx, 0, len(frame_list[1]) - 1))
image1 = Image.open(frame_list[0][frame_index1]).convert('RGB')
image2 = Image.open(frame_list[1][frame_index2]).convert('RGB')
#image = image.transpose(Image.FLIP_LEFT_RIGHT)
frame1 = vision_transform(image1).unsqueeze(0).to(opt.device) #unsqueeze to add a batch dimension
frame2 = vision_transform(image2).unsqueeze(0).to(opt.device) #unsqueeze to add a batch dimension
# data to device
audio_diff = audio_diff.to(opt.device)
audio_mix = audio_mix.to(opt.device)
img_feat = net_rearrange(net_visual(frame1), net_visual(frame2))
if net_fusion is not None:
upfeatures, output = net_audio(audio_diff, audio_mix, img_feat, return_upfeatures=True)
output.update(net_fusion(audio_mix, img_feat, upfeatures))
else:
output = net_audio(audio_diff, audio_mix, img_feat, return_upfeatures=False)
#ISTFT to convert back to audio
if opt.use_fusion_pred:
pred_left_spec = output['pred_left'][0,:,:,:].data[:].cpu().numpy()
pred_left_spec = pred_left_spec[0,:,:] + 1j * pred_left_spec[1,:,:]
reconstructed_signal_left = librosa.istft(pred_left_spec, hop_length=160, win_length=400, center=True, length=samples_per_window)
pred_right_spec = output['pred_right'][0,:,:,:].data[:].cpu().numpy()
pred_right_spec = pred_right_spec[0,:,:] + 1j * pred_right_spec[1,:,:]
reconstructed_signal_right = librosa.istft(pred_right_spec, hop_length=160, win_length=400, center=True, length=samples_per_window)
else:
predicted_spectrogram = output['binaural_spectrogram'][0,:,:,:].data[:].cpu().numpy()
reconstructed_stft_diff = predicted_spectrogram[0,:,:] + (1j * predicted_spectrogram[1,:,:])
reconstructed_signal_diff = librosa.istft(reconstructed_stft_diff, hop_length=160, win_length=400, center=True, length=samples_per_window)
reconstructed_signal_left = (audio_segment_mix + reconstructed_signal_diff) / 2
reconstructed_signal_right = (audio_segment_mix - reconstructed_signal_diff) / 2
pred_left[sliding_window_start:sliding_window_end] = pred_left[sliding_window_start:sliding_window_end] + reconstructed_signal_left * normalizer1
pred_right[sliding_window_start:sliding_window_end] = pred_right[sliding_window_start:sliding_window_end] + reconstructed_signal_right * normalizer2
overlap_count[sliding_window_start:sliding_window_end] = overlap_count[sliding_window_start:sliding_window_end] + 1
sliding_window_start = sliding_window_start + int(opt.hop_size * opt.audio_sampling_rate)
sliding_idx += 1
#deal with the last segment
normalizer1, audio_segment1 = audio_normalize(audio_list[0][-samples_per_window:])
normalizer2, audio_segment2 = audio_normalize(audio_list[1][-samples_per_window:])
audio_segment_channel1 = audio_segment1
audio_segment_channel2 = audio_segment2
audio_diff = torch.FloatTensor(generate_spectrogram(audio_segment_channel1 - audio_segment_channel2)).unsqueeze(0) #unsqueeze to add a batch dimension
audio_mix = torch.FloatTensor(generate_spectrogram(audio_segment_channel1 + audio_segment_channel2)).unsqueeze(0) #unsqueeze to add a batch dimension
#get the frame index for last window
frame_index1 = int(np.clip(frame_idx_list[0] + sliding_idx, 0, len(frame_list[0]) - 1))
frame_index2 = int(np.clip(frame_idx_list[1] + sliding_idx, 0, len(frame_list[1]) - 1))
image1 = Image.open(frame_list[0][frame_index1]).convert('RGB')
image2 = Image.open(frame_list[1][frame_index2]).convert('RGB')
#image = image.transpose(Image.FLIP_LEFT_RIGHT)
frame1 = vision_transform(image1).unsqueeze(0).to(opt.device) #unsqueeze to add a batch dimension
frame2 = vision_transform(image2).unsqueeze(0).to(opt.device) #unsqueeze to add a batch dimension
# data to device
audio_diff = audio_diff.to(opt.device)
audio_mix = audio_mix.to(opt.device)
img_feat = net_rearrange(net_visual(frame1), net_visual(frame2))
if net_fusion is not None:
upfeatures, output = net_audio(audio_diff, audio_mix, img_feat, return_upfeatures=True)
output.update(net_fusion(audio_mix, img_feat, upfeatures))
else:
output = net_audio(audio_diff, audio_mix, img_feat, return_upfeatures=False)
#ISTFT to convert back to audio
if opt.use_fusion_pred:
pred_left_spec = output['pred_left'][0,:,:,:].data[:].cpu().numpy()
pred_left_spec = pred_left_spec[0,:,:] + 1j * pred_left_spec[1,:,:]
reconstructed_signal_left = librosa.istft(pred_left_spec, hop_length=160, win_length=400, center=True, length=samples_per_window)
pred_right_spec = output['pred_right'][0,:,:,:].data[:].cpu().numpy()
pred_right_spec = pred_right_spec[0,:,:] + 1j * pred_right_spec[1,:,:]
reconstructed_signal_right = librosa.istft(pred_right_spec, hop_length=160, win_length=400, center=True, length=samples_per_window)
else:
predicted_spectrogram = output['binaural_spectrogram'][0,:,:,:].data[:].cpu().numpy()
reconstructed_stft_diff = predicted_spectrogram[0,:,:] + (1j * predicted_spectrogram[1,:,:])
reconstructed_signal_diff = librosa.istft(reconstructed_stft_diff, hop_length=160, win_length=400, center=True, length=samples_per_window)
reconstructed_signal_left = (audio_segment_mix + reconstructed_signal_diff) / 2
reconstructed_signal_right = (audio_segment_mix - reconstructed_signal_diff) / 2
pred_left[-samples_per_window:] = pred_left[-samples_per_window:] + reconstructed_signal_left * normalizer1
pred_right[-samples_per_window:] = pred_right[-samples_per_window:] + reconstructed_signal_right * normalizer2
#add the spatialized audio to reconstructed_binaural
overlap_count[-samples_per_window:] = overlap_count[-samples_per_window:] + 1
#divide aggregated predicted audio by their corresponding counts
pred_left = np.divide(pred_left, overlap_count)
pred_right = np.divide(pred_right, overlap_count)
gt_left, gt_right = audio_list
mix_audio = (gt_left + gt_right) / 2
sep_results = separation_metrics(pred_left, pred_right, gt_left, gt_right, mix_audio)
if sep_results is not None and global_idx % 20 == 0:
sdr, sir, sar, sdr_m = sep_results
print("index: {}, sdr: {}, sir: {}, sar: {}, sdr_m: {}\n".format(global_idx, sdr, sir, sar, sdr_m))
total_metrics['sdr'].append(sdr)
total_metrics['sir'].append(sir)
total_metrics['sar'].append(sar)
total_metrics['sdr_m'].append(sdr_m)
#check output directory
cur_output_dir_root = osp.join(opt.output_dir_root, '+'.join(cur_output_dir_root))
if not os.path.isdir(cur_output_dir_root):
os.mkdir(cur_output_dir_root)
librosa.output.write_wav(osp.join(cur_output_dir_root, 'pred_left.wav'), pred_left, sr=opt.audio_sampling_rate)
librosa.output.write_wav(osp.join(cur_output_dir_root, 'pred_right.wav'), pred_right, sr=opt.audio_sampling_rate)
librosa.output.write_wav(osp.join(cur_output_dir_root, 'gt_left.wav'), gt_left, sr=opt.audio_sampling_rate)
librosa.output.write_wav(osp.join(cur_output_dir_root, 'gt_right.wav'), gt_right, sr=opt.audio_sampling_rate)
librosa.output.write_wav(osp.join(cur_output_dir_root, 'mix.wav'), mix_audio, sr=opt.audio_sampling_rate)
print_content = "----- sdr: {}, sir: {}, sar: {}, sdr_m: {} -----\n".format(
sum(total_metrics['sdr']) / len(total_metrics['sdr']),
sum(total_metrics['sir']) / len(total_metrics['sir']),
sum(total_metrics['sar']) / len(total_metrics['sar']),
sum(total_metrics['sdr_m']) / len(total_metrics['sdr_m'])
)
print(print_content)
if __name__ == '__main__':
random.seed(1234)
torch.manual_seed(1234)
main()
|
the-stack_106_27164 | import asyncio
import awaitwhat.blocker
from awaitwhat.stack import task_get_stack
async def a():
await asyncio.gather(asyncio.sleep(0.01), asyncio.sleep(999 + 1))
def fixme_dont_test_sleep():
async def test():
t = asyncio.create_task(a())
try:
await asyncio.sleep(0.11)
stack = task_get_stack(t, None)
# assert awaitwhat.sleep.mine(stack[-2])
# text = awaitwhat.sleep.decode(stack[-2])
# assert "asyncio.sleep" in text
# assert "scheduled" in text
# assert "delay 42" in text
# assert "remaining 41.8" in text
finally:
t.cancel()
asyncio.run(test())
def test_blockers():
async def test():
t = asyncio.create_task(a())
try:
await asyncio.sleep(0.11)
text = str(awaitwhat.blocker.blockers(t))
assert "Task finished" in text
assert "Task pending" in text
finally:
t.cancel()
asyncio.run(test())
|
the-stack_106_27167 | import numpy as np
from pycocotools.coco import COCO
from .custom import CustomDataset
from .registry import DATASETS
@DATASETS.register_module
class CocoDataset(CustomDataset):
CLASSES = ('plane', 'ship', 'storage tank', 'baseball diamond', 'tennis court', 'basketball court', 'ground track field', 'harbor', 'bridge', 'large vehicle', 'small vehicle', 'helicopter', 'roundabout', 'soccer ball field', 'swimming pool')
def load_annotations(self, ann_file):
self.coco = COCO(ann_file)
self.cat_ids = self.coco.getCatIds()
self.cat2label = {
cat_id: i + 1
for i, cat_id in enumerate(self.cat_ids)
}
self.img_ids = self.coco.getImgIds()
img_infos = []
for i in self.img_ids:
info = self.coco.loadImgs([i])[0]
info['filename'] = info['file_name']
img_infos.append(info)
return img_infos
def get_ann_info(self, idx):
img_id = self.img_infos[idx]['id']
ann_ids = self.coco.getAnnIds(imgIds=[img_id])
ann_info = self.coco.loadAnns(ann_ids)
return self._parse_ann_info(self.img_infos[idx], ann_info)
def _filter_imgs(self, min_size=32):
"""Filter images too small or without ground truths."""
valid_inds = []
ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values())
for i, img_info in enumerate(self.img_infos):
if self.filter_empty_gt and self.img_ids[i] not in ids_with_ann:
continue
if min(img_info['width'], img_info['height']) >= min_size:
valid_inds.append(i)
return valid_inds
def _parse_ann_info(self, img_info, ann_info):
"""Parse bbox and mask annotation.
Args:
ann_info (list[dict]): Annotation info of an image.
with_mask (bool): Whether to parse mask annotations.
Returns:
dict: A dict containing the following keys: bboxes, bboxes_ignore,
labels, masks, seg_map. "masks" are raw annotations and not
decoded into binary masks.
"""
gt_bboxes = []
gt_labels = []
gt_bboxes_ignore = []
gt_masks_ann = []
for i, ann in enumerate(ann_info):
if ann.get('ignore', False):
continue
x1, y1, w, h = ann['bbox']
if ann['area'] <= 0 or w < 1 or h < 1:
continue
bbox = [x1, y1, x1 + w - 1, y1 + h - 1]
if ann.get('iscrowd', False):
gt_bboxes_ignore.append(bbox)
else:
gt_bboxes.append(bbox)
gt_labels.append(self.cat2label[ann['category_id']])
gt_masks_ann.append(ann['segmentation'])
if gt_bboxes:
gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
gt_labels = np.array(gt_labels, dtype=np.int64)
else:
gt_bboxes = np.zeros((0, 4), dtype=np.float32)
gt_labels = np.array([], dtype=np.int64)
if gt_bboxes_ignore:
gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)
else:
gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)
seg_map = img_info['filename'].replace('jpg', 'png')
ann = dict(
bboxes=gt_bboxes,
labels=gt_labels,
bboxes_ignore=gt_bboxes_ignore,
masks=gt_masks_ann,
seg_map=seg_map)
return ann
|
the-stack_106_27168 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_str,
)
from ..utils import (
ExtractorError,
clean_html,
)
class MovieClipsIE(InfoExtractor):
_VALID_URL = r'https?://movieclips\.com/(?P<id>[\da-zA-Z]+)(?:-(?P<display_id>[\da-z-]+))?'
_TEST = {
'url': 'http://movieclips.com/Wy7ZU-my-week-with-marilyn-movie-do-you-love-me/',
'info_dict': {
'id': 'Wy7ZU',
'display_id': 'my-week-with-marilyn-movie-do-you-love-me',
'ext': 'mp4',
'title': 'My Week with Marilyn - Do You Love Me?',
'description': 'md5:e86795bd332fe3cff461e7c8dc542acb',
'thumbnail': 're:^https?://.*\.jpg$',
},
'params': {
# rtmp download
'skip_download': True,
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
display_id = mobj.group('display_id')
show_id = display_id or video_id
config = self._download_xml(
'http://config.movieclips.com/player/config/%s' % video_id,
show_id, 'Downloading player config')
if config.find('./country-region').text == 'false':
raise ExtractorError(
'%s said: %s' % (self.IE_NAME, config.find('./region_alert').text), expected=True)
properties = config.find('./video/properties')
smil_file = properties.attrib['smil_file']
smil = self._download_xml(smil_file, show_id, 'Downloading SMIL')
base_url = smil.find('./head/meta').attrib['base']
formats = []
for video in smil.findall('./body/switch/video'):
vbr = int(video.attrib['system-bitrate']) / 1000
src = video.attrib['src']
formats.append({
'url': base_url,
'play_path': src,
'ext': src.split(':')[0],
'vbr': vbr,
'format_id': '%dk' % vbr,
})
self._sort_formats(formats)
title = '%s - %s' % (properties.attrib['clip_movie_title'], properties.attrib['clip_title'])
description = clean_html(compat_str(properties.attrib['clip_description']))
thumbnail = properties.attrib['image']
categories = properties.attrib['clip_categories'].split(',')
return {
'id': video_id,
'display_id': display_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'categories': categories,
'formats': formats,
}
|
the-stack_106_27169 | """
A minimal character-based 2-layer Vanilla RNN model.
This is derived from the following scripts:
- https://gist.github.com/karpathy/d4dee566867f8291f086
- https://github.com/eliben/deep-learning-samples/blob/master/min-char-rnn/min-char-rnn.py
And you might find the following materials helpful:
- http://karpathy.github.io/2015/05/21/rnn-effectiveness/
- http://arxiv.org/abs/1506.00019
To run:
$ python min_char_rnn_two_layers.py <text file>
----
BSD License
"""
from __future__ import print_function
import numpy as np
import sys
# Make it possible to provide input file as a command-line argument; input.txt
# is still the default.
if len(sys.argv) > 1:
filename = sys.argv[1]
else:
filename = 'input.txt'
with open(filename, 'r') as f:
data = f.read()
# All unique characters / entities in the data set.
chars = list(set(data))
chars.sort()
data_size, vocab_size = len(data), len(chars)
print('data has %d characters, %d unique.' % (data_size, vocab_size))
# Each character in the vocabulary gets a unique integer index assigned, in the
# half-open interval [0:N). These indices are useful to create one-hot encoded
# vectors that represent characters in numerical computations.
char_to_ix = {ch: i for i, ch in enumerate(chars)}
ix_to_char = {i: ch for i, ch in enumerate(chars)}
print('char_to_ix', char_to_ix)
print('ix_to_char', ix_to_char)
# Hyperparameters
hidden_size = 512 # size of hidden layer of neurons
seq_length = 16 # number of steps to unroll the RNN for
learning_rate = 1e-2
# Stop when processed this much data
MAX_DATA = 100000
MAX_ITER = 200000
# Model parameters/weights -- these are shared among all steps. Weights
# initialized randomly; biases initialized to 0.
# Inputs are characters one-hot encoded in a vocab-sized vector.
# Dimensions: H = hidden_size, V = vocab_size
Wxh = np.random.randn(hidden_size, vocab_size) * 0.01 # input to hidden
Whh1 = np.random.randn(hidden_size, hidden_size) * 0.01 # hidden to hidden in layer one
Whh2 = np.random.randn(hidden_size, hidden_size) * 0.01 # hidden to hidden in layer two
Wh1h2 = np.random.randn(hidden_size, hidden_size) * 0.01 # hidden to hidden from layer one to layer two
Why = np.random.randn(vocab_size, hidden_size) * 0.01 # hidden to output
bh1 = np.zeros((hidden_size, 1)) # hidden bias
bh2 = np.zeros((hidden_size, 1)) # hidden bias from layer one to layer two
by = np.zeros((vocab_size, 1)) # output bias
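# Data flow per time step (a compact summary of the weights above):
#   x[t] --Wxh--> h1[t] (recurrent via Whh1) --Wh1h2--> h2[t] (recurrent via Whh2) --Why--> y[t]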
def lossFun(inputs, targets, hprev):
"""Runs forward and backward passes through the RNN.
inputs, targets: Lists of integers. For some i, inputs[i] is the input
character (encoded as an index into the ix_to_char map) and
targets[i] is the corresponding next character in the
training data (similarly encoded).
hprev: Hx2 array of initial hidden state
returns: loss, gradients on model parameters, and last hidden state
"""
# Caches that keep values computed in the forward pass at each time step, to
# be reused in the backward pass.
xs, h1s, h2s, ys, ps = {}, {}, {}, {}, {}
# Initial incoming state.
h1s[-1] = np.expand_dims(np.copy(hprev[:, 0]), 1)
h2s[-1] = np.expand_dims(np.copy(hprev[:, 1]), 1)
loss = 0
# Forward pass
for t in range(len(inputs)):
# Input at time step t is xs[t]. Prepare a one-hot encoded vector of shape
# (V, 1). inputs[t] is the index where the 1 goes.
xs[t] = np.zeros((vocab_size, 1)) # encode in 1-of-k representation
xs[t][inputs[t]] = 1
# Compute h1[t], h2[t] from h1[t-1], h2[t-1] and x[t]
h1s[t] = np.tanh(np.dot(Wxh, xs[t]) + np.dot(Whh1, h1s[t-1]) + bh1)
h2s[t] = np.tanh(np.dot(Wh1h2, h1s[t]) + np.dot(Whh2, h2s[t-1]) + bh2)
# Compute ps[t] - softmax probabilities for output.
ys[t] = np.dot(Why, h2s[t]) + by
ps[t] = np.exp(ys[t]) / np.sum(np.exp(ys[t]))
# Cross-entropy loss for two probability distributions p and q is defined as
# follows:
#
# xent(q, p) = -Sum q(k)log(p(k))
# k
#
# Where k goes over all the possible values of the random variable p and q
# are defined for.
# In our case taking q is the "real answer" which is 1-hot encoded; p is the
# result of softmax (ps). targets[t] has the only index where q is not 0,
# so the sum simply becomes log of ps at that index.
loss += -np.log(ps[t][targets[t], 0])
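        # Illustrative check (not from the original script): if ps[t] assigns
        # probability 0.25 to the target character, this step adds
        # -log(0.25) ~= 1.386 to the loss.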
# Backward pass: compute gradients going backwards.
# Gradients are initialized to 0s, and every time step contributes to them.
dWxh, dWhh1, dWhh2, dWh1h2, dWhy = np.zeros_like(Wxh), np.zeros_like(Whh1), np.zeros_like(Whh2), np.zeros_like(Wh1h2), np.zeros_like(Why)
dbh1, dbh2, dby = np.zeros_like(bh1), np.zeros_like(bh2), np.zeros_like(by)
# Initialize the incoming gradient of h to zero; this is a safe assumption for
# a sufficiently long unrolling.
dh1next = np.zeros_like(h1s[0])
dh2next = np.zeros_like(h2s[0])
# The backwards pass iterates over the input sequence backwards.
for t in reversed(range(len(inputs))):
# Backprop through the gradients of loss and softmax.
dy = np.copy(ps[t])
dy[targets[t]] -= 1
# Compute gradients for the Why and by parameters.
dWhy += np.dot(dy, h2s[t].T)
dby += dy
# Backprop through the fully-connected layer (Why, by) to h2. Also add up the
# incoming gradient for h2 from the next cell.
# Note: proper Jacobian matmul here would be dy.dot(Why), that would give
# a [1,T] vector. Since we need [T,1] for h, we flip the dot (we could have
# transposed after everything, too)
dh2 = np.dot(Why.T, dy) + dh2next
# Backprop through the tanh in layer two.
dh2raw = (1 - h2s[t] * h2s[t]) * dh2
# Compute gradients for the dbh2, dWh1h2, Whh2 parameters.
dbh2 += dh2raw
dWh1h2 += np.dot(dh2raw, h1s[t].T)
dWhh2 += np.dot(dh2raw, h2s[t-1].T)
# Backprop through the layer-one-to-layer-two connection (Wh1h2, bh2) to h1. Also add up the
# incoming gradient for h1 from the next cell.
dh1 = np.dot(Wh1h2.T, dh2raw) + dh1next
dh1raw = (1 - h1s[t] * h1s[t]) * dh1
# Compute gradients for the bh1, Wxh and Whh1 parameters.
dbh1 += dh1raw
dWxh += np.dot(dh1raw, xs[t].T)
dWhh1 += np.dot(dh1raw, h1s[t-1].T)
# Backprop the gradient to the incoming h, which will be used in the
# previous time step.
dh2next = np.dot(Whh2.T, dh2raw)
dh1next = np.dot(Whh1.T, dh1raw)
# Gradient clipping to the range [-5, 5].
for dparam in [dWxh, dWhh1, dWhh2, dWh1h2, dWhy, dbh1, dbh2, dby]:
np.clip(dparam, -5, 5, out=dparam)
return loss, dWxh, dWhh1, dWhh2, dWh1h2, dWhy, dbh1, dbh2, dby, np.concatenate((h1s[len(inputs) - 1], h2s[len(inputs)-1]), axis=1)
def sample(h, seed_ix, n):
"""Sample a sequence of integers from the model.
Runs the RNN in forward mode for n steps; seed_ix is the seed letter for the
first time step, and h is the memory state. Returns a sequence of letters
produced by the model (indices).
"""
# Create a one-hot vector to represent the input.
x = np.zeros((vocab_size, 1))
x[seed_ix] = 1
ixes = []
h1 = np.expand_dims(np.copy(h[:, 0]), 1)
h2 = np.expand_dims(np.copy(h[:, 1]), 1)
for t in range(n):
# Run the forward pass only.
h1 = np.tanh(np.dot(Wxh, x) + np.dot(Whh1, h1) + bh1)
h2 = np.tanh(np.dot(Wh1h2, h1) + np.dot(Whh2, h2) + bh2)
y = np.dot(Why, h2) + by
p = np.exp(y) / np.sum(np.exp(y))
# Sample from the distribution produced by softmax.
ix = np.random.choice(range(vocab_size), p=p.ravel())
# Prepare input for the next cell.
x = np.zeros((vocab_size, 1))
x[ix] = 1
ixes.append(ix)
return ixes
# Gradient checking (from karpathy's own comment on the gist)
from random import uniform
def gradCheck(inputs, targets, hprev):
global Wxh, Whh1, Whh2, Wh1h2, Why, bh1, bh2, by
num_checks, delta = 30, 1e-5
_, dWxh, dWhh1, dWhh2, dWh1h2, dWhy, dbh1, dbh2, dby, _ = lossFun(inputs, targets, hprev)
for param, dparam, name in zip([Wxh, Whh1, Whh2, Wh1h2, Why, bh1, bh2, by],
[dWxh, dWhh1, dWhh2, dWh1h2, dWhy, dbh1, dbh2, dby],
['Wxh', 'Whh1', 'Whh2', 'Wh1h2', 'Why', 'bh1', 'bh2', 'by']):
s0 = dparam.shape
s1 = param.shape
assert s0 == s1, 'Error dims dont match: %s and %s.' % (s0, s1)
print(name)
for i in range(num_checks):
ri = int(uniform(0, param.size))
# evaluate cost at [x + delta] and [x - delta]
old_val = param.flat[ri]
param.flat[ri] = old_val + delta
cg0, _, _, _, _, _, _, _, _, _ = lossFun(inputs, targets, hprev)
param.flat[ri] = old_val - delta
cg1, _, _, _, _, _, _, _, _, _ = lossFun(inputs, targets, hprev)
param.flat[ri] = old_val # reset old value for this parameter
# fetch both numerical and analytic gradient
grad_analytic = dparam.flat[ri]
grad_numerical = (cg0 - cg1) / (2 * delta)
rel_error = abs(grad_analytic - grad_numerical) / abs(grad_numerical + grad_analytic)
print('%f, %f => %e ' % (grad_numerical, grad_analytic, rel_error))
# rel_error should be on order of 1e-7 or less
# This function invokes gradCheck with all the parameters properly set up.
def basicGradCheck():
inputs = [char_to_ix[ch] for ch in data[:seq_length]]
targets = [char_to_ix[ch] for ch in data[1:seq_length + 1]]
hprev = np.zeros((hidden_size, 2)) # reset RNN memory
gradCheck(inputs, targets, hprev)
# Uncomment this to run a basic gradient check.
# basicGradCheck()
# n is the iteration counter; p is the input sequence pointer, at the beginning
# of each step it points at the sequence in the input that will be used for
# training this iteration.
n, p = 0, 0
# Memory variables for Adagrad.
mWxh, mWhh1, mWhh2, mWh1h2, mWhy = np.zeros_like(Wxh), np.zeros_like(Whh1), np.zeros_like(Whh2), np.zeros_like(Wh1h2), np.zeros_like(Why)
mbh1, mbh2, mby = np.zeros_like(bh1), np.zeros_like(bh2), np.zeros_like(by)
smooth_loss = -np.log(1.0 / vocab_size) * seq_length
while n < MAX_ITER:
# Prepare inputs (we're sweeping from left to right in steps seq_length long)
if p + seq_length + 1 >= len(data) or n == 0:
hprev = np.zeros((hidden_size, 2)) # reset RNN memory
p = 0 # go from start of data
# In each step we unroll the RNN for seq_length cells, and present it with
# seq_length inputs and seq_length target outputs to learn.
inputs = [char_to_ix[ch] for ch in data[p:p + seq_length]]
targets = [char_to_ix[ch] for ch in data[p + 1:p + seq_length + 1]]
# gradCheck(inputs, targets, hprev)
# break
# Sample from the model now and then.
if n % 1000 == 0:
sample_ix = sample(hprev, inputs[0], 200)
txt = ''.join(ix_to_char[ix] for ix in sample_ix)
print('----\n %s \n----' % (txt,))
# Forward seq_length characters through the net and fetch gradient
loss, dWxh, dWhh1, dWhh2, dWh1h2, dWhy, dbh1, dbh2, dby, hprev = lossFun(inputs, targets, hprev)
smooth_loss = smooth_loss * 0.999 + loss * 0.001
if n % 200 == 0: print('iter %d (p=%d), loss: %f' % (n, p, smooth_loss))
# Perform parameter update with Adagrad
for param, dparam, mem in zip([Wxh, Whh1, Whh2, Wh1h2, Why, bh1, bh2, by],
[dWxh, dWhh1, dWhh2, dWh1h2, dWhy, dbh1, dbh2, dby],
[mWxh, mWhh1, mWhh2, mWh1h2, mWhy, mbh1, mbh2, mby]):
mem += dparam * dparam
param += -learning_rate * dparam / np.sqrt(mem + 1e-8)
p += seq_length
n += 1
|
the-stack_106_27173 | # -*- coding: utf-8 -*-
from python_pachyderm.client.pps import pps_pb2 as proto
from python_pachyderm.client.pps import pps_pb2_grpc as grpc
from python_pachyderm.util import commit_from, get_address, get_metadata
class PpsClient(object):
def __init__(self, host=None, port=None, auth_token=None, root_certs=None):
"""
Creates a client to connect to PPS.
host: The pachd host. Default is 'localhost'.
port: The port to connect to. Default is 30650.
auth_token: The authentication token; used if authentication is
enabled on the cluster. Default to `None`.
root_certs: The PEM-encoded root certificates as byte string.
"""
address = get_address(host, port)
self.metadata = get_metadata(auth_token)
if root_certs:
ssl_channel_credentials = grpc.grpc.ssl_channel_credentials
ssl = ssl_channel_credentials(root_certificates=root_certs)
self.channel = grpc.grpc.secure_channel(address, ssl)
else:
self.channel = grpc.grpc.insecure_channel(address)
self.stub = grpc.APIStub(self.channel)
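# Example usage (a minimal sketch, not part of the original client; it assumes a
# pachd instance reachable on the default localhost:30650 and an existing pipeline
# named "edges"):
#
#   client = PpsClient()
#   for job_info in client.list_job(pipeline_name="edges"):
#       print(job_info.job.id)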
def inspect_job(self, job_id, block_state=None, output_commit=None):
"""
Inspects a job with a given ID. Returns a `JobInfo`.
Params:
* job_id: The ID of the job to inspect.
* block_state: If true, block until the job completes.
* output_commit: An optional tuple, string, or `Commit` object
representing an output commit to filter on.
"""
output_commit = commit_from(output_commit) if output_commit is not None else None
req = proto.InspectJobRequest(job=proto.Job(id=job_id), block_state=block_state, output_commit=output_commit)
return self.stub.InspectJob(req, metadata=self.metadata)
def list_job(self, pipeline_name=None, input_commit=None, output_commit=None, history=None):
"""
Lists jobs. Yields `JobInfo` objects.
Params:
* pipeline_name: An optional string representing a pipeline name to
filter on.
* input_commit: An optional list of tuples, strings, or `Commit`
objects representing input commits to filter on.
* output_commit: An optional tuple, string, or `Commit` object
representing an output commit to filter on.
* history: An optional int that indicates to return jobs from
historical versions of pipelines. Semantics are:
0: Return jobs from the current version of the pipeline or pipelines.
1: Return the above and jobs from the next most recent version
2: etc.
-1: Return jobs from all historical versions.
"""
pipeline = proto.Pipeline(name=pipeline_name) if pipeline_name is not None else None
if isinstance(input_commit, list):
input_commit = [commit_from(ic) for ic in input_commit]
elif input_commit is not None:
input_commit = [commit_from(input_commit)]
output_commit = commit_from(output_commit) if output_commit is not None else None
req = proto.ListJobRequest(pipeline=pipeline, input_commit=input_commit,
output_commit=output_commit, history=history)
return self.stub.ListJobStream(req, metadata=self.metadata)
def flush_job(self, commits, pipeline_names=None):
"""
Blocks until all of the jobs which have a set of commits as
provenance have finished. Yields `JobInfo` objects.
Params:
* commits: A list of tuples, strings, or `Commit` objects representing
the commits to flush.
* pipeline_names: An optional list of strings specifying pipeline
names. If specified, only jobs within these pipelines will be flushed.
"""
commits = [commit_from(c) for c in commits]
pipelines = [proto.Pipeline(name=name) for name in pipeline_names] if pipeline_names is not None else None
req = proto.FlushJobRequest(commits=commits, to_pipelines=pipelines)
return self.stub.FlushJob(req, metadata=self.metadata)
def delete_job(self, job_id):
"""
Deletes a job by its ID.
Params:
* job_id: The ID of the job to delete.
"""
req = proto.DeleteJobRequest(job=proto.Job(id=job_id))
self.stub.DeleteJob(req, metadata=self.metadata)
def stop_job(self, job_id):
"""
Stops a job by its ID.
Params:
* job_id: The ID of the job to stop.
"""
req = proto.StopJobRequest(job=proto.Job(id=job_id))
self.stub.StopJob(req, metadata=self.metadata)
def inspect_datum(self, job_id, datum_id):
"""
Inspects a datum. Returns a `DatumInfo` object.
Params:
* job_id: The ID of the job.
* datum_id: The ID of the datum.
"""
req = proto.InspectDatumRequest(datum=proto.Datum(id=datum_id, job=proto.Job(id=job_id)))
return self.stub.InspectDatum(req, metadata=self.metadata)
def list_datum(self, job_id, page_size=None, page=None):
"""
Lists datums. Yields `ListDatumStreamResponse` objects.
Params:
* job_id: The ID of the job.
* page_size: An optional int specifying the size of the page.
* page: An optional int specifying the page number.
"""
req = proto.ListDatumRequest(job=proto.Job(id=job_id), page_size=page_size, page=page)
return self.stub.ListDatumStream(req, metadata=self.metadata)
def restart_datum(self, job_id, data_filters=None):
"""
Restarts a datum.
Params:
* job_id: The ID of the job.
* data_filters: An optional iterable of strings.
"""
req = proto.RestartDatumRequest(job=proto.Job(id=job_id), data_filters=data_filters)
self.stub.RestartDatum(req, metadata=self.metadata)
def create_pipeline(self, pipeline_name, transform=None, parallelism_spec=None,
hashtree_spec=None, egress=None, update=None, output_branch=None,
scale_down_threshold=None, resource_requests=None,
resource_limits=None, input=None, description=None, cache_size=None,
enable_stats=None, reprocess=None, batch=None, max_queue_size=None,
service=None, chunk_spec=None, datum_timeout=None,
job_timeout=None, salt=None, standby=None, datum_tries=None,
scheduling_spec=None, pod_patch=None):
"""
Creates a pipeline. For more info, please refer to the pipeline spec
document:
http://docs.pachyderm.io/en/latest/reference/pipeline_spec.html
Params:
* pipeline_name: A string representing the pipeline name.
* transform: An optional `Transform` object.
* parallelism_spec: An optional `ParallelismSpec` object.
* hashtree_spec: An optional `HashtreeSpec` object.
* egress: An optional `Egress` object.
* update: An optional bool specifying whether this should behave as an
upsert.
* output_branch: An optional string representing the branch to output
results on.
* scale_down_threshold: An optional protobuf `Duration` object.
* resource_requests: An optional `ResourceSpec` object.
* resource_limits: An optional `ResourceSpec` object.
* input: An optional `Input` object.
* description: An optional string describing the pipeline.
* cache_size: An optional string.
* enable_stats: An optional bool.
* reprocess: An optional bool. If true, pachyderm forces the pipeline
to reprocess all datums. It only has meaning if `update` is `True`.
* batch: An optional bool.
* max_queue_size: An optional int.
* service: An optional `Service` object.
* chunk_spec: An optional `ChunkSpec` object.
* datum_timeout: An optional protobuf `Duration` object.
* job_timeout: An optional protobuf `Duration` object.
* salt: An optional string.
* standby: An optional bool.
* datum_tries: An optional int.
* scheduling_spec: An optional `SchedulingSpec` object.
* pod_patch: An optional string.
"""
req = proto.CreatePipelineRequest(
pipeline=proto.Pipeline(name=pipeline_name),
transform=transform, parallelism_spec=parallelism_spec,
hashtree_spec=hashtree_spec, egress=egress, update=update,
output_branch=output_branch, scale_down_threshold=scale_down_threshold,
resource_requests=resource_requests, resource_limits=resource_limits,
input=input, description=description, cache_size=cache_size,
enable_stats=enable_stats, reprocess=reprocess, batch=batch,
max_queue_size=max_queue_size, service=service,
chunk_spec=chunk_spec, datum_timeout=datum_timeout,
job_timeout=job_timeout, salt=salt, standby=standby,
datum_tries=datum_tries, scheduling_spec=scheduling_spec,
pod_patch=pod_patch
)
self.stub.CreatePipeline(req, metadata=self.metadata)
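# Example call (a sketch, not part of the original client; it assumes an "images"
# input repo and that `proto.Transform`, `proto.Input` and `proto.PFSInput` expose
# their usual fields):
#
#   client.create_pipeline(
#       "edges",
#       transform=proto.Transform(cmd=["python3", "/edges.py"], image="pachyderm/opencv"),
#       input=proto.Input(pfs=proto.PFSInput(glob="/*", repo="images")),
#   )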
def inspect_pipeline(self, pipeline_name, history=None):
"""
Inspects a pipeline. Returns a `PipelineInfo` object.
Params:
* pipeline_name: A string representing the pipeline name.
* history: An optional int that indicates to return jobs from
historical versions of pipelines. Semantics are:
0: Return jobs from the current version of the pipeline or pipelines.
1: Return the above and jobs from the next most recent version
2: etc.
-1: Return jobs from all historical versions.
"""
pipeline = proto.Pipeline(name=pipeline_name)
if history is None:
req = proto.InspectPipelineRequest(pipeline=pipeline)
return self.stub.InspectPipeline(req, metadata=self.metadata)
else:
# `InspectPipeline` doesn't support history, but `ListPipeline`
# with a pipeline filter does, so we use that here
req = proto.ListPipelineRequest(pipeline=pipeline, history=history)
pipelines = self.stub.ListPipeline(req, metadata=self.metadata).pipeline_info
assert len(pipelines) <= 1
return pipelines[0] if len(pipelines) else None
def list_pipeline(self, history=None):
"""
Lists pipelines. Returns a `PipelineInfos` object.
Params:
* history: An optional int that indicates to return jobs from
historical versions of pipelines. Semantics are:
0: Return jobs from the current version of the pipeline or pipelines.
1: Return the above and jobs from the next most recent version
2: etc.
-1: Return jobs from all historical versions.
"""
req = proto.ListPipelineRequest(history=history)
return self.stub.ListPipeline(req, metadata=self.metadata)
def delete_pipeline(self, pipeline_name, force=None):
"""
Deletes a pipeline.
Params:
* pipeline_name: A string representing the pipeline name.
* force: Whether to force delete.
"""
req = proto.DeletePipelineRequest(pipeline=proto.Pipeline(name=pipeline_name), force=force)
self.stub.DeletePipeline(req, metadata=self.metadata)
def delete_all_pipelines(self, force=None):
"""
Deletes all pipelines.
Params:
* force: Whether to force delete.
"""
req = proto.DeletePipelineRequest(all=True, force=force)
self.stub.DeletePipeline(req, metadata=self.metadata)
def start_pipeline(self, pipeline_name):
"""
Starts a pipeline.
Params:
* pipeline_name: A string representing the pipeline name.
"""
req = proto.StartPipelineRequest(pipeline=proto.Pipeline(name=pipeline_name))
self.stub.StartPipeline(req, metadata=self.metadata)
def stop_pipeline(self, pipeline_name):
"""
Stops a pipeline.
Params:
* pipeline_name: A string representing the pipeline name.
"""
req = proto.StopPipelineRequest(pipeline=proto.Pipeline(name=pipeline_name))
self.stub.StopPipeline(req, metadata=self.metadata)
def run_pipeline(self, pipeline_name, provenance=None):
"""
Runs a pipeline.
Params:
* pipeline_name: A string representing the pipeline name.
* provenance: An optional iterable of `CommitProvenance` objects
representing the pipeline execution provenance.
"""
req = proto.RunPipelineRequest(
pipeline=proto.Pipeline(name=pipeline_name),
provenance=provenance,
)
self.stub.RunPipeline(req, metadata=self.metadata)
def delete_all(self):
"""
Deletes everything in pachyderm.
"""
req = proto.google_dot_protobuf_dot_empty__pb2.Empty()
self.stub.DeleteAll(req, metadata=self.metadata)
def get_pipeline_logs(self, pipeline_name, data_filters=None, master=None,
datum=None, follow=None, tail=None):
"""
Gets logs for a pipeline. Yields `LogMessage` objects.
Params:
* pipeline_name: A string representing a pipeline to get
logs of.
* data_filters: An optional iterable of strings specifying the names
of input files from which we want processing logs. This may contain
multiple files, to query pipelines that contain multiple inputs. Each
filter may be an absolute path of a file within a pps repo, or it may
be a hash for that file (to search for files at specific versions.)
* master: An optional bool.
* datum: An optional `Datum` object.
* follow: An optional bool specifying whether logs should continue to
stream forever.
* tail: An optional int. If nonzero, the number of lines from the end
of the logs to return. Note: tail applies per container, so you will
get tail * <number of pods> total lines back.
"""
req = proto.GetLogsRequest(
pipeline=proto.Pipeline(name=pipeline_name),
data_filters=data_filters, master=master, datum=datum,
follow=follow, tail=tail,
)
return self.stub.GetLogs(req, metadata=self.metadata)
def get_job_logs(self, job_id, data_filters=None, datum=None, follow=None,
tail=None):
"""
Gets logs for a job. Yields `LogMessage` objects.
Params:
* job_id: A string representing a job to get logs of.
* data_filters: An optional iterable of strings specifying the names
of input files from which we want processing logs. This may contain
multiple files, to query pipelines that contain multiple inputs. Each
filter may be an absolute path of a file within a pps repo, or it may
be a hash for that file (to search for files at specific versions.)
* datum: An optional `Datum` object.
* follow: An optional bool specifying whether logs should continue to
stream forever.
* tail: An optional int. If nonzero, the number of lines from the end
of the logs to return. Note: tail applies per container, so you will
get tail * <number of pods> total lines back.
"""
req = proto.GetLogsRequest(
job=proto.Job(id=job_id), data_filters=data_filters, datum=datum,
follow=follow, tail=tail,
)
return self.stub.GetLogs(req, metadata=self.metadata)
def garbage_collect(self):
"""
Runs garbage collection.
"""
return self.stub.GarbageCollect(proto.GarbageCollectRequest(), metadata=self.metadata)
|
the-stack_106_27175 | import tensorflow as tf
from functools import reduce
from operator import mul
from string import ascii_lowercase
import numpy as np
import tensorflow as tf
from ordered_set import OrderedSet
# These are constants:
# max k = 9
ascii_chi = ascii_lowercase[:9]
ascii_f = ascii_lowercase[9:18]
# max feature vector dimensions = 8
ascii_feature_vector = ascii_lowercase[18:]
def alternativePermutationMatrix_AToB(a, b):
return np.array(np.apply_along_axis(lambda x, y: np.in1d(y, x), 0, np.expand_dims(a, 0),
np.expand_dims(b, 0)), dtype=int)
# a function that returns appropriate promotion einsum expression for a given k and feature vector shape
def getEinsumExpression(k, feature_vector_shape):
# if k==1:
# return 'ai,if->af'
# if k==2:
# return 'ai,bj,ijf->abf'
str_to_join = []
for i in range(k):
str_to_join.append(ascii_chi[i])
str_to_join.append(ascii_f[i])
str_to_join.append(',')
for i in range(k):
str_to_join.append(ascii_f[i])
for i in range(len(feature_vector_shape)):
str_to_join.append(ascii_feature_vector[i])
str_to_join.append('->')
for i in range(k):
str_to_join.append(ascii_chi[i])
for i in range(len(feature_vector_shape)):
str_to_join.append(ascii_feature_vector[i])
return ''.join(str_to_join)
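# For instance (a quick check of the expressions this helper generates):
#   getEinsumExpression(1, [3]) == 'aj,js->as'
#   getEinsumExpression(2, [3]) == 'aj,bk,jks->abs'
# which match the commented-out k=1 and k=2 special cases above, up to letter names.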
# THIS IS NOT IMPLEMENTED YET
# Check tests/testing.py
class CCN_Layer(tf.keras.layers.Layer):
def __init__(self):
super(CCN_Layer, self).__init__()
self.k = 2
self.feature_vector_shape = [3]
self.permutationFunction = alternativePermutationMatrix_AToB # Choose which permutation function to use (same results)
self.einsum_expr = getEinsumExpression(self.k, self.feature_vector_shape) # promotion expression
def build(self, input_shape):
# add weights
pass
#raise NotImplementedError
# self.kernel = self.add_variable("kernel",
# shape=[int(input_shape[-1]),
# self.num_outputs])
@tf.function
def call(self, input):
# Organizing inputs
# only the first (tensors) should pass gradients to update W's
# only the first and the third (tensors, parts) will change when propagating throughout network
# parts will accumulate with receptive fields
# tensors are activations from previous layer
# adjM is constant 2D square matrix - used to retrieve number of neurons by its size
# and to gather neurons to define new layer parts based on children parts
tensors, adjM, parts = input
# extract number of neurons from adjM number of rows (adjM is 2D square matrix)
# this is here for option to decrease number of neurons in the following layers by shrinking adjM
# e.g. neurons over leaf nodes in graph
num_neurons = len(adjM)
# contains information which neurons to gather signal from (for every neuron list)
receptive_fields = [tf.where(adjM[i] == 1)[:, 0] for i in range(num_neurons)]
# new, cumulative receptive fields (parts) based on adjM (for every neuron in current layer)
# for every neuron i;
# parts of every neuron in the receptive field of 'i' are reduced with union to get cumulative receptive fields
new_parts = [reduce(OrderedSet.union, [parts[tensor_child_index] for tensor_child_index in receptive_fields[i]])
for i in range(num_neurons)]
# Sanity-check promotion of the first neuron's activation (the result is not used below).
a_tensor = tf.convert_to_tensor(self.permutationFunction(parts[0], new_parts[0]), dtype=tf.float32)
one_prom = tf.einsum(self.einsum_expr, *([a_tensor] * self.k + [tensors[0]]))
# for every neuron i;
# create promotion chi matrix for every neuron/node in i's receptive field
chis = [{tensor_child_index.numpy(): tf.convert_to_tensor(
self.permutationFunction(parts[tensor_child_index], new_parts[i]), dtype=tf.float32)
for tensor_child_index in receptive_fields[i]}
for i in range(num_neurons)]
# for every neuron i;
# promote every activation of nodes in i's receptive field
# IMPORTANT:
# (probably) This is where tf functions should start to be used because new structures are formed based on previous ones
# and these new structures will ultimately 'transform' and mix with W to create activations
promotions = [
[tf.einsum(self.einsum_expr, *([chis[i][tensor_child_index.numpy()]] * self.k + [tensors[tensor_child_index]]))
for tensor_child_index in receptive_fields[i]]
for i in range(num_neurons)]
# print(promotions)
return [promotions, adjM, new_parts]
|
the-stack_106_27176 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import io
import os
import setuptools # type: ignore
version = "1.0.0"
package_root = os.path.abspath(os.path.dirname(__file__))
readme_filename = os.path.join(package_root, "README.rst")
with io.open(readme_filename, encoding="utf-8") as readme_file:
readme = readme_file.read()
setuptools.setup(
name="google-cloud-compute",
version=version,
long_description=readme,
author="Google LLC",
author_email="[email protected]",
license="Apache 2.0",
url="https://github.com/googleapis/python-compute",
packages=[
package
for package in setuptools.PEP420PackageFinder.find()
if package.startswith("google")
],
namespace_packages=("google", "google.cloud"),
platforms="Posix; MacOS X; Windows",
include_package_data=True,
install_requires=(
"google-api-core[grpc] >= 2.2.0, <3.0.0dev",
"proto-plus >= 1.19.7",
"dataclasses >= 0.6; python_version < '3.7'",
),
python_requires=">=3.6",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Topic :: Internet",
"Topic :: Software Development :: Libraries :: Python Modules",
],
zip_safe=False,
)
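# Typical local workflow (not part of the original file): install the package with
# "python -m pip install ." or build distributions with "python -m build".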
|
the-stack_106_27179 | def double_sort(lst):
"""this sorting algorithm sorts an array using the principle of bubble sort,
but does it both from left to right and right to left,
hence i decided to call it "double sort"
:param collection: mutable ordered sequence of elements
:return: the same collection in ascending order
Examples:
>>> double_sort([-1 ,-2 ,-3 ,-4 ,-5 ,-6 ,-7])
[-7, -6, -5, -4, -3, -2, -1]
>>> double_sort([])
[]
>>> double_sort([-1 ,-2 ,-3 ,-4 ,-5 ,-6])
[-6, -5, -4, -3, -2, -1]
>>> double_sort([-3, 10, 16, -42, 29]) == sorted([-3, 10, 16, -42, 29])
True
"""
no_of_elements = len(lst)
for i in range(
0, int(((no_of_elements - 1) / 2) + 1)
): # only about half the passes are needed, since each pass fixes an element at both ends
for j in range(0, no_of_elements - 1):
if (
lst[j + 1] < lst[j]
): # applying bubble sort algorithm from left to right (or forwards)
lst[j], lst[j + 1] = lst[j + 1], lst[j]
if (
lst[no_of_elements - 1 - j] < lst[no_of_elements - 2 - j]
): # applying bubble sort algorithm from right to left (or backwards)
lst[no_of_elements - 2 - j], lst[no_of_elements - 1 - j] = (
lst[no_of_elements - 1 - j],
lst[no_of_elements - 2 - j],
)
return lst
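# For example, sorting 5 elements runs the outer loop int(((5 - 1) / 2) + 1) == 3
# times; each pass fixes at least one element at each end, so the overall cost
# stays O(n^2), like plain bubble sort.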
if __name__ == "__main__":
print("enter the list to be sorted")
lst = [int(x) for x in input().split()] # inputting the elements of the list on one line
sorted_lst = double_sort(lst)
print("the sorted list is")
print(sorted_lst)
|
the-stack_106_27180 | #encoding=utf8
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import collections
from argparse import ArgumentParser
import logging
from pathlib import Path
from functools import reduce, partial
from operator import getitem
from datetime import datetime
import paddle.distributed as dist
from logger import setup_logging
from utils import read_json, write_json
class ConfigParser:
def __init__(self, config, resume=None, modification=None, run_id=None):
"""
class to parse configuration json file. Handles hyperparameters for training, initializations of modules, checkpoint saving
and logging module.
:param config: Dict containing configurations, hyperparameters for training. contents of `config.json` file for example.
:param resume: String, path to the checkpoint being loaded.
:param modification: Dict keychain:value, specifying position values to be replaced from config dict.
:param run_id: Unique Identifier for training processes. Used to save checkpoints and training log. Timestamp is being used as default
"""
# load config file and apply modification
self._config = _update_config(config, modification)
self.resume = resume
# str to bool, from modification or from default json file
self.update_config('distributed', (self.config['distributed'] == 'true') or self.config['distributed'] is True)
self.update_config('finetune', (self.config['finetune'] == 'true') or self.config['finetune'] is True)
if (self.config['local_rank'] == 0 and self.config['distributed']) \
or (not self.config['distributed']): # only the local master process creates the saved output dir
# set save_dir where trained model and log will be saved.
save_dir = Path(self.config['trainer']['save_dir'])
log_dir = Path(self.config['trainer']['log_dir'])
exper_name = self.config['name']
if run_id is None: # use timestamp as default run-id
run_id = datetime.now().strftime(r'%m%d_%H%M%S')
else:
run_id = run_id + '_' + datetime.now().strftime(r'%m%d_%H%M%S')
self._save_dir = save_dir / 'models' / exper_name / run_id
if self.config['distributed']:
self._log_dir = log_dir
else:
self._log_dir = save_dir / 'log' / exper_name / run_id
# make directory for saving checkpoints and log.
# Only tolerate a pre-existing run directory when an empty run_id was given explicitly.
exist_ok = run_id == ''
self.save_dir.mkdir(parents=True, exist_ok=exist_ok)
self.log_dir.mkdir(parents=True, exist_ok=exist_ok)
# save updated config file to the checkpoint dir, only local master save file
write_json(self.config, self.save_dir / 'config.json')
# configure logging module, only local master setup logging
setup_logging(self.log_dir)
self.log_levels = {
0: logging.WARNING,
1: logging.INFO,
2: logging.DEBUG
}
@classmethod
def from_args(cls, args: ArgumentParser, options: collections.namedtuple = ''):
"""
Initialize this class from some cli arguments. Used in train, test.
"""
for opt in options:
args.add_argument(*opt.flags, default=opt.default, type=opt.type, help=opt.help)
if not isinstance(args, tuple):
args = args.parse_args()
if args.device is not None:
os.environ["CUDA_VISIBLE_DEVICES"] = args.device
if args.resume is not None:
resume = Path(args.resume)
if args.config is None:
config_file_path = resume.parent / 'config.json'
else:
config_file_path = args.config
else:
msg_no_cfg = "Configuration file need to be specified. Add '-c config.json', for example."
assert args.config is not None, msg_no_cfg
resume = None
config_file_path = Path(args.config)
config = read_json(config_file_path)
if args.config and resume and args.finetune == 'false':
# update the config when resuming (continuing training); finetune mode does not reuse the previous config
config.update(read_json(args.config))
try:
if args.distributed is not None:
config['distributed'] = (args.distributed == 'true')
if not config['distributed']: # change to one gpu or cpu mode if not distributed setting.
config['local_world_size'] = 1
if args.finetune is not None:
config['finetune'] = (args.finetune == 'true')
except Exception:
pass
# parse custom cli options into dictionary
modification = {opt.target: getattr(args, _get_opt_name(opt.flags)) for opt in options}
return cls(config, resume, modification, config['run_id'])
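# Example (a sketch; `CustomArgs` is a caller-defined namedtuple with the fields
# flags/default/type/target/help, and the JSON config is assumed to contain the
# keys read below, e.g. `run_id`):
#
#   CustomArgs = collections.namedtuple('CustomArgs', 'flags default type target help')
#   args = ArgumentParser(description='train')
#   args.add_argument('-c', '--config', default='config.json', type=str)
#   args.add_argument('-r', '--resume', default=None, type=str)
#   args.add_argument('-d', '--device', default=None, type=str)
#   options = [CustomArgs(['--lr'], default=None, type=float,
#                         target='optimizer;args;lr', help='learning rate')]
#   config = ConfigParser.from_args(args, options)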
def init_obj(self, name, module, *args, **kwargs):
"""
Finds a function handle with the name given as 'type' in config, and returns the
instance initialized with corresponding arguments given.
`object = config.init_obj('name', module, a, b=1)`
is equivalent to
`object = module.name(a, b=1)`
"""
module_name = self[name]['type']
module_args = dict(self[name]['args'])
# assert all([k not in module_args for k in kwargs]), 'Overwriting kwargs given in config file is not allowed'
module_args.update(kwargs)
return getattr(module, module_name)(*args, **module_args)
def init_ftn(self, name, module, *args, **kwargs):
"""
Finds a function handle with the name given as 'type' in config, and returns the
function with given arguments fixed with functools.partial.
`function = config.init_ftn('name', module, a, b=1)`
is equivalent to
`function = lambda *args, **kwargs: module.name(a, *args, b=1, **kwargs)`.
"""
module_name = self[name]['type']
module_args = dict(self[name]['args'])
assert all([k not in module_args for k in kwargs]), 'Overwriting kwargs given in config file is not allowed'
module_args.update(kwargs)
return partial(getattr(module, module_name), *args, **module_args)
def __getitem__(self, name):
"""Access items like ordinary dict."""
return self.config[name]
def update_config(self, key, value):
"""Set config value ike ordinary dict. """
self.config[key] = value
def get_logger(self, name, verbosity=2):
msg_verbosity = 'verbosity option {} is invalid. Valid options are {}.'.format(verbosity,
self.log_levels.keys())
assert verbosity in self.log_levels, msg_verbosity
logger = logging.getLogger(name)
logger.setLevel(self.log_levels[verbosity])
return logger
# setting read-only attributes
@property
def config(self):
return self._config
@property
def save_dir(self):
return self._save_dir
@property
def log_dir(self):
return self._log_dir
# @property
# def log_levels(self):
# return self._log_levels
# helper functions to update config dict with custom cli options
def _update_config(config, modification):
if modification is None:
return config
for k, v in modification.items():
if v is not None:
_set_by_path(config, k, v)
return config
def _get_opt_name(flags):
for flg in flags:
if flg.startswith('--'):
return flg.replace('--', '')
return flags[0].replace('--', '')
def _set_by_path(tree, keys, value):
"""Set a value in a nested object in tree by sequence of keys."""
keys = keys.split(';')
_get_by_path(tree, keys[:-1])[keys[-1]] = value
def _get_by_path(tree, keys):
"""Access a nested object in tree by sequence of keys."""
return reduce(getitem, keys, tree)
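# Example (hypothetical config): given tree = {'optimizer': {'args': {'lr': 0.001}}},
# _set_by_path(tree, 'optimizer;args;lr', 0.01) updates the nested learning rate;
# this is how CLI overrides like '--lr' reach the JSON config via their 'target' keychain.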
|
the-stack_106_27183 | import numpy as np
from scipy.linalg import orth
from sklearn.preprocessing import normalize
def gen_union_of_subspaces(ambient_dim, subspace_dim, num_subspaces, num_points_per_subspace, noise_level=0.0):
"""This funtion generates a union of subspaces under random model, i.e.,
subspaces are independently and uniformly distributed in the ambient space,
data points are independently and uniformly distributed on the unit sphere of each subspace
Parameters
-----------
ambient_dim : int
Dimension of the ambient space
subspace_dim : int
Dimension of each subspace (all subspaces have the same dimension)
num_subspaces : int
Number of subspaces to be generated
num_points_per_subspace : int
Number of data points from each of the subspaces
noise_level : float
Amount of Gaussian noise on data
Returns
-------
data : shape (num_subspaces * num_points_per_subspace) by ambient_dim
Data matrix containing points drawn from a union of subspaces as its rows
label : shape (num_subspaces * num_points_per_subspace)
Membership of each data point to the subspace it lies in
"""
data = np.empty((num_points_per_subspace * num_subspaces, ambient_dim))
label = np.empty(num_points_per_subspace * num_subspaces, dtype=int)
for i in range(num_subspaces):
basis = np.random.normal(size=(ambient_dim, subspace_dim))
basis = orth(basis)
coeff = np.random.normal(size=(subspace_dim, num_points_per_subspace))
coeff = normalize(coeff, norm='l2', axis=0, copy=False)
data_per_subspace = np.matmul(basis, coeff).T
base_index = i * num_points_per_subspace
data[base_index:base_index + num_points_per_subspace, :] = data_per_subspace
label[base_index:base_index + num_points_per_subspace] = i
data += np.random.normal(size=(num_points_per_subspace * num_subspaces, ambient_dim)) * noise_level
return data, label
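# Example usage (a quick sanity check, not part of the original module):
#
#   data, label = gen_union_of_subspaces(ambient_dim=9, subspace_dim=3,
#                                        num_subspaces=5, num_points_per_subspace=50)
#   assert data.shape == (250, 9) and set(label) == set(range(5))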
|
the-stack_106_27184 | """Depth Measures Module.
This module includes different methods to order functional data,
from the center (larger values) outwards(smaller ones)."""
import itertools
import scipy.integrate
import numpy as np
from . import multivariate
from .multivariate import Depth
__author__ = "Amanda Hernando Bernabé"
__email__ = "[email protected]"
class IntegratedDepth(Depth):
r"""
Functional depth as the integral of a multivariate depth.
Args:
multivariate_depth (Depth): Multivariate depth to integrate.
By default it is the one used by Fraiman and Muniz, that is,
.. math::
D(x) = 1 - \left\lvert \frac{1}{2}- F(x)\right\rvert
Examples:
>>> import skfda
>>>
>>> data_matrix = [[1, 1, 2, 3, 2.5, 2],
... [0.5, 0.5, 1, 2, 1.5, 1],
... [-1, -1, -0.5, 1, 1, 0.5],
... [-0.5, -0.5, -0.5, -1, -1, -1]]
>>> grid_points = [0, 2, 4, 6, 8, 10]
>>> fd = skfda.FDataGrid(data_matrix, grid_points)
>>> depth = skfda.exploratory.depth.IntegratedDepth()
>>> depth(fd)
array([ 0.5 , 0.75 , 0.925, 0.875])
References:
Fraiman, R., & Muniz, G. (2001). Trimmed means for functional
data. Test, 10(2), 419–440. https://doi.org/10.1007/BF02595706
"""
def __init__(self, *,
multivariate_depth=multivariate._UnivariateFraimanMuniz()):
self.multivariate_depth = multivariate_depth
def fit(self, X, y=None):
self._domain_range = X.domain_range
self._grid_points = X.grid_points
self.multivariate_depth.fit(X.data_matrix)
return self
def predict(self, X):
pointwise_depth = self.multivariate_depth.predict(X.data_matrix)
interval_len = (self._domain_range[0][1]
- self._domain_range[0][0])
integrand = pointwise_depth
for d, s in zip(X.domain_range, X.grid_points):
integrand = scipy.integrate.simps(integrand,
x=s,
axis=1)
interval_len = d[1] - d[0]
integrand /= interval_len
return integrand
@property
def max(self):
return self.multivariate_depth.max
@property
def min(self):
return self.multivariate_depth.min
class ModifiedBandDepth(IntegratedDepth):
r"""
Implementation of Modified Band Depth for functional data.
The band depth of each sample is obtained by computing the fraction of time
its graph is contained in the bands determined by two sample curves.
In the case the fdatagrid :term:`domain` dimension is 2, instead of curves,
surfaces determine the bands. In larger dimensions, the hyperplanes
determine the bands.
Examples:
>>> import skfda
>>>
>>> data_matrix = [[1, 1, 2, 3, 2.5, 2],
... [0.5, 0.5, 1, 2, 1.5, 1],
... [-1, -1, -0.5, 1, 1, 0.5],
... [-0.5, -0.5, -0.5, -1, -1, -1]]
>>> grid_points = [0, 2, 4, 6, 8, 10]
>>> fd = skfda.FDataGrid(data_matrix, grid_points)
>>> depth = skfda.exploratory.depth.ModifiedBandDepth()
>>> values = depth(fd)
>>> values.round(2)
array([ 0.5 , 0.83, 0.73, 0.67])
References:
López-Pintado, S., & Romo, J. (2009). On the Concept of
Depth for Functional Data. Journal of the American Statistical
Association, 104(486), 718–734.
https://doi.org/10.1198/jasa.2009.0108
"""
def __init__(self):
super().__init__(multivariate_depth=multivariate.SimplicialDepth())
class BandDepth(Depth):
r"""
Implementation of Band Depth for functional data.
The band depth of each sample is obtained by computing the fraction of the
bands determined by two sample curves containing the whole graph of the
first one. In the case the fdatagrid :term:`domain` dimension is 2, instead
of curves, surfaces determine the bands. In larger dimensions, the
hyperplanes determine the bands.
Examples:
>>> import skfda
>>>
>>> data_matrix = [[1, 1, 2, 3, 2.5, 2],
... [0.5, 0.5, 1, 2, 1.5, 1],
... [-1, -1, -0.5, 1, 1, 0.5],
... [-0.5, -0.5, -0.5, -1, -1, -1]]
>>> grid_points = [0, 2, 4, 6, 8, 10]
>>> fd = skfda.FDataGrid(data_matrix, grid_points)
>>> depth = skfda.exploratory.depth.BandDepth()
>>> depth(fd)
array([ 0.5 , 0.83333333, 0.5 , 0.5 ])
References:
López-Pintado, S., & Romo, J. (2009). On the Concept of
Depth for Functional Data. Journal of the American Statistical
Association, 104(486), 718–734.
https://doi.org/10.1198/jasa.2009.0108
"""
def fit(self, X, y=None):
if X.dim_codomain != 1:
raise NotImplementedError("Band depth not implemented for vector "
"valued functions")
self._distribution = X
return self
def predict(self, X):
num_in = 0
n_total = 0
for f1, f2 in itertools.combinations(self._distribution, 2):
between_range_1 = (f1.data_matrix <= X.data_matrix) & (
X.data_matrix <= f2.data_matrix)
between_range_2 = (f2.data_matrix <= X.data_matrix) & (
X.data_matrix <= f1.data_matrix)
between_range = between_range_1 | between_range_2
num_in += np.all(between_range,
axis=tuple(range(1, X.data_matrix.ndim)))
n_total += 1
return num_in / n_total
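# For instance, with the 4 curves in the docstring example above, 6 pairs of curves
# are formed; the second curve lies entirely inside 5 of the 6 bands, so its depth
# is 5/6 ~= 0.83, while each of the other curves is contained in 3 of 6 bands (depth 0.5).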
|
the-stack_106_27186 | # -*- coding: UTF-8 -*-
# A part of NonVisual Desktop Access (NVDA)
# Copyright (C) 2007-2019 NV Access Limited, Peter Vágner, Renaud Paquay, Babbage B.V.
# This file is covered by the GNU General Public License.
# See the file COPYING for more details.
import os
import queue
from ctypes import (
c_short,
c_long,
c_int,
c_int64,
c_bool,
c_float,
c_char,
c_wchar,
c_wchar_p,
c_void_p,
Structure,
POINTER,
byref,
cdll,
windll,
CFUNCTYPE,
WinError,
create_string_buffer,
create_unicode_buffer
)
from ctypes.wintypes import BOOL, HWND, WCHAR
import time
import queueHandler
from logHandler import log
import winUser
import api
import eventHandler
import controlTypes
import NVDAObjects.JAB
import core
import textUtils
import NVDAHelper
import config
import globalVars
#: The path to the user's .accessibility.properties file, used
#: to enable JAB.
A11Y_PROPS_PATH = os.path.expanduser(r"~\.accessibility.properties")
#: The content of ".accessibility.properties" when JAB is enabled.
A11Y_PROPS_CONTENT = (
"assistive_technologies=com.sun.java.accessibility.AccessBridge\n"
"screen_magnifier_present=true\n"
)
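# A sketch of how these constants could be used to enable the bridge for the current
# user (an illustration only; the actual enabling logic is not shown in this module):
#
#   def _writeA11yProps():
#       if not os.path.isfile(A11Y_PROPS_PATH):
#           with open(A11Y_PROPS_PATH, "w") as props:
#               props.write(A11Y_PROPS_CONTENT)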
#Some utility functions to help with function defines
def _errcheck(res, func, args):
if not res:
raise RuntimeError("Result %s" % res)
return res
def _fixBridgeFunc(restype,name,*argtypes,**kwargs):
try:
func=getattr(bridgeDll,name)
except AttributeError:
log.warning("%s not found in Java Access Bridge dll"%name)
return
func.restype=restype
func.argtypes=argtypes
if kwargs.get('errcheck'):
func.errcheck=_errcheck
bridgeDll = None
#Definitions of access bridge types, structs and prototypes
jchar=c_wchar
jint=c_int
jfloat=c_float
jboolean=c_bool
class JOBJECT64(c_int64):
pass
AccessibleTable=JOBJECT64
MAX_STRING_SIZE=1024
SHORT_STRING_SIZE=256
class AccessBridgeVersionInfo(Structure):
_fields_=[
('VMVersion',WCHAR*SHORT_STRING_SIZE),
('bridgeJavaClassVersion',WCHAR*SHORT_STRING_SIZE),
('bridgeJavaDLLVersion',WCHAR*SHORT_STRING_SIZE),
('bridgeWinDLLVersion',WCHAR*SHORT_STRING_SIZE),
]
class AccessibleContextInfo(Structure):
_fields_=[
('name',WCHAR*MAX_STRING_SIZE),
('description',WCHAR*MAX_STRING_SIZE),
('role',WCHAR*SHORT_STRING_SIZE),
('role_en_US',WCHAR*SHORT_STRING_SIZE),
('states',WCHAR*SHORT_STRING_SIZE),
('states_en_US',WCHAR*SHORT_STRING_SIZE),
('indexInParent',jint),
('childrenCount',jint),
('x',jint),
('y',jint),
('width',jint),
('height',jint),
('accessibleComponent',BOOL),
('accessibleAction',BOOL),
('accessibleSelection',BOOL),
('accessibleText',BOOL),
('accessibleValue',BOOL),
]
class AccessibleTextInfo(Structure):
_fields_=[
('charCount',jint),
('caretIndex',jint),
('indexAtPoint',jint),
]
class AccessibleTextItemsInfo(Structure):
_fields_=[
('letter',WCHAR),
('word',WCHAR*SHORT_STRING_SIZE),
('sentence',WCHAR*MAX_STRING_SIZE),
]
class AccessibleTextSelectionInfo(Structure):
_fields_=[
('selectionStartIndex',jint),
('selectionEndIndex',jint),
('selectedText',WCHAR*MAX_STRING_SIZE),
]
class AccessibleTextRectInfo(Structure):
_fields_=[
('x',jint),
('y',jint),
('width',jint),
('height',jint),
]
class AccessibleTextAttributesInfo(Structure):
_fields_=[
('bold',BOOL),
('italic',BOOL),
('underline',BOOL),
('strikethrough',BOOL),
('superscript',BOOL),
('subscript',BOOL),
('backgroundColor',WCHAR*SHORT_STRING_SIZE),
('foregroundColor',WCHAR*SHORT_STRING_SIZE),
('fontFamily',WCHAR*SHORT_STRING_SIZE),
('fontSize',jint),
('alignment',jint),
('bidiLevel',jint),
('firstLineIndent',jfloat),
('LeftIndent',jfloat),
('rightIndent',jfloat),
('lineSpacing',jfloat),
('spaceAbove',jfloat),
('spaceBelow',jfloat),
('fullAttributesString',WCHAR*MAX_STRING_SIZE),
]
MAX_RELATION_TARGETS = 25
MAX_RELATIONS = 5
class AccessibleRelationInfo(Structure):
_fields_ = [
("key", WCHAR * SHORT_STRING_SIZE),
("targetCount", jint),
("targets", JOBJECT64 * MAX_RELATION_TARGETS),
]
class AccessibleRelationSetInfo(Structure):
_fields_ = [
("relationCount", jint),
("relations", AccessibleRelationInfo * MAX_RELATIONS),
]
MAX_ACTION_INFO = 256
MAX_ACTIONS_TO_DO = 32
class AccessibleActionInfo(Structure):
_fields_ = (
("name", c_wchar * SHORT_STRING_SIZE),
)
class AccessibleActions(Structure):
_fields_ = (
("actionsCount", jint),
("actionInfo", AccessibleActionInfo * MAX_ACTION_INFO),
)
class AccessibleActionsToDo(Structure):
_fields_ = (
("actionsCount", jint),
("actions", AccessibleActionInfo * MAX_ACTIONS_TO_DO),
)
class AccessibleTableInfo(Structure):
_fields_=[
('caption',JOBJECT64),
('summary',JOBJECT64),
('rowCount',jint),
('columnCount',jint),
('accessibleContext',JOBJECT64),
('accessibleTable',JOBJECT64),
]
class AccessibleTableCellInfo(Structure):
_fields_=[
('accessibleContext',JOBJECT64),
('index',jint),
('row',jint),
('column',jint),
('rowExtent',jint),
('columnExtent',jint),
('isSelected',jboolean),
]
MAX_KEY_BINDINGS=50
ACCESSIBLE_SHIFT_KEYSTROKE=1
ACCESSIBLE_CONTROL_KEYSTROKE=2
ACCESSIBLE_META_KEYSTROKE=4
ACCESSIBLE_ALT_KEYSTROKE=8
ACCESSIBLE_ALT_GRAPH_KEYSTROKE=16
ACCESSIBLE_BUTTON1_KEYSTROKE=32
ACCESSIBLE_BUTTON2_KEYSTROKE=64
ACCESSIBLE_BUTTON3_KEYSTROKE=128
class AccessibleKeyBindingInfo(Structure):
_fields_=[
('character',jchar),
('modifiers',jint),
]
class AccessibleKeyBindings(Structure):
_fields_=[
('keyBindingsCount',c_int),
('keyBindingInfo',AccessibleKeyBindingInfo*MAX_KEY_BINDINGS),
]
AccessBridge_FocusGainedFP=CFUNCTYPE(None,c_long,JOBJECT64,JOBJECT64)
AccessBridge_PropertyNameChangeFP=CFUNCTYPE(None,c_long,JOBJECT64,JOBJECT64,c_wchar_p,c_wchar_p)
AccessBridge_PropertyDescriptionChangeFP=CFUNCTYPE(None,c_long,JOBJECT64,JOBJECT64,c_wchar_p,c_wchar_p)
AccessBridge_PropertyValueChangeFP=CFUNCTYPE(None,c_long,JOBJECT64,JOBJECT64,c_wchar_p,c_wchar_p)
AccessBridge_PropertyStateChangeFP=CFUNCTYPE(None,c_long,JOBJECT64,JOBJECT64,c_wchar_p,c_wchar_p)
AccessBridge_PropertyCaretChangeFP=CFUNCTYPE(None,c_long,JOBJECT64,JOBJECT64,c_int,c_int)
AccessBridge_PropertyActiveDescendentChangeFP=CFUNCTYPE(None,c_long,JOBJECT64,JOBJECT64,JOBJECT64,JOBJECT64)
def _fixBridgeFuncs():
"""Appropriately set the return and argument types of all the access bridge dll functions
"""
_fixBridgeFunc(None,'Windows_run')
_fixBridgeFunc(None,'setFocusGainedFP',c_void_p)
_fixBridgeFunc(None,'setPropertyNameChangeFP',c_void_p)
_fixBridgeFunc(None,'setPropertyDescriptionChangeFP',c_void_p)
_fixBridgeFunc(None,'setPropertyValueChangeFP',c_void_p)
_fixBridgeFunc(None,'setPropertyStateChangeFP',c_void_p)
_fixBridgeFunc(None,'setPropertyCaretChangeFP',c_void_p)
_fixBridgeFunc(None,'setPropertyActiveDescendentChangeFP',c_void_p)
_fixBridgeFunc(None,'releaseJavaObject',c_long,JOBJECT64)
_fixBridgeFunc(BOOL,'getVersionInfo',POINTER(AccessBridgeVersionInfo),errcheck=True)
_fixBridgeFunc(BOOL,'isJavaWindow',HWND)
_fixBridgeFunc(BOOL,'isSameObject',c_long,JOBJECT64,JOBJECT64)
_fixBridgeFunc(BOOL,'getAccessibleContextFromHWND',HWND,POINTER(c_long),POINTER(JOBJECT64),errcheck=True)
_fixBridgeFunc(HWND,'getHWNDFromAccessibleContext',c_long,JOBJECT64,errcheck=True)
_fixBridgeFunc(BOOL,'getAccessibleContextAt',c_long,JOBJECT64,jint,jint,POINTER(JOBJECT64),errcheck=True)
_fixBridgeFunc(BOOL,'getAccessibleContextWithFocus',HWND,POINTER(c_long),POINTER(JOBJECT64),errcheck=True)
_fixBridgeFunc(BOOL,'getAccessibleContextInfo',c_long,JOBJECT64,POINTER(AccessibleContextInfo),errcheck=True)
_fixBridgeFunc(JOBJECT64,'getAccessibleChildFromContext',c_long,JOBJECT64,jint,errcheck=True)
_fixBridgeFunc(JOBJECT64,'getAccessibleParentFromContext',c_long,JOBJECT64)
_fixBridgeFunc(JOBJECT64,'getParentWithRole',c_long,JOBJECT64,POINTER(c_wchar))
_fixBridgeFunc(BOOL,'getAccessibleRelationSet',c_long,JOBJECT64,POINTER(AccessibleRelationSetInfo),errcheck=True)
_fixBridgeFunc(BOOL,'getAccessibleTextInfo',c_long,JOBJECT64,POINTER(AccessibleTextInfo),jint,jint,errcheck=True)
_fixBridgeFunc(BOOL,'getAccessibleTextItems',c_long,JOBJECT64,POINTER(AccessibleTextItemsInfo),jint,errcheck=True)
_fixBridgeFunc(BOOL,'getAccessibleTextSelectionInfo',c_long,JOBJECT64,POINTER(AccessibleTextSelectionInfo),errcheck=True)
_fixBridgeFunc(BOOL,'getAccessibleTextAttributes',c_long,JOBJECT64,jint,POINTER(AccessibleTextAttributesInfo),errcheck=True)
_fixBridgeFunc(
BOOL,
'getAccessibleTextRect',
c_long,
JOBJECT64,
POINTER(AccessibleTextRectInfo),
jint,
errcheck=True
)
_fixBridgeFunc(BOOL,'getAccessibleTextLineBounds',c_long,JOBJECT64,jint,POINTER(jint),POINTER(jint),errcheck=True)
_fixBridgeFunc(BOOL,'getAccessibleTextRange',c_long,JOBJECT64,jint,jint,POINTER(c_char),c_short,errcheck=True)
_fixBridgeFunc(BOOL,'getCurrentAccessibleValueFromContext',c_long,JOBJECT64,POINTER(c_wchar),c_short,errcheck=True)
_fixBridgeFunc(BOOL,'selectTextRange',c_long,JOBJECT64,c_int,c_int,errcheck=True)
_fixBridgeFunc(BOOL,'getTextAttributesInRange',c_long,JOBJECT64,c_int,c_int,POINTER(AccessibleTextAttributesInfo),POINTER(c_short),errcheck=True)
_fixBridgeFunc(JOBJECT64,'getTopLevelObject',c_long,JOBJECT64,errcheck=True)
_fixBridgeFunc(c_int,'getObjectDepth',c_long,JOBJECT64)
_fixBridgeFunc(JOBJECT64,'getActiveDescendent',c_long,JOBJECT64)
_fixBridgeFunc(BOOL,'requestFocus',c_long,JOBJECT64,errcheck=True)
_fixBridgeFunc(BOOL,'setCaretPosition',c_long,JOBJECT64,c_int,errcheck=True)
_fixBridgeFunc(BOOL,'getCaretLocation',c_long,JOBJECT64,POINTER(AccessibleTextRectInfo),jint,errcheck=True)
_fixBridgeFunc(BOOL,'getAccessibleActions',c_long,JOBJECT64,POINTER(AccessibleActions),errcheck=True)
_fixBridgeFunc(BOOL,'doAccessibleActions',c_long,JOBJECT64,POINTER(AccessibleActionsToDo),POINTER(jint),errcheck=True)
_fixBridgeFunc(BOOL,'getAccessibleTableInfo',c_long,JOBJECT64,POINTER(AccessibleTableInfo))
_fixBridgeFunc(BOOL,'getAccessibleTableCellInfo',c_long,AccessibleTable,jint,jint,POINTER(AccessibleTableCellInfo),errcheck=True)
_fixBridgeFunc(BOOL,'getAccessibleTableRowHeader',c_long,JOBJECT64,POINTER(AccessibleTableInfo))
_fixBridgeFunc(BOOL,'getAccessibleTableColumnHeader',c_long,JOBJECT64,POINTER(AccessibleTableInfo))
_fixBridgeFunc(JOBJECT64,'getAccessibleTableRowDescription',c_long,JOBJECT64,jint)
_fixBridgeFunc(JOBJECT64,'getAccessibleTableColumnDescription',c_long,JOBJECT64,jint)
_fixBridgeFunc(jint,'getAccessibleTableRow',c_long,AccessibleTable,jint)
_fixBridgeFunc(jint,'getAccessibleTableColumn',c_long,AccessibleTable,jint)
_fixBridgeFunc(jint,'getAccessibleTableIndex',c_long,AccessibleTable,jint,jint)
_fixBridgeFunc(BOOL,'getAccessibleKeyBindings',c_long,JOBJECT64,POINTER(AccessibleKeyBindings),errcheck=True)
#NVDA specific code
isRunning=False
# Cache of the last active window handle for a given JVM ID. In theory, this
# cache should not be needed, as it should always be possible to retrieve the
# window handle of a given accessible context by calling getTopLevelObject then
# getHWNDFromAccessibleContext. However, getTopLevelObject sometimes returns
# accessible contexts that make getHWNDFromAccessibleContext fail. To workaround
# the issue, we use this cache as a fallback when either getTopLevelObject or
# getHWNDFromAccessibleContext fails.
vmIDsToWindowHandles={}
internalFunctionQueue=queue.Queue(1000)
internalFunctionQueue.__name__="JABHandler.internalFunctionQueue"
def internalQueueFunction(func,*args,**kwargs):
internalFunctionQueue.put_nowait((func,args,kwargs))
core.requestPump()
def internal_getWindowHandleFromAccContext(vmID,accContext):
try:
topAC=bridgeDll.getTopLevelObject(vmID,accContext)
try:
return bridgeDll.getHWNDFromAccessibleContext(vmID,topAC)
finally:
bridgeDll.releaseJavaObject(vmID,topAC)
except:
return None
def getWindowHandleFromAccContext(vmID,accContext):
hwnd=internal_getWindowHandleFromAccContext(vmID,accContext)
if hwnd:
vmIDsToWindowHandles[vmID]=hwnd
return hwnd
else:
return vmIDsToWindowHandles.get(vmID)
class JABContext(object):
def __init__(self,hwnd=None,vmID=None,accContext=None):
if hwnd and not vmID:
vmID=c_long()
accContext=JOBJECT64()
bridgeDll.getAccessibleContextFromHWND(hwnd,byref(vmID),byref(accContext))
#Record this vm ID and window handle for later use with other objects
vmID=vmID.value
vmIDsToWindowHandles[vmID]=hwnd
elif vmID and not hwnd:
hwnd = getWindowHandleFromAccContext(vmID,accContext)
self.hwnd=hwnd
self.vmID=vmID
self.accContext=accContext
def __del__(self):
if isRunning:
try:
bridgeDll.releaseJavaObject(self.vmID,self.accContext)
except:
log.debugWarning("Error releasing java object",exc_info=True)
def __eq__(self,jabContext):
if self.vmID==jabContext.vmID and bridgeDll.isSameObject(self.vmID,self.accContext,jabContext.accContext):
return True
else:
return False
# As __eq__ was defined on this class, we must provide __hash__ to remain hashable.
# The default hash implementation is fine for our purposes.
def __hash__(self):
return super().__hash__()
def __ne__(self,jabContext):
if self.vmID!=jabContext.vmID or not bridgeDll.isSameObject(self.vmID,self.accContext,jabContext.accContext):
return True
else:
return False
def getVersionInfo(self):
info=AccessBridgeVersionInfo()
bridgeDll.getVersionInfo(self.vmID,byref(info))
return info
def getObjectDepth(self):
return bridgeDll.getObjectDepth(self.vmID,self.accContext)
def getAccessibleContextInfo(self):
info=AccessibleContextInfo()
bridgeDll.getAccessibleContextInfo(self.vmID,self.accContext,byref(info))
return info
def getAccessibleTextInfo(self,x,y):
textInfo=AccessibleTextInfo()
bridgeDll.getAccessibleTextInfo(self.vmID,self.accContext,byref(textInfo),x,y)
return textInfo
def getAccessibleTextItems(self,index):
textItemsInfo=AccessibleTextItemsInfo()
bridgeDll.getAccessibleTextItems(self.vmID,self.accContext,byref(textItemsInfo),index)
return textItemsInfo
def getAccessibleTextSelectionInfo(self):
textSelectionInfo=AccessibleTextSelectionInfo()
bridgeDll.getAccessibleTextSelectionInfo(self.vmID,self.accContext,byref(textSelectionInfo))
return textSelectionInfo
def getAccessibleTextRange(self,start,end):
length=((end+1)-start)
if length<=0:
return u""
# Use a string buffer, as from an unicode buffer, we can't get the raw data.
buf = create_string_buffer((length +1) * 2)
bridgeDll.getAccessibleTextRange(self.vmID, self.accContext, start, end, buf, length)
return textUtils.getTextFromRawBytes(buf.raw, numChars=length, encoding=textUtils.WCHAR_ENCODING)
def getAccessibleTextLineBounds(self,index):
index=max(index,0)
log.debug("lineBounds: index %s"%index)
#Java returns end as the last character, not end as past the last character
startIndex=c_int()
endIndex=c_int()
bridgeDll.getAccessibleTextLineBounds(self.vmID,self.accContext,index,byref(startIndex),byref(endIndex))
start=startIndex.value
end=endIndex.value
log.debug("line bounds: start %s, end %s"%(start,end))
if end<start or start<0:
# Invalid or empty line.
return (0,-1)
ok=False
# OpenOffice sometimes returns offsets encompassing more than one line, so try to narrow them down.
# Try to retract the end offset.
while not ok:
bridgeDll.getAccessibleTextLineBounds(self.vmID,self.accContext,end,byref(startIndex),byref(endIndex))
tempStart=max(startIndex.value,0)
tempEnd=max(endIndex.value,0)
log.debug("line bounds: tempStart %s, tempEnd %s"%(tempStart,tempEnd))
if tempStart>(index+1):
# This line starts after the requested index, so set end to point at the line before.
end=tempStart-1
else:
ok=True
ok=False
# Try to retract the start.
while not ok:
bridgeDll.getAccessibleTextLineBounds(self.vmID,self.accContext,start,byref(startIndex),byref(endIndex))
tempStart=max(startIndex.value,0)
tempEnd=max(endIndex.value,0)
log.debug("line bounds: tempStart %s, tempEnd %s"%(tempStart,tempEnd))
if tempEnd<(index-1):
# This line ends before the requested index, so set start to point at the line after.
start=tempEnd+1
else:
ok=True
log.debug("line bounds: returning %s, %s"%(start,end))
return (start,end)
def getAccessibleParentFromContext(self):
accContext=bridgeDll.getAccessibleParentFromContext(self.vmID,self.accContext)
if accContext:
return self.__class__(self.hwnd,self.vmID,accContext)
else:
return None
def getAccessibleParentWithRole(self, role):
accContext=bridgeDll.getParentWithRole(self.vmID,self.accContext, role)
if accContext:
return self.__class__(self.hwnd,self.vmID,accContext)
else:
return None
def getAccessibleChildFromContext(self,index):
accContext=bridgeDll.getAccessibleChildFromContext(self.vmID,self.accContext,index)
if accContext:
return self.__class__(self.hwnd,self.vmID,accContext)
else:
return None
def getActiveDescendent(self):
accContext=bridgeDll.getActiveDescendent(self.vmID,self.accContext)
if accContext:
return self.__class__(self.hwnd,self.vmID,accContext)
else:
return None
def getAccessibleContextAt(self,x,y):
newAccContext=JOBJECT64()
res=bridgeDll.getAccessibleContextAt(self.vmID,self.accContext,x,y,byref(newAccContext))
if not res or not newAccContext:
return None
if not bridgeDll.isSameObject(self.vmID,newAccContext,self.accContext):
return self.__class__(self.hwnd,self.vmID,newAccContext)
elif newAccContext!=self.accContext:
bridgeDll.releaseJavaObject(self.vmID,newAccContext)
return None
def getCurrentAccessibleValueFromContext(self):
buf=create_unicode_buffer(SHORT_STRING_SIZE+1)
bridgeDll.getCurrentAccessibleValueFromContext(self.vmID,self.accContext,buf,SHORT_STRING_SIZE)
return buf.value
def selectTextRange(self,start,end):
		bridgeDll.selectTextRange(self.vmID,self.accContext,start,end)
def setCaretPosition(self,offset):
bridgeDll.setCaretPosition(self.vmID,self.accContext,offset)
def getTextAttributesInRange(self, startIndex, endIndex):
attributes = AccessibleTextAttributesInfo()
length = c_short()
bridgeDll.getTextAttributesInRange(self.vmID, self.accContext, startIndex, endIndex, byref(attributes), byref(length))
return attributes, length.value
def getAccessibleTextRect(self, index):
rect = AccessibleTextRectInfo()
bridgeDll.getAccessibleTextRect(self.vmID, self.accContext, byref(rect), index)
return rect
def getAccessibleRelationSet(self):
relations = AccessibleRelationSetInfo()
bridgeDll.getAccessibleRelationSet(self.vmID, self.accContext, byref(relations))
return relations
def getAccessibleTableInfo(self):
info=AccessibleTableInfo()
if bridgeDll.getAccessibleTableInfo(self.vmID,self.accContext,byref(info)):
# #6992: Querying the hwnd for table related objects can cause the app to crash.
# A table is almost certainly contained within a single hwnd,
# so just pass the hwnd for the querying object.
info.jabCaption=JABContext(hwnd=self.hwnd,vmID=self.vmID,accContext=info.caption) if info.caption else None
info.jabSummary=JABContext(hwnd=self.hwnd,vmID=self.vmID,accContext=info.summary) if info.summary else None
info.jabContext=JABContext(hwnd=self.hwnd,vmID=self.vmID,accContext=info.accessibleContext) if info.accessibleContext else None
info.jabTable=JABContext(hwnd=self.hwnd,vmID=self.vmID,accContext=info.accessibleTable) if info.accessibleTable else None
return info
def getAccessibleTableCellInfo(self,row,col):
info=AccessibleTableCellInfo()
if bridgeDll.getAccessibleTableCellInfo(self.vmID,self.accContext,row,col,byref(info)):
# #6992: Querying the hwnd for table related objects can cause the app to crash.
# A table is almost certainly contained within a single hwnd,
# so just pass the hwnd for the querying object.
info.jabContext=JABContext(hwnd=self.hwnd,vmID=self.vmID,accContext=info.accessibleContext) if info.accessibleContext else None
return info
def getAccessibleTableRow(self,index):
return bridgeDll.getAccessibleTableRow(self.vmID,self.accContext,index)
def getAccessibleTableColumn(self,index):
return bridgeDll.getAccessibleTableColumn(self.vmID,self.accContext,index)
def getAccessibleTableRowHeader(self):
info=AccessibleTableInfo()
if bridgeDll.getAccessibleTableRowHeader(self.vmID,self.accContext,byref(info)):
# #6992: Querying the hwnd for table related objects can cause the app to crash.
# A table is almost certainly contained within a single hwnd,
# so just pass the hwnd for the querying object.
info.jabCaption=JABContext(hwnd=self.hwnd,vmID=self.vmID,accContext=info.caption) if info.caption else None
info.jabSummary=JABContext(hwnd=self.hwnd,vmID=self.vmID,accContext=info.summary) if info.summary else None
info.jabContext=JABContext(hwnd=self.hwnd,vmID=self.vmID,accContext=info.accessibleContext) if info.accessibleContext else None
info.jabTable=JABContext(hwnd=self.hwnd,vmID=self.vmID,accContext=info.accessibleTable) if info.accessibleTable else None
return info
def getAccessibleTableRowDescription(self,row):
accContext=bridgeDll.getAccessibleTableRowDescription(self.vmID,self.accContext,row)
if accContext:
# #6992: Querying the hwnd for table related objects can cause the app to crash.
# A table is almost certainly contained within a single hwnd,
# so just pass the hwnd for the querying object.
return JABContext(hwnd=self.hwnd, vmID=self.vmID, accContext=accContext)
def getAccessibleTableColumnHeader(self):
info=AccessibleTableInfo()
if bridgeDll.getAccessibleTableColumnHeader(self.vmID,self.accContext,byref(info)):
# #6992: Querying the hwnd for table related objects can cause the app to crash.
# A table is almost certainly contained within a single hwnd,
# so just pass the hwnd for the querying object.
info.jabCaption=JABContext(hwnd=self.hwnd,vmID=self.vmID,accContext=info.caption) if info.caption else None
info.jabSummary=JABContext(hwnd=self.hwnd,vmID=self.vmID,accContext=info.summary) if info.summary else None
info.jabContext=JABContext(hwnd=self.hwnd,vmID=self.vmID,accContext=info.accessibleContext) if info.accessibleContext else None
info.jabTable=JABContext(hwnd=self.hwnd,vmID=self.vmID,accContext=info.accessibleTable) if info.accessibleTable else None
return info
def getAccessibleTableColumnDescription(self,column):
accContext=bridgeDll.getAccessibleTableColumnDescription(self.vmID,self.accContext,column)
if accContext:
# #6992: Querying the hwnd for table related objects can cause the app to crash.
# A table is almost certainly contained within a single hwnd,
# so just pass the hwnd for the querying object.
return JABContext(hwnd=self.hwnd, vmID=self.vmID, accContext=accContext)
def getAccessibleKeyBindings(self):
bindings=AccessibleKeyBindings()
if bridgeDll.getAccessibleKeyBindings(self.vmID,self.accContext,byref(bindings)):
return bindings
@AccessBridge_FocusGainedFP
def internal_event_focusGained(vmID, event,source):
hwnd=getWindowHandleFromAccContext(vmID,source)
internalQueueFunction(event_gainFocus,vmID,source,hwnd)
bridgeDll.releaseJavaObject(vmID,event)
def event_gainFocus(vmID,accContext,hwnd):
jabContext=JABContext(hwnd=hwnd,vmID=vmID,accContext=accContext)
if not winUser.isDescendantWindow(winUser.getForegroundWindow(),jabContext.hwnd):
return
focus=eventHandler.lastQueuedFocusObject
if (isinstance(focus,NVDAObjects.JAB.JAB) and focus.jabContext==jabContext):
return
obj=NVDAObjects.JAB.JAB(jabContext=jabContext)
if obj.role==controlTypes.ROLE_UNKNOWN:
return
eventHandler.queueEvent("gainFocus",obj)
@AccessBridge_PropertyActiveDescendentChangeFP
def internal_event_activeDescendantChange(vmID, event,source,oldDescendant,newDescendant):
hwnd=getWindowHandleFromAccContext(vmID,source)
internalQueueFunction(event_gainFocus,vmID,newDescendant,hwnd)
for accContext in [event,oldDescendant]:
bridgeDll.releaseJavaObject(vmID,accContext)
@AccessBridge_PropertyNameChangeFP
def event_nameChange(vmID,event,source,oldVal,newVal):
jabContext=JABContext(vmID=vmID,accContext=source)
focus=api.getFocusObject()
if isinstance(focus, NVDAObjects.JAB.JAB) and focus.jabContext == jabContext:
obj = focus
else:
obj = NVDAObjects.JAB.JAB(jabContext=jabContext)
if obj:
eventHandler.queueEvent("nameChange", obj)
bridgeDll.releaseJavaObject(vmID,event)
@AccessBridge_PropertyDescriptionChangeFP
def event_descriptionChange(vmID,event,source,oldVal,newVal):
jabContext=JABContext(vmID=vmID,accContext=source)
focus=api.getFocusObject()
if isinstance(focus, NVDAObjects.JAB.JAB) and focus.jabContext == jabContext:
obj = focus
else:
obj = NVDAObjects.JAB.JAB(jabContext=jabContext)
if obj:
eventHandler.queueEvent("descriptionChange", obj)
bridgeDll.releaseJavaObject(vmID,event)
@AccessBridge_PropertyValueChangeFP
def event_valueChange(vmID,event,source,oldVal,newVal):
jabContext=JABContext(vmID=vmID,accContext=source)
focus=api.getFocusObject()
if isinstance(focus, NVDAObjects.JAB.JAB) and focus.jabContext == jabContext:
obj = focus
else:
obj = NVDAObjects.JAB.JAB(jabContext=jabContext)
if obj:
eventHandler.queueEvent("valueChange", obj)
bridgeDll.releaseJavaObject(vmID,event)
@AccessBridge_PropertyStateChangeFP
def internal_event_stateChange(vmID,event,source,oldState,newState):
internalQueueFunction(event_stateChange,vmID,source,oldState,newState)
bridgeDll.releaseJavaObject(vmID,event)
def event_stateChange(vmID,accContext,oldState,newState):
jabContext=JABContext(vmID=vmID,accContext=accContext)
focus=api.getFocusObject()
	# For broken tabs and menus, we need to watch for things being selected and pretend it's a focus change
stateList=newState.split(',')
if "focused" in stateList or "selected" in stateList:
obj=NVDAObjects.JAB.JAB(jabContext=jabContext)
if not obj:
return
if focus!=obj and eventHandler.lastQueuedFocusObject!=obj and obj.role in (controlTypes.ROLE_MENUITEM,controlTypes.ROLE_TAB,controlTypes.ROLE_MENU):
eventHandler.queueEvent("gainFocus",obj)
return
if isinstance(focus,NVDAObjects.JAB.JAB) and focus.jabContext==jabContext:
obj=focus
else:
obj=NVDAObjects.JAB.JAB(jabContext=jabContext)
if not obj:
return
eventHandler.queueEvent("stateChange",obj)
@AccessBridge_PropertyCaretChangeFP
def internal_event_caretChange(vmID, event,source,oldPos,newPos):
hwnd=getWindowHandleFromAccContext(vmID,source)
if oldPos<0 and newPos>=0:
internalQueueFunction(event_gainFocus,vmID,source,hwnd)
else:
internalQueueFunction(event_caret,vmID,source,hwnd)
bridgeDll.releaseJavaObject(vmID,event)
def event_caret(vmID, accContext, hwnd):
jabContext = JABContext(hwnd=hwnd, vmID=vmID, accContext=accContext)
focus = api.getFocusObject()
if isinstance(focus, NVDAObjects.JAB.JAB) and focus.jabContext == jabContext:
obj = focus
else:
obj = NVDAObjects.JAB.JAB(jabContext=jabContext)
if not obj:
return
eventHandler.queueEvent("caret", obj)
def event_enterJavaWindow(hwnd):
internalQueueFunction(enterJavaWindow_helper,hwnd)
def enterJavaWindow_helper(hwnd):
vmID=c_long()
accContext=JOBJECT64()
timeout=time.time()+0.2
while time.time()<timeout and not eventHandler.isPendingEvents("gainFocus"):
try:
bridgeDll.getAccessibleContextWithFocus(hwnd,byref(vmID),byref(accContext))
except:
pass
if vmID and accContext:
break
time.sleep(0.01)
if not vmID or not accContext:
try:
bridgeDll.getAccessibleContextFromHWND(hwnd,byref(vmID),byref(accContext))
except:
return
vmID=vmID.value
vmIDsToWindowHandles[vmID]=hwnd
lastFocus=eventHandler.lastQueuedFocusObject
if isinstance(lastFocus,NVDAObjects.JAB.JAB) and lastFocus.windowHandle==hwnd:
return
event_gainFocus(vmID,accContext,hwnd)
def isJavaWindow(hwnd):
if not bridgeDll or not isRunning:
return False
return bridgeDll.isJavaWindow(hwnd)
def isBridgeEnabled():
try:
data = open(A11Y_PROPS_PATH, "rt").read()
except OSError:
return False
return data == A11Y_PROPS_CONTENT
def enableBridge():
try:
props = open(A11Y_PROPS_PATH, "wt")
props.write(A11Y_PROPS_CONTENT)
log.info("Enabled Java Access Bridge for user")
except OSError:
log.warning("Couldn't enable Java Access Bridge for user", exc_info=True)
def initialize():
global bridgeDll, isRunning
try:
bridgeDll = cdll.LoadLibrary(
os.path.join(NVDAHelper.versionedLibPath, "windowsaccessbridge-32.dll"))
except WindowsError:
raise NotImplementedError("dll not available")
_fixBridgeFuncs()
if (
not globalVars.appArgs.secure and config.isInstalledCopy()
and not isBridgeEnabled()
):
enableBridge()
# Accept wm_copydata and any wm_user messages from other processes even if running with higher privileges
if not windll.user32.ChangeWindowMessageFilter(winUser.WM_COPYDATA, 1):
raise WinError()
for msg in range(winUser.WM_USER + 1, 0xffff):
if not windll.user32.ChangeWindowMessageFilter(msg, 1):
raise WinError()
bridgeDll.Windows_run()
# Register java events
bridgeDll.setFocusGainedFP(internal_event_focusGained)
bridgeDll.setPropertyActiveDescendentChangeFP(internal_event_activeDescendantChange)
bridgeDll.setPropertyNameChangeFP(event_nameChange)
bridgeDll.setPropertyDescriptionChangeFP(event_descriptionChange)
bridgeDll.setPropertyValueChangeFP(event_valueChange)
bridgeDll.setPropertyStateChangeFP(internal_event_stateChange)
bridgeDll.setPropertyCaretChangeFP(internal_event_caretChange)
isRunning=True
def pumpAll():
if isRunning:
queueHandler.flushQueue(internalFunctionQueue)
def terminate():
global isRunning, bridgeDll
if not bridgeDll or not isRunning:
return
bridgeDll.setFocusGainedFP(None)
bridgeDll.setPropertyActiveDescendentChangeFP(None)
bridgeDll.setPropertyStateChangeFP(None)
bridgeDll.setPropertyCaretChangeFP(None)
h=bridgeDll._handle
bridgeDll=None
windll.kernel32.FreeLibrary(h)
isRunning=False
|
the-stack_106_27188 | from __future__ import unicode_literals
import os
import codecs
from subprocess import call
from SourceFile import SourceFile
class ReconstructionBlock:
def __init__(self, name, nextBlock=None):
self.name = name
self.nextBlock = nextBlock
os.makedirs("output/"+self.name)
def process(self, project):
print("[CON] Making dir: output/%s/%s" %
(self.name, project.projectId))
os.makedirs("output/%s/%s" % (self.name, project.projectId))
for f in project.files:
if len(f.name.split('/')) > 1:
theDir = "output/%s/%s/%s" % (self.name,
project.projectId, f.name[:f.name.rfind('/')])
try:
os.stat(theDir)
except:
os.makedirs(theDir)
print("[CON] making subdirs %s" % theDir)
with codecs.open("output/%s/%s/%s" % (self.name, project.projectId, f.name), "w+", encoding="utf-8") as output:
#print("[CON] call print compile output with: src: %s mastereventid: %s" % (str(f.sourceFileId), str(f.masterEventId)))
# gives searching rest as content (useless)
#call(["/tools/nccb/bin/print-compile-input", "/data/compile-inputs", str(f.sourceFileId), str(f.masterEventId)], stdout=output)
# slower but hopefully better
print("[CON] call print source state with: src: %s mastereventid: %s" % (str(f.sourceFileId), str(f.masterEventId)))
call(["/tools/nccb/bin/print-source-state", str(f.sourceFileId), str(f.masterEventId)], stdout=output)
if self.nextBlock is not None:
self.nextBlock.process(project)
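

# A minimal usage sketch, assuming `project` objects that expose .projectId and
# .files (each with .name, .sourceFileId and .masterEventId) as accessed in
# process() above; load_projects() below is a hypothetical loader, not a real API.
if __name__ == "__main__":
    pipeline = ReconstructionBlock("raw", nextBlock=ReconstructionBlock("final"))
    # for project in load_projects():
    #     pipeline.process(project)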
|
the-stack_106_27191 | # Copyright 2021 solo-learn development team.
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies
# or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
# FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import os
from pathlib import Path
from typing import Callable, Optional, Tuple, Union
import torchvision
from torch import nn
from torch.utils.data import DataLoader, Dataset
from torchvision import transforms
from torchvision.datasets import STL10, ImageFolder
def build_custom_pipeline():
"""Builds augmentation pipelines for custom data.
If you want to do exoteric augmentations, you can just re-write this function.
Needs to return a dict with the same structure.
"""
pipeline = {
"T_train": transforms.Compose(
[
transforms.RandomResizedCrop(size=224, scale=(0.08, 1.0)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.228, 0.224, 0.225)),
]
),
"T_val": transforms.Compose(
[
transforms.Resize(256), # resize shorter
transforms.CenterCrop(224), # take center crop
transforms.ToTensor(),
transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.228, 0.224, 0.225)),
]
),
}
return pipeline
def prepare_transforms(dataset: str) -> Tuple[nn.Module, nn.Module]:
"""Prepares pre-defined train and test transformation pipelines for some datasets.
Args:
dataset (str): dataset name.
Returns:
Tuple[nn.Module, nn.Module]: training and validation transformation pipelines.
"""
cifar_pipeline = {
"T_train": transforms.Compose(
[
transforms.RandomResizedCrop(size=32, scale=(0.08, 1.0)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.247, 0.243, 0.261)),
]
),
"T_val": transforms.Compose(
[
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.247, 0.243, 0.261)),
]
),
}
stl_pipeline = {
"T_train": transforms.Compose(
[
transforms.RandomResizedCrop(size=96, scale=(0.08, 1.0)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4823, 0.4466), (0.247, 0.243, 0.261)),
]
),
"T_val": transforms.Compose(
[
transforms.Resize((96, 96)),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4823, 0.4466), (0.247, 0.243, 0.261)),
]
),
}
imagenet_pipeline = {
"T_train": transforms.Compose(
[
transforms.RandomResizedCrop(size=224, scale=(0.08, 1.0)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.228, 0.224, 0.225)),
]
),
"T_val": transforms.Compose(
[
transforms.Resize(256), # resize shorter
transforms.CenterCrop(224), # take center crop
transforms.ToTensor(),
transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.228, 0.224, 0.225)),
]
),
}
custom_pipeline = build_custom_pipeline()
pipelines = {
"cifar10": cifar_pipeline,
"cifar100": cifar_pipeline,
"stl10": stl_pipeline,
"imagenet100": imagenet_pipeline,
"imagenet": imagenet_pipeline,
"custom": custom_pipeline,
}
assert dataset in pipelines
pipeline = pipelines[dataset]
T_train = pipeline["T_train"]
T_val = pipeline["T_val"]
return T_train, T_val
def prepare_datasets(
dataset: str,
T_train: Callable,
T_val: Callable,
data_dir: Optional[Union[str, Path]] = None,
train_dir: Optional[Union[str, Path]] = None,
val_dir: Optional[Union[str, Path]] = None,
) -> Tuple[Dataset, Dataset]:
"""Prepares train and val datasets.
Args:
dataset (str): dataset name.
T_train (Callable): pipeline of transformations for training dataset.
T_val (Callable): pipeline of transformations for validation dataset.
data_dir Optional[Union[str, Path]]: path where to download/locate the dataset.
train_dir Optional[Union[str, Path]]: subpath where the training data is located.
val_dir Optional[Union[str, Path]]: subpath where the validation data is located.
Returns:
Tuple[Dataset, Dataset]: training dataset and validation dataset.
"""
if data_dir is None:
sandbox_dir = Path(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
data_dir = sandbox_dir / "datasets"
else:
data_dir = Path(data_dir)
if train_dir is None:
train_dir = Path(f"{dataset}/train")
else:
train_dir = Path(train_dir)
if val_dir is None:
val_dir = Path(f"{dataset}/val")
else:
val_dir = Path(val_dir)
assert dataset in ["cifar10", "cifar100", "stl10", "imagenet", "imagenet100", "custom"]
if dataset in ["cifar10", "cifar100"]:
DatasetClass = vars(torchvision.datasets)[dataset.upper()]
train_dataset = DatasetClass(
data_dir / train_dir,
train=True,
download=True,
transform=T_train,
)
val_dataset = DatasetClass(
data_dir / val_dir,
train=False,
download=True,
transform=T_val,
)
elif dataset == "stl10":
train_dataset = STL10(
data_dir / train_dir,
split="train",
download=True,
transform=T_train,
)
val_dataset = STL10(
data_dir / val_dir,
split="test",
download=True,
transform=T_val,
)
elif dataset in ["imagenet", "imagenet100", "custom"]:
train_dir = data_dir / train_dir
val_dir = data_dir / val_dir
train_dataset = ImageFolder(train_dir, T_train)
val_dataset = ImageFolder(val_dir, T_val)
return train_dataset, val_dataset
def prepare_dataloaders(
train_dataset: Dataset, val_dataset: Dataset, batch_size: int = 64, num_workers: int = 4
) -> Tuple[DataLoader, DataLoader]:
"""Wraps a train and a validation dataset with a DataLoader.
Args:
train_dataset (Dataset): object containing training data.
val_dataset (Dataset): object containing validation data.
batch_size (int): batch size.
num_workers (int): number of parallel workers.
Returns:
Tuple[DataLoader, DataLoader]: training dataloader and validation dataloader.
"""
train_loader = DataLoader(
train_dataset,
batch_size=batch_size,
shuffle=True,
num_workers=num_workers,
pin_memory=True,
drop_last=True,
)
val_loader = DataLoader(
val_dataset,
batch_size=batch_size,
num_workers=num_workers,
pin_memory=True,
drop_last=False,
)
return train_loader, val_loader
def prepare_data(
dataset: str,
data_dir: Optional[Union[str, Path]] = None,
train_dir: Optional[Union[str, Path]] = None,
val_dir: Optional[Union[str, Path]] = None,
batch_size: int = 64,
num_workers: int = 4,
) -> Tuple[DataLoader, DataLoader]:
"""Prepares transformations, creates dataset objects and wraps them in dataloaders.
Args:
dataset (str): dataset name.
data_dir (Optional[Union[str, Path]], optional): path where to download/locate the dataset.
Defaults to None.
train_dir (Optional[Union[str, Path]], optional): subpath where the
training data is located. Defaults to None.
val_dir (Optional[Union[str, Path]], optional): subpath where the
validation data is located. Defaults to None.
batch_size (int, optional): batch size. Defaults to 64.
num_workers (int, optional): number of parallel workers. Defaults to 4.
Returns:
Tuple[DataLoader, DataLoader]: prepared training and validation dataloader;.
"""
T_train, T_val = prepare_transforms(dataset)
train_dataset, val_dataset = prepare_datasets(
dataset,
T_train,
T_val,
data_dir=data_dir,
train_dir=train_dir,
val_dir=val_dir,
)
train_loader, val_loader = prepare_dataloaders(
train_dataset,
val_dataset,
batch_size=batch_size,
num_workers=num_workers,
)
return train_loader, val_loader
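

if __name__ == "__main__":
    # Minimal usage sketch: CIFAR-10 is downloaded on first use by the underlying
    # torchvision dataset; the data directory, batch size and worker count here
    # are arbitrary example values.
    train_loader, val_loader = prepare_data(
        "cifar10",
        data_dir="./datasets",
        batch_size=256,
        num_workers=4,
    )
    print(f"train batches: {len(train_loader)}, val batches: {len(val_loader)}")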
|
the-stack_106_27192 | """
Data schema, each record represents a file in a google bucket.
"""
from schemas.fastq_schema import FASTQ_SCHEMA
DATA = {
'public_methods': [],
'resource_methods': ['GET'],
'item_methods': ['GET'],
'allowed_roles': ['admin', 'user', 'uploader', 'system'],
'allowed_item_roles': ['admin', 'user', 'uploader', 'system'],
'datasource': {
'source': 'data',
'filter': {
'visibility': True
},
},
'schema': {
'data_format': {
"type": "string",
"required": True,
},
'file_name': {
'type': 'string',
'required': True,
},
'file_size': {
'type': 'integer',
'required': True
},
'sample_ids': {
'type': 'list',
'schema': {
'type': 'string',
'required': True
}
},
'number_of_samples': {
'type': 'integer',
'required': True
},
'trial': {
'type': 'objectid',
'required': True,
},
'trial_name': {
'type': 'string',
'required': True,
},
'gs_uri': {
'type': 'string',
'required': True,
},
'assay': {
'type': 'objectid',
'required': True,
},
'experimental_strategy': {
'type': 'string',
'required': True,
},
'date_created': {
'type': 'string',
'required': True,
},
'analysis_id': {
'type': 'objectid',
},
'mapping': {
'type': 'string',
'required': True,
},
'processed': {
'type': 'boolean'
},
'visibility': {
'type': 'boolean'
},
'uuid_alias': {
'type': 'string',
'required': True,
},
'children': {
'type': 'list',
'schema': {
'type': 'dict',
'schema': {
'_id': {
'type': 'objectid',
'required': True
},
'resource': {
'type': 'string',
'required': True
}
}
}
},
'fastq_properties': {
'type': 'dict',
'nullable': True,
'schema': FASTQ_SCHEMA
},
'download_link': {
'type': 'string',
'nullable': True
}
}
}
DATA_EDIT = {
"public_methods": [],
"allowed_roles": ["admin", "system"],
"allowed_item_roles": ["admin", "system"],
"resource_methods": ["POST"],
"item_methods": ["PATCH", "DELETE"],
"datasource": {
'source': 'data',
},
"schema": DATA["schema"]
}
DATA_TOGGLE_VIS = {
"public_methods": [],
"allowed_roles": ["admin", "user", "uploader", "system"],
"allowed_item_roles": ["admin", "user", "uploader", "system"],
"resource_methods": ["GET"],
"item_methods": ["PATCH"],
"datasource": {
"source": "data",
"projection": {
"visibility": 1
}
},
"schema": {
"visibility": {
"type": "boolean"
}
}
}
DATA_AGG_INPUTS = {
'allowed_roles': ["admin", "system"],
'allowed_item_roles': ["admin", "system"],
'datasource': {
'source': 'data',
'aggregation': {
'pipeline': [
{
"$match": {
"mapping": {
"$in": "$inputs"
},
"processed": False,
"visibility": True
}
},
{
"$group": {
"_id": {
"sample_ids": "$sample_ids",
"assay": "$assay",
"trial": "$trial",
"experimental_strategy": "$experimental_strategy",
"trial_name": "$trial_name"
},
"records": {
"$push": {
"file_name": "$file_name",
"gs_uri": "$gs_uri",
"mapping": "$mapping",
"data_format": "$data_format",
'_id': '$_id'
}
}
}
}
]
}
}
}
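

# Sketch of a record shape that would validate against DATA['schema']; every value
# below is an illustrative placeholder rather than real trial data.
#
# {
#     "data_format": "FASTQ",
#     "file_name": "sample_1/r1.fastq.gz",
#     "file_size": 1048576,
#     "sample_ids": ["SAMPLE_1"],
#     "number_of_samples": 1,
#     "trial": ObjectId("..."), "trial_name": "example-trial",
#     "gs_uri": "gs://example-bucket/sample_1/r1.fastq.gz",
#     "assay": ObjectId("..."), "experimental_strategy": "WES",
#     "date_created": "2019-01-01T00:00:00Z",
#     "mapping": "example-mapping",
#     "processed": False, "visibility": True,
#     "uuid_alias": "00000000-0000-0000-0000-000000000000",
# }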
|
the-stack_106_27193 | import requests
from bs4 import BeautifulSoup
from re import search
from flask import Flask ,jsonify
app = Flask(__name__)
app.url_map.strict_slashes = False
def getNotesAndBooks(title):
url = f'https://doku.pub/search/{title}?sort=popular'
header = {'User-Agent':'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36'}
r = requests.get(url, headers=header)
soup = BeautifulSoup(r.content,features='lxml')
    articles = soup.find_all('div', class_='col-lg-3 col-md-4 col-sm-6 col-6')
    complete_json = []
    for item in articles:
complete_json.append(
{
'name':item.find('h5', class_='card-title').text,
'uploadDate':item.find('small', class_='text-muted float-left' ).text,
'imgurl':item.find('img' )['src'],
                'downloadurl': "https://doku.pub/download/" + item.find('a')['href'].rsplit('/', 1)[1],
# 'documentOpenUrl':BeautifulSoup(requests.get(item.find('a')['href'], headers=header).content,features='lxml').find_all('iframe',id='viewer'),
'url':item.find('a' )['href'],
}
)
return complete_json
@app.route('/')
def home_page():
return "Welcome to https://doku.pub/ unofficial API"
@app.route('/<query>')
def home(query):
return jsonify(getNotesAndBooks(query))
if __name__ == "__main__":
app.debug = True
app.run(port=5000)
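
# Once the service is running, a request like the following exercises the search
# route above; "python" is just an example query term.
#
#   curl http://localhost:5000/python
#   -> JSON list of objects with name, upload date, image URL, download URL and page URL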
|
the-stack_106_27196 | """Test for tackle.utils.command"""
import pytest
from tackle.utils.command import unpack_args_kwargs_string, unpack_input_string
TEMPLATES = [
('foo bar baz', 3, 0, 0),
('foo --bar baz bing', 2, 1, 0),
('foo bar --baz foo', 2, 1, 0),
('foo bar --baz foo --bing baz', 2, 2, 0),
('foo --bar baz', 1, 1, 0),
('foo bar --baz', 2, 0, 1),
('foo bar baz --bing', 3, 0, 1),
('foo --bar baz --foo', 1, 1, 1),
('foo bar --foo bar --bing --baz bling', 2, 2, 1),
('foo --bar baz blah --bling', 2, 1, 1),
('this --if "expanded == \'that\'"', 1, 1, 0),
('"{{foo}}" bar baz', 4, 0, 0),
('{{ foo }} bar baz', 4, 0, 0),
('{{ foo }} bar baz bing', 5, 0, 0),
('{{ foo}} bar baz', 4, 0, 0),
('{{foo }} bar baz', 4, 0, 0),
('{{ foo }}-foo bar baz', 4, 0, 0),
('bar-{{ foo }}-foo', 2, 0, 0),
('bar-{{ foo in var }}-foo', 2, 0, 0),
]
@pytest.mark.parametrize("template,len_args,len_kwargs,len_flags", TEMPLATES)
def test_unpack_args_kwargs(template, len_args, len_kwargs, len_flags):
"""Validate the count of each input arg/kwarg/flag."""
args, kwargs, flags = unpack_input_string(template)
assert len_args == len(args)
assert len_kwargs == len(kwargs)
assert len_flags == len(flags)
FIXTURES = [
("this --if \"expanded == 'that'\"", ["this"], {"if": "expanded == 'that'"}, []),
(
"this that --if \"expanded == 'that'\"",
["this", "that"],
{"if": "expanded == 'that'"},
[],
),
]
@pytest.mark.parametrize("input_string,args,kwargs,flags", FIXTURES)
def test_unpack_input_string(input_string, args, kwargs, flags):
"""Validate expressions for input strings."""
args_out, kwargs_out, flags_out = unpack_args_kwargs_string(input_string)
assert args_out == args
assert kwargs_out == kwargs
assert flags_out == flags
|
the-stack_106_27197 | # File: P (Python 2.4)
from direct.controls.GravityWalker import GravityWalker
from direct.showbase.InputStateGlobal import inputState
from pandac.PandaModules import *
from direct.task.Task import Task
class PiratesGravityWalker(GravityWalker):
notify = directNotify.newCategory('PiratesGravityWalker')
def __init__(self, *args, **kwargs):
        GravityWalker.__init__(self, *args, **kwargs)
self.predicting = 0
def handleAvatarControls(self, task):
run = inputState.isSet('run')
forward = inputState.isSet('forward')
reverse = inputState.isSet('reverse')
turnLeft = inputState.isSet('turnLeft')
turnRight = inputState.isSet('turnRight')
slideLeft = inputState.isSet('slideLeft')
slideRight = inputState.isSet('slideRight')
jump = inputState.isSet('jump')
if base.localAvatar.getAutoRun():
forward = 1
reverse = 0
        # Pick speed, slide speed and rotation speed from the control inputs,
        # using the same and/or selection chains as GravityWalker.handleAvatarControls.
        self.speed = (forward and self.avatarControlForwardSpeed
            or reverse and -self.avatarControlReverseSpeed)
        self.slideSpeed = (reverse and slideLeft and -self.avatarControlReverseSpeed * 0.75
            or reverse and slideRight and self.avatarControlReverseSpeed * 0.75
            or slideLeft and -self.avatarControlForwardSpeed * 0.75
            or slideRight and self.avatarControlForwardSpeed * 0.75)
        self.rotationSpeed = (turnLeft and self.avatarControlRotateSpeed
            or turnRight and -self.avatarControlRotateSpeed)
if self.speed and self.slideSpeed:
self.speed *= GravityWalker.DiagonalFactor
self.slideSpeed *= GravityWalker.DiagonalFactor
debugRunning = inputState.isSet('debugRunning')
if debugRunning:
self.speed *= base.debugRunningMultiplier
self.slideSpeed *= base.debugRunningMultiplier
self.rotationSpeed *= 1.25
if self.needToDeltaPos:
self.setPriorParentVector()
self.needToDeltaPos = 0
if self.wantDebugIndicator:
self.displayDebugInfo()
def sendLandMessage(impact):
if impact > -15.0:
messenger.send('jumpEnd')
elif -15.0 >= impact:
pass
elif impact > -15.0:
messenger.send('jumpLand')
self.startJumpDelay(0.5)
else:
messenger.send('jumpLandHard')
self.startJumpDelay(0.5)
def predictHeightAndVelocity(aheadFrames):
dt = globalClock.getDt()
vel = self.lifter.getVelocity()
height = self.getAirborneHeight()
grav = self.lifter.getGravity()
dtt = dt * aheadFrames
futureHeight = height + vel * dtt + 0.5 * grav * dtt * dtt
futureVel = vel - grav * dtt
return (futureHeight, futureVel)
if self.lifter.isOnGround():
if self.isAirborne:
self.isAirborne = 0
self.predicting = 0
impact = self.lifter.getImpactVelocity()
sendLandMessage(impact)
self.priorParent = Vec3.zero()
if jump and self.mayJump:
def doJump(task):
self.lifter.addVelocity(self.avatarControlJumpForce)
self.isAirborne = 1
self.predicting = 1
if not taskMgr.hasTaskNamed('jumpWait'):
                    taskMgr.doMethodLater(0.2, doJump, 'jumpWait')
messenger.send('jumpStart')
elif self.isAirborne and self.predicting:
(futureHeight, futureVel) = predictHeightAndVelocity(2)
if futureHeight <= 0.0:
self.isAirborne = 0
self.predicting = 0
sendLandMessage(futureVel)
elif self.getAirborneHeight() > 2.0:
self.isAirborne = 1
self.predicting = 1
self._PiratesGravityWalker__oldPosDelta = self.avatarNodePath.getPosDelta(render)
self._PiratesGravityWalker__oldDt = ClockObject.getGlobalClock().getDt()
dt = self._PiratesGravityWalker__oldDt
        self.moving = (self.speed or self.slideSpeed or self.rotationSpeed
            or self.priorParent != Vec3.zero())
if self.moving:
distance = dt * self.speed
slideDistance = dt * self.slideSpeed
rotation = dt * self.rotationSpeed
            if distance or slideDistance or self.priorParent != Vec3.zero():
rotMat = Mat3.rotateMatNormaxis(self.avatarNodePath.getH(), Vec3.up())
if self.isAirborne:
forward = Vec3.forward()
else:
contact = self.lifter.getContactNormal()
forward = contact.cross(Vec3.right())
forward.normalize()
self.vel = Vec3(forward * distance)
if slideDistance:
if self.isAirborne:
right = Vec3.right()
else:
right = forward.cross(contact)
right.normalize()
self.vel = Vec3(self.vel + right * slideDistance)
self.vel = Vec3(rotMat.xform(self.vel))
step = self.vel + self.priorParent * dt
self.avatarNodePath.setFluidPos(Point3(self.avatarNodePath.getPos() + step))
self.vel /= dt
self.avatarNodePath.setH(self.avatarNodePath.getH() + rotation)
else:
self.vel.set(0.0, 0.0, 0.0)
if self.moving or jump:
messenger.send('avatarMoving')
return task.cont
def disableJump(self):
if base.localAvatar.controlManager.forceAvJumpToken is None:
base.localAvatar.controlManager.disableAvatarJump()
def enableJump(self):
if base.localAvatar.controlManager.forceAvJumpToken is not None:
base.localAvatar.controlManager.enableAvatarJump()
def abortJump(self):
taskMgr.remove('jumpWait')
def reset(self):
GravityWalker.reset(self)
self.abortJump()
def disableAvatarControls(self):
GravityWalker.disableAvatarControls(self)
self.abortJump()
|
the-stack_106_27198 | """
This module provides the ``FlyteRemote`` object, which is the end-user's main starting point for interacting
with a Flyte backend in an interactive and programmatic way. Think of this experience as kind of like the web UI
but in Python object form.
"""
from __future__ import annotations
import logging
import os
import time
import typing
import uuid
from collections import OrderedDict
from copy import deepcopy
from dataclasses import asdict, dataclass
from datetime import datetime, timedelta
import grpc
from flyteidl.core import literals_pb2 as literals_pb2
from flytekit.clients.friendly import SynchronousFlyteClient
from flytekit.common import utils as common_utils
from flytekit.common.exceptions.user import FlyteEntityAlreadyExistsException, FlyteEntityNotExistException
from flytekit.configuration import internal
from flytekit.configuration import platform as platform_config
from flytekit.configuration import sdk as sdk_config
from flytekit.configuration import set_flyte_config_file
from flytekit.core import context_manager
from flytekit.core.interface import Interface
from flytekit.loggers import remote_logger
from flytekit.models import filters as filter_models
from flytekit.models.admin import common as admin_common_models
try:
from functools import singledispatchmethod
except ImportError:
from singledispatchmethod import singledispatchmethod
from flytekit.clients.helpers import iterate_node_executions, iterate_task_executions
from flytekit.clis.flyte_cli.main import _detect_default_config_file
from flytekit.clis.sdk_in_container import serialize
from flytekit.common import constants
from flytekit.common.exceptions import user as user_exceptions
from flytekit.common.translator import FlyteControlPlaneEntity, FlyteLocalEntity, get_serializable
from flytekit.configuration import auth as auth_config
from flytekit.configuration.internal import DOMAIN, PROJECT
from flytekit.core.base_task import PythonTask
from flytekit.core.context_manager import FlyteContextManager, ImageConfig, SerializationSettings, get_image_config
from flytekit.core.data_persistence import FileAccessProvider
from flytekit.core.launch_plan import LaunchPlan
from flytekit.core.type_engine import TypeEngine
from flytekit.core.workflow import WorkflowBase
from flytekit.models import common as common_models
from flytekit.models import launch_plan as launch_plan_models
from flytekit.models import literals as literal_models
from flytekit.models.admin.common import Sort
from flytekit.models.core.identifier import Identifier, ResourceType, WorkflowExecutionIdentifier
from flytekit.models.core.workflow import NodeMetadata
from flytekit.models.execution import (
ExecutionMetadata,
ExecutionSpec,
NodeExecutionGetDataResponse,
NotificationList,
WorkflowExecutionGetDataResponse,
)
from flytekit.remote.executions import FlyteNodeExecution, FlyteTaskExecution, FlyteWorkflowExecution
from flytekit.remote.launch_plan import FlyteLaunchPlan
from flytekit.remote.nodes import FlyteNode
from flytekit.remote.task import FlyteTask
from flytekit.remote.workflow import FlyteWorkflow
ExecutionDataResponse = typing.Union[WorkflowExecutionGetDataResponse, NodeExecutionGetDataResponse]
MOST_RECENT_FIRST = admin_common_models.Sort("created_at", admin_common_models.Sort.Direction.DESCENDING)
@dataclass
class ResolvedIdentifiers:
project: str
domain: str
name: str
version: str
def _get_latest_version(list_entities_method: typing.Callable, project: str, domain: str, name: str):
named_entity = common_models.NamedEntityIdentifier(project, domain, name)
entity_list, _ = list_entities_method(
named_entity,
limit=1,
sort_by=Sort("created_at", Sort.Direction.DESCENDING),
)
admin_entity = None if not entity_list else entity_list[0]
if not admin_entity:
raise user_exceptions.FlyteEntityNotExistException("Named entity {} not found".format(named_entity))
return admin_entity.id.version
def _get_entity_identifier(
list_entities_method: typing.Callable,
resource_type: int, # from flytekit.models.core.identifier.ResourceType
project: str,
domain: str,
name: str,
version: typing.Optional[str] = None,
):
return Identifier(
resource_type,
project,
domain,
name,
version if version is not None else _get_latest_version(list_entities_method, project, domain, name),
)
class FlyteRemote(object):
"""Main entrypoint for programmatically accessing a Flyte remote backend.
The term 'remote' is synonymous with 'backend' or 'deployment' and refers to a hosted instance of the
Flyte platform, which comes with a Flyte Admin server on some known URI.
.. warning::
This feature is in beta.
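
    Example (an illustrative sketch; the admin URL comes from your flyte config, and the
    project, domain, task name, version and inputs below are placeholders)::

        remote = FlyteRemote.from_config(default_project="flyteexamples", default_domain="development")
        flyte_task = remote.fetch_task(name="my.tasks.say_hello", version="v1")
        execution = remote.execute(flyte_task, inputs={"name": "world"}, wait=True)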
"""
@classmethod
def from_config(
cls,
default_project: typing.Optional[str] = None,
default_domain: typing.Optional[str] = None,
config_file_path: typing.Optional[str] = None,
grpc_credentials: typing.Optional[grpc.ChannelCredentials] = None,
venv_root: typing.Optional[str] = None,
) -> FlyteRemote:
"""Create a FlyteRemote object using flyte configuration variables and/or environment variable overrides.
:param default_project: default project to use when fetching or executing flyte entities.
:param default_domain: default domain to use when fetching or executing flyte entities.
:param config_file_path: config file to use when connecting to flyte admin. we will use '~/.flyte/config' by default.
:param grpc_credentials: gRPC channel credentials for connecting to flyte admin as returned by :func:`grpc.ssl_channel_credentials`
"""
if config_file_path is None:
_detect_default_config_file()
else:
set_flyte_config_file(config_file_path)
raw_output_data_prefix = auth_config.RAW_OUTPUT_DATA_PREFIX.get() or os.path.join(
sdk_config.LOCAL_SANDBOX.get(), "control_plane_raw"
)
file_access = FileAccessProvider(
local_sandbox_dir=os.path.join(sdk_config.LOCAL_SANDBOX.get(), "control_plane_metadata"),
raw_output_prefix=raw_output_data_prefix,
)
venv_root = venv_root or serialize._DEFAULT_FLYTEKIT_VIRTUALENV_ROOT
entrypoint = context_manager.EntrypointSettings(
path=os.path.join(venv_root, serialize._DEFAULT_FLYTEKIT_RELATIVE_ENTRYPOINT_LOC)
)
return cls(
flyte_admin_url=platform_config.URL.get(),
insecure=platform_config.INSECURE.get(),
default_project=default_project or PROJECT.get() or None,
default_domain=default_domain or DOMAIN.get() or None,
file_access=file_access,
auth_role=common_models.AuthRole(
assumable_iam_role=auth_config.ASSUMABLE_IAM_ROLE.get(),
kubernetes_service_account=auth_config.KUBERNETES_SERVICE_ACCOUNT.get(),
),
notifications=None,
labels=None,
annotations=None,
image_config=get_image_config(),
raw_output_data_config=(
common_models.RawOutputDataConfig(raw_output_data_prefix) if raw_output_data_prefix else None
),
grpc_credentials=grpc_credentials,
entrypoint_settings=entrypoint,
)
def __init__(
self,
flyte_admin_url: str,
insecure: bool,
default_project: typing.Optional[str] = None,
default_domain: typing.Optional[str] = None,
file_access: typing.Optional[FileAccessProvider] = None,
auth_role: typing.Optional[common_models.AuthRole] = None,
notifications: typing.Optional[typing.List[common_models.Notification]] = None,
labels: typing.Optional[common_models.Labels] = None,
annotations: typing.Optional[common_models.Annotations] = None,
image_config: typing.Optional[ImageConfig] = None,
raw_output_data_config: typing.Optional[common_models.RawOutputDataConfig] = None,
grpc_credentials: typing.Optional[grpc.ChannelCredentials] = None,
entrypoint_settings: typing.Optional[context_manager.EntrypointSettings] = None,
):
"""Initialize a FlyteRemote object.
:param flyte_admin_url: url pointing to the remote backend.
        :param insecure: whether or not to disable SSL when connecting to flyte admin.
:param default_project: default project to use when fetching or executing flyte entities.
:param default_domain: default domain to use when fetching or executing flyte entities.
:param file_access: file access provider to use for offloading non-literal inputs/outputs.
:param auth_role: auth role config
:param notifications: notification config
:param labels: label config
:param annotations: annotation config
:param image_config: image config
:param raw_output_data_config: location for offloaded data, e.g. in S3
:param grpc_credentials: gRPC channel credentials for connecting to flyte admin as returned
by :func:`grpc.ssl_channel_credentials`
:param entrypoint_settings: EntrypointSettings object for use with Spark tasks. If supplied, this will be
used when serializing Spark tasks, which need to know the path to the flytekit entrypoint.py file,
inside the container.
"""
remote_logger.warning("This feature is still in beta. Its interface and UX is subject to change.")
if flyte_admin_url is None:
raise user_exceptions.FlyteAssertion("Cannot find flyte admin url in config file.")
self._client = SynchronousFlyteClient(flyte_admin_url, insecure=insecure, credentials=grpc_credentials)
# read config files, env vars, host, ssl options for admin client
self._flyte_admin_url = flyte_admin_url
self._insecure = insecure
self._default_project = default_project
self._default_domain = default_domain
self._image_config = image_config
self._auth_role = auth_role
self._notifications = notifications
self._labels = labels
self._annotations = annotations
self._raw_output_data_config = raw_output_data_config
# Not exposing this as a property for now.
self._entrypoint_settings = entrypoint_settings
# Save the file access object locally, but also make it available for use from the context.
FlyteContextManager.with_context(FlyteContextManager.current_context().with_file_access(file_access).build())
self._file_access = file_access
# TODO: Reconsider whether we want this. Probably best to not cache.
self._serialized_entity_cache = OrderedDict()
@property
def client(self) -> SynchronousFlyteClient:
"""Return a SynchronousFlyteClient for additional operations."""
return self._client
@property
def default_project(self) -> str:
"""Default project to use when fetching or executing flyte entities."""
return self._default_project
@property
def default_domain(self) -> str:
"""Default project to use when fetching or executing flyte entities."""
return self._default_domain
@property
def image_config(self) -> ImageConfig:
"""Image config."""
return self._image_config
@property
def file_access(self) -> FileAccessProvider:
"""File access provider to use for offloading non-literal inputs/outputs."""
return self._file_access
@property
def auth_role(self):
"""Auth role config."""
return self._auth_role
@property
def notifications(self):
"""Notification config."""
return self._notifications
@property
def labels(self):
"""Label config."""
return self._labels
@property
def annotations(self):
"""Annotation config."""
return self._annotations
@property
def raw_output_data_config(self):
"""Location for offloaded data, e.g. in S3"""
return self._raw_output_data_config
@property
def version(self) -> str:
"""Get a randomly generated version string."""
return uuid.uuid4().hex[:30] + str(int(time.time()))
def remote_context(self):
"""Context manager with remote-specific configuration."""
return FlyteContextManager.with_context(
FlyteContextManager.current_context().with_file_access(self.file_access)
)
def with_overrides(
self,
default_project: typing.Optional[str] = None,
default_domain: typing.Optional[str] = None,
flyte_admin_url: typing.Optional[str] = None,
insecure: typing.Optional[bool] = None,
file_access: typing.Optional[FileAccessProvider] = None,
auth_role: typing.Optional[common_models.AuthRole] = None,
notifications: typing.Optional[typing.List[common_models.Notification]] = None,
labels: typing.Optional[common_models.Labels] = None,
annotations: typing.Optional[common_models.Annotations] = None,
image_config: typing.Optional[ImageConfig] = None,
raw_output_data_config: typing.Optional[common_models.RawOutputDataConfig] = None,
):
"""Create a copy of the remote object, overriding the specified attributes."""
new_remote = deepcopy(self)
if default_project:
new_remote._default_project = default_project
if default_domain:
new_remote._default_domain = default_domain
if flyte_admin_url:
new_remote._flyte_admin_url = flyte_admin_url
new_remote._client = SynchronousFlyteClient(flyte_admin_url, self._insecure)
if insecure:
new_remote._insecure = insecure
new_remote._client = SynchronousFlyteClient(self._flyte_admin_url, insecure)
if file_access:
new_remote._file_access = file_access
if auth_role:
new_remote._auth_role = auth_role
if notifications:
new_remote._notifications = notifications
if labels:
new_remote._labels = labels
if annotations:
new_remote._annotations = annotations
if image_config:
new_remote._image_config = image_config
if raw_output_data_config:
new_remote._raw_output_data_config = raw_output_data_config
return new_remote
def fetch_task(self, project: str = None, domain: str = None, name: str = None, version: str = None) -> FlyteTask:
"""Fetch a task entity from flyte admin.
:param project: fetch entity from this project. If None, uses the default_project attribute.
:param domain: fetch entity from this domain. If None, uses the default_domain attribute.
:param name: fetch entity with matching name.
:param version: fetch entity with matching version. If None, gets the latest version of the entity.
:returns: :class:`~flytekit.remote.tasks.task.FlyteTask`
:raises: FlyteAssertion if name is None
"""
if name is None:
raise user_exceptions.FlyteAssertion("the 'name' argument must be specified.")
task_id = _get_entity_identifier(
self.client.list_tasks_paginated,
ResourceType.TASK,
project or self.default_project,
domain or self.default_domain,
name,
version,
)
admin_task = self.client.get_task(task_id)
flyte_task = FlyteTask.promote_from_model(admin_task.closure.compiled_task.template)
flyte_task._id = task_id
return flyte_task
def fetch_workflow(
self, project: str = None, domain: str = None, name: str = None, version: str = None
) -> FlyteWorkflow:
"""Fetch a workflow entity from flyte admin.
:param project: fetch entity from this project. If None, uses the default_project attribute.
:param domain: fetch entity from this domain. If None, uses the default_domain attribute.
:param name: fetch entity with matching name.
:param version: fetch entity with matching version. If None, gets the latest version of the entity.
:raises: FlyteAssertion if name is None
"""
if name is None:
raise user_exceptions.FlyteAssertion("the 'name' argument must be specified.")
workflow_id = _get_entity_identifier(
self.client.list_workflows_paginated,
ResourceType.WORKFLOW,
project or self.default_project,
domain or self.default_domain,
name,
version,
)
admin_workflow = self.client.get_workflow(workflow_id)
compiled_wf = admin_workflow.closure.compiled_workflow
node_launch_plans = {}
# TODO: Inspect branch nodes for launch plans
for node in FlyteWorkflow.get_non_system_nodes(compiled_wf.primary.template.nodes):
if node.workflow_node is not None and node.workflow_node.launchplan_ref is not None:
node_launch_plans[node.workflow_node.launchplan_ref] = self.client.get_launch_plan(
node.workflow_node.launchplan_ref
).spec
return FlyteWorkflow.promote_from_closure(compiled_wf, node_launch_plans)
def fetch_launch_plan(
self, project: str = None, domain: str = None, name: str = None, version: str = None
) -> FlyteLaunchPlan:
"""Fetch a launchplan entity from flyte admin.
:param project: fetch entity from this project. If None, uses the default_project attribute.
:param domain: fetch entity from this domain. If None, uses the default_domain attribute.
:param name: fetch entity with matching name.
:param version: fetch entity with matching version. If None, gets the latest version of the entity.
:returns: :class:`~flytekit.remote.launch_plan.FlyteLaunchPlan`
:raises: FlyteAssertion if name is None
"""
if name is None:
raise user_exceptions.FlyteAssertion("the 'name' argument must be specified.")
launch_plan_id = _get_entity_identifier(
self.client.list_launch_plans_paginated,
ResourceType.LAUNCH_PLAN,
project or self.default_project,
domain or self.default_domain,
name,
version,
)
admin_launch_plan = self.client.get_launch_plan(launch_plan_id)
flyte_launch_plan = FlyteLaunchPlan.promote_from_model(launch_plan_id, admin_launch_plan.spec)
wf_id = flyte_launch_plan.workflow_id
workflow = self.fetch_workflow(wf_id.project, wf_id.domain, wf_id.name, wf_id.version)
flyte_launch_plan._interface = workflow.interface
flyte_launch_plan.guessed_python_interface = Interface(
inputs=TypeEngine.guess_python_types(flyte_launch_plan.interface.inputs),
outputs=TypeEngine.guess_python_types(flyte_launch_plan.interface.outputs),
)
return flyte_launch_plan
def fetch_workflow_execution(
self, project: str = None, domain: str = None, name: str = None
) -> FlyteWorkflowExecution:
"""Fetch a workflow execution entity from flyte admin.
:param project: fetch entity from this project. If None, uses the default_project attribute.
:param domain: fetch entity from this domain. If None, uses the default_domain attribute.
:param name: fetch entity with matching name.
:returns: :class:`~flytekit.remote.workflow_execution.FlyteWorkflowExecution`
:raises: FlyteAssertion if name is None
"""
if name is None:
raise user_exceptions.FlyteAssertion("the 'name' argument must be specified.")
return FlyteWorkflowExecution.promote_from_model(
self.client.get_execution(
WorkflowExecutionIdentifier(
project or self.default_project,
domain or self.default_domain,
name,
)
)
)
######################
# Listing Entities #
######################
def recent_executions(
self,
project: typing.Optional[str] = None,
domain: typing.Optional[str] = None,
limit: typing.Optional[int] = 100,
) -> typing.List[FlyteWorkflowExecution]:
# Ignore token for now
exec_models, _ = self.client.list_executions_paginated(
project or self.default_project,
domain or self.default_domain,
limit,
sort_by=MOST_RECENT_FIRST,
)
return [FlyteWorkflowExecution.promote_from_model(e) for e in exec_models]
def list_tasks_by_version(
self,
version: str,
project: typing.Optional[str] = None,
domain: typing.Optional[str] = None,
limit: typing.Optional[int] = 100,
) -> typing.List[FlyteTask]:
if not version:
raise ValueError("Must specify a version")
named_entity_id = common_models.NamedEntityIdentifier(
project=project or self.default_project,
domain=domain or self.default_domain,
)
# Ignore token for now
t_models, _ = self.client.list_tasks_paginated(
named_entity_id,
filters=[filter_models.Filter.from_python_std(f"eq(version,{version})")],
limit=limit,
)
return [FlyteTask.promote_from_model(t.closure.compiled_task.template) for t in t_models]
######################
# Serialize Entities #
######################
@singledispatchmethod
def _serialize(
self,
entity: FlyteLocalEntity,
project: str = None,
domain: str = None,
version: str = None,
**kwargs,
) -> FlyteControlPlaneEntity:
"""Serialize an entity for registration."""
# TODO: Revisit cache
return get_serializable(
self._serialized_entity_cache,
SerializationSettings(
project or self.default_project,
domain or self.default_domain,
version or self.version,
self.image_config,
# https://github.com/flyteorg/flyte/issues/1359
env={internal.IMAGE.env_var: self.image_config.default_image.full},
entrypoint_settings=self._entrypoint_settings,
),
entity=entity,
)
#####################
# Register Entities #
#####################
@singledispatchmethod
def register(
self,
entity: typing.Union[PythonTask, WorkflowBase, LaunchPlan],
project: str = None,
domain: str = None,
name: str = None,
version: str = None,
) -> typing.Union[FlyteTask, FlyteWorkflow, FlyteLaunchPlan]:
"""Register an entity to flyte admin.
:param entity: entity to register.
:param project: register entity into this project. If None, uses ``default_project`` attribute
:param domain: register entity into this domain. If None, uses ``default_domain`` attribute
:param name: register entity with this name. If None, uses ``entity.name``
:param version: register entity with this version. If None, uses auto-generated version.
"""
raise NotImplementedError(f"entity type {type(entity)} not recognized for registration")
@register.register
def _(
self, entity: PythonTask, project: str = None, domain: str = None, name: str = None, version: str = None
) -> FlyteTask:
"""Register an @task-decorated function or TaskTemplate task to flyte admin."""
resolved_identifiers = asdict(self._resolve_identifier_kwargs(entity, project, domain, name, version))
self.client.create_task(
Identifier(ResourceType.TASK, **resolved_identifiers),
task_spec=self._serialize(entity, **resolved_identifiers),
)
return self.fetch_task(**resolved_identifiers)
@register.register
def _(
self, entity: WorkflowBase, project: str = None, domain: str = None, name: str = None, version: str = None
) -> FlyteWorkflow:
"""Register an @workflow-decorated function to flyte admin."""
resolved_identifiers = asdict(self._resolve_identifier_kwargs(entity, project, domain, name, version))
self.client.create_workflow(
Identifier(ResourceType.WORKFLOW, **resolved_identifiers),
workflow_spec=self._serialize(entity, **resolved_identifiers),
)
return self.fetch_workflow(**resolved_identifiers)
@register.register
def _(
self, entity: LaunchPlan, project: str = None, domain: str = None, name: str = None, version: str = None
) -> FlyteLaunchPlan:
"""Register a LaunchPlan object to flyte admin."""
# See _get_patch_launch_plan_fn for what we need to patch. These are the elements of a launch plan
# that are not set at serialization time and are filled in either by flyte-cli register files or flytectl.
resolved_identifiers = asdict(self._resolve_identifier_kwargs(entity, project, domain, name, version))
serialized_lp: launch_plan_models.LaunchPlan = self._serialize(entity, **resolved_identifiers)
if self.auth_role:
serialized_lp.spec._auth_role = common_models.AuthRole(
self.auth_role.assumable_iam_role, self.auth_role.kubernetes_service_account
)
if self.raw_output_data_config:
serialized_lp.spec._raw_output_data_config = common_models.RawOutputDataConfig(
self.raw_output_data_config.output_location_prefix
)
# Patch in labels and annotations
if self.labels:
for k, v in self.labels.values.items():
serialized_lp.spec._labels.values[k] = v
if self.annotations:
for k, v in self.annotations.values.items():
serialized_lp.spec._annotations.values[k] = v
self.client.create_launch_plan(
Identifier(ResourceType.LAUNCH_PLAN, **resolved_identifiers),
launch_plan_spec=serialized_lp.spec,
)
return self.fetch_launch_plan(**resolved_identifiers)
def _register_entity_if_not_exists(self, entity: WorkflowBase, resolved_identifiers_dict: dict):
# Try to register all the entity in WorkflowBase including LaunchPlan, PythonTask, or subworkflow.
node_identifiers_dict = deepcopy(resolved_identifiers_dict)
for node in entity.nodes:
try:
node_identifiers_dict["name"] = node.flyte_entity.name
if isinstance(node.flyte_entity, WorkflowBase):
self._register_entity_if_not_exists(node.flyte_entity, node_identifiers_dict)
self.register(node.flyte_entity, **node_identifiers_dict)
elif isinstance(node.flyte_entity, PythonTask) or isinstance(node.flyte_entity, LaunchPlan):
self.register(node.flyte_entity, **node_identifiers_dict)
else:
raise NotImplementedError(f"We don't support registering this kind of entity: {node.flyte_entity}")
except FlyteEntityAlreadyExistsException:
logging.info(f"{entity.name} already exists")
except Exception as e:
logging.info(f"Failed to register entity {entity.name} with error {e}")
####################
# Execute Entities #
####################
def _resolve_identifier_kwargs(
self,
entity,
project: typing.Optional[str],
domain: typing.Optional[str],
name: typing.Optional[str],
version: typing.Optional[str],
) -> ResolvedIdentifiers:
"""
Resolves the identifier attributes based on user input, falling back on the default project/domain and
auto-generated version, and ultimately the entity project/domain if entity is a remote flyte entity.
"""
error_msg = (
"entity {entity} of type {entity_type} is not associated with a {arg_name}. Please specify the {arg_name} "
"argument when invoking the FlyteRemote.execute method or a default_{arg_name} value when initializig the "
"FlyteRemote object."
)
if project:
resolved_project, msg_project = project, "execute-method"
elif self.default_project:
resolved_project, msg_project = self.default_project, "remote"
elif hasattr(entity, "id"):
resolved_project, msg_project = entity.id.project, "entity"
else:
raise TypeError(error_msg.format(entity=entity, entity_type=type(entity), arg_name="project"))
if domain:
resolved_domain, msg_domain = domain, "execute-method"
elif self.default_domain:
resolved_domain, msg_domain = self.default_domain, "remote"
elif hasattr(entity, "id"):
resolved_domain, msg_domain = entity.id.domain, "entity"
else:
raise TypeError(error_msg.format(entity=entity, entity_type=type(entity), arg_name="domain"))
remote_logger.debug(
f"Using {msg_project}-supplied value for project and {msg_domain}-supplied value for domain."
)
return ResolvedIdentifiers(
resolved_project,
resolved_domain,
name or entity.name,
version or self.version,
)
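# Illustrative note (added comment, not part of the original module): resolution precedence is
# explicit argument > FlyteRemote default > the entity's own id. For example, with hypothetical
# defaults of default_project="flytesnacks" and default_domain="development", calling
# self._resolve_identifier_kwargs(my_task, None, None, None, None) resolves to
# ResolvedIdentifiers("flytesnacks", "development", my_task.name, self.version).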
def _execute(
self,
entity: typing.Union[FlyteTask, FlyteWorkflow, FlyteLaunchPlan],
inputs: typing.Dict[str, typing.Any],
project: str,
domain: str,
execution_name: typing.Optional[str] = None,
wait: bool = False,
labels: typing.Optional[common_models.Labels] = None,
annotations: typing.Optional[common_models.Annotations] = None,
auth_role: typing.Optional[common_models.AuthRole] = None,
) -> FlyteWorkflowExecution:
"""Common method for execution across all entities.
:param entity: entity to execute (a FlyteTask, FlyteWorkflow, or FlyteLaunchPlan)
:param inputs: dictionary mapping argument names to values
:param project: project on which to execute the entity
:param domain: domain on which to execute the entity
:param execution_name: name of the execution. If None, an auto-generated value is used
:param wait: if True, waits for execution to complete
:returns: :class:`~flytekit.remote.workflow_execution.FlyteWorkflowExecution`
"""
execution_name = execution_name or "f" + uuid.uuid4().hex[:19]
disable_all = self.notifications == []
if disable_all:
notifications = None
else:
notifications = NotificationList(self.notifications or [])
disable_all = None
with self.remote_context() as ctx:
input_python_types = entity.guessed_python_interface.inputs
expected_input = entity.interface.inputs
for k, v in inputs.items():
if expected_input.get(k) is None:
raise user_exceptions.FlyteValueException(
k, f"The {entity.__class__.__name__} doesn't have this input key."
)
literal_inputs = TypeEngine.dict_to_literal_map(ctx, inputs, input_python_types)
try:
# TODO: re-consider how this works. Currently, this will only execute the flyte entity referenced by
# flyte_id in the same project and domain. However, it is possible to execute it in a different project
# and domain, which is specified in the first two arguments of client.create_execution. This is useful
# in the case that I want to use a flyte entity from e.g. project "A" but actually execute the entity on a
# different project "B". For now, this method doesn't support this use case.
exec_id = self.client.create_execution(
project,
domain,
execution_name,
ExecutionSpec(
entity.id,
ExecutionMetadata(
ExecutionMetadata.ExecutionMode.MANUAL,
"placeholder", # TODO: get principle
0,
),
notifications=notifications,
disable_all=disable_all,
labels=labels or self.labels,
annotations=annotations or self.annotations,
auth_role=auth_role or self.auth_role,
),
literal_inputs,
)
except user_exceptions.FlyteEntityAlreadyExistsException:
exec_id = WorkflowExecutionIdentifier(project, domain, execution_name)
execution = FlyteWorkflowExecution.promote_from_model(self.client.get_execution(exec_id))
if wait:
return self.wait(execution)
return execution
@singledispatchmethod
def execute(
self,
entity: typing.Union[FlyteTask, FlyteLaunchPlan, FlyteWorkflow, PythonTask, WorkflowBase, LaunchPlan],
inputs: typing.Dict[str, typing.Any],
project: str = None,
domain: str = None,
name: str = None,
version: str = None,
execution_name: str = None,
wait: bool = False,
) -> FlyteWorkflowExecution:
"""Execute a task, workflow, or launchplan.
This method supports:
- ``Flyte{Task, Workflow, LaunchPlan}`` remote module objects.
- ``@task``-decorated functions and ``TaskTemplate`` tasks.
- ``@workflow``-decorated functions.
- ``LaunchPlan`` objects.
:param entity: entity to execute
:param inputs: dictionary mapping argument names to values
:param project: execute entity in this project. If entity doesn't exist in the project, register the entity
first before executing.
:param domain: execute entity in this domain. If entity doesn't exist in the domain, register the entity
first before executing.
:param name: execute entity using this name. If not None, use this value instead of ``entity.name``
:param version: execute entity using this version. If None, uses auto-generated value.
:param execution_name: name of the execution. If None, uses auto-generated value.
:param wait: if True, waits for execution to complete
.. note::
The ``name`` and ``version`` arguments do not apply to ``FlyteTask``, ``FlyteLaunchPlan``, and
``FlyteWorkflow`` entity inputs. These values are determined by referencing the entity identifier values.
"""
raise NotImplementedError(f"entity type {type(entity)} not recognized for execution")
# Flyte Remote Entities
# ---------------------
@execute.register(FlyteTask)
@execute.register(FlyteLaunchPlan)
def _(
self,
entity: typing.Union[FlyteTask, FlyteLaunchPlan],
inputs: typing.Dict[str, typing.Any],
project: str = None,
domain: str = None,
name: str = None,
version: str = None,
execution_name: str = None,
wait: bool = False,
) -> FlyteWorkflowExecution:
"""Execute a FlyteTask, or FlyteLaunchplan.
NOTE: the name and version arguments are currently not used and only there consistency in the function signature
"""
if name or version:
remote_logger.warning(f"The 'name' and 'version' arguments are ignored for entities of type {type(entity)}")
resolved_identifiers = self._resolve_identifier_kwargs(
entity, project, domain, entity.id.name, entity.id.version
)
return self._execute(
entity,
inputs,
project=resolved_identifiers.project,
domain=resolved_identifiers.domain,
execution_name=execution_name,
wait=wait,
labels=entity.labels if isinstance(entity, FlyteLaunchPlan) and entity.labels.values else None,
annotations=entity.annotations
if isinstance(entity, FlyteLaunchPlan) and entity.annotations.values
else None,
auth_role=entity.auth_role
if isinstance(entity, FlyteLaunchPlan)
and (entity.auth_role.assumable_iam_role or entity.auth_role.kubernetes_service_account)
else None,
)
@execute.register
def _(
self,
entity: FlyteWorkflow,
inputs: typing.Dict[str, typing.Any],
project: str = None,
domain: str = None,
name: str = None,
version: str = None,
execution_name: str = None,
wait: bool = False,
) -> FlyteWorkflowExecution:
"""Execute a FlyteWorkflow.
NOTE: the name and version arguments are currently not used and are only there for consistency in the function signature.
"""
if name or version:
remote_logger.warning(f"The 'name' and 'version' arguments are ignored for entities of type {type(entity)}")
resolved_identifiers = self._resolve_identifier_kwargs(
entity, project, domain, entity.id.name, entity.id.version
)
launch_plan = self.fetch_launch_plan(entity.id.project, entity.id.domain, entity.id.name, entity.id.version)
return self.execute(
launch_plan,
inputs,
project=resolved_identifiers.project,
domain=resolved_identifiers.domain,
execution_name=execution_name,
wait=wait,
)
# Flytekit Entities
# -----------------
@execute.register
def _(
self,
entity: PythonTask,
inputs: typing.Dict[str, typing.Any],
project: str = None,
domain: str = None,
name: str = None,
version: str = None,
execution_name: str = None,
wait: bool = False,
) -> FlyteWorkflowExecution:
"""Execute an @task-decorated function or TaskTemplate task."""
resolved_identifiers = self._resolve_identifier_kwargs(entity, project, domain, name, version)
resolved_identifiers_dict = asdict(resolved_identifiers)
try:
flyte_task: FlyteTask = self.fetch_task(**resolved_identifiers_dict)
except Exception:
flyte_task: FlyteTask = self.register(entity, **resolved_identifiers_dict)
flyte_task.guessed_python_interface = entity.python_interface
return self.execute(
flyte_task,
inputs,
project=resolved_identifiers.project,
domain=resolved_identifiers.domain,
execution_name=execution_name,
wait=wait,
)
@execute.register
def _(
self,
entity: WorkflowBase,
inputs: typing.Dict[str, typing.Any],
project: str = None,
domain: str = None,
name: str = None,
version: str = None,
execution_name: str = None,
wait: bool = False,
) -> FlyteWorkflowExecution:
"""Execute an @workflow-decorated function."""
resolved_identifiers = self._resolve_identifier_kwargs(entity, project, domain, name, version)
resolved_identifiers_dict = asdict(resolved_identifiers)
try:
flyte_workflow: FlyteWorkflow = self.fetch_workflow(**resolved_identifiers_dict)
except FlyteEntityNotExistException:
logging.info("Try to register FlyteWorkflow because it wasn't found in Flyte Admin!")
self._register_entity_if_not_exists(entity, resolved_identifiers_dict)
flyte_workflow: FlyteWorkflow = self.register(entity, **resolved_identifiers_dict)
flyte_workflow.guessed_python_interface = entity.python_interface
ctx = context_manager.FlyteContext.current_context()
try:
self.fetch_launch_plan(**resolved_identifiers_dict)
except FlyteEntityNotExistException:
logging.info("Try to register default launch plan because it wasn't found in Flyte Admin!")
default_lp = LaunchPlan.get_default_launch_plan(ctx, entity)
self.register(default_lp, **resolved_identifiers_dict)
return self.execute(
flyte_workflow,
inputs,
project=resolved_identifiers.project,
domain=resolved_identifiers.domain,
execution_name=execution_name,
wait=wait,
)
@execute.register
def _(
self,
entity: LaunchPlan,
inputs: typing.Dict[str, typing.Any],
project: str = None,
domain: str = None,
name: str = None,
version: str = None,
execution_name: str = None,
wait: bool = False,
) -> FlyteWorkflowExecution:
"""Execute a LaunchPlan object."""
resolved_identifiers = self._resolve_identifier_kwargs(entity, project, domain, name, version)
resolved_identifiers_dict = asdict(resolved_identifiers)
try:
flyte_launchplan: FlyteLaunchPlan = self.fetch_launch_plan(**resolved_identifiers_dict)
except Exception:
flyte_launchplan: FlyteLaunchPlan = self.register(entity, **resolved_identifiers_dict)
flyte_launchplan.guessed_python_interface = entity.python_interface
return self.execute(
flyte_launchplan,
inputs,
project=resolved_identifiers.project,
domain=resolved_identifiers.domain,
execution_name=execution_name,
wait=wait,
)
###################################
# Wait for Executions to Complete #
###################################
def wait(
self,
execution: FlyteWorkflowExecution,
timeout: typing.Optional[timedelta] = None,
poll_interval: typing.Optional[timedelta] = None,
sync_nodes: bool = True,
) -> FlyteWorkflowExecution:
"""Wait for an execution to finish.
:param execution: execution object to wait on
:param timeout: maximum amount of time to wait
:param poll_interval: sync workflow execution at this interval
:param sync_nodes: passed along to the sync call for the workflow execution
"""
poll_interval = poll_interval or timedelta(seconds=30)
time_to_give_up = datetime.max if timeout is None else datetime.utcnow() + timeout
while datetime.utcnow() < time_to_give_up:
execution = self.sync_workflow_execution(execution, sync_nodes=sync_nodes)
if execution.is_complete:
return execution
time.sleep(poll_interval.total_seconds())
raise user_exceptions.FlyteTimeout(f"Execution {execution} did not complete before timeout.")
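# Illustrative usage sketch (added comment; the timeout values are arbitrary):
#   from datetime import timedelta
#   execution = remote.wait(execution, timeout=timedelta(minutes=10), poll_interval=timedelta(seconds=5))
#   assert execution.is_complete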
########################
# Sync Execution State #
########################
def sync(
self,
execution: FlyteWorkflowExecution,
entity_definition: typing.Union[FlyteWorkflow, FlyteTask] = None,
sync_nodes: bool = False,
) -> FlyteWorkflowExecution:
"""
This function was previously a singledispatchmethod. We've removed that but this function remains
so that we don't break people.
:param execution:
:param entity_definition:
:param sync_nodes: By default sync will fetch data on all underlying node executions (recursively,
so subworkflows will also get picked up). Set this to False in order to prevent that (which
will make this call faster).
:return: Returns the same execution object, but with additional information pulled in.
"""
if not isinstance(execution, FlyteWorkflowExecution):
raise ValueError(f"remote.sync should only be called on workflow executions, got {type(execution)}")
return self.sync_workflow_execution(execution, entity_definition, sync_nodes)
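# Illustrative usage sketch (added comment): re-hydrate a running execution, including its
# underlying node executions:
#   execution = remote.sync(execution, sync_nodes=True)
# After syncing, inputs/outputs are populated, and with sync_nodes=True the node executions
# (recursively, including subworkflows) are attached to the returned object.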
def sync_workflow_execution(
self,
execution: FlyteWorkflowExecution,
entity_definition: typing.Union[FlyteWorkflow, FlyteTask] = None,
sync_nodes: bool = False,
) -> FlyteWorkflowExecution:
"""
Sync a FlyteWorkflowExecution object with its corresponding remote state.
"""
if entity_definition is not None:
raise ValueError("Entity definition arguments aren't supported when syncing workflow executions")
# Fetch the closure before the data: if the order were reversed, the execution could finish in
# between, leaving a closure that reports is_complete=True paired with data fetched before the outputs existed.
execution._closure = self.client.get_execution(execution.id).closure
execution_data = self.client.get_execution_data(execution.id)
lp_id = execution.spec.launch_plan
if sync_nodes:
underlying_node_executions = [
FlyteNodeExecution.promote_from_model(n) for n in iterate_node_executions(self.client, execution.id)
]
if execution.spec.launch_plan.resource_type == ResourceType.TASK:
# This condition is only true for single-task executions
flyte_entity = self.fetch_task(lp_id.project, lp_id.domain, lp_id.name, lp_id.version)
if sync_nodes:
# Need to construct the mapping. Exactly three node executions should have been returned:
# a start node, an end node, and a task node.
task_node_exec = [
x
for x in filter(
lambda x: x.id.node_id != constants.START_NODE_ID and x.id.node_id != constants.END_NODE_ID,
underlying_node_executions,
)
]
# We need to manually make a map of the nodes since there is none for single task executions
# Assume the first one is the only one.
node_mapping = (
{
task_node_exec[0].id.node_id: FlyteNode(
id=flyte_entity.id,
upstream_nodes=[],
bindings=[],
metadata=NodeMetadata(name=""),
flyte_task=flyte_entity,
)
}
if len(task_node_exec) >= 1
else {} # This is for the case where node executions haven't appeared yet
)
else:
# This is the default case, an execution of a normal workflow through a launch plan
wf_id = self.fetch_launch_plan(lp_id.project, lp_id.domain, lp_id.name, lp_id.version).workflow_id
flyte_entity = self.fetch_workflow(wf_id.project, wf_id.domain, wf_id.name, wf_id.version)
execution._flyte_workflow = flyte_entity
node_mapping = flyte_entity._node_map
# update node executions (if requested), and inputs/outputs
if sync_nodes:
node_execs = {}
for n in underlying_node_executions:
node_execs[n.id.node_id] = self.sync_node_execution(n, node_mapping)
execution._node_executions = node_execs
return self._assign_inputs_and_outputs(execution, execution_data, flyte_entity.interface)
def sync_node_execution(
self, execution: FlyteNodeExecution, node_mapping: typing.Dict[str, FlyteNode]
) -> FlyteNodeExecution:
"""
Get data backing a node execution. These FlyteNodeExecution objects should've come from Admin with the model
fields already populated correctly. For purposes of the remote experience, we'd like to supplement the object
with some additional fields:
- inputs/outputs
- task/workflow executions, and/or underlying node executions in the case of parent nodes
- TypedInterface (remote wrapper type)
A node can have several different types of executions behind it. That is, the node could've run (perhaps
multiple times because of retries):
- A task
- A static subworkflow
- A dynamic subworkflow (which in turn may have run additional tasks, subwfs, and/or launch plans)
- A launch plan
The data model is complicated, so ascertaining which of these happened is a bit tricky. That logic is
encapsulated in this function.
"""
# For single task execution - the metadata spec node id is missing. In these cases, revert to regular node id
node_id = execution.metadata.spec_node_id
if not node_id:
node_id = execution.id.node_id
remote_logger.debug(f"No metadata spec_node_id found, using {node_id}")
# First see if it's a dummy node, if it is, we just skip it.
if constants.START_NODE_ID in node_id or constants.END_NODE_ID in node_id:
return execution
# Look for the Node object in the mapping supplied
if node_id in node_mapping:
execution._node = node_mapping[node_id]
else:
raise Exception(f"Missing node from mapping: {node_id}")
# Get the node execution data
node_execution_get_data_response = self.client.get_node_execution_data(execution.id)
# Calling a launch plan directly case
# If a node ran a launch plan directly (i.e. not through a dynamic task or anything) then
# the closure should have a workflow_node_metadata populated with the launched execution id.
# The parent node flag should not be populated here
# This is the simplest case
if not execution.metadata.is_parent_node and execution.closure.workflow_node_metadata:
launched_exec_id = execution.closure.workflow_node_metadata.execution_id
# This is a recursive call, basically going through the same process that brought us here in the first
# place, but on the launched execution.
launched_exec = self.fetch_workflow_execution(
project=launched_exec_id.project, domain=launched_exec_id.domain, name=launched_exec_id.name
)
self.sync_workflow_execution(launched_exec)
if launched_exec.is_complete:
# The synced underlying execution should've had these populated.
execution._inputs = launched_exec.inputs
execution._outputs = launched_exec.outputs
execution._workflow_executions.append(launched_exec)
execution._interface = launched_exec._flyte_workflow.interface
return execution
# If a node ran a static subworkflow or a dynamic subworkflow then the parent flag will be set.
if execution.metadata.is_parent_node:
# We'll need to query child node executions regardless since this is a parent node
child_node_executions = iterate_node_executions(
self.client,
workflow_execution_identifier=execution.id.execution_id,
unique_parent_id=execution.id.node_id,
)
child_node_executions = [x for x in child_node_executions]
# If this was a dynamic task, then there should be a CompiledWorkflowClosure inside the
# NodeExecutionGetDataResponse
if node_execution_get_data_response.dynamic_workflow is not None:
compiled_wf = node_execution_get_data_response.dynamic_workflow.compiled_workflow
node_launch_plans = {}
# TODO: Inspect branch nodes for launch plans
for node in FlyteWorkflow.get_non_system_nodes(compiled_wf.primary.template.nodes):
if (
node.workflow_node is not None
and node.workflow_node.launchplan_ref is not None
and node.workflow_node.launchplan_ref not in node_launch_plans
):
node_launch_plans[node.workflow_node.launchplan_ref] = self.client.get_launch_plan(
node.workflow_node.launchplan_ref
).spec
dynamic_flyte_wf = FlyteWorkflow.promote_from_closure(compiled_wf, node_launch_plans)
execution._underlying_node_executions = [
self.sync_node_execution(FlyteNodeExecution.promote_from_model(cne), dynamic_flyte_wf._node_map)
for cne in child_node_executions
]
# This is copied from below - dynamic tasks have both task executions (executions of the parent
# task) as well as underlying node executions (of the generated subworkflow). Feel free to refactor
# if you can think of a better way.
execution._task_executions = [
self.sync_task_execution(FlyteTaskExecution.promote_from_model(t))
for t in iterate_task_executions(self.client, execution.id)
]
execution._interface = dynamic_flyte_wf.interface
else:
# If it does not, then it should be a static subworkflow
if not isinstance(execution._node.flyte_entity, FlyteWorkflow):
remote_logger.error(
f"NE {execution} entity should be a workflow, {type(execution._node)}, {execution._node}"
)
raise Exception(f"Node entity has type {type(execution._node)}")
sub_flyte_workflow = execution._node.flyte_entity
sub_node_mapping = {n.id: n for n in sub_flyte_workflow.flyte_nodes}
execution._underlying_node_executions = [
self.sync_node_execution(FlyteNodeExecution.promote_from_model(cne), sub_node_mapping)
for cne in child_node_executions
]
execution._interface = sub_flyte_workflow.interface
# This is the plain ol' task execution case
else:
execution._task_executions = [
self.sync_task_execution(FlyteTaskExecution.promote_from_model(t))
for t in iterate_task_executions(self.client, execution.id)
]
execution._interface = execution._node.flyte_entity.interface
self._assign_inputs_and_outputs(
execution,
node_execution_get_data_response,
execution.interface,
)
return execution
def sync_task_execution(
self, execution: FlyteTaskExecution, entity_definition: typing.Union[FlyteWorkflow, FlyteTask] = None
) -> FlyteTaskExecution:
"""Sync a FlyteTaskExecution object with its corresponding remote state."""
if entity_definition is not None:
raise ValueError("Entity definition arguments aren't supported when syncing task executions")
# sync closure and inputs/outputs
execution._closure = self.client.get_task_execution(execution.id).closure
execution_data = self.client.get_task_execution_data(execution.id)
task_id = execution.id.task_id
task = self.fetch_task(task_id.project, task_id.domain, task_id.name, task_id.version)
return self._assign_inputs_and_outputs(execution, execution_data, task.interface)
#############################
# Terminate Execution State #
#############################
def terminate(self, execution: FlyteWorkflowExecution, cause: str):
"""Terminate a workflow execution.
:param execution: workflow execution to terminate
:param cause: reason for termination
"""
self.client.terminate_execution(execution.id, cause)
##################
# Helper Methods #
##################
def _assign_inputs_and_outputs(
self,
execution: typing.Union[FlyteWorkflowExecution, FlyteNodeExecution, FlyteTaskExecution],
execution_data,
interface,
):
"""Helper for assigning synced inputs and outputs to an execution object."""
with self.remote_context() as ctx:
execution._inputs = TypeEngine.literal_map_to_kwargs(
ctx=ctx,
lm=self._get_input_literal_map(execution_data),
python_types=TypeEngine.guess_python_types(interface.inputs),
)
if execution.is_complete and not execution.error:
execution._outputs = TypeEngine.literal_map_to_kwargs(
ctx=ctx,
lm=self._get_output_literal_map(execution_data),
python_types=TypeEngine.guess_python_types(interface.outputs),
)
return execution
def _get_input_literal_map(self, execution_data: ExecutionDataResponse) -> literal_models.LiteralMap:
# Inputs are returned inline unless they are too big, in which case a url blob pointing to them is returned.
if bool(execution_data.full_inputs.literals):
return execution_data.full_inputs
elif execution_data.inputs.bytes > 0:
with self.remote_context() as ctx:
tmp_name = os.path.join(ctx.file_access.local_sandbox_dir, "inputs.pb")
ctx.file_access.get_data(execution_data.inputs.url, tmp_name)
return literal_models.LiteralMap.from_flyte_idl(
common_utils.load_proto_from_file(literals_pb2.LiteralMap, tmp_name)
)
return literal_models.LiteralMap({})
def _get_output_literal_map(self, execution_data: ExecutionDataResponse) -> literal_models.LiteralMap:
# Outputs are returned inline unless they are too big, in which case a url blob pointing to them is returned.
if bool(execution_data.full_outputs.literals):
return execution_data.full_outputs
elif execution_data.outputs.bytes > 0:
with self.remote_context() as ctx:
tmp_name = os.path.join(ctx.file_access.local_sandbox_dir, "outputs.pb")
ctx.file_access.get_data(execution_data.outputs.url, tmp_name)
return literal_models.LiteralMap.from_flyte_idl(
common_utils.load_proto_from_file(literals_pb2.LiteralMap, tmp_name)
)
return literal_models.LiteralMap({})
|
the-stack_106_27201 | import torch
import torch.nn as nn
from fixed_gru import FixedGru
from fixed_rnn import FixedRnn
class CharCnnRnn(nn.Module):
# Char-cnn-rnn text embedding
def __init__(self, rnn_type='fixed_rnn', model_type='cvpr'):
super().__init__()
if model_type == 'cvpr':
rnn_dim = 256
use_maxpool3 = True
rnn = FixedRnn
rnn_num_steps = 8
else:
# icml model type
rnn_dim = 512
if rnn_type == 'fixed_rnn':
use_maxpool3 = True
rnn = FixedRnn
rnn_num_steps = 8
else:
# Use fixed_gru
use_maxpool3 = False
rnn = FixedGru
rnn_num_steps = 18
self.rnn_type = rnn_type
self.model_type = model_type
self.use_maxpool3 = use_maxpool3
# network setup
# (B, 70, 201)
self.conv1 = nn.Conv1d(70, 384, kernel_size=4)
self.threshold1 = nn.Threshold(1e-6, 0)
self.maxpool1 = nn.MaxPool1d(kernel_size=3, stride=3)
# (B, 384, 66)
self.conv2 = nn.Conv1d(384, 512, kernel_size=4)
self.threshold2 = nn.Threshold(1e-6, 0)
self.maxpool2 = nn.MaxPool1d(kernel_size=3, stride=3)
# (B, 512, 21)
self.conv3 = nn.Conv1d(512, rnn_dim, kernel_size=4)
self.threshold3 = nn.Threshold(1e-6, 0)
if use_maxpool3:
self.maxpool3 = nn.MaxPool1d(kernel_size=3, stride=2)
# (B, rnn_dim, rnn_num_steps)
self.rnn = rnn(num_steps=rnn_num_steps, emb_dim=rnn_dim)
# (B, rnn_dim)
self.emb_proj = nn.Linear(rnn_dim, 1024)
# (B, 1024)
def forward(self, txt):
# temporal convolutions
out = self.conv1(txt)
out = self.threshold1(out)
out = self.maxpool1(out)
out = self.conv2(out)
out = self.threshold2(out)
out = self.maxpool2(out)
out = self.conv3(out)
out = self.threshold3(out)
if self.use_maxpool3:
out = self.maxpool3(out)
# recurrent computation
out = out.permute(0, 2, 1)
out = self.rnn(out)
# linear projection
out = self.emb_proj(out)
return out
def str_to_labelvec(string, max_str_len=201):
string = string.lower()
alphabet = "abcdefghijklmnopqrstuvwxyz0123456789-,;.!?:'\"/\\|_@#$%^&*~`+-=<>()[]{} "
# {'char': num, ...}
alpha_to_num = {k: v + 1 for k, v in zip(alphabet, range(len(alphabet)))}
labels = torch.zeros(max_str_len, requires_grad=False).long()
max_i = min(max_str_len, len(string))
for i in range(max_i):
# Append ' ' number if char not found
labels[i] = alpha_to_num.get(string[i], alpha_to_num[' '])
return labels
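# Worked example (added comment): with the alphabet above, str_to_labelvec("abc", max_str_len=5)
# returns tensor([1, 2, 3, 0, 0]) -- 'a' maps to 1, 'b' to 2, 'c' to 3, and unused slots stay 0.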
def labelvec_to_onehot(labels):
labels = torch.LongTensor(labels).unsqueeze(1)
one_hot = torch.zeros(labels.size(0), 71, requires_grad=False).scatter_(1, labels, 1.)
# Ignore zeros in one-hot mask (position 0 = empty one-hot)
one_hot = one_hot[:, 1:]
one_hot = one_hot.permute(1, 0)
return one_hot
def prepare_text(string, max_str_len=201):
# Converts a text description from string format to one-hot tensor format.
labels = str_to_labelvec(string, max_str_len)
one_hot = labelvec_to_onehot(labels)
return one_hot
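# Illustrative end-to-end sketch (added comment; the caption text is made up):
#   net = CharCnnRnn(rnn_type='fixed_rnn', model_type='cvpr')
#   txt = prepare_text('a small bird with red wings').unsqueeze(0)  # shape (1, 70, 201)
#   emb = net(txt)                                                  # shape (1, 1024)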
|
the-stack_106_27202 | import asyncio
import copy
import logging
import os
import random
import shutil
import ssl
import sys
import tempfile
import time
from argparse import Namespace
from dataclasses import replace
from pathlib import Path
from typing import Callable, Dict, List, Optional, Tuple, Any
from blspy import AugSchemeMPL, G1Element, G2Element, PrivateKey
from chiabip158 import PyBIP158
from chives.cmds.init_funcs import create_all_ssl, create_default_chives_config
from chives.daemon.keychain_proxy import connect_to_keychain_and_validate, wrap_local_keychain
from chives.full_node.bundle_tools import (
best_solution_generator_from_template,
detect_potential_template_generator,
simple_solution_generator,
)
from chives.util.errors import Err
from chives.full_node.generator import setup_generator_args
from chives.full_node.mempool_check_conditions import GENERATOR_MOD
from chives.plotting.create_plots import create_plots, PlotKeys
from chives.consensus.block_creation import unfinished_block_to_full_block
from chives.consensus.block_record import BlockRecord
from chives.consensus.block_rewards import calculate_base_farmer_reward, calculate_pool_reward
from chives.consensus.blockchain_interface import BlockchainInterface
from chives.consensus.coinbase import create_puzzlehash_for_pk, create_farmer_coin, create_pool_coin
from chives.consensus.condition_costs import ConditionCost
from chives.consensus.constants import ConsensusConstants
from chives.consensus.default_constants import DEFAULT_CONSTANTS
from chives.consensus.deficit import calculate_deficit
from chives.consensus.full_block_to_block_record import block_to_block_record
from chives.consensus.make_sub_epoch_summary import next_sub_epoch_summary
from chives.consensus.pot_iterations import (
calculate_ip_iters,
calculate_iterations_quality,
calculate_sp_interval_iters,
calculate_sp_iters,
is_overflow_block,
)
from chives.consensus.vdf_info_computation import get_signage_point_vdf_info
from chives.full_node.signage_point import SignagePoint
from chives.plotting.util import PlotsRefreshParameter, PlotRefreshResult, PlotRefreshEvents, parse_plot_info
from chives.plotting.manager import PlotManager
from chives.server.server import ssl_context_for_server
from chives.types.blockchain_format.classgroup import ClassgroupElement
from chives.types.blockchain_format.coin import Coin, hash_coin_list
from chives.types.blockchain_format.foliage import Foliage, FoliageBlockData, FoliageTransactionBlock, TransactionsInfo
from chives.types.blockchain_format.pool_target import PoolTarget
from chives.types.blockchain_format.program import INFINITE_COST
from chives.types.blockchain_format.proof_of_space import ProofOfSpace
from chives.types.blockchain_format.reward_chain_block import RewardChainBlockUnfinished
from chives.types.blockchain_format.sized_bytes import bytes32
from chives.types.blockchain_format.slots import (
ChallengeChainSubSlot,
InfusedChallengeChainSubSlot,
RewardChainSubSlot,
SubSlotProofs,
)
from chives.types.blockchain_format.sub_epoch_summary import SubEpochSummary
from chives.types.blockchain_format.vdf import VDFInfo, VDFProof
from chives.types.end_of_slot_bundle import EndOfSubSlotBundle
from chives.types.full_block import FullBlock
from chives.types.generator_types import BlockGenerator, CompressorArg
from chives.types.spend_bundle import SpendBundle
from chives.types.unfinished_block import UnfinishedBlock
from chives.util.bech32m import encode_puzzle_hash
from chives.util.block_cache import BlockCache
from chives.util.condition_tools import ConditionOpcode
from chives.util.config import load_config, save_config
from chives.util.hash import std_hash
from chives.util.ints import uint8, uint16, uint32, uint64, uint128
from chives.util.keychain import Keychain, bytes_to_mnemonic
from chives.util.merkle_set import MerkleSet
from chives.util.prev_transaction_block import get_prev_transaction_block
from chives.util.path import mkdir
from chives.util.vdf_prover import get_vdf_info_and_proof
from tests.time_out_assert import time_out_assert
from tests.wallet_tools import WalletTool
from chives.wallet.derive_keys import (
master_sk_to_farmer_sk,
master_sk_to_local_sk,
master_sk_to_pool_sk,
master_sk_to_wallet_sk,
)
test_constants = DEFAULT_CONSTANTS.replace(
**{
"MIN_PLOT_SIZE": 18,
"MIN_BLOCKS_PER_CHALLENGE_BLOCK": 12,
"DIFFICULTY_STARTING": 2 ** 12,
"DISCRIMINANT_SIZE_BITS": 16,
"SUB_EPOCH_BLOCKS": 170,
"WEIGHT_PROOF_THRESHOLD": 2,
"WEIGHT_PROOF_RECENT_BLOCKS": 380,
"DIFFICULTY_CONSTANT_FACTOR": 33554432,
"NUM_SPS_SUB_SLOT": 16, # Must be a power of 2
"MAX_SUB_SLOT_BLOCKS": 50,
"EPOCH_BLOCKS": 340,
"BLOCKS_CACHE_SIZE": 340 + 3 * 50, # Coordinate with the above values
"SUB_SLOT_TIME_TARGET": 600, # The target number of seconds per slot, mainnet 600
"SUB_SLOT_ITERS_STARTING": 2 ** 10, # Must be a multiple of 64
"NUMBER_ZERO_BITS_PLOT_FILTER": 1, # H(plot signature of the challenge) must start with these many zeroes
"MAX_FUTURE_TIME": 3600
* 24
* 10, # Allows creating blockchains with timestamps up to 10 days in the future, for testing
"COST_PER_BYTE": 1337,
"MEMPOOL_BLOCK_BUFFER": 6,
"NETWORK_TYPE": 1,
}
)
log = logging.getLogger(__name__)
class BlockTools:
"""
Tools to generate blocks for testing.
"""
def __init__(
self,
constants: ConsensusConstants = test_constants,
root_path: Optional[Path] = None,
const_dict=None,
keychain: Optional[Keychain] = None,
):
self._tempdir = None
if root_path is None:
self._tempdir = tempfile.TemporaryDirectory()
root_path = Path(self._tempdir.name)
self.root_path = root_path
self.local_keychain = keychain
create_default_chives_config(root_path)
create_all_ssl(root_path)
self.local_sk_cache: Dict[bytes32, Tuple[PrivateKey, Any]] = {}
self._config = load_config(self.root_path, "config.yaml")
self._config["logging"]["log_stdout"] = True
self._config["selected_network"] = "testnet0"
for service in ["harvester", "farmer", "full_node", "wallet", "introducer", "timelord", "pool"]:
self._config[service]["selected_network"] = "testnet0"
save_config(self.root_path, "config.yaml", self._config)
overrides = self._config["network_overrides"]["constants"][self._config["selected_network"]]
updated_constants = constants.replace_str_to_bytes(**overrides)
if const_dict is not None:
updated_constants = updated_constants.replace(**const_dict)
self.constants = updated_constants
self.refresh_parameter: PlotsRefreshParameter = PlotsRefreshParameter(batch_size=2)
self.plot_dir: Path = get_plot_dir()
self.temp_dir: Path = get_plot_tmp_dir()
mkdir(self.plot_dir)
mkdir(self.temp_dir)
self.expected_plots: Dict[bytes32, Path] = {}
self.total_result = PlotRefreshResult()
def test_callback(event: PlotRefreshEvents, update_result: PlotRefreshResult):
assert update_result.duration < 5
if event == PlotRefreshEvents.started:
self.total_result = PlotRefreshResult()
if event == PlotRefreshEvents.batch_processed:
self.total_result.loaded += update_result.loaded
self.total_result.removed += update_result.removed
self.total_result.processed += update_result.processed
self.total_result.duration += update_result.duration
assert update_result.remaining == len(self.expected_plots) - self.total_result.processed
assert update_result.loaded <= self.refresh_parameter.batch_size
if event == PlotRefreshEvents.done:
assert self.total_result.loaded == update_result.loaded
assert self.total_result.removed == update_result.removed
assert self.total_result.processed == update_result.processed
assert self.total_result.duration == update_result.duration
assert update_result.remaining == 0
assert len(self.plot_manager.plots) == len(self.expected_plots)
self.plot_manager: PlotManager = PlotManager(
self.root_path, refresh_parameter=self.refresh_parameter, refresh_callback=test_callback
)
async def setup_keys(self):
if self.local_keychain:
self.keychain_proxy = wrap_local_keychain(self.local_keychain, log=log)
else:
self.keychain_proxy = await connect_to_keychain_and_validate(
self.root_path, log, user="testing-1.8.0", service="chives-testing-1.8.0"
)
await self.keychain_proxy.delete_all_keys()
self.farmer_master_sk_entropy = std_hash(b"block_tools farmer key")
self.pool_master_sk_entropy = std_hash(b"block_tools pool key")
self.farmer_master_sk = await self.keychain_proxy.add_private_key(
bytes_to_mnemonic(self.farmer_master_sk_entropy), ""
)
self.pool_master_sk = await self.keychain_proxy.add_private_key(
bytes_to_mnemonic(self.pool_master_sk_entropy), ""
)
self.farmer_pk = master_sk_to_farmer_sk(self.farmer_master_sk).get_g1()
self.pool_pk = master_sk_to_pool_sk(self.pool_master_sk).get_g1()
self.farmer_ph: bytes32 = create_puzzlehash_for_pk(
master_sk_to_wallet_sk(self.farmer_master_sk, uint32(0)).get_g1()
)
self.pool_ph: bytes32 = create_puzzlehash_for_pk(
master_sk_to_wallet_sk(self.pool_master_sk, uint32(0)).get_g1()
)
self.all_sks: List[PrivateKey] = [sk for sk, _ in await self.keychain_proxy.get_all_private_keys()]
self.pool_pubkeys: List[G1Element] = [master_sk_to_pool_sk(sk).get_g1() for sk in self.all_sks]
self.farmer_pubkeys: List[G1Element] = [master_sk_to_farmer_sk(sk).get_g1() for sk in self.all_sks]
if len(self.pool_pubkeys) == 0 or len(self.farmer_pubkeys) == 0:
raise RuntimeError("Keys not generated. Run `chives generate keys`")
self.plot_manager.set_public_keys(self.farmer_pubkeys, self.pool_pubkeys)
def change_config(self, new_config: Dict):
self._config = new_config
overrides = self._config["network_overrides"]["constants"][self._config["selected_network"]]
updated_constants = self.constants.replace_str_to_bytes(**overrides)
self.constants = updated_constants
save_config(self.root_path, "config.yaml", self._config)
async def setup_plots(self):
assert len(self.expected_plots) == 0
# OG Plots
for i in range(15):
await self.new_plot()
# Pool Plots
for i in range(5):
await self.new_plot(self.pool_ph)
await self.refresh_plots()
async def new_plot(
self, pool_contract_puzzle_hash: Optional[bytes32] = None, path: Path = None
) -> Optional[bytes32]:
final_dir = self.plot_dir
if path is not None:
final_dir = path
mkdir(final_dir)
args = Namespace()
# Can't go much lower than 20, since plots start having no solutions and become more buggy
args.size = 22
# Uses many plots for testing, in order to guarantee proofs of space at every height
args.num = 1
args.buffer = 100
args.tmp_dir = self.temp_dir
args.tmp2_dir = final_dir
args.final_dir = final_dir
args.plotid = None
args.memo = None
args.buckets = 0
args.stripe_size = 2000
args.num_threads = 0
args.nobitfield = False
args.exclude_final_dir = False
args.list_duplicates = False
try:
pool_pk: Optional[G1Element] = None
pool_address: Optional[str] = None
if pool_contract_puzzle_hash is None:
pool_pk = self.pool_pk
else:
pool_address = encode_puzzle_hash(pool_contract_puzzle_hash, "xcc")
keys = PlotKeys(self.farmer_pk, pool_pk, pool_address)
# No datetime in the filename, to get deterministic filenames and not re-plot
created, existed = await create_plots(
args,
keys,
self.root_path,
use_datetime=False,
test_private_keys=[AugSchemeMPL.key_gen(std_hash(len(self.expected_plots).to_bytes(2, "big")))],
)
plot_id_new: Optional[bytes32] = None
path_new: Path = Path()
if len(created):
assert len(existed) == 0
plot_id_new, path_new = list(created.items())[0]
if len(existed):
assert len(created) == 0
plot_id_new, path_new = list(existed.items())[0]
self.expected_plots[plot_id_new] = path_new
# create_plots() updates plot_directories. Ensure we refresh our config to reflect the updated value
self._config["harvester"]["plot_directories"] = load_config(self.root_path, "config.yaml", "harvester")[
"plot_directories"
]
return plot_id_new
except KeyboardInterrupt:
shutil.rmtree(self.plot_dir, ignore_errors=True)
sys.exit(1)
async def refresh_plots(self):
self.plot_manager.refresh_parameter.batch_size = (
4 if len(self.expected_plots) % 3 == 0 else 3
) # Make sure we have at least some batches + a remainder
self.plot_manager.trigger_refresh()
assert self.plot_manager.needs_refresh()
self.plot_manager.start_refreshing()
await time_out_assert(10, self.plot_manager.needs_refresh, value=False)
self.plot_manager.stop_refreshing()
assert not self.plot_manager.needs_refresh()
async def delete_plot(self, plot_id: bytes32):
assert plot_id in self.expected_plots
self.expected_plots[plot_id].unlink()
del self.expected_plots[plot_id]
await self.refresh_plots()
@property
def config(self) -> Dict:
return copy.deepcopy(self._config)
def get_daemon_ssl_context(self) -> Optional[ssl.SSLContext]:
crt_path = self.root_path / self.config["daemon_ssl"]["private_crt"]
key_path = self.root_path / self.config["daemon_ssl"]["private_key"]
ca_cert_path = self.root_path / self.config["private_ssl_ca"]["crt"]
ca_key_path = self.root_path / self.config["private_ssl_ca"]["key"]
return ssl_context_for_server(ca_cert_path, ca_key_path, crt_path, key_path)
def get_plot_signature(self, m: bytes32, plot_pk: G1Element) -> G2Element:
"""
Returns the plot signature of the header data.
"""
farmer_sk = master_sk_to_farmer_sk(self.all_sks[0])
for plot_info in self.plot_manager.plots.values():
if plot_pk == plot_info.plot_public_key:
# Look up local_sk from plot to save locked memory
if plot_info.prover.get_id() in self.local_sk_cache:
local_master_sk, pool_pk_or_ph = self.local_sk_cache[plot_info.prover.get_id()]
else:
pool_pk_or_ph, _, local_master_sk = parse_plot_info(plot_info.prover.get_memo())
self.local_sk_cache[plot_info.prover.get_id()] = (local_master_sk, pool_pk_or_ph)
if isinstance(pool_pk_or_ph, G1Element):
include_taproot = False
else:
assert isinstance(pool_pk_or_ph, bytes32)
include_taproot = True
local_sk = master_sk_to_local_sk(local_master_sk)
agg_pk = ProofOfSpace.generate_plot_public_key(local_sk.get_g1(), farmer_sk.get_g1(), include_taproot)
assert agg_pk == plot_pk
harv_share = AugSchemeMPL.sign(local_sk, m, agg_pk)
farm_share = AugSchemeMPL.sign(farmer_sk, m, agg_pk)
if include_taproot:
taproot_sk: PrivateKey = ProofOfSpace.generate_taproot_sk(local_sk.get_g1(), farmer_sk.get_g1())
taproot_share: G2Element = AugSchemeMPL.sign(taproot_sk, m, agg_pk)
else:
taproot_share = G2Element()
return AugSchemeMPL.aggregate([harv_share, farm_share, taproot_share])
raise ValueError(f"Do not have key {plot_pk}")
def get_pool_key_signature(self, pool_target: PoolTarget, pool_pk: Optional[G1Element]) -> Optional[G2Element]:
# Returns the pool signature for the corresponding pk. If no pk is provided, returns None.
if pool_pk is None:
return None
for sk in self.all_sks:
sk_child = master_sk_to_pool_sk(sk)
if sk_child.get_g1() == pool_pk:
return AugSchemeMPL.sign(sk_child, bytes(pool_target))
raise ValueError(f"Do not have key {pool_pk}")
def get_farmer_wallet_tool(self) -> WalletTool:
return WalletTool(self.constants, self.farmer_master_sk)
def get_pool_wallet_tool(self) -> WalletTool:
return WalletTool(self.constants, self.pool_master_sk)
def get_consecutive_blocks(
self,
num_blocks: int,
block_list_input: List[FullBlock] = None,
farmer_reward_puzzle_hash: Optional[bytes32] = None,
pool_reward_puzzle_hash: Optional[bytes32] = None,
transaction_data: Optional[SpendBundle] = None,
seed: bytes = b"",
time_per_block: Optional[float] = None,
force_overflow: bool = False,
skip_slots: int = 0, # Force at least this number of empty slots before the first SB
guarantee_transaction_block: bool = False, # Force that this block must be a tx block
normalized_to_identity_cc_eos: bool = False,
normalized_to_identity_icc_eos: bool = False,
normalized_to_identity_cc_sp: bool = False,
normalized_to_identity_cc_ip: bool = False,
current_time: bool = False,
previous_generator: CompressorArg = None,
genesis_timestamp: Optional[uint64] = None,
force_plot_id: Optional[bytes32] = None,
) -> List[FullBlock]:
assert num_blocks > 0
if block_list_input is not None:
block_list = block_list_input.copy()
else:
block_list = []
constants = self.constants
transaction_data_included = False
if time_per_block is None:
time_per_block = float(constants.SUB_SLOT_TIME_TARGET) / float(constants.SLOT_BLOCKS_TARGET)
if farmer_reward_puzzle_hash is None:
farmer_reward_puzzle_hash = self.farmer_ph
if len(block_list) == 0:
if force_plot_id is not None:
raise ValueError("Cannot specify plot_id for genesis block")
initial_block_list_len = 0
genesis = self.create_genesis_block(
constants,
seed,
force_overflow=force_overflow,
skip_slots=skip_slots,
timestamp=(uint64(int(time.time())) if genesis_timestamp is None else genesis_timestamp),
)
log.info(f"Created block 0 iters: {genesis.total_iters}")
num_empty_slots_added = skip_slots
block_list = [genesis]
num_blocks -= 1
else:
initial_block_list_len = len(block_list)
num_empty_slots_added = uint32(0) # Allows forcing empty slots in the beginning, for testing purposes
if num_blocks == 0:
return block_list
height_to_hash, difficulty, blocks = load_block_list(block_list, constants)
latest_block: BlockRecord = blocks[block_list[-1].header_hash]
curr = latest_block
while not curr.is_transaction_block:
curr = blocks[curr.prev_hash]
start_timestamp = curr.timestamp
start_height = curr.height
curr = latest_block
blocks_added_this_sub_slot = 1
while not curr.first_in_sub_slot:
curr = blocks[curr.prev_hash]
blocks_added_this_sub_slot += 1
finished_sub_slots_at_sp: List[EndOfSubSlotBundle] = [] # Sub-slots since last block, up to signage point
finished_sub_slots_at_ip: List[EndOfSubSlotBundle] = [] # Sub-slots since last block, up to infusion point
sub_slot_iters: uint64 = latest_block.sub_slot_iters # The number of iterations in one sub-slot
same_slot_as_last = True # Only applies to first slot, to prevent old blocks from being added
sub_slot_start_total_iters: uint128 = latest_block.ip_sub_slot_total_iters(constants)
sub_slots_finished = 0
pending_ses: bool = False
# Start at the last block in block list
# Get the challenge for that slot
while True:
slot_cc_challenge, slot_rc_challenge = get_challenges(
constants,
blocks,
finished_sub_slots_at_sp,
latest_block.header_hash,
)
prev_num_of_blocks = num_blocks
if num_empty_slots_added < skip_slots:
# If did not reach the target slots to skip, don't make any proofs for this sub-slot
num_empty_slots_added += 1
else:
# Loop over every signage point (Except for the last ones, which are used for overflows)
for signage_point_index in range(0, constants.NUM_SPS_SUB_SLOT - constants.NUM_SP_INTERVALS_EXTRA):
curr = latest_block
while curr.total_iters > sub_slot_start_total_iters + calculate_sp_iters(
constants, sub_slot_iters, uint8(signage_point_index)
):
if curr.height == 0:
break
curr = blocks[curr.prev_hash]
if curr.total_iters > sub_slot_start_total_iters:
finished_sub_slots_at_sp = []
if same_slot_as_last:
if signage_point_index < latest_block.signage_point_index:
# Ignore this signage_point because it's in the past
continue
signage_point: SignagePoint = get_signage_point(
constants,
BlockCache(blocks),
latest_block,
sub_slot_start_total_iters,
uint8(signage_point_index),
finished_sub_slots_at_sp,
sub_slot_iters,
normalized_to_identity_cc_sp,
)
if signage_point_index == 0:
cc_sp_output_hash: bytes32 = slot_cc_challenge
else:
assert signage_point.cc_vdf is not None
cc_sp_output_hash = signage_point.cc_vdf.output.get_hash()
qualified_proofs: List[Tuple[uint64, ProofOfSpace]] = self.get_pospaces_for_challenge(
constants,
slot_cc_challenge,
cc_sp_output_hash,
seed,
difficulty,
sub_slot_iters,
force_plot_id=force_plot_id,
)
for required_iters, proof_of_space in sorted(qualified_proofs, key=lambda t: t[0]):
if blocks_added_this_sub_slot == constants.MAX_SUB_SLOT_BLOCKS or force_overflow:
break
if same_slot_as_last:
if signage_point_index == latest_block.signage_point_index:
# Ignore this block because it's in the past
if required_iters <= latest_block.required_iters:
continue
assert latest_block.header_hash in blocks
additions = None
removals = None
if transaction_data_included:
transaction_data = None
if transaction_data is not None and not transaction_data_included:
additions = transaction_data.additions()
removals = transaction_data.removals()
assert start_timestamp is not None
if proof_of_space.pool_contract_puzzle_hash is not None:
if pool_reward_puzzle_hash is not None:
# The caller wants to be paid to a specific address, but this PoSpace is tied to an
# address, so continue until a proof of space tied to a pk is found
continue
pool_target = PoolTarget(proof_of_space.pool_contract_puzzle_hash, uint32(0))
else:
if pool_reward_puzzle_hash is not None:
pool_target = PoolTarget(pool_reward_puzzle_hash, uint32(0))
else:
pool_target = PoolTarget(self.pool_ph, uint32(0))
if transaction_data is not None:
if previous_generator is not None:
block_generator: Optional[BlockGenerator] = best_solution_generator_from_template(
previous_generator, transaction_data
)
else:
block_generator = simple_solution_generator(transaction_data)
aggregate_signature = transaction_data.aggregated_signature
else:
block_generator = None
aggregate_signature = G2Element()
full_block, block_record = get_full_block_and_block_record(
constants,
blocks,
sub_slot_start_total_iters,
uint8(signage_point_index),
proof_of_space,
slot_cc_challenge,
slot_rc_challenge,
farmer_reward_puzzle_hash,
pool_target,
start_timestamp,
start_height,
time_per_block,
block_generator,
aggregate_signature,
additions,
removals,
height_to_hash,
difficulty,
required_iters,
sub_slot_iters,
self.get_plot_signature,
self.get_pool_key_signature,
finished_sub_slots_at_ip,
signage_point,
latest_block,
seed,
normalized_to_identity_cc_ip,
current_time=current_time,
)
if block_record.is_transaction_block:
transaction_data_included = True
else:
if guarantee_transaction_block:
continue
if pending_ses:
pending_ses = False
block_list.append(full_block)
if full_block.transactions_generator is not None:
compressor_arg = detect_potential_template_generator(
full_block.height, full_block.transactions_generator
)
if compressor_arg is not None:
previous_generator = compressor_arg
blocks_added_this_sub_slot += 1
blocks[full_block.header_hash] = block_record
log.info(f"Created block {block_record.height} ove=False, iters " f"{block_record.total_iters}")
height_to_hash[uint32(full_block.height)] = full_block.header_hash
latest_block = blocks[full_block.header_hash]
finished_sub_slots_at_ip = []
num_blocks -= 1
if num_blocks == 0:
return block_list
# Finish the end of sub-slot and try again next sub-slot
# End of sub-slot logic
if len(finished_sub_slots_at_ip) == 0:
# Block has been created within this sub-slot
eos_iters: uint64 = uint64(sub_slot_iters - (latest_block.total_iters - sub_slot_start_total_iters))
cc_input: ClassgroupElement = latest_block.challenge_vdf_output
rc_challenge: bytes32 = latest_block.reward_infusion_new_challenge
else:
# No blocks were successfully created within this sub-slot
eos_iters = sub_slot_iters
cc_input = ClassgroupElement.get_default_element()
rc_challenge = slot_rc_challenge
cc_vdf, cc_proof = get_vdf_info_and_proof(
constants,
cc_input,
slot_cc_challenge,
eos_iters,
)
rc_vdf, rc_proof = get_vdf_info_and_proof(
constants,
ClassgroupElement.get_default_element(),
rc_challenge,
eos_iters,
)
eos_deficit: uint8 = (
latest_block.deficit if latest_block.deficit > 0 else constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK
)
icc_eos_vdf, icc_ip_proof = get_icc(
constants,
uint128(sub_slot_start_total_iters + sub_slot_iters),
finished_sub_slots_at_ip,
latest_block,
blocks,
sub_slot_start_total_iters,
eos_deficit,
)
# End of slot vdf info for icc and cc have to be from challenge block or start of slot, respectively,
# in order for light clients to validate.
cc_vdf = VDFInfo(cc_vdf.challenge, sub_slot_iters, cc_vdf.output)
if normalized_to_identity_cc_eos:
_, cc_proof = get_vdf_info_and_proof(
constants,
ClassgroupElement.get_default_element(),
cc_vdf.challenge,
sub_slot_iters,
True,
)
if pending_ses:
sub_epoch_summary: Optional[SubEpochSummary] = None
else:
sub_epoch_summary = next_sub_epoch_summary(
constants,
BlockCache(blocks, height_to_hash),
latest_block.required_iters,
block_list[-1],
False,
)
pending_ses = True
if sub_epoch_summary is not None:
ses_hash = sub_epoch_summary.get_hash()
new_sub_slot_iters: Optional[uint64] = sub_epoch_summary.new_sub_slot_iters
new_difficulty: Optional[uint64] = sub_epoch_summary.new_difficulty
log.info(f"Sub epoch summary: {sub_epoch_summary}")
else:
ses_hash = None
new_sub_slot_iters = None
new_difficulty = None
if icc_eos_vdf is not None:
# Icc vdf (Deficit of latest block is <= 4)
if len(finished_sub_slots_at_ip) == 0:
# This means there are blocks in this sub-slot
curr = latest_block
while not curr.is_challenge_block(constants) and not curr.first_in_sub_slot:
curr = blocks[curr.prev_hash]
if curr.is_challenge_block(constants):
icc_eos_iters = uint64(sub_slot_start_total_iters + sub_slot_iters - curr.total_iters)
else:
icc_eos_iters = sub_slot_iters
else:
# This means there are no blocks in this sub-slot
icc_eos_iters = sub_slot_iters
icc_eos_vdf = VDFInfo(
icc_eos_vdf.challenge,
icc_eos_iters,
icc_eos_vdf.output,
)
if normalized_to_identity_icc_eos:
_, icc_ip_proof = get_vdf_info_and_proof(
constants,
ClassgroupElement.get_default_element(),
icc_eos_vdf.challenge,
icc_eos_iters,
True,
)
icc_sub_slot: Optional[InfusedChallengeChainSubSlot] = InfusedChallengeChainSubSlot(icc_eos_vdf)
assert icc_sub_slot is not None
icc_sub_slot_hash = icc_sub_slot.get_hash() if latest_block.deficit == 0 else None
cc_sub_slot = ChallengeChainSubSlot(
cc_vdf,
icc_sub_slot_hash,
ses_hash,
new_sub_slot_iters,
new_difficulty,
)
else:
# No icc
icc_sub_slot = None
cc_sub_slot = ChallengeChainSubSlot(cc_vdf, None, ses_hash, new_sub_slot_iters, new_difficulty)
finished_sub_slots_at_ip.append(
EndOfSubSlotBundle(
cc_sub_slot,
icc_sub_slot,
RewardChainSubSlot(
rc_vdf,
cc_sub_slot.get_hash(),
icc_sub_slot.get_hash() if icc_sub_slot is not None else None,
eos_deficit,
),
SubSlotProofs(cc_proof, icc_ip_proof, rc_proof),
)
)
finished_sub_slots_eos = finished_sub_slots_at_ip.copy()
latest_block_eos = latest_block
overflow_cc_challenge = finished_sub_slots_at_ip[-1].challenge_chain.get_hash()
overflow_rc_challenge = finished_sub_slots_at_ip[-1].reward_chain.get_hash()
additions = None
removals = None
if transaction_data_included:
transaction_data = None
if transaction_data is not None and not transaction_data_included:
additions = transaction_data.additions()
removals = transaction_data.removals()
sub_slots_finished += 1
log.info(
f"Sub slot finished. blocks included: {blocks_added_this_sub_slot} blocks_per_slot: "
f"{(len(block_list) - initial_block_list_len)/sub_slots_finished}"
)
blocks_added_this_sub_slot = 0 # Sub slot ended, overflows are in next sub slot
# Handle overflows: No overflows on new epoch
if new_sub_slot_iters is None and num_empty_slots_added >= skip_slots and new_difficulty is None:
for signage_point_index in range(
constants.NUM_SPS_SUB_SLOT - constants.NUM_SP_INTERVALS_EXTRA,
constants.NUM_SPS_SUB_SLOT,
):
# note that we are passing in the finished slots which include the last slot
signage_point = get_signage_point(
constants,
BlockCache(blocks),
latest_block_eos,
sub_slot_start_total_iters,
uint8(signage_point_index),
finished_sub_slots_eos,
sub_slot_iters,
normalized_to_identity_cc_sp,
)
if signage_point_index == 0:
cc_sp_output_hash = slot_cc_challenge
else:
assert signage_point is not None
assert signage_point.cc_vdf is not None
cc_sp_output_hash = signage_point.cc_vdf.output.get_hash()
# If did not reach the target slots to skip, don't make any proofs for this sub-slot
qualified_proofs = self.get_pospaces_for_challenge(
constants,
slot_cc_challenge,
cc_sp_output_hash,
seed,
difficulty,
sub_slot_iters,
force_plot_id=force_plot_id,
)
for required_iters, proof_of_space in sorted(qualified_proofs, key=lambda t: t[0]):
if blocks_added_this_sub_slot == constants.MAX_SUB_SLOT_BLOCKS:
break
assert start_timestamp is not None
if proof_of_space.pool_contract_puzzle_hash is not None:
if pool_reward_puzzle_hash is not None:
# The caller wants to be paid to a specific address, but this PoSpace is tied to an
# address, so continue until a proof of space tied to a pk is found
continue
pool_target = PoolTarget(proof_of_space.pool_contract_puzzle_hash, uint32(0))
else:
if pool_reward_puzzle_hash is not None:
pool_target = PoolTarget(pool_reward_puzzle_hash, uint32(0))
else:
pool_target = PoolTarget(self.pool_ph, uint32(0))
if transaction_data is not None:
if previous_generator is not None:
block_generator = best_solution_generator_from_template(
previous_generator, transaction_data
)
else:
block_generator = simple_solution_generator(transaction_data)
aggregate_signature = transaction_data.aggregated_signature
else:
block_generator = None
aggregate_signature = G2Element()
full_block, block_record = get_full_block_and_block_record(
constants,
blocks,
sub_slot_start_total_iters,
uint8(signage_point_index),
proof_of_space,
slot_cc_challenge,
slot_rc_challenge,
farmer_reward_puzzle_hash,
pool_target,
start_timestamp,
start_height,
time_per_block,
block_generator,
aggregate_signature,
additions,
removals,
height_to_hash,
difficulty,
required_iters,
sub_slot_iters,
self.get_plot_signature,
self.get_pool_key_signature,
finished_sub_slots_at_ip,
signage_point,
latest_block,
seed,
overflow_cc_challenge=overflow_cc_challenge,
overflow_rc_challenge=overflow_rc_challenge,
normalized_to_identity_cc_ip=normalized_to_identity_cc_ip,
current_time=current_time,
)
if block_record.is_transaction_block:
transaction_data_included = True
elif guarantee_transaction_block:
continue
if pending_ses:
pending_ses = False
block_list.append(full_block)
if full_block.transactions_generator is not None:
compressor_arg = detect_potential_template_generator(
full_block.height, full_block.transactions_generator
)
if compressor_arg is not None:
previous_generator = compressor_arg
blocks_added_this_sub_slot += 1
log.info(f"Created block {block_record.height } ov=True, iters " f"{block_record.total_iters}")
num_blocks -= 1
if num_blocks == 0:
return block_list
blocks[full_block.header_hash] = block_record
height_to_hash[uint32(full_block.height)] = full_block.header_hash
latest_block = blocks[full_block.header_hash]
finished_sub_slots_at_ip = []
finished_sub_slots_at_sp = finished_sub_slots_eos.copy()
same_slot_as_last = False
sub_slot_start_total_iters = uint128(sub_slot_start_total_iters + sub_slot_iters)
if num_blocks < prev_num_of_blocks:
num_empty_slots_added += 1
if new_sub_slot_iters is not None:
assert new_difficulty is not None
sub_slot_iters = new_sub_slot_iters
difficulty = new_difficulty
def create_genesis_block(
self,
constants: ConsensusConstants,
seed: bytes32 = b"",
timestamp: Optional[uint64] = None,
force_overflow: bool = False,
skip_slots: int = 0,
) -> FullBlock:
if timestamp is None:
timestamp = uint64(int(time.time()))
finished_sub_slots: List[EndOfSubSlotBundle] = []
unfinished_block: Optional[UnfinishedBlock] = None
ip_iters: uint64 = uint64(0)
sub_slot_total_iters: uint128 = uint128(0)
# Keep trying until we get a good proof of space that also passes sp filter
while True:
cc_challenge, rc_challenge = get_challenges(constants, {}, finished_sub_slots, None)
for signage_point_index in range(0, constants.NUM_SPS_SUB_SLOT):
signage_point: SignagePoint = get_signage_point(
constants,
BlockCache({}, {}),
None,
sub_slot_total_iters,
uint8(signage_point_index),
finished_sub_slots,
constants.SUB_SLOT_ITERS_STARTING,
)
if signage_point_index == 0:
cc_sp_output_hash: bytes32 = cc_challenge
else:
assert signage_point is not None
assert signage_point.cc_vdf is not None
cc_sp_output_hash = signage_point.cc_vdf.output.get_hash()
# If did not reach the target slots to skip, don't make any proofs for this sub-slot
qualified_proofs: List[Tuple[uint64, ProofOfSpace]] = self.get_pospaces_for_challenge(
constants,
cc_challenge,
cc_sp_output_hash,
seed,
constants.DIFFICULTY_STARTING,
constants.SUB_SLOT_ITERS_STARTING,
)
# Try each of the proofs of space
for required_iters, proof_of_space in qualified_proofs:
sp_iters: uint64 = calculate_sp_iters(
constants,
uint64(constants.SUB_SLOT_ITERS_STARTING),
uint8(signage_point_index),
)
ip_iters = calculate_ip_iters(
constants,
uint64(constants.SUB_SLOT_ITERS_STARTING),
uint8(signage_point_index),
required_iters,
)
is_overflow = is_overflow_block(constants, uint8(signage_point_index))
if force_overflow and not is_overflow:
continue
if len(finished_sub_slots) < skip_slots:
continue
unfinished_block = create_test_unfinished_block(
constants,
sub_slot_total_iters,
constants.SUB_SLOT_ITERS_STARTING,
uint8(signage_point_index),
sp_iters,
ip_iters,
proof_of_space,
cc_challenge,
constants.GENESIS_PRE_FARM_FARMER_PUZZLE_HASH,
PoolTarget(constants.GENESIS_PRE_FARM_POOL_PUZZLE_HASH, uint32(0)),
self.get_plot_signature,
self.get_pool_key_signature,
signage_point,
timestamp,
BlockCache({}),
seed=seed,
finished_sub_slots_input=finished_sub_slots,
)
assert unfinished_block is not None
if not is_overflow:
cc_ip_vdf, cc_ip_proof = get_vdf_info_and_proof(
constants,
ClassgroupElement.get_default_element(),
cc_challenge,
ip_iters,
)
cc_ip_vdf = replace(cc_ip_vdf, number_of_iterations=ip_iters)
rc_ip_vdf, rc_ip_proof = get_vdf_info_and_proof(
constants,
ClassgroupElement.get_default_element(),
rc_challenge,
ip_iters,
)
assert unfinished_block is not None
total_iters_sp = uint128(sub_slot_total_iters + sp_iters)
return unfinished_block_to_full_block(
unfinished_block,
cc_ip_vdf,
cc_ip_proof,
rc_ip_vdf,
rc_ip_proof,
None,
None,
finished_sub_slots,
None,
BlockCache({}),
total_iters_sp,
constants.DIFFICULTY_STARTING,
)
if signage_point_index == constants.NUM_SPS_SUB_SLOT - constants.NUM_SP_INTERVALS_EXTRA - 1:
# Finish the end of sub-slot and try again next sub-slot
cc_vdf, cc_proof = get_vdf_info_and_proof(
constants,
ClassgroupElement.get_default_element(),
cc_challenge,
constants.SUB_SLOT_ITERS_STARTING,
)
rc_vdf, rc_proof = get_vdf_info_and_proof(
constants,
ClassgroupElement.get_default_element(),
rc_challenge,
constants.SUB_SLOT_ITERS_STARTING,
)
cc_slot = ChallengeChainSubSlot(cc_vdf, None, None, None, None)
finished_sub_slots.append(
EndOfSubSlotBundle(
cc_slot,
None,
RewardChainSubSlot(
rc_vdf,
cc_slot.get_hash(),
None,
uint8(constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK),
),
SubSlotProofs(cc_proof, None, rc_proof),
)
)
if unfinished_block is not None:
cc_ip_vdf, cc_ip_proof = get_vdf_info_and_proof(
constants,
ClassgroupElement.get_default_element(),
finished_sub_slots[-1].challenge_chain.get_hash(),
ip_iters,
)
rc_ip_vdf, rc_ip_proof = get_vdf_info_and_proof(
constants,
ClassgroupElement.get_default_element(),
finished_sub_slots[-1].reward_chain.get_hash(),
ip_iters,
)
total_iters_sp = uint128(
sub_slot_total_iters
+ calculate_sp_iters(
self.constants,
self.constants.SUB_SLOT_ITERS_STARTING,
unfinished_block.reward_chain_block.signage_point_index,
)
)
return unfinished_block_to_full_block(
unfinished_block,
cc_ip_vdf,
cc_ip_proof,
rc_ip_vdf,
rc_ip_proof,
None,
None,
finished_sub_slots,
None,
BlockCache({}),
total_iters_sp,
constants.DIFFICULTY_STARTING,
)
sub_slot_total_iters = uint128(sub_slot_total_iters + constants.SUB_SLOT_ITERS_STARTING)
def get_pospaces_for_challenge(
self,
constants: ConsensusConstants,
challenge_hash: bytes32,
signage_point: bytes32,
seed: bytes,
difficulty: uint64,
sub_slot_iters: uint64,
force_plot_id: Optional[bytes32] = None,
) -> List[Tuple[uint64, ProofOfSpace]]:
found_proofs: List[Tuple[uint64, ProofOfSpace]] = []
random.seed(seed)
for plot_info in self.plot_manager.plots.values():
plot_id: bytes32 = plot_info.prover.get_id()
if force_plot_id is not None and plot_id != force_plot_id:
continue
if ProofOfSpace.passes_plot_filter(constants, plot_id, challenge_hash, signage_point):
new_challenge: bytes32 = ProofOfSpace.calculate_pos_challenge(plot_id, challenge_hash, signage_point)
qualities = plot_info.prover.get_qualities_for_challenge(new_challenge)
for proof_index, quality_str in enumerate(qualities):
required_iters = calculate_iterations_quality(
constants.DIFFICULTY_CONSTANT_FACTOR,
quality_str,
plot_info.prover.get_size(),
difficulty,
signage_point,
)
if required_iters < calculate_sp_interval_iters(constants, sub_slot_iters):
proof_xs: bytes = plot_info.prover.get_full_proof(new_challenge, proof_index)
# Look up local_sk from plot to save locked memory
(
pool_public_key_or_puzzle_hash,
farmer_public_key,
local_master_sk,
) = parse_plot_info(plot_info.prover.get_memo())
local_sk = master_sk_to_local_sk(local_master_sk)
if isinstance(pool_public_key_or_puzzle_hash, G1Element):
include_taproot = False
else:
assert isinstance(pool_public_key_or_puzzle_hash, bytes32)
include_taproot = True
plot_pk = ProofOfSpace.generate_plot_public_key(
local_sk.get_g1(), farmer_public_key, include_taproot
)
proof_of_space: ProofOfSpace = ProofOfSpace(
new_challenge,
plot_info.pool_public_key,
plot_info.pool_contract_puzzle_hash,
plot_pk,
plot_info.prover.get_size(),
proof_xs,
)
found_proofs.append((required_iters, proof_of_space))
random_sample = found_proofs
if len(found_proofs) >= 1:
if random.random() < 0.1:
# Removes some proofs of space to create "random" chains, based on the seed
random_sample = random.sample(found_proofs, len(found_proofs) - 1)
return random_sample
def get_signage_point(
constants: ConsensusConstants,
blocks: BlockchainInterface,
latest_block: Optional[BlockRecord],
sub_slot_start_total_iters: uint128,
signage_point_index: uint8,
finished_sub_slots: List[EndOfSubSlotBundle],
sub_slot_iters: uint64,
normalized_to_identity_cc_sp: bool = False,
) -> SignagePoint:
if signage_point_index == 0:
return SignagePoint(None, None, None, None)
sp_iters = calculate_sp_iters(constants, sub_slot_iters, signage_point_index)
overflow = is_overflow_block(constants, signage_point_index)
sp_total_iters = uint128(
sub_slot_start_total_iters + calculate_sp_iters(constants, sub_slot_iters, signage_point_index)
)
(
cc_vdf_challenge,
rc_vdf_challenge,
cc_vdf_input,
rc_vdf_input,
cc_vdf_iters,
rc_vdf_iters,
) = get_signage_point_vdf_info(
constants,
finished_sub_slots,
overflow,
latest_block,
blocks,
sp_total_iters,
sp_iters,
)
cc_sp_vdf, cc_sp_proof = get_vdf_info_and_proof(
constants,
cc_vdf_input,
cc_vdf_challenge,
cc_vdf_iters,
)
rc_sp_vdf, rc_sp_proof = get_vdf_info_and_proof(
constants,
rc_vdf_input,
rc_vdf_challenge,
rc_vdf_iters,
)
cc_sp_vdf = replace(cc_sp_vdf, number_of_iterations=sp_iters)
if normalized_to_identity_cc_sp:
_, cc_sp_proof = get_vdf_info_and_proof(
constants,
ClassgroupElement.get_default_element(),
cc_sp_vdf.challenge,
sp_iters,
True,
)
return SignagePoint(cc_sp_vdf, cc_sp_proof, rc_sp_vdf, rc_sp_proof)
def finish_block(
constants: ConsensusConstants,
blocks: Dict[bytes32, BlockRecord],
height_to_hash: Dict[uint32, bytes32],
finished_sub_slots: List[EndOfSubSlotBundle],
sub_slot_start_total_iters: uint128,
signage_point_index: uint8,
unfinished_block: UnfinishedBlock,
required_iters: uint64,
ip_iters: uint64,
slot_cc_challenge: bytes32,
slot_rc_challenge: bytes32,
latest_block: BlockRecord,
sub_slot_iters: uint64,
difficulty: uint64,
normalized_to_identity_cc_ip: bool = False,
) -> Tuple[FullBlock, BlockRecord]:
is_overflow = is_overflow_block(constants, signage_point_index)
cc_vdf_challenge = slot_cc_challenge
if len(finished_sub_slots) == 0:
new_ip_iters = unfinished_block.total_iters - latest_block.total_iters
cc_vdf_input = latest_block.challenge_vdf_output
rc_vdf_challenge = latest_block.reward_infusion_new_challenge
else:
new_ip_iters = ip_iters
cc_vdf_input = ClassgroupElement.get_default_element()
rc_vdf_challenge = slot_rc_challenge
cc_ip_vdf, cc_ip_proof = get_vdf_info_and_proof(
constants,
cc_vdf_input,
cc_vdf_challenge,
new_ip_iters,
)
cc_ip_vdf = replace(cc_ip_vdf, number_of_iterations=ip_iters)
if normalized_to_identity_cc_ip:
_, cc_ip_proof = get_vdf_info_and_proof(
constants,
ClassgroupElement.get_default_element(),
cc_ip_vdf.challenge,
ip_iters,
True,
)
deficit = calculate_deficit(
constants,
uint32(latest_block.height + 1),
latest_block,
is_overflow,
len(finished_sub_slots),
)
icc_ip_vdf, icc_ip_proof = get_icc(
constants,
unfinished_block.total_iters,
finished_sub_slots,
latest_block,
blocks,
uint128(sub_slot_start_total_iters + sub_slot_iters) if is_overflow else sub_slot_start_total_iters,
deficit,
)
rc_ip_vdf, rc_ip_proof = get_vdf_info_and_proof(
constants,
ClassgroupElement.get_default_element(),
rc_vdf_challenge,
new_ip_iters,
)
assert unfinished_block is not None
sp_total_iters = uint128(
sub_slot_start_total_iters + calculate_sp_iters(constants, sub_slot_iters, signage_point_index)
)
full_block: FullBlock = unfinished_block_to_full_block(
unfinished_block,
cc_ip_vdf,
cc_ip_proof,
rc_ip_vdf,
rc_ip_proof,
icc_ip_vdf,
icc_ip_proof,
finished_sub_slots,
latest_block,
BlockCache(blocks),
sp_total_iters,
difficulty,
)
block_record = block_to_block_record(constants, BlockCache(blocks), required_iters, full_block, None)
return full_block, block_record
def get_challenges(
constants: ConsensusConstants,
blocks: Dict[uint32, BlockRecord],
finished_sub_slots: List[EndOfSubSlotBundle],
prev_header_hash: Optional[bytes32],
) -> Tuple[bytes32, bytes32]:
if len(finished_sub_slots) == 0:
if prev_header_hash is None:
return constants.GENESIS_CHALLENGE, constants.GENESIS_CHALLENGE
curr: BlockRecord = blocks[prev_header_hash]
while not curr.first_in_sub_slot:
curr = blocks[curr.prev_hash]
assert curr.finished_challenge_slot_hashes is not None
assert curr.finished_reward_slot_hashes is not None
cc_challenge = curr.finished_challenge_slot_hashes[-1]
rc_challenge = curr.finished_reward_slot_hashes[-1]
else:
cc_challenge = finished_sub_slots[-1].challenge_chain.get_hash()
rc_challenge = finished_sub_slots[-1].reward_chain.get_hash()
return cc_challenge, rc_challenge
def get_plot_dir() -> Path:
cache_path = Path(os.path.expanduser(os.getenv("CHIVES_ROOT", "~/.chives/"))) / "test-plots"
mkdir(cache_path)
return cache_path
def get_plot_tmp_dir():
return get_plot_dir() / "tmp"
def load_block_list(
block_list: List[FullBlock], constants: ConsensusConstants
) -> Tuple[Dict[uint32, bytes32], uint64, Dict[uint32, BlockRecord]]:
difficulty = 0
height_to_hash: Dict[uint32, bytes32] = {}
blocks: Dict[uint32, BlockRecord] = {}
for full_block in block_list:
if full_block.height == 0:
difficulty = uint64(constants.DIFFICULTY_STARTING)
else:
difficulty = full_block.weight - block_list[full_block.height - 1].weight
if full_block.reward_chain_block.signage_point_index == 0:
challenge = full_block.reward_chain_block.pos_ss_cc_challenge_hash
sp_hash = challenge
else:
assert full_block.reward_chain_block.challenge_chain_sp_vdf is not None
challenge = full_block.reward_chain_block.challenge_chain_sp_vdf.challenge
sp_hash = full_block.reward_chain_block.challenge_chain_sp_vdf.output.get_hash()
quality_str = full_block.reward_chain_block.proof_of_space.verify_and_get_quality_string(
constants, challenge, sp_hash
)
required_iters: uint64 = calculate_iterations_quality(
constants.DIFFICULTY_CONSTANT_FACTOR,
quality_str,
full_block.reward_chain_block.proof_of_space.size,
uint64(difficulty),
sp_hash,
)
blocks[full_block.header_hash] = block_to_block_record(
constants,
BlockCache(blocks),
required_iters,
full_block,
None,
)
height_to_hash[uint32(full_block.height)] = full_block.header_hash
return height_to_hash, uint64(difficulty), blocks
def get_icc(
constants: ConsensusConstants,
vdf_end_total_iters: uint128,
finished_sub_slots: List[EndOfSubSlotBundle],
latest_block: BlockRecord,
blocks: Dict[bytes32, BlockRecord],
sub_slot_start_total_iters: uint128,
deficit: uint8,
) -> Tuple[Optional[VDFInfo], Optional[VDFProof]]:
if len(finished_sub_slots) == 0:
prev_deficit = latest_block.deficit
else:
prev_deficit = finished_sub_slots[-1].reward_chain.deficit
if deficit == prev_deficit == constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK:
# new slot / overflow sb to new slot / overflow sb
return None, None
if deficit == (prev_deficit - 1) == (constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK - 1):
# new slot / overflow sb to challenge sb
return None, None
if len(finished_sub_slots) != 0:
last_ss = finished_sub_slots[-1]
assert last_ss.infused_challenge_chain is not None
assert finished_sub_slots[-1].reward_chain.deficit <= (constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK - 1)
return get_vdf_info_and_proof(
constants,
ClassgroupElement.get_default_element(),
last_ss.infused_challenge_chain.get_hash(),
uint64(vdf_end_total_iters - sub_slot_start_total_iters),
)
curr = latest_block # curr deficit is 0, 1, 2, 3, or 4
while not curr.is_challenge_block(constants) and not curr.first_in_sub_slot:
curr = blocks[curr.prev_hash]
icc_iters = uint64(vdf_end_total_iters - latest_block.total_iters)
if latest_block.is_challenge_block(constants):
icc_input: Optional[ClassgroupElement] = ClassgroupElement.get_default_element()
else:
icc_input = latest_block.infused_challenge_vdf_output
assert icc_input is not None
if curr.is_challenge_block(constants): # Deficit 4
icc_challenge_hash = curr.challenge_block_info_hash
else:
assert curr.finished_infused_challenge_slot_hashes is not None
# First block in sub slot has deficit 0,1,2 or 3
icc_challenge_hash = curr.finished_infused_challenge_slot_hashes[-1]
return get_vdf_info_and_proof(
constants,
icc_input,
icc_challenge_hash,
icc_iters,
)
def get_full_block_and_block_record(
constants: ConsensusConstants,
blocks: Dict[uint32, BlockRecord],
sub_slot_start_total_iters: uint128,
signage_point_index: uint8,
proof_of_space: ProofOfSpace,
slot_cc_challenge: bytes32,
slot_rc_challenge: bytes32,
farmer_reward_puzzle_hash: bytes32,
pool_target: PoolTarget,
start_timestamp: uint64,
start_height: uint32,
time_per_block: float,
block_generator: Optional[BlockGenerator],
aggregate_signature: G2Element,
additions: Optional[List[Coin]],
removals: Optional[List[Coin]],
height_to_hash: Dict[uint32, bytes32],
difficulty: uint64,
required_iters: uint64,
sub_slot_iters: uint64,
get_plot_signature: Callable[[bytes32, G1Element], G2Element],
get_pool_signature: Callable[[PoolTarget, Optional[G1Element]], Optional[G2Element]],
finished_sub_slots: List[EndOfSubSlotBundle],
signage_point: SignagePoint,
prev_block: BlockRecord,
seed: bytes = b"",
overflow_cc_challenge: bytes32 = None,
overflow_rc_challenge: bytes32 = None,
normalized_to_identity_cc_ip: bool = False,
current_time: bool = False,
) -> Tuple[FullBlock, BlockRecord]:
if current_time is True:
if prev_block.timestamp is not None:
timestamp = uint64(max(int(time.time()), prev_block.timestamp + int(time_per_block)))
else:
timestamp = uint64(int(time.time()))
else:
timestamp = uint64(start_timestamp + int((prev_block.height + 1 - start_height) * time_per_block))
sp_iters = calculate_sp_iters(constants, sub_slot_iters, signage_point_index)
ip_iters = calculate_ip_iters(constants, sub_slot_iters, signage_point_index, required_iters)
unfinished_block = create_test_unfinished_block(
constants,
sub_slot_start_total_iters,
sub_slot_iters,
signage_point_index,
sp_iters,
ip_iters,
proof_of_space,
slot_cc_challenge,
farmer_reward_puzzle_hash,
pool_target,
get_plot_signature,
get_pool_signature,
signage_point,
timestamp,
BlockCache(blocks),
seed,
block_generator,
aggregate_signature,
additions,
removals,
prev_block,
finished_sub_slots,
)
if (overflow_cc_challenge is not None) and (overflow_rc_challenge is not None):
slot_cc_challenge = overflow_cc_challenge
slot_rc_challenge = overflow_rc_challenge
full_block, block_record = finish_block(
constants,
blocks,
height_to_hash,
finished_sub_slots,
sub_slot_start_total_iters,
signage_point_index,
unfinished_block,
required_iters,
ip_iters,
slot_cc_challenge,
slot_rc_challenge,
prev_block,
sub_slot_iters,
difficulty,
normalized_to_identity_cc_ip,
)
return full_block, block_record
def compute_cost_test(generator: BlockGenerator, cost_per_byte: int) -> Tuple[Optional[uint16], uint64]:
try:
block_program, block_program_args = setup_generator_args(generator)
clvm_cost, result = GENERATOR_MOD.run_safe_with_cost(INFINITE_COST, block_program, block_program_args)
size_cost = len(bytes(generator.program)) * cost_per_byte
condition_cost = 0
for res in result.first().as_iter():
            res = res.rest() # skip parent coin id
res = res.rest() # skip puzzle hash
res = res.rest() # skip amount
for cond in res.first().as_iter():
condition = cond.first().as_atom()
if condition in [ConditionOpcode.AGG_SIG_UNSAFE, ConditionOpcode.AGG_SIG_ME]:
condition_cost += ConditionCost.AGG_SIG.value
elif condition == ConditionOpcode.CREATE_COIN:
condition_cost += ConditionCost.CREATE_COIN.value
return None, uint64(clvm_cost + size_cost + condition_cost)
except Exception:
return uint16(Err.GENERATOR_RUNTIME_ERROR.value), uint64(0)
def create_test_foliage(
constants: ConsensusConstants,
reward_block_unfinished: RewardChainBlockUnfinished,
block_generator: Optional[BlockGenerator],
aggregate_sig: G2Element,
additions: List[Coin],
removals: List[Coin],
prev_block: Optional[BlockRecord],
blocks: BlockchainInterface,
total_iters_sp: uint128,
timestamp: uint64,
farmer_reward_puzzlehash: bytes32,
pool_target: PoolTarget,
get_plot_signature: Callable[[bytes32, G1Element], G2Element],
get_pool_signature: Callable[[PoolTarget, Optional[G1Element]], Optional[G2Element]],
seed: bytes32 = b"",
) -> Tuple[Foliage, Optional[FoliageTransactionBlock], Optional[TransactionsInfo]]:
"""
Creates a foliage for a given reward chain block. This may or may not be a tx block. In the case of a tx block,
the return values are not None. This is called at the signage point, so some of this information may be
tweaked at the infusion point.
Args:
constants: consensus constants being used for this chain
reward_block_unfinished: the reward block to look at, potentially at the signage point
block_generator: transactions to add to the foliage block, if created
        aggregate_sig: aggregate signature of all transactions (or the infinity element)
prev_block: the previous block at the signage point
blocks: dict from header hash to blocks, of all ancestor blocks
total_iters_sp: total iters at the signage point
timestamp: timestamp to put into the foliage block
farmer_reward_puzzlehash: where to pay out farming reward
pool_target: where to pay out pool reward
get_plot_signature: retrieve the signature corresponding to the plot public key
get_pool_signature: retrieve the signature corresponding to the pool public key
seed: seed to randomize block
"""
if prev_block is not None:
res = get_prev_transaction_block(prev_block, blocks, total_iters_sp)
is_transaction_block: bool = res[0]
prev_transaction_block: Optional[BlockRecord] = res[1]
else:
# Genesis is a transaction block
prev_transaction_block = None
is_transaction_block = True
random.seed(seed)
# Use the extension data to create different blocks based on header hash
extension_data: bytes32 = random.randint(0, 100000000).to_bytes(32, "big")
if prev_block is None:
height: uint32 = uint32(0)
else:
height = uint32(prev_block.height + 1)
# Create filter
byte_array_tx: List[bytes32] = []
tx_additions: List[Coin] = []
tx_removals: List[bytes32] = []
pool_target_signature: Optional[G2Element] = get_pool_signature(
pool_target, reward_block_unfinished.proof_of_space.pool_public_key
)
foliage_data = FoliageBlockData(
reward_block_unfinished.get_hash(),
pool_target,
pool_target_signature,
farmer_reward_puzzlehash,
extension_data,
)
foliage_block_data_signature: G2Element = get_plot_signature(
foliage_data.get_hash(),
reward_block_unfinished.proof_of_space.plot_public_key,
)
prev_block_hash: bytes32 = constants.GENESIS_CHALLENGE
if height != 0:
assert prev_block is not None
prev_block_hash = prev_block.header_hash
generator_block_heights_list: List[uint32] = []
if is_transaction_block:
cost = uint64(0)
# Calculate the cost of transactions
if block_generator is not None:
generator_block_heights_list = block_generator.block_height_list()
err, cost = compute_cost_test(block_generator, constants.COST_PER_BYTE)
assert err is None
removal_amount = 0
addition_amount = 0
for coin in removals:
removal_amount += coin.amount
for coin in additions:
addition_amount += coin.amount
spend_bundle_fees = removal_amount - addition_amount
# in order to allow creating blocks that mint coins, clamp the fee
# to 0, if it ends up being negative
if spend_bundle_fees < 0:
spend_bundle_fees = 0
else:
spend_bundle_fees = 0
reward_claims_incorporated = []
if height > 0:
assert prev_transaction_block is not None
assert prev_block is not None
curr: BlockRecord = prev_block
while not curr.is_transaction_block:
curr = blocks.block_record(curr.prev_hash)
assert curr.fees is not None
pool_coin = create_pool_coin(
curr.height, curr.pool_puzzle_hash, calculate_pool_reward(curr.height), constants.GENESIS_CHALLENGE
)
farmer_coin = create_farmer_coin(
curr.height,
curr.farmer_puzzle_hash,
uint64(calculate_base_farmer_reward(curr.height) + curr.fees),
constants.GENESIS_CHALLENGE,
)
assert curr.header_hash == prev_transaction_block.header_hash
reward_claims_incorporated += [pool_coin, farmer_coin]
if curr.height > 0:
curr = blocks.block_record(curr.prev_hash)
# Prev block is not genesis
while not curr.is_transaction_block:
pool_coin = create_pool_coin(
curr.height,
curr.pool_puzzle_hash,
calculate_pool_reward(curr.height),
constants.GENESIS_CHALLENGE,
)
farmer_coin = create_farmer_coin(
curr.height,
curr.farmer_puzzle_hash,
calculate_base_farmer_reward(curr.height),
constants.GENESIS_CHALLENGE,
)
reward_claims_incorporated += [pool_coin, farmer_coin]
curr = blocks.block_record(curr.prev_hash)
additions.extend(reward_claims_incorporated.copy())
for coin in additions:
tx_additions.append(coin)
byte_array_tx.append(bytearray(coin.puzzle_hash))
for coin in removals:
tx_removals.append(coin.name())
byte_array_tx.append(bytearray(coin.name()))
bip158: PyBIP158 = PyBIP158(byte_array_tx)
encoded = bytes(bip158.GetEncoded())
removal_merkle_set = MerkleSet()
addition_merkle_set = MerkleSet()
# Create removal Merkle set
for coin_name in tx_removals:
removal_merkle_set.add_already_hashed(coin_name)
# Create addition Merkle set
puzzlehash_coin_map: Dict[bytes32, List[Coin]] = {}
for coin in tx_additions:
if coin.puzzle_hash in puzzlehash_coin_map:
puzzlehash_coin_map[coin.puzzle_hash].append(coin)
else:
puzzlehash_coin_map[coin.puzzle_hash] = [coin]
# Addition Merkle set contains puzzlehash and hash of all coins with that puzzlehash
for puzzle, coins in puzzlehash_coin_map.items():
addition_merkle_set.add_already_hashed(puzzle)
addition_merkle_set.add_already_hashed(hash_coin_list(coins))
additions_root = addition_merkle_set.get_root()
removals_root = removal_merkle_set.get_root()
generator_hash = bytes32([0] * 32)
if block_generator is not None:
generator_hash = std_hash(block_generator.program)
generator_refs_hash = bytes32([1] * 32)
if generator_block_heights_list not in (None, []):
generator_ref_list_bytes = b"".join([bytes(i) for i in generator_block_heights_list])
generator_refs_hash = std_hash(generator_ref_list_bytes)
filter_hash: bytes32 = std_hash(encoded)
transactions_info: Optional[TransactionsInfo] = TransactionsInfo(
generator_hash,
generator_refs_hash,
aggregate_sig,
uint64(spend_bundle_fees),
cost,
reward_claims_incorporated,
)
if prev_transaction_block is None:
prev_transaction_block_hash: bytes32 = constants.GENESIS_CHALLENGE
else:
prev_transaction_block_hash = prev_transaction_block.header_hash
assert transactions_info is not None
foliage_transaction_block: Optional[FoliageTransactionBlock] = FoliageTransactionBlock(
prev_transaction_block_hash,
timestamp,
filter_hash,
additions_root,
removals_root,
transactions_info.get_hash(),
)
assert foliage_transaction_block is not None
foliage_transaction_block_hash: Optional[bytes32] = foliage_transaction_block.get_hash()
foliage_transaction_block_signature: Optional[G2Element] = get_plot_signature(
foliage_transaction_block_hash, reward_block_unfinished.proof_of_space.plot_public_key
)
assert foliage_transaction_block_signature is not None
else:
foliage_transaction_block_hash = None
foliage_transaction_block_signature = None
foliage_transaction_block = None
transactions_info = None
assert (foliage_transaction_block_hash is None) == (foliage_transaction_block_signature is None)
foliage = Foliage(
prev_block_hash,
reward_block_unfinished.get_hash(),
foliage_data,
foliage_block_data_signature,
foliage_transaction_block_hash,
foliage_transaction_block_signature,
)
return foliage, foliage_transaction_block, transactions_info
def create_test_unfinished_block(
constants: ConsensusConstants,
sub_slot_start_total_iters: uint128,
sub_slot_iters: uint64,
signage_point_index: uint8,
sp_iters: uint64,
ip_iters: uint64,
proof_of_space: ProofOfSpace,
slot_cc_challenge: bytes32,
farmer_reward_puzzle_hash: bytes32,
pool_target: PoolTarget,
get_plot_signature: Callable[[bytes32, G1Element], G2Element],
get_pool_signature: Callable[[PoolTarget, Optional[G1Element]], Optional[G2Element]],
signage_point: SignagePoint,
timestamp: uint64,
blocks: BlockchainInterface,
seed: bytes32 = b"",
block_generator: Optional[BlockGenerator] = None,
aggregate_sig: G2Element = G2Element(),
additions: Optional[List[Coin]] = None,
removals: Optional[List[Coin]] = None,
prev_block: Optional[BlockRecord] = None,
finished_sub_slots_input: List[EndOfSubSlotBundle] = None,
) -> UnfinishedBlock:
"""
Creates a new unfinished block using all the information available at the signage point. This will have to be
modified using information from the infusion point.
Args:
constants: consensus constants being used for this chain
sub_slot_start_total_iters: the starting sub-slot iters at the signage point sub-slot
sub_slot_iters: sub-slot-iters at the infusion point epoch
signage_point_index: signage point index of the block to create
sp_iters: sp_iters of the block to create
ip_iters: ip_iters of the block to create
proof_of_space: proof of space of the block to create
slot_cc_challenge: challenge hash at the sp sub-slot
farmer_reward_puzzle_hash: where to pay out farmer rewards
pool_target: where to pay out pool rewards
get_plot_signature: function that returns signature corresponding to plot public key
get_pool_signature: function that returns signature corresponding to pool public key
signage_point: signage point information (VDFs)
timestamp: timestamp to add to the foliage block, if created
seed: seed to randomize chain
block_generator: transactions to add to the foliage block, if created
        aggregate_sig: aggregate signature of all transactions (or the infinity element)
additions: Coins added in spend_bundle
removals: Coins removed in spend_bundle
prev_block: previous block (already in chain) from the signage point
blocks: dictionary from header hash to SBR of all included SBR
finished_sub_slots_input: finished_sub_slots at the signage point
    Returns:
        The new UnfinishedBlock, constructed from the information available at the signage point.
"""
if finished_sub_slots_input is None:
finished_sub_slots: List[EndOfSubSlotBundle] = []
else:
finished_sub_slots = finished_sub_slots_input.copy()
overflow: bool = sp_iters > ip_iters
total_iters_sp: uint128 = uint128(sub_slot_start_total_iters + sp_iters)
is_genesis: bool = prev_block is None
new_sub_slot: bool = len(finished_sub_slots) > 0
cc_sp_hash: Optional[bytes32] = slot_cc_challenge
# Only enters this if statement if we are in testing mode (making VDF proofs here)
if signage_point.cc_vdf is not None:
assert signage_point.rc_vdf is not None
cc_sp_hash = signage_point.cc_vdf.output.get_hash()
rc_sp_hash = signage_point.rc_vdf.output.get_hash()
else:
if new_sub_slot:
rc_sp_hash = finished_sub_slots[-1].reward_chain.get_hash()
else:
if is_genesis:
rc_sp_hash = constants.GENESIS_CHALLENGE
else:
assert prev_block is not None
assert blocks is not None
curr = prev_block
while not curr.first_in_sub_slot:
curr = blocks.block_record(curr.prev_hash)
assert curr.finished_reward_slot_hashes is not None
rc_sp_hash = curr.finished_reward_slot_hashes[-1]
signage_point = SignagePoint(None, None, None, None)
cc_sp_signature: Optional[G2Element] = get_plot_signature(cc_sp_hash, proof_of_space.plot_public_key)
rc_sp_signature: Optional[G2Element] = get_plot_signature(rc_sp_hash, proof_of_space.plot_public_key)
assert cc_sp_signature is not None
assert rc_sp_signature is not None
assert AugSchemeMPL.verify(proof_of_space.plot_public_key, cc_sp_hash, cc_sp_signature)
total_iters = uint128(sub_slot_start_total_iters + ip_iters + (sub_slot_iters if overflow else 0))
rc_block = RewardChainBlockUnfinished(
total_iters,
signage_point_index,
slot_cc_challenge,
proof_of_space,
signage_point.cc_vdf,
cc_sp_signature,
signage_point.rc_vdf,
rc_sp_signature,
)
if additions is None:
additions = []
if removals is None:
removals = []
(foliage, foliage_transaction_block, transactions_info,) = create_test_foliage(
constants,
rc_block,
block_generator,
aggregate_sig,
additions,
removals,
prev_block,
blocks,
total_iters_sp,
timestamp,
farmer_reward_puzzle_hash,
pool_target,
get_plot_signature,
get_pool_signature,
seed,
)
return UnfinishedBlock(
finished_sub_slots,
rc_block,
signage_point.cc_proof,
signage_point.rc_proof,
foliage,
foliage_transaction_block,
transactions_info,
block_generator.program if block_generator else None,
block_generator.block_height_list() if block_generator else [],
)
async def create_block_tools_async(
constants: ConsensusConstants = test_constants,
root_path: Optional[Path] = None,
const_dict=None,
keychain: Optional[Keychain] = None,
) -> BlockTools:
bt = BlockTools(constants, root_path, const_dict, keychain)
await bt.setup_keys()
await bt.setup_plots()
return bt
def create_block_tools(
constants: ConsensusConstants = test_constants,
root_path: Optional[Path] = None,
const_dict=None,
keychain: Optional[Keychain] = None,
) -> BlockTools:
bt = BlockTools(constants, root_path, const_dict, keychain)
asyncio.get_event_loop().run_until_complete(bt.setup_keys())
asyncio.get_event_loop().run_until_complete(bt.setup_plots())
return bt
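

# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# A hedged example of how the helpers above can be combined in a test. It only
# uses names defined in this file (create_block_tools_async, test_constants,
# BlockTools.create_genesis_block). Note that building a BlockTools instance
# sets up test keys and plots on disk, which can be slow; the function is never
# executed on import and can be run manually with asyncio if desired.
async def _demo_make_genesis_block() -> FullBlock:
    bt = await create_block_tools_async(constants=test_constants)
    # create_genesis_block keeps trying proofs of space until one passes the
    # signage-point filter, then returns the height-0 FullBlock.
    genesis = bt.create_genesis_block(test_constants, seed=b"demo")
    assert genesis.height == 0
    return genesis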
|
the-stack_106_27203 | import logging
import math
import re
import hikari
import lightbulb
from peacebot.core.utils.embed_colors import EmbedColors
logger = logging.getLogger("error_handler")
async def on_error(event: lightbulb.CommandErrorEvent) -> None:
error = event.exception
if isinstance(error, lightbulb.CommandNotFound):
return
if isinstance(error, lightbulb.BotMissingRequiredPermission):
missing = [
perm.replace("_", " ").replace("guild", "server").title()
for perm in str(error.missing_perms).split("|")
]
if len(missing) > 2:
fmt = "{}, and {}".format("**, **".join(missing[:-1]), missing[-1])
else:
fmt = " and ".join(missing)
_message = f"I need the **{fmt}** permission(s) to run this command."
embed = hikari.Embed(
title="I am Missing Permissions",
color=EmbedColors.ALERT,
description=_message,
)
return await event.context.respond(embed=embed)
if isinstance(error, lightbulb.CommandIsOnCooldown):
embed = hikari.Embed(
title="Command on Cooldown",
color=EmbedColors.ALERT,
description=f"This command is on cooldown, please retry in {math.ceil(error.retry_after)}s.",
)
return await event.context.respond(embed=embed)
if isinstance(error, lightbulb.NotEnoughArguments):
return await event.bot.help_command.send_command_help(
event.context, event.context.command
)
if isinstance(error, lightbulb.MissingRequiredPermission):
missing = [
perm.replace("_", " ").replace("guild", "server").title()
for perm in str(error.missing_perms).split("|")
]
if len(missing) > 2:
fmt = "{}, and {}".format("**, **".join(missing[:-1]), missing[-1])
else:
fmt = " and ".join(missing)
_message = "You need the **{}** permission(s) to use this command.".format(fmt)
embed = hikari.Embed(
title="You are missing permissions",
color=EmbedColors.ALERT,
description=_message,
)
return await event.context.respond(embed=embed)
title = " ".join(re.compile(r"[A-Z][a-z]*").findall(error.__class__.__name__))
await event.context.respond(
embed=hikari.Embed(title=title, description=str(error), color=EmbedColors.ALERT)
)
raise error
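

# --- Illustrative wiring sketch (assumption; not from the original module) ---
# This file only defines the handler. One common way to attach it is through
# extension-style load/unload hooks on a lightbulb.BotApp, which (via hikari's
# event manager) exposes subscribe/unsubscribe. The hook names and exact wiring
# are guesses about the surrounding bot, not facts stated in this file.
def load(bot: lightbulb.BotApp) -> None:
    bot.subscribe(lightbulb.CommandErrorEvent, on_error)


def unload(bot: lightbulb.BotApp) -> None:
    bot.unsubscribe(lightbulb.CommandErrorEvent, on_error)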
|
the-stack_106_27204 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('browser', '0024_compare_add_info'),
]
operations = [
migrations.RemoveField(
model_name='compare',
name='add_info',
),
migrations.AddField(
model_name='compare',
name='yearRange',
field=models.CharField(default='', max_length=10),
preserve_default=False,
),
]
|
the-stack_106_27205 | import sysv_ipc
# Create a shared memory segment and write the (English) alphabet to the shared memory.
mem = sysv_ipc.SharedMemory(None, sysv_ipc.IPC_CREX, size=sysv_ipc.PAGE_SIZE)
ASCII_A = 0x61
alphabet = ''.join([chr(ASCII_A + i) for i in range(26)])
alphabet = bytes(alphabet, 'ASCII')
mem.write(alphabet)
# Create a bytearray from the SharedMemory.
ba = bytearray(mem)
# bytearray instances have "most of the usual methods of mutable sequences", such as replace.
# https://docs.python.org/3/library/functions.html#func-bytearray
ba = ba.replace(b'c', b'x')
assert(ba[:4] == b'abxd')
# Unlike a memoryview (see below), changes to the bytearray do *not* affect the underlying
# SharedMemory -- the bytearray is a copy.
assert(mem.read(4) == b'abcd')
# Reset the memory to contain the alphabet unmodified.
mem.write(alphabet)
# Create a memoryview from the SharedMemory.
mv = memoryview(mem)
# This memoryview has format = 'B', itemsize = 1, shape = (sysv_ipc.PAGE_SIZE, ), ndim = 1,
# strides = (1, ), and is read/write.
# This shows that you can take slices of a memoryview
assert([chr(c) for c in mv[3:6]] == ['d', 'e', 'f'])
# This shows that you can write to the memoryview.
mv[4] = ord('x')
assert([chr(c) for c in mv[3:6]] == ['d', 'x', 'f'])
# Changes to the underlying segment are reflected in the memoryview
mem.write(b'xxx')
assert([chr(c) for c in mv[:6]] == ['x', 'x', 'x', 'd', 'x', 'f'])
mem.detach()
mem.remove()
print('Done!')
|
the-stack_106_27207 | import json
from investing_algorithm_framework import OrderSide, \
OrderType, OrderStatus, Order
from tests.resources import TestBase, TestOrderAndPositionsObjectsMixin
from tests.resources.serialization_dicts import position_serialization_dict
class Test(TestBase, TestOrderAndPositionsObjectsMixin):
def setUp(self):
super(Test, self).setUp()
self.start_algorithm()
orders = [
Order.from_dict(
{
"reference_id": 2,
"target_symbol": self.TARGET_SYMBOL_A,
"trading_symbol": "usdt",
"amount_target_symbol": 4,
"price": self.get_price(self.TARGET_SYMBOL_A).price,
"status": OrderStatus.PENDING.value,
"side": OrderSide.BUY.value,
"type": OrderType.LIMIT.value
}
),
Order.from_dict(
{
"reference_id": 3,
"target_symbol": self.TARGET_SYMBOL_B,
"trading_symbol": "usdt",
"amount_target_symbol": 4,
"price": self.get_price(self.TARGET_SYMBOL_A).price,
"status": OrderStatus.CLOSED.value,
"initial_price": self.get_price(
self.TARGET_SYMBOL_A).price,
"side": OrderSide.BUY.value,
"type": OrderType.LIMIT.value
}
)
]
self.algo_app.algorithm.add_orders(orders, identifier="default")
def tearDown(self):
self.algo_app.algorithm.stop()
super(Test, self).tearDown()
def test(self):
query_params = {'identifier': "default"}
response = self.client.get("/api/positions", query_string=query_params)
self.assert200(response)
data = json.loads(response.data.decode())
self.assertEqual(3, len(data["items"]))
self.assertEqual(
position_serialization_dict, set(data.get("items")[0])
)
def test_list_orders_with_identifier_query_params(self):
query_params = {
'identifier': "default"
}
response = self.client.get("/api/positions", query_string=query_params)
self.assert200(response)
data = json.loads(response.data.decode())
self.assertEqual(3, len(data["items"]))
self.assertEqual(
position_serialization_dict, set(data.get("items")[0])
)
query_params = {
'identifier': "random"
}
response = self.client.get("/api/positions", query_string=query_params)
self.assert404(response)
|
the-stack_106_27208 | from datetime import datetime
from decimal import Decimal
from django.contrib.sites.models import Site
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
from l10n.utils import moneyfmt
from payment.modules.giftcertificate.utils import generate_certificate_code
from payment.utils import get_processor_by_key
from product.models import Product
from satchmo_store.contact.models import Contact
from satchmo_store.shop.models import Order
import logging
GIFTCODE_KEY = 'GIFTCODE'
log = logging.getLogger('giftcertificate.models')
SATCHMO_PRODUCT = True
def get_product_types():
return ("GiftcertificateProduct",)
class GiftCertificateManager(models.Manager):
def from_order(self, order):
code = order.get_variable(GIFTCODE_KEY, "")
log.debug("GiftCert.from_order code=%s", code)
if code:
site = order.site
return GiftCertificate.objects.get(code__exact=code.value, valid__exact=True, site=site)
raise GiftCertificate.DoesNotExist()
class GiftCertificate(models.Model):
"""A Gift Cert which holds value."""
site = models.ForeignKey(Site, null=True, blank=True, verbose_name=_('Site'))
order = models.ForeignKey(Order, null=True, blank=True, related_name="giftcertificates", verbose_name=_('Order'))
code = models.CharField(_('Certificate Code'), max_length=100,
blank=True, null=True)
purchased_by = models.ForeignKey(Contact, verbose_name=_('Purchased by'),
blank=True, null=True, related_name='giftcertificates_purchased')
date_added = models.DateField(_("Date added"), null=True, blank=True)
valid = models.BooleanField(_('Valid'), default=True)
message = models.CharField(_('Message'), blank=True, null=True, max_length=255)
recipient_email = models.EmailField(_("Email"), blank=True, max_length=75)
start_balance = models.DecimalField(_("Starting Balance"), decimal_places=2,
max_digits=8)
objects = GiftCertificateManager()
def balance(self):
b = Decimal(self.start_balance)
for usage in self.usages.all():
log.info('usage: %s' % usage)
b = b - Decimal(usage.balance_used)
return b
balance = property(balance)
def apply_to_order(self, order):
"""Apply up to the full amount of the balance of this cert to the order.
Returns new balance.
"""
amount = min(order.balance, self.balance)
log.info('applying %s from giftcert #%i [%s] to order #%i [%s]',
moneyfmt(amount),
self.id,
moneyfmt(self.balance),
order.id,
moneyfmt(order.balance))
processor = get_processor_by_key('PAYMENT_GIFTCERTIFICATE')
orderpayment = processor.record_payment(order=order, amount=amount)
self.orderpayment = orderpayment
return self.use(amount, orderpayment=orderpayment)
def use(self, amount, notes="", orderpayment=None):
"""Use some amount of the gift cert, returning the current balance."""
u = GiftCertificateUsage(notes=notes, balance_used = amount,
orderpayment=orderpayment, giftcertificate=self)
u.save()
return self.balance
def save(self, **kwargs):
if not self.pk:
self.date_added = timezone.now()
if not self.code:
self.code = generate_certificate_code()
if not self.site:
self.site = Site.objects.get_current()
super(GiftCertificate, self).save(**kwargs)
def __unicode__(self):
sb = moneyfmt(self.start_balance)
b = moneyfmt(self.balance)
return u"Gift Cert: %s/%s" % (sb, b)
class Meta:
verbose_name = _("Gift Certificate")
verbose_name_plural = _("Gift Certificates")
class GiftCertificateUsage(models.Model):
"""Any usage of a Gift Cert is logged with one of these objects."""
usage_date = models.DateField(_("Date of usage"), null=True, blank=True)
notes = models.TextField(_('Notes'), blank=True, null=True)
balance_used = models.DecimalField(_("Amount Used"), decimal_places=2,
max_digits=8, )
orderpayment = models.ForeignKey('shop.OrderPayment', null=True, verbose_name=_('Order Payment'))
used_by = models.ForeignKey(Contact, verbose_name=_('Used by'),
blank=True, null=True, related_name='giftcertificates_used')
giftcertificate = models.ForeignKey(GiftCertificate, related_name='usages')
def __unicode__(self):
return u"GiftCertificateUsage: %s" % self.balance_used
def save(self, **kwargs):
if not self.pk:
self.usage_date = timezone.now()
super(GiftCertificateUsage, self).save(**kwargs)
class GiftCertificateProduct(models.Model):
"""
The product model for a Gift Certificate
"""
product = models.OneToOneField(Product, verbose_name=_('Product'), primary_key=True)
is_shippable = False
discountable = False
def __unicode__(self):
return u"GiftCertificateProduct: %s" % self.product.name
def _get_subtype(self):
return 'GiftCertificateProduct'
def order_success(self, order, order_item):
log.debug("Order success called, creating gift certs on order: %s", order)
message = ""
email = ""
for detl in order_item.orderitemdetail_set.all():
if detl.name == "email":
email = detl.value
elif detl.name == "message":
message = detl.value
price=order_item.line_item_price
log.debug("Creating gc for %s", price)
gc = GiftCertificate(
order = order,
start_balance= price,
purchased_by = order.contact,
valid=True,
message=message,
recipient_email=email
)
gc.save()
def save(self, **kwargs):
if hasattr(self.product,'_sub_types'):
del self.product._sub_types
super(GiftCertificateProduct, self).save(**kwargs)
class Meta:
verbose_name = _("Gift certificate product")
verbose_name_plural = _("Gift certificate products")
import config
PAYMENT_PROCESSOR=True
|
the-stack_106_27210 | """Module containing the attributes for describe-get-system proof of concept module."""
from os import path
from textwrap import dedent
import yaml
import templateapp
import regexapp
import dlapp
import pytest
import robot
import xmlrunner
from dgspoc.utils import File
from dgspoc.utils import Misc
__version__ = '0.3.10.1'
version = __version__
__all__ = [
'version',
'Data'
]
class Data:
console_cli_name = 'dgs'
console_cli_fullname = 'describe-get-system'
console_supported_commands = ['build', 'info', 'report', 'search',
'test', 'version', 'usage']
# app yaml files
app_directory = File.get_path('.geekstrident', 'dgspoc', is_home=True)
template_storage_filename = File.get_path(app_directory, 'template_storage.yaml')
# main app
main_app_text = 'dgs v{}'.format(version)
# company
company = 'Geeks Trident LLC'
company_url = 'https://www.geekstrident.com/'
# URL
repo_url = 'https://github.com/Geeks-Trident-LLC/dgspoc'
# TODO: Need to update wiki page for documentation_url instead of README.md.
documentation_url = path.join(repo_url, 'blob/develop/README.md')
license_url = path.join(repo_url, 'blob/develop/LICENSE')
# License
years = '2022-2040'
license_name = 'BSD 3-Clause License'
copyright_text = 'Copyright @ {}'.format(years)
license = dedent(
"""
BSD 3-Clause License
Copyright (c) {}, {}
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
""".format(years, company)
).strip()
@classmethod
def get_app_info(cls):
from platform import uname as u, python_version as v
lst = [cls.main_app_text,
'Project : {}'.format(cls.repo_url),
'License : {}'.format(cls.license_name),
'Platform: {0.system} {0.release} - Python {1}'.format(u(), v()),
]
app_info = '\n'.join(lst)
return app_info
@classmethod
def get_dependency(cls):
obj = dict(
templateapp=dict(
package='templateapp v{}'.format(templateapp.version),
url='https://pypi.org/project/templateapp/'
),
pytest=dict(
package='pytest v{}'.format(pytest.__version__),
url='https://pypi.org/project/pytest/'
),
robotframework=dict(
package='robotframework v{}'.format(robot.__version__),
url='https://pypi.org/project/robotframework/'
),
unittest_xml_reporting=dict(
package='unittest-xml-reporting v{}'.format(xmlrunner.__version__),
url='https://pypi.org/project/unittest-xml-reporting/'
)
)
obj.update(templateapp.config.Data.get_dependency())
obj.update(regexapp.config.Data.get_dependency())
obj.update(dlapp.config.Data.get_dependency())
dependencies = dict(sorted(obj.items(), key=lambda x: str(x[0]))) # noqa
return dependencies
@classmethod
def get_template_storage_info(cls):
fn = cls.template_storage_filename
generic_fn = File.change_home_dir_to_generic(fn)
if File.is_exist(fn):
existed = 'Yes'
with open(fn) as stream:
node = yaml.safe_load(stream)
total = len(node) if Misc.is_dict(node) else 0
else:
existed = 'No'
total = 0
lst = [
'Template Storage Info:',
# ' - Location: {}'.format(fn),
' - Location: {}'.format(generic_fn),
' - Existed: {}'.format(existed),
' - Total Templates: {}'.format(total)
]
return '\n'.join(lst)
|
the-stack_106_27211 | import argparse
import json
from torch.utils.data import DataLoader
from utils import google_utils
from utils.datasets import *
from utils.utils import *
def test(data,
weights=None,
batch_size=16,
imgsz=640,
conf_thres=0.001,
iou_thres=0.6, # for NMS
save_json=False,
single_cls=False,
augment=False,
verbose=False,
model=None,
dataloader=None,
merge=False):
# Initialize/load model and set device
if model is None:
training = False
device = torch_utils.select_device(opt.device, batch_size=batch_size)
half = device.type != 'cpu' # half precision only supported on CUDA
# Remove previous
for f in glob.glob('test_batch*.jpg'):
os.remove(f)
# Load model
google_utils.attempt_download(weights)
model = torch.load(weights, map_location=device)['model'].float() # load to FP32
torch_utils.model_info(model)
model.fuse()
model.to(device)
if half:
model.half() # to FP16
# Multi-GPU disabled, incompatible with .half()
# if device.type != 'cpu' and torch.cuda.device_count() > 1:
# model = nn.DataParallel(model)
else: # called by train.py
training = True
device = next(model.parameters()).device # get model device
# half disabled https://github.com/ultralytics/yolov5/issues/99
half = False # device.type != 'cpu' and torch.cuda.device_count() == 1
if half:
model.half() # to FP16
# Configure
model.eval()
with open(data) as f:
data = yaml.load(f, Loader=yaml.FullLoader) # model dict
nc = 1 if single_cls else int(data['nc']) # number of classes
iouv = torch.linspace(0.5, 0.95, 10).to(device) # iou vector for [email protected]:0.95
# iouv = iouv[0].view(1) # comment for [email protected]:0.95
niou = iouv.numel()
# Dataloader
if dataloader is None: # not training
img = torch.zeros((1, 3, imgsz, imgsz), device=device) # init img
_ = model(img.half() if half else img) if device.type != 'cpu' else None # run once
merge = opt.merge # use Merge NMS
path = data['test'] if opt.task == 'test' else data['val'] # path to val/test images
dataset = LoadImagesAndLabels(path,
imgsz,
batch_size,
rect=True, # rectangular inference
single_cls=opt.single_cls, # single class mode
stride=int(max(model.stride)), # model stride
pad=0.5) # padding
batch_size = min(batch_size, len(dataset))
nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8]) # number of workers
dataloader = DataLoader(dataset,
batch_size=batch_size,
num_workers=nw,
pin_memory=True,
collate_fn=dataset.collate_fn)
seen = 0
names = model.names if hasattr(model, 'names') else model.module.names
coco91class = coco80_to_coco91_class()
s = ('%20s' + '%12s' * 6) % ('Class', 'Images', 'Targets', 'P', 'R', '[email protected]', '[email protected]:.95')
p, r, f1, mp, mr, map50, map, t0, t1 = 0., 0., 0., 0., 0., 0., 0., 0., 0.
loss = torch.zeros(3, device=device)
jdict, stats, ap, ap_class = [], [], [], []
for batch_i, (img, targets, paths, shapes) in enumerate(tqdm(dataloader, desc=s)):
img = img.to(device)
img = img.half() if half else img.float() # uint8 to fp16/32
img /= 255.0 # 0 - 255 to 0.0 - 1.0
targets = targets.to(device)
nb, _, height, width = img.shape # batch size, channels, height, width
whwh = torch.Tensor([width, height, width, height]).to(device)
# Disable gradients
with torch.no_grad():
# Run model
t = torch_utils.time_synchronized()
inf_out, train_out = model(img, augment=augment) # inference and training outputs
t0 += torch_utils.time_synchronized() - t
# Compute loss
if training: # if model has loss hyperparameters
loss += compute_loss([x.float() for x in train_out], targets, model)[1][:3] # GIoU, obj, cls
# Run NMS
t = torch_utils.time_synchronized()
output = non_max_suppression(inf_out, conf_thres=conf_thres, iou_thres=iou_thres, merge=merge)
t1 += torch_utils.time_synchronized() - t
# Statistics per image
for si, pred in enumerate(output):
labels = targets[targets[:, 0] == si, 1:]
nl = len(labels)
tcls = labels[:, 0].tolist() if nl else [] # target class
seen += 1
if pred is None:
if nl:
stats.append((torch.zeros(0, niou, dtype=torch.bool), torch.Tensor(), torch.Tensor(), tcls))
continue
# Append to text file
# with open('test.txt', 'a') as file:
# [file.write('%11.5g' * 7 % tuple(x) + '\n') for x in pred]
# Clip boxes to image bounds
clip_coords(pred, (height, width))
# Append to pycocotools JSON dictionary
if save_json:
# [{"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}, ...
image_id = int(Path(paths[si]).stem.split('_')[-1])
box = pred[:, :4].clone() # xyxy
scale_coords(img[si].shape[1:], box, shapes[si][0], shapes[si][1]) # to original shape
box = xyxy2xywh(box) # xywh
box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner
for p, b in zip(pred.tolist(), box.tolist()):
jdict.append({'image_id': image_id,
'category_id': coco91class[int(p[5])],
'bbox': [round(x, 3) for x in b],
'score': round(p[4], 5)})
# Assign all predictions as incorrect
correct = torch.zeros(pred.shape[0], niou, dtype=torch.bool, device=device)
if nl:
detected = [] # target indices
tcls_tensor = labels[:, 0]
# target boxes
tbox = xywh2xyxy(labels[:, 1:5]) * whwh
# Per target class
for cls in torch.unique(tcls_tensor):
ti = (cls == tcls_tensor).nonzero().view(-1) # prediction indices
pi = (cls == pred[:, 5]).nonzero().view(-1) # target indices
# Search for detections
if pi.shape[0]:
# Prediction to target ious
ious, i = box_iou(pred[pi, :4], tbox[ti]).max(1) # best ious, indices
# Append detections
for j in (ious > iouv[0]).nonzero():
d = ti[i[j]] # detected target
if d not in detected:
detected.append(d)
correct[pi[j]] = ious[j] > iouv # iou_thres is 1xn
if len(detected) == nl: # all targets already located in image
break
# Append statistics (correct, conf, pcls, tcls)
stats.append((correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), tcls))
# Plot images
if batch_i < 1:
f = 'test_batch%g_gt.jpg' % batch_i # filename
plot_images(img, targets, paths, f, names) # ground truth
f = 'test_batch%g_pred.jpg' % batch_i
plot_images(img, output_to_target(output, width, height), paths, f, names) # predictions
# Compute statistics
stats = [np.concatenate(x, 0) for x in zip(*stats)] # to numpy
if len(stats):
p, r, ap, f1, ap_class = ap_per_class(*stats)
p, r, ap50, ap = p[:, 0], r[:, 0], ap[:, 0], ap.mean(1) # [P, R, [email protected], [email protected]:0.95]
mp, mr, map50, map = p.mean(), r.mean(), ap50.mean(), ap.mean()
nt = np.bincount(stats[3].astype(np.int64), minlength=nc) # number of targets per class
else:
nt = torch.zeros(1)
# Print results
pf = '%20s' + '%12.3g' * 6 # print format
print(pf % ('all', seen, nt.sum(), mp, mr, map50, map))
# Print results per class
if verbose and nc > 1 and len(stats):
for i, c in enumerate(ap_class):
print(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i]))
# Print speeds
t = tuple(x / seen * 1E3 for x in (t0, t1, t0 + t1)) + (imgsz, imgsz, batch_size) # tuple
if not training:
print('Speed: %.1f/%.1f/%.1f ms inference/NMS/total per %gx%g image at batch-size %g' % t)
# Save JSON
if save_json and map50 and len(jdict):
imgIds = [int(Path(x).stem.split('_')[-1]) for x in dataloader.dataset.img_files]
f = 'detections_val2017_%s_results.json' % \
(weights.split(os.sep)[-1].replace('.pt', '') if weights else '') # filename
print('\nCOCO mAP with pycocotools... saving %s...' % f)
with open(f, 'w') as file:
json.dump(jdict, file)
try:
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
# https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
cocoGt = COCO(glob.glob('../coco/annotations/instances_val*.json')[0]) # initialize COCO ground truth api
cocoDt = cocoGt.loadRes(f) # initialize COCO pred api
cocoEval = COCOeval(cocoGt, cocoDt, 'bbox')
cocoEval.params.imgIds = imgIds # image IDs to evaluate
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
map, map50 = cocoEval.stats[:2] # update results ([email protected]:0.95, [email protected])
except:
print('WARNING: pycocotools must be installed with numpy==1.17 to run correctly. '
'See https://github.com/cocodataset/cocoapi/issues/356')
# Return results
maps = np.zeros(nc) + map
for i, c in enumerate(ap_class):
maps[c] = ap[i]
return (mp, mr, map50, map, *(loss.cpu() / len(dataloader)).tolist()), maps, t
if __name__ == '__main__':
parser = argparse.ArgumentParser(prog='test.py')
parser.add_argument('--weights', type=str, default='weights/yolov5s.pt', help='model.pt path')
parser.add_argument('--data', type=str, default='data/coco128.yaml', help='*.data path')
parser.add_argument('--batch-size', type=int, default=32, help='size of each image batch')
parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
parser.add_argument('--conf-thres', type=float, default=0.001, help='object confidence threshold')
parser.add_argument('--iou-thres', type=float, default=0.65, help='IOU threshold for NMS')
parser.add_argument('--save-json', action='store_true', help='save a cocoapi-compatible JSON results file')
parser.add_argument('--task', default='val', help="'val', 'test', 'study'")
parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset')
parser.add_argument('--augment', action='store_true', help='augmented inference')
parser.add_argument('--merge', action='store_true', help='use Merge NMS')
parser.add_argument('--verbose', action='store_true', help='report mAP by class')
opt = parser.parse_args()
opt.img_size = check_img_size(opt.img_size)
opt.save_json = opt.save_json or opt.data.endswith('coco.yaml')
opt.data = check_file(opt.data) # check file
print(opt)
# task = 'val', 'test', 'study'
if opt.task in ['val', 'test']: # (default) run normally
test(opt.data,
opt.weights,
opt.batch_size,
opt.img_size,
opt.conf_thres,
opt.iou_thres,
opt.save_json,
opt.single_cls,
opt.augment,
opt.verbose)
elif opt.task == 'study': # run over a range of settings and save/plot
for weights in ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt', 'yolov3-spp.pt']:
f = 'study_%s_%s.txt' % (Path(opt.data).stem, Path(weights).stem) # filename to save to
x = list(range(352, 832, 64)) # x axis
y = [] # y axis
for i in x: # img-size
print('\nRunning %s point %s...' % (f, i))
r, _, t = test(opt.data, weights, opt.batch_size, i, opt.conf_thres, opt.iou_thres, opt.save_json)
y.append(r + t) # results and times
np.savetxt(f, y, fmt='%10.4g') # save
os.system('zip -r study.zip study_*.txt')
# plot_study_txt(f, x) # plot
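# --- Hedged usage examples (not part of the original script) ---
# Typical invocations, assuming the default repo layout:
#   python test.py --weights weights/yolov5s.pt --data data/coco.yaml --img-size 640 --save-json
#   python test.py --task study --data data/coco.yaml    # sweeps image sizes and writes study_*.txt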
|
the-stack_106_27214 | import tensorflow as tf
def init_inputs(num_features, selection_methods):
inputs = dict()
for selection_method in selection_methods:
with tf.name_scope('input_' + selection_method):
inputs[selection_method] = tf.placeholder(dtype=tf.float32,
shape=[None, num_features],
name=selection_method + '_input')
return inputs
def logits_layer(feed_forward, units):
logits_layers = dict()
for name, ff in feed_forward.items():
logits_layers[name] = tf.layers.dense(ff, units=units, name=name + '_logits')
return logits_layers
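# --- Hedged usage sketch (not part of the original module) ---
# Shows how init_inputs and logits_layer might be wired together in a TF1-style
# graph. The selection-method names, feature count, hidden size and class count
# below are assumptions made up for illustration.
def _example_build_graph():
    selection_methods = ['chi2', 'mutual_info']   # assumed method names
    inputs = init_inputs(num_features=128, selection_methods=selection_methods)
    hidden = {name: tf.layers.dense(ph, units=64, activation=tf.nn.relu,
                                    name=name + '_hidden')
              for name, ph in inputs.items()}
    return logits_layer(hidden, units=10)         # 10 output classes (assumed)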
|
the-stack_106_27216 | import asyncio, discord
try:
from _command import Command
except ImportError:
from coms._command import Command
import pathlib
class Com(Command):
def __init__(self):
self.usage = "!help"
self.description = "Gives some basic help info about kermi"
self.keys = ["!help", "bot help", ".help", "!commands", ".commands"]
self.permissions = ["*"]
async def command(self, client, message, rawtext):
msg = "**Kermit's info!**\nTo find out more info about any of the commands below, run them with -help after them.\n*e.g\t\t"
msg += "!deepfry -help*\n\n**Commands:**\n"
curdir = pathlib.Path('./coms')
for command in curdir.iterdir():
if command.name[0] != "_" and command.name[0] != ".":
com = __import__("coms")
com = getattr(com, command.name[:-3])
comobj = com.Com()
if not comobj.keys[0] == "*":
msg += comobj.keys[0] + "\n"
await self.send(client, message.channel, msg)
if __name__ == "__main__":
command = Com()
print(command.help())
|
the-stack_106_27222 | import pickle
import sys
def parse(files):
parsed = {}
binding = {}
for fname in files:
f = open(fname)
for line in f:
splitted = line.split('\t')
split_size = len(splitted)
id = splitted[0]
domain = splitted[4]
if id in parsed.keys():
parsed[id].append(domain)
else:
parsed[id] = [domain]
binding_sites = [splitted[6], splitted[7]]
if id in binding.keys():
binding[id][domain] = binding_sites
else:
binding[id] = {}
binding[id][domain] = binding_sites
    pickle.dump(parsed, open('tmp/parsed.pickle', 'wb'))  # pickle requires binary mode in Python 3
    pickle.dump(binding, open('tmp/binding_sites.pickle', 'wb'))
if __name__ == "__main__":
files = []
if len(sys.argv) > 2:
files = sys.argv[1:]
else:
files = [sys.argv[1]]
parse(files)
|
the-stack_106_27225 | #!/usr/bin/env python3
from contextlib import closing
import random
import subprocess
import os
from typing import Optional
def xclip(*args: str, data: Optional[str] = None) -> Optional[str]:
base_command = ["xclip", "-selection", "CLIPBOARD", *args]
if "-o" in args:
with closing(
subprocess.Popen(base_command, stdout=subprocess.PIPE).stdout
) as stdout:
return stdout.read().decode("UTF-8")
else:
with closing(
subprocess.Popen(base_command, stdin=subprocess.PIPE).stdin
) as stdin:
stdin.write(data.encode("UTF-8"))
def notify(text: str):
subprocess.Popen(
["notify-send", os.path.basename(__file__).removesuffix(".py"), text]
)
def get_clipboard_text():
return xclip("-o")
def set_clipboard_text(data):
xclip(data=data)
notify(data)
def main():
text = get_clipboard_text()
if not text:
notify("No text found in clipboard!")
return
text = "".join(
c.upper() if (i % 2 == 0 or random.random() <= 0.3) else c
for i, c in enumerate(text.lower())
)
set_clipboard_text(text)
if __name__ == "__main__":
try:
main()
except Exception as exc:
notify(type(exc).__name__)
raise exc
|
the-stack_106_27226 | from django.urls import path
from .views import (
# CommentView,
CommentListCreateView,
CommentRetrieveUpdateDestroyView,
BlogListCreateView,
BlogRetrieveUpdateDestroyView
)
urlpatterns = [
path('post/', BlogListCreateView.as_view()),
# path('post/<blog_id>/', BlogRetrieveUpdateDestroyView.as_view()),
path('post/<pk>/', BlogRetrieveUpdateDestroyView.as_view()),
path('post/<blog_id>/comment/', CommentListCreateView.as_view()),
path('comment/<comment_id>/', CommentRetrieveUpdateDestroyView.as_view()),
]
|
the-stack_106_27228 | """
RDS utilities functions for bdd
"""
# pylint: disable=wrong-import-position,no-value-for-parameter
import time
import mysql.connector
from botocore.exceptions import ClientError
from mysql.connector import errorcode
from sls.retry.api import retry
from sls.utils.exceptions import ClusterExists, DatabaseNotAvailable, InstanceExists
from sls.utils.aws_rds import RdsClient
def is_db_present(context, rds_client, db_instance_name):
try:
db_exists = \
rds_client.describe_db_instances(DBInstanceIdentifier=db_instance_name)['DBInstances'][
0][
'DBInstanceStatus']
context.logger.info(
f"DB instance %s already exists with status %s" % (db_instance_name, db_exists))
return True
except ClientError as boto_client_error:
if boto_client_error.response["Error"]["Code"] in "DBInstanceNotFound":
context.logger.info(f"DB instance %s does not exist" % db_instance_name)
return False
else:
raise boto_client_error
def create_rds_instance(context, rds_client, db_instance_name, db_info):
if not is_db_present(context, rds_client, db_instance_name):
try:
res = rds_client.create_db_instance(**db_info)
if res['ResponseMetadata']['HTTPStatusCode'] == 200:
context.logger.info(f"Successfully initiated create DB instance %s" % db_instance_name)
context.logger.info(f"waiting for db instance %s to be available" % db_instance_name)
wait_for_instance(rds_client, db_instance_name)
return True
else:
context.logger.info(f"Couldn't create DB instance %s" % db_instance_name)
return False
except Exception as error:
context.logger.error(error)
return False
return True
def wait_for_instance(rds_client, db_instance_name):
rds_waiter = rds_client.get_waiter('db_instance_available')
rds_waiter.wait(
DBInstanceIdentifier=db_instance_name
)
def create_rds_cluster(context, rds_client, db_cluster_name, db_info):
try:
db_exists = \
rds_client.describe_db_clusters(DBClusterIdentifier=db_cluster_name)['DBClusters'][0][
'Status']
context.logger.info(
f"DB cluster %s already exists with status %s" % (db_cluster_name, db_exists))
except ClientError as boto_client_error:
if boto_client_error.response["Error"]["Code"] in "DBClusterNotFoundFault":
context.logger.info(f"DB cluster %s does not exist" % db_cluster_name)
res_c = rds_client.create_db_cluster(
VpcSecurityGroupIds=db_info['sg_name'],
DBSubnetGroupName=db_info['subnet_name'],
BackupRetentionPeriod=1,
DBClusterIdentifier=db_cluster_name,
Engine=db_info['engine'],
EngineVersion=db_info['version'],
MasterUsername=db_info['master_user'],
MasterUserPassword=db_info['master_pwd'],
EngineMode=db_info['mode']
)
if res_c['ResponseMetadata']['HTTPStatusCode'] == 200:
context.logger.info(
f"Successfully initiated create DB cluster %s" % db_cluster_name)
else:
context.logger.info(f"Couldn't create DB cluster %s" % db_cluster_name)
context.logger.info(f"waiting for db cluster %s to be available" % db_cluster_name)
check_cluster_available(db_cluster_name, context)
else:
raise boto_client_error
if db_info['mode'] == 'serverless':
return True
try:
db_details = rds_client.describe_db_clusters(DBClusterIdentifier=db_cluster_name)
if len(db_details['DBClusters'][0]['DBClusterMembers']) > 0:
context.logger.info(f"DB cluster instance already exists for %s" % db_cluster_name)
return True
except ClientError as boto_client_error:
if boto_client_error.response["Error"]["Code"] in "DBClusterNotFoundFault":
context.logger.info(f"DB cluster %s does not exist" % db_cluster_name)
return False
db_instance_name = db_cluster_name + '-instance'
res_i = rds_client.create_db_instance(
DBInstanceIdentifier=db_instance_name,
DBInstanceClass="db.r4.large",
Engine=db_info['engine'],
DBClusterIdentifier=db_cluster_name,
PubliclyAccessible=False
)
if res_i['ResponseMetadata']['HTTPStatusCode'] == 200:
context.logger.info(
f"Successfully initiated create DB cluster-instance %s" % db_instance_name)
else:
context.logger.info(f"Couldn't create DB cluster-instance %s" % db_instance_name)
context.logger.info(f"waiting for db cluster-instance %s to be available" % db_instance_name)
check_instance_available(db_instance_name, context)
def delete_rds_cluster(context, rds_client, db_cluster_name):
try:
db_details = rds_client.describe_db_clusters(DBClusterIdentifier=db_cluster_name)
for i in db_details['DBClusters'][0]['DBClusterMembers']:
delete_rds_instance(context, rds_client, i['DBInstanceIdentifier'])
except ClientError as boto_client_error:
if boto_client_error.response["Error"]["Code"] in "DBClusterNotFoundFault":
context.logger.info(f"DB cluster %s does not exist" % db_cluster_name)
return True
res = rds_client.delete_db_cluster(
DBClusterIdentifier=db_cluster_name,
SkipFinalSnapshot=True)
if res['ResponseMetadata']['HTTPStatusCode'] == 200:
context.logger.info(f"Successfully initiated delete DB cluster %s" % db_cluster_name)
else:
context.logger.info(f"Couldn't delete DB cluster %s" % db_cluster_name)
context.logger.info(f"waiting for db cluster %s to be deleted" % db_cluster_name)
check_cluster_removed(db_cluster_name, context)
def delete_rds_instance(context, rds_client, db_instance_name):
try:
db_status = \
rds_client.describe_db_instances(DBInstanceIdentifier=db_instance_name)['DBInstances'][
0][
'DBInstanceStatus']
except ClientError as boto_client_error:
if boto_client_error.response["Error"]["Code"] in "DBInstanceNotFound":
context.logger.info(f"DB instance %s is already deleted" % db_instance_name)
return True
try:
res = rds_client.delete_db_instance(
DBInstanceIdentifier=db_instance_name,
SkipFinalSnapshot=True,
DeleteAutomatedBackups=True)
except ClientError as boto_client_error:
if 'NoDeleteAutomatedBackups' in boto_client_error.response["Error"]["Message"]:
res = rds_client.delete_db_instance(
DBInstanceIdentifier=db_instance_name,
SkipFinalSnapshot=True,
DeleteAutomatedBackups=False)
else:
raise boto_client_error
if res['ResponseMetadata']['HTTPStatusCode'] == 200:
context.logger.info(f"Successfully initiated delete DB instance %s" % db_instance_name)
else:
context.logger.info(f"Couldn't delete DB instance %s" % db_instance_name)
context.logger.info(f"waiting for db instance %s to be deleted" % db_instance_name)
check_instance_removed(db_instance_name, context)
def get_db_connection(context, rds_client, db_conn_info):
"""
    Open a MySQL connection using the supplied connection info.
    Args:
        context:
        rds_client:
        db_conn_info: dict with user_name, user_pwd, endpoint, port and db_name
Returns:
"""
try:
context.logger.info(f"checking connection for %s" % db_conn_info['db_name'])
conn_config = {"user": db_conn_info['user_name'], "password": db_conn_info['user_pwd'],
"host": db_conn_info['endpoint'], "port": db_conn_info['port']}
cnx = mysql.connector.connect(**conn_config)
return cnx
except mysql.connector.Error as err:
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
context.logger.error("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
context.logger.error("Database does not exist")
else:
context.logger.error(err)
raise err
@retry(DatabaseNotAvailable, delay=30, tries=30)
def check_instance_available(db_instance_name, context):
"""
Retry loop to ensure instance is available
Args:
db_instance_name:
context:
rds_client:
Returns:
"""
rds_client = RdsClient(
context.db_creds, context.db_region, context.logger
).client
db_status = rds_client.describe_db_instances(DBInstanceIdentifier=db_instance_name)[
"DBInstances"
][0]["DBInstanceStatus"]
if db_status == "available":
context.logger.info(f"DB instance %s is ready" % db_instance_name)
else:
raise DatabaseNotAvailable(f"DB instance %s is not ready" % db_instance_name)
@retry(DatabaseNotAvailable, delay=30, tries=30)
def check_cluster_available(db_cluster_name, context):
"""
Retry loop to ensure cluster is available
Args:
db_cluster_name:
context:
rds_client:
Returns:
"""
rds_client = RdsClient(
context.db_creds, context.db_region, context.logger
).client
db_status = rds_client.describe_db_clusters(DBClusterIdentifier=db_cluster_name)[
"DBClusters"
][0]["Status"]
if db_status == "available":
context.logger.info(f"DB cluster %s is ready" % db_cluster_name)
else:
raise DatabaseNotAvailable(f"DB cluster %s is not ready" % db_cluster_name)
@retry(ClusterExists, delay=30, tries=25)
def check_cluster_removed(db_cluster_name, context):
"""
Retry loop to ensure cluster is deleted
Args:
db_cluster_name:
context:
Returns:
"""
try:
rds_client = RdsClient(
context.db_creds, context.db_region, context.logger
).client
db_status = rds_client.describe_db_clusters(
DBClusterIdentifier=db_cluster_name
)["DBClusters"][0]["Status"]
raise ClusterExists(f"DB cluster %s is not deleted" % db_cluster_name)
except ClientError as boto_client_error:
if boto_client_error.response["Error"]["Code"] in "DBClusterNotFoundFault":
context.logger.info(f"DB cluster %s is deleted" % db_cluster_name)
else:
raise boto_client_error
@retry(InstanceExists, delay=30, tries=25)
def check_instance_removed(db_instance_name, context):
"""
Retry loop to ensure instance is deleted
Args:
db_instance_name:
context:
Returns:
"""
try:
rds_client = RdsClient(
context.instance_creds, context.instance_region, context.logger
).client
rds_client.describe_db_instances(
DBInstanceIdentifier=db_instance_name
)["DBInstances"][0]["DBInstanceStatus"]
raise InstanceExists(f"DB instance %s is not deleted" % db_instance_name)
except ClientError as boto_client_error:
if boto_client_error.response["Error"]["Code"] in "DBInstanceNotFound":
context.logger.info(f"DB instance %s is deleted" % db_instance_name)
else:
raise boto_client_error
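# --- Hedged usage sketch (not part of the original module) ---
# Rough illustration of how the helpers above might be driven from a bdd step.
# The context object is assumed to carry a logger; the region, identifier and
# every value in db_info are placeholders, and db_info is passed straight
# through to boto3's create_db_instance.
def _example_create_instance(context):
    import boto3
    rds_client = boto3.client('rds', region_name='us-east-1')   # assumed region
    db_info = {
        'DBInstanceIdentifier': 'bdd-test-instance',
        'DBInstanceClass': 'db.t3.micro',
        'Engine': 'mysql',
        'MasterUsername': 'admin',
        'MasterUserPassword': 'change-me',
        'AllocatedStorage': 20,
    }
    if create_rds_instance(context, rds_client, 'bdd-test-instance', db_info):
        context.logger.info("instance is available")
        delete_rds_instance(context, rds_client, 'bdd-test-instance')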
|
the-stack_106_27229 | # importing the necessary modules
import requests
from bs4 import BeautifulSoup
import zipfile
from io import BytesIO
# Creating a new file to store the zip file links
newfile = open('zipfiles.txt','w')
newdir = "C:/Users/mattp/Desktop/WorkFiles/XMLFiles/2021Tiger/Zip"
#Set variable for page to be opened and url to be concatenated
page =requests.get('https://www2.census.gov/geo/tiger/TIGER2021/ELSD/')
baseurl= 'https://www2.census.gov/'
#Use BeautifulSoup to clean up the page
soup = BeautifulSoup(page.content, 'html.parser')
soup.prettify()
#Find all the links on the page that end in .zip and write them into the text file
for anchor in soup.findAll('a', href=True):
links = anchor['href']
if links.endswith('.zip'):
newfile.write(links + '\n')
newfile.close()
#Fetching the links for the zip file and downloading the files
with open('zipfiles.txt', 'r') as links:
for link in links:
if link:
filename1= link.split('/')[-1]
filename= filename1[:-1]
filenameb = newdir +"/" + filename
link = baseurl + link
print(filename + ' file started to download')
            try:
                response = requests.get(link[:-1])
                response.raise_for_status()
            except requests.exceptions.RequestException as e:
                print('We failed to reach the server.')
                print('Reason: ', e)
                continue
            # Writing the zip file into local file system
            with open(filenameb, 'wb') as output_file:
                output_file.write(response.content)
            # use a separate name so the zipfile module is not shadowed
            zf = zipfile.ZipFile(BytesIO(response.content))
            #zf.extractall(newdir)
print(filenameb + 'file is downloaded') |
the-stack_106_27230 | from PySide6.QtWidgets import QMainWindow
from monitorcontrol_gui.controller import Controller
from monitorcontrol_gui.model import Model, Monitor
from monitorcontrol_gui.ui.main_window import Ui_MainWindow
class View(QMainWindow):
"""Class handling the main window of the app."""
def __init__(self, ctrl: Controller, model: Model) -> None:
"""Initializer of the graphical interface.
Setting up the controller and model as well as initializing the actual
graphical qt interface. It also queries for monitors using QThread to
not block the main thread.
Args:
            ctrl (Controller): Controller used to query monitors and apply settings.
            model (Model): Data model holding the monitor list shown in the UI.
"""
super().__init__()
self.ctrl = ctrl
self.model = model
print("Setting up UI")
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
self.ui.buttonSet.clicked.connect(self.set_button_handler)
self.ctrl.query_monitors(self.init_monitor_combobox)
def init_monitor_combobox(self, monitors: list[Monitor]) -> None:
"""Initialize the monitor combobox.
Insert all monitors into the combobox. Also adds 'All' with index 0 as
the first entry, and also the default. Sets the items from the model
using the ID and Model.
Sets the current index to 'All'.
"""
self.ui.monitorComboBox.addItem("0: All")
self.ui.monitorComboBox.addItems([
f"{monitor.idx}: {monitor.model}"
for monitor in monitors
])
self.ui.monitorComboBox.setCurrentIndex(0)
def set_button_handler(self) -> None:
"""Handler for clicking the setButton.
Setting the luminosity of the monitor with the given index of the
combo box. Extracts the index from the text value in the combobox.
"""
value = int(self.ui.luminanceValueLabel.text())
idx = int(self.ui.monitorComboBox.currentText().split(":")[0])
self.ctrl.set_luminosity(value, idx)
|
the-stack_106_27231 | #
# (c) Copyright 2015 Hewlett Packard Enterprise Development LP
# (c) Copyright 2017-2018 SUSE LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import logging
import logging.config
from ardana_configurationprocessor.cp.model.ValidatorPlugin \
import ValidatorPlugin
from ardana_configurationprocessor.cp.model.CPLogging \
import CPLogging as KenLog
LOG = logging.getLogger(__name__)
class ServerGroupsValidator(ValidatorPlugin):
def __init__(self, instructions, config_files):
super(ServerGroupsValidator, self).__init__(
2.0, instructions, config_files,
'server-groups-2.0')
self._valid = False
LOG.info('%s()' % KenLog.fcn())
def validate(self):
LOG.info('%s()' % KenLog.fcn())
version = float(self.version())
input = self._create_content(version, "server-groups")
# Server Groups are optional
if not input:
return True
self._valid = self.validate_schema(input, "server_group")
if self._valid:
server_groups = input['server-groups']
self._validate_names(server_groups)
return self._valid
def _validate_names(self, server_groups):
#
# Check each model is only defined once
#
names = set()
for group in server_groups:
if group['name'] in names:
msg = ("Server Group %s is defined more than once." %
(group['name']))
self.add_error(msg)
                self._valid = False
else:
names.add(group['name'])
@property
def instructions(self):
return self._instructions
def get_dependencies(self):
return []
|
the-stack_106_27232 | import unittest
from tests.test_base import *
from rfho.models import *
class TestModelVectorization(unittest.TestCase):
def test_augmented_gradient(self):
iris, x, y, model, w, model_y, error, accuracy = iris_logistic_regression(2)
d = int(w.tensor.get_shape()[0].value/3)
self.assertEqual(d, 4*3 + 3)
gt = tf.gradients(error, w.tensor)
print('gradients w.r.t. augmented state', gt, sep='\n')
self.assertFalse(any([g is None for g in gt])) # all gradients are defined (all(gt) rises a tf error
grads_intermediate = tf.gradients(error, w._var_list_as_tensors())
print('gradients w.r.t. w, m, p', grads_intermediate, sep='\n')
self.assertFalse(any([g is None for g in grads_intermediate]))
grads_wrt_single_variables = tf.gradients(error, w._get_base_variable_list())
print('gradients w.r.t. w, m, p', grads_wrt_single_variables, sep='\n')
self.assertFalse(any([g is None for g in grads_wrt_single_variables]))
fd = {x: iris.train.data, y: iris.train.target}
with tf.Session().as_default() as ss:
tf.global_variables_initializer().run()
print('gradients w.r.t. augmented state', ss.run(gt, feed_dict=fd), sep='\n')
print('gradients w.r.t. w, m, p', ss.run(grads_intermediate, feed_dict=fd), sep='\n')
print('gradients w.r.t. w, m, p', ss.run(grads_wrt_single_variables, feed_dict=fd), sep='\n')
class TestModels(unittest.TestCase):
# FIXME (last time error:tensorflow.python.framework.errors_impl.InternalError: Dst tensor is not initialized.
# def test_sparse_input_models(self):
# import rfho.datasets as ddt
#
# real_sim = ddt.load_20newsgroup_vectorized(partitions_proportions=[.5, .3])
#
# model_train = LinearModel(real_sim.train.data, real_sim.train.dim_data, real_sim.train.dim_target,
# init_w=tf.random_normal, init_b=tf.random_normal, benchmark=False)
#
# model_train2 = model_train.for_input(real_sim.train.data)
#
# model_valid = model_train.for_input(real_sim.validation.data)
#
# with tf.Session().as_default():
# tf.global_variables_initializer().run()
# self.assertEqual(np.sum(model_train.Ws[0].eval()), np.sum(model_valid.Ws[0].eval()))
# self.assertEqual(np.sum(model_train.bs[0].eval()), np.sum(model_valid.bs[0].eval()))
#
# print(np.sum(model_train.inp[-1].eval() - model_train2.inp[-1].eval()))
# print(np.sum(model_train.inp[-1].eval() - model_train2.inp[-1].eval()))
# print(np.sum(model_train.inp[-1].eval() - model_train2.inp[-1].eval()))
#
# print(np.sum(model_train.inp[-1].eval() - model_train.inp[-1].eval()))
# print(np.sum(model_train.inp[-1].eval() - model_train.inp[-1].eval()))
# # if sparse matmul is used then on gpu these last values are not 0!
def test_ffnn(self):
x, y = tf.placeholder(tf.float32), tf.placeholder(tf.float32)
model = FFNN(x, dims=[10, 123, 89, 47], active_gen_kwargs=(
{'activ': mixed_activation(tf.identity, tf.nn.sigmoid)},
{}
))
mod_y = model.for_input(y)
self.assertEqual(model.var_list, mod_y.var_list) # share variables
self.assertNotEqual(model.inp[1:], mod_y.inp[1:]) # various activations are different nodes!
def test_simpleCNN(self):
x, y = tf.placeholder(tf.float32), tf.placeholder(tf.float32)
model = SimpleCNN(tf.reshape(x, [-1, 15, 15, 1]),
conv_dims=[
[5, 5, 1, 4],
[5, 5, 4, 8],
], ffnn_dims=[20, 10],
active_gen_kwargs=(
{'activ': mixed_activation(tf.identity, tf.nn.sigmoid)},
{}
))
mod_y = model.for_input(tf.reshape(y, [-1, 15, 15, 1]))
self.assertEqual(model.ffnn_part.var_list, mod_y.ffnn_part.var_list)
self.assertEqual(model.var_list, mod_y.var_list) # share variables
self.assertNotEqual(model.inp[1:], mod_y.inp[1:]) # various activations are different nodes!
w, out, out_y = vectorize_model(model.var_list, model.inp[-1], mod_y.inp[-1])
self.assertIsNotNone(out)
def test_determinitstic_initialization(self):
x = tf.constant([[1., 2.]])
mod = FFNN(x, [2, 2, 1], deterministic_initialization=True)
with tf.Session().as_default() as ss:
mod.initialize()
vals = ss.run(mod.Ws)
mod.initialize()
assert_array_lists_same(ss.run(mod.Ws), vals, test_case=self)
print()
with tf.Session().as_default() as ss:
mod.initialize()
assert_array_lists_same(ss.run(mod.Ws), vals, test_case=self)
if __name__ == '__main__':
unittest.main()
|
the-stack_106_27238 | import requests
import ast
import json
import glob
base_url = "https://developers.zomato.com/api/v2.1/"
def initialize_app(config):
return Zomato(config)
class Zomato:
def __init__(self, config):
self.user_key = config["user_key"]
def get_categories(self):
"""
Takes no input.
Returns a dictionary of IDs and their respective category names.
"""
headers = {'Accept': 'application/json', 'user-key': self.user_key}
r = (requests.get(base_url + "categories", headers=headers).content).decode("utf-8")
a = ast.literal_eval(r)
self.is_key_invalid(a)
self.is_rate_exceeded(a)
categories = {}
for category in a['categories']:
categories.update({category['categories']['id'] : category['categories']['name']})
return categories
def get_city_ID(self, city_name):
"""
Takes City Name as input.
Returns the ID for the city given as input.
"""
if city_name.isalpha() == False:
raise ValueError('InvalidCityName')
city_name = city_name.split(' ')
city_name = '%20'.join(city_name)
headers = {'Accept': 'application/json', 'user-key': self.user_key}
r = (requests.get(base_url + "cities?q=" + city_name, headers=headers).content).decode("utf-8")
a = ast.literal_eval(r)
self.is_key_invalid(a)
self.is_rate_exceeded(a)
if len(a['location_suggestions']) == 0:
raise Exception('invalid_city_name')
elif 'name' in a['location_suggestions'][0]:
city_name = city_name.replace('%20', ' ')
if str(a['location_suggestions'][0]['name']).lower() == str(city_name).lower():
return a['location_suggestions'][0]['id']
else:
raise ValueError('InvalidCityId')
def get_city_name(self, city_ID):
"""
Takes City ID as input.
Returns the name of the city ID given as input.
"""
self.is_valid_city_id(city_ID)
headers = {'Accept': 'application/json', 'user-key': self.user_key}
r = (requests.get(base_url + "cities?city_ids=" + str(city_ID), headers=headers).content).decode("utf-8")
a = ast.literal_eval(r)
self.is_key_invalid(a)
self.is_rate_exceeded(a)
if a['location_suggestions'][0]['country_name'] == "":
raise ValueError('InvalidCityId')
else:
temp_city_ID = a['location_suggestions'][0]['id']
if temp_city_ID == str(city_ID):
return a['location_suggestions'][0]['name']
def get_collections(self, city_ID, limit=None):
"""
Takes City ID as input. limit parameter is optional.
Returns dictionary of Zomato restaurant collections in a city and their respective URLs.
"""
#self.is_valid_city_id(city_ID)
headers = {'Accept': 'application/json', 'user-key': self.user_key}
if limit == None:
r = (requests.get(base_url + "collections?city_id=" + str(city_ID), headers=headers).content).decode("utf-8")
else:
if str(limit).isalpha() == True:
raise ValueError('LimitNotInteger')
else:
r = (requests.get(base_url + "collections?city_id=" + str(city_ID) + "&count=" + str(limit), headers=headers).content).decode("utf-8")
a = ast.literal_eval(r)
self.is_key_invalid(a)
self.is_rate_exceeded(a)
collections = {}
for collection in a['collections']:
collections.update({collection['collection']['title'] : collection['collection']['url']})
return collections
def get_cuisines(self, city_ID):
"""
Takes City ID as input.
Returns a sorted dictionary of all cuisine IDs and their respective cuisine names.
"""
self.is_valid_city_id(city_ID)
headers = {'Accept': 'application/json', 'user-key': self.user_key}
r = (requests.get(base_url + "cuisines?city_id=" + str(city_ID), headers=headers).content).decode("utf-8")
a = ast.literal_eval(r)
self.is_key_invalid(a)
self.is_rate_exceeded(a)
if len(a['cuisines']) == 0:
raise ValueError('InvalidCityId')
temp_cuisines = {}
cuisines = {}
for cuisine in a['cuisines']:
temp_cuisines.update({cuisine['cuisine']['cuisine_id'] : cuisine['cuisine']['cuisine_name']})
for cuisine in sorted(temp_cuisines):
cuisines.update({cuisine : temp_cuisines[cuisine]})
return cuisines
def get_establishment_types(self, city_ID):
"""
Takes City ID as input.
Returns a sorted dictionary of all establishment type IDs and their respective establishment type names.
"""
self.is_valid_city_id(city_ID)
headers = {'Accept': 'application/json', 'user-key': self.user_key}
r = (requests.get(base_url + "establishments?city_id=" + str(city_ID), headers=headers).content).decode("utf-8")
a = ast.literal_eval(r)
self.is_key_invalid(a)
self.is_rate_exceeded(a)
temp_establishment_types = {}
establishment_types = {}
if 'establishments' in a:
for establishment_type in a['establishments']:
temp_establishment_types.update({establishment_type['establishment']['id'] : establishment_type['establishment']['name']})
for establishment_type in sorted(temp_establishment_types):
establishment_types.update({establishment_type : temp_establishment_types[establishment_type]})
return establishment_types
else:
raise ValueError('InvalidCityId')
def get_nearby_restaurants(self, latitude, longitude):
"""
Takes the latitude and longitude as inputs.
Returns a dictionary of Restaurant IDs and their corresponding Zomato URLs.
"""
try:
float(latitude)
float(longitude)
except ValueError:
raise ValueError('InvalidLatitudeOrLongitude')
headers = {'Accept': 'application/json', 'user-key': self.user_key}
r = (requests.get(base_url + "geocode?lat=" + str(latitude) + "&lon=" + str(longitude), headers=headers).content).decode("utf-8")
a = ast.literal_eval(r)
nearby_restaurants = {}
for nearby_restaurant in a['nearby_restaurants']:
nearby_restaurants.update({nearby_restaurant['restaurant']['id'] : nearby_restaurant['restaurant']['url']})
return nearby_restaurants
def get_restaurant(self, restaurant_ID):
"""
Takes Restaurant ID as input.
Returns a dictionary of restaurant details.
"""
self.is_valid_restaurant_id(restaurant_ID)
headers = {'Accept': 'application/json', 'user-key': self.user_key}
r = (requests.get(base_url + "restaurant?res_id=" + str(restaurant_ID), headers=headers).content).decode("utf-8")
a = ast.literal_eval(r)
if 'code' in a:
if a['code'] == 404:
                raise ValueError('InvalidRestaurantId')
restaurant_details = {}
restaurant_details.update({"name" : a['name']})
restaurant_details.update({"url" : a['url']})
restaurant_details.update({"location" : a['location']['address']})
restaurant_details.update({"city" : a['location']['city']})
restaurant_details.update({"city_ID" : a['location']['city_id']})
restaurant_details.update({"user_rating" : a['user_rating']['aggregate_rating']})
restaurant_details = DotDict(restaurant_details)
return restaurant_details
def restaurant_search(self, query="", latitude="", longitude="", cuisines="",sort="",order="",limit=10):
"""
Takes either query, latitude and longitude or cuisine as input.
Returns a list of Restaurant IDs.
"""
cuisines = "%2C".join(cuisines.split(","))
if str(limit).isalpha() == True:
raise ValueError('LimitNotInteger')
headers = {'Accept': 'application/json', 'user-key': self.user_key}
result = [[] for i in range(5)]
for i, start in zip(range(0,5, 1), range(0, 100, 20)):
r = (requests.get(base_url + "search?q=" + str(query)+ "&start="+str(start) + "&count=20" + "&lat=" + str(latitude) + "&lon=" + str(longitude) + "&cuisines=" + str(cuisines)+"&sort="+str(sort)+"&order="+str(order), headers=headers).content).decode("utf-8")
result[i] = r
return result#retun list of 5 strings in json format
def restaurant_search_by_keyword(self, query="", cuisines="", limit=5):
"""
Takes either query, latitude and longitude or cuisine as input.
Returns a list of Restaurant IDs.
"""
cuisines = "%2C".join(cuisines.split(","))
if str(limit).isalpha() == True:
raise ValueError('LimitNotInteger')
headers = {'Accept': 'application/json', 'user-key': self.user_key}
r = (requests.get(base_url + "search?q=" + str(query) + "&count=" + str(limit) + "&cuisines=" + str(cuisines), headers=headers).content).decode("utf-8")
return r
def get_location(self, query="", limit=5):
"""
Takes either query, latitude and longitude or cuisine as input.
Returns a list of Restaurant IDs.
"""
if str(limit).isalpha() == True:
raise ValueError('LimitNotInteger')
headers = {'Accept': 'application/json', 'user-key': self.user_key}
r = (requests.get(base_url + "locations?query=" + str(query) + "&count=" + str(limit), headers=headers).content).decode("utf-8")
return r
def is_valid_restaurant_id(self, restaurant_ID):
"""
Checks if the Restaurant ID is valid or invalid.
If invalid, throws a InvalidRestaurantId Exception.
"""
restaurant_ID = str(restaurant_ID)
if restaurant_ID.isnumeric() == False:
raise ValueError('InvalidRestaurantId')
def is_valid_city_id(self, city_ID):
"""
Checks if the City ID is valid or invalid.
If invalid, throws a InvalidCityId Exception.
"""
city_ID = str(city_ID)
if city_ID.isnumeric() == False:
return True# raise ValueError('InvalidCityId')
def is_key_invalid(self, a):
"""
Checks if the API key provided is valid or invalid.
If invalid, throws a InvalidKey Exception.
"""
if 'code' in a:
if a['code'] == 403:
raise ValueError('InvalidKey')
def is_rate_exceeded(self, a):
"""
Checks if the request limit for the API key is exceeded or not.
If exceeded, throws a ApiLimitExceeded Exception.
"""
if 'code' in a:
if a['code'] == 440:
raise Exception('ApiLimitExceeded')
class DotDict(dict):
"""
Dot notation access to dictionary attributes
"""
__getattr__ = dict.get
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
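# --- Hedged usage sketch (not part of the original module) ---
# Minimal illustration of driving the wrapper; the API key, city name and
# restaurant ID are placeholders for the example.
if __name__ == "__main__":
    zomato = initialize_app({"user_key": "YOUR_ZOMATO_API_KEY"})  # placeholder key
    city_id = zomato.get_city_ID("Delhi")                         # assumed city
    print(zomato.get_cuisines(city_id))
    details = zomato.get_restaurant(16774318)                     # hypothetical restaurant ID
    print(details.name, details.user_rating)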
|
the-stack_106_27239 | dataset_type = 'IcdarDataset'
data_root = '/home/atom/Research_STD/Datasets/mmocr/ctw1500'
train = dict(type=dataset_type, ann_file=f'{data_root}/instances_training.json', img_prefix=f'{data_root}/imgs', pipeline=None)
test = dict(type=dataset_type, ann_file=f'{data_root}/instances_test.json', img_prefix=f'{data_root}/imgs', pipeline=None)
train_list = [train]
test_list = [test]
|
the-stack_106_27241 | #======================================================================
#
# This module contains routines to postprocess the VFI
# solutions.
#
# Simon Scheidegger, 01/19
# Cameron Gordon, 11/21 - updates to Python3 (print statements + pickle)
#======================================================================
import numpy as np
from parameters import *
#import cPickle as pickle
import pickle
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import (RBF, Matern, RationalQuadratic,
ExpSineSquared, DotProduct,
ConstantKernel)
import nonlinear_solver_iterate as solver
import os
#======================================================================
# Routine compute the errors
def ls_error(n_agents, t1, t2, num_points):
file=open('errors.txt', 'w')
np.random.seed(0)
dim = n_agents
k_test = np.random.uniform(k_bar, k_up, (No_samples, dim))
# test target container
y_test = np.zeros(No_samples, float)
to_print=np.empty((1,5))
for i in range(t1, t2-1):
sum_diffs=0
diff = 0
# Load the model from the previous iteration step
restart_data = filename + str(i) + ".pcl"
with open(restart_data, 'rb') as fd_old:
gp_old = pickle.load(fd_old)
print("data from iteration step ", i , "loaded from disk")
fd_old.close()
# Load the model from the previous iteration step
restart_data = filename + str(i+1) + ".pcl"
with open(restart_data, 'rb') as fd:
gp = pickle.load(fd)
print("data from iteration step ", i+1 , "loaded from disk")
fd.close()
mean_old, sigma_old = gp_old.predict(k_test, return_std=True)
mean, sigma = gp.predict(k_test, return_std=True)
gp_old = gp
# solve bellman equations at test points
        for j in range(len(k_test)):  # use j so the outer iteration index i is not clobbered
            y_test[j] = solver.iterate(k_test[j], n_agents, gp_old)[0]
targ_new = y_test
# plot predictive mean and 95% quantiles
#for j in range(num_points):
#print k_test[j], " ",y_pred_new[j], " ",y_pred_new[j] + 1.96*sigma_new[j]," ",y_pred_new[j] - 1.96*sigma_new[j]
diff_mean = mean_old - mean
max_diff_mean = np.amax(np.fabs(diff_mean))
avg_diff_mean = np.average(np.fabs(diff_mean))
diff_targ = mean - targ_new
max_diff_targ = np.amax(np.fabs(diff_targ))
avg_diff_targ = np.average(np.fabs(diff_targ))
to_print[0,0]= i+1
to_print[0,1]= max_diff_mean
to_print[0,2]= avg_diff_mean
to_print[0,3]= max_diff_targ
to_print[0,4]= avg_diff_targ
np.savetxt(file, to_print, fmt='%2.16f')
msg = "Cauchy:" + str(diff_mean) + ", max = " + str(round(max_diff_mean,3))
msg += os.linesep
msg += "Absolute:" + str(diff_targ) + ", max = " + str(round(max_diff_targ,3))
print(msg)
print("===================================")
file.close()
return
#======================================================================
|
the-stack_106_27242 | from pytorch_lightning import callbacks
import yaml
import argparse
import numpy as np
import cv2
from models import *
from experiments.vae_experiment import VAEXperiment
from experiments.vae_pix2pix_exp import Pix2pixExperiment
import torch.backends.cudnn as cudnn
from pytorch_lightning import Trainer
from pytorch_lightning.loggers import TestTubeLogger
from torch.utils.data import DataLoader
from terrain_loader import TerrainDataset
from pytorch_lightning.callbacks import ModelCheckpoint
parser = argparse.ArgumentParser(description='Generic runner model')
parser.add_argument('--config', '-c',
dest="filename",
metavar='FILE',
help = 'path to the config file',
default='configs/train.yaml')
args = parser.parse_args()
with open(args.filename, 'r') as file:
try:
config = yaml.safe_load(file)
except yaml.YAMLError as exc:
print(exc)
tt_logger = TestTubeLogger(
save_dir=config['logging_params']['save_dir'],
name=config['logging_params']['name'],
debug=False,
create_git_tag=False,
)
# For reproducibility
torch.manual_seed(config['logging_params']['manual_seed'])
np.random.seed(config['logging_params']['manual_seed'])
cudnn.deterministic = True
cudnn.benchmark = False
#Vae Model
vae_model = vae_models[config['vae_model_params']['name']](**config['vae_model_params'])
# pix2pix model
gen_model = pix2pix_model[config['pix2pix_model_params']['gen_name']](config['exp_params']['in_channels'],config['exp_params']['out_channels'])
disc_model = pix2pix_model[config['pix2pix_model_params']['disc_name']](config['exp_params']['in_channels'])
if config['vae_model_params']['load_model']:
experiment_vae = VAEXperiment.load_from_checkpoint(config['vae_model_params']['pretrained_model'], vae_model=vae_model,params=config['exp_params'])
print("[INFO] Loaded pretrained model")
vae_model.eval()
experiment = Pix2pixExperiment(gen_model,disc_model,vae_model,config['exp_params'])
else:
experiment = VAEXperiment(vae_model, config['exp_params'])
print("[INFO] Loaded randomly initialized model")
checkpoint_callback = ModelCheckpoint(
monitor='val_loss',
save_last=True,
save_top_k=3
)
runner = Trainer(default_root_dir=f"{tt_logger.save_dir}",
min_epochs=1,
logger=tt_logger,
flush_logs_every_n_steps=100,
limit_train_batches=1.,
limit_val_batches=1.,
num_sanity_val_steps=5,
**config['trainer_params'])
if config['exp_params']['train']:
print(f"======= Training {config['pix2pix_model_params']['name']} =======")
runner.fit(experiment)
    # Trainer.save_checkpoint needs an explicit filepath
    runner.save_checkpoint(f"{tt_logger.save_dir}/final.ckpt")
else:
print(f"======= Testing =======")
dataset = TerrainDataset(root = config['exp_params']['data_path'],
train=False,
hide_green=config['exp_params']['hide_green'],
norm=config['exp_params']['norm'])
sample_dataloader = DataLoader(dataset,
batch_size= 1,
num_workers=config['exp_params']['n_workers'],
shuffle = False,
drop_last=False)
def denormalize(result):
# minv, maxv = torch.min(result), torch.max(result)
new = (result+1)*127.5
return torch.squeeze(new).detach().numpy().transpose((1,2,0)).astype(np.uint8)
vae_model.eval()
gen_model.eval()
for ip, op in sample_dataloader:
res = vae_model(ip)[0]
vae_res = torch.squeeze(res*255).detach().numpy().transpose((1,2,0)).astype(np.uint8)
res = res*2-1
res = gen_model(res)
# print(len(res))
res = denormalize(res)
op = denormalize(op)
cv2.imshow('images/sampled',vae_res)
cv2.imshow('images/generated',res)
cv2.imshow('images/desired',op)
cv2.imwrite('images/sampled.png',vae_res)
cv2.imwrite('images/generated.png',res)
cv2.imwrite('images/desired.png',op)
cv2.waitKey() |
the-stack_106_27244 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: lenovo
@file: CyclicCoordinate.py
@time: 2021/5/22 10:26
"""
import math
import time
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import ticker
def f(x, y):
return (1 - x) ** 2 + 100 * (y - x * x) ** 2
def H(x, y):
return np.matrix([[1200 * x * x - 400 * y + 2, -400 * x],
[-400 * x, 200]])
def grad(x, y):
return np.matrix([[2 * x - 2 + 400 * x * (x * x - y)],
[200 * (y - x * x)]])
def s(n):
if n%2 == 0:
return np.matrix([[1],[0]])
else:
return np.matrix([[0],[1]])
def advance_retreat_method( x, y, direction: list, step=0, delta=0.1) -> tuple:
"""
find the initial section of step
"""
point0 = (x,y)
alpha0= step
alpha1 = alpha0 + delta
point1 = (point0[0]+direction[0]*delta, point0[1]+direction[1]*delta)
if f(point0[0],point0[1]) < f(point1[0],point1[1]):
while True:
delta *= 2
alpha2 = alpha0 - delta
point2 = (point0[0] - direction[0] * delta, point0[1] - direction[1] * delta)
if f(point2[0],point2[1]) < f(point0[0],point0[1]):
alpha1, alpha0 = alpha0, alpha2
point1, point0 = point0, point2
else:
return alpha2, alpha1
else:
while True:
delta *= 2
alpha2 = alpha1 + delta
point2 = (point1[0] + direction[0] * delta, point1[1] + direction[1] * delta)
if f(point2[0],point2[1]) < f(point1[0],point1[1]):
alpha0, alpha1 = alpha1, alpha2
point0, point1 = point1, point2
else:
return alpha0, alpha2
def goldsteinsearch(d,x,y, rho):
'''
    Line-search subroutine (golden-section search).
    Uses the current iterate (x, y) and the current search direction d.
'''
d = np.squeeze(np.asarray(d))
a,b = advance_retreat_method(x,y,d)
golden_num = (math.sqrt(5) - 1) / 2
p,q= a + (1 - golden_num) * (b - a), a + golden_num * (b - a)
while abs(b - a) > rho:
fp = f(x + p * d[0],y + p * d[1])
fq = f(x + q * d[0],y + q * d[1])
if fp<fq:
b,q = q,p
p = a + (1 - golden_num) * (b - a)
else:
a,p = p,q
q = a + golden_num * (b - a)
alpha = (a+b)/2
return alpha
def calLambda(x, y, d):
return goldsteinsearch(d,x,y,0.01)
def cyc_coo(x,y,n):
lambda_ = calLambda(x,y,s(n))
print(lambda_)
delta = lambda_ * s(n)
return delta
# ----- Draw the contour plot -----
# number of sample points
n = 256
# define the x, y ranges
x = np.linspace(-1, 1.1, n)
y = np.linspace(-1, 1.1, n)
# build the grid
X, Y = np.meshgrid(x, y)
plt.figure()
# fill contour colors; the integer argument is the number of contour levels
plt.contourf(X, Y, f(X, Y), 5, alpha=0, cmap=plt.cm.hot)
# draw the contour lines
C = plt.contour(X, Y, f(X, Y), 8, locator=ticker.LogLocator(), colors='black', linewidths=0.01)
# label the contour lines
plt.clabel(C, inline=True, fontsize=10)
# ---------------------
x = np.matrix([[-0.3],
[-0.4]])
tol = 0.01
xv = [x[0, 0]]
yv = [x[1, 0]]
plt.plot(x[0, 0], x[1, 0], marker='o')
start = time.time()
for t in range(10000):
x = np.matrix([[xv[-1]],
[yv[-1]]])
delta = cyc_coo(x[0, 0], x[1, 0],t)
x1 = x + delta
if np.linalg.norm(grad(x1[0,0],x1[1,0])) < tol:
break
xv.append(x1[0, 0])
yv.append(x1[1, 0])
end = time.time()
print("iteration:" + str(t))
print(xv)
print(yv)
print(xv[-1])
print(yv[-1])
print("耗时:"+str(end-start))
plt.plot(xv, yv, label='track')
# plt.plot(xv, yv, label='track', marker='o')
plt.xlabel('x')
plt.ylabel('y')
plt.title('Cyclic Coordinate\'s Method for Rosenbrock Function')
plt.legend()
plt.show()
|
the-stack_106_27246 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import unicode_literals
from __future__ import absolute_import
import shutil
import logging
import traceback
from tg import tmpl_context as c, app_globals as g
from ming.odm import session
from allura.lib.decorators import task
from allura.lib.repository import RepositoryApp
from allura.lib.utils import skip_mod_date
@task
def init(**kwargs):
c.app.repo.init()
@task
def clone(cloned_from_path, cloned_from_name, cloned_from_url):
from allura import model as M
try:
c.app.repo.init_as_clone(
cloned_from_path,
cloned_from_name,
cloned_from_url)
M.Notification.post_user(
c.user, c.app.repo, 'created',
text='Repository %s/%s created' % (
c.project.shortname, c.app.config.options.mount_point))
except Exception:
g.post_event('repo_clone_task_failed', cloned_from_url,
cloned_from_path, traceback.format_exc())
@task
def reclone(*args, **kwargs):
from allura import model as M
from ming.orm import ThreadLocalORMSession
repo = c.app.repo
if repo is not None:
shutil.rmtree(repo.full_fs_path, ignore_errors=True)
M.MergeRequest.query.remove(dict(
app_config_id=c.app.config._id))
ThreadLocalORMSession.flush_all()
clone(*args, **kwargs)
@task
def refresh(**kwargs):
from allura import model as M
log = logging.getLogger(__name__)
# don't create multiple refresh tasks
q = {
'task_name': 'allura.tasks.repo_tasks.refresh',
'state': {'$in': ['busy', 'ready']},
'context.app_config_id': c.app.config._id,
'context.project_id': c.project._id,
}
refresh_tasks_count = M.MonQTask.query.find(q).count()
if refresh_tasks_count <= 1: # only this task
c.app.repo.refresh()
# checking if we have new commits arrived
# during refresh and re-queue task if so
new_commit_ids = c.app.repo.unknown_commit_ids()
if len(new_commit_ids) > 0:
refresh.post()
log.info('New refresh task is queued due to new commit(s).')
else:
log.info('Refresh task for %s:%s skipped due to backlog',
c.project.shortname, c.app.config.options.mount_point)
@task
def uninstall(**kwargs):
from allura import model as M
repo = c.app.repo
if repo is not None:
shutil.rmtree(repo.full_fs_path, ignore_errors=True)
repo.delete()
M.MergeRequest.query.remove(dict(
app_config_id=c.app.config._id))
super(RepositoryApp, c.app).uninstall(c.project)
from ming.orm import ThreadLocalORMSession
ThreadLocalORMSession.flush_all()
@task
def nop():
log = logging.getLogger(__name__)
log.info('nop')
@task
def reclone_repo(*args, **kwargs):
from allura import model as M
try:
nbhd = M.Neighborhood.query.get(url_prefix='/%s/' % kwargs['prefix'])
c.project = M.Project.query.get(
shortname=kwargs['shortname'], neighborhood_id=nbhd._id)
c.app = c.project.app_instance(kwargs['mount_point'])
source_url = c.app.config.options.get('init_from_url')
source_path = c.app.config.options.get('init_from_path')
c.app.repo.init_as_clone(source_path, None, source_url)
M.Notification.post_user(
c.user, c.app.repo, 'created',
text='Repository %s/%s created' % (
c.project.shortname, c.app.config.options.mount_point))
except Exception:
g.post_event('repo_clone_task_failed', source_url,
source_path, traceback.format_exc())
@task
def tarball(revision, path):
log = logging.getLogger(__name__)
if revision:
repo = c.app.repo
status = repo.get_tarball_status(revision, path)
if status == 'complete':
log.info(
'Skipping snapshot for repository: %s:%s rev %s because it is already %s' %
(c.project.shortname, c.app.config.options.mount_point, revision, status))
else:
try:
repo.tarball(revision, path)
except:
log.error(
'Could not create snapshot for repository: %s:%s revision %s path %s' %
(c.project.shortname, c.app.config.options.mount_point, revision, path), exc_info=True)
raise
else:
log.warn(
'Skipped creation of snapshot: %s:%s because revision is not specified' %
(c.project.shortname, c.app.config.options.mount_point))
@task
def merge(merge_request_id):
from allura import model as M
mr = M.MergeRequest.query.get(_id=merge_request_id)
mr.app.repo.merge(mr)
mr.add_meta_post(changes={'Status': [mr.status, 'merged']})
mr.status = 'merged'
g.director.create_activity(c.user, 'merged', mr,
related_nodes=[c.project], tags=['merge-request'])
session(mr).flush(mr)
@task
def can_merge(merge_request_id):
from allura import model as M
mr = M.MergeRequest.query.get(_id=merge_request_id)
result = mr.app.repo.can_merge(mr)
mr.set_can_merge_cache(result)
@task
def determine_mr_commits(merge_request_id):
from allura import model as M
mr = M.MergeRequest.query.get(_id=merge_request_id)
mr.commits # build & cache the commits
|
the-stack_106_27248 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Any, Callable, Dict, List, Mapping, Optional, Sequence
from airflow.sensors.base import BaseSensorOperator
from airflow.utils.context import Context, context_merge
from airflow.utils.operator_helpers import determine_kwargs
class PythonSensor(BaseSensorOperator):
"""
Waits for a Python callable to return True.
User could put input argument in templates_dict
e.g ``templates_dict = {'start_ds': 1970}``
and access the argument by calling ``kwargs['templates_dict']['start_ds']``
in the callable
:param python_callable: A reference to an object that is callable
:type python_callable: python callable
:param op_kwargs: a dictionary of keyword arguments that will get unpacked
in your function
:type op_kwargs: dict
:param op_args: a list of positional arguments that will get unpacked when
calling your callable
:type op_args: list
:param templates_dict: a dictionary where the values are templates that
will get templated by the Airflow engine sometime between
``__init__`` and ``execute`` takes place and are made available
in your callable's context after the template has been applied.
:type templates_dict: dict of str
"""
template_fields: Sequence[str] = ('templates_dict', 'op_args', 'op_kwargs')
def __init__(
self,
*,
python_callable: Callable,
op_args: Optional[List] = None,
op_kwargs: Optional[Mapping[str, Any]] = None,
templates_dict: Optional[Dict] = None,
**kwargs,
):
super().__init__(**kwargs)
self.python_callable = python_callable
self.op_args = op_args or []
self.op_kwargs = op_kwargs or {}
self.templates_dict = templates_dict
def poke(self, context: Context) -> bool:
context_merge(context, self.op_kwargs, templates_dict=self.templates_dict)
self.op_kwargs = determine_kwargs(self.python_callable, self.op_args, context)
self.log.info("Poking callable: %s", str(self.python_callable))
return_value = self.python_callable(*self.op_args, **self.op_kwargs)
return bool(return_value)
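# --- Hedged usage sketch (not part of this module) ---
# Minimal example of wiring the sensor into a DAG; the dag_id, schedule and
# callable below are made-up values for illustration.
#
#     from datetime import datetime
#     from airflow import DAG
#     from airflow.sensors.python import PythonSensor
#
#     def _input_is_ready(templates_dict=None, **_):
#         return templates_dict is not None and templates_dict["start_ds"] == 1970
#
#     with DAG("example_python_sensor", start_date=datetime(2022, 1, 1), schedule_interval=None) as dag:
#         wait = PythonSensor(
#             task_id="wait_for_input",
#             python_callable=_input_is_ready,
#             templates_dict={"start_ds": 1970},
#             poke_interval=30,
#             timeout=600,
#         )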
|
the-stack_106_27250 | from time import time
import os
_startTime = None
def loadInput(day):
global _startTime
day = str(day)
filename = "input" + day.zfill(2) + ".txt"
filepath = os.path.join("inputs", filename)
with open(filepath) as f:
content = [l.rstrip("\n") for l in f.readlines()]
_startTime = time()
if len(content) == 1:
try:
return int(content[0])
except:
try:
return [int(i) for i in content[0].split()]
except:
return content[0]
else:
try:
return [int(i) for i in content]
except:
return content
def printTimeTaken():
global _startTime
_endTime = time()
print("Time: {:.3f}s".format(_endTime - _startTime)) |
the-stack_106_27251 | import os
import shlex
import subprocess
import sys
import parse
from PySide2.QtCore import QFile, QObject, QTimer
from PySide2.QtUiTools import QUiLoader
from PySide2.QtWidgets import QApplication, QGraphicsScene, QLabel
from . import xrandr
from .monitor_item import MonitorItem
class Window(QObject):
def __init__(self, ui):
super().__init__()
self.ui = ui
ui.show()
self.ui.setWindowTitle("Display Configuration")
self.ui.screenCombo.currentTextChanged.connect(self.monitor_selected)
self.ui.replicaOf.currentTextChanged.connect(self.replica_changed)
self.ui.orientationCombo.currentIndexChanged.connect(self.orientation_changed)
self.get_xrandr_info()
self.fill_ui()
self.ui.horizontalScale.valueChanged.connect(self.scale_changed)
self.ui.verticalScale.valueChanged.connect(self.scale_changed)
self.ui.modes.currentTextChanged.connect(self.mode_changed)
self.ui.applyButton.clicked.connect(self.do_apply)
self.ui.okButton.clicked.connect(self.do_ok)
self.ui.resetButton.clicked.connect(self.do_reset)
self.ui.cancelButton.clicked.connect(self.ui.reject)
self.ui.scaleModeCombo.currentTextChanged.connect(self.scale_mode_changed)
self.ui.primary.stateChanged.connect(self.primary_changed)
self.ui.enabled.stateChanged.connect(self.enabled_changed)
self.pos_label = QLabel(self.ui.sceneView)
self.pos_label.move(5, 5)
def enabled_changed(self):
mon = self.ui.screenCombo.currentText()
enabled = self.ui.enabled.isChecked()
print(f"Setting {mon} enabled status to {enabled}")
monitor = self.screen.monitors[mon]
monitor.enabled = enabled
if enabled and not monitor.get_current_mode():
# Choose a mode
self.ui.modes.setCurrentText(str(monitor.get_preferred_mode()))
self.mode_changed()
self.screen.update_replica_of()
for mon in self.screen.monitors.values():
mon.item.update_visuals(mon)
self.adjust_view()
def primary_changed(self):
mon_name = self.ui.screenCombo.currentText()
primary = self.ui.primary.isChecked()
if primary:
self.screen.set_primary(mon_name)
else:
self.screen.set_primary("foobar") # no primary
for monitor in self.screen.monitors.values():
monitor.item.update_visuals(monitor)
def scale_mode_changed(self):
mon = self.ui.screenCombo.currentText()
scale_mode = self.ui.scaleModeCombo.currentText()
print(f"Set {mon} scale mode to {scale_mode}")
if scale_mode == "Manual":
self.ui.horizontalScale.setEnabled(True)
self.ui.verticalScale.setEnabled(True)
try:
self.ui.horizontalScale.valueChanged.disconnect(
self.ui.verticalScale.setValue
)
except RuntimeError: # Not connected
pass
elif scale_mode == "Disabled (1x1)":
self.ui.verticalScale.setEnabled(False)
self.ui.horizontalScale.setEnabled(False)
self.ui.horizontalScale.setValue(1000)
self.ui.verticalScale.setValue(1000)
try:
self.ui.horizontalScale.valueChanged.disconnect(
self.ui.verticalScale.setValue
)
except RuntimeError: # Not connected
pass
elif scale_mode == "Automatic: physical dimensions":
# Calculate scale factors so that the logical pixels will be the same
# size as in the primary window
if self.ui.primary.isChecked():
print("Has no effect on primary display.")
return
# Find the primary monitor
primary = self.screen.get_primary()
if not primary:
print("Oops, no primary!")
return
monitor = self.screen.monitors[mon]
prim_density_x = primary.res_x / primary.w_in_mm
prim_density_y = primary.res_y / primary.h_in_mm
dens_x = monitor.res_x / monitor.w_in_mm
dens_y = monitor.res_y / monitor.h_in_mm
try:
self.ui.horizontalScale.valueChanged.disconnect(
self.ui.verticalScale.setValue
)
except RuntimeError: # Not connected
pass
self.ui.horizontalScale.setEnabled(False)
self.ui.verticalScale.setEnabled(False)
self.ui.horizontalScale.setValue(prim_density_x / dens_x * 1000)
self.ui.verticalScale.setValue(prim_density_y / dens_y * 1000)
elif scale_mode == "Manual, same in both dimensions":
self.ui.horizontalScale.setEnabled(True)
self.ui.verticalScale.setEnabled(False)
self.ui.horizontalScale.valueChanged.connect(self.ui.verticalScale.setValue)
self.ui.verticalScale.setValue(self.ui.horizontalScale.value())
def replica_changed(self):
mon_name = self.ui.screenCombo.currentText()
replicate = self.ui.replicaOf.currentText()
mon = self.screen.monitors[mon_name]
if replicate in ("None", "", None):
print(f"Making {mon_name} NOT a replica")
mon.pos_x += 300
else:
replicate = self.screen.monitors[replicate]
print(f"Making {mon_name} a replica of {replicate}")
# Making a replica implies:
# Set the same position
mon.pos_x = replicate.pos_x
mon.pos_y = replicate.pos_y
# Set the same mode if possible
matching_mode = mon.get_matching_mode(replicate.get_current_mode())
if matching_mode:
mon.set_current_mode(matching_mode.name)
else:
# Keep the current mode, and change scaling so it
# has the same effective size as the desired mode
c_mode = mon.get_current_mode()
mod_x, mod_y = c_mode.res_x, c_mode.res_y
                r_mode = replicate.get_current_mode()
target_x, target_y = r_mode.res_x, r_mode.res_y
scale_x = 1000 * target_x / mod_x
scale_y = 1000 * target_y / mod_y
self.ui.horizontalScale.setValue(scale_x)
self.ui.verticalScale.setValue(scale_y)
self.screen.update_replica_of()
for mon in self.screen.monitors.values():
mon.item.update_visuals(mon)
def run(self, commands):
for i, cmd in enumerate(commands, 1):
print(f"Running {cmd} [{i}/{len(commands)}]")
subprocess.check_call(shlex.split(cmd))
def do_reset(self):
self.run(self.reset_screen.generate())
self.fill_ui()
def do_ok(self):
self.do_apply()
self.ui.accept()
def do_apply(self):
cli = self.screen.generate()
self.run(cli)
def fill_ui(self):
"""Configure UI out of our screen data."""
self.scene = QGraphicsScene(self)
self.ui.sceneView.setScene(self.scene)
self.ui.screenCombo.clear()
for name, monitor in self.screen.monitors.items():
self.ui.screenCombo.addItem(name)
mon_item = MonitorItem(
data=monitor,
window=self,
name=name,
)
self.scene.addItem(mon_item)
monitor.item = mon_item
self.ui.screenCombo.setCurrentText(self.screen.choose_a_monitor())
self.adjust_view()
# self.scale_changed() # Trigger scale labels update
def orientation_changed(self):
mon_name = self.ui.screenCombo.currentText()
orientation = self.ui.orientationCombo.currentText().split()[0].lower()
self.screen.monitors[mon_name].orientation = orientation
self.mode_changed()
def mode_changed(self):
mon = self.ui.screenCombo.currentText()
        found = parse.search("({mode_name})", self.ui.modes.currentText())
        if not found:
            return
        mode = found["mode_name"]
print(f"Changing {mon} to {mode}")
monitor = self.screen.monitors[mon]
monitor.set_current_mode(mode)
mode_x, mode_y = (
monitor.get_current_mode().res_x,
monitor.get_current_mode().res_y,
)
# use resolution via scaling
if monitor.orientation in ("normal", "inverted"):
monitor.res_x = int(mode_x * self.ui.horizontalScale.value() / 1000)
monitor.res_y = int(mode_y * self.ui.verticalScale.value() / 1000)
else:
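            # Rotated left/right: the mode's x and y swap roles on screen.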
monitor.res_x = int(mode_y * self.ui.horizontalScale.value() / 1000)
monitor.res_y = int(mode_x * self.ui.verticalScale.value() / 1000)
monitor.item.update_visuals(monitor)
def show_pos(self, x, y):
self.pos_label.setText(f"{x},{y}")
self.pos_label.resize(self.pos_label.sizeHint())
def monitor_moved(self):
"Update screen with new monitor positions"
for mon in self.screen.monitors.values():
item = mon.item
mon.pos_x = item.x()
mon.pos_y = item.y()
self.screen.update_replica_of()
for mon in self.screen.monitors.values():
mon.item.update_visuals(mon)
# Adjust view a little later
QTimer.singleShot(0, self.adjust_view)
def possible_snaps(self, name):
"""Return two lists of values to which the x and y position
of monitor "name" could snap to."""
snaps_x = []
snaps_y = []
for output, monitor in self.screen.monitors.items():
if output == name:
continue
else:
mode = monitor.get_current_mode()
mod_x, mod_y = mode.res_x, mode.res_y
snaps_x.append(monitor.pos_x)
snaps_x.append(monitor.pos_x + mod_x)
snaps_y.append(monitor.pos_y)
snaps_y.append(monitor.pos_y + mod_y)
return snaps_x, snaps_y
def adjust_view(self):
print("Adjusting view")
self.ui.sceneView.resetTransform()
self.ui.sceneView.ensureVisible(self.scene.sceneRect(), 100, 100)
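        # Fit the whole scene into the view, keeping a 20% margin.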
try:
scale_factor = 0.8 * min(
self.ui.sceneView.width() / self.scene.sceneRect().width(),
self.ui.sceneView.height() / self.scene.sceneRect().height(),
)
self.ui.sceneView.scale(scale_factor, scale_factor)
except ZeroDivisionError:
# Don't worry
pass
def get_xrandr_info(self):
_xrandr_data = xrandr.read_data()
self.screen = xrandr.parse_data(_xrandr_data)
self.screen.update_replica_of()
self.reset_screen = xrandr.parse_data(_xrandr_data)
def monitor_selected(self, name):
if not name:
return
# needed so we don't flip through all modes as they are added
self.ui.modes.blockSignals(True)
self.ui.primary.blockSignals(True)
# Show modes
self.ui.modes.clear()
monitor = self.screen.monitors[name]
for _, mode in monitor.modes.items():
self.ui.modes.addItem(str(mode))
mode = monitor.get_current_mode()
self.ui.modes.setCurrentText(str(mode))
if monitor.orientation in ("normal", "inverted"):
h_scale = monitor.res_x / mode.res_x
v_scale = monitor.res_y / mode.res_y
else:
h_scale = monitor.res_y / mode.res_x
v_scale = monitor.res_x / mode.res_y
self.ui.horizontalScale.setValue(h_scale * 1000)
self.ui.verticalScale.setValue(v_scale * 1000)
self.ui.primary.setChecked(monitor.primary)
self.ui.enabled.setChecked(monitor.enabled)
self.ui.orientationCombo.setCurrentText(monitor.orientation)
self.ui.replicaOf.clear()
self.ui.replicaOf.addItem("None")
for mon in self.screen.monitors:
if mon != name:
self.ui.replicaOf.addItem(mon)
if mon in self.screen.monitors[name].replica_of:
self.ui.replicaOf.setCurrentText(mon)
self.ui.modes.blockSignals(False)
self.ui.primary.blockSignals(False)
guessed_scale_mode = monitor.guess_scale_mode()
self.ui.scaleModeCombo.setCurrentText(guessed_scale_mode)
self.scale_mode_changed()
def scale_changed(self):
self.ui.horizontalScaleLabel.setText(
f"{int(self.ui.horizontalScale.value()/10)}%"
)
self.ui.verticalScaleLabel.setText(f"{int(self.ui.verticalScale.value()/10)}%")
self.mode_changed() # Not really, but it's the same thing
def main():
app = QApplication(sys.argv)
ui_file = QFile(os.path.join(os.path.dirname(__file__), "main.ui"))
ui_file.open(QFile.ReadOnly)
loader = QUiLoader()
    window = Window(loader.load(ui_file))  # keep a reference so the window is not garbage-collected
sys.exit(app.exec_())
if __name__ == "__main__":
main()
|
the-stack_106_27253 | from tkinter import *
from PIL import Image, ImageTk
root = Tk()
root.geometry("1200x700")
load = Image.open("Network-Monitoring.jpg")
render = ImageTk.PhotoImage(load)
img = Label(image=render)
img.place(x=0, y=0)
label = Label(root, text="Welcome To The Network!", font=("Helvetica", 25, "bold italic"), width=70)  # font: (family, size, style)
label.pack(pady=50)
e = Entry(root, width=100)
e.pack(pady=50)
e.insert(0, "Enter your IP address:")
button = Button(root, text="Submit", width=30)
button.pack()
root.mainloop()
|
the-stack_106_27254 | # -*- coding: utf-8 -*-
'''
Control of entries in SSH authorized_key files
==============================================
The information stored in a user's SSH authorized key file can be easily
controlled via the ssh_auth state. Defaults can be set by the enc, options,
and comment keys. These defaults can be overridden by including them in the
name.
Since the YAML specification limits the length of simple keys to 1024
characters, and since SSH keys are often longer than that, you may have
to use a YAML 'explicit key', as demonstrated in the second example below.
.. code-block:: yaml
AAAAB3NzaC1kc3MAAACBAL0sQ9fJ5bYTEyY==:
ssh_auth:
- present
- user: root
- enc: ssh-dss
? AAAAB3NzaC1kc3MAAACBAL0sQ9fJ5bYTEyY==...
:
ssh_auth:
- present
- user: root
- enc: ssh-dss
thatch:
ssh_auth:
- present
- user: root
- source: salt://ssh_keys/thatch.id_rsa.pub
sshkeys:
ssh_auth:
- present
- user: root
- enc: ssh-rsa
- options:
- option1="value1"
- option2="value2 flag2"
- comment: myuser
- names:
- AAAAB3NzaC1kc3MAAACBAL0sQ9fJ5bYTEyY==
- ssh-dss AAAAB3NzaCL0sQ9fJ5bYTEyY== user@domain
- option3="value3" ssh-dss AAAAB3NzaC1kcQ9J5bYTEyY== other@testdomain
- AAAAB3NzaC1kcQ9fJFF435bYTEyY== newcomment
'''
# Import python libs
import re
import sys
def _present_test(user, name, enc, comment, options, source, config):
'''
Run checks for "present"
'''
result = None
if source:
keys = __salt__['ssh.check_key_file'](
user,
source,
config,
saltenv=__env__)
if keys:
comment = ''
for key, status in keys.items():
if status == 'exists':
continue
comment += 'Set to {0}: {1}\n'.format(status, key)
if comment:
return result, comment
err = sys.modules[
__salt__['test.ping'].__module__
].__context__.pop('ssh_auth.error', None)
if err:
return False, err
else:
return (
True,
'All host keys in file {0} are already present'.format(source)
)
check = __salt__['ssh.check_key'](
user,
name,
enc,
comment,
options,
config)
if check == 'update':
comment = (
'Key {0} for user {1} is set to be updated'
).format(name, user)
elif check == 'add':
comment = (
'Key {0} for user {1} is set to be added'
).format(name, user)
elif check == 'exists':
result = True
comment = ('The authorized host key {0} is already present '
'for user {1}'.format(name, user))
return result, comment
def present(
name,
user,
enc='ssh-rsa',
comment='',
source='',
options=None,
config='.ssh/authorized_keys',
**kwargs):
'''
Verifies that the specified SSH key is present for the specified user
name
The SSH key to manage
user
The user who owns the SSH authorized keys file to modify
enc
Defines what type of key is being used; can be ecdsa, ssh-rsa or ssh-dss
comment
The comment to be placed with the SSH public key
source
The source file for the key(s). Can contain any number of public keys,
in standard "authorized_keys" format. If this is set, comment, enc,
and options will be ignored.
.. note::
The source file must contain keys in the format ``<enc> <key>
<comment>``. If you have generated a keypair using PuTTYgen, then you
will need to do the following to retrieve an OpenSSH-compatible public
key.
1. In PuTTYgen, click ``Load``, and select the *private* key file (not
the public key), and click ``Open``.
2. Copy the public key from the box labeled ``Public key for pasting
into OpenSSH authorized_keys file``.
3. Paste it into a new file.
options
The options passed to the key, pass a list object
config
The location of the authorized keys file relative to the user's home
directory, defaults to ".ssh/authorized_keys"
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': ''}
if __opts__['test']:
ret['result'], ret['comment'] = _present_test(
user,
name,
enc,
comment,
options or [],
source,
config,
)
return ret
if source != '':
data = __salt__['ssh.set_auth_key_from_file'](
user,
source,
config,
saltenv=__env__)
else:
# check if this is of form {options} {enc} {key} {comment}
sshre = re.compile(r'^(.*?)\s?((?:ssh\-|ecds)[\w-]+\s.+)$')
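        # e.g. 'no-pty,command="/bin/true" ssh-rsa AAAAB3Nza... user@host'
        #   group(1) -> 'no-pty,command="/bin/true"'    (options, may be empty)
        #   group(2) -> 'ssh-rsa AAAAB3Nza... user@host'  (enc, key, optional comment)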
fullkey = sshre.search(name)
# if it is {key} [comment]
if not fullkey:
key_and_comment = name.split()
name = key_and_comment[0]
if len(key_and_comment) == 2:
comment = key_and_comment[1]
else:
# if there are options, set them
if fullkey.group(1):
options = fullkey.group(1).split(',')
# key is of format: {enc} {key} [comment]
comps = fullkey.group(2).split()
enc = comps[0]
name = comps[1]
if len(comps) == 3:
comment = comps[2]
data = __salt__['ssh.set_auth_key'](
user,
name,
enc,
comment,
options or [],
config)
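    # ssh.set_auth_key reports the outcome as one of:
    # 'replace', 'no change', 'new', 'fail' or 'invalid'.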
if data == 'replace':
ret['changes'][name] = 'Updated'
ret['comment'] = ('The authorized host key {0} for user {1} was '
'updated'.format(name, user))
return ret
elif data == 'no change':
ret['comment'] = ('The authorized host key {0} is already present '
'for user {1}'.format(name, user))
elif data == 'new':
ret['changes'][name] = 'New'
ret['comment'] = ('The authorized host key {0} for user {1} was added'
.format(name, user))
elif data == 'fail':
ret['result'] = False
err = sys.modules[
__salt__['test.ping'].__module__
].__context__.pop('ssh_auth.error', None)
if err:
ret['comment'] = err
else:
ret['comment'] = ('Failed to add the ssh key. Is the home '
'directory available, and/or does the key file '
'exist?')
elif data == 'invalid':
ret['result'] = False
ret['comment'] = 'Invalid public ssh key, most likely has spaces'
return ret
def absent(name,
user,
enc='ssh-rsa',
comment='',
options=None,
config='.ssh/authorized_keys'):
'''
Verifies that the specified SSH key is absent
name
The SSH key to manage
user
The user who owns the SSH authorized keys file to modify
enc
Defines what type of key is being used; can be ecdsa, ssh-rsa or ssh-dss
comment
The comment to be placed with the SSH public key
options
The options passed to the key, pass a list object
config
The location of the authorized keys file relative to the user's home
directory, defaults to ".ssh/authorized_keys"
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': ''}
# Get just the key
keydata = name.split(' ')
if len(keydata) > 1:
name = keydata[1]
else:
name = keydata[0]
if __opts__['test']:
check = __salt__['ssh.check_key'](
user,
name,
enc,
comment,
options or [],
config)
if check == 'update' or check == 'exists':
ret['result'] = None
ret['comment'] = 'Key {0} is set for removal'.format(name)
return ret
else:
ret['comment'] = 'Key is already absent'
return ret
ret['comment'] = __salt__['ssh.rm_auth_key'](user, name, config)
if ret['comment'] == 'User authorized keys file not present':
ret['result'] = False
return ret
elif ret['comment'] == 'Key removed':
ret['changes'][name] = 'Removed'
return ret
|
the-stack_106_27255 | from time import time
from cutqc.helper_fun import check_valid
from cutqc.cutter import find_cuts, cut_circuit
from cutqc.evaluator import run_subcircuit_instances
from cutqc.post_process import generate_summation_terms, build
from cutqc.verify import verify
class CutQC:
'''
The main module for CutQC
cut --> evaluate results --> verify (optional)
'''
def __init__(self, tasks, verbose):
'''
Args:
tasks (list): the input quantum circuits
Each element is a dictionary with 'name', 'circuit' and 'kwargs'
        verbose: set to True to turn on logging information.
Useful to visualize what happens,
but may produce very long outputs for complicated circuits.
'''
self.tasks = tasks
for task in self.tasks:
for field in ['name','circuit','kwargs']:
if field not in task:
raise ValueError('Missing %s'%field)
check_valid(circuit=task['circuit'])
self.verbose = verbose
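    # Illustrative sketch of the expected task layout (hypothetical names;
    # `some_qiskit_circuit` is any QuantumCircuit supplied by the caller):
    #   tasks = [{'name': 'demo',
    #             'circuit': some_qiskit_circuit,
    #             'kwargs': {'max_subcircuit_width': 3,
    #                        'num_subcircuits': [2],
    #                        'max_cuts': 5}}]
    #   cutqc = CutQC(tasks=tasks, verbose=True)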
def cut(self):
'''
Cut the given circuits
        If using the MIP solver to automatically find cuts, the following are required:
max_subcircuit_width: max number of qubits in each subcircuit
The following are optional:
max_cuts: max total number of cuts allowed
        num_subcircuits: list of subcircuit counts to try; CutQC returns the best solution found among the trials
max_subcircuit_cuts: max number of cuts for a subcircuit
max_subcircuit_size: max number of gates in a subcircuit
        quantum_cost_weight: MIP overall cost objective is given by
quantum_cost_weight * num_subcircuit_instances + (1-quantum_cost_weight) * classical_postprocessing_cost
Else supply the subcircuit_vertices manually
Note that supplying subcircuit_vertices overrides all other arguments
'''
for task in self.tasks:
circuit_name = task['name']
circuit = task['circuit']
kwargs = task['kwargs']
if self.verbose:
print('*'*20,'Cut %s'%circuit_name,'*'*20)
print('width = %d depth = %d size = %d -->'%(circuit.num_qubits,circuit.depth(),circuit.size()))
print(kwargs)
if 'subcircuit_vertices' not in kwargs:
if 'max_subcircuit_width' not in kwargs:
raise AttributeError('Automatic MIP cut searcher requires users to define max subcircuit width!')
task.update(find_cuts(**kwargs,circuit=circuit,verbose=self.verbose))
else:
task.update(cut_circuit(**kwargs,circuit=circuit,verbose=self.verbose))
def evaluate(self, eval_mode, num_shots_fn, mem_limit, num_threads):
'''
eval_mode = qasm: simulate shots
eval_mode = sv: statevector simulation
num_shots_fn: a function that gives the number of shots to take for a given circuit
'''
if self.verbose:
print('*'*20,'evaluation mode = %s'%(eval_mode),'*'*20,flush=True)
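        # Pipeline: build summation metadata, run the subcircuit instances,
        # attribute the measured probabilities to entries, then reconstruct.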
self._generate_metadata()
self._run_subcircuits(eval_mode=eval_mode,num_shots_fn=num_shots_fn)
self._attribute_shots()
self._build(mem_limit=mem_limit,num_threads=num_threads)
def verify(self):
for task in self.tasks:
print('*'*20,'Verify %s'%task['name'],'*'*20,flush=True)
reconstructed_output, metrics = verify(full_circuit=task['circuit'],
unordered=task['unordered_prob'],
complete_path_map=task['complete_path_map'],
subcircuits=task['subcircuits'],
smart_order=task['smart_order'])
for quasi_conversion_mode in metrics:
print('Quasi probability conversion mode: %s'%quasi_conversion_mode)
for metric_name in metrics[quasi_conversion_mode]:
print(metric_name,metrics[quasi_conversion_mode][metric_name])
task['ordered_prob'] = reconstructed_output
task['metrics'] = metrics
def _generate_metadata(self):
circ_dict = {}
all_subcircuit_entries_sampled = {}
for task in self.tasks:
task['summation_terms'], task['subcircuit_entries'], task['subcircuit_instances'] = generate_summation_terms(
subcircuits=task['subcircuits'],
complete_path_map=task['complete_path_map'],
num_cuts=task['num_cuts'])
# if self.verbose:
# print('--> %s subcircuit_instances:'%task['name'],flush=True)
# for subcircuit_idx in task['subcircuit_instances']:
# for init_meas in task['subcircuit_instances'][subcircuit_idx]:
# subcircuit_instance_idx = task['subcircuit_instances'][subcircuit_idx][init_meas]
# print('Subcircuit {:d}, {}, instance_idx {:d}'.format(
# subcircuit_idx,init_meas,subcircuit_instance_idx))
# print('--> %s subcircuit_entries:'%task['name'],flush=True)
# for subcircuit_idx in task['subcircuit_entries']:
# for subcircuit_entry_key in task['subcircuit_entries'][subcircuit_idx]:
# subcircuit_entry_idx, kronecker_term = task['subcircuit_entries'][subcircuit_idx][subcircuit_entry_key]
# print('Subcircuit {:d} {}, entry_idx {:d}, Kronecker term = {}'.format(
# subcircuit_idx,subcircuit_entry_key,subcircuit_entry_idx,kronecker_term))
# print('--> %s summation_terms:'%task['name'])
# [print(summation_term) for summation_term in task['summation_terms']]
def _run_subcircuits(self,eval_mode,num_shots_fn):
'''
Run all the subcircuit instances
task['subcircuit_instance_probs'][subcircuit_idx][subcircuit_instance_idx] = measured prob
'''
for task in self.tasks:
if self.verbose:
print('--> Running Subcircuits %s'%task['name'],flush=True)
task['subcircuit_instance_probs'] = run_subcircuit_instances(subcircuits=task['subcircuits'],subcircuit_instances=task['subcircuit_instances'],
eval_mode=eval_mode,num_shots_fn=num_shots_fn)
def _attribute_shots(self):
'''
Attribute the shots into respective subcircuit entries
task['subcircuit_entry_probs'][subcircuit_idx][subcircuit_entry_idx] = prob
'''
for task in self.tasks:
if self.verbose:
print('--> Attribute shots %s'%task['name'],flush=True)
attribute_begin = time()
task['subcircuit_entry_probs'] = {}
for subcircuit_idx in task['subcircuit_entries']:
task['subcircuit_entry_probs'][subcircuit_idx] = {}
for label in task['subcircuit_entries'][subcircuit_idx]:
subcircuit_entry_idx, kronecker_term = task['subcircuit_entries'][subcircuit_idx][label]
# print('Subcircuit {:d} entry {:d} kronecker_term {}'.format(
# subcircuit_idx, subcircuit_entry_idx, kronecker_term
# ))
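                    # Each entry probability is a signed linear combination of
                    # measured instance probabilities: sum(coefficient * p_instance).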
subcircuit_entry_prob = None
for term in kronecker_term:
coefficient, subcircuit_instance_idx = term
if subcircuit_entry_prob is None:
subcircuit_entry_prob = coefficient * task['subcircuit_instance_probs'][subcircuit_idx][subcircuit_instance_idx]
else:
subcircuit_entry_prob += coefficient * task['subcircuit_instance_probs'][subcircuit_idx][subcircuit_instance_idx]
task['subcircuit_entry_probs'][subcircuit_idx][subcircuit_entry_idx] = subcircuit_entry_prob
attribute_time = time()-attribute_begin
if self.verbose:
print('%s attribute took %.3e seconds'%(task['name'],attribute_time),flush=True)
def _build(self, mem_limit, num_threads):
for task in self.tasks:
if self.verbose:
print('--> Build %s'%task['name'],flush=True)
[print(summation_term,flush=True) for summation_term in task['summation_terms'][:10]]
print('... Total %d summation terms\n'%len(task['summation_terms']),flush=True)
build_begin = time()
reconstructed_prob, smart_order, overhead = build(
summation_terms=task['summation_terms'],
subcircuit_entry_probs=task['subcircuit_entry_probs'],
num_cuts=task['num_cuts'],
num_threads=num_threads)
build_time = time()-build_begin
task['unordered_prob'] = reconstructed_prob
task['build_time'] = build_time
task['smart_order'] = smart_order
if self.verbose:
print('Overhead = {}, took {:.3e} seconds'.format(overhead,build_time)) |
the-stack_106_27257 | class Warrior(object):
def __init__(self, health=50, attack=5):
self.health = health
self.attack = attack
        self.is_alive = health > 0
    def BishBashBosh(self, damage):
        """Take damage and update the alive flag."""
        self.health -= damage
        self.is_alive = self.health > 0
class Knight(Warrior):
def __init__(self, *args, **kwargs):
#super(Knight, self).__init__(*args, **kwargs)
super(Knight, self).__init__(50, 7)
def fight(unit_1, unit_2):
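    """Duel until one side drops: unit_1 strikes first and blows alternate.
    Return True if unit_1 wins, False if unit_2 wins."""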
while (unit_1.is_alive and unit_2.is_alive):
unit_2.BishBashBosh(unit_1.attack)
if not unit_2.is_alive: return True
unit_1.BishBashBosh(unit_2.attack)
if not unit_1.is_alive: return False
if __name__ == '__main__':
chuck = Warrior()
bruce = Warrior()
carl = Knight()
dave = Warrior()
mark = Warrior()
assert fight(chuck, bruce) == True
assert fight(dave, carl) == False
assert chuck.is_alive == True
assert bruce.is_alive == False
assert carl.is_alive == True
assert dave.is_alive == False
assert fight(carl, mark) == False
assert carl.is_alive == False
print("Coding complete? Let's try tests!")
|