the-stack_106_20337
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import argparse
from collections import defaultdict
from functools import reduce
import gc
import logging
import math
import operator
import pprint
import time
from datasets.wikitext2_data import get_real_dataloaders as get_real_wikitext2_dataloaders
from datasets.wikitext2_data import get_synthetic_dataloaders as get_synthetic_wikitext2_dataloaders
from golden_configs import lm_wikitext2
from models import transformer_lm
import numpy as np
import torch
import torch.distributed as dist
from torch.distributed import rpc
import torch.multiprocessing as mp
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.optim import Adam
from fairscale.nn import Pipe
from fairscale.nn.model_parallel import initialize_model_parallel
from fairscale.nn.model_parallel.initialize import get_data_parallel_group, get_pipeline_parallel_group
from fairscale.nn.pipe import LazyModule, MultiProcessPipe
from fairscale.optim.oss import OSS
from fairscale.utils.testing import dist_init, get_worker_map
MPI_PORT = 29500
RPC_PORT = 29501
def init_random_seed(seed: int):
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
np.random.seed(seed)
def get_model_and_optimizer(args, device, config):
"""Return instantiated model and optimizer function."""
if args.model_name == "lm":
model = get_lm_model(args, device, config)
lr = config["lr"]
def make_adam(params):
if args.ddp_zero:
return OSS(params=params, optim=Adam, group=get_data_parallel_group(), lr=lr)
else:
return Adam(params, lr=lr)
optimizer = make_adam
return model, optimizer
def get_lm_model(args, device, config):
"""Get language model(based on GPT-2) used for sequence prediction."""
ninp = config["ninp"]
nhead = config["nhead"]
initrange = config["initrange"]
dropout = config["dropout"]
vocab_size = config["vocab_size"]
nhid = config["nhid"]
ndecoder = config["num_decoder_layers"]
if args.lazy_construction:
layers = [
LazyModule(lambda: transformer_lm.EmbeddingLayer(vocab_size, ninp, initrange)),
LazyModule(lambda: transformer_lm.PositionalEncodingLayer(ninp, dropout)),
]
for _ in range(ndecoder):
layers.append(LazyModule(lambda: transformer_lm.TransformerDecoderLayer(ninp, nhead, nhid, dropout)))
layers.append(LazyModule(lambda: transformer_lm.LinearLayer(ninp, vocab_size, initrange)))
model = layers
else:
model = transformer_lm.TransformerLM(vocab_size, ninp, nhead, nhid, dropout, initrange, ndecoder).to(device)
return model
def get_tensors_by_size_bucket():
size_buckets = defaultdict(int)
for obj in gc.get_objects():
if not isinstance(obj, torch.Tensor):
continue
if obj.device.type == "cuda":
size_buckets[(*obj.size(),) + (obj.element_size(),)] += 1
return size_buckets
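# Bucket keys produced above have the form (*tensor_shape, element_size_in_bytes);
# e.g. a float32 tensor of shape (8, 512) is counted under the key (8, 512, 4).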
def dump_size_buckets(size_buckets, prefix=""):
total = 0
for key, value in size_buckets.items():
this = reduce(operator.mul, key) * value
total += this
print(prefix + f"{key} : {value}, {this}")
print(prefix + f"total = {total}")
last_size_buckets = None
once = True
def safe_rank():
try:
return torch.distributed.get_rank()
except AssertionError:
return 0
def check_size_buckets():
global last_size_buckets
global once
size_buckets = get_tensors_by_size_bucket()
if last_size_buckets is not None:
if size_buckets != last_size_buckets:
print(f"difference is oustanding tensors: {safe-rank()}")
dump_size_buckets(last_size_buckets, "old: ")
dump_size_buckets(size_buckets, "new: ")
if once:
print(f"dumping buckets for: {safe_rank()}")
dump_size_buckets(last_size_buckets, "old: ")
dump_size_buckets(size_buckets, "new: ")
once = False
else:
print(f"size buckets none on {safe_rank()}")
last_size_buckets = size_buckets
def dump_cuda_tensors():
    print("dumping cuda tensors...")
    size_buckets = defaultdict(int)
    for obj in gc.get_objects():
        if not isinstance(obj, torch.Tensor):
            continue
        if obj.device.type == "cuda":
            size_buckets[(*obj.size(),) + (obj.element_size(),)] += 1
print(f"outstanding cuda tensors:")
total = 0
for key, value in size_buckets.items():
this = reduce(operator.mul, key) * value
total += this
print(f"{key} : {value}, {this}")
print(f"total size = {total}")
pprint.pprint(torch.cuda.memory_stats())
def log_number_of_parameters(model):
num_params = reduce(operator.add, (reduce(operator.mul, x.size()) for x in model.parameters()))
if hasattr(model, "group"):
total = torch.Tensor([num_params])
if torch.cuda.is_available():
total = total.cuda()
torch.distributed.all_reduce(total, group=model.group)
logging.info(
f"training model, #params = {num_params}, group: {model.group.rank()}, grank:"
f" {torch.distributed.get_rank()}, sizes {model.group.size()}"
)
torch.distributed.barrier()
if model.group.rank() == 0:
logging.info(f"total #prams = {total.item()}")
else:
logging.info(f"training model, #params = {num_params}")
def get_device(model, index):
if isinstance(model, DDP):
model = model.module
if not torch.cuda.is_available():
return torch.device("cpu")
if hasattr(model, "devices"):
return model.devices[index]
else:
return torch.cuda.current_device()
def get_fake_dataloader(lm_dataloader_len, args):
fake_input = {"input": torch.zeros(args.batch_size)}
class FakeDataset:
def __getitem__(self, index):
return fake_input
def __len__(self):
return lm_dataloader_len
return FakeDataset()
def train(model_config, model, benchmark_config, args):
lm_dataloader, _, _ = model_config["data"]
criterion = benchmark_config["criterion"]
vocab_size = benchmark_config["vocab_size"]
optimizer = model_config["optimizer"]
model.train()
log_number_of_parameters(model)
total_loss = 0.0
word_counter = 0
optimizer = optimizer(model.parameters())
pipe_group = model.group if hasattr(model, "group") else None
if args.ddp_zero:
model = DDP(
model,
device_ids=[torch.cuda.current_device()],
process_group=get_data_parallel_group(),
find_unused_parameters=False,
)
# TODO(anj-s): Avoid sending fake data to all replicas except the first and last one.
if pipe_group and pipe_group.rank() != 0 and pipe_group.rank() != (pipe_group.size() - 1):
lm_dataloader, _, _ = get_synthetic_dataloaders(args, benchmark_config)
total_tokens = 0
total_tokens_per_log_interval = 0
bptt = 2
start_time = time.time()
epoch_start_time = 0.0
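    # get_batch produces the standard next-token language-modeling shift:
    # the target sequence is the source sequence offset by one position.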
def get_batch(source):
seq_len = len(source) - 1
data = source[0:seq_len]
target = source[1 : 1 + seq_len]
return data, target
for i, batch in enumerate(lm_dataloader):
if i == 1:
epoch_start_time = time.time()
source, target = get_batch(batch)
if args.max_batch and i > args.max_batch:
break
if i > 0:
total_tokens += source.numel()
optimizer.zero_grad()
try:
if (pipe_group is None or pipe_group.rank() == 0) and not args.ddp_zero:
tmp = source.to(get_device(model, 0))
output = model(tmp)
else:
output = model(source)
except Exception as e:
raise RuntimeError(f"training failed on {torch.distributed.get_rank()}") from e
if pipe_group is None or pipe_group.rank() == pipe_group.size() - 1:
target = target.to(get_device(model, -1))
output = output.to(target.device)
loss = criterion(output.view(-1, vocab_size), target.view(-1))
if args.ddp_zero:
ddp_group = get_data_parallel_group()
torch.distributed.all_reduce(loss, op=torch.distributed.ReduceOp.SUM, group=ddp_group)
loss /= ddp_group.size()
loss.backward()
del target
else:
if args.ddp_zero:
model.module.back_helper(output)
else:
model.back_helper(output)
del output
torch.nn.utils.clip_grad_value_(model.parameters(), benchmark_config["clip_value"])
optimizer.step()
if pipe_group is None or pipe_group.rank() == pipe_group.size() - 1:
total_loss += loss.item()
log_interval = 1
total_tokens_per_log_interval += source.numel()
if i % log_interval == 0 and i > 0:
cur_loss = total_loss / log_interval
elapsed = time.time() - start_time
if not args.multiprocess or dist.get_rank() == dist.get_world_size() - 1:
print(
"| batch {:5d} | wps {:5.2f} | loss {:5.2f} | ppl {:8.2f}".format(
i, total_tokens_per_log_interval / elapsed, cur_loss, math.exp(cur_loss)
)
)
total_tokens_per_log_interval = 0
total_loss = 0
start_time = time.time()
if epoch_start_time != 0:
wps = total_tokens / (time.time() - epoch_start_time)
else:
raise RuntimeError(
"Unable to benchmark on a single batch. Increase the size " " of the dataset and rerun the benchmark."
)
if not args.multiprocess or dist.get_rank() == dist.get_world_size() - 1:
return wps, loss.item()
else:
return 0.0, 0.0
# TODO(anj-s): Add an option for users to be able to benchmark evaluate.
def evaluate(eval_model, data_source, criterion, ntokens):
eval_model.eval()
total_loss = 0.0
# TODO(anj-s): Move this to the benchmark config if we want to benchmark evaluation.
bptt = 35
def get_batch(source, i, bptt):
seq_len = min(bptt, len(source) - 1 - i)
data = source[i : i + seq_len]
target = source[i + 1 : i + 1 + seq_len].view(-1)
return data, target
with torch.no_grad():
for i in range(0, data_source.size(0) - 1, bptt):
data, targets = get_batch(data_source, i, bptt)
output = eval_model(data)
output = output.to(targets.device)
output_flat = output.view(-1, ntokens)
total_loss += len(data) * criterion(output_flat, targets).item()
return total_loss / (len(data_source) - 1)
def get_number_of_words(data):
return data.size()[0] * data.size()[1]
def verify_peak_memory(rank, golden_config, std_dev):
print("Peak allocated bytes on cuda:0: {:1d}".format(torch.cuda.memory_stats(rank)["allocated_bytes.all.peak"]))
current_device_usage = torch.cuda.memory_stats(rank)["allocated_bytes.all.peak"]
golden_ref = golden_config["peak_mem_usage"][rank]
if not current_device_usage < golden_ref * std_dev:
raise RuntimeError(
"Peak memory usage for cuda device {:d} is {:d} which"
"is less than golden reference value of {:d}".format(rank, current_device_usage, golden_ref)
)
def verify_lm_run(wps, golden_config, args):
"""Verify that words per second for a given benchmark run matches the golden data."""
# Verify wps only on the last rank in multiprocess pipe
if not args.multiprocess or dist.get_rank() == dist.get_world_size() - 1:
# Assert that words per second is within 3 standard deviations of the average
# of five golden runs
print("Throughput(wps) is {:.2f}.".format(wps))
if not wps > (golden_config["avg_wps"] - (3 * golden_config["std_dev_wps"])):
raise RuntimeError(
"Throughput(wps):{:.2f} is below the golden threshold of an "
"average value of {:.2f} and standard dev of {:.2f}.".format(
wps, golden_config["avg_wps"], golden_config["std_dev_wps"]
)
)
if args.multiprocess:
verify_peak_memory(dist.get_rank(), golden_config, 1.5)
else:
for i in range(4):
verify_peak_memory(i, golden_config, 1.1)
def benchmark_language_model(model_config, model, benchmark_config, args):
golden_config = get_golden_config(args.model_name, args)
epoch = benchmark_config["epochs"]
start_time = time.time()
if dist.get_rank() == dist.get_world_size() - 1:
print("-" * 110)
print("| start of epoch {:1d}".format(epoch))
print("-" * 110)
wps, loss = train(model_config, model, benchmark_config, args)
elapsed_time = time.time() - start_time
if dist.get_rank() == dist.get_world_size() - 1:
print("-" * 110)
print("| end of epoch {:1d} | time: {:5.2f}s | train loss {:5.2f} ".format(epoch, elapsed_time, loss))
print("-" * 110)
print("Throughput(wps) is {:.2f}.".format(wps))
print(
"Peak allocated bytes on cuda:{}: {:1d}".format(
dist.get_rank(), torch.cuda.memory_stats(dist.get_rank())["allocated_bytes.all.peak"]
)
)
if len(model.balance) == 4:
if args.model_name == "lm":
verify_lm_run(wps, golden_config, args)
else:
raise RuntimeError("Unrecognized args.model_name " % args.model_name)
def generate_balance_weighted(num_devices, num_layers, fraction=0.5):
balance = []
layers_assigned = 0
average_count = num_layers / num_devices
last_layers = int(average_count * fraction)
balance = generate_balance(num_devices - 1, num_layers - last_layers)
balance.append(last_layers)
return balance
def generate_balance(num_devices, num_layers):
balance = []
layers_assigned = 0
for i in range(num_devices):
x = (num_layers - layers_assigned) / (num_devices - i)
if x.is_integer():
balance.append(int(x))
layers_assigned += x
else:
balance.append(math.ceil(x))
layers_assigned += math.ceil(x)
return balance
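# Worked example (illustrative values, not taken from the benchmark configs):
#   generate_balance(4, 10)               -> [3, 3, 2, 2]  (layers spread as evenly as possible)
#   generate_balance_weighted(4, 10, 0.5) -> [3, 3, 3, 1]  (last device gets ~half the average share)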
def get_synthetic_dataloaders(args, benchmark_config):
"""Returns dataloader for synthetic data."""
if args.model_name == "lm":
return get_synthetic_wikitext2_dataloaders(args, benchmark_config)
else:
raise RuntimeError("Unrecognized args.model_mame " % args.model_name)
def get_real_dataloaders(args, device, benchmark_config):
"""Returns dataloaders for real data."""
if args.model_name == "lm":
data = get_real_wikitext2_dataloaders(args, benchmark_config)
ntokens, train_dataloader, valid_dataloader, test_dataloader = data
benchmark_config["vocab_size"] = ntokens
return train_dataloader, valid_dataloader, test_dataloader
else:
raise RuntimeError("Unrecognized args.model_mame " % args.model_name)
def create_model_config(args, benchmark_config=None):
"""Return a dict with the given model, dataset and optimizer."""
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
if args.use_synthetic_data:
dataloader_fn = get_synthetic_dataloaders
else:
dataloader_fn = get_real_dataloaders
data = dataloader_fn(args, device, benchmark_config)
model, optimizer = get_model_and_optimizer(args, device, benchmark_config)
return {
"model": model,
"optimizer": optimizer,
"data": data,
}
def create_benchmark_config(model_name):
"""Return a dict with configurations required for benchmarking `model_name` model."""
if model_name == "lm":
return lm_wikitext2.get_benchmark_config()
else:
raise RuntimeError("Unrecognized args.model_mame " % args.model_name)
def get_golden_config(model_name, args):
"""Return a dict with the golden data for throughput and memory usage."""
if model_name == "lm":
return lm_wikitext2.get_golden_real_stats(args.multiprocess)
else:
raise RuntimeError("Unrecognized args.model_mame " % args.model_name)
def benchmark_single_process(args):
"""Benchmark a given model using a single process and multiple devices."""
init_method_pgroup = "tcp://localhost:{}".format(MPI_PORT)
torch.distributed.init_process_group(backend="gloo", rank=0, world_size=1, init_method=init_method_pgroup)
num_devices = torch.cuda.device_count() if torch.cuda.is_available() else 1
assert num_devices > 0
init_random_seed(0)
benchmark_config = create_benchmark_config(args.model_name)
model_config = create_model_config(args, benchmark_config=benchmark_config)
model = model_config["model"]
balance = generate_balance(min(num_devices, 4), len(model))
pipe_model = Pipe(model, balance, chunks=args.chunks, checkpoint=args.checkpoint)
del model
del model_config["model"]
if args.dry_run:
train(model_config, pipe_model, benchmark_config, args)
else:
benchmark_language_model(model_config, pipe_model, benchmark_config, args)
def run_mp_worker(args, available_workers):
benchmark_config = create_benchmark_config(args.model_name)
model_config = create_model_config(args, benchmark_config=benchmark_config)
model = model_config["model"]
balance = generate_balance(get_pipeline_parallel_group().size(), len(model))
pipe_model = MultiProcessPipe(
model,
balance,
chunks=args.chunks,
worker_map=get_worker_map(),
input_device=torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu"),
checkpoint=args.checkpoint,
# TODO(anj-s): Do we need to comment this out? loss_fn=benchmark_config["criterion"],
)
if torch.cuda.is_available():
pipe_model = pipe_model.cuda()
if args.all_at_once and pipe_model.pipeline:
print(f"running all at once")
pipe_model.pipeline.all_at_once = True
if args.dry_run:
train(model_config, pipe_model, benchmark_config, args)
else:
benchmark_language_model(model_config, pipe_model, benchmark_config, args)
def run_worker(rank, world_size, args):
if args.world_size != 0:
world_size = args.world_size
dist_init(rank + args.rank_base, world_size, hostname=args.host)
initialize_model_parallel(1, world_size)
init_random_seed(0)
run_mp_worker(args, world_size)
rpc.shutdown()
torch.distributed.destroy_process_group()
def benchmark_multiprocess(rank, world_size, args):
init_method_pgroup = "tcp://localhost:{}".format(MPI_PORT)
# TODO(anj-s): Add regression benchmarks for nccl as well.
torch.distributed.init_process_group(
backend="gloo", rank=rank, world_size=world_size, init_method=init_method_pgroup
)
torch.cuda.set_device(rank % torch.cuda.device_count())
# TODO(anj-s): Move to TensorPipeRpcBackendOptions.
rpc.init_rpc(
f"Test{rank}",
rank=rank,
world_size=world_size,
backend=rpc.BackendType.PROCESS_GROUP,
rpc_backend_options=rpc.ProcessGroupRpcBackendOptions(
rpc_timeout=20, init_method="tcp://localhost:{}".format(RPC_PORT)
),
)
initialize_model_parallel(1, world_size)
init_random_seed(0)
run_mp_worker(args, world_size)
rpc.shutdown()
torch.distributed.destroy_process_group()
parser = argparse.ArgumentParser(description="benchmark")
parser.add_argument("--multiprocess", action="store_true", help="Runs single process benchmarks.")
parser.add_argument("--host", "-o", type=str, default="localhost", help="hostname")
parser.add_argument("--chunks", type=int, default=1, help="number of microbatches per batch")
parser.add_argument("--batch-size", type=int, default=8, help="size of a batch")
parser.add_argument("--all-at-once", action="store_true", default=False, help="do backward pass on whole batch at once")
parser.add_argument("--max-batch", type=int, default=4, help="Max number of batches")
parser.add_argument("--ddp-zero", action="store_true", default=False, help="enable ddp")
parser.add_argument(
"--lazy-construction", action="store_true", default=False, help="Number of decoder layers in the model"
)
parser.add_argument(
"--checkpoint", default="never", choices=["always", "except_last", "never"], help="Checkpointing strategy for pipe"
)
parser.add_argument("--use_synthetic_data", action="store_true", help="Uses synthetic data for running benchmarks.")
parser.add_argument("--dry_run", action="store_true", help="Run a sample training run without regression testing.")
parser.add_argument(
# TODO(anj-s): In the process of adding more models and hence the requirement for a flag.
"--model_name",
default="lm",
help="Language Model(LM) used to benchmark nn.pipe.",
)
if __name__ == "__main__":
args = parser.parse_args()
# TODO(anj-s): Remove print statements and introduce logging levels.
if not args.multiprocess:
print(f"Running single process benchmark with args: {args}")
benchmark_single_process(args)
else:
world_size = max(torch.cuda.device_count(), 1)
print(f"Running multiprocess benchmark with args: {args}")
mp.spawn(benchmark_multiprocess, args=(world_size, args), nprocs=world_size, join=True)
|
the-stack_106_20338
|
import os
import networkx as nx
import numpy as np
from six import iteritems
from opensfm import types
import opensfm.dataset
def normalized(x):
return x / np.linalg.norm(x)
def camera_pose(position, lookat, up):
'''
Pose from position and look at direction
>>> position = [1.0, 2.0, 3.0]
>>> lookat = [0., 10.0, 2.0]
>>> up = [0.0, 0.0, 1.0]
>>> pose = camera_pose(position, lookat, up)
>>> np.allclose(pose.get_origin(), position)
True
>>> d = normalized(pose.transform(lookat))
>>> np.allclose(d, [0, 0, 1])
True
'''
ez = normalized(np.array(lookat) - np.array(position))
ex = normalized(np.cross(ez, up))
ey = normalized(np.cross(ez, ex))
pose = types.Pose()
pose.set_rotation_matrix([ex, ey, ez])
pose.set_origin(position)
return pose
class CubeDataset:
'''
    Dataset of cameras looking at points in a cube
>>> d = CubeDataset(3, 10, 0.1, 0.3)
>>> len(d.cameras)
3
>>> len(d.shots)
3
>>> len(d.points)
10
'''
def __init__(self, num_cameras, num_points, noise, outlier_fraction):
self.cameras = {}
for i in range(num_cameras):
camera = types.PerspectiveCamera()
camera.id = 'camera' + str(i)
camera.focal = 0.9
camera.k1 = -0.1
camera.k2 = 0.01
camera.height = 600
camera.width = 800
self.cameras[camera.id] = camera
self.shots = {}
for i in range(num_cameras):
alpha = float(i) / (num_cameras - 1)
position = [alpha, -5.0, 0.5]
lookat = [1.0 - alpha, alpha, alpha]
up = [alpha * 0.2, alpha * 0.2, 1.0]
shot = types.Shot()
shot.id = 'shot' + str(i)
shot.camera = self.cameras['camera' + str(i)]
shot.pose = camera_pose(position, lookat, up)
self.shots[shot.id] = shot
points = np.random.rand(num_points, 3)
self.points = {'point' + str(i): p for i, p in enumerate(points)}
g = nx.Graph()
for shot_id, shot in iteritems(self.shots):
for point_id, point in iteritems(self.points):
feature = shot.project(point)
g.add_node(shot_id, bipartite=0)
g.add_node(point_id, bipartite=1)
g.add_edge(shot_id, point_id, feature=feature,
feature_id=point_id, feature_color=(0, 0, 0))
self.tracks = g
def create_berlin_test_folder(tmpdir):
path = str(tmpdir.mkdir('berlin'))
os.symlink(os.path.abspath('data/berlin/images'),
os.path.join(path, 'images'))
return opensfm.dataset.DataSet(path)
|
the-stack_106_20339
|
from typing import Any, Optional, Text, List, Type
from rasa.nlu.config import RasaNLUModelConfig
from rasa.nlu.components import Component
from rasa.nlu.featurizers.featurizer import DenseFeaturizer
from rasa.utils.features import Features
from rasa.nlu.utils.hugging_face.hf_transformers import HFTransformersNLP
from rasa.nlu.tokenizers.lm_tokenizer import LanguageModelTokenizer
from rasa.nlu.training_data import Message, TrainingData
from rasa.nlu.constants import (
TEXT,
LANGUAGE_MODEL_DOCS,
DENSE_FEATURIZABLE_ATTRIBUTES,
SEQUENCE_FEATURES,
SENTENCE_FEATURES,
FEATURE_TYPE_SENTENCE,
FEATURE_TYPE_SEQUENCE,
FEATURIZER_CLASS_ALIAS,
)
class LanguageModelFeaturizer(DenseFeaturizer):
"""Featurizer using transformer based language models.
Uses the output of HFTransformersNLP component to set the sequence and sentence
level representations for dense featurizable attributes of each message object.
"""
@classmethod
def required_components(cls) -> List[Type[Component]]:
return [HFTransformersNLP, LanguageModelTokenizer]
def train(
self,
training_data: TrainingData,
config: Optional[RasaNLUModelConfig] = None,
**kwargs: Any,
) -> None:
for example in training_data.training_examples:
for attribute in DENSE_FEATURIZABLE_ATTRIBUTES:
self._set_lm_features(example, attribute)
def _get_doc(self, message: Message, attribute: Text) -> Any:
"""
Get the language model doc. A doc consists of
{'token_ids': ..., 'tokens': ...,
'sequence_features': ..., 'sentence_features': ...}
"""
return message.get(LANGUAGE_MODEL_DOCS[attribute])
def process(self, message: Message, **kwargs: Any) -> None:
"""Sets the dense features from the language model doc to the incoming
message."""
for attribute in DENSE_FEATURIZABLE_ATTRIBUTES:
self._set_lm_features(message, attribute)
def _set_lm_features(self, message: Message, attribute: Text = TEXT) -> None:
"""Adds the precomputed word vectors to the messages features."""
doc = self._get_doc(message, attribute)
if doc is None:
return
sequence_features = doc[SEQUENCE_FEATURES]
sentence_features = doc[SENTENCE_FEATURES]
final_sequence_features = Features(
sequence_features,
FEATURE_TYPE_SEQUENCE,
attribute,
self.component_config[FEATURIZER_CLASS_ALIAS],
)
message.add_features(final_sequence_features)
final_sentence_features = Features(
sentence_features,
FEATURE_TYPE_SENTENCE,
attribute,
self.component_config[FEATURIZER_CLASS_ALIAS],
)
message.add_features(final_sentence_features)
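# Illustrative pipeline ordering (assumed config snippet, not part of this module):
# this featurizer only reuses features already computed by HFTransformersNLP, so it
# must be listed after that component and its tokenizer, e.g.
#
#   pipeline:
#     - name: HFTransformersNLP
#     - name: LanguageModelTokenizer
#     - name: LanguageModelFeaturizer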
|
the-stack_106_20340
|
# Python program to reverse a singly linked list
# Node class
class Node:
# Constructor to initialise data and next
def __init__(self, data=None):
self.data = data
self.next = None
class SinglyLinkedList:
# Constructor to initialise head
def __init__(self):
self.head = None
# Function to reverse a linked list
def reverse(self):
# If linked list is empty
if self.head is None:
return None
current = self.head
prev = None
while current is not None:
# Store the value of current.next
next = current.next
# Set current.next to point to the previous node
current.next = prev
# Update pointers for next iteration
prev = current
current = next
self.head = prev
# Function to Insert data at the beginning of the linked list
def insert_at_beg(self, data):
node = Node(data)
node.next = self.head
self.head = node
# Function to print the linked list
def print_data(self):
current = self.head
while current is not None:
print(current.data, '-> ', end='')
current = current.next
print('None')
if __name__ == '__main__':
linked_list = SinglyLinkedList()
linked_list.insert_at_beg(7)
linked_list.insert_at_beg(6)
linked_list.insert_at_beg(5)
linked_list.insert_at_beg(4)
linked_list.insert_at_beg(3)
linked_list.insert_at_beg(2)
linked_list.insert_at_beg(1)
linked_list.print_data()
# call the reverse function
linked_list.reverse()
# print the reversed list
linked_list.print_data()
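    # Expected output of the two print_data() calls above:
    #   1 -> 2 -> 3 -> 4 -> 5 -> 6 -> 7 -> None
    #   7 -> 6 -> 5 -> 4 -> 3 -> 2 -> 1 -> None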
|
the-stack_106_20341
|
"""
MIT License
Copyright (c) 2017 Jaehyun Park
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import division
import numpy as np
import scipy.sparse as sp
import cvxpy as cvx
from cvxpy.utilities import QuadCoeffExtractor
from numpy import linalg as LA
from collections import defaultdict
from itertools import chain
import logging
# Encodes a quadratic function x^T P x + q^T x + r,
# with an optional relation operator '<=' or '=='
# so that the function can also encode a constraint.
# P is a scipy sparse matrix of size n*n
# q is a scipy sparse matrix of size 1*n
# r is a scalar
class QuadraticFunction:
def __init__(self, P, q, r, relop=None):
self.P, self.q, self.r = P, q, r
self.qarray = np.squeeze(np.asarray(q.todense()))
self.relop = relop
self.eigh = None # for ADMM
    # Evaluates f with a numpy array x.
def eval(self, x):
return (self.P.dot(x) + self.qarray).dot(x) + self.r
# Evaluates f with a cvx expression object x.
def eval_cvx(self, x):
return cvx.quad_form(x, self.P) + self.q.T*x + self.r
def violation(self, x):
assert self.relop is not None
if self.relop == '==':
ret = abs(self.eval(x))
else:
ret = max(0., self.eval(x))
return ret
# Returns the "homogeneous form" matrix M of the function
    # so that (x, 1)^T M (x, 1) is the same as f(x).
def homogeneous_form(self):
return sp.bmat([[self.P, self.q/2], [self.q.T/2, self.r]])
# Returns QuadraticFunction f1, f2 such that
# f(x) = f1(x) - f2(x), with f1 and f2 both convex.
# Affine and constant components are always put into f1.
def dc_split(self, use_eigen_split=False):
n = self.P.shape[0]
if self.P.nnz == 0: # P is zero
P1, P2 = sp.csr_matrix((n, n)), sp.csr_matrix((n, n))
        elif use_eigen_split:
lmb, Q = LA.eigh(self.P.todense())
P1 = sum([Q[:, i]*lmb[i]*Q[:, i].T for i in range(n) if lmb[i] > 0])
P2 = sum([-Q[:, i]*lmb[i]*Q[:, i].T for i in range(n) if lmb[i] < 0])
assert abs(np.sum(P1 - P2 - self.P)) < 1e-8
else:
lmb_min = np.min(LA.eigh(self.P.todense())[0])
if lmb_min < 0:
P1 = self.P + (1-lmb_min)*sp.identity(n)
P2 = (1-lmb_min)*sp.identity(n)
else:
P1 = self.P
P2 = sp.csr_matrix((n, n))
f1 = QuadraticFunction(P1, self.q, self.r)
f2 = QuadraticFunction(P2, sp.csc_matrix((n, 1)), 0)
return (f1, f2)
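    # Example of the default (non-eigen) split above: if the smallest eigenvalue of P
    # is -1, the method returns P1 = P + 2*I and P2 = 2*I, both positive semidefinite,
    # and f1(x) - f2(x) recovers the original quadratic.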
# Returns the one-variable function when regarding f(x)
# as a quadratic expression in x[k].
# f is an instance of QuadraticFunction
# return value is an instance of OneVarQuadraticFunction
# TODO: speedup
def get_onevar_func(self, x, k):
z = np.copy(x)
z[k] = 0
t2 = self.P[k, k]
t1 = 2*self.P[k, :].dot(z)[0] + self.qarray[k]
t0 = (self.P.dot(z) + self.qarray).dot(z) + self.r
return OneVarQuadraticFunction(t2, t1, t0, self.relop)
class OneVarQuadraticFunction(QuadraticFunction):
def __init__(self, P, q, r, relop=None):
self.P, self.q, self.r = P, q, r
self.relop = relop
def __repr__(self):
return '%+.3f x^2 %+.3f x %+.3f' % (self.P, self.q, self.r)
def eval(self, x):
if np.isinf(x):
if self.P != 0: return self.P*x*x
if self.q != 0: return self.q*x
            return self.r
return x*(self.P*x + self.q) + self.r
class QCQPForm:
def __init__(self, f0, fs):
assert all([f.relop is not None for f in fs])
self.f0 = f0
self.fs = fs
self.n = f0.P.shape[0]
self.m = len(fs)
self.rho = None # for ADMM
self.z_solver = None # for ADMM
def fi(self, i):
return self.fs[i]
def violations(self, x): # list of constraint violations
return [f.violation(x) for f in self.fs]
def better(self, x1, x2, tol=1e-4):
# returns the better point
# bucketize the violations in order to avoid chains of
# "better" points ultimately preferring high infeasibility
v1 = int(max(self.violations(x1))/tol)
v2 = int(max(self.violations(x2))/tol)
f1 = self.f0.eval(x1)
f2 = self.f0.eval(x2)
if v1 < v2: return x1
if v2 < v1: return x2
if f1 < f2: return x1
return x2
# TODO: optimize repeated calculations (cache factors, etc.)
def onecons_qcqp(z, f, tol=1e-6):
""" Solves a nonconvex problem
minimize ||x-z||_2^2
subject to f(x) = x^T P x + q^T x + r ~ 0
where the relation ~ is given by f.relop (either <= or ==)
"""
# if constraint is ineq and z is feasible: z is the solution
if f.relop == '<=' and f.eval(z) <= 0:
return z
if f.eigh is None:
Psymm = (f.P + f.P.T)/2.
f.eigh = LA.eigh(np.asarray(Psymm.todense()))
lmb, Q = f.eigh
zhat = Q.T.dot(z)
qhat = Q.T.dot(f.qarray)
# now solve a transformed problem
# minimize ||xhat - zhat||_2^2
# subject to sum(lmb_i xhat_i^2) + qhat^T xhat + r = 0
# constraint is now equality from
# complementary slackness
xhat = lambda nu: -np.divide(nu*qhat-2*zhat, 2*(1+nu*lmb))
phi = lambda xhat: lmb.dot(np.power(xhat, 2)) + qhat.dot(xhat) + f.r
s = -np.inf
e = np.inf
for l in lmb:
if l > 0: s = max(s, -1./l)
if l < 0: e = min(e, -1./l)
if s == -np.inf:
s = -1.
while phi(xhat(s)) <= 0: s *= 2.
if e == np.inf:
e = 1.
while phi(xhat(e)) >= 0: e *= 2.
while e-s > tol:
m = (s+e)/2.
p = phi(xhat(m))
if p > 0: s = m
elif p < 0: e = m
else:
s = e = m
break
nu = (s+e)/2.
return Q.dot(xhat(nu))
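# Usage sketch with hypothetical values: project z = [2, 0] onto the unit disk
# {x : x^T x - 1 <= 0}, i.e. P = I, q = 0, r = -1. The bisection above converges
# to nu = 1 and returns approximately [1, 0], the closest feasible point.
#   f = QuadraticFunction(sp.identity(2).tocsr(), sp.csc_matrix((2, 1)), -1.0, '<=')
#   x = onecons_qcqp(np.array([2.0, 0.0]), f)   # ~ array([1., 0.])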
def get_feasible_intervals(f, s=0, tol=1e-4):
p, q, r = f.P, f.q, f.r
if f.relop == '==': # |px^2 + qx + r| <= s
f1 = OneVarQuadraticFunction(p, q, r-s, '<=')
f2 = OneVarQuadraticFunction(-p, -q, -r-s, '<=')
I = []
for I1 in get_feasible_intervals(f1):
for I2 in get_feasible_intervals(f2):
i = (max(I1[0], I2[0]), min(I1[1], I2[1]))
if i[0] <= i[1]:
I.append(i)
else: # px^2 + qx + r-s <= 0
if p > tol:
D = q*q - 4*p*(r-s)
if D >= 0:
rD = np.sqrt(D)
I = [((-q-rD)/(2*p), (-q+rD)/(2*p))]
else: # never feasible
I = []
elif p < -tol:
D = q*q - 4*p*(r-s)
if D >= 0:
rD = np.sqrt(D)
# note that p < 0
I = [(-np.inf, (-q+rD)/(2*p)), ((-q-rD)/(2*p), np.inf)]
else: # always feasible
I = [(-np.inf, np.inf)]
else:
if q > tol:
I = [(-np.inf, (s-r)/q)]
elif q < -tol:
I = [((s-r)/q, np.inf)]
else: # always feasible
I = [(-np.inf, np.inf)]
return I
# returns the optimal point of the following program, or None if infeasible
# minimize f0(x)
# subject to fi(x) ~ s
# where the only variable is a real number x
# The relation operator ~ can be <= or ==. In case ~ is ==,
# the constraint means |fi(x)| <= s.
def onevar_qcqp(f0, fs, s):
# O(m log m) routine for finding feasible set
Is = list(chain(*[get_feasible_intervals(f, s) for f in fs]))
m = len(fs)
counts = defaultdict(lambda: 0, {-np.inf: +1, +np.inf: -1})
for I in Is:
counts[I[0]] += 1
counts[I[1]] -= 1
xs = [x for x in sorted(counts.items()) if x[1] != 0]
C = []
tot = 0
for i in range(len(xs)):
tot += xs[i][1]
if tot == m and xs[i][1] == -1:
C.append((xs[i-1][0], xs[i][0]))
# no feasible points
if len(C) == 0: return None
bestxs = []
bestf = np.inf
p, q = f0.P, f0.q
# any point in C works
# not using tolerance to check zeroness
if p == 0 and q == 0:
return np.random.uniform(*C[np.random.choice(len(C))])
# unconstrained minimizer
x0 = -q/(2.*p) if p > 0 else np.nan
# endpoints of feasible intervals
for I in C:
if I[0] <= x0 and x0 <= I[1]: return x0
# note that endpoints or the function values can be +-inf
(fl, fr) = (f0.eval(I[0]), f0.eval(I[1]))
if bestf > fl:
(bestxs, bestf) = [I[0]], fl
elif bestf == fl:
bestxs.append(I[0])
if bestf > fr:
(bestxs, bestf) = [I[1]], fr
elif bestf == fr:
bestxs.append(I[1])
if len(bestxs) == 0:
return None
else:
return np.random.choice(bestxs)
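# Worked example with illustrative values: minimize x^2 subject to x - 2 <= 0.
#   f0 = OneVarQuadraticFunction(1.0, 0.0, 0.0)
#   f1 = OneVarQuadraticFunction(0.0, 1.0, -2.0, '<=')
#   onevar_qcqp(f0, [f1], s=0)   # returns 0.0: the unconstrained minimizer is feasible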
def get_id_map(xs):
id_map = {}
N = 0
for x in xs:
id_map[x.id] = N
N += x.size[0]*x.size[1]
return id_map, N
def assign_vars(xs, vals):
if vals is None:
for x in xs:
size = x.size[0]*x.size[1]
x.value = np.full(x.size, np.nan)
else:
ind = 0
for x in xs:
size = x.size[0]*x.size[1]
x.value = np.reshape(vals[ind:ind+size], x.size, order='F')
ind += size
def flatten_vars(xs, n):
ret = np.empty(n)
ind = 0
for x in xs:
size = x.size[0]*x.size[1]
        ret[ind:ind+size] = np.ravel(x.value, order='F')
        ind += size
    return ret
def get_qcqp_form(prob):
"""Returns the problem metadata in QCQP class
"""
# Check quadraticity
if not prob.objective.args[0].is_quadratic():
raise Exception("Objective is not quadratic.")
if not all([constr._expr.is_quadratic() for constr in prob.constraints]):
raise Exception("Not all constraints are quadratic.")
if prob.is_dcp():
logging.warning("Problem is already convex; specifying solve method is unnecessary.")
extractor = QuadCoeffExtractor(*get_id_map(prob.variables()))
P0, q0, r0 = extractor.get_coeffs(prob.objective.args[0])
# unpacking values
P0, q0, r0 = (P0[0]+P0[0].T)/2., q0.T.tocsc(), r0[0]
if prob.objective.NAME == "maximize":
P0, q0, r0 = -P0, -q0, -r0
f0 = QuadraticFunction(P0, q0, r0)
fs = []
for constr in prob.constraints:
sz = constr._expr.size[0]*constr._expr.size[1]
Pc, qc, rc = extractor.get_coeffs(constr._expr)
for i in range(sz):
fs.append(QuadraticFunction((Pc[i]+Pc[i].T)/2., qc[i, :].T.tocsc(), rc[i], constr.OP_NAME))
return QCQPForm(f0, fs)
|
the-stack_106_20343
|
import FWCore.ParameterSet.Config as cms
from Configuration.Eras.Era_Run3_dd4hep_cff import Run3_dd4hep
process = cms.Process("GeometryTest", Run3_dd4hep)
process.load("FWCore.MessageLogger.MessageLogger_cfi")
# Choose Tracker Geometry
process.load('Configuration.Geometry.GeometryDD4hepExtended2021Reco_cff')
process.TrackerGeometricDetESModule = cms.ESProducer( "TrackerGeometricDetESModule",
fromDDD = cms.bool( False ),
fromDD4hep = cms.bool( True )
)
process.es_prefer_geomdet = cms.ESPrefer("TrackerGeometricDetESModule","")
process.load("Geometry.TrackerGeometryBuilder.TrackerAdditionalParametersPerDet_cfi")
process.load("Alignment.CommonAlignmentProducer.FakeAlignmentSource_cfi")
process.preferFakeAlign = cms.ESPrefer("FakeAlignmentSource")
process.source = cms.Source("EmptySource")
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(1)
)
process.out = cms.OutputModule("AsciiOutputModule")
process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.MessageLogger.debugModules.append('*')
process.MessageLogger.cout = cms.untracked.PSet(
threshold = cms.untracked.string('DEBUG'),
default = cms.untracked.PSet( limit = cms.untracked.int32(0) ),
TrackerNumberingBuilder = cms.untracked.PSet( limit = cms.untracked.int32(-1) ),
TrackerGeometryBuilder = cms.untracked.PSet( limit = cms.untracked.int32(-1) ),
ModuleInfo = cms.untracked.PSet( limit = cms.untracked.int32(-1) ),
)
process.prod = cms.EDAnalyzer("ModuleInfo",
fromDDD = cms.bool(False),
printDDD = cms.untracked.bool(False),
tolerance = cms.untracked.double(1.0e-23)
)
process.p1 = cms.Path(process.prod)
process.ep = cms.EndPath(process.out)
|
the-stack_106_20344
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RTweenr(RPackage):
"""Interpolate Data for Smooth Animations.
In order to create smooth animation between states of data, tweening is
necessary. This package provides a range of functions for creating tweened
data that can be used as basis for animation. Furthermore it adds a number
of vectorized interpolaters for common R data types such as numeric, date
and colour."""
cran = "tweenr"
version('1.0.2', sha256='1805f575da6705ca4e5ec1c4605222fc826ba806d9ff9af41770294fe08ff69f')
version('1.0.1', sha256='efd68162cd6d5a4f6d833dbf785a2bbce1cb7b9f90ba3fb060931a4bd705096b')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('r-farver', type=('build', 'run'))
depends_on('r-magrittr', type=('build', 'run'))
depends_on('r-rlang', type=('build', 'run'))
|
the-stack_106_20345
|
import random
from PIL import Image
from time import sleep
from states.base import BaseState
import glob
class State(BaseState):
# module information
name = "gifs"
index = 0
delay = 12
# check function
def check(self, _state):
return True
    def get_image(self, path):
        image = Image.open(path)
        sequence = []
        try:
            while True:
                temp = image.copy()
                # Image.resize returns a new image rather than resizing in place,
                # so resize each frame copy to the 96x32 panel before storing it.
                sequence.append(temp.resize((96, 32)).convert('RGB'))
                image.seek(len(sequence))
        except EOFError:
            pass
        return sequence
# module runner
def run(self):
gifs = glob.glob('static/gifs/*.gif')
gif = gifs[random.randint(0, len(gifs) - 1)]
sequence = self.get_image(gif)
while not self.killed:
for image in sequence:
self.output_image(image)
sleep(image.info['duration'] / 1000)
|
the-stack_106_20346
|
#!/usr/bin/env python3
#
# Copyright 2021 James Yoo.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from ament_index_python.packages import get_package_share_directory
from launch import LaunchDescription
from launch.actions import DeclareLaunchArgument
from launch.substitutions import LaunchConfiguration, Command
from launch_ros.actions import Node
from launch_ros.substitutions import FindPackageShare
def generate_launch_description():
use_sim_time = LaunchConfiguration('use_sim_time', default='false')
#xacro_file_name = 'leo.urdf.xacro'
#xacro_path_default = os.path.join(
# get_package_share_directory('leo_description'),
# 'urdf',
# xacro_file_name)
#xacro_path = LaunchConfiguration('xacro_path', default=xacro_path_default)
urdf_file_name = 'leo.urdf'
urdf_default_path = os.path.join(
get_package_share_directory('leo_description'),
'urdf',
urdf_file_name)
urdf_path = LaunchConfiguration('urdf_path', default=urdf_default_path)
return LaunchDescription([
DeclareLaunchArgument(
'use_sim_time',
default_value=use_sim_time,
            description='Use simulation clock (Gazebo) if true'
),
DeclareLaunchArgument(
'urdf_path',
default_value=urdf_default_path,
description='path to urdf file to publish'
),
Node(
package='robot_state_publisher',
executable='robot_state_publisher',
output='screen',
parameters=[{
'use_sim_time': use_sim_time
# 'robot_description':Command(['xacro',' ', xacro_path])
}],
arguments=[urdf_path]
),
Node(
package='joint_state_publisher',
executable='joint_state_publisher',
name='joint_state_publisher',
output='screen',
parameters=[{
'use_sim_time': use_sim_time
}],
arguments=[urdf_path]
)
])
|
the-stack_106_20347
|
# -*- coding: utf-8 -*-
"""
pygments.lexers.hdl
~~~~~~~~~~~~~~~~~~~
Lexers for hardware descriptor languages.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, bygroups, include, using, this, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Error
__all__ = ['VerilogLexer', 'SystemVerilogLexer', 'VhdlLexer']
class VerilogLexer(RegexLexer):
"""
For verilog source code with preprocessor directives.
.. versionadded:: 1.4
"""
name = 'verilog'
aliases = ['verilog', 'v']
filenames = ['*.v']
mimetypes = ['text/x-verilog']
#: optional Comment or Whitespace
_ws = r'(?:\s|//.*?\n|/[*].*?[*]/)+'
tokens = {
'root': [
(r'^\s*`define', Comment.Preproc, 'macro'),
(r'\n', Text),
(r'\s+', Text),
(r'\\\n', Text), # line continuation
(r'/(\\\n)?/(\n|(.|\n)*?[^\\]\n)', Comment.Single),
(r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
(r'[{}#@]', Punctuation),
(r'L?"', String, 'string'),
(r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char),
(r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float),
(r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
(r'([0-9]+)|(\'h)[0-9a-fA-F]+', Number.Hex),
(r'([0-9]+)|(\'b)[01]+', Number.Bin),
(r'([0-9]+)|(\'d)[0-9]+', Number.Integer),
(r'([0-9]+)|(\'o)[0-7]+', Number.Oct),
(r'\'[01xz]', Number),
(r'\d+[Ll]?', Number.Integer),
(r'\*/', Error),
(r'[~!%^&*+=|?:<>/-]', Operator),
(r'[()\[\],.;\']', Punctuation),
(r'`[a-zA-Z_]\w*', Name.Constant),
(r'^(\s*)(package)(\s+)', bygroups(Text, Keyword.Namespace, Text)),
(r'^(\s*)(import)(\s+)', bygroups(Text, Keyword.Namespace, Text),
'import'),
(words((
'always', 'always_comb', 'always_ff', 'always_latch', 'and',
'assign', 'automatic', 'begin', 'break', 'buf', 'bufif0', 'bufif1',
'case', 'casex', 'casez', 'cmos', 'const', 'continue', 'deassign',
'default', 'defparam', 'disable', 'do', 'edge', 'else', 'end', 'endcase',
'endfunction', 'endgenerate', 'endmodule', 'endpackage', 'endprimitive',
'endspecify', 'endtable', 'endtask', 'enum', 'event', 'final', 'for',
'force', 'forever', 'fork', 'function', 'generate', 'genvar', 'highz0',
'highz1', 'if', 'initial', 'inout', 'input', 'integer', 'join', 'large',
'localparam', 'macromodule', 'medium', 'module', 'nand', 'negedge',
'nmos', 'nor', 'not', 'notif0', 'notif1', 'or', 'output', 'packed',
'parameter', 'pmos', 'posedge', 'primitive', 'pull0', 'pull1',
'pulldown', 'pullup', 'rcmos', 'ref', 'release', 'repeat', 'return',
'rnmos', 'rpmos', 'rtran', 'rtranif0', 'rtranif1', 'scalared', 'signed',
'small', 'specify', 'specparam', 'strength', 'string', 'strong0',
'strong1', 'struct', 'table', 'task', 'tran', 'tranif0', 'tranif1',
'type', 'typedef', 'unsigned', 'var', 'vectored', 'void', 'wait',
'weak0', 'weak1', 'while', 'xnor', 'xor'), suffix=r'\b'),
Keyword),
(words((
'accelerate', 'autoexpand_vectornets', 'celldefine', 'default_nettype',
'else', 'elsif', 'endcelldefine', 'endif', 'endprotect', 'endprotected',
'expand_vectornets', 'ifdef', 'ifndef', 'include', 'noaccelerate',
'noexpand_vectornets', 'noremove_gatenames', 'noremove_netnames',
'nounconnected_drive', 'protect', 'protected', 'remove_gatenames',
'remove_netnames', 'resetall', 'timescale', 'unconnected_drive',
'undef'), prefix=r'`', suffix=r'\b'),
Comment.Preproc),
(words((
'bits', 'bitstoreal', 'bitstoshortreal', 'countdrivers', 'display', 'fclose',
'fdisplay', 'finish', 'floor', 'fmonitor', 'fopen', 'fstrobe', 'fwrite',
'getpattern', 'history', 'incsave', 'input', 'itor', 'key', 'list', 'log',
'monitor', 'monitoroff', 'monitoron', 'nokey', 'nolog', 'printtimescale',
'random', 'readmemb', 'readmemh', 'realtime', 'realtobits', 'reset',
'reset_count', 'reset_value', 'restart', 'rtoi', 'save', 'scale', 'scope',
'shortrealtobits', 'showscopes', 'showvariables', 'showvars', 'sreadmemb',
'sreadmemh', 'stime', 'stop', 'strobe', 'time', 'timeformat', 'write'),
prefix=r'\$', suffix=r'\b'),
Name.Builtin),
(words((
'byte', 'shortint', 'int', 'longint', 'integer', 'time',
'bit', 'logic', 'reg', 'supply0', 'supply1', 'tri', 'triand',
                'trior', 'tri0', 'tri1', 'trireg', 'uwire', 'wire', 'wand', 'wor',
'shortreal', 'real', 'realtime'), suffix=r'\b'),
Keyword.Type),
(r'[a-zA-Z_]\w*:(?!:)', Name.Label),
(r'\$?[a-zA-Z_]\w*', Name),
],
'string': [
(r'"', String, '#pop'),
(r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
(r'[^\\"\n]+', String), # all other characters
(r'\\\n', String), # line continuation
(r'\\', String), # stray backslash
],
'macro': [
(r'[^/\n]+', Comment.Preproc),
(r'/[*](.|\n)*?[*]/', Comment.Multiline),
(r'//.*?\n', Comment.Single, '#pop'),
(r'/', Comment.Preproc),
(r'(?<=\\)\n', Comment.Preproc),
(r'\n', Comment.Preproc, '#pop'),
],
'import': [
(r'[\w:]+\*?', Name.Namespace, '#pop')
]
}
def get_tokens_unprocessed(self, text):
for index, token, value in \
RegexLexer.get_tokens_unprocessed(self, text):
# Convention: mark all upper case names as constants
if token is Name:
if value.isupper():
token = Name.Constant
yield index, token, value
class SystemVerilogLexer(RegexLexer):
"""
Extends verilog lexer to recognise all SystemVerilog keywords from IEEE
1800-2009 standard.
.. versionadded:: 1.5
"""
name = 'systemverilog'
aliases = ['systemverilog', 'sv']
filenames = ['*.sv', '*.svh']
mimetypes = ['text/x-systemverilog']
#: optional Comment or Whitespace
_ws = r'(?:\s|//.*?\n|/[*].*?[*]/)+'
tokens = {
'root': [
(r'^\s*`define', Comment.Preproc, 'macro'),
(r'^(\s*)(package)(\s+)', bygroups(Text, Keyword.Namespace, Text)),
(r'^(\s*)(import)(\s+)', bygroups(Text, Keyword.Namespace, Text), 'import'),
(r'\n', Text),
(r'\s+', Text),
(r'\\\n', Text), # line continuation
(r'/(\\\n)?/(\n|(.|\n)*?[^\\]\n)', Comment.Single),
(r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
(r'[{}#@]', Punctuation),
(r'L?"', String, 'string'),
(r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char),
(r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float),
(r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
(r'([0-9]+)|(\'h)[0-9a-fA-F]+', Number.Hex),
(r'([0-9]+)|(\'b)[01]+', Number.Bin),
(r'([0-9]+)|(\'d)[0-9]+', Number.Integer),
(r'([0-9]+)|(\'o)[0-7]+', Number.Oct),
(r'\'[01xz]', Number),
(r'\d+[Ll]?', Number.Integer),
(r'\*/', Error),
(r'[~!%^&*+=|?:<>/-]', Operator),
(r'[()\[\],.;\']', Punctuation),
(r'`[a-zA-Z_]\w*', Name.Constant),
(words((
'accept_on', 'alias', 'always', 'always_comb', 'always_ff', 'always_latch',
'and', 'assert', 'assign', 'assume', 'automatic', 'before', 'begin', 'bind', 'bins',
'binsof', 'bit', 'break', 'buf', 'bufif0', 'bufif1', 'byte', 'case', 'casex', 'casez',
'cell', 'chandle', 'checker', 'class', 'clocking', 'cmos', 'config', 'const', 'constraint',
'context', 'continue', 'cover', 'covergroup', 'coverpoint', 'cross', 'deassign',
'default', 'defparam', 'design', 'disable', 'dist', 'do', 'edge', 'else', 'end', 'endcase',
'endchecker', 'endclass', 'endclocking', 'endconfig', 'endfunction', 'endgenerate',
'endgroup', 'endinterface', 'endmodule', 'endpackage', 'endprimitive',
'endprogram', 'endproperty', 'endsequence', 'endspecify', 'endtable',
'endtask', 'enum', 'event', 'eventually', 'expect', 'export', 'extends', 'extern',
'final', 'first_match', 'for', 'force', 'foreach', 'forever', 'fork', 'forkjoin',
'function', 'generate', 'genvar', 'global', 'highz0', 'highz1', 'if', 'iff', 'ifnone',
'ignore_bins', 'illegal_bins', 'implies', 'import', 'incdir', 'include',
'initial', 'inout', 'input', 'inside', 'instance', 'int', 'integer', 'interface',
'intersect', 'join', 'join_any', 'join_none', 'large', 'let', 'liblist', 'library',
'local', 'localparam', 'logic', 'longint', 'macromodule', 'matches', 'medium',
'modport', 'module', 'nand', 'negedge', 'new', 'nexttime', 'nmos', 'nor', 'noshowcancelled',
'not', 'notif0', 'notif1', 'null', 'or', 'output', 'package', 'packed', 'parameter',
'pmos', 'posedge', 'primitive', 'priority', 'program', 'property', 'protected',
'pull0', 'pull1', 'pulldown', 'pullup', 'pulsestyle_ondetect', 'pulsestyle_onevent',
'pure', 'rand', 'randc', 'randcase', 'randsequence', 'rcmos', 'real', 'realtime',
'ref', 'reg', 'reject_on', 'release', 'repeat', 'restrict', 'return', 'rnmos',
'rpmos', 'rtran', 'rtranif0', 'rtranif1', 's_always', 's_eventually', 's_nexttime',
's_until', 's_until_with', 'scalared', 'sequence', 'shortint', 'shortreal',
'showcancelled', 'signed', 'small', 'solve', 'specify', 'specparam', 'static',
'string', 'strong', 'strong0', 'strong1', 'struct', 'super', 'supply0', 'supply1',
'sync_accept_on', 'sync_reject_on', 'table', 'tagged', 'task', 'this', 'throughout',
'time', 'timeprecision', 'timeunit', 'tran', 'tranif0', 'tranif1', 'tri', 'tri0',
'tri1', 'triand', 'trior', 'trireg', 'type', 'typedef', 'union', 'unique', 'unique0',
'unsigned', 'until', 'until_with', 'untyped', 'use', 'uwire', 'var', 'vectored',
'virtual', 'void', 'wait', 'wait_order', 'wand', 'weak', 'weak0', 'weak1', 'while',
'wildcard', 'wire', 'with', 'within', 'wor', 'xnor', 'xor'), suffix=r'\b'),
Keyword),
(words((
'`__FILE__', '`__LINE__', '`begin_keywords', '`celldefine', '`default_nettype',
'`define', '`else', '`elsif', '`end_keywords', '`endcelldefine', '`endif',
'`ifdef', '`ifndef', '`include', '`line', '`nounconnected_drive', '`pragma',
'`resetall', '`timescale', '`unconnected_drive', '`undef', '`undefineall'),
suffix=r'\b'),
Comment.Preproc),
(words((
'$display', '$displayb', '$displayh', '$displayo', '$dumpall', '$dumpfile',
'$dumpflush', '$dumplimit', '$dumpoff', '$dumpon', '$dumpports',
'$dumpportsall', '$dumpportsflush', '$dumpportslimit', '$dumpportsoff',
'$dumpportson', '$dumpvars', '$fclose', '$fdisplay', '$fdisplayb',
'$fdisplayh', '$fdisplayo', '$feof', '$ferror', '$fflush', '$fgetc',
'$fgets', '$finish', '$fmonitor', '$fmonitorb', '$fmonitorh', '$fmonitoro',
'$fopen', '$fread', '$fscanf', '$fseek', '$fstrobe', '$fstrobeb', '$fstrobeh',
'$fstrobeo', '$ftell', '$fwrite', '$fwriteb', '$fwriteh', '$fwriteo',
'$monitor', '$monitorb', '$monitorh', '$monitoro', '$monitoroff',
'$monitoron', '$plusargs', '$random', '$readmemb', '$readmemh', '$rewind',
'$sformat', '$sformatf', '$sscanf', '$strobe', '$strobeb', '$strobeh', '$strobeo',
'$swrite', '$swriteb', '$swriteh', '$swriteo', '$test', '$ungetc',
'$value$plusargs', '$write', '$writeb', '$writeh', '$writememb',
'$writememh', '$writeo'), suffix=r'\b'),
Name.Builtin),
(r'(class)(\s+)', bygroups(Keyword, Text), 'classname'),
(words((
'byte', 'shortint', 'int', 'longint', 'integer', 'time',
'bit', 'logic', 'reg', 'supply0', 'supply1', 'tri', 'triand',
                'trior', 'tri0', 'tri1', 'trireg', 'uwire', 'wire', 'wand', 'wor',
'shortreal', 'real', 'realtime'), suffix=r'\b'),
Keyword.Type),
(r'[a-zA-Z_]\w*:(?!:)', Name.Label),
(r'\$?[a-zA-Z_]\w*', Name),
],
'classname': [
(r'[a-zA-Z_]\w*', Name.Class, '#pop'),
],
'string': [
(r'"', String, '#pop'),
(r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
(r'[^\\"\n]+', String), # all other characters
(r'\\\n', String), # line continuation
(r'\\', String), # stray backslash
],
'macro': [
(r'[^/\n]+', Comment.Preproc),
(r'/[*](.|\n)*?[*]/', Comment.Multiline),
(r'//.*?\n', Comment.Single, '#pop'),
(r'/', Comment.Preproc),
(r'(?<=\\)\n', Comment.Preproc),
(r'\n', Comment.Preproc, '#pop'),
],
'import': [
(r'[\w:]+\*?', Name.Namespace, '#pop')
]
}
def get_tokens_unprocessed(self, text):
for index, token, value in \
RegexLexer.get_tokens_unprocessed(self, text):
# Convention: mark all upper case names as constants
if token is Name:
if value.isupper():
token = Name.Constant
yield index, token, value
class VhdlLexer(RegexLexer):
"""
For VHDL source code.
.. versionadded:: 1.5
"""
name = 'vhdl'
aliases = ['vhdl']
filenames = ['*.vhdl', '*.vhd']
mimetypes = ['text/x-vhdl']
flags = re.MULTILINE | re.IGNORECASE
tokens = {
'root': [
(r'\n', Text),
(r'\s+', Text),
(r'\\\n', Text), # line continuation
(r'--.*?$', Comment.Single),
(r"'(U|X|0|1|Z|W|L|H|-)'", String.Char),
(r'[~!%^&*+=|?:<>/-]', Operator),
(r"'[a-z_]\w*", Name.Attribute),
(r'[()\[\],.;\']', Punctuation),
(r'"[^\n\\"]*"', String),
(r'(library)(\s+)([a-z_]\w*)',
bygroups(Keyword, Text, Name.Namespace)),
(r'(use)(\s+)(entity)', bygroups(Keyword, Text, Keyword)),
(r'(use)(\s+)([a-z_][\w.]*\.)(all)',
bygroups(Keyword, Text, Name.Namespace, Keyword)),
(r'(use)(\s+)([a-z_][\w.]*)',
bygroups(Keyword, Text, Name.Namespace)),
(r'(std|ieee)(\.[a-z_]\w*)',
bygroups(Name.Namespace, Name.Namespace)),
(words(('std', 'ieee', 'work'), suffix=r'\b'),
Name.Namespace),
(r'(entity|component)(\s+)([a-z_]\w*)',
bygroups(Keyword, Text, Name.Class)),
(r'(architecture|configuration)(\s+)([a-z_]\w*)(\s+)'
r'(of)(\s+)([a-z_]\w*)(\s+)(is)',
bygroups(Keyword, Text, Name.Class, Text, Keyword, Text,
Name.Class, Text, Keyword)),
(r'([a-z_]\w*)(:)(\s+)(process|for)',
bygroups(Name.Class, Operator, Text, Keyword)),
(r'(end)(\s+)', bygroups(using(this), Text), 'endblock'),
include('types'),
include('keywords'),
include('numbers'),
(r'[a-z_]\w*', Name),
],
'endblock': [
include('keywords'),
(r'[a-z_]\w*', Name.Class),
(r'(\s+)', Text),
(r';', Punctuation, '#pop'),
],
'types': [
(words((
'boolean', 'bit', 'character', 'severity_level', 'integer', 'time',
'delay_length', 'natural', 'positive', 'string', 'bit_vector',
'file_open_kind', 'file_open_status', 'std_ulogic', 'std_ulogic_vector',
'std_logic', 'std_logic_vector', 'signed', 'unsigned'), suffix=r'\b'),
Keyword.Type),
],
'keywords': [
(words((
'abs', 'access', 'after', 'alias', 'all', 'and',
'architecture', 'array', 'assert', 'attribute', 'begin', 'block',
'body', 'buffer', 'bus', 'case', 'component', 'configuration',
'constant', 'disconnect', 'downto', 'else', 'elsif', 'end',
'entity', 'exit', 'file', 'for', 'function', 'generate',
'generic', 'group', 'guarded', 'if', 'impure', 'in',
'inertial', 'inout', 'is', 'label', 'library', 'linkage',
'literal', 'loop', 'map', 'mod', 'nand', 'new',
'next', 'nor', 'not', 'null', 'of', 'on',
'open', 'or', 'others', 'out', 'package', 'port',
'postponed', 'procedure', 'process', 'pure', 'range', 'record',
'register', 'reject', 'rem', 'return', 'rol', 'ror', 'select',
'severity', 'signal', 'shared', 'sla', 'sll', 'sra',
'srl', 'subtype', 'then', 'to', 'transport', 'type',
'units', 'until', 'use', 'variable', 'wait', 'when',
'while', 'with', 'xnor', 'xor'), suffix=r'\b'),
Keyword),
],
'numbers': [
(r'\d{1,2}#[0-9a-f_]+#?', Number.Integer),
(r'\d+', Number.Integer),
(r'(\d+\.\d*|\.\d+|\d+)E[+-]?\d+', Number.Float),
(r'X"[0-9a-f_]+"', Number.Hex),
(r'O"[0-7_]+"', Number.Oct),
(r'B"[01_]+"', Number.Bin),
],
}
|
the-stack_106_20349
|
import discord, os
from discord.ext import commands
from utils import checks, output, parsing
from aiohttp import ClientSession
import urllib.request
import json
class Stats:
def __init__(self, bot: discord.ext.commands.Bot):
self.bot = bot
@commands.command(pass_context=True)
async def stats(self, ctx, amount=1):
"""
Show stats about HLIX
"""
channel_name = ctx.message.channel.name
allowed_channels = parsing.parse_json('config.json')['command_channels'][ctx.command.name]
if channel_name not in allowed_channels:
return
headers={"user-agent" : "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.78 Safari/537.36"}
try:
async with ClientSession() as session:
async with session.get("", headers=headers) as response:
responseRaw = await response.read()
priceData = json.loads(responseRaw)
for item in priceData:
embed= discord.Embed(colour=0x00FF00)
embed.set_author(name='HLIX Information', icon_url="https://i.imgur.com/ZnwpZ3r.png")
embed.add_field(name="Price (USD)", value="${}".format(item['price_usd:']))
embed.add_field(name="Price (BTC)", value="{} BTC".format(item['price_btc:']))
embed.add_field(name='\u200b',value='\u200b')
embed.add_field(name="Volume (USD)", value="${}".format(item['24h_volume_usd:']))
embed.add_field(name="Market Cap", value="${}".format(item['market_cap_usd:']))
embed.add_field(name='\u200b',value='\u200b')
embed.add_field(name="% 1h", value="{}%".format(item['percent_change_1h:']))
embed.add_field(name="% 24h", value="{}%".format(item['percent_change_24h:']))
embed.add_field(name="% 7d", value="{}%".format(item['percent_change_7d:']))
embed.add_field(name="Circulating Supply", value="{} HLIX".format(item['available_supply:']))
embed.add_field(name="Total Supply", value="{} HLIX".format(item['total_supply:']))
embed.add_field(name="Maximum Supply", value="500,000,000 HLIX")
embed.set_footer(text="", icon_url="https://i.imgur.com/ZnwpZ3r.png")
await self.bot.say(embed=embed)
except:
await self.bot.say(":warning: Error fetching prices!")
def setup(bot):
bot.add_cog(Stats(bot))
|
the-stack_106_20350
|
# Copyright 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import google.auth
import google.datalab
import pytest
import IPython
from IPython.testing import tools
from IPython.terminal import interactiveshell
# Get default project
_, PROJECT_ID = google.auth.default()
# Set Datalab project ID
context = google.datalab.Context.default()
context.set_project_id(PROJECT_ID)
@pytest.fixture(scope='session')
def ipython_interactive():
config = tools.default_config()
config.TerminalInteractiveShell.simple_prompt = True
shell = interactiveshell.TerminalInteractiveShell.instance(config=config)
return shell
@pytest.fixture
def to_delete():
from google.cloud import bigquery
client = bigquery.Client()
doomed = []
yield doomed
for dataset_id in doomed:
dataset = client.get_dataset(dataset_id)
client.delete_dataset(dataset, delete_contents=True)
def _set_up_ipython(extension):
ip = IPython.get_ipython()
ip.extension_manager.load_extension(extension)
return ip
def _strip_region_tags(sample_text):
"""Remove blank lines and region tags from sample text"""
magic_lines = [line for line in sample_text.split('\n')
if len(line) > 0 and '# [' not in line]
return '\n'.join(magic_lines)
def test_datalab_query_magic(ipython_interactive):
import google.datalab.bigquery as bq
ip = _set_up_ipython('google.datalab.kernel')
sample = """
# [START bigquery_migration_datalab_query_magic]
%%bq query
SELECT word, SUM(word_count) as count
FROM `bigquery-public-data.samples.shakespeare`
GROUP BY word
ORDER BY count ASC
LIMIT 100
# [END bigquery_migration_datalab_query_magic]
"""
ip.run_cell(_strip_region_tags(sample))
results = ip.user_ns["_"] # Last returned object in notebook session
assert isinstance(results, bq.QueryResultsTable)
df = results.to_dataframe()
assert len(df) == 100
def test_client_library_query_magic(ipython_interactive):
import pandas
ip = _set_up_ipython('google.cloud.bigquery')
sample = """
# [START bigquery_migration_client_library_query_magic]
%%bigquery
SELECT word, SUM(word_count) as count
FROM `bigquery-public-data.samples.shakespeare`
GROUP BY word
ORDER BY count ASC
LIMIT 100
# [END bigquery_migration_client_library_query_magic]
"""
ip.run_cell(_strip_region_tags(sample))
df = ip.user_ns["_"] # Last returned object in notebook session
assert isinstance(df, pandas.DataFrame)
assert len(df) == 100
def test_datalab_query_magic_results_variable(ipython_interactive):
ip = _set_up_ipython('google.datalab.kernel')
sample = """
# [START bigquery_migration_datalab_query_magic_define_query]
%%bq query -n my_query
SELECT name FROM `bigquery-public-data.usa_names.usa_1910_current`
WHERE state = "TX"
LIMIT 100
# [END bigquery_migration_datalab_query_magic_define_query]
"""
ip.run_cell(_strip_region_tags(sample))
sample = """
# [START bigquery_migration_datalab_execute_query]
import google.datalab.bigquery as bq
my_variable = my_query.execute().result().to_dataframe()
# [END bigquery_migration_datalab_execute_query]
"""
ip.run_cell(_strip_region_tags(sample))
variable_name = "my_variable"
assert variable_name in ip.user_ns # verify that variable exists
my_variable = ip.user_ns[variable_name]
assert len(my_variable) == 100
ip.user_ns.pop(variable_name) # clean up variable
def test_client_library_query_magic_results_variable(ipython_interactive):
ip = _set_up_ipython('google.cloud.bigquery')
sample = """
# [START bigquery_migration_client_library_query_magic_results_variable]
%%bigquery my_variable
SELECT name FROM `bigquery-public-data.usa_names.usa_1910_current`
WHERE state = "TX"
LIMIT 100
# [END bigquery_migration_client_library_query_magic_results_variable]
"""
ip.run_cell(_strip_region_tags(sample))
variable_name = "my_variable"
assert variable_name in ip.user_ns # verify that variable exists
my_variable = ip.user_ns[variable_name]
assert len(my_variable) == 100
ip.user_ns.pop(variable_name) # clean up variable
def test_datalab_magic_parameterized_query(ipython_interactive):
import pandas
ip = _set_up_ipython('google.datalab.kernel')
sample = """
# [START bigquery_migration_datalab_magic_define_parameterized_query]
%%bq query -n my_query
SELECT word, SUM(word_count) as count
FROM `bigquery-public-data.samples.shakespeare`
WHERE corpus = @corpus_name
GROUP BY word
ORDER BY count ASC
LIMIT @limit
# [END bigquery_migration_datalab_magic_define_parameterized_query]
"""
ip.run_cell(_strip_region_tags(sample))
sample = """
# [START bigquery_migration_datalab_magic_query_params]
corpus_name = "hamlet"
limit = 10
# [END bigquery_migration_datalab_magic_query_params]
"""
ip.run_cell(_strip_region_tags(sample))
sample = """
# [START bigquery_migration_datalab_magic_execute_parameterized_query]
%%bq execute -q my_query --to-dataframe
parameters:
- name: corpus_name
type: STRING
value: $corpus_name
- name: limit
type: INTEGER
value: $limit
# [END bigquery_migration_datalab_magic_execute_parameterized_query]
"""
ip.run_cell(_strip_region_tags(sample))
df = ip.user_ns["_"] # Retrieves last returned object in notebook session
assert isinstance(df, pandas.DataFrame)
assert len(df) == 10
def test_client_library_magic_parameterized_query(ipython_interactive):
import pandas
ip = _set_up_ipython('google.cloud.bigquery')
sample = """
# [START bigquery_migration_client_library_magic_query_params]
params = {"corpus_name": "hamlet", "limit": 10}
# [END bigquery_migration_client_library_magic_query_params]
"""
ip.run_cell(_strip_region_tags(sample))
sample = """
# [START bigquery_migration_client_library_magic_parameterized_query]
%%bigquery --params $params
SELECT word, SUM(word_count) as count
FROM `bigquery-public-data.samples.shakespeare`
WHERE corpus = @corpus_name
GROUP BY word
ORDER BY count ASC
LIMIT @limit
# [END bigquery_migration_client_library_magic_parameterized_query]
"""
ip.run_cell(_strip_region_tags(sample))
df = ip.user_ns["_"] # Retrieves last returned object in notebook session
assert isinstance(df, pandas.DataFrame)
assert len(df) == 10
def test_datalab_list_tables_magic(ipython_interactive):
ip = _set_up_ipython('google.datalab.kernel')
sample = """
# [START bigquery_migration_datalab_list_tables_magic]
%bq tables list --dataset bigquery-public-data.samples
# [END bigquery_migration_datalab_list_tables_magic]
"""
ip.run_cell(_strip_region_tags(sample))
# Retrieves last returned object in notebook session
html_element = ip.user_ns["_"]
assert "shakespeare" in html_element.data
def test_datalab_query():
# [START bigquery_migration_datalab_query]
import google.datalab.bigquery as bq
sql = """
SELECT name FROM `bigquery-public-data.usa_names.usa_1910_current`
WHERE state = "TX"
LIMIT 100
"""
df = bq.Query(sql).execute().result().to_dataframe()
# [END bigquery_migration_datalab_query]
assert len(df) == 100
def test_client_library_query():
# [START bigquery_migration_client_library_query]
from google.cloud import bigquery
client = bigquery.Client()
sql = """
SELECT name FROM `bigquery-public-data.usa_names.usa_1910_current`
WHERE state = "TX"
LIMIT 100
"""
df = client.query(sql).to_dataframe()
# [END bigquery_migration_client_library_query]
assert len(df) == 100
def test_datalab_load_table_from_gcs_csv(to_delete):
# [START bigquery_migration_datalab_load_table_from_gcs_csv]
import google.datalab.bigquery as bq
# Create the dataset
dataset_id = 'import_sample'
# [END bigquery_migration_datalab_load_table_from_gcs_csv]
# Use unique dataset ID to avoid collisions when running tests
dataset_id = 'test_dataset_{}'.format(int(time.time() * 1000))
to_delete.append(dataset_id)
# [START bigquery_migration_datalab_load_table_from_gcs_csv]
bq.Dataset(dataset_id).create()
# Create the table
schema = [
{'name': 'name', 'type': 'STRING'},
{'name': 'post_abbr', 'type': 'STRING'},
]
table = bq.Table(
'{}.us_states'.format(dataset_id)).create(schema=schema)
table.load(
'gs://cloud-samples-data/bigquery/us-states/us-states.csv',
mode='append',
source_format='csv',
csv_options=bq.CSVOptions(skip_leading_rows=1)
) # Waits for the job to complete
# [END bigquery_migration_datalab_load_table_from_gcs_csv]
assert table.length == 50
def test_client_library_load_table_from_gcs_csv(to_delete):
# [START bigquery_migration_client_library_load_table_from_gcs_csv]
from google.cloud import bigquery
client = bigquery.Client(location='US')
# Create the dataset
dataset_id = 'import_sample'
# [END bigquery_migration_client_library_load_table_from_gcs_csv]
# Use unique dataset ID to avoid collisions when running tests
dataset_id = 'test_dataset_{}'.format(int(time.time() * 1000))
to_delete.append(dataset_id)
# [START bigquery_migration_client_library_load_table_from_gcs_csv]
dataset = client.create_dataset(dataset_id)
# Create the table
job_config = bigquery.LoadJobConfig(
schema=[
bigquery.SchemaField('name', 'STRING'),
bigquery.SchemaField('post_abbr', 'STRING')
],
skip_leading_rows=1,
# The source format defaults to CSV, so the line below is optional.
source_format=bigquery.SourceFormat.CSV
)
load_job = client.load_table_from_uri(
'gs://cloud-samples-data/bigquery/us-states/us-states.csv',
dataset.table('us_states'),
job_config=job_config
)
load_job.result() # Waits for table load to complete.
# [END bigquery_migration_client_library_load_table_from_gcs_csv]
table = client.get_table(dataset.table('us_states'))
assert table.num_rows == 50
def test_datalab_load_table_from_dataframe(to_delete):
# [START bigquery_migration_datalab_load_table_from_dataframe]
import google.datalab.bigquery as bq
import pandas
# Create the dataset
dataset_id = 'import_sample'
# [END bigquery_migration_datalab_load_table_from_dataframe]
# Use unique dataset ID to avoid collisions when running tests
dataset_id = 'test_dataset_{}'.format(int(time.time() * 1000))
to_delete.append(dataset_id)
# [START bigquery_migration_datalab_load_table_from_dataframe]
bq.Dataset(dataset_id).create()
# Create the table and load the data
dataframe = pandas.DataFrame([
{'title': 'The Meaning of Life', 'release_year': 1983},
{'title': 'Monty Python and the Holy Grail', 'release_year': 1975},
{'title': 'Life of Brian', 'release_year': 1979},
{
'title': 'And Now for Something Completely Different',
'release_year': 1971
},
])
schema = bq.Schema.from_data(dataframe)
table = bq.Table(
'{}.monty_python'.format(dataset_id)).create(schema=schema)
    table.insert(dataframe)  # Starts streaming insert of data
# [END bigquery_migration_datalab_load_table_from_dataframe]
# The Datalab library uses tabledata().insertAll() to load data from
# pandas DataFrames to tables. Because it can take a long time for the rows
# to be available in the table, this test does not assert on the number of
# rows in the destination table after the job is run. If errors are
# encountered during the insertion, this test will fail.
# See https://cloud.google.com/bigquery/streaming-data-into-bigquery
def test_client_library_load_table_from_dataframe(to_delete):
# [START bigquery_migration_client_library_load_table_from_dataframe]
from google.cloud import bigquery
import pandas
client = bigquery.Client(location='US')
dataset_id = 'import_sample'
# [END bigquery_migration_client_library_load_table_from_dataframe]
# Use unique dataset ID to avoid collisions when running tests
dataset_id = 'test_dataset_{}'.format(int(time.time() * 1000))
to_delete.append(dataset_id)
# [START bigquery_migration_client_library_load_table_from_dataframe]
dataset = client.create_dataset(dataset_id)
# Create the table and load the data
dataframe = pandas.DataFrame([
{'title': 'The Meaning of Life', 'release_year': 1983},
{'title': 'Monty Python and the Holy Grail', 'release_year': 1975},
{'title': 'Life of Brian', 'release_year': 1979},
{
'title': 'And Now for Something Completely Different',
'release_year': 1971
},
])
table_ref = dataset.table('monty_python')
load_job = client.load_table_from_dataframe(dataframe, table_ref)
load_job.result() # Waits for table load to complete.
# [END bigquery_migration_client_library_load_table_from_dataframe]
table = client.get_table(table_ref)
assert table.num_rows == 4
|
the-stack_106_20352
|
from django.db import models
from django.forms import CharField
from django.core.exceptions import ValidationError
from connector import ElfinderConnector
class ElfinderFile(object):
"""
This class represents an Elfinder file.
"""
def __init__(self, hash_, optionset):
self.hash = hash_
self.optionset = optionset
self._info = None
def _get_info(self):
if self._info is None:
if not self.hash:
self._info = {}
else:
try:
from conf import settings as ls
connector = ElfinderConnector(ls.ELFINDER_CONNECTOR_OPTION_SETS[self.optionset])
info = connector.execute('info', targets = [self.hash], options=True)['files'][0]
#get image dimensions
if 'mime' in info and info['mime'].startswith('image'):
info['dim'] = connector.execute('dim', target=self.hash)['dim']
#calculate thumbnail url
if 'tmb' in info and 'tmbUrl' in info:
info['tmb'] = '%s%s' % (info['tmbUrl'], info['tmb'])
del info['tmbUrl']
                    # the `url` key is the equivalent of the elfinder widget's `rootUrl`
if 'url' in info:
info['rootUrl'] = info['url']
del info['url']
if 'archivers' in info:
del info['archivers']
if 'extract' in info:
del info['extract']
self._info = info
                except Exception:
from django.utils.translation import ugettext as _
self._info = { 'error' : _('This file is no longer valid') }
return self._info
@property
def url(self):
"""
Get the file url
"""
info = self._get_info()
return '%s%s' % (info['rootUrl'], '/'.join(info['path'].split(info['separator'])[1:])) if info else ''
@property
def info(self):
"""
Returns:
a **dictionary** holding information about the file,
as returned by the volume driver.
"""
return self._get_info()
def __unicode__(self):
return self.hash
class ElfinderFormField(CharField):
"""
Override the standard CharField form field
to set :class:`elfinder.widgets.ElfinderWidget` as the default widget.
"""
def __init__(self, optionset, start_path, *args, **kwargs):
from widgets import ElfinderWidget
super(ElfinderFormField, self).__init__(*args, **kwargs)
#TODO: elfinder widget should be initialized using possible client options from model field declaration
self.optionset = optionset
self.widget = ElfinderWidget(optionset, start_path)
def to_python(self, value):
"""
Convert ``value`` to an :class:`elfinder.fields.ElfinderFile` object.
"""
if isinstance(value, ElfinderFile):
return value
return ElfinderFile(hash_=value, optionset=self.optionset)
def clean(self, value):
"""
Override the default CharField validation to validate the
ElfinderFile hash string before converting it to an ElfinderField
object. Finally, return a cleaned ElfinderFile object.
"""
self.validate(value)
self.run_validators(value)
value = self.to_python(value)
return value
class ElfinderField(models.Field):
"""
Custom model field holding an :class:`elfinder.fields.ElfinderFile` object.
"""
description = "An elfinder file model field."
__metaclass__ = models.SubfieldBase
def __init__(self, optionset='default', start_path=None, *args, **kwargs):
self.optionset = optionset
self.start_path = start_path
        if 'max_length' not in kwargs:
kwargs['max_length'] = 100 #default field length
super(ElfinderField, self).__init__(*args, **kwargs)
def get_internal_type(self):
"""
This lets Django know how to handle the field
"""
return "CharField"
def to_python(self, value):
"""
Convert ``value`` to an :class:`elfinder.fields.ElfinderFile` object.
"""
if isinstance(value, ElfinderFile):
return value
return ElfinderFile(hash_=value, optionset=self.optionset)
def get_prep_value(self, value):
"""
        Overridden method to return a string representation of
the :class:`elfinder.fields.ElfinderFile`.
"""
if isinstance(value, ElfinderFile):
return value.hash
return value
def get_prep_lookup(self, lookup_type, value):
"""
        Overridden method to disallow
``year``, ``month`` and ``day`` queries
"""
if lookup_type in ['year', 'month', 'day']:
raise TypeError('Lookup type %r not supported.' % lookup_type)
return super(ElfinderField, self).get_prep_lookup(lookup_type, value)
def formfield(self, **kwargs):
"""
        Overridden method to set the form field defaults.
See :class:`elfinder.fields.ElfinderFormField`
"""
defaults = {
'form_class': ElfinderFormField,
'optionset' : self.optionset,
'start_path' : self.start_path
}
defaults.update(kwargs)
return super(ElfinderField, self).formfield(**defaults)
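# --- Usage sketch (added for illustration; not part of the original module).
# A hypothetical helper showing how the model field converts between the stored
# hash string and the ElfinderFile wrapper; no connector access is needed for
# this round trip. The helper name and the hash value are made up.
def _demo_elfinder_field_roundtrip():
    field = ElfinderField(optionset='default')
    elfinder_file = field.to_python('l1_dGVzdC5qcGc')      # hypothetical file hash
    assert isinstance(elfinder_file, ElfinderFile)
    # get_prep_value() recovers the raw hash that gets stored in the database
    assert field.get_prep_value(elfinder_file) == 'l1_dGVzdC5qcGc'
    return elfinder_file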
|
the-stack_106_20353
|
"""
Numpy API for xhistogram.
"""
import dask
import numpy as np
from functools import reduce
from collections.abc import Iterable
from numpy import (
searchsorted,
bincount,
reshape,
ravel_multi_index,
concatenate,
broadcast_arrays,
)
# `range` is used below as an argument name, so save the builtin under another name.
_range = range
try:
import dask.array as dsa
has_dask = True
except ImportError:
has_dask = False
def _any_dask_array(*args):
if not has_dask:
return False
else:
return any(isinstance(a, dsa.core.Array) for a in args)
def _ensure_correctly_formatted_bins(bins, N_expected):
# TODO: This could be done better / more robustly
if bins is None:
raise ValueError("bins must be provided")
if isinstance(bins, (int, str, np.ndarray)):
bins = N_expected * [bins]
if len(bins) == N_expected:
return bins
else:
raise ValueError(
"The number of bin definitions doesn't match the number of args"
)
def _ensure_correctly_formatted_range(range_, N_expected):
# TODO: This could be done better / more robustly
def _iterable_nested(x):
return all(isinstance(i, Iterable) for i in x)
if range_ is not None:
if (len(range_) == 2) & (not _iterable_nested(range_)):
return N_expected * [range_]
elif N_expected == len(range_):
if all(len(x) == 2 for x in range_):
return range_
else:
raise ValueError(
"range should be provided as (lower_range, upper_range). In the "
+ "case of multiple args, range should be a list of such tuples"
)
else:
raise ValueError("The number of ranges doesn't match the number of args")
else:
return N_expected * [range_]
def _bincount_2d(bin_indices, weights, N, hist_shapes):
# a trick to apply bincount on an axis-by-axis basis
# https://stackoverflow.com/questions/40591754/vectorizing-numpy-bincount
# https://stackoverflow.com/questions/40588403/vectorized-searchsorted-numpy
M = bin_indices.shape[0]
if weights is not None:
weights = weights.ravel()
bin_indices_offset = (bin_indices + (N * np.arange(M)[:, None])).ravel()
bc_offset = bincount(bin_indices_offset, weights=weights, minlength=N * M)
final_shape = (M,) + tuple(hist_shapes)
return bc_offset.reshape(final_shape)
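# Illustration of the offset trick used in _bincount_2d (added for clarity; a
# hypothetical helper, not part of the original module): by offsetting the bin
# indices of row m by m * N, a single np.bincount call produces M separate
# histograms at once.
def _demo_bincount_offset_trick():
    bin_indices = np.array([[0, 1, 1],
                            [2, 2, 0]])          # M=2 rows, values in [0, N)
    M, N = bin_indices.shape[0], 3
    offset = (bin_indices + N * np.arange(M)[:, None]).ravel()
    counts = np.bincount(offset, minlength=N * M).reshape(M, N)
    # counts == [[1, 2, 0],
    #            [1, 0, 2]]
    return counts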
def _bincount_loop(bin_indices, weights, N, hist_shapes, block_chunks):
M = bin_indices.shape[0]
assert sum(block_chunks) == M
block_counts = []
# iterate over chunks
bounds = np.cumsum((0,) + block_chunks)
for m_start, m_end in zip(bounds[:-1], bounds[1:]):
bin_indices_block = bin_indices[m_start:m_end]
weights_block = weights[m_start:m_end] if weights is not None else None
bc_block = _bincount_2d(bin_indices_block, weights_block, N, hist_shapes)
block_counts.append(bc_block)
all_counts = concatenate(block_counts)
final_shape = (bin_indices.shape[0],) + tuple(hist_shapes)
return all_counts.reshape(final_shape)
def _determine_block_chunks(bin_indices, block_size):
M, N = bin_indices.shape
if block_size is None:
return (M,)
if block_size == "auto":
try:
# dask arrays - use the pre-existing chunks
chunks = bin_indices.chunks
return chunks[0]
except AttributeError:
# automatically pick a chunk size
            # this is a heuristic without much basis
_MAX_CHUNK_SIZE = 10_000_000
block_size = min(_MAX_CHUNK_SIZE // N, M)
assert isinstance(block_size, int)
num_chunks = M // block_size
block_chunks = num_chunks * (block_size,)
residual = M % block_size
if residual:
block_chunks += (residual,)
assert sum(block_chunks) == M
return block_chunks
def _dispatch_bincount(bin_indices, weights, N, hist_shapes, block_size=None):
# block_chunks is like a dask chunk, a tuple that divides up the first
# axis of bin_indices
block_chunks = _determine_block_chunks(bin_indices, block_size)
if len(block_chunks) == 1:
# single global chunk, don't need a loop over chunks
return _bincount_2d(bin_indices, weights, N, hist_shapes)
else:
return _bincount_loop(bin_indices, weights, N, hist_shapes, block_chunks)
def _bincount_2d_vectorized(
*args, bins=None, weights=None, density=False, right=False, block_size=None
):
"""Calculate the histogram independently on each row of a 2D array"""
N_inputs = len(args)
a0 = args[0]
    # consistency checks for inputs
for a, b in zip(args, bins):
assert a.ndim == 2
assert b.ndim == 1
assert a.shape == a0.shape
if weights is not None:
assert weights.shape == a0.shape
nrows, ncols = a0.shape
nbins = [len(b) for b in bins]
hist_shapes = [nb + 1 for nb in nbins]
# The maximum possible value of searchsorted is nbins
# For _searchsorted_inclusive:
# - 0 corresponds to a < b[0]
# - i corresponds to b[i-1] <= a < b[i]
# - nbins-1 corresponds to b[-2] <= a <= b[-1]
# - nbins corresponds to a >= b[-1]
def _searchsorted_inclusive(a, b):
"""
Like `searchsorted`, but where the last bin is also right-edge inclusive.
"""
# Similar to implementation in np.histogramdd
# see https://github.com/numpy/numpy/blob/9c98662ee2f7daca3f9fae9d5144a9a8d3cabe8c/numpy/lib/histograms.py#L1056
# This assumes the bins (b) are sorted
bin_indices = searchsorted(b, a, side="right")
on_edge = a == b[-1]
# Shift these points one bin to the left.
bin_indices[on_edge] -= 1
return bin_indices
each_bin_indices = [_searchsorted_inclusive(a, b) for a, b in zip(args, bins)]
# product of the bins gives the joint distribution
if N_inputs > 1:
bin_indices = ravel_multi_index(each_bin_indices, hist_shapes)
else:
bin_indices = each_bin_indices[0]
# total number of unique bin indices
N = reduce(lambda x, y: x * y, hist_shapes)
bin_counts = _dispatch_bincount(
bin_indices, weights, N, hist_shapes, block_size=block_size
)
# just throw out everything outside of the bins, as np.histogram does
# TODO: make this optional?
slices = (slice(None),) + (N_inputs * (slice(1, -1),))
bin_counts = bin_counts[slices]
return bin_counts
def _bincount(
*all_arrays, weights=False, axis=None, bins=None, density=None, block_size=None
):
a0 = all_arrays[0]
do_full_array = (axis is None) or (set(axis) == set(_range(a0.ndim)))
if do_full_array:
kept_axes_shape = (1,) * a0.ndim
else:
kept_axes_shape = tuple(
[a0.shape[i] if i not in axis else 1 for i in _range(a0.ndim)]
)
def reshape_input(a):
if do_full_array:
d = a.ravel()[None, :]
else:
# reshape the array to 2D
# axis 0: preserved axis after histogram
# axis 1: calculate histogram along this axis
new_pos = tuple(_range(-len(axis), 0))
c = np.moveaxis(a, axis, new_pos)
split_idx = c.ndim - len(axis)
dims_0 = c.shape[:split_idx]
# assert dims_0 == kept_axes_shape
dims_1 = c.shape[split_idx:]
new_dim_0 = np.prod(dims_0)
new_dim_1 = np.prod(dims_1)
d = reshape(c, (new_dim_0, new_dim_1))
return d
all_arrays_reshaped = [reshape_input(a) for a in all_arrays]
if weights:
weights_array = all_arrays_reshaped.pop()
else:
weights_array = None
bin_counts = _bincount_2d_vectorized(
*all_arrays_reshaped,
bins=bins,
weights=weights_array,
density=density,
block_size=block_size,
)
final_shape = kept_axes_shape + bin_counts.shape[1:]
bin_counts = reshape(bin_counts, final_shape)
return bin_counts
def histogram(
*args,
bins=None,
range=None,
axis=None,
weights=None,
density=False,
block_size="auto",
):
"""Histogram applied along specified axis / axes.
Parameters
----------
args : array_like
Input data. The number of input arguments determines the dimensionality
of the histogram. For example, two arguments produce a 2D histogram.
All args must have the same size.
bins : int, str or numpy array or a list of ints, strs and/or arrays, optional
If a list, there should be one entry for each item in ``args``.
The bin specifications are as follows:
* If int; the number of bins for all arguments in ``args``.
* If str; the method used to automatically calculate the optimal bin width
for all arguments in ``args``, as defined by numpy `histogram_bin_edges`.
* If numpy array; the bin edges for all arguments in ``args``.
* If a list of ints, strs and/or arrays; the bin specification as
above for every argument in ``args``.
When bin edges are specified, all but the last (righthand-most) bin include
the left edge and exclude the right edge. The last bin includes both edges.
A TypeError will be raised if args contains dask arrays and bins are not
specified explicitly as an array or list of arrays. This is because other
bin specifications trigger computation.
range : (float, float) or a list of (float, float), optional
If a list, there should be one entry for each item in ``args``.
The range specifications are as follows:
* If (float, float); the lower and upper range(s) of the bins for all
arguments in ``args``. Values outside the range are ignored. The first
element of the range must be less than or equal to the second. `range`
affects the automatic bin computation as well. In this case, while bin
width is computed to be optimal based on the actual data within `range`,
the bin count will fill the entire range including portions containing
no data.
* If a list of (float, float); the ranges as above for every argument in
``args``.
* If not provided, range is simply ``(arg.min(), arg.max())`` for each
arg.
axis : None or int or tuple of ints, optional
Axis or axes along which the histogram is computed. The default is to
compute the histogram of the flattened array
weights : array_like, optional
An array of weights, of the same shape as `a`. Each value in
`a` only contributes its associated weight towards the bin count
(instead of 1). If `density` is True, the weights are
normalized, so that the integral of the density over the range
remains 1.
density : bool, optional
If ``False``, the result will contain the number of samples in
each bin. If ``True``, the result is the value of the
probability *density* function at the bin, normalized such that
the *integral* over the range is 1. Note that the sum of the
histogram values will not be equal to 1 unless bins of unity
width are chosen; it is not a probability *mass* function.
block_size : int or 'auto', optional
A parameter which governs the algorithm used to compute the histogram.
Using a nonzero value splits the histogram calculation over the
non-histogram axes into blocks of size ``block_size``, iterating over
them with a loop (numpy inputs) or in parallel (dask inputs). If
``'auto'``, blocks will be determined either by the underlying dask
chunks (dask inputs) or an experimental built-in heuristic (numpy inputs).
Returns
-------
hist : array
The values of the histogram.
bin_edges : list of arrays
Return the bin edges for each input array.
See Also
--------
numpy.histogram, numpy.bincount, numpy.searchsorted
"""
a0 = args[0]
ndim = a0.ndim
n_inputs = len(args)
is_dask_array = any([dask.is_dask_collection(a) for a in args])
if axis is not None:
axis = np.atleast_1d(axis)
assert axis.ndim == 1
axis_normed = []
for ax in axis:
if ax >= 0:
ax_positive = ax
else:
ax_positive = ndim + ax
assert ax_positive < ndim, "axis must be less than ndim"
axis_normed.append(ax_positive)
axis = [int(i) for i in axis_normed]
all_arrays = list(args)
n_inputs = len(all_arrays)
if weights is not None:
all_arrays.append(weights)
has_weights = True
else:
has_weights = False
dtype = "i8" if not has_weights else weights.dtype
# Broadcast input arrays. Note that this dispatches to `dsa.broadcast_arrays` as necessary.
all_arrays = broadcast_arrays(*all_arrays)
# Since all arrays now have the same shape, just get the axes of the first.
input_axes = tuple(_range(all_arrays[0].ndim))
# Some sanity checks and format bins and range correctly
bins = _ensure_correctly_formatted_bins(bins, n_inputs)
range = _ensure_correctly_formatted_range(range, n_inputs)
# histogram_bin_edges triggers computation on dask arrays. It would be possible
# to write a version of this that doesn't trigger when `range` is provided, but
# for now let's just use np.histogram_bin_edges
if is_dask_array:
if not all(isinstance(b, np.ndarray) for b in bins):
raise TypeError(
"When using dask arrays, bins must be provided as numpy array(s) of edges"
)
else:
bins = [
np.histogram_bin_edges(
a, bins=b, range=r, weights=all_arrays[-1] if has_weights else None
)
for a, b, r in zip(all_arrays, bins, range)
]
bincount_kwargs = dict(
weights=has_weights,
axis=axis,
bins=bins,
density=density,
block_size=block_size,
)
# remove these axes from the inputs
if axis is not None:
drop_axes = tuple(axis)
else:
drop_axes = input_axes
if _any_dask_array(weights, *all_arrays):
# We should be able to just apply the bin_count function to every
# block and then sum over all blocks to get the total bin count.
# The main challenge is to figure out the chunk shape that will come
# out of _bincount. We might also need to add dummy dimensions to sum
# over in the _bincount function
import dask.array as dsa
# Important note from blockwise docs
# > Any index, like i missing from the output index is interpreted as a contraction...
# > In the case of a contraction the passed function should expect an iterable of blocks
# > on any array that holds that index.
# This means that we need to have all the input indexes present in the output index
# However, they will be reduced to singleton (len 1) dimensions
adjust_chunks = {i: (lambda x: 1) for i in drop_axes}
new_axes_start = max(input_axes) + 1
new_axes = {new_axes_start + i: len(bin) - 1 for i, bin in enumerate(bins)}
out_index = input_axes + tuple(new_axes)
blockwise_args = []
for arg in all_arrays:
blockwise_args.append(arg)
blockwise_args.append(input_axes)
bin_counts = dsa.blockwise(
_bincount,
out_index,
*blockwise_args,
new_axes=new_axes,
adjust_chunks=adjust_chunks,
meta=np.array((), dtype),
**bincount_kwargs,
)
# sum over the block dims
bin_counts = bin_counts.sum(drop_axes)
else:
# drop the extra axis used for summing over blocks
bin_counts = _bincount(*all_arrays, **bincount_kwargs).squeeze(drop_axes)
if density:
# Normalize by dividing by bin counts and areas such that all the
# histogram data integrated over all dimensions = 1
bin_widths = [np.diff(b) for b in bins]
if n_inputs == 1:
bin_areas = bin_widths[0]
elif n_inputs == 2:
bin_areas = np.outer(*bin_widths)
else:
# Slower, but N-dimensional logic
bin_areas = np.prod(np.ix_(*bin_widths))
# Sum over the last n_inputs axes, which correspond to the bins. All other axes
        # are "bystander" axes. Sums must be done independently for each bystander axis
# so that nans are dealt with correctly (#51)
bin_axes = tuple(_range(-n_inputs, 0))
bin_count_sums = bin_counts.sum(axis=bin_axes)
bin_count_sums_shape = bin_count_sums.shape + len(bin_axes) * (1,)
h = bin_counts / bin_areas / reshape(bin_count_sums, bin_count_sums_shape)
else:
h = bin_counts
return h, bins
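# --- Usage sketch (added for illustration; not part of the original xhistogram
# module). A minimal example of the `histogram` function defined above: it bins
# random data along the last axis while keeping the first axis as a bystander
# dimension. The variable names below are illustrative only.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    data = rng.standard_normal((4, 1000))
    edges = np.linspace(-4.0, 4.0, 17)              # 16 bins
    counts, bin_edges = histogram(data, bins=[edges], axis=1)
    print(counts.shape)        # (4, 16): one histogram per row of `data`
    print(counts.sum(axis=1))  # close to 1000; samples outside the range are dropped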
|
the-stack_106_20354
|
from lxml import objectify, etree
import pytest
from controlled_vocabularies.vocabulary_handler import VocabularyHandler
from . import factories
pytestmark = pytest.mark.django_db
# Namespaces
RDF = 'http://www.w3.org/1999/02/22-rdf-syntax-ns#'
DC = 'http://purl.org/dc/elements/1.1/'
RDFS = 'http://www.w3.org/2000/01/rdf-schema#'
NS = {
'rdf': RDF,
'dc': DC,
'rdfs': RDFS
}
PURL = 'http://purl.org/NET/UNTL/vocabularies/'
def test_xml_response():
vocab = factories.VocabularyFactory()
vocab_handler = VocabularyHandler().xml_response(vocab)
assert vocab_handler.vocab == vocab
assert isinstance(vocab_handler, VocabularyHandler)
@pytest.fixture
def vocab_file_xml():
prop = factories.PropertyFactory(property_name='description')
vocab_handler = VocabularyHandler().xml_response(prop.term_key.vocab_list)
return prop, etree.fromstring(vocab_handler.vocab_file)
def test_create_xml_RDF_element(vocab_file_xml):
_, root = vocab_file_xml
assert root.tag == '{{{}}}RDF'.format(RDF)
def test_create_xml_Description_element(vocab_file_xml):
prop, root = vocab_file_xml
element = root.xpath('rdf:Description', namespaces=NS)[0]
attrib = element.get('{{{}}}about'.format(RDF))
assert attrib == PURL + prop.term_key.vocab_list.name
def test_create_xml_title_element(vocab_file_xml):
prop, root = vocab_file_xml
element = root.xpath('rdf:Description/dc:title', namespaces=NS)[0]
assert element.text == prop.term_key.vocab_list.label
def test_create_xml_publisher_element(vocab_file_xml):
_, root = vocab_file_xml
element = root.xpath('rdf:Description/dc:publisher', namespaces=NS)[0]
assert element.text == 'University of North Texas Libraries'
def test_create_xml_Description_subelement_description(vocab_file_xml):
prop, root = vocab_file_xml
element = root.xpath('rdf:Description/dc:description', namespaces=NS)[0]
assert element.text == prop.term_key.vocab_list.definition
def test_create_xml_language_element(vocab_file_xml):
_, root = vocab_file_xml
element = root.xpath('rdf:Description/dc:language', namespaces=NS)[0]
assert element.text == 'English'
def test_create_xml_date_element(vocab_file_xml):
prop, root = vocab_file_xml
element = root.xpath('rdf:Description/dc:date', namespaces=NS)[0]
assert element.text == prop.term_key.vocab_list.created.strftime('%Y')
def test_create_xml_Property_element(vocab_file_xml):
prop, root = vocab_file_xml
element = root.xpath('rdf:Property', namespaces=NS)[0]
attrib = element.get('{{{}}}about'.format(RDF))
assert attrib == '{}#{}'.format(PURL + prop.term_key.vocab_list.name, prop.term_key.name)
def test_create_xml_label_element(vocab_file_xml):
prop, root = vocab_file_xml
element = root.xpath('rdf:Property/rdfs:label', namespaces=NS)[0]
assert element.text == prop.term_key.label
def test_create_xml_Property_subelement_description(vocab_file_xml):
prop, root = vocab_file_xml
element = root.xpath('rdf:Property/dc:description', namespaces=NS)[0]
assert element.text == prop.label
def test_create_xml_isDefinedBy_element(vocab_file_xml):
prop, root = vocab_file_xml
element = root.xpath('rdf:Property/rdfs:isDefinedBy', namespaces=NS)[0]
attrib = element.get('{{{}}}resource'.format(RDF))
assert attrib == PURL + prop.term_key.vocab_list.name
def test_py_response():
vocab = factories.VocabularyFactory()
vocab_handler = VocabularyHandler().py_response(vocab)
assert vocab_handler.vocab == vocab
assert isinstance(vocab_handler, VocabularyHandler)
def test_create_py():
vocab_handler = VocabularyHandler().py_response(
factories.VocabularyFactory())
vocab_handler.create_py()
assert vocab_handler.vocab_mimetype == 'text/plain'
def test_json_response():
vocab = factories.VocabularyFactory()
vocab_handler = VocabularyHandler().json_response(vocab)
assert vocab_handler.vocab == vocab
assert isinstance(vocab_handler, VocabularyHandler)
def test_create_json():
vocab_handler = VocabularyHandler().json_response(
factories.VocabularyFactory())
vocab_handler.create_json()
assert vocab_handler.vocab_mimetype == 'application/json'
def test_tkl_response():
vocab = factories.VocabularyFactory()
vocab_handler = VocabularyHandler().tkl_response(vocab)
assert vocab_handler.vocab == vocab
assert isinstance(vocab_handler, VocabularyHandler)
def test_create_tkl():
"""Check that the xml doc has all the expected elements, values, and attributes."""
prop = factories.PropertyFactory(property_name='linkback')
term = prop.term_key
vocab = term.vocab_list
vocab_handler = VocabularyHandler().tkl_response(vocab)
root = objectify.fromstring(vocab_handler.vocab_file)
assert root.tag == 'authority'
assert root.get('creator') == vocab.maintainer
assert root.get('created') == str(vocab.created).replace(' ', ', ')
assert root.get('modifier') == vocab.maintainer
assert root.get('modified') == str(vocab.modified).replace(' ', ', ')
assert root.enum.get('value') == term.name
assert root.enum.get('order') == '1'
assert root.enum.string.get('{http://www.w3.org/XML/1998/namespace}lang') == 'en'
assert root.enum.string == term.label
assert root.enum.linkback == prop.label
assert vocab_handler.vocab_mimetype == 'text/xml'
assert b'<?xml version="1.0" encoding="UTF-8"?>' in vocab_handler.vocab_file
def test_create_tkl_order_by_name():
vocab = factories.VocabularyFactory(order='name')
factories.TermFactory.create_batch(4, vocab_list=vocab)
vocab_handler = VocabularyHandler().tkl_response(vocab)
root = objectify.fromstring(vocab_handler.vocab_file)
sorted_terms = vocab.term_set.order_by('name')
for actual, expected in zip(root.enum, sorted_terms):
assert actual.get('value') == expected.name
def test_create_tkl_order_by_label():
vocab = factories.VocabularyFactory(order='label')
factories.TermFactory.create_batch(4, vocab_list=vocab)
vocab_handler = VocabularyHandler().tkl_response(vocab)
root = objectify.fromstring(vocab_handler.vocab_file)
sorted_terms = vocab.term_set.order_by('label')
for actual, expected in zip(root.enum, sorted_terms):
assert actual.get('value') == expected.name
def test_create_tkl_order_by_order():
vocab = factories.VocabularyFactory(order='order')
factories.OrderedTermFactory.create_batch(4, vocab_list=vocab)
vocab_handler = VocabularyHandler().tkl_response(vocab)
root = objectify.fromstring(vocab_handler.vocab_file)
sorted_terms = vocab.term_set.order_by('order', 'name')
for actual, expected in zip(root.enum, sorted_terms):
assert actual.get('value') == expected.name
def test_create_vocab_dict():
# Create a vocab, term, and property that should be in the vocab_dict.
prop = factories.PropertyFactory()
vocab = prop.term_key.vocab_list
vocab_handler = VocabularyHandler().py_response(vocab)
vocab_dict = vocab_handler.create_vocab_dict('py')
assert vocab_dict['name'] == vocab.name
assert vocab_dict['label'] == vocab.label
assert vocab_dict['order'] == vocab.order
assert vocab_dict['maintainerEmail'] == vocab.maintainerEmail
assert vocab_dict['definition'] == vocab.definition
assert vocab_dict['created'] == vocab.created
assert vocab_dict['modified'] == vocab.modified
assert 'terms' in vocab_dict.keys()
def test_create_vocab_dict_term_sub_dict():
"""Test that the embedded dictionary contains the right information."""
prop = factories.PropertyFactory()
term = prop.term_key
vocab = term.vocab_list
vocab_handler = VocabularyHandler().py_response(vocab)
vocab_dict = vocab_handler.create_vocab_dict('py')
term_dict = vocab_dict['terms'][0]
assert term_dict['name'] == term.name
assert term_dict['label'] == term.label
assert term_dict['order'] == term.order
assert term_dict['url'] == 'http://purl.org/NET/UNTL/vocabularies/{}/#{}'.format(
vocab.name, term.name)
assert 'properties' in term_dict
def test_create_vocab_dict_properties_sub_dict():
"""Test that the embedded dictionary contains the right information."""
prop = factories.PropertyFactory()
vocab_handler = VocabularyHandler().py_response(prop.term_key.vocab_list)
vocab_dict = vocab_handler.create_vocab_dict('py')
prop_dict = vocab_dict['terms'][0]['properties'][0]
assert prop_dict['property_name'] == prop.property_name
assert prop_dict['label'] == prop.label
def test_create_vocab_dict_format_py():
prop = factories.PropertyFactory()
vocab = prop.term_key.vocab_list
vocab_handler = VocabularyHandler().py_response(vocab)
vocab_dict = vocab_handler.create_vocab_dict('py')
assert vocab_dict['created'] == vocab.created
assert vocab_dict['modified'] == vocab.modified
def test_create_vocab_dict_format_json():
prop = factories.PropertyFactory()
vocab = prop.term_key.vocab_list
vocab_handler = VocabularyHandler().py_response(vocab)
vocab_dict = vocab_handler.create_vocab_dict('json')
assert vocab_dict['created'] == str(vocab.created)
assert vocab_dict['modified'] == str(vocab.modified)
|
the-stack_106_20355
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import paddle.fluid as fluid
from paddle.fluid import compiler, Program, program_guard
from op_test import OpTest
# Correct: General.
class TestSqueezeOp(OpTest):
def setUp(self):
self.op_type = "squeeze"
self.init_test_case()
self.inputs = {"X": np.random.random(self.ori_shape).astype("float64")}
self.init_attrs()
self.outputs = {"Out": self.inputs["X"].reshape(self.new_shape), }
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(["X"], "Out")
def init_test_case(self):
self.ori_shape = (1, 3, 1, 40)
self.axes = (0, 2)
self.new_shape = (3, 40)
def init_attrs(self):
self.attrs = {"axes": self.axes}
# Correct: There is a negative (minus) axis.
class TestSqueezeOp1(TestSqueezeOp):
def init_test_case(self):
self.ori_shape = (1, 3, 1, 40)
self.axes = (0, -2)
self.new_shape = (3, 40)
# Correct: No axes input.
class TestSqueezeOp2(TestSqueezeOp):
def init_test_case(self):
self.ori_shape = (1, 3, 1, 5)
self.axes = ()
self.new_shape = (3, 5)
# Correct: Just part of axes be squeezed.
class TestSqueezeOp3(TestSqueezeOp):
def init_test_case(self):
self.ori_shape = (3, 1, 5, 1, 4, 1)
self.axes = (1, -1)
self.new_shape = (3, 5, 1, 4)
class TestSqueezeOpError(unittest.TestCase):
def test_errors(self):
with program_guard(Program(), Program()):
            # The input type of the squeeze op must be a Variable.
x1 = fluid.create_lod_tensor(
np.array([[-1]]), [[1]], fluid.CPUPlace())
self.assertRaises(TypeError, fluid.layers.squeeze, x1)
# The input axes of squeeze must be list.
x2 = fluid.layers.data(name='x2', shape=[4], dtype="int32")
self.assertRaises(TypeError, fluid.layers.squeeze, x2, axes=0)
# The input dtype of squeeze not support float16.
x3 = fluid.layers.data(name='x3', shape=[4], dtype="float16")
self.assertRaises(TypeError, fluid.layers.squeeze, x3, axes=0)
if __name__ == "__main__":
unittest.main()
|
the-stack_106_20356
|
import json
import uuid
from loguru import logger
import googleapiclient.discovery
from google.oauth2 import service_account
from cloudproxy.providers.config import set_auth
from cloudproxy.providers.settings import config
gcp = config["providers"]["gcp"]
if gcp["enabled"] == 'True':
try:
credentials = service_account.Credentials.from_service_account_info(
json.loads(gcp["secrets"]["service_account_key"])
)
compute = googleapiclient.discovery.build('compute', 'v1', credentials=credentials)
except TypeError:
logger.error("GCP -> Invalid service account key")
def create_proxy():
image_response = compute.images().getFromFamily(
project=gcp["image_project"],
family=gcp["image_family"]
).execute()
source_disk_image = image_response['selfLink']
body = {
'name': 'cloudproxy-' + str(uuid.uuid4()),
'machineType':
f"zones/{gcp['zone']}/machineTypes/{gcp['size']}",
'tags': {
'items': [
'cloudproxy'
]
},
"labels": {
'cloudproxy': 'cloudproxy'
},
'disks': [
{
'boot': True,
'autoDelete': True,
'initializeParams': {
'sourceImage': source_disk_image,
}
}
],
'networkInterfaces': [{
'network': 'global/networks/default',
'accessConfigs': [
{
'name': 'External NAT',
'type': 'ONE_TO_ONE_NAT',
'networkTier': 'STANDARD'
}
]
}],
'metadata': {
'items': [{
'key': 'startup-script',
'value': set_auth(config["auth"]["username"], config["auth"]["password"])
}]
}
}
return compute.instances().insert(
project=gcp["project"],
zone=gcp["zone"],
body=body
).execute()
def delete_proxy(name):
try:
return compute.instances().delete(
project=gcp["project"],
zone=gcp["zone"],
instance=name
).execute()
    except googleapiclient.errors.HttpError:
logger.info(f"GCP --> HTTP Error when trying to delete proxy {name}. Probably has already been deleted.")
return None
def stop_proxy(name):
try:
return compute.instances().stop(
project=gcp["project"],
zone=gcp["zone"],
instance=name
).execute()
    except googleapiclient.errors.HttpError:
logger.info(f"GCP --> HTTP Error when trying to stop proxy {name}. Probably has already been deleted.")
return None
def start_proxy(name):
try:
return compute.instances().start(
project=gcp["project"],
zone=gcp["zone"],
instance=name
).execute()
    except googleapiclient.errors.HttpError:
logger.info(f"GCP --> HTTP Error when trying to start proxy {name}. Probably has already been deleted.")
return None
def list_instances():
result = compute.instances().list(
project=gcp["project"],
zone=gcp["zone"],
filter='labels.cloudproxy eq cloudproxy'
).execute()
return result['items'] if 'items' in result else []
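# --- Usage sketch (added for illustration; not part of the original module).
# Assuming the GCP provider is enabled and its service account key is
# configured, the helpers above can be combined to recycle every instance
# labelled `cloudproxy` in the configured project and zone. Both calls return
# immediately; GCP processes the stop/start operations asynchronously.
if __name__ == "__main__":
    for instance in list_instances():
        logger.info(f"GCP --> Recycling proxy {instance['name']}")
        stop_proxy(instance["name"])
        start_proxy(instance["name"])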
|
the-stack_106_20357
|
import matplotlib.pyplot as plt
import numpy as np
import warnings
warnings.filterwarnings('ignore')
SIZE = (14,14)
class CrossRoadGridWorld():
def __init__(self, size=(14,14)):
super(CrossRoadGridWorld, self).__init__()
        if size[0] <= 4 or size[1] <= 4:
            raise ValueError("Size error, the grid size must be larger than 4*4")
        self.state = np.zeros(size)
        self.size = size
        self.init_state = self.get_init_state()
self.title = "Cross Road Grid World"
self.length = size[0]
self.width = size[1]
def reset(self):
self.state = self.get_init_state()
def get_init_state(self):
        # mark the two middle columns as road cells
init_state = np.zeros((self.size))
for i in range(self.size[0]):
for j in [self.size[1]//2-1, self.size[1]//2]:
init_state[i,j] = 200
        # mark the two middle rows as road cells
for i in [self.size[0]//2-1, self.size[0]//2]:
for j in range(self.size[1]):
init_state[i,j] = 200
return init_state
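# --- Usage sketch (added for illustration; not part of the original file).
# The grid starts empty; calling reset() fills `state` with the cross-shaped
# road layout produced by get_init_state() (two middle rows and two middle
# columns set to 200).
if __name__ == "__main__":
    env = CrossRoadGridWorld(size=SIZE)
    env.reset()                      # copy the crossing-roads layout into state
    plt.imshow(env.state)
    plt.title(env.title)
    plt.show()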
|
the-stack_106_20358
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import packaging.version
import pkg_resources
import google.auth # type: ignore
import google.api_core # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.api_core import operations_v1 # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.aiplatform_v1.types import pipeline_job
from google.cloud.aiplatform_v1.types import pipeline_job as gca_pipeline_job
from google.cloud.aiplatform_v1.types import pipeline_service
from google.cloud.aiplatform_v1.types import training_pipeline
from google.cloud.aiplatform_v1.types import training_pipeline as gca_training_pipeline
from google.longrunning import operations_pb2 # type: ignore
from google.protobuf import empty_pb2 # type: ignore
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-aiplatform",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
try:
# google.auth.__version__ was added in 1.26.0
_GOOGLE_AUTH_VERSION = google.auth.__version__
except AttributeError:
try: # try pkg_resources if it is available
_GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version
except pkg_resources.DistributionNotFound: # pragma: NO COVER
_GOOGLE_AUTH_VERSION = None
class PipelineServiceTransport(abc.ABC):
"""Abstract transport class for PipelineService."""
AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",)
DEFAULT_HOST: str = "aiplatform.googleapis.com"
def __init__(
self,
*,
host: str = DEFAULT_HOST,
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
**kwargs,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
scopes_kwargs = self._get_scopes_kwargs(self._host, scopes)
# Save the scopes.
self._scopes = scopes
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials and credentials_file:
raise core_exceptions.DuplicateCredentialArgs(
"'credentials_file' and 'credentials' are mutually exclusive"
)
if credentials_file is not None:
credentials, _ = google.auth.load_credentials_from_file(
credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
)
elif credentials is None:
credentials, _ = google.auth.default(
**scopes_kwargs, quota_project_id=quota_project_id
)
# If the credentials is service account credentials, then always try to use self signed JWT.
if (
always_use_jwt_access
and isinstance(credentials, service_account.Credentials)
and hasattr(service_account.Credentials, "with_always_use_jwt_access")
):
credentials = credentials.with_always_use_jwt_access(True)
# Save the credentials.
self._credentials = credentials
# TODO(busunkim): This method is in the base transport
# to avoid duplicating code across the transport classes. These functions
# should be deleted once the minimum required versions of google-auth is increased.
# TODO: Remove this function once google-auth >= 1.25.0 is required
@classmethod
def _get_scopes_kwargs(
cls, host: str, scopes: Optional[Sequence[str]]
) -> Dict[str, Optional[Sequence[str]]]:
"""Returns scopes kwargs to pass to google-auth methods depending on the google-auth version"""
scopes_kwargs = {}
if _GOOGLE_AUTH_VERSION and (
packaging.version.parse(_GOOGLE_AUTH_VERSION)
>= packaging.version.parse("1.25.0")
):
scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES}
else:
scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES}
return scopes_kwargs
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
self.create_training_pipeline: gapic_v1.method.wrap_method(
self.create_training_pipeline,
default_timeout=5.0,
client_info=client_info,
),
self.get_training_pipeline: gapic_v1.method.wrap_method(
self.get_training_pipeline,
default_timeout=5.0,
client_info=client_info,
),
self.list_training_pipelines: gapic_v1.method.wrap_method(
self.list_training_pipelines,
default_timeout=5.0,
client_info=client_info,
),
self.delete_training_pipeline: gapic_v1.method.wrap_method(
self.delete_training_pipeline,
default_timeout=5.0,
client_info=client_info,
),
self.cancel_training_pipeline: gapic_v1.method.wrap_method(
self.cancel_training_pipeline,
default_timeout=5.0,
client_info=client_info,
),
self.create_pipeline_job: gapic_v1.method.wrap_method(
self.create_pipeline_job, default_timeout=None, client_info=client_info,
),
self.get_pipeline_job: gapic_v1.method.wrap_method(
self.get_pipeline_job, default_timeout=None, client_info=client_info,
),
self.list_pipeline_jobs: gapic_v1.method.wrap_method(
self.list_pipeline_jobs, default_timeout=None, client_info=client_info,
),
self.delete_pipeline_job: gapic_v1.method.wrap_method(
self.delete_pipeline_job, default_timeout=None, client_info=client_info,
),
self.cancel_pipeline_job: gapic_v1.method.wrap_method(
self.cancel_pipeline_job, default_timeout=None, client_info=client_info,
),
}
@property
def operations_client(self) -> operations_v1.OperationsClient:
"""Return the client designed to process long-running operations."""
raise NotImplementedError()
@property
def create_training_pipeline(
self,
) -> Callable[
[pipeline_service.CreateTrainingPipelineRequest],
Union[
gca_training_pipeline.TrainingPipeline,
Awaitable[gca_training_pipeline.TrainingPipeline],
],
]:
raise NotImplementedError()
@property
def get_training_pipeline(
self,
) -> Callable[
[pipeline_service.GetTrainingPipelineRequest],
Union[
training_pipeline.TrainingPipeline,
Awaitable[training_pipeline.TrainingPipeline],
],
]:
raise NotImplementedError()
@property
def list_training_pipelines(
self,
) -> Callable[
[pipeline_service.ListTrainingPipelinesRequest],
Union[
pipeline_service.ListTrainingPipelinesResponse,
Awaitable[pipeline_service.ListTrainingPipelinesResponse],
],
]:
raise NotImplementedError()
@property
def delete_training_pipeline(
self,
) -> Callable[
[pipeline_service.DeleteTrainingPipelineRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def cancel_training_pipeline(
self,
) -> Callable[
[pipeline_service.CancelTrainingPipelineRequest],
Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]],
]:
raise NotImplementedError()
@property
def create_pipeline_job(
self,
) -> Callable[
[pipeline_service.CreatePipelineJobRequest],
Union[gca_pipeline_job.PipelineJob, Awaitable[gca_pipeline_job.PipelineJob]],
]:
raise NotImplementedError()
@property
def get_pipeline_job(
self,
) -> Callable[
[pipeline_service.GetPipelineJobRequest],
Union[pipeline_job.PipelineJob, Awaitable[pipeline_job.PipelineJob]],
]:
raise NotImplementedError()
@property
def list_pipeline_jobs(
self,
) -> Callable[
[pipeline_service.ListPipelineJobsRequest],
Union[
pipeline_service.ListPipelineJobsResponse,
Awaitable[pipeline_service.ListPipelineJobsResponse],
],
]:
raise NotImplementedError()
@property
def delete_pipeline_job(
self,
) -> Callable[
[pipeline_service.DeletePipelineJobRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def cancel_pipeline_job(
self,
) -> Callable[
[pipeline_service.CancelPipelineJobRequest],
Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]],
]:
raise NotImplementedError()
__all__ = ("PipelineServiceTransport",)
|
the-stack_106_20359
|
# coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""End-to-end test for bandits against a mushroom environment.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
from absl import app
from absl import flags
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
import tensorflow_probability as tfp
from tf_agents.bandits.agents import lin_ucb_agent
from tf_agents.bandits.agents import linear_thompson_sampling_agent as lin_ts_agent
from tf_agents.bandits.agents.examples.v2 import trainer
from tf_agents.bandits.environments import classification_environment as ce
from tf_agents.bandits.environments import dataset_utilities
from tf_agents.bandits.environments import environment_utilities as env_util
from tf_agents.bandits.metrics import tf_metrics as tf_bandit_metrics
flags.DEFINE_string('root_dir', os.getenv('TEST_UNDECLARED_OUTPUTS_DIR'),
'Root directory for writing logs/summaries/checkpoints.')
flags.DEFINE_enum(
'agent', 'LinUCB', ['LinUCB', 'LinTS'],
'Which agent to use. Possible values are `LinUCB` and `LinTS`.')
flags.DEFINE_string(
'mushroom_csv', '',
'Location of the csv file containing the mushroom dataset.')
FLAGS = flags.FLAGS
tfd = tfp.distributions
BATCH_SIZE = 8
TRAINING_LOOPS = 200
STEPS_PER_LOOP = 2
AGENT_ALPHA = 10.0
def main(unused_argv):
tf.compat.v1.enable_v2_behavior() # The trainer only runs with V2 enabled.
with tf.device('/CPU:0'): # due to b/128333994
mushroom_reward_distribution = (
dataset_utilities.mushroom_reward_distribution(
r_noeat=0.0, r_eat_safe=5.0, r_eat_poison_bad=-35.0,
r_eat_poison_good=5.0, prob_poison_bad=0.5))
mushroom_dataset = (
dataset_utilities.convert_mushroom_csv_to_tf_dataset(
FLAGS.mushroom_csv))
environment = ce.ClassificationBanditEnvironment(
mushroom_dataset, mushroom_reward_distribution, BATCH_SIZE)
optimal_reward_fn = functools.partial(
env_util.compute_optimal_reward_with_classification_environment,
environment=environment)
optimal_action_fn = functools.partial(
env_util.compute_optimal_action_with_classification_environment,
environment=environment)
if FLAGS.agent == 'LinUCB':
agent = lin_ucb_agent.LinearUCBAgent(
time_step_spec=environment.time_step_spec(),
action_spec=environment.action_spec(),
alpha=AGENT_ALPHA,
gamma=0.95,
emit_log_probability=False,
dtype=tf.float32)
elif FLAGS.agent == 'LinTS':
agent = lin_ts_agent.LinearThompsonSamplingAgent(
time_step_spec=environment.time_step_spec(),
action_spec=environment.action_spec(),
alpha=AGENT_ALPHA,
gamma=0.95,
dtype=tf.float32)
regret_metric = tf_bandit_metrics.RegretMetric(optimal_reward_fn)
suboptimal_arms_metric = tf_bandit_metrics.SuboptimalArmsMetric(
optimal_action_fn)
trainer.train(
root_dir=FLAGS.root_dir,
agent=agent,
environment=environment,
training_loops=TRAINING_LOOPS,
steps_per_loop=STEPS_PER_LOOP,
additional_metrics=[regret_metric, suboptimal_arms_metric])
if __name__ == '__main__':
app.run(main)
|
the-stack_106_20360
|
import sys
import pandas as pd
import numpy as np
import numpy.linalg as la
from scipy import stats
from collections import Counter
def quantile_normalization(data):
'''
This function does quantile normalization to input data. After normalization, the samples (rows) in output
data follow the same distribution, which is the average distribution calculated based on all samples.
    This function allows missing values, and assumes missing values occur at random.
Parameters:
-----------
data: numpy array or pandas data frame of numeric values, with a shape of [n_samples, n_features].
Returns:
--------
norm_data: numpy array or pandas data frame containing the data after quantile normalization.
'''
colnames = None
rownames = None
if isinstance(data, pd.DataFrame):
colnames = data.columns
rownames = data.index
data = data.values
elif not isinstance(data, np.ndarray):
print('Input data must be a numpy array or pandas data frame')
sys.exit(1)
norm_data = data.copy()
nan_mask = np.isnan(norm_data)
if np.sum(nan_mask) > 0:
n_samples, n_features = norm_data.shape
for i in range(n_samples):
idi_nan = np.where(np.isnan(norm_data[i, :]))[0]
if len(idi_nan) > 0:
idi = np.setdiff1d(range(n_features), idi_nan)
norm_data[i, idi_nan] = np.random.choice(norm_data[i, idi], size=len(idi_nan), replace=True)
quantiles = np.mean(np.sort(norm_data, axis=1), axis=0)
ranks = np.apply_along_axis(stats.rankdata, 1, norm_data)
rank_indices = ranks.astype(int) - 1
norm_data = quantiles[rank_indices]
if np.sum(nan_mask) > 0:
row_id, col_id = np.where(nan_mask)
norm_data[row_id, col_id] = np.nan
if colnames is not None and rownames is not None:
norm_data = pd.DataFrame(norm_data, columns=colnames, index=rownames)
return norm_data
def generate_cross_validation_partition(group_label, n_folds=5, n_repeats=1, portions=None, random_seed=None):
'''
This function generates partition indices of samples for cross-validation analysis.
Parameters:
-----------
group_label: 1-D array or list of group labels of samples. If there are no groups in samples, a list of
sample indices can be supplied for generating partitions based on individual samples rather than sample groups.
n_folds: positive integer larger than 1, indicating the number of folds for cross-validation. Default is 5.
n_repeats: positive integer, indicating how many times the n_folds cross-validation should be repeated.
So the total number of cross-validation trials is n_folds * n_repeats. Default is 1.
portions: 1-D array or list of positive integers, indicating the number of data folds in each set
(e.g. training set, testing set, or validation set) after partitioning. The summation of elements
in portions must be equal to n_folds. Default is [1, n_folds - 1].
random_seed: positive integer, the seed for random generator. Default is None.
Returns:
--------
partition: list of n_folds * n_repeats lists, each of which contains len(portions) sample index lists for
a cross-validation trial.
'''
group_counter = Counter(group_label)
unique_label = np.array(list(group_counter.keys()))
n_group = len(unique_label)
if n_group < n_folds:
print('The number of groups in labels can not be smaller than the number of folds.')
sys.exit(1)
sorted_label = np.array(sorted(unique_label, key=lambda x: group_counter[x], reverse=True))
if portions is None:
portions = [1, n_folds - 1]
else:
if np.sum(portions) != n_folds:
print('The summation of elements in portions must be equal to n_folds')
sys.exit(1)
if random_seed is not None:
np.random.seed(random_seed)
n_set = len(portions)
partition = []
for r in range(n_repeats):
if r == 0 and random_seed is None:
label = sorted_label.copy()
else:
idr = np.random.permutation(n_group)
label = sorted_label[idr]
folds = [[] for _ in range(n_folds)]
fold_size = np.zeros((n_folds, ))
for g in range(n_group):
f = np.argmin(fold_size)
folds[f].append(label[g])
fold_size[f] += group_counter[label[g]]
for f in range(n_folds):
folds[f] = list(np.where(np.isin(group_label, folds[f]))[0])
a = list(range(n_folds)) + list(range(n_folds))
for f in range(n_folds):
temp = []
end = f
for s in range(n_set):
start = end
end = start + portions[s]
t = []
for i in range(start, end):
t = t + folds[a[i]]
temp.append(sorted(t))
partition.append(temp)
return partition
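# ---------------------------------------------------------------------------
# Minimal usage sketch added for illustration (not part of the original
# module); the array values, group labels and fold counts below are arbitrary.
if __name__ == '__main__':
    demo = np.array([[5.0, 2.0, 3.0, 4.0],
                     [4.0, 1.0, 4.0, 2.0],
                     [3.0, 4.0, 6.0, 8.0]])
    # After normalization every row follows the same averaged distribution.
    print(quantile_normalization(demo))
    # Split 6 sample groups into 3 folds, partitioned as [1, 2] folds per set.
    labels = ['a', 'a', 'b', 'b', 'c', 'c', 'd', 'e', 'f']
    print(generate_cross_validation_partition(labels, n_folds=3, n_repeats=1,
                                               random_seed=0))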
|
the-stack_106_20362
|
# coding: utf-8
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class KeystoneListAuthDomainsResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'domains': 'list[Domains]',
'links': 'LinksSelf'
}
attribute_map = {
'domains': 'domains',
'links': 'links'
}
def __init__(self, domains=None, links=None):
"""KeystoneListAuthDomainsResponse - a model defined in huaweicloud sdk"""
super(KeystoneListAuthDomainsResponse, self).__init__()
self._domains = None
self._links = None
self.discriminator = None
if domains is not None:
self.domains = domains
if links is not None:
self.links = links
@property
def domains(self):
"""Gets the domains of this KeystoneListAuthDomainsResponse.
        List of account information.
:return: The domains of this KeystoneListAuthDomainsResponse.
:rtype: list[Domains]
"""
return self._domains
@domains.setter
def domains(self, domains):
"""Sets the domains of this KeystoneListAuthDomainsResponse.
        List of account information.
:param domains: The domains of this KeystoneListAuthDomainsResponse.
:type: list[Domains]
"""
self._domains = domains
@property
def links(self):
"""Gets the links of this KeystoneListAuthDomainsResponse.
:return: The links of this KeystoneListAuthDomainsResponse.
:rtype: LinksSelf
"""
return self._links
@links.setter
def links(self, links):
"""Sets the links of this KeystoneListAuthDomainsResponse.
:param links: The links of this KeystoneListAuthDomainsResponse.
:type: LinksSelf
"""
self._links = links
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, KeystoneListAuthDomainsResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
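# ---------------------------------------------------------------------------
# Minimal usage sketch added for illustration (not part of the original SDK
# module): build an empty response object and serialize it with to_dict().
if __name__ == '__main__':
    resp = KeystoneListAuthDomainsResponse()
    print(resp.to_dict())  # -> {'domains': None, 'links': None}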
|
the-stack_106_20363
|
from __future__ import unicode_literals
import django
from future.builtins import int, zip
from functools import reduce
from operator import ior, iand
from string import punctuation
from django.apps import apps
from django.core.exceptions import ImproperlyConfigured
from django.db.models import Manager, Q, CharField, TextField
from django.db.models.manager import ManagerDescriptor
from django.db.models.query import QuerySet
from django.contrib.sites.managers import CurrentSiteManager as DjangoCSM
from django.utils.timezone import now
from django.utils.translation import ugettext_lazy as _
from mezzanine.conf import settings
from mezzanine.utils.sites import current_site_id
from mezzanine.utils.urls import home_slug
if django.VERSION >= (1, 10):
class ManagerDescriptor(ManagerDescriptor):
"""
This class exists purely to skip the abstract model check
in the __get__ method of Django's ManagerDescriptor.
"""
def __get__(self, instance, cls=None):
if instance is not None:
raise AttributeError(
"Manager isn't accessible via %s instances" % cls.__name__
)
# In ManagerDescriptor.__get__, an exception is raised here
# if cls is abstract
if cls._meta.swapped:
raise AttributeError(
"Manager isn't available; "
"'%s.%s' has been swapped for '%s'" % (
cls._meta.app_label,
cls._meta.object_name,
cls._meta.swapped,
)
)
return cls._meta.managers_map[self.manager.name]
class PublishedManager(Manager):
"""
Provides filter for restricting items returned by status and
publish date when the given user is not a staff member.
"""
def published(self, for_user=None):
"""
For non-staff users, return items with a published status and
whose publish and expiry dates fall before and after the
current date when specified.
"""
from mezzanine.core.models import CONTENT_STATUS_PUBLISHED
if for_user is not None and for_user.is_staff:
return self.all()
return self.filter(
Q(publish_date__lte=now()) | Q(publish_date__isnull=True),
Q(expiry_date__gte=now()) | Q(expiry_date__isnull=True),
Q(status=CONTENT_STATUS_PUBLISHED))
def get_by_natural_key(self, slug):
return self.get(slug=slug)
def search_fields_to_dict(fields):
"""
In ``SearchableQuerySet`` and ``SearchableManager``, search fields
can either be a sequence, or a dict of fields mapped to weights.
This function converts sequences to a dict mapped to even weights,
so that we're consistently dealing with a dict of fields mapped to
weights, eg: ("title", "content") -> {"title": 1, "content": 1}
"""
if not fields:
return {}
try:
int(list(dict(fields).values())[0])
except (TypeError, ValueError):
fields = dict(zip(fields, [1] * len(fields)))
return fields
class SearchableQuerySet(QuerySet):
"""
QuerySet providing main search functionality for
``SearchableManager``.
"""
def __init__(self, *args, **kwargs):
self._search_ordered = False
self._search_terms = set()
self._search_fields = kwargs.pop("search_fields", {})
super(SearchableQuerySet, self).__init__(*args, **kwargs)
def search(self, query, search_fields=None):
"""
Build a queryset matching words in the given search query,
treating quoted terms as exact phrases and taking into
account + and - symbols as modifiers controlling which terms
to require and exclude.
"""
# ### DETERMINE FIELDS TO SEARCH ###
# Use search_fields arg if given, otherwise use search_fields
# initially configured by the manager class.
if search_fields:
self._search_fields = search_fields_to_dict(search_fields)
if not self._search_fields:
return self.none()
# ### BUILD LIST OF TERMS TO SEARCH FOR ###
# Remove extra spaces, put modifiers inside quoted terms.
terms = " ".join(query.split()).replace("+ ", "+") \
.replace('+"', '"+') \
.replace("- ", "-") \
.replace('-"', '"-') \
.split('"')
# Strip punctuation other than modifiers from terms and create
# terms list, first from quoted terms and then remaining words.
terms = [("" if t[0:1] not in "+-" else t[0:1]) + t.strip(punctuation)
for t in terms[1::2] + "".join(terms[::2]).split()]
# Remove stop words from terms that aren't quoted or use
# modifiers, since words with these are an explicit part of
# the search query. If doing so ends up with an empty term
# list, then keep the stop words.
terms_no_stopwords = [t for t in terms if t.lower() not in
settings.STOP_WORDS]
get_positive_terms = lambda terms: [t.lower().strip(punctuation)
for t in terms if t[0:1] != "-"]
positive_terms = get_positive_terms(terms_no_stopwords)
if positive_terms:
terms = terms_no_stopwords
else:
positive_terms = get_positive_terms(terms)
# Append positive terms (those without the negative modifier)
# to the internal list for sorting when results are iterated.
if not positive_terms:
return self.none()
else:
self._search_terms.update(positive_terms)
# ### BUILD QUERYSET FILTER ###
# Create the queryset combining each set of terms.
excluded = [reduce(iand, [~Q(**{"%s__icontains" % f: t[1:]}) for f in
self._search_fields.keys()]) for t in terms if t[0:1] == "-"]
required = [reduce(ior, [Q(**{"%s__icontains" % f: t[1:]}) for f in
self._search_fields.keys()]) for t in terms if t[0:1] == "+"]
optional = [reduce(ior, [Q(**{"%s__icontains" % f: t}) for f in
self._search_fields.keys()]) for t in terms if t[0:1] not in "+-"]
queryset = self
if excluded:
queryset = queryset.filter(reduce(iand, excluded))
if required:
queryset = queryset.filter(reduce(iand, required))
# Optional terms aren't relevant to the filter if there are
# terms that are explicitly required.
elif optional:
queryset = queryset.filter(reduce(ior, optional))
return queryset.distinct()
def _clone(self, *args, **kwargs):
"""
Ensure attributes are copied to subsequent queries.
"""
for attr in ("_search_terms", "_search_fields", "_search_ordered"):
kwargs[attr] = getattr(self, attr)
return super(SearchableQuerySet, self)._clone(*args, **kwargs)
def order_by(self, *field_names):
"""
Mark the filter as being ordered if search has occurred.
"""
if not self._search_ordered:
self._search_ordered = len(self._search_terms) > 0
return super(SearchableQuerySet, self).order_by(*field_names)
def annotate_scores(self):
"""
If search has occurred and no ordering has occurred, decorate
each result with the number of search terms so that it can be
        sorted by the number of occurrences of the terms.
In the case of search fields that span model relationships, we
cannot accurately match occurrences without some very
complicated traversal code, which we won't attempt. So in this
case, namely when there are no matches for a result (count=0),
and search fields contain relationships (double underscores),
we assume one match for one of the fields, and use the average
weight of all search fields with relationships.
"""
results = super(SearchableQuerySet, self).iterator()
if self._search_terms and not self._search_ordered:
results = list(results)
for i, result in enumerate(results):
count = 0
related_weights = []
for (field, weight) in self._search_fields.items():
if "__" in field:
related_weights.append(weight)
for term in self._search_terms:
field_value = getattr(result, field, None)
if field_value:
count += field_value.lower().count(term) * weight
if not count and related_weights:
count = int(sum(related_weights) / len(related_weights))
if result.publish_date:
age = (now() - result.publish_date).total_seconds()
count = count / age**settings.SEARCH_AGE_SCALE_FACTOR
results[i].result_count = count
return iter(results)
return results
class SearchableManager(Manager):
"""
Manager providing a chainable queryset.
Adapted from http://www.djangosnippets.org/snippets/562/
search method supports spanning across models that subclass the
model being used to search.
"""
def __init__(self, *args, **kwargs):
self._search_fields = kwargs.pop("search_fields", {})
super(SearchableManager, self).__init__(*args, **kwargs)
def get_search_fields(self):
"""
Returns the search field names mapped to weights as a dict.
Used in ``get_queryset`` below to tell ``SearchableQuerySet``
which search fields to use. Also used by ``DisplayableAdmin``
to populate Django admin's ``search_fields`` attribute.
Search fields can be populated via
``SearchableManager.__init__``, which then get stored in
``SearchableManager._search_fields``, which serves as an
approach for defining an explicit set of fields to be used.
Alternatively and more commonly, ``search_fields`` can be
defined on models themselves. In this case, we look at the
model and all its base classes, and build up the search
fields from all of those, so the search fields are implicitly
        built up from the inheritance chain.
Finally if no search fields have been defined at all, we
fall back to any fields that are ``CharField`` or ``TextField``
instances.
"""
search_fields = self._search_fields.copy()
if not search_fields:
for cls in reversed(self.model.__mro__):
super_fields = getattr(cls, "search_fields", {})
search_fields.update(search_fields_to_dict(super_fields))
if not search_fields:
search_fields = []
for f in self.model._meta.fields:
if isinstance(f, (CharField, TextField)):
search_fields.append(f.name)
search_fields = search_fields_to_dict(search_fields)
return search_fields
def get_queryset(self):
search_fields = self.get_search_fields()
return SearchableQuerySet(self.model, search_fields=search_fields)
def contribute_to_class(self, model, name):
"""
Newer versions of Django explicitly prevent managers being
accessed from abstract classes, which is behaviour the search
API has always relied on. Here we reinstate it.
"""
super(SearchableManager, self).contribute_to_class(model, name)
setattr(model, name, ManagerDescriptor(self))
def search(self, *args, **kwargs):
"""
Proxy to queryset's search method for the manager's model and
any models that subclass from this manager's model if the
model is abstract.
"""
if not settings.SEARCH_MODEL_CHOICES:
# No choices defined - build a list of leaf models (those
# without subclasses) that inherit from Displayable.
models = [m for m in apps.get_models()
if issubclass(m, self.model)]
parents = reduce(ior, [set(m._meta.get_parent_list())
for m in models])
models = [m for m in models if m not in parents]
elif getattr(self.model._meta, "abstract", False):
# When we're combining model subclasses for an abstract
# model (eg Displayable), we only want to use models that
# are represented by the ``SEARCH_MODEL_CHOICES`` setting.
# Now this setting won't contain an exact list of models
# we should use, since it can define superclass models such
# as ``Page``, so we check the parent class list of each
# model when determining whether a model falls within the
# ``SEARCH_MODEL_CHOICES`` setting.
search_choices = set()
models = set()
parents = set()
errors = []
for name in settings.SEARCH_MODEL_CHOICES:
try:
model = apps.get_model(*name.split(".", 1))
except LookupError:
errors.append(name)
else:
search_choices.add(model)
if errors:
raise ImproperlyConfigured("Could not load the model(s) "
"%s defined in the 'SEARCH_MODEL_CHOICES' setting."
% ", ".join(errors))
for model in apps.get_models():
                # Model is actually a subclass of what we're
                # searching (eg Displayable)
is_subclass = issubclass(model, self.model)
# Model satisfies the search choices list - either
# there are no search choices, model is directly in
# search choices, or its parent is.
this_parents = set(model._meta.get_parent_list())
in_choices = not search_choices or model in search_choices
in_choices = in_choices or this_parents & search_choices
if is_subclass and (in_choices or not search_choices):
                    # Add to models we'll search. Also maintain a parent
# set, used below for further refinement of models
# list to search.
models.add(model)
parents.update(this_parents)
# Strip out any models that are superclasses of models,
# specifically the Page model which will generally be the
# superclass for all custom content types, since if we
# query the Page model as well, we will get duplicate
# results.
models -= parents
else:
models = [self.model]
all_results = []
user = kwargs.pop("for_user", None)
for model in models:
try:
queryset = model.objects.published(for_user=user)
except AttributeError:
queryset = model.objects.get_queryset()
all_results.extend(
queryset.search(*args, **kwargs).annotate_scores())
return sorted(all_results, key=lambda r: r.result_count, reverse=True)
class CurrentSiteManager(DjangoCSM):
"""
Extends Django's site manager to first look up site by ID stored in
the request, the session, then domain for the current request
(accessible via threadlocals in ``mezzanine.core.request``), the
environment variable ``MEZZANINE_SITE_ID`` (which can be used by
    management commands with the ``--site`` arg), finally falling back
to ``settings.SITE_ID`` if none of those match a site.
"""
use_in_migrations = False
def __init__(self, field_name=None, *args, **kwargs):
super(DjangoCSM, self).__init__(*args, **kwargs)
self.__field_name = field_name
self.__is_validated = False
def get_queryset(self):
if not self.__is_validated:
self._get_field_name()
lookup = {self.__field_name + "__id__exact": current_site_id()}
return super(DjangoCSM, self).get_queryset().filter(**lookup)
class DisplayableManager(CurrentSiteManager, PublishedManager,
SearchableManager):
"""
Manually combines ``CurrentSiteManager``, ``PublishedManager``
and ``SearchableManager`` for the ``Displayable`` model.
"""
def url_map(self, for_user=None, **kwargs):
"""
Returns a dictionary of urls mapped to Displayable subclass
instances, including a fake homepage instance if none exists.
Used in ``mezzanine.core.sitemaps``.
"""
class Home:
title = _("Home")
home = Home()
setattr(home, "get_absolute_url", home_slug)
items = {home.get_absolute_url(): home}
for model in apps.get_models():
if issubclass(model, self.model):
for item in (model.objects.published(for_user=for_user)
.filter(**kwargs)
.exclude(slug__startswith="http://")
.exclude(slug__startswith="https://")):
items[item.get_absolute_url()] = item
return items
|
the-stack_106_20364
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A TensorSpec class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import type_spec
from tensorflow.python.util import _pywrap_utils
from tensorflow.python.util.tf_export import tf_export
class DenseSpec(type_spec.TypeSpec):
"""Describes a dense object with shape, dtype, and name."""
__slots__ = ["_shape", "_shape_tuple", "_dtype", "_name"]
_component_specs = property(lambda self: self)
def __init__(self, shape, dtype=dtypes.float32, name=None):
"""Creates a TensorSpec.
Args:
shape: Value convertible to `tf.TensorShape`. The shape of the tensor.
dtype: Value convertible to `tf.DType`. The type of the tensor values.
name: Optional name for the Tensor.
Raises:
TypeError: If shape is not convertible to a `tf.TensorShape`, or dtype is
not convertible to a `tf.DType`.
"""
self._shape = tensor_shape.TensorShape(shape)
try:
self._shape_tuple = tuple(self.shape.as_list())
except ValueError:
self._shape_tuple = None
self._dtype = dtypes.as_dtype(dtype)
self._name = name
@property
def shape(self):
"""Returns the `TensorShape` that represents the shape of the tensor."""
return self._shape
@property
def dtype(self):
"""Returns the `dtype` of elements in the tensor."""
return self._dtype
@property
def name(self):
"""Returns the (optionally provided) name of the described tensor."""
return self._name
def is_compatible_with(self, spec_or_value):
return (isinstance(spec_or_value, (DenseSpec, self.value_type)) and
self._dtype.is_compatible_with(spec_or_value.dtype) and
self._shape.is_compatible_with(spec_or_value.shape))
def __repr__(self):
return "{}(shape={}, dtype={}, name={})".format(
type(self).__name__, self.shape, repr(self.dtype), repr(self.name))
def __hash__(self):
return hash((self._shape_tuple, self.dtype))
def __eq__(self, other):
# pylint: disable=protected-access
return (type(self) is type(other) and
self._shape_tuple == other._shape_tuple
and self._dtype == other._dtype
and self._name == other._name)
def __ne__(self, other):
return not self == other
def most_specific_compatible_type(self, other):
if (type(self) is not type(other)) or (self._dtype != other.dtype):
raise ValueError("Types are not compatible: %r vs %r" % (self, other))
shape = self._shape.most_specific_compatible_shape(other.shape)
name = self._name if self._name == other.name else None
return type(self)(shape, self._dtype, name)
def _serialize(self):
return (self._shape, self._dtype, self._name)
def _to_legacy_output_types(self):
return self._dtype
def _to_legacy_output_shapes(self):
return self._shape
def _to_legacy_output_classes(self):
return self.value_type
@tf_export("TensorSpec")
@type_spec.register("tf.TensorSpec")
class TensorSpec(DenseSpec, type_spec.BatchableTypeSpec):
"""Describes a tf.Tensor.
Metadata for describing the `tf.Tensor` objects accepted or returned
by some TensorFlow APIs.
"""
__slots__ = []
def is_compatible_with(self, spec_or_tensor): # pylint:disable=useless-super-delegation
"""Returns True if spec_or_tensor is compatible with this TensorSpec.
Two tensors are considered compatible if they have the same dtype
and their shapes are compatible (see `tf.TensorShape.is_compatible_with`).
Args:
spec_or_tensor: A tf.TensorSpec or a tf.Tensor
Returns:
True if spec_or_tensor is compatible with self.
"""
return super(TensorSpec, self).is_compatible_with(spec_or_tensor)
@classmethod
def from_spec(cls, spec, name=None):
"""Returns a `TensorSpec` with the same shape and dtype as `spec`.
>>> spec = tf.TensorSpec(shape=[8, 3], dtype=tf.int32, name="OriginalName")
>>> tf.TensorSpec.from_spec(spec, "NewName")
TensorSpec(shape=(8, 3), dtype=tf.int32, name='NewName')
Args:
spec: The `TypeSpec` used to create the new `TensorSpec`.
name: The name for the new `TensorSpec`. Defaults to `spec.name`.
"""
return cls(spec.shape, spec.dtype, name or spec.name)
@classmethod
def from_tensor(cls, tensor, name=None):
"""Returns a `TensorSpec` that describes `tensor`.
>>> tf.TensorSpec.from_tensor(tf.constant([1, 2, 3]))
TensorSpec(shape=(3,), dtype=tf.int32, name=None)
Args:
tensor: The `tf.Tensor` that should be described.
name: A name for the `TensorSpec`. Defaults to `tensor.op.name`.
Returns:
A `TensorSpec` that describes `tensor`.
"""
if isinstance(tensor, ops.EagerTensor):
return TensorSpec(tensor.shape, tensor.dtype, name)
elif isinstance(tensor, ops.Tensor):
return TensorSpec(tensor.shape, tensor.dtype, name or tensor.op.name)
else:
raise ValueError("`tensor` should be a tf.Tensor")
@property
def value_type(self):
"""The Python type for values that are compatible with this TypeSpec."""
return ops.Tensor
def _to_components(self, value):
try:
value = ops.convert_to_tensor(value, self._dtype)
except (TypeError, ValueError):
raise ValueError("Value %r is not convertible to a tensor with dtype %s "
"and shape %s." % (value, self._dtype, self._shape))
if not value.shape.is_compatible_with(self._shape):
raise ValueError("Value %r is not convertible to a tensor with dtype %s "
"and shape %s." % (value, self._dtype, self._shape))
return value
def _from_components(self, components):
return components
def _from_compatible_tensor_list(self, tensor_list):
# TODO(b/112266545): It would be cleaner to create a new `ensure_shape()`
# op here and return that, instead of mutating the input's shape using
# `Tensor.set_shape()`. However, that would add extra ops, which could
# impact performance. When this bug is resolved, we should be able to add
# the `ensure_shape()` ops and optimize them away using contextual shape
# information.
assert len(tensor_list) == 1
tensor_list[0].set_shape(self._shape)
return tensor_list[0]
def _to_batchable_tensor_list(self, value, batched=False):
if batched and self._shape.merge_with(value.shape).ndims == 0:
raise ValueError("Unbatching a tensor is only supported for rank >= 1")
return self._to_components(value)
def _batch(self, batch_size):
return TensorSpec(
tensor_shape.TensorShape([batch_size]).concatenate(self._shape),
self._dtype)
def _unbatch(self):
if self._shape.ndims == 0:
raise ValueError("Unbatching a tensor is only supported for rank >= 1")
return TensorSpec(self._shape[1:], self._dtype)
# TODO(b/133606651): Should is_compatible_with check min/max bounds?
@type_spec.register("tf.BoundedTensorSpec")
class BoundedTensorSpec(TensorSpec):
"""A `TensorSpec` that specifies minimum and maximum values.
Example usage:
```python
spec = tensor_spec.BoundedTensorSpec((1, 2, 3), tf.float32, 0, (5, 5, 5))
tf_minimum = tf.convert_to_tensor(spec.minimum, dtype=spec.dtype)
tf_maximum = tf.convert_to_tensor(spec.maximum, dtype=spec.dtype)
```
Bounds are meant to be inclusive. This is especially important for
integer types. The following spec will be satisfied by tensors
with values in the set {0, 1, 2}:
```python
spec = tensor_spec.BoundedTensorSpec((3, 5), tf.int32, 0, 2)
```
"""
__slots__ = ("_minimum", "_maximum")
def __init__(self, shape, dtype, minimum, maximum, name=None):
"""Initializes a new `BoundedTensorSpec`.
Args:
shape: Value convertible to `tf.TensorShape`. The shape of the tensor.
dtype: Value convertible to `tf.DType`. The type of the tensor values.
minimum: Number or sequence specifying the minimum element bounds
(inclusive). Must be broadcastable to `shape`.
maximum: Number or sequence specifying the maximum element bounds
(inclusive). Must be broadcastable to `shape`.
name: Optional string containing a semantic name for the corresponding
array. Defaults to `None`.
Raises:
ValueError: If `minimum` or `maximum` are not provided or not
broadcastable to `shape`.
TypeError: If the shape is not an iterable or if the `dtype` is an invalid
numpy dtype.
"""
super(BoundedTensorSpec, self).__init__(shape, dtype, name)
if minimum is None or maximum is None:
raise ValueError("minimum and maximum must be provided; but saw "
"'%s' and '%s'" % (minimum, maximum))
try:
minimum_shape = np.shape(minimum)
common_shapes.broadcast_shape(
tensor_shape.TensorShape(minimum_shape), self.shape)
except ValueError as exception:
raise ValueError("minimum is not compatible with shape. "
"Message: {!r}.".format(exception))
try:
maximum_shape = np.shape(maximum)
common_shapes.broadcast_shape(
tensor_shape.TensorShape(maximum_shape), self.shape)
except ValueError as exception:
raise ValueError("maximum is not compatible with shape. "
"Message: {!r}.".format(exception))
self._minimum = np.array(minimum, dtype=self.dtype.as_numpy_dtype)
self._minimum.setflags(write=False)
self._maximum = np.array(maximum, dtype=self.dtype.as_numpy_dtype)
self._maximum.setflags(write=False)
@classmethod
def from_spec(cls, spec):
"""Returns a `TensorSpec` with the same shape and dtype as `spec`.
If `spec` is a `BoundedTensorSpec`, then the new spec's bounds are set to
`spec.minimum` and `spec.maximum`; otherwise, the bounds are set to
`spec.dtype.min` and `spec.dtype.max`.
>>> spec = tf.TensorSpec(shape=[8, 3], dtype=tf.int32, name="x")
>>> BoundedTensorSpec.from_spec(spec)
BoundedTensorSpec(shape=(8, 3), dtype=tf.int32, name='x',
minimum=array(-2147483648, dtype=int32),
maximum=array(2147483647, dtype=int32))
Args:
spec: The `TypeSpec` used to create the new `BoundedTensorSpec`.
"""
dtype = dtypes.as_dtype(spec.dtype)
minimum = getattr(spec, "minimum", dtype.min)
maximum = getattr(spec, "maximum", dtype.max)
return BoundedTensorSpec(spec.shape, dtype, minimum, maximum, spec.name)
@property
def minimum(self):
"""Returns a NumPy array specifying the minimum bounds (inclusive)."""
return self._minimum
@property
def maximum(self):
"""Returns a NumPy array specifying the maximum bounds (inclusive)."""
return self._maximum
def __repr__(self):
s = "BoundedTensorSpec(shape={}, dtype={}, name={}, minimum={}, maximum={})"
return s.format(self.shape, repr(self.dtype), repr(self.name),
repr(self.minimum), repr(self.maximum))
def __eq__(self, other):
tensor_spec_eq = super(BoundedTensorSpec, self).__eq__(other)
return (tensor_spec_eq and np.allclose(self.minimum, other.minimum) and
np.allclose(self.maximum, other.maximum))
def __hash__(self):
return hash((self._shape_tuple, self.dtype))
def __reduce__(self):
return BoundedTensorSpec, (self._shape, self._dtype, self._minimum,
self._maximum, self._name)
def _serialize(self):
return (self._shape, self._dtype, self._minimum, self._maximum, self._name)
_pywrap_utils.RegisterType("TensorSpec", TensorSpec)
# Note: we do not include Tensor names when constructing TypeSpecs.
type_spec.register_type_spec_from_value_converter(
ops.Tensor,
lambda tensor: TensorSpec(tensor.shape, tensor.dtype))
type_spec.register_type_spec_from_value_converter(
np.ndarray,
lambda array: TensorSpec(array.shape, array.dtype))
|
the-stack_106_20366
|
# -*- coding: utf-8 -*-
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
from subprocess import PIPE
# Import salt libs
import salt.modules.openscap as openscap
# Import 3rd-party libs
from salt.ext import six
from tests.support.mock import MagicMock, Mock, patch
# Import salt test libs
from tests.support.unit import TestCase
class OpenscapTestCase(TestCase):
random_temp_dir = "/tmp/unique-name"
policy_file = "/usr/share/openscap/policy-file-xccdf.xml"
def setUp(self):
import salt.modules.openscap
salt.modules.openscap.__salt__ = MagicMock()
patchers = [
patch("salt.modules.openscap.__salt__", MagicMock()),
patch("salt.modules.openscap.shutil.rmtree", Mock()),
patch(
"salt.modules.openscap.tempfile.mkdtemp",
Mock(return_value=self.random_temp_dir),
),
]
for patcher in patchers:
self.apply_patch(patcher)
def apply_patch(self, patcher):
patcher.start()
self.addCleanup(patcher.stop)
def test_openscap_xccdf_eval_success(self):
with patch(
"salt.modules.openscap.Popen",
MagicMock(
return_value=Mock(
**{"returncode": 0, "communicate.return_value": ("", "")}
)
),
):
response = openscap.xccdf(
"eval --profile Default {0}".format(self.policy_file)
)
self.assertEqual(openscap.tempfile.mkdtemp.call_count, 1)
expected_cmd = [
"oscap",
"xccdf",
"eval",
"--oval-results",
"--results",
"results.xml",
"--report",
"report.html",
"--profile",
"Default",
self.policy_file,
]
openscap.Popen.assert_called_once_with(
expected_cmd,
cwd=openscap.tempfile.mkdtemp.return_value,
stderr=PIPE,
stdout=PIPE,
)
openscap.__salt__["cp.push_dir"].assert_called_once_with(
self.random_temp_dir
)
self.assertEqual(openscap.shutil.rmtree.call_count, 1)
self.assertEqual(
response,
{
"upload_dir": self.random_temp_dir,
"error": "",
"success": True,
"returncode": 0,
},
)
def test_openscap_xccdf_eval_success_with_failing_rules(self):
with patch(
"salt.modules.openscap.Popen",
MagicMock(
return_value=Mock(
**{"returncode": 2, "communicate.return_value": ("", "some error")}
)
),
):
response = openscap.xccdf(
"eval --profile Default {0}".format(self.policy_file)
)
self.assertEqual(openscap.tempfile.mkdtemp.call_count, 1)
expected_cmd = [
"oscap",
"xccdf",
"eval",
"--oval-results",
"--results",
"results.xml",
"--report",
"report.html",
"--profile",
"Default",
self.policy_file,
]
openscap.Popen.assert_called_once_with(
expected_cmd,
cwd=openscap.tempfile.mkdtemp.return_value,
stderr=PIPE,
stdout=PIPE,
)
openscap.__salt__["cp.push_dir"].assert_called_once_with(
self.random_temp_dir
)
self.assertEqual(openscap.shutil.rmtree.call_count, 1)
self.assertEqual(
response,
{
"upload_dir": self.random_temp_dir,
"error": "some error",
"success": True,
"returncode": 2,
},
)
def test_openscap_xccdf_eval_fail_no_profile(self):
response = openscap.xccdf("eval --param Default /unknown/param")
if six.PY2:
error = "argument --profile is required"
else:
error = "the following arguments are required: --profile"
self.assertEqual(
response,
{"error": error, "upload_dir": None, "success": False, "returncode": None},
)
def test_openscap_xccdf_eval_success_ignore_unknown_params(self):
with patch(
"salt.modules.openscap.Popen",
MagicMock(
return_value=Mock(
**{"returncode": 2, "communicate.return_value": ("", "some error")}
)
),
):
response = openscap.xccdf(
"eval --profile Default --param Default /policy/file"
)
self.assertEqual(
response,
{
"upload_dir": self.random_temp_dir,
"error": "some error",
"success": True,
"returncode": 2,
},
)
expected_cmd = [
"oscap",
"xccdf",
"eval",
"--oval-results",
"--results",
"results.xml",
"--report",
"report.html",
"--profile",
"Default",
"/policy/file",
]
openscap.Popen.assert_called_once_with(
expected_cmd,
cwd=openscap.tempfile.mkdtemp.return_value,
stderr=PIPE,
stdout=PIPE,
)
def test_openscap_xccdf_eval_evaluation_error(self):
with patch(
"salt.modules.openscap.Popen",
MagicMock(
return_value=Mock(
**{
"returncode": 1,
"communicate.return_value": ("", "evaluation error"),
}
)
),
):
response = openscap.xccdf(
"eval --profile Default {0}".format(self.policy_file)
)
self.assertEqual(
response,
{
"upload_dir": None,
"error": "evaluation error",
"success": False,
"returncode": 1,
},
)
def test_openscap_xccdf_eval_fail_not_implemented_action(self):
response = openscap.xccdf("info {0}".format(self.policy_file))
if six.PY2:
mock_err = "argument action: invalid choice: 'info' (choose from u'eval')"
else:
mock_err = "argument action: invalid choice: 'info' (choose from 'eval')"
self.assertEqual(
response,
{
"upload_dir": None,
"error": mock_err,
"success": False,
"returncode": None,
},
)
|
the-stack_106_20371
|
"""
string_util.py
A sample repository for MolSSI Workshop.
Misc. string processing functions
"""
def title_case(sentence):
"""
Convert a string to title case.
Parameters
----------
sentence: string
String to be converted to title case
Returns
-------
ret : string
String converted to title case
Example
-------
>>> title_case('ThIS iS a StrInG to BE ConVerTed.')
'This Is A String To Be Converted.'
"""
#Check that input is a string
if not isinstance(sentence, str):
raise TypeError('Invalid input %s - Input must be type string' % (sentence))
    #Error if the input is a single space
    if sentence == ' ':
        raise Warning('Input is a single space!')
    #Handle empty string
    if len(sentence) == 0:
        raise ValueError('Input is an empty string!')
ret = sentence[0].upper()
for i in range(1, len(sentence)):
if sentence[i - 1] == ' ':
ret += sentence[i].upper()
else:
ret += sentence[i].lower()
return ret
y = 'ThIS iS a StrInG to BE ConVerTed.'
print(title_case(y))
|
the-stack_106_20372
|
""""""
"""
Copyright (c) 2021 Olivier Sprangers as part of Airlab Amsterdam
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
WaveNet
Paper: https://arxiv.org/pdf/1609.03499.pdf
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
# This implementation of causal conv is faster than using normal conv1d module
class CustomConv1d(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, padding, dilation):
super(CustomConv1d, self).__init__()
k = np.sqrt(1 / (in_channels * kernel_size))
weight_data = -k + 2 * k * torch.rand((out_channels, in_channels, kernel_size))
bias_data = -k + 2 * k * torch.rand((out_channels))
self.weight = nn.Parameter(weight_data, requires_grad=True)
self.bias = nn.Parameter(bias_data, requires_grad=True)
self.dilation = dilation
self.padding = padding
def forward(self, x):
xp = F.pad(x, (self.padding, 0))
return F.conv1d(xp, self.weight, self.bias, dilation=self.dilation)
class wavenet_cell(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, padding, dilation):
super(wavenet_cell, self).__init__()
self.conv_dil = CustomConv1d(in_channels, out_channels * 2, kernel_size, padding, dilation)
self.conv_skipres = nn.Conv1d(out_channels, out_channels * 2, 1)
def forward(self, x):
h_prev, skip_prev = x
f, g = self.conv_dil(h_prev).chunk(2, 1)
h_next, skip_next = self.conv_skipres(torch.tanh(f) * torch.sigmoid(g)).chunk(2, 1)
return (h_prev + h_next, skip_prev + skip_next)
class wavenet(nn.Module):
def __init__(self, d_lag, d_cov, d_emb, d_output, d_hidden, Nl, kernel_size):
super(wavenet, self).__init__()
# Embedding layer for time series ID
self.emb = nn.ModuleList([nn.Embedding(d_emb[i, 0], d_emb[i, 1]) for i in range(len(d_emb))])
d_emb_tot = d_emb[:, 1].sum()
self.upscale = nn.Linear(d_lag + d_cov + d_emb_tot, d_hidden)
# Wavenet
wnet_layers = nn.ModuleList([wavenet_cell(
d_hidden, d_hidden,
kernel_size, padding=(kernel_size-1) * 2**i,
dilation = 2**i) for i in range(Nl)])
self.wnet = nn.Sequential(*wnet_layers)
# Output layer
self.loc_scale = nn.Linear(d_hidden, d_output * 2)
self.epsilon = 1e-6
def forward(self, x_lag, x_cov, x_idx, d_outputseqlen):
# Embedding layers
x_emb = []
for i, layer in enumerate(self.emb):
out = layer(x_idx[:, :, i])
x_emb.append(out)
x_emb = torch.cat(x_emb, -1)
# Concatenate inputs
dim_seq = x_lag.shape[0]
h = torch.cat((x_lag, x_cov[:dim_seq], x_emb[:dim_seq]), dim=-1)
h = self.upscale(h)
# Apply wavenet
_, h = self.wnet((h.permute(1, 2, 0), 0))
# Output layers - location & scale of the distribution
output = h[:, :, -d_outputseqlen:].permute(2, 0, 1)
loc, scale = F.softplus(self.loc_scale(output)).chunk(2, -1)
return loc, scale + self.epsilon
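# ---------------------------------------------------------------------------
# Minimal shape-check sketch added for illustration (not part of the original
# module). All sizes are arbitrary; x_lag, x_cov and x_idx are assumed to be
# laid out as (sequence, batch, features), matching forward() above.
if __name__ == '__main__':
    d_lag, d_cov, d_output, d_hidden, Nl, kernel_size = 4, 2, 1, 16, 3, 2
    d_emb = np.array([[10, 3]])  # one categorical series id: 10 levels, embedding dim 3
    model = wavenet(d_lag, d_cov, d_emb, d_output, d_hidden, Nl, kernel_size)
    seq, batch = 24, 5
    x_lag = torch.randn(seq, batch, d_lag)
    x_cov = torch.randn(seq, batch, d_cov)
    x_idx = torch.randint(0, 10, (seq, batch, 1))
    loc, scale = model(x_lag, x_cov, x_idx, d_outputseqlen=6)
    print(loc.shape, scale.shape)  # both torch.Size([6, 5, 1])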
|
the-stack_106_20373
|
# -*- coding: utf-8 -*-
#from pathos.multiprocessing import ProcessingPool as Pool
#from pathos.helpers import mp
import multiprocessing as mp
import sys
class mproc():
'''
This is a class that spawns N new processes to do the designated job
@initializer params :
- N : number of processes to spawn
'''
def __init__(self, N, jobtype='workers'):
#self.pool = mp.Pool(processes=N, initializer=initializer, initargs=jobtype)
self.pool = mp.Pool(processes=N)
self.no_of_prcs = N
self.STATUS = 'RUNNING'
#ierr = mp.Value(ctypes.c_int, 0)
def apply(self, runner, *funcargs):
        # Debug print for inspecting the input parameters
# print ('<pure>', funcargs, '<pure>')
'''
Applies the designated job (runner) to the spawned processes
@params:
- runner : function to be run
- args : arguments to be passed to runner function
'''
try:
            ''' Multiprocessing execution block
            Runs the job through a multiprocessing Pool
            Key library keywords: multiprocessing, pool, apply_async, get
            # apply_async => built_in code
'''
self.async_results = [ self.pool.apply_async(runner, (funcargs[0], idx, funcargs[1], funcargs[2])) \
for idx in range(self.no_of_prcs) ]
#self.pool.amap
self.STATUS = 'DONE'
except Exception as e:
print(e)
            print('Exception occurred during apply_async()', __file__)
def _close(self):
self.pool.close()
self.pool.join()
def get(self, timeout=None):
'''
returns result when it's ready
returns the list of results
success : list of result
fail : empty list
'''
self._close()
rets = [] #list of return values
if self.STATUS == 'DONE':
try:
for result in self.async_results:
if timeout: #raise exception if the result is not ready
result.successful()
rets.append(result.get(timeout))
#need modification
except mp.TimeoutError as e:
print(e)
raise e
except AssertionError as e:
print(e)
raise e
except Exception as e:
print(e)
pass
#raise e
return rets
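# ---------------------------------------------------------------------------
# Minimal usage sketch added for illustration (not part of the original
# module). The worker below is hypothetical; apply() always invokes
# runner(funcargs[0], idx, funcargs[1], funcargs[2]) once per process index,
# so the runner must accept exactly four arguments.
def _demo_runner(data, idx, offset, scale):
    # Each process handles the element of `data` selected by its index.
    return (data[idx] + offset) * scale
if __name__ == '__main__':
    procs = mproc(3)
    procs.apply(_demo_runner, [1, 2, 3], 10, 2)
    print(procs.get())  # expected: [22, 24, 26]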
|
the-stack_106_20374
|
from math import floor, ceil
import numpy as np
from pyspark import SparkContext
from shapely.geometry import Point
from geopyspark.geotrellis.constants import LayerType
from geopyspark.geotrellis import SpaceTimeKey, Extent, Tile, get_spark_context
from geopyspark.geotrellis.layer import TiledRasterLayer
class Gddp(object):
x_offset = (-360.0 + 1/8.0)
y_offset = (-90.0 + 1/8.0)
@classmethod
def rdd_of_rasters(cls, uri, extent, days, num_partitions=None):
if not isinstance(uri, str):
raise Exception
sc = get_spark_context()
int_days = list(map(lambda day: int(day), days))
float_extent = list(map(lambda coord: float(coord), extent))
jvm = sc._gateway.jvm
rdd = jvm.geopyspark.netcdf.datasets.Gddp.rasters(uri, float_extent, int_days, num_partitions, sc._jsc.sc())
return TiledRasterLayer(LayerType.SPACETIME, rdd)
@classmethod
def raster(cls, uri, extent, day):
if not isinstance(uri, str):
raise Exception
sc = get_spark_context()
int_day = int(day)
float_extent = list(map(lambda coord: float(coord), extent))
jvm = sc._gateway.jvm
tup = jvm.geopyspark.netcdf.datasets.Gddp.raster(uri, float_extent, int_day)
cols = tup._1()
rows = tup._2()
jvm_array = tup._3()
array = np.flipud(np.array(list(jvm_array)).reshape((rows, cols)))
return array
@classmethod
def samples(cls, uri, point, days):
if not isinstance(uri, str):
raise Exception
sc = get_spark_context()
int_days = list(map(lambda day: int(day), days))
float_point = list(map(lambda coord: float(coord), point))
jvm = sc._gateway.jvm
rdd = jvm.geopyspark.netcdf.datasets.Gddp.samples(uri, float_point, int_days, sc._jsc.sc())
return rdd
|
the-stack_106_20375
|
from paida.paida_core.PAbsorber import *
def dscal(n, da, dx, incx):
"""scales a vector by a constant.
uses unrolled loops for increment equal to one.
jack dongarra, linpack, 3/11/78.
modified 3/93 to return if incx .le. 0.
modified 12/3/93, array(1) declarations changed to array(*)
Python replacement by K. KISHIMOTO ([email protected])
"""
if (n <= 0) or (incx <= 0):
return
if incx == 1:
### code for increment equal to 1
m = n % 5
if m != 0:
for i in range(1, m + 1):
dx[i - 1] *= da
if n < 5:
return
mp1 = m + 1
for i in range(mp1, n + 1, 5):
dx[i - 1] *= da
dx[i] *= da
dx[i + 1] *= da
dx[i + 2] *= da
dx[i + 3] *= da
return
else:
### code for increment not equal to 1
nincx = n * incx
for i in range(1, nincx + 1, incx):
dx[i - 1] *= da
return
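# ---------------------------------------------------------------------------
# Minimal usage sketch added for illustration (not part of the original
# module): scale the first six entries of dx in place by 0.5.
if __name__ == '__main__':
    dx = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0]
    dscal(6, 0.5, dx, 1)
    print(dx)  # -> [0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 7.0]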
|
the-stack_106_20376
|
# Copyright 2020 Graphcore Ltd.
from pathlib import Path
import pytest
# NOTE: The import below is dependent on 'pytest.ini' in the root of
# the repository
from examples_tests.test_util import SubProcessChecker
working_path = Path(__file__).parent.parent
class TestTensorFlowGroupedConvBenchmarks(SubProcessChecker):
"""High-level integration tests for TensorFlow grouped convolution synthetic benchmarks"""
@pytest.mark.category1
def test_help(self):
self.run_command("python3 grouped_conv.py --help",
working_path,
"usage: grouped_conv.py")
@pytest.mark.category1
@pytest.mark.ipus(1)
def test_default(self):
self.run_command("python3 grouped_conv.py",
working_path,
[r"(\w+.\w+) items/sec"])
@pytest.mark.category1
@pytest.mark.ipus(1)
def test_inference(self):
self.run_command("python3 grouped_conv.py --batch-size 8 --use-data",
working_path,
[r"(\w+.\w+) items/sec"])
@pytest.mark.category1
@pytest.mark.ipus(1)
def test_block_repeats_and_group_dims(self):
self.run_command("python3 grouped_conv.py --block-repeats 20 --group-dim 8",
working_path,
[r"(\w+.\w+) items/sec"])
@pytest.mark.category1
@pytest.mark.ipus(1)
def test_training(self):
self.run_command("python3 grouped_conv.py --train --input-size 112 --stride 3 --filter-in 32 --filter-out 16",
working_path,
[r"(\w+.\w+) items/sec", "Input size 112"])
@pytest.mark.category1
@pytest.mark.ipus(2)
def test_replicas(self):
self.run_command("python3 grouped_conv.py --replicas 2",
working_path,
[r"(\w+.\w+) items/sec"])
|
the-stack_106_20378
|
#!/usr/bin/env python
# Quick flame color test based on the Unicorn pHat example code:
# https://github.com/pimoroni/unicorn-hat/blob/master/examples/random_blinky.py
import colorsys
import time
from sys import exit
import numpy
import unicornhat as unicorn
def flame(hue, duration):
""" Flicker flame effect for specified hue (0-1) and duration (in seconds)"""
print("flame: ", hue)
sleepTime = 0.05
for i in range(0, int(duration/sleepTime)):
rand_mat = numpy.random.rand(width,height)
for y in range(height):
for x in range(width):
h = hue + (0.1 * rand_mat[x, y])
s = 0.8
v = rand_mat[x, y]
rgb = colorsys.hsv_to_rgb(h, s, v)
r = int(rgb[0]*255.0)
g = int(rgb[1]*255.0)
b = int(rgb[2]*255.0)
unicorn.set_pixel(x, y, r, g, b)
unicorn.show()
time.sleep(sleepTime)
print("Flame color test")
unicorn.set_layout(unicorn.AUTO)
unicorn.rotation(0)
unicorn.brightness(0.5)
width,height=unicorn.get_shape()
flame(0.0, 2) # Red
flame(0.1, 2) # Yellow
flame(0.2, 2) # Green
flame(0.4, 2) # Light Blue
flame(0.8, 2) # Purple
|
the-stack_106_20379
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(
regex=r'^loadData/$',
view=views.LoadData.as_view(),
name='load_data'
),
url(
regex=r'^hClustering/$',
view=views.HClustering.as_view(),
name='h_clustering'
),
url(
regex=r'^hClusteringForAllLVs/$',
view=views.HClusteringForAllLVs.as_view(),
name='h_clustering_for_all_lvs'
),
url(
regex=r'^optimizeEdgesForCats/$',
view=views.OptimizeEdgesForCats.as_view(),
name='optimize_edges_for_cats'
),
url(
regex=r'^optimizeEdgesForCls/$',
view=views.OptimizeEdgesForCls.as_view(),
name='optimize_edges_for_cls'
),
]
|
the-stack_106_20380
|
import os
import io
import discord
import matplotlib.pyplot as plt
from PIL import Image
from gamestonk_terminal.helper_funcs import plot_autoscale
from gamestonk_terminal.stocks.technical_analysis import finviz_model
from gamestonk_terminal.config_plot import PLOT_DPI
import discordbot.config_discordbot as cfg
from discordbot.run_discordbot import gst_imgur, logger
async def view_command(ctx, ticker=""):
"""Displays image from Finviz [Finviz]"""
try:
# Debug
if cfg.DEBUG:
logger.debug("!stocks.ta.view %s", ticker)
# Check for argument
if ticker == "":
raise Exception("Stock ticker is required")
image_data = finviz_model.get_finviz_image(ticker)
dataBytesIO = io.BytesIO(image_data)
im = Image.open(dataBytesIO)
fig, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
ax.set_axis_off()
fig.subplots_adjust(left=0, right=1, top=1, bottom=0)
plt.imshow(im)
plt.savefig("ta_view.png")
uploaded_image = gst_imgur.upload_image("ta_view.png", title="something")
image_link = uploaded_image.link
if cfg.DEBUG:
logger.debug("Image URL: %s", image_link)
title = "Stocks: [Finviz] Trendlines & Data " + ticker
embed = discord.Embed(title=title, colour=cfg.COLOR)
embed.set_author(
name=cfg.AUTHOR_NAME,
icon_url=cfg.AUTHOR_ICON_URL,
)
embed.set_image(url=image_link)
os.remove("ta_view.png")
await ctx.send(embed=embed)
except Exception as e:
embed = discord.Embed(
title="ERROR Stocks: [Finviz] Trendlines & Data",
colour=cfg.COLOR,
description=e,
)
embed.set_author(
name=cfg.AUTHOR_NAME,
icon_url=cfg.AUTHOR_ICON_URL,
)
await ctx.send(embed=embed)
|
the-stack_106_20381
|
import os
import numpy as np
from srd import add_params_as_attr
module_dir = os.path.dirname(os.path.dirname(__file__))
def create_stub():
lines = ['cerb', 'cesb', 'iprew']
return dict(zip(lines, np.zeros(len(lines))))
class policy:
"""
    Special measures related to COVID-19.
    Selects which measures are applied in the simulator. By default, the first 5 measures below are applied (CERB, CESB, IPREW, and the GST/HST credit and Canada Child Benefit top-ups).
    Parameters
    ----------
    icerb: boolean
        the CERB (Canada Emergency Response Benefit) is applied
    icesb: boolean
        the CESB (Canada Emergency Student Benefit) is applied
    iiprew: boolean
        the IPREW (Incentive Program to Retain Essential Workers) is applied in Quebec
    icovid_gst: boolean
        The GST/HST credit top-up is applied
    icovid_ccb: boolean
        The Canada Child Benefit (CCB) top-up is applied
    iei: boolean
        Emergency Employment Insurance: an EI scenario, alternative to the CERB, used in some CREEi analyses
"""
def __init__(self, icerb=True, icesb=True, iiprew=True, icovid_gst=True,
icovid_ccb=True, iei=False):
self.icerb = icerb
self.icesb = icesb
self.iiprew = iiprew
self.icovid_gst = icovid_gst
self.icovid_ccb = icovid_ccb
self.iei = iei
def shut_all_measures(self):
"""
        Does not take the special COVID-19 measures into account in the simulation.
"""
for var in vars(self):
setattr(self, var, False)
@property
def some_measures(self):
"""
        Indicates whether at least one special COVID-19 measure is included.
        Returns
        -------
        boolean
            True if at least one measure is included, False otherwise.
"""
return any(v is True for k, v in vars(self).items() if k != 'iei')
class programs:
"""
    Computes the emergency benefits related to COVID-19: the Canada Emergency Response Benefit (CERB), the Canada Emergency Student Benefit (CESB) and the Incentive Program to Retain Essential Workers (IPREW).
    Parameters
    ----------
    policy: policy
        instance of the policy class
"""
def __init__(self, policy):
add_params_as_attr(self, module_dir + '/covid/covid.csv')
self.policy = policy
def compute(self, hh):
"""
        Function that performs the computation and creates the benefits record.
        Parameters
        ----------
        hh: Hhold
            instance of the Hhold class
"""
for p in hh.sp:
p.covid = create_stub()
if self.policy.icerb:
p.inc_cerb = self.compute_cerb(p)
p.covid['cerb'] = p.inc_cerb
if self.policy.icesb:
p.inc_cesb = self.compute_cesb(p, hh)
p.covid['cesb'] = p.inc_cesb
if self.policy.iiprew and hh.prov == 'qc':
p.inc_iprew = self.compute_iprew(p)
p.covid['iprew'] = p.inc_iprew
def compute_cerb(self, p):
"""
        Function computing the CERB.
        Computes the CERB based on the number of 4-week blocks (months) for which the benefit is claimed.
        Parameters
        ----------
        p: Person
            instance of the Person class
        Returns
        -------
        float
            CERB amount.
"""
if p.months_cerb == 0 or p.prev_inc_work < self.cerb_min_inc_work:
return 0
else:
l_cerb = [self.cerb_base for month
in range(self.begin_april, self.begin_april + p.months_cerb)
if p.inc_work_month[month] <= self.cerb_max_earn]
return sum(l_cerb)
def compute_cesb(self, p, hh):
"""
        Function computing the CESB.
        Computes the CESB based on the monthly benefit the individual is entitled to and the number of 4-week blocks (months) for which the benefit is claimed.
        Parameters
        ----------
        p: Person
            instance of the Person class
        hh: Hhold
            instance of the Hhold class
        Returns
        -------
        float
            CESB amount.
"""
if p.months_cesb == 0:
return 0
else:
monthly_cesb = self.compute_monthly_cesb(p, hh)
l_cesb = [monthly_cesb for month
in range(self.begin_april, self.begin_april + p.months_cesb)
if p.inc_work_month[month] <= self.cesb_max_earn]
return sum(l_cesb)
def compute_monthly_cesb(self, p, hh):
"""
        Computes the monthly CESB amount based on status (disability, dependents).
        Parameters
        ----------
        Returns
        -------
        float
            Monthly CESB benefit.
"""
dep = len(hh.dep) > 0
if p.disabled:
return self.cesb_base + self.cesb_supp
if not p.disabled and not dep:
return self.cesb_base
if dep:
if hh.couple:
spouse = hh.sp[1 - hh.sp.index(p)]
if spouse.disabled:
return self.cesb_base + self.cesb_supp
else:
return self.cesb_base + self.cesb_supp / 2
else:
return self.cesb_base + self.cesb_supp
def compute_iprew(self, p):
"""
        Function computing the IPREW.
        Computes the IPREW for the 16-week period (4 months) if the worker is eligible.
        Parameters
        ----------
        p: Person
            instance of the Person class
        Returns
        -------
        float
            IPREW amount for the 16 weeks.
"""
if (not p.essential_worker or p.inc_work < self.iprew_min_inc_work or
p.inc_tot > self.iprew_max_inc_tot):
return 0
else:
l_iprew = [self.iprew_monthly_amount for month
in range(self.begin_april, self.begin_april + self.iprew_max_months)
if 0 < p.inc_work_month[month] <= self.iprew_max_earn]
return sum(l_iprew)
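# ---------------------------------------------------------------------------
# Minimal usage sketch added for illustration (not part of the original
# module): toggling measures on a policy instance. The programs class itself
# needs Person/Hhold objects from the srd package and is not exercised here.
if __name__ == '__main__':
    pol = policy(icesb=False)
    print(pol.some_measures)   # True: CERB, IPREW and the credit top-ups remain active
    pol.shut_all_measures()
    print(pol.some_measures)   # False: every measure has been switched off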
|
the-stack_106_20383
|
#
#
#
from __future__ import absolute_import, division, print_function, \
unicode_literals
from mock import Mock, call
from os.path import dirname, join
from requests import HTTPError
from requests_mock import ANY, mock as requests_mock
from unittest import TestCase
from octodns.record import Record
from octodns.provider.dnsmadeeasy import DnsMadeEasyClientNotFound, \
DnsMadeEasyProvider
from octodns.provider.yaml import YamlProvider
from octodns.zone import Zone
import json
class TestDnsMadeEasyProvider(TestCase):
expected = Zone('unit.tests.', [])
source = YamlProvider('test', join(dirname(__file__), 'config'))
source.populate(expected)
# Our test suite differs a bit, add our NS and remove the simple one
expected.add_record(Record.new(expected, 'under', {
'ttl': 3600,
'type': 'NS',
'values': [
'ns1.unit.tests.',
'ns2.unit.tests.',
]
}))
# Add some ALIAS records
expected.add_record(Record.new(expected, '', {
'ttl': 1800,
'type': 'ALIAS',
'value': 'aname.unit.tests.'
}))
expected.add_record(Record.new(expected, 'sub', {
'ttl': 1800,
'type': 'ALIAS',
'value': 'aname.unit.tests.'
}))
for record in list(expected.records):
if record.name == 'sub' and record._type == 'NS':
expected._remove_record(record)
break
def test_populate(self):
provider = DnsMadeEasyProvider('test', 'api', 'secret')
# Bad auth
with requests_mock() as mock:
mock.get(ANY, status_code=401,
text='{"error": ["API key not found"]}')
with self.assertRaises(Exception) as ctx:
zone = Zone('unit.tests.', [])
provider.populate(zone)
self.assertEquals('Unauthorized', ctx.exception.message)
# Bad request
with requests_mock() as mock:
mock.get(ANY, status_code=400,
text='{"error": ["Rate limit exceeded"]}')
with self.assertRaises(Exception) as ctx:
zone = Zone('unit.tests.', [])
provider.populate(zone)
self.assertEquals('\n - Rate limit exceeded',
ctx.exception.message)
# General error
with requests_mock() as mock:
mock.get(ANY, status_code=502, text='Things caught fire')
with self.assertRaises(HTTPError) as ctx:
zone = Zone('unit.tests.', [])
provider.populate(zone)
self.assertEquals(502, ctx.exception.response.status_code)
        # Non-existent zone doesn't populate anything
with requests_mock() as mock:
mock.get(ANY, status_code=404,
text='<html><head></head><body></body></html>')
zone = Zone('unit.tests.', [])
provider.populate(zone)
self.assertEquals(set(), zone.records)
# No diffs == no changes
with requests_mock() as mock:
base = 'https://api.dnsmadeeasy.com/V2.0/dns/managed'
with open('tests/fixtures/dnsmadeeasy-domains.json') as fh:
mock.get('{}{}'.format(base, '/'), text=fh.read())
with open('tests/fixtures/dnsmadeeasy-records.json') as fh:
mock.get('{}{}'.format(base, '/123123/records'),
text=fh.read())
zone = Zone('unit.tests.', [])
provider.populate(zone)
self.assertEquals(15, len(zone.records))
changes = self.expected.changes(zone, provider)
self.assertEquals(0, len(changes))
# 2nd populate makes no network calls/all from cache
again = Zone('unit.tests.', [])
provider.populate(again)
self.assertEquals(15, len(again.records))
# bust the cache
del provider._zone_records[zone.name]
def test_apply(self):
# Create provider with sandbox enabled
provider = DnsMadeEasyProvider('test', 'api', 'secret', True)
resp = Mock()
resp.json = Mock()
provider._client._request = Mock(return_value=resp)
with open('tests/fixtures/dnsmadeeasy-domains.json') as fh:
domains = json.load(fh)
        # non-existent domain, create everything
resp.json.side_effect = [
DnsMadeEasyClientNotFound, # no zone in populate
DnsMadeEasyClientNotFound, # no domain during apply
domains
]
plan = provider.plan(self.expected)
# No root NS, no ignored, no excluded, no unsupported
n = len(self.expected.records) - 5
self.assertEquals(n, len(plan.changes))
self.assertEquals(n, provider.apply(plan))
provider._client._request.assert_has_calls([
# created the domain
call('POST', '/', data={'name': 'unit.tests'}),
# get all domains to build the cache
call('GET', '/'),
            # created at least one of the records with expected data
call('POST', '/123123/records', data={
'name': '_srv._tcp',
'weight': 20,
'value': 'foo-1.unit.tests.',
'priority': 10,
'ttl': 600,
'type': 'SRV',
'port': 30
}),
])
self.assertEquals(27, provider._client._request.call_count)
provider._client._request.reset_mock()
# delete 1 and update 1
provider._client.records = Mock(return_value=[
{
'id': 11189897,
'name': 'www',
'value': '1.2.3.4',
'ttl': 300,
'type': 'A',
},
{
'id': 11189898,
'name': 'www',
'value': '2.2.3.4',
'ttl': 300,
'type': 'A',
},
{
'id': 11189899,
'name': 'ttl',
'value': '3.2.3.4',
'ttl': 600,
'type': 'A',
}
])
# Domain exists, we don't care about return
resp.json.side_effect = ['{}']
wanted = Zone('unit.tests.', [])
wanted.add_record(Record.new(wanted, 'ttl', {
'ttl': 300,
'type': 'A',
'value': '3.2.3.4'
}))
plan = provider.plan(wanted)
self.assertEquals(2, len(plan.changes))
self.assertEquals(2, provider.apply(plan))
        # re-create for the update, and deletes for the 2 records of the other change
provider._client._request.assert_has_calls([
call('POST', '/123123/records', data={
'value': '3.2.3.4',
'type': 'A',
'name': 'ttl',
'ttl': 300
}),
call('DELETE', '/123123/records/11189899'),
call('DELETE', '/123123/records/11189897'),
call('DELETE', '/123123/records/11189898')
], any_order=True)
|
the-stack_106_20386
|
from CyberSource import *
import os
import json
from importlib.machinery import SourceFileLoader
config_file = os.path.join(os.getcwd(), "data", "Configuration.py")
configuration = SourceFileLoader("module.name", config_file).load_module()
# Delete None values from the input request JSON body
def del_none(d):
for key, value in list(d.items()):
if value is None:
del d[key]
elif isinstance(value, dict):
del_none(value)
return d
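# Example (illustrative): del_none({"a": 1, "b": None, "c": {"d": None}}) returns {"a": 1, "c": {}}
# (None values are removed recursively; emptied nested dicts are kept).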
def create_customer_nondefault_payment_instrument_card():
customerTokenId = "AB695DA801DD1BB6E05341588E0A3BDC"
_default = False
cardExpirationMonth = "12"
cardExpirationYear = "2031"
cardType = "001"
card = Tmsv2customersEmbeddedDefaultPaymentInstrumentCard(
expiration_month = cardExpirationMonth,
expiration_year = cardExpirationYear,
type = cardType
)
billToFirstName = "John"
billToLastName = "Doe"
billToCompany = "CyberSource"
billToAddress1 = "1 Market St"
billToLocality = "San Francisco"
billToAdministrativeArea = "CA"
billToPostalCode = "94105"
billToCountry = "US"
billToEmail = "[email protected]"
billToPhoneNumber = "4158880000"
billTo = Tmsv2customersEmbeddedDefaultPaymentInstrumentBillTo(
first_name = billToFirstName,
last_name = billToLastName,
company = billToCompany,
address1 = billToAddress1,
locality = billToLocality,
administrative_area = billToAdministrativeArea,
postal_code = billToPostalCode,
country = billToCountry,
email = billToEmail,
phone_number = billToPhoneNumber
)
instrumentIdentifierId = "7010000000016241111"
instrumentIdentifier = Tmsv2customersEmbeddedDefaultPaymentInstrumentInstrumentIdentifier(
id = instrumentIdentifierId
)
requestObj = PostCustomerPaymentInstrumentRequest(
default = _default,
card = card.__dict__,
bill_to = billTo.__dict__,
instrument_identifier = instrumentIdentifier.__dict__
)
requestObj = del_none(requestObj.__dict__)
requestObj = json.dumps(requestObj)
try:
config_obj = configuration.Configuration()
client_config = config_obj.get_configuration()
api_instance = CustomerPaymentInstrumentApi(client_config)
return_data, status, body = api_instance.post_customer_payment_instrument(customerTokenId, requestObj)
print("\nAPI RESPONSE CODE : ", status)
print("\nAPI RESPONSE BODY : ", body)
return return_data
except Exception as e:
print("\nException when calling CustomerPaymentInstrumentApi->post_customer_payment_instrument: %s\n" % e)
if __name__ == "__main__":
create_customer_nondefault_payment_instrument_card()
|
the-stack_106_20390
|
from datetime import datetime
from pandas.core.frame import DataFrame
def parse_date(date: str):
return datetime.strptime(date, '%m/%d/%y').isoformat()
def parse_column_name(column: str):
return column.split("(")[0].strip().replace(" ", "_").lower()
def parse_boolean(string: str):
return True if string == "Yes" else False if string == "No" else None
def clean_kg_dataframe(dataframe: DataFrame):
dataframe = dataframe.rename(columns=lambda x: parse_column_name(x))
dataframe["date_egg"] = dataframe["date_egg"].apply(
lambda d: parse_date(d))
dataframe = dataframe.drop("comments", 1)
dataframe["clutch_completion"] = dataframe["clutch_completion"].apply(
lambda x: parse_boolean(x))
dataframe = dataframe[(dataframe["sex"] == "FEMALE")
| (dataframe["sex"] == "MALE")]
species_arr = dataframe["species"]
common_name = []
scientific_name = []
for s in species_arr:
name_arr = s.split("(")
common_name.append(name_arr[0].replace(")", "").strip().lower())
scientific_name.append(name_arr[1].replace(")", "").strip().lower())
dataframe['common_name'] = common_name
dataframe['scientific_name'] = scientific_name
dataframe.dropna(inplace=True)
return dataframe
def save_dataframe_to_file(dataframe: DataFrame, location):
dataframe.to_json(location, "records")
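# --- Hedged usage sketch (illustrative, not part of the original module) ---
# The helpers above are pure functions, so their behaviour can be checked directly:
#   parse_date("11/11/07")                  -> '2007-11-11T00:00:00'
#   parse_column_name("Culmen Length (mm)") -> 'culmen_length'
#   parse_boolean("Yes")                    -> True
# Cleaning a raw Palmer-penguins style export; the file names below are hypothetical:
#   import pandas as pd
#   cleaned = clean_kg_dataframe(pd.read_csv("penguins_raw.csv"))
#   save_dataframe_to_file(cleaned, "penguins_clean.json")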
|
the-stack_106_20391
|
# Copyright 2019 Neural Networks and Deep Learning lab, MIPT
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from typing import List, Callable, Optional
from deeppavlov.skills.dsl_skill.context import UserContext
from deeppavlov.skills.dsl_skill.handlers.handler import Handler
class RegexHandler(Handler):
"""
This handler checks whether the message that is passed to it is matched by a regex.
Adds the following key to ```context.handler_payload```:
- 'regex_groups' - groups parsed from regular expression in command, by name
Attributes:
func: handler function
state: state in which handler can be activated
priority: priority of the function. If 2 or more handlers can be activated, function
with the highest priority is selected
context_condition: predicate that accepts user context and checks if the handler should be activated.
Example: `lambda context: context.user_id != 1` checks if user_id is not equal to 1.
That means a user with id 1 will be always ignored by the handler.
commands: handler is activated if regular expression from this list is matched with a user message
"""
def __init__(self,
func: Callable,
commands: Optional[List[str]] = None,
state: Optional[str] = None,
context_condition: Optional[Callable] = None,
priority: int = 0):
super().__init__(func, state, context_condition, priority)
        self.commands = [re.compile(command) for command in commands or []]
def check(self, context: UserContext) -> bool:
"""
Checks:
- if the handler function should be triggered based on the given context via context condition.
- if at least one of the commands is matched to the `context.message`.
Args:
context: user context
Returns:
True, if handler should be activated, False otherwise
"""
is_previous_matches = super().check(context)
if not is_previous_matches:
return False
message = context.message
return any(re.search(regexp, ' '.join(message)) for regexp in self.commands)
def expand_context(self, context: UserContext) -> UserContext:
context.handler_payload = {'regex_groups': {}}
message = context.message
for regexp in self.commands:
match = re.search(regexp, ' '.join(message))
if match is not None:
for group_ind, span in enumerate(match.regs):
context.handler_payload['regex_groups'][group_ind] = message[span[0]: span[1]]
for group_name, group_ind in regexp.groupindex.items():
context.handler_payload['regex_groups'][group_name] = \
context.handler_payload['regex_groups'][group_ind]
return context
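# --- Hedged usage sketch (illustrative only) ---
# Uses only the API visible in this file; in DeepPavlov the UserContext is normally built
# by the DSL-skill machinery rather than by hand, so `context` below is assumed to exist
# with a tokenized `message` such as ("hello", "world").
#   handler = RegexHandler(func=lambda ctx: "hi!", commands=[r"hello (?P<name>\w+)"])
#   if handler.check(context):
#       context = handler.expand_context(context)
#       # context.handler_payload['regex_groups'] now holds the matched groups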
|
the-stack_106_20394
|
from MOON_RouteRiderVanClass import *
import networkx as nx
import datetime as dt
class Sim(object):
def __init__(self, startTime, vehicles, riders, graph, timeDict, waitCoeff, driveCoeff, timeWindow, maxUberWaitTime):
self.time = startTime # Start time is at 6:00am
self.vehiclesDict = vehicles
self.vehicles = [ ]
self.remainingRiders = riders
self.dailyRiders = riders
self.graph = graph
self.systemQueue = [ ]
self.timeDict = timeDict
self.waitCoeff = waitCoeff
self.driveCoeff = driveCoeff
self.timeWindow = timeWindow
self.uberRiders = uberQueue()
self.maxUberWaitTime = maxUberWaitTime
def step(self):
if (self.time == 9*3600-5):
self.time += 7*3600+5
else:
self.time += 5
    # Converts a time string to its time period (floored to the hour)
def getTimePeriod(self, timeString):
splitTime = list(timeString.split(':'))
timePeriod = splitTime[0] + ':00:00'
return timePeriod
# Converts seconds to time string
def convertSecondsToTimeString(self, seconds):
hours = str(int(seconds//3600))
minutes = str(int((seconds%3600)//60))
if len(hours) < 2:
hours = '0' + hours
if len(minutes) < 2:
minutes = '0' + minutes
secs = '00'
return ':'.join([hours, minutes, secs])
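    # Examples (illustrative): 6:00am corresponds to 21600 seconds, so
    #   convertSecondsToTimeString(21600) -> '06:00:00'
    #   getTimePeriod('06:37:12')         -> '06:00:00'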
# Activate vans based on their start times
def activateVans(self):
for veh, startTime in self.vehiclesDict.items():
if (startTime <= self.time) and ((startTime + 5) > self.time):
self.vehicles.append(van(veh, 3, self.time))
# Finds vehicle with minimum marginal cost
def findMinCost(self, timePeriod, rider):
marginalCosts = []
routes = []
for veh in self.vehicles:
marginalCost, newRoute, riderPickup = veh.findBestRoute(self.graph[timePeriod], rider, \
self.timeDict, self.waitCoeff, \
self.driveCoeff, uberRiders=None)
marginalCosts.append(marginalCost)
routes.append(newRoute)
bestIndex = marginalCosts.index(min(marginalCosts))
self.vehicles[bestIndex].route = routes[bestIndex]
self.vehicles[bestIndex].riderQueue.append(rider)
rider.van = self.vehicles[bestIndex].vehID
self.remainingRiders.remove(rider)
def assignRiderToVan(self):
for rider in self.remainingRiders:
# Checks if rider is within the pre-defined time window
#afternoonWindow = 0
#if (self.time >= 57600):
# afternoonWindow = 10*60
if (rider.origTime <= self.time + self.timeWindow):
timePeriod = self.getTimePeriod(self.convertSecondsToTimeString(self.time))
# The case when only one vehicle is working
if (len(self.vehicles) == 1):
marginalCost, newRoute, riderPickup = self.vehicles[0].findBestRoute(self.graph[timePeriod], rider, \
self.timeDict, self.waitCoeff, \
self.driveCoeff, uberRiders=None)
# If wait time for next rider is greater than defined max --> assign Uber
if (riderPickup - self.timeDict[rider.rideID]) > self.maxUberWaitTime:
self.uberRiders.uberQueue.append(rider)
self.remainingRiders.remove(rider)
rider.van = 'Uber'
else:
self.vehicles[0].riderQueue.append(rider)
rider.van = self.vehicles[0].vehID
self.remainingRiders.remove(rider)
self.vehicles[0].route = newRoute
# Case with multiple vans --> No Uber case is built in to this case yet
else:
self.findMinCost(timePeriod, rider)
    # Makes the next trip for the van based on the vehicle's pre-determined route
def nextTrip(self):
timePeriod = self.getTimePeriod(self.convertSecondsToTimeString(self.time))
for veh in self.vehicles:
if (veh.inTransit == False) and ((veh.departureTime == None) or (veh.departureTime <= self.time)):
travTime = veh.getLinkTravTime(self.graph[timePeriod])
veh.arrivalTime = self.time + travTime
veh.inTransit = True
    # Moves the van between locations within the van's route
def moveVans(self):
for veh in self.vehicles:
if (veh.arrivalTime <= self.time) and (veh.inTransit == True):
veh.currentLocation = veh.route[0][1]
veh.inTransit = False
# The case when the vehicle has to dwell before pickup of next request in route
# Takes max of arrival time and the request time
if (len(veh.route) >= 2):
# No dwell if current location == next location
dwellTime = 30 if (veh.route[0][1] != veh.route[1][1]) else 0
if (len(veh.route[0]) == 3) and (veh.route[0][2] == 'pickup'):
# Case when next location is a pickup --> can't depart before request pickup time
veh.departureTime = max(veh.arrivalTime, self.timeDict[veh.route[0][0]]) + dwellTime
elif (len(veh.route[0]) == 3) and (veh.route[0][2] == 'dropoff'):
# Case when next location is a dropoff --> add dwellTime
veh.departureTime = veh.arrivalTime + dwellTime
else:
veh.departureTime = veh.arrivalTime
veh.route.pop(0)
veh.dropoffRiders(veh.arrivalTime)
veh.pickupRiders(veh.departureTime)
|
the-stack_106_20397
|
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
from os import path
# io.open is needed for projects that support Python 2.7
# It ensures open() defaults to text mode with universal newlines,
# and accepts an argument to specify the text encoding
# Python 3 only projects can skip this import
from io import open
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='pylinenotify',
version='1.2.0',
description='using LineNotify more easily',
url='https://github.com/reud/PyLineNotify',
author='reud',
author_email='[email protected]',
license='MIT',
install_requires=['requests'],
keywords='LINENotify',
classifiers=[
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
project_urls={
'Bug Reports': 'https://github.com/reud/PyLineNotify/issues',
'Source': 'https://github.com/reud/PyLineNotify',
},
packages=find_packages(),
)
|
the-stack_106_20400
|
import discord
import meille_secret as secret
import os
import random
from discord.ext import commands
from modules.meille_settings import Settings as settings
async def get_prefix (bot, msg):
return settings.get_config (config, msg.guild.id) ['prefix']
intents = discord.Intents.default ()
intents.members = True
bot = commands.Bot (command_prefix = get_prefix, intents = intents, case_insensitive = True, help_command = None)
config = settings.load_config ()
# LOAD ALL COGS FROM .MODULES
for filename in os.listdir ('./modules'):
if filename.startswith ('cog_') and filename.endswith ('.py'):
try:
bot.load_extension (f'modules.{filename [:-3]}')
print (f'Loaded {filename [4 : -3].upper ()};')
except ImportError as err:
print (f'Failed to load {filename [4 : -3].upper ()}\n\nError Log: {err}')
# this function allows the bot to randomly learn what users say in the server it's in
def learn (config, ctx, cfg):
no_duplicates = True
for i in cfg ['interaction_data']:
if i == ctx.message.content:
no_duplicates = False
if no_duplicates:
settings.get_config (config, ctx.guild.id) ['interaction_data'].append (ctx.message.content)
print (f"Learned a new interaction in {cfg ['server_name']}. Possible interactions: {len (cfg ['interaction_data'])}.")
settings.save_config (config)
# message cache for all servers the bot's in; a mechanism to prevent bot spam on interactions
def initialize_message_cache (guilds):
    # give each guild its own cache dict; dict.fromkeys() would share one mutable
    # dict across every server, so one guild's message chain would leak into the others
    cache = {
        g.id : {
            'last_sent' : None,
            'last_msg' : None,
            'count' : 1
        }
        for g in guilds
    }
print ('Message cache initialized for all servers.')
return cache
def repeat_processing (ctx, cfg):
msg = ctx.message.content
guild_id = ctx.guild.id
if message_cache [guild_id] ['last_sent'] == msg:
return False
elif message_cache [guild_id] ['last_msg'] == msg:
message_cache [guild_id] ['count'] += 1
if message_cache [guild_id] ['count'] >= cfg ['repeat_trigger']:
message_cache [guild_id] ['last_sent'] = msg
message_cache [guild_id] ['count'] = 1
return True
else:
return False
else:
message_cache [guild_id] ['last_msg'] = msg
message_cache [guild_id] ['count'] = 1
return False
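# Illustration (hedged): with repeat_trigger set to 3, repeat_processing() returns True only
# after three identical messages in a row; the echoed text is then stored in 'last_sent' so
# the same chain is not repeated again until a different message builds a new chain.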
@bot.command ()
async def load (ctx, extension):
bot.load_extension (f'modules.{extension}')
@bot.event
async def on_ready ():
#config = settings.initialize_config (bot.guilds)
global message_cache
message_cache = initialize_message_cache (bot.guilds)
for g in bot.guilds:
for c in config:
if g.id == c ['server_id']:
if g.name != c ['server_name']:
c ['server_name'] = g.name
print (f'Updated server name: {g.name}')
settings.save_config (config)
print ('\nEy, I\'m here! M\'I late to class? \'v\'')
@bot.event
async def on_guild_join (guild):
print (f'Added to new server: {guild.name}.')
settings.add_config (config, guild.id, guild.name)
settings.save_config (config)
@bot.event
async def on_guild_remove (guild):
print (f'Removed from server: {guild.name}.')
settings.remove_config (config, guild.id)
settings.save_config (config)
@bot.event
async def on_member_join (member):
greettings = settings.get_config (config, member.guild.id) ['greetings']
auto_add_roles = settings.get_config (config, member.guild.id) ['auto_add_roles']
if greettings != False:
greettings_message = settings.get_config (config, member.guild.id) ['greetings_message']
for c in member.guild.channels:
if c.name == greettings:
msg = f'Wassup, {member.name}!'
if greettings_message is not None:
msg = msg + ' ' + greettings_message
await c.send (msg)
if auto_add_roles != False:
try:
for i in member.guild.roles:
for j in auto_add_roles:
if i.name == j:
await member.add_roles (i)
except:
# ONE OR MORE ROLES NOT FOUND, skip to prevent errors
# ex.: role's been deleted but the server owner forgot to update the bot's configuration accordingly
pass
@bot.event
async def on_message (message):
if not message.author.bot and message.guild is not None:
ctx = await bot.get_context (message, cls = commands.context.Context)
cfg = settings.get_config (config, ctx.guild.id)
# BOT COMMAND INPUT; check bugcheck and console log
if type (ctx.prefix) == tuple or type (ctx.prefix) == str:
if message.content.startswith (ctx.prefix):
pfx = message.content [0 : len (ctx.prefix)]
cmd_raw = message.content.removeprefix (pfx)
cmd = cmd_raw.split (' ').pop (0)
args = cmd_raw.split (' ') [1:]
if type (args) is not list:
args = [args]
print (f'PREFIX = {pfx}; COMMAND = {cmd}; ARGS = {args}; COMMAND LINE = {cmd_raw}')
# misc interaction that occurs on occasion if the interaction functions are enabled via configuration
if cfg ['interact'] and random.randint (1, cfg ['interaction_rate']) == cfg ['interaction_rate']:
await ctx.send ("Ah... not now. I'm kinda busy. 'v'")
else:
setattr (ctx, 'cfg', cfg)
await bot.invoke (ctx)
# NORMAL MESSAGES
else:
if message.content == "'v'":
if message_cache [ctx.guild.id] ['last_msg'] != "'v' !!":
message_cache [ctx.guild.id] ['last_msg'] = "'v' !!"
await message.channel.send ("'v' !!")
# entertainment interactions:
# the bot will jump in onto chains- for example: when everyone reacts to a post with the same message;
# the bot will also randomly send messages to join conversations. the messages learned are based on what the users say.
else:
if cfg ['interact']:
if random.randint (1, cfg ['interaction_rate']) == cfg ['interaction_rate']:
await ctx.send (cfg ['interaction_data'] [random.randint (0, len (cfg ['interaction_data']) - 1)])
elif random.randint (1, cfg ['interaction_rate']) == cfg ['interaction_rate'] and message.content and not message.mentions:
learn (config, ctx, cfg)
if cfg ['repeat'] and message.content and not message.mentions:
if repeat_processing (ctx, cfg):
await ctx.send (message.content)
# auto-save and load up server settings whenever there's a change
@bot.event
async def on_command_completion (ctx):
if ctx.cog.qualified_name == 'Config' and ctx.invoked_with.lower () not in ('config', 'show', 'list'):
global config
config = settings.load_config ()
bot.run (secret.token)
|
the-stack_106_20401
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('records', '0006_auto_20170524_1851'),
]
operations = [
migrations.CreateModel(
name='Message',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True,
serialize=False, auto_created=True)),
('text', models.TextField()),
],
),
migrations.AddField(
model_name='record',
name='message',
field=models.ForeignKey(blank=True, null=True,
to='records.Message',
on_delete=models.CASCADE),
),
]
|
the-stack_106_20402
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
try:
from PyQt5.QtGui import *
from PyQt5.QtCore import *
except ImportError:
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from libs.utils import distance
import sys
DEFAULT_LINE_COLOR = QColor(0, 255, 0, 128)
DEFAULT_FILL_COLOR = QColor(255, 0, 0, 128)
DEFAULT_SELECT_LINE_COLOR = QColor(255, 255, 255)
DEFAULT_SELECT_FILL_COLOR = QColor(0, 128, 255, 155)
DEFAULT_VERTEX_FILL_COLOR = QColor(0, 255, 0, 255)
DEFAULT_HVERTEX_FILL_COLOR = QColor(255, 0, 0)
class Shape(object):
P_SQUARE, P_ROUND = range(2)
MOVE_VERTEX, NEAR_VERTEX = range(2)
# The following class variables influence the drawing
# of _all_ shape objects.
line_color = DEFAULT_LINE_COLOR
fill_color = DEFAULT_FILL_COLOR
select_line_color = DEFAULT_SELECT_LINE_COLOR
select_fill_color = DEFAULT_SELECT_FILL_COLOR
vertex_fill_color = DEFAULT_VERTEX_FILL_COLOR
h_vertex_fill_color = DEFAULT_HVERTEX_FILL_COLOR
point_type = P_ROUND
point_size = 8
scale = 1.0
label_font_size = 8
def __init__(self, label=None, line_color=None, difficult=False, paint_label=False):
self.label = label
self.points = []
self.fill = False
self.selected = False
self.difficult = difficult
self.paint_label = paint_label
self._highlight_index = None
self._highlight_mode = self.NEAR_VERTEX
self._highlight_settings = {
self.NEAR_VERTEX: (4, self.P_ROUND),
self.MOVE_VERTEX: (1.5, self.P_SQUARE),
}
self._closed = False
if line_color is not None:
# Override the class line_color attribute
# with an object attribute. Currently this
# is used for drawing the pending line a different color.
self.line_color = line_color
def close(self):
self._closed = True
def reach_max_points(self):
if len(self.points) >= 4:
return True
return False
def add_point(self, point):
if not self.reach_max_points():
self.points.append(point)
def pop_point(self):
if self.points:
return self.points.pop()
return None
def is_closed(self):
return self._closed
def set_open(self):
self._closed = False
def paint(self, painter):
if self.points:
color = self.select_line_color if self.selected else self.line_color
pen = QPen(color)
# Try using integer sizes for smoother drawing(?)
pen.setWidth(max(1, int(round(2.0 / self.scale))))
painter.setPen(pen)
line_path = QPainterPath()
vertex_path = QPainterPath()
line_path.moveTo(self.points[0])
# Uncommenting the following line will draw 2 paths
# for the 1st vertex, and make it non-filled, which
# may be desirable.
            # self.draw_vertex(vertex_path, 0)
for i, p in enumerate(self.points):
line_path.lineTo(p)
self.draw_vertex(vertex_path, i)
if self.is_closed():
line_path.lineTo(self.points[0])
painter.drawPath(line_path)
painter.drawPath(vertex_path)
painter.fillPath(vertex_path, self.vertex_fill_color)
# Draw text at the top-left
if self.paint_label:
min_x = sys.maxsize
min_y = sys.maxsize
min_y_label = int(1.25 * self.label_font_size)
for point in self.points:
min_x = min(min_x, point.x())
min_y = min(min_y, point.y())
if min_x != sys.maxsize and min_y != sys.maxsize:
font = QFont()
font.setPointSize(self.label_font_size)
font.setBold(True)
painter.setFont(font)
if self.label is None:
self.label = ""
if min_y < min_y_label:
min_y += min_y_label
painter.drawText(min_x, min_y, self.label)
if self.fill:
color = self.select_fill_color if self.selected else self.fill_color
painter.fillPath(line_path, color)
def draw_vertex(self, path, i):
d = self.point_size / self.scale
shape = self.point_type
point = self.points[i]
if i == self._highlight_index:
size, shape = self._highlight_settings[self._highlight_mode]
d *= size
if self._highlight_index is not None:
self.vertex_fill_color = self.h_vertex_fill_color
else:
self.vertex_fill_color = Shape.vertex_fill_color
if shape == self.P_SQUARE:
path.addRect(point.x() - d / 2, point.y() - d / 2, d, d)
elif shape == self.P_ROUND:
path.addEllipse(point, d / 2.0, d / 2.0)
else:
assert False, "unsupported vertex shape"
def nearest_vertex(self, point, epsilon):
for i, p in enumerate(self.points):
if distance(p - point) <= epsilon:
return i
return None
def contains_point(self, point):
return self.make_path().contains(point)
def make_path(self):
path = QPainterPath(self.points[0])
for p in self.points[1:]:
path.lineTo(p)
return path
def bounding_rect(self):
return self.make_path().boundingRect()
def move_by(self, offset):
self.points = [p + offset for p in self.points]
def move_vertex_by(self, i, offset):
self.points[i] = self.points[i] + offset
def highlight_vertex(self, i, action):
self._highlight_index = i
self._highlight_mode = action
def highlight_clear(self):
self._highlight_index = None
def copy(self):
shape = Shape("%s" % self.label)
shape.points = [p for p in self.points]
shape.fill = self.fill
shape.selected = self.selected
shape._closed = self._closed
if self.line_color != Shape.line_color:
shape.line_color = self.line_color
if self.fill_color != Shape.fill_color:
shape.fill_color = self.fill_color
shape.difficult = self.difficult
return shape
def __len__(self):
return len(self.points)
def __getitem__(self, key):
return self.points[key]
def __setitem__(self, key, value):
self.points[key] = value
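# --- Hedged usage sketch (illustrative, not part of labelImg) ---
# Builds a rectangular shape from four corner points; QPointF comes from the Qt bindings
# star-imported above. paint() needs a live QPainter, so only the geometry helpers are shown.
#   s = Shape(label="car")
#   for x, y in [(0, 0), (100, 0), (100, 50), (0, 50)]:
#       s.add_point(QPointF(x, y))
#   s.close()
#   s.contains_point(QPointF(10, 10))   # True: the point lies inside the rectangle
#   s.bounding_rect()                   # QRectF(0, 0, 100, 50)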
|
the-stack_106_20404
|
# -*- coding: utf-8 -*- #
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for creating security policies rules."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.api_lib.compute.security_policies import client
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.compute.security_policies import flags as security_policies_flags
from googlecloudsdk.command_lib.compute.security_policies.rules import flags
from googlecloudsdk.core import properties
class Create(base.CreateCommand):
r"""Create a Google Compute Engine security policy rule.
*{command}* is used to create security policy rules.
For example to create a rule at priority 1000 to block the IP range
1.2.3.0/24, run:
$ {command} 1000 \
--action deny-403 \
--security-policy my-policy \
--description "block 1.2.3.0/24" \
--src-ip-ranges 1.2.3.0/24
"""
SECURITY_POLICY_ARG = None
@classmethod
def Args(cls, parser):
flags.AddPriority(parser, 'add')
cls.SECURITY_POLICY_ARG = (
security_policies_flags.SecurityPolicyArgumentForRules())
cls.SECURITY_POLICY_ARG.AddArgument(parser)
flags.AddMatcher(parser)
flags.AddAction(parser)
flags.AddDescription(parser)
flags.AddPreview(parser, default=None)
parser.display_info.AddCacheUpdater(
security_policies_flags.SecurityPoliciesCompleter)
def Run(self, args):
holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
ref = holder.resources.Parse(
args.name,
collection='compute.securityPolicyRules',
params={
'project': properties.VALUES.core.project.GetOrFail,
'securityPolicy': args.security_policy
})
security_policy_rule = client.SecurityPolicyRule(
ref, compute_client=holder.client)
return security_policy_rule.Create(
src_ip_ranges=args.src_ip_ranges,
expression=args.expression,
action=args.action,
description=args.description,
preview=args.preview)
|
the-stack_106_20405
|
import numpy as np
from OperatorBase import *
class OAdd(TwoOperandOperator):
def __init__(self,Name=""):
super().__init__(Name)
def Calculate(self):
TensorInput1=self.Inputs[0]
TensorInput2=self.Inputs[1]
#print(TensorInput1.Data.shape)
#print(TensorInput2.Data.shape)
assert TensorInput1.Data.shape==TensorInput2.Data.shape
AddResult=TensorInput1.Data+TensorInput2.Data
self.Output.SetData(AddResult)
def LocalGrad(self,DataNode,DownStreamGrad):
return np.ones(DataNode.Data.shape)*DownStreamGrad
class OMul(TwoOperandOperator):
def __init__(self,Name=""):
super().__init__(Name)
def Calculate(self):
TensorInput1=self.Inputs[0]
TensorInput2=self.Inputs[1]
assert TensorInput1.Data.shape==TensorInput2.Data.shape
AddResult=TensorInput1.Data*TensorInput2.Data
self.Output.SetData(AddResult)
def LocalGrad(self,DataNode,DownStreamGrad):
TensorInput1=self.Inputs[0]
TensorInput2=self.Inputs[1]
if DataNode==TensorInput1:
return TensorInput2.Data*DownStreamGrad
else:
return TensorInput1.Data*DownStreamGrad
class OMinus(TwoOperandOperator):
def __init__(self,Name=""):
super().__init__(Name)
def Calculate(self):
TensorInput1=self.Inputs[0]
TensorInput2=self.Inputs[1]
assert TensorInput1.Data.shape==TensorInput2.Data.shape
AddResult=TensorInput1.Data-TensorInput2.Data
self.Output.SetData(AddResult)
def LocalGrad(self,DataNode,DownStreamGrad):
TensorInput1=self.Inputs[0]
TensorInput2=self.Inputs[1]
if DataNode==TensorInput1:
return np.ones(DataNode.Data.shape)*DownStreamGrad
else:
return -1*np.ones(DataNode.Data.shape)*DownStreamGrad
class OMatMul(TwoOperandOperator):
def __init__(self,Name=""):
super().__init__(Name)
def Calculate(self):
TensorInput1=self.Inputs[0]
TensorInput2=self.Inputs[1]
assert TensorInput1.Data.shape[1]==TensorInput2.Data.shape[0]
MatMulResult=np.matmul(TensorInput1.Data,TensorInput2.Data)
self.Output.SetData(MatMulResult)
def LocalGrad(self,DataNode,DownStreamGrad):
TensorInput1=self.Inputs[0]
TensorInput2=self.Inputs[1]
if DataNode==TensorInput1:
return np.matmul(DownStreamGrad,TensorInput2.Data.T)
else:
return np.matmul(TensorInput1.Data.T,DownStreamGrad)
class ORelu(OneOperandOperator):
def __init__(self,Name=""):
super().__init__(Name)
self.Cache=None
def Calculate(self):
TensorInput=self.Inputs[0]
self.Cache=TensorInput.Data>0
ReluResult=TensorInput.Data*self.Cache
self.Output.SetData(ReluResult)
def LocalGrad(self,DataNode,DownStreamGrad):
TensorInput1=self.Inputs[0]
return DownStreamGrad*self.Cache
class OSigmoid(OneOperandOperator):
def __init__(self,Name=""):
super().__init__(Name)
def Calculate(self):
TensorInput=self.Inputs[0]
SigmoidResult=1/(1+np.exp(-1*TensorInput.Data))
self.Output.SetData(SigmoidResult)
def LocalGrad(self,DataNode,DownStreamGrad):
TensorInput1=self.Inputs[0]
OutputData=self.Output.Data
return DownStreamGrad*(OutputData*(1-OutputData))
class ODropout(OneOperandOperator):
def __init__(self,Dropout=0.1,Name=""):
super().__init__(Name)
self.Cache=None
self.Dropout=Dropout
assert 0<=Dropout<1.0
def Calculate(self):
TensorInput=self.Inputs[0]
if TRAIN():
self.Cache=np.random.binomial(1,1-self.Dropout,TensorInput.Data.shape)
DropoutResult=TensorInput.Data*self.Cache
self.Output.SetData(DropoutResult)
else:
DropoutResult=TensorInput.Data*(1-self.Dropout)
self.Output.SetData(DropoutResult)
def LocalGrad(self,DataNode,DownStreamGrad):
TensorInput1=self.Inputs[0]
assert type(self.Cache)!=type(None)
return DownStreamGrad*self.Cache
class OSum(OneOperandOperator):
def __init__(self,Name=""):
super().__init__(Name)
def Calculate(self):
TensorInput=self.Inputs[0]
#self.Cache=TensorInput.Data>0
ReluResult=np.sum(TensorInput.Data)
self.Output.SetData(ReluResult)
def LocalGrad(self,DataNode,DownStreamGrad):
TensorInput1=self.Inputs[0]
return np.ones(TensorInput1.Data.shape)*DownStreamGrad
class OSelect(OneOperandOperator):
def __init__(self,Name=""):
super().__init__(Name)
def __call__(self,TensorInput,Range):
self.Inputs.append(TensorInput)
self.Output=DataNode()
self.Range=Range
if DEBUG():
print(type(self).__name__+self.DEBUGID+"->Builded")
return self.Output
def Calculate(self):
TensorInput=self.Inputs[0]
Range=self.Range
InputH=TensorInput.Data.shape[0]
InputW=TensorInput.Data.shape[1]
assert 0<=Range[0]<=Range[1]<=InputH and 0<=Range[2]<=Range[3]<=InputW
SelectResult=TensorInput.Data[Range[0]:Range[1],Range[2]:Range[3]]
self.Output.SetData(SelectResult)
def LocalGrad(self,DataNode,DownStreamGrad):
TensorInput=self.Inputs[0]
ResultGrad=np.zeros(TensorInput.Data.shape)
ResultGrad[self.Range[0]:self.Range[1],self.Range[2]:self.Range[3]]=np.ones(DownStreamGrad.shape)*DownStreamGrad
return ResultGrad
class OConcat(OneOperandOperator):
def __init__(self,Name=""):
super().__init__(Name)
def __call__(self,TensorList,Shape=None):
self.Inputs.extend(TensorList)
self.Output=DataNode()
self.Shape=Shape
self.ConcatShape=None
self.Range=None
self.FirstDimRangeDict=None
if DEBUG():
print(type(self).__name__+self.DEBUGID+"->Builded")
return self.Output
def Calculate(self):
self.CheckAndRecordRange()
ConcatResult=np.concatenate([DataNode.Data for DataNode in self.Inputs])
self.ConcatShape=ConcatResult.shape
if self.Shape!=None:
self.ConcatShape=ConcatResult.shape
ConcatResult=ConcatResult.reshape(self.Shape)
self.Output.SetData(ConcatResult)
def LocalGrad(self,DataNode,DownStreamGrad):
if type(self.Shape)!=type(None):
ConcatDownStreamGrad=DownStreamGrad.reshape(self.ConcatShape)
else:
ConcatDownStreamGrad=DownStreamGrad
Range=self.FirstDimRangeDict[DataNode]
return np.ones(Range[0])*ConcatDownStreamGrad[Range[1]:Range[2]]
def CheckAndRecordRange(self):
#check
InputShapes=[DataNode.Data.shape[1:] for DataNode in self.Inputs]
CurrentShape=InputShapes[0]
LenOfShape=len(CurrentShape)
ResultBool=True
for Elem in InputShapes:
if len(Elem) ==0:
continue
ResultBool=ResultBool and np.sum(Elem==CurrentShape)==LenOfShape
assert ResultBool
#record
FirstDimRange={}
Acumulate=0
#print([DataNode.Data.shape for DataNode in self.Inputs])
#print([DataNode.Data for DataNode in self.Inputs])
for DataNode,Elem in [( DataNode , DataNode.Data.shape[0]) for DataNode in self.Inputs]:
FirstDimRange[DataNode]=(DataNode.Data.shape,Acumulate,Acumulate+Elem)
Acumulate=Acumulate+Elem
self.FirstDimRangeDict=FirstDimRange
class OTranspose(OneOperandOperator):
def __init__(self,Name=""):
super().__init__(Name)
self.Cache=None
def Calculate(self):
TensorInput=self.Inputs[0]
TransInputData=np.transpose(TensorInput.Data)
TransResult=TransInputData*np.ones(TransInputData.shape)
self.Output.SetData(TransResult)
def LocalGrad(self,DataNode,DownStreamGrad):
TransDownStreamGrad=np.transpose(DownStreamGrad)
return TransDownStreamGrad*np.ones(TransDownStreamGrad.shape)
class OSoftmaxEntropy(TwoOperandOperator):
def __init__(self,Name=""):
super().__init__(Name)
self.Label=None
self.SoftmaxResult=None
def Calculate(self):
TensorInput1=self.Inputs[0]
TensorInput2=self.Inputs[1]
assert TensorInput1.Data.shape[1]==1
assert TensorInput2.Data.shape[1]==1
assert TensorInput1.Data.shape[0]==TensorInput2.Data.shape[0]
assert TensorInput2.CanBeClear==False
assert TensorInput2.NeedGrad==False
#assumption:TesorInput2 is an onehot
#x=np.exp(TensorInput1.Data)
#print(TensorInput1.Data)
#print(x)
SoftmaxResult=np.exp(TensorInput1.Data)/(np.sum(np.exp(TensorInput1.Data))+1e-9)
Elem=SoftmaxResult[0]
Flag=True
for E in SoftmaxResult:
Flag=Flag and Elem==E
if Flag:
SoftmaxResult=np.ones(SoftmaxResult.shape)*(1/SoftmaxResult.shape[0])
Label=np.argmax(TensorInput2.Data)
self.Label=Label
self.SoftmaxResult=SoftmaxResult
SoftmaxEntropyResult=-1*np.log(SoftmaxResult[Label])
self.Output.SetData(SoftmaxEntropyResult)
def LocalGrad(self,DataNode,DownStreamGrad):
TensorInput1=self.Inputs[0]
assert DownStreamGrad.shape[0]==1
assert DownStreamGrad.shape[1]==1
if DataNode==TensorInput1:
Local=np.zeros(TensorInput1.Data.shape)
Local[self.Label]=1-self.SoftmaxResult[self.Label]
#print(Local*DownStreamGrad)
return Local*DownStreamGrad
else:
assert "TensorInput2 should not need Grad"
#return DownStreamGrad*self.Cache
class OSoftmaxForEval(OneOperandOperator):
def __init__(self,Name=""):
super().__init__(Name)
def Calculate(self):
TensorInput=self.Inputs[0]
SoftmaxResult=np.exp(TensorInput.Data)/np.sum(np.exp(TensorInput.Data))
self.Output.SetData(SoftmaxResult)
def LocalGrad(self,DataNode,DownStreamGrad):
assert "this operator is only for eval"==0
#TensorInput1=self.Inputs[0]
#pass
#return DownStreamGrad*self.Cache
class OSoftmax(OneOperandOperator):
def __init__(self,Name=""):
super().__init__(Name)
self.Sum=None
self.Softmax=None
def Calculate(self):
TensorInput=self.Inputs[0]
#print(TensorInput.Data)
self.Sum=np.sum(np.exp(TensorInput.Data))
SoftmaxResult=np.exp(TensorInput.Data)/(self.Sum+1e-9)
self.Softmax=SoftmaxResult
self.Output.SetData(SoftmaxResult)
def LocalGrad(self,DataNode,DownStreamGrad):
TensorInput=self.Inputs[0]
return DownStreamGrad*(self.Sum-self.Softmax)*self.Softmax/(self.Sum*self.Sum+1e-9)
#TensorInput1=self.Inputs[0]
#pass
#return DownStreamGrad*self.Cache
class OEntropy(TwoOperandOperator):
def __init__(self,Name=""):
super().__init__(Name)
def Calculate(self):
TensorInput1=self.Inputs[0]
TensorInput2=self.Inputs[1]
assert TensorInput1.Data.shape[0]==TensorInput2.Data.shape[0]
assert TensorInput1.Data.shape[1]==1
assert TensorInput2.Data.shape[1]==1
Labels=TensorInput2
Result=-1*Labels.Data*np.log(TensorInput1.Data+1e-9)
self.Output.SetData(np.sum(Result))
def LocalGrad(self,DataNode,DownStreamGrad):
TensorInput1=self.Inputs[0]
TensorInput2=self.Inputs[1]
if DataNode==TensorInput2:
return -1*DownStreamGrad*np.log(TensorInput1.Data+1e-9)
else:
return -1*DownStreamGrad*TensorInput2.Data*(1/(TensorInput1.Data+1e-9))
class OMaxPool(OneOperandOperator):
def __init__(self,Name=""):
super().__init__(Name)
def __call__(self,TensorInput,KernelSize):
self.Inputs.append(TensorInput)
self.Output=DataNode()
self.KernelSize=KernelSize
self.Indexs=None
self.OutputH=None
self.OutputW=None
self.TrueIndexs=None
if DEBUG():
print(type(self).__name__+self.DEBUGID+"->Builded")
return self.Output
def Calculate(self):
TensorInput=self.Inputs[0]
Indexs=self.GenSubIndx()
Result=[]
SubIndexs=[]
for Ind in Indexs:
SubMatrix=TensorInput.Data[Ind[0]:Ind[1],Ind[2]:Ind[3]]
SubIndexs.append(self.GetMaxInd(SubMatrix))
Result.append(np.max(SubMatrix))
Result=np.array(Result)
Result=Result.reshape((self.OutputH,self.OutputW))
self.Output.SetData(Result)
self.BackToInputShape(self.Indexs,SubIndexs)
def GenSubIndx(self):
TensorInput=self.Inputs[0]
InputSize=TensorInput.Data.shape
KernelSize=self.KernelSize
assert InputSize[0]%KernelSize[0]==0
assert InputSize[1]%KernelSize[1]==0
self.OutputH=int(InputSize[0]/KernelSize[0])
self.OutputW=int(InputSize[1]/KernelSize[1])
Indexs=[]
for x in range(self.OutputH):
for y in range(self.OutputW):
Indexs.append((x*KernelSize[0],(x+1)*KernelSize[0],y*KernelSize[1],(y+1)*KernelSize[1]))
self.Indexs=Indexs
return Indexs
def GetMaxInd(self,Matrix):
x=np.argmax(np.max(Matrix,axis=1))
y=np.argmax(np.max(Matrix,axis=0))
return (x,y)
def BackToInputShape(self,Indexs,SubIndexs):
def LocalBack(Index,SubIndex):
return (Index[0]+SubIndex[0],Index[2]+SubIndex[1])
TrueIndexs=[]
for I,SI in zip(Indexs,SubIndexs):
TrueIndexs.append(LocalBack(I,SI))
self.TrueIndexs=TrueIndexs
def LocalGrad(self,DataNode,DownStreamGrad):
TensorInput=self.Inputs[0]
Result=np.zeros(TensorInput.Data.shape)
FlattenDownStreamGrad=DownStreamGrad.reshape([self.OutputH*self.OutputW])
for i,Ind in enumerate(self.TrueIndexs):
Result[Ind[0]][Ind[1]]=FlattenDownStreamGrad[i]
ResultGrad=Result
return ResultGrad
class OFlatten(OneOperandOperator):
def __init__(self,Name=""):
super().__init__(Name)
self.Length=None
self.InputH=None
self.InputW=None
def Calculate(self):
TensorInput=self.Inputs[0]
self.InputH=TensorInput.Data.shape[0]
self.InputW=TensorInput.Data.shape[1]
self.Length=self.InputH*self.InputW
FlattenResult=np.ones([self.Length,1])*TensorInput.Data.reshape([self.Length,1])
self.Output.SetData(FlattenResult)
def LocalGrad(self,DataNode,DownStreamGrad):
return DownStreamGrad.reshape(self.InputH,self.InputW)*np.ones([self.InputH,self.InputW])
class OTanh(OneOperandOperator):
def __init__(self,Name=""):
super().__init__(Name)
self.Result=None
def Calculate(self):
TensorInput=self.Inputs[0]
EXPPositive=np.exp(TensorInput.Data)
EXPNegatvie=np.exp(-1*TensorInput.Data)
Result=(EXPPositive-EXPNegatvie)/(EXPPositive+EXPNegatvie)
self.Result=Result
self.Output.SetData(Result)
def LocalGrad(self,DataNode,DownStreamGrad):
return DownStreamGrad*(1-self.Result*self.Result)
#class ONormal(OneOperandOperator):
# def __init__(self,Name=""):
# super().__init__(Name)
# self.Norm=None
# def Calculate(self):
# TensorInput=self.Inputs[0]
# Norm=np.linalg.norm(TensorInput.Data)
# self.Norm=Norm
# NormResult=TensorInput.Data/Norm
# self.Output.SetData(NormResult)
# def LocalGrad(self,DataNode,DownStreamGrad):
# TensorInput=self.Inputs[0]
#
# return DownStreamGrad*(1/self.Norm)
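# --- Hedged usage sketch (illustrative) ---
# The operators above follow a build-then-run pattern: calling an operator on DataNode
# inputs wires up an Output node (see OSelect.__call__ / OConcat.__call__), Calculate()
# fills Output.Data, and LocalGrad() maps a downstream gradient back onto one input.
# The exact __call__ signature of TwoOperandOperator lives in OperatorBase and is assumed here.
#   a, b = DataNode(), DataNode()
#   a.SetData(np.array([[1.0, 2.0]])); b.SetData(np.array([[3.0, 4.0]]))
#   out = OAdd()(a, b)          # assumed: TwoOperandOperator.__call__(input1, input2)
#   # after the graph runs Calculate(), out.Data == [[4.0, 6.0]]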
|
the-stack_106_20406
|
#!/usr/bin/python3
from argparse import ArgumentParser
from multiprocessing import Pool, cpu_count, Value
from time import sleep
from moviepy.editor import (
ImageClip,
VideoFileClip,
concatenate_videoclips,
AudioFileClip,
afx,
CompositeAudioClip,
)
from os import listdir, makedirs
from os.path import isfile, join, exists
import sys
import tarfile
from datetime import date
import subprocess
verbose = False
school = ""
# home = "S:\\Interdepartmental Share\\IT\\Scripts\\VID MERGE\\VidBooth" #Where are the Video Booth Clips?
home = sys.path[0]
audioFiles = [["bensound-funday.mp3", 6]] # pairs, file -> start time/beat drop
introCards = ["introCard.png"]
outroCards = ["outroCard.png"]
filecount = 0
currentfile = 0
dir = ""
# mainLog = open("log.txt" ,"w+") # Where do we store the output meta file
def main():
## SETUP ARGPARSE ##
global school
global verbose
global mainLog
global filecount
global currentfile
global home
parser = ArgumentParser(description="MWS Video Booth : Utility : Render Videos")
parser.add_argument(
"-v",
"--verbose",
dest="verbose",
default=False,
required=False,
action="store_true",
help="Display progress",
)
parser.add_argument(
"-s",
"--school",
dest="school",
default=False,
required=True,
action="store",
help="The School + City",
)
args = parser.parse_args()
verbose = args.verbose
school = args.school
#######################
# MAIN #
#######################
# mainLog.write("Number of cpu : " + str(cpu_count()) + '\n')
if verbose:
print("Number of cpu : ", cpu_count())
dir = join(home, "Captures", school)
files = listdir(dir)
filecount = len(files)
for i in range(len(files)):
files[i] = [school, files[i]]
dispatcher(files)
def init(_currentfile, _filecount):
global currentfile
global filecount
currentfile = _currentfile
filecount = _filecount
def dispatcher(files):
global currentfile
global filecount
global verbose
global home
global school
dir = join(home, "Finals", school)
currentfile = Value("i", 0)
filecount = Value("i", filecount)
p = Pool(cpu_count(), initializer=init, initargs=(currentfile, filecount))
p.map(dispatcher_process, files)
p.close()
p.join()
if verbose:
print("Archiving... ", dir)
# Archive the finals
arc = archive(dir)
if verbose:
print(arc, " has been archived in: ", dir)
def dispatcher_process(file):
global filecount
global currentfile
# global mainLog
school = file[0]
name = file[1]
currentfile.value += 1
_currentfile = currentfile.value
# invoke = "python mergeClips.py"
# args = '-v -school "' + school + '" -student "' + name + '"'
# mainLog.write("[Starting]: School: " + school + ' | Student: ' + name + '\n')
print(
"[Starting "
+ str(_currentfile)
+ "/"
+ str(filecount.value)
+ "]\nSchool: "
+ school
+ " | Student: "
+ name
)
mergeClips(school, name)
# mainLog.write("[Done]: School: " + school + ' | Student: ' + name + '\n')
print(
"[Done "
+ str(_currentfile)
+ "/"
+ str(filecount.value)
+ "]\nSchool: "
+ school
+ " | Student: "
+ name
)
def get_volume(file):
file = file.replace(" ", "\\ ")
cmd = "ffmpeg -i {0} -filter:a volumedetect -f null /dev/null".format(file)
out = subprocess.run(cmd, shell=True, stderr=subprocess.PIPE).stderr.decode("utf-8")
spool = [o for o in out.split("\n") if "[Parsed_volumedetect" in o]
mean = max = 0.0
# print ("FILE: {0} \nOUT: {1} \nSPOOL: {2}".format(file, out, spool))
for item in spool:
if "mean_volume" in item:
mean = float(item.split(":")[-1].replace("dB", "").strip())
# print("FILE: {0} | MEAN: {1}".format(file, mean))
elif "max_volume" in item:
max = float(item.split(":")[-1].replace("dB", "").strip())
# print("FILE: {0} | MAX: {1}".format(file, max))
return mean, max
def mergeClips(school, student, verbose=False):
# global mainLog
global home
# global verbose
# SET VARIABLES
dir = join(
home, "Finals", school, student
) # The base of the video booth clips including school and student subfolders
capturesHome = join(home, "Captures", school, student)
introHome = join(
home, "Assets", "IntroCards"
) # The folder intro cards can be found
outroHome = join(
home, "Assets", "OutroCards"
) # The folder outro cards can be found
audioHome = join(home, "Assets", "Audio") # The folder the audio can be found
introCard = introCards[0] # The selected intro card name
outroCard = outroCards[0] # The selected outro card name
audioFile = audioFiles[0][0] # The selected audio file name
# replace this with FFMPEG-NORMALIZE
    clipVolumeMult = 1  # How much to boost/reduce the Video Booth audio, 1.0 remains the same
    musicVolumeMult = 0.05  # How much to boost/reduce the music, 1.0 remains the same
fade_duration = 0.5 # in seconds
introCardDuration = audioFiles[0][1] # in seconds
outroCardDuration = audioFiles[0][1] # in seconds
# create folders if they do not already exist
if not exists(dir):
makedirs(dir)
meta = open(
join(dir, student.replace(" ", "_") + "_meta.txt"), "w+"
) # Where do we store the output meta file
log = open(
join(dir, student.replace(" ", "_") + "_log.txt"), "w+"
) # Where do we store the output meta file
outFile = join(
dir, student.replace(" ", "_") + "_Video_Booth_final.mp4"
) # Where to store the output video
clips = [] # an empty container for clips to be added
# Find Video Booth Clips
log.write("Gathering Files\n")
if verbose:
print("Gathering files\n")
files = [
f
for f in listdir(capturesHome)
if isfile(join(capturesHome, f)) and ".mp4" in f
]
for f in range(len(files)):
# print(join(capturesHome, files[f]))
mean, max = get_volume(join(capturesHome, files[f]))
# clipVolumeMult.append(max/3) #divide by 3db
ratio = 4.65
db = (max * -1) / ratio
clipVolumeMult = db
# print ("\nFile: {0} \nMax: {1} \nclipVolumeMult: {2}".format(join(capturesHome, files[f]), max, clipVolumeMult))
### BEFORE ANYTHING ELSE, NORMALIZE THE CLIPS AND UPDATE FILE LOCATIONS/NAMES
# INTRO CARD
log.write("Checking Intro Card: " + join(introHome, introCard) + "\n")
if verbose:
print("Checking Intro Card: " + join(introHome, introCard) + "\n")
clips.append(ImageClip(join(introHome, introCard)).set_duration(introCardDuration))
meta.write(join(introHome, introCard) + "\n")
# VIDEO BOOTH CLIPS
for file in files:
log.write("Checking Clips: " + join(capturesHome, file) + "\n")
if verbose:
print("Checking Clips: " + join(capturesHome, file) + "\n")
if file.endswith(".mp4"):
meta.write(join(capturesHome, file) + "\n")
clips.append(VideoFileClip(join(capturesHome, file)))
# OUTRO CARD
log.write("Checking Outro Card: " + join(outroHome, outroCard) + "\n")
if verbose:
print("Checking: " + join(outroHome, outroCard))
clips.append(ImageClip(join(outroHome, outroCard)).set_duration(outroCardDuration))
meta.write(join(outroHome, outroCard) + "\n")
# IMPLEMENT CROSSFADE BETWEEN CLIPS
for i in range(len(clips)):
if i != 0:
clips[i] = clips[i].volumex(clipVolumeMult)
clips[i] = clips[i].crossfadein(fade_duration)
# Piece all clips together into one clip
final_clip = concatenate_videoclips(clips, padding=-fade_duration, method="compose")
# Check Audio
log.write("Checking Audio: " + join(audioHome, audioFile) + "\n")
if verbose:
print("Checking Audio: " + join(audioHome, audioFile))
audioclip = AudioFileClip(join(audioHome, audioFile)).set_duration(
final_clip.duration
)
audioclip = afx.audio_loop(audioclip, duration=final_clip.duration)
meta.write(join(audioHome, audioFile) + "\n")
# mix audio with clip audio
new_audioclip = CompositeAudioClip(
[
final_clip.audio,
audioclip.fx(afx.volumex, musicVolumeMult).fx(
afx.audio_fadeout, fade_duration
),
]
)
final_clip.audio = new_audioclip.set_duration(final_clip.duration)
log.write("Writing Video File...\n")
log.write("School: " + school + " | Student: " + student + "\n")
if verbose:
final_clip.write_videofile(outFile, bitrate="5000k", verbose=verbose)
else:
final_clip.write_videofile(
outFile, bitrate="5000k", verbose=verbose, logger=None
)
log.write("Done Writing Video File!\n")
# close meta file and begin writing
meta.close()
log.close()
# mainLog.close()
def archive(dir):
global school
files = listdir(dir)
formatted_date = date.today().strftime("%Y-%m-%d")
name = school + "-" + formatted_date + ".tar.bz2"
tf = tarfile.open("Archive/" + name, mode="w:bz2")
for filename in files:
file = join(dir, filename)
tf.add(file, arcname=join(school, filename))
tf.close()
return name
if __name__ == "__main__":
main()
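# --- Hedged usage note (illustrative) ---
# get_volume() shells out to ffmpeg's volumedetect filter and parses the mean/max dB levels
# from stderr, so ffmpeg must be on PATH; the clip path below is hypothetical.
#   mean_db, max_db = get_volume("/path/to/clip.mp4")
#   print(mean_db, max_db)      # e.g. -23.5 -4.1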
|
the-stack_106_20409
|
# Copyright 2019 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__metaclass__ = type
from ansible.module_utils.basic import AnsibleModule
import json
import os
import paunch as p
import re
import yaml
from paunch import runner as prunner
from paunch.builder import compose1 as pcompose1
from paunch.builder import podman as ppodman
from paunch.utils import common as putils_common
ANSIBLE_METADATA = {
'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = """
---
module: paunch
author:
- OpenStack TripleO Contributors
version_added: '1.0'
short_description: Manage containers with Paunch
notes: []
requirements:
- "paunch module"
- "podman or docker"
description:
- Start or stop containers with Paunch
options:
config:
description:
- JSON file or directory of JSON files containing configuration data
config_id:
description:
- ID to assign to containers
required: True
type: list
action:
description:
- The desired action to apply for the container.
default: apply
choices:
- apply
- cleanup
container_cli:
description:
- The container CLI.
default: podman
choices:
- podman
- docker
container_log_stdout_path:
description:
- Absolute path to a directory where container stdout will be stored.
default: /var/log/containers/stdouts
healthcheck_disabled:
description:
- Whether or not we disable the Containers Healthchecks
type: bool
default: False
managed_by:
description:
- Name of the tool managing the containers. Only containers labelled with
this will be modified
default: paunch
debug:
description:
- Whether or not we enable Debug
type: bool
default: True
log_file:
description:
- Absolute path for the paunch log file.
default: /var/log/paunch.log
"""
EXAMPLES = """
# Paunch apply example
- name: Start containers for step 1
paunch:
config: /var/lib/tripleo-config/hashed-container-startup-config-step_1.json
config_id: tripleo_step1
action: apply
# Paunch cleanup example
- name: Cleanup containers for step 1 and step 2
paunch:
config_id:
- tripleo_step1
- tripleo_step2
action: cleanup
"""
class PaunchManager:
def __init__(self, module, results):
super(PaunchManager, self).__init__()
self.module = module
self.results = results
self.config = self.module.params['config']
self.config_id = self.module.params['config_id']
self.action = self.module.params['action']
self.healthcheck_disabled = \
self.module.params['healthcheck_disabled']
self.container_cli = self.module.params['container_cli']
self.container_log_stdout_path = \
self.module.params['container_log_stdout_path']
self.managed_by = self.module.params['managed_by']
self.debug = self.module.params['debug']
self.log_file = self.module.params['log_file']
if self.debug:
self.log_level = 3
else:
# if debug is disabled, only show WARNING level
self.log_level = 1
self.log = putils_common.configure_logging('paunch-ansible',
level=self.log_level,
log_file=self.log_file)
if self.config:
self.config_yaml = putils_common.load_config(self.config)
if self.action == 'apply':
self.paunch_apply()
elif self.action == 'cleanup':
self.paunch_cleanup()
def paunch_apply(self):
self.results['action'].append('Applying config_id %s' % self.config_id)
if not self.config:
self.module.fail_json(
msg="Paunch apply requires 'config' parameter",
stdout='',
stderr='',
rc=1)
stdout_list, stderr_list, rc = p.apply(
self.config_id,
self.config_yaml,
managed_by=self.managed_by,
labels=[],
cont_cmd=self.container_cli,
log_level=self.log_level,
log_file=self.log_file,
cont_log_path=self.container_log_stdout_path,
healthcheck_disabled=self.healthcheck_disabled
)
stdout, stderr = ["\n".join(i) for i in (stdout_list, stderr_list)]
        # Test paunch idempotency as best we can.
changed_strings = ['rm -f', 'Completed', 'Created']
if any(s in stdout for s in changed_strings):
self.results['changed'] = True
self.results.update({"stdout": stdout, "stderr": stderr, "rc": rc})
if rc != 0:
self.module.fail_json(
msg="Paunch failed with config_id %s" % self.config_id,
stdout=stdout,
stderr=stderr,
rc=rc)
self.module.exit_json(**self.results)
def paunch_cleanup(self):
self.results['action'].append('Cleaning-up config_id(s) '
'%s' % self.config_id)
p.cleanup(
self.config_id,
managed_by=self.managed_by,
cont_cmd=self.container_cli,
log_level=self.log_level,
log_file=self.log_file
)
self.module.exit_json(**self.results)
def main():
module = AnsibleModule(
argument_spec=yaml.safe_load(DOCUMENTATION)['options'],
supports_check_mode=False,
)
results = dict(
changed=False,
action=[]
)
PaunchManager(module, results)
if __name__ == '__main__':
main()
|
the-stack_106_20410
|
#!/usr/bin/env python
from __future__ import unicode_literals, print_function
import sys
import sqlite3
from prompt_toolkit import PromptSession
from prompt_toolkit.completion import WordCompleter
from prompt_toolkit.lexers import PygmentsLexer
from prompt_toolkit.styles import Style
from pygments.lexers import SqlLexer
sql_completer = WordCompleter([
'abort', 'action', 'add', 'after', 'all', 'alter', 'analyze', 'and',
'as', 'asc', 'attach', 'autoincrement', 'before', 'begin', 'between',
'by', 'cascade', 'case', 'cast', 'check', 'collate', 'column',
'commit', 'conflict', 'constraint', 'create', 'cross', 'current_date',
'current_time', 'current_timestamp', 'database', 'default',
'deferrable', 'deferred', 'delete', 'desc', 'detach', 'distinct',
'drop', 'each', 'else', 'end', 'escape', 'except', 'exclusive',
'exists', 'explain', 'fail', 'for', 'foreign', 'from', 'full', 'glob',
'group', 'having', 'if', 'ignore', 'immediate', 'in', 'index',
'indexed', 'initially', 'inner', 'insert', 'instead', 'intersect',
'into', 'is', 'isnull', 'join', 'key', 'left', 'like', 'limit',
'match', 'natural', 'no', 'not', 'notnull', 'null', 'of', 'offset',
'on', 'or', 'order', 'outer', 'plan', 'pragma', 'primary', 'query',
'raise', 'recursive', 'references', 'regexp', 'reindex', 'release',
'rename', 'replace', 'restrict', 'right', 'rollback', 'row',
'savepoint', 'select', 'set', 'table', 'temp', 'temporary', 'then',
'to', 'transaction', 'trigger', 'union', 'unique', 'update', 'using',
'vacuum', 'values', 'view', 'virtual', 'when', 'where', 'with',
'without'], ignore_case=True)
style = Style.from_dict({
'completion-menu.completion': 'bg:#008888 #ffffff',
'completion-menu.completion.current': 'bg:#00aaaa #000000',
'scrollbar.background': 'bg:#88aaaa',
'scrollbar.button': 'bg:#222222',
})
def main(database):
connection = sqlite3.connect(database)
session = PromptSession(
lexer=PygmentsLexer(SqlLexer), completer=sql_completer, style=style)
while True:
try:
text = session.prompt('> ')
except KeyboardInterrupt:
continue # Control-C pressed. Try again.
except EOFError:
break # Control-D pressed.
with connection:
try:
messages = connection.execute(text)
except Exception as e:
print(repr(e))
else:
for message in messages:
print(message)
print('GoodBye!')
if __name__ == '__main__':
if len(sys.argv) < 2:
db = ':memory:'
else:
db = sys.argv[1]
main(db)
|
the-stack_106_20411
|
"""
A commandline tool for testing if RDF graphs are isomorpic, i.e. equal
if BNode labels are ignored.
"""
from rdflib.graph import Graph
from rdflib import BNode
try:
from itertools import combinations
assert combinations
except ImportError: # Python == 2.5
# Copied from
# http://docs.python.org/2/library/itertools.html#itertools.combinations
def combinations(iterable, r):
# combinations('ABCD', 2) --> AB AC AD BC BD CD
# combinations(range(4), 3) --> 012 013 023 123
pool = tuple(iterable)
n = len(pool)
if r > n:
return
indices = range(r)
yield tuple(pool[i] for i in indices)
while True:
for i in reversed(range(r)):
if indices[i] != i + n - r:
break
else:
return
indices[i] += 1
for j in range(i + 1, r):
indices[j] = indices[j - 1] + 1
yield tuple(pool[i] for i in indices)
class IsomorphicTestableGraph(Graph):
"""
Ported from:
http://www.w3.org/2001/sw/DataAccess/proto-tests/tools/rdfdiff.py
(Sean B Palmer's RDF Graph Isomorphism Tester)
"""
def __init__(self, **kargs):
super(IsomorphicTestableGraph, self).__init__(**kargs)
self.hash = None
def internal_hash(self):
"""
This is defined instead of __hash__ to avoid a circular recursion
scenario with the Memory store for rdflib which requires a hash
lookup in order to return a generator of triples
"""
return hash(tuple(sorted(self.hashtriples())))
def hashtriples(self):
for triple in self:
g = ((isinstance(t, BNode) and self.vhash(t)) or t for t in triple)
yield hash(tuple(g))
def vhash(self, term, done=False):
return tuple(sorted(self.vhashtriples(term, done)))
def vhashtriples(self, term, done):
for t in self:
if term in t:
yield tuple(self.vhashtriple(t, term, done))
def vhashtriple(self, triple, term, done):
        for p in range(3):
if not isinstance(triple[p], BNode):
yield triple[p]
elif done or (triple[p] == term):
yield p
else:
yield self.vhash(triple[p], done=True)
def __eq__(self, G):
"""Graph isomorphism testing."""
if not isinstance(G, IsomorphicTestableGraph):
return False
elif len(self) != len(G):
return False
elif list.__eq__(list(self), list(G)):
return True # @@
return self.internal_hash() == G.internal_hash()
def __ne__(self, G):
"""Negative graph isomorphism testing."""
return not self.__eq__(G)
def main():
import sys
from optparse import OptionParser
usage = '''usage: %prog [options] file1 file2 ... fileN'''
op = OptionParser(usage=usage)
op.add_option('-s', '--stdin', action='store_true', default=False,
help='Load from STDIN as well')
op.add_option('--format',
default='xml',
dest='inputFormat',
metavar='RDF_FORMAT',
choices=['xml', 'trix', 'n3', 'nt', 'rdfa'],
help="The format of the RDF document(s) to compare" +
"One of 'xml','n3','trix', 'nt', " +
"or 'rdfa'. The default is %default")
(options, args) = op.parse_args()
graphs = []
graph2FName = {}
if options.stdin:
graph = IsomorphicTestableGraph().parse(
sys.stdin, format=options.inputFormat)
graphs.append(graph)
graph2FName[graph] = '(STDIN)'
for fn in args:
graph = IsomorphicTestableGraph().parse(
fn, format=options.inputFormat)
graphs.append(graph)
graph2FName[graph] = fn
checked = set()
for graph1, graph2 in combinations(graphs, 2):
if (graph1, graph2) not in checked and (graph2, graph1) not in checked:
assert graph1 == graph2, "%s != %s" % (
graph2FName[graph1], graph2FName[graph2])
if __name__ == '__main__':
main()
|
the-stack_106_20413
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
import cv2
import logging
project_path = '/Downloads/GL'
def load_train_test_data():
pathToTrainData = 'Dataset/Car Images/Train Images'
cars_train_data = load_data(pathToTrainData)
logging.info("cars_train_data loaded and built successfully")
cars_train_data.to_csv(r'references/cars_train_data.csv', index=False)
logging.info("cars_train_data saved successfully")
print(cars_train_data.head())
print(cars_train_data.info())
pathToTestData = 'Dataset/Car Images/Test Images'
cars_test_data = load_data(pathToTestData)
print(cars_test_data.head())
print(cars_test_data.info())
logging.info("cars_test_data loaded and built successfully")
cars_test_data.to_csv(r'references/cars_test_data.csv', index=False)
logging.info("cars_test_data saved successfully")
cars_train_data.sort_values(['imageName'],axis=0,ascending=[True],inplace=True)
cars_test_data.sort_values(['imageName'],axis=0,ascending=[True],inplace=True)
logging.info("cars train and test data sorted successfully")
logging.info('Renaming imageName to match to Annotations Data Set')
cars_train_data.rename(columns = {'imageName': 'Image Name'},inplace = True)
cars_test_data.rename(columns = {'imageName': 'Image Name'},inplace = True)
print(cars_train_data.head())
print(cars_test_data.head())
return cars_train_data, cars_test_data
def load_data(pathToData):
path = os.getcwd()
print(path)
# os.chdir(project_path)
# print(os.getcwd())
# Importing the data set
data = pd.DataFrame(columns=['imageName', 'imagePath', 'class', 'height', 'width'])
for dirname, _, filenames in os.walk(pathToData):
for filename in filenames:
path = os.path.join(dirname, filename)
img_name = os.path.split(path)[1]
if img_name != '.DS_Store':
img = cv2.imread(path)
height, width, channel = img.shape
class_label = dirname.split('/')[-1]
data = data.append(
{'imageName': img_name, 'imagePath': path, 'class': class_label, 'height': height, 'width': width},
ignore_index=True)
logging.info("Data loaded and built successfully")
return data
def load_train_test_annotations():
pathToAnotations ='Dataset/Annotations'
cars_train_annotations = pd.read_csv(pathToAnotations+'/Train Annotations.csv')
print(cars_train_annotations.head())
    print('Train annotations loaded')
pathToAnotations ='Dataset/Annotations'
cars_test_annotations = pd.read_csv(pathToAnotations+'/Test Annotation.csv')
print(cars_test_annotations.head())
    print('Test annotations loaded')
return cars_train_annotations,cars_test_annotations
def get_final_data(data, annotations):
car_image_details = pd.merge(data, annotations,
on='Image Name',
how='outer')
print(car_image_details.head())
car_image_details.rename(columns = {'Bounding Box coordinates': 'X1','Unnamed: 2':'Y1','Unnamed: 3':'X2','Unnamed: 4':'Y2'},inplace = True)
print(car_image_details.head())
print(car_image_details['class'].value_counts())
return car_image_details
|
the-stack_106_20414
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# finpie - a simple library to download some financial data
# https://github.com/peterlacour/finpie
#
# Copyright (c) 2020 Peter la Cour
#
# Licensed under the MIT License
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import os
import sys
import time
from selenium import webdriver
from bs4 import BeautifulSoup as bs
from requests_html import HTMLSession
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
class DataBase(object):
def __init__(self):
self.head = False
self.download_path = os.getcwd()
self.chromedriver_path = os.path.dirname(__file__)
def _get_chromedriver_path(self):
filepath = self.chromedriver_path
if '/' in filepath:
filepath = '/'.join( filepath.split('/')) + '/webdrivers/'
elif '\\' in filepath:
filepath = '\\'.join( filepath.split('\\')) + '\\webdrivers\\'
return filepath
def _get_chromedriver(self):
filepath = self._get_chromedriver_path()
if sys.platform == 'darwin':
return filepath + 'chromedriver_mac'
elif 'win' in sys.platform:
return filepath + 'chromedriver_windows.exe'
else:
return 'chromedriver'
def _load_driver(self, caps = 'none'):
options = webdriver.ChromeOptions()
prefs = {}
prefs['profile.default_content_settings.popups'] = 0
prefs['download.default_directory'] = self.download_path
prefs['profile.default_content_setting_values.automatic_downloads'] = 1
options.add_experimental_option('prefs', prefs)
options.add_experimental_option("excludeSwitches", ['enable-automation'])
options.add_experimental_option('useAutomationExtension', False)
options.add_argument('--no-sandbox')
options.add_argument('--disable-setuid-sandbox')
options.add_argument('--start-maximized')
user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.53 Safari/537.36'
options.add_argument(f'user-agent={user_agent}')
if not self.head:
options.add_argument('--headless')
try:
if caps == 'none':
caps = DesiredCapabilities().CHROME
caps["pageLoadStrategy"] = "none"
driver = webdriver.Chrome( executable_path=self._get_chromedriver(), options = options, desired_capabilities=caps ) # chromedriver
else:
caps = DesiredCapabilities().CHROME
caps["pageLoadStrategy"] = "normal"
driver = webdriver.Chrome( executable_path=self._get_chromedriver(), options = options, desired_capabilities=caps ) # chromedriver
                driver.execute_script(f"var s=window.document.createElement('script'); s.src='{self._get_chromedriver_path()}javascriptM.js';window.document.head.appendChild(s);")
driver.set_window_size(1400,1000)
driver.set_page_load_timeout(600)
driver.delete_all_cookies()
except Exception as e:
print('Failed to start driver: ' + str(e) )
if 'chrome not reachable' in str(e):
print('Try turning off your firewall...')
return driver
def _get_session(self, url):
        '''Fetch ``url`` with requests_html and return the parsed BeautifulSoup document.'''
session = HTMLSession()
r = session.get(url)
soup = bs(r.content, 'html5lib')
return soup
def _downloads_done(self, filename):
'''
https://stackoverflow.com/questions/48263317/selenium-python-waiting-for-a-download-process-to-complete-using-chrome-web
'''
        # Poll the download directory until the file shows up.
        while filename not in os.listdir(self.download_path):
            time.sleep(0.5)
        return None
def _col_to_float(self, df):
'''
Converts string columns to floats replacing percentage signs and T, B, M, k
to trillions, billions, millions and thousands.
'''
for col in df.columns:
try:
df.loc[df[col].str.contains('T'), col] = (df[col][df[col].str.contains('T')] \
.replace('T', '', regex = True).replace(',', '', regex = True) \
.astype('float') * 1000000000000) #.astype('str')
df.loc[df[col].str.contains('B'), col] = (df[col][df[col].str.contains('B', case=True)] \
.replace('B', '', regex = True).replace(',', '', regex = True) \
.astype('float') * 1000000000) #.astype('str')
df.loc[df[col].str.contains('M'), col] = (df[col][df[col].str.contains('M', case=True)] \
.replace('M', '', regex = True).replace(',', '', regex = True) \
.astype('float') * 1000000) #.astype('str')
df.loc[df[col].str.contains('k'), col] = (df[col][df[col].str.contains('k', case=True)] \
.replace('k', '', regex = True).replace(',', '', regex = True) \
.astype('float') * 1000) #.astype('str')
df.loc[df[col].str.contains('%'), col] = (df[col][df[col].str.contains('%', case=True)] \
.replace('%', '', regex = True).replace(',', '', regex = True) \
.astype('float') / 100) #.astype('str')
df.loc[df[col].str.contains('K'), col] = (df[col][df[col].str.contains('K', case=True)] \
.replace('K', '', regex = True) \
.astype('float') * 1000) #.astype('str')
except:
continue
return df
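

# --- Hypothetical usage sketch (added; not part of the original finpie code) ---
# A minimal illustration of DataBase._col_to_float, assuming pandas is installed
# and that each column sticks to a single suffix style; mixed suffixes in one
# column may be skipped by the broad try/except above.
if __name__ == '__main__':
    import pandas as pd

    demo = pd.DataFrame({'volume': ['10M', '2.5M'], 'change': ['5%', '12.5%']})
    print(DataBase()._col_to_float(demo))
    # 'volume' should come back as 10_000_000.0 and 2_500_000.0,
    # 'change' as 0.05 and 0.125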
|
the-stack_106_20418
|
#!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Copyright (c) 2018-2021 The CSPN Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test node disconnect and ban behavior"""
import time
from test_framework.test_framework import CSPNTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
connect_nodes_bi,
wait_until,
)
class DisconnectBanTest(CSPNTestFramework):
def set_test_params(self):
self.num_nodes = 2
def run_test(self):
self.log.info("Test setban and listbanned RPCs")
self.log.info("setban: successfully ban single IP address")
assert_equal(len(self.nodes[1].getpeerinfo()), 2) # node1 should have 2 connections to node0 at this point
self.nodes[1].setban(subnet="127.0.0.1", command="add")
wait_until(lambda: len(self.nodes[1].getpeerinfo()) == 0, timeout=10)
assert_equal(len(self.nodes[1].getpeerinfo()), 0) # all nodes must be disconnected at this point
assert_equal(len(self.nodes[1].listbanned()), 1)
self.log.info("clearbanned: successfully clear ban list")
self.nodes[1].clearbanned()
assert_equal(len(self.nodes[1].listbanned()), 0)
self.nodes[1].setban("127.0.0.0/24", "add")
self.log.info("setban: fail to ban an already banned subnet")
assert_equal(len(self.nodes[1].listbanned()), 1)
assert_raises_rpc_error(-23, "IP/Subnet already banned", self.nodes[1].setban, "127.0.0.1", "add")
self.log.info("setban: fail to ban an invalid subnet")
assert_raises_rpc_error(-30, "Error: Invalid IP/Subnet", self.nodes[1].setban, "127.0.0.1/42", "add")
assert_equal(len(self.nodes[1].listbanned()), 1) # still only one banned ip because 127.0.0.1 is within the range of 127.0.0.0/24
self.log.info("setban remove: fail to unban a non-banned subnet")
assert_raises_rpc_error(-30, "Error: Unban failed", self.nodes[1].setban, "127.0.0.1", "remove")
assert_equal(len(self.nodes[1].listbanned()), 1)
self.log.info("setban remove: successfully unban subnet")
self.nodes[1].setban("127.0.0.0/24", "remove")
assert_equal(len(self.nodes[1].listbanned()), 0)
self.nodes[1].clearbanned()
assert_equal(len(self.nodes[1].listbanned()), 0)
self.log.info("setban: test persistence across node restart")
self.nodes[1].setban("127.0.0.0/32", "add")
self.nodes[1].setban("127.0.0.0/24", "add")
# Set the mocktime so we can control when bans expire
old_time = int(time.time())
self.nodes[1].setmocktime(old_time)
self.nodes[1].setban("192.168.0.1", "add", 1) # ban for 1 seconds
self.nodes[1].setban("2001:4d48:ac57:400:cacf:e9ff:fe1d:9c63/19", "add", 1000) # ban for 1000 seconds
listBeforeShutdown = self.nodes[1].listbanned()
assert_equal("192.168.0.1/32", listBeforeShutdown[2]['address'])
# Move time forward by 3 seconds so the third ban has expired
self.nodes[1].setmocktime(old_time + 3)
assert_equal(len(self.nodes[1].listbanned()), 3)
self.stop_node(1)
self.start_node(1)
listAfterShutdown = self.nodes[1].listbanned()
assert_equal("127.0.0.0/24", listAfterShutdown[0]['address'])
assert_equal("127.0.0.0/32", listAfterShutdown[1]['address'])
assert_equal("/19" in listAfterShutdown[2]['address'], True)
# Clear ban lists
self.nodes[1].clearbanned()
connect_nodes_bi(self.nodes, 0, 1)
self.log.info("Test disconnectnode RPCs")
self.log.info("disconnectnode: fail to disconnect when calling with address and nodeid")
address1 = self.nodes[0].getpeerinfo()[0]['addr']
node1 = self.nodes[0].getpeerinfo()[0]['addr']
assert_raises_rpc_error(-32602, "Only one of address and nodeid should be provided.", self.nodes[0].disconnectnode, address=address1, nodeid=node1)
self.log.info("disconnectnode: fail to disconnect when calling with junk address")
assert_raises_rpc_error(-29, "Node not found in connected nodes", self.nodes[0].disconnectnode, address="221B Baker Street")
self.log.info("disconnectnode: successfully disconnect node by address")
address1 = self.nodes[0].getpeerinfo()[0]['addr']
self.nodes[0].disconnectnode(address=address1)
wait_until(lambda: len(self.nodes[0].getpeerinfo()) == 1, timeout=10)
assert not [node for node in self.nodes[0].getpeerinfo() if node['addr'] == address1]
self.log.info("disconnectnode: successfully reconnect node")
connect_nodes_bi(self.nodes, 0, 1) # reconnect the node
assert_equal(len(self.nodes[0].getpeerinfo()), 2)
assert [node for node in self.nodes[0].getpeerinfo() if node['addr'] == address1]
self.log.info("disconnectnode: successfully disconnect node by node id")
id1 = self.nodes[0].getpeerinfo()[0]['id']
self.nodes[0].disconnectnode(nodeid=id1)
wait_until(lambda: len(self.nodes[0].getpeerinfo()) == 1, timeout=10)
assert not [node for node in self.nodes[0].getpeerinfo() if node['id'] == id1]
if __name__ == '__main__':
DisconnectBanTest().main()
|
the-stack_106_20419
|
from kiwi.database.DataAccessor import DataAccessor
import numpy
import math
class ActivationCalculator:
def __init__(self, heuristics, data_accessor: DataAccessor):
self.heuristics = heuristics
self.accessor = data_accessor
def g(self, x):
return math.exp(-3.0*x)*100.0
def f(self, x):
v = numpy.random.exponential(1, None)/10.0
v2 = 1 - v
x = 1 - x
gesamt = (((1-x)*v)+(((x**2)*(v2**2))**5))*50.0
return gesamt
async def get_activation(self):
#needed heuristics:
# user-votes in %
# for this we need "user"
MAX = 100
user = self.heuristics["user"]
voted_count, unvoted_count = await self.accessor.get_voted_and_unvoted_count(user)
user_vote_ratio = float(voted_count) / float(voted_count + unvoted_count)
a = min(MAX, 2*self.f(user_vote_ratio) + 0.8*self.g(user_vote_ratio))
print("Kiwi-Latest Activation: {}".format(a))
return a
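

# --- Hypothetical usage sketch (added; not part of the original module) ---
# Shows how get_activation turns a vote ratio into an activation score via f() and g().
# "FakeAccessor" is an illustrative stand-in for kiwi's DataAccessor; it assumes the
# kiwi package itself is importable so the module-level import above succeeds.
if __name__ == "__main__":
    import asyncio

    class FakeAccessor:
        async def get_voted_and_unvoted_count(self, user):
            return 30, 70  # the user voted on 30 of 100 items

    calc = ActivationCalculator({"user": "demo-user"}, FakeAccessor())
    print(asyncio.run(calc.get_activation()))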
|
the-stack_106_20421
|
import torch
from ..logging.logger import Logger
from ..losses.loss import DiscriminatorLoss, GeneratorLoss
from ..models.model import Discriminator, Generator
from .base_trainer import BaseTrainer
__all__ = ["ProximalTrainer"]
class ProximalTrainer(BaseTrainer):
r"""Standard Trainer for various GANs. This has been designed to work only on one GPU in case
you are using a GPU.
Most of the functionalities provided by the Trainer are flexible enough and can be customized by
simply passing different arguments. You can train anything from a simple DCGAN to complex CycleGANs
without ever having to subclass this ``Trainer``.
Args:
models (dict): A dictionary containing a mapping between the variable name, storing the
``generator``, ``discriminator`` and any other model that you might want to define, with the
function and arguments that are needed to construct the model. Refer to the examples to
see how to define complex models using this API.
losses_list (list): A list of the Loss Functions that need to be minimized. For a list of
pre-defined losses look at :mod:`torchgan.losses`. All losses in the list must be a
            subclass of at least ``GeneratorLoss`` or ``DiscriminatorLoss``.
metrics_list (list, optional): List of Metric Functions that need to be logged. For a list of
pre-defined metrics look at :mod:`torchgan.metrics`. All losses in the list must be a
subclass of ``EvaluationMetric``.
device (torch.device, optional): Device in which the operation is to be carried out. If you
are using a CPU machine make sure that you change it for proper functioning.
ncritic (int, optional): Setting it to a value will make the discriminator train that many
times more than the generator. If it is set to a negative value the generator will be
trained that many times more than the discriminator.
sample_size (int, optional): Total number of images to be generated at the end of an epoch
for logging purposes.
epochs (int, optional): Total number of epochs for which the models are to be trained.
checkpoints (str, optional): Path where the models are to be saved. The naming convention is
if checkpoints is ``./model/gan`` then models are saved as ``./model/gan0.model`` and so on.
retain_checkpoints (int, optional): Total number of checkpoints that should be retained. For
example, if the value is set to 3, we save at most 3 models and start rewriting the models
after that.
recon (str, optional): Directory where the sampled images are saved. Make sure the directory
exists from beforehand.
log_dir (str, optional): The directory for logging tensorboard. It is ignored if
TENSORBOARD_LOGGING is 0.
test_noise (torch.Tensor, optional): If provided then it will be used as the noise for image
sampling.
nrow (int, optional): Number of rows in which the image is to be stored.
Any other argument that you need to store in the object can be simply passed via keyword arguments.
Example:
        >>> dcgan = ProximalTrainer(
{"generator": {"name": DCGANGenerator, "args": {"out_channels": 1, "step_channels":
16}, "optimizer": {"name": Adam, "args": {"lr": 0.0002,
"betas": (0.5, 0.999)}}},
"discriminator": {"name": DCGANDiscriminator, "args": {"in_channels": 1,
"step_channels": 16}, "optimizer": {"var": "opt_discriminator",
"name": Adam, "args": {"lr": 0.0002, "betas": (0.5, 0.999)}}}},
[MinimaxGeneratorLoss(), MinimaxDiscriminatorLoss()],
sample_size=64, epochs=20)
"""
def __init__(
self,
models,
losses_list,
metrics_list=None,
device=torch.device("cuda:0"),
ncritic=10,
epochs=5,
sample_size=8,
checkpoints="./model/gan",
retain_checkpoints=None,
recon="./images",
log_dir=None,
test_noise=None,
nrow=8,
verbose=True,
tune_report=None,
**kwargs
):
super(ProximalTrainer, self).__init__(
losses_list,
metrics_list=metrics_list,
device=device,
ncritic=ncritic,
epochs=epochs,
sample_size=sample_size,
checkpoints=checkpoints,
retain_checkpoints=retain_checkpoints,
recon=recon,
log_dir=log_dir,
test_noise=test_noise,
nrow=nrow,
**kwargs
)
self.model_names = []
self.optimizer_names = []
self.schedulers = []
for key, model in models.items():
self.model_names.append(key)
if "args" in model:
setattr(self, key, (model["name"](**model["args"])).to(self.device))
if("discriminator" in key):
setattr(self, "proximal_"+key, (model["name"](**model["args"])).to(self.device))
else:
setattr(self, key, (model["name"]()).to(self.device))
if("discriminator" in key):
setattr(self, "proximal_"+key, (model["name"](**model["args"])).to(self.device))
opt = model["optimizer"]
opt_name = "optimizer_{}".format(key)
if "var" in opt:
opt_name = opt["var"]
self.optimizer_names.append(opt_name)
model_params = getattr(self, key).parameters()
if "args" in opt:
setattr(self, opt_name, (opt["name"](model_params, **opt["args"])))
else:
setattr(self, opt_name, (opt["name"](model_params)))
if "scheduler" in opt:
sched = opt["scheduler"]
if "args" in sched:
self.schedulers.append(
sched["name"](getattr(self, opt_name), **sched["args"])
)
else:
self.schedulers.append(sched["name"](getattr(self, opt_name)))
self.logger = Logger(
self,
losses_list,
metrics_list,
log_dir=log_dir,
nrow=nrow,
test_noise=test_noise,
verbose = verbose
)
self.verbose = verbose
self._store_loss_maps()
self._store_metric_maps()
self.tune_report=tune_report
def train_iter(self):
r"""Calls the train_ops of the loss functions. This is the core function of the Trainer. In most
cases you will never have the need to extend this function. In extreme cases simply extend
``train_iter_custom``.
.. warning::
This function is needed in this exact state for the Trainer to work correctly. So it is
highly recommended that this function is not changed even if the ``Trainer`` is subclassed.
Returns:
An NTuple of the ``generator loss``, ``discriminator loss``, ``number of times the generator
was trained`` and the ``number of times the discriminator was trained``.
"""
self.train_iter_custom()
ldis, lgen, dis_iter, gen_iter = 0.0, 0.0, 0, 0
loss_logs = self.logger.get_loss_viz()
grad_logs = self.logger.get_grad_viz()
for name, loss in self.losses.items():
if isinstance(loss, GeneratorLoss) and isinstance(loss, DiscriminatorLoss):
# NOTE(avik-pal): In most cases this loss is meant to optimize the Discriminator
# but we might need to think of a better solution
if self.loss_information["generator_iters"] % self.ngen == 0:
cur_loss = loss.train_ops(
**self._get_arguments(self.loss_arg_maps[name])
)
loss_logs.logs[name].append(cur_loss)
if type(cur_loss) is tuple:
lgen, ldis, gen_iter, dis_iter = (
lgen + cur_loss[0],
ldis + cur_loss[1],
gen_iter + 1,
dis_iter + 1,
)
else:
# NOTE(avik-pal): We assume that it is a Discriminator Loss by default.
ldis, dis_iter = ldis + cur_loss, dis_iter + 1
for model_name in self.model_names:
grad_logs.update_grads(model_name, getattr(self, model_name))
elif isinstance(loss, GeneratorLoss):
# if self.loss_information["discriminator_iters"] % self.ncritic == 0:
for _ in range(self.ngen):
cur_loss = loss.train_ops(
**self._get_arguments(self.loss_arg_maps[name])
)
loss_logs.logs[name].append(cur_loss)
lgen, gen_iter = lgen + cur_loss, gen_iter + 1
for model_name in self.model_names:
model = getattr(self, model_name)
if isinstance(model, Generator):
grad_logs.update_grads(model_name, model)
elif isinstance(loss, DiscriminatorLoss):
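                # (Added note) Snapshot the current discriminator weights into
                # proximal_discriminator before the critic updates; presumably this lets the
                # DiscriminatorLoss penalise drift from the pre-update reference copy.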
self.proximal_discriminator.load_state_dict(self.discriminator.state_dict())
for _ in range(self.ncritic):
# if self.loss_information["generator_iters"] % self.ngen == 0:
cur_loss = loss.train_ops(
**self._get_arguments(self.loss_arg_maps[name])
)
loss_logs.logs[name].append(cur_loss)
ldis, dis_iter = ldis + cur_loss, dis_iter + 1
for model_name in self.model_names:
model = getattr(self, model_name)
if isinstance(model, Discriminator):
grad_logs.update_grads(model_name, model)
return lgen, ldis, gen_iter, dis_iter
|
the-stack_106_20422
|
# Embedded player in Armory Space
import bpy
from bpy.types import Header
from bpy.app.translations import contexts as i18n_contexts
import arm.utils
import arm.make as make
import arm.make_state as state
import arm.log as log
class ArmorySpaceHeader(Header):
bl_space_type = 'VIEW_ARMORY'
def draw(self, context):
layout = self.layout
view = context.space_data
obj = context.active_object
toolsettings = context.tool_settings
row = layout.row(align=True)
row.template_header()
row.operator('arm.space_stop', icon='MESH_PLANE')
if state.is_paused:
row.operator('arm.space_resume', icon="PLAY")
else:
row.operator('arm.space_pause', icon="PAUSE")
layout.label(log.header_info_text)
class ArmorySpaceStopButton(bpy.types.Operator):
'''Switch back to 3D view'''
bl_idname = 'arm.space_stop'
bl_label = 'Stop'
def execute(self, context):
area = bpy.context.area
        if area is None:
area = state.play_area
area.type = 'VIEW_3D'
state.is_paused = False
log.clear()
return{'FINISHED'}
class ArmorySpacePauseButton(bpy.types.Operator):
'''Pause rendering'''
bl_idname = 'arm.space_pause'
bl_label = 'Pause'
def execute(self, context):
state.is_paused = True
return{'FINISHED'}
class ArmorySpaceResumeButton(bpy.types.Operator):
'''Resume rendering'''
bl_idname = 'arm.space_resume'
bl_label = 'Resume'
def execute(self, context):
state.is_paused = False
return{'FINISHED'}
def register():
if arm.utils.with_krom():
bpy.utils.register_class(ArmorySpaceHeader)
bpy.utils.register_class(ArmorySpaceStopButton)
bpy.utils.register_class(ArmorySpacePauseButton)
bpy.utils.register_class(ArmorySpaceResumeButton)
def unregister():
if arm.utils.with_krom():
bpy.utils.unregister_class(ArmorySpaceHeader)
bpy.utils.unregister_class(ArmorySpaceStopButton)
bpy.utils.unregister_class(ArmorySpacePauseButton)
bpy.utils.unregister_class(ArmorySpaceResumeButton)
|
the-stack_106_20424
|
import unittest
import pathlib
from sourcehold.maps.sections.tools import TileIndexTranslator
from sourcehold.maps.sections.types import TileSystem
from sourcehold import load_map, expand_var_path
import random
class TestCoordinates(unittest.TestCase):
def test_tile_index_translator(self):
m = load_map(pathlib.Path("resources") / "map" / "crusader" / "xlcr.map")
ts = m.directory.sections[0].get_system()
tit = TileIndexTranslator(square_size=400)
self.assertEqual(tit.translate_file_index_to_game_tile_index(0, 0), 199)
self.assertEqual(tit.translate_game_tile_index_to_file_index(199), (0, 0))
r = random.randint(0, (400*400)-1)
r = 54464
titi, titj = tit.translate_game_tile_index_to_file_index(r)
ts.get_tile_number_for_index((titi, titj))
self.assertEqual((0, 199), ts.get_index_for_tile_number(0, True))
ts.get_tile_number_for_index((0, 0))
self.assertEqual(m.directory.sections[0].get_system().get_tile_number_for_index((2, 199), True), 8)
self.assertEqual(12, m.directory.sections[0].get_system().get_tile_number_for_index((3, 0), False))
self.assertEqual(12, m.directory.sections[0].get_system().get_tile_number_for_index((3, 196), True))
self.assertEqual(12, m.directory.sections[0].get_system().get_tile_number_for_index((3, 196), True))
|
the-stack_106_20426
|
###
# M4cs Keymap for dekuNukem/duckyPad QMK firmware
# Copyright (C) 2020 Max Bridgland
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
###
import hid
import time
import string
import psutil
import GPUtil
import datetime
vendor_id = 0x444E
product_id = 0x4450
usage_page = 0xFF60
usage = 0x61
device_interfaces = hid.enumerate(vendor_id, product_id)
raw_hid_interfaces = [i for i in device_interfaces if i['usage_page'] == usage_page and i['usage'] == usage]
if len(raw_hid_interfaces) == 0:
    print("Couldn't find any interfaces")
exit()
interface = hid.device()
interface.open_path(raw_hid_interfaces[0]['path'])
print("Manufacturer: %s" % interface.get_manufacturer_string())
print("Product: %s" % interface.get_product_string())
time.sleep(0.05)
while True:
time.sleep(0.75)
cpufreq = psutil.cpu_freq()
currFreq = int(cpufreq.current)
svmem = psutil.virtual_memory()
memPerc = int(svmem.percent * 10)
gpus = GPUtil.getGPUs()
gpu = gpus[0]
load = int(gpu.load*100)
temp = int(gpu.temperature)
data = [0]
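    # (Added note, assumption) The leading 0 is most likely the raw-HID report ID;
    # each metric below is then serialised digit by digit with 13 used as a field
    # delimiter for the duckyPad firmware to split on.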
for x in str(currFreq):
data.append(int(x))
data.append(13)
for x in str(memPerc):
data.append(int(x))
data.append(13)
for x in str(load):
data.append(int(x))
data.append(13)
for x in str(temp):
data.append(int(x))
data.append(13)
now_hour = datetime.datetime.now().strftime("%I")
now_min = datetime.datetime.now().strftime("%M")
data.append(int(now_hour[0]))
data.append(int(now_hour[1]))
data.append(13)
data.append(int(now_min[0]))
data.append(int(now_min[1]))
data.append(13)
interface.write(data)
|
the-stack_106_20429
|
import os
import sys
import shutil
from zipfile import ZipFile
sys.path.append("../")
from lib.Version import VERSION
baseTargetDir = "./build/dist/bundle/"
targetDir = baseTargetDir + "pack/VGC_Analyze/"
if os.path.exists(baseTargetDir):
shutil.rmtree(baseTargetDir)
if not os.path.exists(targetDir):
os.makedirs(targetDir)
shutil.copytree("../assets/", targetDir + "assets/")
shutil.copytree("../gui/", targetDir + "gui/")
shutil.copytree("../lib/", targetDir + "lib/")
shutil.copy("../VGC_Analyze.py", targetDir + "VGC_Analyze.py")
shutil.rmtree(targetDir + "gui/__pycache__/")
shutil.rmtree(targetDir + "lib/__pycache__/")
os.chdir(targetDir + "..")
zip = ZipFile("../../VGC_Analyze_"+VERSION+"_script.zip", "w")
for root, dirs, files in os.walk("./"):
for f in files:
zip.write(os.path.join(root, f))
zip.close()
|
the-stack_106_20430
|
"""Common methods used across tests for Bond."""
from typing import Any, Dict
from homeassistant import core
from homeassistant.components.bond.const import DOMAIN as BOND_DOMAIN
from homeassistant.const import CONF_ACCESS_TOKEN, CONF_HOST
from homeassistant.setup import async_setup_component
from tests.async_mock import patch
from tests.common import MockConfigEntry
MOCK_HUB_VERSION: dict = {"bondid": "test-bond-id"}
async def setup_bond_entity(
hass: core.HomeAssistant, config_entry: MockConfigEntry, hub_version=None
):
"""Set up Bond entity."""
if hub_version is None:
hub_version = MOCK_HUB_VERSION
config_entry.add_to_hass(hass)
with patch(
"homeassistant.components.bond.Bond.getVersion", return_value=hub_version
):
return await hass.config_entries.async_setup(config_entry.entry_id)
async def setup_platform(
hass: core.HomeAssistant, platform: str, discovered_device: Dict[str, Any]
):
"""Set up the specified Bond platform."""
mock_entry = MockConfigEntry(
domain=BOND_DOMAIN,
data={CONF_HOST: "1.1.1.1", CONF_ACCESS_TOKEN: "test-token"},
)
mock_entry.add_to_hass(hass)
with patch("homeassistant.components.bond.PLATFORMS", [platform]), patch(
"homeassistant.components.bond.Bond.getVersion", return_value=MOCK_HUB_VERSION
), patch(
"homeassistant.components.bond.Bond.getDeviceIds",
return_value=["bond-device-id"],
), patch(
"homeassistant.components.bond.Bond.getDevice", return_value=discovered_device
), patch(
"homeassistant.components.bond.Bond.getDeviceState", return_value={}
):
assert await async_setup_component(hass, BOND_DOMAIN, {})
await hass.async_block_till_done()
return mock_entry
|
the-stack_106_20431
|
from argparse import Namespace
import csv
from logging import Logger
import pickle
import random
from typing import List, Set, Tuple
import os
from rdkit import Chem
import numpy as np
from tqdm import tqdm
from .data import MoleculeDatapoint, MoleculeDataset
from .scaffold import log_scaffold_stats, scaffold_split
from chemprop.features import load_features
def get_task_names(path: str, use_compound_names: bool = False) -> List[str]:
"""
Gets the task names from a data CSV file.
:param path: Path to a CSV file.
:param use_compound_names: Whether file has compound names in addition to smiles strings.
:return: A list of task names.
"""
index = 2 if use_compound_names else 1
task_names = get_header(path)[index:]
return task_names
def get_header(path: str) -> List[str]:
"""
Returns the header of a data CSV file.
:param path: Path to a CSV file.
:return: A list of strings containing the strings in the comma-separated header.
"""
with open(path) as f:
header = next(csv.reader(f))
return header
def get_num_tasks(path: str) -> int:
"""
Gets the number of tasks in a data CSV file.
:param path: Path to a CSV file.
:return: The number of tasks.
"""
return len(get_header(path)) - 1
def get_smiles(path: str, header: bool = True) -> List[str]:
"""
Returns the smiles strings from a data CSV file (assuming the first line is a header).
:param path: Path to a CSV file.
:param header: Whether the CSV file contains a header (that will be skipped).
:return: A list of smiles strings.
"""
with open(path) as f:
reader = csv.reader(f)
if header:
next(reader) # Skip header
smiles = [line[0] for line in reader]
return smiles
def filter_invalid_smiles(data: MoleculeDataset) -> MoleculeDataset:
"""
Filters out invalid SMILES.
:param data: A MoleculeDataset.
:return: A MoleculeDataset with only valid molecules.
"""
return MoleculeDataset([datapoint for datapoint in data
if datapoint.smiles != '' and datapoint.mol is not None
and datapoint.mol.GetNumHeavyAtoms() > 0])
def get_data(path: str,
skip_invalid_smiles: bool = True,
args: Namespace = None,
features_path: List[str] = None,
max_data_size: int = None,
use_compound_names: bool = None,
logger: Logger = None) -> MoleculeDataset:
"""
Gets smiles string and target values (and optionally compound names if provided) from a CSV file.
:param path: Path to a CSV file.
:param skip_invalid_smiles: Whether to skip and filter out invalid smiles.
:param args: Arguments.
:param features_path: A list of paths to files containing features. If provided, it is used
in place of args.features_path.
:param max_data_size: The maximum number of data points to load.
:param use_compound_names: Whether file has compound names in addition to smiles strings.
:param logger: Logger.
:return: A MoleculeDataset containing smiles strings and target values along
with other info such as additional features and compound names when desired.
"""
debug = logger.debug if logger is not None else print
if args is not None:
# Prefer explicit function arguments but default to args if not provided
features_path = features_path if features_path is not None else args.features_path
max_data_size = max_data_size if max_data_size is not None else args.max_data_size
use_compound_names = use_compound_names if use_compound_names is not None else args.use_compound_names
else:
use_compound_names = False
max_data_size = max_data_size or float('inf')
# Load features
if features_path is not None:
features_data = []
for feat_path in features_path:
features_data.append(load_features(feat_path)) # each is num_data x num_features
features_data = np.concatenate(features_data, axis=1)
else:
features_data = None
skip_smiles = set()
# Load data
with open(path) as f:
reader = csv.reader(f)
next(reader) # skip header
lines = []
true_vals=[]
for line in reader:
smiles = line[0]
true_val=float(line[1])
if smiles in skip_smiles:
continue
lines.append(line)
true_vals.append(true_val)
if len(lines) >= max_data_size:
break
data = MoleculeDataset([
MoleculeDatapoint(
line=line,
args=args,
features=features_data[i] if features_data is not None else None,
use_compound_names=use_compound_names
) for i, line in tqdm(enumerate(lines), total=len(lines))
])
# Filter out invalid SMILES
if skip_invalid_smiles:
original_data_len = len(data)
data = filter_invalid_smiles(data)
if len(data) < original_data_len:
debug(f'Warning: {original_data_len - len(data)} SMILES are invalid.')
if data.data[0].features is not None:
args.features_dim = len(data.data[0].features)
    if args is not None and args.write_true_val:
return data, true_vals
else:
return data
def get_data_from_smiles(smiles: List[str], skip_invalid_smiles: bool = True, logger: Logger = None) -> MoleculeDataset:
"""
Converts SMILES to a MoleculeDataset.
:param smiles: A list of SMILES strings.
:param skip_invalid_smiles: Whether to skip and filter out invalid smiles.
:param logger: Logger.
:return: A MoleculeDataset with all of the provided SMILES.
"""
debug = logger.debug if logger is not None else print
data = MoleculeDataset([MoleculeDatapoint([smile]) for smile in smiles])
# Filter out invalid SMILES
if skip_invalid_smiles:
original_data_len = len(data)
data = filter_invalid_smiles(data)
if len(data) < original_data_len:
debug(f'Warning: {original_data_len - len(data)} SMILES are invalid.')
return data
def split_data(data: MoleculeDataset,
split_type: str = 'random',
sizes: Tuple[float, float, float] = (0.8, 0.1, 0.1),
seed: int = 0,
args: Namespace = None,
logger: Logger = None) -> Tuple[MoleculeDataset,
MoleculeDataset,
MoleculeDataset]:
"""
Splits data into training, validation, and test splits.
:param data: A MoleculeDataset.
:param split_type: Split type.
:param sizes: A length-3 tuple with the proportions of data in the
train, validation, and test sets.
:param seed: The random seed to use before shuffling data.
:param args: Namespace of arguments.
:param logger: A logger.
:return: A tuple containing the train, validation, and test splits of the data.
"""
assert len(sizes) == 3 and sum(sizes) == 1
if args is not None:
folds_file, val_fold_index, test_fold_index = \
args.folds_file, args.val_fold_index, args.test_fold_index
else:
folds_file = val_fold_index = test_fold_index = None
if split_type == 'crossval':
index_set = args.crossval_index_sets[args.seed]
data_split = []
for split in range(3):
split_indices = []
for index in index_set[split]:
with open(os.path.join(args.crossval_index_dir, f'{index}.pkl'), 'rb') as rf:
split_indices.extend(pickle.load(rf))
data_split.append([data[i] for i in split_indices])
train, val, test = tuple(data_split)
return MoleculeDataset(train), MoleculeDataset(val), MoleculeDataset(test)
elif split_type == 'index_predetermined':
split_indices = args.crossval_index_sets[args.seed]
assert len(split_indices) == 3
data_split = []
for split in range(3):
data_split.append([data[i] for i in split_indices[split]])
train, val, test = tuple(data_split)
return MoleculeDataset(train), MoleculeDataset(val), MoleculeDataset(test)
elif split_type == 'predetermined':
if not val_fold_index:
assert sizes[2] == 0 # test set is created separately so use all of the other data for train and val
assert folds_file is not None
assert test_fold_index is not None
try:
with open(folds_file, 'rb') as f:
all_fold_indices = pickle.load(f)
except UnicodeDecodeError:
with open(folds_file, 'rb') as f:
all_fold_indices = pickle.load(f, encoding='latin1') # in case we're loading indices from python2
# assert len(data) == sum([len(fold_indices) for fold_indices in all_fold_indices])
log_scaffold_stats(data, all_fold_indices, logger=logger)
folds = [[data[i] for i in fold_indices] for fold_indices in all_fold_indices]
test = folds[test_fold_index]
if val_fold_index is not None:
val = folds[val_fold_index]
train_val = []
for i in range(len(folds)):
if i != test_fold_index and (val_fold_index is None or i != val_fold_index):
train_val.extend(folds[i])
if val_fold_index is not None:
train = train_val
else:
random.seed(seed)
random.shuffle(train_val)
train_size = int(sizes[0] * len(train_val))
train = train_val[:train_size]
val = train_val[train_size:]
return MoleculeDataset(train), MoleculeDataset(val), MoleculeDataset(test)
elif split_type == 'scaffold_balanced':
return scaffold_split(data, sizes=sizes, balanced=True, seed=seed, logger=logger)
elif split_type == 'random':
data.shuffle(seed=seed)
train_size = int(sizes[0] * len(data))
train_val_size = int((sizes[0] + sizes[1]) * len(data))
train = data[:train_size]
val = data[train_size:train_val_size]
test = data[train_val_size:]
return MoleculeDataset(train), MoleculeDataset(val), MoleculeDataset(test)
else:
raise ValueError(f'split_type "{split_type}" not supported.')
def get_class_sizes(data: MoleculeDataset) -> List[List[float]]:
"""
Determines the proportions of the different classes in the classification dataset.
:param data: A classification dataset
:return: A list of lists of class proportions. Each inner list contains the class proportions
for a task.
"""
targets = data.targets()
# Filter out Nones
valid_targets = [[] for _ in range(data.num_tasks())]
for i in range(len(targets)):
for task_num in range(len(targets[i])):
if targets[i][task_num] is not None:
valid_targets[task_num].append(targets[i][task_num])
class_sizes = []
for task_targets in valid_targets:
# Make sure we're dealing with a binary classification task
assert set(np.unique(task_targets)) <= {0, 1}
try:
ones = np.count_nonzero(task_targets) / len(task_targets)
except ZeroDivisionError:
ones = float('nan')
print('Warning: class has no targets')
class_sizes.append([1 - ones, ones])
return class_sizes
def validate_data(data_path: str) -> Set[str]:
"""
Validates a data CSV file, returning a set of errors.
:param data_path: Path to a data CSV file.
:return: A set of error messages.
"""
errors = set()
header = get_header(data_path)
with open(data_path) as f:
reader = csv.reader(f)
next(reader) # Skip header
smiles, targets = [], []
for line in reader:
smiles.append(line[0])
targets.append(line[1:])
# Validate header
if len(header) == 0:
errors.add('Empty header')
elif len(header) < 2:
errors.add('Header must include task names.')
mol = Chem.MolFromSmiles(header[0])
if mol is not None:
errors.add('First row is a SMILES string instead of a header.')
# Validate smiles
for smile in tqdm(smiles, total=len(smiles)):
mol = Chem.MolFromSmiles(smile)
if mol is None:
errors.add('Data includes an invalid SMILES.')
# Validate targets
num_tasks_set = set(len(mol_targets) for mol_targets in targets)
if len(num_tasks_set) != 1:
errors.add('Inconsistent number of tasks for each molecule.')
if len(num_tasks_set) == 1:
num_tasks = num_tasks_set.pop()
if num_tasks != len(header) - 1:
errors.add('Number of tasks for each molecule doesn\'t match number of tasks in header.')
unique_targets = set(np.unique([target for mol_targets in targets for target in mol_targets]))
if unique_targets <= {''}:
errors.add('All targets are missing.')
for target in unique_targets - {''}:
try:
float(target)
except ValueError:
errors.add('Found a target which is not a number.')
return errors
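

# --- Hypothetical usage sketch (added; not part of the original chemprop module) ---
# Assumes a CSV whose header row is "smiles,task1,task2"; only the light-weight
# CSV helpers are exercised here, and "data.csv" is an illustrative path.
if __name__ == '__main__':
    path = 'data.csv'
    print(get_header(path))        # e.g. ['smiles', 'task1', 'task2']
    print(get_task_names(path))    # e.g. ['task1', 'task2']
    print(get_num_tasks(path))     # e.g. 2
    print(validate_data(path))     # an empty set if the file is well-formed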
|
the-stack_106_20432
|
#! /usr/bin/env python3
import math
# Compare the results of the QR decomposition
fout = open('../../InverceMatrix2/InverceMatrix2.sim/sim_1/behav/result.txt', 'r')
fref = open('ref.txt', 'r')
ok = True
line_no = 1
lout = fout.readline()
while lout:
    # Read the simulation result
results = lout[:-1].split(' ')
val = int(results[0], 16)
    # Handle negative (two's complement) values
    if (val & 0x8000):
        val -= 0x10000
    val /= 2**8  # the format is 7Q8 fixed point
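    # (Added example) In 7Q8 the low 8 bits are the fraction: 0x0180 -> 384/256 = 1.5,
    # and 0xFF80 -> (0xFF80 - 0x10000)/256 = -0.5 after the sign fix above.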
tlast = results[1]
print('val = {val}, tlast = {tlast}'.format(val=val, tlast=tlast))
    # Read the reference value
lref = fref.readline()
if not lref:
print("Error: too much data")
raise Exception()
results = lref[:-1].split(' ')
ref_val = float(results[0])
ref_tlast = results[1]
print('ref val = {val}, ref_tlast = {tlast}'.format(val=ref_val, tlast=ref_tlast))
    # Compare the result against the reference
d = val - ref_val
if -0.1 < ref_val < 0.1:
d = math.fabs(d)
else:
d = math.fabs(d/ref_val)
print('diff = ', d)
if (0.022 < d):
print('Error: line {line} value is {sim}, expected {ref}'.
format(line=line_no, sim=val, ref=ref_val))
ok = False
if tlast != ref_tlast:
print('Error: line {line} tlast {sim}, expected {ref}'.
format(line=line_no, sim=tlast, ref=ref_tlast))
ok = False
print()
    # Read the next result line
lout = fout.readline()
line_no += 1
# If the reference file still has lines left, it is an error
if fref.readline():
print("Error: less data")
ok = False
if ok:
print("All data is good")
fout.close()
fref.close()
|
the-stack_106_20433
|
from itertools import islice
from typing import Dict, Iterable, List, Union
from modelforge import merge_strings, Model, register_model, split_strings
import numpy
from sourced.ml.models.license import DEFAULT_LICENSE
@register_model
class DocumentFrequencies(Model):
"""
Document frequencies - number of times a source code identifier appeared
in different repositories. Each repository counts only once.
"""
NAME = "docfreq"
VENDOR = "source{d}"
DESCRIPTION = "Model that contains document frequencies of features extracted from code."
LICENSE = DEFAULT_LICENSE
def construct(self, docs: int, tokfreqs: Union[Iterable[Dict[str, int]], Dict[str, int]]):
"""
Initializes this model.
:param docs: The number of documents.
:param tokfreqs: The dictionary of token -> frequency or the iterable collection of such
dictionaries.
:return: self
"""
if isinstance(tokfreqs, dict):
df = tokfreqs
else:
df = {}
for d in tokfreqs:
df.update(d)
self._docs = docs
self._df = df
return self
"""
WE DO NOT ADD THIS
def df(self) -> dict:
"""
def _load_tree(self, tree: dict, tokens=None):
if tokens is None:
tokens = split_strings(tree["tokens"])
freqs = tree["freqs"]
self._log.info("Building the docfreq dictionary...")
tokfreq = dict(zip(tokens, freqs))
self.construct(docs=tree["docs"], tokfreqs=tokfreq)
def _generate_tree(self):
tokens = self.tokens()
freqs = numpy.array([self._df[t] for t in tokens], dtype=numpy.float32)
return {"docs": self.docs, "tokens": merge_strings(tokens), "freqs": freqs}
def dump(self):
return """Number of words: %d
Random 10 words: %s
Number of documents: %d""" % (
len(self._df), dict(islice(self._df.items(), 10)), self.docs)
@property
def docs(self) -> int:
"""
Returns the number of documents.
"""
return self._docs
"""
WE DO NOT ADD THIS
def df(self) -> dict:
"""
def prune(self, threshold: int) -> "DocumentFrequencies":
"""
Removes tokens which occur less than `threshold` times.
The operation happens *not* in-place - a new model is returned.
:param threshold: Minimum number of occurrences.
:return: The new model if the current one had to be changed, otherwise self.
"""
if threshold < 1:
raise ValueError("Invalid threshold: %d" % threshold)
if threshold == 1:
return self
self._log.info("Pruning to min %d occurrences", threshold)
pruned = type(self)()
pruned._docs = self.docs
pruned._df = {k: v for k, v in self._df.items() if v >= threshold}
self._log.info("Size: %d -> %d", len(self), len(pruned))
pruned._meta = self.meta
return pruned
def greatest(self, max_size: int) -> "DocumentFrequencies":
"""
Truncates the model to most frequent `max_size` tokens.
The operation happens *not* in-place - a new model is returned.
:param max_size: The maximum vocabulary size.
:return: The new model if the current one had to be changed, otherwise self.
"""
if max_size < 1:
raise ValueError("Invalid max_size: %d" % max_size)
if len(self) <= max_size:
return self
self._log.info("Pruning to max %d size", max_size)
pruned = type(self)()
pruned._docs = self.docs
freqs = numpy.fromiter(self._df.values(), dtype=numpy.int32, count=len(self))
keys = numpy.array(list(self._df.keys()), dtype=object)
chosen = numpy.argpartition(freqs, len(freqs) - max_size)[len(freqs) - max_size:]
border_freq = freqs[chosen].min()
chosen = freqs >= border_freq
# argpartition can leave some of the elements with freq == border_freq outside
# so next step ensures that we include everything.
freqs = freqs[chosen]
keys = keys[chosen]
# we need to be deterministic at the cutoff frequency
# argpartition returns random samples every time
# so we treat words with the cutoff frequency separately
if max_size != freqs.shape[0]:
assert max_size < freqs.shape[0]
border_freq_indexes = freqs == border_freq
border_keys = keys[border_freq_indexes]
border_keys.sort()
border_keys = border_keys[:max_size - freqs.shape[0]]
df = dict(zip(keys[~border_freq_indexes], freqs[~border_freq_indexes]))
df.update({key: border_freq for key in border_keys})
else:
df = dict(zip(keys, freqs))
pruned._df = df
self._log.info("Size: %d -> %d", len(self), len(pruned))
pruned._meta = self.meta
return pruned
def __getitem__(self, item):
return self._df[item]
def __iter__(self):
return iter(self._df.items())
def __len__(self):
"""
Returns the number of tokens in the model.
"""
return len(self._df)
def get(self, item, default=None) -> Union[int, None]:
"""
Return the document frequency for a given token.
:param item: The token to query.
:param default: Returned value in case the token is missing.
:return: int or `default`
"""
return self._df.get(item, default)
def tokens(self) -> List[str]:
"""
Returns the list of tokens.
"""
return list(self._df)
"""
WE DO NOT ADD THIS
def df(self) -> dict:
"""
|
the-stack_106_20434
|
import os
import torch
import wandb
from torch import nn
import pytorch_lightning as pl
from pytorch_lightning.loggers import WandbLogger
from data.ljspeech import get_dataset
from data.transforms import (
MelSpectrogram, Compose, AddLengths, Pad,
TextPreprocess, ToNumpy, AudioSqueeze, ToGpu)
from data.collate import no_pad_collate
from utils import fix_seeds
from tacotron2.model.net import Tacotron2
class Tacotron2Trainer(pl.LightningModule):
def __init__(
self,
config,
Vocoder=None
):
super(Tacotron2Trainer, self).__init__()
fix_seeds(seed=config.train.seed)
self.model = Tacotron2(config)
self.lr = config.train.lr
self.batch_size = config.train.batch_size
self.weight_decay = config.train.get('weight_decay', 0.)
self.num_workers = config.train.get('num_workers', 4)
self.step_size = config.train.get('step_size', 15)
self.gamma = config.train.get('gamma', 0.2)
self.text_transform = TextPreprocess(config.alphabet)
self.mel = MelSpectrogram()
self.gpu = ToGpu('cuda' if torch.cuda.is_available() else 'cpu')
self.preprocess = Compose([
AddLengths(),
Pad()
])
self.mseloss = nn.MSELoss()
self.gate_bce = nn.BCEWithLogitsLoss()
self.g = config.train.get(
'guiding_window_width', 0.2
)
if Vocoder is not None:
self.vocoder = Vocoder().eval()
else:
self.vocoder = None
self.config = config
self.sample_rate = config.dataset.get('sample_rate', 16000)
self.epoch_idx = 0
def forward(self, batch):
if self.training:
return self.model(
text_inputs=batch['text'],
lengths=batch['text_lengths'],
mels=batch['mel']
)
else:
return self.model(
text_inputs=batch['text'])
def mels_mse(self, mel_outputs, mel_outputs_postnet, batch):
if self.training:
y = batch['mel']
y.requires_grad = False
batch_size, max_length, n_mel_channels = y.shape
output_lengths = batch['mel_lengths']
mask = torch.arange(max_length, device=output_lengths.device,
dtype=output_lengths.dtype)[None, :] < output_lengths[:, None]
mask = mask.bool()
mask = mask[..., None].repeat_interleave(n_mel_channels, dim=2)
mask.requires_grad = False
return self.mseloss(mel_outputs * mask, y * mask) + self.mseloss(
mel_outputs_postnet * mask, y * mask)
else:
y = batch['mel'][:, :mel_outputs.shape[1]]
mel_outputs = mel_outputs[:, :y.shape[1]]
mel_outputs_postnet = mel_outputs_postnet[:, :y.shape[1]]
return self.mseloss(mel_outputs, y) + self.mseloss(mel_outputs_postnet, y)
def guided_attention_loss(self, alignments):
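        # (Added note) The guide below follows the guided-attention idea popularised by
        # DC-TTS-style models: W[t, n] = 1 - exp(-((n/N - t/T)^2) / (2 * g^2)), so
        # attention mass far from the diagonal is penalised and `g` sets the band width.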
b, t, n = alignments.shape
grid_t, grid_n = torch.meshgrid(torch.arange(t, device=alignments.device), torch.arange(n, device=alignments.device))
W = 1. - torch.exp(-(-grid_n / n + grid_t/t) ** 2 / 2 / self.g**2)
W.requires_grad = False
return torch.mean(alignments * W[None].repeat_interleave(b, dim=0)), W
def gate_loss(self, gate_out, mel_lengths):
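        # (Added note) The gate target is 0 while frames are still inside the utterance
        # and 1 from mel_lengths[i] onward, so the BCE term trains a stop-token predictor.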
gate_target = torch.zeros_like(gate_out)
for i in range(gate_out.shape[0]):
gate_target[i, mel_lengths[i]:] = 1
gate_target.requires_grad = False
return self.gate_bce(gate_out, gate_target)
def training_step(self, batch, batch_nb):
# REQUIRED
batch = self.mel(self.gpu(batch))
batch = self.preprocess(batch)
batch['mel'] = batch['mel'].permute(0, 2, 1)
mel_outputs, mel_outputs_postnet, gate_out, alignments = self(batch)
train_mse = self.mels_mse(mel_outputs, mel_outputs_postnet, batch)
train_gate = self.gate_loss(gate_out, batch['mel_lengths'])
loss = train_mse + train_gate
losses_dict = {
'train_loss': loss.item(), 'train_mse': train_mse.item(), 'train_gate_loss': train_gate.item()
}
if self.config.train.use_guided_attention:
attn_loss, guide = self.guided_attention_loss(alignments)
loss += attn_loss
losses_dict['train_attn_loss'] = attn_loss.item()
self.logger.experiment.log(losses_dict)
if batch_nb % self.config.train.train_log_period == 1:
examples = [
wandb.Image(mel_outputs_postnet[0].detach().cpu().numpy(), caption='predicted_mel'),
wandb.Image(batch['mel'][0].detach().cpu().numpy(), caption='target_mel'),
wandb.Image(alignments[0].detach().cpu().numpy(), caption='alignment')
]
self.logger.experiment.log({'input_texts_train' : wandb.Table(data=[
self.text_transform.reverse(batch['text'][0].detach().cpu().numpy())], columns=["Text"])})
if self.config.train.use_guided_attention:
examples.append(wandb.Image(guide.cpu().numpy(), caption='attention_guide'))
self.logger.experiment.log({
"plots_train": examples
})
examples = []
if self.vocoder is not None:
reconstructed_wav = self.vocoder.inference(mel_outputs_postnet[0].detach().permute(1, 0)[None])[0]
examples.append(wandb.Audio(reconstructed_wav.detach().cpu().numpy(), caption='reconstructed_wav', sample_rate=self.sample_rate))
examples.append(wandb.Audio(batch['audio'][0].detach().cpu().numpy(), caption='target_wav', sample_rate=self.sample_rate))
self.logger.experiment.log({
"audios_train": examples
})
return loss
def validation_step(self, batch, batch_nb):
# OPTIONAL
batch = self.mel(self.gpu(batch))
batch = self.preprocess(batch)
batch['mel'] = batch['mel'].permute(0, 2, 1)
mel_outputs, mel_outputs_postnet, gate_out, alignments = self(batch)
mse = self.mels_mse(mel_outputs, mel_outputs_postnet, batch)
gate = self.gate_loss(gate_out, batch['mel_lengths'])
loss = mse + gate
losses_dict = {'val_loss': loss, 'val_mse': mse, 'val_gate_loss': gate}
if self.config.train.use_guided_attention:
attn_loss, guide = self.guided_attention_loss(alignments)
losses_dict['val_attn_loss'] = attn_loss
loss += attn_loss
if batch_nb % self.config.train.val_log_period == 1:
examples = [
wandb.Image(mel_outputs_postnet[0].cpu().numpy(), caption='predicted_mel'),
wandb.Image(batch['mel'][0].cpu().numpy(), caption='target_mel'),
wandb.Image(alignments[0].cpu().numpy(), caption='alignment')
]
self.logger.experiment.log({'input_texts_val' : wandb.Table(data=[
self.text_transform.reverse(batch['text'][0].cpu().numpy())], columns=["Text"])})
self.logger.experiment.log({
"plots_val": examples
})
examples = []
if self.vocoder is not None:
reconstructed_wav = self.vocoder.inference(mel_outputs_postnet[0].permute(1, 0)[None])[0]
examples.append(wandb.Audio(reconstructed_wav.cpu().numpy(), caption='reconstructed_wav', sample_rate=self.sample_rate))
examples.append(wandb.Audio(batch['audio'][0].cpu().numpy(), caption='target_wav', sample_rate=self.sample_rate))
self.logger.experiment.log({
"audios_val": examples
})
return losses_dict
def validation_epoch_end(self, outputs):
# outputs is an array with what you returned in validation_step for each batch
# outputs = [{'loss': batch_0_loss}, {'loss': batch_1_loss}, ..., {'loss': batch_n_loss}]
keys = outputs[0].keys()
logdict = {}
for key in keys:
logdict[f'avg_{key}'] = torch.stack([x[key] for x in outputs]).mean().item()
self.logger.experiment.log(logdict)
os.makedirs(self.config.train.get('checkpoint_path', 'checkpoints'), exist_ok=True)
torch.save(
self.model.state_dict(),
os.path.join(self.config.train.get('checkpoint_path', 'checkpoints'), f'model_{self.epoch_idx}.pth')
)
self.logger.experiment.save(os.path.join(self.config.train.get('checkpoint_path', 'checkpoints'), f'model_{self.epoch_idx}.pth'))
self.epoch_idx += 1
def configure_optimizers(self):
# REQUIRED
# can return multiple optimizers and learning_rate schedulers
# (LBFGS it is automatically supported, no need for closure function)
optimizer = torch.optim.Adam(self.model.parameters(), lr=self.lr, weight_decay=self.weight_decay)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=self.step_size, gamma=self.gamma)
return [optimizer], [scheduler]
# dataset:
def prepare_data(self):
get_dataset(self.config, download=True)
def train_dataloader(self):
transforms = Compose([
self.text_transform,
ToNumpy(),
AudioSqueeze()
])
dataset_train = get_dataset(self.config, part='train', transforms=transforms)
dataset_train = torch.utils.data.DataLoader(dataset_train,
batch_size=self.batch_size, collate_fn=no_pad_collate, shuffle=True, num_workers=self.num_workers)
return dataset_train
def val_dataloader(self):
transforms = Compose([
self.text_transform,
ToNumpy(),
AudioSqueeze()
])
dataset_val = get_dataset(self.config, part='val', transforms=transforms)
dataset_val = torch.utils.data.DataLoader(dataset_val,
batch_size=1, collate_fn=no_pad_collate, num_workers=1)
return dataset_val
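# Minimal training entry point sketch (not from the original repository): it assumes
# a config object exposing the attributes used above (config.train.*, config.alphabet,
# config.dataset.*), loaded here with OmegaConf from a hypothetical YAML path.
if __name__ == '__main__':
    from omegaconf import OmegaConf
    config = OmegaConf.load('configs/tacotron2.yaml')  # hypothetical path
    model = Tacotron2Trainer(config)
    logger = WandbLogger(project=config.train.get('wandb_project', 'tacotron2'))
    trainer = pl.Trainer(
        logger=logger,
        gpus=1 if torch.cuda.is_available() else 0,
        max_epochs=config.train.get('epochs', 100),
        gradient_clip_val=config.train.get('grad_clip', 1.0))
    trainer.fit(model)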
|
the-stack_106_20435
|
#=============================================================================================================
# Imports
import os
import bpy
import codecs
import xml.etree.ElementTree as ET
from struct import unpack
#=============================================================================================================
# Loader
class ModelLoader:
def __init__(self):
self.geometry_file = None
self.counts = [] #Number of vertex types, number of index types, number of vertex blocs, number of index blocs, number of collision blocs, number of armor blocs
self.info_positions = [] #Locations of vertex and index bloc info tables
        self.section_positions = [] #Locations of a section type (vertices, indices, collision, and armor).
self.vertex_info = [] #Contains the name, type data, location, and count for each bloc
self.index_info = [] #Contains the name, type data, location, and count for each bloc
self.vertex_type_info = [] #Contains the location, type string length, type string location, and individual vertex length for each vertex type
self.index_type_info = [] #Contains the location, type string length, type string location, and individual index length for each vertex type
self.data = [] #Will contain unpacked vertex, index data
def load_geometry(self, file_path, displacement, rotation, scale):
file_dir = os.path.dirname(file_path) #Directory of the selected file
base_filename = os.path.basename(file_path) #Base name of the selected file
geometry_filename = '%s.geometry' % os.path.splitext(base_filename)[0] #Extended name of the .geometry file
geometry_path = os.path.join(file_dir, geometry_filename) #Full path of the .geometry file
self.geometry_file = open(geometry_path, 'rb')
print(' >Start counts: %s' % self.geometry_file.tell())
for i in range(6): #Read number of vertex types, index types, vertex blocs, index blocs, collision blocs, and armor blocs
self.counts.append(unpack('<i', self.geometry_file.read(4))[0])
print('counts (vertex types, index types, vertex blocs, index blocs, collision blocs, armor blocs): %s' % self.counts)
print(' >End counts: %s \n' % (self.geometry_file.tell()-1))
print(' >Start info locations: %s' % self.geometry_file.tell())
for i in range(2): #Read info table locations (name, etc)
self.info_positions.append(unpack('<ixxxx', self.geometry_file.read(8))[0])
print('info table locations: %s' % self.info_positions)
print(' >End info locations: %s \n' % (self.geometry_file.tell()-1))
print(' >Start section locations: %s' % self.geometry_file.tell())
for i in range(4): #Read section locations (start of vertex coords, etc)
self.section_positions.append(unpack('<ixxxx', self.geometry_file.read(8))[0])
print('section locations: %s' % self.section_positions)
print(' >End section locations: %s \n' % (self.geometry_file.tell()-1))
print(' >Start vertex info: %s' % self.geometry_file.tell())
for i in range(self.counts[2]): #Read vertex info
self.vertex_info.append({
'name' : self.geometry_file.read(4).hex(),
'type_index' : unpack('<h', self.geometry_file.read(2))[0],
'???' : unpack('<h', self.geometry_file.read(2))[0],
'position' : unpack('<i', self.geometry_file.read(4))[0],
'vertices_count' : unpack('<i', self.geometry_file.read(4))[0],
'type' : None
})
print(' >End vertex info: %s \n' % (self.geometry_file.tell()-1))
print(' >Start index info: %s' % self.geometry_file.tell())
for i in range(self.counts[3]): #Read index info
self.index_info.append({
'name' : self.geometry_file.read(4).hex(),
'type_index' : unpack('<h', self.geometry_file.read(2))[0],
'???' : unpack('<h', self.geometry_file.read(2))[0],
'position' : unpack('<i', self.geometry_file.read(4))[0],
'indices_count' : unpack('<i', self.geometry_file.read(4))[0],
'type' : None
})
print(' >End index info: %s \n' % (self.geometry_file.tell()-1))
print(' >Start vertex type info: %s' % self.geometry_file.tell())
for i in range(self.counts[0]): #Read vertex type info
bookmark = self.geometry_file.tell()
self.vertex_type_info.append({
'vertex_type_location' : unpack('<ixxxx', self.geometry_file.read(8))[0]+bookmark,
'vertex_type_string_length' : unpack('<ixxxx', self.geometry_file.read(8))[0],
'vertex_type_string_location' : unpack('<ixxxx', self.geometry_file.read(8))[0]+bookmark+8,
'vertex_type_length' : unpack('<i', self.geometry_file.read(4))[0],
'single_vertex_length' : unpack('<hxx', self.geometry_file.read(4))[0]
})
print('vertex type info: %s' % self.vertex_type_info)
print(' >End vertex type info: %s \n' % (self.geometry_file.tell()-1))
for i in range(len(self.vertex_info)): #Go to the type string for each type and fill in info for each vertex bloc
self.geometry_file.seek(self.vertex_type_info[self.vertex_info[i]['type_index']]['vertex_type_string_location'])
type_raw=self.geometry_file.read(self.vertex_type_info[self.vertex_info[i]['type_index']]['vertex_type_string_length'])
self.vertex_info[i]['type'] = type_raw.decode('utf-8').rstrip('\x00')
print('vertex info: %s' %self.vertex_info)
print(' >Start index type info: %s' % self.geometry_file.tell())
self.geometry_file.seek(self.section_positions[1])
for i in range(self.counts[1]): #Read index type info
bookmark = self.geometry_file.tell()
self.index_type_info.append({
'index_type_location' : unpack('<ixxxx', self.geometry_file.read(8))[0]+bookmark,
'index_type_length' : unpack('<i', self.geometry_file.read(4))[0],
'index_type_number' : unpack('<h', self.geometry_file.read(2))[0],
'single_index_length' : unpack('<h', self.geometry_file.read(2))[0]
})
print('index type info: %s' % self.index_type_info)
print(' >End index type info: %s \n' % (self.geometry_file.tell()-1))
for i in range(len(self.index_info)): #Go to the type string for each type and fill in info for each index bloc
type_raw=self.index_type_info[self.index_info[i]['type_index']]['index_type_number']
if type_raw == 0:
self.index_info[i]['type'] = 'list16'
else:
self.index_info[i]['type'] = 'list32'
print('index info: %s' % self.index_info)
for i in range(len(self.vertex_info)):
self.geometry_file.seek(self.vertex_info[i]['position']*self.vertex_type_info[self.vertex_info[i]['type_index']]['single_vertex_length']+self.vertex_type_info[self.vertex_info[i]['type_index']]['vertex_type_location'])
print(' >Start vertex data: %s' % self.geometry_file.tell())
temp_vertices = []
if self.vertex_info[i]['type'] == 'set3/xyznuvpc':
for ii in range(self.vertex_info[i]['vertices_count']):
temp=self.geometry_file.read(12)
(x,z,y)=unpack('<3f', temp)
temp_vertices.append((x,y,z))
self.geometry_file.seek(8,1)
elif self.vertex_info[i]['type'] == 'set3/xyznuvrpc':
for ii in range(self.vertex_info[i]['vertices_count']):
temp=self.geometry_file.read(12)
(x,z,y)=unpack('<3f', temp)
temp_vertices.append((x,y,z))
self.geometry_file.seek(12,1)
elif self.vertex_info[i]['type'] == 'set3/xyznuvtbpc':
for ii in range(self.vertex_info[i]['vertices_count']):
temp=self.geometry_file.read(12)
(x,z,y)=unpack('<3f', temp)
temp_vertices.append((x,y,z))
self.geometry_file.seek(16,1)
elif self.vertex_info[i]['type'] == 'set3/xyznuviiiwwtbpc':
for ii in range(self.vertex_info[i]['vertices_count']):
temp=self.geometry_file.read(12)
(x,z,y)=unpack('<3f', temp)
temp_vertices.append((x,y,z))
self.geometry_file.seek(20,1)
else:
raise Exception('[Import Error] Unrecognized import format.')
self.data.append({
'vertices' : temp_vertices,
'indices' : [],
'vertices_count' : len(temp_vertices),
'indices_count' : 0,
'uv' : None,
'???' : self.vertex_info[i]['???']
})
print(' >End vertex data: %s \n' % (self.geometry_file.tell()-1))
        for i in range(len(self.index_info)):
self.geometry_file.seek(self.index_info[i]['position']*self.index_type_info[self.index_info[i]['type_index']]['single_index_length']+self.index_type_info[self.index_info[i]['type_index']]['index_type_location'])
print(' >Start index data: %s' % self.geometry_file.tell())
temp_indices = []
temp_max = 0
if self.index_info[i]['type'] == 'list16':
for ii in range(int(self.index_info[i]['indices_count']/3)):
temp=self.geometry_file.read(6)
(a,b,c)=unpack('<3H', temp)
temp_indices.append((a,b,c))
temp_max = max(temp_max, a, b, c)
else:
for ii in range(int(self.index_info[i]['indices_count']/3)):
temp=self.geometry_file.read(12)
(a,b,c)=unpack('<3I', temp)
temp_indices.append((a,b,c))
temp_max = max(temp_max, a, b, c)
for ii in self.data:
if self.index_info[i]['???'] == ii['???'] and temp_max+1==ii['vertices_count']:
print(temp_max)
print(ii['vertices_count'])
print(ii['???'])
ii['indices']=temp_indices
ii['indices_count']=len(temp_indices)
print(' >End index data: %s \n' % (self.geometry_file.tell()-1))
## print(self.data[0])
for i in range(len(self.data)):
new_mesh = bpy.data.meshes.new('Mesh')
new_mesh.from_pydata(self.data[i]['vertices'], [], self.data[i]['indices'])
new_mesh.update()
new_mesh.uv_layers.new(name='UVMap')
material = bpy.data.materials.new('Material')
material.Vertex_Format = self.vertex_info[i]['type']
new_mesh.materials.append(material)
new_object = bpy.data.objects.new('temp', new_mesh)
new_object.name = str(i)
scene = bpy.context.scene
scene.collection.objects.link(new_object)
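# Usage sketch (hypothetical, not part of the original addon): from Blender's Python
# console, with a '<name>.geometry' file sitting next to the chosen model file:
#   ModelLoader().load_geometry('/path/to/model_file', (0, 0, 0), (0, 0, 0), 1.0)
# Note that displacement, rotation, and scale are accepted but not applied anywhere
# in the code above.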
|
the-stack_106_20437
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import torch
# Config that serves all environment
GLOBAL_CONFIG = {
"MODEL_PATH": "../model/model.pt",
"SCALAR_PATH": "../model/scaler.joblib",
"USE_CUDE_IF_AVAILABLE": True,
"ROUND_DIGIT": 6
}
# Environment specific config, or overwrite of GLOBAL_CONFIG
ENV_CONFIG = {
"development": {
"DEBUG": True
},
"staging": {
"DEBUG": True
},
"production": {
"DEBUG": False,
"ROUND_DIGIT": 3
}
}
def get_config() -> dict:
"""
Get config based on running environment
:return: dict of config
"""
# Determine running environment
    ENV = os.environ.get('PYTHON_ENV') or 'development'
    # raise error if environment is not expected
    if ENV not in ENV_CONFIG:
        raise EnvironmentError(f'Config for environment {ENV} not found')
config = GLOBAL_CONFIG.copy()
config.update(ENV_CONFIG[ENV])
config['ENV'] = ENV
    config['DEVICE'] = 'cuda' if torch.cuda.is_available() and config['USE_CUDA_IF_AVAILABLE'] else 'cpu'
return config
# load config for import
CONFIG = get_config()
if __name__ == '__main__':
# for debugging
import json
print(json.dumps(CONFIG, indent=4))
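# Usage sketch: select the environment before importing this module, e.g.
#   PYTHON_ENV=production python app.py
# and read values from application code (assuming this file is saved as config.py):
#   from config import CONFIG
#   device = CONFIG['DEVICE']  # 'cuda' or 'cpu'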
|
the-stack_106_20438
|
import curses, traceback, string, os
import dmTxt2Html
#-- Define the appearance of some interface elements
hotkey_attr = curses.A_BOLD | curses.A_UNDERLINE
menu_attr = curses.A_NORMAL
#-- Define additional constants
EXIT = 0
CONTINUE = 1
#-- Define default conversion dictionary
cfg_dict = {'target': 'DEFAULT.HTML',
'source': 'txt2html.txt',
'type': 'INFER',
'proxy': 'NONE' }
counter = 0
dmTxt2Html.promo = string.replace(dmTxt2Html.promo, dmTxt2Html.cgi_home, '')
#-- Give screen module scope
screen = None
#-- Create the topbar menu
def topbar_menu(menus):
left = 2
for menu in menus:
menu_name = menu[0]
menu_hotkey = menu_name[0]
menu_no_hot = menu_name[1:]
screen.addstr(1, left, menu_hotkey, hotkey_attr)
screen.addstr(1, left+1, menu_no_hot, menu_attr)
left = left + len(menu_name) + 3
# Add key handlers for this hotkey
topbar_key_handler((string.upper(menu_hotkey), menu[1]))
topbar_key_handler((string.lower(menu_hotkey), menu[1]))
# Little aesthetic thing to display application title
screen.addstr(1, left-1,
">"*(52-left)+ " Txt2Html Curses Interface",
curses.A_STANDOUT)
screen.refresh()
#-- Magic key handler both loads and processes keys strokes
def topbar_key_handler(key_assign=None, key_dict={}):
if key_assign:
key_dict[ord(key_assign[0])] = key_assign[1]
else:
c = screen.getch()
if c in (curses.KEY_END, ord('!')):
return 0
elif c not in key_dict.keys():
curses.beep()
return 1
else:
return eval(key_dict[c])
#-- Handlers for the topbar menus
def help_func():
help_lines = []
offset = 0
fh_help = open('txt2html.txt')
for line in fh_help.readlines():
help_lines.append(string.rstrip(line))
s = curses.newwin(19, 77, 3, 1)
s.box()
num_lines = len(help_lines)
end = 0
while not end:
for i in range(1,18):
if i+offset < num_lines:
line = string.ljust(help_lines[i+offset],74)[:74]
else:
line = " "*74
end = 1
if i<3 and offset>0: s.addstr(i, 2, line, curses.A_BOLD)
else: s.addstr(i, 2, line, curses.A_NORMAL)
s.refresh()
c = s.getch()
offset = offset+15
s.erase()
return CONTINUE
def update_txt2html():
# Check for updated functions (fail gracefully if not fetchable)
s = curses.newwin(6, 60, 4, 5)
s.box()
s.addstr(1, 2, "* PRESS ANY KEY TO CONTINUE *", curses.A_BOLD)
s.addstr(3, 2, "...downloading...")
s.refresh()
try:
from urllib import urlopen
updates = urlopen('http://gnosis.cx/download/dmTxt2Html.py').read()
fh = open('dmTxt2Html.py', 'w')
fh.write(updates)
fh.close()
s.addstr(3, 2, "Module [dmTxt2Html] downloaded to current directory")
except:
s.addstr(3, 2, "Download of updated [dmTxt2Html] module failed!")
reload(dmTxt2Html)
s.addstr(4, 2, "Module [dmTxt2Html] reloaded from current directory ")
s.refresh()
c = s.getch()
s.erase()
def file_func():
s = curses.newwin(6,10,2,1)
s.box()
s.addstr(1,2, "I", hotkey_attr)
s.addstr(1,3, "nput", menu_attr)
s.addstr(2,2, "O", hotkey_attr)
s.addstr(2,3, "utput", menu_attr)
s.addstr(3,2, "T", hotkey_attr)
s.addstr(3,3, "ype", menu_attr)
s.addstr(4,2, "U", hotkey_attr)
s.addstr(4,3, "pdate", menu_attr)
s.addstr(1,2, "", hotkey_attr)
s.refresh()
c = s.getch()
if c in (ord('U'), ord('u')): # Remote function update
update_txt2html()
elif c in (ord('I'), ord('i'), curses.KEY_ENTER, 10):
curses.echo()
s.erase()
screen.addstr(5,33, " "*43, curses.A_UNDERLINE)
cfg_dict['source'] = screen.getstr(5,33)
curses.noecho()
elif c in (ord('O'), ord('o')):
curses.echo()
s.erase()
screen.addstr(8,33, " "*43, curses.A_UNDERLINE)
cfg_dict['target'] = screen.getstr(8,33)
curses.noecho()
elif c in (ord('T'), ord('t')):
s.addstr(3,7, "->", menu_attr)
s.refresh()
s2 = curses.newwin(8,15,4,10)
s2.box()
s2.addstr(1,2, "H", hotkey_attr)
s2.addstr(1,3, "TML", menu_attr)
s2.addstr(2,2, "P", hotkey_attr)
s2.addstr(2,3, "ython", menu_attr)
s2.addstr(3,2, "F", hotkey_attr)
s2.addstr(3,3, "AQ", menu_attr)
s2.addstr(4,2, "S", hotkey_attr)
s2.addstr(4,3, "mart_ASCII", menu_attr)
s2.addstr(5,2, "R", hotkey_attr)
s2.addstr(5,3, "aw", menu_attr)
s2.addstr(6,2, "I", hotkey_attr)
s2.addstr(6,3, "nfer Type", menu_attr)
s2.addstr(6,2, "", hotkey_attr)
s2.refresh()
c = s2.getch()
if c in (ord('I'), ord('i'), curses.KEY_ENTER, 10):
cfg_dict['type'] = 'INFER'
elif c in (ord('H'), ord('h')): cfg_dict['type'] = 'HTML'
elif c in (ord('P'), ord('p')): cfg_dict['type'] = 'PYTHON'
elif c in (ord('F'), ord('f')): cfg_dict['type'] = 'FAQ'
elif c in (ord('S'), ord('s')): cfg_dict['type'] = 'SMART_ASCII'
elif c in (ord('R'), ord('r')): cfg_dict['type'] = 'RAW'
else: curses.beep()
s2.erase()
s.erase()
else:
curses.beep()
s.erase()
return CONTINUE
def doit_func():
global counter
counter = counter+1
if cfg_dict['type'] == 'INFER':
cfg_dict['type'] = dmTxt2Html.infer_type(cfg_dict['source'])
dmTxt2Html.main(cfg_dict)
return CONTINUE
def proxy_func():
s = curses.newwin(6, 15, 2, 8)
s.box()
s.addstr(1, 2, "P", hotkey_attr)
s.addstr(1, 3, "roxy Bar", menu_attr)
s.addstr(2, 2, "T", hotkey_attr)
s.addstr(2, 3, "rap Links", menu_attr)
s.addstr(3, 2, "A", hotkey_attr)
s.addstr(3, 3, "ll Proxyes", menu_attr)
s.addstr(4, 2, "N", hotkey_attr)
s.addstr(4, 3, "o Proxies", menu_attr)
s.addstr(4, 2, "", hotkey_attr)
s.refresh()
c = s.getch()
s.erase()
if c in (ord('N'), ord('n'), curses.KEY_ENTER, 10):
cfg_dict['proxy'] = 'NONE'
elif c in (ord('P'), ord('p')): cfg_dict['proxy'] = 'NAVIGATOR'
elif c in (ord('T'), ord('t')): cfg_dict['proxy'] = 'TRAP_LINKS'
elif c in (ord('A'), ord('a')): cfg_dict['proxy'] = 'ALL'
else: curses.beep()
return CONTINUE
#-- Display the currently selected options
def draw_dict():
screen.addstr(5,33, " "*43, curses.A_NORMAL)
screen.addstr(8,33, " "*43, curses.A_NORMAL)
screen.addstr(11,33, " "*43, curses.A_NORMAL)
screen.addstr(14,33, " "*43, curses.A_NORMAL)
screen.addstr(5, 33, cfg_dict['source'], curses.A_STANDOUT)
screen.addstr(8, 33, cfg_dict['target'], curses.A_STANDOUT)
screen.addstr(11,33, cfg_dict['type'], curses.A_STANDOUT)
screen.addstr(14,33, cfg_dict['proxy'], curses.A_STANDOUT)
screen.addstr(17,33, str(counter), curses.A_STANDOUT)
screen.refresh()
#-- Top level function call (everything except [curses] setup/cleanup)
def main(stdscr):
# Frame the interface area at fixed VT100 size
global screen
screen = stdscr.subwin(23, 79, 0, 0)
screen.box()
screen.hline(2, 1, curses.ACS_HLINE, 77)
screen.refresh()
# Define the topbar menus
file_menu = ("File", "file_func()")
proxy_menu = ("Proxy Mode", "proxy_func()")
doit_menu = ("Do It!", "doit_func()")
help_menu = ("Help", "help_func()")
exit_menu = ("Exit", "EXIT")
# Add the topbar menus to screen object
topbar_menu((file_menu, proxy_menu, doit_menu, help_menu, exit_menu))
# Draw the onscreen field titles
screen.addstr(5, 4, " Source of Input:", curses.A_BOLD)
screen.addstr(8, 4, " Output Destination:", curses.A_BOLD)
screen.addstr(11, 4," Conversion Type:", curses.A_BOLD)
screen.addstr(14, 4," Proxy Mode:", curses.A_BOLD)
screen.addstr(17, 4,"Conversions during Session:", curses.A_BOLD)
screen.addstr(1, 77, "", curses.A_STANDOUT)
draw_dict()
# Enter the topbar menu loop
while topbar_key_handler():
draw_dict()
if __name__=='__main__':
try:
# Initialize curses
stdscr=curses.initscr()
#curses.start_color()
# Turn off echoing of keys, and enter cbreak mode,
# where no buffering is performed on keyboard input
curses.noecho() ; curses.cbreak()
# In keypad mode, escape sequences for special keys
# (like the cursor keys) will be interpreted and
# a special value like curses.KEY_LEFT will be returned
stdscr.keypad(1)
main(stdscr) # Enter the main loop
# Set everything back to normal
stdscr.keypad(0)
curses.echo() ; curses.nocbreak()
curses.endwin() # Terminate curses
except:
# In the event of an error, restore the terminal
# to a sane state.
stdscr.keypad(0)
curses.echo() ; curses.nocbreak()
curses.endwin()
traceback.print_exc() # Print the exception
|
the-stack_106_20439
|
#!/usr/bin/env python
import os
import numpy as np
from tqdm import tqdm
from OPTIMAS.utils.files_handling import images_list, read_image_size
def merge_npy(input_data_folder, experiment):
"""
merge all the individual npy files into one bigger file for faster I/O
"""
path_input_npy_folder = f"{input_data_folder}/{experiment}/raw_data"
output_data_folder = f'{input_data_folder}/{experiment}'
json_file_path = f"{input_data_folder}/{experiment}/{experiment}_info.json"
output = []
files = images_list(path_input_npy_folder, extension = 'npy')
size_img = read_image_size(json_file_path)
for file in tqdm(files):
image = np.load(f"{path_input_npy_folder}/{file}").reshape(size_img[0], size_img[1])
output.append(image)
merged_npy_file_path = f"{output_data_folder}/raw_data.npy"
np.save(merged_npy_file_path, np.array(output))
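    # The merged array has shape (n_frames, height, width) and is written next to the
    # per-frame files as <experiment>/raw_data.npy for faster sequential reads.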
if __name__ == "__main__":
input_data_folder = '/mnt/home_nas/jeremy/Recherches/Postdoc/Projects/Memory/Computational_Principles_of_Memory/optopatch/data/2020_03_02'
experiment = 'experiment_132'
    merge_npy(input_data_folder, experiment)
|
the-stack_106_20440
|
import mail1
from equipment.framework.Log.AbstractLog import AbstractLog
from equipment.framework.Config.AbstractConfig import AbstractConfig
from equipment.framework.Mail.AbstractMail import AbstractMail
from typing import Union
from equipment.framework.Mail.Email.Email import Email
from equipment.framework.Mail.Email.EmailFactory import EmailFactory
class SMTPMail(AbstractMail):
def __init__(self, config: AbstractConfig, log: AbstractLog):
self.config = config
self.log = log
def send(self, email: Union[Email, EmailFactory]) -> bool:
if isinstance(email, EmailFactory):
email = email.make()
try:
mail1.send(
subject=email.subject,
text=email.text,
text_html=email.html,
sender=email.sender,
recipients=email.recipients,
cc=email.cc,
bcc=email.bcc,
attachments=email.attachments,
smtp_host=self.config.get('MAIL_SMTP', 'host'),
smtp_port=int(self.config.get('MAIL_SMTP', 'port')),
username=self.config.get('MAIL_SMTP', 'user'),
password=self.config.get('MAIL_SMTP', 'password')
)
return True
except Exception as e:
self.log.error(e, exc_info=True)
return False
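# Usage sketch (hypothetical wiring; the concrete AbstractConfig/AbstractLog
# implementations and the Email/EmailFactory construction come from the surrounding
# framework):
#   mailer = SMTPMail(config, log)
#   sent = mailer.send(email_or_factory)  # True on success, False (and a logged error) on failure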
|
the-stack_106_20441
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model definitions for simple speech recognition.
"""
import hashlib
import math
import os.path
import random
import re
import sys
import tarfile
import numpy as np
import urllib.request
import tensorflow as tf
from tensorflow.python.ops import gen_audio_ops as audio_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.platform import gfile
from tensorflow.python.util import compat
try:
  # Optional op, only needed for the 'micro' preprocessing mode referenced below.
  from tensorflow.lite.experimental.microfrontend.python.ops import audio_microfrontend_op as frontend_op
except ImportError:
  frontend_op = None
MAX_NUM_WAVS_PER_CLASS = 2**27 - 1 # ~134M
SILENCE_LABEL = '_silence_'
SILENCE_INDEX = 0
UNKNOWN_WORD_LABEL = '_unknown_'
UNKNOWN_WORD_INDEX = 1
BACKGROUND_NOISE_DIR_NAME = '_background_noise_'
RANDOM_SEED = 59185
def prepare_words_list(wanted_words):
"""Prepends common tokens to the custom word list.
Args:
wanted_words: List of strings containing the custom words.
Returns:
List with the standard silence and unknown tokens added.
"""
return [SILENCE_LABEL, UNKNOWN_WORD_LABEL] + wanted_words
def which_set(filename, validation_percentage, testing_percentage):
"""Determines which data partition the file should belong to.
We want to keep files in the same training, validation, or testing sets even
if new ones are added over time. This makes it less likely that testing
samples will accidentally be reused in training when long runs are restarted
for example. To keep this stability, a hash of the filename is taken and used
to determine which set it should belong to. This determination only depends on
the name and the set proportions, so it won't change as other files are added.
It's also useful to associate particular files as related (for example words
spoken by the same person), so anything after '_nohash_' in a filename is
ignored for set determination. This ensures that 'bobby_nohash_0.wav' and
'bobby_nohash_1.wav' are always in the same set, for example.
Args:
filename: File path of the data sample.
validation_percentage: How much of the data set to use for validation.
testing_percentage: How much of the data set to use for testing.
Returns:
String, one of 'training', 'validation', or 'testing'.
"""
base_name = os.path.basename(filename)
# We want to ignore anything after '_nohash_' in the file name when
# deciding which set to put a wav in, so the data set creator has a way of
# grouping wavs that are close variations of each other.
hash_name = re.sub(r'_nohash_.*$', '', base_name)
# This looks a bit magical, but we need to decide whether this file should
# go into the training, testing, or validation sets, and we want to keep
# existing files in the same set even if more files are subsequently
# added.
# To do that, we need a stable way of deciding based on just the file name
# itself, so we do a hash of that and then use that to generate a
# probability value that we use to assign it.
hash_name_hashed = hashlib.sha1(compat.as_bytes(hash_name)).hexdigest()
percentage_hash = ((int(hash_name_hashed, 16) %
(MAX_NUM_WAVS_PER_CLASS + 1)) *
(100.0 / MAX_NUM_WAVS_PER_CLASS))
if percentage_hash < validation_percentage:
result = 'validation'
elif percentage_hash < (testing_percentage + validation_percentage):
result = 'testing'
else:
result = 'training'
return result
def get_features_range(model_settings):
"""Returns the expected min/max for generated features.
Args:
model_settings: Information about the current model being trained.
Returns:
Min/max float pair holding the range of features.
Raises:
Exception: If preprocessing mode isn't recognized.
"""
# TODO(petewarden): These values have been derived from the observed ranges
# of spectrogram and MFCC inputs. If the preprocessing pipeline changes,
# they may need to be updated.
if model_settings['preprocess'] == 'average':
features_min = 0.0
features_max = 127.5
elif model_settings['preprocess'] == 'mfcc':
features_min = -247.0
features_max = 30.0
elif model_settings['preprocess'] == 'micro':
features_min = 0.0
features_max = 26.0
else:
raise Exception('Unknown preprocess mode "%s" (should be "mfcc",'
' "average", or "micro")' % (model_settings['preprocess']))
return features_min, features_max
class AudioProcessor(object):
"""Handles loading, partitioning, and preparing audio training data."""
def __init__(self, data_url, data_dir, silence_percentage, unknown_percentage,
wanted_words, validation_percentage, testing_percentage,
model_settings, summaries_dir):
if data_dir:
self.data_dir = data_dir
self.maybe_download_and_extract_dataset(data_url, data_dir)
self.prepare_data_index(silence_percentage, unknown_percentage,
wanted_words, validation_percentage,
testing_percentage)
self.prepare_background_data()
#self.prepare_processing_graph(model_settings, summaries_dir)
def maybe_download_and_extract_dataset(self, data_url, dest_directory):
"""Download and extract data set tar file.
If the data set we're using doesn't already exist, this function
downloads it from the TensorFlow.org website and unpacks it into a
directory.
If the data_url is none, don't download anything and expect the data
directory to contain the correct files already.
Args:
data_url: Web location of the tar file containing the data set.
dest_directory: File path to extract data to.
"""
if not data_url:
return
if not gfile.Exists(dest_directory):
os.makedirs(dest_directory)
filename = data_url.split('/')[-1]
filepath = os.path.join(dest_directory, filename)
if not gfile.Exists(filepath):
def _progress(count, block_size, total_size):
sys.stdout.write(
'\r>> Downloading %s %.1f%%' %
(filename, float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
try:
filepath, _ = urllib.request.urlretrieve(data_url, filepath, _progress)
except:
tf.compat.v1.logging.error(
'Failed to download URL: {0} to folder: {1}. Please make sure you '
'have enough free space and an internet connection'.format(
data_url, filepath))
raise
print()
statinfo = os.stat(filepath)
tf.compat.v1.logging.info(
'Successfully downloaded {0} ({1} bytes)'.format(
filename, statinfo.st_size))
tarfile.open(filepath, 'r:gz').extractall(dest_directory)
def prepare_data_index(self, silence_percentage, unknown_percentage,
wanted_words, validation_percentage,
testing_percentage):
"""Prepares a list of the samples organized by set and label.
The training loop needs a list of all the available data, organized by
which partition it should belong to, and with ground truth labels attached.
This function analyzes the folders below the `data_dir`, figures out the
right
labels for each file based on the name of the subdirectory it belongs to,
and uses a stable hash to assign it to a data set partition.
Args:
silence_percentage: How much of the resulting data should be background.
unknown_percentage: How much should be audio outside the wanted classes.
wanted_words: Labels of the classes we want to be able to recognize.
validation_percentage: How much of the data set to use for validation.
testing_percentage: How much of the data set to use for testing.
Returns:
Dictionary containing a list of file information for each set partition,
and a lookup map for each class to determine its numeric index.
Raises:
Exception: If expected files are not found.
"""
# Make sure the shuffling and picking of unknowns is deterministic.
random.seed(RANDOM_SEED)
wanted_words_index = {}
for index, wanted_word in enumerate(wanted_words):
wanted_words_index[wanted_word] = index + 2
self.data_index = {'validation': [], 'testing': [], 'training': []}
unknown_index = {'validation': [], 'testing': [], 'training': []}
all_words = {}
# Look through all the subfolders to find audio samples
search_path = os.path.join(self.data_dir, '*', '*.wav')
for wav_path in gfile.Glob(search_path):
_, word = os.path.split(os.path.dirname(wav_path))
word = word.lower()
# Treat the '_background_noise_' folder as a special case, since we expect
# it to contain long audio samples we mix in to improve training.
if word == BACKGROUND_NOISE_DIR_NAME:
continue
all_words[word] = True
set_index = which_set(wav_path, validation_percentage, testing_percentage)
# If it's a known class, store its detail, otherwise add it to the list
# we'll use to train the unknown label.
if word in wanted_words_index:
self.data_index[set_index].append({'label': word, 'file': wav_path})
else:
unknown_index[set_index].append({'label': word, 'file': wav_path})
if not all_words:
raise Exception('No .wavs found at ' + search_path)
for index, wanted_word in enumerate(wanted_words):
if wanted_word not in all_words:
raise Exception('Expected to find ' + wanted_word +
' in labels but only found ' +
', '.join(all_words.keys()))
# We need an arbitrary file to load as the input for the silence samples.
# It's multiplied by zero later, so the content doesn't matter.
silence_wav_path = self.data_index['training'][0]['file']
for set_index in ['validation', 'testing', 'training']:
set_size = len(self.data_index[set_index])
silence_size = int(math.ceil(set_size * silence_percentage / 100))
for _ in range(silence_size):
self.data_index[set_index].append({
'label': SILENCE_LABEL,
'file': silence_wav_path
})
# Pick some unknowns to add to each partition of the data set.
random.shuffle(unknown_index[set_index])
unknown_size = int(math.ceil(set_size * unknown_percentage / 100))
self.data_index[set_index].extend(unknown_index[set_index][:unknown_size])
# Make sure the ordering is random.
for set_index in ['validation', 'testing', 'training']:
random.shuffle(self.data_index[set_index])
# Prepare the rest of the result data structure.
self.words_list = prepare_words_list(wanted_words)
self.word_to_index = {}
for word in all_words:
if word in wanted_words_index:
self.word_to_index[word] = wanted_words_index[word]
else:
self.word_to_index[word] = UNKNOWN_WORD_INDEX
self.word_to_index[SILENCE_LABEL] = SILENCE_INDEX
def prepare_background_data(self):
"""Searches a folder for background noise audio, and loads it into memory.
It's expected that the background audio samples will be in a subdirectory
named '_background_noise_' inside the 'data_dir' folder, as .wavs that match
the sample rate of the training data, but can be much longer in duration.
If the '_background_noise_' folder doesn't exist at all, this isn't an
error, it's just taken to mean that no background noise augmentation should
be used. If the folder does exist, but it's empty, that's treated as an
error.
Returns:
List of raw PCM-encoded audio samples of background noise.
Raises:
Exception: If files aren't found in the folder.
"""
self.background_data = []
background_dir = os.path.join(self.data_dir, BACKGROUND_NOISE_DIR_NAME)
if not gfile.Exists(background_dir):
return self.background_data
search_path = os.path.join(self.data_dir, BACKGROUND_NOISE_DIR_NAME,
'*.wav')
for wav_path in gfile.Glob(search_path):
wav_loader = io_ops.read_file(tf.constant(wav_path, dtype=tf.string))
wav_decoder = tf.audio.decode_wav(wav_loader, desired_channels=1)
wav_data = wav_decoder.audio.numpy().flatten()
self.background_data.append(wav_data)
if not self.background_data:
raise Exception('No background wav files were found in ' + search_path)
#def prepare_processing_graph(self, model_settings, summaries_dir):
def get_output_audio(self, model_settings):
"""Builds a TensorFlow graph to apply the input distortions.
Creates a graph that loads a WAVE file, decodes it, scales the volume,
shifts it in time, adds in background noise, calculates a spectrogram, and
then builds an MFCC fingerprint from that.
This must be called with an active TensorFlow session running, and it
creates multiple placeholder inputs, and one output:
- wav_filename_placeholder_: Filename of the WAV to load.
- foreground_volume_placeholder_: How loud the main clip should be.
- time_shift_padding_placeholder_: Where to pad the clip.
- time_shift_offset_placeholder_: How much to move the clip in time.
- background_data_placeholder_: PCM sample data for background noise.
- background_volume_placeholder_: Loudness of mixed-in background.
- output_: Output 2D fingerprint of processed audio.
Args:
model_settings: Information about the current model being trained.
summaries_dir: Path to save training summary information to.
Raises:
ValueError: If the preprocessing mode isn't recognized.
Exception: If the preprocessor wasn't compiled in.
"""
#with tf.compat.v1.get_default_graph().name_scope('data'):
desired_samples = model_settings['desired_samples']
#self.wav_filename_placeholder_ = tf.compat.v1.placeholder(
# tf.string, [], name='wav_filename')
wav_loader = io_ops.read_file(self.wav_filename_placeholder_)
wav_decoder = tf.audio.decode_wav(
wav_loader, desired_channels=1, desired_samples=desired_samples)
# Allow the audio sample's volume to be adjusted.
#self.foreground_volume_placeholder_ = tf.compat.v1.placeholder(
# tf.float32, [], name='foreground_volume')
scaled_foreground = tf.multiply(wav_decoder.audio,
self.foreground_volume_placeholder_)
# Shift the sample's start position, and pad any gaps with zeros.
#self.time_shift_padding_placeholder_ = tf.compat.v1.placeholder(
# tf.int32, [2, 2], name='time_shift_padding')
#self.time_shift_offset_placeholder_ = tf.compat.v1.placeholder(
# tf.int32, [2], name='time_shift_offset')
padded_foreground = tf.pad(
tensor=scaled_foreground,
paddings=self.time_shift_padding_placeholder_,
mode='CONSTANT')
sliced_foreground = tf.slice(padded_foreground,
self.time_shift_offset_placeholder_,
[desired_samples, -1])
# Mix in background noise.
#self.background_data_placeholder_ = tf.compat.v1.placeholder(
# tf.float32, [desired_samples, 1], name='background_data')
#self.background_volume_placeholder_ = tf.compat.v1.placeholder(
# tf.float32, [], name='background_volume')
background_mul = tf.multiply(self.background_data_placeholder_,
self.background_volume_placeholder_)
background_add = tf.add(background_mul, sliced_foreground)
background_clamp = tf.clip_by_value(background_add, -1.0, 1.0)
# Run the spectrogram and MFCC ops to get a 2D 'fingerprint' of the audio.
spectrogram = audio_ops.audio_spectrogram(
background_clamp,
window_size=model_settings['window_size_samples'],
stride=model_settings['window_stride_samples'],
magnitude_squared=True)
#tf.compat.v1.summary.image(
# 'spectrogram', tf.expand_dims(spectrogram, -1), max_outputs=1)
# The number of buckets in each FFT row in the spectrogram will depend on
# how many input samples there are in each window. This can be quite
# large, with a 160 sample window producing 127 buckets for example. We
# don't need this level of detail for classification, so we often want to
# shrink them down to produce a smaller result. That's what this section
# implements. One method is to use average pooling to merge adjacent
# buckets, but a more sophisticated approach is to apply the MFCC
# algorithm to shrink the representation.
if model_settings['preprocess'] == 'average':
self.output_ = tf.nn.pool(
input=tf.expand_dims(spectrogram, -1),
window_shape=[1, model_settings['average_window_width']],
strides=[1, model_settings['average_window_width']],
pooling_type='AVG',
padding='SAME')
tf.compat.v1.summary.image('shrunk_spectrogram',
self.output_,
max_outputs=1)
elif model_settings['preprocess'] == 'mfcc':
self.output_ = audio_ops.mfcc(
spectrogram,
wav_decoder.sample_rate,
dct_coefficient_count=model_settings['fingerprint_width'])
#tf.compat.v1.summary.image(
# 'mfcc', tf.expand_dims(self.output_, -1), max_outputs=1)
elif model_settings['preprocess'] == 'micro':
if not frontend_op:
raise Exception(
'Micro frontend op is currently not available when running'
' TensorFlow directly from Python, you need to build and run'
' through Bazel')
sample_rate = model_settings['sample_rate']
window_size_ms = (model_settings['window_size_samples'] *
1000) / sample_rate
window_step_ms = (model_settings['window_stride_samples'] *
1000) / sample_rate
int16_input = tf.cast(tf.multiply(background_clamp, 32768), tf.int16)
micro_frontend = frontend_op.audio_microfrontend(
int16_input,
sample_rate=sample_rate,
window_size=window_size_ms,
window_step=window_step_ms,
num_channels=model_settings['fingerprint_width'],
out_scale=1,
out_type=tf.float32)
self.output_ = tf.multiply(micro_frontend, (10.0 / 256.0))
#tf.compat.v1.summary.image(
# 'micro',
# tf.expand_dims(tf.expand_dims(self.output_, -1), 0),
# max_outputs=1)
else:
raise ValueError('Unknown preprocess mode "%s" (should be "mfcc", '
' "average", or "micro")' %
(model_settings['preprocess']))
## Merge all the summaries and write them out to /tmp/retrain_logs (by
## default)
#self.merged_summaries_ = tf.compat.v1.summary.merge_all(scope='data')
#if summaries_dir:
# self.summary_writer_ = tf.compat.v1.summary.FileWriter(
# summaries_dir + '/data', tf.compat.v1.get_default_graph())
def set_size(self, mode):
"""Calculates the number of samples in the dataset partition.
Args:
mode: Which partition, must be 'training', 'validation', or 'testing'.
Returns:
Number of samples in the partition.
"""
return len(self.data_index[mode])
def get_data(self, how_many, offset, model_settings, background_frequency,
background_volume_range, time_shift, mode):
"""Gather samples from the data set, applying transformations as needed.
When the mode is 'training', a random selection of samples will be returned,
otherwise the first N clips in the partition will be used. This ensures that
validation always uses the same samples, reducing noise in the metrics.
Args:
how_many: Desired number of samples to return. -1 means the entire
contents of this partition.
offset: Where to start when fetching deterministically.
model_settings: Information about the current model being trained.
background_frequency: How many clips will have background noise, 0.0 to
1.0.
background_volume_range: How loud the background noise will be.
time_shift: How much to randomly shift the clips by in time.
mode: Which partition to use, must be 'training', 'validation', or
'testing'.
sess: TensorFlow session that was active when processor was created.
Returns:
List of sample data for the transformed samples, and list of label indexes
Raises:
ValueError: If background samples are too short.
"""
# Pick one of the partitions to choose samples from.
candidates = self.data_index[mode]
if how_many == -1:
sample_count = len(candidates)
else:
sample_count = max(0, min(how_many, len(candidates) - offset))
# Data and labels will be populated and returned.
data = np.zeros((sample_count, model_settings['fingerprint_size']))
labels = np.zeros(sample_count)
desired_samples = model_settings['desired_samples']
use_background = self.background_data and (mode == 'training')
pick_deterministically = (mode != 'training')
# Use the processing graph we created earlier to repeatedly to generate the
# final output sample data we'll use in training.
for i in range(offset, offset + sample_count):
# Pick which audio sample to use.
if how_many == -1 or pick_deterministically:
sample_index = i
else:
sample_index = np.random.randint(len(candidates))
sample = candidates[sample_index]
# If we're time shifting, set up the offset for this sample.
if time_shift > 0:
time_shift_amount = np.random.randint(-time_shift, time_shift)
else:
time_shift_amount = 0
if time_shift_amount > 0:
time_shift_padding = [[time_shift_amount, 0], [0, 0]]
time_shift_offset = [0, 0]
else:
time_shift_padding = [[0, -time_shift_amount], [0, 0]]
time_shift_offset = [-time_shift_amount, 0]
#input_dict = {
# self.wav_filename_placeholder_: sample['file'],
# self.time_shift_padding_placeholder_: time_shift_padding,
# self.time_shift_offset_placeholder_: time_shift_offset,
#}
self.wav_filename_placeholder_ = tf.constant(sample['file'], tf.string)
self.time_shift_padding_placeholder_ = tf.constant(time_shift_padding,
tf.int32)
self.time_shift_offset_placeholder_ = tf.constant(time_shift_offset,
tf.int32)
# Choose a section of background noise to mix in.
if use_background or sample['label'] == SILENCE_LABEL:
background_index = np.random.randint(len(self.background_data))
background_samples = self.background_data[background_index]
if len(background_samples) <= model_settings['desired_samples']:
raise ValueError(
'Background sample is too short! Need more than %d'
' samples but only %d were found' %
(model_settings['desired_samples'], len(background_samples)))
background_offset = np.random.randint(
0, len(background_samples) - model_settings['desired_samples'])
background_clipped = background_samples[background_offset:(
background_offset + desired_samples)]
background_reshaped = background_clipped.reshape([desired_samples, 1])
if sample['label'] == SILENCE_LABEL:
background_volume = np.random.uniform(0, 1)
elif np.random.uniform(0, 1) < background_frequency:
background_volume = np.random.uniform(0, background_volume_range)
else:
background_volume = 0
else:
background_reshaped = np.zeros([desired_samples, 1])
background_volume = 0
#input_dict[self.background_data_placeholder_] = background_reshaped
#input_dict[self.background_volume_placeholder_] = background_volume
self.background_data_placeholder_ = tf.constant(background_reshaped,
tf.float32)
self.background_volume_placeholder_ = tf.constant(background_volume,
tf.float32)
# If we want silence, mute out the main sample but leave the background.
if sample['label'] == SILENCE_LABEL:
#input_dict[self.foreground_volume_placeholder_] = 0
self.foreground_volume_placeholder_ = tf.constant(0, tf.float32)
else:
#input_dict[self.foreground_volume_placeholder_] = 1
self.foreground_volume_placeholder_ = tf.constant(1, tf.float32)
# Run the graph to produce the output audio.
#summary, data_tensor = sess.run(
# [self.merged_summaries_, self.output_], feed_dict=input_dict)
#self.summary_writer_.add_summary(summary)
self.get_output_audio(model_settings)
data_tensor = self.output_
data[i - offset, :] = data_tensor.numpy().flatten()
label_index = self.word_to_index[sample['label']]
labels[i - offset] = label_index
return data, labels
def _next_power_of_two(x):
  """Calculates the smallest enclosing power of two for an input.
  Args:
    x: Positive float or integer number.
  Returns:
    Next largest power of two integer.
  """
  return 1 if x == 0 else 2**(int(x) - 1).bit_length()
def prepare_model_settings(label_count, sample_rate, clip_duration_ms,
window_size_ms, window_stride_ms, feature_bin_count,
preprocess):
"""Calculates common settings needed for all models.
Args:
label_count: How many classes are to be recognized.
sample_rate: Number of audio samples per second.
clip_duration_ms: Length of each audio clip to be analyzed.
window_size_ms: Duration of frequency analysis window.
window_stride_ms: How far to move in time between frequency windows.
feature_bin_count: Number of frequency bins to use for analysis.
preprocess: How the spectrogram is processed to produce features.
Returns:
Dictionary containing common settings.
Raises:
ValueError: If the preprocessing mode isn't recognized.
"""
desired_samples = int(sample_rate * clip_duration_ms / 1000)
window_size_samples = int(sample_rate * window_size_ms / 1000)
window_stride_samples = int(sample_rate * window_stride_ms / 1000)
length_minus_window = (desired_samples - window_size_samples)
if length_minus_window < 0:
spectrogram_length = 0
else:
spectrogram_length = 1 + int(length_minus_window / window_stride_samples)
if preprocess == 'average':
fft_bin_count = 1 + (_next_power_of_two(window_size_samples) / 2)
average_window_width = int(math.floor(fft_bin_count / feature_bin_count))
fingerprint_width = int(math.ceil(fft_bin_count / average_window_width))
elif preprocess == 'mfcc':
average_window_width = -1
fingerprint_width = feature_bin_count
elif preprocess == 'micro':
average_window_width = -1
fingerprint_width = feature_bin_count
else:
raise ValueError('Unknown preprocess mode "%s" (should be "mfcc",'
' "average", or "micro")' % (preprocess))
fingerprint_size = fingerprint_width * spectrogram_length
return {
'desired_samples': desired_samples,
'window_size_samples': window_size_samples,
'window_stride_samples': window_stride_samples,
'spectrogram_length': spectrogram_length,
'fingerprint_width': fingerprint_width,
'fingerprint_size': fingerprint_size,
'label_count': label_count,
'sample_rate': sample_rate,
'preprocess': preprocess,
'average_window_width': average_window_width,
}
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
'--data_url',
type=str,
# pylint: disable=line-too-long
default='https://storage.googleapis.com/download.tensorflow.org/data/speech_commands_v0.02.tar.gz',
# pylint: enable=line-too-long
help='Location of speech training data archive on the web.')
parser.add_argument(
'--data_dir',
type=str,
default='/tmp/speech_dataset/',
help="""\
Where to download the speech training data to.
""")
parser.add_argument(
'--background_volume',
type=float,
default=0.1,
help="""\
How loud the background noise should be, between 0 and 1.
""")
parser.add_argument(
'--background_frequency',
type=float,
default=0.8,
help="""\
How many of the training samples have background noise mixed in.
""")
parser.add_argument(
'--silence_percentage',
type=float,
default=10.0,
help="""\
How much of the training data should be silence.
""")
parser.add_argument(
'--unknown_percentage',
type=float,
default=10.0,
help="""\
How much of the training data should be unknown words.
""")
parser.add_argument(
'--time_shift_ms',
type=float,
default=100.0,
help="""\
Range to randomly shift the training audio by in time.
""")
parser.add_argument(
'--testing_percentage',
type=int,
default=10,
help='What percentage of wavs to use as a test set.')
parser.add_argument(
'--validation_percentage',
type=int,
default=10,
help='What percentage of wavs to use as a validation set.')
parser.add_argument(
'--sample_rate',
type=int,
default=16000,
help='Expected sample rate of the wavs',)
parser.add_argument(
'--clip_duration_ms',
type=int,
default=1000,
help='Expected duration in milliseconds of the wavs',)
parser.add_argument(
'--window_size_ms',
type=float,
default=30.0,
help='How long each spectrogram timeslice is.',)
parser.add_argument(
'--window_stride_ms',
type=float,
default=10.0,
help='How far to move in time between spectrogram timeslices.',
)
parser.add_argument(
'--feature_bin_count',
type=int,
default=40,
help='How many bins to use for the MFCC fingerprint',
)
parser.add_argument(
'--how_many_training_steps',
type=str,
default='15000,3000',
help='How many training loops to run',)
parser.add_argument(
'--eval_step_interval',
type=int,
default=400,
help='How often to evaluate the training results.')
parser.add_argument(
'--learning_rate',
type=str,
default='0.001,0.0001',
help='How large a learning rate to use when training.')
parser.add_argument(
'--batch_size',
type=int,
default=100,
help='How many items to train with at once',)
parser.add_argument(
'--summaries_dir',
type=str,
default='/tmp/retrain_logs',
help='Where to save summary logs for TensorBoard.')
parser.add_argument(
'--wanted_words',
type=str,
default='yes,no,up,down,left,right,on,off,stop,go',
help='Words to use (others will be added to an unknown label)',)
parser.add_argument(
'--train_dir',
type=str,
default='/tmp/speech_commands_train',
help='Directory to write event logs and checkpoint.')
parser.add_argument(
'--save_step_interval',
type=int,
default=100,
help='Save model checkpoint every save_steps.')
parser.add_argument(
'--start_checkpoint',
type=str,
default='',
help='If specified, restore this pretrained model before any training.')
parser.add_argument(
'--model_architecture',
type=str,
default='conv',
help='What model architecture to use')
parser.add_argument(
'--check_nans',
type=bool,
default=False,
help='Whether to check for invalid numbers during processing')
parser.add_argument(
'--quantize',
type=bool,
default=False,
help='Whether to train the model for eight-bit deployment')
parser.add_argument(
'--preprocess',
type=str,
default='mfcc',
help='Spectrogram processing mode. Can be "mfcc", "average", or "micro"')
# Function used to parse --verbosity argument
def verbosity_arg(value):
"""Parses verbosity argument.
Args:
value: A member of tf.logging.
Raises:
ArgumentTypeError: Not an expected value.
"""
value = value.upper()
if value == 'DEBUG':
return tf.compat.v1.logging.DEBUG
elif value == 'INFO':
return tf.compat.v1.logging.INFO
elif value == 'WARN':
return tf.compat.v1.logging.WARN
elif value == 'ERROR':
return tf.compat.v1.logging.ERROR
elif value == 'FATAL':
return tf.compat.v1.logging.FATAL
else:
raise argparse.ArgumentTypeError('Not an expected value')
parser.add_argument(
'--verbosity',
type=verbosity_arg,
default=tf.compat.v1.logging.INFO,
help='Log verbosity. Can be "DEBUG", "INFO", "WARN", "ERROR", or "FATAL"')
parser.add_argument(
'--optimizer',
type=str,
default='gradient_descent',
help='Optimizer (gradient_descent or momentum)')
FLAGS, unparsed = parser.parse_known_args()
#tf.compat.v1.app.run(main=main, argv=[sys.argv[0]] + unparsed)
model_settings = prepare_model_settings(
len(prepare_words_list(FLAGS.wanted_words.split(','))),
FLAGS.sample_rate, FLAGS.clip_duration_ms, FLAGS.window_size_ms,
FLAGS.window_stride_ms, FLAGS.feature_bin_count, FLAGS.preprocess)
audio_processor = AudioProcessor(
FLAGS.data_url, FLAGS.data_dir,
FLAGS.silence_percentage, FLAGS.unknown_percentage,
FLAGS.wanted_words.split(','), FLAGS.validation_percentage,
FLAGS.testing_percentage, model_settings, FLAGS.summaries_dir)
time_shift_samples = int((FLAGS.time_shift_ms * FLAGS.sample_rate) / 1000)
train_fingerprints, train_ground_truth = audio_processor.get_data(
FLAGS.batch_size, 0, model_settings, FLAGS.background_frequency,
FLAGS.background_volume, time_shift_samples, 'training')
validation_fingerprints, validation_ground_truth = \
audio_processor.get_data(FLAGS.batch_size, 0, model_settings, 0.0,
0.0, 0, 'validation')
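  # At this point train_fingerprints / validation_fingerprints are NumPy arrays of
  # shape (sample_count, fingerprint_size) and the matching *_ground_truth arrays
  # hold one integer label index per clip, ready to feed into a training loop.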
|
the-stack_106_20442
|
#!/usr/bin/env python3
# Created: 06.2020
# Copyright (c) 2020, Matthew Broadway
# License: MIT License
import argparse
import signal
import sys
from functools import partial
from typing import Optional
from PyQt5 import QtWidgets as qw, QtCore as qc, QtGui as qg
import ezdxf
from ezdxf.addons.drawing import Frontend, RenderContext
from ezdxf.addons.drawing.pyqt_backend import _get_x_scale, PyQtBackend, CorrespondingDXFEntity, \
CorrespondingDXFEntityStack
from ezdxf.drawing import Drawing
class CADGraphicsView(qw.QGraphicsView):
def __init__(self, view_buffer: float = 0.2):
super().__init__()
self._zoom = 1
self._default_zoom = 1
self._zoom_limits = (0.5, 100)
self._view_buffer = view_buffer
self.setTransformationAnchor(qw.QGraphicsView.AnchorUnderMouse)
self.setResizeAnchor(qw.QGraphicsView.AnchorUnderMouse)
self.setVerticalScrollBarPolicy(qc.Qt.ScrollBarAlwaysOff)
self.setHorizontalScrollBarPolicy(qc.Qt.ScrollBarAlwaysOff)
self.setDragMode(qw.QGraphicsView.ScrollHandDrag)
self.setFrameShape(qw.QFrame.NoFrame)
self.setRenderHints(qg.QPainter.Antialiasing | qg.QPainter.TextAntialiasing | qg.QPainter.SmoothPixmapTransform)
def clear(self):
pass
def fit_to_scene(self):
r = self.sceneRect()
bx, by = r.width() * self._view_buffer / 2, r.height() * self._view_buffer / 2
self.fitInView(self.sceneRect().adjusted(-bx, -by, bx, by), qc.Qt.KeepAspectRatio)
self._default_zoom = _get_x_scale(self.transform())
self._zoom = 1
def _get_zoom_amount(self) -> float:
return _get_x_scale(self.transform()) / self._default_zoom
def wheelEvent(self, event: qg.QWheelEvent) -> None:
        # Dividing by 120 gives the number of notches on a typical scroll wheel; see the QWheelEvent documentation.
delta_notches = event.angleDelta().y() / 120
zoom_per_scroll_notch = 0.2
factor = 1 + zoom_per_scroll_notch * delta_notches
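        # Worked example: one forward notch reports angleDelta().y() == 120,
        # so delta_notches == 1.0 and factor == 1.2 (a 20% zoom-in per notch).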
resulting_zoom = self._zoom * factor
if resulting_zoom < self._zoom_limits[0]:
factor = self._zoom_limits[0] / self._zoom
elif resulting_zoom > self._zoom_limits[1]:
factor = self._zoom_limits[1] / self._zoom
self.scale(factor, factor)
self._zoom *= factor
class CADGraphicsViewWithOverlay(CADGraphicsView):
element_selected = qc.pyqtSignal(object, qc.QPointF)
def __init__(self):
super().__init__()
self._current_item: Optional[qw.QGraphicsItem] = None
def clear(self):
super().clear()
self._current_item = None
def drawForeground(self, painter: qg.QPainter, rect: qc.QRectF) -> None:
if self._current_item is not None:
r = self._current_item.boundingRect()
r = self._current_item.sceneTransform().mapRect(r)
painter.fillRect(r, qg.QColor(0, 255, 0, 100))
def mouseMoveEvent(self, event: qg.QMouseEvent) -> None:
pos = self.mapToScene(event.pos())
self._current_item = self.scene().itemAt(pos, qg.QTransform())
self.element_selected.emit(self._current_item, pos)
self.scene().invalidate(self.sceneRect(), qw.QGraphicsScene.ForegroundLayer)
super().mouseMoveEvent(event)
class CadViewer(qw.QMainWindow):
def __init__(self):
super().__init__()
self.doc = None
self._render_context = None
self._visible_layers = None
self._current_layout = None
self.scene = qw.QGraphicsScene()
self.view = CADGraphicsViewWithOverlay()
self.view.setScene(self.scene)
self.view.scale(1, -1) # so that +y is up
self.view.element_selected.connect(self._on_element_selected)
self.renderer = PyQtBackend(self.scene)
menu = self.menuBar()
select_doc_action = qw.QAction('Select Document', self)
select_doc_action.triggered.connect(self._select_doc)
menu.addAction(select_doc_action)
self.select_layout_menu = menu.addMenu('Select Layout')
toggle_sidebar_action = qw.QAction('Toggle Sidebar', self)
toggle_sidebar_action.triggered.connect(self._toggle_sidebar)
menu.addAction(toggle_sidebar_action)
toggle_join_polylines_action = qw.QAction('Toggle Join Polylines', self)
toggle_join_polylines_action.triggered.connect(self._toggle_join_polylines)
menu.addAction(toggle_join_polylines_action)
self.sidebar = qw.QSplitter(qc.Qt.Vertical)
self.layers = qw.QListWidget()
self.layers.setStyleSheet('font-size: 12pt')
self.layers.itemChanged.connect(self._layers_updated)
self.sidebar.addWidget(self.layers)
self.info = qw.QPlainTextEdit()
self.info.setReadOnly(True)
self.sidebar.addWidget(self.info)
container = qw.QSplitter()
self.setCentralWidget(container)
container.addWidget(self.view)
container.addWidget(self.sidebar)
container.setCollapsible(0, False)
container.setCollapsible(1, True)
w = container.width()
container.setSizes([int(3 * w / 4), int(w / 4)])
self.setWindowTitle('CAD Viewer')
self.resize(1600, 900)
self.show()
def _select_doc(self):
path, _ = qw.QFileDialog.getOpenFileName(self, caption='Select CAD Document', filter='DXF Documents (*.dxf)')
if path:
self.set_document(ezdxf.readfile(path))
def set_document(self, document: Drawing):
self.doc = document
self._render_context = RenderContext(document)
self._visible_layers = None
self._current_layout = None
self._populate_layouts()
self._populate_layer_list()
self.draw_layout('Model')
def _populate_layer_list(self):
self.layers.blockSignals(True)
self.layers.clear()
for layer in self._render_context.layers.values():
name = layer.layer
item = qw.QListWidgetItem(name)
item.setCheckState(qc.Qt.Checked)
item.setBackground(qg.QColor(layer.color))
self.layers.addItem(item)
self.layers.blockSignals(False)
def _populate_layouts(self):
self.select_layout_menu.clear()
for layout_name in self.doc.layout_names_in_taborder():
action = qw.QAction(layout_name, self)
action.triggered.connect(partial(self.draw_layout, layout_name))
self.select_layout_menu.addAction(action)
def draw_layout(self, layout_name: str):
print(f'drawing {layout_name}')
self._current_layout = layout_name
self.renderer.clear()
self.view.clear()
layout = self.doc.layout(layout_name)
self._update_render_context(layout)
Frontend(self._render_context, self.renderer).draw_layout(layout)
self.view.fit_to_scene()
def _update_render_context(self, layout):
assert self._render_context
self._render_context.set_current_layout(layout)
# Direct modification of RenderContext.layers would be more flexible, but would also expose the internals.
if self._visible_layers is not None:
self._render_context.set_layers_state(self._visible_layers, state=True)
def resizeEvent(self, event: qg.QResizeEvent) -> None:
self.view.fit_to_scene()
@qc.pyqtSlot(qw.QListWidgetItem)
def _layers_updated(self, _item: qw.QListWidgetItem):
self._visible_layers = set()
for i in range(self.layers.count()):
layer = self.layers.item(i)
if layer.checkState() == qc.Qt.Checked:
self._visible_layers.add(layer.text())
self.draw_layout(self._current_layout)
@qc.pyqtSlot()
def _toggle_sidebar(self):
self.sidebar.setHidden(not self.sidebar.isHidden())
@qc.pyqtSlot()
def _toggle_join_polylines(self):
self.renderer.draw_individual_polyline_elements = not self.renderer.draw_individual_polyline_elements
self.draw_layout(self._current_layout)
@qc.pyqtSlot(object, qc.QPointF)
def _on_element_selected(self, element: Optional[qw.QGraphicsItem], mouse_pos: qc.QPointF):
text = f'mouse position: {mouse_pos.x():.4f}, {mouse_pos.y():.4f}\n'
if element is None:
text += 'No element selected'
else:
dxf_entity = element.data(CorrespondingDXFEntity)
if dxf_entity is None:
text += 'No data'
else:
text += f'Current Entity: {dxf_entity}\nLayer: {dxf_entity.dxf.layer}\n\nDXF Attributes:\n'
for key, value in dxf_entity.dxf.all_existing_dxf_attribs().items():
text += f'- {key}: {value}\n'
dxf_entity_stack = element.data(CorrespondingDXFEntityStack)
if dxf_entity_stack:
text += '\nParents:\n'
for entity in reversed(dxf_entity_stack):
text += f'- {entity}\n'
self.info.setPlainText(text)
def _main():
parser = argparse.ArgumentParser()
parser.add_argument('--cad_file')
parser.add_argument('--layout', default='Model')
args = parser.parse_args()
signal.signal(signal.SIGINT, signal.SIG_DFL) # handle Ctrl+C properly
app = qw.QApplication(sys.argv)
v = CadViewer()
if args.cad_file is not None:
v.set_document(ezdxf.readfile(args.cad_file))
try:
v.draw_layout(args.layout)
except KeyError:
print(f'could not find layout "{args.layout}". Valid layouts: {[l.name for l in v.doc.layouts]}')
sys.exit(1)
sys.exit(app.exec_())
if __name__ == '__main__':
_main()
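# Hedged CLI sketch (illustrative; the module file name is an assumption):
#   python cad_viewer.py --cad_file drawing.dxf --layout Model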
|
the-stack_106_20443
|
#!/usr/bin/env python
# Copyright Contributors to the OpenCue Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for rqd.rqnimby."""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import unittest
import mock
import pyfakefs.fake_filesystem_unittest
import rqd.rqcore
import rqd.rqmachine
import rqd.rqnimby
@mock.patch('rqd.rqutil.permissionsHigh', new=mock.MagicMock())
@mock.patch('rqd.rqutil.permissionsLow', new=mock.MagicMock())
class RqNimbyTests(pyfakefs.fake_filesystem_unittest.TestCase):
def setUp(self):
self.setUpPyfakefs()
self.inputDevice = self.fs.create_file('/dev/input/event0', contents='mouse event')
self.rqMachine = mock.MagicMock(spec=rqd.rqmachine.Machine)
self.rqCore = mock.MagicMock(spec=rqd.rqcore.RqCore)
self.rqCore.machine = self.rqMachine
self.nimby = rqd.rqnimby.NimbyFactory.getNimby(self.rqCore)
self.nimby.daemon = True
@mock.patch.object(rqd.rqnimby.NimbySelect, 'unlockedIdle')
def test_initialState(self, unlockedIdleMock):
self.nimby.daemon = True
self.nimby.start()
self.nimby.join()
# Initial state should be "unlocked and idle".
unlockedIdleMock.assert_called()
self.nimby.stop()
@mock.patch('select.select', new=mock.MagicMock(return_value=[['a new mouse event'], [], []]))
@mock.patch('threading.Timer')
def test_unlockedIdle(self, timerMock):
self.nimby.active = True
self.nimby.results = [[]]
self.rqCore.machine.isNimbySafeToRunJobs.return_value = True
self.nimby.unlockedIdle()
# Given a mouse event, Nimby should transition to "locked and in use".
timerMock.assert_called_with(mock.ANY, self.nimby.lockedInUse)
timerMock.return_value.start.assert_called()
@mock.patch('select.select', new=mock.MagicMock(return_value=[[], [], []]))
@mock.patch.object(rqd.rqnimby.NimbySelect, 'unlockedIdle')
@mock.patch('threading.Timer')
def test_lockedIdleWhenIdle(self, timerMock, unlockedIdleMock):
self.nimby.active = True
self.nimby.results = [[]]
self.rqCore.machine.isNimbySafeToRunJobs.return_value = True
self.nimby.lockedIdle()
# Given no events, Nimby should transition to "unlocked and idle".
unlockedIdleMock.assert_called()
@mock.patch('select.select', new=mock.MagicMock(return_value=[['a new mouse event'], [], []]))
@mock.patch('threading.Timer')
def test_lockedIdleWhenInUse(self, timerMock):
self.nimby.active = True
self.nimby.results = [[]]
self.rqCore.machine.isNimbySafeToRunJobs.return_value = True
self.nimby.lockedIdle()
# Given a mouse event, Nimby should transition to "locked and in use".
timerMock.assert_called_with(mock.ANY, self.nimby.lockedInUse)
timerMock.return_value.start.assert_called()
@mock.patch('select.select', new=mock.MagicMock(return_value=[[], [], []]))
@mock.patch.object(rqd.rqnimby.NimbySelect, 'lockedIdle')
@mock.patch('threading.Timer')
def test_lockedInUseWhenIdle(self, timerMock, lockedIdleMock):
self.nimby.active = True
self.nimby.results = [[]]
self.rqCore.machine.isNimbySafeToRunJobs.return_value = True
self.nimby.lockedInUse()
# Given no events, Nimby should transition to "locked and idle".
lockedIdleMock.assert_called()
@mock.patch('select.select', new=mock.MagicMock(return_value=[['a new mouse event'], [], []]))
@mock.patch('threading.Timer')
def test_lockedInUseWhenInUse(self, timerMock):
self.nimby.active = True
self.nimby.results = [[]]
self.rqCore.machine.isNimbySafeToRunJobs.return_value = True
self.nimby.lockedInUse()
# Given a mouse event, Nimby should stay in state "locked and in use".
timerMock.assert_called_with(mock.ANY, self.nimby.lockedInUse)
timerMock.return_value.start.assert_called()
def test_lockNimby(self):
self.nimby.active = True
self.nimby.locked = False
self.nimby.lockNimby()
self.assertTrue(self.nimby.locked)
self.rqCore.onNimbyLock.assert_called()
def test_unlockNimby(self):
self.nimby.locked = True
self.nimby.unlockNimby()
self.assertFalse(self.nimby.locked)
self.rqCore.onNimbyUnlock.assert_called()
if __name__ == '__main__':
unittest.main()
|
the-stack_106_20445
|
# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 et:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Create a sibling in a RIA store"""
__docformat__ = 'restructuredtext'
import logging
from datalad.cmd import WitlessRunner as Runner
from datalad.interface.common_opts import (
recursion_flag,
recursion_limit
)
from datalad.interface.base import (
build_doc,
Interface,
)
from datalad.interface.results import (
get_status_dict,
)
from datalad.interface.utils import eval_results
from datalad.support.annexrepo import AnnexRepo
from datalad.support.param import Parameter
from datalad.support.constraints import (
EnsureBool,
EnsureChoice,
EnsureNone,
EnsureStr,
)
from datalad.distribution.dataset import (
datasetmethod,
EnsureDataset,
require_dataset,
)
from datalad.distributed.ora_remote import (
LocalIO,
RIARemoteError,
RemoteCommandFailedError,
SSHRemoteIO,
)
from datalad.utils import (
Path,
quote_cmdlinearg,
)
from datalad.support.exceptions import (
CommandError
)
from datalad.support.gitrepo import (
GitRepo
)
from datalad.core.distributed.clone import (
decode_source_spec
)
from datalad.log import log_progress
from datalad.customremotes.ria_utils import (
get_layout_locations,
verify_ria_url,
create_store,
create_ds_in_store
)
lgr = logging.getLogger('datalad.distributed.create_sibling_ria')
@build_doc
class CreateSiblingRia(Interface):
"""Creates a sibling to a dataset in a RIA store
Communication with a dataset in a RIA store is implemented via two
siblings. A regular Git remote (repository sibling) and a git-annex
special remote for data transfer (storage sibling) -- with the former
having a publication dependency on the latter. By default, the name of the
storage sibling is derived from the repository sibling's name by appending
"-storage".
The store's base path is expected to not exist, be an empty directory,
or a valid RIA store.
RIA store layout
~~~~~~~~~~~~~~~~
A RIA store is a directory tree with a dedicated subdirectory for each
dataset in the store. The subdirectory name is constructed from the
DataLad dataset ID, e.g. '124/68afe-59ec-11ea-93d7-f0d5bf7b5561', where
the first three characters of the ID are used for an intermediate
    subdirectory in order to mitigate file system limitations for stores
containing a large number of datasets.
Each dataset subdirectory contains a standard bare Git repository for
the dataset.
    In addition, a subdirectory 'annex' holds a standard Git-annex object
store. However, instead of using the 'dirhashlower' naming scheme for
the object directories, like Git-annex would do, a 'dirhashmixed'
layout is used -- the same as for non-bare Git repositories or regular
DataLad datasets.
Optionally, there can be a further subdirectory 'archives' with
(compressed) 7z archives of annex objects. The storage remote is able to
pull annex objects from these archives, if it cannot find in the regular
annex object store. This feature can be useful for storing large
collections of rarely changing data on systems that limit the number of
files that can be stored.
Each dataset directory also contains a 'ria-layout-version' file that
identifies the data organization (as, for example, described above).
Lastly, there is a global 'ria-layout-version' file at the store's
base path that identifies where dataset subdirectories themselves are
located. At present, this file must contain a single line stating the
version (currently "1"). This line MUST end with a newline character.
It is possible to define an alias for an individual dataset in a store by
placing a symlink to the dataset location into an 'alias/' directory
in the root of the store. This enables dataset access via URLs of format:
'ria+<protocol>://<storelocation>#~<aliasname>'.
Error logging
~~~~~~~~~~~~~
To enable error logging at the remote end, append a pipe symbol and an "l"
to the version number in ria-layout-version (like so '1|l\\n').
Error logging will create files in an "error_log" directory whenever the
git-annex special remote (storage sibling) raises an exception, storing the
Python traceback of it. The logfiles are named according to the scheme
'<dataset id>.<annex uuid of the remote>.log' showing "who" ran into this
issue with which dataset. Because logging can potentially leak personal
data (like local file paths for example), it can be disabled client-side
by setting the configuration variable
"annex.ora-remote.<storage-sibling-name>.ignore-remote-config".
"""
# TODO: description?
_params_ = dict(
dataset=Parameter(
args=("-d", "--dataset"),
doc="""specify the dataset to process. If
no dataset is given, an attempt is made to identify the dataset
based on the current working directory""",
constraints=EnsureDataset() | EnsureNone()),
url=Parameter(
args=("url",),
metavar="ria+<ssh|file|http(s)>://<host>[/path]",
doc="""URL identifying the target RIA store and access protocol. If
``push_url||--push-url`` is given in addition, this is
used for read access only. Otherwise it will be used for write
access too and to create the repository sibling in the RIA store.
            Note that HTTP(S) is currently valid for consumption only, and
            therefore requires ``push_url||--push-url`` to be provided.
""",
constraints=EnsureStr() | EnsureNone()),
push_url=Parameter(
args=("--push-url",),
metavar="ria+<ssh|file>://<host>[/path]",
doc="""URL identifying the target RIA store and access protocol for
write access to the storage sibling. If given this will also be used
for creation of the repository sibling in the RIA store.""",
constraints=EnsureStr() | EnsureNone()),
name=Parameter(
args=('-s', '--name',),
metavar='NAME',
doc="""Name of the sibling.
With `recursive`, the same name will be used to label all
the subdatasets' siblings.""",
constraints=EnsureStr() | EnsureNone(),
required=True),
storage_name=Parameter(
args=("--storage-name",),
metavar="NAME",
doc="""Name of the storage sibling (git-annex special remote).
Must not be identical to the sibling name. If not specified,
defaults to the sibling name plus '-storage' suffix. If only
a storage sibling is created, this setting is ignored, and
the primary sibling name is used.""",
constraints=EnsureStr() | EnsureNone()),
alias=Parameter(
args=('--alias',),
metavar='ALIAS',
doc="""Alias for the dataset in the RIA store.
Add the necessary symlink so that this dataset can be cloned from the RIA
store using the given ALIAS instead of its ID.
With `recursive=True`, only the top dataset will be aliased.""",
constraints=EnsureStr() | EnsureNone()),
post_update_hook=Parameter(
args=("--post-update-hook",),
doc="""Enable git's default post-update-hook for the created
sibling.""",
action="store_true"),
shared=Parameter(
args=("--shared",),
metavar='{false|true|umask|group|all|world|everybody|0xxx}',
doc="""If given, configures the permissions in the
RIA store for multi-users access.
Possible values for this option are identical to those of
`git init --shared` and are described in its documentation.""",
constraints=EnsureStr() | EnsureBool() | EnsureNone()),
group=Parameter(
args=("--group",),
metavar="GROUP",
doc="""Filesystem group for the repository. Specifying the group is
crucial when [CMD: --shared=group CMD][PY: shared="group" PY]""",
constraints=EnsureStr() | EnsureNone()),
storage_sibling=Parameter(
args=("--storage-sibling",),
dest='storage_sibling',
metavar='MODE',
constraints=EnsureChoice('only') | EnsureBool() | EnsureNone(),
doc="""By default, an ORA storage sibling and a Git repository
sibling are created ([CMD: on CMD][PY: True|'on' PY]).
Alternatively, creation of the storage sibling can be disabled
([CMD: off CMD][PY: False|'off' PY]), or a storage sibling
created only and no Git sibling
([CMD: only CMD][PY: 'only' PY]). In the latter mode, no Git
installation is required on the target host."""),
existing=Parameter(
args=("--existing",),
constraints=EnsureChoice(
'skip', 'error', 'reconfigure') | EnsureNone(),
metavar='MODE',
doc="""Action to perform, if a (storage) sibling is already
configured under the given name and/or a target already exists.
In this case, a dataset can be skipped ('skip'), an existing target
repository be forcefully re-initialized, and the sibling
(re-)configured ('reconfigure'), or the command be instructed to
fail ('error').""", ),
new_store_ok=Parameter(
args=("--new-store-ok",),
action='store_true',
doc="""When set, a new store will be created, if necessary. Otherwise, a sibling
will only be created if the url points to an existing RIA store.""",
),
recursive=recursion_flag,
recursion_limit=recursion_limit,
trust_level=Parameter(
args=("--trust-level",),
metavar="TRUST-LEVEL",
constraints=EnsureChoice(
'trust', 'semitrust', 'untrust') | EnsureNone(),
doc="""specify a trust level for the storage sibling. If not
specified, the default git-annex trust level is used. 'trust'
should be used with care (see the git-annex-trust man page).""",),
disable_storage__=Parameter(
args=("--no-storage-sibling",),
dest='disable_storage__',
doc="""This option is deprecated. Use '--storage-sibling off'
instead.""",
action="store_false"),
)
@staticmethod
@datasetmethod(name='create_sibling_ria')
@eval_results
def __call__(url,
name,
*, # note that `name` is required but not posarg in CLI
dataset=None,
storage_name=None,
alias=None,
post_update_hook=False,
shared=None,
group=None,
storage_sibling=True,
existing='error',
new_store_ok=False,
trust_level=None,
recursive=False,
recursion_limit=None,
disable_storage__=None,
push_url=None
):
if disable_storage__ is not None:
import warnings
warnings.warn("datalad-create-sibling-ria --no-storage-sibling "
"is deprecated, use --storage-sibling off instead.",
DeprecationWarning)
# recode to new setup
disable_storage__ = None
storage_sibling = False
if storage_sibling == 'only' and storage_name:
lgr.warning(
"Sibling name will be used for storage sibling in "
"storage-sibling-only mode, but a storage sibling name "
"was provided"
)
ds = require_dataset(
dataset, check_installed=True, purpose='create RIA sibling(s)')
res_kwargs = dict(
ds=ds,
action="create-sibling-ria",
logger=lgr,
)
# parse target URL
# Note: URL parsing is done twice ATM (for top-level ds). This can't be
        # reduced to a single instance, since rewriting the url based on config could
# be different for subdatasets.
try:
ssh_host, base_path, rewritten_url = \
verify_ria_url(push_url if push_url else url, ds.config)
except ValueError as e:
yield get_status_dict(
status='error',
message=str(e),
**res_kwargs
)
return
if ds.repo.get_hexsha() is None or ds.id is None:
raise RuntimeError(
"Repository at {} is not a DataLad dataset, "
"run 'datalad create [--force]' first.".format(ds.path))
if not storage_sibling and storage_name:
lgr.warning(
"Storage sibling setup disabled, but a storage sibling name "
"was provided"
)
if storage_sibling and not storage_name:
storage_name = "{}-storage".format(name)
if storage_sibling and name == storage_name:
# leads to unresolvable, circular dependency with publish-depends
raise ValueError("sibling names must not be equal")
if not isinstance(url, str):
raise TypeError("url is not a string, but %s" % type(url))
# Query existing siblings upfront in order to fail early on
# existing=='error', since misconfiguration (particularly of special
# remotes) only to fail in a subdataset later on with that config, can
# be quite painful.
# TODO: messages - this is "create-sibling". Don't confuse existence of
# local remotes with existence of the actual remote sibling
# in wording
if existing == 'error':
# in recursive mode this check could take a substantial amount of
# time: employ a progress bar (or rather a counter, because we don't
            # know the total in advance)
pbar_id = 'check-siblings-{}'.format(id(ds))
log_progress(
lgr.info, pbar_id,
'Start checking pre-existing sibling configuration %s', ds,
label='Query siblings',
unit=' Siblings',
)
# even if we have to fail, let's report all conflicting siblings
# in subdatasets
failed = False
for r in ds.siblings(result_renderer='disabled',
recursive=recursive,
recursion_limit=recursion_limit):
log_progress(
lgr.info, pbar_id,
'Discovered sibling %s in dataset at %s',
r['name'], r['path'],
update=1,
increment=True)
if not r['type'] == 'sibling' or r['status'] != 'ok':
                # this is an internal status query that has no consequence
# for the outside world. Be silent unless something useful
# can be said
#yield r
continue
if r['name'] == name:
res = get_status_dict(
status='error',
message="a sibling '{}' is already configured in "
"dataset {}".format(name, r['path']),
**res_kwargs,
)
failed = True
yield res
continue
if storage_name and r['name'] == storage_name:
res = get_status_dict(
status='error',
message="a sibling '{}' is already configured in "
"dataset {}".format(storage_name, r['path']),
**res_kwargs,
)
failed = True
yield res
continue
log_progress(
lgr.info, pbar_id,
'Finished checking pre-existing sibling configuration %s', ds,
)
if failed:
return
# TODO: - URL parsing + store creation needs to be RF'ed based on
# command abstractions
# - more generally consider store creation a dedicated command or
# option
io = SSHRemoteIO(ssh_host) if ssh_host else LocalIO()
try:
# determine the existence of a store by trying to read its layout.
# Because this raises a FileNotFound error if non-existent, we need
# to catch it
io.read_file(Path(base_path) / 'ria-layout-version')
except (FileNotFoundError, RIARemoteError, RemoteCommandFailedError) as e:
if not new_store_ok:
# we're instructed to only act in case of an existing RIA store
res = get_status_dict(
status='error',
message="No store found at '{}'. Forgot "
"--new-store-ok ?".format(
Path(base_path)),
**res_kwargs)
yield res
return
log_progress(
lgr.info, 'create-sibling-ria',
'Creating a new RIA store at %s', Path(base_path),
)
create_store(io,
Path(base_path),
'1')
yield from _create_sibling_ria(
ds,
url,
push_url,
name,
storage_sibling,
storage_name,
alias,
existing,
shared,
group,
post_update_hook,
trust_level,
res_kwargs)
if recursive:
# Note: subdatasets can be treated independently, so go full
# recursion when querying for them and _no_recursion with the
# actual call. Theoretically this can be parallelized.
for subds in ds.subdatasets(state='present',
recursive=True,
recursion_limit=recursion_limit,
result_xfm='datasets'):
yield from _create_sibling_ria(
subds,
url,
push_url,
name,
storage_sibling,
storage_name,
None, # subdatasets can't have the same alias as the parent
existing,
shared,
group,
post_update_hook,
trust_level,
res_kwargs)
def _create_sibling_ria(
ds,
url,
push_url,
name,
storage_sibling,
storage_name,
alias,
existing,
shared,
group,
post_update_hook,
trust_level,
res_kwargs):
# be safe across datasets
res_kwargs = res_kwargs.copy()
# update dataset
res_kwargs['ds'] = ds
if not isinstance(ds.repo, AnnexRepo):
# No point in dealing with a special remote when there's no annex.
# Note, that in recursive invocations this might only apply to some of
# the datasets. Therefore dealing with it here rather than one level up.
lgr.debug("No annex at %s. Ignoring special remote options.", ds.path)
storage_sibling = False
storage_name = None
# parse target URL
try:
ssh_host, base_path, rewritten_url = \
verify_ria_url(push_url if push_url else url, ds.config)
except ValueError as e:
yield get_status_dict(
status='error',
message=str(e),
**res_kwargs
)
return
base_path = Path(base_path)
git_url = decode_source_spec(
# append dataset id to url and use magic from clone-helper:
url + '#{}'.format(ds.id),
cfg=ds.config
)['giturl']
git_push_url = decode_source_spec(
push_url + '#{}'.format(ds.id),
cfg=ds.config
)['giturl'] if push_url else None
# determine layout locations; go for a v1 store-level layout
repo_path, _, _ = get_layout_locations(1, base_path, ds.id)
ds_siblings = [r['name'] for r in ds.siblings(result_renderer='disabled')]
# Figure whether we are supposed to skip this very dataset
if existing == 'skip' and (
name in ds_siblings or (
storage_name and storage_name in ds_siblings)):
yield get_status_dict(
status='notneeded',
message="Skipped on existing sibling",
**res_kwargs
)
# if we skip here, nothing else can change that decision further
# down
return
    # figure whether we need to skip or error due to an existing target repo before
# we try to init a special remote.
if ssh_host:
from datalad import ssh_manager
ssh = ssh_manager.get_connection(
ssh_host,
use_remote_annex_bundle=False)
ssh.open()
if existing in ['skip', 'error']:
config_path = repo_path / 'config'
# No .git -- if it's an existing repo in a RIA store it should be a
# bare repo.
# Theoretically we could have additional checks for whether we have
# an empty repo dir or a non-bare repo or whatever else.
if ssh_host:
try:
ssh('[ -e {p} ]'.format(p=quote_cmdlinearg(str(config_path))))
exists = True
except CommandError:
exists = False
else:
exists = config_path.exists()
if exists:
if existing == 'skip':
# 1. not rendered by default
# 2. message doesn't show up in ultimate result
# record as shown by -f json_pp
yield get_status_dict(
status='notneeded',
message="Skipped on existing remote "
"directory {}".format(repo_path),
**res_kwargs
)
return
else: # existing == 'error'
yield get_status_dict(
status='error',
message="remote directory {} already "
"exists.".format(repo_path),
**res_kwargs
)
return
if storage_sibling == 'only':
lgr.info("create storage sibling '{}' ...".format(name))
else:
lgr.info("create sibling{} '{}'{} ...".format(
's' if storage_name else '',
name,
" and '{}'".format(storage_name) if storage_name else '',
))
create_ds_in_store(SSHRemoteIO(ssh_host) if ssh_host else LocalIO(),
base_path, ds.id, '2', '1', alias)
if storage_sibling:
# we are using the main `name`, if the only thing we are creating
# is the storage sibling
srname = name if storage_sibling == 'only' else storage_name
lgr.debug('init special remote {}'.format(srname))
special_remote_options = [
'type=external',
'externaltype=ora',
'encryption=none',
'autoenable=true',
'url={}'.format(url)]
if push_url:
special_remote_options.append('push-url={}'.format(push_url))
try:
ds.repo.init_remote(
srname,
options=special_remote_options)
except CommandError as e:
if existing == 'reconfigure' \
and 'git-annex: There is already a special remote' \
in e.stderr:
# run enableremote instead
lgr.debug(
"special remote '%s' already exists. "
"Run enableremote instead.",
srname)
# TODO: Use AnnexRepo.enable_remote (which needs to get
# `options` first)
ds.repo.call_annex([
'enableremote',
srname] + special_remote_options)
else:
yield get_status_dict(
status='error',
message="initremote failed.\nstdout: %s\nstderr: %s"
% (e.stdout, e.stderr),
**res_kwargs
)
return
if trust_level:
trust_cmd = [trust_level]
if trust_level == 'trust':
# Following git-annex 8.20201129-73-g6a0030a11, using `git
# annex trust` requires --force.
trust_cmd.append('--force')
ds.repo.call_annex(trust_cmd + [srname])
# get uuid for use in bare repo's config
uuid = ds.config.get("remote.{}.annex-uuid".format(srname))
if storage_sibling == 'only':
# we can stop here, the rest of the function is about setting up
# the git remote part of the sibling
yield get_status_dict(
status='ok',
**res_kwargs,
)
return
# 2. create a bare repository in-store:
lgr.debug("init bare repository")
# TODO: we should prob. check whether it's there already. How?
# Note: like the special remote itself, we assume local FS if no
# SSH host is specified
disabled_hook = repo_path / 'hooks' / 'post-update.sample'
enabled_hook = repo_path / 'hooks' / 'post-update'
if group:
chgrp_cmd = "chgrp -R {} {}".format(
quote_cmdlinearg(str(group)),
quote_cmdlinearg(str(repo_path)))
if ssh_host:
ssh('cd {rootdir} && git init --bare{shared}'.format(
rootdir=quote_cmdlinearg(str(repo_path)),
shared=" --shared='{}'".format(
quote_cmdlinearg(shared)) if shared else ''
))
if storage_sibling:
            # write special remote's uuid into git-config, so clone can know
            # which one it is supposed to be and enable it even with a
            # fallback URL
ssh("cd {rootdir} && git config datalad.ora-remote.uuid {uuid}"
"".format(rootdir=quote_cmdlinearg(str(repo_path)),
uuid=uuid))
if post_update_hook:
ssh('mv {} {}'.format(quote_cmdlinearg(str(disabled_hook)),
quote_cmdlinearg(str(enabled_hook))))
if group:
            # Either the repository existed before or a new directory was
            # created for it; set its group to the desired one, if one was
            # provided, with the same chgrp command
ssh(chgrp_cmd)
# finally update server
if post_update_hook:
# Conditional on post_update_hook, since one w/o the other doesn't
# seem to make much sense.
ssh('cd {rootdir} && git update-server-info'.format(
rootdir=quote_cmdlinearg(str(repo_path))
))
else:
gr = GitRepo(repo_path, create=True, bare=True,
shared=shared if shared else None)
if storage_sibling:
            # write special remote's uuid into git-config, so clone can know
            # which one it is supposed to be and enable it even with a
            # fallback URL
gr.config.add("datalad.ora-remote.uuid", uuid, where='local')
if post_update_hook:
disabled_hook.rename(enabled_hook)
if group:
# No CWD needed here, since `chgrp` is expected to be found via PATH
# and the path it's operating on is absolute (repo_path). No
# repository operation involved.
Runner().run(chgrp_cmd)
# finally update server
if post_update_hook:
# Conditional on post_update_hook, since one w/o the other doesn't
# seem to make much sense.
gr.call_git(["update-server-info"])
# add a git remote to the bare repository
# Note: needs annex-ignore! Otherwise we might push into dirhash
# lower annex/object tree instead of mixed, since it's a bare
# repo. This in turn would be an issue, if we want to pack the
# entire thing into an archive. Special remote will then not be
# able to access content in the "wrong" place within the archive
lgr.debug("set up git remote")
if name in ds_siblings:
# otherwise we should have skipped or failed before
assert existing == 'reconfigure'
ds.config.set(
"remote.{}.annex-ignore".format(name),
value="true",
where="local")
ds.siblings(
'configure',
name=name,
url=str(repo_path) if url.startswith("ria+file") else git_url,
pushurl=git_push_url,
recursive=False,
# Note, that this should be None if storage_sibling was not set
publish_depends=storage_name,
result_renderer='disabled',
# Note, that otherwise a subsequent publish will report
# "notneeded".
fetch=True
)
yield get_status_dict(
status='ok',
**res_kwargs,
)
|
the-stack_106_20446
|
"""
====================================
Probabilistic Tracking on ODF fields
====================================
In this example we perform probabilistic fiber tracking on fields of ODF peaks.
This example requires importing example `reconst_csa.py`.
"""
import numpy as np
from reconst_csa import *
from dipy.reconst.interpolate import NearestNeighborInterpolator
from dipy.tracking.markov import (BoundaryStepper,
FixedSizeStepper,
ProbabilisticOdfWeightedTracker)
from dipy.tracking.utils import seeds_from_mask
stepper = FixedSizeStepper(1)
"""
Read the voxel size from the image header:
"""
zooms = img.get_header().get_zooms()[:3]
"""
Randomly select some seed points from the mask:
"""
seeds = seeds_from_mask(mask, [1, 1, 1], zooms)
seeds = seeds[:2000]
interpolator = NearestNeighborInterpolator(data, zooms)
pwt = ProbabilisticOdfWeightedTracker(csamodel, interpolator, mask,
stepper, 20, seeds, sphere)
csa_streamlines = list(pwt)
"""
Now that we have our streamlines in memory we can save the results to disk.
For this purpose we can use the TrackVis format (``*.trk``). First, we need to
create a header.
"""
import nibabel as nib
hdr = nib.trackvis.empty_header()
hdr['voxel_size'] = (2., 2., 2.)
hdr['voxel_order'] = 'LAS'
hdr['dim'] = csapeaks.gfa.shape[:3]
"""
Save the streamlines.
"""
csa_streamlines_trk = ((sl, None, None) for sl in csa_streamlines)
csa_sl_fname = 'csa_prob_streamline.trk'
nib.trackvis.write(csa_sl_fname, csa_streamlines_trk, hdr)
"""
Visualize the streamlines with fvtk (python vtk is required).
"""
from dipy.viz import fvtk
from dipy.viz.colormap import line_colors
r = fvtk.ren()
fvtk.add(r, fvtk.line(csa_streamlines, line_colors(csa_streamlines)))
print('Saving illustration as csa_prob_tracks.png')
fvtk.record(r, n_frames=1, out_path='csa_prob_tracks.png', size=(600, 600))
"""
.. figure:: csa_prob_tracks.png
:align: center
**Probabilistic streamlines applied on an ODF field modulated by GFA**.
"""
|
the-stack_106_20447
|
import numpy as np
from logging import getLogger
from PIL import Image
logger = getLogger(__name__)
def resize(x):
""" 画像サイズが最低値以下の場合に最低値に拡大する
"""
width, height = 96, 96
x_out = []
for i in range(len(x)):
img = x[i].reshape(x[i].shape[:-1])
img = Image.fromarray((img * 255).astype(np.uint8))
img = img.convert("RGB")
img = img.resize((width, height), Image.LANCZOS)
x_out.append(np.array(img).astype(np.float32) / 255)
return np.array(x_out)
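# Hedged usage sketch (added for illustration, not part of the original module):
# it assumes single-channel inputs of shape (N, H, W, 1) with values in [0, 1],
# which is what the reshape and the division by 255 above imply.
if __name__ == "__main__":
    dummy = np.random.rand(4, 28, 28, 1).astype(np.float32)  # e.g. MNIST-sized images
    resized = resize(dummy)
    print(resized.shape)  # expected: (4, 96, 96, 3) after convert("RGB")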
|
the-stack_106_20450
|
# coding: utf-8
"""
Yapily API
To access endpoints that require authentication, use your application key and secret created in the Dashboard (https://dashboard.yapily.com) # noqa: E501
OpenAPI spec version: 0.0.155
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class Country(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'country_code2': 'str',
'display_name': 'str'
}
attribute_map = {
'country_code2': 'countryCode2',
'display_name': 'displayName'
}
def __init__(self, country_code2=None, display_name=None): # noqa: E501
"""Country - a model defined in Swagger""" # noqa: E501
self._country_code2 = None
self._display_name = None
self.discriminator = None
if country_code2 is not None:
self.country_code2 = country_code2
if display_name is not None:
self.display_name = display_name
@property
def country_code2(self):
"""Gets the country_code2 of this Country. # noqa: E501
:return: The country_code2 of this Country. # noqa: E501
:rtype: str
"""
return self._country_code2
@country_code2.setter
def country_code2(self, country_code2):
"""Sets the country_code2 of this Country.
:param country_code2: The country_code2 of this Country. # noqa: E501
:type: str
"""
self._country_code2 = country_code2
@property
def display_name(self):
"""Gets the display_name of this Country. # noqa: E501
:return: The display_name of this Country. # noqa: E501
:rtype: str
"""
return self._display_name
@display_name.setter
def display_name(self, display_name):
"""Sets the display_name of this Country.
:param display_name: The display_name of this Country. # noqa: E501
:type: str
"""
self._display_name = display_name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Country):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
the-stack_106_20452
|
#!/usr/bin/env python3
#
# Copyright (c) 2019, AT&T Intellectual Property.
# Copyright (c) 2015 Brocade Communications Systems, Inc.
# All Rights Reserved.
#
# # Copyright (c) 2019, AT&T Intellectual Property.
#
# SPDX-License-Identifier: GPL-2.0-only
from vplaned import Controller
from collections import Counter, defaultdict
def main():
dp_interface_stats = defaultdict(Counter)
dp_stats = defaultdict(dict)
with Controller() as controller:
for dp in controller.get_dataplanes():
with dp:
for key, value in dp.json_command("netflow show").items():
if isinstance(value, dict):
dp_interface_stats[key].update(Counter(value))
else:
dp_stats[dp.id][key] = value
print("dataplane statistics:")
for intf in sorted(dp_interface_stats.keys()):
print(" interface {}:".format(intf))
print(" monitor default:")
for stat in sorted(dp_interface_stats[intf].keys()):
# Dataplane maintainers do not want JSON keys with spaces, so we
# have to use _ and strip them out when printing
print(" {}:{}{}".format(stat.replace("_", " "),
(33 - len(stat)) * " ",
dp_interface_stats[intf][stat]))
print()
for dataplane in sorted(dp_stats.keys()):
print(" dataplane {}:".format(dataplane))
for stat in sorted(dp_stats[dataplane].keys()):
# Dataplane maintainers do not want JSON keys with spaces, so we
# have to use _ and strip them out when printing
print(" {}:{}{}".format(stat.replace("_", " "),
(33 - len(stat)) * " ",
dp_stats[dataplane][stat]))
if __name__ == '__main__':
main()
|
the-stack_106_20453
|
import numpy as np
from trainLinearReg import trainLinearReg
from linearRegCostFunction import linearRegCostFunction
def validationCurve(X, y, Xval, yval):
"""returns the train
and validation errors (in error_train, error_val)
for different values of lambda. You are given the training set (X,
y) and validation set (Xval, yval).
"""
# Selected values of lambda (you should not change this)
lambda_vec = [0, 0.001, 0.003, 0.01, 0.03, 0.1, 0.3, 1, 3, 10]
# You need to return these variables correctly.
error_train = np.zeros(len(lambda_vec))
error_val = np.zeros(len(lambda_vec))
# ====================== YOUR CODE HERE ======================
# Instructions: Fill in this function to return training errors in
# error_train and the validation errors in error_val. The
# vector lambda_vec contains the different lambda parameters
# to use for each calculation of the errors, i.e,
# error_train(i), and error_val(i) should give
# you the errors obtained after training with
# lambda = lambda_vec(i)
#
# Note: You can loop over lambda_vec with the following:
#
# for i = 1:length(lambda_vec)
# lambda = lambda_vec(i)
# # Compute train / val errors when training linear
# # regression with regularization parameter lambda
# # You should store the result in error_train(i)
# # and error_val(i)
# ....
#
# end
#
#
for i, lambda_ in enumerate(lambda_vec):
theta = trainLinearReg(X, y, lambda_)
error_train[i], _ = linearRegCostFunction(X, y, theta, 0)
error_val[i], _ = linearRegCostFunction(Xval, yval, theta, 0)
# =========================================================================
return lambda_vec, error_train, error_val
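# Hedged usage sketch (not part of the original exercise code). It assumes
# trainLinearReg/linearRegCostFunction accept a bias-augmented X and a 1-D y,
# as the calls inside validationCurve above imply; shapes may differ in the
# actual course helpers.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    X = np.c_[np.ones(20), rng.normal(size=20)]          # bias column + one feature
    y = 2.0 * X[:, 1] + rng.normal(scale=0.1, size=20)
    Xval = np.c_[np.ones(10), rng.normal(size=10)]
    yval = 2.0 * Xval[:, 1] + rng.normal(scale=0.1, size=10)
    lams, err_train, err_val = validationCurve(X, y, Xval, yval)
    for lam, et, ev in zip(lams, err_train, err_val):
        print(f"lambda={lam:<6} train={et:.4f} validation={ev:.4f}")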
|
the-stack_106_20458
|
"""
conftest
~~~~~~~~
Test fixtures and what not
:copyright: (c) 2017 by CERN.
:copyright: (c) 2019-2022 by J. Christopher Wagner (jwag).
:license: MIT, see LICENSE for more details.
"""
import os
import tempfile
import time
import typing as t
from datetime import datetime
from urllib.parse import urlsplit
import pytest
from flask import Flask, Response, jsonify, render_template
from flask import request as flask_request
from flask.json import JSONEncoder
from flask_mail import Mail
from flask_security import (
MongoEngineUserDatastore,
PeeweeUserDatastore,
PonyUserDatastore,
RoleMixin,
Security,
SQLAlchemySessionUserDatastore,
SQLAlchemyUserDatastore,
UserMixin,
WebAuthnMixin,
auth_required,
auth_token_required,
http_auth_required,
get_request_attr,
login_required,
roles_accepted,
roles_required,
permissions_accepted,
permissions_required,
uia_email_mapper,
)
from flask_security.utils import localize_callback
from tests.test_utils import populate_data
NO_BABEL = False
try:
from flask_babel import Babel
except ImportError:
try:
from flask_babelex import Babel
except ImportError:
NO_BABEL = True
if t.TYPE_CHECKING: # pragma: no cover
from flask.testing import FlaskClient
class SecurityFixture(Flask):
security: Security
mail: Mail
@pytest.fixture()
def app(request: pytest.FixtureRequest) -> "SecurityFixture":
app = SecurityFixture(__name__)
app.response_class = Response
app.debug = True
app.config["SECRET_KEY"] = "secret"
app.config["TESTING"] = True
app.config["LOGIN_DISABLED"] = False
app.config["WTF_CSRF_ENABLED"] = False
# Our test emails/domain isn't necessarily valid
app.config["SECURITY_EMAIL_VALIDATOR_ARGS"] = {"check_deliverability": False}
app.config["SECURITY_TWO_FACTOR_SECRET"] = {
"1": "TjQ9Qa31VOrfEzuPy4VHQWPCTmRzCnFzMKLxXYiZu9B"
}
app.config["SECURITY_SMS_SERVICE"] = "test"
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
app.config["SECURITY_PASSWORD_SALT"] = "salty"
# Make this plaintext for most tests - reduces unit test time by 50%
app.config["SECURITY_PASSWORD_HASH"] = "plaintext"
# Make this hex_md5 for token tests
app.config["SECURITY_HASHING_SCHEMES"] = ["hex_md5"]
app.config["SECURITY_DEPRECATED_HASHING_SCHEMES"] = []
for opt in [
"changeable",
"recoverable",
"registerable",
"trackable",
"passwordless",
"confirmable",
"two_factor",
"unified_signin",
"webauthn",
]:
app.config["SECURITY_" + opt.upper()] = opt in request.keywords
pytest_major = int(pytest.__version__.split(".")[0])
if pytest_major >= 4:
marker_getter = request.node.get_closest_marker
else:
marker_getter = request.keywords.get
settings = marker_getter("settings")
if settings is not None:
for key, value in settings.kwargs.items():
app.config["SECURITY_" + key.upper()] = value
app.mail = Mail(app) # type: ignore
app.json_encoder = JSONEncoder
# use babel marker to signify tests that need babel extension.
babel = marker_getter("babel")
if babel:
if NO_BABEL:
raise pytest.skip("Requires Babel")
Babel(app)
@app.route("/")
def index():
return render_template("index.html", content="Home Page")
@app.route("/profile")
@auth_required()
def profile():
if hasattr(app, "security"):
if app.security._want_json(flask_request):
return jsonify(message="profile")
return render_template("index.html", content="Profile Page")
@app.route("/post_login")
@login_required
def post_login():
return render_template("index.html", content="Post Login")
@app.route("/http")
@http_auth_required
def http():
return "HTTP Authentication"
@app.route("/http_admin_required")
@http_auth_required
@permissions_required("admin")
def http_admin_required():
assert get_request_attr("fs_authn_via") == "basic"
return "HTTP Authentication"
@app.route("/http_custom_realm")
@http_auth_required("My Realm")
def http_custom_realm():
assert get_request_attr("fs_authn_via") == "basic"
return render_template("index.html", content="HTTP Authentication")
@app.route("/token", methods=["GET", "POST"])
@auth_token_required
def token():
assert get_request_attr("fs_authn_via") == "token"
return render_template("index.html", content="Token Authentication")
@app.route("/multi_auth")
@auth_required("session", "token", "basic")
def multi_auth():
return render_template("index.html", content="Session, Token, Basic auth")
@app.route("/post_logout")
def post_logout():
return render_template("index.html", content="Post Logout")
@app.route("/post_register")
def post_register():
return render_template("index.html", content="Post Register")
@app.route("/post_confirm")
def post_confirm():
return render_template("index.html", content="Post Confirm")
@app.route("/admin")
@roles_required("admin")
def admin():
assert get_request_attr("fs_authn_via") == "session"
return render_template("index.html", content="Admin Page")
@app.route("/admin_and_editor")
@roles_required("admin", "editor")
def admin_and_editor():
return render_template("index.html", content="Admin and Editor Page")
@app.route("/admin_or_editor")
@roles_accepted("admin", "editor")
def admin_or_editor():
return render_template("index.html", content="Admin or Editor Page")
@app.route("/simple")
@roles_accepted("simple")
def simple():
return render_template("index.html", content="SimplePage")
@app.route("/admin_perm")
@permissions_accepted("full-write", "super")
def admin_perm():
return render_template(
"index.html", content="Admin Page with full-write or super"
)
@app.route("/admin_perm_required")
@permissions_required("full-write", "super")
def admin_perm_required():
return render_template("index.html", content="Admin Page required")
@app.route("/page1")
def page_1():
return "Page 1"
@app.route("/json", methods=["GET", "POST"])
def echo_json():
return jsonify(flask_request.get_json())
@app.route("/unauthz", methods=["GET", "POST"])
def unauthz():
return render_template("index.html", content="Unauthorized")
@app.route("/fresh", methods=["GET", "POST"])
@auth_required(within=60)
def fresh():
if app.security._want_json(flask_request):
return jsonify(title="Fresh Only")
else:
return render_template("index.html", content="Fresh Only")
def revert_forms():
# Some forms/tests have dynamic fields - be sure to revert them.
if hasattr(app, "security"):
if hasattr(app.security.login_form, "email"):
del app.security.login_form.email
if hasattr(app.security.register_form, "username"):
del app.security.register_form.username
if hasattr(app.security.confirm_register_form, "username"):
del app.security.confirm_register_form.username
request.addfinalizer(revert_forms)
return app
@pytest.fixture()
def mongoengine_datastore(request, app, tmpdir, realmongodburl):
return mongoengine_setup(request, app, tmpdir, realmongodburl)
def mongoengine_setup(request, app, tmpdir, realmongodburl):
# To run against a realdb: mongod --dbpath <somewhere>
pytest.importorskip("flask_mongoengine")
from flask_mongoengine import MongoEngine
from mongoengine.fields import (
BinaryField,
BooleanField,
DateTimeField,
IntField,
ListField,
ReferenceField,
StringField,
)
from mongoengine import PULL, CASCADE
db_name = "flask_security_test_%s" % str(time.time()).replace(".", "_")
app.config["MONGODB_SETTINGS"] = {
"db": db_name,
"host": realmongodburl if realmongodburl else "mongomock://localhost",
"port": 27017,
"alias": db_name,
}
db = MongoEngine(app)
class Role(db.Document, RoleMixin):
name = StringField(required=True, unique=True, max_length=80)
description = StringField(max_length=255)
permissions = StringField(max_length=255)
meta = {"db_alias": db_name}
class WebAuthn(db.Document, WebAuthnMixin):
credential_id = BinaryField(primary_key=True, max_bytes=1024, required=True)
public_key = BinaryField(required=True)
sign_count = IntField(default=0)
transports = ListField(required=False)
# a JSON string as returned from registration
extensions = StringField(max_length=255)
lastuse_datetime = DateTimeField(required=True)
# name is provided by user - we make sure it is unique per user
name = StringField(max_length=64, required=True)
usage = StringField(max_length=64, required=True)
# we need to be able to look up a user from a credential_id
user = ReferenceField("User")
# user_id = ObjectIdField(required=True)
meta = {"db_alias": db_name}
def get_user_mapping(self) -> t.Dict[str, str]:
"""
Return the mapping from webauthn back to User
"""
return dict(id=self.user.id)
class User(db.Document, UserMixin):
email = StringField(unique=True, max_length=255)
fs_uniquifier = StringField(unique=True, max_length=64, required=True)
fs_webauthn_user_handle = StringField(unique=True, max_length=64)
username = StringField(unique=True, required=False, sparse=True, max_length=255)
password = StringField(required=False, max_length=255)
security_number = IntField(unique=True, required=False, sparse=True)
last_login_at = DateTimeField()
current_login_at = DateTimeField()
tf_primary_method = StringField(max_length=255)
tf_totp_secret = StringField(max_length=255)
tf_phone_number = StringField(max_length=255)
us_totp_secrets = StringField()
us_phone_number = StringField(max_length=255)
last_login_ip = StringField(max_length=100)
current_login_ip = StringField(max_length=100)
login_count = IntField()
active = BooleanField(default=True)
confirmed_at = DateTimeField()
roles = ListField(ReferenceField(Role), default=[])
webauthn = ListField(
ReferenceField(WebAuthn, reverse_delete_rule=PULL), default=[]
)
meta = {"db_alias": db_name}
db.Document.register_delete_rule(WebAuthn, "user", CASCADE)
def tear_down():
with app.app_context():
User.drop_collection()
Role.drop_collection()
WebAuthn.drop_collection()
db.connection.drop_database(db_name)
request.addfinalizer(tear_down)
return MongoEngineUserDatastore(db, User, Role, WebAuthn)
@pytest.fixture()
def sqlalchemy_datastore(request, app, tmpdir, realdburl):
return sqlalchemy_setup(request, app, tmpdir, realdburl)
def sqlalchemy_setup(request, app, tmpdir, realdburl):
pytest.importorskip("flask_sqlalchemy")
from flask_sqlalchemy import SQLAlchemy
from flask_security.models import fsqla_v3 as fsqla
if realdburl:
db_url, db_info = _setup_realdb(realdburl)
app.config["SQLALCHEMY_DATABASE_URI"] = db_url
else:
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///:memory:"
db = SQLAlchemy(app)
fsqla.FsModels.set_db_info(db)
class Role(db.Model, fsqla.FsRoleMixin):
pass
class User(db.Model, fsqla.FsUserMixin):
security_number = db.Column(db.Integer, unique=True)
# For testing allow null passwords.
password = db.Column(db.String(255), nullable=True)
def get_security_payload(self):
# Make sure we still properly hook up to flask JSONEncoder
return {"email": str(self.email), "last_update": self.update_datetime}
class WebAuthn(db.Model, fsqla.FsWebAuthnMixin):
pass
with app.app_context():
db.create_all()
def tear_down():
if realdburl:
db.drop_all()
_teardown_realdb(db_info)
request.addfinalizer(tear_down)
return SQLAlchemyUserDatastore(db, User, Role, WebAuthn)
@pytest.fixture()
def sqlalchemy_session_datastore(request, app, tmpdir, realdburl):
return sqlalchemy_session_setup(request, app, tmpdir, realdburl)
def sqlalchemy_session_setup(request, app, tmpdir, realdburl):
"""
Note that we test having a different user id column name here.
"""
pytest.importorskip("sqlalchemy")
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker, relationship, backref
from sqlalchemy.ext.declarative import declarative_base, declared_attr
from sqlalchemy.sql import func
from sqlalchemy import (
Boolean,
DateTime,
Column,
Integer,
LargeBinary,
String,
Text,
ForeignKey,
UnicodeText,
)
from flask_security.models.fsqla_v3 import AsaList
f, path = tempfile.mkstemp(
prefix="flask-security-test-db", suffix=".db", dir=str(tmpdir)
)
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///" + path
engine = create_engine(app.config["SQLALCHEMY_DATABASE_URI"])
db_session = scoped_session(
sessionmaker(autocommit=False, autoflush=False, bind=engine)
)
Base = declarative_base()
Base.query = db_session.query_property()
class RolesUsers(Base):
__tablename__ = "roles_users"
id = Column(Integer(), primary_key=True)
user_id = Column("user_id", Integer(), ForeignKey("user.myuserid"))
role_id = Column("role_id", Integer(), ForeignKey("role.myroleid"))
class Role(Base, RoleMixin):
__tablename__ = "role"
myroleid = Column(Integer(), primary_key=True)
name = Column(String(80), unique=True)
description = Column(String(255))
permissions = Column(UnicodeText, nullable=True)
update_datetime = Column(
DateTime,
nullable=False,
server_default=func.now(),
onupdate=datetime.utcnow,
)
class User(Base, UserMixin):
__tablename__ = "user"
myuserid = Column(Integer, primary_key=True)
fs_uniquifier = Column(String(64), unique=True, nullable=False)
fs_webauthn_user_handle = Column(String(64), unique=True, nullable=True)
email = Column(String(255), unique=True)
username = Column(String(255), unique=True, nullable=True)
password = Column(String(255))
security_number = Column(Integer, unique=True)
last_login_at = Column(DateTime())
current_login_at = Column(DateTime())
tf_primary_method = Column(String(255), nullable=True)
tf_totp_secret = Column(String(255), nullable=True)
tf_phone_number = Column(String(255), nullable=True)
us_totp_secrets = Column(Text, nullable=True)
us_phone_number = Column(String(64), nullable=True)
last_login_ip = Column(String(100))
current_login_ip = Column(String(100))
login_count = Column(Integer)
active = Column(Boolean())
confirmed_at = Column(DateTime())
roles = relationship(
"Role", secondary="roles_users", backref=backref("users", lazy="dynamic")
)
update_datetime = Column(
DateTime,
nullable=False,
server_default=func.now(),
onupdate=datetime.utcnow,
)
@declared_attr
def webauthn(cls):
return relationship("WebAuthn", backref="users", cascade="all, delete")
def get_security_payload(self):
# Make sure we still properly hook up to flask JSONEncoder
return {"email": str(self.email), "last_update": self.update_datetime}
class WebAuthn(Base, WebAuthnMixin):
__tablename__ = "webauthn"
id = Column(Integer, primary_key=True)
credential_id = Column(
LargeBinary(1024), index=True, unique=True, nullable=False
)
public_key = Column(LargeBinary, nullable=False)
sign_count = Column(Integer, default=0)
transports = Column(AsaList(255), nullable=True) # comma separated
# a JSON string as returned from registration
extensions = Column(String(255), nullable=True)
create_datetime = Column(
type_=DateTime, nullable=False, server_default=func.now()
)
lastuse_datetime = Column(type_=DateTime, nullable=False)
        # name is provided by user - we make sure it is unique per user
name = Column(String(64), nullable=False)
usage = Column(String(64), nullable=False)
@declared_attr
def myuser_id(cls):
return Column(
Integer,
ForeignKey("user.myuserid", ondelete="CASCADE"),
nullable=False,
)
def get_user_mapping(self) -> t.Dict[str, t.Any]:
"""
Return the filter needed by find_user() to get the user
associated with this webauthn credential.
"""
return dict(myuserid=self.myuser_id)
with app.app_context():
Base.metadata.create_all(bind=engine)
def tear_down():
db_session.close()
os.close(f)
os.remove(path)
request.addfinalizer(tear_down)
return SQLAlchemySessionUserDatastore(db_session, User, Role, WebAuthn)
@pytest.fixture()
def peewee_datastore(request, app, tmpdir, realdburl):
return peewee_setup(request, app, tmpdir, realdburl)
def peewee_setup(request, app, tmpdir, realdburl):
pytest.importorskip("peewee")
from peewee import (
TextField,
DateTimeField,
Field,
IntegerField,
BooleanField,
BlobField,
ForeignKeyField,
CharField,
)
from playhouse.flask_utils import FlaskDB
if realdburl:
engine_mapper = {
"postgresql": "peewee.PostgresqlDatabase",
"mysql": "peewee.MySQLDatabase",
}
db_url, db_info = _setup_realdb(realdburl)
pieces = urlsplit(db_url)
db_config = {
"name": pieces.path[1:],
"engine": engine_mapper[pieces.scheme.split("+")[0]],
"user": pieces.username,
"password": pieces.password,
"host": pieces.hostname,
"port": pieces.port,
}
else:
f, path = tempfile.mkstemp(
prefix="flask-security-test-db", suffix=".db", dir=str(tmpdir)
)
db_config = {"name": path, "engine": "peewee.SqliteDatabase"}
app.config["DATABASE"] = db_config
db = FlaskDB(app)
class AsaList(Field):
field_type = "text"
def db_value(self, value):
if value:
return ",".join(value)
return value
def python_value(self, value):
if value:
return value.split(",")
return value
class BytesBlobField(BlobField):
# Alas pydantic/py_webauthn doesn't understand memoryviews
def python_value(self, value):
if value:
return bytes(value)
return value
class Role(RoleMixin, db.Model):
name = CharField(unique=True, max_length=80)
description = TextField(null=True)
permissions = TextField(null=True)
class User(UserMixin, db.Model):
email = TextField(unique=True, null=False)
fs_uniquifier = TextField(unique=True, null=False)
fs_webauthn_user_handle = TextField(unique=True, null=True)
username = TextField(unique=True, null=True)
security_number = IntegerField(null=True)
password = TextField(null=True)
last_login_at = DateTimeField(null=True)
current_login_at = DateTimeField(null=True)
tf_primary_method = TextField(null=True)
tf_totp_secret = TextField(null=True)
tf_phone_number = TextField(null=True)
us_totp_secrets = TextField(null=True)
us_phone_number = TextField(null=True)
last_login_ip = TextField(null=True)
current_login_ip = TextField(null=True)
login_count = IntegerField(null=True)
active = BooleanField(default=True)
confirmed_at = DateTimeField(null=True)
class WebAuthn(WebAuthnMixin, db.Model):
credential_id = BytesBlobField(unique=True, null=False, index=True)
public_key = BytesBlobField(null=False)
sign_count = IntegerField(default=0)
transports = AsaList(null=True)
# a JSON string as returned from registration
extensions = TextField(null=True)
lastuse_datetime = DateTimeField(null=False)
# name is provided by the user - we make sure it is unique per user
name = TextField(null=False)
usage = TextField(null=False)
# This creates a real column called user_id
user = ForeignKeyField(User, backref="webauthn")
class UserRoles(db.Model):
"""Peewee does not have built-in many-to-many support, so we have to
create this mapping class to link users to roles."""
user = ForeignKeyField(User, backref="roles")
role = ForeignKeyField(Role, backref="users")
name = property(lambda self: self.role.name)
description = property(lambda self: self.role.description)
def get_permissions(self):
return self.role.get_permissions()
with app.app_context():
for Model in (Role, User, UserRoles, WebAuthn):
Model.drop_table()
Model.create_table()
def tear_down():
if realdburl:
db.close_db(None)
_teardown_realdb(db_info)
else:
db.close_db(None)
os.close(f)
os.remove(path)
request.addfinalizer(tear_down)
return PeeweeUserDatastore(db, User, Role, UserRoles, WebAuthn)
@pytest.fixture()
def pony_datastore(request, app, tmpdir, realdburl):
return pony_setup(request, app, tmpdir, realdburl)
def pony_setup(request, app, tmpdir, realdburl):
pytest.importorskip("pony")
from pony.orm import Database, Optional, Required, Set
from pony.orm.core import SetInstance
SetInstance.append = SetInstance.add
db = Database()
class Role(db.Entity):
name = Required(str, unique=True)
description = Optional(str, nullable=True)
users = Set(lambda: User) # type: ignore
class User(db.Entity):
email = Required(str)
fs_uniquifier = Required(str, nullable=False)
username = Optional(str)
security_number = Optional(int)
password = Optional(str, nullable=True)
last_login_at = Optional(datetime)
current_login_at = Optional(datetime)
tf_primary_method = Optional(str, nullable=True)
tf_totp_secret = Optional(str, nullable=True)
tf_phone_number = Optional(str, nullable=True)
us_totp_secrets = Optional(str, nullable=True)
us_phone_number = Optional(str, nullable=True)
last_login_ip = Optional(str)
current_login_ip = Optional(str)
login_count = Optional(int)
active = Required(bool, default=True)
confirmed_at = Optional(datetime)
roles = Set(lambda: Role)
def has_role(self, name):
return name in {r.name for r in self.roles.copy()}
if realdburl:
db_url, db_info = _setup_realdb(realdburl)
pieces = urlsplit(db_url)
provider = pieces.scheme.split("+")[0]
provider = "postgres" if provider == "postgresql" else provider
db.bind(
provider=provider,
user=pieces.username,
password=pieces.password,
host=pieces.hostname,
port=pieces.port,
database=pieces.path[1:],
)
else:
app.config["DATABASE"] = {"name": ":memory:", "engine": "pony.SqliteDatabase"}
db.bind("sqlite", ":memory:", create_db=True)
db.generate_mapping(create_tables=True)
def tear_down():
if realdburl:
_teardown_realdb(db_info)
request.addfinalizer(tear_down)
return PonyUserDatastore(db, User, Role)
@pytest.fixture()
def sqlalchemy_app(
app: SecurityFixture, sqlalchemy_datastore: SQLAlchemyUserDatastore
) -> t.Callable[[], SecurityFixture]:
def create() -> SecurityFixture:
security = Security(app, datastore=sqlalchemy_datastore)
app.security = security
return app
return create
@pytest.fixture()
def sqlalchemy_session_app(app, sqlalchemy_session_datastore):
def create():
app.security = Security(app, datastore=sqlalchemy_session_datastore)
return app
return create
@pytest.fixture()
def peewee_app(app, peewee_datastore):
def create():
app.security = Security(app, datastore=peewee_datastore)
return app
return create
@pytest.fixture()
def mongoengine_app(app, mongoengine_datastore):
def create():
app.security = Security(app, datastore=mongoengine_datastore)
return app
return create
@pytest.fixture()
def pony_app(app, pony_datastore):
def create():
app.security = Security(app, datastore=pony_datastore)
return app
return create
@pytest.fixture()
def client(request: pytest.FixtureRequest, sqlalchemy_app: t.Callable) -> "FlaskClient":
app = sqlalchemy_app()
populate_data(app)
return app.test_client()
@pytest.fixture()
def client_nc(request, sqlalchemy_app):
# useful for testing token auth.
# No Cookies for You!
app = sqlalchemy_app()
populate_data(app)
return app.test_client(use_cookies=False)
@pytest.fixture(params=["cl-sqlalchemy", "c2", "cl-mongo", "cl-peewee"])
def clients(request, app, tmpdir, realdburl, realmongodburl):
if request.param == "cl-sqlalchemy":
ds = sqlalchemy_setup(request, app, tmpdir, realdburl)
elif request.param == "c2":
ds = sqlalchemy_session_setup(request, app, tmpdir, realdburl)
elif request.param == "cl-mongo":
ds = mongoengine_setup(request, app, tmpdir, realmongodburl)
elif request.param == "cl-peewee":
ds = peewee_setup(request, app, tmpdir, realdburl)
elif request.param == "cl-pony":
# Not working yet.
ds = pony_setup(request, app, tmpdir, realdburl)
app.security = Security(app, datastore=ds)
populate_data(app)
if request.param == "cl-peewee":
# peewee is insistent on a single connection?
ds.db.close_db(None)
return app.test_client()
@pytest.fixture()
def in_app_context(request, sqlalchemy_app):
app = sqlalchemy_app()
with app.app_context():
yield app
@pytest.fixture()
def get_message(app: "Flask") -> t.Callable[..., bytes]:
def fn(key, **kwargs):
rv = app.config["SECURITY_MSG_" + key][0] % kwargs
return rv.encode("utf-8")
return fn
@pytest.fixture()
def get_message_local(app):
def fn(key, **kwargs):
return localize_callback(app.config["SECURITY_MSG_" + key][0], **kwargs)
return fn
@pytest.fixture(
params=["sqlalchemy", "sqlalchemy-session", "mongoengine", "peewee", "pony"]
)
def datastore(request, app, tmpdir, realdburl, realmongodburl):
if request.param == "sqlalchemy":
rv = sqlalchemy_setup(request, app, tmpdir, realdburl)
elif request.param == "sqlalchemy-session":
rv = sqlalchemy_session_setup(request, app, tmpdir, realdburl)
elif request.param == "mongoengine":
rv = mongoengine_setup(request, app, tmpdir, realmongodburl)
elif request.param == "peewee":
rv = peewee_setup(request, app, tmpdir, realdburl)
elif request.param == "pony":
rv = pony_setup(request, app, tmpdir, realdburl)
return rv
@pytest.fixture()
# def script_info(app, datastore): # Fix me when pony works
def script_info(app, sqlalchemy_datastore):
from flask.cli import ScriptInfo
def create_app():
uia = [
{"email": {"mapper": uia_email_mapper}},
{"us_phone_number": {"mapper": lambda x: x}},
]
app.config.update(**{"SECURITY_USER_IDENTITY_ATTRIBUTES": uia})
app.security = Security(app, datastore=sqlalchemy_datastore)
return app
return ScriptInfo(create_app=create_app)
def pytest_addoption(parser):
parser.addoption(
"--realdburl",
action="store",
default=None,
help="""Set url for using real database for testing.
For postgres: 'postgresql://user:password@host/'""",
)
parser.addoption(
"--realmongodburl",
action="store",
default=None,
help="""Set url for using real mongo database for testing.
e.g. 'localhost'""",
)
@pytest.fixture(scope="session")
def realdburl(request):
"""
Support running datastore tests against a real DB.
For example psycopg2 is very strict about types in queries
compared to sqlite
To use postgres you need to of course run a postgres instance on localhost
then pass in an extra arg to pytest:
--realdburl postgresql://<user>@localhost/
For mysql same - just download and add a root password.
--realdburl "mysql+pymysql://root:<password>@localhost/"
"""
return request.config.option.realdburl
@pytest.fixture(scope="session")
def realmongodburl(request):
"""
Support running datastore tests against a real Mongo DB.
--realmongodburl "localhost"
"""
return request.config.option.realmongodburl
def _setup_realdb(realdburl):
"""
Called when we want to run unit tests against a real DB.
This is useful since different DB drivers are pickier about queries etc
(such as pyscopg2 and postgres)
"""
from sqlalchemy import create_engine
from sqlalchemy_utils import database_exists, create_database
db_name = "flask_security_test_%s" % str(time.time()).replace(".", "_")
db_uri = realdburl + db_name
engine = create_engine(db_uri)
if not database_exists(engine.url):
create_database(engine.url)
print("Setting up real DB at " + db_uri)
return db_uri, {"engine": engine}
def _teardown_realdb(db_info):
from sqlalchemy_utils import drop_database
drop_database(db_info["engine"].url)
|
the-stack_106_20459
|
"""Connection configuration
===========================
.. highlight:: ini
Every sparkplug instance is attached to a single ``connection``, usually
named ``main``. The connection contains all the information necessary to
connect to a single AMQP broker.
The simplest possible connection is::
[connection:main]
which is equivalent to::
[connection:main]
# The host (or host:port) of the broker node to connect to.
host = localhost
# The virtual host to connect to.
virtual_host = /
# The user to connect as.
userid = guest
# The user's password.
password = guest
# If set, forces the use of SSL to connect to the broker.
ssl = False
# If set, changes the interval between reconnect attempts:
reconnect_delay = 10
# If set, overrides the default heartbeat interval (requested)
heartbeat = 10
# If set, overrides the quality-of-service message count to buffer.
qos = 24
Sparkplug operates by starting a connection, then applying all other
configuration directives to it (to set up queues_, exchanges_, bindings_,
and consumers_), then waiting for messages to be delivered.
.. _queues: `Queue configuration`_
.. _exchanges: `Exchange configuration`_
.. _bindings: `Binding configuration`_
.. _consumers: `Consumer configuration`_
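As a concrete illustration (host, vhost, and credentials below are made up), a
connection to a remote broker with a larger prefetch window could be declared as::
    [connection:main]
    host = rabbit.example.internal:5672
    virtual_host = /sparkplug
    userid = sparkplug
    password = secret
    heartbeat = 30
    qos = 100
Every key shown here is one of the options documented above.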
"""
import amqp
import time
import socket
import threading
from sparkplug.config.types import convert, parse_bool
from sparkplug.logutils import LazyLogger
from amqp import spec
_log = LazyLogger(__name__)
def _locked_call(lock, fn):
# In an ideal world, we'd functool.wraps here,
# but this complicates python 2.7 support and
# isn't offering a lot of benefit in this context.
def locked_fn(*args, **kwargs):
with lock:
r = fn(*args, **kwargs)
return r
return locked_fn
class MultiThreadedConnection(object):
"""
Context Manager
Replaces methods on connection, channel
with ones that use a shared lock; to
prevent the consumer and the heartbeater
from stepping on each other between threads.
"""
def __init__(self, connection, channel):
self._connection = connection
self._channel = channel
self._connection_hold = {}
self._channel_hold = {}
self._lock = threading.RLock()
def _lock_obj(self, obj, store):
elements = set(dir(obj)) & set(['send_heartbeat', 'send_method'])
for e in elements:
attr = getattr(obj, e)
if callable(attr):
store[e] = attr
setattr(obj, e, _locked_call(self._lock, attr))
return
def _unlock_obj(self, obj, store):
for e in store:
setattr(obj, e, store[e])
store.clear()
def __enter__(self):
self._lock_obj(self._connection, self._connection_hold)
self._lock_obj(self._channel, self._channel_hold)
_log.debug("Connection frame_writer is serialized")
return self
def __exit__(self, exc_type, exc_value, traceback):
self._unlock_obj(self._channel, self._channel_hold)
self._unlock_obj(self._connection, self._connection_hold)
_log.debug("Connection frame_writer is restored")
return False
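# Illustrative usage (mirrors AMQPConnector.run below); connection parameters
# are placeholders:
#
#   connection = amqp.Connection(host="localhost")
#   connection.connect()
#   channel = connection.channel()
#   with MultiThreadedConnection(connection, channel):
#       channel.basic_qos(0, 24, False)
#       ...  # consumer and heartbeat threads can now share the socket safely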
def jitter():
"returns a quasi-random floating point value between 0 and 1"
# uses the kronecker sequence to guarantee that values are spread out instead of potentially close together
return (time.process_time() * (879190747.0 ** 0.5)) % 1
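# For example, AMQPConnector.run below sleeps reconnect_delay + jitter() between
# reconnect attempts, so with reconnect_delay = 10 the actual delay falls in
# [10, 11) and differs from process to process, which keeps a fleet of restarted
# consumers from reconnecting to the broker at the exact same instant.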
class AMQPConnector(object):
def __init__(self, name, channel_configurer, reconnect_delay='10', qos=24, **kwargs):
self.qos = int(qos)
self.reconnect_delay = int(reconnect_delay)
self.connection_args = dict(kwargs)
convert(self.connection_args, 'ssl', parse_bool)
convert(self.connection_args, 'heartbeat', int)
if 'heartbeat' not in self.connection_args:
self.connection_args['heartbeat'] = 15
self.channel_configurer = channel_configurer
def run_channel(self, connection, channel):
_log.debug("Configuring channel elements.")
self.channel_configurer.start(channel)
try:
self.pump(connection, channel)
except (SystemExit, KeyboardInterrupt):
_log.debug("Tearing down connection.")
self.channel_configurer.stop(channel)
raise
def pump(self, connection, channel):
timeout = connection.heartbeat * 0.4 or None
while True:
_log.debug("Waiting for a message.")
try:
channel.wait(spec.Basic.Deliver, timeout=timeout)
except socket.timeout:
_log.debug("Idle heartbeat")
connection.send_heartbeat()
def run(self):
while True:
try:
_log.debug("Connecting to broker.")
connection = amqp.Connection(**self.connection_args)
connection.connect() # populate properties
channel = connection.channel()
mtconnection = MultiThreadedConnection(connection, channel)
with connection, mtconnection:
# you risk dropped tcp connections due to buffer overflow without setting qos:
_log.debug("Applying qos: {}".format(self.qos))
channel.basic_qos(0, self.qos, False)
with channel:
self.run_channel(connection, channel)
except (SystemExit, KeyboardInterrupt):
return
except (IOError, socket.error):
_log.exception(
"Connection error. Waiting %s seconds and trying again.",
self.reconnect_delay
)
# jitter:
time.sleep(self.reconnect_delay + jitter())
except:
_log.exception(
"Unexpected exception. Waiting %s seconds and trying again.",
self.reconnect_delay
)
time.sleep(self.reconnect_delay + jitter())
|
the-stack_106_20460
|
import os
import json
from collections import namedtuple
import copy
#
IF_RESET_TFGRAPH_SESS_RUN = False
TF_DATASET_TO_NUMPY_MODE = "graph" # eager/graph
#
#
autodl_global_config = {
"meta_solution": {
"cv_solution": "DeepWisdom",
"nlp_solution": "upwind_flys",
"speech_solution": "PASA_NJU",
},
"data_space": {
"domain_dataset": {
"text": {"if_sample": True, "sample_ratio": 0.5},
"speech": {"if_sample": True, "sample_ratio": 0.5},
}
},
}
class MetaSoluConf(object):
def __init__(self):
self.cv_solution = None
self.nlp_solution = None
self.speech_solution = None
class DsDomainDatasetConf(object):
def __init__(self):
self.if_sample = None
self.sample_ratio = None
class DsDomainDatasetSets(object):
def __init__(self):
self.text = DsDomainDatasetConf()
self.speech = DsDomainDatasetConf()
class DsConf(object):
def __init__(self):
self.domain_dataset = DsDomainDatasetSets()
class AutoDlConf(object):
def __init__(self):
self.meta_solution = MetaSoluConf()
self.data_space = DsConf()
class ConfigParserA(object):
def _json_object_hook(self, d):
return namedtuple("X", d.keys())(*d.values())
def json2obj(self, data):
return json.loads(data, object_hook=self._json_object_hook)
def from_type_autodlconf(self, conf_data) -> AutoDlConf:
return copy.deepcopy(self.json2obj(json.dumps(conf_data)))
autodl_g_conf_repr = json.dumps(autodl_global_config, indent=4)
config_parser_a = ConfigParserA()
AUTODL_G_CONF = config_parser_a.from_type_autodlconf(autodl_global_config)
META_SOLUS = AUTODL_G_CONF.meta_solution
DM_DS_PARAS = AUTODL_G_CONF.data_space.domain_dataset
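# Illustrative attribute-style access (values follow from autodl_global_config above):
#   META_SOLUS.cv_solution -> "DeepWisdom"
#   DM_DS_PARAS.text.if_sample -> True
#   DM_DS_PARAS.speech.sample_ratio -> 0.5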
|
the-stack_106_20463
|
#!/usr/bin/env python3
from qiime2 import Artifact
def main(input_type, input_path, input_format, output_path):
imported_artifact = Artifact.import_data(
input_type, input_path, view_type=input_format
)
imported_artifact.save(output_path)
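# Note: the "${manifest_file}" and "${meta.id}" tokens below are not Python; they
# look like placeholders substituted by an enclosing workflow template engine
# (e.g. Nextflow - an assumption) before this script is executed.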
if __name__ == "__main__":
INPUT_TYPE = "SampleData[PairedEndSequencesWithQuality]"
INPUT_PATH = "${manifest_file}"
INPUT_FORMAT = "PairedEndFastqManifestPhred33V2"
OUTPUT_PATH = "${meta.id}_sequences.qza"
main(INPUT_TYPE, INPUT_PATH, INPUT_FORMAT, OUTPUT_PATH)
|
the-stack_106_20464
|
import asyncio
from aiokafka.consumer import AIOKafkaConsumer
from aiokafka.errors import ConsumerStoppedError, NoOffsetForPartitionError
from aiokafka.util import create_task
from ._testutil import (
KafkaIntegrationTestCase, run_until_complete, random_string)
class TestConsumerIteratorIntegration(KafkaIntegrationTestCase):
@run_until_complete
async def test_aiter(self):
await self.send_messages(0, list(range(10)))
await self.send_messages(1, list(range(10, 20)))
consumer = AIOKafkaConsumer(
self.topic,
bootstrap_servers=self.hosts,
auto_offset_reset='earliest')
await consumer.start()
self.add_cleanup(consumer.stop)
messages = []
async for m in consumer:
messages.append(m)
if len(messages) == 20:
# Flake8==3.0.3 gives
# F999 'break' outside loop
# for `async` syntax
break # noqa
self.assert_message_count(messages, 20)
@run_until_complete
async def test_exception_ignored_with_aiter(self):
# Test relies on the MessageTooLarge error, which no longer exists in
# Kafka 0.10.1+, so we pin the API version here to 0.9
l_msgs = [random_string(10), random_string(50000)]
large_messages = await self.send_messages(0, l_msgs)
r_msgs = [random_string(50)]
small_messages = await self.send_messages(0, r_msgs)
consumer = AIOKafkaConsumer(
self.topic,
bootstrap_servers=self.hosts,
auto_offset_reset='earliest',
max_partition_fetch_bytes=4000,
api_version="0.9")
await consumer.start()
self.add_cleanup(consumer.stop)
messages = []
with self.assertLogs(
'aiokafka.consumer.consumer', level='ERROR') as cm:
async for m in consumer:
messages.append(m)
if len(messages) == 2:
# Flake8==3.0.3 gives
# F999 'break' outside loop
# for `async` syntax
break # noqa
self.assertEqual(len(cm.output), 1)
self.assertTrue(
'ERROR:aiokafka.consumer.consumer:error in consumer iterator'
in cm.output[0])
self.assertEqual(messages[0].value, large_messages[0])
self.assertEqual(messages[1].value, small_messages[0])
@run_until_complete
async def test_exception_in_aiter(self):
await self.send_messages(0, [b'test'])
consumer = AIOKafkaConsumer(
self.topic,
bootstrap_servers=self.hosts,
auto_offset_reset="none")
await consumer.start()
self.add_cleanup(consumer.stop)
with self.assertRaises(NoOffsetForPartitionError):
async for m in consumer:
m # pragma: no cover
@run_until_complete
async def test_consumer_stops_iter(self):
consumer = AIOKafkaConsumer(
self.topic,
bootstrap_servers=self.hosts,
auto_offset_reset="earliest")
await consumer.start()
self.add_cleanup(consumer.stop)
async def iterator():
async for msg in consumer: # pragma: no cover
assert False, "No items should be here, got {}".format(msg)
task = create_task(iterator())
await asyncio.sleep(0.1)
# Since we didn't produce any data to Kafka, the iterator should still be waiting
self.assertFalse(task.done())
await consumer.stop()
# Should just stop iterator, no errors
await task
# But creating another iterator should result in an error, we can't
# have dead loops like:
#
# while True:
# async for msg in consumer:
# print(msg)
with self.assertRaises(ConsumerStoppedError):
await iterator()
|
the-stack_106_20465
|
import numpy as np
import string
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
import sys, unicodedata
import random
table = dict.fromkeys(i for i in range(sys.maxunicode) if unicodedata.category(chr(i)).startswith('P'))
english_stopwords = stopwords.words('english')
english_stopwords = [str(x) for x in english_stopwords]
stemmer = PorterStemmer()
stats_words_b = {}
stats_words_t = {}
stats_words_e = {}
stats_words_m = {}
total_b = 0
total_t = 0
total_e = 0
total_m = 0
f = open('newsCorpora.csv', encoding='utf-8')
data = f.readlines()
random.shuffle(data)
random.shuffle(data)
random.shuffle(data)
random.shuffle(data)
w = data[:len(data) // 8]
f.close()
for i in w:
temp_list = i.strip().split('\t')
headline = temp_list[1]
cat = temp_list[4]
words = headline.split()
words_cleaned = [x.translate(table).lower() for x in words]
words_cleaned_stopwords = [stemmer.stem(x) for x in words_cleaned if x not in english_stopwords]
while '' in words_cleaned_stopwords:
words_cleaned_stopwords.remove('')
if cat=='b':
total_b += 1
for j in words_cleaned_stopwords:
if j in stats_words_b:
stats_words_b[j] += 1
else:
stats_words_b[j] = 1
if cat=='t':
total_t += 1
for j in words_cleaned_stopwords:
if j in stats_words_t:
stats_words_t[j] += 1
else:
stats_words_t[j] = 1
if cat=='e':
total_e += 1
for j in words_cleaned_stopwords:
if j in stats_words_e:
stats_words_e[j] += 1
else:
stats_words_e[j] = 1
if cat=='m':
total_m += 1
for j in words_cleaned_stopwords:
if j in stats_words_m:
stats_words_m[j] += 1
else:
stats_words_m[j] = 1
b_word_asc = sorted(stats_words_b, key=lambda k: stats_words_b[k])
b_word_asc.reverse()
t_word_asc = sorted(stats_words_t, key=lambda k: stats_words_t[k])
t_word_asc.reverse()
e_word_asc = sorted(stats_words_e, key=lambda k: stats_words_e[k])
e_word_asc.reverse()
m_word_asc = sorted(stats_words_m, key=lambda k: stats_words_m[k])
m_word_asc.reverse()
vocabulary = b_word_asc[:2000] + e_word_asc[:2000] + t_word_asc[:2000] + m_word_asc[:2000]
count = 0
p_vocabulary_given_b = {}
p_vocabulary_given_t = {}
p_vocabulary_given_m = {}
p_vocabulary_given_e = {}
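# Add-one (Laplace) smoothing, as implemented in the loop below:
#   P(word | class) = (count(word, class) + 1) / (class_total + len(vocabulary))
# where class_total is the number of headlines seen for that class, so words that
# never occurred in a class still receive a small non-zero probability.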
for i in vocabulary:
if i in stats_words_b:
p_vocabulary_given_b[i] = float(stats_words_b[i] + 1)/(float(total_b) + float(len(vocabulary)))
else:
p_vocabulary_given_b[i] = (1.0)/(float(total_b) + float(len(vocabulary)))
if i in stats_words_t:
p_vocabulary_given_t[i] = float(stats_words_t[i] + 1)/(float(total_t) + float(len(vocabulary)))
else:
p_vocabulary_given_t[i] = (1.0)/(float(total_t) + float(len(vocabulary)))
if i in stats_words_e:
p_vocabulary_given_e[i] = float(stats_words_e[i] + 1)/(float(total_e) + float(len(vocabulary)))
else:
p_vocabulary_given_e[i] = (1.0)/(float(total_e) + float(len(vocabulary)))
if i in stats_words_m:
p_vocabulary_given_m[i] = float(stats_words_m[i] + 1)/(float(total_m) + float(len(vocabulary)))
else:
p_vocabulary_given_m[i] = (1.0)/(float(total_m) + float(len(vocabulary)))
|
the-stack_106_20466
|
#!/usr/bin/env python3
import json
import sys
import os
import argparse
import amp.utils
from amp.schema.speech_to_text import SpeechToText, SpeechToTextMedia, SpeechToTextResult
# Convert kaldi output to standardized json
def convert(media_file, kaldi_file, kaldi_transcript_file, output_json_file):
amp.utils.exception_if_file_not_exist(kaldi_file)
if not os.path.exists(kaldi_transcript_file):
raise Exception("Exception: File " + kaldi_transcript_file + " doesn't exist, the previous command generating it must have failed.")
results = SpeechToTextResult()
# Open the kaldi json
with open(kaldi_file) as json_file:
data = json.load(json_file)
# Get the kaldi transcript
transcript = open(kaldi_transcript_file, "r")
results.transcript = transcript.read()
# Get a list of words
words = data["words"]
duration = 0.00
# For each word, add a word to our results
for w in words:
time = float(w["time"])
end = time + float(w["duration"])
# Keep track of the last time and use it as the duration
if end > duration:
duration = end
results.addWord("", time, end, w["word"], None, None)
# Create the media objeect
media = SpeechToTextMedia(duration, media_file)
# Create the final object
outputFile = SpeechToText(media, results)
#write the output
amp.utils.write_json_file(outputFile, output_json_file)
def main():
#(media_file, kaldi_file, kaldi_transcript_file, output_json_file) = sys.argv[1:5]
parser = argparse.ArgumentParser()
parser.add_argument("media_file")
parser.add_argument("kaldi_file")
parser.add_argument("kaldi_transcript_file")
parser.add_argument("output_json_file")
args = parser.parse_args()
convert(args.media_file, args.kaldi_file, args.kaldi_transcript_file, args.output_json_file)
if __name__ == "__main__":
main()
|
the-stack_106_20467
|
# RT - Force Pinned Message
from typing import Tuple, Dict, List
from discord.ext import commands, tasks
import discord
from collections import defaultdict
from rtlib import DatabaseManager
from ujson import loads, dumps
class DataManager(DatabaseManager):
TABLE = "ForcePinnedMessage"
def __init__(self, db):
self.db = db
async def init_table(self, cursor) -> None:
await cursor.create_table(
self.TABLE, dict(
GuildID="BIGINT", ChannelID="BIGINT", AuthorID="BIGINT",
MessageID="BIGINT", Bool="TINYINT", Text="TEXT"
)
)
await self._update_cache(cursor)
async def _update_cache(self, cursor):
async for row in cursor.get_datas(self.TABLE, {}):
if row:
self.cache[row[0]].append(row[1])
async def update_cache(self, cursor):
return await self._update_cache(cursor)
async def setting(
self, cursor, guild_id: int, channel_id: int,
message_id: int, author_id: int,
onoff: bool, text: str
) -> None:
value = dict(Bool=int(onoff), Text=text,
AuthorID=author_id, MessageID=message_id)
target = dict(GuildID=guild_id, ChannelID=channel_id)
if await cursor.exists(self.TABLE, target):
await cursor.update_data(self.TABLE, value, target)
else:
value.update(target)
await cursor.insert_data(self.TABLE, value)
async def delete(self, cursor, channel_id: int) -> None:
target = {"ChannelID": channel_id}
if await cursor.exists(self.TABLE, target):
await cursor.delete(self.TABLE, target)
async def get(
self, cursor, guild_id: int, channel_id: int
) -> Tuple[int, int, bool, str]:
target = dict(GuildID=guild_id, ChannelID=channel_id)
if await cursor.exists(self.TABLE, target):
if (row := await cursor.get_data(self.TABLE, target)):
return row[-4], row[-3], bool(row[-2]), row[-1]
else:
return 0, 0, False, ""
else:
return 0, 0, False, ""
class ForcePinnedMessage(commands.Cog, DataManager):
def __init__(self, bot):
self.bot = bot
self.queue: Dict[int, Tuple[discord.Message, tuple]] = {}
self.remove_queue: List[int] = []
self.cache = defaultdict(list)
self.bot.loop.create_task(self.on_ready())
async def on_ready(self):
await self.bot.wait_until_ready()
super(commands.Cog, self).__init__(
self.bot.mysql
)
await self.init_table()
self.worker.start()
@commands.command(
extras={
"headding": {
"ja": "いつも下にくるメッセージ。強制ピン留めメッセージ機能。",
"en": "Messages that always come to the bottom. Force pinned message function."
},
"parent": "ServerTool"
}, aliases=["ピン留め", "ぴんどめ", "fpm", "forcepinmessage"]
)
@commands.has_permissions(manage_messages=True)
async def pin(self, ctx, onoff: bool, *, content=""):
"""!lang ja
--------
いつも下にくるメッセージを作ることができます。
メッセージ削除権限を持つ人のみ実行可能です。
別名強制ピン留めメッセージです。
Parameters
----------
onoff : bool
onにすると強制ピン留めメッセージを作ります。
もし強制ピン留めメッセージを無効にした際はこれをoffにしてください。
content : str
いつも下にくるメッセージの内容です。
onoffをoffにした際はこれは書かなくて良いです。
Aliases
-------
fpm, forcepinmessage, ピン留め, ぴんどめ
Examples
--------
```
rt!pin on 自己紹介テンプレート:
名前:
性別:
一言:
```
Notes
-----
下に来るメッセージは数秒毎に更新されるので数秒は下に来ないことがあります。
以下のように最初に`>>`を置いて`embed`コマンドの構文を使えば埋め込みにすることができます。
```
rt!pin on >>タイトル
説明
<フィールド名
フィールド内容
```
Warnings
--------
設定後はすぐに下にこないことがあります。
しばらくしても下に来ない際はメッセージを送ってみてください。
これはRTがメッセージを送りすぎてAPI制限になるということを防止するために発生するものでご了承ください。
!lang en
--------
You can create a message that always comes at the bottom.
This can only be done by someone with the Delete Message permission.
Also known as a force pinned message.
Parameters
----------
onoff : bool
When set to "on", this function creates a forced pinning message.
If you want to disable the forced pinning message, set this to off.
content : str
The content of the message that always appears below.
If you turn off onoff, you do not need to write this.
Aliases
-------
fpm, forcepinmessage
Examples
--------
```
rt!pin on Self-introduction template:
Name:
Gender:
Comment:
```
Warnings
--------
After setting it up, it may not come down immediately.
If it doesn't come down after a while, please try sending a message.
Please note that this is to prevent RTs from sending too many messages, which would limit the API."""
if hasattr(ctx.channel, "topic"):
await ctx.trigger_typing()
if content.startswith(">>"):
content = "<" + dumps(
self.bot.cogs["ServerTool"].easy_embed(
content, ctx.author.color
).to_dict()
) + ">"
await self.setting(
ctx.guild.id, ctx.channel.id, 0,
ctx.author.id, onoff, content
)
if not onoff and ctx.channel.id in self.queue:
del self.queue[ctx.channel.id]
if ctx.channel.id not in self.remove_queue:
self.remove_queue.append(ctx.channel.id)
if onoff and ctx.channel.id in self.remove_queue:
self.remove_queue.remove(ctx.channel.id)
await self.update_cache()
await ctx.reply("Ok")
else:
await ctx.reply("スレッドに設定することはできません。")
@commands.Cog.listener()
async def on_message(self, message: discord.Message):
if not message.guild or "RT-" in message.author.name or not self.bot.is_ready():
return
if message.channel.id not in self.remove_queue:
if message.guild.id in self.cache and message.channel.id in self.cache[message.guild.id]:
self.queue[message.channel.id] = (
message, await self.get(message.guild.id, message.channel.id)
)
def cog_unload(self):
self.worker.cancel()
@tasks.loop(seconds=5)
async def worker(self):
for channel_id in list(self.queue.keys()):
if channel_id in self.remove_queue:
self.remove_queue.remove(channel_id)
message, fpm = self.queue[channel_id]
new_message = None
try:
if fpm[1] != 0:
# Try to delete the previously sent pinned message.
before_message = await message.channel.fetch_message(fpm[1])
if before_message:
await before_message.delete()
except (discord.NotFound, discord.Forbidden, discord.HTTPException):
pass
try:
del self.queue[channel_id]
except KeyError:
pass
member = message.guild.get_member(fpm[0])
if member is None:
member = self.bot.get_user(fpm[0])
content = fpm[3]
if content.startswith("<") and content.endswith(">"):
try:
kwargs = {"embed": discord.Embed.from_dict(loads(content[1:-1]))}
except ValueError:
kwargs = {"content": content}
else:
kwargs = {"content": content}
try:
new_message = await message.channel.webhook_send(
username=f"{getattr(member, 'display_name', member.name)} RT-ForcePinnedMessage",
avatar_url=member.avatar.url, wait=True, **kwargs
)
except Exception as e:
print("(ignore) Error on ForcePinnedMessage:", e)
if message.guild and message.channel and member:
await self.setting(
message.guild.id, message.channel.id,
getattr(new_message, "id", 0),
member.id, True, fpm[3]
)
else:
await self.delete(channel_id)
def setup(bot):
bot.add_cog(ForcePinnedMessage(bot))
|
the-stack_106_20469
|
import pymysql
# input as grid list
grid_list = ['23423444','7541496', '02343444', '00111122']
def select_grid_from_infos(cursor, grid_list):
grid_list_str = ', '.join(grid_list)
print(grid_list_str)
query_list = ['SELECT * FROM infos']
query_list.append(f'WHERE grid_id IN ({grid_list_str})')
query_str = " ".join(query_list)
cursor.execute(query_str)
return query_str
def select_grid_from_rooms(cursor, grid_list):
grid_list_str = ', '.join(grid_list)
print(grid_list_str)
query_list = ['SELECT * FROM rooms']
query_list.append(f'WHERE grid_id IN ({grid_list_str})')
query_str = " ".join(query_list)
cursor.execute(query_str)
print(query_str)
return query_str
if __name__ == '__main__':
#pymysql.connect(host=db_config['DB_HOST'], port=int(db_config['DB_PORT']), user=db_config['DB_USER'],
# passwd = db_config['DB_PASSWORD'], db =db_config['DB_NAME'], charset=db_config['DB_CHARSET'])
db = pymysql.connect(host='localhost', port=3306, user='dalci', passwd='UpKoDah4', db='upkodah_item', charset='utf8mb4')
cursor = db.cursor()
print("DB connection Success")
# get grid id
select_grid_from_infos(cursor, grid_list)
infos = cursor.fetchall()
for one_info in infos:
print(one_info)
select_grid_from_rooms(cursor, grid_list)
rooms = cursor.fetchall()
for one_room in rooms:
print(one_room)
db.close()
|
the-stack_106_20471
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A function to build localization and classification losses from config."""
import functools
from object_detection.core import balanced_positive_negative_sampler as sampler
from object_detection.core import losses
from object_detection.protos import losses_pb2
from object_detection.utils import ops
def build(loss_config):
"""Build losses based on the config.
Builds classification, localization losses and optionally a hard example miner
based on the config.
Args:
loss_config: A losses_pb2.Loss object.
Returns:
classification_loss: Classification loss object.
localization_loss: Localization loss object.
classification_weight: Classification loss weight.
localization_weight: Localization loss weight.
hard_example_miner: Hard example miner object.
random_example_sampler: BalancedPositiveNegativeSampler object.
Raises:
ValueError: If hard_example_miner is used with sigmoid_focal_loss.
ValueError: If random_example_sampler is getting non-positive value as
desired positive example fraction.
"""
classification_loss = _build_classification_loss(
loss_config.classification_loss)
localization_loss = _build_localization_loss(
loss_config.localization_loss)
classification_weight = loss_config.classification_weight
localization_weight = loss_config.localization_weight
hard_example_miner = None
if loss_config.HasField('hard_example_miner'):
if (loss_config.classification_loss.WhichOneof('classification_loss') ==
'weighted_sigmoid_focal'):
raise ValueError('HardExampleMiner should not be used with sigmoid focal '
'loss')
hard_example_miner = build_hard_example_miner(
loss_config.hard_example_miner,
classification_weight,
localization_weight)
random_example_sampler = None
if loss_config.HasField('random_example_sampler'):
if loss_config.random_example_sampler.positive_sample_fraction <= 0:
raise ValueError('RandomExampleSampler should not use non-positive'
'value as positive sample fraction.')
random_example_sampler = sampler.BalancedPositiveNegativeSampler(
positive_fraction=loss_config.random_example_sampler.
positive_sample_fraction)
if loss_config.expected_loss_weights == loss_config.NONE:
expected_loss_weights_fn = None
elif loss_config.expected_loss_weights == loss_config.EXPECTED_SAMPLING:
expected_loss_weights_fn = functools.partial(
ops.expected_classification_loss_by_expected_sampling,
min_num_negative_samples=loss_config.min_num_negative_samples,
desired_negative_sampling_ratio=loss_config
.desired_negative_sampling_ratio)
elif (loss_config.expected_loss_weights == loss_config
.REWEIGHTING_UNMATCHED_ANCHORS):
expected_loss_weights_fn = functools.partial(
ops.expected_classification_loss_by_reweighting_unmatched_anchors,
min_num_negative_samples=loss_config.min_num_negative_samples,
desired_negative_sampling_ratio=loss_config
.desired_negative_sampling_ratio)
else:
raise ValueError('Not a valid value for expected_classification_loss.')
return (classification_loss, localization_loss, classification_weight,
localization_weight, hard_example_miner, random_example_sampler,
expected_loss_weights_fn)
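# Illustrative text-format Loss config that this builder consumes (the exact
# schema lives in object_detection/protos/losses.proto; the field names below are
# the ones referenced in build() above):
#
#   classification_loss { weighted_sigmoid {} }
#   localization_loss { weighted_smooth_l1 { delta: 1.0 } }
#   classification_weight: 1.0
#   localization_weight: 1.0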
def build_hard_example_miner(config,
classification_weight,
localization_weight):
"""Builds hard example miner based on the config.
Args:
config: A losses_pb2.HardExampleMiner object.
classification_weight: Classification loss weight.
localization_weight: Localization loss weight.
Returns:
Hard example miner.
"""
loss_type = None
if config.loss_type == losses_pb2.HardExampleMiner.BOTH:
loss_type = 'both'
if config.loss_type == losses_pb2.HardExampleMiner.CLASSIFICATION:
loss_type = 'cls'
if config.loss_type == losses_pb2.HardExampleMiner.LOCALIZATION:
loss_type = 'loc'
max_negatives_per_positive = None
num_hard_examples = None
if config.max_negatives_per_positive > 0:
max_negatives_per_positive = config.max_negatives_per_positive
if config.num_hard_examples > 0:
num_hard_examples = config.num_hard_examples
hard_example_miner = losses.HardExampleMiner(
num_hard_examples=num_hard_examples,
iou_threshold=config.iou_threshold,
loss_type=loss_type,
cls_loss_weight=classification_weight,
loc_loss_weight=localization_weight,
max_negatives_per_positive=max_negatives_per_positive,
min_negatives_per_image=config.min_negatives_per_image)
return hard_example_miner
def build_faster_rcnn_classification_loss(loss_config):
"""Builds a classification loss for Faster RCNN based on the loss config.
Args:
loss_config: A losses_pb2.ClassificationLoss object.
Returns:
Loss based on the config.
Raises:
ValueError: On invalid loss_config.
"""
if not isinstance(loss_config, losses_pb2.ClassificationLoss):
raise ValueError('loss_config not of type losses_pb2.ClassificationLoss.')
loss_type = loss_config.WhichOneof('classification_loss')
if loss_type == 'weighted_sigmoid':
return losses.WeightedSigmoidClassificationLoss()
if loss_type == 'weighted_softmax':
config = loss_config.weighted_softmax
return losses.WeightedSoftmaxClassificationLoss(
logit_scale=config.logit_scale)
if loss_type == 'weighted_logits_softmax':
config = loss_config.weighted_logits_softmax
return losses.WeightedSoftmaxClassificationAgainstLogitsLoss(
logit_scale=config.logit_scale)
if loss_type == 'weighted_sigmoid_focal':
config = loss_config.weighted_sigmoid_focal
alpha = None
if config.HasField('alpha'):
alpha = config.alpha
return losses.SigmoidFocalClassificationLoss(
gamma=config.gamma,
alpha=alpha)
# By default, Faster RCNN second stage classifier uses Softmax loss
# with anchor-wise outputs.
config = loss_config.weighted_softmax
return losses.WeightedSoftmaxClassificationLoss(
logit_scale=config.logit_scale)
def _build_localization_loss(loss_config):
"""Builds a localization loss based on the loss config.
Args:
loss_config: A losses_pb2.LocalizationLoss object.
Returns:
Loss based on the config.
Raises:
ValueError: On invalid loss_config.
"""
if not isinstance(loss_config, losses_pb2.LocalizationLoss):
raise ValueError('loss_config not of type losses_pb2.LocalizationLoss.')
loss_type = loss_config.WhichOneof('localization_loss')
if loss_type == 'weighted_l2':
return losses.WeightedL2LocalizationLoss()
if loss_type == 'weighted_smooth_l1':
return losses.WeightedSmoothL1LocalizationLoss(
loss_config.weighted_smooth_l1.delta)
if loss_type == 'weighted_iou':
return losses.WeightedIOULocalizationLoss()
raise ValueError('Empty loss config.')
def _build_classification_loss(loss_config):
"""Builds a classification loss based on the loss config.
Args:
loss_config: A losses_pb2.ClassificationLoss object.
Returns:
Loss based on the config.
Raises:
ValueError: On invalid loss_config.
"""
if not isinstance(loss_config, losses_pb2.ClassificationLoss):
raise ValueError('loss_config not of type losses_pb2.ClassificationLoss.')
loss_type = loss_config.WhichOneof('classification_loss')
if loss_type == 'weighted_sigmoid':
return losses.WeightedSigmoidClassificationLoss()
if loss_type == 'weighted_sigmoid_focal':
config = loss_config.weighted_sigmoid_focal
alpha = None
if config.HasField('alpha'):
alpha = config.alpha
return losses.SigmoidFocalClassificationLoss(
gamma=config.gamma,
alpha=alpha)
if loss_type == 'weighted_softmax':
config = loss_config.weighted_softmax
return losses.WeightedSoftmaxClassificationLoss(
logit_scale=config.logit_scale)
if loss_type == 'weighted_logits_softmax':
config = loss_config.weighted_logits_softmax
return losses.WeightedSoftmaxClassificationAgainstLogitsLoss(
logit_scale=config.logit_scale)
if loss_type == 'bootstrapped_sigmoid':
config = loss_config.bootstrapped_sigmoid
return losses.BootstrappedSigmoidClassificationLoss(
alpha=config.alpha,
bootstrap_type=('hard' if config.hard_bootstrap else 'soft'))
raise ValueError('Empty loss config.')
|
the-stack_106_20473
|
import numpy as np
import visgeom as vg
import matplotlib
import matplotlib.pyplot as plt
from pylie import SE3
"""Exercise 2 - Estimate the mean pose from a set of random poses"""
def draw_random_poses(mean_pose, cov_pose, n=100):
"""Draw random poses from a pose distribution.
:param mean_pose: The mean pose, an SE3 object
:param cov_pose: The covariance, a 6x6 covariance matrix
:param n: The number of draws
:return: An array of drawn poses.
"""
# Create an array of poses, initialised to the mean pose.
poses = np.full(n, mean_pose, dtype=object)
# Draw random tangent space vectors.
random_xsis = np.random.multivariate_normal(np.zeros(6), cov_pose, n).T
# Perturb the mean pose with each of the random tangent space vectors.
for i in range(n):
poses[i] = poses[i] + random_xsis[0:, [i]]
return poses
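# In other words, each sample is T_i = mean_pose (+) xsi_i with xsi_i ~ N(0, cov_pose)
# drawn in the 6-dof tangent space, using pylie's '+' operator as the exp-map
# perturbation of the mean.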
def compute_mean_pose(poses, conv_thresh=1e-14, max_iters=20):
"""Compute the mean pose from an array of poses.
:param poses: An array of SE3 objects
:param conv_thresh: The convergence threshold
:param max_iters: The maximum number of iterations
:return: The estimate of the mean pose
"""
num_poses = len(poses)
# Initialise mean pose.
mean_pose = poses[0]
for it in range(max_iters):
# Compute the mean tangent vector in the tangent space at the current estimate.
mean_xsi = np.zeros((6, 1))
for pose in poses:
mean_xsi = mean_xsi + (pose - mean_pose)
mean_xsi = mean_xsi / num_poses
# Update the estimate.
mean_pose = mean_pose + mean_xsi
# Stop if the update is small.
if np.linalg.norm(mean_xsi) < conv_thresh:
break
return mean_pose
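# The loop above is the usual iterative tangent-space (Karcher-style) mean:
#   xsi_k = (1/N) * sum_i (T_i (-) mu_k)   # log-map of every pose at the estimate
#   mu_{k+1} = mu_k (+) xsi_k              # exp-map the mean tangent vector back
# repeated until ||xsi_k|| < conv_thresh, with pylie's '+'/'-' providing the
# exp/log maps on SE(3).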
def main():
# Define the pose distribution.
mean_pose = SE3()
cov_pose = np.diag(np.array([0.1, 0.1, 0.1, 0.1, 0.1, 0.4]) ** 2)
# Draw random poses from this distribution.
poses = draw_random_poses(mean_pose, cov_pose)
# Estimate the mean pose from the random poses.
estimated_mean_pose = compute_mean_pose(poses)
print(estimated_mean_pose.to_matrix())
# Plot result.
# Use Qt 5 backend in visualisation.
matplotlib.use('qt5agg')
# Create figure and axis.
fig = plt.figure()
ax = plt.axes(projection='3d')
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
# Plot each of the randomly drawn poses.
for pose in poses:
vg.plot_pose(ax, pose.to_tuple(), alpha=0.05)
# Plot the estimated mean pose.
vg.plot_pose(ax, estimated_mean_pose.to_tuple())
# Show figure.
vg.plot.axis_equal(ax)
plt.show()
if __name__ == "__main__":
main()
|
the-stack_106_20476
|
#!/usr/bin/env python3
import time
import json
import jwt
from pathlib import Path
import random, string
from datetime import datetime, timedelta
from common.api import api_get
from common.params import Params
from common.spinner import Spinner
from common.basedir import PERSIST
from selfdrive.controls.lib.alertmanager import set_offroad_alert
from selfdrive.hardware import HARDWARE
from selfdrive.swaglog import cloudlog
UNREGISTERED_DONGLE_ID = "UnregisteredDevice"
def register(show_spinner=False) -> str:
params = Params()
params.put("SubscriberInfo", HARDWARE.get_subscriber_info())
IMEI = params.get("IMEI", encoding='utf8')
HardwareSerial = params.get("HardwareSerial", encoding='utf8')
dongle_id = params.get("DongleId", encoding='utf8')
needs_registration = None in (IMEI, HardwareSerial, dongle_id)
pubkey = Path(PERSIST+"/comma/id_rsa.pub")
if not pubkey.is_file():
dongle_id = UNREGISTERED_DONGLE_ID
cloudlog.warning(f"missing public key: {pubkey}")
elif needs_registration:
if show_spinner:
spinner = Spinner()
spinner.update("registering device")
# Create registration token, in the future, this key will make JWTs directly
with open(PERSIST+"/comma/id_rsa.pub") as f1, open(PERSIST+"/comma/id_rsa") as f2:
public_key = f1.read()
private_key = f2.read()
# Block until we get the imei
serial = HARDWARE.get_serial()
start_time = time.monotonic()
imei1, imei2 = None, None
while imei1 is None and imei2 is None:
try:
imei1, imei2 = HARDWARE.get_imei(0), HARDWARE.get_imei(1)
except Exception:
cloudlog.exception("Error getting imei, trying again...")
time.sleep(1)
if time.monotonic() - start_time > 60 and show_spinner:
spinner.update(f"registering device - serial: {serial}, IMEI: ({imei1}, {imei2})")
params.put("IMEI", imei1)
params.put("HardwareSerial", serial)
backoff = 0
start_time = time.monotonic()
while True:
try:
register_token = jwt.encode({'register': True, 'exp': datetime.utcnow() + timedelta(hours=1)}, private_key, algorithm='RS256')
cloudlog.info("getting pilotauth")
resp = api_get("v2/pilotauth/", method='POST', timeout=15,
imei=imei1, imei2=imei2, serial=serial, public_key=public_key, register_token=register_token)
if resp.status_code in (402, 403):
cloudlog.info(f"Unable to register device, got {resp.status_code}")
dongle_id = UNREGISTERED_DONGLE_ID
else:
dongleauth = json.loads(resp.text)
dongle_id = ''.join(random.choices(string.ascii_lowercase + string.digits, k=16))
break
except Exception:
cloudlog.exception("failed to authenticate")
backoff = min(backoff + 1, 15)
time.sleep(backoff)
if time.monotonic() - start_time > 60 and show_spinner:
spinner.update(f"registering device - serial: {serial}, IMEI: ({imei1}, {imei2})")
if show_spinner:
spinner.close()
if dongle_id:
params.put("DongleId", dongle_id)
set_offroad_alert("Offroad_UnofficialHardware", dongle_id == UNREGISTERED_DONGLE_ID)
return dongle_id
if __name__ == "__main__":
print(register())
|
the-stack_106_20477
|
'''
notebookutils.py
2012 Brandon Mechtley
Arizona State University
A few helpful routines for using IPython notebook.
'''
from IPython.core.pylabtools import print_figure
from IPython.core.display import display, HTML, Math, Latex
from sympy import Matrix, latex
import numpy as np
def limitprec(m, prec=3):
'''Silly function to limit the maximum number of decimal places for a matrix of floats.
m: np.ndarray
input matrix'''
mi = np.array(m * 10 ** prec, dtype=int)
return np.array(mi, dtype=float) / 10 ** prec
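# e.g. limitprec(np.array([1.23456, 2.0]), prec=3) -> array([1.234, 2.]), i.e.
# values are truncated (not rounded) to at most three decimal places.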
def showmat(m, labels=('','',''), prec=3):
'''Display a numpy.ndarray as a latex matrix in an IPython notebook with optional caption.
m: np.ndarray
array to display
labels: (str, str, str), optional
Latex to insert before, between, and after matrices (default ('','','')).
prec: int, optional
Maximum number of decimal places. Hardcoding this as opposed to using ordinary string
formatting because the Numpy->SymPy->IPython chain makes things confusing. Feel free to
propose a better method @_@'''
if type(labels) != tuple and type(labels) != list:
labels = (labels, '', '')
elif len(labels) < 2:
labels = (labels[0], '', '')
elif len(labels) < 3:
labels = (labels[0], labels[1], '')
if type(m) != list and type(m) != tuple:
m = [m]
display(
Latex(
'%s$$' % labels[0] +
labels[1].join([
latex(Matrix(limitprec(a, prec)), mat_str='matrix')
for a in m
]) + '$$%s' % labels[2]
)
)
def svgfig(figures):
'''Display a matplotlib figure as SVG in an IPython notebook.
f: matplotlib.figure
figure to display as SVG
Note that this routine assumes you are NOT using ipython notebook --pylab inline, as
otherwise the original figure will be displayed as a rasterized PNG in addition to this SVG
figure by default.'''
if type(figures) != list and type(figures) != tuple: figures = [figures]
for f in figures:
display(HTML(print_figure(f, fmt='svg')))
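# Illustrative notebook usage (assumes numpy and matplotlib.pyplot are imported
# in the calling cell):
#   showmat(np.eye(2), labels=('Identity: ', '', ''))
#   fig = plt.figure(); plt.plot([0, 1], [0, 1]); svgfig(fig)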
|
the-stack_106_20479
|
import time
import sqlalchemy as tsa
from sqlalchemy import create_engine
from sqlalchemy import event
from sqlalchemy import exc
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import pool
from sqlalchemy import select
from sqlalchemy import String
from sqlalchemy import testing
from sqlalchemy import util
from sqlalchemy.engine import url
from sqlalchemy.testing import assert_raises
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import assert_raises_message_context_ok
from sqlalchemy.testing import engines
from sqlalchemy.testing import eq_
from sqlalchemy.testing import expect_raises
from sqlalchemy.testing import expect_warnings
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import is_
from sqlalchemy.testing import is_false
from sqlalchemy.testing import is_true
from sqlalchemy.testing import mock
from sqlalchemy.testing import ne_
from sqlalchemy.testing.engines import testing_engine
from sqlalchemy.testing.mock import call
from sqlalchemy.testing.mock import Mock
from sqlalchemy.testing.mock import patch
from sqlalchemy.testing.schema import Column
from sqlalchemy.testing.schema import Table
from sqlalchemy.testing.util import gc_collect
class MockError(Exception):
pass
class MockDisconnect(MockError):
pass
class MockExitIsh(BaseException):
pass
def mock_connection():
def mock_cursor():
def execute(*args, **kwargs):
if conn.explode == "execute":
raise MockDisconnect("Lost the DB connection on execute")
elif conn.explode == "interrupt":
conn.explode = "explode_no_disconnect"
raise MockExitIsh("Keyboard / greenlet / etc interruption")
elif conn.explode == "interrupt_dont_break":
conn.explode = None
raise MockExitIsh("Keyboard / greenlet / etc interruption")
elif conn.explode in (
"execute_no_disconnect",
"explode_no_disconnect",
):
raise MockError(
"something broke on execute but we didn't lose the "
"connection"
)
elif conn.explode in (
"rollback",
"rollback_no_disconnect",
"explode_no_disconnect",
):
raise MockError(
"something broke on execute but we didn't lose the "
"connection"
)
elif args and "SELECT" in args[0]:
cursor.description = [("foo", None, None, None, None, None)]
else:
return
def close():
cursor.fetchall = cursor.fetchone = Mock(
side_effect=MockError("cursor closed")
)
cursor = Mock(
execute=Mock(side_effect=execute), close=Mock(side_effect=close)
)
return cursor
def cursor():
while True:
yield mock_cursor()
def rollback():
if conn.explode == "rollback":
raise MockDisconnect("Lost the DB connection on rollback")
if conn.explode == "rollback_no_disconnect":
raise MockError(
"something broke on rollback but we didn't lose the "
"connection"
)
else:
return
def commit():
if conn.explode == "commit":
raise MockDisconnect("Lost the DB connection on commit")
elif conn.explode == "commit_no_disconnect":
raise MockError(
"something broke on commit but we didn't lose the "
"connection"
)
else:
return
conn = Mock(
rollback=Mock(side_effect=rollback),
commit=Mock(side_effect=commit),
cursor=Mock(side_effect=cursor()),
)
return conn
def MockDBAPI():
connections = []
stopped = [False]
def connect():
while True:
if stopped[0]:
raise MockDisconnect("database is stopped")
conn = mock_connection()
connections.append(conn)
yield conn
def shutdown(explode="execute", stop=False):
stopped[0] = stop
for c in connections:
c.explode = explode
def restart():
stopped[0] = False
connections[:] = []
def dispose():
stopped[0] = False
for c in connections:
c.explode = None
connections[:] = []
return Mock(
connect=Mock(side_effect=connect()),
shutdown=Mock(side_effect=shutdown),
dispose=Mock(side_effect=dispose),
restart=Mock(side_effect=restart),
paramstyle="named",
connections=connections,
Error=MockError,
)
class PrePingMockTest(fixtures.TestBase):
def setup_test(self):
self.dbapi = MockDBAPI()
def _pool_fixture(self, pre_ping, pool_kw=None):
dialect = url.make_url(
"postgresql://foo:bar@localhost/test"
).get_dialect()()
dialect.dbapi = self.dbapi
_pool = pool.QueuePool(
creator=lambda: self.dbapi.connect("foo.db"),
pre_ping=pre_ping,
dialect=dialect,
**(pool_kw if pool_kw else {})
)
dialect.is_disconnect = lambda e, conn, cursor: isinstance(
e, MockDisconnect
)
return _pool
def teardown_test(self):
self.dbapi.dispose()
def test_ping_not_on_first_connect(self):
pool = self._pool_fixture(
pre_ping=True, pool_kw=dict(pool_size=1, max_overflow=0)
)
conn = pool.connect()
dbapi_conn = conn.connection
eq_(dbapi_conn.mock_calls, [])
conn.close()
# no ping, so no cursor() call.
eq_(dbapi_conn.mock_calls, [call.rollback()])
conn = pool.connect()
is_(conn.connection, dbapi_conn)
# ping, so cursor() call.
eq_(dbapi_conn.mock_calls, [call.rollback(), call.cursor()])
conn.close()
conn = pool.connect()
is_(conn.connection, dbapi_conn)
# ping, so cursor() call.
eq_(
dbapi_conn.mock_calls,
[call.rollback(), call.cursor(), call.rollback(), call.cursor()],
)
conn.close()
def test_ping_not_on_reconnect(self):
pool = self._pool_fixture(
pre_ping=True, pool_kw=dict(pool_size=1, max_overflow=0)
)
conn = pool.connect()
dbapi_conn = conn.connection
conn_rec = conn._connection_record
eq_(dbapi_conn.mock_calls, [])
conn.close()
conn = pool.connect()
is_(conn.connection, dbapi_conn)
# ping, so cursor() call.
eq_(dbapi_conn.mock_calls, [call.rollback(), call.cursor()])
conn.invalidate()
is_(conn.connection, None)
# connect again, make sure we're on the same connection record
conn = pool.connect()
is_(conn._connection_record, conn_rec)
# no ping
dbapi_conn = conn.connection
eq_(dbapi_conn.mock_calls, [])
def test_connect_across_restart(self):
pool = self._pool_fixture(pre_ping=True)
conn = pool.connect()
stale_connection = conn.connection
conn.close()
self.dbapi.shutdown("execute")
self.dbapi.restart()
conn = pool.connect()
cursor = conn.cursor()
cursor.execute("hi")
stale_cursor = stale_connection.cursor()
assert_raises(MockDisconnect, stale_cursor.execute, "hi")
def test_raise_db_is_stopped(self):
pool = self._pool_fixture(pre_ping=True)
conn = pool.connect()
conn.close()
self.dbapi.shutdown("execute", stop=True)
assert_raises_message_context_ok(
MockDisconnect, "database is stopped", pool.connect
)
def test_waits_til_exec_wo_ping_db_is_stopped(self):
pool = self._pool_fixture(pre_ping=False)
conn = pool.connect()
conn.close()
self.dbapi.shutdown("execute", stop=True)
conn = pool.connect()
cursor = conn.cursor()
assert_raises_message(
MockDisconnect,
"Lost the DB connection on execute",
cursor.execute,
"foo",
)
def test_waits_til_exec_wo_ping_db_is_restarted(self):
pool = self._pool_fixture(pre_ping=False)
conn = pool.connect()
conn.close()
self.dbapi.shutdown("execute", stop=True)
self.dbapi.restart()
conn = pool.connect()
cursor = conn.cursor()
assert_raises_message(
MockDisconnect,
"Lost the DB connection on execute",
cursor.execute,
"foo",
)
@testing.requires.predictable_gc
def test_pre_ping_weakref_finalizer(self):
pool = self._pool_fixture(pre_ping=True)
conn = pool.connect()
old_dbapi_conn = conn.connection
conn.close()
# no cursor() because no pre ping
eq_(old_dbapi_conn.mock_calls, [call.rollback()])
conn = pool.connect()
conn.close()
# connect again, we see pre-ping
eq_(
old_dbapi_conn.mock_calls,
[call.rollback(), call.cursor(), call.rollback()],
)
self.dbapi.shutdown("execute", stop=True)
self.dbapi.restart()
conn = pool.connect()
dbapi_conn = conn.connection
del conn
gc_collect()
# new connection was reset on return appropriately
eq_(dbapi_conn.mock_calls, [call.rollback()])
# old connection was just closed - did not get an
# erroneous reset on return
eq_(
old_dbapi_conn.mock_calls,
[
call.rollback(),
call.cursor(),
call.rollback(),
call.cursor(),
call.close(),
],
)
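# Editor's note (sketch, not part of the original suite): the mock-based tests
# above emulate what ``pool_pre_ping`` does against a real engine, e.g.
#
#     engine = create_engine(
#         "postgresql://scott:tiger@localhost/test", pool_pre_ping=True
#     )
#
# On each checkout the pool issues a lightweight ping (seen in these mocks as
# the extra ``call.cursor()``) and transparently recycles the connection if
# that ping fails.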
class MockReconnectTest(fixtures.TestBase):
def setup_test(self):
self.dbapi = MockDBAPI()
self.db = testing_engine(
"postgresql://foo:bar@localhost/test",
options=dict(module=self.dbapi, _initialize=False),
)
self.mock_connect = call(
host="localhost", password="bar", user="foo", database="test"
)
# monkeypatch disconnect checker
self.db.dialect.is_disconnect = lambda e, conn, cursor: isinstance(
e, MockDisconnect
)
def teardown_test(self):
self.dbapi.dispose()
def test_reconnect(self):
"""test that an 'is_disconnect' condition will invalidate the
connection, and additionally dispose the previous connection
pool and recreate."""
# make a connection
conn = self.db.connect()
# connection works
conn.execute(select(1))
# create a second connection within the pool, which we'll ensure
# also goes away
conn2 = self.db.connect()
conn2.close()
# two connections opened total now
assert len(self.dbapi.connections) == 2
# set it to fail
self.dbapi.shutdown()
# force windows monotonic timer to definitely increment
time.sleep(0.5)
# close on DBAPI connection occurs here, as it is detected
# as invalid.
assert_raises(tsa.exc.DBAPIError, conn.execute, select(1))
# assert was invalidated
assert not conn.closed
assert conn.invalidated
# close shouldn't break
conn.close()
# ensure one connection closed...
eq_(
[c.close.mock_calls for c in self.dbapi.connections],
[[call()], []],
)
conn = self.db.connect()
eq_(
[c.close.mock_calls for c in self.dbapi.connections],
[[call()], [call()], []],
)
conn.execute(select(1))
conn.close()
eq_(
[c.close.mock_calls for c in self.dbapi.connections],
[[call()], [call()], []],
)
def test_invalidate_on_execute_trans(self):
conn = self.db.connect()
trans = conn.begin()
self.dbapi.shutdown()
assert_raises(tsa.exc.DBAPIError, conn.execute, select(1))
eq_([c.close.mock_calls for c in self.dbapi.connections], [[call()]])
assert not conn.closed
assert conn.invalidated
assert trans.is_active
assert_raises_message(
tsa.exc.PendingRollbackError,
"Can't reconnect until invalid transaction is rolled back",
conn.execute,
select(1),
)
assert trans.is_active
assert_raises_message(
tsa.exc.PendingRollbackError,
"Can't reconnect until invalid transaction is rolled back",
trans.commit,
)
# now it's inactive...
assert not trans.is_active
# but still associated with the connection
assert_raises_message(
tsa.exc.PendingRollbackError,
"Can't reconnect until invalid transaction is rolled back",
conn.execute,
select(1),
)
assert not trans.is_active
# still can't commit... error stays the same
assert_raises_message(
tsa.exc.PendingRollbackError,
"Can't reconnect until invalid transaction is rolled back",
trans.commit,
)
trans.rollback()
assert not trans.is_active
conn.execute(select(1))
assert not conn.invalidated
eq_(
[c.close.mock_calls for c in self.dbapi.connections],
[[call()], []],
)
def test_invalidate_on_commit_trans(self):
conn = self.db.connect()
trans = conn.begin()
self.dbapi.shutdown("commit")
assert_raises(tsa.exc.DBAPIError, trans.commit)
assert not conn.closed
assert conn.invalidated
assert not trans.is_active
# error stays consistent
assert_raises_message(
tsa.exc.PendingRollbackError,
"Can't reconnect until invalid transaction is rolled back",
conn.execute,
select(1),
)
assert not trans.is_active
assert_raises_message(
tsa.exc.PendingRollbackError,
"Can't reconnect until invalid transaction is rolled back",
trans.commit,
)
assert not trans.is_active
assert_raises_message(
tsa.exc.PendingRollbackError,
"Can't reconnect until invalid transaction is rolled back",
conn.execute,
select(1),
)
assert not trans.is_active
trans.rollback()
assert not trans.is_active
conn.execute(select(1))
assert not conn.invalidated
def test_commit_fails_contextmanager(self):
# this test is also performed in test/engine/test_transaction.py
# using real connections
conn = self.db.connect()
def go():
with conn.begin():
self.dbapi.shutdown("commit_no_disconnect")
assert_raises(tsa.exc.DBAPIError, go)
assert not conn.in_transaction()
def test_commit_fails_trans(self):
# this test is also performed in test/engine/test_transaction.py
# using real connections
conn = self.db.connect()
trans = conn.begin()
self.dbapi.shutdown("commit_no_disconnect")
assert_raises(tsa.exc.DBAPIError, trans.commit)
assert not conn.closed
assert not conn.invalidated
assert not trans.is_active
# error stays consistent
assert_raises_message(
tsa.exc.PendingRollbackError,
"This connection is on an inactive transaction. Please rollback",
conn.execute,
select(1),
)
assert not trans.is_active
assert_raises_message(
tsa.exc.PendingRollbackError,
"This connection is on an inactive transaction. Please rollback",
trans.commit,
)
assert not trans.is_active
assert_raises_message(
tsa.exc.PendingRollbackError,
"This connection is on an inactive transaction. Please rollback",
conn.execute,
select(1),
)
assert not trans.is_active
trans.rollback()
assert not trans.is_active
conn.execute(select(1))
assert not conn.invalidated
def test_invalidate_dont_call_finalizer(self):
conn = self.db.connect()
finalizer = mock.Mock()
conn.connection._connection_record.finalize_callback.append(finalizer)
conn.invalidate()
assert conn.invalidated
eq_(finalizer.call_count, 0)
def test_conn_reusable(self):
conn = self.db.connect()
conn.execute(select(1))
eq_(self.dbapi.connect.mock_calls, [self.mock_connect])
self.dbapi.shutdown()
assert_raises(tsa.exc.DBAPIError, conn.execute, select(1))
assert not conn.closed
assert conn.invalidated
eq_([c.close.mock_calls for c in self.dbapi.connections], [[call()]])
# test reconnects
conn.execute(select(1))
assert not conn.invalidated
eq_(
[c.close.mock_calls for c in self.dbapi.connections],
[[call()], []],
)
def test_invalidated_close(self):
conn = self.db.connect()
self.dbapi.shutdown()
assert_raises(tsa.exc.DBAPIError, conn.execute, select(1))
conn.close()
assert conn.closed
assert not conn.invalidated
assert_raises_message(
tsa.exc.ResourceClosedError,
"This Connection is closed",
conn.execute,
select(1),
)
def test_noreconnect_execute_plus_closewresult(self):
conn = self.db.connect(close_with_result=True)
self.dbapi.shutdown("execute_no_disconnect")
# raises error
assert_raises_message(
tsa.exc.DBAPIError,
"something broke on execute but we didn't lose the connection",
conn.execute,
select(1),
)
assert conn.closed
assert not conn.invalidated
def test_noreconnect_rollback_plus_closewresult(self):
conn = self.db.connect(close_with_result=True)
self.dbapi.shutdown("rollback_no_disconnect")
# raises error
with expect_warnings(
"An exception has occurred during handling .*"
"something broke on execute but we didn't lose the connection",
py2konly=True,
):
assert_raises_message(
tsa.exc.DBAPIError,
"something broke on rollback but we didn't "
"lose the connection",
conn.execute,
select(1),
)
assert conn.closed
assert not conn.invalidated
assert_raises_message(
tsa.exc.ResourceClosedError,
"This Connection is closed",
conn.execute,
select(1),
)
def test_reconnect_on_reentrant(self):
conn = self.db.connect()
conn.execute(select(1))
assert len(self.dbapi.connections) == 1
self.dbapi.shutdown("rollback")
# raises error
with expect_warnings(
"An exception has occurred during handling .*"
"something broke on execute but we didn't lose the connection",
py2konly=True,
):
assert_raises_message(
tsa.exc.DBAPIError,
"Lost the DB connection on rollback",
conn.execute,
select(1),
)
assert not conn.closed
assert conn.invalidated
def test_reconnect_on_reentrant_plus_closewresult(self):
conn = self.db.connect(close_with_result=True)
self.dbapi.shutdown("rollback")
# raises error
with expect_warnings(
"An exception has occurred during handling .*"
"something broke on execute but we didn't lose the connection",
py2konly=True,
):
assert_raises_message(
tsa.exc.DBAPIError,
"Lost the DB connection on rollback",
conn.execute,
select(1),
)
assert conn.closed
assert not conn.invalidated
assert_raises_message(
tsa.exc.ResourceClosedError,
"This Connection is closed",
conn.execute,
select(1),
)
def test_check_disconnect_no_cursor(self):
conn = self.db.connect()
result = conn.execute(select(1))
result.cursor.close()
conn.close()
assert_raises_message(
tsa.exc.DBAPIError, "cursor closed", list, result
)
def test_dialect_initialize_once(self):
from sqlalchemy.engine.url import URL
from sqlalchemy.engine.default import DefaultDialect
dbapi = self.dbapi
class MyURL(URL):
def _get_entrypoint(self):
return Dialect
def get_dialect(self):
return Dialect
class Dialect(DefaultDialect):
initialize = Mock()
engine = create_engine(MyURL.create("foo://"), module=dbapi)
engine.connect()
# note that the dispose() call replaces the old pool with a new one;
# this is to test that even though a single pool is using
# dispatch.exec_once(), by replacing the pool with a new one, the event
        # would normally fire again unless once=True is set on the original
# listen as well.
engine.dispose()
engine.connect()
eq_(Dialect.initialize.call_count, 1)
def test_dialect_initialize_retry_if_exception(self):
from sqlalchemy.engine.url import URL
from sqlalchemy.engine.default import DefaultDialect
dbapi = self.dbapi
class MyURL(URL):
def _get_entrypoint(self):
return Dialect
def get_dialect(self):
return Dialect
class Dialect(DefaultDialect):
initialize = Mock()
# note that the first_connect hook is only invoked when the pool
# makes a new DBAPI connection, and not when it checks out an existing
# connection. So there is a dependency here that if the initializer
        # raises an exception, the pool-level connection attempt also fails,
        # meaning no DBAPI connection is pooled. If the first_connect
# exception raise did not prevent the connection from being pooled,
# there could be the case where the pool could return that connection
# on a subsequent attempt without initialization having proceeded.
Dialect.initialize.side_effect = TypeError
engine = create_engine(MyURL.create("foo://"), module=dbapi)
assert_raises(TypeError, engine.connect)
eq_(Dialect.initialize.call_count, 1)
is_true(engine.pool._pool.empty())
assert_raises(TypeError, engine.connect)
eq_(Dialect.initialize.call_count, 2)
is_true(engine.pool._pool.empty())
engine.dispose()
assert_raises(TypeError, engine.connect)
eq_(Dialect.initialize.call_count, 3)
is_true(engine.pool._pool.empty())
Dialect.initialize.side_effect = None
conn = engine.connect()
eq_(Dialect.initialize.call_count, 4)
conn.close()
is_false(engine.pool._pool.empty())
conn = engine.connect()
eq_(Dialect.initialize.call_count, 4)
conn.close()
is_false(engine.pool._pool.empty())
engine.dispose()
conn = engine.connect()
eq_(Dialect.initialize.call_count, 4)
conn.close()
is_false(engine.pool._pool.empty())
def test_invalidate_conn_w_contextmanager_interrupt(self):
# test [ticket:3803]
pool = self.db.pool
conn = self.db.connect()
self.dbapi.shutdown("interrupt")
def go():
with conn.begin():
conn.execute(select(1))
assert_raises(MockExitIsh, go)
assert conn.invalidated
eq_(pool._invalidate_time, 0) # pool not invalidated
conn.execute(select(1))
assert not conn.invalidated
def test_invalidate_conn_interrupt_nodisconnect_workaround(self):
# test [ticket:3803] workaround for no disconnect on keyboard interrupt
@event.listens_for(self.db, "handle_error")
def cancel_disconnect(ctx):
ctx.is_disconnect = False
pool = self.db.pool
conn = self.db.connect()
self.dbapi.shutdown("interrupt_dont_break")
def go():
with conn.begin():
conn.execute(select(1))
assert_raises(MockExitIsh, go)
assert not conn.invalidated
eq_(pool._invalidate_time, 0) # pool not invalidated
conn.execute(select(1))
assert not conn.invalidated
def test_invalidate_conn_w_contextmanager_disconnect(self):
# test [ticket:3803] change maintains old behavior
pool = self.db.pool
conn = self.db.connect()
self.dbapi.shutdown("execute")
def go():
with conn.begin():
conn.execute(select(1))
assert_raises(exc.DBAPIError, go) # wraps a MockDisconnect
assert conn.invalidated
ne_(pool._invalidate_time, 0) # pool is invalidated
conn.execute(select(1))
assert not conn.invalidated
class CursorErrTest(fixtures.TestBase):
    # This isn't really a "reconnect" test; it's more of a generic
    # "recovery" test. Maybe this test suite should have been named
    # "test_error_recovery".
def _fixture(self, explode_on_exec, initialize):
class DBAPIError(Exception):
pass
def MockDBAPI():
def cursor():
while True:
if explode_on_exec:
yield Mock(
description=[],
close=Mock(side_effect=DBAPIError("explode")),
execute=Mock(side_effect=DBAPIError("explode")),
)
else:
yield Mock(
description=[],
close=Mock(side_effect=Exception("explode")),
)
def connect():
while True:
yield Mock(
spec=["cursor", "commit", "rollback", "close"],
cursor=Mock(side_effect=cursor()),
)
return Mock(
Error=DBAPIError,
paramstyle="qmark",
connect=Mock(side_effect=connect()),
)
dbapi = MockDBAPI()
from sqlalchemy.engine import default
url = Mock(
get_dialect=lambda: default.DefaultDialect,
_get_entrypoint=lambda: default.DefaultDialect,
_instantiate_plugins=lambda kwargs: (url, [], kwargs),
translate_connect_args=lambda: {},
query={},
)
eng = testing_engine(
url, options=dict(module=dbapi, _initialize=initialize)
)
eng.pool.logger = Mock()
def get_default_schema_name(connection):
try:
cursor = connection.connection.cursor()
connection._cursor_execute(cursor, "statement", {})
cursor.close()
except exc.DBAPIError:
util.warn("Exception attempting to detect")
eng.dialect._get_default_schema_name = get_default_schema_name
return eng
def test_cursor_explode(self):
db = self._fixture(False, False)
conn = db.connect()
result = conn.exec_driver_sql("select foo")
result.close()
conn.close()
eq_(
db.pool.logger.error.mock_calls,
[call("Error closing cursor", exc_info=True)],
)
def test_cursor_shutdown_in_initialize(self):
db = self._fixture(True, True)
assert_raises_message_context_ok(
exc.SAWarning, "Exception attempting to detect", db.connect
)
eq_(
db.pool.logger.error.mock_calls,
[call("Error closing cursor", exc_info=True)],
)
def _assert_invalidated(fn, *args):
try:
fn(*args)
assert False
except tsa.exc.DBAPIError as e:
if not e.connection_invalidated:
raise
class RealReconnectTest(fixtures.TestBase):
__backend__ = True
__requires__ = "graceful_disconnects", "ad_hoc_engines"
def setup_test(self):
self.engine = engines.reconnecting_engine()
def teardown_test(self):
self.engine.dispose()
def test_reconnect(self):
with self.engine.connect() as conn:
eq_(conn.execute(select(1)).scalar(), 1)
assert not conn.closed
self.engine.test_shutdown()
_assert_invalidated(conn.execute, select(1))
assert not conn.closed
assert conn.invalidated
assert conn.invalidated
eq_(conn.execute(select(1)).scalar(), 1)
assert not conn.invalidated
# one more time
self.engine.test_shutdown()
_assert_invalidated(conn.execute, select(1))
assert conn.invalidated
eq_(conn.execute(select(1)).scalar(), 1)
assert not conn.invalidated
@testing.requires.independent_connections
def test_multiple_invalidate(self):
c1 = self.engine.connect()
c2 = self.engine.connect()
eq_(c1.execute(select(1)).scalar(), 1)
self.engine.test_shutdown()
_assert_invalidated(c1.execute, select(1))
p2 = self.engine.pool
_assert_invalidated(c2.execute, select(1))
# pool isn't replaced
assert self.engine.pool is p2
def test_branched_invalidate_branch_to_parent(self):
with self.engine.connect() as c1:
with patch.object(self.engine.pool, "logger") as logger:
c1_branch = c1.connect()
eq_(c1_branch.execute(select(1)).scalar(), 1)
self.engine.test_shutdown()
_assert_invalidated(c1_branch.execute, select(1))
assert c1.invalidated
assert c1_branch.invalidated
c1_branch._revalidate_connection()
assert not c1.invalidated
assert not c1_branch.invalidated
assert "Invalidate connection" in logger.mock_calls[0][1][0]
def test_branched_invalidate_parent_to_branch(self):
with self.engine.connect() as c1:
c1_branch = c1.connect()
eq_(c1_branch.execute(select(1)).scalar(), 1)
self.engine.test_shutdown()
_assert_invalidated(c1.execute, select(1))
assert c1.invalidated
assert c1_branch.invalidated
c1._revalidate_connection()
assert not c1.invalidated
assert not c1_branch.invalidated
def test_branch_invalidate_state(self):
with self.engine.connect() as c1:
c1_branch = c1.connect()
eq_(c1_branch.execute(select(1)).scalar(), 1)
self.engine.test_shutdown()
_assert_invalidated(c1_branch.execute, select(1))
assert not c1_branch.closed
assert not c1_branch._still_open_and_dbapi_connection_is_valid
def test_ensure_is_disconnect_gets_connection(self):
def is_disconnect(e, conn, cursor):
# connection is still present
assert conn.connection is not None
# the error usually occurs on connection.cursor(),
            # though with MySQLdb we get a non-working cursor.
# assert cursor is None
self.engine.dialect.is_disconnect = is_disconnect
with self.engine.connect() as conn:
self.engine.test_shutdown()
with expect_warnings(
"An exception has occurred during handling .*", py2konly=True
):
assert_raises(tsa.exc.DBAPIError, conn.execute, select(1))
def test_rollback_on_invalid_plain(self):
with self.engine.connect() as conn:
trans = conn.begin()
conn.invalidate()
trans.rollback()
@testing.requires.two_phase_transactions
def test_rollback_on_invalid_twophase(self):
with self.engine.connect() as conn:
trans = conn.begin_twophase()
conn.invalidate()
trans.rollback()
@testing.requires.savepoints
def test_rollback_on_invalid_savepoint(self):
with self.engine.connect() as conn:
conn.begin()
trans2 = conn.begin_nested()
conn.invalidate()
trans2.rollback()
def test_invalidate_twice(self):
with self.engine.connect() as conn:
conn.invalidate()
conn.invalidate()
@testing.skip_if(
[lambda: util.py3k, "oracle+cx_oracle"], "Crashes on py3k+cx_oracle"
)
def test_explode_in_initializer(self):
engine = engines.testing_engine()
def broken_initialize(connection):
connection.exec_driver_sql("select fake_stuff from _fake_table")
engine.dialect.initialize = broken_initialize
# raises a DBAPIError, not an AttributeError
assert_raises(exc.DBAPIError, engine.connect)
@testing.skip_if(
[lambda: util.py3k, "oracle+cx_oracle"], "Crashes on py3k+cx_oracle"
)
def test_explode_in_initializer_disconnect(self):
engine = engines.testing_engine()
def broken_initialize(connection):
connection.exec_driver_sql("select fake_stuff from _fake_table")
engine.dialect.initialize = broken_initialize
def is_disconnect(e, conn, cursor):
return True
engine.dialect.is_disconnect = is_disconnect
# invalidate() also doesn't screw up
assert_raises(exc.DBAPIError, engine.connect)
def test_null_pool(self):
engine = engines.reconnecting_engine(
options=dict(poolclass=pool.NullPool)
)
with engine.connect() as conn:
eq_(conn.execute(select(1)).scalar(), 1)
assert not conn.closed
engine.test_shutdown()
_assert_invalidated(conn.execute, select(1))
assert not conn.closed
assert conn.invalidated
eq_(conn.execute(select(1)).scalar(), 1)
assert not conn.invalidated
def test_close(self):
with self.engine.connect() as conn:
eq_(conn.execute(select(1)).scalar(), 1)
assert not conn.closed
self.engine.test_shutdown()
_assert_invalidated(conn.execute, select(1))
with self.engine.connect() as conn:
eq_(conn.execute(select(1)).scalar(), 1)
def test_with_transaction(self):
with self.engine.connect() as conn:
trans = conn.begin()
assert trans.is_valid
eq_(conn.execute(select(1)).scalar(), 1)
assert not conn.closed
self.engine.test_shutdown()
_assert_invalidated(conn.execute, select(1))
assert not conn.closed
assert conn.invalidated
assert trans.is_active
assert not trans.is_valid
assert_raises_message(
tsa.exc.PendingRollbackError,
"Can't reconnect until invalid transaction is rolled back",
conn.execute,
select(1),
)
assert trans.is_active
assert not trans.is_valid
assert_raises_message(
tsa.exc.PendingRollbackError,
"Can't reconnect until invalid transaction is rolled back",
trans.commit,
)
# becomes inactive
assert not trans.is_active
assert not trans.is_valid
# still asks us to rollback
assert_raises_message(
tsa.exc.PendingRollbackError,
"Can't reconnect until invalid transaction is rolled back",
conn.execute,
select(1),
)
# still asks us..
assert_raises_message(
tsa.exc.PendingRollbackError,
"Can't reconnect until invalid transaction is rolled back",
trans.commit,
)
# still...it's being consistent in what it is asking.
assert_raises_message(
tsa.exc.PendingRollbackError,
"Can't reconnect until invalid transaction is rolled back",
conn.execute,
select(1),
)
# OK!
trans.rollback()
assert not trans.is_active
assert not trans.is_valid
# conn still invalid but we can reconnect
assert conn.invalidated
eq_(conn.execute(select(1)).scalar(), 1)
assert not conn.invalidated
class RecycleTest(fixtures.TestBase):
__backend__ = True
def test_basic(self):
engine = engines.reconnecting_engine()
conn = engine.connect()
eq_(conn.execute(select(1)).scalar(), 1)
conn.close()
# set the pool recycle down to 1.
# we aren't doing this inline with the
# engine create since cx_oracle takes way
        # too long to create the 1st connection and we don't
# want to build a huge delay into this test.
engine.pool._recycle = 1
# kill the DB connection
engine.test_shutdown()
# wait until past the recycle period
time.sleep(2)
# can connect, no exception
conn = engine.connect()
eq_(conn.execute(select(1)).scalar(), 1)
conn.close()
class PrePingRealTest(fixtures.TestBase):
__backend__ = True
def test_pre_ping_db_is_restarted(self):
engine = engines.reconnecting_engine(options={"pool_pre_ping": True})
conn = engine.connect()
eq_(conn.execute(select(1)).scalar(), 1)
stale_connection = conn.connection.connection
conn.close()
engine.test_shutdown()
engine.test_restart()
conn = engine.connect()
eq_(conn.execute(select(1)).scalar(), 1)
conn.close()
with expect_raises(engine.dialect.dbapi.Error, check_context=False):
curs = stale_connection.cursor()
curs.execute("select 1")
def test_pre_ping_db_stays_shutdown(self):
engine = engines.reconnecting_engine(options={"pool_pre_ping": True})
if isinstance(engine.pool, pool.QueuePool):
eq_(engine.pool.checkedin(), 0)
eq_(engine.pool._overflow, -5)
conn = engine.connect()
eq_(conn.execute(select(1)).scalar(), 1)
conn.close()
if isinstance(engine.pool, pool.QueuePool):
eq_(engine.pool.checkedin(), 1)
eq_(engine.pool._overflow, -4)
engine.test_shutdown(stop=True)
assert_raises(exc.DBAPIError, engine.connect)
if isinstance(engine.pool, pool.QueuePool):
eq_(engine.pool.checkedin(), 1)
eq_(engine.pool._overflow, -4)
class InvalidateDuringResultTest(fixtures.TestBase):
__backend__ = True
def setup_test(self):
self.engine = engines.reconnecting_engine()
self.meta = MetaData()
table = Table(
"sometable",
self.meta,
Column("id", Integer, primary_key=True),
Column("name", String(50)),
)
with self.engine.begin() as conn:
self.meta.create_all(conn)
conn.execute(
table.insert(),
[{"id": i, "name": "row %d" % i} for i in range(1, 100)],
)
def teardown_test(self):
with self.engine.begin() as conn:
self.meta.drop_all(conn)
self.engine.dispose()
@testing.crashes(
"oracle",
"cx_oracle 6 doesn't allow a close like this due to open cursors",
)
@testing.fails_if(
[
"+mariadbconnector",
"+mysqlconnector",
"+mysqldb",
"+cymysql",
"+pymysql",
"+pg8000",
"+asyncpg",
"+aiosqlite",
"+aiomysql",
],
"Buffers the result set and doesn't check for connection close",
)
def test_invalidate_on_results(self):
conn = self.engine.connect()
result = conn.exec_driver_sql("select * from sometable")
for x in range(20):
result.fetchone()
self.engine.test_shutdown()
try:
_assert_invalidated(result.fetchone)
assert conn.invalidated
finally:
conn.invalidate()
class ReconnectRecipeTest(fixtures.TestBase):
"""Test for the reconnect recipe given at doc/build/faq/connections.rst.
Make sure the above document is updated if changes are made here.
"""
# this recipe works on PostgreSQL also but only if the connection
# is cut off from the server side, otherwise the connection.cursor()
# method rightly fails because we explicitly closed the connection.
# since we don't have a fixture
# that can do this we currently rely on the MySQL drivers that allow
# us to call cursor() even when the connection were closed. In order
# to get a real "cut the server off" kind of fixture we'd need to do
# something in provisioning that seeks out the TCP connection at the
# OS level and kills it.
__only_on__ = ("mysql+mysqldb", "mysql+pymysql")
future = False
def make_engine(self, engine):
num_retries = 3
retry_interval = 0.5
def _run_with_retries(fn, context, cursor, statement, *arg, **kw):
for retry in range(num_retries + 1):
try:
fn(cursor, statement, context=context, *arg)
except engine.dialect.dbapi.Error as raw_dbapi_err:
connection = context.root_connection
if engine.dialect.is_disconnect(
raw_dbapi_err, connection, cursor
):
if retry > num_retries:
raise
engine.logger.error(
"disconnection error, retrying operation",
exc_info=True,
)
connection.invalidate()
if self.future:
connection.rollback()
else:
trans = connection.get_transaction()
if trans:
trans.rollback()
time.sleep(retry_interval)
context.cursor = (
cursor
) = connection.connection.cursor()
else:
raise
else:
return True
e = engine.execution_options(isolation_level="AUTOCOMMIT")
@event.listens_for(e, "do_execute_no_params")
def do_execute_no_params(cursor, statement, context):
return _run_with_retries(
context.dialect.do_execute_no_params,
context,
cursor,
statement,
)
@event.listens_for(e, "do_execute")
def do_execute(cursor, statement, parameters, context):
return _run_with_retries(
context.dialect.do_execute,
context,
cursor,
statement,
parameters,
)
return e
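    # Editor's sketch (hedged): the wrapped engine returned above is used like any
    # other Engine; the retry logic only engages when the DBAPI raises a
    # disconnect error during execute, e.g.
    #
    #     engine = self.make_engine(engines.reconnecting_engine())
    #     with engine.connect() as conn:
    #         conn.execute(select(1))  # transparently retried on disconnect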
__backend__ = True
def setup_test(self):
self.engine = engines.reconnecting_engine(
options=dict(future=self.future)
)
self.meta = MetaData()
self.table = Table(
"sometable",
self.meta,
Column("id", Integer, primary_key=True),
Column("name", String(50)),
)
self.meta.create_all(self.engine)
def teardown_test(self):
self.meta.drop_all(self.engine)
self.engine.dispose()
def test_restart_on_execute_no_txn(self):
engine = self.make_engine(self.engine)
with engine.connect() as conn:
eq_(conn.execute(select(1)).scalar(), 1)
self.engine.test_shutdown()
self.engine.test_restart()
eq_(conn.execute(select(1)).scalar(), 1)
def test_restart_on_execute_txn(self):
engine = self.make_engine(self.engine)
with engine.begin() as conn:
eq_(conn.execute(select(1)).scalar(), 1)
self.engine.test_shutdown()
self.engine.test_restart()
eq_(conn.execute(select(1)).scalar(), 1)
def test_autocommits_txn(self):
engine = self.make_engine(self.engine)
with engine.begin() as conn:
conn.execute(
self.table.insert(),
[
{"id": 1, "name": "some name 1"},
{"id": 2, "name": "some name 2"},
{"id": 3, "name": "some name 3"},
],
)
self.engine.test_shutdown()
self.engine.test_restart()
eq_(
conn.execute(
select(self.table).order_by(self.table.c.id)
).fetchall(),
[(1, "some name 1"), (2, "some name 2"), (3, "some name 3")],
)
def test_fail_on_executemany_txn(self):
engine = self.make_engine(self.engine)
with engine.begin() as conn:
conn.execute(
self.table.insert(),
[
{"id": 1, "name": "some name 1"},
{"id": 2, "name": "some name 2"},
{"id": 3, "name": "some name 3"},
],
)
self.engine.test_shutdown()
self.engine.test_restart()
assert_raises(
exc.DBAPIError,
conn.execute,
self.table.insert(),
[
{"id": 4, "name": "some name 4"},
{"id": 5, "name": "some name 5"},
{"id": 6, "name": "some name 6"},
],
)
if self.future:
conn.rollback()
else:
trans = conn.get_transaction()
trans.rollback()
class FutureReconnectRecipeTest(ReconnectRecipeTest):
future = True
|
the-stack_106_20481
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import contextlib
import io
import os
import subprocess
import tempfile
import unittest
from argparse import Namespace
from datetime import datetime, time, timedelta
from unittest.mock import MagicMock
import pytz
import airflow.bin.cli as cli
from airflow import AirflowException, models, settings
from airflow.cli.commands import dag_command
from airflow.models import DagModel
from airflow.settings import Session
from airflow.utils import timezone
from airflow.utils.state import State
from tests.compat import mock
dag_folder_path = '/'.join(os.path.realpath(__file__).split('/')[:-1])
DEFAULT_DATE = timezone.make_aware(datetime(2015, 1, 1))
TEST_DAG_FOLDER = os.path.join(
os.path.dirname(dag_folder_path), 'dags')
TEST_DAG_ID = 'unit_tests'
def create_mock_args( # pylint: disable=too-many-arguments
task_id,
dag_id,
subdir,
execution_date,
task_params=None,
dry_run=False,
queue=None,
pool=None,
priority_weight_total=None,
retries=0,
local=True,
mark_success=False,
ignore_all_dependencies=False,
ignore_depends_on_past=False,
ignore_dependencies=False,
force=False,
run_as_user=None,
executor_config=None,
cfg_path=None,
pickle=None,
raw=None,
interactive=None,
):
if executor_config is None:
executor_config = {}
args = MagicMock(spec=Namespace)
args.task_id = task_id
args.dag_id = dag_id
args.subdir = subdir
args.task_params = task_params
args.execution_date = execution_date
args.dry_run = dry_run
args.queue = queue
args.pool = pool
args.priority_weight_total = priority_weight_total
args.retries = retries
args.local = local
args.run_as_user = run_as_user
args.executor_config = executor_config
args.cfg_path = cfg_path
args.pickle = pickle
args.raw = raw
args.mark_success = mark_success
args.ignore_all_dependencies = ignore_all_dependencies
args.ignore_depends_on_past = ignore_depends_on_past
args.ignore_dependencies = ignore_dependencies
args.force = force
args.interactive = interactive
return args
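# Editor's example (sketch; argument values are hypothetical and simply mirror
# the signature above):
#
#     args = create_mock_args(
#         task_id="runme_0",
#         dag_id="example_bash_operator",
#         subdir=None,
#         execution_date=DEFAULT_DATE,
#     )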
EXAMPLE_DAGS_FOLDER = os.path.join(
os.path.dirname(
os.path.dirname(
os.path.dirname(os.path.realpath(__file__))
)
),
"airflow/example_dags"
)
class TestCliDags(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.dagbag = models.DagBag(include_examples=True)
cls.parser = cli.CLIFactory.get_parser()
@mock.patch("airflow.cli.commands.dag_command.DAG.run")
def test_backfill(self, mock_run):
dag_command.dag_backfill(self.parser.parse_args([
'dags', 'backfill', 'example_bash_operator',
'-s', DEFAULT_DATE.isoformat()]))
mock_run.assert_called_once_with(
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
conf=None,
delay_on_limit_secs=1.0,
donot_pickle=False,
ignore_first_depends_on_past=False,
ignore_task_deps=False,
local=False,
mark_success=False,
pool=None,
rerun_failed_tasks=False,
run_backwards=False,
verbose=False,
)
mock_run.reset_mock()
dag = self.dagbag.get_dag('example_bash_operator')
with contextlib.redirect_stdout(io.StringIO()) as stdout:
dag_command.dag_backfill(self.parser.parse_args([
'dags', 'backfill', 'example_bash_operator', '-t', 'runme_0', '--dry_run',
'-s', DEFAULT_DATE.isoformat()]), dag=dag)
output = stdout.getvalue()
self.assertIn("Dry run of DAG example_bash_operator on {}\n".format(DEFAULT_DATE.isoformat()), output)
self.assertIn("Task runme_0\n".format(DEFAULT_DATE.isoformat()), output)
mock_run.assert_not_called() # Dry run shouldn't run the backfill
dag_command.dag_backfill(self.parser.parse_args([
'dags', 'backfill', 'example_bash_operator', '--dry_run',
'-s', DEFAULT_DATE.isoformat()]), dag=dag)
mock_run.assert_not_called() # Dry run shouldn't run the backfill
dag_command.dag_backfill(self.parser.parse_args([
'dags', 'backfill', 'example_bash_operator', '-l',
'-s', DEFAULT_DATE.isoformat()]), dag=dag)
mock_run.assert_called_once_with(
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
conf=None,
delay_on_limit_secs=1.0,
donot_pickle=False,
ignore_first_depends_on_past=False,
ignore_task_deps=False,
local=True,
mark_success=False,
pool=None,
rerun_failed_tasks=False,
run_backwards=False,
verbose=False,
)
mock_run.reset_mock()
def test_show_dag_print(self):
with contextlib.redirect_stdout(io.StringIO()) as temp_stdout:
dag_command.dag_show(self.parser.parse_args([
'dags', 'show', 'example_bash_operator']))
out = temp_stdout.getvalue()
self.assertIn("label=example_bash_operator", out)
self.assertIn("graph [label=example_bash_operator labelloc=t rankdir=LR]", out)
self.assertIn("runme_2 -> run_after_loop", out)
@mock.patch("airflow.cli.commands.dag_command.render_dag")
    def test_show_dag_save(self, mock_render_dag):
with contextlib.redirect_stdout(io.StringIO()) as temp_stdout:
dag_command.dag_show(self.parser.parse_args([
'dags', 'show', 'example_bash_operator', '--save', 'awesome.png']
))
out = temp_stdout.getvalue()
mock_render_dag.return_value.render.assert_called_once_with(
cleanup=True, filename='awesome', format='png'
)
self.assertIn("File awesome.png saved", out)
@mock.patch("airflow.cli.commands.dag_command.subprocess.Popen")
@mock.patch("airflow.cli.commands.dag_command.render_dag")
def test_show_dag_imgcat(self, mock_render_dag, mock_popen):
mock_render_dag.return_value.pipe.return_value = b"DOT_DATA"
mock_popen.return_value.communicate.return_value = (b"OUT", b"ERR")
with contextlib.redirect_stdout(io.StringIO()) as temp_stdout:
dag_command.dag_show(self.parser.parse_args([
'dags', 'show', 'example_bash_operator', '--imgcat']
))
out = temp_stdout.getvalue()
mock_render_dag.return_value.pipe.assert_called_once_with(format='png')
mock_popen.return_value.communicate.assert_called_once_with(b'DOT_DATA')
self.assertIn("OUT", out)
self.assertIn("ERR", out)
@mock.patch("airflow.cli.commands.dag_command.DAG.run")
def test_cli_backfill_depends_on_past(self, mock_run):
"""
        Test that the CLI respects the -I argument.
        We only check that dag.run() is called correctly; the behaviour of that
        kwarg is tested in test_jobs.
"""
dag_id = 'test_dagrun_states_deadlock'
run_date = DEFAULT_DATE + timedelta(days=1)
args = [
'dags',
'backfill',
dag_id,
'-l',
'-s',
run_date.isoformat(),
'-I',
]
dag = self.dagbag.get_dag(dag_id)
dag_command.dag_backfill(self.parser.parse_args(args), dag=dag)
mock_run.assert_called_once_with(
start_date=run_date,
end_date=run_date,
conf=None,
delay_on_limit_secs=1.0,
donot_pickle=False,
ignore_first_depends_on_past=True,
ignore_task_deps=False,
local=True,
mark_success=False,
pool=None,
rerun_failed_tasks=False,
run_backwards=False,
verbose=False,
)
@mock.patch("airflow.cli.commands.dag_command.DAG.run")
def test_cli_backfill_depends_on_past_backwards(self, mock_run):
"""
Test that CLI respects -B argument and raises on interaction with depends_on_past
"""
dag_id = 'test_depends_on_past'
start_date = DEFAULT_DATE + timedelta(days=1)
end_date = start_date + timedelta(days=1)
args = [
'dags',
'backfill',
dag_id,
'-l',
'-s',
start_date.isoformat(),
'-e',
end_date.isoformat(),
'-I',
'-B',
]
dag = self.dagbag.get_dag(dag_id)
dag_command.dag_backfill(self.parser.parse_args(args), dag=dag)
mock_run.assert_called_once_with(
start_date=start_date,
end_date=end_date,
conf=None,
delay_on_limit_secs=1.0,
donot_pickle=False,
ignore_first_depends_on_past=True,
ignore_task_deps=False,
local=True,
mark_success=False,
pool=None,
rerun_failed_tasks=False,
run_backwards=True,
verbose=False,
)
def test_next_execution(self):
# A scaffolding function
def reset_dr_db(dag_id):
session = Session()
dr = session.query(models.DagRun).filter_by(dag_id=dag_id)
dr.delete()
session.commit()
session.close()
dag_ids = ['example_bash_operator', # schedule_interval is '0 0 * * *'
'latest_only', # schedule_interval is timedelta(hours=4)
'example_python_operator', # schedule_interval=None
'example_xcom'] # schedule_interval="@once"
# The details below is determined by the schedule_interval of example DAGs
now = timezone.utcnow()
next_execution_time_for_dag1 = pytz.utc.localize(
datetime.combine(
now.date() + timedelta(days=1),
time(0)
)
)
next_execution_time_for_dag2 = now + timedelta(hours=4)
expected_output = [str(next_execution_time_for_dag1),
str(next_execution_time_for_dag2),
"None",
"None"]
for i in range(len(dag_ids)): # pylint: disable=consider-using-enumerate
dag_id = dag_ids[i]
            # Clear dag runs so there is no execution history for each DAG
reset_dr_db(dag_id)
proc = subprocess.Popen(["airflow", "dags", "next_execution", dag_id,
"--subdir", EXAMPLE_DAGS_FOLDER],
stdout=subprocess.PIPE)
proc.wait()
stdout = []
for line in proc.stdout:
stdout.append(str(line.decode("utf-8").rstrip()))
            # The `next_execution` command is inapplicable if no execution record is found;
            # it prints `None` in such cases.
self.assertEqual(stdout[-1], "None")
dag = self.dagbag.dags[dag_id]
# Create a DagRun for each DAG, to prepare for next step
dag.create_dagrun(
run_id='manual__' + now.isoformat(),
execution_date=now,
start_date=now,
state=State.FAILED
)
proc = subprocess.Popen(["airflow", "dags", "next_execution", dag_id,
"--subdir", EXAMPLE_DAGS_FOLDER],
stdout=subprocess.PIPE)
proc.wait()
stdout = []
for line in proc.stdout:
stdout.append(str(line.decode("utf-8").rstrip()))
self.assertEqual(stdout[-1], expected_output[i])
reset_dr_db(dag_id)
def test_cli_list_dags(self):
args = self.parser.parse_args(['dags', 'list', '--report'])
dag_command.dag_list_dags(args)
def test_cli_list_dag_runs(self):
dag_command.dag_trigger(self.parser.parse_args([
'dags', 'trigger', 'example_bash_operator', ]))
args = self.parser.parse_args(['dags', 'list_runs',
'example_bash_operator',
'--no_backfill'])
dag_command.dag_list_dag_runs(args)
def test_cli_list_jobs_with_args(self):
args = self.parser.parse_args(['dags', 'list_jobs', '--dag_id',
'example_bash_operator',
'--state', 'success',
'--limit', '100',
'--output', 'tsv'])
dag_command.dag_list_jobs(args)
def test_pause(self):
args = self.parser.parse_args([
'dags', 'pause', 'example_bash_operator'])
dag_command.dag_pause(args)
self.assertIn(self.dagbag.dags['example_bash_operator'].is_paused, [True, 1])
args = self.parser.parse_args([
'dags', 'unpause', 'example_bash_operator'])
dag_command.dag_unpause(args)
self.assertIn(self.dagbag.dags['example_bash_operator'].is_paused, [False, 0])
def test_trigger_dag(self):
dag_command.dag_trigger(self.parser.parse_args([
'dags', 'trigger', 'example_bash_operator',
'-c', '{"foo": "bar"}']))
self.assertRaises(
ValueError,
dag_command.dag_trigger,
self.parser.parse_args([
'dags', 'trigger', 'example_bash_operator',
'--run_id', 'trigger_dag_xxx',
'-c', 'NOT JSON'])
)
def test_delete_dag(self):
DM = DagModel
key = "my_dag_id"
session = settings.Session()
session.add(DM(dag_id=key))
session.commit()
dag_command.dag_delete(self.parser.parse_args([
'dags', 'delete', key, '--yes']))
self.assertEqual(session.query(DM).filter_by(dag_id=key).count(), 0)
self.assertRaises(
AirflowException,
dag_command.dag_delete,
self.parser.parse_args([
'dags', 'delete',
'does_not_exist_dag',
'--yes'])
)
def test_delete_dag_existing_file(self):
# Test to check that the DAG should be deleted even if
# the file containing it is not deleted
DM = DagModel
key = "my_dag_id"
session = settings.Session()
with tempfile.NamedTemporaryFile() as f:
session.add(DM(dag_id=key, fileloc=f.name))
session.commit()
dag_command.dag_delete(self.parser.parse_args([
'dags', 'delete', key, '--yes']))
self.assertEqual(session.query(DM).filter_by(dag_id=key).count(), 0)
def test_cli_list_jobs(self):
args = self.parser.parse_args(['dags', 'list_jobs'])
dag_command.dag_list_jobs(args)
def test_dag_state(self):
self.assertEqual(None, dag_command.dag_state(self.parser.parse_args([
'dags', 'state', 'example_bash_operator', DEFAULT_DATE.isoformat()])))
|
the-stack_106_20482
|
import argparse
import logging
import os
import os.path as osp
import subprocess
import time
import torch
import torch.utils.data as data
import torchvision.transforms.functional as F
from PIL import Image
from tqdm import tqdm
from trainer.ExtensibleTrainer import ExtensibleTrainer
from utils import options as option
import utils.util as util
from data import create_dataloader
class FfmpegBackedVideoDataset(data.Dataset):
'''Pulls frames from a video one at a time using FFMPEG.'''
def __init__(self, opt, working_dir):
super(FfmpegBackedVideoDataset, self).__init__()
self.opt = opt
self.video = self.opt['video_file']
self.working_dir = working_dir
self.frame_rate = self.opt['frame_rate']
self.start_at = self.opt['start_at_seconds']
self.end_at = self.opt['end_at_seconds']
self.force_multiple = self.opt['force_multiple']
self.frame_count = (self.end_at - self.start_at) * self.frame_rate
# The number of (original) video frames that will be stored on the filesystem at a time.
self.max_working_files = 20
self.data_type = self.opt['data_type']
self.vertical_splits = self.opt['vertical_splits'] if 'vertical_splits' in opt.keys() else 1
def get_time_for_it(self, it):
secs = it / self.frame_rate + self.start_at
mins = int(secs / 60)
hours = int(mins / 60)
secs = secs - (mins * 60) - (hours * 3600)
mins = mins % 60
return '%02d:%02d:%06.3f' % (hours, mins, secs)
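    # Editor's worked example (sketch): with frame_rate=30 and start_at_seconds=10,
    # get_time_for_it(45) computes 45 / 30 + 10 = 11.5 seconds and returns the
    # ffmpeg-style timestamp '00:00:11.500'.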
def __getitem__(self, index):
if self.vertical_splits > 0:
actual_index = int(index / self.vertical_splits)
else:
actual_index = index
# Extract the frame. Command template: `ffmpeg -ss 17:00.0323 -i <video file>.mp4 -vframes 1 destination.png`
working_file_name = osp.join(self.working_dir, "working_%d.png" % (actual_index % self.max_working_files,))
vid_time = self.get_time_for_it(actual_index)
ffmpeg_args = ['ffmpeg', '-y', '-ss', vid_time, '-i', self.video, '-vframes', '1', working_file_name]
process = subprocess.Popen(ffmpeg_args, stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL)
process.wait()
# get LQ image
LQ_path = working_file_name
img_LQ = Image.open(LQ_path)
split_index = (index % self.vertical_splits)
if self.vertical_splits > 0:
w, h = img_LQ.size
w_per_split = int(w / self.vertical_splits)
left = w_per_split * split_index
img_LQ = F.crop(img_LQ, 0, left, h, w_per_split)
img_LQ = F.to_tensor(img_LQ)
mask = torch.ones(1, img_LQ.shape[1], img_LQ.shape[2])
ref = torch.cat([img_LQ, mask], dim=0)
if self.force_multiple > 1:
assert self.vertical_splits <= 1 # This is not compatible with vertical splits for now.
c, h, w = img_LQ.shape
h_, w_ = h, w
height_removed = h % self.force_multiple
width_removed = w % self.force_multiple
if height_removed != 0:
h_ = self.force_multiple * ((h // self.force_multiple) + 1)
if width_removed != 0:
w_ = self.force_multiple * ((w // self.force_multiple) + 1)
lq_template = torch.zeros(c,h_,w_)
lq_template[:,:h,:w] = img_LQ
ref_template = torch.zeros(c,h_,w_)
ref_template[:,:h,:w] = img_LQ
img_LQ = lq_template
ref = ref_template
return {'lq': img_LQ, 'lq_fullsize_ref': ref,
'lq_center': torch.tensor([img_LQ.shape[1] // 2, img_LQ.shape[2] // 2], dtype=torch.long) }
def __len__(self):
return self.frame_count * self.vertical_splits
def merge_images(files, output_path):
"""Merges several image files together across the vertical axis
"""
images = [Image.open(f) for f in files]
w, h = images[0].size
result_width = w * len(images)
result_height = h
result = Image.new('RGB', (result_width, result_height))
for i in range(len(images)):
result.paste(im=images[i], box=(i * w, 0))
result.save(output_path)
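# Editor's usage sketch (hypothetical file names): re-join two vertical strips
# into a single frame.
#
#     merge_images(["strip_left.png", "strip_right.png"], "joined.png")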
if __name__ == "__main__":
#### options
torch.backends.cudnn.benchmark = True
want_just_images = True
parser = argparse.ArgumentParser()
parser.add_argument('-opt', type=str, help='Path to options YMAL file.', default='../options/use_video_upsample.yml')
opt = option.parse(parser.parse_args().opt, is_train=False)
opt = option.dict_to_nonedict(opt)
util.mkdirs(
(path for key, path in opt['path'].items()
if not key == 'experiments_root' and 'pretrain_model' not in key and 'resume' not in key))
util.setup_logger('base', opt['path']['log'], 'test_' + opt['name'], level=logging.INFO,
screen=True, tofile=True)
logger = logging.getLogger('base')
logger.info(option.dict2str(opt))
util.loaded_options = opt
#### Create test dataset and dataloader
test_loaders = []
test_set = FfmpegBackedVideoDataset(opt['dataset'], opt['path']['results_root'])
test_loader = create_dataloader(test_set, opt['dataset'])
logger.info('Number of test images in [{:s}]: {:d}'.format(opt['dataset']['name'], len(test_set)))
test_loaders.append(test_loader)
model = ExtensibleTrainer(opt)
test_set_name = test_loader.dataset.opt['name']
logger.info('\nTesting [{:s}]...'.format(test_set_name))
test_start_time = time.time()
dataset_dir = osp.join(opt['path']['results_root'], test_set_name)
util.mkdir(dataset_dir)
frame_counter = 0
frames_per_vid = opt['frames_per_mini_vid']
minivid_crf = opt['minivid_crf']
vid_output = opt['mini_vid_output_folder'] if 'mini_vid_output_folder' in opt.keys() else dataset_dir
vid_counter = opt['minivid_start_no'] if 'minivid_start_no' in opt.keys() else 0
img_index = opt['generator_img_index']
recurrent_mode = opt['recurrent_mode']
if recurrent_mode:
assert opt['dataset']['batch_size'] == 1 # Can only do 1 frame at a time in recurrent mode, by definition.
scale = opt['scale']
first_frame = True
ffmpeg_proc = None
tq = tqdm(test_loader)
for data in tq:
need_GT = False if test_loader.dataset.opt['dataroot_GT'] is None else True
if recurrent_mode and first_frame:
b, c, h, w = data['lq'].shape
recurrent_entry = torch.zeros((b,c,h*scale,w*scale), device=data['lq'].device)
# Optionally swap out the 'generator' for the first frame to create a better image that the recurrent generator works off of.
if 'recurrent_hr_generator' in opt.keys():
recurrent_gen = model.env['generators']['generator']
model.env['generators']['generator'] = model.env['generators'][opt['recurrent_hr_generator']]
first_frame = False
if recurrent_mode:
data['recurrent'] = recurrent_entry
model.feed_data(data, need_GT=need_GT)
model.test()
visuals = model.get_current_visuals()['rlt']
if recurrent_mode:
recurrent_entry = visuals
visuals = visuals.cpu().float()
for i in range(visuals.shape[0]):
sr_img = util.tensor2img(visuals[i]) # uint8
# save images
save_img_path = osp.join(dataset_dir, '%08d.png' % (frame_counter,))
util.save_img(sr_img, save_img_path)
frame_counter += 1
if frame_counter % frames_per_vid == 0:
if ffmpeg_proc is not None:
print("Waiting for last encode..")
ffmpeg_proc.wait()
print("Encoding minivid %d.." % (vid_counter,))
# Perform stitching.
num_splits = opt['dataset']['vertical_splits'] if 'vertical_splits' in opt['dataset'].keys() else 1
if num_splits > 1:
procs = []
src_imgs_path = osp.join(dataset_dir, "joined")
os.makedirs(src_imgs_path, exist_ok=True)
for i in range(int(frames_per_vid / num_splits)):
to_join = [osp.join(dataset_dir, "%08d.png" % (j,)) for j in range(i * num_splits, i * num_splits + num_splits)]
merge_images(to_join, osp.join(src_imgs_path, "%08d.png" % (i,)))
else:
src_imgs_path = dataset_dir
# Encoding command line:
# ffmpeg -framerate 30 -i %08d.png -c:v libx265 -crf 12 -preset slow -pix_fmt yuv444p test.mkv
cmd = ['ffmpeg', '-y', '-framerate', str(opt['dataset']['frame_rate']), '-f', 'image2', '-i', osp.join(src_imgs_path, "%08d.png"),
'-c:v', 'libx265', '-crf', str(minivid_crf), '-preset', 'slow', '-pix_fmt', 'yuv444p', osp.join(vid_output, "mini_%06d.mkv" % (vid_counter,))]
print(ffmpeg_proc)
ffmpeg_proc = subprocess.Popen(cmd)#, stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL)
vid_counter += 1
frame_counter = 0
print("Done.")
if want_just_images:
continue
|
the-stack_106_20483
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import re
from collections import OrderedDict
from typing import Any, Callable, Dict, List, MutableMapping, Optional, Tuple, Union
import gpytorch
import numpy as np
import torch
from ax.core.search_space import SearchSpaceDigest
from ax.core.types import TCandidateMetadata, TConfig, TGenMetadata
from ax.models.random.alebo_initializer import ALEBOInitializer
from ax.models.torch.botorch import BotorchModel
from ax.models.torch.botorch_defaults import get_NEI
from ax.models.torch_base import TorchModel
from ax.utils.common.docutils import copy_doc
from ax.utils.common.logger import get_logger
from botorch.acquisition.acquisition import AcquisitionFunction
from botorch.acquisition.analytic import ExpectedImprovement
from botorch.models.gp_regression import FixedNoiseGP
from botorch.models.gpytorch import GPyTorchModel
from botorch.models.model_list_gp_regression import ModelListGP
from botorch.optim.fit import fit_gpytorch_scipy
from botorch.optim.initializers import initialize_q_batch_nonneg
from botorch.optim.numpy_converter import module_to_array
from botorch.optim.optimize import optimize_acqf
from botorch.optim.utils import _scipy_objective_and_grad
from botorch.posteriors.gpytorch import GPyTorchPosterior
from gpytorch.distributions.multivariate_normal import MultivariateNormal
from gpytorch.kernels.kernel import Kernel
from gpytorch.kernels.rbf_kernel import postprocess_rbf
from gpytorch.kernels.scale_kernel import ScaleKernel
from gpytorch.mlls.exact_marginal_log_likelihood import ExactMarginalLogLikelihood
from scipy.optimize import approx_fprime
from torch import Tensor
logger = get_logger(__name__)
class ALEBOKernel(Kernel):
"""The kernel for ALEBO.
Suppose there exists an ARD RBF GP on an (unknown) linear embedding with
projection matrix A. We make function evaluations in a different linear
embedding with projection matrix B (known). This is the appropriate kernel
for fitting those data.
This kernel computes a Mahalanobis distance, and the (d x d) PD distance
matrix Gamma is a parameter that must be fit. This is done by fitting its
upper Cholesky decomposition, U.
Args:
B: (d x D) Projection matrix.
batch_shape: Batch shape as usual for gpytorch kernels.
"""
def __init__(self, B: Tensor, batch_shape: torch.Size) -> None:
super().__init__(
has_lengthscale=False, ard_num_dims=None, eps=0.0, batch_shape=batch_shape
)
self.d, D = B.shape
assert self.d < D
self.B = B
# Initialize U
Arnd = torch.randn(D, D, dtype=B.dtype, device=B.device)
Arnd = torch.qr(Arnd)[0]
ABinv = Arnd[: self.d, :] @ torch.pinverse(B)
# U is the upper Cholesky decomposition of Gamma, the Mahalanobis
# matrix. Uvec is the upper triangular portion of U squeezed out into
# a vector.
U = torch.cholesky(torch.mm(ABinv.t(), ABinv), upper=True)
self.triu_indx = torch.triu_indices(self.d, self.d, device=B.device)
Uvec = U[self.triu_indx.tolist()].repeat(*batch_shape, 1)
self.register_parameter(name="Uvec", parameter=torch.nn.Parameter(Uvec))
def forward(
self,
x1: Tensor,
x2: Tensor,
diag: bool = False,
last_dim_is_batch: bool = False,
**params: Any,
) -> Tensor:
"""Compute kernel distance."""
# Unpack Uvec into an upper triangular matrix U
shapeU = self.Uvec.shape[:-1] + torch.Size([self.d, self.d])
U_t = torch.zeros(shapeU, dtype=self.B.dtype, device=self.B.device)
U_t[..., self.triu_indx[1], self.triu_indx[0]] = self.Uvec
# Compute kernel distance
z1 = torch.matmul(x1, U_t)
z2 = torch.matmul(x2, U_t)
return self.covar_dist(
z1,
z2,
square_dist=True,
diag=diag,
dist_postprocess_func=postprocess_rbf,
postprocess=True,
**params,
)
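# Editor's illustrative sketch (not part of the original module): exercises the
# kernel above on random data. The projection matrix, sizes, and the helper name
# are hypothetical; inputs are assumed to already live in the d-dim embedding.
def _alebo_kernel_example(d: int = 2, D: int = 10, n: int = 5) -> Tensor:
    """Return the (n x n) RBF-of-Mahalanobis matrix for n random embedded points."""
    B = torch.randn(d, D)  # hypothetical (d x D) projection matrix
    X = torch.randn(n, d)  # n points already in the d-dimensional embedding
    kern = ALEBOKernel(B=B, batch_shape=torch.Size([]))
    # Call forward() directly for a plain tensor, bypassing gpytorch's lazy wrapper.
    return kern.forward(X, X)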
class ALEBOGP(FixedNoiseGP):
"""The GP for ALEBO.
Uses the Mahalanobis kernel defined in ALEBOKernel, along with a
ScaleKernel to add a kernel variance and a fitted constant mean.
In non-batch mode, there is a single kernel that produces MVN predictions
as usual for a GP.
With b batches, each batch has its own set of kernel hyperparameters and
each batch represents a sample from the hyperparameter posterior
distribution. When making a prediction (with `__call__`), these samples are
integrated over using moment matching. So, the predictions are an MVN as
usual with the same shape as in non-batch mode.
Args:
B: (d x D) Projection matrix.
train_X: (n x d) X training data.
train_Y: (n x 1) Y training data.
train_Yvar: (n x 1) Noise variances of each training Y.
"""
def __init__(
self, B: Tensor, train_X: Tensor, train_Y: Tensor, train_Yvar: Tensor
) -> None:
super().__init__(train_X=train_X, train_Y=train_Y, train_Yvar=train_Yvar)
self.covar_module = ScaleKernel(
base_kernel=ALEBOKernel(B=B, batch_shape=self._aug_batch_shape),
batch_shape=self._aug_batch_shape,
)
self.to(train_X)
def __call__(self, x: Tensor) -> MultivariateNormal:
"""
If model is non-batch, then just make a prediction. If model has
multiple batches, then these are samples from the kernel hyperparameter
posterior and we integrate over them with moment matching.
The shape of the MVN that this outputs will be the same regardless of
whether the model is batched or not.
Args:
x: Point to be predicted.
Returns: MultivariateNormal distribution of prediction.
"""
if len(self._aug_batch_shape) == 0:
return super().__call__(x)
# Else, approximately integrate over batches with moment matching.
# Take X as (b) x q x d, and expand to (b) x ns x q x d
if x.ndim > 3:
raise ValueError("Don't know how to predict this shape") # pragma: no cover
x = x.unsqueeze(-3).expand(
x.shape[:-2]
+ torch.Size([self._aug_batch_shape[0]]) # pyre-ignore
+ x.shape[-2:]
)
mvn_b = super().__call__(x)
mu = mvn_b.mean.mean(dim=-2)
C = (
mvn_b.covariance_matrix.mean(dim=-3)
+ torch.matmul(mvn_b.mean.transpose(-2, -1), mvn_b.mean)
/ mvn_b.mean.shape[-2]
- torch.matmul(mu.unsqueeze(-1), mu.unsqueeze(-2))
) # Law of Total Covariance
mvn = MultivariateNormal(mu, C)
return mvn
def posterior(
self,
X: Tensor,
output_indices: Optional[List[int]] = None,
observation_noise: Union[bool, Tensor] = False,
**kwargs: Any,
) -> GPyTorchPosterior:
assert output_indices is None
assert not observation_noise
mvn = self(X)
return GPyTorchPosterior(mvn=mvn)
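# Editor's sketch (hedged; shapes and values are hypothetical): constructing and
# querying the GP above in non-batch mode.
#
#     B = torch.randn(2, 10)                       # (d x D) projection
#     train_X = torch.randn(8, 2)                  # inputs in the embedding
#     train_Y = torch.randn(8, 1)
#     train_Yvar = torch.full_like(train_Y, 1e-4)  # observed noise variances
#     gp = ALEBOGP(B=B, train_X=train_X, train_Y=train_Y, train_Yvar=train_Yvar)
#     gp.eval()
#     posterior = gp.posterior(torch.randn(3, 2))  # GPyTorchPosterior over 3 points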
def get_fitted_model(
B: Tensor,
train_X: Tensor,
train_Y: Tensor,
train_Yvar: Tensor,
restarts: int,
nsamp: int,
init_state_dict: Optional[Dict[str, Tensor]],
) -> ALEBOGP:
"""Get a fitted ALEBO GP.
We do random restart optimization to get a MAP model, then use the Laplace
approximation to draw posterior samples of kernel hyperparameters, and
finally construct a batch-mode model where each batch is one of those
sampled sets of kernel hyperparameters.
Args:
B: Projection matrix.
train_X: X training data.
train_Y: Y training data.
train_Yvar: Noise variances of each training Y.
restarts: Number of restarts for MAP estimation.
nsamp: Number of samples to draw from kernel hyperparameter posterior.
init_state_dict: Optionally begin MAP estimation with this state dict.
Returns: Batch-mode (nsamp batches) fitted ALEBO GP.
"""
# Get MAP estimate.
mll = get_map_model(
B=B,
train_X=train_X,
train_Y=train_Y,
train_Yvar=train_Yvar,
restarts=restarts,
init_state_dict=init_state_dict,
)
# Compute Laplace approximation of posterior
Uvec_batch, mean_constant_batch, output_scale_batch = laplace_sample_U(
mll=mll, nsamp=nsamp
)
# Construct batch model with samples
m_b = get_batch_model(
B=B,
train_X=train_X,
train_Y=train_Y,
train_Yvar=train_Yvar,
Uvec_batch=Uvec_batch,
mean_constant_batch=mean_constant_batch,
output_scale_batch=output_scale_batch,
)
return m_b
def get_map_model(
B: Tensor,
train_X: Tensor,
train_Y: Tensor,
train_Yvar: Tensor,
restarts: int,
init_state_dict: Optional[Dict[str, Tensor]],
) -> ExactMarginalLogLikelihood:
"""Do random-restart optimization for MAP fitting of an ALEBO GP model.
Args:
B: Projection matrix.
train_X: X training data.
train_Y: Y training data.
train_Yvar: Noise variances of each training Y.
restarts: Number of restarts for MAP estimation.
init_state_dict: Optionally begin MAP estimation with this state dict.
Returns: non-batch ALEBO GP with MAP kernel hyperparameters.
"""
f_best = 1e8
sd_best = {}
# Fit with random restarts
for _ in range(restarts):
m = ALEBOGP(B=B, train_X=train_X, train_Y=train_Y, train_Yvar=train_Yvar)
if init_state_dict is not None:
# pyre-fixme[6]: Expected `OrderedDict[typing.Any, typing.Any]` for 1st
# param but got `Dict[str, Tensor]`.
m.load_state_dict(init_state_dict)
mll = ExactMarginalLogLikelihood(m.likelihood, m)
mll.train()
mll, info_dict = fit_gpytorch_scipy(mll, track_iterations=False, method="tnc")
logger.debug(info_dict)
# pyre-fixme[58]: `<` is not supported for operand types
# `Union[List[botorch.optim.fit.OptimizationIteration], float]` and `float`.
if info_dict["fopt"] < f_best:
f_best = float(info_dict["fopt"]) # pyre-ignore
sd_best = m.state_dict()
# Set the final value
m = ALEBOGP(B=B, train_X=train_X, train_Y=train_Y, train_Yvar=train_Yvar)
m.load_state_dict(sd_best)
mll = ExactMarginalLogLikelihood(m.likelihood, m)
return mll
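# Editor's note (sketch): fit_gpytorch_scipy minimizes the negative marginal log
# likelihood, and info_dict["fopt"] holds the final objective value, so the
# restart loop above simply keeps the state dict with the smallest "fopt".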
def laplace_sample_U(
mll: ExactMarginalLogLikelihood, nsamp: int
) -> Tuple[Tensor, Tensor, Tensor]:
"""Draw posterior samples of kernel hyperparameters using Laplace
approximation.
Only the Mahalanobis distance matrix is sampled.
The diagonal of the Hessian is estimated using finite differences of the
autograd gradients. The Laplace approximation is then N(p_map, inv(-H)).
We construct a set of nsamp kernel hyperparameters by drawing nsamp-1
values from this distribution, and prepending as the first sample the MAP
parameters.
Args:
mll: MLL object of MAP ALEBO GP.
nsamp: Number of samples to return.
Returns: Batch tensors of the kernel hyperparameters Uvec, mean constant,
and output scale.
"""
# Estimate diagonal of the Hessian
mll.train()
x0, property_dict, bounds = module_to_array(module=mll)
x0 = x0.astype(np.float64) # This is the MAP parameters
H = np.zeros((len(x0), len(x0)))
epsilon = 1e-4 + 1e-3 * np.abs(x0)
for i, _ in enumerate(x0):
# Compute gradient of df/dx_i wrt x_i
def f(x):
x_all = x0.copy()
x_all[i] = x[0]
return -_scipy_objective_and_grad(x_all, mll, property_dict)[1][i]
H[i, i] = approx_fprime(np.array([x0[i]]), f, epsilon=epsilon[i]) # pyre-ignore
# Sample only Uvec; leave mean and output scale fixed.
assert list(property_dict.keys()) == [
"model.mean_module.constant",
"model.covar_module.raw_outputscale",
"model.covar_module.base_kernel.Uvec",
]
H = H[2:, 2:]
H += np.diag(-1e-3 * np.ones(H.shape[0])) # Add a nugget for inverse stability
Sigma = np.linalg.inv(-H)
samples = np.random.multivariate_normal(mean=x0[2:], cov=Sigma, size=(nsamp - 1))
# Include the MAP estimate
samples = np.vstack((x0[2:], samples))
# Reshape
attrs = property_dict["model.covar_module.base_kernel.Uvec"]
Uvec_batch = torch.tensor(samples, dtype=attrs.dtype, device=attrs.device).reshape(
nsamp, *attrs.shape
)
# Get the other properties into batch mode
mean_constant_batch = mll.model.mean_module.constant.repeat(nsamp, 1)
output_scale_batch = mll.model.covar_module.raw_outputscale.repeat(nsamp)
return Uvec_batch, mean_constant_batch, output_scale_batch
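# Illustrative sketch (not part of the original module): the Hessian-diagonal
# estimate in laplace_sample_U differentiates the autograd gradient once more
# with scipy.optimize.approx_fprime. On a quadratic -log p(x) = 0.5 * a * x**2
# the gradient is a * x and the true second derivative is a, so the
# finite-difference estimate can be checked directly. All names here are
# hypothetical.
def _hessian_diag_sketch() -> float:
    import numpy as np
    from scipy.optimize import approx_fprime

    a = 3.0
    x_map = np.array([1.2])

    def grad_i(x):
        # Stands in for -_scipy_objective_and_grad(x, mll, property_dict)[1][i].
        return a * x[0]

    h_ii = approx_fprime(x_map, grad_i, 1e-4)[0]  # approximately equal to a
    return h_ii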
def get_batch_model(
B: Tensor,
train_X: Tensor,
train_Y: Tensor,
train_Yvar: Tensor,
Uvec_batch: Tensor,
mean_constant_batch: Tensor,
output_scale_batch: Tensor,
) -> ALEBOGP:
"""Construct a batch-mode ALEBO GP using batch tensors of hyperparameters.
Args:
B: Projection matrix.
train_X: X training data.
train_Y: Y training data.
train_Yvar: Noise variances of each training Y.
Uvec_batch: Batch tensor of Uvec hyperparameters.
mean_constant_batch: Batch tensor of mean constant hyperparameter.
output_scale_batch: Batch tensor of output scale hyperparameter.
Returns: Batch-mode ALEBO GP.
"""
b = Uvec_batch.size(0)
m_b = ALEBOGP(
B=B,
train_X=train_X.repeat(b, 1, 1),
train_Y=train_Y.repeat(b, 1, 1),
train_Yvar=train_Yvar.repeat(b, 1, 1),
)
m_b.train()
# Set mean constant
m_b.mean_module.constant.requires_grad_(False)
m_b.mean_module.constant.copy_(mean_constant_batch)
m_b.mean_module.constant.requires_grad_(True)
# Set output scale
m_b.covar_module.raw_outputscale.requires_grad_(False)
m_b.covar_module.raw_outputscale.copy_(output_scale_batch)
m_b.covar_module.raw_outputscale.requires_grad_(True)
# Set Uvec
m_b.covar_module.base_kernel.Uvec.requires_grad_(False)
m_b.covar_module.base_kernel.Uvec.copy_(Uvec_batch)
m_b.covar_module.base_kernel.Uvec.requires_grad_(True)
m_b.eval()
return m_b
def extract_map_statedict(
m_b: Union[ALEBOGP, ModelListGP], num_outputs: int
) -> List[MutableMapping[str, Tensor]]:
"""Extract MAP statedict from the batch-mode ALEBO GP.
The batch GP can be either a single ALEBO GP or a ModelListGP of ALEBO GPs.
Args:
m_b: Batch-mode GP.
num_outputs: Number of outputs being modeled.
"""
is_modellist = num_outputs > 1
map_sds: List[MutableMapping[str, Tensor]] = [
OrderedDict() for i in range(num_outputs)
]
sd = m_b.state_dict()
for k, v in sd.items():
# Extract model index and parameter name
if is_modellist:
g = re.match(r"^models\.([0-9]+)\.(.*)$", k)
if g is None:
raise Exception(
"Unable to parse ModelList structure"
) # pragma: no cover
model_idx = int(g.group(1))
param_name = g.group(2)
else:
model_idx = 0
param_name = k
if len(v.shape) > 1:
v = torch.select(v, 0, 0)
map_sds[model_idx][param_name] = v
return map_sds
def ei_or_nei(
model: Union[ALEBOGP, ModelListGP],
objective_weights: Tensor,
outcome_constraints: Optional[Tuple[Tensor, Tensor]],
X_observed: Tensor,
X_pending: Optional[Tensor],
q: int,
noiseless: bool,
) -> AcquisitionFunction:
"""Use analytic EI if appropriate, otherwise Monte Carlo NEI.
Analytic EI can be used if: Single outcome, no constraints, no pending
points, not batch, and no noise.
Args:
model: GP.
objective_weights: Weights on each outcome for the objective.
outcome_constraints: Outcome constraints.
X_observed: Observed points for NEI.
X_pending: Pending points.
q: Batch size.
noiseless: True if evaluations are noiseless.
Returns: An AcquisitionFunction, either analytic EI or MC NEI.
"""
if (
len(objective_weights) == 1
and outcome_constraints is None
and X_pending is None
and q == 1
and noiseless
):
maximize = objective_weights[0] > 0
if maximize:
best_f = model.train_targets.max()
else:
best_f = model.train_targets.min()
return ExpectedImprovement(model=model, best_f=best_f, maximize=maximize)
else:
with gpytorch.settings.max_cholesky_size(2000):
acq = get_NEI(
model=model,
objective_weights=objective_weights,
outcome_constraints=outcome_constraints,
X_observed=X_observed,
X_pending=X_pending,
)
return acq
def alebo_acqf_optimizer(
acq_function: AcquisitionFunction,
bounds: Tensor,
n: int,
inequality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]],
fixed_features: Optional[Dict[int, float]],
rounding_func: Optional[Callable[[Tensor], Tensor]],
raw_samples: int,
num_restarts: int,
B: Tensor,
) -> Tuple[Tensor, Tensor]:
"""
Optimize the acquisition function for ALEBO.
We are optimizing over a polytope within the subspace, and so begin each
random restart of the acquisition function optimization with points that
lie within that polytope.
"""
candidate_list, acq_value_list = [], []
candidates = torch.tensor([], device=B.device, dtype=B.dtype)
try:
base_X_pending = acq_function.X_pending
acq_has_X_pend = True
except AttributeError:
base_X_pending = None
acq_has_X_pend = False
assert n == 1
for i in range(n):
# Generate initial points for optimization inside embedding
m_init = ALEBOInitializer(B.cpu().numpy(), nsamp=10 * raw_samples)
Xrnd_npy, _ = m_init.gen(n=raw_samples, bounds=[(-1.0, 1.0)] * B.shape[1])
Xrnd = torch.tensor(Xrnd_npy, dtype=B.dtype, device=B.device).unsqueeze(1)
Yrnd = torch.matmul(Xrnd, B.t()) # Project down to the embedding
with gpytorch.settings.max_cholesky_size(2000):
with torch.no_grad():
alpha = acq_function(Yrnd)
Yinit = initialize_q_batch_nonneg(X=Yrnd, Y=alpha, n=num_restarts)
# Optimize the acquisition function, separately for each random restart.
candidate, acq_value = optimize_acqf(
acq_function=acq_function,
bounds=[None, None], # pyre-ignore
q=1,
num_restarts=num_restarts,
raw_samples=0,
options={"method": "SLSQP", "batch_limit": 1},
inequality_constraints=inequality_constraints,
batch_initial_conditions=Yinit,
sequential=False,
)
candidate_list.append(candidate)
acq_value_list.append(acq_value)
candidates = torch.cat(candidate_list, dim=-2)
if acq_has_X_pend:
acq_function.set_X_pending(
# pyre-fixme[6]: Expected `Union[List[Tensor],
# typing.Tuple[Tensor, ...]]` for 1st param but got
# `List[Union[Tensor, torch.nn.Module]]`.
torch.cat([base_X_pending, candidates], dim=-2)
if base_X_pending is not None
else candidates
)
logger.info(f"Generated sequential candidate {i+1} of {n}")
if acq_has_X_pend:
# pyre-fixme[6]: Expected `Optional[Tensor]` for 1st param but got
# `Union[None, Tensor, torch.nn.Module]`.
acq_function.set_X_pending(base_X_pending)
return candidates, torch.stack(acq_value_list)
class ALEBO(BotorchModel):
"""Does Bayesian optimization in a linear subspace with ALEBO.
The (d x D) projection down matrix B must be provided, and must be that
used for the initialization.
Function evaluations happen in the high-D space. We only evaluate points
such that x = pinverse(B) @ B @ x (that is, points inside the subspace).
Under that constraint, the projection is invertible.
Args:
B: (d x D) projection matrix (projects down).
laplace_nsamp: Number of samples for posterior sampling of kernel
hyperparameters.
fit_restarts: Number of random restarts for MAP estimation.
"""
def __init__(
self, B: Tensor, laplace_nsamp: int = 25, fit_restarts: int = 10
) -> None:
self.B = B
self.Binv = torch.pinverse(B)
self.laplace_nsamp = laplace_nsamp
self.fit_restarts = fit_restarts
super().__init__(
refit_on_update=True, # Important to not get stuck in local opt.
refit_on_cv=False,
warm_start_refitting=False,
acqf_constructor=ei_or_nei, # pyre-ignore
# pyre-fixme[6]: Expected `(AcquisitionFunction, Tensor, int, Optional[Li...
acqf_optimizer=alebo_acqf_optimizer,
)
@copy_doc(TorchModel.fit)
def fit(
self,
Xs: List[Tensor],
Ys: List[Tensor],
Yvars: List[Tensor],
search_space_digest: SearchSpaceDigest,
metric_names: List[str],
candidate_metadata: Optional[List[List[TCandidateMetadata]]] = None,
) -> None:
assert len(search_space_digest.task_features) == 0
assert len(search_space_digest.fidelity_features) == 0
for b in search_space_digest.bounds:
assert b == (-1, 1)
# GP is fit in the low-d space, so project Xs down.
self.Xs = [(self.B @ X.t()).t() for X in Xs]
self.Ys = Ys
self.Yvars = Yvars
self.device = self.B.device
self.dtype = self.B.dtype
self.model = self.get_and_fit_model(Xs=self.Xs, Ys=self.Ys, Yvars=self.Yvars)
@copy_doc(TorchModel.predict)
def predict(self, X: Tensor) -> Tuple[Tensor, Tensor]:
Xd = (self.B @ X.t()).t() # Project down
with gpytorch.settings.max_cholesky_size(2000):
return super().predict(X=Xd)
@copy_doc(TorchModel.best_point)
def best_point(
self,
bounds: List[Tuple[float, float]],
objective_weights: Tensor,
outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
linear_constraints: Optional[Tuple[Tensor, Tensor]] = None,
fixed_features: Optional[Dict[int, float]] = None,
model_gen_options: Optional[TConfig] = None,
target_fidelities: Optional[Dict[int, float]] = None,
) -> Optional[Tensor]:
raise NotImplementedError
def gen(
self,
n: int,
bounds: List[Tuple[float, float]],
objective_weights: Tensor,
outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
linear_constraints: Optional[Tuple[Tensor, Tensor]] = None,
fixed_features: Optional[Dict[int, float]] = None,
pending_observations: Optional[List[Tensor]] = None,
model_gen_options: Optional[TConfig] = None,
rounding_func: Optional[Callable[[Tensor], Tensor]] = None,
target_fidelities: Optional[Dict[int, float]] = None,
) -> Tuple[Tensor, Tensor, TGenMetadata, List[TCandidateMetadata]]:
"""Generate candidates.
Candidates are generated in the linear embedding with the polytope
constraints described in the paper.
model_gen_options can contain 'raw_samples' (number of samples used for
initializing the acquisition function optimization) and 'num_restarts'
(number of restarts for acquisition function optimization).
"""
for b in bounds:
assert b == (-1, 1)
# The following can be easily handled in the future when needed
assert linear_constraints is None
assert fixed_features is None
assert pending_observations is None
        # Setup constraints: require the up-projected point Binv @ y to lie in
        # [-1, 1]^D, written as A @ y <= b with A = [Binv; -Binv] and b = 1.
        A = torch.cat((self.Binv, -self.Binv))
        b = torch.ones(2 * self.Binv.shape[0], 1, dtype=self.dtype, device=self.device)
        linear_constraints = (A, b)
noiseless = max(Yvar.min().item() for Yvar in self.Yvars) < 1e-5
if model_gen_options is None:
model_gen_options = {}
model_gen_options = {
"acquisition_function_kwargs": {"q": n, "noiseless": noiseless},
"optimizer_kwargs": {
"raw_samples": model_gen_options.get("raw_samples", 1000),
"num_restarts": model_gen_options.get("num_restarts", 10),
"B": self.B,
},
}
Xd_opt, w, gen_metadata, candidate_metadata = super().gen(
n=n,
bounds=[(-1e8, 1e8)] * self.B.shape[0],
objective_weights=objective_weights,
outcome_constraints=outcome_constraints,
linear_constraints=linear_constraints,
model_gen_options=model_gen_options,
)
# Project up
Xopt = (self.Binv @ Xd_opt.t()).t()
# Sometimes numerical tolerance can have Xopt epsilon outside [-1, 1],
# so clip it back.
if Xopt.min() < -1 or Xopt.max() > 1:
logger.debug(f"Clipping from [{Xopt.min()}, {Xopt.max()}]")
Xopt = torch.clamp(Xopt, min=-1.0, max=1.0)
# pyre-fixme[7]: Expected `Tuple[Tensor, Tensor, Dict[str, typing.Any],
# List[Optional[Dict[str, typing.Any]]]]` but got `Tuple[typing.Any, Tensor,
# Dict[str, typing.Any], None]`.
return Xopt, w, gen_metadata, candidate_metadata
@copy_doc(TorchModel.update)
def update(
self,
Xs: List[Tensor],
Ys: List[Tensor],
Yvars: List[Tensor],
candidate_metadata: Optional[List[List[TCandidateMetadata]]] = None,
**kwargs: Any,
) -> None:
if self.model is None:
raise RuntimeError(
"Cannot update model that has not been fit"
) # pragma: no cover
self.Xs = [(self.B @ X.t()).t() for X in Xs] # Project down.
self.Ys = Ys
self.Yvars = Yvars
if self.refit_on_update:
state_dicts = None
else:
state_dicts = extract_map_statedict(
m_b=self.model, num_outputs=len(Xs) # pyre-ignore
)
self.model = self.get_and_fit_model(
Xs=self.Xs, Ys=self.Ys, Yvars=self.Yvars, state_dicts=state_dicts
)
@copy_doc(TorchModel.cross_validate)
def cross_validate(
self,
Xs_train: List[Tensor],
Ys_train: List[Tensor],
Yvars_train: List[Tensor],
X_test: Tensor,
**kwargs: Any,
) -> Tuple[Tensor, Tensor]:
if self.model is None:
raise RuntimeError(
"Cannot cross-validate model that has not been fit"
) # pragma: no cover
if self.refit_on_cv:
state_dicts = None
else:
state_dicts = extract_map_statedict(
m_b=self.model, num_outputs=len(self.Xs) # pyre-ignore
)
Xs_train = [X @ self.B.t() for X in Xs_train] # Project down.
X_test = X_test @ self.B.t()
model = self.get_and_fit_model(
Xs=Xs_train, Ys=Ys_train, Yvars=Yvars_train, state_dicts=state_dicts
)
return self.model_predictor(model=model, X=X_test) # pyre-ignore: [28]
def get_and_fit_model(
self,
Xs: List[Tensor],
Ys: List[Tensor],
Yvars: List[Tensor],
state_dicts: Optional[List[MutableMapping[str, Tensor]]] = None,
) -> GPyTorchModel:
"""Get a fitted ALEBO model for each outcome.
Args:
Xs: X for each outcome, already projected down.
Ys: Y for each outcome.
Yvars: Noise variance of Y for each outcome.
state_dicts: State dicts to initialize model fitting.
Returns: Fitted ALEBO model.
"""
if state_dicts is None:
state_dicts = [None] * len(Xs)
fit_restarts = self.fit_restarts
else:
fit_restarts = 1 # Warm-started
Yvars = [Yvar.clamp_min_(1e-7) for Yvar in Yvars]
models = [
get_fitted_model(
B=self.B,
train_X=X,
train_Y=Ys[i],
train_Yvar=Yvars[i],
restarts=fit_restarts,
nsamp=self.laplace_nsamp,
# pyre-fixme[6]: Expected `Optional[Dict[str, Tensor]]` for 7th
# param but got `Optional[MutableMapping[str, Tensor]]`.
init_state_dict=state_dicts[i],
)
for i, X in enumerate(Xs)
]
if len(models) == 1:
model = models[0]
else:
model = ModelListGP(*models)
model.to(Xs[0])
return model
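# Illustrative sketch (not part of the original module): the ALEBO docstring
# requires evaluated points to satisfy x = pinverse(B) @ B @ x, i.e. x lies in
# the row space of B, so projecting down and back up is lossless. The check
# below builds such a point from an arbitrary low-d vector; the sizes d and D
# are hypothetical.
def _projection_roundtrip_sketch() -> bool:
    d, D = 4, 20
    B = torch.randn(d, D, dtype=torch.double)
    Binv = torch.pinverse(B)
    x = Binv @ torch.randn(d, 1, dtype=torch.double)  # a point inside the subspace
    x_roundtrip = Binv @ (B @ x)
    return bool(torch.allclose(x, x_roundtrip, atol=1e-6))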
|
the-stack_106_20484
|
import os
import glob
import sys
from matplotlib import pyplot as plt
from PIL import Image
import cv2
import numpy as np
import yolo
import fhi_unet as unet
import fhi_lib.distance_estimator as de
import fhi_lib.img_coordinate as ic
import fhi_lib.geometry as ge
def yolo_detection(user_input):
img_dir = os.path.join(os.getcwd(), user_input['image_dir'])
yl_weight = os.path.join(os.getcwd(), user_input['yolo_weight'])
yl_output_dir = os.path.join(os.getcwd(), user_input['yolo_output_dir'])
yl = yolo.YOLO(model_path=yl_weight)
yl_results = []
for img_path in glob.glob(img_dir + r'\*.jpg'):
print('Processing:', img_path)
img = Image.open(img_path)
result = yl.detect_image(img, True)
# save yolo image result
basename = os.path.basename(img_path)
yl_save_path = os.path.join(yl_output_dir, basename)
cv2.imwrite(yl_save_path, cv2.cvtColor(result['result_img'], cv2.COLOR_BGR2RGB))
# add image path to yolo result dictionary
result.update({'img_path' : img_path})
yl_results.append(result)
        # NOTE: only the first image is processed; remove this break to run the
        # whole directory.
        break
return yl_results
def create_masks(un, yl_results, un_output_dir):
def enlarge_roi(roi):
center = ((roi[0] + roi[2])/2, (roi[1] + roi[3])/2)
width = 1.4*(roi[2] - roi[0])
height = 1.4*(roi[3] - roi[1])
enlarged_roi = (int(center[0]-0.5*width), int(center[1]-0.5*height),
int(center[0]+0.5*width), int(center[1]+0.5*height))
return enlarged_roi
def unet_crop(yl_result):
img = yl_result['original_img']
cropped_imgs = []
masks = []
mask_coords = []
for i, roi in enumerate(yl_result['rois']):
            # Enlarge the roi boundary acquired from Yolo
roi = enlarge_roi(roi)
# Cropped the image
cropped_img = img[roi[1]:roi[3], roi[0]:roi[2],:]
mask_coord = (roi[0], roi[1])
# UNet Detection
mask = un.detect(cropped_img)
# Save masks
basename = os.path.basename(yl_result['img_path'])
filename = os.path.splitext(basename)[0]
mask_save_path = os.path.join(un_output_dir, basename)
mask_save_path = mask_save_path.replace(filename, filename + r'_{}'.format(i))
cv2.imwrite(mask_save_path, mask)
            # Image processing: morphological closing (dilate then erode) to fill
            # small holes in the mask, then binarize at the midpoint grey level.
            morphology_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3,3))
            dilation = cv2.dilate(mask, morphology_kernel, iterations=3)
            mask = cv2.erode(dilation, morphology_kernel, iterations=3)
            _, mask = cv2.threshold(mask, 255/2, 255, cv2.THRESH_BINARY)
# Save results
cropped_imgs.append(cropped_img)
masks.append(mask)
mask_coords.append(mask_coord)
yl_result.update({'cropped_imgs' : cropped_imgs,
'masks' : masks,
'mask_coords' : mask_coords})
for yl_result in yl_results:
unet_crop(yl_result)
def unet_detection(user_input, yl_results):
un_output_dir = user_input['unet_output_dir']
un_weight_dir = user_input['unet_weight_dir']
result_dir = user_input['result_dir']
# Start unet detection
    un = unet.UNET(un_weight_dir)
    un.initialize()
    print('#### unet initialization completed ####')
create_masks(un, yl_results, un_output_dir)
un.close_session()
print('#### Begin computing real-world distance ####')
for yl_result in yl_results:
compute_distance(result_dir, yl_result)
def compute_distance(result_dir, yl_result):
def resize_restoration(mask_itr_pt, cropped_shape):
unet_resize = 128
aspect_ratio = cropped_shape[1]/cropped_shape[0] #x/y
itr_pt = mask_itr_pt.get_point_tuple()
restored_x = 0
restored_y = 0
if aspect_ratio >=1:
distorted_y = unet_resize / aspect_ratio
padding_y = (unet_resize - distorted_y)/2
restored_x = itr_pt[0] * cropped_shape[1] / unet_resize
restored_y = (itr_pt[1] - padding_y) * cropped_shape[1] / unet_resize
        else:
            # Portrait crop: the height fills the 128 px UNet input, the width is
            # scaled by the aspect ratio and padded (mirrors the branch above).
            distorted_x = unet_resize * aspect_ratio
            padding_x = (unet_resize - distorted_x)/2
            restored_x = (itr_pt[0] - padding_x) * cropped_shape[0] / unet_resize
            restored_y = itr_pt[1] * cropped_shape[0] / unet_resize
return ge.Point((int(restored_x), int(restored_y)))
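    # Illustrative sketch (not from the original script): the forward mapping that
    # resize_restoration inverts, assuming the UNet input was resized to 128x128
    # with the aspect ratio preserved and the short side padded (letterboxing).
    # The letterboxing assumption and the names below are hypothetical.
    def _letterbox_point_sketch(pt_xy, cropped_shape, unet_resize=128):
        h, w = cropped_shape[0], cropped_shape[1]
        aspect_ratio = w / h
        if aspect_ratio >= 1:
            # Landscape crop: width fills the UNet input, height is padded.
            scale = unet_resize / w
            return (pt_xy[0] * scale, pt_xy[1] * scale + (unet_resize - h * scale) / 2)
        else:
            # Portrait crop: height fills the UNet input, width is padded.
            scale = unet_resize / h
            return (pt_xy[0] * scale + (unet_resize - w * scale) / 2, pt_xy[1] * scale)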
img = yl_result['original_img']
estimator = de.DistanceEstimator(img)
estimator.initialize()
img = estimator.display_reference_pts(img)
cropped_imgs = yl_result['cropped_imgs']
masks = yl_result['masks']
mask_coords = yl_result['mask_coords']
_, ax = plt.subplots()
for i, mask in enumerate(masks):
roi = yl_result['rois'][i]
class_id = yl_result['class_ids'][i]
info = (mask, roi, class_id)
mask_coord = mask_coords[i]
cropped_img = cropped_imgs[i]
pt_itr = None
if class_id == 0 or class_id == 1:
accessory = ic.Type1_2Coord(info)
pt_itr = accessory.get_point_of_interest()
pt_itr = resize_restoration(pt_itr, cropped_img.shape).add_point(mask_coord)
accessory.update_interest_pt(pt_itr.get_point_tuple())
img = accessory.draw_point_of_interest(img)
else:
accessory = ic.Type3_4Coord(info)
pt_itr = accessory.get_point_of_interest()
pt_itr = resize_restoration(pt_itr, cropped_img.shape).add_point(mask_coord)
accessory.update_interest_pt(pt_itr.get_point_tuple())
img = accessory.draw_point_of_interest(img)
# Distance estimator
caption = estimator.estimate(pt_itr)
ax.text(roi[0], roi[1], caption, color='lime', weight='bold', size=6, backgroundcolor="none")
print('Process completed')
img_path = yl_result['img_path']
save_path = os.path.join(result_dir, os.path.basename(img_path))
ax.text(img.shape[1]/2-600, img.shape[0]-40, os.path.splitext(os.path.basename(img_path))[0],
color='white', weight='bold', size=6, va='center', backgroundcolor='none')
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
plt.imshow(img)
plt.savefig(save_path, dpi=300)
|
the-stack_106_20486
|
# coding=utf-8
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pytype: skip-file
from __future__ import absolute_import
from __future__ import print_function
def filter_function(test=None):
# [START filter_function]
import apache_beam as beam
def is_perennial(plant):
return plant['duration'] == 'perennial'
with beam.Pipeline() as pipeline:
perennials = (
pipeline
| 'Gardening plants' >> beam.Create([
{
'icon': '🍓', 'name': 'Strawberry', 'duration': 'perennial'
},
{
'icon': '🥕', 'name': 'Carrot', 'duration': 'biennial'
},
{
'icon': '🍆', 'name': 'Eggplant', 'duration': 'perennial'
},
{
'icon': '🍅', 'name': 'Tomato', 'duration': 'annual'
},
{
'icon': '🥔', 'name': 'Potato', 'duration': 'perennial'
},
])
| 'Filter perennials' >> beam.Filter(is_perennial)
| beam.Map(print))
# [END filter_function]
if test:
test(perennials)
def filter_lambda(test=None):
# [START filter_lambda]
import apache_beam as beam
with beam.Pipeline() as pipeline:
perennials = (
pipeline
| 'Gardening plants' >> beam.Create([
{
'icon': '🍓', 'name': 'Strawberry', 'duration': 'perennial'
},
{
'icon': '🥕', 'name': 'Carrot', 'duration': 'biennial'
},
{
'icon': '🍆', 'name': 'Eggplant', 'duration': 'perennial'
},
{
'icon': '🍅', 'name': 'Tomato', 'duration': 'annual'
},
{
'icon': '🥔', 'name': 'Potato', 'duration': 'perennial'
},
])
| 'Filter perennials' >>
beam.Filter(lambda plant: plant['duration'] == 'perennial')
| beam.Map(print))
# [END filter_lambda]
if test:
test(perennials)
def filter_multiple_arguments(test=None):
# [START filter_multiple_arguments]
import apache_beam as beam
def has_duration(plant, duration):
return plant['duration'] == duration
with beam.Pipeline() as pipeline:
perennials = (
pipeline
| 'Gardening plants' >> beam.Create([
{
'icon': '🍓', 'name': 'Strawberry', 'duration': 'perennial'
},
{
'icon': '🥕', 'name': 'Carrot', 'duration': 'biennial'
},
{
'icon': '🍆', 'name': 'Eggplant', 'duration': 'perennial'
},
{
'icon': '🍅', 'name': 'Tomato', 'duration': 'annual'
},
{
'icon': '🥔', 'name': 'Potato', 'duration': 'perennial'
},
])
| 'Filter perennials' >> beam.Filter(has_duration, 'perennial')
| beam.Map(print))
# [END filter_multiple_arguments]
if test:
test(perennials)
def filter_side_inputs_singleton(test=None):
# [START filter_side_inputs_singleton]
import apache_beam as beam
with beam.Pipeline() as pipeline:
perennial = pipeline | 'Perennial' >> beam.Create(['perennial'])
perennials = (
pipeline
| 'Gardening plants' >> beam.Create([
{
'icon': '🍓', 'name': 'Strawberry', 'duration': 'perennial'
},
{
'icon': '🥕', 'name': 'Carrot', 'duration': 'biennial'
},
{
'icon': '🍆', 'name': 'Eggplant', 'duration': 'perennial'
},
{
'icon': '🍅', 'name': 'Tomato', 'duration': 'annual'
},
{
'icon': '🥔', 'name': 'Potato', 'duration': 'perennial'
},
])
| 'Filter perennials' >> beam.Filter(
lambda plant,
duration: plant['duration'] == duration,
duration=beam.pvalue.AsSingleton(perennial),
)
| beam.Map(print))
# [END filter_side_inputs_singleton]
if test:
test(perennials)
def filter_side_inputs_iter(test=None):
# [START filter_side_inputs_iter]
import apache_beam as beam
with beam.Pipeline() as pipeline:
valid_durations = pipeline | 'Valid durations' >> beam.Create([
'annual',
'biennial',
'perennial',
])
valid_plants = (
pipeline
| 'Gardening plants' >> beam.Create([
{
'icon': '🍓', 'name': 'Strawberry', 'duration': 'perennial'
},
{
'icon': '🥕', 'name': 'Carrot', 'duration': 'biennial'
},
{
'icon': '🍆', 'name': 'Eggplant', 'duration': 'perennial'
},
{
'icon': '🍅', 'name': 'Tomato', 'duration': 'annual'
},
{
'icon': '🥔', 'name': 'Potato', 'duration': 'PERENNIAL'
},
])
| 'Filter valid plants' >> beam.Filter(
lambda plant,
valid_durations: plant['duration'] in valid_durations,
valid_durations=beam.pvalue.AsIter(valid_durations),
)
| beam.Map(print))
# [END filter_side_inputs_iter]
if test:
test(valid_plants)
def filter_side_inputs_dict(test=None):
# [START filter_side_inputs_dict]
import apache_beam as beam
with beam.Pipeline() as pipeline:
keep_duration = pipeline | 'Duration filters' >> beam.Create([
('annual', False),
('biennial', False),
('perennial', True),
])
perennials = (
pipeline
| 'Gardening plants' >> beam.Create([
{
'icon': '🍓', 'name': 'Strawberry', 'duration': 'perennial'
},
{
'icon': '🥕', 'name': 'Carrot', 'duration': 'biennial'
},
{
'icon': '🍆', 'name': 'Eggplant', 'duration': 'perennial'
},
{
'icon': '🍅', 'name': 'Tomato', 'duration': 'annual'
},
{
'icon': '🥔', 'name': 'Potato', 'duration': 'perennial'
},
])
| 'Filter plants by duration' >> beam.Filter(
lambda plant,
keep_duration: keep_duration[plant['duration']],
keep_duration=beam.pvalue.AsDict(keep_duration),
)
| beam.Map(print))
# [END filter_side_inputs_dict]
if test:
test(perennials)
|
the-stack_106_20488
|
from __future__ import print_function, division
import torch
from torchvision import datasets, models, transforms
import torch.nn as nn
from torch.nn import functional as F
import torch.optim as optim
import torchvision
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from torch.utils.data import Dataset, DataLoader
import os
import cv2
class CrackDataset(Dataset):
def __init__(self, data_path, phase='train', transform=None):
self.data_path = data_path
self.phase = phase
self.transform = transform
filepath = os.path.join(self.data_path,phase)
file_list = os.listdir(filepath)
self.data = []
for class_name in file_list:
class_dir = os.path.join(filepath, class_name)
list_inside_dir = os.listdir(class_dir)
for item in list_inside_dir:
img_path = os.path.join(class_dir, item)
self.data.append([img_path,class_name])
self.class_map = {'Negative':0, 'Positive': 1}
# self.img_dim = (64,64)
def __len__(self):
return len(self.data)
def __getitem__(self,idx):
img_path, class_name = self.data[idx]
img = Image.open(img_path).convert("RGB") # channel , width, height
if self.transform is not None:
img_tensor = self.transform(img)
class_id = self.class_map[class_name]
class_id = torch.tensor([class_id])
return img_tensor.float(), class_id
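# Illustrative sketch (not part of the original script): a typical transform
# pipeline to pass into CrackDataset. The 64x64 size follows the commented-out
# img_dim hint above and is an assumption; adjust to the real image size.
def build_default_transform():
    return transforms.Compose([
        transforms.Resize((64, 64)),
        transforms.ToTensor(),
    ])
# Hypothetical usage: CrackDataset('data/', 'train', transform=build_default_transform())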
if __name__ == "__main__":
data_path = 'data/'
train_data = CrackDataset(data_path, 'train')
test_data = CrackDataset(data_path, 'test')
traindata_loader = DataLoader(train_data, batch_size=32, shuffle=True)
testdata_loader = DataLoader(test_data, batch_size=32, shuffle=True)
# for imgs, labels in traindata_loader:
# print("Batch of images has shape: ",imgs.shape)
# print("Batch of labels has shape: ", labels.shape)
|
the-stack_106_20489
|
# Problem: https://projecteuler.net/problem=227
"""
. Define distance as the clockwise number of people between the dice.
. Use the distance between the dices as a state.
. Use Markov chain to track the probabilities.
. T[distance1][distance2] is the probability of transitioning from distance 1 to distance 2
Solve for the expected number of steps from T:
. Let E(X->Y) indicate the expected number of steps of going from state X to state Y.
. We have this relation:
    E(X->Y) = sum_{all next_state}(P(X->next_state) * [E(next_state->Y) + 1])
            = 1 + sum_{all next_state}(P(X->next_state) * E(next_state->Y))
            = P(X->X-2) * E(X-2->Y)    // for this problem
            + P(X->X-1) * E(X-1->Y)
            + P(X->X)   * E(X->Y)
            + P(X->X+1) * E(X+1->Y)
            + P(X->X+2) * E(X+2->Y)
            + 1
. We have, for example,
E(30->0) = 1/36 * [E(28->0) + 1]
+ 8/36 * [E(29->0) + 1]
+ 18/36 * [E(30->0) + 1]
+ 8/36 * [E(31->0) + 1]
+ 1/36 * [E(32->0) + 1]
. Construct a matrix A from the above relations, i.e.:
A * E = y
where E[X] = E(X->0),
y[X] = 0 if X is the final state, y[X] = 1 otherwise
A = (I - T)
. Solve for E.
"""
import numpy as np
N = 100
if __name__ == "__main__":
ans = 0.0
T = np.zeros((N, N), dtype = np.double)
S = np.zeros((N), dtype = np.double)
S[50] = 1.0
#T[0][0] = 1.0
for delta in range(1, N):
# player 1 rolls 1, player 2 rolls 1: delta doesn't change
# player 1 rolls 1, player 2 rolls [2..5]
T[delta][(delta-1)%N] += 4/36
# player 1 rolls 1, player 2 rolls 6
T[delta][(delta-2)%N] += 1/36
# player 1 rolls [2..5], player 2 rolls 1
T[delta][(delta+1)%N] += 4/36
# player 1 rolls [2..5], player 2 rolls 6
T[delta][(delta-1)%N] += 4/36
# player 1 rolls 6, player 2 rolls 1
T[delta][(delta+2)%N] += 1/36
# player 1 rolls 6, player 2 rolls [2..5]
T[delta][(delta+1)%N] += 4/36
# player 1 rolls 6, player 2 rolls 6: delta doesn't change
T[delta][delta] = 18/36
y = np.ones((N, 1), dtype = np.double)
y[0,0] = 0
x = np.linalg.solve(np.eye(N) - T, y)
print(x[50][0])
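# Illustrative sketch (not part of the original solution): the same
# (I - T) @ E = y construction on a two-state toy chain where the answer is
# known in closed form. From state 1 the chain reaches the final state 0 with
# probability 1/2 and stays put with probability 1/2, so the expected number
# of steps is 2. As in the solution above, the row of T for the final state is
# left at zero and y for that state is 0.
def _toy_expected_steps():
    T_toy = np.array([[0.0, 0.0],    # final state: row left at zero
                      [0.5, 0.5]])   # state 1: finish w.p. 1/2, stay w.p. 1/2
    y_toy = np.array([[0.0], [1.0]])
    E = np.linalg.solve(np.eye(2) - T_toy, y_toy)
    return E[1][0]  # == 2.0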
|
the-stack_106_20490
|
def getMinimumNumber(numbers):
minNumber = numbers[0]
for currentNumber in numbers:
if currentNumber < minNumber:
minNumber = currentNumber
return minNumber
def getMinimumIndex(numbers):
minNumber = numbers[0]
currentIndex = 0
minIndex = currentIndex
for currentNumber in numbers:
if currentNumber < minNumber:
minNumber = currentNumber
minIndex = currentIndex
currentIndex = currentIndex+1
return minIndex
def getMaximumIndex(numbers):
maxNumber = numbers[0]
currentIndex = 0
maxIndex = currentIndex
for currentNumber in numbers:
if currentNumber > maxNumber:
maxNumber = currentNumber
maxIndex = currentIndex
currentIndex = currentIndex+1
return maxIndex
def getMaximumNumber(numbers):
maxNumber = numbers[0]
for currentNumber in numbers:
if currentNumber > maxNumber:
maxNumber = currentNumber
return maxNumber
def test():
testList = [[1,2,3,0,4,5,1] ,[11,-2,-3,0,-1,4,5,1]]
for testItem in testList:
assert(testItem[getMinimumIndex(testItem)] == getMinimumNumber(testItem))
assert (testItem[getMaximumIndex(testItem)] == getMaximumNumber(testItem))
test()
|