the-stack_106_27430 |
from featuretools.tests.plugin_tests.utils import (
    import_featuretools,
    install_featuretools_plugin,
    uninstall_featuretools_plugin
)


def test_plugin_warning():
    install_featuretools_plugin()
    warning = import_featuretools('warning').stdout.decode()
    debug = import_featuretools('debug').stdout.decode()
    uninstall_featuretools_plugin()

    message = 'Featuretools failed to load plugin module from library featuretools_plugin'
    traceback = 'NotImplementedError: plugin not implemented'

    assert message in warning
    assert traceback not in warning

    assert message in debug
    assert traceback in debug
|
the-stack_106_27431 |
#! /usr/bin/python
# -*- coding: utf-8 -*-
import tensorflow as tf
import tensorlayer as tl
import numpy as np
tl.logging.set_verbosity(tl.logging.DEBUG)
# set gpu mem fraction or allow growth
# tl.utils.set_gpu_fraction()
# prepare data
X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 784))
# define the network
ni = tl.layers.Input([None, 784])
nn = tl.layers.Dropout(keep=0.8)(ni)
nn = tl.layers.Dense(n_units=800, act=tf.nn.relu)(nn)
nn = tl.layers.Dropout(keep=0.5)(nn)
nn = tl.layers.Dense(n_units=800, act=tf.nn.relu)(nn)
nn = tl.layers.Dropout(keep=0.5)(nn)
nn = tl.layers.Dense(n_units=10, act=None)(nn)
network = tl.models.Model(inputs=ni, outputs=nn, name="mlp")
# define metric.
def acc(_logits, y_batch):
    # return np.mean(np.equal(np.argmax(_logits, 1), y_batch))
    return tf.reduce_mean(
        tf.cast(tf.equal(tf.argmax(_logits, 1), tf.convert_to_tensor(y_batch, tf.int64)), tf.float32), name='accuracy'
    )
# print network information
print(network)
# open tensorboard
# tl.utils.open_tensorboard('./tb_log', port=6006)
# train the network
tl.utils.fit(
    network, train_op=tf.optimizers.Adam(learning_rate=0.0001), cost=tl.cost.cross_entropy, X_train=X_train,
    y_train=y_train, acc=acc, batch_size=256, n_epoch=20, X_val=X_val, y_val=y_val, eval_train=True,
    tensorboard_dir='./tb_log'
)
# test
tl.utils.test(network, acc, X_test, y_test, batch_size=None, cost=tl.cost.cross_entropy)
# evaluation
_logits = tl.utils.predict(network, X_test)
y_pred = np.argmax(_logits, 1)
tl.utils.evaluation(y_test, y_pred, n_classes=10)
# save network weights
network.save_weights('model.h5')
# close tensorboard
# tl.utils.exit_tensorflow(port=6006)
|
the-stack_106_27432 |
import os
import sys
import math
# add dir
dir_name = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(dir_name,'./auxiliary/'))
print(dir_name)
import argparse
import options
######### parser ###########
opt = options.Options().init(argparse.ArgumentParser(description='image denoising')).parse_args()
print(opt)
import utils
######### Set GPUs ###########
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = opt.gpu
import torch
torch.backends.cudnn.benchmark = True
# device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# print(device)
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from natsort import natsorted
import glob
import random
import time
import numpy as np
from einops import rearrange, repeat
import datetime
from pdb import set_trace as stx
from losses import CharbonnierLoss,SSIMLoss, MixLoss, TVLoss
from tqdm import tqdm
from warmup_scheduler import GradualWarmupScheduler
from torch.optim.lr_scheduler import StepLR
from timm.utils import NativeScaler
from utils.loader import get_training_data,get_validation_data
from skimage.transform import resize
def expand2square(timg, factor=128):
    b, c, h, w = timg.size()
    X = int(math.ceil(max(h, w) / float(factor)) * factor)
    img = torch.zeros(b, c, X, X).type_as(timg)  # 3, h, w
    mask = torch.zeros(b, 1, X, X).type_as(timg)
    # print(img.size(), mask.size())
    # print((X - h)//2, (X - h)//2+h, (X - w)//2, (X - w)//2+w)
    img[:, :, ((X - h)//2):((X - h)//2 + h), ((X - w)//2):((X - w)//2 + w)] = timg
    mask[:, :, ((X - h)//2):((X - h)//2 + h), ((X - w)//2):((X - w)//2 + w)].fill_(1.0)
    return img, mask
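# expand2square illustration (values assumed, not from the original script):
# with factor=128, a 1x3x500x700 input is padded into a 1x3x768x768 square
# (768 = ceil(700/128) * 128), and `mask` equals 1.0 exactly over the region
# where the original image was placed.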
def tensorResize(timg, factor=128):
    b, c, h, w = timg.size()
    X = int(math.ceil(max(h, w) / float(factor)) * factor)
    np_arr = timg.cpu().detach().numpy()
    np_arr = np_arr[0].transpose((1, 2, 0))
    # Image resize
    im_np_resize = resize(np_arr, (X, X))
    im_np_resize = im_np_resize.transpose((2, 0, 1))
    im_np_resize = im_np_resize[np.newaxis, :]
    return h, w, torch.from_numpy(im_np_resize).cuda()


def recover(img, h, w):
    np_arr = img.cpu().detach().numpy()
    np_arr = np_arr[0].transpose((1, 2, 0))
    im_np_resize = resize(np_arr, (h, w))
    im_np_resize = im_np_resize.transpose((2, 0, 1))
    im_np_resize = im_np_resize[np.newaxis, :]
    return torch.from_numpy(im_np_resize).cuda()
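# Assumed round-trip usage (mirrors the validation loop further below):
#   h, w, inp = tensorResize(batch, factor=128)   # resize to a 128-multiple square
#   _, restored = model_restoration(inp, 1)
#   restored = recover(restored, h, w)            # resize back to the original h x w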
######### Logs dir ###########
log_dir = os.path.join(dir_name,'log', opt.arch+opt.env)
if not os.path.exists(log_dir):
os.makedirs(log_dir)
logname = os.path.join(log_dir, datetime.datetime.now().isoformat()+'.txt')
print("Now time is : ",datetime.datetime.now().isoformat())
result_dir = os.path.join(log_dir, 'results')
model_dir = os.path.join(log_dir, 'models')
utils.mkdir(result_dir)
utils.mkdir(model_dir)
# ######### Set Seeds ###########
random.seed(1234)
np.random.seed(1234)
torch.manual_seed(1234)
torch.cuda.manual_seed_all(1234)
######### Model ###########
model_restoration = utils.get_arch(opt)
with open(logname,'a') as f:
f.write(str(opt)+'\n')
f.write(str(model_restoration)+'\n')
######### Optimizer ###########
start_epoch = 1
if opt.optimizer.lower() == 'adam':
optimizer = optim.Adam(model_restoration.parameters(), lr=opt.lr_initial, betas=(0.9, 0.999),eps=1e-8, weight_decay=opt.weight_decay)
elif opt.optimizer.lower() == 'adamw':
optimizer = optim.AdamW(model_restoration.parameters(), lr=opt.lr_initial, betas=(0.9, 0.999),eps=1e-8, weight_decay=opt.weight_decay)
else:
raise Exception("Error optimizer...")
######### DataParallel ###########
model_restoration = torch.nn.DataParallel(model_restoration)
model_restoration.cuda()
######### Resume ###########
if opt.resume:
path_chk_rest = opt.pretrain_weights
utils.load_checkpoint(model_restoration,path_chk_rest)
start_epoch = utils.load_start_epoch(path_chk_rest) + 1
lr = utils.load_optim(optimizer, path_chk_rest)
for p in optimizer.param_groups: p['lr'] = lr
warmup = False
new_lr = lr
print('------------------------------------------------------------------------------')
print("==> Resuming Training with learning rate:",new_lr)
print('------------------------------------------------------------------------------')
scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, opt.nepoch-start_epoch+1, eta_min=1e-6)
######### Scheduler ###########
if opt.warmup:
print("Using warmup and cosine strategy!")
warmup_epochs = opt.warmup_epochs
scheduler_cosine = optim.lr_scheduler.CosineAnnealingLR(optimizer, opt.nepoch-warmup_epochs, eta_min=1e-6)
scheduler = GradualWarmupScheduler(optimizer, multiplier=1, total_epoch=warmup_epochs, after_scheduler=scheduler_cosine)
scheduler.step()
else:
step = 50
print("Using StepLR,step={}!".format(step))
scheduler = StepLR(optimizer, step_size=step, gamma=0.5)
scheduler.step()
######### Loss ###########
criterion0 = SSIMLoss().cuda()
criterion1 = TVLoss().cuda()
criterion2 = MixLoss().cuda()
######### DataLoader ###########
print('===> Loading datasets')
img_options_train = {'patch_size':opt.train_ps}
train_dataset = get_training_data(opt.train_dir, img_options_train, 1)
train_loader = DataLoader(dataset=train_dataset, batch_size=opt.batch_size, shuffle=True,
num_workers=opt.train_workers, pin_memory=True, drop_last=False)
val_dataset = get_validation_data(opt.val_dir)
val_loader = DataLoader(dataset=val_dataset, batch_size=1, shuffle=False,
num_workers=opt.eval_workers, pin_memory=False, drop_last=False)
len_trainset = train_dataset.__len__()
len_valset = val_dataset.__len__()
print("Sizeof training set: ", len_trainset,", sizeof validation set: ", len_valset)
######### validation ###########
with torch.no_grad():
psnr_val_rgb = []
for ii, data_val in enumerate((val_loader), 0):
target = data_val[0].cuda()
input_ = data_val[1].cuda()
filenames = data_val[2]
psnr_val_rgb.append(utils.batch_PSNR(input_, target, False).item())
psnr_val_rgb = sum(psnr_val_rgb)/len_valset
print('Input & GT (PSNR) -->%.4f dB'%(psnr_val_rgb))
######### train ###########
print('===> Start Epoch {} End Epoch {}'.format(start_epoch,opt.nepoch))
best_psnr = 0
best_epoch = 0
best_iter = 0
eval_now = len(train_loader)//2
print("\nEvaluation after every {} Iterations !!!\n".format(eval_now))
loss_scaler = NativeScaler()
torch.cuda.empty_cache()
stage0_checkpoint = './log/Wformer3.2_RD_0/models/model_best.pth'
utils.load_checkpoint(model_restoration, stage0_checkpoint)
for epoch in range(start_epoch, opt.nepoch + 1):
epoch_start_time = time.time()
epoch_loss = 0
train_id = 1
for i, data in enumerate(train_loader, 0):
# zero_grad
optimizer.zero_grad()
# target = data[0].cuda()
# input_ = data[1].cuda()
target = data[0].cuda()
mid = data[1].cuda()
input_ = data[2].cuda()
#if epoch>5:
# target, input_ = utils.MixUp_AUG().aug(target, input_)
#with torch.cuda.amp.autocast():
reflex, restored = model_restoration(input_, 1)
reflex = torch.clamp(reflex,0,1)
restored = torch.clamp(restored,0,1)
loss0 = criterion0(reflex, mid) + 0.001*criterion1(reflex)
loss1 = criterion2(restored, target)
rest = opt.nepoch - epoch
if rest<opt.nepoch//3:
loss = loss1
else:
loss = 0.2*(1 - epoch/opt.nepoch) *loss0 + loss1
loss_scaler(
loss, optimizer,parameters=model_restoration.parameters())
epoch_loss +=loss.item()
#### Evaluation ####
if (i+1)%eval_now==0 and i>0:
with torch.no_grad():
model_restoration.eval()
psnr_val_rgb = []
for ii, data_val in enumerate((val_loader), 0):
target = data_val[0].cuda()
valh, valw, input_ = tensorResize(data_val[1].cuda(), factor=128)
filenames = data_val[2]
reflex, restored = model_restoration(input_, 1)
#restored = torch.masked_select(restored,mask.bool()).reshape(target.shape[0], target.shape[1], target.shape[2], target.shape[3])
restored = torch.clamp(restored,0,1)
restored = recover(restored, valh, valw)
psnr_val_rgb.append(utils.batch_PSNR(restored, target, False).item())
psnr_val_rgb = sum(psnr_val_rgb)/len_valset
if psnr_val_rgb > best_psnr:
best_psnr = psnr_val_rgb
best_epoch = epoch
best_iter = i
torch.save({'epoch': epoch,
'state_dict': model_restoration.state_dict(),
'optimizer' : optimizer.state_dict()
}, os.path.join(model_dir,"model_best.pth"))
print("[Ep %d it %d\t PSNR SIDD: %.4f\t] ---- [best_Ep_SIDD %d best_it_SIDD %d Best_PSNR_SIDD %.4f] " % (epoch, i, psnr_val_rgb,best_epoch,best_iter,best_psnr))
with open(logname,'a') as f:
f.write("[Ep %d it %d\t PSNR SIDD: %.4f\t] ---- [best_Ep_SIDD %d best_it_SIDD %d Best_PSNR_SIDD %.4f] " \
% (epoch, i, psnr_val_rgb,best_epoch,best_iter,best_psnr)+'\n')
model_restoration.train()
torch.cuda.empty_cache()
scheduler.step()
print("------------------------------------------------------------------")
print("Epoch: {}\tTime: {:.4f}\tLoss: {:.4f}\tLearningRate {:.6f}".format(epoch, time.time()-epoch_start_time,epoch_loss, scheduler.get_lr()[0]))
print("------------------------------------------------------------------")
with open(logname,'a') as f:
f.write("Epoch: {}\tTime: {:.4f}\tLoss: {:.4f}\tLearningRate {:.6f}".format(epoch, time.time()-epoch_start_time,epoch_loss, scheduler.get_lr()[0])+'\n')
torch.save({'epoch': epoch,
'state_dict': model_restoration.state_dict(),
'optimizer' : optimizer.state_dict()
}, os.path.join(model_dir,"model_latest.pth"))
if epoch%opt.checkpoint == 0:
torch.save({'epoch': epoch,
'state_dict': model_restoration.state_dict(),
'optimizer' : optimizer.state_dict()
}, os.path.join(model_dir,"model_epoch_{}.pth".format(epoch)))
print("Now time is : ",datetime.datetime.now().isoformat())
|
the-stack_106_27434 |
from h3map.header.map_reader import MapReader


class SodReader(MapReader):
    def __init__(self, parser, version=28):
        self.version = version
        self.parser = parser
        self.towns = towns
        super().__init__(parser)

    def __repr__(self):
        return "Shadow of Death"


towns = [
    "castle",
    "rampart",
    "tower",
    "necropolis",
    "inferno",
    "dungeon",
    "stronghold",
    "fortress",
    "conflux",
    "neutral",
]
|
the-stack_106_27436 |
# SPDX-License-Identifier: Apache-2.0
#
# The OpenSearch Contributors require contributions made to
# this file be licensed under the Apache-2.0 license or a
# compatible open source license.

import argparse
import sys


class BuildArgs:
    manifest: str
    snapshot: bool
    component: str
    keep: bool

    def __init__(self):
        parser = argparse.ArgumentParser(description="Build an OpenSearch Bundle")
        parser.add_argument(
            "manifest", type=argparse.FileType("r"), help="Manifest file."
        )
        parser.add_argument(
            "-s",
            "--snapshot",
            action="store_true",
            default=False,
            help="Build snapshot.",
        )
        parser.add_argument(
            "-c", "--component", type=str, help="Rebuild a single component."
        )
        parser.add_argument(
            "--keep",
            dest="keep",
            action="store_true",
            help="Do not delete the working temporary directory.",
        )
        args = parser.parse_args()
        self.manifest = args.manifest
        self.snapshot = args.snapshot
        self.component = args.component
        self.keep = args.keep
        self.script_path = sys.argv[0].replace("/src/build.py", "/build.sh")

    def component_command(self, name):
        return " ".join(
            filter(
                None,
                [
                    self.script_path,
                    self.manifest.name,
                    f"--component {name}",
                    "--snapshot" if self.snapshot else None,
                ],
            )
        )
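# Illustrative (assumed invocation, not part of the original file): if this
# script runs as /repo/src/build.py with arguments "manifest.yml -s", then
# component_command("OpenSearch") returns
# "/repo/build.sh manifest.yml --component OpenSearch --snapshot".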
|
the-stack_106_27438 |
"""Loads run-time extensions
These loads components are considered extensions as they extend the underlying
AWS instances to add feature support and state maintenance. This composition
avoids excessively large AWS instance classes as external objects can augment
the AWS instances as needed to retain their information.
"""
import json
import os
import time
import urllib.parse
from datetime import date
from string import Template
from typing import Dict, Optional
import paramiko.client as sshclient
from influxdb import InfluxDBClient
from tornado import gen
from loadsbroker import logger
from loadsbroker.aws import EC2Collection, EC2Instance
from loadsbroker.dockerctrl import DOCKER_RETRY_EXC, DockerDaemon
from loadsbroker.options import InfluxDBOptions
from loadsbroker.ssh import makedirs
from loadsbroker.util import join_host_port, retry
SUPPORT_DIR = os.path.join(os.path.dirname(__file__), "support")
with open(os.path.join(SUPPORT_DIR, "telegraf.conf"), "r") as f:
TELEGRAF_CONF = f.read()
MONITOR_DASHBOARD_FN = "monitor-dashboard.json"
with open(os.path.join(SUPPORT_DIR, MONITOR_DASHBOARD_FN), "r") as f:
MONITOR_DASHBOARD_JSON = f.read()
UPLOAD2S3_PATH = os.path.join(SUPPORT_DIR, "upload2s3.sh")
class SSH:
"""SSH client to communicate with instances."""
def __init__(self, ssh_keyfile):
self._ssh_keyfile = ssh_keyfile
def connect(self, instance):
"""Opens an SSH connection to this instance."""
client = sshclient.SSHClient()
client.set_missing_host_key_policy(sshclient.AutoAddPolicy())
client.connect(instance.ip_address, username="core",
key_filename=self._ssh_keyfile)
return client
def _send_file(self, sftp, local_obj, remote_file):
# Ensure the base directory for the remote file exists
base_dir = os.path.dirname(remote_file)
makedirs(sftp, base_dir)
# Copy the local file to the remote location.
sftp.putfo(local_obj, remote_file)
def upload_file(self, instance, local_obj, remote_file):
"""Upload a file to an instance. Blocks."""
client = self.connect(instance)
try:
sftp = client.open_sftp()
try:
self._send_file(sftp, local_obj, remote_file)
finally:
sftp.close()
finally:
client.close()
async def reload_sysctl(self, collection):
def _reload(inst):
client = self.connect(inst.instance)
try:
stdin, stdout, stderr = client.exec_command(
"sudo sysctl -p /etc/sysctl.conf")
output = stdout.channel.recv(4096)
stdin.close()
stdout.close()
stderr.close()
return output
finally:
client.close()
await collection.map(_reload)
class Docker:
"""Docker commands for AWS instances using :class:`DockerDaemon`"""
def __init__(self, ssh):
self.sshclient = ssh
async def setup_collection(self, collection):
def setup_docker(ec2_instance):
instance = ec2_instance.instance
state = ec2_instance.state
if instance.ip_address is None:
docker_host = 'tcp://0.0.0.0:7890'
else:
docker_host = "tcp://%s:2375" % instance.ip_address
if not hasattr(state, "docker"):
state.docker = DockerDaemon(host=docker_host)
await collection.map(setup_docker)
@staticmethod
def not_responding_instances(collection):
return [x for x in collection.instances
if not x.state.docker.responded]
async def wait(self, collection, interval=60, timeout=600):
"""Waits till docker is available on every instance in the
collection."""
end = time.time() + timeout
not_responded = self.not_responding_instances(collection)
def get_container(inst):
try:
inst.state.docker.get_containers()
inst.state.docker.responded = True
except DOCKER_RETRY_EXC:
logger.debug("Docker not ready yet on %s",
str(inst.instance.id))
except Exception as exc:
logger.debug("Got exception on %s: %r",
str(inst.instance.id), exc)
# Attempt to fetch until they've all responded
while not_responded and time.time() < end:
await gen.multi([collection.execute(get_container, x)
for x in not_responded])
# Update the not_responded
not_responded = self.not_responding_instances(collection)
if not_responded:
await collection.wait(interval)
# Prune the non-responding
logger.debug("Pruning %d non-responding instances.",
len(not_responded))
await collection.remove_instances(not_responded)
async def is_running(self, collection, container_name, prune=True):
"""Checks running instances in a collection to see if the provided
container_name is running on the instance."""
def has_container(instance):
try:
all_containers = instance.state.docker.get_containers()
except:
if prune:
msg = ("Lost contact with a container on %s, "
"marking dead.")
logger.debug(msg % instance.instance.id)
instance.state.nonresponsive = True
return not prune
return any(container_name in cont["Image"]
for cont in all_containers.values())
results = await gen.multi([collection.execute(has_container, x)
for x in collection.running_instances()])
return any(results)
async def load_containers(self, collection, container_name, container_url):
"""Loads's a container of the provided name to the instance."""
@retry(on_result=lambda res: not res)
def image_loaded(docker, container_name):
return docker.has_image(container_name)
def load(instance):
def debug(msg):
logger.debug("[%s] %s" % (instance.instance.id, msg))
docker = instance.state.docker
has_container = docker.has_image(container_name)
if has_container and "latest" not in container_name:
return
if container_url:
debug("Importing %s" % container_url)
with self.sshclient.connect(instance.instance) as client:
output = docker.import_container(client, container_url)
if output:
logger.debug(output)
else:
debug("Pulling %r" % container_name)
output = docker.pull_container(container_name)
if not image_loaded(docker, container_name):
debug("Docker does not have %s" % container_name)
return False
return output
await collection.map(load)
async def run_containers(self,
collection: EC2Collection,
name: str,
command: Optional[str] = None,
env: Optional[Dict[str, str]] = None,
volumes={},
ports={},
local_dns=None,
delay=0,
pid_mode=None):
"""Run a container of the provided name with the env/command
args supplied."""
if env is None:
env = {}
if local_dns is not None:
local_dns = collection.local_dns
if isinstance(ports, str):
port_list = [x.split(":") for x in ports.split(",")]
ports = {x[0]: x[1] for x in port_list if x and len(x) == 2}
if isinstance(volumes, str):
volume_list = [x.split(":") for x in volumes.split(",")]
volumes = {x[1]: {"bind": x[0], "ro": len(x) < 3 or x[2] == "ro"}
for x in volume_list if x and len(x) >= 2}
def run(instance, tries=0):
dns = getattr(instance.state, "dns_server", None)
dns = [dns] if dns else []
docker = instance.state.docker
rinstance = instance.instance
extra = [
("HOST_IP", rinstance.ip_address),
("PRIVATE_IP", rinstance.private_ip_address),
("STATSD_HOST", rinstance.private_ip_address),
("STATSD_PORT", "8125")]
extra_env = env.copy()
extra_env.update(extra)
_env = {self.substitute_names(k, extra_env):
self.substitute_names(v, extra_env)
for k, v in extra_env.items()}
if command is None:
_command = None
else:
_command = self.substitute_names(command, _env)
_volumes = {}
for host, volume in volumes.items():
binding = volume.copy()
binding["bind"] = self.substitute_names(
binding.get("bind", host), _env)
_volumes[self.substitute_names(host, _env)] = binding
try:
return docker.safe_run_container(
name,
_command,
env=_env,
volumes=_volumes,
ports=ports,
dns=dns,
pid_mode=pid_mode
)
except Exception:
return False
results = await collection.map(run, delay=delay)
return results
async def kill_containers(self, collection, container_name):
"""Kill the container with the provided name."""
def kill(instance):
try:
instance.state.docker.kill_container(container_name)
except Exception:
logger.debug("Lost contact with a container, marking dead.",
exc_info=True)
instance.state.nonresponsive = True
await collection.map(kill)
async def stop_containers(self,
collection,
container_name,
timeout=15,
capture_stream=None):
"""Gracefully stops the container with the provided name and
timeout."""
def stop(instance):
try:
instance.state.docker.stop_container(
container_name,
timeout,
capture_stream)
except Exception:
logger.debug("Lost contact with a container, marking dead.",
exc_info=True)
instance.state.nonresponsive = True
await collection.map(stop)
@staticmethod
def substitute_names(tmpl_string, dct):
"""Given a template string, sub in values from the dct"""
return Template(tmpl_string).substitute(dct)
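# e.g. substitute_names("udp://$STATSD_HOST:$STATSD_PORT",
#                       {"STATSD_HOST": "10.0.0.5", "STATSD_PORT": "8125"})
# returns "udp://10.0.0.5:8125" (illustrative values).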
class DNSMasq:
"""Manages DNSMasq on AWS instances."""
def __init__(self, info, docker):
self.info = info
self.docker = docker
async def start(self, collection, hostmap):
"""Starts dnsmasq on a host with a given host mapping.
Host mapping is a dict of "Hostname" -> ["IP"].
"""
records = []
tmpl = Template("--host-record=$name,$ip")
for name, ips in hostmap.items():
for ip in ips:
records.append(tmpl.substitute(name=name, ip=ip))
cmd = "--user=root " + " ".join(records)
ports = {(53, "udp"): 53}
results = await self.docker.run_containers(
collection, self.info.name, cmd, ports=ports, local_dns=False)
# Add the dns info to the instances
for inst, response in zip(collection.instances, results):
state = inst.state
if hasattr(state, "dns_server"):
continue
dns_ip = response["NetworkSettings"]["IPAddress"]
state.dns_server = dns_ip
async def stop(self, collection):
await self.docker.stop_containers(collection, self.info.name)
class Watcher:
"""Watcher additions to AWS instances"""
def __init__(self, info, options=None):
self.info = info
self.options = options
async def start(self, collection, docker):
"""Launches Heka containers on all instances."""
if not self.options:
logger.debug("Watcher not configured")
return
bind = {'bind': '/var/run/docker.sock', 'ro': False}
volumes = {'/var/run/docker.sock': bind}
ports = {}
env = {'AWS_ACCESS_KEY_ID': self.options['AWS_ACCESS_KEY_ID'] or "",
'AWS_SECRET_ACCESS_KEY':
self.options['AWS_SECRET_ACCESS_KEY'] or ""}
logger.debug("Launching Watcher...")
await docker.run_containers(collection, self.info.name,
"python ./watch.py", env=env,
volumes=volumes, ports=ports,
pid_mode="host")
async def stop(self, collection, docker):
await docker.stop_containers(collection, self.info.name)
class InfluxDB:
"""A Run's managed InfluxDB"""
def __init__(self, info, ssh: SSH, aws_creds: Dict[str, str]) -> None:
self.info = info
self.sshclient = ssh
self.aws_creds = aws_creds
async def start(self, collection: EC2Collection, options: InfluxDBOptions):
await collection.map(self._setup_influxdb, 0, options)
def _setup_influxdb(self, instance: EC2Instance, options: InfluxDBOptions):
"""With an already running InfluxDB, upload the backup script
and create a Run db.
"""
with open(UPLOAD2S3_PATH) as fp:
self.sshclient.upload_file(
instance.instance, fp, "/home/core/upload2s3.sh")
args = options.client_args
args['host'] = instance.instance.ip_address
database = args.pop('database')
client = InfluxDBClient(**args)
logger.debug("Creating InfluxDB: %s", options.database_url)
client.create_database(database)
async def stop(self,
collection: EC2Collection,
options: InfluxDBOptions,
env: Dict[str, str],
project: str,
plan: str):
"""Backup the InfluxDB to s3."""
if not (self.aws_creds.get('AWS_ACCESS_KEY_ID') or
self.aws_creds.get('AWS_SECRET_ACCESS_KEY')):
logger.error("Unable to upload2s3: No AWS credentials defined")
return
bucket = env.get('INFLUXDB_S3_BUCKET')
if not bucket:
logger.error("Unable to upload2s3: No INFLUXDB_S3_BUCKET defined")
return
db = options.database
backup = "{:%Y-%m-%d}-{}-influxdb".format(date.today(), db)
archive = backup + ".tar.bz2"
cmd = """\
influxd backup -database {db} {destdir}/{backup} && \
tar cjvf {destdir}/{archive} -C {destdir} {backup} \
""".format(
db=db,
destdir="/influxdb-backup",
backup=backup,
archive=archive
)
# wrap in a shell to chain commands in docker exec
cmd = "sh -c '{}'".format(cmd)
await collection.map(self._container_exec, 0, self.info.name, cmd)
# upload2s3's ran from the host (vs the lightweight
# influxdb-alpine container) because it requires openssl/curl
destdir = os.path.join(project, plan)
cmd = """\
export AWS_ACCESS_KEY_ID={AWS_ACCESS_KEY_ID} && \
export AWS_SECRET_ACCESS_KEY={AWS_SECRET_ACCESS_KEY} && \
sh /home/core/upload2s3.sh {archive} {bucket} "{destdir}" \
""".format(
archive=os.path.join("/home/core/influxdb/backup", archive),
bucket=bucket,
destdir=destdir,
**self.aws_creds
)
exits = await collection.map(self._ssh_exec, 0, cmd)
url = "https://{}.s3.amazonaws.com/{}/{}".format(
bucket,
urllib.parse.quote(destdir),
archive)
if any(exits):
logger.error("InfluxDB upload2s3 failed: %s (%s)", exits, url)
else:
logger.debug("InfluxDB upload2s3 succeeded (%s)", url)
def _container_exec(self,
instance: EC2Instance,
container_name: str,
cmd: str) -> bytes:
conts = list(instance.state.docker.containers_by_name(container_name))
if not conts:
return None
cont = conts[0] # assume 1
return instance.state.docker.exec_run(cont['Id'], cmd)
def _ssh_exec(self, instance: EC2Instance, cmd: str) -> int:
with self.sshclient.connect(instance.instance) as client:
stdin, stdout, stderr = client.exec_command(cmd)
stdin.close()
status = stdout.channel.recv_exit_status()
if status:
logger.error("ssh cmd failed:\n%s", stderr.read())
return status
class Grafana:
"""Grafana monitor Dashboard for AWS instances"""
data_source_defaults = dict(
type='influxdb',
access='proxy',
isDefault=True,
basicAuth=False
)
def __init__(self, info) -> None:
self.info = info
async def start(self,
collection: EC2Collection,
run_id: str,
options: InfluxDBOptions):
data_source = self.data_source_defaults.copy()
data_source.update(
name="loads-broker InfluxDB Monitor (run_id: {})".format(run_id),
url="http://" + join_host_port(options.host, options.port),
database=options.database,
)
port = 8080
ports = {3000: port}
cmd = """\
apt-get update -qq && \
apt-get install -qq -y --no-install-recommends curl && \
/etc/init.d/grafana-server start && \
until curl "${__LOADS_GRAFANA_URL__}" \
-X POST \
-H "Accept: application/json" \
-H "Content-Type: application/json" \
--data-binary "${__LOADS_GRAFANA_DS_PAYLOAD__}"; do
sleep 1
done && \
/etc/init.d/grafana-server stop && \
mkdir "${GF_DASHBOARDS_JSON_PATH}" && \
echo "${__LOADS_GRAFANA_DASHBOARD__}" >> \
"${GF_DASHBOARDS_JSON_PATH}/monitor-dashboard.json" && \
./run.sh
"""
cmd = "sh -c '{}'".format(cmd)
# Avoid docker.run_container: it munges our special env
def run(instance, tries=0):
docker = instance.state.docker
url = "http://admin:admin@localhost:3000/api/datasources"
env = {
'GF_DEFAULT_INSTANCE_NAME': instance.instance.id,
'GF_DASHBOARDS_JSON_ENABLED': "true",
'GF_DASHBOARDS_JSON_PATH': "/var/lib/grafana/dashboards",
'__LOADS_GRAFANA_URL__': url,
'__LOADS_GRAFANA_DS_PAYLOAD__': json.dumps(data_source),
'__LOADS_GRAFANA_DASHBOARD__': MONITOR_DASHBOARD_JSON,
}
try:
docker.safe_run_container(
self.info.name,
entrypoint=cmd,
env=env,
ports=ports,
)
except Exception:
return False
# XXX: not immediately available
logger.info("Setting up Dashboard: http://%s:%s/dashboard/file/%s",
instance.instance.ip_address,
port,
MONITOR_DASHBOARD_FN)
await collection.map(run)
async def stop(self, collection, docker):
await docker.stop_containers(collection, self.info.name)
class Telegraf:
"""Telegraf monitor for AWS instances"""
def __init__(self, info) -> None:
self.info = info
async def start(self,
collection: EC2Collection,
_: Docker,
options: InfluxDBOptions,
step: str,
type_: Optional[str] = None):
ports = {(8125, "udp"): 8125}
cmd = """\
echo "${__LOADS_TELEGRAF_CONF__}" > /etc/telegraf/telegraf.conf && \
telegraf \
"""
cmd = "sh -c '{}'".format(cmd)
# Avoid docker.run_container: it munges our special env
def run(instance, tries=0):
docker = instance.state.docker
env = {
'__LOADS_TELEGRAF_CONF__': TELEGRAF_CONF,
'__LOADS_INFLUX_ADDR__':
join_host_port(options.host, options.port),
'__LOADS_INFLUX_DB__': options.database,
'__LOADS_TELEGRAF_HOST__': instance.instance.id,
'__LOADS_TELEGRAF_STEP__': step
}
if type_:
env['__LOADS_TELEGRAF_TYPE__'] = type_
try:
return docker.safe_run_container(
self.info.name,
cmd,
env=env,
ports=ports,
)
except Exception:
return False
await collection.map(run)
async def stop(self, collection, docker):
await docker.stop_containers(collection, self.info.name)
|
the-stack_106_27440 |
#!/bin/env python3
import glob
import multiprocessing.dummy as multiprocessing
import subprocess
import sys
import tempfile
import time
import json
import os
exec_cmd = lambda *cmd: subprocess.check_output(cmd).decode('utf-8')
RED = exec_cmd('tput', 'setaf', '1')
GREEN = exec_cmd('tput', 'setaf', '2')
YELLOW = exec_cmd('tput', 'setaf', '3')
BOLD = exec_cmd('tput', 'bold')
RESET = exec_cmd('tput', 'sgr0')
CLEAR = exec_cmd('tput', 'clear')
BLACKLIST = [
"Failed to GET .",
"The following repos define a policy or require context",
"requested job is unknown to prow: rehearse",
"requested job is unknown to prow: promote",
"Not enough reviewers found in OWNERS files for files touched by this PR",
"failed to get path: failed to resolve sym link: failed to read",
"nil pointer evaluating *v1.Refs.Repo",
"unrecognized directory name (expected int64)",
"failed to get reader for GCS object: storage: object doesn't exist",
"failed to get reader for GCS object: storage: object doesn't exist",
"googleapi: Error 401: Anonymous caller does not have storage.objects.list access to origin-ci-private., required",
"has required jobs but has protect: false",
"Couldn't find/suggest approvers for each files.",
"remote error: upload-pack: not our ref",
"fatal: remote error: upload-pack: not our ref",
"Error getting ProwJob name for source",
"the cache is not started, can not read objects",
"owner mismatch request by",
"Get : unsupported protocol scheme",
"No available resource",
"context deadline exceeded",
"owner mismatch request by ci-op"
]
def run_oc(args):
command = ['oc', '--loglevel', '3', '--namespace', 'ci'] + args
try:
process = subprocess.run(command, capture_output=True, check=True)
except subprocess.CalledProcessError as exc:
print(exc.stderr.decode('utf-8'))
raise
return process.stdout.decode('utf-8')
def debug(msg):
if os.environ.get("DEBUG", "") == "true":
print(msg)
def main():
dcs = run_oc(['get', 'deployment', '--selector', 'app=prow', '--output', 'jsonpath={.items[*].metadata.name}']).split()
with tempfile.TemporaryDirectory() as log_dir:
fs = [(display, log_dir), *((highlight, log_dir, x) for x in dcs)]
with multiprocessing.Pool(len(fs)) as pool:
for _ in pool.imap_unordered(lambda x: x[0](*x[1:]), fs):
pass # a check for exceptions is implicit in the iteration
def display(log_dir):
logs = log_dir + '/*.log'
while True:
sys.stdout.write(CLEAR)
for log in sorted(glob.glob(logs)):
with open(log) as f:
if sys.stdout.write(f.read()):
sys.stdout.write('\n')
time.sleep(5)
def highlight(log_dir, dc):
warn = '"level":"warning"'
error = '"level":"error"'
fatal = '"level":"fatal"'
query = '"query":"'
log = '{}/{}.log'.format(log_dir, dc)
while True:
debug("deployment/{}: gathering info".format(dc))
header = renderHeader(dc)
lines = []
log_lines = []
for pod in run_oc(['get', 'pods', '--selector', 'component={}'.format(dc), '--output', 'jsonpath={.items[*].metadata.name}']).split():
debug("deployment/{}: pod/{}: gathering info".format(dc, pod))
lines.extend(renderFlavor(pod, dc))
cmd = ['logs', '--since', '20m', 'pod/{}'.format(pod)]
if dc == 'deck-internal':
cmd += ['--container', 'deck']
if dc == 'boskos':
cmd += ['--container', 'boskos']
debug("deployment/{}: pod/{}: getting logs".format(dc, pod))
try:
for l in run_oc(cmd).splitlines():
if any(word in l for word in BLACKLIST):
continue
if query in l:
data = json.loads(l)
data.pop("query")
l = json.dumps(data)
if warn in l:
log_lines.append(YELLOW + l + RESET)
elif error in l or fatal in l:
log_lines.append(RED + l + RESET)
except subprocess.CalledProcessError:
debug("deployment/{}: pod/{}: getting logs failed".format(dc, pod))
if not log_lines and not lines:
header = "{} {}{}{}".format(header, GREEN, "OK", RESET)
with open(log, 'w') as f:
f.write('\n'.join([header, *lines, *log_lines[-5:]]))
time.sleep(60)
def renderHeader(dc):
debug("deployment/{}: rendering header".format(dc))
rawdc = json.loads(run_oc(['get', 'deployment/{}'.format(dc), '--output', 'json']))
spec = rawdc.get("spec", {})
status = rawdc.get("status", {})
desired = spec.get("replicas", 0)
current = status.get("replicas", 0)
updated = status.get("updatedReplicas", 0)
available = status.get("availableReplicas", 0)
version = "<unknown-version>"
containers = spec.get("template", {}).get("spec", {}).get("containers", [])
for container in containers:
if dc == "jenkins-dev-operator":
container_name = "jenkins-operator"
elif dc == "deck-internal":
container_name = "deck"
else:
container_name = dc
if container.get("name") == container_name:
image = container.get("image", "")
version = image.split(":")[-1]
headerColor = ''
if desired != current:
headerColor = RED
message = '{} at {} [{}/{}]'.format(dc, version, current, desired)
if updated != desired:
message += ' ({} stale replicas)'.format(desired - updated)
if available != desired:
message += ' ({} unavailable replicas)'.format(desired - available)
header = '{}{}{}:{}'.format(BOLD, headerColor, message, RESET)
debug("deployment/{}: got header {}".format(dc, header))
return header
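# Illustrative rendered header (terminal codes omitted, hypothetical values,
# e.g. desired=3, current=3, updated=2):
#   "deck at v20210101-abcdef [3/3] (1 stale replicas):"
# It is shown in bold, and colored red whenever current != desired replicas.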
def renderFlavor(pod, dc):
debug("deployment/{}: pod/{}: rendering flavor".format(dc, pod))
lines = []
raw = json.loads(run_oc(['get', 'pod/{}'.format(pod), '--output', 'json']))
status = raw.get("status", {})
phase = status.get("phase", "")
if phase != "Running":
reason = status.get("reason", "")
message = status.get("message", "")
color = YELLOW
if phase in ["Failed", "Unknown", "CrashLoopBackOff"]:
color = RED
lines.append(color + "pod {} is {}: {}, {}".format(pod, phase, reason, message))
for container in status.get("containerStatuses", []):
debug("pod/{}: handling status for container {}".format(pod, container.get("name", "")))
if container.get("name") == dc:
state = container.get("state", {})
if "running" not in state:
if "waiting" in state:
reason = state["waiting"].get("reason")
message = state["waiting"].get("message")
lines.append(YELLOW + "pod {} is waiting: {}".format(pod, reason) + RESET)
lines.append(YELLOW + "\t{}".format(message) + RESET)
if "terminated" in state:
reason = state["terminated"].get("reason")
message = state["terminated"].get("message")
lines.append(RED + "pod {} is terminated: {}".format(pod, reason) + RESET)
lines.append(RED + "\t{}".format(message) + RESET)
restartCount = container.get("restartCount", 0)
if restartCount != 0:
lines.append(RED + "pod {} has restarted {} times".format(pod, restartCount) + RESET)
debug("deployment/{}: pod/{}: got flavor {}".format(dc, pod, lines))
return lines
if __name__ == '__main__':
main()
|
the-stack_106_27441 |
import copy
from typing import Tuple
import numpy as np
import pytest
import xarray as xr
from gcm_filters import Filter, FilterShape, GridType
from gcm_filters.filter import FilterSpec
def _check_equal_filter_spec(spec1, spec2):
assert spec1.n_steps_total == spec2.n_steps_total
np.testing.assert_allclose(spec1.s, spec2.s)
assert (spec1.is_laplacian == spec2.is_laplacian).all()
assert spec1.s_max == spec2.s_max
np.testing.assert_allclose(spec1.p, spec2.p, rtol=1e-07, atol=1e-07)
# These values were just hard copied from my dev environment.
# All they do is check that the results match what I got when I ran the code.
# They do NOT assure that the filter spec is correct.
@pytest.mark.parametrize(
"filter_args, expected_filter_spec",
[
(
dict(
filter_scale=10.0,
dx_min=1.0,
filter_shape=FilterShape.GAUSSIAN,
transition_width=np.pi,
ndim=2,
grid_vars={},
),
FilterSpec(
n_steps_total=10,
s=[
8.0 + 0.0j,
3.42929331 + 0.0j,
7.71587822 + 0.0j,
2.41473596 + 0.0j,
7.18021542 + 0.0j,
1.60752541 + 0.0j,
6.42502377 + 0.0j,
0.81114415 - 0.55260985j,
5.50381534 + 0.0j,
4.48146765 + 0.0j,
],
is_laplacian=[
True,
True,
True,
True,
True,
True,
True,
False,
True,
True,
],
s_max=8.0,
p=[
0.09887381,
-0.19152534,
0.1748326,
-0.14975371,
0.12112337,
-0.09198484,
0.0662522,
-0.04479323,
0.02895827,
-0.0173953,
0.00995974,
-0.00454758,
],
),
),
(
dict(
filter_scale=2.0,
dx_min=1.0,
filter_shape=FilterShape.TAPER,
transition_width=np.pi,
ndim=1,
grid_vars={},
),
FilterSpec(
n_steps_total=3,
s=[
5.23887374 - 1.09644141j,
-0.76856043 - 1.32116962j,
3.00058907 - 2.95588288j,
],
is_laplacian=[False, False, False],
s_max=4.0,
p=[
0.83380304,
-0.23622724,
-0.06554041,
0.01593978,
0.00481014,
-0.00495532,
0.00168445,
],
),
),
],
)
def test_filter_spec(filter_args, expected_filter_spec):
"""This test just verifies that the filter specification looks as expected."""
filter = Filter(**filter_args)
_check_equal_filter_spec(filter.filter_spec, expected_filter_spec)
# TODO: check other properties of filter_spec?
# define (for now: hard-code) which grids are associated with vector Laplacians
vector_grids = [gt for gt in GridType if gt.name in {"VECTOR_C_GRID"}]
# all remaining grids are for scalar Laplacians
scalar_grids = [gt for gt in GridType if gt not in vector_grids]
scalar_transformed_regular_grids = [
gt
for gt in GridType
if gt.name
in {
"REGULAR_AREA_WEIGHTED",
"REGULAR_WITH_LAND_AREA_WEIGHTED",
"TRIPOLAR_REGULAR_WITH_LAND_AREA_WEIGHTED",
}
]
_grid_kwargs = {
GridType.REGULAR: [],
GridType.REGULAR_AREA_WEIGHTED: ["area"],
GridType.REGULAR_WITH_LAND: ["wet_mask"],
GridType.REGULAR_WITH_LAND_AREA_WEIGHTED: ["wet_mask", "area"],
GridType.IRREGULAR_WITH_LAND: [
"wet_mask",
"dxw",
"dyw",
"dxs",
"dys",
"area",
"kappa_w",
"kappa_s",
],
GridType.TRIPOLAR_REGULAR_WITH_LAND_AREA_WEIGHTED: ["wet_mask", "area"],
GridType.TRIPOLAR_POP_WITH_LAND: ["wet_mask", "dxe", "dye", "dxn", "dyn", "tarea"],
}
def _make_random_data(ny, nx):
data = np.random.rand(ny, nx)
da = xr.DataArray(data, dims=["y", "x"])
return da
def _make_mask_data(ny, nx):
mask_data = np.ones((ny, nx))
mask_data[0, :] = 0 # Antarctica; required for some kernels
mask_data[: (ny // 2), : (nx // 2)] = 0
da_mask = xr.DataArray(mask_data, dims=["y", "x"])
return da_mask
def _make_kappa_data(ny, nx):
kappa_data = np.ones((ny, nx))
da_kappa = xr.DataArray(kappa_data, dims=["y", "x"])
return da_kappa
def _make_irregular_grid_data(ny, nx):
# avoid large-amplitude variation, ensure positive values, mean of 1
grid_data = 0.9 + 0.2 * np.random.rand(ny, nx)
assert np.all(grid_data > 0)
da_grid = xr.DataArray(grid_data, dims=["y", "x"])
return da_grid
def _make_irregular_tripole_grid_data(ny, nx):
# avoid large-amplitude variation, ensure positive values, mean of 1
grid_data = 0.9 + 0.2 * np.random.rand(ny, nx)
assert np.all(grid_data > 0)
# make northern edge grid data fold onto itself
half_northern_edge = grid_data[-1, : (nx // 2)]
grid_data[-1, (nx // 2) :] = half_northern_edge[::-1]
da_grid = xr.DataArray(grid_data, dims=["y", "x"])
return da_grid
@pytest.fixture(scope="module", params=scalar_grids)
def grid_type_and_input_ds(request):
grid_type = request.param
ny, nx = 128, 256
da = _make_random_data(ny, nx)
grid_vars = {}
for name in _grid_kwargs[grid_type]:
if name == "wet_mask":
grid_vars[name] = _make_mask_data(ny, nx)
elif "kappa" in name:
grid_vars[name] = _make_kappa_data(ny, nx)
else:
grid_vars[name] = _make_irregular_grid_data(ny, nx)
if grid_type == GridType.TRIPOLAR_POP_WITH_LAND:
for name in _grid_kwargs[grid_type]:
if name in ["dxn", "dyn"]:
grid_vars[name] = _make_irregular_tripole_grid_data(ny, nx)
return grid_type, da, grid_vars
@pytest.fixture(scope="module", params=vector_grids)
def vector_grid_type_and_input_ds(request):
grid_type = request.param
ny, nx = (128, 256)
grid_vars = {}
if grid_type == GridType.VECTOR_C_GRID:
# construct spherical coordinate system similar to MOM6 NeverWorld2 grid
# define latitudes and longitudes
lat_min = -70
lat_max = 70
lat_u = np.linspace(
lat_min + 0.5 * (lat_max - lat_min) / ny,
lat_max - 0.5 * (lat_max - lat_min) / ny,
ny,
)
lat_v = np.linspace(lat_min + (lat_max - lat_min) / ny, lat_max, ny)
lon_min = 0
lon_max = 60
lon_u = np.linspace(lon_min + (lon_max - lon_min) / nx, lon_max, nx)
lon_v = np.linspace(
lon_min + 0.5 * (lon_max - lon_min) / nx,
lon_max - 0.5 * (lon_max - lon_min) / nx,
nx,
)
(geolon_u, geolat_u) = np.meshgrid(lon_u, lat_u)
(geolon_v, geolat_v) = np.meshgrid(lon_v, lat_v)
# radius of a random planet smaller than Earth
R = 6378000 * np.random.rand(1)
# dx varies spatially
dxCu = R * np.cos(geolat_u / 360 * 2 * np.pi)
dxCv = R * np.cos(geolat_v / 360 * 2 * np.pi)
dxBu = dxCv + np.roll(dxCv, -1, axis=1)
dxT = dxCu + np.roll(dxCu, 1, axis=1)
da_dxCu = xr.DataArray(dxCu, dims=["y", "x"])
da_dxCv = xr.DataArray(dxCv, dims=["y", "x"])
da_dxBu = xr.DataArray(dxBu, dims=["y", "x"])
da_dxT = xr.DataArray(dxT, dims=["y", "x"])
# dy is set constant, equal to dx at the equator
dy = np.max(dxCu) * np.ones((ny, nx))
da_dy = xr.DataArray(dy, dims=["y", "x"])
# compute grid cell areas
area_u = dxCu * dy
area_v = dxCv * dy
da_area_u = xr.DataArray(area_u, dims=["y", "x"])
da_area_v = xr.DataArray(area_v, dims=["y", "x"])
# set isotropic and anisotropic kappas
kappa_data = np.ones((ny, nx))
da_kappa = xr.DataArray(kappa_data, dims=["y", "x"])
# put a big island in the middle
mask_data = np.ones((ny, nx))
mask_data[: (ny // 2), : (nx // 2)] = 0
da_mask = xr.DataArray(mask_data, dims=["y", "x"])
grid_vars = {
"wet_mask_t": da_mask,
"wet_mask_q": da_mask,
"dxT": da_dxT,
"dyT": da_dy,
"dxCu": da_dxCu,
"dyCu": da_dy,
"dxCv": da_dxCv,
"dyCv": da_dy,
"dxBu": da_dxBu,
"dyBu": da_dy,
"area_u": da_area_u,
"area_v": da_area_v,
"kappa_iso": da_kappa,
"kappa_aniso": da_kappa,
}
data_u = np.random.rand(ny, nx)
data_v = np.random.rand(ny, nx)
da_u = xr.DataArray(data_u, dims=["y", "x"])
da_v = xr.DataArray(data_v, dims=["y", "x"])
return grid_type, da_u, da_v, grid_vars, geolat_u
#################### Diffusion-based filter tests ########################################
@pytest.mark.parametrize(
"filter_args",
[dict(filter_scale=3.0, dx_min=1.0, n_steps=0, filter_shape=FilterShape.GAUSSIAN)],
)
def test_diffusion_filter(grid_type_and_input_ds, filter_args):
"""Test all diffusion-based filters: filters that use a scalar Laplacian."""
grid_type, da, grid_vars = grid_type_and_input_ds
filter = Filter(grid_type=grid_type, grid_vars=grid_vars, **filter_args)
filter.plot_shape()
filtered = filter.apply(da, dims=["y", "x"])
# check conservation
area = 1
for k, v in grid_vars.items():
if "area" in k:
area = v
break
da_sum = (da * area).sum()
filtered_sum = (filtered * area).sum()
xr.testing.assert_allclose(da_sum, filtered_sum)
# check that we get an error if we pass scalar Laplacian to .apply_to vector,
# where the latter method is for vector Laplacians only
with pytest.raises(ValueError, match=r"Provided Laplacian *"):
filtered_u, filtered_v = filter.apply_to_vector(da, da, dims=["y", "x"])
# check variance reduction
assert (filtered ** 2).sum() < (da ** 2).sum()
# check that we get an error if we leave out any required grid_vars
for gv in grid_vars:
grid_vars_missing = {k: v for k, v in grid_vars.items() if k != gv}
with pytest.raises(ValueError, match=r"Provided `grid_vars` .*"):
filter = Filter(
grid_type=grid_type, grid_vars=grid_vars_missing, **filter_args
)
bad_filter_args = copy.deepcopy(filter_args)
# check that we get an error when transition_width <= 1
bad_filter_args["transition_width"] = 1
with pytest.raises(ValueError, match=r"Transition width .*"):
filter = Filter(grid_type=grid_type, grid_vars=grid_vars, **bad_filter_args)
# check that we get an error if ndim > 2 and n_steps = 0
bad_filter_args["transition_width"] = np.pi
bad_filter_args["ndim"] = 3
bad_filter_args["n_steps"] = 0
with pytest.raises(ValueError, match=r"When ndim > 2, you .*"):
filter = Filter(grid_type=grid_type, grid_vars=grid_vars, **bad_filter_args)
# check that we get a warning if n_steps < n_steps_default
bad_filter_args["ndim"] = 2
bad_filter_args["n_steps"] = 3
with pytest.warns(UserWarning, match=r"You have set n_steps .*"):
filter = Filter(grid_type=grid_type, grid_vars=grid_vars, **bad_filter_args)
# check that we get a warning if numerical instability possible
bad_filter_args["n_steps"] = 0
bad_filter_args["filter_scale"] = 1000
with pytest.warns(UserWarning, match=r"Filter scale much larger .*"):
filter = Filter(grid_type=grid_type, grid_vars=grid_vars, **bad_filter_args)
# check that we get an error if we pass dx_min != 1 to a regular scalar Laplacian
if grid_type in scalar_transformed_regular_grids:
bad_filter_args["filter_scale"] = 3 # restore good value for filter scale
bad_filter_args["dx_min"] = 3
with pytest.raises(ValueError, match=r"Provided Laplacian .*"):
filter = Filter(grid_type=grid_type, grid_vars=grid_vars, **bad_filter_args)
#################### Visosity-based filter tests ########################################
@pytest.mark.parametrize(
"filter_args",
[dict(filter_scale=1.0, dx_min=1.0, n_steps=10, filter_shape=FilterShape.TAPER)],
)
def test_viscosity_filter(vector_grid_type_and_input_ds, filter_args):
"""Test all viscosity-based filters: filters that use a vector Laplacian."""
grid_type, da_u, da_v, grid_vars, geolat_u = vector_grid_type_and_input_ds
filter = Filter(grid_type=grid_type, grid_vars=grid_vars, **filter_args)
filtered_u, filtered_v = filter.apply_to_vector(da_u, da_v, dims=["y", "x"])
# check conservation under solid body rotation: u = cos(lat), v=0;
data_u = np.cos(geolat_u / 360 * 2 * np.pi)
data_v = np.zeros_like(data_u)
da_u = xr.DataArray(data_u, dims=["y", "x"])
da_v = xr.DataArray(data_v, dims=["y", "x"])
filtered_u, filtered_v = filter.apply_to_vector(da_u, da_v, dims=["y", "x"])
xr.testing.assert_allclose(filtered_u, da_u, atol=1e-12)
xr.testing.assert_allclose(filtered_v, da_v, atol=1e-12)
# check that we get an error if we pass vector Laplacian to .apply, where
# the latter method is for scalar Laplacians only
with pytest.raises(ValueError, match=r"Provided Laplacian *"):
filtered_u = filter.apply(da_u, dims=["y", "x"])
# check that we get an error if we leave out any required grid_vars
for gv in grid_vars:
grid_vars_missing = {k: v for k, v in grid_vars.items() if k != gv}
with pytest.raises(ValueError, match=r"Provided `grid_vars` .*"):
filter = Filter(
grid_type=grid_type, grid_vars=grid_vars_missing, **filter_args
)
|
the-stack_106_27442 |
import numpy as np
import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.keras.layers import Dense, Flatten
from tensorflow.keras.losses import SparseCategoricalCrossentropy
from tensorflow.keras.metrics import SparseCategoricalAccuracy
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.regularizers import l2
from spektral.data import MixedLoader
from spektral.datasets.mnist import MNIST
from spektral.layers import GCNConv
from spektral.layers.ops import sp_matrix_to_sp_tensor
tf.config.experimental_run_functions_eagerly(True)
# Parameters
batch_size = 32 # Batch size
epochs = 1000 # Number of training epochs
patience = 10 # Patience for early stopping
l2_reg = 5e-4 # Regularization rate for l2
# Load data
data = MNIST()
# The adjacency matrix is stored as an attribute of the dataset.
# Create filter for GCN and convert to sparse tensor.
data.a = GCNConv.preprocess(data.a)
data.a = sp_matrix_to_sp_tensor(data.a)
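# GCNConv.preprocess applies the standard GCN normalization to the adjacency
# matrix (typically D^(-1/2) (A + I) D^(-1/2)) before it is converted to a
# sparse tensor.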
# Train/valid/test split
data_tr, data_te = data[:-10000], data[-10000:]
np.random.shuffle(data_tr)
data_tr, data_va = data_tr[:-10000], data_tr[-10000:]
# We use a MixedLoader since the dataset is in mixed mode
loader_tr = MixedLoader(data_tr, batch_size=batch_size, epochs=epochs)
loader_va = MixedLoader(data_va, batch_size=batch_size)
loader_te = MixedLoader(data_te, batch_size=batch_size)
# Build model
class Net(Model):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.conv1 = GCNConv(32, activation="elu", kernel_regularizer=l2(l2_reg))
self.conv2 = GCNConv(32, activation="elu", kernel_regularizer=l2(l2_reg))
self.flatten = Flatten()
self.fc1 = Dense(512, activation="relu")
self.fc2 = Dense(10, activation="softmax") # MNIST has 10 classes
def call(self, inputs):
x, a = inputs
x = self.conv1([x, a])
x = self.conv2([x, a])
output = self.flatten(x)
output = self.fc1(output)
output = self.fc2(output)
return output
# Create model
model = Net()
optimizer = Adam()
loss_fn = SparseCategoricalCrossentropy()
acc_fn = SparseCategoricalAccuracy()
# Training function
@tf.function
def train_on_batch(inputs, target):
with tf.GradientTape() as tape:
predictions = model(inputs, training=True)
loss = loss_fn(target, predictions) + sum(model.losses)
acc = acc_fn(target, predictions)
gradients = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
return loss, acc
# Evaluation function
def evaluate(loader):
step = 0
results = []
for batch in loader:
step += 1
inputs, target = batch
predictions = model(inputs, training=False)
loss = loss_fn(target, predictions)
acc = acc_fn(target, predictions)
results.append((loss, acc, len(target))) # Keep track of batch size
if step == loader.steps_per_epoch:
results = np.array(results)
return np.average(results[:, :-1], 0, weights=results[:, -1])
# Setup training
best_val_loss = 99999
current_patience = patience
step = 0
# Training loop
results_tr = []
for batch in loader_tr:
step += 1
# Training step
inputs, target = batch
loss, acc = train_on_batch(inputs, target)
results_tr.append((loss, acc, len(target)))
if step == loader_tr.steps_per_epoch:
results_va = evaluate(loader_va)
if results_va[0] < best_val_loss:
best_val_loss = results_va[0]
current_patience = patience
results_te = evaluate(loader_te)
else:
current_patience -= 1
if current_patience == 0:
print("Early stopping")
break
# Print results
results_tr = np.array(results_tr)
results_tr = np.average(results_tr[:, :-1], 0, weights=results_tr[:, -1])
print(
"Train loss: {:.4f}, acc: {:.4f} | "
"Valid loss: {:.4f}, acc: {:.4f} | "
"Test loss: {:.4f}, acc: {:.4f}".format(
*results_tr, *results_va, *results_te
)
)
# Reset epoch
results_tr = []
step = 0
|
the-stack_106_27447 |
# Copyright (c) 2013, Kevin Greenan ([email protected])
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import random
from string import ascii_letters
import sys
import tempfile
import time
import unittest
import pyeclib_c
from pyeclib.ec_iface import PyECLib_EC_Types
from pyeclib.ec_iface import VALID_EC_TYPES
class Timer:
def __init__(self):
self.start_time = 0
self.end_time = 0
def start(self):
self.start_time = time.time()
def stop(self):
self.end_time = time.time()
def curr_delta(self):
return self.end_time - self.start_time
def stop_and_return(self):
self.end_time = time.time()
return self.curr_delta()
def require_backend(backend):
return unittest.skipIf(backend not in VALID_EC_TYPES,
"%s backend is not available" % backend)
class TestPyECLib(unittest.TestCase):
def __init__(self, *args):
self.num_datas = [12, 12, 12]
self.num_parities = [2, 3, 4]
self.iterations = 100
# EC algorithm and config parameters
self.rs_types = [(PyECLib_EC_Types.jerasure_rs_vand),
(PyECLib_EC_Types.jerasure_rs_cauchy),
(PyECLib_EC_Types.isa_l_rs_vand),
(PyECLib_EC_Types.liberasurecode_rs_vand),
(PyECLib_EC_Types.isa_l_rs_cauchy)]
self.xor_types = [(PyECLib_EC_Types.flat_xor_hd, 12, 6, 4),
(PyECLib_EC_Types.flat_xor_hd, 10, 5, 4),
(PyECLib_EC_Types.flat_xor_hd, 10, 5, 3)]
self.shss = [(PyECLib_EC_Types.shss, 6, 3),
(PyECLib_EC_Types.shss, 10, 4),
(PyECLib_EC_Types.shss, 20, 4),
(PyECLib_EC_Types.shss, 11, 7)]
self.libphazr = [(PyECLib_EC_Types.libphazr, 4, 4)]
# Input temp files for testing
self.sizes = ["101-K", "202-K", "303-K"]
self.files = {}
self._create_tmp_files()
unittest.TestCase.__init__(self, *args)
def _create_tmp_files(self):
"""
Create the temporary files needed for testing. Use the tempfile
package so that the files will be automatically removed during
garbage collection.
"""
for size_str in self.sizes:
# Determine the size of the file to create
size_desc = size_str.split("-")
size = int(size_desc[0])
if size_desc[1] == 'M':
size *= 1000000
elif size_desc[1] == 'K':
size *= 1000
# Create the dictionary of files to test with
buf = ''.join(random.choice(ascii_letters) for i in range(size))
if sys.version_info >= (3,):
buf = buf.encode('ascii')
tmp_file = tempfile.NamedTemporaryFile('w+b')
tmp_file.write(buf)
self.files[size_str] = tmp_file
def get_tmp_file(self, name):
"""
Acquire a temp file from the dictionary of pre-built, random files
with the seek position to the head of the file.
"""
tmp_file = self.files.get(name, None)
if tmp_file:
tmp_file.seek(0, 0)
return tmp_file
def setUp(self):
# Ensure that the file offset is set to the head of the file
for _, tmp_file in self.files.items():
tmp_file.seek(0, 0)
def tearDown(self):
pass
def iter_available_types(self, ec_types):
found_one = False
for ec_type in ec_types:
if ec_type.name not in VALID_EC_TYPES:
continue
found_one = True
yield ec_type
if not found_one:
type_list = ', '.join(t.name for t in ec_types)
raise unittest.SkipTest('No backend available in types: %r' %
type_list)
def time_encode(self, num_data, num_parity, ec_type, hd,
file_size, iterations):
"""
:return average encode time
"""
timer = Timer()
handle = pyeclib_c.init(num_data, num_parity, ec_type, hd)
whole_file_bytes = self.get_tmp_file(file_size).read()
timer.start()
for l in range(iterations):
pyeclib_c.encode(handle, whole_file_bytes)
tsum = timer.stop_and_return()
return tsum / iterations
def time_decode(self,
num_data, num_parity, ec_type, hd,
file_size, iterations):
"""
:return 2-tuple, (success, average decode time)
"""
timer = Timer()
tsum = 0
handle = pyeclib_c.init(num_data, num_parity, ec_type, hd)
whole_file_bytes = self.get_tmp_file(file_size).read()
success = True
fragments = pyeclib_c.encode(handle, whole_file_bytes)
orig_fragments = fragments[:]
for i in range(iterations):
num_missing = hd - 1
for j in range(num_missing):
num_frags_left = len(fragments)
idx = random.randint(0, num_frags_left - 1)
fragments.pop(idx)
timer.start()
decoded_file_bytes = pyeclib_c.decode(handle,
fragments,
len(fragments[0]))
tsum += timer.stop_and_return()
fragments = orig_fragments[:]
if whole_file_bytes != decoded_file_bytes:
success = False
return success, tsum / iterations
def time_range_decode(self,
num_data, num_parity, ec_type, hd,
file_size, iterations):
"""
:return 2-tuple, (success, average decode time)
"""
timer = Timer()
tsum = 0
handle = pyeclib_c.init(num_data, num_parity, ec_type, hd)
whole_file_bytes = self.get_tmp_file(file_size).read()
success = True
begins = [int(random.randint(0, len(whole_file_bytes) - 1))
for i in range(3)]
ends = [int(random.randint(begins[i], len(whole_file_bytes)))
for i in range(3)]
ranges = list(zip(begins, ends))
fragments = pyeclib_c.encode(handle, whole_file_bytes)
orig_fragments = fragments[:]
for i in range(iterations):
num_missing = hd - 1
for j in range(num_missing):
num_frags_left = len(fragments)
idx = random.randint(0, num_frags_left - 1)
fragments.pop(idx)
timer.start()
decoded_file_ranges = pyeclib_c.decode(handle,
fragments,
len(fragments[0]),
ranges)
tsum += timer.stop_and_return()
fragments = orig_fragments[:]
range_offset = 0
for r in ranges:
if whole_file_bytes[
r[0]: r[1] + 1] != decoded_file_ranges[range_offset]:
success = False
range_offset += 1
return success, tsum / iterations
def time_reconstruct(self,
num_data, num_parity, ec_type, hd,
file_size, iterations):
"""
:return 2-tuple, (success, average reconstruct time)
"""
timer = Timer()
tsum = 0
handle = pyeclib_c.init(num_data, num_parity, ec_type, hd)
whole_file_bytes = self.get_tmp_file(file_size).read()
success = True
orig_fragments = pyeclib_c.encode(handle, whole_file_bytes)
for i in range(iterations):
fragments = orig_fragments[:]
num_missing = 1
missing_idxs = []
for j in range(num_missing):
num_frags_left = len(fragments)
idx = random.randint(0, num_frags_left - 1)
while idx in missing_idxs:
idx = random.randint(0, num_frags_left - 1)
missing_idxs.append(idx)
fragments.pop(idx)
timer.start()
reconstructed_fragment = pyeclib_c.reconstruct(handle,
fragments,
len(fragments[0]),
missing_idxs[0])
tsum += timer.stop_and_return()
if orig_fragments[missing_idxs[0]] != reconstructed_fragment:
success = False
# Output the fragments for debugging
with open("orig_fragments", "wb") as fd_orig:
fd_orig.write(orig_fragments[missing_idxs[0]])
with open("decoded_fragments", "wb") as fd_decoded:
fd_decoded.write(reconstructed_fragment)
print("Fragment %d was not reconstructed!!!" % missing_idxs[0])
sys.exit(2)
return success, tsum / iterations
def get_throughput(self, avg_time, size_str):
size_desc = size_str.split("-")
size = float(size_desc[0])
if avg_time == 0:
return '? (test finished too fast to calculate throughput)'
if size_desc[1] == 'M':
throughput = size / avg_time
elif size_desc[1] == 'K':
throughput = (size / 1000.0) / avg_time
return format(throughput, '.10g')
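    # Worked example (illustrative, not part of the original suite): a "303-K"
    # file encoded in 0.001 s reports (303 / 1000.0) / 0.001 = 303 MB/s, while an
    # "M"-sized file skips the / 1000.0 scaling, so both branches are in MB/s.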
@require_backend("flat_xor_hd_3")
def test_xor_code(self):
for (ec_type, k, m, hd) in self.xor_types:
print("\nRunning tests for flat_xor_hd k=%d, m=%d, hd=%d" %
(k, m, hd))
for size_str in self.sizes:
avg_time = self.time_encode(k, m, ec_type.value, hd,
size_str,
self.iterations)
print("Encode (%s): %s" %
(size_str, self.get_throughput(avg_time, size_str)))
for size_str in self.sizes:
success, avg_time = self.time_decode(k, m, ec_type.value, hd,
size_str,
self.iterations)
self.assertTrue(success)
print("Decode (%s): %s" %
(size_str, self.get_throughput(avg_time, size_str)))
for size_str in self.sizes:
success, avg_time = self.time_reconstruct(
k, m, ec_type.value, hd, size_str, self.iterations)
self.assertTrue(success)
print("Reconstruct (%s): %s" %
(size_str, self.get_throughput(avg_time, size_str)))
@require_backend("shss")
def test_shss(self):
for (ec_type, k, m) in self.shss:
print(("\nRunning tests for %s k=%d, m=%d" % (ec_type, k, m)))
success = self._test_get_required_fragments(k, m, ec_type)
self.assertTrue(success)
for size_str in self.sizes:
avg_time = self.time_encode(k, m, ec_type.value, 0,
size_str,
self.iterations)
print("Encode (%s): %s" %
(size_str, self.get_throughput(avg_time, size_str)))
for size_str in self.sizes:
success, avg_time = self.time_decode(k, m, ec_type.value, 0,
size_str,
self.iterations)
self.assertTrue(success)
print("Decode (%s): %s" %
(size_str, self.get_throughput(avg_time, size_str)))
for size_str in self.sizes:
success, avg_time = self.time_reconstruct(
k, m, ec_type.value, 0, size_str, self.iterations)
self.assertTrue(success)
print("Reconstruct (%s): %s" %
(size_str, self.get_throughput(avg_time, size_str)))
@require_backend("libphazr")
def test_libphazr(self):
for (ec_type, k, m) in self.libphazr:
print(("\nRunning tests for %s k=%d, m=%d" % (ec_type, k, m)))
success = self._test_get_required_fragments(k, m, ec_type)
self.assertTrue(success)
for size_str in self.sizes:
avg_time = self.time_encode(k, m, ec_type.value, 0,
size_str,
self.iterations)
print("Encode (%s): %s" %
(size_str, self.get_throughput(avg_time, size_str)))
for size_str in self.sizes:
success, avg_time = self.time_decode(k, m, ec_type.value, 0,
size_str,
self.iterations)
self.assertTrue(success)
print("Decode (%s): %s" %
(size_str, self.get_throughput(avg_time, size_str)))
for size_str in self.sizes:
success, avg_time = self.time_reconstruct(
k, m, ec_type.value, 0, size_str, self.iterations)
self.assertTrue(success)
print("Reconstruct (%s): %s" %
(size_str, self.get_throughput(avg_time, size_str)))
def _test_get_required_fragments(self, num_data, num_parity, ec_type):
"""
:return boolean, True if all tests passed
"""
handle = pyeclib_c.init(num_data, num_parity, ec_type.value)
success = True
#
# MDS codes need any k fragments
#
        if ec_type.name in ["jerasure_rs_vand", "jerasure_rs_cauchy",
                            "liberasurecode_rs_vand"]:
expected_fragments = [i for i in range(num_data + num_parity)]
missing_fragments = []
#
            # Remove between 0 and num_parity - 1 fragments at random
#
for i in range(random.randint(0, num_parity - 1)):
missing_fragment = random.sample(expected_fragments, 1)[0]
missing_fragments.append(missing_fragment)
expected_fragments.remove(missing_fragment)
expected_fragments = expected_fragments[:num_data]
required_fragments = pyeclib_c.get_required_fragments(
handle,
missing_fragments, [])
if expected_fragments != required_fragments:
success = False
print("Unexpected required fragments list "
"(exp != req): %s != %s" % (
expected_fragments, required_fragments))
return success
def test_codes(self):
for ec_type in self.iter_available_types(self.rs_types):
for i in range(len(self.num_datas)):
success = self._test_get_required_fragments(
self.num_datas[i], self.num_parities[i], ec_type)
self.assertTrue(success)
for i in range(len(self.num_datas)):
for size_str in self.sizes:
avg_time = self.time_encode(
self.num_datas[i], self.num_parities[i], ec_type.value,
self.num_parities[i] + 1, size_str, self.iterations)
print("Encode (%s): %s" %
(size_str, self.get_throughput(avg_time, size_str)))
for i in range(len(self.num_datas)):
for size_str in self.sizes:
success, avg_time = self.time_decode(
self.num_datas[i], self.num_parities[i], ec_type.value,
self.num_parities[i] + 1, size_str, self.iterations)
self.assertTrue(success)
print("Decode (%s): %s" %
(size_str, self.get_throughput(avg_time, size_str)))
for i in range(len(self.num_datas)):
for size_str in self.sizes:
success, avg_time = self.time_range_decode(
self.num_datas[i], self.num_parities[i], ec_type.value,
self.num_parities[i] + 1, size_str, self.iterations)
self.assertTrue(success)
print("Range Decode (%s): %s" %
(size_str, self.get_throughput(avg_time, size_str)))
for i in range(len(self.num_datas)):
for size_str in self.sizes:
success, avg_time = self.time_reconstruct(
self.num_datas[i], self.num_parities[i], ec_type.value,
self.num_parities[i] + 1, size_str, self.iterations)
self.assertTrue(success)
print("Reconstruct (%s): %s" %
(size_str, self.get_throughput(avg_time, size_str)))
if __name__ == "__main__":
unittest.main()
|
the-stack_106_27449 | """
This file contains primitives for multi-gpu communication.
This is useful when doing distributed training.
"""
import pickle
import torch
import torch.distributed as dist
def get_world_size():
if not dist.is_available():
return 1
if not dist.is_initialized():
return 1
return dist.get_world_size()
def get_rank():
if not dist.is_available():
return 0
if not dist.is_initialized():
return 0
return dist.get_rank()
def is_main_process():
return get_rank() == 0
def synchronize():
"""
Helper function to synchronize (barrier) among all processes when
using distributed training
"""
if not dist.is_available():
return
if not dist.is_initialized():
return
world_size = dist.get_world_size()
if world_size == 1:
return
dist.barrier()
def all_gather(data):
"""
Run all_gather on arbitrary picklable data (not necessarily tensors)
Args:
data: any picklable object
Returns:
list[data]: list of data gathered from each rank
"""
world_size = get_world_size()
if world_size == 1:
return [data]
# serialized to a Tensor
buffer = pickle.dumps(data)
storage = torch.ByteStorage.from_buffer(buffer)
tensor = torch.ByteTensor(storage).to("cuda")
# obtain Tensor size of each rank
local_size = torch.IntTensor([tensor.numel()]).to("cuda")
size_list = [torch.IntTensor([0]).to("cuda") for _ in range(world_size)]
dist.all_gather(size_list, local_size)
size_list = [int(size.item()) for size in size_list]
max_size = max(size_list)
# receiving Tensor from all ranks
# we pad the tensor because torch all_gather does not support
# gathering tensors of different shapes
tensor_list = []
for _ in size_list:
tensor_list.append(torch.ByteTensor(size=(max_size,)).to("cuda"))
if local_size != max_size:
padding = torch.ByteTensor(size=(max_size - local_size,)).to("cuda")
tensor = torch.cat((tensor, padding), dim=0)
dist.all_gather(tensor_list, tensor)
data_list = []
for size, tensor in zip(size_list, tensor_list):
buffer = tensor.cpu().numpy().tobytes()[:size]
data_list.append(pickle.loads(buffer))
return data_list
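# Illustrative sketch (not part of the original module): one way all_gather()
# might be used to collect per-rank metrics; assumes torch.distributed has
# already been initialized via init_process_group().
def _example_gather_metrics(local_metrics):
    # local_metrics can be any picklable object, e.g. {"rank": get_rank(), "loss": 0.42}
    gathered = all_gather(local_metrics)  # one entry per rank, ordered by rank
    if is_main_process():
        return gathered
    return None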
def reduce_dict(input_dict, average=True):
"""
Args:
input_dict (dict): all the values will be reduced
average (bool): whether to do average or sum
    Reduce the values in the dictionary from all processes so that the process with
    rank 0 has the averaged results. Returns a dict with the same fields as
    input_dict, after reduction.
"""
world_size = get_world_size()
if world_size < 2:
return input_dict
with torch.no_grad():
names = []
values = []
# sort the keys so that they are consistent across processes
for k in sorted(input_dict.keys()):
names.append(k)
values.append(input_dict[k])
values = torch.stack(values, dim=0)
dist.reduce(values, dst=0)
if dist.get_rank() == 0 and average:
# only main process gets accumulated, so only divide by
# world_size in this case
values /= world_size
reduced_dict = {k: v for k, v in zip(names, values)}
return reduced_dict
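# Illustrative sketch (not part of the original module): average a dict of loss
# tensors across ranks for logging; assumes every rank passes the same keys and
# that the tensors live on the device expected by the backend (e.g. CUDA for NCCL).
def _example_reduce_losses(loss_dict):
    # e.g. loss_dict = {"loss_cls": torch.tensor(0.7), "loss_box": torch.tensor(0.3)}
    reduced = reduce_dict(loss_dict, average=True)
    if is_main_process():
        # only rank 0 receives the reduced (averaged) values
        return {k: v.item() for k, v in reduced.items()}
    return None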
def is_pytorch_1_1_0_or_later():
return [int(_) for _ in torch.__version__.split("+")[0].split(".")[:3]] >= [1, 1, 0]
|
the-stack_106_27450 | from pyspark.sql.functions import col
import matplotlib.pyplot as plt
def violating_precicts(nyc_data, enable_plot=True):
nyc_precints = nyc_data.select('violation_precinct')\
.filter(col('violation_precinct') != 0)\
.groupBy('violation_precinct')\
.agg({'violation_precinct':'count'})\
.withColumnRenamed("count(violation_precinct)", "no_of_violations")\
.sort('no_of_violations', ascending=False)
nyc_precints_pd =nyc_precints.toPandas()
if enable_plot:
ax = nyc_precints_pd.head(10).plot.bar(x='violation_precinct', y='no_of_violations', figsize=(10, 5))
ax.set_title("Police Precinct Zone vs No. of violations")
ax.set_xlabel("Police Precinct Zone")
ax.set_ylabel("No. of Violations")
fig = ax.get_figure()
fig.savefig('../output/violating_precicts.png')
return nyc_precints_pd
def issuing_precincts(nyc_data, enable_plot=True):
nyc_precints = nyc_data.select('issuer_precinct')\
.filter(col('issuer_precinct') != 0)\
.groupBy('issuer_precinct')\
.agg({'issuer_precinct':'count'})\
.withColumnRenamed("count(issuer_precinct)", "no_of_violations")\
                            .sort('no_of_violations', ascending=False)
nyc_precints_pd =nyc_precints.toPandas()
if enable_plot:
ax = nyc_precints_pd.head(10).plot.bar(x='issuer_precinct', y='no_of_violations', figsize=(10, 5))
ax.set_title("Police Precinct vs No. of issued violations")
ax.set_xlabel("Police Precinct")
ax.set_ylabel("No. of issued violations")
fig = ax.get_figure()
fig.savefig('../output/issuing_precincts.png')
return nyc_precints_pd
def violation_code_frequency_top3_precincts(nyc_data, enable_plot=True):
top3_precints = nyc_data.filter(col('violation_code') != 0)\
.filter(col('issuer_precinct') != 0)\
.select('issuer_precinct')\
.groupBy('issuer_precinct')\
.agg({'issuer_precinct':'count'})\
.sort('count(issuer_precinct)', ascending=False)\
.take(3)
top3 = [row['issuer_precinct'] for row in top3_precints]
filtered_data = nyc_data.filter((col('issuer_precinct') == top3[0]) | (col('issuer_precinct') == top3[1]) | (col('issuer_precinct') == top3[2]))
violation_frequencies_df = filtered_data.select('violation_code')\
.groupBy('violation_code')\
.agg({'violation_code':'count'})\
.withColumnRenamed('count(violation_code)', 'Freq of Violations')\
.sort('Freq of Violations', ascending=False)
violation_frequencies = violation_frequencies_df.collect()
if enable_plot:
violations = [row['violation_code'] for row in violation_frequencies]
frequencies = [row['Freq of Violations'] for row in violation_frequencies]
fig, ax = plt.subplots(1, 1, figsize=(10,5))
ax.set_title("Violation Code Vs No. of violations of Top 3 precincts (ticket issue wise)")
ax.set_xlabel("Violation Code")
ax.set_ylabel("o. of violations")
ax.bar(violations[:10], frequencies[:10])
fig.savefig('../output/violation_code_frequency_top3_precincts.png')
print("Top 3 Violating Precicts :", top3)
return violation_frequencies_df.toPandas()
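# Minimal usage sketch (not part of the original module). The SparkSession name,
# CSV path and schema below are assumptions for illustration only; the real data
# must provide violation_precinct, issuer_precinct and violation_code columns.
if __name__ == "__main__":
    from pyspark.sql import SparkSession
    spark = SparkSession.builder.appName("nyc-parking-analysis").getOrCreate()
    nyc_data = spark.read.csv("nyc_parking_tickets.csv", header=True, inferSchema=True)
    violating_precicts(nyc_data, enable_plot=False)
    issuing_precincts(nyc_data, enable_plot=False)
    violation_code_frequency_top3_precincts(nyc_data, enable_plot=False)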
|
the-stack_106_27451 | # coding: utf-8
"""Test the contents webservice API."""
import base64
from contextlib import contextmanager
import io
import json
import os
import shutil
from unicodedata import normalize
pjoin = os.path.join
import requests
from ..filecheckpoints import GenericFileCheckpoints
from traitlets.config import Config
from notebook.utils import url_path_join, url_escape, to_os_path
from notebook.tests.launchnotebook import NotebookTestBase, assert_http_error
from nbformat import read, write, from_dict
from nbformat.v4 import (
new_notebook, new_markdown_cell,
)
from nbformat import v2
from ipython_genutils import py3compat
from ipython_genutils.tempdir import TemporaryDirectory
def uniq_stable(elems):
"""uniq_stable(elems) -> list
Return from an iterable, a list of all the unique elements in the input,
maintaining the order in which they first appear.
"""
seen = set()
return [x for x in elems if x not in seen and not seen.add(x)]
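# For example, uniq_stable([1, 2, 1, 3, 2]) returns [1, 2, 3]: set.add() yields
# None, so "not seen.add(x)" records x as seen while keeping the expression truthy.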
def notebooks_only(dir_model):
return [nb for nb in dir_model['content'] if nb['type']=='notebook']
def dirs_only(dir_model):
return [x for x in dir_model['content'] if x['type']=='directory']
class API(object):
"""Wrapper for contents API calls."""
def __init__(self, request):
self.request = request
def _req(self, verb, path, body=None, params=None):
response = self.request(verb,
url_path_join('api/contents', path),
data=body, params=params,
)
response.raise_for_status()
return response
def list(self, path='/'):
return self._req('GET', path)
def read(self, path, type=None, format=None, content=None):
params = {}
if type is not None:
params['type'] = type
if format is not None:
params['format'] = format
if content == False:
params['content'] = '0'
return self._req('GET', path, params=params)
def create_untitled(self, path='/', ext='.ipynb'):
body = None
if ext:
body = json.dumps({'ext': ext})
return self._req('POST', path, body)
def mkdir_untitled(self, path='/'):
return self._req('POST', path, json.dumps({'type': 'directory'}))
def copy(self, copy_from, path='/'):
body = json.dumps({'copy_from':copy_from})
return self._req('POST', path, body)
def create(self, path='/'):
return self._req('PUT', path)
def upload(self, path, body):
return self._req('PUT', path, body)
def mkdir(self, path='/'):
return self._req('PUT', path, json.dumps({'type': 'directory'}))
def copy_put(self, copy_from, path='/'):
body = json.dumps({'copy_from':copy_from})
return self._req('PUT', path, body)
def save(self, path, body):
return self._req('PUT', path, body)
def delete(self, path='/'):
return self._req('DELETE', path)
def rename(self, path, new_path):
body = json.dumps({'path': new_path})
return self._req('PATCH', path, body)
def get_checkpoints(self, path):
return self._req('GET', url_path_join(path, 'checkpoints'))
def new_checkpoint(self, path):
return self._req('POST', url_path_join(path, 'checkpoints'))
def restore_checkpoint(self, path, checkpoint_id):
return self._req('POST', url_path_join(path, 'checkpoints', checkpoint_id))
def delete_checkpoint(self, path, checkpoint_id):
return self._req('DELETE', url_path_join(path, 'checkpoints', checkpoint_id))
class APITest(NotebookTestBase):
"""Test the kernels web service API"""
dirs_nbs = [('', 'inroot'),
('Directory with spaces in', 'inspace'),
(u'unicodé', 'innonascii'),
('foo', 'a'),
('foo', 'b'),
('foo', 'name with spaces'),
('foo', u'unicodé'),
('foo/bar', 'baz'),
('ordering', 'A'),
('ordering', 'b'),
('ordering', 'C'),
(u'å b', u'ç d'),
]
hidden_dirs = ['.hidden', '__pycache__']
# Don't include root dir.
dirs = uniq_stable([py3compat.cast_unicode(d) for (d,n) in dirs_nbs[1:]])
top_level_dirs = {normalize('NFC', d.split('/')[0]) for d in dirs}
@staticmethod
def _blob_for_name(name):
return name.encode('utf-8') + b'\xFF'
@staticmethod
def _txt_for_name(name):
return u'%s text file' % name
def to_os_path(self, api_path):
return to_os_path(api_path, root=self.notebook_dir.name)
def make_dir(self, api_path):
"""Create a directory at api_path"""
os_path = self.to_os_path(api_path)
try:
os.makedirs(os_path)
except OSError:
print("Directory already exists: %r" % os_path)
def make_txt(self, api_path, txt):
"""Make a text file at a given api_path"""
os_path = self.to_os_path(api_path)
with io.open(os_path, 'w', encoding='utf-8') as f:
f.write(txt)
def make_blob(self, api_path, blob):
"""Make a binary file at a given api_path"""
os_path = self.to_os_path(api_path)
with io.open(os_path, 'wb') as f:
f.write(blob)
def make_nb(self, api_path, nb):
"""Make a notebook file at a given api_path"""
os_path = self.to_os_path(api_path)
with io.open(os_path, 'w', encoding='utf-8') as f:
write(nb, f, version=4)
def delete_dir(self, api_path):
"""Delete a directory at api_path, removing any contents."""
os_path = self.to_os_path(api_path)
shutil.rmtree(os_path, ignore_errors=True)
def delete_file(self, api_path):
"""Delete a file at the given path if it exists."""
if self.isfile(api_path):
os.unlink(self.to_os_path(api_path))
def isfile(self, api_path):
return os.path.isfile(self.to_os_path(api_path))
def isdir(self, api_path):
return os.path.isdir(self.to_os_path(api_path))
def setUp(self):
for d in (self.dirs + self.hidden_dirs):
self.make_dir(d)
for d, name in self.dirs_nbs:
# create a notebook
nb = new_notebook()
self.make_nb(u'{}/{}.ipynb'.format(d, name), nb)
# create a text file
txt = self._txt_for_name(name)
self.make_txt(u'{}/{}.txt'.format(d, name), txt)
# create a binary file
blob = self._blob_for_name(name)
self.make_blob(u'{}/{}.blob'.format(d, name), blob)
self.api = API(self.request)
def tearDown(self):
for dname in (list(self.top_level_dirs) + self.hidden_dirs):
self.delete_dir(dname)
self.delete_file('inroot.ipynb')
def test_list_notebooks(self):
nbs = notebooks_only(self.api.list().json())
self.assertEqual(len(nbs), 1)
self.assertEqual(nbs[0]['name'], 'inroot.ipynb')
nbs = notebooks_only(self.api.list('/Directory with spaces in/').json())
self.assertEqual(len(nbs), 1)
self.assertEqual(nbs[0]['name'], 'inspace.ipynb')
nbs = notebooks_only(self.api.list(u'/unicodé/').json())
self.assertEqual(len(nbs), 1)
self.assertEqual(nbs[0]['name'], 'innonascii.ipynb')
self.assertEqual(nbs[0]['path'], u'unicodé/innonascii.ipynb')
nbs = notebooks_only(self.api.list('/foo/bar/').json())
self.assertEqual(len(nbs), 1)
self.assertEqual(nbs[0]['name'], 'baz.ipynb')
self.assertEqual(nbs[0]['path'], 'foo/bar/baz.ipynb')
nbs = notebooks_only(self.api.list('foo').json())
self.assertEqual(len(nbs), 4)
nbnames = { normalize('NFC', n['name']) for n in nbs }
expected = [ u'a.ipynb', u'b.ipynb', u'name with spaces.ipynb', u'unicodé.ipynb']
expected = { normalize('NFC', name) for name in expected }
self.assertEqual(nbnames, expected)
nbs = notebooks_only(self.api.list('ordering').json())
nbnames = [n['name'] for n in nbs]
expected = ['A.ipynb', 'b.ipynb', 'C.ipynb']
self.assertEqual(nbnames, expected)
def test_list_dirs(self):
dirs = dirs_only(self.api.list().json())
dir_names = {normalize('NFC', d['name']) for d in dirs}
self.assertEqual(dir_names, self.top_level_dirs) # Excluding hidden dirs
def test_get_dir_no_content(self):
for d in self.dirs:
model = self.api.read(d, content=False).json()
self.assertEqual(model['path'], d)
self.assertEqual(model['type'], 'directory')
self.assertIn('content', model)
self.assertEqual(model['content'], None)
def test_list_nonexistant_dir(self):
with assert_http_error(404):
self.api.list('nonexistant')
def test_get_nb_contents(self):
for d, name in self.dirs_nbs:
path = url_path_join(d, name + '.ipynb')
nb = self.api.read(path).json()
self.assertEqual(nb['name'], u'%s.ipynb' % name)
self.assertEqual(nb['path'], path)
self.assertEqual(nb['type'], 'notebook')
self.assertIn('content', nb)
self.assertEqual(nb['format'], 'json')
self.assertIn('metadata', nb['content'])
self.assertIsInstance(nb['content']['metadata'], dict)
def test_get_nb_no_content(self):
for d, name in self.dirs_nbs:
path = url_path_join(d, name + '.ipynb')
nb = self.api.read(path, content=False).json()
self.assertEqual(nb['name'], u'%s.ipynb' % name)
self.assertEqual(nb['path'], path)
self.assertEqual(nb['type'], 'notebook')
self.assertIn('content', nb)
self.assertEqual(nb['content'], None)
def test_get_contents_no_such_file(self):
# Name that doesn't exist - should be a 404
with assert_http_error(404):
self.api.read('foo/q.ipynb')
def test_get_text_file_contents(self):
for d, name in self.dirs_nbs:
path = url_path_join(d, name + '.txt')
model = self.api.read(path).json()
self.assertEqual(model['name'], u'%s.txt' % name)
self.assertEqual(model['path'], path)
self.assertIn('content', model)
self.assertEqual(model['format'], 'text')
self.assertEqual(model['type'], 'file')
self.assertEqual(model['content'], self._txt_for_name(name))
# Name that doesn't exist - should be a 404
with assert_http_error(404):
self.api.read('foo/q.txt')
# Specifying format=text should fail on a non-UTF-8 file
with assert_http_error(400):
self.api.read('foo/bar/baz.blob', type='file', format='text')
def test_get_binary_file_contents(self):
for d, name in self.dirs_nbs:
path = url_path_join(d, name + '.blob')
model = self.api.read(path).json()
self.assertEqual(model['name'], u'%s.blob' % name)
self.assertEqual(model['path'], path)
self.assertIn('content', model)
self.assertEqual(model['format'], 'base64')
self.assertEqual(model['type'], 'file')
self.assertEqual(
base64.decodestring(model['content'].encode('ascii')),
self._blob_for_name(name),
)
# Name that doesn't exist - should be a 404
with assert_http_error(404):
self.api.read('foo/q.txt')
def test_get_bad_type(self):
with assert_http_error(400):
self.api.read(u'unicodé', type='file') # this is a directory
with assert_http_error(400):
self.api.read(u'unicodé/innonascii.ipynb', type='directory')
def _check_created(self, resp, path, type='notebook'):
self.assertEqual(resp.status_code, 201)
location_header = py3compat.str_to_unicode(resp.headers['Location'])
self.assertEqual(location_header, url_path_join(self.url_prefix, u'api/contents', url_escape(path)))
rjson = resp.json()
self.assertEqual(rjson['name'], path.rsplit('/', 1)[-1])
self.assertEqual(rjson['path'], path)
self.assertEqual(rjson['type'], type)
isright = self.isdir if type == 'directory' else self.isfile
assert isright(path)
def test_create_untitled(self):
resp = self.api.create_untitled(path=u'å b')
self._check_created(resp, u'å b/Untitled.ipynb')
# Second time
resp = self.api.create_untitled(path=u'å b')
self._check_created(resp, u'å b/Untitled1.ipynb')
# And two directories down
resp = self.api.create_untitled(path='foo/bar')
self._check_created(resp, 'foo/bar/Untitled.ipynb')
def test_create_untitled_txt(self):
resp = self.api.create_untitled(path='foo/bar', ext='.txt')
self._check_created(resp, 'foo/bar/untitled.txt', type='file')
resp = self.api.read(path='foo/bar/untitled.txt')
model = resp.json()
self.assertEqual(model['type'], 'file')
self.assertEqual(model['format'], 'text')
self.assertEqual(model['content'], '')
def test_upload(self):
nb = new_notebook()
nbmodel = {'content': nb, 'type': 'notebook'}
path = u'å b/Upload tést.ipynb'
resp = self.api.upload(path, body=json.dumps(nbmodel))
self._check_created(resp, path)
def test_mkdir_untitled(self):
resp = self.api.mkdir_untitled(path=u'å b')
self._check_created(resp, u'å b/Untitled Folder', type='directory')
# Second time
resp = self.api.mkdir_untitled(path=u'å b')
self._check_created(resp, u'å b/Untitled Folder 1', type='directory')
# And two directories down
resp = self.api.mkdir_untitled(path='foo/bar')
self._check_created(resp, 'foo/bar/Untitled Folder', type='directory')
def test_mkdir(self):
path = u'å b/New ∂ir'
resp = self.api.mkdir(path)
self._check_created(resp, path, type='directory')
def test_mkdir_hidden_400(self):
with assert_http_error(400):
resp = self.api.mkdir(u'å b/.hidden')
def test_upload_txt(self):
body = u'ünicode téxt'
model = {
'content' : body,
'format' : 'text',
'type' : 'file',
}
path = u'å b/Upload tést.txt'
resp = self.api.upload(path, body=json.dumps(model))
# check roundtrip
resp = self.api.read(path)
model = resp.json()
self.assertEqual(model['type'], 'file')
self.assertEqual(model['format'], 'text')
self.assertEqual(model['content'], body)
def test_upload_b64(self):
body = b'\xFFblob'
b64body = base64.encodestring(body).decode('ascii')
model = {
'content' : b64body,
'format' : 'base64',
'type' : 'file',
}
path = u'å b/Upload tést.blob'
resp = self.api.upload(path, body=json.dumps(model))
# check roundtrip
resp = self.api.read(path)
model = resp.json()
self.assertEqual(model['type'], 'file')
self.assertEqual(model['path'], path)
self.assertEqual(model['format'], 'base64')
decoded = base64.decodestring(model['content'].encode('ascii'))
self.assertEqual(decoded, body)
def test_upload_v2(self):
nb = v2.new_notebook()
ws = v2.new_worksheet()
nb.worksheets.append(ws)
ws.cells.append(v2.new_code_cell(input='print("hi")'))
nbmodel = {'content': nb, 'type': 'notebook'}
path = u'å b/Upload tést.ipynb'
resp = self.api.upload(path, body=json.dumps(nbmodel))
self._check_created(resp, path)
resp = self.api.read(path)
data = resp.json()
self.assertEqual(data['content']['nbformat'], 4)
def test_copy(self):
resp = self.api.copy(u'å b/ç d.ipynb', u'å b')
self._check_created(resp, u'å b/ç d-Copy1.ipynb')
resp = self.api.copy(u'å b/ç d.ipynb', u'å b')
self._check_created(resp, u'å b/ç d-Copy2.ipynb')
def test_copy_copy(self):
resp = self.api.copy(u'å b/ç d.ipynb', u'å b')
self._check_created(resp, u'å b/ç d-Copy1.ipynb')
resp = self.api.copy(u'å b/ç d-Copy1.ipynb', u'å b')
self._check_created(resp, u'å b/ç d-Copy2.ipynb')
def test_copy_path(self):
resp = self.api.copy(u'foo/a.ipynb', u'å b')
self._check_created(resp, u'å b/a.ipynb')
resp = self.api.copy(u'foo/a.ipynb', u'å b')
self._check_created(resp, u'å b/a-Copy1.ipynb')
def test_copy_put_400(self):
with assert_http_error(400):
resp = self.api.copy_put(u'å b/ç d.ipynb', u'å b/cøpy.ipynb')
def test_copy_dir_400(self):
# can't copy directories
with assert_http_error(400):
resp = self.api.copy(u'å b', u'foo')
def test_delete(self):
for d, name in self.dirs_nbs:
print('%r, %r' % (d, name))
resp = self.api.delete(url_path_join(d, name + '.ipynb'))
self.assertEqual(resp.status_code, 204)
for d in self.dirs + ['/']:
nbs = notebooks_only(self.api.list(d).json())
print('------')
print(d)
print(nbs)
self.assertEqual(nbs, [])
def test_delete_dirs(self):
# depth-first delete everything, so we don't try to delete empty directories
for name in sorted(self.dirs + ['/'], key=len, reverse=True):
listing = self.api.list(name).json()['content']
for model in listing:
self.api.delete(model['path'])
listing = self.api.list('/').json()['content']
self.assertEqual(listing, [])
def test_delete_non_empty_dir(self):
"""delete non-empty dir raises 400"""
with assert_http_error(400):
self.api.delete(u'å b')
def test_rename(self):
resp = self.api.rename('foo/a.ipynb', 'foo/z.ipynb')
self.assertEqual(resp.headers['Location'].split('/')[-1], 'z.ipynb')
self.assertEqual(resp.json()['name'], 'z.ipynb')
self.assertEqual(resp.json()['path'], 'foo/z.ipynb')
assert self.isfile('foo/z.ipynb')
nbs = notebooks_only(self.api.list('foo').json())
nbnames = set(n['name'] for n in nbs)
self.assertIn('z.ipynb', nbnames)
self.assertNotIn('a.ipynb', nbnames)
def test_checkpoints_follow_file(self):
# Read initial file state
orig = self.api.read('foo/a.ipynb')
# Create a checkpoint of initial state
r = self.api.new_checkpoint('foo/a.ipynb')
cp1 = r.json()
# Modify file and save
nbcontent = json.loads(orig.text)['content']
nb = from_dict(nbcontent)
hcell = new_markdown_cell('Created by test')
nb.cells.append(hcell)
nbmodel = {'content': nb, 'type': 'notebook'}
self.api.save('foo/a.ipynb', body=json.dumps(nbmodel))
# Rename the file.
self.api.rename('foo/a.ipynb', 'foo/z.ipynb')
# Looking for checkpoints in the old location should yield no results.
self.assertEqual(self.api.get_checkpoints('foo/a.ipynb').json(), [])
# Looking for checkpoints in the new location should work.
cps = self.api.get_checkpoints('foo/z.ipynb').json()
self.assertEqual(cps, [cp1])
# Delete the file. The checkpoint should be deleted as well.
self.api.delete('foo/z.ipynb')
cps = self.api.get_checkpoints('foo/z.ipynb').json()
self.assertEqual(cps, [])
def test_rename_existing(self):
with assert_http_error(409):
self.api.rename('foo/a.ipynb', 'foo/b.ipynb')
def test_save(self):
resp = self.api.read('foo/a.ipynb')
nbcontent = json.loads(resp.text)['content']
nb = from_dict(nbcontent)
nb.cells.append(new_markdown_cell(u'Created by test ³'))
nbmodel = {'content': nb, 'type': 'notebook'}
resp = self.api.save('foo/a.ipynb', body=json.dumps(nbmodel))
nbcontent = self.api.read('foo/a.ipynb').json()['content']
newnb = from_dict(nbcontent)
self.assertEqual(newnb.cells[0].source,
u'Created by test ³')
def test_checkpoints(self):
resp = self.api.read('foo/a.ipynb')
r = self.api.new_checkpoint('foo/a.ipynb')
self.assertEqual(r.status_code, 201)
cp1 = r.json()
self.assertEqual(set(cp1), {'id', 'last_modified'})
self.assertEqual(r.headers['Location'].split('/')[-1], cp1['id'])
# Modify it
nbcontent = json.loads(resp.text)['content']
nb = from_dict(nbcontent)
hcell = new_markdown_cell('Created by test')
nb.cells.append(hcell)
# Save
nbmodel= {'content': nb, 'type': 'notebook'}
resp = self.api.save('foo/a.ipynb', body=json.dumps(nbmodel))
# List checkpoints
cps = self.api.get_checkpoints('foo/a.ipynb').json()
self.assertEqual(cps, [cp1])
nbcontent = self.api.read('foo/a.ipynb').json()['content']
nb = from_dict(nbcontent)
self.assertEqual(nb.cells[0].source, 'Created by test')
# Restore cp1
r = self.api.restore_checkpoint('foo/a.ipynb', cp1['id'])
self.assertEqual(r.status_code, 204)
nbcontent = self.api.read('foo/a.ipynb').json()['content']
nb = from_dict(nbcontent)
self.assertEqual(nb.cells, [])
# Delete cp1
r = self.api.delete_checkpoint('foo/a.ipynb', cp1['id'])
self.assertEqual(r.status_code, 204)
cps = self.api.get_checkpoints('foo/a.ipynb').json()
self.assertEqual(cps, [])
def test_file_checkpoints(self):
"""
Test checkpointing of non-notebook files.
"""
filename = 'foo/a.txt'
resp = self.api.read(filename)
orig_content = json.loads(resp.text)['content']
# Create a checkpoint.
r = self.api.new_checkpoint(filename)
self.assertEqual(r.status_code, 201)
cp1 = r.json()
self.assertEqual(set(cp1), {'id', 'last_modified'})
self.assertEqual(r.headers['Location'].split('/')[-1], cp1['id'])
# Modify the file and save.
new_content = orig_content + '\nsecond line'
model = {
'content': new_content,
'type': 'file',
'format': 'text',
}
resp = self.api.save(filename, body=json.dumps(model))
# List checkpoints
cps = self.api.get_checkpoints(filename).json()
self.assertEqual(cps, [cp1])
content = self.api.read(filename).json()['content']
self.assertEqual(content, new_content)
# Restore cp1
r = self.api.restore_checkpoint(filename, cp1['id'])
self.assertEqual(r.status_code, 204)
restored_content = self.api.read(filename).json()['content']
self.assertEqual(restored_content, orig_content)
# Delete cp1
r = self.api.delete_checkpoint(filename, cp1['id'])
self.assertEqual(r.status_code, 204)
cps = self.api.get_checkpoints(filename).json()
self.assertEqual(cps, [])
@contextmanager
def patch_cp_root(self, dirname):
"""
Temporarily patch the root dir of our checkpoint manager.
"""
cpm = self.notebook.contents_manager.checkpoints
old_dirname = cpm.root_dir
cpm.root_dir = dirname
try:
yield
finally:
cpm.root_dir = old_dirname
def test_checkpoints_separate_root(self):
"""
Test that FileCheckpoints functions correctly even when it's
using a different root dir from FileContentsManager. This also keeps
the implementation honest for use with ContentsManagers that don't map
        models to the filesystem.
Override this method to a no-op when testing other managers.
"""
with TemporaryDirectory() as td:
with self.patch_cp_root(td):
self.test_checkpoints()
with TemporaryDirectory() as td:
with self.patch_cp_root(td):
self.test_file_checkpoints()
class GenericFileCheckpointsAPITest(APITest):
"""
Run the tests from APITest with GenericFileCheckpoints.
"""
config = Config()
config.FileContentsManager.checkpoints_class = GenericFileCheckpoints
def test_config_did_something(self):
self.assertIsInstance(
self.notebook.contents_manager.checkpoints,
GenericFileCheckpoints,
)
|
the-stack_106_27453 | from conans import ConanFile, tools, Meson, VisualStudioBuildEnvironment
from conans.errors import ConanInvalidConfiguration
from conan.tools.microsoft import msvc_runtime_flag
import glob
import os
import shutil
class GStLibAVConan(ConanFile):
name = "gst-libav"
description = "GStreamer is a development framework for creating applications like media players, video editors, " \
"streaming media broadcasters and so on"
topics = ("gstreamer", "multimedia", "video", "audio", "broadcasting", "framework", "media")
url = "https://github.com/conan-io/conan-center-index"
homepage = "https://gstreamer.freedesktop.org/"
license = "GPL-2.0-only"
settings = "os", "arch", "compiler", "build_type"
options = {
"shared": [True, False],
"fPIC": [True, False],
"with_introspection": [True, False],
}
default_options = {
"shared": False,
"fPIC": True,
"with_introspection": False,
}
_source_subfolder = "source_subfolder"
_build_subfolder = "build_subfolder"
exports_sources = ["patches/*.patch"]
generators = "pkg_config"
@property
def _is_msvc(self):
return self.settings.compiler == "Visual Studio"
def validate(self):
if self.options.shared != self.options["gstreamer"].shared or \
self.options.shared != self.options["glib"].shared or \
self.options.shared != self.options["gst-plugins-base"].shared:
# https://gitlab.freedesktop.org/gstreamer/gst-build/-/issues/133
raise ConanInvalidConfiguration("GLib, GStreamer and GstPlugins must be either all shared, or all static")
if tools.Version(self.version) >= "1.18.2" and\
self.settings.compiler == "gcc" and\
tools.Version(self.settings.compiler.version) < "5":
raise ConanInvalidConfiguration(
"gst-plugins-good %s does not support gcc older than 5" % self.version
)
if self.options.shared and str(msvc_runtime_flag(self)).startswith("MT"):
raise ConanInvalidConfiguration('shared build with static runtime is not supported due to the FlsAlloc limit')
def configure(self):
if self.options.shared:
del self.options.fPIC
del self.settings.compiler.libcxx
del self.settings.compiler.cppstd
self.options['gstreamer'].shared = self.options.shared
self.options['gst-plugins-base'].shared = self.options.shared
def config_options(self):
if self.settings.os == 'Windows':
del self.options.fPIC
def requirements(self):
self.requires("glib/2.70.1")
self.requires("gstreamer/1.19.1")
self.requires("gst-plugins-base/1.19.1")
self.requires('ffmpeg/4.4')
if self.settings.os == 'Linux':
self.requires('libalsa/1.2.5.1') # temp - conflict with gst-plugins-base
def build_requirements(self):
self.build_requires("meson/0.54.2")
if not tools.which("pkg-config"):
self.build_requires("pkgconf/1.7.4")
if self.settings.os == 'Windows':
self.build_requires("winflexbison/2.5.24")
else:
self.build_requires("bison/3.7.6")
self.build_requires("flex/2.6.4")
if self.options.with_introspection:
self.build_requires("gobject-introspection/1.68.0")
def source(self):
tools.get(**self.conan_data["sources"][self.version],
destination=self._source_subfolder, strip_root=True)
def _configure_meson(self):
defs = dict()
def add_flag(name, value):
if name in defs:
defs[name] += " " + value
else:
defs[name] = value
def add_compiler_flag(value):
add_flag("c_args", value)
add_flag("cpp_args", value)
def add_linker_flag(value):
add_flag("c_link_args", value)
add_flag("cpp_link_args", value)
meson = Meson(self)
if self.settings.compiler == "Visual Studio":
add_linker_flag("-lws2_32")
add_compiler_flag("-%s" % self.settings.compiler.runtime)
if int(str(self.settings.compiler.version)) < 14:
add_compiler_flag("-Dsnprintf=_snprintf")
if self.settings.get_safe("compiler.runtime"):
defs["b_vscrt"] = str(self.settings.compiler.runtime).lower()
defs["tools"] = "disabled"
defs["examples"] = "disabled"
defs["benchmarks"] = "disabled"
defs["tests"] = "disabled"
defs["wrap_mode"] = "nofallback"
defs["introspection"] = "enabled" if self.options.with_introspection else "disabled"
meson.configure(build_folder=self._build_subfolder,
source_folder=self._source_subfolder,
defs=defs)
return meson
def build(self):
for patch in self.conan_data.get("patches", {}).get(self.version, []):
tools.patch(**patch)
with tools.environment_append(VisualStudioBuildEnvironment(self).vars) if self._is_msvc else tools.no_op():
meson = self._configure_meson()
meson.build()
def _fix_library_names(self, path):
# regression in 1.16
if self.settings.compiler == "Visual Studio":
with tools.chdir(path):
for filename_old in glob.glob("*.a"):
filename_new = filename_old[3:-2] + ".lib"
self.output.info("rename %s into %s" % (filename_old, filename_new))
shutil.move(filename_old, filename_new)
def package(self):
self.copy(pattern="COPYING", dst="licenses", src=self._source_subfolder)
with tools.environment_append(VisualStudioBuildEnvironment(self).vars) if self._is_msvc else tools.no_op():
meson = self._configure_meson()
meson.install()
self._fix_library_names(os.path.join(self.package_folder, "lib"))
self._fix_library_names(os.path.join(self.package_folder, "lib", "gstreamer-1.0"))
tools.rmdir(os.path.join(self.package_folder, "share"))
tools.rmdir(os.path.join(self.package_folder, "lib", "pkgconfig"))
tools.rmdir(os.path.join(self.package_folder, "lib", "gstreamer-1.0", "pkgconfig"))
tools.remove_files_by_mask(self.package_folder, "*.pdb")
def package_info(self):
plugins = ["libav"]
gst_plugin_path = os.path.join(self.package_folder, "lib", "gstreamer-1.0")
if self.options.shared:
self.output.info("Appending GST_PLUGIN_PATH env var : %s" % gst_plugin_path)
self.cpp_info.bindirs.append(gst_plugin_path)
self.runenv_info.prepend_path("GST_PLUGIN_PATH", gst_plugin_path)
self.env_info.GST_PLUGIN_PATH.append(gst_plugin_path)
else:
self.cpp_info.defines.append("GST_LIBAV_STATIC")
self.cpp_info.libdirs.append(gst_plugin_path)
self.cpp_info.libs.extend(["gst%s" % plugin for plugin in plugins])
self.cpp_info.includedirs = ["include", os.path.join("include", "gstreamer-1.0")]
|
the-stack_106_27454 | # Make dummy data
from sklearn.linear_model import LinearRegression
import numpy as np
X = np.random.randn(500, 4)
y = X.sum(axis = 1)
print(y)
np.savetxt('X.csv', X, delimiter= ',')
np.savetxt('y.csv', y, delimiter=',')
model = LinearRegression()
model.fit(X, y)
from skl2onnx import convert_sklearn
from skl2onnx.common.data_types import FloatTensorType
initial_type = [("input", FloatTensorType(None, X.shape))]
onnx = convert_sklearn(model, name = "regression", initial_types = initial_type)
with open("simple_reg.onnx", "wb") as f:
f.write(onnx.SerializeToString())
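# Optional sanity check (a sketch; assumes the onnxruntime package is installed):
# load the exported model and compare its predictions against scikit-learn's.
import onnxruntime as rt

sess = rt.InferenceSession("simple_reg.onnx", providers=["CPUExecutionProvider"])
input_name = sess.get_inputs()[0].name
onnx_pred = sess.run(None, {input_name: X.astype(np.float32)})[0]
print("ONNX matches sklearn:", np.allclose(onnx_pred.ravel(), model.predict(X), atol=1e-3))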
|
the-stack_106_27455 | from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.keys import Keys
from selenium import webdriver
from bs4 import BeautifulSoup
import re
import sys
link_start = "<a href=\""
link_end = "\" target=\"_blank\">블로그 링크</a>"
visitor_start = "<a href=\"http://blog.naver.com/NVisitorgp4Ajax.nhn?blogId="
visitor_end = "&logNo=221492203765\" target=\"_blank\">방문자 수</a>"
chrome_options = Options()
chrome_options.add_argument('--no-sandbox')
chrome_options.add_argument('--disable-dev-shm-usage')
chrome_options.add_argument('--headless')
def gets_link(compiled_link):
try:
driver = webdriver.Chrome(chrome_options=chrome_options)
driver.get(compiled_link)
driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
driver.find_element_by_xpath("//*[starts-with(@id, 'area_sympathy')]/div/a").send_keys(Keys.SPACE)
except:
print("program excepted!")
driver.close()
driver.quit()
return
html_data = driver.page_source
soup = BeautifulSoup(html_data, 'html.parser')
driver.close()
driver.quit()
like_data = soup.find_all('em', class_='u_cnt _count')
like = re.findall('[0-9]+', str(like_data))
if(like!=[]):
like_value = int(like[0])
else:
like_value = 0
comment_data = soup.find_all('div', class_= 'area_comment pcol2')
comment = re.findall('[ ][0-9]+', str(comment_data))
title_data = soup.find_all('span', class_= 'se-fs- se-ff-')
'''
try:
title_data = title_data[0]
except:
title_data = "제목을 읽지 못했습니다."
'''
if(comment!=[]):
comment_value = int(comment[0][1:])
else:
comment_value = 0
if(like_value+comment_value):
blog_id = re.findall('Id=[a-z0-9_]+', compiled_link)
visitor_link = " "*2 + visitor_start + blog_id[0][3:] + visitor_end + "</h4>" + "<br>"
compiled_link = " "*2 + link_start + compiled_link + link_end
print("<h4>공감 수: %d 댓글 수: %d"%(like_value, comment_value))
print(compiled_link)
print(visitor_link)
print("")
sys.stdout.flush()
return 0
def compile_link(link):
compiled_link = []
for i in range(0, len(link)):
blog_id_data = re.findall('[/][0-9a-z-_]+[?]', link[i])
blog_id = blog_id_data[0].replace('/', '').replace('?', '')
log_no_data = re.findall('[logNo=][0-9]{10,}', link[i])
log_no = log_no_data[0][1:]
data = "http://blog.naver.com/PostView.nhn?blogId=" + blog_id + "&logNo=" + log_no
compiled_link.append(data)
return compiled_link
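# Illustrative sketch (not part of the original script): the URL below is a
# hypothetical mobile-style Naver blog link, and chromedriver must be on PATH
# for gets_link() to run.
if __name__ == "__main__":
    raw_links = ["https://m.blog.naver.com/exampleblog?Redirect=Log&logNo=2214922037650"]
    for post_link in compile_link(raw_links):
        gets_link(post_link)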
|
the-stack_106_27457 | import os
import pytest
from io import StringIO
from pytest_mock import MockerFixture
from vkbottle.modules import json
from vkbottle.tools import (
Callback,
CtxStorage,
Keyboard,
KeyboardButtonColor,
LoopWrapper,
TemplateElement,
Text,
load_blueprints_from_package,
template_gen,
EqualsValidator,
IsInstanceValidator,
CallableValidator,
keyboard_gen,
run_in_task,
convert_shorten_filter,
)
from vkbottle.dispatch import ABCRule, OrFilter, AndFilter
from vkbottle.bot import run_multibot, Bot
from vkbottle import API
KEYBOARD_JSON = json.dumps(
{
"one_time": True,
"inline": False,
"buttons": [
[
{
"action": {
"label": "I love nuggets",
"payload": {"love": "nuggets"},
"type": "text",
}
}
],
[
{
"action": {
"label": "Eat nuggets",
"payload": {"eat": "nuggets"},
"type": "callback",
},
"color": "positive",
}
],
],
}
)
TEMPLATE_DICT = {
"type": "carousel",
"elements": [
{
"photo_id": "-109837093_457242811",
"action": {"type": "open_photo"},
"buttons": [{"action": {"type": "text", "label": "text", "payload": "{}"}}],
},
{
"photo_id": "-109837093_457242811",
"action": {"type": "open_photo"},
"buttons": [{"action": {"type": "text", "label": "text 2", "payload": "{}"}}],
},
],
}
ctx_storage = CtxStorage()
def assert_rule(res, rev=False):
assert (res is not False) is not rev
class MockedLoop:
@staticmethod
def create_task(task):
ctx_storage.set("checked-test-lw-create-task", task.__name__)
@staticmethod
def run_until_complete(task):
c = ctx_storage.get("checked-test-lw-run-until-complete") or []
ctx_storage.set("checked-test-lw-run-until-complete", [*c, task.__name__])
@staticmethod
def run_forever():
ctx_storage.set("checked-test-lw-run-forever", True)
@staticmethod
def is_running():
return False
def test_keyboard_non_builder():
keyboard = Keyboard(one_time=True)
keyboard.add(Text("I love nuggets", {"love": "nuggets"}))
keyboard.row()
keyboard.add(Callback("Eat nuggets", {"eat": "nuggets"}), color=KeyboardButtonColor.POSITIVE)
assert keyboard.get_json() == KEYBOARD_JSON
def test_keyboard_builder():
assert (
Keyboard(one_time=True)
.add(Text("I love nuggets", {"love": "nuggets"}))
.row()
.add(Callback("Eat nuggets", {"eat": "nuggets"}), color=KeyboardButtonColor.POSITIVE)
.get_json()
) == KEYBOARD_JSON
def test_keyboard_generator():
assert json.loads(
keyboard_gen(
[
[{"label": "I love nuggets", "payload": {"love": "nuggets"}}],
[
{
"type": "callback",
"label": "Eat nuggets",
"payload": {"eat": "nuggets"},
"color": "positive",
}
],
],
one_time=True,
)
) == json.loads(KEYBOARD_JSON)
def test_bp_importer(mocker: MockerFixture):
required_files = ["bp1.py", "bp2.py", "bp3.py", "bp4.py"]
main_package = os.path.join("src", "folder")
main_files = {
os.path.join(main_package, "bp1.py"): "bp = Blueprint('blup')",
os.path.join(main_package, "bp2.py"): "\n#\nbp = Blueprint('blup2')",
os.path.join(
main_package, "__init__.py"
): "from . import bp1, bp2\nfrom .bps import bp3, bp4",
}
bps_files = {
os.path.join(main_package, "bps", "bp3.py"): "blueprint = Blueprint('blup')",
os.path.join(main_package, "bps", "bp4.py"): "bp = BotBlueprint()",
}
mocker.patch(
"os.listdir",
lambda f: ["bp1.py", "__init__.py", "bp2.py", "bps"]
if "bps" not in f
else ["bp3.py", "bp4.py", "__init__.py"],
)
mocker.patch(
"builtins.open", lambda fln: StringIO((main_files if "bps" not in fln else bps_files)[fln])
)
mocker.patch(
"importlib.import_module",
lambda pn: type("A", (object,), {"__getattr__": lambda x, y: pn})(),
)
for bp in load_blueprints_from_package(main_package):
required_files.pop(required_files.index(str(bp).split(".")[-1] + ".py"))
assert not len(required_files)
def test_template_generator():
assert (
json.loads(
template_gen(
TemplateElement(
photo_id="-109837093_457242811",
action={"type": "open_photo"},
buttons=[{"action": {"type": "text", "label": "text", "payload": "{}"}}],
),
TemplateElement(
photo_id="-109837093_457242811",
action={"type": "open_photo"},
buttons=[{"action": {"type": "text", "label": "text 2", "payload": "{}"}}],
),
)
)
== TEMPLATE_DICT
)
@pytest.mark.asyncio
async def test_validators():
assert await IsInstanceValidator((int, str)).check("foo")
assert not await EqualsValidator("foo").check("bar")
assert await CallableValidator(lambda _: True).check(0)
def test_loop_wrapper():
async def task1():
pass
async def task2():
pass
async def task3():
pass
lw = LoopWrapper(tasks=[task1])
lw.on_startup.append(task2)
lw.on_shutdown.append(task3)
lw.run_forever(MockedLoop())
assert ctx_storage.get("checked-test-lw-create-task") == task1.__name__
assert ctx_storage.get("checked-test-lw-run-until-complete") == [
task2.__name__,
task3.__name__,
]
assert ctx_storage.get("checked-test-lw-run-forever")
@pytest.mark.asyncio
async def test_utils(mocker: MockerFixture):
async def task_to_run(s, y: int):
return s.x == y
mocker.patch("asyncio.get_running_loop", lambda: MockedLoop())
run_in_task(task_to_run(1, 1))
assert ctx_storage.get("checked-test-lw-create-task") == "task_to_run"
c_rule = type(
"c_rule", (ABCRule,), {"check": task_to_run, "__init__": lambda s, i: setattr(s, "x", i)}
)
assert convert_shorten_filter((c_rule(None),)).__class__ == OrFilter(c_rule(None),).__class__
assert convert_shorten_filter({c_rule(None)}).__class__ == AndFilter(c_rule(None),).__class__
assert_rule(await convert_shorten_filter((c_rule(1), c_rule(2))).check(2))
assert_rule(await convert_shorten_filter((c_rule(1), c_rule(2))).check(4), True)
assert_rule(await convert_shorten_filter({c_rule(4), c_rule(4)}).check(4))
assert_rule(await convert_shorten_filter({c_rule(2), c_rule(4)}).check(4), True)
def test_run_multibot(mocker: MockerFixture):
bot_apis = []
mocker.patch("vkbottle.bot.Bot.run_polling", lambda s, custom_polling: s.api)
mocker.patch("asyncio.iscoroutine", lambda _: True)
mocker.patch(
"vkbottle.tools.dev_tools.loop_wrapper.LoopWrapper.run_forever",
lambda s, l: bot_apis.extend(s.tasks),
)
run_multibot(Bot(), (API("1"), API("2"), API("3")))
assert len(bot_apis) == 3
|
the-stack_106_27460 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import gc
import sys
import copy
from io import StringIO
from collections import OrderedDict
import pytest
import numpy as np
from numpy.testing import assert_allclose, assert_array_equal
from astropy.io import fits
from astropy.table import Table, QTable, MaskedColumn
from astropy.tests.helper import (assert_follows_unicode_guidelines,
ignore_warnings, catch_warnings)
from astropy.utils.data import get_pkg_data_filename
from astropy import table
from astropy import units as u
from astropy.time import Time, TimeDelta
from .conftest import MaskedTable, MIXIN_COLS
try:
with ignore_warnings(DeprecationWarning):
# Ignore DeprecationWarning on pandas import in Python 3.5--see
# https://github.com/astropy/astropy/issues/4380
import pandas # pylint: disable=W0611
except ImportError:
HAS_PANDAS = False
else:
HAS_PANDAS = True
class SetupData:
def _setup(self, table_types):
self._table_type = table_types.Table
self._column_type = table_types.Column
@property
def a(self):
if self._column_type is not None:
if not hasattr(self, '_a'):
self._a = self._column_type(
[1, 2, 3], name='a', format='%d',
meta={'aa': [0, 1, 2, 3, 4]})
return self._a
@property
def b(self):
if self._column_type is not None:
if not hasattr(self, '_b'):
self._b = self._column_type(
[4, 5, 6], name='b', format='%d', meta={'aa': 1})
return self._b
@property
def c(self):
if self._column_type is not None:
if not hasattr(self, '_c'):
self._c = self._column_type([7, 8, 9], 'c')
return self._c
@property
def d(self):
if self._column_type is not None:
if not hasattr(self, '_d'):
self._d = self._column_type([7, 8, 7], 'd')
return self._d
@property
def obj(self):
if self._column_type is not None:
if not hasattr(self, '_obj'):
self._obj = self._column_type([1, 'string', 3], 'obj', dtype='O')
return self._obj
@property
def t(self):
if self._table_type is not None:
if not hasattr(self, '_t'):
self._t = self._table_type([self.a, self.b])
return self._t
@pytest.mark.usefixtures('table_types')
class TestSetTableColumn(SetupData):
def test_set_row(self, table_types):
"""Set a row from a tuple of values"""
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t[1] = (20, 21)
assert t['a'][0] == 1
assert t['a'][1] == 20
assert t['a'][2] == 3
assert t['b'][0] == 4
assert t['b'][1] == 21
assert t['b'][2] == 6
def test_set_row_existing(self, table_types):
"""Set a row from another existing row"""
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t[0] = t[1]
assert t[0][0] == 2
assert t[0][1] == 5
def test_set_row_fail_1(self, table_types):
"""Set a row from an incorrectly-sized or typed set of values"""
self._setup(table_types)
t = table_types.Table([self.a, self.b])
with pytest.raises(ValueError):
t[1] = (20, 21, 22)
with pytest.raises(ValueError):
t[1] = 0
def test_set_row_fail_2(self, table_types):
"""Set a row from an incorrectly-typed tuple of values"""
self._setup(table_types)
t = table_types.Table([self.a, self.b])
with pytest.raises(ValueError):
t[1] = ('abc', 'def')
def test_set_new_col_new_table(self, table_types):
"""Create a new column in empty table using the item access syntax"""
self._setup(table_types)
t = table_types.Table()
t['aa'] = self.a
# Test that the new column name is 'aa' and that the values match
assert np.all(t['aa'] == self.a)
assert t.colnames == ['aa']
def test_set_new_col_new_table_quantity(self, table_types):
"""Create a new column (from a quantity) in empty table using the item access syntax"""
self._setup(table_types)
t = table_types.Table()
t['aa'] = np.array([1, 2, 3]) * u.m
assert np.all(t['aa'] == np.array([1, 2, 3]))
assert t['aa'].unit == u.m
t['bb'] = 3 * u.m
assert np.all(t['bb'] == 3)
assert t['bb'].unit == u.m
def test_set_new_col_existing_table(self, table_types):
"""Create a new column in an existing table using the item access syntax"""
self._setup(table_types)
t = table_types.Table([self.a])
# Add a column
t['bb'] = self.b
assert np.all(t['bb'] == self.b)
assert t.colnames == ['a', 'bb']
assert t['bb'].meta == self.b.meta
assert t['bb'].format == self.b.format
# Add another column
t['c'] = t['a']
assert np.all(t['c'] == t['a'])
assert t.colnames == ['a', 'bb', 'c']
assert t['c'].meta == t['a'].meta
assert t['c'].format == t['a'].format
# Add a multi-dimensional column
t['d'] = table_types.Column(np.arange(12).reshape(3, 2, 2))
assert t['d'].shape == (3, 2, 2)
assert t['d'][0, 0, 1] == 1
# Add column from a list
t['e'] = ['hello', 'the', 'world']
assert np.all(t['e'] == np.array(['hello', 'the', 'world']))
# Make sure setting existing column still works
t['e'] = ['world', 'hello', 'the']
assert np.all(t['e'] == np.array(['world', 'hello', 'the']))
# Add a column via broadcasting
t['f'] = 10
assert np.all(t['f'] == 10)
# Add a column from a Quantity
t['g'] = np.array([1, 2, 3]) * u.m
assert np.all(t['g'].data == np.array([1, 2, 3]))
assert t['g'].unit == u.m
# Add a column from a (scalar) Quantity
t['g'] = 3 * u.m
assert np.all(t['g'].data == 3)
assert t['g'].unit == u.m
def test_set_new_unmasked_col_existing_table(self, table_types):
"""Create a new column in an existing table using the item access syntax"""
self._setup(table_types)
t = table_types.Table([self.a]) # masked or unmasked
b = table.Column(name='b', data=[1, 2, 3]) # unmasked
t['b'] = b
assert np.all(t['b'] == b)
def test_set_new_masked_col_existing_table(self, table_types):
"""Create a new column in an existing table using the item access syntax"""
self._setup(table_types)
t = table_types.Table([self.a]) # masked or unmasked
b = table.MaskedColumn(name='b', data=[1, 2, 3]) # masked
t['b'] = b
assert np.all(t['b'] == b)
def test_set_new_col_existing_table_fail(self, table_types):
"""Generate failure when creating a new column using the item access syntax"""
self._setup(table_types)
t = table_types.Table([self.a])
# Wrong size
with pytest.raises(ValueError):
t['b'] = [1, 2]
@pytest.mark.usefixtures('table_types')
class TestEmptyData():
def test_1(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name='a', dtype=int, length=100))
assert len(t['a']) == 100
def test_2(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name='a', dtype=int, shape=(3, ), length=100))
assert len(t['a']) == 100
def test_3(self, table_types):
t = table_types.Table() # length is not given
t.add_column(table_types.Column(name='a', dtype=int))
assert len(t['a']) == 0
def test_4(self, table_types):
t = table_types.Table() # length is not given
t.add_column(table_types.Column(name='a', dtype=int, shape=(3, 4)))
assert len(t['a']) == 0
def test_5(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name='a')) # dtype is not specified
assert len(t['a']) == 0
def test_add_via_setitem_and_slice(self, table_types):
"""Test related to #3023 where a MaskedColumn is created with name=None
and then gets changed to name='a'. After PR #2790 this test fails
without the #3023 fix."""
t = table_types.Table()
t['a'] = table_types.Column([1, 2, 3])
t2 = t[:]
assert t2.colnames == t.colnames
@pytest.mark.usefixtures('table_types')
class TestNewFromColumns():
def test_simple(self, table_types):
cols = [table_types.Column(name='a', data=[1, 2, 3]),
table_types.Column(name='b', data=[4, 5, 6], dtype=np.float32)]
t = table_types.Table(cols)
assert np.all(t['a'].data == np.array([1, 2, 3]))
assert np.all(t['b'].data == np.array([4, 5, 6], dtype=np.float32))
assert type(t['b'][1]) is np.float32
def test_from_np_array(self, table_types):
cols = [table_types.Column(name='a', data=np.array([1, 2, 3], dtype=np.int64),
dtype=np.float64),
table_types.Column(name='b', data=np.array([4, 5, 6], dtype=np.float32))]
t = table_types.Table(cols)
assert np.all(t['a'] == np.array([1, 2, 3], dtype=np.float64))
assert np.all(t['b'] == np.array([4, 5, 6], dtype=np.float32))
assert type(t['a'][1]) is np.float64
assert type(t['b'][1]) is np.float32
def test_size_mismatch(self, table_types):
cols = [table_types.Column(name='a', data=[1, 2, 3]),
table_types.Column(name='b', data=[4, 5, 6, 7])]
with pytest.raises(ValueError):
table_types.Table(cols)
def test_name_none(self, table_types):
"""Column with name=None can init a table whether or not names are supplied"""
c = table_types.Column(data=[1, 2], name='c')
d = table_types.Column(data=[3, 4])
t = table_types.Table([c, d], names=(None, 'd'))
assert t.colnames == ['c', 'd']
t = table_types.Table([c, d])
assert t.colnames == ['c', 'col1']
@pytest.mark.usefixtures('table_types')
class TestReverse():
def test_reverse(self, table_types):
t = table_types.Table([[1, 2, 3],
['a', 'b', 'cc']])
t.reverse()
assert np.all(t['col0'] == np.array([3, 2, 1]))
assert np.all(t['col1'] == np.array(['cc', 'b', 'a']))
t2 = table_types.Table(t, copy=False)
assert np.all(t2['col0'] == np.array([3, 2, 1]))
assert np.all(t2['col1'] == np.array(['cc', 'b', 'a']))
t2 = table_types.Table(t, copy=True)
assert np.all(t2['col0'] == np.array([3, 2, 1]))
assert np.all(t2['col1'] == np.array(['cc', 'b', 'a']))
t2.sort('col0')
assert np.all(t2['col0'] == np.array([1, 2, 3]))
assert np.all(t2['col1'] == np.array(['a', 'b', 'cc']))
def test_reverse_big(self, table_types):
x = np.arange(10000)
y = x + 1
t = table_types.Table([x, y], names=('x', 'y'))
t.reverse()
assert np.all(t['x'] == x[::-1])
assert np.all(t['y'] == y[::-1])
@pytest.mark.usefixtures('table_types')
class TestColumnAccess():
def test_1(self, table_types):
t = table_types.Table()
with pytest.raises(KeyError):
t['a']
def test_2(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name='a', data=[1, 2, 3]))
assert np.all(t['a'] == np.array([1, 2, 3]))
with pytest.raises(KeyError):
t['b'] # column does not exist
def test_itercols(self, table_types):
names = ['a', 'b', 'c']
t = table_types.Table([[1], [2], [3]], names=names)
for name, col in zip(names, t.itercols()):
assert name == col.name
assert isinstance(col, table_types.Column)
@pytest.mark.usefixtures('table_types')
class TestAddLength(SetupData):
def test_right_length(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a])
t.add_column(self.b)
def test_too_long(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a])
with pytest.raises(ValueError):
t.add_column(table_types.Column(name='b', data=[4, 5, 6, 7])) # data too long
def test_too_short(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a])
with pytest.raises(ValueError):
t.add_column(table_types.Column(name='b', data=[4, 5])) # data too short
@pytest.mark.usefixtures('table_types')
class TestAddPosition(SetupData):
def test_1(self, table_types):
self._setup(table_types)
t = table_types.Table()
t.add_column(self.a, 0)
def test_2(self, table_types):
self._setup(table_types)
t = table_types.Table()
t.add_column(self.a, 1)
def test_3(self, table_types):
self._setup(table_types)
t = table_types.Table()
t.add_column(self.a, -1)
def test_5(self, table_types):
self._setup(table_types)
t = table_types.Table()
with pytest.raises(ValueError):
t.index_column('b')
def test_6(self, table_types):
self._setup(table_types)
t = table_types.Table()
t.add_column(self.a)
t.add_column(self.b)
assert t.columns.keys() == ['a', 'b']
def test_7(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a])
t.add_column(self.b, t.index_column('a'))
assert t.columns.keys() == ['b', 'a']
def test_8(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a])
t.add_column(self.b, t.index_column('a') + 1)
assert t.columns.keys() == ['a', 'b']
def test_9(self, table_types):
self._setup(table_types)
t = table_types.Table()
t.add_column(self.a)
t.add_column(self.b, t.index_column('a') + 1)
t.add_column(self.c, t.index_column('b'))
assert t.columns.keys() == ['a', 'c', 'b']
def test_10(self, table_types):
self._setup(table_types)
t = table_types.Table()
t.add_column(self.a)
ia = t.index_column('a')
t.add_column(self.b, ia + 1)
t.add_column(self.c, ia)
assert t.columns.keys() == ['c', 'a', 'b']
@pytest.mark.usefixtures('table_types')
class TestAddName(SetupData):
def test_override_name(self, table_types):
self._setup(table_types)
t = table_types.Table()
# Check that we can override the name of the input column in the Table
t.add_column(self.a, name='b')
t.add_column(self.b, name='a')
assert t.columns.keys() == ['b', 'a']
# Check that we did not change the name of the input column
assert self.a.info.name == 'a'
assert self.b.info.name == 'b'
# Now test with an input column from another table
t2 = table_types.Table()
t2.add_column(t['a'], name='c')
assert t2.columns.keys() == ['c']
# Check that we did not change the name of the input column
assert t.columns.keys() == ['b', 'a']
# Check that we can give a name if none was present
col = table_types.Column([1, 2, 3])
t.add_column(col, name='c')
assert t.columns.keys() == ['b', 'a', 'c']
def test_default_name(self, table_types):
t = table_types.Table()
col = table_types.Column([1, 2, 3])
t.add_column(col)
assert t.columns.keys() == ['col0']
@pytest.mark.usefixtures('table_types')
class TestInitFromTable(SetupData):
def test_from_table_cols(self, table_types):
"""Ensure that using cols from an existing table gives
a clean copy.
"""
self._setup(table_types)
t = self.t
cols = t.columns
# Construct Table with cols via Table._new_from_cols
t2a = table_types.Table([cols['a'], cols['b'], self.c])
# Construct with add_column
t2b = table_types.Table()
t2b.add_column(cols['a'])
t2b.add_column(cols['b'])
t2b.add_column(self.c)
t['a'][1] = 20
t['b'][1] = 21
for t2 in [t2a, t2b]:
t2['a'][2] = 10
t2['b'][2] = 11
t2['c'][2] = 12
t2.columns['a'].meta['aa'][3] = 10
assert np.all(t['a'] == np.array([1, 20, 3]))
assert np.all(t['b'] == np.array([4, 21, 6]))
assert np.all(t2['a'] == np.array([1, 2, 10]))
assert np.all(t2['b'] == np.array([4, 5, 11]))
assert np.all(t2['c'] == np.array([7, 8, 12]))
assert t2['a'].name == 'a'
assert t2.columns['a'].meta['aa'][3] == 10
assert t.columns['a'].meta['aa'][3] == 3
@pytest.mark.usefixtures('table_types')
class TestAddColumns(SetupData):
def test_add_columns1(self, table_types):
self._setup(table_types)
t = table_types.Table()
t.add_columns([self.a, self.b, self.c])
assert t.colnames == ['a', 'b', 'c']
def test_add_columns2(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t.add_columns([self.c, self.d])
assert t.colnames == ['a', 'b', 'c', 'd']
assert np.all(t['c'] == np.array([7, 8, 9]))
def test_add_columns3(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t.add_columns([self.c, self.d], indexes=[1, 0])
assert t.colnames == ['d', 'a', 'c', 'b']
def test_add_columns4(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t.add_columns([self.c, self.d], indexes=[0, 0])
assert t.colnames == ['c', 'd', 'a', 'b']
def test_add_columns5(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t.add_columns([self.c, self.d], indexes=[2, 2])
assert t.colnames == ['a', 'b', 'c', 'd']
def test_add_columns6(self, table_types):
"""Check that we can override column names."""
self._setup(table_types)
t = table_types.Table()
t.add_columns([self.a, self.b, self.c], names=['b', 'c', 'a'])
assert t.colnames == ['b', 'c', 'a']
def test_add_columns7(self, table_types):
"""Check that default names are used when appropriate."""
t = table_types.Table()
col0 = table_types.Column([1, 2, 3])
col1 = table_types.Column([4, 5, 3])
t.add_columns([col0, col1])
assert t.colnames == ['col0', 'col1']
def test_add_duplicate_column(self, table_types):
self._setup(table_types)
t = table_types.Table()
t.add_column(self.a)
with pytest.raises(ValueError):
t.add_column(table_types.Column(name='a', data=[0, 1, 2]))
t.add_column(table_types.Column(name='a', data=[0, 1, 2]),
rename_duplicate=True)
t.add_column(self.b)
t.add_column(self.c)
assert t.colnames == ['a', 'a_1', 'b', 'c']
t.add_column(table_types.Column(name='a', data=[0, 1, 2]),
rename_duplicate=True)
assert t.colnames == ['a', 'a_1', 'b', 'c', 'a_2']
# test adding column from a separate Table
t1 = table_types.Table()
t1.add_column(self.a)
with pytest.raises(ValueError):
t.add_column(t1['a'])
t.add_column(t1['a'], rename_duplicate=True)
t1['a'][0] = 100 # Change original column
assert t.colnames == ['a', 'a_1', 'b', 'c', 'a_2', 'a_3']
assert t1.colnames == ['a']
# Check new column didn't change (since name conflict forced a copy)
assert t['a_3'][0] == self.a[0]
# Check that rename_duplicate=True is ok if there are no duplicates
t.add_column(table_types.Column(name='q', data=[0, 1, 2]),
rename_duplicate=True)
assert t.colnames == ['a', 'a_1', 'b', 'c', 'a_2', 'a_3', 'q']
def test_add_duplicate_columns(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b, self.c])
with pytest.raises(ValueError):
t.add_columns([table_types.Column(name='a', data=[0, 1, 2]), table_types.Column(name='b', data=[0, 1, 2])])
t.add_columns([table_types.Column(name='a', data=[0, 1, 2]),
table_types.Column(name='b', data=[0, 1, 2])],
rename_duplicate=True)
t.add_column(self.d)
assert t.colnames == ['a', 'b', 'c', 'a_1', 'b_1', 'd']
@pytest.mark.usefixtures('table_types')
class TestAddRow(SetupData):
@property
def b(self):
if self._column_type is not None:
if not hasattr(self, '_b'):
self._b = self._column_type(name='b', data=[4.0, 5.1, 6.2])
return self._b
@property
def c(self):
if self._column_type is not None:
if not hasattr(self, '_c'):
self._c = self._column_type(name='c', data=['7', '8', '9'])
return self._c
@property
def d(self):
if self._column_type is not None:
if not hasattr(self, '_d'):
self._d = self._column_type(name='d', data=[[1, 2], [3, 4], [5, 6]])
return self._d
@property
def t(self):
if self._table_type is not None:
if not hasattr(self, '_t'):
self._t = self._table_type([self.a, self.b, self.c])
return self._t
def test_add_none_to_empty_table(self, table_types):
self._setup(table_types)
t = table_types.Table(names=('a', 'b', 'c'), dtype=('(2,)i', 'S4', 'O'))
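        # With no values supplied, add_row() appends each column's zero/empty default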
t.add_row()
assert np.all(t['a'][0] == [0, 0])
assert t['b'][0] == ''
assert t['c'][0] == 0
t.add_row()
assert np.all(t['a'][1] == [0, 0])
assert t['b'][1] == ''
assert t['c'][1] == 0
def test_add_stuff_to_empty_table(self, table_types):
self._setup(table_types)
t = table_types.Table(names=('a', 'b', 'obj'), dtype=('(2,)i', 'S8', 'O'))
t.add_row([[1, 2], 'hello', 'world'])
assert np.all(t['a'][0] == [1, 2])
assert t['b'][0] == 'hello'
assert t['obj'][0] == 'world'
# Make sure it is not repeating last row but instead
# adding zeros (as documented)
t.add_row()
assert np.all(t['a'][1] == [0, 0])
assert t['b'][1] == ''
assert t['obj'][1] == 0
def test_add_table_row(self, table_types):
self._setup(table_types)
t = self.t
t['d'] = self.d
t2 = table_types.Table([self.a, self.b, self.c, self.d])
t.add_row(t2[0])
assert len(t) == 4
assert np.all(t['a'] == np.array([1, 2, 3, 1]))
assert np.allclose(t['b'], np.array([4.0, 5.1, 6.2, 4.0]))
assert np.all(t['c'] == np.array(['7', '8', '9', '7']))
assert np.all(t['d'] == np.array([[1, 2], [3, 4], [5, 6], [1, 2]]))
def test_add_table_row_obj(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b, self.obj])
t.add_row([1, 4.0, [10]])
assert len(t) == 4
assert np.all(t['a'] == np.array([1, 2, 3, 1]))
assert np.allclose(t['b'], np.array([4.0, 5.1, 6.2, 4.0]))
assert np.all(t['obj'] == np.array([1, 'string', 3, [10]], dtype='O'))
def test_add_qtable_row_multidimensional(self):
q = [[1, 2], [3, 4]] * u.m
qt = table.QTable([q])
qt.add_row(([5, 6] * u.km,))
assert np.all(qt['col0'] == [[1, 2], [3, 4], [5000, 6000]] * u.m)
def test_add_with_tuple(self, table_types):
self._setup(table_types)
t = self.t
t.add_row((4, 7.2, '1'))
assert len(t) == 4
assert np.all(t['a'] == np.array([1, 2, 3, 4]))
assert np.allclose(t['b'], np.array([4.0, 5.1, 6.2, 7.2]))
assert np.all(t['c'] == np.array(['7', '8', '9', '1']))
def test_add_with_list(self, table_types):
self._setup(table_types)
t = self.t
t.add_row([4, 7.2, '10'])
assert len(t) == 4
assert np.all(t['a'] == np.array([1, 2, 3, 4]))
assert np.allclose(t['b'], np.array([4.0, 5.1, 6.2, 7.2]))
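        # Column 'c' holds one-character strings, so the '10' added above is truncated to '1'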
assert np.all(t['c'] == np.array(['7', '8', '9', '1']))
def test_add_with_dict(self, table_types):
self._setup(table_types)
t = self.t
t.add_row({'a': 4, 'b': 7.2})
assert len(t) == 4
assert np.all(t['a'] == np.array([1, 2, 3, 4]))
assert np.allclose(t['b'], np.array([4.0, 5.1, 6.2, 7.2]))
if t.masked:
assert np.all(t['c'] == np.array(['7', '8', '9', '7']))
else:
assert np.all(t['c'] == np.array(['7', '8', '9', '']))
def test_add_with_none(self, table_types):
self._setup(table_types)
t = self.t
t.add_row()
assert len(t) == 4
assert np.all(t['a'].data == np.array([1, 2, 3, 0]))
assert np.allclose(t['b'], np.array([4.0, 5.1, 6.2, 0.0]))
assert np.all(t['c'].data == np.array(['7', '8', '9', '']))
def test_add_missing_column(self, table_types):
self._setup(table_types)
t = self.t
with pytest.raises(ValueError):
t.add_row({'bad_column': 1})
def test_wrong_size_tuple(self, table_types):
self._setup(table_types)
t = self.t
with pytest.raises(ValueError):
t.add_row((1, 2))
def test_wrong_vals_type(self, table_types):
self._setup(table_types)
t = self.t
with pytest.raises(TypeError):
t.add_row(1)
def test_add_row_failures(self, table_types):
self._setup(table_types)
t = self.t
t_copy = table_types.Table(t, copy=True)
# Wrong number of columns
try:
t.add_row([1, 2, 3, 4])
except ValueError:
pass
assert len(t) == 3
assert np.all(t.as_array() == t_copy.as_array())
# Wrong data type
try:
t.add_row(['one', 2, 3])
except ValueError:
pass
assert len(t) == 3
assert np.all(t.as_array() == t_copy.as_array())
def test_insert_table_row(self, table_types):
"""
        Light testing of Table.insert_row() method. The deep testing is done via
        the add_row() tests, which call insert_row(index=len(self), ...), so
        here we just test that the added index parameter is handled correctly.
"""
self._setup(table_types)
row = (10, 40.0, 'x', [10, 20])
for index in range(-3, 4):
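            # `indices` reorders the add_row() result (new row at index 3) to where
            # insert_row(index, ...) should place it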
indices = np.insert(np.arange(3), index, 3)
t = table_types.Table([self.a, self.b, self.c, self.d])
t2 = t.copy()
t.add_row(row) # By now we know this works
t2.insert_row(index, row)
for name in t.colnames:
if t[name].dtype.kind == 'f':
assert np.allclose(t[name][indices], t2[name])
else:
assert np.all(t[name][indices] == t2[name])
for index in (-4, 4):
t = table_types.Table([self.a, self.b, self.c, self.d])
with pytest.raises(IndexError):
t.insert_row(index, row)
@pytest.mark.usefixtures('table_types')
class TestTableColumn(SetupData):
def test_column_view(self, table_types):
self._setup(table_types)
t = self.t
a = t.columns['a']
a[2] = 10
assert t['a'][2] == 10
@pytest.mark.usefixtures('table_types')
class TestArrayColumns(SetupData):
def test_1d(self, table_types):
self._setup(table_types)
b = table_types.Column(name='b', dtype=int, shape=(2, ), length=3)
t = table_types.Table([self.a])
t.add_column(b)
assert t['b'].shape == (3, 2)
assert t['b'][0].shape == (2, )
def test_2d(self, table_types):
self._setup(table_types)
b = table_types.Column(name='b', dtype=int, shape=(2, 4), length=3)
t = table_types.Table([self.a])
t.add_column(b)
assert t['b'].shape == (3, 2, 4)
assert t['b'][0].shape == (2, 4)
def test_3d(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a])
b = table_types.Column(name='b', dtype=int, shape=(2, 4, 6), length=3)
t.add_column(b)
assert t['b'].shape == (3, 2, 4, 6)
assert t['b'][0].shape == (2, 4, 6)
@pytest.mark.usefixtures('table_types')
class TestRemove(SetupData):
@property
def t(self):
if self._table_type is not None:
if not hasattr(self, '_t'):
self._t = self._table_type([self.a])
return self._t
@property
def t2(self):
if self._table_type is not None:
if not hasattr(self, '_t2'):
self._t2 = self._table_type([self.a, self.b, self.c])
return self._t2
def test_1(self, table_types):
self._setup(table_types)
self.t.remove_columns('a')
assert self.t.columns.keys() == []
assert self.t.as_array().size == 0
# Regression test for gh-8640
assert not self.t
assert isinstance(self.t == None, np.ndarray)
assert (self.t == None).size == 0
def test_2(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.remove_columns('a')
assert self.t.columns.keys() == ['b']
assert self.t.dtype.names == ('b',)
assert np.all(self.t['b'] == np.array([4, 5, 6]))
def test_3(self, table_types):
"""Check remove_columns works for a single column with a name of
more than one character. Regression test against #2699"""
self._setup(table_types)
self.t['new_column'] = self.t['a']
assert 'new_column' in self.t.columns.keys()
self.t.remove_columns('new_column')
assert 'new_column' not in self.t.columns.keys()
def test_remove_nonexistent_row(self, table_types):
self._setup(table_types)
with pytest.raises(IndexError):
self.t.remove_row(4)
def test_remove_row_0(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.add_column(self.c)
self.t.remove_row(0)
assert self.t.colnames == ['a', 'b', 'c']
assert np.all(self.t['b'] == np.array([5, 6]))
def test_remove_row_1(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.add_column(self.c)
self.t.remove_row(1)
assert self.t.colnames == ['a', 'b', 'c']
assert np.all(self.t['a'] == np.array([1, 3]))
def test_remove_row_2(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.add_column(self.c)
self.t.remove_row(2)
assert self.t.colnames == ['a', 'b', 'c']
assert np.all(self.t['c'] == np.array([7, 8]))
def test_remove_row_slice(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.add_column(self.c)
self.t.remove_rows(slice(0, 2, 1))
assert self.t.colnames == ['a', 'b', 'c']
assert np.all(self.t['c'] == np.array([9]))
def test_remove_row_list(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.add_column(self.c)
self.t.remove_rows([0, 2])
assert self.t.colnames == ['a', 'b', 'c']
assert np.all(self.t['c'] == np.array([8]))
def test_remove_row_preserves_meta(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.remove_rows([0, 2])
assert self.t['a'].meta == {'aa': [0, 1, 2, 3, 4]}
assert self.t.dtype == np.dtype([('a', 'int'),
('b', 'int')])
def test_delitem_row(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.add_column(self.c)
del self.t[1]
assert self.t.colnames == ['a', 'b', 'c']
assert np.all(self.t['a'] == np.array([1, 3]))
@pytest.mark.parametrize("idx", [[0, 2], np.array([0, 2])])
def test_delitem_row_list(self, table_types, idx):
self._setup(table_types)
self.t.add_column(self.b)
self.t.add_column(self.c)
del self.t[idx]
assert self.t.colnames == ['a', 'b', 'c']
assert np.all(self.t['c'] == np.array([8]))
def test_delitem_row_slice(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.add_column(self.c)
del self.t[0:2]
assert self.t.colnames == ['a', 'b', 'c']
assert np.all(self.t['c'] == np.array([9]))
def test_delitem_row_fail(self, table_types):
self._setup(table_types)
with pytest.raises(IndexError):
del self.t[4]
def test_delitem_row_float(self, table_types):
self._setup(table_types)
with pytest.raises(IndexError):
del self.t[1.]
def test_delitem1(self, table_types):
self._setup(table_types)
del self.t['a']
assert self.t.columns.keys() == []
assert self.t.as_array().size == 0
# Regression test for gh-8640
assert not self.t
assert isinstance(self.t == None, np.ndarray)
assert (self.t == None).size == 0
def test_delitem2(self, table_types):
self._setup(table_types)
del self.t2['b']
assert self.t2.colnames == ['a', 'c']
def test_delitems(self, table_types):
self._setup(table_types)
del self.t2['a', 'b']
assert self.t2.colnames == ['c']
def test_delitem_fail(self, table_types):
self._setup(table_types)
with pytest.raises(KeyError):
del self.t['d']
@pytest.mark.usefixtures('table_types')
class TestKeep(SetupData):
def test_1(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t.keep_columns([])
assert t.columns.keys() == []
assert t.as_array().size == 0
# Regression test for gh-8640
assert not t
assert isinstance(t == None, np.ndarray)
assert (t == None).size == 0
def test_2(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t.keep_columns('b')
assert t.columns.keys() == ['b']
assert t.dtype.names == ('b',)
assert np.all(t['b'] == np.array([4, 5, 6]))
@pytest.mark.usefixtures('table_types')
class TestRename(SetupData):
def test_1(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a])
t.rename_column('a', 'b')
assert t.columns.keys() == ['b']
assert t.dtype.names == ('b',)
assert np.all(t['b'] == np.array([1, 2, 3]))
def test_2(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t.rename_column('a', 'c')
t.rename_column('b', 'a')
assert t.columns.keys() == ['c', 'a']
assert t.dtype.names == ('c', 'a')
if t.masked:
assert t.mask.dtype.names == ('c', 'a')
assert np.all(t['c'] == np.array([1, 2, 3]))
assert np.all(t['a'] == np.array([4, 5, 6]))
def test_rename_by_attr(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t['a'].name = 'c'
t['b'].name = 'a'
assert t.columns.keys() == ['c', 'a']
assert t.dtype.names == ('c', 'a')
assert np.all(t['c'] == np.array([1, 2, 3]))
assert np.all(t['a'] == np.array([4, 5, 6]))
def test_rename_columns(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b, self.c])
t.rename_columns(('a', 'b', 'c'), ('aa', 'bb', 'cc'))
assert t.colnames == ['aa', 'bb', 'cc']
t.rename_columns(['bb', 'cc'], ['b', 'c'])
assert t.colnames == ['aa', 'b', 'c']
with pytest.raises(TypeError):
t.rename_columns(('aa'), ['a'])
with pytest.raises(ValueError):
t.rename_columns(['a'], ['b', 'c'])
@pytest.mark.usefixtures('table_types')
class TestSort():
def test_single(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name='a', data=[2, 1, 3]))
t.add_column(table_types.Column(name='b', data=[6, 5, 4]))
t.add_column(table_types.Column(name='c', data=[(1, 2), (3, 4), (4, 5)]))
assert np.all(t['a'] == np.array([2, 1, 3]))
assert np.all(t['b'] == np.array([6, 5, 4]))
t.sort('a')
assert np.all(t['a'] == np.array([1, 2, 3]))
assert np.all(t['b'] == np.array([5, 6, 4]))
assert np.all(t['c'] == np.array([[3, 4],
[1, 2],
[4, 5]]))
t.sort('b')
assert np.all(t['a'] == np.array([3, 1, 2]))
assert np.all(t['b'] == np.array([4, 5, 6]))
assert np.all(t['c'] == np.array([[4, 5],
[3, 4],
[1, 2]]))
def test_single_reverse(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name='a', data=[2, 1, 3]))
t.add_column(table_types.Column(name='b', data=[6, 5, 4]))
t.add_column(table_types.Column(name='c', data=[(1, 2), (3, 4), (4, 5)]))
assert np.all(t['a'] == np.array([2, 1, 3]))
assert np.all(t['b'] == np.array([6, 5, 4]))
t.sort('a', reverse=True)
assert np.all(t['a'] == np.array([3, 2, 1]))
assert np.all(t['b'] == np.array([4, 6, 5]))
assert np.all(t['c'] == np.array([[4, 5],
[1, 2],
[3, 4]]))
t.sort('b', reverse=True)
assert np.all(t['a'] == np.array([2, 1, 3]))
assert np.all(t['b'] == np.array([6, 5, 4]))
assert np.all(t['c'] == np.array([[1, 2],
[3, 4],
[4, 5]]))
def test_single_big(self, table_types):
"""Sort a big-ish table with a non-trivial sort order"""
x = np.arange(10000)
y = np.sin(x)
t = table_types.Table([x, y], names=('x', 'y'))
t.sort('y')
idx = np.argsort(y)
assert np.all(t['x'] == x[idx])
assert np.all(t['y'] == y[idx])
@pytest.mark.parametrize('reverse', [True, False])
def test_empty_reverse(self, table_types, reverse):
t = table_types.Table([[], []], dtype=['f4', 'U1'])
t.sort('col1', reverse=reverse)
def test_multiple(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name='a', data=[2, 1, 3, 2, 3, 1]))
t.add_column(table_types.Column(name='b', data=[6, 5, 4, 3, 5, 4]))
assert np.all(t['a'] == np.array([2, 1, 3, 2, 3, 1]))
assert np.all(t['b'] == np.array([6, 5, 4, 3, 5, 4]))
t.sort(['a', 'b'])
assert np.all(t['a'] == np.array([1, 1, 2, 2, 3, 3]))
assert np.all(t['b'] == np.array([4, 5, 3, 6, 4, 5]))
t.sort(['b', 'a'])
assert np.all(t['a'] == np.array([2, 1, 3, 1, 3, 2]))
assert np.all(t['b'] == np.array([3, 4, 4, 5, 5, 6]))
t.sort(('a', 'b'))
assert np.all(t['a'] == np.array([1, 1, 2, 2, 3, 3]))
assert np.all(t['b'] == np.array([4, 5, 3, 6, 4, 5]))
def test_multiple_reverse(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name='a', data=[2, 1, 3, 2, 3, 1]))
t.add_column(table_types.Column(name='b', data=[6, 5, 4, 3, 5, 4]))
assert np.all(t['a'] == np.array([2, 1, 3, 2, 3, 1]))
assert np.all(t['b'] == np.array([6, 5, 4, 3, 5, 4]))
t.sort(['a', 'b'], reverse=True)
assert np.all(t['a'] == np.array([3, 3, 2, 2, 1, 1]))
assert np.all(t['b'] == np.array([5, 4, 6, 3, 5, 4]))
t.sort(['b', 'a'], reverse=True)
assert np.all(t['a'] == np.array([2, 3, 1, 3, 1, 2]))
assert np.all(t['b'] == np.array([6, 5, 5, 4, 4, 3]))
t.sort(('a', 'b'), reverse=True)
assert np.all(t['a'] == np.array([3, 3, 2, 2, 1, 1]))
assert np.all(t['b'] == np.array([5, 4, 6, 3, 5, 4]))
def test_multiple_with_bytes(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name='firstname', data=[b"Max", b"Jo", b"John"]))
t.add_column(table_types.Column(name='name', data=[b"Miller", b"Miller", b"Jackson"]))
t.add_column(table_types.Column(name='tel', data=[12, 15, 19]))
t.sort(['name', 'firstname'])
assert np.all([t['firstname'] == np.array([b"John", b"Jo", b"Max"])])
assert np.all([t['name'] == np.array([b"Jackson", b"Miller", b"Miller"])])
assert np.all([t['tel'] == np.array([19, 15, 12])])
def test_multiple_with_unicode(self, table_types):
# Before Numpy 1.6.2, sorting with multiple column names
# failed when a unicode column was present.
t = table_types.Table()
t.add_column(table_types.Column(
name='firstname',
data=[str(x) for x in ["Max", "Jo", "John"]]))
t.add_column(table_types.Column(
name='name',
data=[str(x) for x in ["Miller", "Miller", "Jackson"]]))
t.add_column(table_types.Column(name='tel', data=[12, 15, 19]))
t.sort(['name', 'firstname'])
assert np.all([t['firstname'] == np.array(
[str(x) for x in ["John", "Jo", "Max"]])])
assert np.all([t['name'] == np.array(
[str(x) for x in ["Jackson", "Miller", "Miller"]])])
assert np.all([t['tel'] == np.array([19, 15, 12])])
def test_argsort(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name='a', data=[2, 1, 3, 2, 3, 1]))
t.add_column(table_types.Column(name='b', data=[6, 5, 4, 3, 5, 4]))
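        # With no keys, argsort() should order by all columns, matching the structured-array argsort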
assert np.all(t.argsort() == t.as_array().argsort())
i0 = t.argsort('a')
i1 = t.as_array().argsort(order=['a'])
assert np.all(t['a'][i0] == t['a'][i1])
i0 = t.argsort(['a', 'b'])
i1 = t.as_array().argsort(order=['a', 'b'])
assert np.all(t['a'][i0] == t['a'][i1])
assert np.all(t['b'][i0] == t['b'][i1])
def test_argsort_reverse(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name='a', data=[2, 1, 3, 2, 3, 1]))
t.add_column(table_types.Column(name='b', data=[6, 5, 4, 3, 5, 4]))
assert np.all(t.argsort(reverse=True) == np.array([4, 2, 0, 3, 1, 5]))
i0 = t.argsort('a', reverse=True)
i1 = np.array([4, 2, 3, 0, 5, 1])
assert np.all(t['a'][i0] == t['a'][i1])
i0 = t.argsort(['a', 'b'], reverse=True)
i1 = np.array([4, 2, 0, 3, 1, 5])
assert np.all(t['a'][i0] == t['a'][i1])
assert np.all(t['b'][i0] == t['b'][i1])
def test_argsort_bytes(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name='firstname', data=[b"Max", b"Jo", b"John"]))
t.add_column(table_types.Column(name='name', data=[b"Miller", b"Miller", b"Jackson"]))
t.add_column(table_types.Column(name='tel', data=[12, 15, 19]))
assert np.all(t.argsort(['name', 'firstname']) == np.array([2, 1, 0]))
def test_argsort_unicode(self, table_types):
# Before Numpy 1.6.2, sorting with multiple column names
# failed when a unicode column was present.
t = table_types.Table()
t.add_column(table_types.Column(
name='firstname',
data=[str(x) for x in ["Max", "Jo", "John"]]))
t.add_column(table_types.Column(
name='name',
data=[str(x) for x in ["Miller", "Miller", "Jackson"]]))
t.add_column(table_types.Column(name='tel', data=[12, 15, 19]))
assert np.all(t.argsort(['name', 'firstname']) == np.array([2, 1, 0]))
def test_rebuild_column_view_then_rename(self, table_types):
"""
Issue #2039 where renaming fails after any method that calls
_rebuild_table_column_view (this includes sort and add_row).
"""
t = table_types.Table([[1]], names=('a',))
assert t.colnames == ['a']
assert t.dtype.names == ('a',)
t.add_row((2,))
assert t.colnames == ['a']
assert t.dtype.names == ('a',)
t.rename_column('a', 'b')
assert t.colnames == ['b']
assert t.dtype.names == ('b',)
t.sort('b')
assert t.colnames == ['b']
assert t.dtype.names == ('b',)
t.rename_column('b', 'c')
assert t.colnames == ['c']
assert t.dtype.names == ('c',)
@pytest.mark.usefixtures('table_types')
class TestIterator():
def test_iterator(self, table_types):
d = np.array([(2, 1),
(3, 6),
(4, 5)], dtype=[('a', 'i4'), ('b', 'i4')])
t = table_types.Table(d)
if t.masked:
with pytest.raises(ValueError):
t[0] == d[0]
else:
for row, np_row in zip(t, d):
assert np.all(row == np_row)
@pytest.mark.usefixtures('table_types')
class TestSetMeta():
def test_set_meta(self, table_types):
d = table_types.Table(names=('a', 'b'))
d.meta['a'] = 1
d.meta['b'] = 1
d.meta['c'] = 1
d.meta['d'] = 1
assert list(d.meta.keys()) == ['a', 'b', 'c', 'd']
@pytest.mark.usefixtures('table_types')
class TestConvertNumpyArray():
def test_convert_numpy_array(self, table_types):
d = table_types.Table([[1, 2], [3, 4]], names=('a', 'b'))
np_data = np.array(d)
if table_types.Table is not MaskedTable:
assert np.all(np_data == d.as_array())
assert np_data is not d.as_array()
assert d.colnames == list(np_data.dtype.names)
np_data = np.array(d, copy=False)
if table_types.Table is not MaskedTable:
assert np.all(np_data == d.as_array())
assert d.colnames == list(np_data.dtype.names)
with pytest.raises(ValueError):
np_data = np.array(d, dtype=[('c', 'i8'), ('d', 'i8')])
def test_as_array_byteswap(self, table_types):
"""Test for https://github.com/astropy/astropy/pull/4080"""
byte_orders = ('>', '<')
native_order = byte_orders[sys.byteorder == 'little']
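        # Indexing with the boolean: little-endian (True -> 1) selects '<', big-endian selects '>'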
for order in byte_orders:
col = table_types.Column([1.0, 2.0], name='a', dtype=order + 'f8')
t = table_types.Table([col])
arr = t.as_array()
assert arr['a'].dtype.byteorder in (native_order, '=')
arr = t.as_array(keep_byteorder=True)
if order == native_order:
assert arr['a'].dtype.byteorder in (order, '=')
else:
assert arr['a'].dtype.byteorder == order
def test_byteswap_fits_array(self, table_types):
"""
Test for https://github.com/astropy/astropy/pull/4080, demonstrating
that FITS tables are converted to native byte order.
"""
non_native_order = ('>', '<')[sys.byteorder != 'little']
filename = get_pkg_data_filename('data/tb.fits',
'astropy.io.fits.tests')
t = table_types.Table.read(filename)
arr = t.as_array()
for idx in range(len(arr.dtype)):
assert arr.dtype[idx].byteorder != non_native_order
with fits.open(filename, character_as_bytes=True) as hdul:
data = hdul[1].data
for colname in data.columns.names:
assert np.all(data[colname] == arr[colname])
arr2 = t.as_array(keep_byteorder=True)
for colname in data.columns.names:
assert (data[colname].dtype.byteorder ==
arr2[colname].dtype.byteorder)
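# Helper for the copy tests below: t2 must match t, and with deep=True its columns
# must not share memory with t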
def _assert_copies(t, t2, deep=True):
assert t.colnames == t2.colnames
np.testing.assert_array_equal(t.as_array(), t2.as_array())
assert t.meta == t2.meta
for col, col2 in zip(t.columns.values(), t2.columns.values()):
if deep:
assert not np.may_share_memory(col, col2)
else:
assert np.may_share_memory(col, col2)
def test_copy():
t = table.Table([[1, 2, 3], [2, 3, 4]], names=['x', 'y'])
t2 = t.copy()
_assert_copies(t, t2)
def test_copy_masked():
t = table.Table([[1, 2, 3], [2, 3, 4]], names=['x', 'y'], masked=True,
meta={'name': 'test'})
    t['x'].mask = [True, False, True]
t2 = t.copy()
_assert_copies(t, t2)
def test_copy_protocol():
t = table.Table([[1, 2, 3], [2, 3, 4]], names=['x', 'y'])
t2 = copy.copy(t)
t3 = copy.deepcopy(t)
_assert_copies(t, t2, deep=False)
_assert_copies(t, t3)
def test_disallow_inequality_comparisons():
"""
Regression test for #828 - disallow comparison operators on whole Table
"""
t = table.Table()
with pytest.raises(TypeError):
t > 2
with pytest.raises(TypeError):
t < 1.1
with pytest.raises(TypeError):
t >= 5.5
with pytest.raises(TypeError):
t <= -1.1
def test_equality():
t = table.Table.read([' a b c d',
' 2 c 7.0 0',
' 2 b 5.0 1',
' 2 b 6.0 2',
' 2 a 4.0 3',
' 0 a 0.0 4',
' 1 b 3.0 5',
' 1 a 2.0 6',
' 1 a 1.0 7',
], format='ascii')
# All rows are equal
assert np.all(t == t)
# Assert no rows are different
assert not np.any(t != t)
# Check equality result for a given row
assert np.all((t == t[3]) == np.array([0, 0, 0, 1, 0, 0, 0, 0], dtype=bool))
# Check inequality result for a given row
assert np.all((t != t[3]) == np.array([1, 1, 1, 0, 1, 1, 1, 1], dtype=bool))
t2 = table.Table.read([' a b c d',
' 2 c 7.0 0',
' 2 b 5.0 1',
' 3 b 6.0 2',
' 2 a 4.0 3',
' 0 a 1.0 4',
' 1 b 3.0 5',
' 1 c 2.0 6',
' 1 a 1.0 7',
], format='ascii')
    # In the above cases, Row.__eq__ gets called, but now we need to make sure
# Table.__eq__ also gets called.
assert np.all((t == t2) == np.array([1, 1, 0, 1, 0, 1, 0, 1], dtype=bool))
assert np.all((t != t2) == np.array([0, 0, 1, 0, 1, 0, 1, 0], dtype=bool))
# Check that comparing to a structured array works
assert np.all((t == t2.as_array()) == np.array([1, 1, 0, 1, 0, 1, 0, 1], dtype=bool))
assert np.all((t.as_array() == t2) == np.array([1, 1, 0, 1, 0, 1, 0, 1], dtype=bool))
def test_equality_masked():
t = table.Table.read([' a b c d',
' 2 c 7.0 0',
' 2 b 5.0 1',
' 2 b 6.0 2',
' 2 a 4.0 3',
' 0 a 0.0 4',
' 1 b 3.0 5',
' 1 a 2.0 6',
' 1 a 1.0 7',
], format='ascii')
# Make into masked table
t = table.Table(t, masked=True)
# All rows are equal
assert np.all(t == t)
# Assert no rows are different
assert not np.any(t != t)
# Check equality result for a given row
assert np.all((t == t[3]) == np.array([0, 0, 0, 1, 0, 0, 0, 0], dtype=bool))
# Check inequality result for a given row
assert np.all((t != t[3]) == np.array([1, 1, 1, 0, 1, 1, 1, 1], dtype=bool))
t2 = table.Table.read([' a b c d',
' 2 c 7.0 0',
' 2 b 5.0 1',
' 3 b 6.0 2',
' 2 a 4.0 3',
' 0 a 1.0 4',
' 1 b 3.0 5',
' 1 c 2.0 6',
' 1 a 1.0 7',
], format='ascii')
    # In the above cases, Row.__eq__ gets called, but now we need to make sure
# Table.__eq__ also gets called.
assert np.all((t == t2) == np.array([1, 1, 0, 1, 0, 1, 0, 1], dtype=bool))
assert np.all((t != t2) == np.array([0, 0, 1, 0, 1, 0, 1, 0], dtype=bool))
# Check that masking a value causes the row to differ
t.mask['a'][0] = True
assert np.all((t == t2) == np.array([0, 1, 0, 1, 0, 1, 0, 1], dtype=bool))
assert np.all((t != t2) == np.array([1, 0, 1, 0, 1, 0, 1, 0], dtype=bool))
# Check that comparing to a structured array works
assert np.all((t == t2.as_array()) == np.array([0, 1, 0, 1, 0, 1, 0, 1], dtype=bool))
@pytest.mark.xfail
def test_equality_masked_bug():
"""
This highlights a Numpy bug. Once it works, it can be moved into the
test_equality_masked test. Related Numpy bug report:
https://github.com/numpy/numpy/issues/3840
"""
t = table.Table.read([' a b c d',
' 2 c 7.0 0',
' 2 b 5.0 1',
' 2 b 6.0 2',
' 2 a 4.0 3',
' 0 a 0.0 4',
' 1 b 3.0 5',
' 1 a 2.0 6',
' 1 a 1.0 7',
], format='ascii')
t = table.Table(t, masked=True)
t2 = table.Table.read([' a b c d',
' 2 c 7.0 0',
' 2 b 5.0 1',
' 3 b 6.0 2',
' 2 a 4.0 3',
' 0 a 1.0 4',
' 1 b 3.0 5',
' 1 c 2.0 6',
' 1 a 1.0 7',
], format='ascii')
assert np.all((t.as_array() == t2) == np.array([0, 1, 0, 1, 0, 1, 0, 1], dtype=bool))
# Check that the meta descriptor is working as expected. The MetaBaseTest class
# takes care of defining all the tests, and we simply have to define the class
# and any minimal set of args to pass.
from astropy.utils.tests.test_metadata import MetaBaseTest
class TestMetaTable(MetaBaseTest):
test_class = table.Table
args = ()
def test_unicode_content():
# If we don't have unicode literals then return
if isinstance('', bytes):
return
# Define unicode literals
string_a = 'астрономическая питона'
string_b = 'миллиарды световых лет'
a = table.Table(
[[string_a, 2],
[string_b, 3]],
names=('a', 'b'))
assert string_a in str(a)
# This only works because the coding of this file is utf-8, which
# matches the default encoding of Table.__str__
assert string_a.encode('utf-8') in bytes(a)
def test_unicode_policy():
t = table.Table.read([' a b c d',
' 2 c 7.0 0',
' 2 b 5.0 1',
' 2 b 6.0 2',
' 2 a 4.0 3',
' 0 a 0.0 4',
' 1 b 3.0 5',
' 1 a 2.0 6',
' 1 a 1.0 7',
], format='ascii')
assert_follows_unicode_guidelines(t)
@pytest.mark.parametrize('uni', ['питона', 'ascii'])
def test_unicode_bytestring_conversion(table_types, uni):
"""
    Test converting columns to all unicode or all bytestring. This
makes two columns, one which is unicode (str in Py3) and one which
is bytes (UTF-8 encoded). There are two code paths in the conversions,
a faster one where the data are actually ASCII and a slower one where
UTF-8 conversion is required. This tests both via the ``uni`` param.
"""
byt = uni.encode('utf-8')
t = table_types.Table([[byt], [uni], [1]], dtype=('S', 'U', 'i'))
assert t['col0'].dtype.kind == 'S'
assert t['col1'].dtype.kind == 'U'
assert t['col2'].dtype.kind == 'i'
t['col0'].description = 'col0'
t['col1'].description = 'col1'
t['col0'].meta['val'] = 'val0'
t['col1'].meta['val'] = 'val1'
# Unicode to bytestring
t1 = t.copy()
t1.convert_unicode_to_bytestring()
assert t1['col0'].dtype.kind == 'S'
assert t1['col1'].dtype.kind == 'S'
assert t1['col2'].dtype.kind == 'i'
# Meta made it through
assert t1['col0'].description == 'col0'
assert t1['col1'].description == 'col1'
assert t1['col0'].meta['val'] == 'val0'
assert t1['col1'].meta['val'] == 'val1'
# Need to de-fang the automatic unicode sandwiching of Table
assert np.array(t1['col0'])[0] == byt
assert np.array(t1['col1'])[0] == byt
assert np.array(t1['col2'])[0] == 1
# Bytestring to unicode
t1 = t.copy()
t1.convert_bytestring_to_unicode()
assert t1['col0'].dtype.kind == 'U'
assert t1['col1'].dtype.kind == 'U'
assert t1['col2'].dtype.kind == 'i'
# Meta made it through
assert t1['col0'].description == 'col0'
assert t1['col1'].description == 'col1'
assert t1['col0'].meta['val'] == 'val0'
assert t1['col1'].meta['val'] == 'val1'
# No need to de-fang the automatic unicode sandwiching of Table here, but
    # do so just for consistency to prove things are working.
assert np.array(t1['col0'])[0] == uni
assert np.array(t1['col1'])[0] == uni
assert np.array(t1['col2'])[0] == 1
def test_table_deletion():
"""
Regression test for the reference cycle discussed in
https://github.com/astropy/astropy/issues/2877
"""
deleted = set()
# A special table subclass which leaves a record when it is finalized
class TestTable(table.Table):
def __del__(self):
deleted.add(id(self))
t = TestTable({'a': [1, 2, 3]})
the_id = id(t)
assert t['a'].parent_table is t
del t
# Cleanup
gc.collect()
assert the_id in deleted
def test_nested_iteration():
"""
Regression test for issue 3358 where nested iteration over a single table fails.
"""
t = table.Table([[0, 1]], names=['a'])
out = []
for r1 in t:
for r2 in t:
out.append((r1['a'], r2['a']))
assert out == [(0, 0), (0, 1), (1, 0), (1, 1)]
def test_table_init_from_degenerate_arrays(table_types):
t = table_types.Table(np.array([]))
assert len(t.columns) == 0
with pytest.raises(ValueError):
t = table_types.Table(np.array(0))
t = table_types.Table(np.array([1, 2, 3]))
assert len(t.columns) == 3
@pytest.mark.skipif('not HAS_PANDAS')
class TestPandas:
def test_simple(self):
t = table.Table()
for endian in ['<', '>']:
for kind in ['f', 'i']:
for byte in ['2', '4', '8']:
dtype = np.dtype(endian + kind + byte)
x = np.array([1, 2, 3], dtype=dtype)
t[endian + kind + byte] = x
t['u'] = ['a', 'b', 'c']
t['s'] = ['a', 'b', 'c']
d = t.to_pandas()
for column in t.columns:
if column == 'u':
assert np.all(t['u'] == np.array(['a', 'b', 'c']))
assert d[column].dtype == np.dtype("O") # upstream feature of pandas
elif column == 's':
assert np.all(t['s'] == np.array(['a', 'b', 'c']))
assert d[column].dtype == np.dtype("O") # upstream feature of pandas
else:
# We should be able to compare exact values here
assert np.all(t[column] == d[column])
if t[column].dtype.byteorder in ('=', '|'):
assert d[column].dtype == t[column].dtype
else:
assert d[column].dtype == t[column].byteswap().newbyteorder().dtype
# Regression test for astropy/astropy#1156 - the following code gave a
# ValueError: Big-endian buffer not supported on little-endian
# compiler. We now automatically swap the endian-ness to native order
# upon adding the arrays to the data frame.
d[['<i4', '>i4']]
d[['<f4', '>f4']]
t2 = table.Table.from_pandas(d)
for column in t.columns:
if column in ('u', 's'):
assert np.all(t[column] == t2[column])
else:
assert_allclose(t[column], t2[column])
if t[column].dtype.byteorder in ('=', '|'):
assert t[column].dtype == t2[column].dtype
else:
assert t[column].byteswap().newbyteorder().dtype == t2[column].dtype
def test_2d(self):
t = table.Table()
t['a'] = [1, 2, 3]
t['b'] = np.ones((3, 2))
with pytest.raises(ValueError) as exc:
t.to_pandas()
assert (exc.value.args[0] ==
"Cannot convert a table with multi-dimensional columns "
"to a pandas DataFrame. Offending columns are: ['b']")
def test_mixin_pandas(self):
t = table.QTable()
for name in sorted(MIXIN_COLS):
if name != 'ndarray':
t[name] = MIXIN_COLS[name]
t['dt'] = TimeDelta([0, 2, 4, 6], format='sec')
tp = t.to_pandas()
t2 = table.Table.from_pandas(tp)
assert np.allclose(t2['quantity'], [0, 1, 2, 3])
assert np.allclose(t2['longitude'], [0., 1., 5., 6.])
assert np.allclose(t2['latitude'], [5., 6., 10., 11.])
assert np.allclose(t2['skycoord.ra'], [0, 1, 2, 3])
assert np.allclose(t2['skycoord.dec'], [0, 1, 2, 3])
assert np.allclose(t2['arraywrap'], [0, 1, 2, 3])
assert np.allclose(t2['earthlocation.y'], [0, 110708, 547501, 654527], rtol=0, atol=1)
# For pandas, Time, TimeDelta are the mixins that round-trip the class
assert isinstance(t2['time'], Time)
assert np.allclose(t2['time'].jyear, [2000, 2001, 2002, 2003])
assert np.all(t2['time'].isot == ['2000-01-01T12:00:00.000',
'2000-12-31T18:00:00.000',
'2002-01-01T00:00:00.000',
'2003-01-01T06:00:00.000'])
assert t2['time'].format == 'isot'
# TimeDelta
assert isinstance(t2['dt'], TimeDelta)
assert np.allclose(t2['dt'].value, [0, 2, 4, 6])
assert t2['dt'].format == 'sec'
def test_to_pandas_index(self):
import pandas as pd
row_index = pd.RangeIndex(0, 2, 1)
tm_index = pd.DatetimeIndex(['1998-01-01', '2002-01-01'],
dtype='datetime64[ns]',
name='tm', freq=None)
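        # Expected pandas indexes: the default RangeIndex vs. a DatetimeIndex built from the 'tm' column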
tm = Time([1998, 2002], format='jyear')
x = [1, 2]
t = table.QTable([tm, x], names=['tm', 'x'])
tp = t.to_pandas()
assert np.all(tp.index == row_index)
tp = t.to_pandas(index='tm')
assert np.all(tp.index == tm_index)
t.add_index('tm')
tp = t.to_pandas()
assert np.all(tp.index == tm_index)
# Make sure writing to pandas didn't hack the original table
assert t['tm'].info.indices
tp = t.to_pandas(index=True)
assert np.all(tp.index == tm_index)
tp = t.to_pandas(index=False)
assert np.all(tp.index == row_index)
with pytest.raises(ValueError) as err:
t.to_pandas(index='not a column')
assert 'index must be None, False' in str(err.value)
def test_mixin_pandas_masked(self):
tm = Time([1, 2, 3], format='cxcsec')
dt = TimeDelta([1, 2, 3], format='sec')
tm[1] = np.ma.masked
dt[1] = np.ma.masked
t = table.QTable([tm, dt], names=['tm', 'dt'])
tp = t.to_pandas()
assert np.all(tp['tm'].isnull() == [False, True, False])
assert np.all(tp['dt'].isnull() == [False, True, False])
t2 = table.Table.from_pandas(tp)
assert np.all(t2['tm'].mask == tm.mask)
assert np.ma.allclose(t2['tm'].jd, tm.jd, rtol=1e-14, atol=1e-14)
assert np.all(t2['dt'].mask == dt.mask)
assert np.ma.allclose(t2['dt'].jd, dt.jd, rtol=1e-14, atol=1e-14)
def test_from_pandas_index(self):
tm = Time([1998, 2002], format='jyear')
x = [1, 2]
t = table.Table([tm, x], names=['tm', 'x'])
tp = t.to_pandas(index='tm')
t2 = table.Table.from_pandas(tp)
assert t2.colnames == ['x']
t2 = table.Table.from_pandas(tp, index=True)
assert t2.colnames == ['tm', 'x']
assert np.allclose(t2['tm'].jyear, tm.jyear)
def test_masking(self):
t = table.Table(masked=True)
t['a'] = [1, 2, 3]
t['a'].mask = [True, False, True]
t['b'] = [1., 2., 3.]
t['b'].mask = [False, False, True]
t['u'] = ['a', 'b', 'c']
t['u'].mask = [False, True, False]
t['s'] = ['a', 'b', 'c']
t['s'].mask = [False, True, False]
# https://github.com/astropy/astropy/issues/7741
t['Source'] = [2584290278794471936, 2584290038276303744,
2584288728310999296]
t['Source'].mask = [False, False, False]
d = t.to_pandas()
t2 = table.Table.from_pandas(d)
for name, column in t.columns.items():
assert np.all(column.data == t2[name].data)
if hasattr(t2[name], 'mask'):
assert np.all(column.mask == t2[name].mask)
# Masked integer type comes back as float. Nothing we can do about this.
if column.dtype.kind == 'i':
if np.any(column.mask):
assert t2[name].dtype.kind == 'f'
else:
assert t2[name].dtype.kind == 'i'
assert_array_equal(column.data,
t2[name].data.astype(column.dtype))
else:
if column.dtype.byteorder in ('=', '|'):
assert column.dtype == t2[name].dtype
else:
assert column.byteswap().newbyteorder().dtype == t2[name].dtype
@pytest.mark.usefixtures('table_types')
class TestReplaceColumn(SetupData):
def test_fail_replace_column(self, table_types):
"""Raise exception when trying to replace column via table.columns object"""
self._setup(table_types)
t = table_types.Table([self.a, self.b])
with pytest.raises(ValueError):
t.columns['a'] = [1, 2, 3]
with pytest.raises(ValueError):
t.replace_column('not there', [1, 2, 3])
with pytest.raises(ValueError) as exc:
t.replace_column('a', [1, 2])
assert "length of new column must match table length" in str(exc.value)
def test_replace_column(self, table_types):
"""Replace existing column with a new column"""
self._setup(table_types)
t = table_types.Table([self.a, self.b])
ta = t['a']
tb = t['b']
vals = [1.2, 3.4, 5.6]
for col in (vals,
table_types.Column(vals),
table_types.Column(vals, name='a'),
table_types.Column(vals, name='b')):
t.replace_column('a', col)
assert np.all(t['a'] == vals)
assert t['a'] is not ta # New a column
assert t['b'] is tb # Original b column unchanged
assert t.colnames == ['a', 'b']
assert t['a'].meta == {}
assert t['a'].format is None
# Special case: replacing the only column can resize table
del t['b']
assert len(t) == 3
t['a'] = [1, 2]
assert len(t) == 2
def test_replace_index_column(self, table_types):
"""Replace index column and generate expected exception"""
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t.add_index('a')
with pytest.raises(ValueError) as err:
t.replace_column('a', [1, 2, 3])
assert err.value.args[0] == 'cannot replace a table index column'
def test_replace_column_no_copy(self):
t = Table([[1, 2], [3, 4]], names=['a', 'b'])
a = np.array([1.5, 2.5])
t.replace_column('a', a, copy=False)
assert t['a'][0] == a[0]
t['a'][0] = 10
assert t['a'][0] == a[0]
def test_replace_with_masked_col_with_units_in_qtable(self):
"""This is a small regression from #8902"""
t = QTable([[1, 2], [3, 4]], names=['a', 'b'])
t['a'] = MaskedColumn([5, 6], unit='m')
assert isinstance(t['a'], u.Quantity)
class Test__Astropy_Table__():
"""
Test initializing a Table subclass from a table-like object that
implements the __astropy_table__ interface method.
"""
class SimpleTable:
def __init__(self):
self.columns = [[1, 2, 3],
[4, 5, 6],
[7, 8, 9] * u.m]
self.names = ['a', 'b', 'c']
self.meta = OrderedDict([('a', 1), ('b', 2)])
def __astropy_table__(self, cls, copy, **kwargs):
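            # Table passes the target class, the copy flag, and any unrecognized
            # Table() kwargs to this hook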
a, b, c = self.columns
c.info.name = 'c'
cols = [table.Column(a, name='a'),
table.MaskedColumn(b, name='b'),
c]
names = [col.info.name for col in cols]
return cls(cols, names=names, copy=copy, meta=kwargs or self.meta)
def test_simple_1(self):
"""Make a SimpleTable and convert to Table, QTable with copy=False, True"""
for table_cls in (table.Table, table.QTable):
col_c_class = u.Quantity if table_cls is table.QTable else table.Column
for cpy in (False, True):
st = self.SimpleTable()
# Test putting in a non-native kwarg `extra_meta` to Table initializer
t = table_cls(st, copy=cpy, extra_meta='extra!')
assert t.colnames == ['a', 'b', 'c']
assert t.meta == {'extra_meta': 'extra!'}
assert np.all(t['a'] == st.columns[0])
assert np.all(t['b'] == st.columns[1])
vals = t['c'].value if table_cls is table.QTable else t['c']
assert np.all(st.columns[2].value == vals)
assert isinstance(t['a'], table.Column)
assert isinstance(t['b'], table.MaskedColumn)
assert isinstance(t['c'], col_c_class)
assert t['c'].unit is u.m
assert type(t) is table_cls
# Copy being respected?
t['a'][0] = 10
                assert st.columns[0][0] == (1 if cpy else 10)
def test_simple_2(self):
"""Test converting a SimpleTable and changing column names and types"""
st = self.SimpleTable()
dtypes = [np.int32, np.float32, np.float16]
names = ['a', 'b', 'c']
meta = OrderedDict([('c', 3)])
t = table.Table(st, dtype=dtypes, names=names, meta=meta)
assert t.colnames == names
assert all(col.dtype.type is dtype
for col, dtype in zip(t.columns.values(), dtypes))
        # The supplied meta overrides the existing meta. Changed in astropy 3.2.
assert t.meta != st.meta
assert t.meta == meta
def test_kwargs_exception(self):
"""If extra kwargs provided but without initializing with a table-like
object, exception is raised"""
with pytest.raises(TypeError) as err:
table.Table([[1]], extra_meta='extra!')
assert '__init__() got unexpected keyword argument' in str(err.value)
def test_table_meta_copy():
"""
Test no copy vs light (key) copy vs deep copy of table meta for different
situations. #8404.
"""
t = table.Table([[1]])
meta = {1: [1, 2]}
# Assigning meta directly implies using direct object reference
t.meta = meta
assert t.meta is meta
# Table slice implies key copy, so values are unchanged
t2 = t[:]
assert t2.meta is not t.meta # NOT the same OrderedDict object but equal
assert t2.meta == t.meta
assert t2.meta[1] is t.meta[1] # Value IS the list same object
# Table init with copy=False implies key copy
t2 = table.Table(t, copy=False)
assert t2.meta is not t.meta # NOT the same OrderedDict object but equal
assert t2.meta == t.meta
assert t2.meta[1] is t.meta[1] # Value IS the same list object
# Table init with copy=True implies deep copy
t2 = table.Table(t, copy=True)
assert t2.meta is not t.meta # NOT the same OrderedDict object but equal
assert t2.meta == t.meta
assert t2.meta[1] is not t.meta[1] # Value is NOT the same list object
def test_table_meta_copy_with_meta_arg():
"""
Test no copy vs light (key) copy vs deep copy of table meta when meta is
supplied as a table init argument. #8404.
"""
meta = {1: [1, 2]}
meta2 = {2: [3, 4]}
t = table.Table([[1]], meta=meta, copy=False)
assert t.meta is meta
t = table.Table([[1]], meta=meta) # default copy=True
assert t.meta is not meta
assert t.meta == meta
# Test initializing from existing table with meta with copy=False
t2 = table.Table(t, meta=meta2, copy=False)
assert t2.meta is meta2
assert t2.meta != t.meta # Change behavior in #8404
# Test initializing from existing table with meta with default copy=True
t2 = table.Table(t, meta=meta2)
assert t2.meta is not meta2
assert t2.meta != t.meta # Change behavior in #8404
# Table init with copy=True and empty dict meta gets that empty dict
t2 = table.Table(t, copy=True, meta={})
assert t2.meta == {}
# Table init with copy=True and kwarg meta=None gets the original table dict.
# This is a somewhat ambiguous case because it could be interpreted as the
# user wanting NO meta set on the output. This could be implemented by inspecting
# call args.
t2 = table.Table(t, copy=True, meta=None)
assert t2.meta == t.meta
# Test initializing empty table with meta with copy=False
t = table.Table(meta=meta, copy=False)
assert t.meta is meta
assert t.meta[1] is meta[1]
# Test initializing empty table with meta with default copy=True (deepcopy meta)
t = table.Table(meta=meta)
assert t.meta is not meta
assert t.meta == meta
assert t.meta[1] is not meta[1]
def test_replace_column_qtable():
"""Replace existing Quantity column with a new column in a QTable"""
a = [1, 2, 3] * u.m
b = [4, 5, 6]
t = table.QTable([a, b], names=['a', 'b'])
ta = t['a']
tb = t['b']
ta.info.meta = {'aa': [0, 1, 2, 3, 4]}
ta.info.format = '%f'
t.replace_column('a', a.to('cm'))
assert np.all(t['a'] == ta)
assert t['a'] is not ta # New a column
assert t['b'] is tb # Original b column unchanged
assert t.colnames == ['a', 'b']
assert t['a'].info.meta is None
assert t['a'].info.format is None
def test_replace_update_column_via_setitem():
"""
Test table update like ``t['a'] = value``. This leverages off the
already well-tested ``replace_column`` and in-place update
``t['a'][:] = value``, so this testing is fairly light.
"""
a = [1, 2] * u.m
b = [3, 4]
t = table.QTable([a, b], names=['a', 'b'])
assert isinstance(t['a'], u.Quantity)
# Inplace update
ta = t['a']
t['a'] = 5 * u.m
assert np.all(t['a'] == [5, 5] * u.m)
assert t['a'] is ta
# Replace
t['a'] = [5, 6]
assert np.all(t['a'] == [5, 6])
assert isinstance(t['a'], table.Column)
assert t['a'] is not ta
def test_replace_update_column_via_setitem_warnings_normal():
"""
Test warnings related to table replace change in #5556:
Normal warning-free replace
"""
t = table.Table([[1, 2, 3], [4, 5, 6]], names=['a', 'b'])
with catch_warnings() as w:
with table.conf.set_temp('replace_warnings',
['refcount', 'attributes', 'slice']):
t['a'] = 0 # in-place update
assert len(w) == 0
t['a'] = [10, 20, 30] # replace column
assert len(w) == 0
def test_replace_update_column_via_setitem_warnings_slice():
"""
Test warnings related to table replace change in #5556:
Replace a slice, one warning.
"""
t = table.Table([[1, 2, 3], [4, 5, 6]], names=['a', 'b'])
with catch_warnings() as w:
with table.conf.set_temp('replace_warnings',
['refcount', 'attributes', 'slice']):
t2 = t[:2]
t2['a'] = 0 # in-place slice update
assert np.all(t['a'] == [0, 0, 3])
assert len(w) == 0
t2['a'] = [10, 20] # replace slice
assert len(w) == 1
assert "replaced column 'a' which looks like an array slice" in str(w[0].message)
def test_replace_update_column_via_setitem_warnings_attributes():
"""
Test warnings related to table replace change in #5556:
Lost attributes.
"""
t = table.Table([[1, 2, 3], [4, 5, 6]], names=['a', 'b'])
t['a'].unit = 'm'
with catch_warnings() as w:
with table.conf.set_temp('replace_warnings',
['refcount', 'attributes', 'slice']):
t['a'] = [10, 20, 30]
assert len(w) == 1
assert "replaced column 'a' and column attributes ['unit']" in str(w[0].message)
def test_replace_update_column_via_setitem_warnings_refcount():
"""
Test warnings related to table replace change in #5556:
Reference count changes.
"""
t = table.Table([[1, 2, 3], [4, 5, 6]], names=['a', 'b'])
ta = t['a'] # Generate an extra reference to original column
with catch_warnings() as w:
with table.conf.set_temp('replace_warnings',
['refcount', 'attributes', 'slice']):
t['a'] = [10, 20, 30]
assert len(w) == 1
assert "replaced column 'a' and the number of references" in str(w[0].message)
def test_replace_update_column_via_setitem_warnings_always():
"""
Test warnings related to table replace change in #5556:
Test 'always' setting that raises warning for any replace.
"""
t = table.Table([[1, 2, 3], [4, 5, 6]], names=['a', 'b'])
with catch_warnings() as w:
with table.conf.set_temp('replace_warnings', ['always']):
            t['a'] = 0  # in-place update
assert len(w) == 0
from inspect import currentframe, getframeinfo
frameinfo = getframeinfo(currentframe())
t['a'] = [10, 20, 30] # replace column
assert len(w) == 1
assert "replaced column 'a'" == str(w[0].message)
# Make sure the warning points back to the user code line
assert w[0].lineno == frameinfo.lineno + 1
assert w[0].category is table.TableReplaceWarning
assert 'test_table' in w[0].filename
def test_replace_update_column_via_setitem_replace_inplace():
"""
Test the replace_inplace config option related to #5556. In this
case no replace is done.
"""
t = table.Table([[1, 2, 3], [4, 5, 6]], names=['a', 'b'])
ta = t['a']
t['a'].unit = 'm'
with catch_warnings() as w:
with table.conf.set_temp('replace_inplace', True):
with table.conf.set_temp('replace_warnings',
['always', 'refcount', 'attributes', 'slice']):
t['a'] = 0 # in-place update
assert len(w) == 0
assert ta is t['a']
t['a'] = [10, 20, 30] # normally replaces column, but not now
assert len(w) == 0
assert ta is t['a']
assert np.all(t['a'] == [10, 20, 30])
def test_primary_key_is_inherited():
"""Test whether a new Table inherits the primary_key attribute from
its parent Table. Issue #4672"""
t = table.Table([(2, 3, 2, 1), (8, 7, 6, 5)], names=('a', 'b'))
t.add_index('a')
original_key = t.primary_key
# can't test if tuples are equal, so just check content
    assert original_key[0] == 'a'
t2 = t[:]
t3 = t.copy()
t4 = table.Table(t)
# test whether the reference is the same in the following
assert original_key == t2.primary_key
assert original_key == t3.primary_key
assert original_key == t4.primary_key
# just test one element, assume rest are equal if assert passes
assert t.loc[1] == t2.loc[1]
assert t.loc[1] == t3.loc[1]
assert t.loc[1] == t4.loc[1]
def test_qtable_read_for_ipac_table_with_char_columns():
'''Test that a char column of a QTable is assigned no unit and not
a dimensionless unit, otherwise conversion of reader output to
QTable fails.'''
t1 = table.QTable([["A"]], names="B")
out = StringIO()
t1.write(out, format="ascii.ipac")
t2 = table.QTable.read(out.getvalue(), format="ascii.ipac", guess=False)
assert t2["B"].unit is None
def test_create_table_from_final_row():
"""Regression test for issue #8422: passing the last row of a table into
Table should return a new table containing that row."""
t1 = table.Table([(1, 2)], names=['col'])
row = t1[-1]
t2 = table.Table(row)['col']
assert t2[0] == 2
def test_key_values_in_as_array():
    # Test for checking column slicing via the names argument in Table.as_array()
data_rows = [(1, 2.0, 'x'),
(4, 5.0, 'y'),
(5, 8.2, 'z')]
# Creating a table with three columns
t1 = table.Table(rows=data_rows, names=('a', 'b', 'c'),
meta={'name': 'first table'},
dtype=('i4', 'f8', 'S1'))
    # Values of sliced columns a, b are stored in a numpy array
a = np.array([(1, 2.), (4, 5.), (5, 8.2)],
dtype=[('a', '<i4'), ('b', '<f8')])
    # Values of sliced column c are stored in a numpy array
b = np.array([(b'x',), (b'y',), (b'z',)], dtype=[('c', 'S1')])
# Comparing initialised array with sliced array using Table.as_array()
assert np.array_equal(a, t1.as_array(names=['a', 'b']))
assert np.array_equal(b, t1.as_array(names=['c']))
def test_tolist():
t = table.Table([[1, 2, 3], [1.1, 2.2, 3.3], [b'foo', b'bar', b'hello']],
names=('a', 'b', 'c'))
assert t['a'].tolist() == [1, 2, 3]
assert_array_equal(t['b'].tolist(), [1.1, 2.2, 3.3])
assert t['c'].tolist() == ['foo', 'bar', 'hello']
assert isinstance(t['a'].tolist()[0], int)
assert isinstance(t['b'].tolist()[0], float)
assert isinstance(t['c'].tolist()[0], str)
t = table.Table([[[1, 2], [3, 4]],
[[b'foo', b'bar'], [b'hello', b'world']]],
names=('a', 'c'))
assert t['a'].tolist() == [[1, 2], [3, 4]]
assert t['c'].tolist() == [['foo', 'bar'], ['hello', 'world']]
assert isinstance(t['a'].tolist()[0][0], int)
assert isinstance(t['c'].tolist()[0][0], str)
def test_broadcasting_8933():
"""Explicitly check re-work of code related to broadcasting in #8933"""
t = table.Table([[1, 2]]) # Length=2 table
t['a'] = [[3, 4]] # Can broadcast if ndim > 1 and shape[0] == 1
t['b'] = 5
t['c'] = [1] # Treat as broadcastable scalar, not length=1 array (which would fail)
assert np.all(t['a'] == [[3, 4], [3, 4]])
assert np.all(t['b'] == [5, 5])
assert np.all(t['c'] == [1, 1])
# Test that broadcasted column is writeable
t['c'][1] = 10
assert np.all(t['c'] == [1, 10])
def test_custom_masked_column_in_nonmasked_table():
"""Test the refactor and change in column upgrades introduced
in 95902650f. This fixes a regression introduced by #8789
(Change behavior of Table regarding masked columns)."""
class MyMaskedColumn(table.MaskedColumn):
pass
class MySubMaskedColumn(MyMaskedColumn):
pass
class MyColumn(table.Column):
pass
class MySubColumn(MyColumn):
pass
class MyTable(table.Table):
Column = MyColumn
MaskedColumn = MyMaskedColumn
a = table.Column([1])
b = table.MaskedColumn([2], mask=[True])
c = MyMaskedColumn([3], mask=[True])
d = MySubColumn([4])
e = MySubMaskedColumn([5], mask=[True])
# Two different pathways for making table
t1 = MyTable([a, b, c, d, e], names=['a', 'b', 'c', 'd', 'e'])
t2 = MyTable()
t2['a'] = a
t2['b'] = b
t2['c'] = c
t2['d'] = d
t2['e'] = e
for t in (t1, t2):
assert type(t['a']) is MyColumn
assert type(t['b']) is MyMaskedColumn # upgrade
assert type(t['c']) is MyMaskedColumn
assert type(t['d']) is MySubColumn
assert type(t['e']) is MySubMaskedColumn # sub-class not downgraded
def test_data_to_col_convert_strategy():
"""Test the update to how data_to_col works (#8972), using the regression
example from #8971.
"""
t = table.Table([[0, 1]])
t['a'] = 1
t['b'] = np.int64(2) # Failed previously
assert np.all(t['a'] == [1, 1])
assert np.all(t['b'] == [2, 2])
|
the-stack_106_27461 | '''texplain
Create a clean output directory with only included files/citations.
Usage:
texplain [options] <input.tex> <output-directory>
Options:
--version Show version.
-h, --help Show help.
(c - MIT) T.W.J. de Geus | [email protected] | www.geus.me | github.com/tdegeus/texplain
'''
__version__ = '0.3.4'
import os
import re
import sys
import docopt
import click
from copy import deepcopy
from shutil import copyfile
from shutil import rmtree
class TeX:
def __init__(self, filename):
if not os.path.isfile(filename):
raise IOError('"{0:s}" does not exist'.format(filename))
self.tex = open(filename, 'r').read()
self.dirname = os.path.dirname(filename)
self.filename = os.path.split(filename)[1]
if len(self.dirname) == 0:
self.dirname = '.'
has_input = re.search(r'(.*)(\\input\{)(.*)(\})', self.tex, re.MULTILINE)
has_include = re.search(r'(.*)(\\include\{)(.*)(\})', self.tex, re.MULTILINE)
if has_input or has_include:
raise IOError(r'TeX-files with \input{...} or \include{...} not yet supported')
def read_float(self, cmd=r'\includegraphics'):
r'''
Extract the keys of 'float' commands (e.g. "\includegraphics{...}", "\bibliography{...}") and
reconstruct their file-names.
:options:
**cmd** ([``r'\includegraphics'``] | ``<str>``)
The command to look for.
:returns:
A list ``[('key', 'filename'), (...), ...]`` in order of appearance.
'''
import numpy as np
# mimic the LaTeX behaviour where an extension is automatically added to a
# file-name without any extension
def filename(dirname, name):
if os.path.isfile(os.path.join(dirname, name)):
return os.path.relpath(os.path.join(dirname, name), dirname)
if os.path.isfile(os.path.join(dirname, name) + '.pdf'):
return os.path.relpath(os.path.join(dirname, name) + '.pdf', dirname)
if os.path.isfile(os.path.join(dirname, name) + '.eps'):
return os.path.relpath(os.path.join(dirname, name) + '.eps', dirname)
if os.path.isfile(os.path.join(dirname, name) + '.png'):
return os.path.relpath(os.path.join(dirname, name) + '.png', dirname)
if os.path.isfile(os.path.join(dirname, name) + '.jpg'):
return os.path.relpath(os.path.join(dirname, name) + '.jpg', dirname)
if os.path.isfile(os.path.join(dirname, name) + '.tex'):
return os.path.relpath(os.path.join(dirname, name) + '.tex', dirname)
if os.path.isfile(os.path.join(dirname, name) + '.bib'):
return os.path.relpath(os.path.join(dirname, name) + '.bib', dirname)
raise IOError('Cannot find {0:s}'.format(name))
# read the contents of the command
# - "\includegraphics" accepts "\includegraphics[...]{...}"
# - "\bibliography" rejects "\bibliographystyle{...}"
include = []
for i in self.tex.split(cmd)[1:]:
if i[0] in ['[', '{']:
include += [i.split('{')[1].split('}')[0]]
# extract the filename
out = [(i, filename(self.dirname, i)) for i in include]
# check for duplicates
filenames = [i[1] for i in out]
assert(np.unique(np.array(filenames)).size == len(filenames))
return out
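    # A minimal sketch of the expected return shape (the file names below are
    # assumptions, not part of this module):
    #     TeX('main.tex').read_float(r'\includegraphics')
    #     # -> [('figures/fig1', 'figures/fig1.pdf'), ('figures/fig2', 'figures/fig2.png')]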
def rename_float(self, old, new, cmd=r'\includegraphics'):
r'''
Rename a key of a 'float' command (e.g. "\includegraphics{...}", "\bibliography{...}").
:arguments:
**old, new** (``<str>``)
The old and the new key.
:options:
**cmd** ([``r'\includegraphics'``] | ``<str>``)
The command to look for.
'''
text = self.tex.split(cmd)
for i in range(1, len(text)):
pre, key = text[i].split('{', 1)
key, post = key.split('}', 1)
if key != old:
continue
if text[i][0] not in ['[', '{']:
continue
text[i] = pre + '{' + new + '}' + post
self.tex = cmd.join(text)
def read_citation_keys(self):
r'''
Read the citation keys in the TeX file (those keys in "\cite{...}", "\citet{...}", ...).
        Note that the output is unique, in the order of appearance.
'''
# extract keys from "cite"
def extract(string):
try:
return list(re.split(
r'([pt])?(\[.*\]\[.*\])?(\{[a-zA-Z0-9\.\,\-\ \_]*\})',
string)[3][1: -1].split(','))
except:
if len(string) >= 100:
string = string[:100]
raise IOError('Error in interpreting\n {0:s} ...'.format(string))
# read all keys in "cite", "citet", "citep" commands
cite = [extract(i) for i in self.tex.split(r'\cite')[1:]]
cite = list(set([item for sublist in cite for item in sublist]))
cite = [i.replace(' ', '') for i in cite]
return cite
def find_by_extension(self, ext):
r'''
Find all files with a certain extensions in the directory of the TeX-file.
'''
filenames = os.listdir(self.dirname)
return [i for i in filenames if os.path.splitext(i)[1] == ext]
def read_config(self):
r'''
        Read configuration files in the directory of the TeX-file. A possible extension would be to
        check whether the files are actually used.
'''
ext = ['.sty', '.cls', '.bst']
out = []
for e in ext:
out += self.find_by_extension(e)
return out
def bib_select(text, keys):
r'''
Limit a BibTeX file to a list of keys.
:arguments:
        **text** (``<str>``)
The BibTeX file, opened and read.
**keys** (``<list<str>>``)
The list of keys to select.
:returns:
The (reduced) BibTeX file, as string.
'''
text = '\n' + text
bib = list(filter(None, text.split('@')))[1:]
out = []
for i in bib:
if re.match(r'(string\{)(.*)', i):
continue
if re.match(r'(Comment\ )(.*)', i, re.IGNORECASE):
continue
if re.match(r'(comment\{)(.*)', i, re.IGNORECASE):
continue
if re.split(r'(.*\{)(.*)(,\n.*)', i)[2] in keys:
out += [i]
out = '\n@' + '\n@'.join(out)
while '\n\n\n' in out:
out = out.replace('\n\n\n', '\n\n')
return out
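# A minimal usage sketch for bib_select (file names are assumptions; assumes
# 'library.bib' sits next to 'main.tex'):
#
#     keys = TeX('main.tex').read_citation_keys()
#     reduced = bib_select(open('library.bib', 'r').read(), keys)
#     open('library_small.bib', 'w').write(reduced)
#
# Only entries whose keys appear in `keys` survive; @string and @comment blocks
# are always dropped by the filters above.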
def from_commandline():
r'''
Main function (see command-line help)
'''
args = docopt.docopt(__doc__, version=__version__)
newdir = args['<output-directory>']
if not os.path.isfile(args['<input.tex>']):
raise IOError('"{0:s}" does not exist'.format(args['<input.tex>']))
if os.path.isdir(newdir):
if os.listdir(newdir):
raise IOError('"{0:s}" is not empty, please provide a new or empty directory'.format(newdir))
else:
os.makedirs(newdir)
old = TeX(args['<input.tex>'])
new = deepcopy(old)
new.dirname = newdir
includegraphics = old.read_float(r'\includegraphics')
bibfiles = old.read_float(r'\bibliography')
bibkeys = old.read_citation_keys()
config_files = old.read_config()
# Copy configuration files
for ofile in config_files:
copyfile(
os.path.join(old.dirname, ofile),
os.path.join(new.dirname, ofile))
# Copy/rename figures
if len(includegraphics) > 0:
new_includegraphics = []
for i, (okey, ofile) in enumerate(includegraphics):
nkey = 'figure_{0:d}'.format(i + 1)
ext = os.path.splitext(ofile)[1]
nfile = ofile.replace(os.path.normpath(okey), nkey)
if len(os.path.splitext(nfile)[1]) == 0:
nfile += ext
new_includegraphics += [(nkey, nfile)]
for (okey, ofile), (nkey, nfile) in zip(includegraphics, new_includegraphics):
new.rename_float(
okey,
nkey,
r'\includegraphics')
copyfile(
os.path.join(old.dirname, ofile),
os.path.join(new.dirname, nfile))
# Copy/reduce BibTeX files
if len(bibfiles) > 0:
if len(bibfiles) > 1:
raise IOError('texplain is only implemented for one BibTeX file')
okey, ofile = bibfiles[0]
nkey = 'library'
nfile = ofile.replace(os.path.normpath(okey), nkey)
bib = bib_select(
open(os.path.join(old.dirname, ofile), 'r').read(),
bibkeys)
new.rename_float(
okey,
nkey,
r'\bibliography')
open(os.path.join(new.dirname, nfile), 'w').write(bib)
# Write modified TeX file
output = os.path.join(new.dirname, 'main.tex')
if os.path.isfile(output):
output = os.path.join(new.dirname, new.filename)
open(output, 'w').write(new.tex)
def main():
try:
from_commandline()
except Exception as e:
print(e)
return 1
|
the-stack_106_27467 | from __future__ import unicode_literals
from mongoengine import *
from flask_mongoengine.wtf import model_form
from core.entities import Entity
from core.database import TagListField, StringListField
class Actor(Entity):
aliases = ListField(StringField(), verbose_name="Aliases")
DISPLAY_FIELDS = Entity.DISPLAY_FIELDS + [("aliases", "Aliases")]
@classmethod
def get_form(klass):
form = Entity.get_form(override=klass)
form.aliases = StringListField("Aliases")
return form
def generate_tags(self):
return [self.name.lower()]
def info(self):
i = Entity.info(self)
i['aliases'] = self.aliases
i['type'] = "Actor"
return i
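# A minimal usage sketch (field values are assumptions; assumes the usual
# mongoengine Document API inherited via Entity):
#
#     apt = Actor(name='APT-X', aliases=['GroupX'])
#     apt.generate_tags()   # -> ['apt-x']
#     apt.info()['type']    # -> 'Actor'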
|
the-stack_106_27468 | """
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
from logging.handlers import RotatingFileHandler
LOG_FILE = 'var/log/enodebd.log'
MAX_BYTES = 1024 * 1024 * 10 # 10MB
BACKUP_COUNT = 5 # 10MB, 5 files, 50MB total
class EnodebdLogger:
"""
EnodebdLogger backs up debug logs with a RotatingFileHandler.
Debug logs will be propagated to root level if the root logger is set to
debug level.
"""
_LOGGER = logging.getLogger(__name__) # type: logging.Logger
@staticmethod
def init() -> None:
        if logging.root.level != logging.DEBUG:
EnodebdLogger._LOGGER.propagate = False
handler = RotatingFileHandler(LOG_FILE,
maxBytes=MAX_BYTES,
backupCount=BACKUP_COUNT)
formatter = logging.Formatter(fmt='%(asctime)s %(levelname)s %(message)s')
handler.setFormatter(formatter)
EnodebdLogger._LOGGER.addHandler(handler)
EnodebdLogger._LOGGER.setLevel(logging.DEBUG)
@staticmethod
def debug(msg, *args, **kwargs):
EnodebdLogger._LOGGER.debug(msg, *args, **kwargs)
@staticmethod
def info(msg, *args, **kwargs):
if not EnodebdLogger._LOGGER.propagate:
logging.info(msg, *args, **kwargs)
EnodebdLogger._LOGGER.info(msg, *args, **kwargs)
@staticmethod
def warning(msg, *args, **kwargs):
if not EnodebdLogger._LOGGER.propagate:
logging.warning(msg, *args, **kwargs)
EnodebdLogger._LOGGER.warning(msg, *args, **kwargs)
@staticmethod
def error(msg, *args, **kwargs):
if not EnodebdLogger._LOGGER.propagate:
logging.error(msg, *args, **kwargs)
EnodebdLogger._LOGGER.error(msg, *args, **kwargs)
@staticmethod
def exception(msg, *args, **kwargs):
if not EnodebdLogger._LOGGER.propagate:
logging.exception(msg, *args, **kwargs)
EnodebdLogger._LOGGER.exception(msg, *args, **kwargs)
@staticmethod
def critical(msg, *args, **kwargs):
if not EnodebdLogger._LOGGER.propagate:
logging.critical(msg, *args, **kwargs)
EnodebdLogger._LOGGER.critical(msg, *args, **kwargs)
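# A minimal usage sketch (assumes the process can create and rotate LOG_FILE):
#
#     EnodebdLogger.init()
#     EnodebdLogger.info('eNodeB %s connected', 'serial-123')   # message text is illustrative
#     EnodebdLogger.debug('raw TR-069 payload: %s', '...')
#
# Debug records always go to the rotating file; records propagate to the root
# logger only when the root logger itself is at DEBUG level (see init()).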
|
the-stack_106_27469 | # pylint: disable = invalid-name
""" Methods to parse strings/datatypes to find currencies """
import numpy as np
from pandas.api.types import is_list_like
import money
from .dtypes import money_patterns
def to_money(values, default_money_code=None):
"""Convert values to MoneyArray
Parameters
----------
values : int, str, bytes, or sequence of those
Returns
-------
    money : MoneyArray
Examples
--------
Parse strings
>>> to_money(['£128',
... '129 EUR'])
<MoneyArray(['128 GBP', '129 EUR'])>
Or integers
>>> to_money([128, 131], default_money_code='GBP')
<MoneyArray(['128 GBP', '131 GBP'])>
"""
from . import MoneyArray
if not is_list_like(values):
values = [values]
values, default_money_code = _to_money_array(
values, default_money_code=default_money_code)
return MoneyArray(
values,
default_money_code=default_money_code
)
def _to_money_array(values, default_money_code=None):
""" Method to convert a money object to a money array """
from .money_array import MoneyType, MoneyArray
if isinstance(values, MoneyArray):
if values.default_money_code:
            default_money_code = values.default_money_code
return values.data, default_money_code
values = [_as_money_object(v, default_money_code) for v in values]
return np.atleast_1d(np.asarray(values, dtype=MoneyType._record_type)), default_money_code
def _as_money_object(val, default_money_code=None):
""" Method to return a tuple with the monetary value
and the currency. Attempt to parse 'val' as any Money object.
Uses regex (money_patterns) to get the amount & the currency.
'cu' represents currency, and 'va' represents value.
"""
cu, va = None, None
if isinstance(val, np.void):
cu = val['cu']
va = val['va']
elif val in (None, '', np.nan):
cu = ''
va = 0
elif isinstance(val, money.Money):
cu = val.currency
va = np.float64(val.amount)
elif isinstance(val, str):
for r, extract in money_patterns:
m = r.match(val)
if m:
# calls a lambda function that gets the value that matches the expressions
va, cu = extract(m)
elif is_list_like(val) and len(val) == 2:
try:
va = np.float64(val[0])
cu = str(val[1])
except TypeError:
pass
if cu is not None and va is not None:
return va, cu
try:
va = np.float64(val)
except TypeError:
pass
else:
if default_money_code:
cu = default_money_code
return va, cu
else:
raise ValueError(
"Currency code is unavailable - cannot convert {}. Set a default?".format(val))
raise ValueError("Could not parse {} as money".format(val))
|
the-stack_106_27470 | #!/usr/bin/env python
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import sys
import glob
import os
import ah_bootstrap # noqa
from setuptools import setup
import builtins
builtins._ASTROPY_SETUP_ = True
from astropy_helpers.setup_helpers import register_commands, get_package_info
from astropy_helpers.git_helpers import get_git_devstr
from astropy_helpers.version_helpers import generate_version_py
# Get some values from the setup.cfg
from configparser import ConfigParser
conf = ConfigParser()
conf.read(['setup.cfg'])
metadata = dict(conf.items('metadata'))
PACKAGENAME = metadata.get('name', 'ginga')
# Get the long description from the package's docstring
__import__(PACKAGENAME)
package = sys.modules[PACKAGENAME]
LONG_DESCRIPTION = package.__doc__
# Store the package name in a built-in variable so it's easy
# to get from other parts of the setup infrastructure
builtins._ASTROPY_PACKAGE_NAME_ = PACKAGENAME
# VERSION should be PEP386 compatible (http://www.python.org/dev/peps/pep-0386)
VERSION = metadata.get('version', '0.0.dev')
# Indicates if this version is a release version
RELEASE = 'dev' not in VERSION
if not RELEASE:
VERSION += get_git_devstr(False)
# Populate the dict of setup command overrides; this should be done before
# invoking any other functionality from distutils since it can potentially
# modify distutils' behavior.
cmdclassd = register_commands()
# Freeze build information in version.py
generate_version_py()
# Treat everything in scripts except README.rst and fits2pdf.py
# as a script to be installed
scripts = [fname for fname in glob.glob(os.path.join('scripts', '*'))
if (os.path.basename(fname) != 'README.rst' and
os.path.basename(fname) != 'fits2pdf.py')]
# Get configuration information from all of the various subpackages.
# See the docstring for setup_helpers.update_package_files for more
# details.
package_info = get_package_info()
# Add the project-global data
package_info['package_data'].setdefault(PACKAGENAME, [])
package_info['package_data'][PACKAGENAME].append('examples/*/*')
package_info['package_data'][PACKAGENAME].append('web/pgw/js/*.js')
package_info['package_data'][PACKAGENAME].append('web/pgw/js/*.css')
# Define entry points for command-line scripts
entry_points = {'console_scripts': []}
entry_point_list = conf.items('entry_points')
for entry_point in entry_point_list:
entry_points['console_scripts'].append('{0} = {1}'.format(entry_point[0],
entry_point[1]))
# Include all .c files, recursively, including those generated by
# Cython, since we can not do this in MANIFEST.in with a "dynamic"
# directory name.
c_files = []
for root, dirs, files in os.walk(PACKAGENAME):
for filename in files:
if filename.endswith('.c'):
c_files.append(
os.path.join(
os.path.relpath(root, PACKAGENAME), filename))
package_info['package_data'][PACKAGENAME].extend(c_files)
setup(version=VERSION,
scripts=scripts,
long_description=LONG_DESCRIPTION,
cmdclass=cmdclassd,
entry_points=entry_points,
**package_info)
|
the-stack_106_27471 | """Utilities for the chimera_app tools"""
import os
import re
import shutil
import subprocess
from datetime import datetime
import psutil
import chimera_app.context as context
import chimera_app.shortcuts as shortcuts
def ensure_directory(directory):
if not os.path.isdir(directory):
os.makedirs(directory, mode=0o755, exist_ok=True)
def ensure_directory_for_file(file):
d = os.path.dirname(file)
ensure_directory(d)
def yearsago(years):
from_date = datetime.now().date()
try:
return from_date.replace(year=from_date.year - years)
except ValueError:
return from_date.replace(month=2, day=28, year=from_date.year - years)
def replace_all(text, dic):
for i, j in dic.items():
text = text.replace(i, j)
return text
def sanitize(string):
if isinstance(string, str):
retval = string
for r in ['\n', '\r', '/', '\\', '\0']:
retval = retval.replace(r, '_')
        retval = retval.replace('"', '')
return retval
return string
def delete_file_link(base_dir, platform, name):
e = re.escape(name) + r"\.[^.]+$"
d = os.path.join(base_dir, platform)
links = []
if os.path.isdir(d):
links = [os.path.join(d, l) for l in os.listdir(d) if re.match(e, l)]
if len(links) < 1:
return
for link in links:
if os.path.islink(link) or os.path.exists(link):
os.remove(link)
def is_direct(platform, content_type):
return ((platform == "arcade" or platform == "neo-geo") and
content_type == "content")
def upsert_file(src_path, base_dir, platform, name, dst_name):
if not src_path:
return
content_type = os.path.basename(base_dir)
filename = sanitize(dst_name)
file_dir = f"{base_dir}/{platform}/.{name}"
rel_file_dir = f".{name}"
# mame ROM files have dependencies on each other,
# so store them all in a single directory
if is_direct(platform, content_type):
file_dir = f"{base_dir}/{platform}/.{platform}"
rel_file_dir = f".{platform}"
if not os.path.exists(file_dir):
os.makedirs(file_dir)
file_path = f"{file_dir}/{filename}"
rel_file_path = f"{rel_file_dir}/{filename}"
if os.path.exists(file_path):
os.remove(file_path)
shutil.move(src_path, file_path)
_, ext = os.path.splitext(filename)
dst = f"{base_dir}/{platform}/{name}{ext}"
delete_file_link(base_dir, platform, name)
os.symlink(rel_file_path, dst)
# mame requires ROM files to have a specific name,
# so launch original file directly
if is_direct(platform, content_type):
return file_path
return dst
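# Sketch of the resulting layout for a non-arcade upload (paths are assumptions):
# upsert_file(src, '~/content/banner', 'nes', 'Contra', 'contra.png') would
#   - move the uploaded file to   ~/content/banner/nes/.Contra/contra.png
#   - recreate the symlink        ~/content/banner/nes/Contra.png -> .Contra/contra.png
#   - return the symlink path; arcade/neo-geo "content" uploads return the real
#     file path instead, because MAME needs the original ROM file names.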
def strip(string):
if string.startswith('"') and string.endswith('"'):
return string[1:-1]
return string
def delete_file(base_dir, platform, name):
if is_direct(platform, os.path.basename(base_dir)):
shortcuts_file = shortcuts.PlatformShortcutsFile(platform)
shortcut = shortcuts_file.get_shortcut_match(name, platform)
if 'dir' in shortcut and 'params' in shortcut:
file_path = os.path.join(strip(shortcut['dir']),
strip(shortcut['params']))
if os.path.exists(file_path):
os.remove(file_path)
else:
file_dir = f"{base_dir}/{platform}/.{name}"
if os.path.exists(file_dir):
shutil.rmtree(file_dir)
delete_file_link(base_dir, platform, name)
def client_running() -> bool:
"""Check if the Steam client is running"""
pid_path = os.path.expanduser('~/.steam/steam.pid')
if not os.path.exists(pid_path):
return False
    with open(pid_path) as pid_file:
        pid = pid_file.read().strip()
    try:
        # psutil.Process expects an integer pid; the pid file stores it as text
        maybe_steam = psutil.Process(int(pid))
    except (ValueError, psutil.NoSuchProcess):
        return False
return maybe_steam.name() == 'steam'
def install_by_id(steam_id: str) -> None:
if client_running():
subprocess.run(['steam', 'steam://install/' + steam_id],
check=True)
else:
raise Exception('Steam Client is not running')
|
the-stack_106_27473 | #!/usr/bin/env python3
import os
import io
import re
import csv
import json
import hashlib
import canonicaljson
from pathlib import Path
from digital_land.load import detect_encoding
from digital_land.plugins.wfs import strip_variable_content
resource_dir = "collection/resource"
resource_log = {}
def save(path, data):
with open(path, "wb") as f:
f.write(data)
for row in csv.DictReader(open("collection/log.csv")):
resource_log.setdefault(row["resource"], [])
resource_log[row["resource"]].append(
"collection/log/" + row["entry-date"][:10] + "/" + row["endpoint"] + ".json"
)
for old_resource in os.listdir(resource_dir):
old_path = Path(resource_dir) / old_resource
if os.path.isfile(old_path):
content = open(old_path, mode="rb").read()
encoding = detect_encoding(io.BytesIO(content))
if encoding:
content = strip_variable_content(content)
new_resource = hashlib.sha256(content).hexdigest()
new_path = Path(resource_dir) / new_resource
if str(new_path) != str(old_path):
print("removing", old_path)
os.remove(old_path)
print("saving", new_path)
save(new_path, content)
for log_path in resource_log[old_resource]:
log = json.load(open(log_path))
if log["resource"] != old_resource:
print("expected %s in %s", old_resource, log_path)
exit(2)
else:
log["resource"] = new_resource
print("fixing", log_path)
save(log_path, canonicaljson.encode_canonical_json(log))
|
the-stack_106_27476 | import importlib
import os
from django.db import connections, router
from dj_anonymizer.conf import settings
VENDOR_TO_TRUNCATE = {
'postgresql': 'TRUNCATE TABLE',
'mysql': 'TRUNCATE TABLE',
'sqlite': 'DELETE FROM',
'oracle': 'TRUNCATE TABLE',
}
def import_if_exist(filename):
"""
Check if file exist in appropriate path and import it
"""
filepath = os.path.join(settings.ANONYMIZER_MODEL_DEFINITION_DIR, filename)
full_filepath = os.path.abspath(filepath + '.py')
if os.path.isfile(full_filepath):
spec = importlib.util.spec_from_file_location(filename, full_filepath)
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)
def truncate_table(model):
"""
Generate and execute via Django ORM proper SQL to truncate table
"""
db = router.db_for_write(model)
connection = connections[db]
vendor = connection.vendor
try:
operation = VENDOR_TO_TRUNCATE[vendor]
except KeyError:
raise NotImplementedError(
"Database vendor %s is not supported" % vendor
)
dbtable = '"{}"'.format(model._meta.db_table)
sql = '{operation} {dbtable}'.format(
operation=operation,
dbtable=dbtable,
)
with connection.cursor() as c:
c.execute(sql)
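# A minimal usage sketch (the app/model names are assumptions, not part of this module):
#
#     from myapp.models import Profile      # hypothetical Django model
#     truncate_table(Profile)
#
# On PostgreSQL this executes: TRUNCATE TABLE "myapp_profile"
# On SQLite the vendor map above turns it into: DELETE FROM "myapp_profile"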
|
the-stack_106_27479 | import unittest
from streamlink.plugins.ard_live import ard_live
class TestPluginard_live(unittest.TestCase):
def test_can_handle_url(self):
should_match = [
'https://daserste.de/live/index.html',
'https://www.daserste.de/live/index.html',
]
for url in should_match:
self.assertTrue(ard_live.can_handle_url(url))
def test_can_handle_url_negative(self):
should_not_match = [
'http://mediathek.daserste.de/live',
]
for url in should_not_match:
self.assertFalse(ard_live.can_handle_url(url))
|
the-stack_106_27480 | """
@name: Modules/House/__init__.py
@author: D. Brian Kimmel
@contact: [email protected]
@copyright: (c) 2013-2020 by D. Brian Kimmel
@license: MIT License
@note: Created on Apr 10, 2013
@summary: Handle all of the information for a house.
"""
__updated__ = '2020-02-16'
__version_info__ = (20, 2, 3)
__version__ = '.'.join(map(str, __version_info__))

from typing import Union
# Note that the following are in the order needed to sequence the startup
MODULES = [ # All modules for the House must be listed here. They will be loaded if configured.
'Lighting',
'Hvac',
'Security',
'Irrigation',
'Pool',
'Rules',
'Schedule',
'Sync',
'Entertainment',
'Family'
]
PARTS = [
'Location',
'Floors',
'Rooms'
]
CONFIG_NAME = 'house'
class HouseInformation:
"""
==> PyHouse_obj.House.xxx
"""
def __init__(self):
self.Name: Union[str, None] = None
self.Comment: str = ''
self._Apis = {}
class LocationInformation:
""" Location of the houses
Latitude, Longitude and Elevation allow the computation of local sunrise and sunset
"""
def __init__(self):
self.Street = None
self.City = None
self.State = None
self.ZipCode = None
self.Country = None
self.Phone = None
self.Latitude = None
self.Longitude = None
self.Elevation = None
self.TimeZone = None
self._RiseSet = None
class CoordinateInformation:
"""
    If applied to components of a house (facing the 'Front' of the house):
X or the distance to the Right from the room's Left side.
Y or the distance back from the Front of the room.
Z or the Height above the floor.
Preferably the distance is kept in Meters but for you die hard Imperial measurement people in Decimal feet (no inches)!
In case you need some hints:
Light switches are about 1.0 meters above the floor.
Outlets are about 0.2 meters above the floor.
"""
def __init__(self):
self.X_Easting = 0.0
self.Y_Northing = 0.0
self.Z_Height = 0.0
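# Illustrative values only (metres), following the hints in the docstring above:
# a light switch 2.0 m from the room's left wall, 0.5 m back from the front wall
# and 1.0 m above the floor.
#
#     switch_location = CoordinateInformation()
#     switch_location.X_Easting = 2.0
#     switch_location.Y_Northing = 0.5
#     switch_location.Z_Height = 1.0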
# ## END DBK
|
the-stack_106_27483 | # Lint as: python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test for transformed features extractor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tempfile
import unittest
from absl.testing import parameterized
import apache_beam as beam
from apache_beam.testing import util
import tensorflow as tf
from tensorflow_model_analysis import config
from tensorflow_model_analysis import constants
from tensorflow_model_analysis.api import model_eval_lib
from tensorflow_model_analysis.eval_saved_model import testutil
from tensorflow_model_analysis.extractors import features_extractor
from tensorflow_model_analysis.extractors import transformed_features_extractor
from tfx_bsl.tfxio import tensor_adapter
from tfx_bsl.tfxio import test_util
from google.protobuf import text_format
from tensorflow_metadata.proto.v0 import schema_pb2
_TF_MAJOR_VERSION = int(tf.version.VERSION.split('.')[0])
class TransformedFeaturesExtractorTest(testutil.TensorflowModelAnalysisTest,
parameterized.TestCase):
def createDenseInputsSchema(self):
return text_format.Parse(
"""
tensor_representation_group {
key: ""
value {
tensor_representation {
key: "input_1"
value {
dense_tensor {
column_name: "input_1"
shape { dim { size: 1 } }
}
}
}
tensor_representation {
key: "input_2"
value {
dense_tensor {
column_name: "input_2"
shape { dim { size: 1 } }
}
}
}
}
}
feature {
name: "input_1"
type: FLOAT
}
feature {
name: "input_2"
type: FLOAT
}
feature {
name: "non_model_feature"
type: INT
}
""", schema_pb2.Schema())
def createModelWithMultipleDenseInputs(self, save_as_keras):
input1 = tf.keras.layers.Input(shape=(1,), name='input_1')
input2 = tf.keras.layers.Input(shape=(1,), name='input_2')
inputs = [input1, input2]
input_layer = tf.keras.layers.concatenate(inputs)
output_layer = tf.keras.layers.Dense(
1, activation=tf.nn.sigmoid, name='output')(
input_layer)
model = tf.keras.models.Model(inputs, output_layer)
# Add tft_layer to model to test callables stored as attributes
model.tft_layer = tf.keras.models.Model(inputs, {
'tft_feature': output_layer,
'tft_label': output_layer
})
@tf.function
def serving_default(serialized_tf_examples):
parsed_features = tf.io.parse_example(
serialized_tf_examples, {
'input_1': tf.io.FixedLenFeature([1], dtype=tf.float32),
'input_2': tf.io.FixedLenFeature([1], dtype=tf.float32)
})
return model(parsed_features)
@tf.function
def transformed_features(features):
return {
'transformed_feature': features['input_1'],
}
@tf.function
def transformed_labels(features):
return {'transformed_label': features['input_2']}
@tf.function
def custom_preprocessing(features):
return {
'custom_feature': features['input_1'],
'custom_label': features['input_2']
}
single_input_spec = tf.TensorSpec(
shape=(None,), dtype=tf.string, name='examples')
multi_input_spec = {
'input_1':
tf.TensorSpec(shape=(None, 1), dtype=tf.float32, name='input_1'),
'input_2':
tf.TensorSpec(shape=(None, 1), dtype=tf.float32, name='input_2')
}
signatures = {
'serving_default':
serving_default.get_concrete_function(single_input_spec),
'transformed_labels':
transformed_labels.get_concrete_function(multi_input_spec),
'transformed_features':
transformed_features.get_concrete_function(multi_input_spec),
'custom_preprocessing':
custom_preprocessing.get_concrete_function(multi_input_spec)
}
export_path = tempfile.mkdtemp()
if save_as_keras:
model.save(export_path, save_format='tf', signatures=signatures)
else:
tf.saved_model.save(model, export_path, signatures=signatures)
return export_path
@parameterized.named_parameters(
(
'keras_defaults',
True,
[],
{
'features': [
'input_1', # raw feature
'input_2', # raw feature
'non_model_feature', # from schema
],
'transformed_features': [
'tft_feature', # added by tft_layer
'tft_label', # added by tft_layer
'transformed_feature', # added by transformed_features
'transformed_label', # added by transformed_labels
]
}),
(
'tf_defaults',
False,
[],
{
'features': [
'input_1', # raw feature
'input_2', # raw feature
'non_model_feature', # from schema
],
'transformed_features': [
'tft_feature', # added by tft_layer
'tft_label', # added by tft_layer
'transformed_feature', # added by transformed_features
'transformed_label', # added by transformed_labels
]
}),
(
'keras_custom',
True,
['custom_preprocessing'],
{
'features': [
'input_1', # raw feature
'input_2', # raw feature
'non_model_feature', # from schema
],
'transformed_features': [
'custom_feature', # added by custom_preprocessing
'custom_label', # added by custom_preprocessing
]
}),
(
'tf_custom',
False,
['custom_preprocessing'],
{
'features': [
'input_1', # raw feature
'input_2', # raw feature
'non_model_feature', # from schema
],
'transformed_features': [
'custom_feature', # added by custom_preprocessing
'custom_label', # added by custom_preprocessing
]
}),
)
@unittest.skipIf(_TF_MAJOR_VERSION < 2,
'not all signatures supported for TF1')
def testPreprocessedFeaturesExtractor(self, save_as_keras,
preprocessing_function_names,
expected_extract_keys):
export_path = self.createModelWithMultipleDenseInputs(save_as_keras)
eval_config = config.EvalConfig(model_specs=[
config.ModelSpec(
preprocessing_function_names=preprocessing_function_names)
])
eval_shared_model = self.createTestEvalSharedModel(
eval_saved_model_path=export_path, tags=[tf.saved_model.SERVING])
schema = self.createDenseInputsSchema()
tfx_io = test_util.InMemoryTFExampleRecord(
schema=schema, raw_record_column_name=constants.ARROW_INPUT_COLUMN)
tensor_adapter_config = tensor_adapter.TensorAdapterConfig(
arrow_schema=tfx_io.ArrowSchema(),
tensor_representations=tfx_io.TensorRepresentations())
feature_extractor = features_extractor.FeaturesExtractor(eval_config)
transformation_extractor = (
transformed_features_extractor.TransformedFeaturesExtractor(
eval_config=eval_config,
eval_shared_model=eval_shared_model,
tensor_adapter_config=tensor_adapter_config))
examples = [
self._makeExample(input_1=1.0, input_2=2.0),
self._makeExample(input_1=3.0, input_2=4.0),
self._makeExample(input_1=5.0, input_2=6.0),
]
with beam.Pipeline() as pipeline:
# pylint: disable=no-value-for-parameter
result = (
pipeline
| 'Create' >> beam.Create([e.SerializeToString() for e in examples],
reshuffle=False)
| 'BatchExamples' >> tfx_io.BeamSource(batch_size=2)
| 'InputsToExtracts' >> model_eval_lib.BatchedInputsToExtracts()
| feature_extractor.stage_name >> feature_extractor.ptransform
| transformation_extractor.stage_name >>
transformation_extractor.ptransform)
# pylint: enable=no-value-for-parameter
def check_result(got):
try:
self.assertLen(got, 2)
for item in got:
for extracts_key, feature_keys in expected_extract_keys.items():
self.assertIn(extracts_key, item)
for value in item[extracts_key]:
self.assertEqual(
set(feature_keys),
set(value.keys()),
msg='got={}'.format(item))
except AssertionError as err:
raise util.BeamAssertException(err)
util.assert_that(result, check_result, label='result')
if __name__ == '__main__':
tf.compat.v1.enable_v2_behavior()
tf.test.main()
|
the-stack_106_27484 | # File: V (Python 2.4)
from pandac.PandaModules import *
from direct.interval.IntervalGlobal import *
from direct.particles import ParticleEffect
from direct.particles import Particles
from direct.particles import ForceGroup
import random
from PooledEffect import PooledEffect
from EffectController import EffectController
class VoodooSmoke(PooledEffect, EffectController):
cardScale = 64.0
def __init__(self):
PooledEffect.__init__(self)
EffectController.__init__(self)
model = loader.loadModel('models/effects/candleHalo')
self.card = model.find('**/effectCandleHalo')
if not VoodooSmoke.particleDummy:
VoodooSmoke.particleDummy = render.attachNewNode(ModelNode('VoodooSmokeParticleDummy'))
VoodooSmoke.particleDummy.setDepthWrite(0)
VoodooSmoke.particleDummy.setLightOff()
VoodooSmoke.particleDummy.setColorScaleOff()
VoodooSmoke.particleDummy.setFogOff()
self.f = ParticleEffect.ParticleEffect('VoodooSmoke')
self.f.reparentTo(self)
self.p0 = Particles.Particles('particles-1')
self.p0.setFactory('PointParticleFactory')
self.p0.setRenderer('SpriteParticleRenderer')
self.p0.setEmitter('DiscEmitter')
self.f.addParticles(self.p0)
f0 = ForceGroup.ForceGroup('Vortex')
self.f.addForceGroup(f0)
def createTrack(self):
self.p0.setPoolSize(128)
self.p0.setBirthRate(0.02)
self.p0.setLitterSize(1)
self.p0.setLitterSpread(0)
self.p0.setSystemLifespan(0.0)
self.p0.setLocalVelocityFlag(1)
self.p0.setSystemGrowsOlderFlag(0)
self.p0.factory.setLifespanBase(2.0)
self.p0.factory.setLifespanSpread(0.0)
self.p0.factory.setMassBase(1.0)
self.p0.factory.setMassSpread(0.0)
self.p0.factory.setTerminalVelocityBase(400.0)
self.p0.factory.setTerminalVelocitySpread(0.0)
self.p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAINOUT)
self.p0.renderer.setUserAlpha(1.0)
self.p0.renderer.setFromNode(self.card)
self.p0.renderer.setColor(Vec4(1.0, 1.0, 1.0, 1.0))
self.p0.renderer.setXScaleFlag(1)
self.p0.renderer.setYScaleFlag(1)
self.p0.renderer.setAnimAngleFlag(0)
self.p0.renderer.setInitialXScale(0.01 * self.cardScale)
self.p0.renderer.setFinalXScale(0.02 * self.cardScale)
        self.p0.renderer.setInitialYScale(0.04 * self.cardScale)
        self.p0.renderer.setFinalYScale(0.04 * self.cardScale)
self.p0.renderer.setNonanimatedTheta(0.0)
self.p0.renderer.setAlphaBlendMethod(BaseParticleRenderer.PPBLENDLINEAR)
self.p0.renderer.setAlphaDisable(0)
self.p0.renderer.setColorBlendMode(ColorBlendAttrib.MAdd, ColorBlendAttrib.OIncomingColor, ColorBlendAttrib.OOneMinusIncomingAlpha)
self.p0.renderer.getColorInterpolationManager().addLinear(0.0, 1.0, Vec4(1.0, 0.58823531866073608, 1.0, 1.0), Vec4(0.0, 1.0, 1.0, 0.19607843458652496), 1)
self.p0.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
self.p0.emitter.setAmplitude(1.0)
self.p0.emitter.setAmplitudeSpread(0.0)
self.p0.emitter.setOffsetForce(Vec3(0.0, 0.0, 6.0))
self.p0.emitter.setExplicitLaunchVector(Vec3(1.0, 0.0, 0.0))
self.p0.emitter.setRadiateOrigin(Point3(0.0, 0.0, 0.0))
        self.p0.emitter.setRadius(0.6)
self.startEffect = Sequence(Func(self.p0.setBirthRate, 0.02), Func(self.p0.clearToInitial), Func(self.f.start, self, self.particleDummy))
self.endEffect = Sequence(Func(self.p0.setBirthRate, 100), Wait(7.0), Func(self.cleanUpEffect))
self.track = Sequence(self.startEffect, Wait(1.0), self.endEffect)
def cleanUpEffect(self):
self.f.disable()
self.detachNode()
self.checkInEffect(self)
def destroy(self):
EffectController.destroy(self)
PooledEffect.destroy(self)
|
the-stack_106_27485 | from django.urls import include, path
from .views import (annotation, annotation_relations, auto_labeling, comment,
example, example_state, export_dataset, health,
import_dataset, import_export, label, project,
relation_types, role, statistics, tag, task, user)
from .views.tasks import category, span, text
urlpatterns_project = [
path(
route='upload',
view=import_dataset.UploadAPI.as_view(),
name='upload'
),
path(
route='catalog',
view=import_dataset.DatasetCatalog.as_view(),
name='catalog'
),
path(
route='download-format',
view=export_dataset.DownloadDatasetCatalog.as_view(),
name='download-format'
),
path(
route='download',
view=export_dataset.DownloadAPI.as_view(),
name='download-dataset'
),
path(
route='statistics',
view=statistics.StatisticsAPI.as_view(),
name='statistics'),
path(
route='labels',
view=label.LabelList.as_view(),
name='label_list'
),
path(
route='label-upload',
view=label.LabelUploadAPI.as_view(),
name='label_upload'
),
path(
route='labels/<int:label_id>',
view=label.LabelDetail.as_view(),
name='label_detail'
),
path(
route='examples',
view=example.ExampleList.as_view(),
name='example_list'
),
path(
route='examples/<int:example_id>',
view=example.ExampleDetail.as_view(),
name='example_detail'
),
path(
route='relation_types',
view=relation_types.RelationTypesList.as_view(),
name='relation_types_list'
),
path(
route='relation_type-upload',
view=relation_types.RelationTypesUploadAPI.as_view(),
name='relation_type-upload'
),
path(
route='relation_types/<int:relation_type_id>',
view=relation_types.RelationTypesDetail.as_view(),
name='relation_type_detail'
),
path(
route='annotation_relations',
view=annotation_relations.AnnotationRelationsList.as_view(),
name='relation_types_list'
),
path(
route='annotation_relation-upload',
view=annotation_relations.AnnotationRelationsUploadAPI.as_view(),
name='annotation_relation-upload'
),
path(
route='annotation_relations/<int:annotation_relation_id>',
view=annotation_relations.AnnotationRelationsDetail.as_view(),
name='annotation_relation_detail'
),
# Todo: remove.
path(
route='docs',
view=example.DocumentList.as_view(),
name='doc_list'
),
path(
route='docs/<int:doc_id>',
view=example.DocumentDetail.as_view(),
name='doc_detail'
),
path(
route='approval/<int:example_id>',
view=annotation.ApprovalAPI.as_view(),
name='approve_labels'
),
# Todo: change.
path(
route='docs/<int:doc_id>/annotations',
view=annotation.AnnotationList.as_view(),
name='annotation_list'
),
path(
route='docs/<int:doc_id>/annotations/<int:annotation_id>',
view=annotation.AnnotationDetail.as_view(),
name='annotation_detail'
),
path(
route='examples/<int:example_id>/categories',
view=category.CategoryListAPI.as_view(),
name='category_list'
),
path(
route='examples/<int:example_id>/categories/<int:annotation_id>',
view=category.CategoryDetailAPI.as_view(),
name='category_detail'
),
path(
route='examples/<int:example_id>/spans',
view=span.SpanListAPI.as_view(),
name='span_list'
),
path(
route='examples/<int:example_id>/spans/<int:annotation_id>',
view=span.SpanDetailAPI.as_view(),
name='span_detail'
),
path(
route='examples/<int:example_id>/texts',
view=text.TextLabelListAPI.as_view(),
name='text_list'
),
path(
route='examples/<int:example_id>/texts/<int:annotation_id>',
view=text.TextLabelDetailAPI.as_view(),
name='text_detail'
),
path(
route='tags',
view=tag.TagList.as_view(),
name='tag_list'
),
path(
route='tags/<int:tag_id>',
view=tag.TagDetail.as_view(),
name='tag_detail'
),
path(
route='examples/<int:example_id>/comments',
view=comment.CommentListDoc.as_view(),
name='comment_list_doc'
),
path(
route='comments',
view=comment.CommentListProject.as_view(),
name='comment_list_project'
),
path(
route='examples/<int:example_id>/comments/<int:comment_id>',
view=comment.CommentDetail.as_view(),
name='comment_detail'
),
path(
route='examples/<int:example_id>/states',
view=example_state.ExampleStateList.as_view(),
name='example_state_list'
),
path(
route='roles',
view=role.RoleMappingList.as_view(),
name='rolemapping_list'
),
path(
route='roles/<int:rolemapping_id>',
view=role.RoleMappingDetail.as_view(),
name='rolemapping_detail'
),
path(
route='auto-labeling-templates',
view=auto_labeling.AutoLabelingTemplateListAPI.as_view(),
name='auto_labeling_templates'
),
path(
route='auto-labeling-templates/<str:option_name>',
view=auto_labeling.AutoLabelingTemplateDetailAPI.as_view(),
name='auto_labeling_template'
),
path(
route='auto-labeling-configs',
view=auto_labeling.AutoLabelingConfigList.as_view(),
name='auto_labeling_configs'
),
path(
route='auto-labeling-configs/<int:config_id>',
view=auto_labeling.AutoLabelingConfigDetail.as_view(),
name='auto_labeling_config'
),
path(
route='auto-labeling-config-testing',
view=auto_labeling.AutoLabelingConfigTest.as_view(),
name='auto_labeling_config_test'
),
path(
route='examples/<int:example_id>/auto-labeling',
view=auto_labeling.AutoLabelingAnnotation.as_view(),
name='auto_labeling_annotation'
),
path(
route='auto-labeling-parameter-testing',
view=auto_labeling.AutoLabelingConfigParameterTest.as_view(),
name='auto_labeling_parameter_testing'
),
path(
route='auto-labeling-template-testing',
view=auto_labeling.AutoLabelingTemplateTest.as_view(),
name='auto_labeling_template_test'
),
path(
route='auto-labeling-mapping-testing',
view=auto_labeling.AutoLabelingMappingTest.as_view(),
name='auto_labeling_mapping_test'
)
]
urlpatterns = [
path(
route='health',
view=health.Health.as_view(),
name='health'
),
path('auth/', include('dj_rest_auth.urls')),
path('fp/', include('django_drf_filepond.urls')),
path(
route='me',
view=user.Me.as_view(),
name='me'
),
path(
route='features',
view=import_export.Features.as_view(),
name='features'
),
path(
route='projects',
view=project.ProjectList.as_view(),
name='project_list'
),
path(
route='users',
view=user.Users.as_view(),
name='user_list'
),
path(
route='roles',
view=role.Roles.as_view(),
name='roles'
),
path(
route='tasks/status/<task_id>',
view=task.TaskStatus.as_view(),
name='task_status'
),
path(
route='projects/<int:project_id>',
view=project.ProjectDetail.as_view(),
name='project_detail'
),
path('projects/<int:project_id>/', include(urlpatterns_project))
]
|
the-stack_106_27487 | # -*- coding: utf-8 -*-
import os
import pytest
from girder.models.setting import Setting
from girder.models.user import User
from girder.utility import mail_utils
from girder.plugin import GirderPlugin
from girder.settings import SettingKey
class MailPlugin(GirderPlugin):
def load(self, info):
mail_utils.addTemplateDirectory(
os.path.join(os.path.dirname(__file__), 'data', 'mail_templates'),
prepend=True
)
def testEmailAdmins(smtp):
assert smtp.isMailQueueEmpty()
for i in range(2):
# Create 2 admin users to test sending mail to admins
User().createUser(
firstName='Admin%d' % i, lastName='Admin', login='admin%d' % i,
password='password', admin=True, email='admin%[email protected]' % i)
# Set the email from address
Setting().set(SettingKey.EMAIL_FROM_ADDRESS, '[email protected]')
# Test sending email to admin users
mail_utils.sendMailToAdmins('Notification', 'hello')
assert smtp.waitForMail()
message = smtp.getMail(parse=True)
assert message['subject'] == 'Notification'
assert message['content-type'] == 'text/html; charset="utf-8"'
assert message['to'] == '[email protected], [email protected]'
assert message['from'] == '[email protected]'
assert message.get_payload(decode=True) == b'hello'
# Test sending email to multiple recipients
assert smtp.isMailQueueEmpty()
mail_utils.sendMail('Email alert', 'world', to=['[email protected]', '[email protected]'])
assert smtp.waitForMail()
message = smtp.getMail(parse=True)
assert message['subject'] == 'Email alert'
assert message['to'] == '[email protected], [email protected]'
assert message['from'] == '[email protected]'
assert message.get_payload(decode=True) == b'world'
# Pass empty list in the "to" field, check exception
msg = 'You must specify email recipients via "to" or "bcc".$'
with pytest.raises(Exception, match=msg):
mail_utils.sendMail('alert', 'hello', to=[])
@pytest.mark.plugin('mail_test', MailPlugin)
def testPluginTemplates(server):
val = 'OVERRIDE CORE FOOTER'
assert mail_utils.renderTemplate('_footer.mako').strip() == val
# Make sure it also works from in-mako import statements
content = mail_utils.renderTemplate('temporaryAccess.mako', {
'url': 'x'
})
assert val in content
def testUnicodeEmail(smtp):
text = u'Contains unic\xf8de \u0420\u043e\u0441\u0441\u0438\u044f'
mail_utils.sendMail(text, text, ['[email protected]'])
assert smtp.waitForMail()
message = smtp.getMail(parse=True)
assert message.get_payload(decode=True) == text.encode('utf8')
def testBcc(smtp):
bcc = ['[email protected]', '[email protected]']
mail_utils.sendMail('hi', 'hi', ['[email protected]'], bcc=bcc)
assert smtp.waitForMail()
message = smtp.getMail(parse=True)
assert message['To'] == '[email protected]'
assert message['Bcc'] == ', '.join(bcc)
|
the-stack_106_27489 | import cv2
import rospy
import sensor_msgs.point_cloud2 as pcl2
import time
from numba import jit
import numpy as np
from cv_bridge import CvBridge, CvBridgeError
from sensor_msgs.msg import Image
from sensor_msgs.msg import CameraInfo
from sensor_msgs.msg import PointCloud2
import message_filters
import torch
import torch.nn as nn
from loss import CustomLoss
from utils import get_model_name, load_config, get_logger, plot_bev, plot_label_map, plot_pr_curve, get_bev
from model import PIXOR
from datagen import KITTI
from postprocess import filter_pred, compute_matches, compute_ap, non_max_suppression
def build_model(config, device, train=True):
net = PIXOR(config['geometry'], config['use_bn'])
loss_fn = CustomLoss(device, config, num_classes=1)
if torch.cuda.device_count() <= 1:
config['mGPUs'] = False
if config['mGPUs']:
print("using multi gpu")
net = nn.DataParallel(net)
net = net.to(device)
loss_fn = loss_fn.to(device)
if not train:
return net, loss_fn
optimizer = torch.optim.SGD(net.parameters(), lr=config['learning_rate'], momentum=config['momentum'], weight_decay=config['weight_decay'])
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=config['lr_decay_at'], gamma=0.1)
return net, loss_fn, optimizer, scheduler
class synchronizer:
def __init__(self):
# self.pub_Image = rospy.Publisher('image_raw_sync', SesnorImage, queue_size=1)
# self.pub_Cam_Info = rospy.Publisher('camera_info_sync', CameraInfo, queue_size=1)
# self.pub_Lidar = rospy.Publisher('rslidar_points_sync', PointCloud2, queue_size=1)
self.config, _, _, _ = load_config("default")
self.net, _ = build_model(self.config, "cuda", train=False)
self.net.load_state_dict(torch.load(get_model_name(self.config), map_location="cuda"))
self.net.set_decode(True)
self.net.eval()
self.dataset = KITTI(1000)
self.dataset.load_velo()
self.image_pub = rospy.Publisher("/test/bev_prev", Image, queue_size=2)
self.imageInput = message_filters.Subscriber("/pointgrey/image_raw", Image)
self.lidar = message_filters.Subscriber('/velo/pointcloud', PointCloud2)
self.ts = message_filters.TimeSynchronizer([self.imageInput
#,self.cameraInfo
, self.lidar
], 10)
self.ts.registerCallback(self.general_callback)
self._image_raw = Image()
self._camera_info = CameraInfo()
self._lidar_points = PointCloud2()
def general_callback(self, image_raw, lidar_points):
self._image_raw = image_raw
self._lidar_points = lidar_points
# print (msg)
date = time.time()
points = pcl2.read_points_list(self._lidar_points)
print ('1: ', time.time()-date)
points = [[point.x, point.y, point.z, 1] for point in points]
points = np.array(points)
print ('2: ', time.time()-date)
cv_image = CvBridge().imgmsg_to_cv2(self._image_raw, "bgr8")
print ('3: ', time.time()-date)
print ("points: ", type(points))
print ("cv_image: ", type(cv_image))
# TODO
cv_image = np.resize(cv_image,(370,1240,3))
pred, bev = self.one_test(cv_image, points)
print ('4: ', time.time()-date)
corners, scores = filter_pred(self.config, pred)
input_np = bev.permute(1, 2, 0).cpu().numpy()
pred_bev = get_bev(input_np, corners)
pub_image = CvBridge().cv2_to_imgmsg(pred_bev, "bgr8")
pub_image.header.frame_id = image_raw.header.frame_id
pub_image.header.stamp = image_raw.header.stamp
print ('5: ', time.time()-date)
self.image_pub.publish(pub_image)
def one_test(self, image_raw, lidar_points):
device = "cuda"
bev, image, bev2image, pc_diff = self.dataset.raw_to_tensor(lidar_points, image_raw)
bev = bev.to(device)
image = image.to(device)
bev2image = bev2image.to(device)
pc_diff = pc_diff.to(device)
print ("bev: ",bev.shape)
print ("image: ",image.shape)
print ("bev2image: ",bev2image.shape)
print ("pc_diff: ",pc_diff.shape)
# bev = torch.ones(33,512,448).cuda()
# image = torch.ones(3,370,1240).cuda()
# bev2image = torch.ones(256, 224, 16, 1, 2).cuda()
# pc_diff = torch.ones(256, 224, 16, 1, 3).cuda()
pred = self.net(bev.unsqueeze(0), image.unsqueeze(0), bev2image.unsqueeze(0), pc_diff.unsqueeze(0))
# print (pred)
return pred, bev
# def publisher(self):
# while True:
# self.pub_Image.publish(self._image_raw)
# self.pub_Cam_Info.publish(self._camera_info)
# # self.pub_Lidar.publish(self._rslidar_points)
if __name__ == '__main__':
rospy.init_node('synchronizer')
synchronizer = synchronizer()
# synchronizer.publisher()
rospy.spin() |
the-stack_106_27492 | """
Auth module that contains all code needed for authentication/authorization
policies setup.
In particular:
:includeme: Function that actually creates routes listed above and
connects view to them
:create_system_user: Function that creates system/admin user
:_setup_ticket_policy: Setup Pyramid AuthTktAuthenticationPolicy
:_setup_apikey_policy: Setup nefertari.ApiKeyAuthenticationPolicy
:setup_auth_policies: Runs generation of particular auth policy
"""
import logging
import transaction
from pyramid.authentication import AuthTktAuthenticationPolicy
from pyramid.authorization import ACLAuthorizationPolicy
from pyramid.security import Allow, ALL_PERMISSIONS
import cryptacular.bcrypt
from nefertari.utils import dictset
from nefertari.json_httpexceptions import *
from nefertari.authentication.policies import ApiKeyAuthenticationPolicy
log = logging.getLogger(__name__)
class ACLAssignRegisterMixin(object):
""" Mixin that sets ``User._acl`` field after user is registered. """
def register(self, *args, **kwargs):
response = super(ACLAssignRegisterMixin, self).register(
*args, **kwargs)
user = self.request._user
mapping = self.request.registry._model_collections
if not user._acl and self.Model.__name__ in mapping:
from nefertari_guards import engine as guards_engine
factory = mapping[self.Model.__name__].view._factory
acl = factory(self.request).generate_item_acl(user)
acl = guards_engine.ACLField.stringify_acl(acl)
user.update({'_acl': acl})
return response
def _setup_ticket_policy(config, params):
""" Setup Pyramid AuthTktAuthenticationPolicy.
Notes:
* Initial `secret` params value is considered to be a name of config
param that represents a cookie name.
* `auth_model.get_groups_by_userid` is used as a `callback`.
* Also connects basic routes to perform authentication actions.
:param config: Pyramid Configurator instance.
:param params: Nefertari dictset which contains security scheme
`settings`.
"""
from nefertari.authentication.views import (
TicketAuthRegisterView, TicketAuthLoginView,
TicketAuthLogoutView)
log.info('Configuring Pyramid Ticket Authn policy')
if 'secret' not in params:
raise ValueError(
'Missing required security scheme settings: secret')
params['secret'] = config.registry.settings[params['secret']]
auth_model = config.registry.auth_model
params['callback'] = auth_model.get_groups_by_userid
config.add_request_method(
auth_model.get_authuser_by_userid, 'user', reify=True)
policy = AuthTktAuthenticationPolicy(**params)
RegisterViewBase = TicketAuthRegisterView
if config.registry.database_acls:
class RegisterViewBase(ACLAssignRegisterMixin,
TicketAuthRegisterView):
pass
class RamsesTicketAuthRegisterView(RegisterViewBase):
Model = config.registry.auth_model
class RamsesTicketAuthLoginView(TicketAuthLoginView):
Model = config.registry.auth_model
class RamsesTicketAuthLogoutView(TicketAuthLogoutView):
Model = config.registry.auth_model
common_kw = {
'prefix': 'auth',
'factory': 'nefertari.acl.AuthenticationACL',
}
root = config.get_root_resource()
root.add('register', view=RamsesTicketAuthRegisterView, **common_kw)
root.add('login', view=RamsesTicketAuthLoginView, **common_kw)
root.add('logout', view=RamsesTicketAuthLogoutView, **common_kw)
return policy
def _setup_apikey_policy(config, params):
""" Setup `nefertari.ApiKeyAuthenticationPolicy`.
Notes:
        * User may provide model name in :params['user_model']: to define
          the name of the user model.
* `auth_model.get_groups_by_token` is used to perform username and
token check
* `auth_model.get_token_credentials` is used to get username and
token from userid
* Also connects basic routes to perform authentication actions.
Arguments:
:config: Pyramid Configurator instance.
:params: Nefertari dictset which contains security scheme `settings`.
"""
from nefertari.authentication.views import (
TokenAuthRegisterView, TokenAuthClaimView,
TokenAuthResetView)
log.info('Configuring ApiKey Authn policy')
auth_model = config.registry.auth_model
params['check'] = auth_model.get_groups_by_token
params['credentials_callback'] = auth_model.get_token_credentials
params['user_model'] = auth_model
config.add_request_method(
auth_model.get_authuser_by_name, 'user', reify=True)
policy = ApiKeyAuthenticationPolicy(**params)
RegisterViewBase = TokenAuthRegisterView
if config.registry.database_acls:
class RegisterViewBase(ACLAssignRegisterMixin,
TokenAuthRegisterView):
pass
class RamsesTokenAuthRegisterView(RegisterViewBase):
Model = auth_model
class RamsesTokenAuthClaimView(TokenAuthClaimView):
Model = auth_model
class RamsesTokenAuthResetView(TokenAuthResetView):
Model = auth_model
common_kw = {
'prefix': 'auth',
'factory': 'nefertari.acl.AuthenticationACL',
}
root = config.get_root_resource()
root.add('register', view=RamsesTokenAuthRegisterView, **common_kw)
root.add('token', view=RamsesTokenAuthClaimView, **common_kw)
root.add('reset_token', view=RamsesTokenAuthResetView, **common_kw)
return policy
""" Map of `security_scheme_type`: `generator_function`, where:
* `security_scheme_type`: String that represents RAML security scheme type
name that should be used to apply a particular authentication system.
* `generator_function`: Function that receives instance of Pyramid
Configurator instance and dictset of security scheme settings and returns
generated Pyramid authentication policy instance.
"""
AUTHENTICATION_POLICIES = {
'x-ApiKey': _setup_apikey_policy,
'x-Ticket': _setup_ticket_policy,
}
def setup_auth_policies(config, raml_root):
""" Setup authentication, authorization policies.
Performs basic validation to check all the required values are present
and performs authentication, authorization policies generation using
generator functions from `AUTHENTICATION_POLICIES`.
:param config: Pyramid Configurator instance.
:param raml_root: Instance of ramlfications.raml.RootNode.
"""
log.info('Configuring auth policies')
secured_by_all = raml_root.secured_by or []
secured_by = [item for item in secured_by_all if item]
if not secured_by:
log.info('API is not secured. `secured_by` attribute '
'value missing.')
return
secured_by = secured_by[0]
schemes = {scheme.name: scheme
for scheme in raml_root.security_schemes}
if secured_by not in schemes:
raise ValueError(
'Undefined security scheme used in `secured_by`: {}'.format(
secured_by))
scheme = schemes[secured_by]
if scheme.type not in AUTHENTICATION_POLICIES:
raise ValueError('Unsupported security scheme type: {}'.format(
scheme.type))
# Setup Authentication policy
policy_generator = AUTHENTICATION_POLICIES[scheme.type]
params = dictset(scheme.settings or {})
authn_policy = policy_generator(config, params)
config.set_authentication_policy(authn_policy)
# Setup Authorization policy
authz_policy = ACLAuthorizationPolicy()
config.set_authorization_policy(authz_policy)
def create_system_user(config):
log.info('Creating system user')
crypt = cryptacular.bcrypt.BCRYPTPasswordManager()
settings = config.registry.settings
try:
auth_model = config.registry.auth_model
s_user = settings['system.user']
s_pass = str(crypt.encode(settings['system.password']))
s_email = settings['system.email']
defaults = dict(
password=s_pass,
email=s_email,
groups=['admin'],
)
if config.registry.database_acls:
defaults['_acl'] = [(Allow, 'g:admin', ALL_PERMISSIONS)]
user, created = auth_model.get_or_create(
username=s_user, defaults=defaults)
if created:
transaction.commit()
except KeyError as e:
log.error('Failed to create system user. Missing config: %s' % e)
def get_authuser_model():
""" Define and return AuthUser model using nefertari base classes """
from nefertari.authentication.models import AuthUserMixin
from nefertari import engine
class AuthUser(AuthUserMixin, engine.BaseDocument):
__tablename__ = 'ramses_authuser'
return AuthUser
def includeme(config):
create_system_user(config)
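# Illustrative sketch (an assumed RAML fragment, not part of this module): the
# `secured_by` and security scheme data that `setup_auth_policies` reads would
# come from a RAML root document roughly like this, where `auth_tkt_secret`
# names a Pyramid settings key holding the cookie secret.
#
#   securitySchemes:
#     - ticket:
#         type: x-Ticket
#         settings:
#           secret: auth_tkt_secret
#   securedBy: [ticket]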
|
the-stack_106_27493 | import datetime
import re
from typing import Any, Dict
import kubernetes
from dateutil.parser import parse
from kubernetes.client import ApiClient
from dagster import check
from dagster.utils import frozendict
def _k8s_value(data, classname, attr_name):
if classname.startswith("list["):
sub_kls = re.match(r"list\[(.*)\]", classname).group(1)
return [
_k8s_value(data[index], sub_kls, f"{attr_name}[{index}]") for index in range(len(data))
]
if classname.startswith("dict("):
sub_kls = re.match(r"dict\(([^,]*), (.*)\)", classname).group(2)
return {k: _k8s_value(v, sub_kls, f"{attr_name}[{k}]") for k, v in data.items()}
if classname in ApiClient.NATIVE_TYPES_MAPPING:
klass = ApiClient.NATIVE_TYPES_MAPPING[classname]
else:
klass = getattr(kubernetes.client.models, classname)
if klass in ApiClient.PRIMITIVE_TYPES:
return klass(data)
elif klass == object:
return data
elif klass == datetime.date:
return parse(data).date()
elif klass == datetime.datetime:
return parse(data)
else:
if not isinstance(data, (frozendict, dict)):
raise Exception(
f"Attribute {attr_name} of type {klass.__name__} must be a dict, received {data} instead"
)
return k8s_model_from_dict(klass, data)
# Heavily inspired by kubernetes.client.ApiClient.__deserialize_model, with more validation
# that the keys and values match the expected format. Expects attribute names to be in camelCase.
def k8s_model_from_dict(model_class, model_dict: Dict[str, Any]):
check.dict_param(model_dict, "model_dict")
kwargs = {}
expected_keys = set(model_class.attribute_map.values())
invalid_keys = set(model_dict).difference(expected_keys)
if len(invalid_keys):
raise Exception(f"Unexpected keys in model class {model_class.__name__}: {invalid_keys}")
for attr, attr_type in model_class.openapi_types.items():
# e.g. config_map => configMap
mapped_attr = model_class.attribute_map[attr]
if mapped_attr in model_dict:
value = model_dict[mapped_attr]
kwargs[attr] = _k8s_value(value, attr_type, mapped_attr)
return model_class(**kwargs)
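if __name__ == "__main__":
    # Illustrative sketch (not part of the original module): build a typed
    # Kubernetes model from a camelCase dict, which is what the helper expects.
    from kubernetes.client import V1ResourceRequirements

    resources = k8s_model_from_dict(
        V1ResourceRequirements,
        {"requests": {"cpu": "250m", "memory": "64Mi"}, "limits": {"cpu": "500m"}},
    )
    print(resources.requests, resources.limits)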
|
the-stack_106_27494 | import hashlib
import logging
import textwrap
from urllib.parse import unquote
from django.contrib import messages
from django.http import (
Http404,
HttpResponse,
HttpResponseNotModified,
HttpResponsePermanentRedirect,
HttpResponseRedirect,
)
from django.urls import resolve, reverse
from django.utils.functional import cached_property
from django.utils.translation import activate
from django.utils.translation import gettext_lazy as _
from django.views.generic import TemplateView
from django_context_decorator import context
from pretalx.common.mixins.views import EventPermissionRequired
from pretalx.common.signals import register_data_exporters
from pretalx.common.utils import safe_filename
from pretalx.schedule.ascii import draw_ascii_schedule
from pretalx.schedule.exporters import ScheduleData
logger = logging.getLogger(__name__)
class ScheduleMixin:
@cached_property
def version(self):
if "version" in self.kwargs:
return unquote(self.kwargs["version"])
return None
def get_object(self):
if self.version:
return self.request.event.schedules.filter(
version__iexact=self.version
).first()
return self.request.event.current_schedule
@context
@cached_property
def schedule(self):
return self.get_object()
def dispatch(self, request, *args, **kwargs):
if "version" in request.GET:
kwargs["version"] = request.GET["version"]
return HttpResponsePermanentRedirect(
reverse(
f"agenda:versioned-{request.resolver_match.url_name}",
args=args,
kwargs=kwargs,
)
)
return super().dispatch(request, *args, **kwargs)
class ExporterView(EventPermissionRequired, ScheduleMixin, TemplateView):
permission_required = "agenda.view_schedule"
def get_context_data(self, **kwargs):
result = super().get_context_data(**kwargs)
schedule = self.schedule
if not schedule and self.version:
result["version"] = self.version
result["error"] = f'Schedule "{self.version}" not found.'
return result
if not schedule:
result["error"] = "Schedule not found."
return result
result["schedules"] = self.request.event.schedules.filter(
published__isnull=False
).values_list("version")
return result
def get_exporter(self, request):
url = resolve(request.path_info)
if url.url_name == "export":
exporter = url.kwargs.get("name") or unquote(
self.request.GET.get("exporter")
)
else:
exporter = url.url_name
exporter = (
exporter[len("export.") :] if exporter.startswith("export.") else exporter
)
responses = register_data_exporters.send(request.event)
for __, response in responses:
ex = response(request.event)
if ex.identifier == exporter:
if ex.public or request.is_orga:
return ex
def get(self, request, *args, **kwargs):
exporter = self.get_exporter(request)
if not exporter:
raise Http404()
lang_code = request.GET.get("lang")
if lang_code and lang_code in request.event.locales:
activate(lang_code)
elif "lang" in request.GET:
activate(request.event.locale)
exporter.schedule = self.schedule
exporter.is_orga = getattr(self.request, "is_orga", False)
try:
file_name, file_type, data = exporter.render()
etag = hashlib.sha1(str(data).encode()).hexdigest()
except Exception:
logger.exception(
f"Failed to use {exporter.identifier} for {self.request.event.slug}"
)
raise Http404()
if "If-None-Match" in request.headers:
if request.headers["If-None-Match"] == etag:
return HttpResponseNotModified()
headers = {"ETag": etag}
if file_type not in ["application/json", "text/xml"]:
headers[
"Content-Disposition"
] = f'attachment; filename="{safe_filename(file_name)}"'
if exporter.cors:
headers["Access-Control-Allow-Origin"] = exporter.cors
return HttpResponse(data, content_type=file_type, headers=headers)
class ScheduleView(EventPermissionRequired, ScheduleMixin, TemplateView):
template_name = "agenda/schedule.html"
def get_permission_required(self):
if self.version == "wip":
return ["orga.view_schedule"]
return ["agenda.view_schedule"]
def get_text(self, request, **kwargs):
data = ScheduleData(
event=self.request.event,
schedule=self.schedule,
with_accepted=False,
with_breaks=True,
).data
response_start = textwrap.dedent(
f"""
\033[1m{request.event.name}\033[0m
Get different formats:
curl {request.event.urls.schedule.full()}\\?format=table (default)
curl {request.event.urls.schedule.full()}\\?format=list
"""
)
output_format = request.GET.get("format", "table")
if output_format not in ["list", "table"]:
output_format = "table"
result = draw_ascii_schedule(data, output_format=output_format)
return HttpResponse(
response_start + result, content_type="text/plain; charset=utf-8"
)
def dispatch(self, request, **kwargs):
if not self.has_permission() and self.request.user.has_perm(
"agenda.view_featured_submissions", self.request.event
):
messages.success(request, _("Our schedule is not live yet."))
return HttpResponseRedirect(self.request.event.urls.featured)
return super().dispatch(request, **kwargs)
def get(self, request, **kwargs):
accept_header = request.headers.get("Accept", "")
if getattr(self, "is_html_export", False) or "text/html" in accept_header:
return super().get(request, **kwargs)
if not accept_header or accept_header in ("plain", "text/plain"):
return self.get_text(request, **kwargs)
export_headers = {
"frab_xml": ["application/xml", "text/xml"],
"frab_json": ["application/json"],
}
for url_name, headers in export_headers.items():
if any(header in accept_header for header in headers):
target_url = getattr(self.request.event.urls, url_name).full()
response = HttpResponseRedirect(target_url)
response.status_code = 303
return response
if "*/*" in accept_header:
return self.get_text(request, **kwargs)
return super().get(request, **kwargs) # Fallback to standard HTML response
def get_object(self):
if self.version == "wip":
return self.request.event.wip_schedule
schedule = super().get_object()
if not schedule:
raise Http404()
return schedule
@context
def exporters(self):
return list(
exporter(self.request.event)
for _, exporter in register_data_exporters.send(self.request.event)
)
@context
def show_talk_list(self):
return (
self.request.path.endswith("/talk/")
or self.request.event.display_settings["schedule"] == "list"
)
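# Illustrative sketch (URLs are assumptions, not part of the original module):
# ScheduleView.get() negotiates on the Accept header, so the same endpoint can
# serve several representations of one schedule.
#
#   curl -H "Accept: text/plain" https://pretalx.example.com/democon/schedule/
#   curl -H "Accept: application/xml" https://pretalx.example.com/democon/schedule/
#   # the XML request is answered with a 303 redirect to the frab XML export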
class ScheduleNoJsView(ScheduleView):
template_name = "agenda/schedule_nojs.html"
def get_schedule_data(self):
data = ScheduleData(
event=self.request.event,
schedule=self.schedule,
with_accepted=self.schedule and not self.schedule.version,
with_breaks=True,
).data
for date in data:
rooms = date.pop("rooms")
talks = [talk for room in rooms for talk in room.get("talks", [])]
talks.sort(
key=lambda x: (x.start, x.submission.title if x.submission else "")
)
date["talks"] = talks
return {"data": list(data)}
def get_context_data(self, **kwargs):
result = super().get_context_data(**kwargs)
if "schedule" not in result:
return result
result.update(**self.get_schedule_data())
result["day_count"] = len(result["data"])
return result
class ChangelogView(EventPermissionRequired, TemplateView):
template_name = "agenda/changelog.html"
permission_required = "agenda.view_schedule"
|
the-stack_106_27495 | # -*- coding: utf-8 -*-
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ... import __version__ as mars_version
from ...utils import parse_readable_size
DEFAULT_IMAGE = 'marsproject/mars:v' + mars_version
DEFAULT_SERVICE_PORT = 7103
def _remove_nones(cfg):
return dict((k, v) for k, v in cfg.items() if v is not None)
class RoleConfig:
"""
Configuration builder for Kubernetes RBAC roles
"""
def __init__(self, name, namespace, api_groups, resources, verbs):
self._name = name
self._namespace = namespace
self._api_groups = api_groups.split(',')
self._resources = resources.split(',')
self._verbs = verbs.split(',')
def build(self):
return {
'kind': 'Role',
'metadata': {'name': self._name, 'namespace': self._namespace},
'rules': [{
'apiGroups': self._api_groups,
'resources': self._resources,
'verbs': self._verbs,
}]
}
class RoleBindingConfig:
"""
Configuration builder for Kubernetes RBAC role bindings
"""
def __init__(self, name, namespace, role_name, service_account_name):
self._name = name
self._namespace = namespace
self._role_name = role_name
self._service_account_name = service_account_name
def build(self):
return {
'kind': 'RoleBinding',
'metadata': {'name': self._name, 'namespace': self._namespace},
'roleRef': {
'apiGroup': 'rbac.authorization.k8s.io',
'kind': 'Role',
'name': self._role_name,
},
'subjects': [{
'kind': 'ServiceAccount',
'name': self._service_account_name,
'namespace': self._namespace,
}]
}
class NamespaceConfig:
"""
Configuration builder for Kubernetes namespaces
"""
def __init__(self, name):
self._name = name
def build(self):
return {
'kind': 'Namespace',
'metadata': {
'name': self._name,
'labels': {
'name': self._name,
}
}
}
class ServiceConfig:
"""
Configuration builder for Kubernetes services
"""
def __init__(self, name, service_type, selector, port, target_port=None,
protocol=None):
self._name = name
self._type = service_type
self._protocol = protocol or 'TCP'
self._selector = selector
self._port = port
self._target_port = target_port or DEFAULT_SERVICE_PORT
def build(self):
return {
'kind': 'Service',
'metadata': {
'name': self._name,
},
'spec': _remove_nones({
'type': self._type,
'selector': self._selector,
'ports': [
_remove_nones({
'protocol': self._protocol,
'port': self._port,
'targetPort': self._target_port,
}),
]
}),
}
class ResourceConfig:
"""
Configuration builder for Kubernetes computation resources
"""
def __init__(self, cpu, memory):
self._cpu = cpu
self._memory, ratio = parse_readable_size(memory) if memory is not None else (None, False)
assert not ratio
def build(self):
return {
'cpu': '%dm' % int(self._cpu * 1000),
'memory': '%d' % int(self._memory),
}
class PortConfig:
"""
Configuration builder for Kubernetes ports definition for containers
"""
def __init__(self, container_port):
self._container_port = container_port
def build(self):
return {
'containerPort': self._container_port,
}
class VolumeConfig:
"""
Base configuration builder for Kubernetes volumes
"""
def __init__(self, name, mount_path):
self.name = name
self.mount_path = mount_path
def build(self):
raise NotImplementedError
def build_mount(self):
return {
'name': self.name,
'mountPath': self.mount_path,
}
class HostPathVolumeConfig(VolumeConfig):
"""
Configuration builder for Kubernetes host volumes
"""
def __init__(self, name, mount_path, host_path, volume_type=None):
super().__init__(name, mount_path)
self._host_path = host_path
self._volume_type = volume_type or 'DirectoryOrCreate'
def build(self):
return {
'name': self.name,
'hostPath': {'path': self._host_path, 'type': self._volume_type},
}
class EmptyDirVolumeConfig(VolumeConfig):
"""
Configuration builder for Kubernetes empty-dir volumes
"""
def __init__(self, name, mount_path, use_memory=False):
super().__init__(name, mount_path)
self._medium = 'Memory' if use_memory else None
def build(self):
result = {
'name': self.name,
'emptyDir': {}
}
if self._medium:
result['emptyDir']['medium'] = self._medium
return result
class ContainerEnvConfig:
"""
Configuration builder for Kubernetes container environments
"""
def __init__(self, name, value=None, field_path=None):
self._name = name
self._value = value
self._field_path = field_path
def build(self):
result = dict(name=self._name)
if self._value is not None:
result['value'] = self._value
elif self._field_path is not None: # pragma: no branch
result['valueFrom'] = {'fieldRef': {'fieldPath': self._field_path}}
return result
class ProbeConfig:
"""
Base configuration builder for Kubernetes liveness and readiness probes
"""
def __init__(self, initial_delay=5, period=1, timeout=None,
success_thresh=None, failure_thresh=None):
self._initial_delay = initial_delay
self._period = period
self._timeout = timeout
self._success_thresh = success_thresh
self._failure_thresh = failure_thresh
def build(self):
return _remove_nones({
'initialDelaySeconds': self._initial_delay,
'periodSeconds': self._period,
'timeoutSeconds': self._timeout,
'successThreshold': self._success_thresh,
'failureThreshold': self._failure_thresh,
})
class ExecProbeConfig(ProbeConfig):
"""
Configuration builder for Kubernetes probes by executing commands
"""
def __init__(self, command, *args, **kwargs):
super().__init__(*args, **kwargs)
self._command = command
def build(self):
result = {
'exec': {'command': self._command}
}
result.update(super().build())
return result
class TcpProbeConfig(ProbeConfig):
"""
Configuration builder for Kubernetes probes by checking TCP ports
"""
def __init__(self, port, *args, **kwargs):
super().__init__(*args, **kwargs)
self._port = port
def build(self):
result = {
'tcpSocket': {'port': self._port}
}
result.update(super().build())
return result
class ReplicationControllerConfig:
"""
Base configuration builder for Kubernetes replication controllers
"""
def __init__(self, name, image, replicas, resource_request=None, resource_limit=None,
liveness_probe=None, readiness_probe=None, pre_stop_command=None):
self._name = name
self._image = image
self._replicas = replicas
self._ports = []
self._volumes = []
self._envs = dict()
self.add_default_envs()
self._resource_request = resource_request
self._resource_limit = resource_limit
self._liveness_probe = liveness_probe
self._readiness_probe = readiness_probe
self._pre_stop_command = pre_stop_command
def add_env(self, name, value=None, field_path=None):
self._envs[name] = ContainerEnvConfig(name, value=value, field_path=field_path)
def add_simple_envs(self, envs):
for k, v in envs.items() or ():
self.add_env(k, v)
def add_port(self, container_port):
self._ports.append(PortConfig(container_port))
def add_default_envs(self):
pass # pragma: no cover
def add_volume(self, vol):
self._volumes.append(vol)
def build_container_command(self):
raise NotImplementedError
def build_container(self):
resources_dict = {
'requests': self._resource_request.build() if self._resource_request else None,
'limits': self._resource_limit.build() if self._resource_limit else None,
}
lifecycle_dict = _remove_nones({
'preStop': {
'exec': {'command': self._pre_stop_command},
} if self._pre_stop_command else None,
})
return _remove_nones({
'command': self.build_container_command(),
'env': [env.build() for env in self._envs.values()] or None,
'image': self._image,
'name': self._name,
'resources': dict((k, v) for k, v in resources_dict.items() if v) or None,
'ports': [p.build() for p in self._ports] or None,
'volumeMounts': [vol.build_mount() for vol in self._volumes] or None,
'livenessProbe': self._liveness_probe.build() if self._liveness_probe else None,
'readinessProbe': self._readiness_probe.build() if self._readiness_probe else None,
'lifecycle': lifecycle_dict or None,
})
def build_template_spec(self):
result = {
'containers': [self.build_container()],
'volumes': [vol.build() for vol in self._volumes]
}
return dict((k, v) for k, v in result.items() if v)
def build(self):
return {
'kind': 'ReplicationController',
'metadata': {
'name': self._name,
},
'spec': {
'replicas': int(self._replicas),
'selector': {'name': self._name},
'template': {
'metadata': {
'labels': {'name': self._name},
},
'spec': self.build_template_spec()
}
},
}
class MarsReplicationControllerConfig(ReplicationControllerConfig):
"""
Base configuration builder for replication controllers for Mars
"""
rc_name = None
def __init__(self, replicas, cpu=None, memory=None, limit_resources=False,
image=None, modules=None, volumes=None, stat_type='cgroup', **kwargs):
self._cpu = cpu
self._memory, ratio = parse_readable_size(memory) if memory is not None else (None, False)
assert not ratio
self._stat_type = stat_type
if isinstance(modules, str):
self._modules = modules.split(',')
else:
self._modules = modules
res = ResourceConfig(cpu, memory) if cpu or memory else None
super().__init__(
self.rc_name, image or DEFAULT_IMAGE, replicas,
resource_request=res, resource_limit=res if limit_resources else None,
readiness_probe=self.config_readiness_probe(), **kwargs
)
self.add_port(DEFAULT_SERVICE_PORT)
for vol in volumes or ():
self.add_volume(vol)
def add_default_envs(self):
self.add_env('MARS_K8S_POD_NAME', field_path='metadata.name')
self.add_env('MARS_K8S_POD_NAMESPACE', field_path='metadata.namespace')
self.add_env('MARS_K8S_POD_IP', field_path='status.podIP')
self.add_env('MARS_K8S_SERVICE_PORT', str(DEFAULT_SERVICE_PORT))
self.add_env('MARS_CONTAINER_IP', field_path='status.podIP')
if self._cpu:
self.add_env('MKL_NUM_THREADS', str(self._cpu))
self.add_env('MARS_CPU_TOTAL', str(self._cpu))
if self._stat_type == 'cgroup':
self.add_env('MARS_USE_CGROUP_STAT', '1')
if self._memory:
self.add_env('MARS_MEMORY_TOTAL', str(int(self._memory)))
if self._modules:
self.add_env('MARS_LOAD_MODULES', ','.join(self._modules))
def config_readiness_probe(self):
raise NotImplementedError
@staticmethod
def get_local_app_module(mod_name):
return __name__.rsplit('.', 1)[0] + '.' + mod_name
class MarsSchedulersConfig(MarsReplicationControllerConfig):
"""
Configuration builder for Mars scheduler service
"""
rc_name = 'marsscheduler'
def config_readiness_probe(self):
readiness_cmd = [
'/srv/entrypoint.sh', self.get_local_app_module('probe'),
]
return ExecProbeConfig(readiness_cmd, timeout=5, failure_thresh=3)
def build_container_command(self):
return [
'/srv/entrypoint.sh', self.get_local_app_module('scheduler'),
'-p', str(DEFAULT_SERVICE_PORT),
]
class MarsWorkersConfig(MarsReplicationControllerConfig):
"""
Configuration builder for Mars worker service
"""
rc_name = 'marsworker'
def __init__(self, *args, **kwargs):
spill_volumes = kwargs.pop('spill_volumes', None) or ()
mount_shm = kwargs.pop('mount_shm', True)
self._limit_resources = kwargs['limit_resources'] = kwargs.get('limit_resources', True)
worker_cache_mem = kwargs.pop('worker_cache_mem', None)
super().__init__(*args, **kwargs)
self._spill_volumes = []
for idx, vol in enumerate(spill_volumes):
if isinstance(vol, str):
path = '/mnt/hostpath%d' % idx
self.add_volume(HostPathVolumeConfig('host-path-vol-%d' % idx, path, vol))
self._spill_volumes.append(path)
else:
self.add_volume(vol)
self._spill_volumes.append(vol.mount_path)
if self._spill_volumes:
self.add_env('MARS_SPILL_DIRS', ':'.join(self._spill_volumes))
if mount_shm:
self.add_volume(EmptyDirVolumeConfig('shm-volume', '/dev/shm', True))
if worker_cache_mem:
self.add_env('MARS_CACHE_MEM_SIZE', worker_cache_mem)
def config_readiness_probe(self):
readiness_cmd = [
'/srv/entrypoint.sh', self.get_local_app_module('probe'),
]
return ExecProbeConfig(readiness_cmd, timeout=60, failure_thresh=3)
def build_container_command(self):
return [
'/srv/entrypoint.sh', self.get_local_app_module('worker'),
'-p', str(DEFAULT_SERVICE_PORT),
]
class MarsWebsConfig(MarsReplicationControllerConfig):
"""
Configuration builder for Mars web service
"""
rc_name = 'marsweb'
def config_readiness_probe(self):
return TcpProbeConfig(DEFAULT_SERVICE_PORT, timeout=60, failure_thresh=3)
def build_container_command(self):
return [
'/srv/entrypoint.sh', self.get_local_app_module('web'),
'-p', str(DEFAULT_SERVICE_PORT),
]
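if __name__ == '__main__':  # pragma: no cover
    # Illustrative sketch (not part of the original module, run with
    # `python -m` so the relative imports resolve): build the spec for two
    # schedulers with 2 CPUs and 4 GB of memory each and print the resulting
    # Kubernetes object definition.
    import pprint

    scheduler_config = MarsSchedulersConfig(2, cpu=2, memory='4g', limit_resources=True)
    pprint.pprint(scheduler_config.build())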
|
the-stack_106_27496 | import os
import os.path
import sys
import torch
import torch.utils.data as data
import cv2
import numpy as np
CLASSES = ( '__background__', 'face')
class AnnotationTransform(object):
"""Transforms a VOC annotation into a Tensor of bbox coords and label index
Initilized with a dictionary lookup of classnames to indexes
Arguments:
class_to_ind (dict, optional): dictionary lookup of classnames -> indexes
(default: alphabetic indexing of VOC's 20 classes)
keep_difficult (bool, optional): keep difficult instances or not
(default: False)
height (int): height
width (int): width
"""
def __init__(self):
self.class_to_ind = dict( zip(CLASSES, range(len(CLASSES))))
#self.keep_difficult = keep_difficult
def __call__(self, target):
"""
Arguments:
target (annotation) : the target annotation to be made usable
will be an ET.Element
Returns:
a list containing lists of bounding boxes [bbox coords, class name]
"""
res = np.empty((0, 5))
#for obj in target.iter('object'):
#difficult = int(obj.find('difficult').text) == 1
#if not self.keep_difficult and difficult:
# continue
#name = obj.find('name').text.lower().strip()
#bbox = obj.find('bndbox')
#pts = ['xmin', 'ymin', 'xmax', 'ymax']
#bndbox = []
#for i, pt in enumerate(pts):
# cur_pt = int(bbox.find(pt).text)
# bndbox.append(cur_pt)
#label_idx = self.class_to_ind[name]
#bndbox.append(label_idx)
#res = np.vstack((res, bndbox)) # [xmin, ymin, xmax, ymax, label_ind]
label_idx = self.class_to_ind['face']
bndbox = [int(target[0]),int(target[1]),int(target[2]),int(target[3])]
bndbox.append(label_idx)
res = np.vstack((res, bndbox)) # [xmin, ymin, xmax, ymax, label_ind]
return res
class AnimeDetection(data.Dataset):
"""VOC Detection Dataset Object
input is image, target is annotation
Arguments:
root (string): filepath to WIDER folder
target_transform (callable, optional): transformation to perform on the
target `annotation`
(eg: take in caption string, return tensor of word indices)
"""
def __init__(self, filelist, preproc=None, target_transform=None):
self.filelist = filelist
self.preproc = preproc
self.target_transform = target_transform
#self._annopath = os.path.join(self.root, 'annotations', '%s')
#self._imgpath = os.path.join(self.root, 'images', '%s')
self.ids = list()
with open(self.filelist, 'r') as f:
self.ids = [tuple(line.split()) for line in f]
def __getitem__(self, index):
img_id = self.ids[index]
#print(img_id)
#target = ET.parse(self._annopath % img_id[1]).getroot()
target = img_id[1:]
img = cv2.imread(img_id[0], cv2.IMREAD_COLOR)
height, width, _ = img.shape
if self.target_transform is not None:
target = self.target_transform(target)
if self.preproc is not None:
img, target = self.preproc(img, target)
return torch.from_numpy(img), target
def __len__(self):
return len(self.ids)
def detection_collate(batch):
"""Custom collate fn for dealing with batches of images that have a different
number of associated object annotations (bounding boxes).
Arguments:
batch: (tuple) A tuple of tensor images and lists of annotations
Return:
A tuple containing:
1) (tensor) batch of images stacked on their 0 dim
2) (list of tensors) annotations for a given image are stacked on 0 dim
"""
targets = []
imgs = []
for _, sample in enumerate(batch):
for _, tup in enumerate(sample):
if torch.is_tensor(tup):
imgs.append(tup)
elif isinstance(tup, type(np.empty(0))):
annos = torch.from_numpy(tup).float()
targets.append(annos)
return (torch.stack(imgs, 0), targets)
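if __name__ == '__main__':
    # Illustrative sketch (the list file name is an assumption): every line of
    # the list file is expected to read "<image path> xmin ymin xmax ymax".
    dataset = AnimeDetection('train_list.txt', target_transform=AnnotationTransform())
    image, target = dataset[0]
    print(image.shape, target)  # HxWx3 tensor and a 1x5 [xmin, ymin, xmax, ymax, label] array
    # Batched loading additionally needs a `preproc` that resizes images to a
    # common shape before `detection_collate` can stack them, e.g.:
    # loader = data.DataLoader(dataset, batch_size=8, collate_fn=detection_collate)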
|
the-stack_106_27500 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
import sys
import json
import math
import fiona
from fiona.errors import DriverError
import rasterio
import warnings
from rasterio.transform import guard_transform
from affine import Affine
import numpy as np
try:
from shapely.errors import ReadingError
except:
from shapely.geos import ReadingError
try:
from json.decoder import JSONDecodeError
except ImportError:
JSONDecodeError = ValueError
from shapely import wkt, wkb
try:
    from collections.abc import Iterable, Mapping
except ImportError:  # Python 2 fallback
    from collections import Iterable, Mapping
geom_types = ["Point", "LineString", "Polygon",
"MultiPoint", "MultiLineString", "MultiPolygon"]
PY3 = sys.version_info[0] >= 3
if PY3:
string_types = str, # pragma: no cover
else:
string_types = basestring, # pragma: no cover
def wrap_geom(geom):
""" Wraps a geometry dict in an GeoJSON Feature
"""
return {'type': 'Feature',
'properties': {},
'geometry': geom}
def parse_feature(obj):
""" Given a python object
attemp to a GeoJSON-like Feature from it
"""
# object implementing geo_interface
if hasattr(obj, '__geo_interface__'):
gi = obj.__geo_interface__
if gi['type'] in geom_types:
return wrap_geom(gi)
elif gi['type'] == 'Feature':
return gi
# wkt
try:
shape = wkt.loads(obj)
return wrap_geom(shape.__geo_interface__)
except (ReadingError, TypeError, AttributeError):
pass
# wkb
try:
shape = wkb.loads(obj)
return wrap_geom(shape.__geo_interface__)
except (ReadingError, TypeError):
pass
# geojson-like python mapping
try:
if obj['type'] in geom_types:
return wrap_geom(obj)
elif obj['type'] == 'Feature':
return obj
except (AssertionError, TypeError):
pass
raise ValueError("Can't parse %s as a geojson Feature object" % obj)
def read_features(obj, layer=0):
features_iter = None
if isinstance(obj, string_types):
try:
# test it as fiona data source
with fiona.open(obj, 'r', layer=layer) as src:
assert len(src) > 0
def fiona_generator(obj):
with fiona.open(obj, 'r', layer=layer) as src:
for feature in src:
yield feature
features_iter = fiona_generator(obj)
except (AssertionError, TypeError, IOError, OSError, DriverError, UnicodeDecodeError):
try:
mapping = json.loads(obj)
if 'type' in mapping and mapping['type'] == 'FeatureCollection':
features_iter = mapping['features']
elif mapping['type'] in geom_types + ['Feature']:
features_iter = [parse_feature(mapping)]
except (ValueError, JSONDecodeError):
# Single feature-like string
features_iter = [parse_feature(obj)]
elif isinstance(obj, Mapping):
if 'type' in obj and obj['type'] == 'FeatureCollection':
features_iter = obj['features']
else:
features_iter = [parse_feature(obj)]
elif isinstance(obj, bytes):
# Single binary object, probably a wkb
features_iter = [parse_feature(obj)]
elif hasattr(obj, '__geo_interface__'):
mapping = obj.__geo_interface__
if mapping['type'] == 'FeatureCollection':
features_iter = mapping['features']
else:
features_iter = [parse_feature(mapping)]
elif isinstance(obj, Iterable):
# Iterable of feature-like objects
features_iter = (parse_feature(x) for x in obj)
if not features_iter:
raise ValueError("Object is not a recognized source of Features")
return features_iter
def read_featurecollection(obj, layer=0):
features = read_features(obj, layer=layer)
fc = {'type': 'FeatureCollection', 'features': []}
fc['features'] = [f for f in features]
return fc
def rowcol(x, y, affine, op=math.floor):
""" Get row/col for a x/y
"""
r = int(op((y - affine.f) / affine.e))
c = int(op((x - affine.c) / affine.a))
return r, c
def bounds_window(bounds, affine):
"""Create a full cover rasterio-style window
"""
w, s, e, n = bounds
row_start, col_start = rowcol(w, n, affine)
row_stop, col_stop = rowcol(e, s, affine, op=math.ceil)
return (row_start, row_stop), (col_start, col_stop)
def window_bounds(window, affine):
(row_start, row_stop), (col_start, col_stop) = window
w, s = (col_start, row_stop) * affine
e, n = (col_stop, row_start) * affine
return w, s, e, n
def boundless_array(arr, window, nodata, masked=False):
dim3 = False
if len(arr.shape) == 3:
dim3 = True
elif len(arr.shape) != 2:
raise ValueError("Must be a 2D or 3D array")
# unpack for readability
(wr_start, wr_stop), (wc_start, wc_stop) = window
# Calculate overlap
olr_start = max(min(window[0][0], arr.shape[-2:][0]), 0)
olr_stop = max(min(window[0][1], arr.shape[-2:][0]), 0)
olc_start = max(min(window[1][0], arr.shape[-2:][1]), 0)
olc_stop = max(min(window[1][1], arr.shape[-2:][1]), 0)
# Calc dimensions
overlap_shape = (olr_stop - olr_start, olc_stop - olc_start)
if dim3:
window_shape = (arr.shape[0], wr_stop - wr_start, wc_stop - wc_start)
else:
window_shape = (wr_stop - wr_start, wc_stop - wc_start)
# create an array of nodata values
out = np.ones(shape=window_shape) * nodata
# Fill with data where overlapping
nr_start = olr_start - wr_start
nr_stop = nr_start + overlap_shape[0]
nc_start = olc_start - wc_start
nc_stop = nc_start + overlap_shape[1]
if dim3:
out[:, nr_start:nr_stop, nc_start:nc_stop] = \
arr[:, olr_start:olr_stop, olc_start:olc_stop]
else:
out[nr_start:nr_stop, nc_start:nc_stop] = \
arr[olr_start:olr_stop, olc_start:olc_stop]
if masked:
out = np.ma.MaskedArray(out, mask=(out == nodata))
return out
class Raster(object):
""" Raster abstraction for data access to 2/3D array-like things
Use as a context manager to ensure dataset gets closed properly::
>>> with Raster(path) as rast:
...
Parameters
----------
raster: 2/3D array-like data source, required
Currently supports paths to rasterio-supported rasters and
numpy arrays with Affine transforms.
affine: Affine object
Maps row/col to coordinate reference system
required if raster is ndarray
nodata: nodata value, optional
Overrides the datasource's internal nodata if specified
band: integer
raster band number, optional (default: 1)
Methods
-------
index
read
"""
def __init__(self, raster, affine=None, nodata=None, band=1):
self.array = None
self.src = None
if isinstance(raster, np.ndarray):
if affine is None:
raise ValueError("Specify affine transform for numpy arrays")
self.array = raster
self.affine = affine
self.shape = raster.shape
self.nodata = nodata
else:
self.src = rasterio.open(raster, 'r')
self.affine = guard_transform(self.src.transform)
self.shape = (self.src.height, self.src.width)
self.band = band
if nodata is not None:
# override with specified nodata
self.nodata = float(nodata)
else:
self.nodata = self.src.nodata
def index(self, x, y):
""" Given (x, y) in crs, return the (row, column) on the raster
"""
col, row = [math.floor(a) for a in (~self.affine * (x, y))]
return row, col
def read(self, bounds=None, window=None, masked=False):
""" Performs a boundless read against the underlying array source
Parameters
----------
bounds: bounding box
in w, s, e, n order, iterable, optional
window: rasterio-style window, optional
bounds OR window are required,
specifying both or neither will raise exception
masked: boolean
return a masked numpy array, default: False
bounds OR window are required, specifying both or neither will raise exception
Returns
-------
Raster object with update affine and array info
"""
# Calculate the window
if bounds and window:
raise ValueError("Specify either bounds or window")
if bounds:
win = bounds_window(bounds, self.affine)
elif window:
win = window
else:
raise ValueError("Specify either bounds or window")
c, _, _, f = window_bounds(win, self.affine) # c ~ west, f ~ north
a, b, _, d, e, _, _, _, _ = tuple(self.affine)
new_affine = Affine(a, b, c, d, e, f)
nodata = self.nodata
if nodata is None:
nodata = -999
warnings.warn("Setting nodata to -999; specify nodata explicitly")
if self.array is not None:
# It's an ndarray already
new_array = boundless_array(
self.array, window=win, nodata=nodata, masked=masked)
elif self.src:
# It's an open rasterio dataset
new_array = self.src.read(
self.band, window=win, boundless=True, masked=masked)
return Raster(new_array, new_affine, nodata)
def __enter__(self):
return self
def __exit__(self, *args):
if self.src is not None:
# close the rasterio reader
self.src.close()
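if __name__ == '__main__':
    # Illustrative sketch (file name and bounds are assumptions): perform a
    # boundless, masked read of a window around a bounding box given in the
    # raster's coordinate reference system.
    with Raster('elevation.tif', band=1) as rast:
        chip = rast.read(bounds=(245000.0, 1000000.0, 246000.0, 1001000.0), masked=True)
        print(chip.array.shape)
        print(chip.affine)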
|
the-stack_106_27502 | #
# This file is part of pysnmp software.
#
# Copyright (c) 2005-2018, Ilya Etingof <[email protected]>
# License: http://snmplabs.com/pysnmp/license.html
#
import random
random.seed()
class Integer(object):
"""Return a next value in a reasonably MT-safe manner"""
def __init__(self, maximum, increment=256):
self.__maximum = maximum
if increment >= maximum:
increment = maximum
self.__increment = increment
self.__threshold = increment // 2
e = random.randrange(self.__maximum - self.__increment)
self.__bank = list(range(e, e + self.__increment))
def __repr__(self):
return '%s(%d, %d)' % (
self.__class__.__name__,
self.__maximum,
self.__increment
)
def __call__(self):
v = self.__bank.pop(0)
if v % self.__threshold:
return v
else:
# this is MT-safe unless too many (~ increment/2) threads
# bump into this code simultaneously
e = self.__bank[-1] + 1
if e > self.__maximum:
e = 0
self.__bank.extend(range(e, e + self.__threshold))
return v
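if __name__ == '__main__':
    # Illustrative sketch (not part of the original module): draw a handful of
    # identifiers bounded by the 32-bit SNMP request-id range.
    request_id = Integer(0xffffffff)
    print([request_id() for _ in range(5)])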
|
the-stack_106_27504 | import csv
import datetime
import os
import subprocess
from golem.core import utils
def new_directory_test_case(root_path, project, parents, test_name):
parents = os.sep.join(parents)
errors = []
if directory_already_exists(root_path, project, 'tests', parents, test_name):
errors.append('A directory with that name already exists')
else:
utils.create_new_directory(path_list=[root_path, 'projects', project, 'tests',
parents, test_name], add_init=True)
return errors
def new_directory_page_object(root_path, project, parents, page_name):
parents = os.sep.join(parents)
errors = []
if directory_already_exists(root_path, project, 'pages', parents, page_name):
errors.append('A directory with that name already exists')
else:
utils.create_new_directory(path_list=[root_path, 'projects', project, 'pages',
parents, page_name], add_init=True)
return errors
def run_test_case(project, test_case_name):
timestamp = utils.get_timestamp()
subprocess.Popen(['python', 'golem.py', 'run', project, test_case_name,
'--timestamp', timestamp])
return timestamp
def run_suite(project, suite_name):
timestamp = utils.get_timestamp()
subprocess.Popen(['python', 'golem.py', 'run', project, suite_name, '--timestamp', timestamp])
return timestamp
def get_time_span(task_id):
path = os.path.join('results', '{0}.csv'.format(task_id))
if not os.path.isfile(path):
        print('Error: results file {} does not exist'.format(path))
return
else:
with open(path, 'r') as f:
reader = csv.DictReader(f, delimiter=';')
last_row = list(reader)[-1]
exec_time = string_to_time(last_row['time'])
time_delta = datetime.datetime.now() - exec_time
total_seconds = time_delta.total_seconds()
return total_seconds
def directory_already_exists(root_path, project, root_dir, parents, dir_name):
parents_joined = os.sep.join(parents)
directory_path = os.path.join(root_path, 'projects', project, root_dir,
parents_joined, dir_name)
if os.path.exists(directory_path):
return True
else:
return False
def time_to_string():
time_format = '%Y-%m-%d-%H-%M-%S-%f'
return datetime.datetime.now().strftime(time_format)
def string_to_time(time_string):
return datetime.datetime.strptime(time_string, '%Y-%m-%d-%H-%M-%S-%f')
def get_global_actions():
global_actions = [
{
'name': 'assert contains',
'parameters': [{'name': 'element', 'type': 'value'},
{'name': 'value', 'type': 'value'}]
},
{
'name': 'assert equals',
'parameters': [{'name': 'actual value', 'type': 'value'},
{'name': 'expected value', 'type': 'value'}]
},
{
'name': 'assert false',
'parameters': [{'name': 'condition', 'type': 'value'}]
},
{
'name': 'assert true',
'parameters': [{'name': 'condition', 'type': 'value'}]
},
{
'name': 'capture',
'parameters': [{'name': 'message (optional)', 'type': 'value'}]
},
{
'name': 'click',
'parameters': [{'name': 'element', 'type': 'element'}]
},
{
'name': 'close',
'parameters': []
},
{
'name': 'debug',
'parameters': []
},
{
'name': 'get',
'parameters': [{'name': 'url', 'type': 'value'},
{'name': 'headers', 'type': 'multiline-value'},
{'name': 'params', 'type': 'value'}]
},
{
'name': 'navigate',
'parameters': [{'name': 'url', 'type': 'value'}]
},
{
'name': 'post',
'parameters': [{'name': 'url', 'type': 'value'},
{'name': 'headers', 'type': 'value'},
{'name': 'data', 'type': 'value'}]
},
{
'name': 'press key',
'parameters': [{'name': 'element', 'type': 'element'},
{'name': 'key', 'type': 'value'}]
},
{
'name': 'random',
'parameters': [{'name': 'args', 'type': 'value'}]
},
{
'name': 'select by index',
'parameters': [{'name': 'from element', 'type': 'element'},
{'name': 'index', 'type': 'value'}]
},
{
'name': 'select by text',
'parameters': [{'name': 'from element', 'type': 'element'},
{'name': 'text', 'type': 'value'}]
},
{
'name': 'select by value',
'parameters': [{'name': 'from element', 'type': 'element'},
{'name': 'value', 'type': 'value'}]
},
{
'name': 'send keys',
'parameters': [{'name': 'element', 'type': 'element'},
{'name': 'value', 'type': 'value'}]
},
{
'name': 'step',
'parameters': [{'name': 'message', 'type': 'value'}]
},
{
'name': 'store',
'parameters': [{'name': 'key', 'type': 'value'},
{'name': 'value', 'type': 'value'}]
},
{
'name': 'verify exists',
'parameters': [{'name': 'element', 'type': 'element'}]
},
{
'name': 'verify is enabled',
'parameters': [{'name': 'element', 'type': 'element'}]
},
{
'name': 'verify is not enabled',
'parameters': [{'name': 'element', 'type': 'element'}]
},
{
'name': 'verify is not selected',
'parameters': [{'name': 'element', 'type': 'element'}]
},
{
'name': 'verify is not visible',
'parameters': [{'name': 'element', 'type': 'element'}]
},
{
'name': 'verify is selected',
'parameters': [{'name': 'element', 'type': 'element'}]
},
{
'name': 'verify is visible',
'parameters': [{'name': 'element', 'type': 'element'}]
},
{
'name': 'verify not exists',
'parameters': [{'name': 'element', 'type': 'element'}]
},
{
'name': 'verify selected option',
'parameters': [{'name': 'select', 'type': 'element'},
{'name': 'text option', 'type': 'value'}]
},
{
'name': 'verify text',
'parameters': [{'name': 'text', 'type': 'value'}]
},
{
'name': 'verify text in element',
'parameters': [{'name': 'element', 'type': 'element'},
{'name': 'text', 'type': 'value'}]
},
{
'name': 'wait',
'parameters': [{'name': 'seconds', 'type': 'value'}]
},
{
'name': 'wait for element visible',
'parameters': [{'name': 'element', 'type': 'element'},
{'name': 'timeout (optional)', 'type': 'value'}]
},
{
'name': 'wait for element not visible',
'parameters': [{'name': 'element', 'type': 'element'},
{'name': 'timeout (optional)', 'type': 'value'}]
},
{
'name': 'wait for element enabled',
'parameters': [{'name': 'element', 'type': 'element'},
{'name': 'timeout (optional)', 'type': 'value'}]
}
]
return global_actions
def get_supported_browsers_suggestions():
# supported_browsers = {
# 'suggestions': [
# {'value': 'chrome', 'data': 'chrome'},
# {'value': 'chrome-remote', 'data': 'chrome-remote'},
# {'value': 'chrome-headless', 'data': 'chrome-headless'},
# {'value': 'chrome-remote-headless', 'data': 'chrome-remote-headless'},
# {'value': 'firefox', 'data': 'firefox'},
# {'value': 'firefox-remote', 'data': 'firefox-remote'}
# ]
# }
supported_browsers = [
'chrome',
'chrome-remote',
'chrome-headless',
'chrome-remote-headless',
'firefox',
'firefox-remote'
]
return supported_browsers
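if __name__ == '__main__':
    # Illustrative sketch (not part of the original module): the timestamp
    # helpers round-trip through the shared '%Y-%m-%d-%H-%M-%S-%f' format.
    stamp = time_to_string()
    parsed = string_to_time(stamp)
    assert parsed.strftime('%Y-%m-%d-%H-%M-%S-%f') == stamp
    print(stamp, parsed)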
|
the-stack_106_27506 | """Implementation of Rule L052."""
from typing import List, NamedTuple, Optional
from sqlfluff.core.parser import SymbolSegment
from sqlfluff.core.parser.segments.base import BaseSegment, IdentitySet
from sqlfluff.core.parser.segments.raw import NewlineSegment
from sqlfluff.core.rules.base import BaseRule, LintResult, LintFix, RuleContext
from sqlfluff.core.rules.doc_decorators import (
document_configuration,
document_fix_compatible,
)
from sqlfluff.core.rules.functional import Segments, sp
class SegmentMoveContext(NamedTuple):
"""Context information for moving a segment."""
anchor_segment: BaseSegment
is_one_line: bool
before_segment: Segments
whitespace_deletions: Segments
@document_configuration
@document_fix_compatible
class Rule_L052(BaseRule):
"""Statements must end with a semi-colon.
**Anti-pattern**
A statement is not immediately terminated with a semi-colon. The ``•`` represents
space.
.. code-block:: sql
:force:
SELECT
a
FROM foo
;
SELECT
b
FROM bar••;
**Best practice**
Immediately terminate the statement with a semi-colon.
.. code-block:: sql
:force:
SELECT
a
FROM foo;
"""
config_keywords = ["multiline_newline", "require_final_semicolon"]
@staticmethod
def _handle_preceding_inline_comments(before_segment, anchor_segment):
"""Adjust segments to not move preceding inline comments.
We don't want to move inline comments that are on the same line
as the preceding code segment as they could contain noqa instructions.
"""
# See if we have a preceding inline comment on the same line as the preceding
# segment.
same_line_comment = next(
(
s
for s in before_segment
if s.is_comment
and s.name != "block_comment"
and s.pos_marker.working_line_no
== anchor_segment.pos_marker.working_line_no
),
None,
)
# If so then make that our new anchor segment and adjust
# before_segment accordingly.
if same_line_comment:
anchor_segment = same_line_comment
before_segment = before_segment[: before_segment.index(same_line_comment)]
return before_segment, anchor_segment
@staticmethod
def _handle_trailing_inline_comments(context, anchor_segment):
"""Adjust anchor_segment to not move trailing inline comment.
We don't want to move inline comments that are on the same line
as the preceding code segment as they could contain noqa instructions.
"""
# See if we have a trailing inline comment on the same line as the preceding
# segment.
for parent_segment in context.parent_stack[::-1]:
for comment_segment in parent_segment.recursive_crawl("comment"):
if (
comment_segment.pos_marker.working_line_no
== anchor_segment.pos_marker.working_line_no
) and (comment_segment.name != "block_comment"):
anchor_segment = comment_segment
return anchor_segment
@staticmethod
def _is_one_line_statement(context, segment):
"""Check if the statement containing the provided segment is one line."""
# Find statement segment containing the current segment.
statement_segment = next(
(
s
for s in (context.parent_stack[0].path_to(segment) or [])
if s.is_type("statement")
),
None,
)
if statement_segment is None: # pragma: no cover
# If we can't find a parent statement segment then don't try anything
# special.
return False
if not any(statement_segment.recursive_crawl("newline")):
# Statement segment has no newlines therefore starts and ends on the same
# line.
return True
return False
def _get_segment_move_context(self, context: RuleContext) -> SegmentMoveContext:
# Locate the segment to be moved (i.e. context.segment) and search back
# over the raw stack to find the end of the preceding statement.
reversed_raw_stack = context.functional.raw_stack.reversed()
before_code = reversed_raw_stack.select(loop_while=sp.not_(sp.is_code()))
before_segment = before_code.select(sp.not_(sp.is_meta()))
anchor_segment = before_code[-1] if before_code else context.segment
first_code = reversed_raw_stack.select(sp.is_code()).first()
is_one_line = (
self._is_one_line_statement(context, first_code[0]) if first_code else False
)
# We can tidy up any whitespace between the segment
# and the preceding code/comment segment.
# Don't mess with comment spacing/placement.
whitespace_deletions = before_segment.select(loop_while=sp.is_whitespace())
return SegmentMoveContext(
anchor_segment, is_one_line, before_segment, whitespace_deletions
)
def _handle_semicolon(self, context: RuleContext) -> Optional[LintResult]:
info = self._get_segment_move_context(context)
semicolon_newline = self.multiline_newline if not info.is_one_line else False
# Semi-colon on same line.
if not semicolon_newline:
return self._handle_semicolon_same_line(context, info)
# Semi-colon on new line.
else:
return self._handle_semicolon_newline(context, info)
def _handle_semicolon_same_line(
self, context: RuleContext, info: SegmentMoveContext
) -> Optional[LintResult]:
if not info.before_segment:
return None
# If preceding segments are found then delete the old
# semi-colon and its preceding whitespace and then insert
# the semi-colon in the correct location.
fixes = self._create_semicolon_and_delete_whitespace(
context,
info.anchor_segment,
info.whitespace_deletions,
[
SymbolSegment(raw=";", type="symbol", name="semicolon"),
],
)
return LintResult(
anchor=info.anchor_segment,
fixes=fixes,
)
def _handle_semicolon_newline(
self, context: RuleContext, info: SegmentMoveContext
) -> Optional[LintResult]:
# Adjust before_segment and anchor_segment for preceding inline
# comments. Inline comments can contain noqa logic so we need to add the
# newline after the inline comment.
(before_segment, anchor_segment,) = self._handle_preceding_inline_comments(
info.before_segment, info.anchor_segment
)
if (len(before_segment) == 1) and all(
s.is_type("newline") for s in before_segment
):
return None
# If preceding segment is not a single newline then delete the old
# semi-colon/preceding whitespace and then insert the
# semi-colon in the correct location.
# This handles an edge case in which an inline comment comes after
# the semi-colon.
anchor_segment = self._handle_trailing_inline_comments(context, anchor_segment)
fixes = []
if anchor_segment is context.segment:
fixes.append(
LintFix.replace(
anchor_segment,
[
NewlineSegment(),
SymbolSegment(raw=";", type="symbol", name="semicolon"),
],
)
)
else:
fixes.extend(
self._create_semicolon_and_delete_whitespace(
context,
anchor_segment,
info.whitespace_deletions,
[
NewlineSegment(),
SymbolSegment(raw=";", type="symbol", name="semicolon"),
],
)
)
return LintResult(
anchor=anchor_segment,
fixes=fixes,
)
def _create_semicolon_and_delete_whitespace(
self,
context: RuleContext,
anchor_segment: BaseSegment,
whitespace_deletions: Segments,
create_segments: List[BaseSegment],
) -> List[LintFix]:
anchor_segment = self._choose_anchor_segment(
context, "create_after", anchor_segment, filter_meta=True
)
lintfix_fn = LintFix.create_after
# :TRICKY: Use IdentitySet rather than set() since
# different segments may compare as equal.
whitespace_deletion_set = IdentitySet(whitespace_deletions)
if anchor_segment in whitespace_deletion_set:
# Can't delete() and create_after() the same segment. Use replace()
# instead.
lintfix_fn = LintFix.replace
whitespace_deletions = whitespace_deletions.select(
lambda seg: seg is not anchor_segment
)
fixes = [
lintfix_fn(
anchor_segment,
create_segments,
),
LintFix.delete(
context.segment,
),
]
fixes.extend(LintFix.delete(d) for d in whitespace_deletions)
return fixes
def _ensure_final_semicolon(self, context: RuleContext) -> Optional[LintResult]:
# Locate the end of the file.
if not self.is_final_segment(context):
return None
# Include current segment for complete stack.
complete_stack: List[BaseSegment] = list(context.raw_stack)
complete_stack.append(context.segment)
# Iterate backwards over complete stack to find
# if the final semi-colon is already present.
anchor_segment = context.segment
semi_colon_exist_flag = False
is_one_line = False
before_segment = []
for segment in complete_stack[::-1]:
if segment.name == "semicolon":
semi_colon_exist_flag = True
elif segment.is_code:
is_one_line = self._is_one_line_statement(context, segment)
break
elif not segment.is_meta:
before_segment.append(segment)
anchor_segment = segment
semicolon_newline = self.multiline_newline if not is_one_line else False
if not semi_colon_exist_flag:
# Create the final semi-colon if it does not yet exist.
# Semi-colon on same line.
if not semicolon_newline:
fixes = [
LintFix.create_after(
self._choose_anchor_segment(
context, "create_after", anchor_segment, filter_meta=True
),
[
SymbolSegment(raw=";", type="symbol", name="semicolon"),
],
)
]
# Semi-colon on new line.
else:
# Adjust before_segment and anchor_segment for inline
# comments.
(
before_segment,
anchor_segment,
) = self._handle_preceding_inline_comments(
before_segment, anchor_segment
)
fixes = [
LintFix.create_after(
self._choose_anchor_segment(
context, "create_after", anchor_segment, filter_meta=True
),
[
NewlineSegment(),
SymbolSegment(raw=";", type="symbol", name="semicolon"),
],
)
]
return LintResult(
anchor=anchor_segment,
fixes=fixes,
)
return None
def _eval(self, context: RuleContext) -> Optional[LintResult]:
"""Statements must end with a semi-colon."""
# Config type hints
self.multiline_newline: bool
self.require_final_semicolon: bool
# First we can simply handle the case of existing semi-colon alignment.
result = None
if context.segment.name == "semicolon":
result = self._handle_semicolon(context)
elif self.require_final_semicolon:
result = self._ensure_final_semicolon(context)
return result
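# Illustrative configuration sketch (an assumed .sqlfluff snippet, not part of
# this module): the two config keywords read in `_eval` above could be set like
# this to require a final semi-colon on its own line for multi-line statements.
#
#   [sqlfluff:rules:L052]
#   multiline_newline = True
#   require_final_semicolon = True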
|
the-stack_106_27508 | """
This scripts for main function of supervised domain adaptation on image classification
"""
from utils.parse_args import parse_args_sda
from train_val.training_sda import ClsModel, CCSA, dSNE
def main():
"""
Main function
CCSA: ICCV 17 model
V0: train on source and test on target
V1: train on source and target
dsne: dSNE model
dsnet: dSNE-Triplet model
:return:
"""
if args.method == 'v0':
model = ClsModel(args, train_tgt=False)
elif args.method == 'v1':
model = ClsModel(args, train_tgt=True)
elif args.method == 'ccsa':
model = CCSA(args)
elif args.method == 'dsne':
model = dSNE(args)
else:
raise NotImplementedError
    print('Finished creating the model.')
if args.training:
model.train()
else:
model.test()
if __name__ == '__main__':
args = parse_args_sda()
main()
|
the-stack_106_27512 | """Program to automate and optimise a workforce schedule."""
import sys
import random
import time
from math import isclose
from string import ascii_lowercase
from enum import Enum, IntFlag, auto
from pulp import *
START_TIME = time.time()
DEFAULT_OPTIMISATION_ACCURACY = .15
ID_LOWER_BOUND = 10000000
ID_UPPER_BOUND = 99999999
NUMBER_OF_WORKDAYS = 7
MAXIMUM_CONSECUTIVE_WORKDAYS = 7
PERIODS_PER_HOUR = 2
SHIFT_START_INTERVAL = 1
DEFAULT_SHIFT_IN_PERIODS = 8 * PERIODS_PER_HOUR
MINIMUM_SHIFT_IN_PERIODS = 4 * PERIODS_PER_HOUR
MAXIMUM_SHIFT_IN_PERIODS = DEFAULT_SHIFT_IN_PERIODS
DEFAULT_WEEKLY_MAXIMUM_SHIFTS = 5
PREFERENCE_MULTIPLIER = 4
DEFAULT_WEIGHTS = {'preference': .25, 'day_pairs_off': .25,
'weekends_off': .25, 'excess_workforce': .25}
WEEKDAY_FRI = 4
WEEKDAY_SAT = 5
WEEKDAY_SUN = 6
RANDOM_CHANCES = {'absence': .05, 'preference': .06,
'open_and_close': .87, 'weekend': .1}
class Contract(Enum):
"""Represents contract types."""
FULLTIME = 1
PARTTIME = 2
class PropertyFlag(IntFlag):
"""Represents all special properties employees can have."""
NONE = 0
CAN_OPEN = auto()
CAN_CLOSE = auto()
IS_STUDENT = auto()
IS_IN_SCHOOL = auto()
HAS_KEYS = auto()
class Preference(IntFlag):
"""Represents employee preference or availability for a shift.
Undesirable flag also works as a dissatisfaction
factor in the objective function.
"""
NORMAL = 0
UNAVAILABLE = 1
UNDESIRABLE = 8
class Employee:
"""Employee class with required properties.
Attributes:
id:
Unique ID for every employee.
name:
A string representing the name of the employee.
type_of_contract:
An enumerator representing full-time or part-time contracts.
min_hours:
            An integer defining the employee's minimum weekly working time, expressed in periods.
        max_hours:
            An integer defining the employee's maximum weekly working time, in periods. Defaults to match minimum hours.
max_shifts:
An integer representing the maximum amount of shifts per week for the employee. Defaults to 5.
seniority:
A float representing employee's seniority. Defaults to 0.
special_properties:
PropertyFlag flags for special properties the employee satisfies. For example can open or close business.
current_workday_streak:
An integer representing the length of the streak of days the
employee has worked at the end of the previous schedule.
weekends_config:
A dictionary where 'single' is a list of all weekend indices that the employee must have off duty. The item
for the key 'groups' is a list of lists, where the first items of the innermost lists define the minimum
weekends off selected from the list. The rest of the items are the weekend indices that belong to the group.
preferences:
A dictionary for setting special preferences for shifts. Defaults to an empty dictionary.
"""
def __init__(self, new_id, name, type_of_contract, min_hours, max_hours=None, max_shifts=None, seniority=None,
special_properties=None, current_workday_streak=None, weekends_config=None, preferences=None):
"""Initialise employee."""
self.id = new_id
self.name = name
self.type_of_contract = type_of_contract
self.min_hours = min_hours
self.max_hours = min_hours if (max_hours is None) else max_hours
self.max_shifts = DEFAULT_WEEKLY_MAXIMUM_SHIFTS if (max_shifts is None) else max_shifts
self.seniority = 0 if (seniority is None) else seniority
self.special_properties = PropertyFlag.NONE if (special_properties is None) else special_properties
self.current_workday_streak = 0 if (current_workday_streak is None) else current_workday_streak
self.weekends_config = {} if (weekends_config is None) else weekends_config
self.preferences = {} if (preferences is None) else preferences
def __str__(self):
"""Return string representation of employee."""
return self.to_text()
def to_text(self):
"""Return a text representation of employee."""
min_h = int(self.min_hours / PERIODS_PER_HOUR)
max_h = int(self.max_hours / PERIODS_PER_HOUR)
hour_range = f'{min_h}-{max_h}'
padding = ''
if self.min_hours == self.max_hours and False:
hour_range = str(int(self.min_hours / PERIODS_PER_HOUR))
padding = ' '
preferences_text = {}
for day, day_preference in self.preferences.items():
preferences_text[day] = {}
for shift, flag in day_preference.items():
preferences_text[day][shift] = int(flag)
return str(f'ID: {self.id}, Name: {self.name}, ' +
f'Contract: {self.type_of_contract.name}, ' +
f'Hours: {hour_range},{padding} ' +
f'Max shifts: {self.max_shifts}, ' +
f'Seniority: {self.seniority}, ' +
f'Properties: {self.special_properties}, ' +
f'Streak: {self.current_workday_streak},\n ' +
f'Weekends: {self.weekends_config}, ' +
f'Preferences: {preferences_text}')
def set_employee_shifts(self, work_site_demands):
"""Find all employee's plausible shifts.
Assign the list of shifts as an attribute to the employee.
Args:
work_site_demands:
A list of tuples defining work site demands. Shifts will be generated in respect to opening hours.
"""
all_shifts = []
for day_index in range(len(work_site_demands)):
todays_shifts = []
days_preferences = None
try:
# Try to set preferences set for today.
days_preferences = self.preferences[day_index]
except KeyError:
pass
minimum_shift_length = MINIMUM_SHIFT_IN_PERIODS
if self.special_properties & PropertyFlag.IS_IN_SCHOOL:
minimum_shift_length = 2 * PERIODS_PER_HOUR
for shift_length in range(minimum_shift_length, MAXIMUM_SHIFT_IN_PERIODS + 1):
workday_periods = len(work_site_demands[day_index])
todays_shifts += self.get_possible_shifts_for_day(workday_periods, shift_length, days_preferences)
all_shifts.append(todays_shifts)
self.shifts = all_shifts
def get_possible_shifts_for_day(self, number_of_periods, shift_length=None, days_preferences=None):
"""Get all consecutive defined-length sets of periods from a given set of periods.
Args:
number_of_periods:
An integer representing the number of total periods on given day.
shift_length:
An integer defining the length of shift in periods.
days_preferences:
Employee's preferences for today.
Returns:
A list of possible shifts, unavailabilities factored in.
"""
if shift_length is None:
shift_length = DEFAULT_SHIFT_IN_PERIODS
if days_preferences is None:
days_preferences = {}
possible_shifts = []
for i in range(0, number_of_periods - shift_length + 1, SHIFT_START_INTERVAL):
eligible_shift = True
try:
for shift_index in days_preferences:
if (
(shift_index >= i) and
(shift_index < i + shift_length) and
(days_preferences[shift_index] == Preference.UNAVAILABLE)
):
eligible_shift = False
except (AttributeError):
# Expected if preferences is not defined or it's an empty dictionary.
pass
if eligible_shift:
shift_as_periods = [x for x in range(i, i + shift_length)]
possible_shifts.append(shift_as_periods)
return possible_shifts
class Employees:
"""Maintains a list of employees.
Capable of creating random lists for testing purposes.
Attributes:
list:
A dictionary of current employees. Keys are employee IDs and items instances of Employee class.
Named in a potentially confusing way. Might need renaming.
"""
def __init__(self, employee_list=None):
"""Initialise the object with an existing dictionary of employees or empty dictionary."""
self.list = {} if (employee_list is None) else employee_list
def count(self):
"""Return the number of current employees."""
return len(self.list)
def add(self, employee):
"""Add employee to dictionary.
Args:
employee: Employee instance to add to the dictionary.
Returns:
A boolean value. True if adding successful, False if not.
"""
if not isinstance(employee, Employee):
return False
self.list[employee.id] = employee
return True
def remove(self, employee):
"""Remove employee from dictionary.
Args:
employee:
Employee instance to remove from dictionary.
"""
self.list.pop(employee.id, None)
def generate_employee_id(self):
"""Create ID for employee randomly.
Return None if no unique ID is available after a pre-defined maximum amount of tries.
"""
maximum_iterations = 2500
for _ in range(maximum_iterations):
new_id = random.randint(ID_LOWER_BOUND, ID_UPPER_BOUND)
if not self.id_exists(new_id):
return new_id
return None
def id_exists(self, checked_id):
"""Check if ID exists within existing employees.
Args:
checked_id: ID whose value is checked for uniqueness.
"""
for _, employee in self.list.items():
if checked_id == employee.id:
return True
return False
def create_dummy_employees(self, count_of_employees, work_site_demands, fixed_hours=False, start_day=0):
"""Create a random list of employees for testing purposes.
Args:
count_of_employees:
Number of employees to be created. If None, employees are created as long
as needed to fulfill the total hours in work site demands.
work_site_demands:
List of tuples defining future work site schedules.
Employee preferences are created in respect to opening hours.
fixed_hours:
A boolean defining if random employees will have a fixed number of hours
in their contracts instead of a range. Defaults to False.
start_day:
Index of the weekday from where scheduling starts. Affects the number of full weekends.
Defaults to 0 i.e. Monday.
Returns:
Boolean value. True if employees' total hours are above the first week's demands.
"""
fulfill_hours = False
if not count_of_employees:
fulfill_hours = True
count_of_employees = sys.maxsize
weeks_in_schedule = len(work_site_demands) / 7
total_weekly_hours = sum([sum(x) for x in work_site_demands])
total_weekly_hours /= weeks_in_schedule
employee_hours_currently = 0
seniors_created = 0
print('total needed hours:', total_weekly_hours / PERIODS_PER_HOUR)
extras = 0
needed_extras = 0
for i in range(count_of_employees):
new_employee = self.create_random_employee(work_site_demands, fixed_hours, start_day)
if new_employee is None:
break
if new_employee.seniority != 0:
seniors_created += 1
self.list[new_employee.id] = new_employee
employee_hours_currently += ((new_employee.min_hours + new_employee.max_hours) / 2)
# Add ~7% extra employees for more probable feasibility.
if i % 15 == 0:
needed_extras += 1
if extras >= needed_extras:
break
if ((employee_hours_currently > total_weekly_hours) and fulfill_hours):
extras += 1
print('total (avg) hours :',
employee_hours_currently / PERIODS_PER_HOUR)
if not seniors_created:
_, random_employee = random.choice(list(self.list.items()))
random_employee.seniority = 1
return employee_hours_currently >= total_weekly_hours
def create_random_employee(self, work_site_demands,
fixed_hours=False, start_day=0):
"""Create random employee and return the instance.
Args:
work_site_demands:
List of tuples defining work site schedule. Preferences are created in respect to opening hours.
fixed_hours:
A boolean defining if the employee will have a fixed number of
hours in their contract instead of a range. Defaults to False.
start_day:
Index of the weekday from where scheduling starts. Affects the number of full weekends.
Defaults to 0 i.e. Monday.
"""
contract_type = random.choice((Contract.FULLTIME, Contract.PARTTIME))
if contract_type == Contract.FULLTIME:
min_hours = 38 * PERIODS_PER_HOUR
max_hours = (38 if fixed_hours else 40) * PERIODS_PER_HOUR
else:
min_hours = random.choice(range(15 * PERIODS_PER_HOUR, 30 * PERIODS_PER_HOUR, 2))
max_hours = random.choice(range(min_hours, 30 * PERIODS_PER_HOUR, 2))
if fixed_hours:
min_hours = max_hours
max_shifts = None
if max_hours is None:
pass
        elif max_hours < 15 * PERIODS_PER_HOUR:
            # Check the stricter limit first so that both branches are reachable.
            max_shifts = 3
        elif max_hours < 20 * PERIODS_PER_HOUR:
            max_shifts = 4
random_id = self.generate_employee_id()
if random_id is None:
return None
random_name = ''
for _ in range(8):
random_name += random.choice(ascii_lowercase)
random_seniority = 1 if (random.random() < .05) else 0
random_properties = PropertyFlag.NONE
if random.random() < RANDOM_CHANCES['open_and_close']:
random_properties += PropertyFlag.CAN_OPEN
random_properties += PropertyFlag.CAN_CLOSE
random_streak = random.choice([6] + 2 * [5] + 3 * [4] + 4 * [3] + 5 * [2] + 6 * [1] + 7 * [0])
random_weekends = {}
if random.random() < RANDOM_CHANCES['weekend']:
s = 1 if (start_day == WEEKDAY_SUN) else 0
weekend_range = range(int(len(work_site_demands) / 7) - s)
random_weekends['single'] = [random.choice(weekend_range)]
random_weekends['groups'] = []
if len(work_site_demands) / 7 > 3:
week_count = int(len(work_site_demands) / 7)
weekend_list = list(range(0, week_count))
slice_length = 5
groups = [weekend_list[i:i + slice_length] for i in range(0, len(weekend_list), slice_length)]
for split_group in groups:
if random.random() < RANDOM_CHANCES['weekend']:
weekends_off = random.choice((1, 2))
random_weekends['groups'].append([weekends_off] + split_group)
random_preferences = {}
for i in range(len(work_site_demands)):
rand = random.random()
if rand < RANDOM_CHANCES['absence']:
unavailable_period_index = random.choice(range(len(work_site_demands[i])))
random_preferences[i] = {unavailable_period_index: Preference.UNAVAILABLE}
elif (rand < RANDOM_CHANCES['absence'] + RANDOM_CHANCES['preference']):
undesirable_period_index = random.choice(range(len(work_site_demands[i])))
random_preferences[i] = {undesirable_period_index: Preference.UNDESIRABLE}
return Employee(random_id, random_name, contract_type, min_hours, max_hours, max_shifts, random_seniority,
random_properties, random_streak, random_weekends, random_preferences)
class Scheduler:
"""Scheduler class.
Maximises employee preferences while minimising costs.
Attributes:
employees:
Employees object from which the schedule will be generated.
work_site_schedule:
A list of tuples representing work site needs. Elements of the list are workdays and elements of the tuples
are the required amount of employees for every period of the day. The length of the tuples are the total
periods that the site is open for that day.
        weights:
            A dictionary of objective-function weights with the keys 'preference',
            'day_pairs_off', 'weekends_off' and 'excess_workforce'; the values should sum to 1.
start_day:
An integer between 0-6, representing the weekday from where scheduling is started.
Affects weekends allocation in the model.
shift_start_interval:
An integer defining the number of time periods between every new starting shift.
accuracy:
A float representing the desired solver accuracy. As soon as the best known solution is within
this fraction of the best possible solution, the solver will return.
time_limit:
Maximum allowed running time in seconds.
debug:
A boolean to define whether to print debug messages. Defaults to False
"""
def __init__(self, employees, work_site_demands, weights=None, start_day=None, shift_start_interval=None,
accuracy=None, time_limit=None, debug=False):
"""Initialise scheduler with list of employees."""
self.employees = employees
self.work_site_demands = work_site_demands
self.workday_count = len(work_site_demands)
self.workdays_period_demand = [len(all_periods_needs) for all_periods_needs in work_site_demands]
self.weights = weights
if (weights is None) or not isclose(sum(weights.values()), 1):
print('No objective weights provided or their sum is not 1. Using defaults.')
self.weights = DEFAULT_WEIGHTS
self.start_day = start_day if start_day else 0
self.shift_start_interval = SHIFT_START_INTERVAL if (shift_start_interval is None) else shift_start_interval
self.accuracy = accuracy if accuracy else DEFAULT_OPTIMISATION_ACCURACY
self.time_limit = time_limit
self.debug = debug
[print(x.to_text()) for _, x in self.employees.list.items()]
def run(self, time_limit=None):
"""Create LP problem from employees.
Solve the created problem and return a schedule.
Args:
time_limit:
Optional time limit in seconds. This overrides the time limit property for this run.
"""
print('workdays:', self.workday_count)
decision_variables = self.create_lp_problem()
if not time_limit:
time_limit = self.time_limit
self.problem.solve(PULP_CBC_CMD(gapRel=self.accuracy, timeLimit=time_limit))
print(f'Solved in {time.time() - START_TIME}s')
self.print_results(decision_variables, self.workday_count / 7, self.problem.status)
def create_lp_problem(self):
"""Create the LP problem for this scheduler."""
self.problem = LpProblem('schedule', LpMinimize)
for _, employee in self.employees.list.items():
employee.set_employee_shifts(self.work_site_demands)
print(f'All shifts created in {time.time() - START_TIME}s')
decision_variables = self.create_decision_variables()
time_passed = time.time() - START_TIME
print(f'Decision variables created in {time_passed}s')
self.create_objective(decision_variables)
print(f'Objective created in {time.time() - START_TIME}s')
self.create_constraints(decision_variables)
print(f'Constraints created in {time.time() - START_TIME}s')
print('decision variables:', len(self.problem.variables()))
print('constraints:', len(self.problem.constraints))
return decision_variables
def print_results(self, decision_variables, number_of_weeks=None, status=None, print_daily=False):
"""Print out the results given by the solved model.
Args:
decision_variables:
Problem decision variables
number_of_weeks:
Number of weeks scheduled. Defaults to None. If not provided, employee hours
in console show the total for the whole schedule.
status:
Problem status.
print_daily:
If daily excess hours should be printed or not. Defaults to False.
"""
x = decision_variables['shifts']
y = decision_variables['workforce']
d = decision_variables['days']
w = decision_variables['weekends']
if not number_of_weeks:
number_of_weeks = 1
for key, employee in x.items():
employee_hours = 0
for day in employee:
for shift in day:
if shift.value() != 0:
employee_id, day_index, shift_index = self.get_decision_var_ids(shift)
print(shift, '->', shift.value(), '->',
self.employees.list[employee_id].shifts[day_index][shift_index])
employee_hours += len(self.employees.list[employee_id].shifts[day_index][shift_index])
min_h = self.employees.list[key].min_hours / PERIODS_PER_HOUR
max_h = self.employees.list[key].max_hours / PERIODS_PER_HOUR
raw_h = employee_hours / PERIODS_PER_HOUR / number_of_weeks
print(f'Employee {key} hours:', round(raw_h, 2), f'{min_h}-{max_h}')
days_off_list = []
for day_off_var in d[key]:
if day_off_var.value() == 1:
days_off_list.append(self.get_decision_var_ids(day_off_var)[1])
print('Days off:', days_off_list)
total_excess_hours = 0
for day in y:
for lpvariable in day:
if print_daily:
print(lpvariable.name, '->', lpvariable.value())
total_excess_hours += lpvariable.value()
print('Weekends off:')
for key, employee in w.items():
print(key, [weekend[0].value() for weekend in employee])
print('obj value:', self.problem.objective.value())
print('excess hours:', total_excess_hours / PERIODS_PER_HOUR)
print('problem status (1=opt):', status)
def get_decision_var_ids(self, variable):
"""Return parsed variables's IDs as integers.
Args:
variable: A decision variable to process.
Returns:
A tuple of IDs. Length depends on the type of the decision variable being processed.
"""
return [int(x) for x in variable.name[1:].split(':')]
def create_decision_variables(self):
"""Create decision variables for the LP model.
Returns:
A dictionary of different kinds of decision variables:
shifts:
Dictionary of lists of lists. Defines if employee i is assigned to day j:s shift k.
workforce:
List of lists. Keeps track of excess employees on day i:s period j.
                days:
Dictionary of lists. Defines if employee i:s day j is off duty.
                pairs:
Dictionary of lists. Defines if employee i has days j and j+1 both off duty.
weekends:
Dictionary of lists of tuples. Defines if either Fri-Sat or Sat-Sun of employee i:s week j is off.
Innermost tuple consists of: (decision_variable, [Fri and/or Sat indices])
"""
# 1. Create decision variables in format:
# x{employee_id: [day_index][shift_index]}
# These determine if a shift is assigned to employee.
# 2. Create surplus variables representing days off for all employees.
# 3. Create binary variables for every subsequent two days off. The variables will "overlap".
x_eds = {}
days_off = {}
subsequent_days_off = {}
weekends_off = {}
recent_days_off = {}
for _, employee in self.employees.list.items():
x_eds[employee.id] = []
days_off[employee.id] = []
subsequent_days_off[employee.id] = []
recent_days_off[employee.id] = []
weekend_indices = []
for day_index in range(len(employee.shifts)):
# Add employee-shift -assignment variables.
x_eds[employee.id].append([])
day_shift_count = len(employee.shifts[day_index])
for shift_index in range(day_shift_count):
lp_var_name = str(f'x{employee.id}:{day_index}:' + f'{shift_index}')
x_eds[employee.id][day_index].append(LpVariable(lp_var_name, 0, 1, 'Integer'))
# Add days off variables.
days_off[employee.id].append(LpVariable(f'd{employee.id}:{day_index}', 0, 1, 'Integer'))
# Add binary variables to define if a consecutive pair of days is off-duty for the employee.
if (day_index + 1 < len(employee.shifts)):
subsequent_days_var = LpVariable(f'p{employee.id}:{day_index}-{day_index + 1}', 0, 1, 'Integer')
subsequent_days_off[employee.id].append(subsequent_days_var)
if (self.start_day + day_index) % 7 in (WEEKDAY_FRI, WEEKDAY_SAT):
weekend_indices.append(day_index)
# Combine same weekend's indices to pairs. If start day is Saturday, start splitting to
# pairs from index 1. The first item will then be a single item list because the first
# weekend only has Sat-Sun pair but not a Fri-Sat pair.
weekends_off[employee.id] = []
split_start_idx = 1 if (self.start_day == WEEKDAY_SAT) else 0
weekends_split = [weekend_indices[i:i + 2] for i in range(split_start_idx, len(weekend_indices), 2)]
for pair in weekends_split:
weekend_variable_idx = len(weekends_off[employee.id])
weekend_variable = LpVariable(f'w{employee.id}:{weekend_variable_idx}', 0, 1, 'Integer')
weekends_off[employee.id].append((weekend_variable, pair))
# Create more decision variables in format:
# y[day_index][period_index]
# These represent the excess employees working during every period of day.
y_dp = []
for i, day_length in enumerate(self.workdays_period_demand):
y_dp.append([])
for j in range(day_length):
lp_var_name = f'y{i}:{j}'
y_dp[i].append(LpVariable(lp_var_name, 0, cat='Integer'))
return {'shifts': x_eds, 'workforce': y_dp, 'days': days_off,
'pairs': subsequent_days_off, 'weekends': weekends_off}
def create_objective(self, decision_variables):
"""Create objective function for LP model.
Args:
decision_variables:
A dictionary of decision variables in correct format.
"""
objective = []
main_variables = decision_variables['shifts']
period_surplus_variables = decision_variables['workforce']
day_pairs = decision_variables['pairs']
weekends_off = decision_variables['weekends']
for _, employee in self.employees.list.items():
shift_count = len(employee.shifts)
for day_index in range(shift_count):
for shift_index, shift in enumerate(employee.shifts[day_index]):
preference_factor = 1
try:
if (employee.preferences[day_index][shift_index] & Preference.UNDESIRABLE):
# Violating a preference results in a hefty rise in the objective value.
# The multiplier needs to be big since preferences are relatively rare
# considering the total amount of terms in the objective function.
preference_factor = int(Preference.UNDESIRABLE)
except KeyError:
pass
# Add employee's dissatisfaction towards a certain shift into the objective.
final_preference_factor = self.weights['preference'] * preference_factor
objective += [final_preference_factor * main_variables[employee.id][day_index][shift_index]]
# Add one off-duty subsequent day pair to the objective each week.
if (day_index % 7 == 6):
# Default ending offset set to 1 due to range function behaviour.
# Set to 0 in case the current day is the last in the schedule.
offset = 0 if (day_index == shift_count - 1) else 1
indices = range(day_index - 6, day_index - offset)
random_index = random.choice(indices)
objective += [-self.weights['day_pairs_off'] * day_pairs[employee.id][random_index]]
# Add off-duty weekends to the objective.
weight_key = 'weekends_off'
objective += [(-self.weights[weight_key] * weekend_tuple[0]) for weekend_tuple in weekends_off[employee.id]]
# Add excess workers for each shift to the objective to minimise expenses.
        objective += [self.weights['excess_workforce'] * period_variable
                      for day in period_surplus_variables
                      for period_variable in day]
self.problem += lpSum(objective)
def create_constraints(self, decision_variables):
"""Create constraints to LP model.
Args:
decision_variables:
A dictionary of decision variables in correct format.
"""
main_variables = decision_variables['shifts']
period_surplus_variables = decision_variables['workforce']
day_off_surplus_variables = decision_variables['days']
day_pair_off_variables = decision_variables['pairs']
weekend_variables = decision_variables['weekends']
# Prepare first constraints of a kind to be added to debug messages.
db_msgs = []
first_constraint = 11 * [True]
# Add constraints for fulfilling all work site's time period needs. Iterate over all days in work site schedule.
for day_index in range(len(self.work_site_demands)):
# Create vectors to hold all opening and closing shifts from eligible employees.
all_open_capable_employees_shifts = []
all_close_capable_employees_shifts = []
# Iterate over every period in every day.
period_count = len(self.work_site_demands[day_index])
for period_index in range(period_count):
# Create a vector to hold all shifts that contain said period.
all_shifts_matching_period = []
# Iterate over each employee.
for _, employee in self.employees.list.items():
# Iterate over each open shift for the employee on the given day.
shift_count = len(employee.shifts[day_index])
for shift_index in range(shift_count):
# If current processed shift contains current period, add decision variable to vector.
if period_index in employee.shifts[day_index][shift_index]:
all_shifts_matching_period.append(main_variables[employee.id][day_index][shift_index])
# If current shift is also an opening shift and employee can open, add to vector.
# Do the equivalent for closing as well.
if (period_index == 0) and (employee.special_properties & PropertyFlag.CAN_OPEN):
all_open_capable_employees_shifts.append(main_variables[employee.id][day_index][shift_index])
last_period_index = period_count - 1
if (period_index == last_period_index) and (employee.special_properties & PropertyFlag.CAN_CLOSE):
all_close_capable_employees_shifts.append(main_variables[employee.id][day_index][shift_index])
# Ensure all periods of the day have enough shifts overlapping them.
constraint = (lpSum(all_shifts_matching_period) - (
period_surplus_variables[day_index][(period_index)])) == self.work_site_demands[(day_index)][period_index]
self.problem += constraint
if first_constraint[0]:
db_msgs.append(constraint)
first_constraint[0] = False
# For every first and last period per day, ensure that an employee who can open or close is at work.
constraint = lpSum(all_open_capable_employees_shifts) >= 1
self.problem += constraint
if first_constraint[1]:
db_msgs.append(constraint)
first_constraint[1] = False
constraint = lpSum(all_close_capable_employees_shifts) >= 1
self.problem += constraint
if first_constraint[2]:
db_msgs.append(constraint)
first_constraint[2] = False
# Add multiple constraints employee by employee.
# Iterate over employees.
for _, employee in self.employees.list.items():
employee_weekly_shifts = []
streaks_start_index = MAXIMUM_CONSECUTIVE_WORKDAYS - employee.current_workday_streak
# Iterate over every day for each employee.
for day_index in range(len(employee.shifts)):
# Any employee mustn't be assigned to more than one shift per day.
constraint = lpSum([x for x in main_variables[employee.id][day_index]]) + (
day_off_surplus_variables[employee.id][day_index]) == 1
self.problem += constraint
if first_constraint[3]:
db_msgs.append(constraint)
first_constraint[3] = False
# Weekly working hours have lower and upper bounds. Also any worker mustn't work more than the maximum
# number of shifts defined for them. Resolve weekly shift boundaries for every seven days passed.
# Weekly shifts are (length, decision variable) -pairs.
shift_count = len(employee.shifts[day_index])
for shift_index in range(shift_count):
employee_weekly_shifts.append((len(employee.shifts[day_index][shift_index]),
main_variables[employee.id][day_index][shift_index]))
if (day_index % 7 == 6):
# Limit the number of periods (hours) in weekly shifts.
if employee.min_hours == employee.max_hours:
constraint = lpSum([l * x for l, x in employee_weekly_shifts]) == employee.min_hours
self.problem += constraint
if first_constraint[4]:
db_msgs.append(constraint)
first_constraint[4] = False
else:
self.problem += lpSum([l * x for l, x in employee_weekly_shifts]) >= employee.min_hours
constraint = lpSum([l * x for l, x in employee_weekly_shifts]) <= employee.max_hours
self.problem += constraint
if first_constraint[4]:
db_msgs.append(constraint)
first_constraint[4] = False
# Limit the number of weekly shifts.
constraint = lpSum([x for _, x in employee_weekly_shifts]) <= employee.max_shifts
employee_weekly_shifts = []
self.problem += constraint
if first_constraint[5]:
db_msgs.append(constraint)
first_constraint[5] = False
# For every day, ensure that the previous n days have at least one day off. This prevents
# over n day-long consecutive streaks. Some first days in schedule get ignored.
if day_index >= streaks_start_index:
first_streak_day = day_index - MAXIMUM_CONSECUTIVE_WORKDAYS
if first_streak_day < 0:
first_streak_day = 0
# Use i+1 as the endpoint due to range function behaviour.
vars_list = [day_off_surplus_variables[employee.id][i] for i in range(first_streak_day, day_index + 1)]
constraint = lpSum(vars_list) >= 1
self.problem += constraint
if first_constraint[6]:
db_msgs.append(constraint)
first_constraint[6] = False
# For each two-day pair, assign a binary variable that takes the value of
# day1 * day2, i.e. works as an AND logical operator.
pair_count = len(day_pair_off_variables[employee.id])
for pair_idx in range(pair_count):
day1_off = day_off_surplus_variables[employee.id][pair_idx]
day2_off = day_off_surplus_variables[employee.id][pair_idx + 1]
pair_off_variable = day_pair_off_variables[employee.id][pair_idx]
self.problem += pair_off_variable <= day1_off
self.problem += pair_off_variable <= day2_off
constraint = pair_off_variable >= day1_off + day2_off - 1
self.problem += constraint
if first_constraint[7]:
db_msgs.append(constraint)
first_constraint[7] = False
# For each weekend per employee, assign a new binary variable that takes the value of day1*day2, i.e.
# create an AND logical operator. These will be later combined to ensure enough weekends off for everyone.
weekend_count = len(weekend_variables[employee.id])
for weekend_idx in range(weekend_count):
weekend_variable, day_indices = weekend_variables[employee.id][weekend_idx]
pair1_off = day_pair_off_variables[employee.id][day_indices[0]]
if len(day_indices) > 1:
pair2_off = day_pair_off_variables[employee.id][day_indices[1]]
self.problem += weekend_variable >= pair1_off
self.problem += weekend_variable >= pair2_off
constraint = weekend_variable <= pair1_off + pair2_off
self.problem += constraint
if first_constraint[8]:
db_msgs.append(constraint)
first_constraint[8] = False
else:
# Weekend only has one pair because it was cut in half.
constraint = weekend_variable == pair1_off
                    self.problem += constraint
if first_constraint[8]:
db_msgs.append(constraint)
first_constraint[8] = False
# Add constraints for ensuring the required weekends off.
try:
for obligatory_weekend_off_idx in employee.weekends_config['single']:
constraint = weekend_variables[employee.id][obligatory_weekend_off_idx][0] == 1
self.problem += constraint
if first_constraint[9]:
db_msgs.append(constraint)
first_constraint[9] = False
print(f'free weekend {obligatory_weekend_off_idx}', f'for {employee.id}')
except KeyError:
# Employee has no single weekend constraints.
pass
try:
key = 'groups'
for weekend_group_off in employee.weekends_config[key]:
minimum_weekends = weekend_group_off[0]
weekend_indices = weekend_group_off[1:]
constraint = lpSum([weekend_variables[employee.id][i][0] for i in weekend_indices]) >= minimum_weekends
self.problem += constraint
if first_constraint[10]:
db_msgs.append(constraint)
first_constraint[10] = False
except KeyError:
# Employee has no multi weekend constraints.
pass
if self.debug:
for msg in db_msgs:
line_len = 50
print(line_len * '-')
print(msg)
print(line_len * '-')
print()
if __name__ == '__main__':
# Testing code goes here.
# Example process to test the program:
# 1. Create a matrix (nested list or tuple) that holds workforce
# demands for each day.
# 2. Create employees.
# 3. Create scheduler with desired parameters.
# 4. Run scheduler.
pass
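    # Illustrative smoke test with assumed values: one week of demand, each day
    # open for 12 hours (24 half-hour periods) and needing 2 employees per period;
    # dummy employees are generated until the demanded hours are covered.
    example_demands = [tuple(2 for _ in range(12 * PERIODS_PER_HOUR))
                       for _ in range(7)]
    example_employees = Employees()
    example_employees.create_dummy_employees(None, example_demands)
    example_scheduler = Scheduler(example_employees, example_demands,
                                  weights=DEFAULT_WEIGHTS, accuracy=0.2,
                                  time_limit=60, debug=True)
    example_scheduler.run()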
|
the-stack_106_27513 | import numpy as np
import nibabel as nib
import pandas as pd
import pickle
def calc_vertex_correlations(path_lh, path_rh):
# load fmri data for left and right hemi
lh_fmri=nib.load(path_lh)
rh_fmri=nib.load(path_rh)
#get image data and resize
lh_imagedata=lh_fmri.get_data()
lh_imagedata.resize(lh_imagedata.shape[1],lh_imagedata.shape[3])
rh_imagedata=rh_fmri.get_data()
rh_imagedata.resize(rh_imagedata.shape[1],rh_imagedata.shape[3])
    # There are vertices without signal: check whether they are the only ones with zero variance.
lh_zerovertex=~lh_imagedata.any(axis=1)
rh_zerovertex=~rh_imagedata.any(axis=1)
lh_cov=np.cov(lh_imagedata)
lh_var=np.diag(lh_cov)
lh_var=np.expand_dims(lh_var,axis=1)
lh_varzero=~lh_var.any(axis=1)
rh_cov=np.cov(rh_imagedata)
rh_var=np.diag(rh_cov)
rh_var=np.expand_dims(rh_var,axis=1)
rh_varzero=~rh_var.any(axis=1)
if np.all(lh_varzero==lh_zerovertex)==False or np.all(rh_varzero==rh_zerovertex)==False:
print("\n###########################################################")
print("All Vertices with zero variance of left hemi have no signal: ")
print(np.all(lh_varzero==lh_zerovertex))
print("All Vertices with zero variance of right hemi have no signal: ")
print(np.all(rh_varzero==rh_zerovertex))
print("############################################################\n")
#calculate correlations and remove vertices without signal as seeds
imagedata=np.concatenate((lh_imagedata,rh_imagedata),axis=0)
zerovertex=np.concatenate((lh_zerovertex,rh_zerovertex),axis=0)
deleteind=np.where(zerovertex)[0]
imagedata=np.delete(imagedata,deleteind,0)
cor=np.corrcoef(imagedata)
return {'fullcor':cor,'zerovertices': deleteind, 'splitposition': lh_zerovertex.shape[0]}
def calc_vertex_correlations_combined_runs (path_lh1, path_rh1,path_lh2, path_rh2):
# load fmri data for left and right hemi
lh_fmri1=nib.load(path_lh1)
rh_fmri1=nib.load(path_rh1)
lh_fmri2=nib.load(path_lh2)
rh_fmri2=nib.load(path_rh2)
#get image data and resize
lh_imagedata1=lh_fmri1.get_data()
lh_imagedata1.resize(lh_imagedata1.shape[1],lh_imagedata1.shape[3])
rh_imagedata1=rh_fmri1.get_data()
rh_imagedata1.resize(rh_imagedata1.shape[1],rh_imagedata1.shape[3])
lh_imagedata2=lh_fmri2.get_data()
lh_imagedata2.resize(lh_imagedata2.shape[1],lh_imagedata2.shape[3])
rh_imagedata2=rh_fmri2.get_data()
rh_imagedata2.resize(rh_imagedata2.shape[1],rh_imagedata2.shape[3])
    # There are vertices without signal.
lh_zerovertex=~lh_imagedata1.any(axis=1)
rh_zerovertex=~rh_imagedata1.any(axis=1)
#calculate correlations and remove vertices without signal as seeds
imagedata1=np.concatenate((lh_imagedata1,rh_imagedata1),axis=0)
imagedata2=np.concatenate((lh_imagedata2,rh_imagedata2),axis=0)
zerovertex=np.concatenate((lh_zerovertex,rh_zerovertex),axis=0)
deleteind=np.where(zerovertex)[0]
imagedata1=np.delete(imagedata1,deleteind,0)
imagedata2=np.delete(imagedata2,deleteind,0)
imagedata=np.concatenate((imagedata1,imagedata2),axis=1)
cor=np.corrcoef(imagedata)
return {'fullcor':cor,'zerovertices': deleteind, 'splitposition': lh_zerovertex.shape[0]}
def subjects_into_csv_gz_7nets(nrOFNetworks,truesubjects,WhichModelType,corr_mat_dir,targetdir,nameslhrois,namesrhrois,dataframe2,targetdir2=None,functional=None,spatial=None):
#For each ROI as seed create csv file containing connections to all other ROIs for all subjects
labels=np.arange(nrOFNetworks)
    labels=labels.tolist()
name=['Subject','Zygosity','Mother_ID']
nonvertexdat=np.zeros((len(truesubjects),3),dtype=object)
if WhichModelType=='AfterEmbedding':
spatialvar=[]
for j in range(len(labels)):
tablelh=[]
tablerh=[]
for i in range(len(truesubjects)):
infile=corr_mat_dir +str(i)+'.csv.gz'
corr=pd.read_csv(infile,compression='gzip')
tmp=corr[['Unnamed: 0',nameslhrois[j]]]
tmp=tmp.drop([j])
tmp=tmp.transpose()
tmptablelh=tmp.drop(['Unnamed: 0'])
tmp=corr[['Unnamed: 0',namesrhrois[j]]]
tmp=tmp.drop([j+7])
tmp=tmp.transpose()
tmptablerh=tmp.drop(['Unnamed: 0'])
tablelh.append(tmptablelh)
tablerh.append(tmptablerh)
if j==0:
index=dataframe2[dataframe2['Subject']==truesubjects[i]].index.tolist()
tmp1=np.array([str(truesubjects[i]),dataframe2['Zygosity'][index].values[0], str(dataframe2['Mother_ID'][index].values[0])])
nonvertexdat[i,:]=tmp1
if WhichModelType=='AfterEmbedding':
spatframe=pd.DataFrame(np.expand_dims(spatial[i],0)).groupby(functional[1],axis=1).mean()
spatframe.columns=nameslhrois+namesrhrois
spatialvar.append(spatframe)
tablelh=pd.concat(tablelh,axis=0,ignore_index=True)
tablerh=pd.concat(tablerh,axis=0,ignore_index=True)
roinames=nameslhrois+namesrhrois
del roinames[j]
tablelh.columns=roinames
roinames=nameslhrois+namesrhrois
del roinames[j+7]
tablerh.columns=roinames
nonvertextable=pd.DataFrame(data=nonvertexdat)
nonvertextable.columns=name
table=pd.concat([nonvertextable,tablelh],1)
table=table.sort_values(['Zygosity', 'Mother_ID'], axis=0, ascending=[True,True])
table.reset_index(inplace=True)
table=table.drop('index',axis=1)
writefile=targetdir+'lh_net'+str(j+1)+'.csv.gz'
table.to_csv(writefile, compression='gzip')
table=pd.concat([nonvertextable,tablerh],1)
table=table.sort_values(['Zygosity', 'Mother_ID'], axis=0,ascending=[True,True])
table.reset_index(inplace=True)
table=table.drop('index',axis=1)
writefile=targetdir+'rh_net'+str(j+1)+'.csv.gz'
table.to_csv(writefile, compression='gzip')
if WhichModelType=='AfterEmbedding':
spatialframe=pd.concat(spatialvar,axis=0,ignore_index=True)
table=pd.concat([nonvertextable,spatialframe],axis=1)
table=table.sort_values(['Zygosity', 'Mother_ID'], axis=0, ascending=[True,True])
table.reset_index(inplace=True)
table=table.drop('index',axis=1)
writefile=targetdir2+'mean_distances.csv.gz'
table.to_csv(writefile, compression='gzip')
def subjects_into_csv_gz_specifiedROIs(ROIs,NetNr,hemi,truesubjects,WhichModelType,corr_mat_dir,targetdir,nameslhrois,namesrhrois,dataframe2,source_dir=None,targetdir2=None):
#For each ROI as seed create csv file containing connections to all other ROIs for all subjects
name=['Subject','Zygosity','Mother_ID']
nonvertexdat=np.zeros((len(truesubjects),3),dtype=object)
for j in range(ROIs[0],ROIs[1]):
table=[]
if WhichModelType=='AfterEmbedding_notSelected':
spatialvar=[]
for i in range(len(truesubjects)):
infile=corr_mat_dir+hemi+'_'+str(j)+'/' +str(i)+'.csv.gz'
corr=pd.read_csv(infile,compression='gzip')
if hemi=='lh':
nameslhrois[NetNr-1]='lh_'+str(j)
tmp=corr[['Unnamed: 0',nameslhrois[NetNr-1]]]
tmp=tmp.drop([NetNr-1])
if hemi=='rh':
namesrhrois[NetNr-1]='rh_'+str(j)
tmp=corr[['Unnamed: 0',namesrhrois[NetNr-1]]]
tmp=tmp.drop([NetNr-1+7])
tmp=tmp.transpose()
tmptable=tmp.drop(['Unnamed: 0'])
table.append(tmptable)
if j==ROIs[0]:
index=dataframe2[dataframe2['Subject']==truesubjects[i]].index.tolist()
tmp1=np.array([str(truesubjects[i]),dataframe2['Zygosity'][index].values[0], str(dataframe2['Mother_ID'][index].values[0])])
nonvertexdat[i,:]=tmp1
if WhichModelType=='AfterEmbedding_notSelected':
if hemi=='lh':
functional=pickle.load(open(source_dir+'lh_'+str(j)+'_'+'correspondingvertices.p','rb'))
spatial= pickle.load(open(source_dir+'lh_'+str(j)+'_'+'FuncSpatDifference.p','rb'))
if hemi=='rh':
functional=pickle.load(open(source_dir+'rh_'+str(j)+'_'+'correspondingvertices.p','rb'))
spatial= pickle.load(open(source_dir+'rh_'+str(j)+'_'+'FuncSpatDifference.p','rb'))
spatframe=pd.DataFrame(np.expand_dims(spatial[i],0)).groupby(functional[1],axis=1).mean()
spatframe.columns=nameslhrois+namesrhrois
spatialvar.append(spatframe)
table=pd.concat(table,axis=0,ignore_index=True)
roinames=nameslhrois+namesrhrois
if hemi=='lh':
del roinames[NetNr-1]
if hemi=='rh':
del roinames[NetNr-1+7]
table.columns=roinames
nonvertextable=pd.DataFrame(data=nonvertexdat)
nonvertextable.columns=name
table=pd.concat([nonvertextable,table],1)
table=table.sort_values(['Zygosity', 'Mother_ID'], axis=0, ascending=[True,True])
table.reset_index(inplace=True)
table=table.drop('index',axis=1)
if hemi=='lh':
writefile=targetdir+'lh_roi'+str(j)+'.csv.gz'
if hemi=='rh':
writefile=targetdir+'rh_roi'+str(j)+'.csv.gz'
table.to_csv(writefile, compression='gzip')
if WhichModelType=='AfterEmbedding_notSelected':
spatialframe=pd.concat(spatialvar,axis=0,ignore_index=True)
table=pd.concat([nonvertextable,spatialframe],axis=1)
table=table.sort_values(['Zygosity', 'Mother_ID'], axis=0, ascending=[True,True])
table.reset_index(inplace=True)
table=table.drop('index',axis=1)
if hemi=='lh':
writefile=targetdir2+'lh_'+str(j)+'_mean_distances.csv.gz'
if hemi=='rh':
writefile=targetdir2+'rh_'+str(j)+'_mean_distances.csv.gz'
table.to_csv(writefile, compression='gzip')
def myinsert(array,position,value,axis):
    # Insert rows or columns (depending on axis) into the array at the given positions, each filled with 'value'.
m,n=array.shape
positionlength=len(position)
position=np.concatenate((position,np.array([np.nan])),axis=0)
counta=0
countp=0
if axis==0:
newarray=np.zeros((m+positionlength,n),dtype=float)
for i in range((m+positionlength)):
if i==position[countp]:
newarray[i,:]=value
countp=countp+1
else:
newarray[i,:]=array[counta,:]
counta=counta+1
if axis==1:
newarray=np.zeros((m,n+positionlength),dtype=float)
for i in range((n+positionlength)):
if i==position[countp]:
newarray[:,i]=value
countp=countp+1
else:
newarray[:,i]=array[:,counta]
counta=counta+1
return newarray
def subject_into_csv_vertex(nr_vertices,hemi,truesubjects,WhichModelType,labels,networkborders,corr_mat_dir,targetdir,nameslhrois,namesrhrois,dataframe2,source_dir=None,targetdir2=None):
name=['Subject','Zygosity','Mother_ID']
nonvertexdat=np.zeros((len(truesubjects),3),dtype=object)
lhroisnames=np.array(nameslhrois)
rhroisnames=np.array(namesrhrois)
for j in range(nr_vertices):
if labels[j]!=0:
table=[]
if WhichModelType=='AfterEmbedding':
spatialvar=[]
for i in range(len(truesubjects)):
infile=corr_mat_dir+hemi+'_vertex_'+str(j+1)+'/' +str(i)+'.csv.gz'
corr=pd.read_csv(infile,compression='gzip')
tmp=corr[['Unnamed: 0',str(j+1)]]
tmp=tmp.drop([0])
tmp=tmp.transpose()
tmptable=tmp.drop(['Unnamed: 0'])
table.append(tmptable)
assign=networkborders>labels[j]
assign=np.expand_dims(assign,0)
assign=np.where(np.any(assign,axis=0))[0][0]
assign=assign-2
if hemi=='lh':
roinames=np.concatenate([np.delete(lhroisnames,assign),rhroisnames],0)
if hemi=='rh':
roinames=np.concatenate([lhroisnames,np.delete(rhroisnames,assign)],0)
roinames=roinames.tolist()
if j==0:
index=dataframe2[dataframe2['Subject']==truesubjects[i]].index.tolist()
tmp1=np.array([str(truesubjects[i]),dataframe2['Zygosity'][index].values[0], str(dataframe2['Mother_ID'][index].values[0])])
nonvertexdat[i,:]=tmp1
if WhichModelType=='AfterEmbedding':
if hemi=='lh':
functional=pickle.load(open(source_dir+'lh_'+str(j+1)+'correspondingvertices.p','rb'))
spatial= pickle.load(open(source_dir+'lh_'+str(j+1)+'FuncSpatDifference.p','rb'))
if hemi=='rh':
functional=pickle.load(open(source_dir+'rh_'+str(j+1)+'correspondingvertices.p','rb'))
spatial= pickle.load(open(source_dir+'rh_'+str(j+1)+'FuncSpatDifference.p','rb'))
spatframe=pd.DataFrame(np.expand_dims(spatial[i],0)).groupby(functional[1],axis=1).mean()
spatframe.columns=[str(j+1)]+roinames
spatialvar.append(spatframe)
table=pd.concat(table,axis=0,ignore_index=True)
table.columns=roinames
nonvertextable=pd.DataFrame(data=nonvertexdat)
nonvertextable.columns=name
table=pd.concat([nonvertextable,table],1)
table=table.sort_values(['Zygosity', 'Mother_ID'], axis=0, ascending=[True,True])
table.reset_index(inplace=True)
table=table.drop('index',axis=1)
if hemi=='lh':
writefile=targetdir+'lh_vertex'+str(j+1)+'.csv.gz'
if hemi=='rh':
writefile=targetdir+'rh_vertex'+str(j+1)+'.csv.gz'
table.to_csv(writefile, compression='gzip')
if WhichModelType=='AfterEmbedding':
spatialframe=pd.concat(spatialvar,axis=0,ignore_index=True)
table=pd.concat([nonvertextable,spatialframe],axis=1)
table=table.sort_values(['Zygosity', 'Mother_ID'], axis=0, ascending=[True,True])
table.reset_index(inplace=True)
table=table.drop('index',axis=1)
if hemi=='lh':
writefile=targetdir2+'lh_'+str(j+1)+'_mean_distances.csv.gz'
if hemi=='rh':
writefile=targetdir2+'rh_'+str(j+1)+'_mean_distances.csv.gz'
table.to_csv(writefile, compression='gzip')
def get_polygon_area_for_each_vertex(vertex_coords, polygons):
triangle_area=np.zeros((polygons.shape[0]))
for i in range(polygons.shape[0]):
vertex_ind=polygons[i,1:4]
v1=vertex_coords[vertex_ind[0],:]
v2=vertex_coords[vertex_ind[1],:]
v3=vertex_coords[vertex_ind[2],:]
v_12=np.linalg.norm(v2-v1)
v_13=np.linalg.norm(v3-v1)
alpha=np.arccos(np.dot((v2-v1),(v3-v1))/(v_12*v_13))
height=v_13*np.sin(alpha)
triangle_area[i]=height*v_12/2
polygon_area=np.zeros((vertex_coords.shape[0]))
for i in range(vertex_coords.shape[0]):
indices=np.any(polygons==i,axis=1)
polygon_area[i]=np.sum(triangle_area[indices])
return polygon_area
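

if __name__ == '__main__':
    # Minimal illustrative check of the myinsert helper using made-up values.
    demo = np.arange(6, dtype=float).reshape(2, 3)
    print(myinsert(demo, np.array([1]), 0.0, axis=0))  # zero-filled row inserted at index 1
    print(myinsert(demo, np.array([0]), 0.0, axis=1))  # zero-filled column inserted at index 0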
|
the-stack_106_27515 | import numpy as np
import csv
import sys
from sklearn import preprocessing
import matplotlib.pyplot as plt
import random
import time as sleepy
questionCount = 16
sampleCount = 125
names = ['u', 's0','s1','s2','s3','s4','s5','s6','s7','s8','s9','q0','q1','q2','q3','a0', 'qc']
user = "u"
enjoy = "s0"
skills = "s1"
prepare = "s2"
time = "s3"
conscious = "s4"
new = "s5"
unexpected = "s6"
learnt = "s7"
better = "s8"
motivated = "s9"
quiz0 = "q0"
quiz1 = "q1"
quiz2 = "q2"
quiz3 = "q3"
assignment = "a0"
combined = "qc"
tests = [
#('Everything', [enjoy, skills, prepare, time, conscious, new, unexpected, learnt, better, motivated], []),
('Flow', [enjoy, skills, prepare, time, conscious], []),
]
def main():
dataset = loadDataset('mvp2std.csv')
samples = splitDataset(dataset, tests[0], sampleCount)
plotResults(samples[0])
def plotResults(data):
print(data[0])
enjoyment = [row[0] for row in data]
skill = [row[1] for row in data]
challenge = [row[2] for row in data]
minE = min(enjoyment)
maxE = max(enjoyment)
minS = min(skill)
maxS = max(skill)
minC = min(challenge)
maxC = max(challenge)
print(minE, maxE)
for i in range(len(enjoyment)):
enjoyment[i] = (enjoyment[i]-minE)/(maxE-minE)
skill[i] = (skill[i]-minS)/(maxS-minS)
challenge[i] = (challenge[i]-minC)/(maxC-minC)
print()
print(enjoyment)
plt.scatter(skill, challenge, s=15, c=enjoyment, cmap="viridis", alpha=1)
plt.colorbar(alpha=1)
#plt.show()
plt.savefig("flowDiagram.svg")
def loadDataset(filename):
with open(filename, 'r') as csvfile:
reader = csv.DictReader(csvfile)
dataset = np.zeros((sampleCount, 1 + questionCount)) #initialise dataset array
i = 0
for row in reader:
            datum = np.array(int(row['u'])) # store the user id as a single integer
            questions = np.fromiter(map(float, list(row.values())[1:questionCount+1]), dtype=float) #get question responses
datum = np.append(datum, questions) #add questions to sample array
dataset[i] = datum #add sample to dataset
print(row)
i = i + 1
return dataset
def splitDataset(dataset, test, splitPoint):
testName, x, y = test
# np.random.shuffle(dataset)
trainingData = dataset[:splitPoint]
validationData = dataset[splitPoint:]
trainingX = trainingData[..., nums(x)]
trainingY = trainingData[..., nums(y)]
validationX = validationData[..., nums(x)]
validationY = validationData[..., nums(y)]
return (trainingX, trainingY, validationX, validationY)
def nums(inputList):
ret = []
for name in inputList:
ret.append(names.index(name))
return ret
main() |
the-stack_106_27516 | from datetime import datetime
import xml.etree.ElementTree as ET
import unicodedata as ud
import enchant
import re
from stdnum import isbn
from stdnum import exceptions
# -----------------------------------------------------------------------------
def preprocessISBNString(inputISBN):
"""This function normalizes a given string to return numbers only.
>>> preprocessISBNString('978-90-8558-138-3 test')
'9789085581383'
>>> preprocessISBNString('9789085581383 test test')
'9789085581383'
>>> preprocessISBNString('9031411515')
'9031411515'
>>> preprocessISBNString('9791032305690')
'9791032305690'
>>> preprocessISBNString('978 90 448 3374')
'978904483374'
>>> preprocessISBNString('90 223 1348 4 (Manteau)')
'9022313484'
>>> preprocessISBNString('90 223 1348 4 (Manteau 123)')
'9022313484'
>>> preprocessISBNString('978-90-303-6744-4 (dl. 1)')
'9789030367444'
>>> preprocessISBNString('979-10-235-1393-613')
'9791023513936'
>>> preprocessISBNString('90-295-3453-2 (Deel 1)')
'9029534532'
>>> preprocessISBNString('I am not a ISBN number')
''
>>> preprocessISBNString('')
''
"""
    inputISBNNorm = re.sub(r'\D', '', inputISBN)
if len(inputISBNNorm) == 0:
return ''
elif len(inputISBNNorm) == 10:
return inputISBNNorm
elif len(inputISBNNorm) == 13:
if inputISBNNorm.startswith('978') or inputISBNNorm.startswith('979'):
return inputISBNNorm
else:
# it is a wrong ISBN number which happens to have 13 digits
# Best shot: it probably is a 10 digit ISBN and there were other numbers as part of text
return inputISBNNorm[:10]
else:
if len(inputISBNNorm) > 13:
return inputISBNNorm[:13]
elif len(inputISBNNorm) < 13 and len(inputISBNNorm) > 10:
if inputISBNNorm.startswith('978') or inputISBNNorm.startswith('979'):
# it is actually a wrong ISBN 13 number, nevertheless return all of it
return inputISBNNorm
else:
# maybe number parts of the text got added by accident to a valid 10 digit ISBN
return inputISBNNorm[:10]
else:
return inputISBNNorm
# -----------------------------------------------------------------------------
def getNormalizedISBN10(inputISBN):
"""This function normalizes an ISBN number.
>>> getNormalizedISBN10('978-90-8558-138-3')
'90-8558-138-9'
>>> getNormalizedISBN10('978-90-8558-138-3 test')
'90-8558-138-9'
>>> getNormalizedISBN10('9789085581383')
'90-8558-138-9'
>>> getNormalizedISBN10('9031411515')
'90-314-1151-5'
>>> getNormalizedISBN10('9791032305690')
''
>>> getNormalizedISBN10('')
''
>>> getNormalizedISBN10('979-10-235-1393-613')
''
>>> getNormalizedISBN10('978-10-235-1393-613')
Traceback (most recent call last):
...
stdnum.exceptions.InvalidFormat: Not a valid ISBN13.
"""
inputISBNNorm = preprocessISBNString(inputISBN)
if inputISBNNorm:
isbn10 = None
try:
isbn10 = isbn.format(isbn.to_isbn10(inputISBNNorm))
return isbn10
except exceptions.InvalidComponent:
# Probably an ISBN number with 979 prefix for which no ISBN10 can be created
if inputISBNNorm.startswith('979'):
return ''
else:
raise
else:
return ''
# -----------------------------------------------------------------------------
def getNormalizedISBN13(inputISBN):
"""This function normalizes an ISBN number.
>>> getNormalizedISBN13('978-90-8558-138-3')
'978-90-8558-138-3'
>>> getNormalizedISBN13('978-90-8558-138-3 test')
'978-90-8558-138-3'
>>> getNormalizedISBN13('9789085581383')
'978-90-8558-138-3'
>>> getNormalizedISBN13('9031411515')
'978-90-314-1151-1'
>>> getNormalizedISBN13('')
''
"""
inputISBNNorm = preprocessISBNString(inputISBN)
if inputISBNNorm:
isbn13 = None
try:
isbn13 = isbn.format(isbn.to_isbn13(inputISBNNorm))
return isbn13
except exceptions.InvalidFormat:
print(f'Error in ISBN 13 conversion for "{inputISBN}"')
raise
else:
return ''
# -----------------------------------------------------------------------------
if __name__ == "__main__":
import doctest
doctest.testmod()
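    # A couple of explicit calls in addition to the doctests; the expected
    # values in the comments mirror the doctest examples above.
    print(getNormalizedISBN13('9789085581383'))      # 978-90-8558-138-3
    print(getNormalizedISBN10('978-90-8558-138-3'))  # 90-8558-138-9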
|
the-stack_106_27517 | import bitmath
from common.file_response import FileResponse
class DatasetResponse(FileResponse):
def get_headings(self):
return ["Dataset", "Users", "Methods", "Accesses", "Size", "Activity Days"]
def _write_xlsx(self, json_data, worksheet, date_format):
worksheet.set_column(0, 0, 74)
worksheet.set_column(1, 5, 11)
for number, heading in enumerate(self.get_headings()):
worksheet.write_string(0, number, heading)
for row, result in enumerate(json_data["results"], start = 1):
worksheet.write_string(row, 0, result)
worksheet.write_number(row, 1, json_data["results"][result]["users"])
worksheet.write_number(row, 2, json_data["results"][result]["methods"])
worksheet.write_number(row, 3, json_data["results"][result]["accesses"])
size = bitmath.parse_string(f'{str(json_data["results"][result]["size"])}B').best_prefix(bitmath.NIST).format("{value:.1f} {unit}")
worksheet.write_string(row, 4, size)
worksheet.write_number(row, 5, json_data["results"][result]["activitydays"])
|
the-stack_106_27519 | from django import forms, template
from django.conf import settings
from django.db import models
from modelcluster.fields import ParentalKey, ParentalManyToManyField
from modelcluster.models import ClusterableModel
from modelcluster.contrib.taggit import ClusterTaggableManager
from taggit.models import TaggedItemBase
from wagtail.admin.edit_handlers import FieldPanel, InlinePanel, StreamFieldPanel, MultiFieldPanel, PageChooserPanel
from wagtail.core import blocks
from wagtail.core.blocks import StructBlock, StructValue, BooleanBlock, FieldBlock, ChoiceBlock, CharBlock
from wagtail.core.models import Page, Orderable
from wagtail.core.fields import RichTextField, StreamField
from wagtail.images.edit_handlers import ImageChooserPanel
from wagtail.snippets.models import register_snippet
from wagtail.search import index
from .blocks import InlineImageBlock, ScheduleBlock, schedule_table_options, DataBlock, ReviewerChoiceBlock, InstructionBlock
from django_comments_xtd.models import XtdComment
register = template.Library()
# Create your models here.
class HomePage(Page):
subpage_types = ['MissionIndexPage']
max_count = 1
section_title = models.CharField(
null=True,
blank=True,
max_length=255,
help_text=("Title to display above this section"),
)
mission_review_intro = RichTextField(blank=True)
mission_section = models.ForeignKey(
Page,
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+',
help_text=("Featured missions for the homepage"),
verbose_name=("Point to Section")
)
content_panels = Page.content_panels + [
MultiFieldPanel([
FieldPanel('section_title'),
FieldPanel('mission_review_intro', classname='full'),
PageChooserPanel('mission_section'),
], heading=("Mission Index Section"), classname='collapsible'),
]
def get_context(self, request):
# Update context to include only published posts, ordered by reverse-chron
context = super().get_context(request)
missionpages = MissionPage.objects.all()
context['missionpages'] = missionpages
return context
# =====================================================================
# Mission Page Setup
# =====================================================================
class MissionPage(Page):
parent_page_types = ['MissionIndexPage']
subpage_types = ['MissionDataPage', 'MissionCommentsPage', 'MissionLiensPage']
image = models.ForeignKey(
'wagtailimages.Image', blank=True, null=True, on_delete=models.SET_NULL, related_name='+', verbose_name=("Image")
)
featured = models.BooleanField(default=False)
site_layout = RichTextField(features=['h5', 'h6', 'bold', 'italic', 'hr',
'ol', 'ul', 'link', 'document-link', 'image', 'embed'], blank=True)
scheduling = StreamField([('schedule', ScheduleBlock(
table_options=schedule_table_options, max_num=2, blank=True))], blank=True)
instructions = StreamField(
[('instructions', InstructionBlock())], blank=True)
categories = ParentalManyToManyField('missions.MissionCategory', blank=True)
reviewers = ParentalManyToManyField('missions.Reviewer', blank=True)
content_panels = Page.content_panels + [
MultiFieldPanel([
FieldPanel('categories', widget=forms.CheckboxSelectMultiple),
FieldPanel('reviewers', widget=forms.CheckboxSelectMultiple),
FieldPanel('featured'),
],
heading="Mission information",
classname="collapsible"
),
MultiFieldPanel([
FieldPanel('site_layout', classname='full'),
StreamFieldPanel('scheduling'),
],
heading="Review Information",
classname="collapsible"
),
MultiFieldPanel([
StreamFieldPanel('instructions'),
],
heading="Review Instructions",
classname="collapsible"
),
]
def get_context(self, request):
# Update context to include only published posts, ordered by reverse-chron
context = super().get_context(request)
missionpages = MissionPage.objects.all()
context['missionpages'] = missionpages
return context
class MissionIndexPage(Page):
intro = RichTextField(blank=True)
# Specifies that only these page objects can live under this index page
parent_page_types = ['HomePage']
subpage_types = ['MissionPage']
max_count = 1
def missionpages(self):
return MissionPage.objects.child_of(self).live().order_by('-first_published_at')
def featured_missionpages(self):
return self.missionpages().filter(featured=True)
content_panels = Page.content_panels + [
FieldPanel('intro', classname='full'),
]
# =====================================================================
# Mission Data Page Setup
# =====================================================================
class MissionDataPage(Page):
parent_page_types = ['MissionPage']
subpage_types = []
data = StreamField(
[('data', DataBlock())], blank=True)
content_panels = Page.content_panels + [
MultiFieldPanel([
StreamFieldPanel('data'),
],
heading="Review Data",
classname="collapsible"
),
]
def get_context(self, request):
# Update context to include only published posts, ordered by reverse-chron
context = super().get_context(request)
missionpages = MissionPage.objects.all()
context['missionpages'] = missionpages
return context
# =====================================================================
# Mission Comments Page Setup
# =====================================================================
class MissionCommentsPage(Page):
parent_page_types = ['MissionPage']
subpage_types = []
allow_comments = models.BooleanField('allow comments', default=True)
comment = RichTextField(features=['h5', 'h6', 'bold', 'italic', 'hr',
'ol', 'ul', 'link', 'document-link', 'image', 'embed'], blank=True)
content_panels = Page.content_panels + [
InlinePanel('customcomments', label=("Comments")),
]
def __str__(self):
return self.title
def get_context(self, request):
# Update context to include only published posts, ordered by reverse-chron
context = super().get_context(request)
missionpages = MissionPage.objects.all()
context['missionpages'] = missionpages
return context
def get_absolute_url(self):
return self.get_url()
# =====================================================================
# Mission Liens Page Setup
# =====================================================================
class MissionLiensPage(Page):
parent_page_types = ['MissionPage']
subpage_types = []
info = RichTextField(features=['h5', 'h6', 'bold', 'italic', 'hr',
'ol', 'ul', 'link', 'document-link', 'image', 'embed'], blank=True)
def get_context(self, request):
# Update context to include only published posts, ordered by reverse-chron
context = super().get_context(request)
missionpages = MissionPage.objects.all()
context['missionpages'] = missionpages
return context
# =====================================================================
# Mission Comments Page Setup
# =====================================================================
class CustomComment(XtdComment):
page = ParentalKey(MissionCommentsPage, on_delete=models.CASCADE, related_name='customcomments')
def save(self, *args, **kwargs):
if self.user:
self.user_name = self.user.display_name
self.page = MissionCommentsPage.objects.get(pk=self.object_pk)
super(CustomComment, self).save(*args, **kwargs)
# =====================================================================
# Snippet Models
# =====================================================================
# Snippet model for mission categories (i.e. cruise data, landed data, etc)
@register_snippet
class MissionCategory(models.Model):
name = models.CharField(max_length=255)
icon = models.ForeignKey(
'wagtailimages.Image', null=True, blank=True,
on_delete=models.SET_NULL, related_name='+'
)
panels = [
FieldPanel('name'),
ImageChooserPanel('icon'),
]
def __str__(self):
return self.name
class Meta:
verbose_name_plural = 'mission categories'
# Snippet model for the roles that will be associated with reviewers
@register_snippet
class Role(models.Model):
role = models.CharField(max_length=50)
panels = [
FieldPanel('role'),
]
def __str__(self):
return self.role
class Meta:
verbose_name_plural = 'roles'
# Snippet model for reviewer affiliations
@register_snippet
class Affiliate(models.Model):
affiliation = models.CharField(max_length=250)
panels = [
FieldPanel('affiliation'),
]
def __str__(self):
return self.affiliation
class Meta:
verbose_name_plural = 'affiliations'
# Snippet model for reviewers
@register_snippet
class Reviewer(ClusterableModel):
name = models.CharField(max_length=250)
affiliation = models.ForeignKey(
'missions.Affiliate', on_delete=models.CASCADE, blank=True, null=True)
role = models.ForeignKey(
'missions.Role', on_delete=models.CASCADE, blank=True, null=True)
email = models.CharField(max_length=250, blank=True, null=True)
uid = models.ForeignKey(
settings.AUTH_USER_MODEL, on_delete=models.CASCADE, blank=True, null=True)
panels = [
MultiFieldPanel([
FieldPanel('name'),
FieldPanel('email'),
FieldPanel('uid'),
FieldPanel('affiliation', widget=forms.Select),
FieldPanel('role', widget=forms.Select),
], heading="Reviewer information"),
]
def __str__(self):
return self.name
class Meta:
ordering = ['name']
verbose_name_plural = 'reviewers'
# Snippet model for lien types
@register_snippet
class LienType(models.Model):
lienType = models.CharField(max_length=100)
panels = [
        FieldPanel('lienType'),
]
def __str__(self):
return self.lienType
class Meta:
verbose_name_plural = 'lien types'
# Snippet model for lien statuses
@register_snippet
class LienStatus(models.Model):
status = models.CharField(max_length=20)
panels = [
FieldPanel('status')
]
def __str__(self):
return self.status
class Meta:
verbose_name_plural = 'lien statuses'
# Snippet model for liens
@register_snippet
class Lien(models.Model):
lienType = models.ForeignKey(
LienType, on_delete=models.CASCADE, blank=True, null=True)
assigned = models.ForeignKey(
Reviewer, on_delete=models.CASCADE, blank=True, null=True)
status = models.ForeignKey(
LienStatus, on_delete=models.CASCADE, blank=True, null=True)
comment = models.TextField(blank=True)
reporter = models.ForeignKey(
Reviewer, related_name='%(class)s_related', on_delete=models.CASCADE, blank=True, null=True)
notes = models.TextField(blank=True)
panels = [
MultiFieldPanel([
FieldPanel('lienType'),
FieldPanel('assigned'),
FieldPanel('status'),
FieldPanel('comment'),
FieldPanel('reporter'),
FieldPanel('notes'),
], heading="Lien information"),
]
def __str__(self):
        return 'Description: ' + self.comment |
the-stack_106_27521 | import unittest
from pydantic import ValidationError
from net_models.models import (
KeyBase,
KeyChain,
VLANModel,
RouteTarget,
VRFAddressFamily,
VRFModel
)
from tests.BaseTestClass import TestBaseNetModel, TestVendorIndependentBase
class TestKeyBase(TestVendorIndependentBase):
TEST_CLASS = KeyBase
def test_valid_01(self):
test_cases = [
{
"test_name": "Test-xyz",
"data": {
"encryption_type": 0,
"value": "SuperSecret"
}
}
]
for test_case in test_cases:
with self.subTest(msg=test_case["test_name"]):
test_obj = self.TEST_CLASS(**test_case["data"])
class TestKeyChain(TestVendorIndependentBase):
TEST_CLASS = KeyChain
def test_valid_01(self):
test_cases = [
{
"test_name": "Test-xyz",
"data": {
"name": "KC-01",
"keys_list": [
{
"encryption_type": 0,
"value": "SuperSecret"
}
]
}
}
]
for test_case in test_cases:
with self.subTest(msg=test_case["test_name"]):
test_obj = self.TEST_CLASS(**test_case["data"])
class TestVLANModel(TestVendorIndependentBase):
TEST_CLASS = VLANModel
def test_valid_01(self):
test_payload = {
"vlan_id": "100",
"name": "Vlan-100"
}
test_obj = self.TEST_CLASS(**test_payload)
self.assertTrue(
all([hasattr(test_obj, x) for x in test_payload.keys()])
)
class TestRouteTarget(TestVendorIndependentBase):
TEST_CLASS = RouteTarget
def test_valid_01(self):
test_cases = [
{
"test_name": "Test-01",
"data": {
"rt": "1:1",
"action": "both"
}
},
{
"test_name": "Test-01",
"data": {
"rt": "1:1",
"action": "both",
"rt_type": "stitching"
}
}
]
for test_case in test_cases:
with self.subTest(msg=test_case["test_name"]):
test_obj = self.TEST_CLASS(**test_case["data"])
class TestVRFAddressFamily(TestVendorIndependentBase):
TEST_CLASS = VRFAddressFamily
class TestVRFModel(TestVendorIndependentBase):
TEST_CLASS = VRFModel
def test_valid_01(self):
test_cases = [
{
"test_name": "Test-01",
"data": {
"name": "MGMT-VRF"
}
},
{
"test_name": "Test-02",
"data": {
"name": "MGMT-VRF",
"rd": "1:1"
}
},
{
"test_name": "Test-03",
"data": {
"name": "MGMT-VRF",
"rd": "1:1",
"address_families": [
{
"afi": "ipv4",
"route_targets": [
{
"rt": "1:1",
"action": "both"
},
{
"rt": "1:1",
"action": "both",
"rt_type": "stitching"
}
]
}
]
}
}
]
for test_case in test_cases:
with self.subTest(msg=test_case["test_name"]):
test_obj = self.TEST_CLASS(**test_case["data"])
if __name__ == '__main__':
unittest.main()
|
the-stack_106_27522 | """Python Exercise 62:
Improve Exercise 61;
ask the user whether they want to show a few more terms.
The program ends when they ask for 0 more terms."""
# user input
primeiro = int(input('Primeiro termo: '))
razao = int(input('Razão da PA: '))
# variables
termo = primeiro
contador = 1
total = 0
mais = 10
# nested structures - a while loop inside a while loop
while mais != 0:
total += mais
while contador <= total:
print('{} ➞ '.format(termo), end='')
termo += razao
contador += 1
print('PAUSA')
mais = int(input('Quantos termos você quer a mais: '))
print('Progressão finalizada com {} termos exibidos.'.format(contador - 1)) # Guanabara: total
|
the-stack_106_27524 | '''This file matches labels with predictions and computes evaluation metrics for the algorithm.'''
import numpy as np
class result_analysis:
    '''match_type: 'iou_match' or 'hit_match'. 'iou_match' finds matched pairs with the maximum IOU value,
    while 'hit_match' finds matched pairs with the maximum number of matches.'''
def __init__(self, iou_threshold: float, match_type: str):
self.iou_threshold = iou_threshold
self.match_type = match_type
self.conf_mat = np.zeros((2,2))
self.all_label_num = 0
self.all_pred_num = 0
self.all_match_num = 0
self.all_match_iou = 0
self.all_pred_event = []
self.all_label_event = []
def __call__(self, label_set, pred_set):
if label_set.ndim == 1:
label_set = label_set[np.newaxis, :]
pred_set = pred_set[np.newaxis, :]
for label_seq, pred_seq in zip(label_set, pred_set):
# print(label_seq.shape)
# print(pred_seq.shape)
match_mat, label_num, predict_num = self.Matching_matrix(label_seq, pred_seq, self.iou_threshold)
            # check whether each predicted event and label event is matched
pred_event = match_mat[:, :, 1].sum(axis = 0)
label_event = match_mat[:, :, 1].sum(axis = 1)
            # change non-zero values to 1, keep 0
pred_event = pred_event.astype(bool).astype(int)
label_event = label_event.astype(bool).astype(int)
self.all_pred_event.extend(pred_event)
self.all_label_event.extend(label_event)
if self.match_type == 'hit_match':
match_number, match_iou = self.hit_matching(match_mat)
elif self.match_type == 'iou_match':
match_number, match_iou = self.iou_matching(match_mat)
else:
print('match type must be hit_matching or iou_matching')
self.all_label_num += label_num
self.all_pred_num += predict_num
self.all_match_num += match_number
self.all_match_iou += match_iou
self.confusion_matrix(label_seq, pred_seq, self.conf_mat)
def result_process(self):
self.result_summary = {'confusion matrix': self.conf_mat, 'segment precision': self.conf_mat[1,1] / (self.conf_mat[1,0] + self.conf_mat[1,1]),
                               'segment recall': self.conf_mat[1,1] / (self.conf_mat[0,1] + self.conf_mat[1,1]),
                               'segment F-score': 2 * self.conf_mat[1,1]/(2 * self.conf_mat[1,1] + self.conf_mat[0,1] + self.conf_mat[1,0]),
'number of label': self.all_label_num, 'number of prediction': self.all_pred_num, 'number of matching event': self.all_match_num,
'total matching iou': self.all_match_iou, 'encounter number error' : abs(self.all_pred_num - self.all_label_num),
'encounter error rate': abs(self.all_pred_num - self.all_label_num)/ self.all_label_num,
'event precision': self.all_match_num/ self.all_pred_num, 'event recall': self.all_match_num/ self.all_label_num,
'event F-score': 2*self.all_match_num/ (self.all_label_num + self.all_pred_num),
'pred event': np.asarray(self.all_pred_event), 'label event': np.asarray(self.all_label_event)}
    # draw confusion matrix (operates on the whole label/prediction arrays)
@staticmethod
def confusion_matrix(labels, preds, conf_matrix):
for p, t in zip(preds, labels):
conf_matrix[p, t] += 1
return conf_matrix
@staticmethod
def Matching_matrix(label_seq, pred_seq, iou_threshold):
        '''This function builds a matching matrix between the prediction and label sequences based on an IOU threshold. The matrix
        is three-dimensional: the row index stands for labels and the column index stands for predictions; the first layer of
        the z dimension holds the IOU between the corresponding label and prediction, and the second layer indicates
        whether the corresponding label and prediction are considered matched (TP) under the given IOU threshold.
        This function also returns the number of label events and prediction events.'''
#make sure that the prediction seq and label seq are numpy array
pred_seq = np.asarray(pred_seq)
label_seq = np.asarray(label_seq)
#seperate all labels in the label sequence and store in list
if label_seq[0] == 1:
count = 0
label_chunk = [[0]]
elif label_seq[0] == 0:
count = -1
label_chunk = []
for i in range(1 , label_seq.shape[0]):
if label_seq[i] == 1 and label_seq[i-1] != 1:
label_chunk.append([i])
count += 1
elif label_seq[i] ==1 and label_seq[i-1] == 1:
label_chunk[count].append(i)
#seperate all prediction in the pred sequence and store in list
if pred_seq[0] == 1:
count = 0
pred_chunk = [[0]]
elif pred_seq[0] == 0:
count = -1
pred_chunk = []
for i in range(1 , pred_seq.shape[0]):
if pred_seq[i] == 1 and pred_seq[i-1] != 1:
pred_chunk.append([i])
count += 1
elif pred_seq[i] ==1 and pred_seq[i-1] == 1:
pred_chunk[count].append(i)
#build the zero value matrix with right shape
matching_mat = np.zeros((len(label_chunk), len(pred_chunk), 2))
        # test the function:
        # label_seq = np.array((1,0,0,0,1,1,0,0,1,1,1,0,1))
        # pred_seq = np.array((1,1,0,0,0,1,1,0,0,1,1,0,0))
        # Matching_matrix(label_seq, pred_seq, 0)
        # calculate the IOU of each separate label and prediction pair and send the value to the first layer of the matrix
for i in range(len(label_chunk)):
for j in range(len(pred_chunk)):
intersection = list(set(label_chunk[i]) & set(pred_chunk[j]))
union = list(set(label_chunk[i]) | set(pred_chunk[j]))
matching_mat[i,j,0] = len(intersection)/len(union)
                # decide whether it counts as a hit based on the IOU value and threshold, and send the value to the second layer of the matrix
if matching_mat[i,j,0] > iou_threshold:
matching_mat[i,j,1] = 1
return matching_mat, len(label_chunk), len(pred_chunk)
@staticmethod
def hit_matching(matching_mat):
        '''This function finds the hit-based matching number (TP) by searching the second layer of the matching matrix'''
if matching_mat.size == 0:
best_match_number = 0
best_match_iou = 0
else:
for i in range(1, min(matching_mat.shape[0], matching_mat.shape[1])):
for j in range(i, matching_mat.shape[0]):
#find the sub area for the maximum matching number and add to the current column number
matching_mat[j,i,1] += np.max(matching_mat[:j, :i, 1])
#find the maximum accumulate iou value correspond to maximum matching number and add to current column iou number
index = np.where(matching_mat[:j, :i, 1]==np.max(matching_mat[:j, :i, 1]))
matching_mat[j,i,0] += max(matching_mat[index[0], index[1], 0])
for k in range(i+1, matching_mat.shape[1]):
#find the sub area for the maximum matching number and add to the current row number
matching_mat[i,k,1] += np.max(matching_mat[:i, :k, 1])
#find the maximum accumulate iou value correspond to maximum matching number and add to current row iou number
index = np.where(matching_mat[:i, :k, 1]==np.max(matching_mat[:i, :k, 1]))
matching_mat[i,k,0] += max(matching_mat[index[0], index[1], 0])
#find the best matching number and corresponding IOU value
best_match_number = np.max(matching_mat[:, :, 1])
best_index = np.where(matching_mat[:, :, 1]==np.max(matching_mat[:, :, 1]))
best_match_iou = max(matching_mat[best_index[0], best_index[1], 0])
return best_match_number, best_match_iou
@staticmethod
def iou_matching(matching_mat):
        '''This function finds the IOU-based matching number (TP) by searching the first layer of the matching matrix'''
if matching_mat.size == 0:
best_match_number = 0
best_match_iou = 0
else:
for i in range(1, min(matching_mat.shape[0], matching_mat.shape[1])):
for j in range(i, matching_mat.shape[0]):
#find the sub area for the maximum matching IOU and add to the current column IOU
matching_mat[j,i,0] += np.max(matching_mat[:j, :i, 0])
#find the maximum accumulate iou value correspond to maximum matching number and add to current column number
index = np.where(matching_mat[:j, :i, 0]==np.max(matching_mat[:j, :i, 0]))
matching_mat[j,i,1] += max(matching_mat[index[0], index[1], 1])
for k in range(i+1, matching_mat.shape[1]):
#find the sub area for the maximum matching IOU and add to the current row IOU
matching_mat[i,k,0] += np.max(matching_mat[:i, :k, 0])
#find the maximum accumulate iou value correspond to maximum matching number and add to current row number
index = np.where(matching_mat[:i, :k, 0]==np.max(matching_mat[:i, :k, 0]))
matching_mat[i,k,1] += max(matching_mat[index[0], index[1], 1])
#find the best matching IOU value and corresponding matching number
best_match_iou = np.max(matching_mat[:, :, 0])
best_index = np.where(matching_mat[:, :, 0]==np.max(matching_mat[:, :, 0]))
best_match_number= max(matching_mat[best_index[0], best_index[1], 1])
return best_match_number, best_match_iou
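# --- Usage sketch (not part of the original module) ---
# A minimal example of how the class above is driven: call the instance on
# batches of binary label/prediction sequences, then call result_process() and
# read result_summary. The toy arrays below are invented for illustration.
if __name__ == '__main__':
    toy_labels = np.array([[0, 1, 1, 0, 0, 1, 1, 1, 0, 0]])
    toy_preds = np.array([[0, 1, 1, 1, 0, 0, 1, 1, 0, 0]])
    analyser = result_analysis(iou_threshold=0.5, match_type='iou_match')
    analyser(toy_labels, toy_preds)   # accumulate statistics for this batch
    analyser.result_process()         # build the summary dictionary
    print(analyser.result_summary['event F-score'])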
|
the-stack_106_27525 | import json
import boto3
def getGardenerInfo(event):
params = event['queryStringParameters']
usr_id = event['requestContext']['authorizer']['claims']['cognito:username']
try:
enviroment = params['env']
except:
enviroment = 'dev'
# Choosing enviroment. Production (online) or Development (localhost)
if enviroment == 'dev':
ddb = boto3.client("dynamodb",
endpoint_url="http://localhost:8000",
aws_access_key_id="vmnu1",
aws_secret_access_key="z7m86")
else:
ddb = boto3.client("dynamodb")
# Ask for the gardeners info. Maybe it's his/her first time here!
query = {
"TableName": "gardeners",
"Key": {
"user_id": {"S":usr_id}
}
}
ans = ddb.get_item(**query)
gardener_item = ans.get('Item', None)
if gardener_item:
out_res = 'OK'
# Go for the gardens info
query = {
"TableName": "gardens",
"KeyConditionExpression": "#ea440 = :ea440",
"ExpressionAttributeNames": {"#ea440":"gardener_id"},
"ExpressionAttributeValues": {":ea440": {"S":usr_id}}
}
ans = ddb.query(**query)
        # debug output of the gardens query response
        print(ans)
gardens_item = ans.get('Items', None)
else:
out_res = 'USRCRT'
gardens_item = None
return {
'out_res': out_res,
'gardener_info' : gardener_item,
'gardens_info' : gardens_item
}
def lambda_handler(event, context):
try:
gardenerinfo = getGardenerInfo(event)
return {
'statusCode': 200,
'headers' : {
'Access-Control-Allow-Headers' : 'Content-Type,X-Amz-Date,Authorization,X-Api-Key,X-Amz-Security-Token,QUMIR-TOKEN'
,'Access-Control-Allow-Methods' : 'GET,OPTIONS'
,'Access-Control-Allow-Origin' : '*'
#,'Access-Control-Allow-Credentials' : 'true'
},
'body': json.dumps(gardenerinfo)
#'body' : json.dumps(ans)
}
except BaseException as error:
#print("Unknown error while putting item: " + error.response['Error']['Message'])
print(error)
return {
'statusCode': 400,
'headers' : {
'Access-Control-Allow-Headers' : 'Content-Type,X-Amz-Date,Authorization,X-Api-Key,X-Amz-Security-Token,QUMIR-TOKEN'
,'Access-Control-Allow-Methods' : 'GET,OPTIONS'
,'Access-Control-Allow-Origin' : '*'
#,'Access-Control-Allow-Credentials' : 'true'
},
'body': None
} |
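# --- Local test sketch (not part of the original handler) ---
# A hand-rolled event containing only the two fields the handler reads
# ('queryStringParameters' and the Cognito username claim); the values are
# placeholders, and running this locally assumes DynamoDB Local is up on port 8000.
if __name__ == '__main__':
    fake_event = {
        'queryStringParameters': {'env': 'dev'},
        'requestContext': {
            'authorizer': {'claims': {'cognito:username': 'test-user'}}
        }
    }
    print(lambda_handler(fake_event, None))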
the-stack_106_27526 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2016 The Cartographer Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import rospy
from tf.msg import tfMessage
def main():
rospy.init_node('tf_remove_frames')
publisher = rospy.Publisher('/tf_out', tfMessage, queue_size=1)
remove_frames = rospy.get_param('~remove_frames', [])
def callback(msg):
msg.transforms = [
t for t in msg.transforms
if t.header.frame_id.lstrip('/') not in remove_frames and
t.child_frame_id.lstrip('/') not in remove_frames
]
publisher.publish(msg)
rospy.Subscriber('/tf_in', tfMessage, callback)
rospy.spin()
if __name__ == '__main__':
main()
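# Example invocation (an assumption, not from the original file): remap the topics
# and pass the private list parameter when starting the node, e.g.
# rosrun <your_package> tf_remove_frames.py /tf_in:=/tf /tf_out:=/tf_filtered _remove_frames:="[odom, map]"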
|
the-stack_106_27527 | import numpy as np
from gym import utils
from gym.envs.mujoco import mujoco_env
class Walker2dEnv(mujoco_env.MujocoEnv, utils.EzPickle):
def __init__(self, xml_file='walker2d.xml'):
mujoco_env.MujocoEnv.__init__(self, xml_file, 4)
utils.EzPickle.__init__(self)
def step(self, a):
posbefore = self.sim.data.qpos[0]
self.do_simulation(a, self.frame_skip)
posafter, height, ang = self.sim.data.qpos[0:3]
alive_bonus = 1.0
reward = ((posafter - posbefore) / self.dt)
reward += alive_bonus
reward -= 1e-3 * np.square(a).sum()
done = not (height > 0.8 and height < 2.0 and
ang > -1.0 and ang < 1.0)
ob = self._get_obs()
return ob, reward, done, {}
def _get_obs(self):
qpos = self.sim.data.qpos
qvel = self.sim.data.qvel
return np.concatenate([qpos[1:], np.clip(qvel, -10, 10)]).ravel()
def reset_model(self):
self.set_state(
self.init_qpos + self.np_random.uniform(low=-.005, high=.005, size=self.model.nq),
self.init_qvel + self.np_random.uniform(low=-.005, high=.005, size=self.model.nv)
)
return self._get_obs()
def viewer_setup(self):
self.viewer.cam.trackbodyid = 2
self.viewer.cam.distance = self.model.stat.extent * 0.5
self.viewer.cam.lookat[2] = 1.15
self.viewer.cam.elevation = -20
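# --- Smoke test (not part of the original file) ---
# Assumes MuJoCo and the walker2d.xml asset are available, since MujocoEnv
# loads the model in the constructor; uses the classic 4-tuple gym step API.
if __name__ == '__main__':
    env = Walker2dEnv()
    obs = env.reset()
    for _ in range(5):
        obs, reward, done, info = env.step(env.action_space.sample())
        if done:
            obs = env.reset()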
|
the-stack_106_27535 | import datetime
import sys
from pytorch_pfn_extras.training import extension
from pytorch_pfn_extras.training.extensions import util
class ProgressBar(extension.Extension):
"""An extension to print a progress bar and recent training updater.
This extension prints a progress bar at every call. It watches the current
iteration and epoch to print the bar.
Args:
training_length (tuple or None): Length of whole training. It consists
of an integer and either ``'epoch'`` or ``'iteration'``. If this
value is omitted and the stop trigger of the manager is
:class:`IntervalTrigger`, this extension uses its attributes to
determine the length of the training.
update_interval (int): Number of iterations to skip printing the
progress bar.
bar_length (int): Length of the progress bar in characters.
out: Stream to print the bar. Standard output is used by default.
"""
def __init__(self, training_length=None, update_interval=100,
bar_length=50, out=sys.stdout):
self._training_length = training_length
self._update_interval = update_interval
self._bar_length = bar_length
self._out = out
self._pbar = _ManagerProgressBar(
self._training_length, self._bar_length, self._out)
def __call__(self, manager):
iteration = manager.updater.iteration
# print the progress bar
if iteration % self._update_interval == 0:
self._pbar.update(manager)
def finalize(self):
self._pbar.close()
class _ManagerProgressBar(util.ProgressBar):
def __init__(self, training_length, bar_length, out):
super().__init__(out)
self.training_length = training_length
self.bar_length = bar_length
self.updater_template = None
def get_lines(self, manager):
assert manager is not None
lines = []
iteration = manager.updater.iteration
epoch = manager.updater.epoch_detail
if self.training_length is None:
t = manager._stop_trigger
self.training_length = t.get_training_length()
length, unit = self.training_length
if unit == 'iteration':
rate = iteration / length
else:
rate = epoch / length
rate = min(rate, 1.0)
bar_length = self.bar_length
marks = '#' * int(rate * bar_length)
lines.append(' total [{}{}] {:6.2%}\n'.format(
marks, '.' * (bar_length - len(marks)), rate))
epoch_rate = epoch - int(epoch)
marks = '#' * int(epoch_rate * bar_length)
lines.append('this epoch [{}{}] {:6.2%}\n'.format(
marks, '.' * (bar_length - len(marks)), epoch_rate))
if self.updater_template is None:
self.updater_template = (
'{0.iteration:10} iter, {0.epoch} epoch / %s %ss\n' %
self.training_length)
updater = self.updater_template.format(manager.updater)
lines.append(updater)
speed_t, speed_e = self.update_speed(iteration, epoch)
if unit == 'iteration':
estimated_time = (length - iteration) / speed_t
else:
estimated_time = (length - epoch) / speed_e
estimated_time = max(estimated_time, 0.0)
lines.append('{:10.5g} iters/sec. Estimated time to finish: {}.\n'
.format(speed_t,
datetime.timedelta(seconds=estimated_time)))
return lines
|
the-stack_106_27538 | import json
import random
import numpy
import tensorflow as tf
from tensorflow.contrib import rnn
class DataSet(object):
def __init__(self, cases, labels):
self._num_examples = cases.shape[0]
self._cases = cases
self._labels = labels
self._epochs_completed = 0
self._index_in_epoch = 0
@property
def cases(self):
return self._cases
@property
def labels(self):
return self._labels
@property
def num_examples(self):
return self._num_examples
@property
def epochs_completed(self):
return self._epochs_completed
def next_batch(self, batch_size):
"""Return the next `batch_size` examples from this data set."""
start = self._index_in_epoch
self._index_in_epoch += batch_size
if self._index_in_epoch > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Shuffle the data
perm = numpy.arange(self._num_examples)
numpy.random.shuffle(perm)
self._cases = self._cases[perm]
self._labels = self._labels[perm]
# Start next epoch
start = 0
self._index_in_epoch = batch_size
assert batch_size <= self._num_examples
end = self._index_in_epoch
return self._cases[start:end], self._labels[start:end]
with open('enwiki.features_damaging.20k_2015.tsv', 'r') as f:
res = [i.split('\t') for i in f.read().split('\n')]
new_res = []
labels = []
test_features = []
test_labels = []
for case in res:
new_case = []
for i in case[:-1]:
if i == 'False':
new_case.append(0)
elif i == 'True':
new_case.append(1)
else:
new_case.append(float(i))
label = case[-1]
if label == 'False':
label = [0, 1]
elif label == 'True':
label = [1, 0]
else:
continue
if random.random() < 0.2:
test_features.append(new_case)
test_labels.append(label)
else:
labels.append(label)
new_res.append(new_case)
dataset = DataSet(numpy.array(new_res), numpy.array(labels))
test = DataSet(numpy.array(test_features), numpy.array(test_labels))
# Parameters
learning_rate = 0.001
training_iters = 100000
batch_size = 128
display_step = 10
# Network Parameters
n_input = 26 # number of input features per timestep
n_steps = 3 # timesteps
n_hidden = 128 # hidden layer num of features
n_classes = 2 # total classes (damaging / non-damaging)
# tf Graph input
x = tf.placeholder("float", [None, n_steps, n_input])
y = tf.placeholder("float", [None, n_classes])
# Define weights
weights = {
# Hidden layer weights => 2*n_hidden because of forward + backward cells
'out': tf.Variable(tf.random_normal([2*n_hidden, n_classes]))
}
biases = {
'out': tf.Variable(tf.random_normal([n_classes]))
}
def BiRNN(x, weights, biases):
# Prepare data shape to match `bidirectional_rnn` function requirements
# Current data input shape: (batch_size, n_steps, n_input)
# Required shape: 'n_steps' tensors list of shape (batch_size, n_input)
# Permuting batch_size and n_steps
x = tf.transpose(x, [1, 0, 2])
# Reshape to (n_steps*batch_size, n_input)
x = tf.reshape(x, [-1, n_input])
# Split to get a list of 'n_steps' tensors of shape (batch_size, n_input)
x = tf.split(x, n_steps, 0)
# Define lstm cells with tensorflow
# Forward direction cell
lstm_fw_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)
# Backward direction cell
lstm_bw_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)
# Get lstm cell output
try:
outputs, _, _ = rnn.static_bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, x,
dtype=tf.float32)
except Exception: # Old TensorFlow version only returns outputs not states
outputs = rnn.static_bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, x,
dtype=tf.float32)
# Linear activation, using rnn inner loop last output
return tf.matmul(outputs[-1], weights['out']) + biases['out']
pred = BiRNN(x, weights, biases)
# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# Evaluate model
correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# Initializing the variables
init = tf.global_variables_initializer()
# Launch the graph
with tf.Session() as sess:
sess.run(init)
step = 1
# Keep training until reach max iterations
while step * batch_size < training_iters:
batch_x, batch_y = dataset.next_batch(batch_size)
        # Reshape data to get n_steps sequences of n_input elements
batch_x = batch_x.reshape((batch_size, n_steps, n_input))
# Run optimization op (backprop)
sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})
if step % display_step == 0:
# Calculate batch accuracy
acc = sess.run(accuracy, feed_dict={x: batch_x, y: batch_y})
# Calculate batch loss
loss = sess.run(cost, feed_dict={x: batch_x, y: batch_y})
print("Iter " + str(step*batch_size) + ", Minibatch Loss= " + \
"{:.6f}".format(loss) + ", Training Accuracy= " + \
"{:.5f}".format(acc))
step += 1
print("Optimization Finished!")
    # Calculate accuracy on the held-out test set
test_data = test.cases.reshape((-1, n_steps, n_input))
test_label = test.labels
print("Testing Accuracy:", \
sess.run(accuracy, feed_dict={x: test_data, y: test_label})) |
the-stack_106_27539 | # COVID simulation
# Novice compartmental model with time-delay ODE, including incubation, quarantine, hospitalization, super spreader, quarantine leak, immunity, etc.
# The parameters for COVID-19 are generally taken from other papers
# -----Most parameters regarding medical containment are solely based on estimation and fitting------
# Typically I assume that under governmental control, the contact rate 'beta_e' and the quarantine rate 'k0' for the exposed group can change significantly. One can apply the _logistic function to modify the parameters under certain policies.
# It is highly recommended to apply Markov chain Monte Carlo (MCMC) on different nodes for a more precise forecast
from __future__ import division
import numpy as np
from numpy import array
from matplotlib.pylab import *
from scipy.integrate import odeint
import matplotlib.pyplot as plt
from pylab import cos, linspace, subplots
from ddeint import ddeint
def _logistic(t,start,duration):
"""_logistic function, e.g. Fermi-Dirac statistics
from 'start', costs 'duration' days"""
return 1-1/(np.exp((t-start-duration/2)/(duration/8))+1)
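# Shape of the switch (illustration added here, not in the original script):
# _logistic(19, 19, 7) ~ 0.02, _logistic(22.5, 19, 7) = 0.5, _logistic(26, 19, 7) ~ 0.98,
# i.e. a parameter change ramps in smoothly over the 'duration'-day window after 'start'.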
#parameters
time = np.arange(0,18,1)
data = np.array([13,15,19,23,35,45,51,85,115,163,206,273,319,456,590,798,1140,1372]) # actual data from 27Feb
n = 67782310 # susceptible individuals # UK Population
beta_0 = 0.8 # contact rate
gamma_e1 = 1/4 # daily regular heal rate
gamma_e2 = 1/10 # daily super-spreader heal rate
gamma_e3 = 1/3 # recover rate for hospitalization
be = 0.995 # regular infected proportion
Lambda = 482 # daily birth rate
mu = 0.000024 # daily population death rate
mu_d = 0.034 # death rate of non-hospitalized
mu_d1 = 0.01 # death rate of hospitalized
sigma = 1/6 # latent period converting rate
pro = 0.17 # latent period infectious ratio
m1 = 0.3 # decay const 1
m2 = 0.3 # decay const 2
effi = 0.01 # leak rate of isolation, 179/804 on 13Mar
theta = 0.02 # immune waning rate
xi = 0.0002 # immune recover rate
k0 = 0.05 # quarantine rate for exposed
k1 = 0.84 # quarantine rate for infectious
alpha = 0.14 # hospitalization rate for quarantined
phi = 0.14 # hospit rate for infectious, 4/15 on 4Mar, 2/44 on 5Mar, 5/46 on 6Mar, 115/804 on 13 Mar
eini = 250 # initial exposed number
delaye = 6 # incubation time
delayR = 10 # recover duration
delayi1 = 10 # heal duration
delayi2 = 1 # hospitalization delay
delayP = 1 # hospital. delay
delayP2 = 10 # heal duration
delayQ = 4 # quarantine to hospitalization time
delayH = 10 # heal duration
k_prime = 0.06
beta_prime = 0.08
# modification after policies
#basic reproduction number estimation
D1 = mu + k0 + sigma + xi
D2 = k1 + mu + phi + gamma_e1
D3 = mu + alpha + xi
D4 = mu + gamma_e3
R0 = (sigma*beta_0)/((mu + xi + sigma)*(mu + gamma_e1 + mu_d)) #Basic reproduction number w/o control
Rc = (beta_0*pro)/D1 + (beta_0*sigma)/(D1*D2) + (k0*beta_0*effi)/(D1*D3) + (sigma*k1*beta_0*effi)/(D1*D2*D3) + (sigma*phi*beta_0*effi)/(D1*D2*D4) + (k0*alpha*beta_0*effi)/(D1*D3*D4)
Rc_prime = (1-beta_prime/beta_0)*((beta_0*pro)/(D1+k_prime) + (beta_0*sigma)/(D1*D2) + ((k0+k_prime)*beta_0*effi)/((D1+k_prime)*D3) + (sigma*k1*beta_0*effi)/((D1+k_prime)*D2*D3) + (sigma*phi*beta_0*effi)/((D1+k_prime)*D2*D4) + (k0*alpha*beta_0*effi)/(D1*D3*D4))#Basic reproduction number w/ control, disregard super spreader
def model(Y,t,de,dr,di1,di2,dp,dp2,dq,dh):
"""ODE groups for balance equations. See compartmental model."""
S,E,Q,I,P,H,R = Y(t)
"""corresponding to susp., expos., quaran., infec., super spreader, hospit., recov."""
Rdr = Y(t-dr)[6]
Ede = Y(t-de)[1]
Idi1 = Y(t-di1)[3]
Idi2 = Y(t-di2)[3]
Pdp = Y(t-dp)[4]
Pdp2 = Y(t-dp2)[4]
Qdq = Y(t-dq)[2]
Hdh = Y(t-dh)[5]
"""t-delay ODE"""
k = k0+k_prime*_logistic(t,19,7)
beta_e = beta_0-beta_prime*_logistic(t,19,7)
dsdt = Lambda - mu*S - beta_e*np.exp(-((m1*I+m2*(Q+H))/n))*(I+P+effi*(H+Q)+pro*E)*S/n + theta*Rdr
dedt = 2500000*np.exp(-5000*t**2)*t + beta_e*np.exp(-((m1*I+m2*(Q+H))/n))*(I+P+effi*(H+Q)+pro*E)*S/n - (mu+xi)*E - (k+ sigma)*Ede
"""inital condition of 250 import cases"""
dqdt = k*Ede + k1*Idi2 - (xi + mu)*Q - alpha*Qdq
didt = be*sigma*Ede - mu*I - (gamma_e1 + mu_d)*Idi1 - (phi + k1)*Idi2
dpdt = (1 - be)*sigma*Ede - mu*P - (gamma_e2 + mu_d)*Pdp2 - (k1 + phi)*Pdp
dhdt = alpha*Qdq + phi*Idi2 + (phi + k1)*Pdp - mu*H - (gamma_e3 + mu_d1)*Hdh
drdt = gamma_e1*Idi1 + gamma_e2*Pdp2 + gamma_e3*Hdh + xi*(Q + E) - mu*R - theta*Rdr
"""balance equations"""
return array([dsdt,dedt,dqdt,didt,dpdt,dhdt,drdt])
g = lambda t : array([n,0,0,0,0,0,0]) # initial value
nmax = 2000
tt = np.linspace(0,40,nmax) #time
yy = ddeint(model,g,tt,fargs=(delaye,delayR,delayi1,delayi2,delayP,delayP2,delayQ,delayH,))
# solving the ODEs
yy[np.where(yy<0)] = 0
fig, ax = subplots(1, figsize=(4, 4))
heal, = ax.plot(tt, yy[:,2]+yy[:,5],c='peru', lw=2) #plot the quarantine and hospitalizations
syndrom_heal, = ax.plot(tt,yy[:,2]+yy[:,3]+yy[:,4]+yy[:,5],c='r',lw=2)
# plot the quarantine, hospitalizations and the rest of illed patients
all_, = ax.plot(tt, yy[:,1]+yy[:,2]+yy[:,3]+yy[:,4]+yy[:,5],c='m', lw=2)
# all unrecovered patients
scatter = plt.scatter(time, data, c='c')
# actual data
plt.text(0, yy[nmax-1,2]/0.8, r'$R_0=%.2f$''\n' r'$R_c \approx %.2f \rightarrow %.2f$'%(R0,Rc,Rc_prime))
plt.legend([all_, heal, syndrom_heal,scatter], ["All infected","Quarantine+Hospitalization", "All syndromatic","Actual data"])
xticks(np.arange(6,40,step = 15), ('Mar', '15', 'Apr', '15', 'May'))
plt.title("Forecast of future United Kingdom\nIf the measures on 13 Mar work a little")
plt.show()
|
the-stack_106_27540 | # coding: utf-8
#
# Copyright 2021 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for jobs.job_test_utils."""
from __future__ import absolute_import
from __future__ import unicode_literals
from unittest import mock
from core import python_utils
from core.jobs import job_test_utils
from core.platform import models
from core.tests import test_utils
import apache_beam as beam
from apache_beam.testing import util as beam_testing_util
MYPY = False
if MYPY: # pragma: no cover
from mypy_imports import base_models
(base_models,) = models.Registry.import_models([models.NAMES.base_model])
class PipelinedTestBaseTests(job_test_utils.PipelinedTestBase):
def test_assert_pcoll_empty_raises_immediately(self) -> None:
# NOTE: Arbitrary operations that produce a non-empty PCollection.
output = self.pipeline | beam.Create([123]) | beam.Map(lambda x: x)
with self.assertRaisesRegexp(AssertionError, 'failed'): # type: ignore[no-untyped-call]
self.assert_pcoll_empty(output)
def test_assert_pcoll_equal_raises_immediately(self) -> None:
# NOTE: Arbitrary operations that produce an empty PCollection.
output = self.pipeline | beam.Create([]) | beam.Map(lambda x: x)
with self.assertRaisesRegexp(AssertionError, 'failed'): # type: ignore[no-untyped-call]
self.assert_pcoll_equal(output, [123])
def test_assert_pcoll_empty_raises_runtime_error_when_called_twice(
self
) -> None:
# NOTE: Arbitrary operations that produce a non-empty PCollection.
output = self.pipeline | beam.Create([]) | beam.Map(lambda x: x)
self.assert_pcoll_empty(output)
self.assertRaisesRegexp( # type: ignore[no-untyped-call]
RuntimeError, 'must be run in the pipeline context',
lambda: self.assert_pcoll_empty(output))
def test_assert_pcoll_equal_raises_runtime_error_when_called_twice(
self
) -> None:
# NOTE: Arbitrary operations that produce a non-empty PCollection.
output = self.pipeline | beam.Create([123]) | beam.Map(lambda x: x)
self.assert_pcoll_equal(output, [123])
self.assertRaisesRegexp( # type: ignore[no-untyped-call]
RuntimeError, 'must be run in the pipeline context',
lambda: self.assert_pcoll_equal(output, [123]))
def test_create_model_sets_date_properties(self) -> None:
model = self.create_model(base_models.BaseModel)
self.assertEqual(model.created_on, self.YEAR_AGO)
self.assertEqual(model.last_updated, self.YEAR_AGO)
class JobTestBaseTests(job_test_utils.JobTestBase):
JOB_CLASS = mock.Mock()
def tearDown(self) -> None:
self.JOB_CLASS.reset_mock()
super(JobTestBaseTests, self).tearDown()
def test_run_job(self) -> None:
self.run_job()
self.job.run.assert_called() # type: ignore[attr-defined]
def test_job_output_is(self) -> None:
self.job.run.return_value = ( # type: ignore[attr-defined]
# NOTE: Arbitrary operations that produce a non-empty PCollection.
self.pipeline | beam.Create([123]) | beam.Map(lambda x: x))
self.assert_job_output_is([123])
def test_job_output_is_empty(self) -> None:
self.job.run.return_value = ( # type: ignore[attr-defined]
# NOTE: Arbitrary operations that produce an empty PCollection.
self.pipeline | beam.Create([]) | beam.Map(lambda x: x))
self.assert_job_output_is_empty()
class DecorateBeamErrorsTests(test_utils.TestBase):
def assert_error_is_decorated(
self, actual_msg: str, decorated_msg: str
) -> None:
"""Asserts that decorate_beam_errors() raises with the right message.
Args:
actual_msg: str. The actual message raised originally.
decorated_msg: str. The expected decorated message produced by the
context manager.
"""
try:
with job_test_utils.decorate_beam_errors():
raise beam_testing_util.BeamAssertException(actual_msg)
except AssertionError as e:
self.assertMultiLineEqual(python_utils.UNICODE(e), decorated_msg)
def test_decorates_message_with_both_unexpected_and_missing(self) -> None:
actual_msg = (
'Error, unexpected elements ["abc", "def"], '
'missing elements ["123", "456"] [while running FooJob]')
decorated_msg = (
'failed while running FooJob\n'
'\n'
'Unexpected:\n'
' \'abc\'\n'
' \'def\'\n'
'\n'
'Missing:\n'
' \'123\'\n'
' \'456\'\n'
)
self.assert_error_is_decorated(actual_msg, decorated_msg)
def test_decorates_message_with_only_unexpected(self) -> None:
actual_msg = (
'Error, unexpected elements ["abc", "def"] [while running FooJob]')
decorated_msg = (
'failed while running FooJob\n'
'\n'
'Unexpected:\n'
' \'abc\'\n'
' \'def\'\n'
)
self.assert_error_is_decorated(actual_msg, decorated_msg)
def test_decorates_message_with_only_missing(self) -> None:
actual_msg = (
'Error, missing elements ["abc", "def"] [while running FooJob]')
decorated_msg = (
'failed while running FooJob\n'
'\n'
'Missing:\n'
' \'abc\'\n'
' \'def\'\n'
)
self.assert_error_is_decorated(actual_msg, decorated_msg)
def test_decorates_message_with_comparison_to_empty_list(self) -> None:
actual_msg = (
'Error [] == ["abc", "def"] [while running FooJob]')
decorated_msg = (
'failed while running FooJob\n'
'\n'
'Unexpected:\n'
' \'abc\'\n'
' \'def\'\n'
)
self.assert_error_is_decorated(actual_msg, decorated_msg)
def test_does_not_decorate_message_without_element_info(self) -> None:
actual_msg = 'Error something went wrong [while running FooJob]'
self.assert_error_is_decorated(actual_msg, actual_msg)
def test_does_not_decorate_message_with_invalid_unexpected_value(
self
) -> None:
actual_msg = (
'Error, unexpected elements [abc, def] [while running FooJob]')
self.assert_error_is_decorated(actual_msg, actual_msg)
def test_does_not_decorate_message_with_invalid_missing_value(self) -> None:
actual_msg = 'Error, missing elements [abc, def] [while running FooJob]'
self.assert_error_is_decorated(actual_msg, actual_msg)
def test_does_not_decorate_message_with_non_beam_type(self) -> None:
with self.assertRaisesRegexp(Exception, 'Error coming through!'): # type: ignore[no-untyped-call]
with job_test_utils.decorate_beam_errors():
raise Exception('Error coming through!')
|
the-stack_106_27542 | #!/usr/bin/env python
# =============================================================================
# GLOBAL IMPORTS
# =============================================================================
import os
import numpy as np
import pandas as pd
from pKa_macrostate_analysis import mae, rmse#, barplot_with_CI_errorbars
from pKa_macrostate_analysis import compute_bootstrap_statistics
import shutil
import seaborn as sns
from matplotlib import pyplot as plt
from matplotlib import cm
import joypy
# =============================================================================
# PLOTTING FUNCTIONS
# =============================================================================
def barplot_with_CI_errorbars(df, x_label, y_label, y_lower_label, y_upper_label, figsize=False):
"""Creates bar plot of a given dataframe with asymmetric error bars for y axis.
Args:
df: Pandas Dataframe that should have columns with columnnames specified in other arguments.
x_label: str, column name of x axis categories
y_label: str, column name of y axis values
y_lower_label: str, column name of lower error values of y axis
y_upper_label: str, column name of upper error values of y axis
figsize: tuple, size in inches. Default value is False.
"""
# Column names for new columns for delta y_err which is calculated as | y_err - y |
delta_lower_yerr_label = "$\Delta$" + y_lower_label
delta_upper_yerr_label = "$\Delta$" + y_upper_label
data = df # Pandas DataFrame
data.loc[:,delta_lower_yerr_label] = data.loc[:,y_label] - data.loc[:,y_lower_label]
data.loc[:,delta_upper_yerr_label] = data.loc[:,y_upper_label] - data.loc[:,y_label]
# Color
current_palette = sns.color_palette()
sns_color = current_palette[2]
# Plot style
plt.close()
plt.style.use(["seaborn-talk", "seaborn-whitegrid"])
plt.rcParams['axes.labelsize'] = 20 # 18
plt.rcParams['xtick.labelsize'] = 16 #14
plt.rcParams['ytick.labelsize'] = 18 #16
plt.rcParams['legend.fontsize'] = 16
plt.rcParams['legend.handlelength'] = 2
plt.rcParams['figure.autolayout'] = True
#plt.tight_layout()
# If figsize is specified
if figsize != False:
plt.figure(figsize=figsize)
# Plot
x = range(len(data[y_label]))
y = data[y_label]
plt.bar(x, y)
plt.xticks(x, data[x_label], rotation=90)#, horizontalalignment='right')
plt.errorbar(x, y, yerr=(data[delta_lower_yerr_label], data[delta_upper_yerr_label]),
fmt="none", ecolor=sns_color, capsize=3, capthick=True)
plt.xlabel(x_label)
plt.ylabel(y_label)
def barplot_with_CI_errorbars_and_4groups(df1, df2, df3, x_label, y_label, y_lower_label, y_upper_label,group_labels):
"""Creates bar plot of a given dataframe with asymmetric error bars for y axis.
Args:
df: Pandas Dataframe that should have columns with columnnames specified in other arguments.
x_label: str, column name of x axis categories
y_label: str, column name of y axis values
y_lower_label: str, column name of lower error values of y axis
y_upper_label: str, column name of upper error values of y axis
group_labels: List of 4 method category labels
"""
# Column names for new columns for delta y_err which is calculated as | y_err - y |
delta_lower_yerr_label = "$\Delta$" + y_lower_label
delta_upper_yerr_label = "$\Delta$" + y_upper_label
# Color
#current_palette = sns.color_palette("muted")
#current_palette = sns.color_palette("GnBu_d")
#error_color = sns.color_palette("GnBu_d")[0]
# Plot style
plt.close()
plt.style.use(["seaborn-talk", "seaborn-whitegrid"])
plt.rcParams['axes.labelsize'] = 18
plt.rcParams['xtick.labelsize'] = 14
plt.rcParams['ytick.labelsize'] = 16
plt.tight_layout()
#plt.figure(figsize=(8, 6))
bar_width = 0.2
# Color
#current_palette = sns.color_palette("deep")
# Zesty colorblind-friendly color palette
color0 = "#0F2080" #dark blue
color1 = "#F5793A" #orange
#color2 = "#A95AA1" #purple
color3 = "#85C0F9" #light blue
current_palette = [color0, color1, color3]#, color2, color3]
error_color = 'gray'
fig, ax = plt.subplots(figsize=(8, 6))
# Plot 1st group of data
data = df1 # Pandas DataFrame
data[delta_lower_yerr_label] = data[y_label] - data[y_lower_label]
data[delta_upper_yerr_label] = data[y_upper_label] - data[y_label]
x = range(len(data[y_label]))
y = data[y_label]
ax.bar(x, y, label = "QM", width=bar_width, color=current_palette[0])
plt.xticks(x, data[x_label], rotation=90)
plt.errorbar(x, y, yerr=(data[delta_lower_yerr_label], data[delta_upper_yerr_label]),
fmt="none", ecolor=error_color, capsize=2, capthick=True, elinewidth=1)
# Plot 2nd group of data
data = df2 # Pandas DataFrame
data[delta_lower_yerr_label] = data[y_label] - data[y_lower_label]
data[delta_upper_yerr_label] = data[y_upper_label] - data[y_label]
index = np.arange(df2.shape[0])
x = range(len(data[y_label]))
y = data[y_label]
#plt.bar(x, y)
ax.bar(index + bar_width, y, label = "Empirical", width=bar_width, color=current_palette[1])
plt.xticks(index + bar_width/2, data[x_label], rotation=90)
plt.errorbar(index + bar_width, y, yerr=(data[delta_lower_yerr_label], data[delta_upper_yerr_label]),
fmt="none", ecolor=error_color, capsize=2, capthick=True, elinewidth=1)
# Plot 3nd group of data
data = df3 # Pandas DataFrame
data[delta_lower_yerr_label] = data[y_label] - data[y_lower_label]
data[delta_upper_yerr_label] = data[y_upper_label] - data[y_label]
index = np.arange(df3.shape[0])
x = range(len(data[y_label]))
y = data[y_label]
# plt.bar(x, y)
ax.bar(index + 2*bar_width, y, label="Empirical", width=bar_width, color=current_palette[2])
plt.xticks(index + bar_width + bar_width / 2, data[x_label], rotation=90)
plt.errorbar(index + 2*bar_width, y, yerr=(data[delta_lower_yerr_label], data[delta_upper_yerr_label]),
fmt="none", ecolor=error_color, capsize=2, capthick=True, elinewidth=1)
'''# Plot 4nd group of data
data = df4 # Pandas DataFrame
data[delta_lower_yerr_label] = data[y_label] - data[y_lower_label]
data[delta_upper_yerr_label] = data[y_upper_label] - data[y_label]
index = np.arange(df4.shape[0])
x = range(len(data[y_label]))
y = data[y_label]
# plt.bar(x, y)
ax.bar(index + 3*bar_width, y, label="Empirical", width=bar_width, color=current_palette[3])
plt.xticks(index + 1*bar_width + bar_width / 2, data[x_label], rotation=90)
plt.errorbar(index + 3*bar_width, y, yerr=(data[delta_lower_yerr_label], data[delta_upper_yerr_label]),
fmt="none", ecolor=error_color, capsize=2, capthick=True, elinewidth=1)'''
plt.xlabel(x_label)
plt.ylabel(y_label)
# create legend
from matplotlib.lines import Line2D
custom_lines = [Line2D([0], [0], color=current_palette[0], lw=5),
Line2D([0], [0], color=current_palette[1], lw=5),
Line2D([0], [0], color=current_palette[2], lw=5)]
#, Line2D([0], [0], color=current_palette[3], lw=5)]
ax.legend(custom_lines, group_labels)
def ridge_plot(df, by, column, figsize, colormap):
plt.rcParams['axes.labelsize'] = 14
plt.rcParams['xtick.labelsize'] = 14
plt.tight_layout()
# Make ridge plot
fig, axes = joypy.joyplot(data=df, by=by, column=column, figsize=figsize, colormap=colormap, linewidth=1)
# Add x-axis label
axes[-1].set_xlabel(column)
def ridge_plot_wo_overlap(df, by, column, figsize, colormap):
plt.rcParams['axes.labelsize'] = 14
plt.rcParams['xtick.labelsize'] = 14
plt.tight_layout()
# Make ridge plot
fig, axes = joypy.joyplot(data=df, by=by, column=column, figsize=figsize, colormap=colormap, linewidth=1, overlap=0)
# Add x-axis label
axes[-1].set_xlabel(column)
# =============================================================================
# CONSTANTS
# =============================================================================
# Paths to input data.
pKa_COLLECTION_PATH_RANKED_SUBMISSIONS = './analysis_outputs_ranked_submissions/pKa_submission_collection.csv'
pKa_COLLECTION_PATH_ALL_SUBMISSIONS = './analysis_outputs_all_submissions/pKa_submission_collection.csv'
# =============================================================================
# UTILITY FUNCTIONS
# =============================================================================
def read_collection_file(collection_file_path):
"""
    Function to read the SAMPL6 collection CSV file that was created by pKaSubmissionCollection.
    :param collection_file_path: str, path to the submission collection CSV file
:return: Pandas DataFrame
"""
# Check if submission collection file already exists.
if os.path.isfile(collection_file_path):
print("Analysis will be done using the existing collection file: {}".format(collection_file_path))
collection_df = pd.read_csv(collection_file_path, index_col=0)
print("\n SubmissionCollection: \n")
print(collection_df)
else:
raise Exception("Collection file doesn't exist: {}".format(collection_file_path))
return collection_df
def calc_MAE_for_molecules_across_all_predictions(collection_df, directory_path, file_base_name):
"""
Calculate mean absolute error for each molecule for all methods.
:param collection_df: Pandas DataFrame of submission collection.
:param directory_path: Directory for outputs
:param file_base_name: Filename for outputs
:return:
"""
# Create list of Molecule IDs
mol_IDs= list(set(collection_df["Molecule ID"].values)) # List of unique IDs
mol_IDs.sort()
print(mol_IDs)
# List for keeping records of stats values for each molecule
molecular_statistics = []
# Slice the dataframe for each molecule to calculate MAE
for mol_ID in mol_IDs:
collection_df_mol_slice = collection_df.loc[collection_df["Molecule ID"] == mol_ID]
# 2D array of matched calculated and experimental pKas
data = collection_df_mol_slice[["pKa (calc)", "pKa (exp)"]].values
# Calculate mean absolute error
#MAE_value = mae(data)
# Calculate MAE and RMSE and their 95% confidence intervals
bootstrap_statistics = compute_bootstrap_statistics(samples=data,
stats_funcs=[mae, rmse],
percentile=0.95,
n_bootstrap_samples=10000)
MAE = bootstrap_statistics[0][0]
MAE_lower_CI = bootstrap_statistics[0][1][0]
MAE_upper_CI = bootstrap_statistics[0][1][1]
print("{} MAE: {} [{}, {}]".format(mol_ID, MAE, MAE_lower_CI, MAE_upper_CI))
RMSE = bootstrap_statistics[1][0]
RMSE_lower_CI = bootstrap_statistics[1][1][0]
RMSE_upper_CI = bootstrap_statistics[1][1][1]
print("{} RMSE: {} [{}, {}]\n".format(mol_ID, RMSE, RMSE_lower_CI, RMSE_upper_CI))
# Record in CSV file
molecular_statistics.append({'Molecule ID': mol_ID, 'MAE': MAE, 'MAE_lower_CI': MAE_lower_CI,
'MAE_upper_CI': MAE_upper_CI, 'RMSE': RMSE, 'RMSE_lower_CI': RMSE_lower_CI,
'RMSE_upper_CI': RMSE_upper_CI})
# Convert dictionary to Dataframe to create tables/plots easily and save as CSV.
molecular_statistics_df = pd.DataFrame(molecular_statistics)
#molecular_statistics_df.set_index('Molecule ID', inplace=True)
# Sort values by MAE values
print(molecular_statistics_df)
molecular_statistics_df.sort_values(by='MAE', inplace=True)
# Create CSV
os.makedirs(directory_path)
file_base_path = os.path.join(directory_path, file_base_name)
with open(file_base_path + '.csv', 'w') as f:
molecular_statistics_df.to_csv(f)
# Plot MAE and RMSE of each molecule across predictions as a bar plot
barplot_with_CI_errorbars(df = molecular_statistics_df, x_label = 'Molecule ID',
y_label = 'MAE', y_lower_label = 'MAE_lower_CI', y_upper_label = 'MAE_upper_CI',
figsize=(7.5, 6))
plt.savefig(directory_path + "/MAE_vs_molecule_ID_plot.pdf")
barplot_with_CI_errorbars(df=molecular_statistics_df, x_label = 'Molecule ID',
y_label = 'RMSE', y_lower_label = 'RMSE_lower_CI', y_upper_label = 'RMSE_upper_CI',
figsize=(7.5, 6))
plt.savefig(directory_path + "/RMSE_vs_molecule_ID_plot.pdf")
def select_subsection_of_collection(collection_df, method_group):
"""
    Returns a dataframe which is the subset of rows of the collection dataframe that match the requested method category
    :param collection_df: Pandas DataFrame of submission collection.
    :param method_group: String that specifies which method group is requested, e.g. "QM", "QM+LEC", or "QSPR/ML"
:return: Pandas DataFrame of subsection of submission collection.
"""
print("Looking for submissions of selected method group...")
print("Method group: {}".format(method_group))
#print("Collection_df:\n",collection_df)
# Filter collection dataframe based on method category
#collection_df_of_selected_method_group = collection_df.loc[collection_df["reassigned category"] == method_group]
collection_df_of_selected_method_group = collection_df.loc[collection_df["category"] == method_group]
collection_df_of_selected_method_group = collection_df_of_selected_method_group.reset_index(drop=True)
print("collection_df_of_selected_method_group: \n {}".format(collection_df_of_selected_method_group))
return collection_df_of_selected_method_group
def calc_MAE_for_molecules_across_selected_predictions(collection_df, selected_method_group, directory_path, file_base_name):
"""
Calculates mean absolute error for each molecule across prediction method category
:param collection_df: Pandas DataFrame of submission collection.
    :param selected_method_group: "QM", "QM+LEC", or "QSPR/ML"
:param directory_path: Directory path for outputs
:param file_base_name: Output file name
:return:
"""
# Create subsection of collection dataframe for selected methods
print("selected_method_group...", selected_method_group)
print("collection_df...", collection_df)
collection_df_subset = select_subsection_of_collection(collection_df=collection_df, method_group=selected_method_group)
# category_path_label_dict ={ "Physical (MM)": "Physical_MM",
# "Empirical": "Empirical",
# "Mixed": "Mixed",
# "Physical (QM)": "Physical_QM"}
subset_directory_path = os.path.join(directory_path, category_path_label_dict[selected_method_group])
# Calculate MAE using subsection of collection database
calc_MAE_for_molecules_across_all_predictions(collection_df=collection_df_subset, directory_path=subset_directory_path, file_base_name=file_base_name)
#def create_comparison_plot_of_molecular_MAE_of_method_categories(directory_path, group1, group2, group3, group4, file_base_name):
def create_comparison_plot_of_molecular_MAE_of_method_categories(directory_path, group1, group2, group3, file_base_name):
label1 = category_path_label_dict[group1]
label2 = category_path_label_dict[group2]
label3 = category_path_label_dict[group3]
#label4 = category_path_label_dict[group4]
# Read molecular_error_statistics table
df_gr1 = pd.read_csv(directory_path + "/" + label1 + "/molecular_error_statistics_for_{}_methods.csv".format(label1))
df_gr2 = pd.read_csv(directory_path + "/" + label2 + "/molecular_error_statistics_for_{}_methods.csv".format(label2))
df_gr3 = pd.read_csv(directory_path + "/" + label3 + "/molecular_error_statistics_for_{}_methods.csv".format(label3))
#df_gr4 = pd.read_csv(directory_path + "/" + label4 + "/molecular_error_statistics_for_{}_methods.csv".format(label4))
# Reorder dataframes based on the order of molecular MAE statistic of first group (Physical methods)
ordered_molecule_list = list(df_gr1["Molecule ID"])
print("ordered_molecule_list: \n", ordered_molecule_list)
df_gr2_reordered = df_gr2.set_index("Molecule ID")
df_gr2_reordered = df_gr2_reordered.reindex(index=df_gr1['Molecule ID']) #Reset row order based on index of df_gr1
df_gr2_reordered = df_gr2_reordered.reset_index()
df_gr3_reordered = df_gr3.set_index("Molecule ID")
df_gr3_reordered = df_gr3_reordered.reindex(index=df_gr1['Molecule ID']) # Reset row order based on index of df_gr1
df_gr3_reordered = df_gr3_reordered.reset_index()
print("df_gr3_reordered",df_gr3_reordered)
'''df_gr4_reordered = df_gr4.set_index("Molecule ID")
df_gr4_reordered = df_gr4_reordered.reindex(index=df_gr1['Molecule ID']) # Reset row order based on index of df_gr1
df_gr4_reordered = df_gr4_reordered.reset_index()
print("df_gr4_reordered",df_gr4_reordered)'''
# Plot
# Molecular labels will be taken from 1st dataframe, so the second dataframe should have the same molecule ID order.
barplot_with_CI_errorbars_and_4groups(df1=df_gr1, df2=df_gr2_reordered, df3=df_gr3_reordered, #df4=df_gr4_reordered,
x_label="Molecule ID", y_label="MAE",
y_lower_label="MAE_lower_CI", y_upper_label="MAE_upper_CI",
group_labels=[group1, group2, group3])#, group4])
plt.savefig(molecular_statistics_directory_path + "/" + file_base_name + ".pdf")
print("completed barplot_with_CI_errorbars_and_4groups")
# Same comparison plot with only QM results (only for presentation effects)
#barplot_with_CI_errorbars_and_1st_of_2groups(df1=df_qm, df2=df_empirical_reordered, x_label="Molecule ID", y_label="MAE",
# y_lower_label="MAE_lower_CI", y_upper_label="MAE_upper_CI")
#plt.savefig(molecular_statistics_directory_path + "/" + file_base_name + "_only_QM.pdf")
def create_molecular_error_distribution_plots(collection_df, directory_path, file_base_name):#, subset_of_method_ids):
# Ridge plot using all predictions
ridge_plot(df=collection_df, by = "Molecule ID", column = "$\Delta$pKa error (calc - exp)", figsize=(4, 6), colormap=cm.plasma)
plt.savefig(directory_path + "/" + file_base_name +"_all_methods.pdf")
# Ridge plot using only consistently well-performing methods
'''collection_subset_df = collection_df[collection_df["method_name"].isin(subset_of_method_ids)].reset_index(drop=True)
ridge_plot(df=collection_subset_df, by = "Molecule ID", column = "$\Delta$pKa error (calc - exp)", figsize=(4, 6),
colormap=cm.plasma)
plt.savefig(directory_path + "/" + file_base_name +"_well_performing_methods.pdf")'''
def create_category_error_distribution_plots(collection_df, directory_path, file_base_name):
# Ridge plot using all predictions
'''ridge_plot_wo_overlap(df=collection_df, by = "reassigned category", column = "$\Delta$pKa error (calc - exp)", figsize=(4, 4),
colormap=cm.plasma)'''
ridge_plot_wo_overlap(df=collection_df, by = "category", column = "$\Delta$pKa error (calc - exp)", figsize=(4, 4),
colormap=cm.plasma)
plt.savefig(directory_path + "/" + file_base_name +".pdf")
def calculate_summary_statistics_of_top_methods_of_each_category(statistics_df, categories, top, directory_path, file_base_name):
df_stat = pd.read_csv(statistics_df)
data = []
for category in categories:
#print(category)
#is_cat = (df_stat["category"] == "Physical")
#print(is_cat)
#df_cat = df_stat[df_stat["reassigned_category"] == category].reset_index(drop=False)
df_cat = df_stat[df_stat["category"] == category].reset_index(drop=False)
# Already ordered by RMSE
df_cat_top = df_cat.head(top).reset_index(drop=False)
RMSE_mean = df_cat_top["RMSE"].mean()
RMSE_std = df_cat_top["RMSE"].values.std(ddof=1)
        # Reorder by increasing MAE
df_cat = df_cat.sort_values(by="MAE", inplace=False, ascending=True)
df_cat_top = df_cat.head(top).reset_index(drop=False)
MAE_mean = df_cat_top["MAE"].mean()
MAE_std = df_cat_top["MAE"].values.std(ddof=1)
# Reorder by decreasing Kendall's Tau
df_cat = df_cat.sort_values(by="kendall_tau", inplace=False, ascending=False)
df_cat_top = df_cat.head(top).reset_index(drop=False)
tau_mean = df_cat_top["kendall_tau"].mean()
tau_std = df_cat_top["kendall_tau"].values.std(ddof=1)
# Reorder by decreasing R-Squared
df_cat = df_cat.sort_values(by="R2", inplace=False, ascending=False)
df_cat_top = df_cat.head(top).reset_index(drop=False)
r2_mean = df_cat_top["R2"].mean()
r2_std = df_cat_top["R2"].values.std(ddof=1)
# Number of predictions, in case less than 10
num_predictions =df_cat_top.shape[0]
data.append({
'category': category,
'RMSE_mean': RMSE_mean,
'RMSE_std': RMSE_std,
'MAE_mean': MAE_mean,
'MAE_std': MAE_std,
'kendall_tau_mean': tau_mean,
'kendall_tau_std': tau_std,
'R2_mean': r2_mean,
'R2_std': r2_std,
'N': num_predictions
})
# Transform into Pandas DataFrame.
df_stat_summary = pd.DataFrame(data=data)
file_name = os.path.join(directory_path, file_base_name)
df_stat_summary.to_csv(file_name, index=False)
# =============================================================================
# MAIN
# =============================================================================
if __name__ == '__main__':
'''collection_data = read_collection_file(collection_file_path = "1to0/collection.csv")
# Create new directory to store molecular statistics
output_directory_path = '1to0/.'
analysis_directory_name = 'MolecularStatisticsTables'
if os.path.isdir('{}/{}'.format(output_directory_path, analysis_directory_name)):
shutil.rmtree('{}/{}'.format(output_directory_path, analysis_directory_name))
# Calculate MAE of each molecule across all predictions methods
molecular_statistics_directory_path = os.path.join(output_directory_path, "MolecularStatisticsTables")
calc_MAE_for_molecules_across_all_predictions(collection_df = collection_data,
directory_path = molecular_statistics_directory_path,
file_base_name = "molecular_error_statistics")'''
# ==========================================================================================
# Analysis of standard blind submissions (ranked and nonranked), including reference calculations
# ==========================================================================================
# Read collection file
collection_data = read_collection_file(collection_file_path = pKa_COLLECTION_PATH_ALL_SUBMISSIONS)
# Create new directory to store molecular statistics
output_directory_path = './analysis_outputs_all_submissions'
analysis_directory_name = 'MolecularStatisticsTables'
if os.path.isdir('{}/{}'.format(output_directory_path, analysis_directory_name)):
shutil.rmtree('{}/{}'.format(output_directory_path, analysis_directory_name))
# Calculate MAE of each molecule across all predictions methods
molecular_statistics_directory_path = os.path.join(output_directory_path, "MolecularStatisticsTables")
calc_MAE_for_molecules_across_all_predictions(collection_df = collection_data,
directory_path = molecular_statistics_directory_path,
file_base_name = "molecular_error_statistics")
# Calculate MAE for each molecule across each method category
#list_of_method_categories = ["Physical (MM)", "Empirical", "Mixed", "Physical (QM)"]
list_of_method_categories = ["QM", "QM+LEC", "QSPR/ML"]
# New labels for file naming for reassigned categories
category_path_label_dict = {"QM": "QM", "QM+LEC": "QM_LEC", "QSPR/ML": "QSPR_ML"}
for category in list_of_method_categories:
category_file_label = category_path_label_dict[category]
calc_MAE_for_molecules_across_selected_predictions(collection_df=collection_data,
selected_method_group=category,
directory_path=molecular_statistics_directory_path,
file_base_name="molecular_error_statistics_for_{}_methods".format(category_file_label))
# Create comparison plot of MAE for each molecule across method categories
create_comparison_plot_of_molecular_MAE_of_method_categories(directory_path=molecular_statistics_directory_path,
group1='QM', group2='QM+LEC',
#group3="Mixed",
group3='QSPR/ML',
file_base_name="molecular_MAE_comparison_between_method_categories")
# Create molecular error distribution ridge plots for all methods and a subset of well performing methods (found consistently in the top 15 across 4 metrics)
#well_performing_method_ids = ["4K631", "006AC", "43M66", "5W956", "847L9", "HC032", "7RS67", "D4406"]
#well_performing_method_ids = ["Chemprop", "ClassicalGSG DB2", "ClassicalGSG DB3", "ClassicalGSG DB4",
# "TFE MLR", "TFE-SM8-solvent-opt", "TFE-SM8-vacuum-opt"]
create_molecular_error_distribution_plots(collection_df=collection_data,
directory_path=molecular_statistics_directory_path,
#subset_of_method_ids=well_performing_method_ids,
file_base_name="molecular_error_distribution_ridge_plot")
# Compare method categories
# Calculate error distribution plots for each method category
category_comparison_directory_path = os.path.join(output_directory_path, "StatisticsTables/MethodCategoryComparison")
os.makedirs(category_comparison_directory_path, exist_ok=True)
create_category_error_distribution_plots(collection_df=collection_data,
directory_path=category_comparison_directory_path,
file_base_name="error_distribution_of_method_categories_ridge_plot")
'''# Calculate mean and standard deviation of performance statistics of top 10 methods of each category.
statistics_table_path = os.path.join(output_directory_path, "StatisticsTables/statistics.csv")
calculate_summary_statistics_of_top_methods_of_each_category(
statistics_df= statistics_table_path, categories=list_of_method_categories, top=10,
directory_path=category_comparison_directory_path, file_base_name="summary_statistics_of_method_categories_top10.csv"
)
# Calculate mean and standard deviation of performance statistics of top 5 methods of each category.
statistics_table_path = os.path.join(output_directory_path, "StatisticsTables/statistics.csv")
calculate_summary_statistics_of_top_methods_of_each_category(
statistics_df= statistics_table_path, categories=list_of_method_categories, top=5,
directory_path=category_comparison_directory_path, file_base_name="summary_statistics_of_method_categories_top5.csv"
)'''
# ==========================================================================================
# Repeat analysis for just ranked submissions
# ==========================================================================================
# Read collection file
collection_data = read_collection_file(collection_file_path = pKa_COLLECTION_PATH_RANKED_SUBMISSIONS)
# Create new directory to store molecular statistics
output_directory_path = './analysis_outputs_ranked_submissions'
analysis_directory_name = 'MolecularStatisticsTables'
if os.path.isdir('{}/{}'.format(output_directory_path, analysis_directory_name)):
shutil.rmtree('{}/{}'.format(output_directory_path, analysis_directory_name))
# Calculate MAE of each molecule across all predictions methods
molecular_statistics_directory_path = os.path.join(output_directory_path, "MolecularStatisticsTables")
calc_MAE_for_molecules_across_all_predictions(collection_df = collection_data,
directory_path = molecular_statistics_directory_path,
file_base_name = "molecular_error_statistics")
# Calculate MAE for each molecule across each method category
#list_of_method_categories = ["Physical (MM)", "Empirical", "Mixed", "Physical (QM)"]
list_of_method_categories = ["QM", "QM+LEC", "QSPR/ML"]
# New labels for file naming for reassigned categories
category_path_label_dict = {"QM": "QM", "QM+LEC": "QM_LEC", "QSPR/ML": "QSPR_ML"}
for category in list_of_method_categories:
category_file_label = category_path_label_dict[category]
calc_MAE_for_molecules_across_selected_predictions(collection_df=collection_data,
selected_method_group=category,
directory_path=molecular_statistics_directory_path,
file_base_name="molecular_error_statistics_for_{}_methods".format(category_file_label))
# Create comparison plot of MAE for each molecule across all method categories
create_comparison_plot_of_molecular_MAE_of_method_categories(directory_path=molecular_statistics_directory_path,
group1='QM', group2='QM+LEC',
#group3="Mixed",
group3='QSPR/ML',
file_base_name="molecular_MAE_comparison_between_method_categories")
# Create molecular error distribution ridge plots for all methods and a subset of well performing methods
# (found consistently in the top 10 across 4 metrics)
#well_performing_method_ids = ["4K631", "006AC", "43M66", "5W956", "847L9", "HC032", "7RS67", "D4406"]
#well_performing_method_ids = ["Chemprop", "ClassicalGSG DB3", "COSMO-RS","MD (CGenFF/TIP3P)", "TFE MLR"]
create_molecular_error_distribution_plots(collection_df=collection_data,
directory_path=molecular_statistics_directory_path,
#subset_of_method_ids=well_performing_method_ids,
file_base_name="molecular_error_distribution_ridge_plot")
# Compare method categories
# Calculate error distribution plots for each method category
category_comparison_directory_path = os.path.join(output_directory_path, "StatisticsTables/MethodCategoryComparison")
os.makedirs(category_comparison_directory_path, exist_ok=True)
create_category_error_distribution_plots(collection_df=collection_data,
directory_path=category_comparison_directory_path,
file_base_name="error_distribution_of_method_categories_ridge_plot")
'''
# Calculate mean and standard deviation of performance statistics of top 10 methods of each category.
statistics_table_path = os.path.join(output_directory_path, "StatisticsTables/statistics.csv")
calculate_summary_statistics_of_top_methods_of_each_category(
statistics_df= statistics_table_path, categories=list_of_method_categories, top=10,
directory_path=category_comparison_directory_path, file_base_name="summary_statistics_of_method_categories_top10.csv"
)
# Calculate mean and standard deviation of performance statistics of top 5 methods of each category.
statistics_table_path = os.path.join(output_directory_path, "StatisticsTables/statistics.csv")
calculate_summary_statistics_of_top_methods_of_each_category(
statistics_df= statistics_table_path, categories=list_of_method_categories, top=5,
directory_path=category_comparison_directory_path, file_base_name="summary_statistics_of_method_categories_top5.csv"
)
'''
|
the-stack_106_27543 | # fmt: off
import logging
import json
from pathlib import Path
import torch
from farm.data_handler.data_silo import DataSilo, DataSiloForCrossVal
from farm.data_handler.processor import TextClassificationProcessor
from farm.modeling.optimization import initialize_optimizer
from farm.modeling.adaptive_model import AdaptiveModel
from farm.modeling.language_model import LanguageModel
from farm.modeling.prediction_head import TextClassificationHead
from farm.modeling.tokenization import Tokenizer
from farm.train import Trainer, EarlyStopping
from farm.utils import set_all_seeds, MLFlowLogger, initialize_device_settings
from farm.eval import Evaluator
from sklearn.metrics import matthews_corrcoef, f1_score
from farm.evaluation.metrics import simple_accuracy, register_metrics
def doc_classification_crossvalidation():
##########################
########## Logging
##########################
logger = logging.getLogger(__name__)
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO)
# reduce verbosity from transformers library
logging.getLogger('transformers').setLevel(logging.WARNING)
# ml_logger = MLFlowLogger(tracking_uri="https://public-mlflow.deepset.ai/")
# for local logging instead:
ml_logger = MLFlowLogger(tracking_uri="logs")
# ml_logger.init_experiment(experiment_name="Public_FARM", run_name="DocClassification_ES_f1_1")
##########################
########## Settings
##########################
xval_folds = 5
xval_stratified = True
set_all_seeds(seed=42)
device, n_gpu = initialize_device_settings(use_cuda=True)
n_epochs = 20
batch_size = 32
evaluate_every = 100
lang_model = "bert-base-german-cased"
do_lower_case = False
use_amp = None
# 1.Create a tokenizer
tokenizer = Tokenizer.load(
pretrained_model_name_or_path=lang_model,
do_lower_case=do_lower_case)
# The evaluation on the dev-set can be done with one of the predefined metrics or with a
# metric defined as a function from (preds, labels) to a dict that contains all the actual
    # metrics values. The function must be registered under a string name, and that
    # name is then passed on as the metric.
# For xval, we also store the actual predictions and labels in each result so we can
# calculate overall metrics over all folds later
def mymetrics(preds, labels):
acc = simple_accuracy(preds, labels).get("acc")
f1other = f1_score(y_true=labels, y_pred=preds, pos_label="OTHER")
f1offense = f1_score(y_true=labels, y_pred=preds, pos_label="OFFENSE")
f1macro = f1_score(y_true=labels, y_pred=preds, average="macro")
f1micro = f1_score(y_true=labels, y_pred=preds, average="micro")
mcc = matthews_corrcoef(labels, preds)
return {
"acc": acc,
"f1_other": f1other,
"f1_offense": f1offense,
"f1_macro": f1macro,
"f1_micro": f1micro,
"mcc": mcc
}
register_metrics('mymetrics', mymetrics)
metric = 'mymetrics'
# 2. Create a DataProcessor that handles all the conversion from raw text into a pytorch Dataset
    # Here we load the GermEval 2018 data automatically if it is not available.
# GermEval 2018 only has train.tsv and test.tsv dataset - no dev.tsv
# The processor wants to know the possible labels ...
label_list = ["OTHER", "OFFENSE"]
processor = TextClassificationProcessor(tokenizer=tokenizer,
max_seq_len=64,
data_dir=Path("../data/germeval18"),
label_list=label_list,
metric=metric,
label_column_name="coarse_label"
)
# 3. Create a DataSilo that loads several datasets (train/dev/test), provides DataLoaders for them and calculates a few descriptive statistics of our datasets
data_silo = DataSilo(
processor=processor,
batch_size=batch_size)
# Load one silo for each fold in our cross-validation
silos = DataSiloForCrossVal.make(data_silo, n_splits=xval_folds)
# the following steps should be run for each of the folds of the cross validation, so we put them
# into a function
def train_on_split(silo_to_use, n_fold, save_dir):
logger.info(f"############ Crossvalidation: Fold {n_fold} ############")
# Create an AdaptiveModel
# a) which consists of a pretrained language model as a basis
language_model = LanguageModel.load(lang_model)
# b) and a prediction head on top that is suited for our task => Text classification
prediction_head = TextClassificationHead(
class_weights=data_silo.calculate_class_weights(task_name="text_classification"),
num_labels=len(label_list))
model = AdaptiveModel(
language_model=language_model,
prediction_heads=[prediction_head],
embeds_dropout_prob=0.2,
lm_output_types=["per_sequence"],
device=device)
# Create an optimizer
model, optimizer, lr_schedule = initialize_optimizer(
model=model,
learning_rate=0.5e-5,
device=device,
n_batches=len(silo_to_use.loaders["train"]),
n_epochs=n_epochs,
use_amp=use_amp)
        # Feed everything to the Trainer, which takes care of growing our model into a powerful plant and evaluates it from time to time
# Also create an EarlyStopping instance and pass it on to the trainer
# An early stopping instance can be used to save the model that performs best on the dev set
# according to some metric and stop training when no improvement is happening for some iterations.
        # NOTE: Using a different save directory for each fold allows us afterwards to use the
        # n best models (one per fold) in an ensemble!
save_dir = Path(str(save_dir) + f"-{n_fold}")
earlystopping = EarlyStopping(
metric="f1_offense", mode="max", # use the metric from our own metrics function instead of loss
save_dir=save_dir, # where to save the best model
patience=5 # number of evaluations to wait for improvement before terminating the training
)
trainer = Trainer(
model=model,
optimizer=optimizer,
data_silo=silo_to_use,
epochs=n_epochs,
n_gpu=n_gpu,
lr_schedule=lr_schedule,
evaluate_every=evaluate_every,
device=device,
early_stopping=earlystopping,
evaluator_test=False)
# train it
trainer.train()
return trainer.model
# for each fold, run the whole training, earlystopping to get a model, then evaluate the model
# on the test set of each fold
# Remember all the results for overall metrics over all predictions of all folds and for averaging
allresults = []
all_preds = []
all_labels = []
bestfold = None
bestf1_offense = -1
save_dir = Path("saved_models/bert-german-doc-tutorial-es")
for num_fold, silo in enumerate(silos):
model = train_on_split(silo, num_fold, save_dir)
# do eval on test set here (and not in Trainer),
# so that we can easily store the actual preds and labels for a "global" eval across all folds.
evaluator_test = Evaluator(
data_loader=silo.get_data_loader("test"),
tasks=silo.processor.tasks,
device=device
)
result = evaluator_test.eval(model, return_preds_and_labels=True)
evaluator_test.log_results(result, "Test", steps=len(silo.get_data_loader("test")), num_fold=num_fold)
allresults.append(result)
all_preds.extend(result[0].get("preds"))
all_labels.extend(result[0].get("labels"))
# keep track of best fold
f1_offense = result[0]["f1_offense"]
if f1_offense > bestf1_offense:
bestf1_offense = f1_offense
bestfold = num_fold
        # empty cache to avoid memory leaks and CUDA OOM across multiple folds
model.cpu()
torch.cuda.empty_cache()
# Save the per-fold results to json for a separate, more detailed analysis
with open("doc_classification_xval.results.json", "wt") as fp:
json.dump(allresults, fp)
# calculate overall metrics across all folds
xval_f1_micro = f1_score(all_labels, all_preds, labels=label_list, average="micro")
xval_f1_macro = f1_score(all_labels, all_preds, labels=label_list, average="macro")
xval_f1_offense = f1_score(all_labels, all_preds, labels=label_list, pos_label="OFFENSE")
xval_f1_other = f1_score(all_labels, all_preds, labels=label_list, pos_label="OTHER")
xval_mcc = matthews_corrcoef(all_labels, all_preds)
logger.info("XVAL F1 MICRO: ", xval_f1_micro)
logger.info("XVAL F1 MACRO: ", xval_f1_macro)
logger.info("XVAL F1 OFFENSE: ", xval_f1_offense)
logger.info("XVAL F1 OTHER: ", xval_f1_other)
logger.info("XVAL MCC: ", xval_mcc)
# -----------------------------------------------------
# Just for illustration, use the best model from the best xval val for evaluation on
# the original (still unseen) test set.
logger.info("###### Final Eval on hold out test set using best model #####")
evaluator_origtest = Evaluator(
data_loader=data_silo.get_data_loader("test"),
tasks=data_silo.processor.tasks,
device=device
)
# restore model from the best fold
lm_name = model.language_model.name
save_dir = Path(f"saved_models/bert-german-doc-tutorial-es-{bestfold}")
model = AdaptiveModel.load(save_dir, device, lm_name=lm_name)
model.connect_heads_with_processor(data_silo.processor.tasks, require_labels=True)
result = evaluator_origtest.eval(model)
logger.info("TEST F1 MICRO: ", result[0]["f1_micro"])
logger.info("TEST F1 MACRO: ", result[0]["f1_macro"])
logger.info("TEST F1 OFFENSE: ", result[0]["f1_offense"])
logger.info("TEST F1 OTHER: ", result[0]["f1_other"])
logger.info("TEST MCC: ", result[0]["mcc"])
if __name__ == "__main__":
doc_classification_crossvalidation()
# fmt: on
|
the-stack_106_27546 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Model definition
'''
from tensorflow.keras import Model
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout
import matplotlib.pyplot as plt
def lenet5(dropout_prob=0.2):
"""Implements a LeNet5 architecture with dropout
Args:
dropout_prob (float): dropout probability
Returns:
tensorflow.keras.models.Sequential: LeNet5 model
"""
layers = [
Conv2D(filters=6, kernel_size=(5, 5), strides=(1, 1), padding='valid',
activation='relu', data_format='channels_last', input_shape=(32, 32, 1)),
MaxPooling2D((2, 2)),
Conv2D(16, (5, 5), activation='relu'),
MaxPooling2D((2, 2)),
Flatten()
]
if dropout_prob > 0:
layers.append(Dropout(dropout_prob))
layers.append(Dense(120, activation='relu'))
if dropout_prob > 0:
layers.append(Dropout(dropout_prob))
layers.append(Dense(84, activation='relu'))
layers.append(Dense(43, activation='softmax'))
return Sequential(layers)
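

# Training configuration for lenet5() is left to the caller. A typical setup
# (an illustrative assumption, e.g. with integer class labels) might be:
#   model = lenet5()
#   model.compile(optimizer='adam',
#                 loss='sparse_categorical_crossentropy',
#                 metrics=['accuracy'])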
def show_feature_map(image_input, model, layer_name, activation_min=-1, activation_max=-1):
"""Display activation map of a specific layer
Args:
image_input (numpy.ndarray[1, H, W, 1]): input image
model (tensorflow.keras.models.Sequential): image classification model
layer_name (str): name of the feature layer to visualize
activation_min (float): minimum value to display
activation_max (float): maximum value to display
"""
intermediate_layer_model = Model(inputs=model.input,
outputs=model.get_layer(layer_name).output)
activation = intermediate_layer_model.predict(image_input)
featuremaps = activation.shape[3]
plt.figure(1, figsize=(15, 15))
for featuremap in range(featuremaps):
plt.subplot(6, 8, featuremap + 1) # sets the number of feature maps to show on each row and column
plt.title('FeatureMap ' + str(featuremap)) # displays the feature map number
        if activation_min != -1 and activation_max != -1:
plt.imshow(activation[0, :, :, featuremap], interpolation="nearest",
vmin=activation_min, vmax=activation_max, cmap="gray")
elif activation_max != -1:
plt.imshow(activation[0, :, :, featuremap], interpolation="nearest", vmax=activation_max, cmap="gray")
elif activation_min != -1:
plt.imshow(activation[0, :, :, featuremap], interpolation="nearest", vmin=activation_min, cmap="gray")
else:
plt.imshow(activation[0, :, :, featuremap], interpolation="nearest", cmap="gray")
plt.axis('off')
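

# Minimal illustrative smoke test (not part of the original module): build the
# LeNet5 model and print its layer summary. The dropout value is an arbitrary
# choice for demonstration.
if __name__ == '__main__':
    demo_model = lenet5(dropout_prob=0.2)
    demo_model.summary()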
|
the-stack_106_27547 | import logging
import sys
from flask import Flask
from fluffy import version
app = Flask(__name__)
app.config.from_envvar('FLUFFY_SETTINGS')
app.logger.addHandler(logging.StreamHandler(sys.stderr))
app.logger.setLevel(logging.DEBUG)
@app.context_processor
def defaults():
from fluffy.component.assets import asset_url as real_asset_url
return {
'abuse_contact': app.config['ABUSE_CONTACT'],
'app': app,
'asset_url': real_asset_url,
'branding': app.config['BRANDING'],
'fluffy_version': version,
'home_url': app.config['HOME_URL'],
'num_lines': lambda text: len(text.splitlines()),
}
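
# Illustrative note (assumption): because defaults() is registered as a context
# processor, Jinja2 templates rendered by this app can use the injected values
# directly, e.g. {{ num_lines(text) }} or {{ asset_url('app.css') }}.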
|
the-stack_106_27549 | import numpy as np
from PIL import Image
from torch.utils.data import Dataset
from torchvision import transforms
__all__ = ['P2PDataset']
class P2PDataset(Dataset):
""" Pose to Pose dataset definition loads two frame/pose pairs
"""
def __init__(self, df=None, transform=None, data_path=''):
""" Dataset initialization
Parameters
----------
df : pd.DataFrame
Dataframe with datapoint metadata
transform : torchvision.transforms
            Frame preprocessing transforms. Not applied to poses
data_path : str
Global path to data directory
Returns
-------
"""
self.df = df
self.data_path = data_path
if transform is None:
self.transform = transforms.ToTensor()
else:
self.transform = transform
self.to_tensor = transforms.ToTensor()
def __getitem__(self, idx):
""" Returns dataset item
Parameters
----------
idx : int
Index for desired datapoint
Returns
-------
frames[0] : torch.tensor
First (input) frame
frames[1] : torch.tensor
Second (target) frame
pose : torch.tensor
Second (target) pose
"""
entry = self.df.iloc[idx]
dir_path = '{}/{}/{}'.format(self.data_path,
entry['name'], entry['snippet'])
index = np.random.choice([x for x in range(5)], 2, replace=False)
frames = [self.transform(Image.open(
'{}/frame_{}.jpg'.format(dir_path, x))) for x in index]
pose = self.to_tensor(Image.open(
'{}/pose_{}.jpg'.format(dir_path, index[-1])))
return frames[0], frames[1], pose
def __len__(self):
""" Lenght of dataset
Parameters
----------
Returns
-------
len : int
            Length of the dataframe/dataset
"""
return len(self.df)
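
# Illustrative usage sketch (not part of the original module). The metadata file
# and data directory below are assumptions; the dataframe must provide the
# 'name' and 'snippet' columns used in __getitem__, and each snippet directory
# is expected to contain frame_0.jpg..frame_4.jpg and pose_0.jpg..pose_4.jpg.
#
#   import pandas as pd
#   from torch.utils.data import DataLoader
#
#   df = pd.read_csv('metadata.csv')                        # hypothetical metadata file
#   dataset = P2PDataset(df=df, data_path='/path/to/data')  # hypothetical path
#   loader = DataLoader(dataset, batch_size=8, shuffle=True)
#   input_frame, target_frame, target_pose = next(iter(loader))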
|
the-stack_106_27550 | from ... import Header
from os import system, name
def clear():
if name == 'nt':
        _ = system('cls')
    else:
        _ = system('clear')
clear()
import math
class Line:
def __init__(self,coor1,coor2):
self.coor1 = coor1
self.coor2 = coor2
def distance(self):
x1,y1 = self.coor1
x2,y2 = self.coor2
print(((x2-x1)**2 + (y2-y1)**2)**0.5)
# print(math.sqrt((self.coor1[1] - self.coor1[0])**2 +(self.coor2[1] - self.coor2[0])**2))
    def slope(self):
        x1, y1 = self.coor1
        x2, y2 = self.coor2
        print((y2 - y1) / (x2 - x1))
coordinate1 = (3,2)
coordinate2 = (8,10)
li = Line(coordinate1,coordinate2)
li.distance()
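# The call above prints the Euclidean distance between (3, 2) and (8, 10):
# sqrt((8 - 3)**2 + (10 - 2)**2) = sqrt(89) ≈ 9.434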
print(__author__) |
the-stack_106_27552 | import io
import asyncio
import contextlib
import logging
import math
import html
import cairo
import os
import time
import gi
gi.require_version('Pango', '1.0')
gi.require_version('PangoCairo', '1.0')
from gi.repository import Pango, PangoCairo
import discord
import random
from discord.ext import commands
from tle.util import cache_system2
from tle.util import codeforces_api as cf
from tle.util import codeforces_common as cf_common
from tle.util import discord_common
from tle.util import events
from tle.util import paginator
from tle.util import table
from tle.util import tasks
from tle.util import db
from tle import constants
from PIL import Image, ImageFont, ImageDraw
_HANDLES_PER_PAGE = 15
_NAME_MAX_LEN = 20
_PAGINATE_WAIT_TIME = 5 * 60 # 5 minutes
_PRETTY_HANDLES_PER_PAGE = 10
_TOP_DELTAS_COUNT = 10
_MAX_RATING_CHANGES_PER_EMBED = 15
_UPDATE_HANDLE_STATUS_INTERVAL = 6 * 60 * 60 # 6 hours
class HandleCogError(commands.CommandError):
pass
def rating_to_color(rating):
"""returns (r, g, b) pixels values corresponding to rating"""
# TODO: Integrate these colors with the ranks in codeforces_api.py
BLACK = (10, 10, 10)
RED = (255, 20, 20)
BLUE = (0, 0, 200)
GREEN = (0, 140, 0)
ORANGE = (250, 140, 30)
PURPLE = (160, 0, 120)
CYAN = (0, 165, 170)
GREY = (70, 70, 70)
if rating is None or rating=='N/A':
return BLACK
if rating < 1200:
return GREY
if rating < 1400:
return GREEN
if rating < 1600:
return CYAN
if rating < 1900:
return BLUE
if rating < 2100:
return PURPLE
if rating < 2400:
return ORANGE
return RED
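
# For example: rating_to_color(1350) -> GREEN, rating_to_color(1950) -> PURPLE,
# rating_to_color(2500) -> RED, and rating_to_color(None) -> BLACK.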
FONTS = [
'Noto Sans',
'Noto Sans CJK JP',
'Noto Sans CJK SC',
'Noto Sans CJK TC',
'Noto Sans CJK HK',
'Noto Sans CJK KR',
]
def get_gudgitters_image(rankings):
"""return PIL image for rankings"""
SMOKE_WHITE = (250, 250, 250)
BLACK = (0, 0, 0)
DISCORD_GRAY = (.212, .244, .247)
ROW_COLORS = ((0.95, 0.95, 0.95), (0.9, 0.9, 0.9))
WIDTH = 900
HEIGHT = 450
BORDER_MARGIN = 20
COLUMN_MARGIN = 10
HEADER_SPACING = 1.25
WIDTH_RANK = 0.08*WIDTH
WIDTH_NAME = 0.38*WIDTH
LINE_HEIGHT = (HEIGHT - 2*BORDER_MARGIN)/(10 + HEADER_SPACING)
# Cairo+Pango setup
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, WIDTH, HEIGHT)
context = cairo.Context(surface)
context.set_line_width(1)
context.set_source_rgb(*DISCORD_GRAY)
context.rectangle(0, 0, WIDTH, HEIGHT)
context.fill()
layout = PangoCairo.create_layout(context)
layout.set_font_description(Pango.font_description_from_string(','.join(FONTS) + ' 20'))
layout.set_ellipsize(Pango.EllipsizeMode.END)
def draw_bg(y, color_index):
nxty = y + LINE_HEIGHT
# Simple
context.move_to(BORDER_MARGIN, y)
context.line_to(WIDTH, y)
context.line_to(WIDTH, nxty)
context.line_to(0, nxty)
context.set_source_rgb(*ROW_COLORS[color_index])
context.fill()
def draw_row(pos, username, handle, rating, color, y, bold=False):
context.set_source_rgb(*[x/255.0 for x in color])
context.move_to(BORDER_MARGIN, y)
def draw(text, width=-1):
text = html.escape(text)
if bold:
text = f'<b>{text}</b>'
layout.set_width((width - COLUMN_MARGIN)*1000) # pixel = 1000 pango units
layout.set_markup(text, -1)
PangoCairo.show_layout(context, layout)
context.rel_move_to(width, 0)
draw(pos, WIDTH_RANK)
draw(username, WIDTH_NAME)
draw(handle, WIDTH_NAME)
draw(rating)
#
y = BORDER_MARGIN
# draw header
draw_row('#', 'Name', 'Handle', 'Points', SMOKE_WHITE, y, bold=True)
y += LINE_HEIGHT*HEADER_SPACING
for i, (pos, name, handle, rating, score) in enumerate(rankings):
color = rating_to_color(rating)
draw_bg(y, i%2)
draw_row(str(pos), f'{name} ({rating if rating else "N/A"})', handle, str(score), color, y)
if rating and rating >= 3000: # nutella
draw_row('', name[0], handle[0], '', BLACK, y)
y += LINE_HEIGHT
image_data = io.BytesIO()
surface.write_to_png(image_data)
image_data.seek(0)
discord_file = discord.File(image_data, filename='gudgitters.png')
return discord_file
def get_prettyhandles_image(rows, font):
"""return PIL image for rankings"""
SMOKE_WHITE = (250, 250, 250)
BLACK = (0, 0, 0)
img = Image.new('RGB', (900, 450), color=SMOKE_WHITE)
draw = ImageDraw.Draw(img)
START_X, START_Y = 20, 20
Y_INC = 32
WIDTH_RANK = 64
WIDTH_NAME = 340
def draw_row(pos, username, handle, rating, color, y):
x = START_X
draw.text((x, y), pos, fill=color, font=font)
x += WIDTH_RANK
draw.text((x, y), username, fill=color, font=font)
x += WIDTH_NAME
draw.text((x, y), handle, fill=color, font=font)
x += WIDTH_NAME
draw.text((x, y), rating, fill=color, font=font)
y = START_Y
# draw header
draw_row('#', 'Username', 'Handle', 'Rating', BLACK, y)
y += int(Y_INC * 1.5)
# trim name to fit in the column width
def _trim(name):
width = WIDTH_NAME - 10
while font.getsize(name)[0] > width:
name = name[:-4] + '...' # "…" is printed as floating dots
return name
for pos, name, handle, rating in rows:
name = _trim(name)
handle = _trim(handle)
color = rating_to_color(rating)
draw_row(str(pos), name, handle, str(rating) if rating else 'N/A', color, y)
if rating and rating >= 3000: # nutella
nutella_x = START_X + WIDTH_RANK
draw.text((nutella_x, y), name[0], fill=BLACK, font=font)
nutella_x += WIDTH_NAME
draw.text((nutella_x, y), handle[0], fill=BLACK, font=font)
y += Y_INC
return img
def _make_profile_embed(member, user, *, mode):
assert mode in ('set', 'get')
if mode == 'set':
desc = f'Handle for {member.mention} successfully set to **[{user.handle}]({user.url})**'
else:
desc = f'Handle for {member.mention} is currently set to **[{user.handle}]({user.url})**'
if user.rating is None:
embed = discord.Embed(description=desc)
embed.add_field(name='Rating', value='Unrated', inline=True)
else:
embed = discord.Embed(description=desc, color=user.rank.color_embed)
embed.add_field(name='Rating', value=user.rating, inline=True)
embed.add_field(name='Rank', value=user.rank.title, inline=True)
embed.set_thumbnail(url=f'https:{user.titlePhoto}')
return embed
def _make_pages(users, title):
chunks = paginator.chunkify(users, _HANDLES_PER_PAGE)
pages = []
done = 0
style = table.Style('{:>} {:<} {:<} {:<}')
for chunk in chunks:
t = table.Table(style)
t += table.Header('#', 'Name', 'Handle', 'Rating')
t += table.Line()
for i, (member, handle, rating) in enumerate(chunk):
name = member.display_name
if len(name) > _NAME_MAX_LEN:
name = name[:_NAME_MAX_LEN - 1] + '…'
rank = cf.rating2rank(rating)
rating_str = 'N/A' if rating is None else str(rating)
t += table.Data(i + done, name, handle, f'{rating_str} ({rank.title_abbr})')
table_str = '```\n'+str(t)+'\n```'
embed = discord_common.cf_color_embed(description=table_str)
pages.append((title, embed))
done += len(chunk)
return pages
class Handles(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.logger = logging.getLogger(self.__class__.__name__)
self.font = ImageFont.truetype(constants.NOTO_SANS_CJK_BOLD_FONT_PATH, size=26) # font for ;handle pretty
@commands.Cog.listener()
@discord_common.once
async def on_ready(self):
cf_common.event_sys.add_listener(self._on_rating_changes)
self._set_ex_users_inactive_task.start()
@commands.Cog.listener()
async def on_member_remove(self, member):
cf_common.user_db.set_inactive([(member.guild.id, member.id)])
@commands.command(brief='update status, mark guild members as active')
@commands.has_role('Admin')
async def _updatestatus(self, ctx):
gid = ctx.guild.id
active_ids = [m.id for m in ctx.guild.members]
cf_common.user_db.reset_status(gid)
rc = sum(cf_common.user_db.update_status(gid, chunk) for chunk in paginator.chunkify(active_ids, 100))
await ctx.send(f'{rc} members active with handle')
@commands.Cog.listener()
async def on_member_join(self, member):
rc = cf_common.user_db.update_status(member.guild.id, [member.id])
if rc == 1:
handle = cf_common.user_db.get_handle(member.id, member.guild.id)
await self._update_ranks(member.guild, [(int(member.id), handle)])
@tasks.task_spec(name='SetExUsersInactive',
waiter=tasks.Waiter.fixed_delay(_UPDATE_HANDLE_STATUS_INTERVAL))
async def _set_ex_users_inactive_task(self, _):
# To set users inactive in case the bot was dead when they left.
to_set_inactive = []
for guild in self.bot.guilds:
user_id_handle_pairs = cf_common.user_db.get_handles_for_guild(guild.id)
to_set_inactive += [(guild.id, user_id) for user_id, _ in user_id_handle_pairs
if guild.get_member(user_id) is None]
cf_common.user_db.set_inactive(to_set_inactive)
@events.listener_spec(name='RatingChangesListener',
event_cls=events.RatingChangesUpdate,
with_lock=True)
async def _on_rating_changes(self, event):
contest, changes = event.contest, event.rating_changes
change_by_handle = {change.handle: change for change in changes}
async def update_for_guild(guild):
if cf_common.user_db.has_auto_role_update_enabled(guild.id):
with contextlib.suppress(HandleCogError):
await self._update_ranks_all(guild)
channel_id = cf_common.user_db.get_rankup_channel(guild.id)
channel = guild.get_channel(channel_id)
if channel is not None:
with contextlib.suppress(HandleCogError):
embeds = self._make_rankup_embeds(guild, contest, change_by_handle)
for embed in embeds:
await channel.send(embed=embed)
await asyncio.gather(*(update_for_guild(guild) for guild in self.bot.guilds),
return_exceptions=True)
self.logger.info(f'All guilds updated for contest {contest.id}.')
@commands.group(brief='Commands that have to do with handles', invoke_without_command=True)
async def handle(self, ctx):
"""Change or collect information about specific handles on Codeforces"""
await ctx.send_help(ctx.command)
@staticmethod
async def update_member_rank_role(member, role_to_assign, *, reason):
"""Sets the `member` to only have the rank role of `role_to_assign`. All other rank roles
on the member, if any, will be removed. If `role_to_assign` is None all existing rank roles
on the member will be removed.
"""
role_names_to_remove = {rank.title for rank in cf.RATED_RANKS}
if role_to_assign is not None:
role_names_to_remove.discard(role_to_assign.name)
if role_to_assign.name not in ['Newbie', 'Pupil', 'Specialist', 'Expert']:
role_names_to_remove.add('Purgatory')
to_remove = [role for role in member.roles if role.name in role_names_to_remove]
if to_remove:
await member.remove_roles(*to_remove, reason=reason)
if role_to_assign is not None and role_to_assign not in member.roles:
await member.add_roles(role_to_assign, reason=reason)
@handle.command(brief='Set Codeforces handle of a user')
@commands.has_any_role('Admin', 'Moderator')
async def set(self, ctx, member: discord.Member, handle: str):
"""Set Codeforces handle of a user."""
# CF API returns correct handle ignoring case, update to it
user, = await cf.user.info(handles=[handle])
await self._set(ctx, member, user)
embed = _make_profile_embed(member, user, mode='set')
await ctx.send(embed=embed)
async def _set(self, ctx, member, user):
handle = user.handle
try:
cf_common.user_db.set_handle(member.id, ctx.guild.id, handle)
except db.UniqueConstraintFailed:
raise HandleCogError(f'The handle `{handle}` is already associated with another user.')
cf_common.user_db.cache_cf_user(user)
if user.rank == cf.UNRATED_RANK:
role_to_assign = None
else:
roles = [role for role in ctx.guild.roles if role.name == user.rank.title]
if not roles:
raise HandleCogError(f'Role for rank `{user.rank.title}` not present in the server')
role_to_assign = roles[0]
await self.update_member_rank_role(member, role_to_assign,
reason='New handle set for user')
@handle.command(brief='Identify yourself', usage='[handle]')
@cf_common.user_guard(group='handle',
get_exception=lambda: HandleCogError('Identification is already running for you'))
async def identify(self, ctx, handle: str):
"""Link a codeforces account to discord account by submitting a compile error to a random problem"""
if cf_common.user_db.get_handle(ctx.author.id, ctx.guild.id):
raise HandleCogError(f'{ctx.author.mention}, you cannot identify when your handle is '
'already set. Ask an Admin or Moderator if you wish to change it')
if cf_common.user_db.get_user_id(handle, ctx.guild.id):
raise HandleCogError(f'The handle `{handle}` is already associated with another user. Ask an Admin or Moderator in case of an inconsistency.')
if handle in cf_common.HandleIsVjudgeError.HANDLES:
raise cf_common.HandleIsVjudgeError(handle)
users = await cf.user.info(handles=[handle])
invoker = str(ctx.author)
handle = users[0].handle
problems = [prob for prob in cf_common.cache2.problem_cache.problems
if prob.rating <= 1200]
problem = random.choice(problems)
await ctx.send(f'`{invoker}`, submit a compile error to <{problem.url}> within 60 seconds')
await asyncio.sleep(60)
subs = await cf.user.status(handle=handle, count=5)
if any(sub.problem.name == problem.name and sub.verdict == 'COMPILATION_ERROR' for sub in subs):
user, = await cf.user.info(handles=[handle])
await self._set(ctx, ctx.author, user)
embed = _make_profile_embed(ctx.author, user, mode='set')
await ctx.send(embed=embed)
else:
await ctx.send(f'Sorry `{invoker}`, can you try again?')
@handle.command(brief='Get handle by Discord username')
async def get(self, ctx, member: discord.Member):
"""Show Codeforces handle of a user."""
handle = cf_common.user_db.get_handle(member.id, ctx.guild.id)
if not handle:
raise HandleCogError(f'Handle for {member.mention} not found in database')
user = cf_common.user_db.fetch_cf_user(handle)
embed = _make_profile_embed(member, user, mode='get')
await ctx.send(embed=embed)
@handle.command(brief='Get Discord username by cf handle')
async def rget(self, ctx, handle: str):
"""Show Discord username of a cf handle."""
user_id = cf_common.user_db.get_user_id(handle, ctx.guild.id)
if not user_id:
raise HandleCogError(f'Discord username for `{handle}` not found in database')
user = cf_common.user_db.fetch_cf_user(handle)
member = ctx.guild.get_member(user_id)
embed = _make_profile_embed(member, user, mode='get')
await ctx.send(embed=embed)
@handle.command(brief='Remove handle for a user')
@commands.has_any_role('Admin', 'Moderator')
async def remove(self, ctx, member: discord.Member):
"""Remove Codeforces handle of a user."""
rc = cf_common.user_db.remove_handle(member.id, ctx.guild.id)
if not rc:
raise HandleCogError(f'Handle for {member.mention} not found in database')
await self.update_member_rank_role(member, role_to_assign=None,
reason='Handle removed for user')
embed = discord_common.embed_success(f'Removed handle for {member.mention}')
await ctx.send(embed=embed)
@handle.command(brief='Resolve redirect of a user\'s handle')
async def unmagic(self, ctx):
"""Updates handle of the calling user if they have changed handles
(typically new year's magic)"""
member = ctx.author
handle = cf_common.user_db.get_handle(member.id, ctx.guild.id)
await self._unmagic_handles(ctx, [handle], {handle: member})
@handle.command(brief='Resolve handles needing redirection')
@commands.has_any_role('Admin', 'Moderator')
async def unmagic_all(self, ctx):
"""Updates handles of all users that have changed handles
(typically new year's magic)"""
user_id_and_handles = cf_common.user_db.get_handles_for_guild(ctx.guild.id)
handles = []
rev_lookup = {}
for user_id, handle in user_id_and_handles:
member = ctx.guild.get_member(user_id)
handles.append(handle)
rev_lookup[handle] = member
await self._unmagic_handles(ctx, handles, rev_lookup)
async def _unmagic_handles(self, ctx, handles, rev_lookup):
handle_cf_user_mapping = await cf.resolve_redirects(handles)
mapping = {(rev_lookup[handle], handle): cf_user
for handle, cf_user in handle_cf_user_mapping.items()}
summary_embed = await self._fix_and_report(ctx, mapping)
await ctx.send(embed=summary_embed)
async def _fix_and_report(self, ctx, redirections):
fixed = []
failed = []
for (member, handle), cf_user in redirections.items():
if not cf_user:
failed.append(handle)
else:
await self._set(ctx, member, cf_user)
fixed.append((handle, cf_user.handle))
# Return summary embed
lines = []
if not fixed and not failed:
return discord_common.embed_success('No handles updated')
if fixed:
lines.append('**Fixed**')
lines += (f'{old} -> {new}' for old, new in fixed)
if failed:
lines.append('**Failed**')
lines += failed
return discord_common.embed_success('\n'.join(lines))
@commands.command(brief="Show gudgitters", aliases=["gitgudders"])
async def gudgitters(self, ctx):
"""Show the list of users of gitgud with their scores."""
res = cf_common.user_db.get_gudgitters()
res.sort(key=lambda r: r[1], reverse=True)
rankings = []
index = 0
for user_id, score in res:
member = ctx.guild.get_member(int(user_id))
if member is None:
continue
if score > 0:
handle = cf_common.user_db.get_handle(user_id, ctx.guild.id)
user = cf_common.user_db.fetch_cf_user(handle)
if user is None:
continue
discord_handle = member.display_name
rating = user.rating
rankings.append((index, discord_handle, handle, rating, score))
index += 1
if index == 10:
break
if not rankings:
raise HandleCogError('No one has completed a gitgud challenge, send ;gitgud to request and ;gotgud to mark it as complete')
discord_file = get_gudgitters_image(rankings)
await ctx.send(file=discord_file)
@handle.command(brief="Show all handles")
async def list(self, ctx, *countries):
"""Shows members of the server who have registered their handles and
their Codeforces ratings. You can additionally specify a list of countries
if you wish to display only members from those countries. Country data is
sourced from codeforces profiles. e.g. ;handle list Croatia Slovenia
"""
countries = [country.title() for country in countries]
res = cf_common.user_db.get_cf_users_for_guild(ctx.guild.id)
users = [(ctx.guild.get_member(user_id), cf_user.handle, cf_user.rating)
for user_id, cf_user in res if not countries or cf_user.country in countries]
users = [(member, handle, rating) for member, handle, rating in users if member is not None]
if not users:
raise HandleCogError('No members with registered handles.')
users.sort(key=lambda x: (1 if x[2] is None else -x[2], x[1])) # Sorting by (-rating, handle)
title = 'Handles of server members'
if countries:
title += ' from ' + ', '.join(f'`{country}`' for country in countries)
pages = _make_pages(users, title)
paginator.paginate(self.bot, ctx.channel, pages, wait_time=_PAGINATE_WAIT_TIME,
set_pagenum_footers=True)
@handle.command(brief="Show handles, but prettier")
async def pretty(self, ctx, page_no: int = None):
"""Show members of the server who have registered their handles and their Codeforces
ratings, in color.
"""
user_id_cf_user_pairs = cf_common.user_db.get_cf_users_for_guild(ctx.guild.id)
user_id_cf_user_pairs.sort(key=lambda p: p[1].rating if p[1].rating is not None else -1,
reverse=True)
rows = []
author_idx = None
for user_id, cf_user in user_id_cf_user_pairs:
member = ctx.guild.get_member(user_id)
if member is None:
continue
idx = len(rows)
if member == ctx.author:
author_idx = idx
rows.append((idx, member.display_name, cf_user.handle, cf_user.rating))
if not rows:
raise HandleCogError('No members with registered handles.')
max_page = math.ceil(len(rows) / _PRETTY_HANDLES_PER_PAGE) - 1
if author_idx is None and page_no is None:
raise HandleCogError(f'Please specify a page number between 0 and {max_page}.')
msg = None
if page_no is not None:
if page_no < 0 or max_page < page_no:
msg_fmt = 'Page number must be between 0 and {}. Showing page {}.'
if page_no < 0:
msg = msg_fmt.format(max_page, 0)
page_no = 0
else:
msg = msg_fmt.format(max_page, max_page)
page_no = max_page
start_idx = page_no * _PRETTY_HANDLES_PER_PAGE
else:
msg = f'Showing neighbourhood of user `{ctx.author.display_name}`.'
num_before = (_PRETTY_HANDLES_PER_PAGE - 1) // 2
start_idx = max(0, author_idx - num_before)
rows_to_display = rows[start_idx : start_idx + _PRETTY_HANDLES_PER_PAGE]
img = get_prettyhandles_image(rows_to_display, self.font)
buffer = io.BytesIO()
img.save(buffer, 'png')
buffer.seek(0)
await ctx.send(msg, file=discord.File(buffer, 'handles.png'))
async def _update_ranks_all(self, guild):
"""For each member in the guild, fetches their current ratings and updates their role if
required.
"""
res = cf_common.user_db.get_handles_for_guild(guild.id)
await self._update_ranks(guild, res)
async def _update_ranks(self, guild, res):
member_handles = [(guild.get_member(user_id), handle) for user_id, handle in res]
member_handles = [(member, handle) for member, handle in member_handles if member is not None]
if not member_handles:
raise HandleCogError('Handles not set for any user')
members, handles = zip(*member_handles)
users = await cf.user.info(handles=handles)
for user in users:
cf_common.user_db.cache_cf_user(user)
required_roles = {user.rank.title for user in users if user.rank != cf.UNRATED_RANK}
rank2role = {role.name: role for role in guild.roles if role.name in required_roles}
missing_roles = required_roles - rank2role.keys()
if missing_roles:
roles_str = ', '.join(f'`{role}`' for role in missing_roles)
plural = 's' if len(missing_roles) > 1 else ''
raise HandleCogError(f'Role{plural} for rank{plural} {roles_str} not present in the server')
for member, user in zip(members, users):
role_to_assign = None if user.rank == cf.UNRATED_RANK else rank2role[user.rank.title]
await self.update_member_rank_role(member, role_to_assign,
reason='Codeforces rank update')
@staticmethod
def _make_rankup_embeds(guild, contest, change_by_handle):
"""Make an embed containing a list of rank changes and top rating increases for the members
of this guild.
"""
user_id_handle_pairs = cf_common.user_db.get_handles_for_guild(guild.id)
member_handle_pairs = [(guild.get_member(user_id), handle)
for user_id, handle in user_id_handle_pairs]
def ispurg(member):
# TODO: temporary code, todo properly later
return any(role.name == 'Purgatory' for role in member.roles)
member_change_pairs = [(member, change_by_handle[handle])
for member, handle in member_handle_pairs
if member is not None and handle in change_by_handle and not ispurg(member)]
if not member_change_pairs:
raise HandleCogError(f'Contest `{contest.id} | {contest.name}` was not rated for any '
'member of this server.')
member_change_pairs.sort(key=lambda pair: pair[1].newRating, reverse=True)
rank_to_role = {role.name: role for role in guild.roles}
def rating_to_displayable_rank(rating):
rank = cf.rating2rank(rating).title
role = rank_to_role.get(rank)
return role.mention if role else rank
rank_changes_str = []
for member, change in member_change_pairs:
cache = cf_common.cache2.rating_changes_cache
if (change.oldRating == 1500
and len(cache.get_rating_changes_for_handle(change.handle)) == 1):
# If this is the user's first rated contest.
old_role = 'Unrated'
else:
old_role = rating_to_displayable_rank(change.oldRating)
new_role = rating_to_displayable_rank(change.newRating)
if new_role != old_role:
rank_change_str = (f'{member.mention} [{change.handle}]({cf.PROFILE_BASE_URL}{change.handle}): {old_role} '
f'\N{LONG RIGHTWARDS ARROW} {new_role}')
rank_changes_str.append(rank_change_str)
member_change_pairs.sort(key=lambda pair: pair[1].newRating - pair[1].oldRating,
reverse=True)
top_increases_str = []
for member, change in member_change_pairs[:_TOP_DELTAS_COUNT]:
delta = change.newRating - change.oldRating
if delta <= 0:
break
increase_str = (f'{member.mention} [{change.handle}]({cf.PROFILE_BASE_URL}{change.handle}): {change.oldRating} '
f'\N{HORIZONTAL BAR} **{delta:+}** \N{LONG RIGHTWARDS ARROW} '
f'{change.newRating}')
top_increases_str.append(increase_str)
rank_changes_str = rank_changes_str or ['No rank changes']
embed_heading = discord.Embed(
title=contest.name, url=contest.url, description="")
embed_heading.set_author(name="Rank updates")
embeds = [embed_heading]
for rank_changes_chunk in paginator.chunkify(
rank_changes_str, _MAX_RATING_CHANGES_PER_EMBED):
desc = '\n'.join(rank_changes_chunk)
embed = discord.Embed(description=desc)
embeds.append(embed)
top_rating_increases_embed = discord.Embed(description='\n'.join(
top_increases_str) or 'Nobody got a positive delta :(')
top_rating_increases_embed.set_author(name='Top rating increases')
embeds.append(top_rating_increases_embed)
discord_common.set_same_cf_color(embeds)
return embeds
@commands.group(brief='Commands for role updates',
invoke_without_command=True)
async def roleupdate(self, ctx):
"""Group for commands involving role updates."""
await ctx.send_help(ctx.command)
@roleupdate.command(brief='Update Codeforces rank roles')
@commands.has_any_role('Admin', 'Moderator')
async def now(self, ctx):
"""Updates Codeforces rank roles for every member in this server."""
await self._update_ranks_all(ctx.guild)
await ctx.send(embed=discord_common.embed_success('Roles updated successfully.'))
@roleupdate.command(brief='Enable or disable auto role updates',
usage='on|off')
@commands.has_any_role('Admin', 'Moderator')
async def auto(self, ctx, arg):
"""Auto role update refers to automatic updating of rank roles when rating
changes are released on Codeforces. 'on'/'off' disables or enables auto role
updates.
"""
if arg == 'on':
rc = cf_common.user_db.enable_auto_role_update(ctx.guild.id)
if not rc:
raise HandleCogError('Auto role update is already enabled.')
await ctx.send(embed=discord_common.embed_success('Auto role updates enabled.'))
elif arg == 'off':
rc = cf_common.user_db.disable_auto_role_update(ctx.guild.id)
if not rc:
raise HandleCogError('Auto role update is already disabled.')
await ctx.send(embed=discord_common.embed_success('Auto role updates disabled.'))
else:
raise ValueError(f"arg must be 'on' or 'off', got '{arg}' instead.")
@roleupdate.command(brief='Publish a rank update for the given contest',
usage='here|off|contest_id')
@commands.has_any_role('Admin', 'Moderator')
async def publish(self, ctx, arg):
"""This is a feature to publish a summary of rank changes and top rating
increases in a particular contest for members of this server. 'here' will
automatically publish the summary to this channel whenever rating changes on
Codeforces are released. 'off' will disable auto publishing. Specifying a
contest id will publish the summary immediately.
"""
if arg == 'here':
cf_common.user_db.set_rankup_channel(ctx.guild.id, ctx.channel.id)
await ctx.send(
embed=discord_common.embed_success('Auto rank update publishing enabled.'))
elif arg == 'off':
rc = cf_common.user_db.clear_rankup_channel(ctx.guild.id)
if not rc:
raise HandleCogError('Rank update publishing is already disabled.')
await ctx.send(embed=discord_common.embed_success('Rank update publishing disabled.'))
else:
try:
contest_id = int(arg)
except ValueError:
raise ValueError(f"arg must be 'here', 'off' or a contest ID, got '{arg}' instead.")
await self._publish_now(ctx, contest_id)
async def _publish_now(self, ctx, contest_id):
try:
contest = cf_common.cache2.contest_cache.get_contest(contest_id)
except cache_system2.ContestNotFound as e:
raise HandleCogError(f'Contest with id `{e.contest_id}` not found.')
if contest.phase != 'FINISHED':
raise HandleCogError(f'Contest `{contest_id} | {contest.name}` has not finished.')
try:
changes = await cf.contest.ratingChanges(contest_id=contest_id)
except cf.RatingChangesUnavailableError:
changes = None
if not changes:
raise HandleCogError(f'Rating changes are not available for contest `{contest_id} | '
f'{contest.name}`.')
change_by_handle = {change.handle: change for change in changes}
rankup_embeds = self._make_rankup_embeds(ctx.guild, contest, change_by_handle)
for rankup_embed in rankup_embeds:
await ctx.channel.send(embed=rankup_embed)
async def _generic_remind(self, ctx, action, role_name, what):
roles = [role for role in ctx.guild.roles if role.name == role_name]
if not roles:
raise HandleCogError(f'Role `{role_name}` not present in the server')
role = roles[0]
if action == 'give':
if role in ctx.author.roles:
await ctx.send(embed=discord_common.embed_neutral(f'You are already subscribed to {what} reminders'))
return
await ctx.author.add_roles(role, reason=f'User subscribed to {what} reminders')
await ctx.send(embed=discord_common.embed_success(f'Successfully subscribed to {what} reminders'))
elif action == 'remove':
if role not in ctx.author.roles:
await ctx.send(embed=discord_common.embed_neutral(f'You are not subscribed to {what} reminders'))
return
await ctx.author.remove_roles(role, reason=f'User unsubscribed from {what} reminders')
await ctx.send(embed=discord_common.embed_success(f'Successfully unsubscribed from {what} reminders'))
else:
raise HandleCogError(f'Invalid action {action}')
@commands.command(brief='Grants or removes the specified pingable role',
usage='[give/remove] [vc/duel]')
async def role(self, ctx, action: str, which: str):
"""e.g. ;role remove duel"""
if which == 'vc':
await self._generic_remind(ctx, action, 'Virtual Contestant', 'vc')
elif which == 'duel':
await self._generic_remind(ctx, action, 'Duelist', 'duel')
else:
raise HandleCogError(f'Invalid role {which}')
@discord_common.send_error_if(HandleCogError, cf_common.HandleIsVjudgeError)
async def cog_command_error(self, ctx, error):
pass
def setup(bot):
bot.add_cog(Handles(bot))
|
the-stack_106_27553 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper functions for running models in a distributed setting."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import string
import tensorflow as tf
def get_distribution_strategy(num_gpus,
all_reduce_alg=None,
turn_off_distribution_strategy=False):
"""Return a DistributionStrategy for running the model.
Args:
num_gpus: Number of GPUs to run this model.
all_reduce_alg: Specify which algorithm to use when performing all-reduce.
See tf.contrib.distribute.AllReduceCrossDeviceOps for available
algorithms. If None, DistributionStrategy will choose based on device
topology.
turn_off_distribution_strategy: when set to True, do not use any
distribution strategy. Note that when it is True, and num_gpus is
larger than 1, it will raise a ValueError.
Returns:
    tf.contrib.distribute.DistributionStrategy object.
Raises:
ValueError: if turn_off_distribution_strategy is True and num_gpus is
larger than 1
"""
if num_gpus == 0:
if turn_off_distribution_strategy:
return None
else:
return tf.contrib.distribute.OneDeviceStrategy('device:CPU:0')
elif num_gpus == 1:
if turn_off_distribution_strategy:
return None
else:
return tf.contrib.distribute.OneDeviceStrategy('device:GPU:0')
elif turn_off_distribution_strategy:
raise ValueError('When {} GPUs are specified, '
                     'turn_off_distribution_strategy flag cannot be set to '
'True.'.format(num_gpus))
else: # num_gpus > 1 and not turn_off_distribution_strategy
devices = ['device:GPU:%d' % i for i in range(num_gpus)]
if all_reduce_alg:
return tf.distribute.MirroredStrategy(
devices=devices,
cross_device_ops=tf.contrib.distribute.AllReduceCrossDeviceOps(
all_reduce_alg, num_packs=2))
else:
return tf.distribute.MirroredStrategy(devices=devices)
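

# Illustrative usage (the argument values below are assumptions for demonstration):
#   strategy = get_distribution_strategy(num_gpus=2, all_reduce_alg='nccl')
#   with strategy.scope():
#     model = build_model()  # hypothetical model-construction helper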
def per_device_batch_size(batch_size, num_gpus):
"""For multi-gpu, batch-size must be a multiple of the number of GPUs.
Note that distribution strategy handles this automatically when used with
Keras. For using with Estimator, we need to get per GPU batch.
Args:
batch_size: Global batch size to be divided among devices. This should be
equal to num_gpus times the single-GPU batch_size for multi-gpu training.
num_gpus: How many GPUs are used with DistributionStrategies.
Returns:
Batch size per device.
Raises:
ValueError: if batch_size is not divisible by number of devices
"""
if num_gpus <= 1:
return batch_size
remainder = batch_size % num_gpus
if remainder:
err = ('When running with multiple GPUs, batch size '
'must be a multiple of the number of available GPUs. Found {} '
'GPUs with a batch size of {}; try --batch_size={} instead.'
).format(num_gpus, batch_size, batch_size - remainder)
raise ValueError(err)
return int(batch_size / num_gpus)
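

# For example, per_device_batch_size(256, num_gpus=4) returns 64, while
# per_device_batch_size(100, num_gpus=8) raises a ValueError because 100 is not
# divisible by 8.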
# The `SyntheticDataset` is a temporary solution for generating synthetic data
# directly on devices. It is only useful for Keras with Distribution
# Strategies. We will have better support in `tf.data` or Distribution Strategy
# later.
class SyntheticDataset(object):
"""A dataset that generates synthetic data on each device."""
def __init__(self, dataset, split_by=1):
self._input_data = {}
# dataset.take(1) doesn't have GPU kernel.
with tf.device('device:CPU:0'):
tensor = tf.data.experimental.get_single_element(dataset.take(1))
flat_tensor = tf.nest.flatten(tensor)
variable_data = []
self._initializers = []
for t in flat_tensor:
rebatched_t = tf.split(t, num_or_size_splits=split_by, axis=0)[0]
assert rebatched_t.shape.is_fully_defined(), rebatched_t.shape
v = tf.compat.v1.get_local_variable(self.random_name(),
initializer=rebatched_t)
variable_data.append(v)
self._initializers.append(v.initializer)
self._input_data = tf.nest.pack_sequence_as(tensor, variable_data)
def get_next(self):
return self._input_data
def initialize(self):
if tf.executing_eagerly():
return tf.no_op()
else:
return self._initializers
def random_name(self, size=10, chars=string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for _ in range(size))
def _monkey_patch_dataset_method(strategy):
"""Monkey-patch `strategy`'s `make_dataset_iterator` method."""
def make_dataset_iterator(self, dataset):
tf.compat.v1.logging.info('Using pure synthetic data.')
with self.scope():
if self.extended._global_batch_size: # pylint: disable=protected-access
return SyntheticDataset(dataset, self.num_replicas_in_sync)
else:
return SyntheticDataset(dataset)
strategy.org_make_dataset_iterator = strategy.make_dataset_iterator
strategy.make_dataset_iterator = make_dataset_iterator
def _undo_monkey_patch_dataset_method(strategy):
if hasattr(strategy, 'org_make_dataset_iterator'):
strategy.make_dataset_iterator = strategy.org_make_dataset_iterator
def set_up_synthetic_data():
_monkey_patch_dataset_method(tf.distribute.MirroredStrategy)
# TODO(tobyboyd): Remove when contrib.distribute is all in core.
if hasattr(tf, 'contrib'):
_monkey_patch_dataset_method(tf.contrib.distribute.MirroredStrategy)
_monkey_patch_dataset_method(tf.contrib.distribute.OneDeviceStrategy)
else:
print('Contrib missing: Skip monkey patch tf.contrib.distribute.*')
def undo_set_up_synthetic_data():
_undo_monkey_patch_dataset_method(tf.distribute.MirroredStrategy)
# TODO(tobyboyd): Remove when contrib.distribute is all in core.
if hasattr(tf, 'contrib'):
_undo_monkey_patch_dataset_method(tf.contrib.distribute.MirroredStrategy)
_undo_monkey_patch_dataset_method(tf.contrib.distribute.OneDeviceStrategy)
else:
print('Contrib missing: Skip remove monkey patch tf.contrib.distribute.*')
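# Illustrative sketch, not part of the original module: pairing the set-up and
# tear-down helpers above around a benchmark run that should be fed pure
# synthetic data. `run_benchmark` is a hypothetical callable supplied by the
# caller.
def _example_synthetic_run(run_benchmark):
  set_up_synthetic_data()
  try:
    return run_benchmark()
  finally:
    undo_set_up_synthetic_data()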
|
the-stack_106_27554 | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import pytest
import spack.config
import spack.modules.common
import spack.paths
import spack.spec
import spack.util.path
@pytest.fixture()
def modulefile_content(request):
"""Returns a function that generates the content of a module file
as a list of lines.
"""
writer_cls = getattr(request.module, 'writer_cls')
def _impl(spec_str, module_set_name='default'):
# Write the module file
spec = spack.spec.Spec(spec_str)
spec.concretize()
generator = writer_cls(spec, module_set_name)
generator.write(overwrite=True)
# Get its filename
filename = generator.layout.filename
# Retrieve the content
with open(filename) as f:
content = f.readlines()
content = ''.join(content).split('\n')
generator.remove()
return content
return _impl
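# Illustrative sketch, not part of the original fixtures: a test module that
# defines a `writer_cls` attribute could consume the fixture above like this.
# The spec string and the asserted text are hypothetical.
def _example_modulefile_check(modulefile_content):
    content = modulefile_content('mpileaks')
    assert any('mpileaks' in line for line in content)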
@pytest.fixture()
def update_template_dirs(config, monkeypatch):
"""Mocks the template directories for tests"""
dirs = spack.config.get_config('config')['template_dirs']
dirs = [spack.util.path.canonicalize_path(x) for x in dirs]
monkeypatch.setattr(spack, 'template_dirs', dirs)
@pytest.fixture()
def factory(request):
"""Function that, given a spec string, returns an instance of the writer
and the corresponding spec.
"""
# Class of the module file writer
writer_cls = getattr(request.module, 'writer_cls')
def _mock(spec_string, module_set_name='default'):
spec = spack.spec.Spec(spec_string)
spec.concretize()
return writer_cls(spec, module_set_name), spec
return _mock
|
the-stack_106_27555 | # Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from oslo_config import cfg
from oslo_log import log as logging
from taskflow.patterns import linear_flow
from taskflow.patterns import unordered_flow
from octavia.api.drivers import utils as provider_utils
from octavia.common import constants
from octavia.common import exceptions
from octavia.controller.worker.v2.flows import amphora_flows
from octavia.controller.worker.v2.flows import listener_flows
from octavia.controller.worker.v2.flows import member_flows
from octavia.controller.worker.v2.flows import pool_flows
from octavia.controller.worker.v2.tasks import amphora_driver_tasks
from octavia.controller.worker.v2.tasks import compute_tasks
from octavia.controller.worker.v2.tasks import database_tasks
from octavia.controller.worker.v2.tasks import lifecycle_tasks
from octavia.controller.worker.v2.tasks import network_tasks
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class LoadBalancerFlows(object):
def __init__(self):
self.amp_flows = amphora_flows.AmphoraFlows()
self.listener_flows = listener_flows.ListenerFlows()
self.pool_flows = pool_flows.PoolFlows()
self.member_flows = member_flows.MemberFlows()
def get_create_load_balancer_flow(self, topology, listeners=None):
"""Creates a conditional graph flow that allocates a loadbalancer to
two spare amphorae.
:raises InvalidTopology: Invalid topology specified
:return: The graph flow for creating a loadbalancer.
"""
f_name = constants.CREATE_LOADBALANCER_FLOW
lb_create_flow = linear_flow.Flow(f_name)
lb_create_flow.add(lifecycle_tasks.LoadBalancerIDToErrorOnRevertTask(
requires=constants.LOADBALANCER_ID))
# allocate VIP
lb_create_flow.add(database_tasks.ReloadLoadBalancer(
name=constants.RELOAD_LB_BEFOR_ALLOCATE_VIP,
requires=constants.LOADBALANCER_ID,
provides=constants.LOADBALANCER
))
lb_create_flow.add(network_tasks.AllocateVIP(
requires=constants.LOADBALANCER,
provides=constants.VIP))
lb_create_flow.add(database_tasks.UpdateVIPAfterAllocation(
requires=(constants.LOADBALANCER_ID, constants.VIP),
provides=constants.LOADBALANCER))
lb_create_flow.add(network_tasks.UpdateVIPSecurityGroup(
requires=constants.LOADBALANCER))
lb_create_flow.add(network_tasks.GetSubnetFromVIP(
requires=constants.LOADBALANCER,
provides=constants.SUBNET))
if topology == constants.TOPOLOGY_ACTIVE_STANDBY:
lb_create_flow.add(*self._create_active_standby_topology())
elif topology == constants.TOPOLOGY_SINGLE:
lb_create_flow.add(*self._create_single_topology())
else:
LOG.error("Unknown topology: %s. Unable to build load balancer.",
topology)
raise exceptions.InvalidTopology(topology=topology)
post_amp_prefix = constants.POST_LB_AMP_ASSOCIATION_SUBFLOW
lb_create_flow.add(
self.get_post_lb_amp_association_flow(
post_amp_prefix, topology, mark_active=(not listeners)))
if listeners:
lb_create_flow.add(*self._create_listeners_flow())
return lb_create_flow
def _create_single_topology(self):
return (self.amp_flows.get_amphora_for_lb_subflow(
prefix=constants.ROLE_STANDALONE,
role=constants.ROLE_STANDALONE), )
def _create_active_standby_topology(
self, lf_name=constants.CREATE_LOADBALANCER_FLOW):
# When we boot up amphora for an active/standby topology,
# we should leverage the Nova anti-affinity capabilities
# to place the amphorae on different hosts; we also need to check
# whether the anti-affinity flag is enabled:
anti_affinity = CONF.nova.enable_anti_affinity
flows = []
if anti_affinity:
# we need to create a server group first
flows.append(
compute_tasks.NovaServerGroupCreate(
name=lf_name + '-' +
constants.CREATE_SERVER_GROUP_FLOW,
requires=(constants.LOADBALANCER_ID),
provides=constants.SERVER_GROUP_ID))
# update server group id in lb table
flows.append(
database_tasks.UpdateLBServerGroupInDB(
name=lf_name + '-' +
constants.UPDATE_LB_SERVERGROUPID_FLOW,
requires=(constants.LOADBALANCER_ID,
constants.SERVER_GROUP_ID)))
f_name = constants.CREATE_LOADBALANCER_FLOW
amps_flow = unordered_flow.Flow(f_name)
master_amp_sf = self.amp_flows.get_amphora_for_lb_subflow(
prefix=constants.ROLE_MASTER, role=constants.ROLE_MASTER
)
backup_amp_sf = self.amp_flows.get_amphora_for_lb_subflow(
prefix=constants.ROLE_BACKUP, role=constants.ROLE_BACKUP)
amps_flow.add(master_amp_sf, backup_amp_sf)
return flows + [amps_flow]
def _create_listeners_flow(self):
flows = []
flows.append(
database_tasks.ReloadLoadBalancer(
name=constants.RELOAD_LB_AFTER_AMP_ASSOC_FULL_GRAPH,
requires=constants.LOADBALANCER_ID,
provides=constants.LOADBALANCER
)
)
flows.append(
network_tasks.CalculateDelta(
requires=constants.LOADBALANCER, provides=constants.DELTAS
)
)
flows.append(
network_tasks.HandleNetworkDeltas(
requires=constants.DELTAS, provides=constants.ADDED_PORTS
)
)
flows.append(
amphora_driver_tasks.AmphoraePostNetworkPlug(
requires=(constants.LOADBALANCER, constants.ADDED_PORTS)
)
)
flows.append(
self.listener_flows.get_create_all_listeners_flow()
)
flows.append(
database_tasks.MarkLBActiveInDB(
mark_subobjects=True,
requires=constants.LOADBALANCER
)
)
return flows
def get_post_lb_amp_association_flow(self, prefix, topology,
mark_active=True):
"""Reload the loadbalancer and create networking subflows for
created/allocated amphorae.
:return: Post amphorae association subflow
"""
# Note: If any task in this flow fails, the created amphorae will be
# left ''incorrectly'' allocated to the loadbalancer. The
# get_new_LB_networking_subflow is likely the most prone to failure;
# on failure it should deallocate the amphorae from the loadbalancer
# and put them back in a READY state.
sf_name = prefix + '-' + constants.POST_LB_AMP_ASSOCIATION_SUBFLOW
post_create_LB_flow = linear_flow.Flow(sf_name)
post_create_LB_flow.add(
database_tasks.ReloadLoadBalancer(
name=sf_name + '-' + constants.RELOAD_LB_AFTER_AMP_ASSOC,
requires=constants.LOADBALANCER_ID,
provides=constants.LOADBALANCER))
if topology == constants.TOPOLOGY_ACTIVE_STANDBY:
vrrp_subflow = self.amp_flows.get_vrrp_subflow(prefix)
post_create_LB_flow.add(vrrp_subflow)
post_create_LB_flow.add(database_tasks.UpdateLoadbalancerInDB(
requires=[constants.LOADBALANCER, constants.UPDATE_DICT]))
if mark_active:
post_create_LB_flow.add(database_tasks.MarkLBActiveInDB(
name=sf_name + '-' + constants.MARK_LB_ACTIVE_INDB,
requires=constants.LOADBALANCER))
return post_create_LB_flow
def _get_delete_listeners_flow(self, lb):
"""Sets up an internal delete flow
Because TaskFlow doesn't support loops, we store each listener
we want to delete in the store and then rebind it.
:param lb: load balancer
:return: (flow, store) -- flow for the deletion and store with all
the listeners stored properly
"""
listeners_delete_flow = unordered_flow.Flow('listener_delete_flow')
store = {}
for listener in lb.listeners:
listener_name = 'listener_' + listener.id
prov_listener = provider_utils.db_listener_to_provider_listener(
listener)
store[listener_name] = prov_listener.to_dict()
listeners_delete_flow.add(
self.listener_flows.get_delete_listener_internal_flow(
listener_name))
store.update({constants.LOADBALANCER_ID: lb.id,
constants.PROJECT_ID: lb.project_id})
return (listeners_delete_flow, store)
def get_delete_load_balancer_flow(self, lb):
"""Creates a flow to delete a load balancer.
:returns: The flow for deleting a load balancer
"""
return self._get_delete_load_balancer_flow(lb, False)
def _get_delete_pools_flow(self, lb):
"""Sets up an internal delete flow
Because TaskFlow doesn't support loops, we store each pool
we want to delete in the store and then rebind it.
:param lb: load balancer
:return: (flow, store) -- flow for the deletion and store with all
the pools stored properly
"""
pools_delete_flow = unordered_flow.Flow('pool_delete_flow')
store = {}
for pool in lb.pools:
pool_name = 'pool' + pool.id
store[pool_name] = pool.id
pools_delete_flow.add(
self.pool_flows.get_delete_pool_flow_internal(
pool_name))
store[constants.PROJECT_ID] = lb.project_id
return (pools_delete_flow, store)
def _get_delete_load_balancer_flow(self, lb, cascade):
store = {}
delete_LB_flow = linear_flow.Flow(constants.DELETE_LOADBALANCER_FLOW)
delete_LB_flow.add(lifecycle_tasks.LoadBalancerToErrorOnRevertTask(
requires=constants.LOADBALANCER))
delete_LB_flow.add(compute_tasks.NovaServerGroupDelete(
requires=constants.SERVER_GROUP_ID))
delete_LB_flow.add(database_tasks.MarkLBAmphoraeHealthBusy(
requires=constants.LOADBALANCER))
if cascade:
(listeners_delete, store) = self._get_delete_listeners_flow(lb)
(pools_delete, pool_store) = self._get_delete_pools_flow(lb)
store.update(pool_store)
delete_LB_flow.add(pools_delete)
delete_LB_flow.add(listeners_delete)
delete_LB_flow.add(network_tasks.UnplugVIP(
requires=constants.LOADBALANCER))
delete_LB_flow.add(network_tasks.DeallocateVIP(
requires=constants.LOADBALANCER))
delete_LB_flow.add(compute_tasks.DeleteAmphoraeOnLoadBalancer(
requires=constants.LOADBALANCER))
delete_LB_flow.add(database_tasks.MarkLBAmphoraeDeletedInDB(
requires=constants.LOADBALANCER))
delete_LB_flow.add(database_tasks.DisableLBAmphoraeHealthMonitoring(
requires=constants.LOADBALANCER))
delete_LB_flow.add(database_tasks.MarkLBDeletedInDB(
requires=constants.LOADBALANCER))
delete_LB_flow.add(database_tasks.DecrementLoadBalancerQuota(
requires=constants.LOADBALANCER))
return (delete_LB_flow, store)
def get_cascade_delete_load_balancer_flow(self, lb):
"""Creates a flow to delete a load balancer.
:returns: The flow for deleting a load balancer
"""
return self._get_delete_load_balancer_flow(lb, True)
def get_new_LB_networking_subflow(self):
"""Create a sub-flow to setup networking.
:returns: The flow to setup networking for a new amphora
"""
new_LB_net_subflow = linear_flow.Flow(constants.
LOADBALANCER_NETWORKING_SUBFLOW)
new_LB_net_subflow.add(network_tasks.AllocateVIP(
requires=constants.LOADBALANCER,
provides=constants.VIP))
new_LB_net_subflow.add(database_tasks.UpdateVIPAfterAllocation(
requires=(constants.LOADBALANCER_ID, constants.VIP),
provides=constants.LOADBALANCER))
new_LB_net_subflow.add(network_tasks.PlugVIP(
requires=constants.LOADBALANCER,
provides=constants.AMPS_DATA))
new_LB_net_subflow.add(network_tasks.ApplyQos(
requires=(constants.LOADBALANCER, constants.AMPS_DATA,
constants.UPDATE_DICT)))
new_LB_net_subflow.add(database_tasks.UpdateAmphoraeVIPData(
requires=constants.AMPS_DATA))
new_LB_net_subflow.add(database_tasks.ReloadLoadBalancer(
name=constants.RELOAD_LB_AFTER_PLUG_VIP,
requires=constants.LOADBALANCER_ID,
provides=constants.LOADBALANCER))
new_LB_net_subflow.add(network_tasks.GetAmphoraeNetworkConfigs(
requires=constants.LOADBALANCER,
provides=constants.AMPHORAE_NETWORK_CONFIG))
new_LB_net_subflow.add(amphora_driver_tasks.AmphoraePostVIPPlug(
requires=(constants.LOADBALANCER,
constants.AMPHORAE_NETWORK_CONFIG)))
return new_LB_net_subflow
def get_update_load_balancer_flow(self):
"""Creates a flow to update a load balancer.
:returns: The flow for update a load balancer
"""
update_LB_flow = linear_flow.Flow(constants.UPDATE_LOADBALANCER_FLOW)
update_LB_flow.add(lifecycle_tasks.LoadBalancerToErrorOnRevertTask(
requires=constants.LOADBALANCER))
update_LB_flow.add(network_tasks.ApplyQos(
requires=(constants.LOADBALANCER, constants.UPDATE_DICT)))
update_LB_flow.add(amphora_driver_tasks.ListenersUpdate(
requires=constants.LOADBALANCER_ID))
update_LB_flow.add(database_tasks.UpdateLoadbalancerInDB(
requires=[constants.LOADBALANCER, constants.UPDATE_DICT]))
update_LB_flow.add(database_tasks.MarkLBActiveInDB(
requires=constants.LOADBALANCER))
return update_LB_flow
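# Illustrative sketch, not part of the original module: composing a cascade
# delete flow and handing it to a TaskFlow engine, roughly as the Octavia
# controller worker does. `db_lb` is a hypothetical load balancer object; the
# real worker seeds the store with many more inputs before running the engine.
def _example_build_cascade_delete(db_lb):
    from taskflow import engines

    lb_flows = LoadBalancerFlows()
    flow, store = lb_flows.get_cascade_delete_load_balancer_flow(db_lb)
    store[constants.LOADBALANCER] = db_lb
    return engines.load(flow, store=store)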
|
the-stack_106_27556 | """
Alchemistry_toolkits
A short description of the project.
"""
import sys
from setuptools import setup, find_packages
import versioneer
short_description = __doc__.split("\n")
# from https://github.com/pytest-dev/pytest-runner#conditional-requirement
needs_pytest = {'pytest', 'test', 'ptr'}.intersection(sys.argv)
pytest_runner = ['pytest-runner'] if needs_pytest else []
try:
with open("README.md", "r") as handle:
long_description = handle.read()
except Exception:
long_description = "\n".join(short_description[2:])
setup(
# Self-descriptive entries which should always be present
name='Alchemistry_toolkits',
author='Wei-Tse Hsu',
author_email='[email protected]',
description=short_description[0],
long_description=long_description,
long_description_content_type="text/markdown",
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
license='MIT',
# Which Python importable modules should be included when your package is installed
# Handled automatically by setuptools. Use 'exclude' to prevent some specific
# subpackage(s) from being added, if needed
packages=find_packages(),
# Optional include package data to ship with your package
# Customize MANIFEST.in if the general case does not suit your needs
# Comment out this line to prevent the files from being packaged with your software
include_package_data=True,
# Allows `setup.py test` to work correctly with pytest
setup_requires=[] + pytest_runner,
# Additional entries you may want: simply uncomment the lines you want and fill in the data
# url='http://www.my_package.com', # Website
# install_requires=[], # Required packages, pulls from pip if needed; do not use for Conda deployment
# platforms=['Linux',
# 'Mac OS-X',
# 'Unix',
# 'Windows'], # Valid platforms your code works on, adjust to your flavor
# python_requires=">=3.5", # Python version restrictions
# Manual control if final package is compressible or not, set False to prevent the .egg from being made
# zip_safe=False,
)
|
the-stack_106_27560 | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from django.utils.functional import SimpleLazyObject
from django.utils.module_loading import import_string
from pizza.settings import APP_NAME
def new_api_module(module_name, api_name):
mod = "{app_name}.utils.api.{mod}.{api}".format(app_name=APP_NAME, mod=module_name, api=api_name)
return import_string(mod)()
AccessApi = SimpleLazyObject(lambda: new_api_module("access", "_AccessApi"))
CCApi = SimpleLazyObject(lambda: new_api_module("cc", "_CCApi"))
DatabusApi = SimpleLazyObject(lambda: new_api_module("databus", "_DatabusApi"))
DatamanageApi = SimpleLazyObject(lambda: new_api_module("datamanage", "_DatamanageApi"))
MetaApi = SimpleLazyObject(lambda: new_api_module("meta", "_MetaApi"))
DataflowApi = SimpleLazyObject(lambda: new_api_module("dataflow", "_DataflowApi"))
JobnaviApi = SimpleLazyObject(lambda: new_api_module("jobnavi", "_JobnaviApi"))
StorekitApi = SimpleLazyObject(lambda: new_api_module("storekit", "_StorekitApi"))
ModelApi = SimpleLazyObject(lambda: new_api_module("model", "_ModelApi"))
DataqueryApi = SimpleLazyObject(lambda: new_api_module("dataquery", "_DataqueryApi"))
PaasApi = SimpleLazyObject(lambda: new_api_module("paas", "_PaasApi"))
__all__ = [
"DatabusApi",
"MetaApi",
"DataflowApi",
"DatamanageApi",
"JobnaviApi",
"AccessApi",
"StorekitApi",
"ModelApi",
"DataqueryApi",
"CCApi",
"PaasApi",
]
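# Illustrative sketch, not part of the original module: each object above is a
# django SimpleLazyObject, so new_api_module() runs, and the backing module
# under <APP_NAME>.utils.api is imported, only on the first attribute access
# rather than when this module itself is imported. The attribute name below is
# hypothetical.
def _example_first_use():
    # The first attribute access on the proxy triggers new_api_module("meta", "_MetaApi").
    return getattr(MetaApi, "healthz", None)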
|
the-stack_106_27561 | """Emoji
Available Commands:
.ding"""
from telethon import events
import asyncio
from userbot.utils import admin_cmd
@borg.on(admin_cmd(pattern=r"ding"))
async def _(event):
if event.fwd_from:
return
animation_interval = 0.5
animation_ttl = range(0, 10)
#input_str = event.pattern_match.group(1)
#if input_str == "டிங்":
await event.edit("டாங்")
animation_chars = [
"🔴⬛⬛⬜⬜\n⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜",
"⬜⬜⬛⬜⬜\n⬜⬛⬜⬜⬜\n🔴⬜⬜⬜⬜",
"⬜⬜⬛⬜⬜\n⬜⬜⬛⬜⬜\n⬜⬜🔴⬜⬜",
"⬜⬜⬛⬜⬜\n⬜⬜⬜⬛⬜\n⬜⬜⬜⬜🔴",
"⬜⬜⬛⬛🔴\n⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜",
"⬜⬜⬛⬜⬜\n⬜⬜⬜⬛⬜\n⬜⬜⬜⬜🔴",
"⬜⬜⬛⬜⬜\n⬜⬜⬛⬜⬜\n⬜⬜🔴⬜⬜",
"⬜⬜⬛⬜⬜\n⬜⬛⬜⬜⬜\n🔴⬜⬜⬜⬜",
"🔴⬛⬛⬜⬜\n⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜",
"⬜⬜⬜⬜⬜⬜⬜⬜⬜⬜⬜\n⬜ [BECOME A VIDHAYAK](https://t.Me/TamilUserBot/) ⬜\n⬜⬜⬜⬜⬜⬜⬜⬜⬜⬜⬜"
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 10])
|
the-stack_106_27562 | from typing import Any, Dict, List
from overrides import overrides
import torch
from allennlp.data import Vocabulary
from allennlp.models.model import Model
from allennlp.modules import (
Attention,
FeedForward,
Seq2SeqEncoder,
Seq2VecEncoder,
TextFieldEmbedder,
)
from allennlp_semparse.domain_languages import WikiTablesLanguage
from allennlp_semparse.fields.production_rule_field import ProductionRuleArray
from allennlp_semparse.models.wikitables.wikitables_semantic_parser import WikiTablesSemanticParser
from allennlp_semparse.state_machines.beam_search import Search
from allennlp_semparse.state_machines.states import GrammarBasedState
from allennlp_semparse.state_machines.trainers import MaximumMarginalLikelihood
from allennlp_semparse.state_machines.transition_functions import LinkingTransitionFunction
@Model.register("wikitables_mml_parser")
class WikiTablesMmlSemanticParser(WikiTablesSemanticParser):
"""
A ``WikiTablesMmlSemanticParser`` is a :class:`WikiTablesSemanticParser` which is trained to
maximize the marginal likelihood of an approximate set of logical forms which give the correct
denotation. This is a re-implementation of the model used for the paper `Neural Semantic Parsing with Type
Constraints for Semi-Structured Tables
<https://www.semanticscholar.org/paper/Neural-Semantic-Parsing-with-Type-Constraints-for-Krishnamurthy-Dasigi/8c6f58ed0ebf379858c0bbe02c53ee51b3eb398a>`_,
by Jayant Krishnamurthy, Pradeep Dasigi, and Matt Gardner (EMNLP 2017). The language used by
this model differs from the LambdaDCS language used in the paper above; this model uses the
variable-free language from ``allennlp_semparse.domain_languages.wikitables_language``.
Parameters
----------
vocab : ``Vocabulary``
question_embedder : ``TextFieldEmbedder``
Embedder for questions. Passed to super class.
action_embedding_dim : ``int``
Dimension to use for action embeddings. Passed to super class.
encoder : ``Seq2SeqEncoder``
The encoder to use for the input question. Passed to super class.
entity_encoder : ``Seq2VecEncoder``
The encoder to used for averaging the words of an entity. Passed to super class.
decoder_beam_search : ``BeamSearch``
When we're not training, this is how we will do decoding.
max_decoding_steps : ``int``
When we're decoding with a beam search, what's the maximum number of steps we should take?
This only applies at evaluation time, not during training. Passed to super class.
attention : ``Attention``
We compute an attention over the input question at each step of the decoder, using the
decoder hidden state as the query. Passed to the transition function.
mixture_feedforward : ``FeedForward``, optional (default=None)
If given, we'll use this to compute a mixture probability between global actions and linked
actions given the hidden state at every timestep of decoding, instead of concatenating the
logits for both (where the logits may not be compatible with each other). Passed to
the transition function.
add_action_bias : ``bool``, optional (default=True)
If ``True``, we will learn a bias weight for each action that gets used when predicting
that action, in addition to its embedding. Passed to super class.
training_beam_size : ``int``, optional (default=None)
If given, we will use a constrained beam search of this size during training, so that we
use only the top ``training_beam_size`` action sequences according to the model in the MML
computation. If this is ``None``, we will use all of the provided action sequences in the
MML computation.
use_neighbor_similarity_for_linking : ``bool``, optional (default=False)
If ``True``, we will compute a max similarity between a question token and the `neighbors`
of an entity as a component of the linking scores. This is meant to capture the same kind
of information as the ``related_column`` feature. Passed to super class.
dropout : ``float``, optional (default=0)
If greater than 0, we will apply dropout with this probability after all encoders (pytorch
LSTMs do not apply dropout to their last layer). Passed to super class.
num_linking_features : ``int``, optional (default=10)
We need to construct a parameter vector for the linking features, so we need to know how
many there are. The default of 10 here matches the default in the ``KnowledgeGraphField``,
which is to use all ten defined features. If this is 0, another term will be added to the
linking score. This term contains the maximum similarity value from the entity's neighbors
and the question. Passed to super class.
rule_namespace : ``str``, optional (default=rule_labels)
The vocabulary namespace to use for production rules. The default corresponds to the
default used in the dataset reader, so you likely don't need to modify this. Passed to super
class.
"""
def __init__(
self,
vocab: Vocabulary,
question_embedder: TextFieldEmbedder,
action_embedding_dim: int,
encoder: Seq2SeqEncoder,
entity_encoder: Seq2VecEncoder,
decoder_beam_search: Search,
max_decoding_steps: int,
attention: Attention,
mixture_feedforward: FeedForward = None,
add_action_bias: bool = True,
training_beam_size: int = None,
use_neighbor_similarity_for_linking: bool = False,
dropout: float = 0.0,
num_linking_features: int = 10,
rule_namespace: str = "rule_labels",
) -> None:
use_similarity = use_neighbor_similarity_for_linking
super().__init__(
vocab=vocab,
question_embedder=question_embedder,
action_embedding_dim=action_embedding_dim,
encoder=encoder,
entity_encoder=entity_encoder,
max_decoding_steps=max_decoding_steps,
add_action_bias=add_action_bias,
use_neighbor_similarity_for_linking=use_similarity,
dropout=dropout,
num_linking_features=num_linking_features,
rule_namespace=rule_namespace,
)
self._beam_search = decoder_beam_search
self._decoder_trainer = MaximumMarginalLikelihood(training_beam_size)
self._decoder_step = LinkingTransitionFunction(
encoder_output_dim=self._encoder.get_output_dim(),
action_embedding_dim=action_embedding_dim,
input_attention=attention,
add_action_bias=self._add_action_bias,
mixture_feedforward=mixture_feedforward,
dropout=dropout,
)
@overrides
def forward(
self, # type: ignore
question: Dict[str, torch.LongTensor],
table: Dict[str, torch.LongTensor],
world: List[WikiTablesLanguage],
actions: List[List[ProductionRuleArray]],
target_values: List[List[str]] = None,
target_action_sequences: torch.LongTensor = None,
metadata: List[Dict[str, Any]] = None,
) -> Dict[str, torch.Tensor]:
"""
In this method we encode the table entities, link them to words in the question, then
encode the question. Then we set up the initial state for the decoder, and pass that
state off to either a DecoderTrainer, if we're training, or a BeamSearch for inference,
if we're not.
Parameters
----------
question : Dict[str, torch.LongTensor]
The output of ``TextField.as_array()`` applied on the question ``TextField``. This will
be passed through a ``TextFieldEmbedder`` and then through an encoder.
table : ``Dict[str, torch.LongTensor]``
The output of ``KnowledgeGraphField.as_array()`` applied on the table
``KnowledgeGraphField``. This output is similar to a ``TextField`` output, where each
entity in the table is treated as a "token", and we will use a ``TextFieldEmbedder`` to
get embeddings for each entity.
world : ``List[WikiTablesLanguage]``
We use a ``MetadataField`` to get the ``WikiTablesLanguage`` object for each input instance.
Because of how ``MetadataField`` works, this gets passed to us as a ``List[WikiTablesLanguage]``.
actions : ``List[List[ProductionRuleArray]]``
A list of all possible actions for each ``world`` in the batch, indexed into a
``ProductionRuleArray`` using a ``ProductionRuleField``. We will embed all of these
and use the embeddings to determine which action to take at each timestep in the
decoder.
target_values : ``List[List[str]]``, optional (default = None)
For each instance, a list of target values taken from the example lisp string. We pass
this list to the evaluator along with logical forms to compute denotation accuracy.
target_action_sequences : torch.Tensor, optional (default = None)
A list of possibly valid action sequences, where each action is an index into the list
of possible actions. This tensor has shape ``(batch_size, num_action_sequences,
sequence_length)``.
metadata : ``List[Dict[str, Any]]``, optional (default = None)
Metadata containing the original tokenized question within a 'question_tokens' field.
"""
outputs: Dict[str, Any] = {}
rnn_state, grammar_state = self._get_initial_rnn_and_grammar_state(
question, table, world, actions, outputs
)
batch_size = len(rnn_state)
initial_score = rnn_state[0].hidden_state.new_zeros(batch_size)
initial_score_list = [initial_score[i] for i in range(batch_size)]
initial_state = GrammarBasedState(
batch_indices=list(range(batch_size)), # type: ignore
action_history=[[] for _ in range(batch_size)],
score=initial_score_list,
rnn_state=rnn_state,
grammar_state=grammar_state,
possible_actions=actions,
extras=target_values,
debug_info=None,
)
if target_action_sequences is not None:
# Remove the trailing dimension (from ListField[ListField[IndexField]]).
target_action_sequences = target_action_sequences.squeeze(-1)
target_mask = target_action_sequences != self._action_padding_index
else:
target_mask = None
if self.training:
return self._decoder_trainer.decode(
initial_state, self._decoder_step, (target_action_sequences, target_mask)
)
else:
if target_action_sequences is not None:
outputs["loss"] = self._decoder_trainer.decode(
initial_state, self._decoder_step, (target_action_sequences, target_mask)
)["loss"]
num_steps = self._max_decoding_steps
# This tells the state to start keeping track of debug info, which we'll pass along in
# our output dictionary.
initial_state.debug_info = [[] for _ in range(batch_size)]
best_final_states = self._beam_search.search(
num_steps, initial_state, self._decoder_step, keep_final_unfinished_states=False, world=world, actions=actions
)
for i in range(batch_size):
# Decoding may not have terminated with any completed logical forms, if `num_steps`
# isn't long enough (or if the model is not trained enough and gets into an
# infinite action loop).
if i in best_final_states:
best_action_indices = best_final_states[i][0].action_history[0]
self._log_prob_avg(best_final_states[i][0].score[0].item())
if target_action_sequences is not None:
# Use a Tensor, not a Variable, to avoid a memory leak.
targets = target_action_sequences[i].data
sequence_in_targets = 0
sequence_in_targets = self._action_history_match(
best_action_indices, targets
)
self._action_sequence_accuracy(sequence_in_targets)
self._compute_validation_outputs(
actions, best_final_states, world, target_values, metadata, outputs
)
return outputs
default_predictor = "wikitables-parser"
|
the-stack_106_27564 | #!/usr/bin/env python3
import copy
import os
import logging
import datetime
import filecmp
import pathlib
import json
import shutil
import base64
from io import BytesIO
import jwt
import pem
import pycurl
import re
from git import Repo
import git
import validators
# Checks to ensure a url is valid
def urlIsValid(candidate_url):
# Regex to check valid URL
return validators.url(candidate_url)
class Node:
def __init__(self, dir_name="", rel_path=".", dir_sha=None):
"""
Creating a Node object
dir_name is the name of the directory the node contains information
about; rel_path is the actual path to the directory.
The root node of a repository should be created by simply calling:
root_node = Node()
"""
self._dir = dir_name
self._dir_sha = dir_sha
self._type = "dir"
self._dirs = []
self._files = []
self._files_sha = {}
self._misc = []
self._misc_sha = {}
self._rel_path = rel_path + dir_name
def __getFilePaths(self, current_path):
"""Returns the full paths to the files in the current folder."""
rel_paths = []
for fil in self._files:
if current_path.endswith("/"):
rel_paths.append(current_path + fil)
else:
rel_paths.append(current_path + "/" + fil)
return rel_paths
def __getMiscPaths(self, current_path):
"""Returns the full paths to the misc content in the current folder."""
rel_paths = []
for mis in self._misc:
if current_path.endswith("/"):
rel_paths.append(current_path + mis)
else:
rel_paths.append(current_path + "/" + mis)
return rel_paths
def __getDirPaths(self, current_path):
rel_paths = []
for node in self._dirs:
if node.name[0] == "/":
if current_path[-1] == "/":
rel_paths.append(current_path + node.name[1:])
else:
rel_paths.append(current_path + node.name)
elif current_path[-1] == "/":
rel_paths.append(current_path + node.name)
else:
rel_paths.append(current_path + "/" + node.name)
return rel_paths
def __exists(self, current_path, path_to_obj):
for fil in self.__getFilePaths(current_path):
if fil == path_to_obj:
return True
for mis in self.__getMiscPaths(current_path):
if mis == path_to_obj:
return True
for dir_path in self.__getDirPaths(current_path):
if dir_path == path_to_obj:
return True
for node in self._dirs:
if current_path.endswith("/"):
if node.__exists(current_path + node.name, path_to_obj):
return True
else:
if node.__exists(current_path + "/" + node.name, path_to_obj):
return True
return False
def __type(self, path):
for fil in self._files:
if fil == path:
return "file"
for mis in self._misc:
if mis == path:
return "misc"
for node in self._dirs:
if path.count("/") == 0:
if node.name == path:
return "dir"
else:
new_path = path.split("/")[1][0:]
return node.__type(new_path)
return None
def __insert(self, current_path, content_path, content_type, content_sha):
# Check if content_path contains folders
sub_dir = None
if content_path.startswith("./"):
if content_path.count("/") > 1:
# Ignore the first ./ so grab [1]
sub_dir = content_path.split("/")[1]
new_content_path = content_path.split(sub_dir)[1][1:]
elif content_path.startswith("/"):
if content_path.count("/") > 1:
# Ignore the first / so grab [1]
sub_dir = content_path.split("/")[1]
new_content_path = content_path.split(sub_dir)[1][1:]
elif content_path.count("/") > 0:
sub_dir = content_path.split("/")[0]
new_content_path = content_path.split(sub_dir)[1][0:]
if sub_dir is not None:
# Check if the directory has already been created
found = False
for node in self.nodes:
if sub_dir == node.name:
found = True
node.__insert(
current_path + "/" + node.name,
new_content_path,
content_type,
content_sha,
)
if not found:
# Throw an error
error_msg = "Cannot add content, missing sub folders.\n"
error_msg += "content_path: " + content_path + "\n"
raise Exception(error_msg)
else:
if content_type == "dir":
if content_path.startswith("./"):
content_name = content_path[2:]
elif content_path.startswith("/"):
content_name = content_path[1:]
else:
content_name = content_path
self._dirs.append(Node(content_name, self._rel_path + "/", content_sha))
elif content_type == "file":
self._files.append(content_path)
self._files_sha[content_path] = content_sha
else:
self._misc.append(content_path)
self._misc_sha[content_path] = content_sha
def __sha(self, path):
"""
Will return the sha of the file object or None if sha is not found.
This is true with exception to the root directory which does not
have a sha associated with it, and so it will also return None.
"""
for fil in self._files:
if fil == path:
return self._files_sha[fil]
for mis in self._misc:
if mis == path:
return self._misc_sha[mis]
for node in self._dirs:
if node.name == path:
return self._dir_sha
else:
new_path = copy.deepcopy(path)
new_path = "/".join(new_path.strip("/").new_path("/")[1:])
return node.getSha(new_path)
return None
def insert(self, content_path, content_type, content_sha=None):
"""
Record the contents of a directory by inserting it
Will either store new information as a file, directory or misc type.
If the content type is of type dir than a new node is created.
"""
if content_type not in ("dir", "misc", "file"):
error_msg = "Unknown content type specified, allowed types are:\n"
error_msg += "dir, misc, file\n"
error_msg += "\ncontent_path: " + content_path
error_msg += "\ncontent_type: " + content_type
error_msg += "\ncontent_sha: " + content_sha
raise Exception(error_msg)
if content_path in ("", ".", "./"):
error_msg = "No content specified.\n"
error_msg += "\ncontent_path: " + content_path
error_msg += "\ncontent_type: " + content_type
error_msg += "\ncontent_sha: " + content_sha
raise Exception(error_msg)
if content_sha is not None:
if len(content_sha) != 40:
error_msg = "sha must be contain 40 characters.\n"
error_msg += "\ncontent_path: " + content_path
error_msg += "\ncontent_type: " + content_type
error_msg += "\ncontent_sha: " + content_sha
raise Exception(error_msg)
self.__insert("./", content_path, content_type, content_sha)
@property
def name(self):
return self._dir
@property
def sha(self):
return self._dir_sha
@property
def relative_path(self):
return self._rel_path
@property
def files(self):
"""Returns non miscellaneous content and non folders."""
return self._files
@property
def miscellaneous(self):
"""Returns miscellaneous content e.g. image files."""
return self._misc
@property
def nodes(self):
"""
Returns a list of all nodes in the current node.
This will essentially be the directories.
"""
return self._dirs
def exists(self, path_to_obj):
"""
Checks to see if a file object exists.
Path should be the full path to the object. e.g.
./bin
./tests/test_unit.py
./image.png
If the "./" are ommitted from the path it will be assumed that the
file objects are in reference to the root path e.g. if
bin
tests/test_unit.py
are passed in "./" will be prepended to the path.
"""
# Check to see if path_to_obj is root node
if path_to_obj == "." or path_to_obj == "./" or path_to_obj == "":
return True
if not path_to_obj.startswith("./"):
if path_to_obj[0] == "/":
path_to_obj = "." + path_to_obj
else:
path_to_obj = "./" + path_to_obj
return self.__exists("./", path_to_obj)
def getSha(self, path):
"""
Will return the sha of the file object or None if sha is not found.
This is true with the exception of the root directory, which does not
have a sha associated with it, and so it will also return None.
"""
if path.startswith("./"):
if len(path) > 2:
path = path[2:]
if path.startswith("/"):
if len(path) > 1:
path = path[1:]
for fil in self._files:
if fil == path:
return self._files_sha[fil]
for mis in self._misc:
if mis == path:
return self._misc_sha[mis]
for node in self._dirs:
if node.name == path:
return node._dir_sha
for node in self._dirs:
# Remove the dir1/ from dir1/dir2
if path.startswith(node.name + "/"):
new_path = path.split("/")[1][0:]
found_sha = node.getSha(new_path)
if found_sha is not None:
return found_sha
return None
def type(self, path):
if path == "" or path == "." or path == "./":
return "dir"
return self.__type(path)
@property
def path(self):
"""Get the relative path of the current node."""
return self._rel_path
def __str__(self):
"""Get contents of node and all child nodes as a string."""
return self._buildStr()
def _buildStr(self, indent=""):
"""Contents in string format indenting with each folder."""
content_string = ""
for fil in self._files:
content_string += indent + "file " + fil + "\n"
for mis in self._misc:
content_string += indent + "misc " + mis + "\n"
for node in self._dirs:
content_string += indent + "dir " + node.name + "\n"
content_string += node._buildStr(indent + " ")
return content_string
def _findRelPaths(self, current_path, obj_name):
"""Contents in string format indenting with each folder."""
rel_paths = []
for fil in self.__getFilePaths(current_path):
if fil.endswith(obj_name):
rel_paths.append(fil)
for mis in self.__getMiscPaths(current_path):
if mis.endswith(obj_name):
rel_paths.append(mis)
for dir_path in self.__getDirPaths(current_path):
if dir_path.endswith(obj_name):
rel_paths.append(dir_path)
for node in self._dirs:
potential_paths = node._findRelPaths(
current_path + "/" + node.name, obj_name
)
rel_paths += potential_paths
return rel_paths
@property
def print(self):
"""Print contents of node and all child nodes."""
print("Contents in folder: " + self._rel_path)
for fil in self._files:
print("file " + fil)
for mis in self._misc:
print("misc " + mis)
for node in self._dirs:
node.print
def getRelativePaths(self, obj_name):
"""
Get the path(s) to the object.
In the case that an object exists in the directory tree but we don't
know the path we can try to find it in the tree. E.g. if we are
searching for 'common.py' and our directory structure actually has
two instances:
./bin/common.py
./lib/file1.py
./common.py
./file2.py
A list will be returned with the relative paths:
["./bin/common.py", "./common.py"]
"""
return self._findRelPaths(".", obj_name)
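# Illustrative sketch, not part of the original module: building a small content
# tree by hand. Parent directories must be inserted before their children; the
# shas are omitted (None) here for brevity.
def _example_node_tree():
    root = Node()
    root.insert("README.md", "file")
    root.insert("docs", "dir")
    root.insert("docs/index.md", "file")
    print(root)  # indented listing of the files and directories
    return root.exists("README.md")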
class GitHubApp:
"""
GitHubApp Class
This class is responsible for authenticating against the app repository and
interacting with the github api.
"""
def __init__(
self,
app_id,
name,
user,
repo_name,
location_of_inheriting_class=None,
verbosity=0,
):
"""
The app is generic and provides a template; to create an app for a specific repository the
following arguments are needed:
* the app id as provided when it is created on github
* the name of the app
* the owner of the repository it controls
* the name of the repository it controls
* the location of the github child class, which should exist within a repo
"""
self._app_id = app_id
self._name = name
self._user = user
self._repo_name = repo_name
self._verbosity = verbosity
self._log = logging.getLogger(self._repo_name)
self._log.setLevel(logging.INFO)
fh = logging.FileHandler(self._repo_name + ".log", mode="w", encoding="utf-8")
fh.setLevel(logging.INFO)
self._log.addHandler(fh)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
self._log.addHandler(ch)
self._config_file_dir = pathlib.Path(__file__).parent.absolute()
self._config_file_name = "githubapp_" + str(self._app_id) + ".config"
self._config_file_path = pathlib.Path.joinpath(
self._config_file_dir, self._config_file_name
)
self._child_class_path = None
if location_of_inheriting_class is not None:
if os.path.isfile(location_of_inheriting_class):
self._child_class_path = location_of_inheriting_class
# Create an empty config file if one does not exist
if not pathlib.Path.is_file(self._config_file_path):
open(self._config_file_path, "a").close()
@property
def name(self):
"""Returns the name of the app."""
return self._name
@property
def default_branch(self):
"""Return the default branch for the repository."""
if self._default_branch is None:
# Determine the default by calling the repo
js_obj_list, _ = self._PYCURL(self._header, self._repo_url)
self._default_branch = js_obj_list["default_branch"]
return self._default_branch
def initialize(
self,
pem_file,
use_wiki=False,
ignore=False,
create_branch=False,
path_to_repo=None,
):
"""
Sets basic properties of the app; should be called before any other methods.
use_wiki - determines if by default commands will refer to the wiki repository
create_branch - determines if you are giving the application the ability to create new
branches
pem_file - this is the authentication file needed to do anything with the github api.
ignore - if this is set to true then images will not be uploaded to a separate figures
branch on the main repository. By default binary files are uploaded to an orphan branch so
as to prevent bloating the commit history.
The initialization method is also responsible for authenticating with github and creating
an access token. The access token is needed to do any further communication or run any other
operations on github.
"""
self._ignore = ignore
self._use_wiki = use_wiki
self._repo_url = (
"https://api.github.com/repos/" + self._user + "/" + self._repo_name
)
if isinstance(create_branch, list):
self._create_branch = create_branch[0]
else:
self._create_branch = create_branch
self._default_branch = None
self._default_image_branch = "figures"
self._branches = []
self._branch_current_commit_sha = {}
self._api_version = "application/vnd.github.v3+json"
self._repo_root = Node()
self._repo_root_initialized = False
self._repo_root_branch = "None"
if path_to_repo is not None:
# Check that the repo specified is valid
if os.path.isdir(path_to_repo):
# Check if we are overwriting an existing repo stored in the config file
with open(self._config_file_path, "r") as file:
line = file.readline()
# Print a message if they are different
if line != path_to_repo:
self._log.info(
"Changing repo path from {} to {}".format(
line, path_to_repo
)
)
with open(self._config_file_path, "w") as file:
file.write(path_to_repo)
self._repo_path = path_to_repo
else:
error_msg = "The suggested repository path is not valid:\n{}".format(
path_to_repo
)
self._log.error(error_msg)
raise Exception(error_msg)
else:
if pathlib.Path.is_file(self._config_file_path):
with open(self._config_file_path, "r") as file:
line = file.readline()
# Throw an error if the path is not valid
if not os.path.isdir(line):
error_msg = (
"The cached path to your repository is "
"not valid: ({})".format(line)
)
error_msg = (
error_msg
+ "\nThe config file is located at: ({})".format(
self._config_file_path
)
)
error_msg = (
error_msg
+ "\nConsider initializing the app "
+ self._name
+ " with the path of "
)
error_msg = error_msg + "repository it will be analyzing."
self._log.error(error_msg)
self._repo_path = line
else:
# If no config file exists throw an error
error_msg = (
"No repository path is known to the " + self._name + ".\n"
"Please call --repository-path or -rp with the path the repository to register it.\n"
)
self._log.error(error_msg)
raise Exception(error_msg)
self._app_wiki_dir = os.path.normpath(
self._repo_path + "/../" + self._repo_name + ".wiki"
)
self._log.info(self._repo_name + " wiki dir is:")
self._log.info(self._app_wiki_dir)
if isinstance(pem_file, list):
pem_file = pem_file[0]
pem_file = self._validatePemFile(pem_file)
self._generateJWT(pem_file)
self._generateInstallationId()
self._generateAccessToken()
def _validatePemFile(self, pem_file):
"""Ensures pem file exists and checks env variable."""
if pem_file is None:
if "GITHUB_APP_PEM" in os.environ:
pem_file = os.environ.get("GITHUB_APP_PEM")
else:
error_msg = "A pem file has not been specified and "
error_msg += "GITHUB_APP_PEM env varaible is not defined"
raise Exception(error_msg)
# Check that pem file is actually a file
if not os.path.isfile(pem_file):
error_msg = "Permissions file ({})".format(pem_file)
error_msg = error_msg + " is not a valid file."
raise Exception(error_msg)
self._log.info("File loc %s" % pem_file)
return pem_file
def _generateJWT(self, pem_file):
"""
Generates a JSON web token
This method takes the permissions (.pem) file provided and populates the JSON web token
attribute
"""
# iss is the app id
# Ensuring that we request an access token that expires after a minute
payload = {
"iat": datetime.datetime.utcnow(),
"exp": datetime.datetime.utcnow() + datetime.timedelta(seconds=60),
"iss": self._app_id,
}
certs = pem.parse_file(pem_file)
PEM = str(certs[0])
if PEM is None:
error_msg = (
"No permissions enabled for " + self._name + " app, "
"either a pem file needs to be provided or the "
"GITHUB_APP_PEM variable needs to be defined"
)
raise Exception(error_msg)
self._jwt_token = jwt.encode(payload, PEM, algorithm="RS256")
if isinstance(self._jwt_token, bytes):
# Older versions of jwt return a byte string as opposed to a string
self._jwt_token = self._jwt_token.decode("utf-8")
def _PYCURL(self, header, url, option=None, custom_data=None):
buffer_temp = BytesIO()
c = pycurl.Curl()
c.setopt(c.URL, url)
c.setopt(pycurl.VERBOSE, self._verbosity)
c.setopt(c.WRITEDATA, buffer_temp)
c.setopt(c.HTTPHEADER, header)
if option == "POST":
c.setopt(c.POST, 1)
c.setopt(c.POSTFIELDS, json.dumps(custom_data))
c.setopt(c.POSTFIELDSIZE, len(json.dumps(custom_data)))
elif option == "PUT":
c.setopt(c.PUT, 1)
elif option == "DELETE":
c.setopt(c.CUSTOMREQUEST, "DELETE")
c.setopt(c.POSTFIELDS, json.dumps(custom_data))
c.setopt(c.POSTFIELDSIZE, len(json.dumps(custom_data)))
if custom_data is not None:
buffer_temp2 = BytesIO(json.dumps(custom_data).encode("utf-8"))
c.setopt(c.READDATA, buffer_temp2)
c.perform()
code = c.getinfo(c.HTTP_CODE)
c.close()
if int(code) != 200:
print("Code is {}".format(code))
print(json.dumps(json.loads(buffer_temp.getvalue()), indent=4))
return json.loads(buffer_temp.getvalue()), code
def _generateInstallationId(self):
"""
Generate an installation id
This method will populate the installation id attribute using the
internally stored json web token.
"""
header = [
"Authorization: Bearer " + str(self._jwt_token),
"Accept: " + self._api_version,
]
js_obj, _ = self._PYCURL(header, "https://api.github.com/app/installations")
if isinstance(js_obj, list):
js_obj = js_obj[0]
# The installation id will be listed at the end of the url path
self._install_id = js_obj["html_url"].rsplit("/", 1)[-1]
def _generateAccessToken(self):
"""
Creates an access token
This method will populate the installation attribute using the
installation id. The token is needed to authenticate any actions
run by the application.
"""
header = [
"Authorization: Bearer " + str(self._jwt_token),
"Accept: " + self._api_version,
]
https_url_access_tokens = (
"https://api.github.com/app/installations/"
+ self._install_id
+ "/access_tokens"
)
js_obj, _ = self._PYCURL(header, https_url_access_tokens, option="POST")
if isinstance(js_obj, list):
js_obj = js_obj[0]
self._access_token = js_obj["token"]
self._header = [
"Authorization: token " + str(self._access_token),
"Accept: " + self._api_version,
]
def _fillTree(self, current_node, branch):
"""
Creates a content tree of the branch
This is an internal method that is meant to be used recursively
to grab the contents of a branch of a remote repository.
"""
nodes = current_node.nodes
for node in nodes:
js_obj, _ = self._PYCURL(
self._header,
self._repo_url + "/contents/" + node.path + "?ref=" + branch,
custom_data={"branch": branch},
)
if isinstance(js_obj, list):
for ob in js_obj:
node.insert(ob["name"], ob["type"], ob["sha"])
else:
node.insert(js_obj["name"], js_obj["type"], js_obj["sha"])
self._fillTree(node, branch)
def _getBranches(self):
"""Internal method for getting a list of the branches that are available on github."""
page_found = True
page_index = 1
self._branches = []
self._branch_current_commit_sha = {}
while page_found:
page_found = False
js_obj_list, _ = self._PYCURL(
self._header, self._repo_url + "/branches?page={}".format(page_index)
)
page_index = page_index + 1
for js_obj in js_obj_list:
page_found = True
self._branches.append(js_obj["name"])
self._branch_current_commit_sha.update(
{js_obj["name"]: js_obj["commit"]["sha"]}
)
def generateCandidateRepoPath(self):
"""Generate a possible path to the repo
Provides a suggestion for the repository path the app is meant to work
on. This will only provide a correct suggestion if the app code exists
within the repository. If it is unable to identify a suitable suggestion
it will return None.
"""
if self._child_class_path is not None:
index = self._child_class_path.rfind(self._repo_name)
if index != -1:
return self._child_class_path[0 : index + len(self._repo_name)]
return None
def getBranchMergingWith(self, branch):
"""Gets the name of the target branch of `branch` which it will merge with."""
js_obj_list, _ = self._PYCURL(self._header, self._repo_url + "/pulls")
self._log.info(
"Checking if branch is open as a pr and what branch it is targeted to merge with.\n"
)
self._log.info("Checking branch %s\n" % (self._user + ":" + branch))
for js_obj in js_obj_list:
self._log.info("Found branch: %s.\n" % js_obj.get("head").get("label"))
if js_obj.get("head").get("label") == self._user + ":" + branch:
return js_obj.get("base").get("label").split(":", 1)[1]
return None
# Public Methods
@property
def branches(self):
"""
Gets the branches of the repository
This method will check to see if branches have already been collected from the github
RESTful api. If the branch tree has not been collected it will update the branches
attribute.
"""
if not self._branches:
self._getBranches()
return self._branches
def getLatestCommitSha(self, target_branch):
"""Does what it says gets the latest commit sha for the taget_branch."""
if not self._branches:
self._getBranches()
return self._branch_current_commit_sha.get(target_branch)
def branchExist(self, branch):
"""
Determine if branch exists
This method will determine if a branch exists on the github repository by pinging the
github api.
"""
return branch in self.branches
def refreshBranchCache(self):
"""
Method forces an update of the locally stored list of branches.
Will update regardless of whether the class already contains a
local copy. Might be necessary if the remote github repository
is updated.
"""
self._getBranches()
def createBranch(self, branch, branch_to_fork_from=None):
"""
Creates a git branch
Will create a branch if it does not already exist; if the branch
does exist it will do nothing. The new branch will be created by
forking it off the latest commit of the default branch.
"""
if branch_to_fork_from is None:
branch_to_fork_from = self.default_branch
if self.branchExist(branch):
return
if not self.branchExist(branch_to_fork_from):
error_msg = (
"Cannot create new branch: "
+ branch
+ " from "
+ branch_to_fork_from
+ " because "
+ branch_to_fork_from
+ " does not exist."
)
raise Exception(error_msg)
self._PYCURL(
self._header,
self._repo_url + "/git/refs",
option="POST",
custom_data={
"ref": "refs/heads/" + branch,
"sha": self._branch_current_commit_sha[branch_to_fork_from],
},
)
def _generateContent(self, head):
contents = {}
dir_path = head.relative_path
for file_name in head.files:
contents[dir_path + "/" + file_name] = [file_name, head.getSha(file_name)]
for misc_name in head.miscellaneous:
contents[dir_path + "/" + misc_name] = [misc_name, head.getSha(misc_name)]
for node in head.nodes:
node_content = self._generateContent(node)
contents[dir_path + "/" + node.name] = [node.name, node.sha]
contents.update(node_content)
return contents
def refreshBranchTreeCache(self, branch):
"""
Method forces an update of the locally stored branch contents.
Will update regardless of whether the class already contains a
local copy. Might be necessary if the remote github repository
is updated. For instance if a file is added remotely. If however, you
are not worried about remote changes then it is not necessary, and it is
much faster to use the locally cached contents.
"""
# 1. Check if branch exists
js_obj, _ = self._PYCURL(self._header, self._repo_url + "/branches", "GET")
# Reset the cache
old_content = copy.deepcopy(self._repo_root)
self._repo_root = Node()
for obj in js_obj:
if obj["name"] == branch:
# Get the top level directory structure
js_obj2, _ = self._PYCURL(
self._header,
self._repo_url + "/contents?ref=" + branch,
custom_data={"branch": branch},
)
for obj2 in js_obj2:
self._repo_root.insert(obj2["name"], obj2["type"], obj2["sha"])
self._fillTree(self._repo_root, branch)
self._repo_root_branch = branch
self._repo_root_initialized = True
return self._repo_root
# Make idempotent revert the changes
self._repo_root = old_content
raise Exception(
"Branch missing from repository {} cannot refresh branch tree cache".format(
branch
)
)
def getContents(self, branch=None):
"""
Returns the contents of a branch
Returns the contents of a branch as a dictionary, where the key
is the content path and the value is a list of the file/folder name
and the sha of the file/folder etc.
"""
branch_tree = self.getBranchTree(branch)
return self._generateContent(branch_tree)
def remove(self, file_name_path, branch=None, file_sha=None, use_wiki=False):
"""
This method will remove a file from the listed branch.
Provide the file name and path with respect to the repository root.
"""
if branch is None:
branch = "master"
# First check that the file exists in the repository
branch_tree = self.getBranchTree(branch)
# Only remove if the file actually exists
if branch_tree.exists(file_name_path):
if file_sha is None:
# Attempt to get it from the branch tree
file_sha = branch_tree.getSha(file_name_path)
if file_sha is None:
error_msg = "Unable to remove existing file: "
error_msg += "{}, sha is unknown.".format(file_name_path)
raise Exception(error_msg)
if file_name_path.startswith("/"):
file_name_path = file_name_path[1:]
elif file_name_path.startswith("./"):
file_name_path = file_name_path[2:]
message = self._name + " is removing {}".format(file_name_path)
self._PYCURL(
self._header,
self._repo_url + "/contents/" + file_name_path,
"DELETE",
custom_data={
"branch": branch,
"sha": file_sha,
"message": message,
},
)
def upload(self, file_name, branch=None, use_wiki=False):
"""
This method attempts to upload a file to the specified branch.
If the file is found to already exist, it will be updated. Image
files will by default be placed in a figures branch of the main
repository, so as to not bloat the repository's commit history.
"""
# Will only be needed if we are creating a branch
branch_to_fork_from = self.default_branch
if isinstance(file_name, list):
file_name = file_name[0]
if branch is None:
branch = self.default_branch
if file_name.lower().endswith(
(".png", ".jpg", ".jpeg", ".tiff", ".bmp", ".gif")
):
self._log.info("Image file detected")
if branch != self._default_image_branch and not self._ignore:
self._log.warning(
"Note all images will be uploaded to a branch named: "
+ self._default_image_branch
+ " in the main repository."
)
self._log.warning("Unless the ignore flag is used.")
branch = self._default_image_branch
branch_to_fork_from = "master"
self._use_wiki = False
if self._use_wiki or use_wiki:
if branch != "master":
error_msg = (
"Files can only be uploaded to the wiki repositories master branch"
)
raise Exception(error_msg)
if os.path.exists(
self._app_wiki_dir + "/" + os.path.basename(os.path.normpath(file_name))
):
commit_msg = "Updating file " + file_name
else:
commit_msg = "Adding file " + file_name
repo = self.getWikiRepo(branch)
destination = (
self._app_wiki_dir + "/" + os.path.basename(os.path.normpath(file_name))
)
if not filecmp.cmp(file_name, destination):
shutil.copy(file_name, destination)
repo.index.add(
[
str(
self._app_wiki_dir
+ "/"
+ os.path.basename(os.path.normpath(file_name))
)
]
)
repo.index.commit(commit_msg)
repo.git.push("--set-upstream", "origin", repo.head.reference)
return
if self._create_branch:
self.createBranch(branch, branch_to_fork_from)
elif not self.branchExist(branch):
error_msg = "branch: " + branch + " does not exist in repository."
raise Exception(error_msg)
contents = self.getContents(branch)
file_found = False
if os.path.basename(os.path.normpath(file_name)) in contents:
self._log.warning(
"File (%s) already exists in branch:%s"
% (os.path.basename(os.path.normpath(file_name)), branch)
)
file_found = True
# 2. convert file into base64 format
# b is needed if it is a png or image file/ binary file
with open(file_name, "rb") as f:
data = f.read()
encoded_file = base64.b64encode(data)
# 3. upload the file, overwrite if exists already
custom_data = {
"message": "%s %s file %s"
% (
self._name,
"overwriting" if file_found else "uploading",
os.path.basename(os.path.normpath(file_name)),
),
"name": self._name,
"branch": branch,
"content": encoded_file.decode("ascii"),
}
if file_found:
custom_data["sha"] = contents[os.path.basename(os.path.normpath(file_name))]
self._log.info(
"Uploading file (%s) to branch (%s)"
% (os.path.basename(os.path.normpath(file_name)), branch)
)
https_url_to_file = (
self._repo_url
+ "/contents/"
+ os.path.basename(os.path.normpath(file_name))
)
self._PYCURL(self._header, https_url_to_file, "PUT", custom_data)
def getBranchTree(self, branch=None):
"""
Gets the contents of a branch as a tree
Method will grab the contents of the specified branch from the
remote repository. It will return the contents as a tree object.
The tree object provides some basic functionality, such as indicating
the content type.
"""
if branch is None:
branch = self.default_branch
if branch != self._repo_root_branch:
# It is a different branch that is cached
self.refreshBranchTreeCache(branch)
if self._repo_root_initialized:
return self._repo_root
else:
self.refreshBranchTreeCache(branch)
return self._repo_root
def cloneWikiRepo(self):
"""
Clone a git repo
Will clone the wiki repository if it does not exist, if it does
exist it will update the access permissions by updating the wiki
remote url. The repository is then returned.
"""
wiki_remote = (
"https://x-access-token:"
+ str(self._access_token)
+ "@github.com/"
+ self._user
+ "/"
+ self._repo_name
+ ".wiki.git"
)
if not os.path.isdir(str(self._app_wiki_dir)):
repo = Repo.clone_from(wiki_remote, self._app_wiki_dir)
else:
repo = Repo(self._app_wiki_dir)
g = git.cmd.Git(self._app_wiki_dir)
self._log.info("Our remote url is %s" % wiki_remote)
# git remote show origin
self._log.info(g.execute(["git", "remote", "show", "origin"]))
g.execute(["git", "remote", "set-url", "origin", wiki_remote])
# Ensure local branches are synchronized with server
g.execute(["git", "fetch"])
# Will not overwrite files but will reset the index to match with the remote
g.execute(["git", "reset", "--mixed", "origin/master"])
return repo
def getWikiRepo(self, branch):
"""
Get the git wiki repo
The GitHub API has only limited support for interacting with
the GitHub wiki; as such, the best way to do this is to actually
clone the github repository and interact with the git repo
directly. This method will clone the repository if it does not
exist. It will then return a repo object.
"""
repo = self.cloneWikiRepo()
return repo
def postStatus(
self, state, commit_sha=None, context=None, description=None, target_url=None
):
"""Post status of current commit."""
if isinstance(state, list):
state = state[0]
self._log.info("Posting state: %s" % state)
self._log.info("Posting context: %s" % context)
self._log.info("Posting description: %s" % description)
self._log.info("Posting url: %s" % target_url)
state_list = ["pending", "failed", "error", "success"]
if state not in state_list:
raise Exception("Unrecognized state specified " + state)
if commit_sha is None:
commit_sha = os.getenv("CI_COMMIT_SHA")
if commit_sha is None:
commit_sha = os.getenv("TRAVIS_COMMIT")
if commit_sha is None:
error_msg = "CI_COMMIT_SHA and or TRAVIS_COMMIT not defined in "
error_msg = error_msg + "environment cannot post status."
raise Exception(error_msg)
if len(commit_sha) != 40:
error_msg = "Unconventional commit sha encountered (" + str(commit_sha)
error_msg = error_msg + ") environment cannot post status. Sha "
error_msg = error_msg + "should be 40 characters this one is "
error_msg = error_msg + str(len(commit_sha))
raise Exception(error_msg)
custom_data_tmp = {"state": state}
if context is not None:
custom_data_tmp.update({"context": context})
if description is not None:
custom_data_tmp.update({"description": description})
if target_url is not None:
# Make sure has http(s) scheme
if urlIsValid(target_url):
custom_data_tmp.update({"target_url": target_url})
else:
error_msg = "Invalid url detected while posting attempting"
error_msg = error_msg + " to post status.\n{}".format(target_url)
raise Exception(error_msg)
self._PYCURL(
self._header,
self._repo_url + "/statuses/" + commit_sha,
option="POST",
custom_data=custom_data_tmp,
)
def getStatuses(self, commit_sha=None):
"""Get status of provided commit or commit has defined in the env vars."""
if commit_sha is None:
commit_sha = os.getenv("CI_COMMIT_SHA")
if commit_sha is None:
commit_sha = os.getenv("TRAVIS_COMMIT")
if commit_sha is None:
error_msg = (
"Commit sha not provided and CI_COMMIT_SHA and "
"TRAVIS_COMMIT not defined in environment cannot get status"
)
raise Exception(error_msg)
# 1. Check if file exists if so get SHA
js_obj, code = self._PYCURL(
self._header, self._repo_url + "/commits/" + str(commit_sha) + "/statuses"
)
return js_obj, code, commit_sha
def getState(self, commit_sha=None, index=0):
"""Get state of the provided commit at the provided index"""
json_objs, code, commit_sha = self.getStatuses(commit_sha)
if len(json_objs) <= index:
error_msg = "Cannot get state of status at index {}".format(index)
error_msg += "\nThere are only a total of statuses {}".format(
len(json_objs)
)
error_msg += " at the provided commit ({})".format(commit_sha)
raise Exception(error_msg)
for count, json_obj in enumerate(json_objs):
if count == index:
return json_obj["state"], code, commit_sha
def printStatus(self):
js_obj = self.getStatuses()
print(js_obj)
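# --- Usage sketch (illustrative, not part of the original module) ---
# The methods above form a thin wrapper around the GitHub REST API plus a
# cloned wiki repo. Assuming `app` is an instance of the enclosing class
# (its name and constructor are not shown in this excerpt), a typical flow
# might look like the commented lines below; every value is a placeholder.
#
#   app.createBranch("figures", branch_to_fork_from="master")
#   app.upload("timing_plot.png", branch="figures")
#   app.postStatus("success", context="ci/benchmarks",
#                  description="benchmarks uploaded",
#                  target_url="https://example.com/report")
#   print(app.getContents("figures"))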
|
the-stack_106_27566 | """
Core functions
To-Do:
- over limit for get_data
"""
import sys
import pandas as pd
from .. import config
from .api import API
from .util.clean import clean_dict_cols
from ..util.z2h import str_z2h
def get_list(statsCode=None, searchWord=None, outputRaw=False, key=None, lang=None, **kwargs):
api = API(key=key, lang=lang)
data = api.get_list(statsCode=statsCode, searchWord=searchWord, **kwargs)
df = pd.DataFrame(data['DATALIST_INF']['TABLE_INF'])
if outputRaw:
return df
cols_simple = ['@id', 'STAT_NAME', 'GOV_ORG',
'STATISTICS_NAME', 'TITLE',
'SURVEY_DATE', 'OPEN_DATE', 'OVERALL_TOTAL_NUMBER']
df = df[cols_simple].pipe(clean_dict_cols, ['STAT_NAME', 'GOV_ORG', 'TITLE'])
df = df.applymap(str_z2h)
return df
def get_stat(key=None, lang=None,):
api = API(key=key, lang=lang)
data = api.get_list(statsNameList="Y")
df = pd.DataFrame(data['DATALIST_INF']['LIST_INF'])
df = df.pipe(clean_dict_cols, ['STAT_NAME', 'GOV_ORG'])
df = df.applymap(str_z2h)
return df
def get_data(statsDataId, return_note=True, key=None, lang=None, **kwargs):
api = API(key=key, lang=lang)
data = api.get_data(statsDataId=statsDataId, **kwargs)
df = pd.DataFrame(data['STATISTICAL_DATA']['DATA_INF']['VALUE'])
res = data['STATISTICAL_DATA']['RESULT_INF']
while 'NEXT_KEY' in res:
_data = api.get_data(statsDataId=statsDataId, startPosition=res['NEXT_KEY'])
_df = pd.DataFrame(_data['STATISTICAL_DATA']['DATA_INF']['VALUE'])
df = pd.concat([df, _df], axis=0)
sys.stdout.write(".")
sys.stdout.flush()
res = _data['STATISTICAL_DATA']['RESULT_INF']
cats = data['STATISTICAL_DATA']['CLASS_INF']['CLASS_OBJ']
for cat in cats:
col_name = '@' + cat['@id']
_cat_map = cat['CLASS']
if isinstance(_cat_map, dict):
_cat_map = [_cat_map]
cat_map = {m['@code']: m['@name'] for m in _cat_map}
df[cat['@name']] = df[col_name].map(cat_map)
df.drop(col_name, axis=1, inplace=True)
df['Value'] = df['$']
df.drop('$', axis=1, inplace=True)
df = df.applymap(str_z2h)
if return_note:
try:
note = pd.DataFrame(data['STATISTICAL_DATA']['DATA_INF']['NOTE'])
note = note.rename(columns={"$": "EXPLAIN"})
except ValueError:
note = data['STATISTICAL_DATA']['DATA_INF']['NOTE']
return df, note
else:
return df
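# --- Usage sketch (illustrative, not part of the original module) ---
# Minimal example of the three helpers defined above; the application key is a
# placeholder and must be replaced with a real e-Stat appId (or configured via
# the package's config module).
#
#   tables = get_list(searchWord="population", key="YOUR_APP_ID")
#   print(tables.head())
#   df, note = get_data(tables["@id"].iloc[0], key="YOUR_APP_ID")
#   print(df.head())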
|
the-stack_106_27569 | # coding: utf-8
# YYeTsBot - bot.py
# 2019/8/15 18:27
__author__ = 'Benny <[email protected]>'
import io
import json
import logging
import re
import tempfile
import time
from urllib.parse import quote_plus
import telebot
from apscheduler.schedulers.background import BackgroundScheduler
from telebot import apihelper, types
from tgbot_ping import get_runtime
import fansub
from config import (FANSUB_ORDER, MAINTAINER, PROXY, REPORT, TOKEN,
YYETS_SEARCH_URL)
from utils import (get_error_dump, redis_announcement, reset_request,
save_error_dump, show_usage, today_request)
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(filename)s [%(levelname)s]: %(message)s')
if PROXY:
apihelper.proxy = {'https': PROXY}
bot = telebot.TeleBot(TOKEN, num_threads=100)
angry_count = 0
@bot.message_handler(commands=['start'])
def send_welcome(message):
bot.send_chat_action(message.chat.id, 'typing')
bot.send_message(message.chat.id, '欢迎使用,直接发送想要的剧集标题给我就可以了,不需要其他关键字,我会帮你搜索。\n\n'
'别说了,现在连流浪地球都搜得到了。本小可爱再也不生气了😄,'
f'目前搜索优先级 {FANSUB_ORDER}\n '
f'另外,可以尝试使用一下 https://yyets.dmesg.app/ 哦!',
parse_mode='html', disable_web_page_preview=True)
@bot.message_handler(commands=['help'])
def send_help(message):
bot.send_chat_action(message.chat.id, 'typing')
bot.send_message(message.chat.id, '''机器人无法使用或者报错?从 /ping 里可以看到运行状态以及最新信息。
同时,你可以使用如下方式寻求使用帮助和报告错误:\n
1. @BennyThink
2. <a href='https://github.com/BennyThink/YYeTsBot/issues'>Github issues</a>
3. <a href='https://t.me/mikuri520'>Telegram Channel</a>''', parse_mode='html', disable_web_page_preview=True)
@bot.message_handler(commands=['ping'])
def send_ping(message):
logging.info("Pong!")
bot.send_chat_action(message.chat.id, 'typing')
info = get_runtime("botsrunner_yyets_1")
usage = ""
if str(message.chat.id) == MAINTAINER:
usage = show_usage()
announcement = redis_announcement() or ""
if announcement:
announcement = f"\n\n*公告:{announcement}*\n\n"
bot.send_message(message.chat.id, f"{info}\n\n{usage}\n{announcement}",
parse_mode='markdown')
@bot.message_handler(commands=['settings'])
def settings(message):
is_admin = str(message.chat.id) == MAINTAINER
# Regular users may only view the settings; they cannot change them.
# Admins may both view and change the settings.
if message.text != "/settings" and not is_admin:
bot.send_message(message.chat.id, "此功能只允许管理员使用。请使用 /ping 和 /settings 查看相关信息")
return
# Delete the old announcement and set a new one
if message.text != "/settings":
date = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
text = message.text.replace("/settings", f"{date}\t")
logging.info("New announcement %s", text)
redis_announcement(text, "set")
setattr(message, "text", "/settings")
settings(message)
return
announcement = redis_announcement()
markup = types.InlineKeyboardMarkup()
btn1 = types.InlineKeyboardButton("删除公告", callback_data="announcement")
if is_admin and announcement:
markup.add(btn1)
bot.send_message(message.chat.id, f"目前公告:\n\n {announcement or '暂无公告'}", reply_markup=markup)
@bot.callback_query_handler(func=lambda call: re.findall(r"announcement(\S*)", call.data))
def delete_announcement(call):
bot.send_chat_action(call.message.chat.id, 'typing')
redis_announcement(op="del")
bot.edit_message_text(f"目前公告:\n\n {redis_announcement() or '暂无公告'}",
call.message.chat.id,
call.message.message_id)
@bot.message_handler(commands=['credits'])
def send_credits(message):
bot.send_chat_action(message.chat.id, 'typing')
bot.send_message(message.chat.id, '''感谢字幕组的无私奉献!本机器人资源来源:\n
<a href="http://www.zmz2019.com/">人人影视</a>
<a href="http://cili001.com/">磁力下载站</a>
<a href="http://www.zhuixinfan.com/main.php">追新番</a>
<a href="https://www.zimuxia.cn/">FIX 字幕侠</a>
''', parse_mode='html', disable_web_page_preview=True)
for sub_name in dir(fansub):
if sub_name.endswith("_offline") or sub_name.endswith("_online"):
@bot.message_handler(commands=[sub_name])
def varies_fansub(message):
bot.send_chat_action(message.chat.id, 'typing')
# /YYeTsOffline 逃避可耻 /YYeTsOffline
tv_name: str = re.findall(r"/.*line\s*(\S*)", message.text)[0]
class_name: str = re.findall(r"/(.*line)", message.text)[0]
class_ = getattr(fansub, class_name)
if not tv_name:
bot.send_message(message.chat.id, f"{class_.__name__}: 请附加你要搜索的剧集名称,如 `/{class_name} 逃避可耻`",
parse_mode='markdown')
return
else:
setattr(message, "text", tv_name)
base_send_search(message, class_())
def download_to_io(photo):
logging.info("Initializing bytes io...")
mem = io.BytesIO()
file_id = photo[-1].file_id
logging.info("Downloading photos...")
file_info = bot.get_file(file_id)
content = bot.download_file(file_info.file_path)
mem.write(content)
logging.info("Downloading complete.")
return mem
def send_my_response(message):
bot.send_chat_action(message.chat.id, 'record_video_note')
# I may also send picture
photo = message.photo
uid = message.reply_to_message.caption
text = f"主人说:{message.text or message.caption or '啥也没说😯'}"
if photo:
bot.send_chat_action(message.chat.id, 'typing')
logging.info("Photo received from maintainer")
mem = download_to_io(photo)
mem.name = f'{uid}.jpg'
r = bot.send_photo(uid, mem.getvalue(), caption=text)
else:
r = bot.send_message(uid, text)
logging.info("Reply has been sent to %s with message id %s", uid, r.message_id)
bot.reply_to(message, "回复已经发送给这位用户")
fw = bot.forward_message(message.chat.id, uid, r.message_id)
time.sleep(3)
bot.delete_message(message.chat.id, fw.message_id)
logging.info("Forward has been deleted.")
@bot.message_handler(content_types=["photo", "text"])
def send_search(message):
# normal ordered search
if message.text in ("Voice Chat started", "Voice Chat ended"):
logging.warning("This is really funny %s", message.text)
return
base_send_search(message)
def base_send_search(message, instance=None):
if instance is None:
fan = fansub.FansubEntrance()
else:
fan = instance
bot.send_chat_action(message.chat.id, 'typing')
today_request("total")
if message.reply_to_message and message.reply_to_message.document and \
message.reply_to_message.document.file_name.startswith("error") and str(message.chat.id) == MAINTAINER:
today_request("answer")
send_my_response(message)
return
name = message.text
logging.info('Receiving message: %s from user %s(%s)', name, message.chat.username, message.chat.id)
if name is None:
today_request("invalid")
with open('warning.webp', 'rb') as sti:
bot.send_message(message.chat.id, "不要调戏我!我会报警的")
bot.send_sticker(message.chat.id, sti)
return
result = fan.search_preview(name)
markup = types.InlineKeyboardMarkup()
source = result.get("class")
result.pop("class")
count, MAX, warning = 0, 20, ""
for url_hash, detail in result.items():
if count > MAX:
warning = f"*结果太多啦,目前只显示前{MAX}个。关键词再精准一下吧!*\n\n"
break
btn = types.InlineKeyboardButton(detail["name"], callback_data="choose%s" % url_hash)
markup.add(btn)
count += 1
if result:
logging.info("🎉 Resource match.")
today_request("success")
bot.reply_to(message, f"{warning}呐🌹,一共%d个结果,选一个呀!来源:%s" % (len(result), source),
reply_markup=markup, parse_mode="markdown")
else:
logging.warning("⚠️️ Resource not found")
today_request("fail")
bot.send_chat_action(message.chat.id, 'typing')
encoded = quote_plus(name)
bot.reply_to(message, f"没有找到你想要的信息,是不是你打了错别字,或者搜索了一些国产影视剧。🤪\n"
f"还是你想调戏我哦🙅 本小可爱拒绝被调戏️\n\n"
"⚠️如果确定要我背锅,那么请使用 /help 来提交错误", disable_web_page_preview=True)
if REPORT:
btn = types.InlineKeyboardButton("快来修复啦", callback_data="fix")
markup.add(btn)
bot.send_chat_action(message.chat.id, 'upload_document')
bot.send_message(message.chat.id, f"《{name}》😭\n大部分情况下机器人是好用的,不要怀疑我的代码质量.\n"
f"如果你真的确定是机器人出问题了,那么点下面的按钮叫 @BennyThink 来修!\n"
f"⚠️报错前请三思,不要乱点,确保这锅应该甩给我。否则我会很生气的😡小心被拉黑哦",
reply_markup=markup)
content = f""" 报告者:{message.chat.first_name}{message.chat.last_name or ""}@{message.chat.username or ""}({message.chat.id})
问题发生时间:{time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(message.date))}
请求内容:{name}
请求URL:{YYETS_SEARCH_URL.format(kw=encoded)}\n\n
"""
save_error_dump(message.chat.id, content)
def magic_recycle(fan, call, url_hash):
if fan.redis.exists(url_hash):
return False
else:
logging.info("👏 Wonderful magic!")
bot.answer_callback_query(call.id, "小可爱使用魔法回收了你的搜索结果,你再搜索一次试试看嘛🥺", show_alert=True)
bot.delete_message(call.message.chat.id, call.message.message_id)
return True
@bot.callback_query_handler(func=lambda call: re.findall(r"choose(\S*)", call.data))
def choose_link(call):
fan = fansub.FansubEntrance()
bot.send_chat_action(call.message.chat.id, 'typing')
# call.data is url_hash, with sha1, http://www.rrys2020.com/resource/36588
resource_url_hash = re.findall(r"choose(\S*)", call.data)[0]
if magic_recycle(fan, call, resource_url_hash):
return
result = fan.search_result(resource_url_hash)
with tempfile.NamedTemporaryFile(mode='wb+', prefix=result["cnname"], suffix=".txt") as tmp:
bytes_data = json.dumps(result["all"], ensure_ascii=False, indent=4).encode('u8')
tmp.write(bytes_data)
tmp.flush()
with open(tmp.name, "rb") as f:
if result.get("type") == "resource":
caption = "{}\n\n{}".format(result["cnname"], result["share"])
else:
caption = result["all"]
bot.send_chat_action(call.message.chat.id, 'upload_document')
bot.send_document(call.message.chat.id, f, caption=caption)
@bot.callback_query_handler(func=lambda call: re.findall(r"unwelcome(\d*)", call.data))
def send_unwelcome(call):
# this will come from me only
logging.warning("I'm so unhappy!")
message = call.message
bot.send_chat_action(message.chat.id, 'typing')
# angry_count = angry_count + 1
global angry_count
angry_count += 1
uid = re.findall(r"unwelcome(\d*)", call.data)[0]
if uid:
text = "人人影视主要提供欧美日韩等海外资源,你的这个真没有🤷。\n" \
"<b>麻烦你先从自己身上找原因</b>,我又不是你的专属客服。\n" \
"不要再报告这种错误了🙄️,面倒な。😡"
bot.send_message(uid, text, parse_mode="html")
bot.reply_to(message, f"有生之日 生气次数:{angry_count}")
@bot.callback_query_handler(func=lambda call: call.data == 'fix')
def report_error(call):
logging.error("Reporting error to maintainer.")
bot.send_chat_action(call.message.chat.id, 'typing')
error_content = get_error_dump(call.message.chat.id)
if error_content == "":
bot.answer_callback_query(call.id, '多次汇报重复的问题并不会加快处理速度。', show_alert=True)
return
text = f'人人影视机器人似乎出现了一些问题🤔🤔🤔……{error_content[0:300]}'
markup = types.InlineKeyboardMarkup()
btn = types.InlineKeyboardButton("unwelcome", callback_data=f"unwelcome{call.message.chat.id}")
markup.add(btn)
bot.send_message(MAINTAINER, text, disable_web_page_preview=True, reply_markup=markup)
with tempfile.NamedTemporaryFile(mode='wb+', prefix=f"error_{call.message.chat.id}_", suffix=".txt") as tmp:
tmp.write(error_content.encode('u8'))
tmp.flush()
with open(tmp.name, "rb") as f:
bot.send_chat_action(call.message.chat.id, 'upload_document')
bot.send_document(MAINTAINER, f, caption=str(call.message.chat.id))
bot.answer_callback_query(call.id, 'Debug信息已经发送给维护者,请耐心等待回复~', show_alert=True)
if __name__ == '__main__':
logging.info('YYeTs bot is running...')
scheduler = BackgroundScheduler()
scheduler.add_job(reset_request, 'cron', hour=0, minute=0)
scheduler.start()
bot.polling()
|
the-stack_106_27571 | import json
import requests
import logging
import hashlib
import time
from fake_useragent import UserAgent
from uuid import uuid4
from .camera import EzvizCamera
# from pyezviz.camera import EzvizCamera
COOKIE_NAME = "sessionId"
CAMERA_DEVICE_CATEGORY = "IPC"
DOORBELL_DEVICE_CATEGORY = "BDoorBell"
EU_API_DOMAIN = "apiieu"
API_BASE_TLD = "ezvizlife.com"
API_BASE_URI = "https://" + EU_API_DOMAIN + "." + API_BASE_TLD
API_ENDPOINT_LOGIN = "/v3/users/login"
API_ENDPOINT_CLOUDDEVICES = "/api/cloud/v2/cloudDevices/getAll"
API_ENDPOINT_PAGELIST = "/v3/userdevices/v1/devices/pagelist"
API_ENDPOINT_DEVICES = "/v3/devices/"
API_ENDPOINT_SWITCH_STATUS = '/api/device/switchStatus'
API_ENDPOINT_PTZCONTROL = "/ptzControl"
API_ENDPOINT_ALARM_SOUND = "/alarm/sound"
API_ENDPOINT_DATA_REPORT = "/api/other/data/report"
API_ENDPOINT_DETECTION_SENSIBILITY = "/api/device/configAlgorithm"
API_ENDPOINT_DETECTION_SENSIBILITY_GET = "/api/device/queryAlgorithmConfig"
LOGIN_URL = API_BASE_URI + API_ENDPOINT_LOGIN
CLOUDDEVICES_URL = API_BASE_URI + API_ENDPOINT_CLOUDDEVICES
DEVICES_URL = API_BASE_URI + API_ENDPOINT_DEVICES
PAGELIST_URL = API_BASE_URI + API_ENDPOINT_PAGELIST
DATA_REPORT_URL = API_BASE_URI + API_ENDPOINT_DATA_REPORT
SWITCH_STATUS_URL = API_BASE_URI + API_ENDPOINT_SWITCH_STATUS
DETECTION_SENSIBILITY_URL = API_BASE_URI + API_ENDPOINT_DETECTION_SENSIBILITY
DETECTION_SENSIBILITY_GET_URL = API_BASE_URI + API_ENDPOINT_DETECTION_SENSIBILITY_GET
DEFAULT_TIMEOUT = 10
MAX_RETRIES = 3
class PyEzvizError(Exception):
pass
class EzvizClient(object):
def __init__(self, account, password, session=None, sessionId=None, timeout=None, cloud=None, connection=None):
"""Initialize the client object."""
self.account = account
self.password = password
# self._user_id = None
# self._user_reference = None
self._session = session
self._sessionId = sessionId
self._data = {}
self._timeout = timeout
self._CLOUD = cloud
self._CONNECTION = connection
def _login(self, apiDomain=EU_API_DOMAIN):
"""Login to Ezviz' API."""
# Ezviz API sends md5 of password
m = hashlib.md5()
m.update(self.password.encode('utf-8'))
md5pass = m.hexdigest()
payload = {"account": self.account, "password": md5pass, "featureCode": "92c579faa0902cbfcfcc4fc004ef67e7"}
try:
req = self._session.post("https://" + apiDomain + "." + API_BASE_TLD + API_ENDPOINT_LOGIN,
data=payload,
headers={"Content-Type": "application/x-www-form-urlencoded",
"clientType": "1",
"customNo": "1000001"},
timeout=self._timeout)
except OSError:
raise PyEzvizError("Can not login to API")
if req.status_code == 400:
raise PyEzvizError("Login error: Please check your username/password: %s ", str(req.text))
# let's parse the answer, session is in {.."loginSession":{"sessionId":"xxx...}
try:
response_json = req.json()
# if the apidomain is not proper
if response_json["meta"]["code"] == 1100:
return self._login(response_json["loginArea"]["apiDomain"])
sessionId = str(response_json["loginSession"]["sessionId"])
if not sessionId:
raise PyEzvizError("Login error: Please check your username/password: %s ", str(req.text))
self._sessionId = sessionId
except (OSError, json.decoder.JSONDecodeError) as e:
raise PyEzvizError("Impossible to decode response: \nResponse was: [%s] %s", str(e), str(req.status_code), str(req.text))
return True
def _get_pagelist(self, filter=None, json_key=None, max_retries=0):
"""Get data from pagelist API."""
if max_retries > MAX_RETRIES:
raise PyEzvizError("Can't gather proper data. Max retries exceeded.")
if filter is None:
raise PyEzvizError("Trying to call get_pagelist without filter")
try:
req = self._session.get(PAGELIST_URL,
params={'filter': filter},
headers={ 'sessionId': self._sessionId},
timeout=self._timeout)
except OSError as e:
raise PyEzvizError("Could not access Ezviz' API: " + str(e))
if req.status_code == 401:
# session is wrong, need to relogin
self.login()
logging.info("Got 401, relogging (max retries: %s)",str(max_retries))
return self._get_pagelist(filter, json_key, max_retries+1)
if req.text is "":
raise PyEzvizError("No data")
try:
json_output = req.json()
except (OSError, json.decoder.JSONDecodeError) as e:
raise PyEzvizError("Impossible to decode response: " + str(e) + "\nResponse was: " + str(req.text))
if json_key is None:
json_result = json_output
else:
json_result = json_output[json_key]
if not json_result:
raise PyEzvizError("Impossible to load the devices, here is the returned response: %s ", str(req.text))
return json_result
def _switch_status(self, serial, status_type, enable, max_retries=0):
"""Switch status on a device"""
try:
req = self._session.post(SWITCH_STATUS_URL,
data={ 'sessionId': self._sessionId,
'enable': enable,
'serial': serial,
'channel': '0',
'netType' : 'WIFI',
'clientType': '1',
'type': status_type},
timeout=self._timeout)
if req.status_code == 401:
# session is wrong, need to relogin
self.login()
logging.info("Got 401, relogging (max retries: %s)",str(max_retries))
return self._switch_status(serial, status_type, enable, max_retries+1)
response_json = req.json()
if response_json['resultCode'] != '0':
raise PyEzvizError("Could not set the switch, maybe a permission issue ?: Got %s : %s)",str(req.status_code), str(req.text))
return False
except OSError as e:
raise PyEzvizError("Could not access Ezviz' API: " + str(e))
return True
def _switch_devices_privacy(self, enable=0):
"""Switch privacy status on ALL devices (batch)"""
# enable=1 means privacy is ON
# get all devices
devices = self._get_devices()
# foreach, launch a switchstatus for the proper serial
for idx, device in enumerate(devices):
serial = devices[idx]['serial']
self._switch_status(serial, TYPE_PRIVACY_MODE, enable)
return True
def load_cameras(self):
"""Load and return all cameras objects"""
# get all devices
devices = self.get_DEVICE()
cameras = []
# foreach, launch a switchstatus for the proper serial
for idx, device in enumerate(devices):
if devices[idx]['deviceCategory'] == CAMERA_DEVICE_CATEGORY:
camera = EzvizCamera(self, device['deviceSerial'])
camera.load()
cameras.append(camera.status())
if devices[idx]['deviceCategory'] == DOORBELL_DEVICE_CATEGORY:
camera = EzvizCamera(self, device['deviceSerial'])
camera.load()
cameras.append(camera.status())
return cameras
def ptzControl(self, command, serial, action, speed=5, max_retries=0):
"""PTZ Control by API."""
if max_retries > MAX_RETRIES:
raise PyEzvizError("Can't gather proper data. Max retries exceeded.")
if command is None:
raise PyEzvizError("Trying to call ptzControl without command")
if action is None:
raise PyEzvizError("Trying to call ptzControl without action")
try:
req = self._session.put(DEVICES_URL + serial + API_ENDPOINT_PTZCONTROL,
data={'command': command,
'action': action,
'channelNo': "1",
'speed': speed,
'uuid': str(uuid4()),
'serial': serial},
headers={ 'sessionId': self._sessionId,
'clientType': "1"},
timeout=self._timeout)
except OSError as e:
raise PyEzvizError("Could not access Ezviz' API: " + str(e))
if req.status_code == 401:
# session is wrong, need to re-log-in
self.login()
logging.info("Got 401, relogging (max retries: %s)",str(max_retries))
return self.ptzControl(command, serial, action, speed, max_retries+1)
def login(self):
"""Set http session."""
if self._sessionId is None:
self._session = requests.session()
# adding fake user-agent header
self._session.headers.update({'User-agent': str(UserAgent().random)})
return self._login()
def data_report(self, serial, enable=1, max_retries=0):
"""Enable alarm notifications."""
if max_retries > MAX_RETRIES:
raise PyEzvizError("Can't gather proper data. Max retries exceeded.")
# operationType = 2 if disable, and 1 if enable
operationType = 2 - int(enable)
print(f"enable: {enable}, operationType: {operationType}")
try:
req = self._session.post(DATA_REPORT_URL,
data={ 'clientType': '1',
'infoDetail': json.dumps({
"operationType" : int(operationType),
"detail" : '0',
"deviceSerial" : serial + ",2"
}, separators=(',',':')),
'infoType': '3',
'netType': 'WIFI',
'reportData': None,
'requestType': '0',
'sessionId': self._sessionId
},
timeout=self._timeout)
except OSError as e:
raise PyEzvizError("Could not access Ezviz' API: " + str(e))
if req.status_code == 401:
# session is wrong, need to re-log-in
self.login()
logging.info("Got 401, relogging (max retries: %s)",str(max_retries))
return self.data_report(serial, enable, max_retries+1)
return True
# soundtype: 0 = normal, 1 = intensive, 2 = disabled ... don't ask me why...
def detection_sensibility(self, serial, sensibility=3, max_retries=0):
"""Enable alarm notifications."""
if max_retries > MAX_RETRIES:
raise PyEzvizError("Can't gather proper data. Max retries exceeded.")
if sensibility not in [0,1,2,3,4,5,6]:
raise PyEzvizError("Unproper sensibility (should be within 1 to 6).")
try:
req = self._session.post(DETECTION_SENSIBILITY_URL,
data={ 'subSerial' : serial,
'type': '0',
'sessionId': self._sessionId,
'value': sensibility,
},
timeout=self._timeout)
except OSError as e:
raise PyEzvizError("Could not access Ezviz' API: " + str(e))
if req.status_code == 401:
# session is wrong, need to re-log-in
self.login()
logging.info("Got 401, relogging (max retries: %s)",str(max_retries))
return self.detection_sensibility(serial, sensibility, max_retries+1)
return True
def get_detection_sensibility(self, serial, max_retries=0):
"""Enable alarm notifications."""
if max_retries > MAX_RETRIES:
raise PyEzvizError("Can't gather proper data. Max retries exceeded.")
try:
req = self._session.post(DETECTION_SENSIBILITY_GET_URL,
data={ 'subSerial' : serial,
'sessionId': self._sessionId,
'clientType': 1
},
timeout=self._timeout)
except OSError as e:
raise PyEzvizError("Could not access Ezviz' API: " + str(e))
if req.status_code == 401:
# session is wrong, need to re-log-in
self.login()
logging.info("Got 401, relogging (max retries: %s)",str(max_retries))
return self.get_detection_sensibility(serial, max_retries+1)
elif req.status_code != 200:
raise PyEzvizError("Could not get detection sensibility: Got %s : %s)",str(req.status_code), str(req.text))
response_json = req.json()
if response_json['resultCode'] != '0':
# raise PyEzvizError("Could not get detection sensibility: Got %s : %s)",str(req.status_code), str(req.text))
return 'Unknown'
else:
return response_json['algorithmConfig']['algorithmList'][0]['value']
def alarm_sound(self, serial, soundType, enable=1, max_retries=0):
"""Enable alarm sound by API."""
if max_retries > MAX_RETRIES:
raise PyEzvizError("Can't gather proper data. Max retries exceeded.")
if soundType not in [0,1,2]:
raise PyEzvizError("Invalid soundType, should be 0,1,2: " + str(soundType))
try:
req = self._session.put(DEVICES_URL + serial + API_ENDPOINT_ALARM_SOUND,
data={ 'enable': enable,
'soundType': soundType,
'voiceId': '0',
'deviceSerial': serial
},
headers={ 'sessionId': self._sessionId},
timeout=self._timeout)
except OSError as e:
raise PyEzvizError("Could not access Ezviz' API: " + str(e))
if req.status_code == 401:
# session is wrong, need to re-log-in
self.login()
logging.info("Got 401, relogging (max retries: %s)",str(max_retries))
return self.alarm_sound(serial, soundType, enable, max_retries+1)
elif req.status_code != 200:
logging.error("Got %s : %s)",str(req.status_code), str(req.text))
return True
def switch_devices_privacy(self,enable=0):
"""Switch status on all devices."""
return self._switch_devices_privacy(enable)
def switch_status(self, serial, status_type, enable=0):
"""Switch status of a device."""
return self._switch_status(serial, status_type, enable)
def get_PAGE_LIST(self, max_retries=0):
return self._get_pagelist(filter='CLOUD,TIME_PLAN,CONNECTION,SWITCH,STATUS,WIFI,STATUS_EXT,NODISTURB,P2P,TTS,KMS,HIDDNS', json_key=None)
def get_DEVICE(self, max_retries=0):
return self._get_pagelist(filter='CLOUD',json_key='deviceInfos')
def get_CONNECTION(self, max_retries=0):
return self._get_pagelist(filter='CONNECTION',json_key='connectionInfos')
def get_STATUS(self, max_retries=0):
return self._get_pagelist(filter='STATUS',json_key='statusInfos')
def get_SWITCH(self, max_retries=0):
return self._get_pagelist(filter='SWITCH',json_key='switchStatusInfos')
def get_WIFI(self, max_retries=0):
return self._get_pagelist(filter='WIFI',json_key='wifiInfos')
def get_NODISTURB(self, max_retries=0):
return self._get_pagelist(filter='NODISTURB',json_key='alarmNodisturbInfos')
def get_P2P(self, max_retries=0):
return self._get_pagelist(filter='P2P',json_key='p2pInfos')
def get_KMS(self, max_retries=0):
return self._get_pagelist(filter='KMS',json_key='kmsInfos')
def get_TIME_PLAN(self, max_retries=0):
return self._get_pagelist(filter='TIME_PLAN',json_key='timePlanInfos')
def close_session(self):
"""Close current session."""
self._session.close()
self._session = None |
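# --- Usage sketch (illustrative, not part of the original module) ---
# Minimal example of driving EzvizClient as defined above; the credentials are
# placeholders and a network connection to the Ezviz API is required.
#
#   client = EzvizClient("[email protected]", "my-password")
#   client.login()
#   for cam in client.load_cameras():
#       print(cam)
#   client.close_session()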
the-stack_106_27576 | from __future__ import print_function
from __future__ import absolute_import
import six
input_name = '../examples/homogenization/perfusion_micro.py'
from sfepy.base.testing import TestCommon
class Test(TestCommon):
@staticmethod
def from_conf(conf, options):
return Test(conf = conf, options = options)
def compare_scalars(s1, s2, l1='s1', l2='s2',
allowed_error=1e-8):
diff = abs(s1 - s2)
TestCommon.report( '|%s - %s|: %e' % (l1, l2, diff))
if diff > allowed_error:
return False
else:
return True
compare_scalars = staticmethod(compare_scalars)
def test_solution(self):
from sfepy.base.base import Struct
from sfepy.base.conf import ProblemConf, get_standard_keywords
from sfepy.homogenization.homogen_app import HomogenizationApp
#import numpy as nm
import os.path as op
ok = True
required, other = get_standard_keywords()
required.remove('equations')
print(input_name)
full_name = op.join(op.dirname(__file__), input_name)
test_conf = ProblemConf.from_file(full_name, required, other)
options = Struct(output_filename_trunk=None,
save_ebc=False,
save_ebc_nodes=False,
save_regions=False,
save_field_meshes=False,
save_regions_as_groups=False,
solve_not=False)
test_conf.options['output_dir'] = './output-tests'
app = HomogenizationApp(test_conf, options, 'homogen:' )
coefs = app()
aerr = 1.0e-9
self.report('allowed error: abs = %e' % (aerr, ))
# G^A = G^B ?
ok = ok and self.compare_scalars(coefs.GA, coefs.GB,\
'G^A', 'G^B', aerr)
# F^{A+} + F^{B+} = -1/h \int_{\partial_+Y_m} ?
aux = 1.0 / test_conf.param_h * coefs.Volume_bYMp
ok = ok and self.compare_scalars(coefs.FpA + coefs.FpB, -aux,
'F^{A+} + F^{B+}', '-bYM^+', aerr)
# F^{A-} + F^{B-} = -1/h \int_{\partial_-Y_m} ?
aux = 1.0 / test_conf.param_h * coefs.Volume_bYMm
ok = ok and self.compare_scalars(coefs.FmA + coefs.FmB, -aux,
'F^{A-} + F^{B-}', '-bYM^-', aerr)
# symmetry of H ?
ok = ok and self.compare_scalars(coefs.Hpm, coefs.Hmp,
'H^{+-}', 'H^{-+}', aerr)
# E = -F ?
ok = ok and self.compare_scalars(coefs.EmA, -coefs.FmA,
'E^{A-}', '-F^{A-}',aerr)
ok = ok and self.compare_scalars(coefs.EpA, -coefs.FpA,
'E^{A+}', '-F^{A+}',aerr)
ok = ok and self.compare_scalars(coefs.EmB, -coefs.FmB,
'E^{B-}', '-F^{B-}',aerr)
ok = ok and self.compare_scalars(coefs.EpB, -coefs.FpB,
'E^{B+}', '-F^{B+}',aerr)
# S = S_test ?
coefsd = coefs.to_dict()
compare = []
for ii in six.iterkeys(coefsd):
if 'S_test' in ii:
ch = ii[6]
io = ii[-1]
compare.append((ii, 'S%s_%s' % (ch, io)))
for s1, s2 in compare:
ok = ok and self.compare_vectors(coefsd[s1], -coefsd[s2],
label1='S_test', label2='S',
allowed_error=aerr)
return ok
|
the-stack_106_27578 | """
Module for testing functionality associated with calculating the
plasma frequency.
- `~plasmapy.formulary.frequencies.plasma_frequency`
- `~plasmapy.formulary.frequencies.plasma_frequency_lite`
- `~plasmapy.formulary.frequencies.wp_`
"""
import astropy.units as u
import numpy as np
import pytest
from astropy.constants.si import m_p
from numba.extending import is_jitted
from plasmapy.formulary.frequencies import plasma_frequency, plasma_frequency_lite, wp_
from plasmapy.particles import Particle
from plasmapy.utils.pytest_helpers import assert_can_handle_nparray
@pytest.mark.parametrize(
"alias, parent",
[(wp_, plasma_frequency)],
)
def test_aliases(alias, parent):
assert alias is parent
class TestPlasmaFrequency:
"""
Test class for `plasmapy.formulary.frequencies.plasma_frequency`.
Note: Testing of `plasma_frequency_lite` is done in a separate test
class.
"""
@pytest.mark.parametrize(
"bound_name, bound_attr",
[("lite", plasma_frequency_lite)],
)
def test_lite_function_binding(self, bound_name, bound_attr):
"""Test expected attributes are bound correctly."""
assert hasattr(plasma_frequency, bound_name)
assert getattr(plasma_frequency, bound_name) is bound_attr
def test_lite_function_marking(self):
"""
Test plasma_frequency is marked as having a Lite-Function.
"""
assert hasattr(plasma_frequency, "__bound_lite_func__")
assert isinstance(plasma_frequency.__bound_lite_func__, dict)
for bound_name, bound_origin in plasma_frequency.__bound_lite_func__.items():
assert hasattr(plasma_frequency, bound_name)
attr = getattr(plasma_frequency, bound_name)
origin = f"{attr.__module__}.{attr.__name__}"
assert origin == bound_origin
@pytest.mark.parametrize(
"args, kwargs, _error",
[
((u.m ** -3, "e-"), {}, TypeError),
(("not a density", "e-"), {}, TypeError),
((5 * u.s, "e-"), {}, u.UnitTypeError),
((5 * u.m ** -2, "e-"), {}, u.UnitTypeError),
((), {"n": 5 * u.m ** -3, "particle": "not a particle"}, ValueError),
],
)
def test_raises(self, args, kwargs, _error):
"""
Test scenarios that cause plasma_frequency to raise an
Exception.
"""
with pytest.raises(_error):
plasma_frequency(*args, **kwargs)
@pytest.mark.parametrize(
"args, kwargs, _warning, expected",
[
(
(1e19, "e-"),
{},
u.UnitsWarning,
plasma_frequency(1e19 * u.m ** -3, "e-"),
),
((1e19, "p"), {}, u.UnitsWarning, plasma_frequency(1e19 * u.m ** -3, "p")),
],
)
def test_warns(self, args, kwargs, _warning, expected):
"""
Test scenarios the cause plasma_frequency to issue a warning.
"""
with pytest.warns(_warning):
wp = plasma_frequency(*args, **kwargs)
assert isinstance(wp, u.Quantity)
assert wp.unit == u.rad / u.s
if expected is not None:
assert np.allclose(wp, expected)
@pytest.mark.parametrize(
"args, kwargs, expected, rtol",
[
((1 * u.cm ** -3, "e-"), {}, 5.64e4, 1e-2),
((1 * u.cm ** -3, "N"), {}, 3.53e2, 1e-1),
((1e17 * u.cm ** -3, "p"), {"z_mean": 0.8}, 333063562455.4028, 1e-6),
(
(5e19 * u.m ** -3, "p"),
{},
plasma_frequency(5e19 * u.m ** -3, particle="H-1+").value,
1e-5,
),
((m_p.to(u.u).value * u.cm ** -3,), {"particle": "p"}, 1.32e3, 1e-2),
],
)
def test_values(self, args, kwargs, expected, rtol):
"""Test various expected values."""
wp = plasma_frequency(*args, **kwargs)
assert isinstance(wp, u.Quantity)
assert wp.unit == u.rad / u.s
assert np.allclose(wp.value, expected, rtol=rtol)
@pytest.mark.parametrize(
"args, kwargs",
[((1 * u.cm ** -3, "N"), {}), ((1e12 * u.cm ** -3,), {"particle": "p"})],
)
def test_to_hz(self, args, kwargs):
"""Test behavior of the ``to_hz`` keyword."""
wp = plasma_frequency(*args, **kwargs)
fp = plasma_frequency(*args, to_hz=True, **kwargs)
assert isinstance(fp, u.Quantity)
assert fp.unit == u.Hz
assert fp.value == wp.value / (2.0 * np.pi)
def test_nans(self):
assert np.isnan(plasma_frequency(np.nan * u.m ** -3, "e-"))
def test_can_handle_numpy_arrays(self):
assert_can_handle_nparray(plasma_frequency)
class TestPlasmaFrequencyLite:
"""Test class for `plasma_frequency_lite`."""
def test_is_jitted(self):
"Ensure `plasmapy_frequency_lite` was jitted by `numba`."
assert is_jitted(plasma_frequency_lite)
@pytest.mark.parametrize(
"inputs",
[
{"n": 1e12 * u.cm ** -3, "particle": "e-"},
{"n": 1e12 * u.cm ** -3, "particle": "e-", "to_hz": True},
{"n": 1e11 * u.cm ** -3, "particle": "He", "z_mean": 0.8},
],
)
def test_normal_vs_lite_values(self, inputs):
"""
Test that plasma_frequency and plasma_frequency_lite calculate
the same values.
"""
particle = Particle(inputs["particle"])
inputs_unitless = {
"n": inputs["n"].to(u.m ** -3).value,
"mass": particle.mass.value,
}
if "z_mean" in inputs:
inputs_unitless["z_mean"] = inputs["z_mean"]
else:
try:
inputs_unitless["z_mean"] = np.abs(particle.charge_number)
except Exception:
inputs_unitless["z_mean"] = 1
if "to_hz" in inputs:
inputs_unitless["to_hz"] = inputs["to_hz"]
lite = plasma_frequency_lite(**inputs_unitless)
pylite = plasma_frequency_lite.py_func(**inputs_unitless)
assert pylite == lite
normal = plasma_frequency(**inputs)
assert np.allclose(normal.value, lite)
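# --- Cross-check sketch (illustrative, not one of the original tests) ---
# plasma_frequency_lite is expected to follow w_p = sqrt(n * (z * e)**2 / (eps0 * m))
# in SI units (rad / s). The helper below evaluates that closed form directly so the
# lite function can be sanity-checked by hand; it is a sketch, not a maintained test,
# and the helper name is an assumption, not part of plasmapy.
def _analytic_plasma_frequency(n, mass, z_mean=1):
from astropy.constants import eps0
from astropy.constants import e as elementary_charge
return np.sqrt(n * (z_mean * elementary_charge.si.value) ** 2 / (eps0.value * mass))
# Example: _analytic_plasma_frequency(1e6, 9.109e-31) is roughly 5.64e4 rad/s,
# matching the electron value used in TestPlasmaFrequency.test_values above.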
|
the-stack_106_27579 | ################################################################################
## Right widget files for automatic mode
# Author: Maleakhi, Alex, Faidon, Jamie, Olle, Harry
################################################################################
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from DQN import get_player, Model
import os
def warn(*args, **kwargs):
pass
import warnings
warnings.warn = warn
warnings.simplefilter("ignore", category=PendingDeprecationWarning)
from RL.medical import MedicalPlayer
from RL.common import play_n_episodes
from tensorpack import (PredictConfig, OfflinePredictor, get_model_loader,
logger, TrainConfig, ModelSaver, PeriodicTrigger,
ScheduledHyperParamSetter, ObjAttrParam,
HumanHyperParamSetter, argscope, RunOp, LinearWrap,
FullyConnected, PReLU, SimpleTrainer,
launch_train_with_config)
from GUI.thread import WorkerThread
from GUI.window import Window
from GUI.terminal import Terminal
from GUI.plot import Plot
from GUI.FilenamesGUI import FilenamesGUI
from matplotlib.backends.qt_compat import QtCore, QtWidgets
from matplotlib.backends.backend_qt5agg import (
FigureCanvas, NavigationToolbar2QT as NavigationToolbar)
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
###############################################################################
# BATCH SIZE USED IN NATURE PAPER IS 32 - MEDICAL IS 256
BATCH_SIZE = 48
# BREAKOUT (84,84) - MEDICAL 2D (60,60) - MEDICAL 3D (26,26,26)
IMAGE_SIZE = (45, 45, 45)
# how many frames to keep
# in other words, how many observations the network can see
FRAME_HISTORY = 4
# the frequency of updating the target network
UPDATE_FREQ = 4
# DISCOUNT FACTOR - NATURE (0.99) - MEDICAL (0.9)
GAMMA = 0.9 #0.99
# REPLAY MEMORY SIZE - NATURE (1e6) - MEDICAL (1e5 view-patches)
MEMORY_SIZE = 1e5 #6
# consume at least 1e6 * 27 * 27 * 27 bytes
INIT_MEMORY_SIZE = MEMORY_SIZE // 20 #5e4
# each epoch is 100k played frames
STEPS_PER_EPOCH = 10000 // UPDATE_FREQ * 10
# num training epochs in between model evaluations
EPOCHS_PER_EVAL = 2
# the number of episodes to run during evaluation
EVAL_EPISODE = 50
###############################################################################
## Right Widget (Automatic Mode)
class RightWidgetSettings(QFrame):
"""
Class representing the right widget for automatic mode.
"""
# Constant (indication of simulation state)
PAUSE = "Pause"
START = "Start"
RESUME = "Resume"
# Constant task indication
TASK_PLAY = "Play"
TASK_EVAL = "Evaluation"
# Signal
terminal_signal = pyqtSignal(dict)
def __init__(self, *args, **kwargs):
super(RightWidgetSettings, self).__init__(*args, **kwargs)
self.mounted = False # by default mounting is set to false
# Thread and window object which will be used to gain access to primary
# windows.
self.thread = WorkerThread(None)
self.window = None
# Placeholder for GUI file names, status
self.fname_images = FilenamesGUI()
self.fname_landmarks = FilenamesGUI()
self.fname_model = FilenamesGUI()
# Task
self.task = QLabel('Task', self)
self.play_button = QRadioButton("Play")
self.eval_button = QRadioButton("Evaluation")
self.eval_button.setChecked(True)
# Agent speed
label_speed = QLabel("Agent Speed")
self.speed_slider = QSlider(Qt.Horizontal, self)
self.speed_slider.setMinimum(0)
self.speed_slider.setMaximum(5)
self.speed_slider.setValue(5)
self.speed_slider.valueChanged[int].connect(self.changeValue)
# Run and terminate
self.run_button = QPushButton(self.START, self)
self.terminate_button = QPushButton('Terminate', self)
# Terminal
self.terminal = Terminal()
# Plot
self.plot = Plot()
## Layout
# Task layout
hbox_task = QHBoxLayout()
hbox_task.setSpacing(30)
hbox_task.addWidget(self.play_button)
hbox_task.addWidget(self.eval_button)
# Run layout
hbox_run = QHBoxLayout()
hbox_run.setSpacing(30)
hbox_run.addWidget(self.run_button)
hbox_run.addWidget(self.terminate_button)
# Task, agent speed, run, layout
grid = QGridLayout()
grid.setVerticalSpacing(20) # spacing
grid.addWidget(self.task, 1, 0)
grid.addLayout(hbox_task, 2, 0)
grid.addWidget(QLabel("<hr />"), 3, 0, 1, 2)
grid.addWidget(label_speed, 4, 0, 1, 2)
grid.addWidget(self.speed_slider, 5, 0, 1, 2)
grid.addLayout(hbox_run, 7, 0, 1, 2)
# Main layout
vbox = QVBoxLayout()
vbox.addLayout(grid)
vbox.addItem(QSpacerItem(300, 20)) # spacer
vbox.addWidget(self.terminal)
vbox.addWidget(self.plot)
vbox.addStretch()
self.setLayout(vbox)
# Event handler
self.run_button.clicked.connect(self.on_clicking_run)
self.terminal_signal.connect(self.terminal_signal_handler)
self.terminate_button.clicked.connect(self.on_clicking_terminate)
# CSS styling for some widget components
self.setStyleSheet("background:white")
self.run_button.setStyleSheet("background-color:#4CAF50; color:white")
self.terminate_button.setStyleSheet("background-color:#f44336; color:white")
def clear_custom_load(self):
"""
Clear load custom data selection
"""
self.fname_images.clear()
self.fname_landmarks.clear()
self.fname_model.clear()
self.window.left_widget.reset_file_edit_text()
def restart(self):
"""
Used to restart right widget state
"""
self.run_button.setStyleSheet("background-color:#4CAF50; color:white")
self.run_button.setText(self.START)
def changeValue(self, value):
"""
Event handler for slider (adjusting agent speed)
"""
if value >= 4:
self.thread.speed = WorkerThread.FAST
elif value >= 2:
self.thread.speed = WorkerThread.MEDIUM
else:
self.thread.speed = WorkerThread.SLOW
def on_clicking_terminate(self):
"""
Event handler to terminate simulation.
"""
self.thread.terminate = True # give signal to terminate thread
self.thread.pause = False # indicate that thread should not be paused
# Print in terminal and restart setup
self.terminal.add_log("blue", "Terminate")
self.restart()
self.enable_radio_button(True)
# Reset simple image viewer and windows
self.window.widget.reset()
self.window.statusbar.showMessage("Ready")
def which_task(self):
"""
Determine which radio button task is checked
"""
if self.play_button.isChecked():
return RightWidgetSettings.TASK_PLAY
else:
return RightWidgetSettings.TASK_EVAL
def which_usecase(self):
"""
Determine which radio button usecase is checked
"""
# If user does not specify specific file to load
if not self.fname_images.user_define or \
not self.fname_landmarks.user_define or \
not self.fname_model.user_define:
if self.window.left_widget.brain_button.isChecked():
return Window.BRAIN
elif self.window.left_widget.cardiac_button.isChecked():
return Window.CARDIAC
else:
return Window.FETAL
# Else user specify
else:
return Window.USER_DEFINED
def on_clicking_run(self):
"""
Event handler (slot) for when the run button is clicked
"""
if self.run_button.text() == self.START:
# Manage thread
self.thread.terminate = False
# Manage task
self.task_value = self.which_task()
self.GIF_value = False
self.video_value = False
# Manage run button
self.run_button.setText(self.PAUSE)
self.window.statusbar.showMessage("Running")
self.run_button.setStyleSheet("background-color:orange; color:white")
# Get usecase and set paths, print to terminal
self.window.usecase = self.which_usecase()
self.set_paths()
self.terminal.add_log("blue", f"Start {self.task_value} Mode ({self.window.usecase})")
# Run using setup
self.run_DQN()
# When resume is clicked
elif self.run_button.text() == self.RESUME:
# Manage threads
self.thread.pause = False
# Terminal logs and other details
self.run_button.setText(self.PAUSE)
self.terminal.add_log("blue", "Resume")
self.run_button.setStyleSheet("background-color:orange; color:white")
self.window.statusbar.showMessage("Running")
# When pause is clicked
else:
self.thread.pause = True
self.run_button.setText(self.RESUME)
self.run_button.setStyleSheet("background-color:#4CAF50; color:white")
self.terminal.add_log("blue", "Pause")
self.window.statusbar.showMessage("Paused")
def terminal_signal_handler(self, value):
"""
Used to handle agent signal when it moves.
:param value: dictionary from medical.py
"""
current_episode = value["current_episode"]
total_episode = value["total_episode"]
score = value["score"]
distance_error = value["distance_error"]
q_values = value["q_values"]
self.terminal.terminal_signal_handler(current_episode, total_episode, score,
distance_error, q_values)
def check_user_define_usecase(self, filename_model, filename_img, filename_landmark):
"""
Check which usecase the user wants (in the case of custom data loaded by the user)
:param filename_model: string representing file name for model
:param filename_img: string representing file name for image
:param filename_landmark: string representing file name for landmark
"""
filename_model = filename_model.split("/")
filename_img = filename_img.split("/")
filename_landmark = filename_landmark.split("/")
# Ensure that user input file properly
if "cardiac" in filename_model[-2] \
and "cardiac" in filename_img[-1]\
and "cardiac" in filename_landmark[-1] :
return Window.CARDIAC
elif "brain" in filename_model[-2] \
and "brain" in filename_img[-1] \
and "brain" in filename_landmark[-1]:
return Window.BRAIN
elif "ultrasound" in filename_model[-2] \
and "fetal" in filename_img[-1] \
and "fetal" in filename_landmark[-1]:
return Window.FETAL
else:
return Window.USER_DEFINED # Invalid mode
def set_paths(self):
"""
Used to set paths before running the code
"""
redir = '' if self.mounted else 'local/'
if self.window.usecase == Window.BRAIN:
# Default MRI
self.fname_images.name = f"./data/filenames/{redir}brain_train_files_new_paths.txt"
self.fname_model.name = "./data/models/DQN_multiscale_brain_mri_point_pc_ROI_45_45_45/model-600000.data-00000-of-00001"
self.fname_landmarks.name = f"./data/filenames/{redir}brain_train_landmarks_new_paths.txt"
elif self.window.usecase == Window.CARDIAC:
# Default cardiac
self.fname_images.name = f"./data/filenames/{redir}cardiac_train_files_new_paths.txt"
self.fname_model.name = './data/models/DQN_cardiac_mri/model-600000.data-00000-of-00001'
self.fname_landmarks.name = f"./data/filenames/{redir}cardiac_train_landmarks_new_paths.txt"
elif self.window.usecase == Window.FETAL:
# Default fetal
self.fname_images.name = f"./data/filenames/{redir}fetalUS_train_files_new_paths.txt"
self.fname_model.name = './data/models/DQN_ultrasound/model-600000.data-00000-of-00001'
self.fname_landmarks.name = f"./data/filenames/{redir}fetalUS_train_landmarks_new_paths.txt"
else:
# User defined file selection
self.fname_images.name = self.window.left_widget.fname_images
self.fname_model.name = self.window.left_widget.fname_model
self.fname_landmarks.name = self.window.left_widget.fname_landmarks
# To tell the program which loader it should use
self.window.usecase = self.check_user_define_usecase(self.fname_model.name, self.fname_images.name, self.fname_landmarks.name)
def error_message_box(self):
"""
Display error when user incorrectly upload file
"""
msg = QMessageBox()
msg.setWindowTitle("Error on user defined settings")
msg.setText("Please use appropriate model, image, and landmarks.")
msg.setIcon(QMessageBox.Critical)
# Clean up
self.clear_custom_load()
self.window.usecase = self.which_usecase()
self.restart() # restart right widget state
# Display pop up message
msg.exec_()
def run_DQN(self):
"""
Run DQN algorithm.
"""
# if self.GPU_value:
# os.environ['CUDA_VISIBLE_DEVICES'] = self.GPU_value
# check input files
if self.task_value == RightWidgetSettings.TASK_PLAY:
self.selected_list = [self.fname_images]
else:
self.selected_list = [self.fname_images, self.fname_landmarks]
self.METHOD = "DQN"
# load files into env to set num_actions, num_validation_files
try:
init_player = MedicalPlayer(files_list=self.selected_list,
data_type=self.window.usecase,
screen_dims=IMAGE_SIZE,
task='play')
self.NUM_ACTIONS = init_player.action_space.n
self.num_files = init_player.files.num_files
# Create a thread to run background task
self.worker_thread = WorkerThread(target_function=self.thread_function)
self.worker_thread.window = self.window
# Change to appropriate layout
self.window.widget.change_layout(self.window.usecase)
self.enable_radio_button(False)
self.worker_thread.start()
# If there is a problem with the loader, the user loaded an incorrect file
except Exception:
self.terminal.add_log("red", "Error loading user defined settings. Please use appropriate model, image, and landmarks." )
self.error_message_box()
def enable_radio_button(self, enabled):
"""
Toggle radio button and disable irrelevant one.
:enabled: True if enabled, False if disabled
"""
# Disable radio button for the irrelevant task
if self.which_task() == RightWidgetSettings.TASK_EVAL:
self.play_button.setEnabled(enabled)
else:
self.eval_button.setEnabled(enabled)
def thread_function(self):
"""
Run on secondary thread
"""
pred = OfflinePredictor(PredictConfig(
model=Model(IMAGE_SIZE, FRAME_HISTORY, self.METHOD, self.NUM_ACTIONS, GAMMA, ""),
session_init=get_model_loader(self.fname_model.name),
input_names=['state'],
output_names=['Qvalue']))
# demo pretrained model one episode at a time
if self.task_value == 'Play':
play_n_episodes(get_player(files_list=self.selected_list, viz=0.01,
data_type=self.window.usecase,
saveGif=self.GIF_value,
saveVideo=self.video_value,
task='play'),
pred, self.num_files, viewer=self.window)
# run episodes in parallel and evaluate pretrained model
elif self.task_value == 'Evaluation':
play_n_episodes(get_player(files_list=self.selected_list, viz=0.01,
data_type=self.window.usecase,
saveGif=self.GIF_value,
saveVideo=self.video_value,
task='eval'),
pred, self.num_files, viewer=self.window)
|
the-stack_106_27580 | from urllib import request
import json
class PushNotif():
key, event = "",""
def __init__(self,key,event):
self.key = key
self.event = event
def send(self,value1 = "",value2="",value3=""):
values = {
"value1": value1,
"value2": value2,
"value3": value3
}
headers = {
"Content-Type": "application/json",
"Accept": "application/json",
}
data = json.dumps(values).encode("utf-8")
try:
url = "https://maker.ifttt.com/trigger/"+self.event+"/with/key/"+self.key
req = request.Request(url, data, headers)
with request.urlopen(req) as f:
res = f.read()
except Exception as e:
print(e) |
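# --- Usage sketch (illustrative, not part of the original module) ---
# The key and event name are placeholders for a real IFTTT Webhooks key and
# trigger; send() simply prints any network error, as defined above.
#
#   notifier = PushNotif("your-ifttt-key", "your_event_name")
#   notifier.send(value1="backup finished", value2="OK")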
the-stack_106_27581 | #!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <[email protected]>
#
'''
Non-relativistic unrestricted Hartree-Fock zero-field splitting
(In testing)
Refs:
JCP, 134, 194113
PRB, 60, 9566
JCP, 127, 164112
'''
import time
from functools import reduce
import numpy
from pyscf import lib
from pyscf.lib import logger
from pyscf.gto import mole
from pyscf.ao2mo import _ao2mo
from pyscf.soscf.newton_ah import _gen_uhf_response
from pyscf.prop.nmr import uhf as uhf_nmr
from pyscf.prop.ssc.rhf import _dm1_mo2ao
from pyscf.data import nist
def koseki_charge(z):
'''Koseki effective charge in SO correction
Ref:
JPC 96, 10768
JPC, 99, 12764
JPCA, 102, 10430
'''
# JPC 96, 10768
if z <= 2:
return z
elif z <= 10:
return z * (.3 + z * .05)
elif z <= 18:
return z * (1.05 - z * .0125)
else:
return z
def direct_spin_spin(zfsobj, mol, dm0, verbose=None):
log = logger.new_logger(zfsobj, verbose)
if isinstance(dm0, numpy.ndarray) and dm0.ndim == 2: # RHF DM
return numpy.zeros((3,3))
dma, dmb = dm0
spindm = dma - dmb
effspin = mol.spin * .5
nao = dma.shape[0]
# Use QED g-factor or Dirac g-factor
#g_fac = nist.G_ELECTRON**2/4 # QED
g_fac = 1
fac = g_fac * nist.ALPHA**2 / 8 / (effspin * (effspin - .5))
hss = mol.intor('int2e_ip1ip2', comp=9).reshape(3,3,nao,nao,nao,nao)
hss = hss + hss.transpose(0,1,3,2,4,5)
hss = hss + hss.transpose(0,1,2,3,5,4)
ej = numpy.einsum('xyijkl,ji,lk', hss, spindm, spindm)
ek = numpy.einsum('xyijkl,jk,li', hss, spindm, spindm)
dss = (ej - ek) * fac
# 2-electron Fermi contact term
# FC contribution is zero in mean-field calculations because of the 16-fold
# symmetry of the 4-index tensor.
# Generally, in a CI-like wfn, FC may have contributions to the direction
# spin-spin coupling.
if 0:
h_fc = mol.intor('int4c1e').reshape(nao,nao,nao,nao)
ej = numpy.einsum('ijkl,ji,lk', h_fc, spindm, spindm)
ek = numpy.einsum('ijkl,jk,li', h_fc, spindm, spindm)
e_fc = (ej - ek) * fac * (4*numpy.pi/3)
dss -= e_fc * numpy.eye(3)
return dss
# Note mo1 is the imaginary part of MO^1
def make_soc(zfsobj, mol, mo_coeff, mo_occ):
h1 = make_h1_soc(zfsobj, mol, mo_coeff, mo_occ)
mo1 = solve_mo1(zfsobj, h1)
h1aa, h1ab, h1ba, h1bb = h1
mo1aa, mo1ab, mo1ba, mo1bb = mo1
effspin = mol.spin * .5
if 0: # Pederson-Khanna formula , PRB, 60, 9566
fac = -.25 / effspin**2
dso = fac * numpy.einsum('xij,yij->xy', h1aa, mo1aa)
dso += fac * numpy.einsum('xij,yij->xy', h1bb, mo1bb)
dso -= fac * numpy.einsum('xij,yij->xy', h1ab, mo1ab)
dso -= fac * numpy.einsum('xij,yij->xy', h1ba, mo1ba)
elif 0: # Neese formula, see JCP, 127, 164112
facy = -.25 / ((effspin-.5)*effspin)
facz = -.25 / effspin**2
facx = -.25 / ((effspin+.5)*(effspin+1))
dso = facz * numpy.einsum('xij,yij->xy', h1aa, mo1aa)
dso += facz * numpy.einsum('xij,yij->xy', h1bb, mo1bb)
dso -= facx * numpy.einsum('xij,yij->xy', h1ab, mo1ab)
dso -= facy * numpy.einsum('xij,yij->xy', h1ba, mo1ba)
else: # van Wullen formula, JCP, 134, 194113
# Note the sign difference to van Wullen's paper, due to the
# anti-symmetricity of the Hamiltonian
fac = -.25 / (effspin*(effspin-.5))
dso = fac * numpy.einsum('xij,yij->xy', h1aa, mo1aa)
dso += fac * numpy.einsum('xij,yij->xy', h1bb, mo1bb)
dso -= fac * numpy.einsum('xij,yij->xy', h1ab, mo1ab)
dso -= fac * numpy.einsum('xij,yij->xy', h1ba, mo1ba)
dso *= nist.ALPHA ** 4 / 4
return dso
def make_h1_soc(zfsobj, mol, mo_coeff, mo_occ):
occidxa = mo_occ[0] > 0
occidxb = mo_occ[1] > 0
orboa = mo_coeff[0][:, occidxa]
orbob = mo_coeff[1][:, occidxb]
orbva = mo_coeff[0][:,~occidxa]
orbvb = mo_coeff[1][:,~occidxb]
# hso1e is the imaginary part of [i sigma dot pV x p]
# JCP, 122, 034107 Eq (2) = 1/4c^2 hso1e
if zfsobj.so_eff_charge:
hso1e = 0
for ia in range(mol.natm):
mol.set_rinv_origin(mol.atom_coord(ia))
#FIXME: when ECP is enabled
Z = koseki_charge(mol.atom_charge(ia))
hso1e += -Z * mol.intor('int1e_prinvxp', 3)
else:
hso1e = mol.intor('int1e_pnucxp', 3)
h1aa = numpy.asarray([reduce(numpy.dot, (orbva.T, x, orboa)) for x in hso1e])
h1bb = numpy.asarray([reduce(numpy.dot, (orbvb.T, x, orbob)) for x in hso1e])
h1ab = numpy.asarray([reduce(numpy.dot, (orbva.T, x, orbob)) for x in hso1e])
h1ba = numpy.asarray([reduce(numpy.dot, (orbvb.T, x, orboa)) for x in hso1e])
if zfsobj.sso or zfsobj.soo:
hso2e = make_soc2e(zfsobj, mo_coeff, mo_occ)
else:
hso2e = (0, 0, 0, 0)
h1aa += hso2e[0]
h1ab += hso2e[1]
h1ba += hso2e[2]
h1bb += hso2e[3]
return h1aa, h1ab, h1ba, h1bb
# Using the approximation in JCP, 122, 034107
def make_soc2e(zfsobj, mo_coeff, mo_occ):
    # mol is needed for the two-electron SO integrals below
    mol = zfsobj.mol
    occidxa = mo_occ[0] > 0
occidxb = mo_occ[1] > 0
orboa = mo_coeff[0][:,occidxa]
orbob = mo_coeff[1][:,occidxb]
orbva = mo_coeff[0][:,~occidxa]
orbvb = mo_coeff[1][:,~occidxb]
dma = numpy.dot(orboa, orboa.T)
dmb = numpy.dot(orbob, orbob.T)
dm1 = dma + dmb
nao = dma.shape[0]
# hso2e is the imaginary part of SSO
hso2e = mol.intor('int2e_p1vxp1', 3).reshape(3,nao,nao,nao,nao)
vj = numpy.einsum('yijkl,lk->yij', hso2e, dm1)
vk = numpy.einsum('yijkl,jk->yil', hso2e, dm1)
vk+= numpy.einsum('yijkl,li->ykj', hso2e, dm1)
hso2e = vj - vk * 1.5
haa = numpy.asarray([reduce(numpy.dot, (orbva.T, x, orboa)) for x in hso2e])
hab = numpy.asarray([reduce(numpy.dot, (orbva.T, x, orbob)) for x in hso2e])
hba = numpy.asarray([reduce(numpy.dot, (orbvb.T, x, orboa)) for x in hso2e])
hbb = numpy.asarray([reduce(numpy.dot, (orbvb.T, x, orbob)) for x in hso2e])
return haa, hab, hba, hbb
def solve_mo1(sscobj, h1):
cput1 = (time.clock(), time.time())
log = logger.Logger(sscobj.stdout, sscobj.verbose)
mol = sscobj.mol
mo_energy = sscobj._scf.mo_energy
mo_coeff = sscobj._scf.mo_coeff
mo_occ = sscobj._scf.mo_occ
h1aa, h1ab, h1ba, h1bb = h1
nset = len(h1aa)
eai_aa = 1. / lib.direct_sum('a-i->ai', mo_energy[0][mo_occ[0]==0], mo_energy[0][mo_occ[0]>0])
eai_ab = 1. / lib.direct_sum('a-i->ai', mo_energy[0][mo_occ[0]==0], mo_energy[1][mo_occ[1]>0])
eai_ba = 1. / lib.direct_sum('a-i->ai', mo_energy[1][mo_occ[1]==0], mo_energy[0][mo_occ[0]>0])
eai_bb = 1. / lib.direct_sum('a-i->ai', mo_energy[1][mo_occ[1]==0], mo_energy[1][mo_occ[1]>0])
mo1 = (numpy.asarray(h1aa) * -eai_aa,
numpy.asarray(h1ab) * -eai_ab,
numpy.asarray(h1ba) * -eai_ba,
numpy.asarray(h1bb) * -eai_bb)
h1aa = h1ab = h1ba = h1bb = None
if not sscobj.cphf:
return mo1
orboa = mo_coeff[0][:,mo_occ[0]> 0]
orbva = mo_coeff[0][:,mo_occ[0]==0]
orbob = mo_coeff[1][:,mo_occ[1]> 0]
orbvb = mo_coeff[1][:,mo_occ[1]==0]
nocca = orboa.shape[1]
nvira = orbva.shape[1]
noccb = orbob.shape[1]
nvirb = orbvb.shape[1]
p1 = nvira * nocca
p2 = p1 + nvira * noccb
p3 = p2 + nvirb * nocca
def _split_mo1(mo1):
mo1 = mo1.reshape(nset,-1)
mo1aa = mo1[:, :p1].reshape(nset,nvira,nocca)
mo1ab = mo1[:,p1:p2].reshape(nset,nvira,noccb)
mo1ba = mo1[:,p2:p3].reshape(nset,nvirb,nocca)
mo1bb = mo1[:,p3: ].reshape(nset,nvirb,noccb)
return mo1aa, mo1ab, mo1ba, mo1bb
mo1 = numpy.hstack((mo1[0].reshape(nset,-1),
mo1[1].reshape(nset,-1),
mo1[2].reshape(nset,-1),
mo1[3].reshape(nset,-1)))
    vresp = _gen_uhf_response(sscobj._scf, with_j=False, hermi=0)
mo_va_oa = numpy.asarray(numpy.hstack((orbva,orboa)), order='F')
mo_va_ob = numpy.asarray(numpy.hstack((orbva,orbob)), order='F')
mo_vb_oa = numpy.asarray(numpy.hstack((orbvb,orboa)), order='F')
mo_vb_ob = numpy.asarray(numpy.hstack((orbvb,orbob)), order='F')
def vind(mo1):
mo1aa, mo1ab, mo1ba, mo1bb = _split_mo1(mo1)
dm1aa = _dm1_mo2ao(mo1aa, orbva, orboa)
dm1ab = _dm1_mo2ao(mo1ab, orbva, orbob)
dm1ba = _dm1_mo2ao(mo1ba, orbvb, orboa)
dm1bb = _dm1_mo2ao(mo1bb, orbvb, orbob)
# imaginary Hermitian
dm1 = numpy.vstack([dm1aa-dm1aa.transpose(0,2,1),
dm1ab-dm1ba.transpose(0,2,1),
dm1ba-dm1ab.transpose(0,2,1),
dm1bb-dm1bb.transpose(0,2,1)])
v1 = vresp(dm1)
v1aa = _ao2mo.nr_e2(v1[ :nset ], mo_va_oa, (0,nvira,nvira,nvira+nocca))
v1ab = _ao2mo.nr_e2(v1[nset*1:nset*2], mo_va_ob, (0,nvira,nvira,nvira+noccb))
v1ba = _ao2mo.nr_e2(v1[nset*2:nset*3], mo_vb_oa, (0,nvirb,nvirb,nvirb+nocca))
v1bb = _ao2mo.nr_e2(v1[nset*3: ], mo_vb_ob, (0,nvirb,nvirb,nvirb+noccb))
v1aa = v1aa.reshape(nset,nvira,nocca)
v1ab = v1ab.reshape(nset,nvira,noccb)
v1ba = v1ba.reshape(nset,nvirb,nocca)
v1bb = v1bb.reshape(nset,nvirb,noccb)
v1aa *= eai_aa
v1ab *= eai_ab
v1ba *= eai_ba
v1bb *= eai_bb
v1mo = numpy.hstack((v1aa.reshape(nset,-1), v1ab.reshape(nset,-1),
v1ba.reshape(nset,-1), v1bb.reshape(nset,-1)))
return v1mo.ravel()
mo1 = lib.krylov(vind, mo1.ravel(), tol=1e-9, max_cycle=20, verbose=log)
log.timer('solving FC CPHF eqn', *cput1)
mo1 = _split_mo1(mo1)
return mo1
class ZeroFieldSplitting(lib.StreamObject):
'''dE = I dot gtensor dot s'''
def __init__(self, scf_method):
self.mol = scf_method.mol
self.verbose = scf_method.mol.verbose
self.stdout = scf_method.mol.stdout
self.chkfile = scf_method.chkfile
self._scf = scf_method
self.cphf = True
self.max_cycle_cphf = 20
self.conv_tol = 1e-9
self.sso = False # Two-electron spin-same-orbit coupling
self.soo = False # Two-electron spin-other-orbit coupling
self.so_eff_charge = True
self.mo10 = None
self.mo_e10 = None
self._keys = set(self.__dict__.keys())
logger.warn(self, 'This module is experimental. '
'Features/implementations may be changed in the future.')
def dump_flags(self):
log = logger.Logger(self.stdout, self.verbose)
log.info('\n')
log.info('******** %s for %s ********',
self.__class__, self._scf.__class__)
log.info('with cphf = %s', self.cphf)
if self.cphf:
log.info('CPHF conv_tol = %g', self.conv_tol)
log.info('CPHF max_cycle_cphf = %d', self.max_cycle_cphf)
logger.info(self, 'sso = %s (2e spin-same-orbit coupling)', self.sso)
logger.info(self, 'soo = %s (2e spin-other-orbit coupling)', self.soo)
logger.info(self, 'so_eff_charge = %s (1e SO effective charge)',
self.so_eff_charge)
return self
def kernel(self, mo1=None):
cput0 = (time.clock(), time.time())
self.check_sanity()
self.dump_flags()
mol = self.mol
dm0 = self._scf.make_rdm1()
zfs_ss = direct_spin_spin(self, mol, dm0)
        zfs_soc = make_soc(self, mol, self._scf.mo_coeff, self._scf.mo_occ)
zfs_tensor = zfs_ss + zfs_soc
zfs_diag = numpy.linalg.eigh(zfs_tensor)[0]
dtrace = zfs_tensor.trace()
zfs_diag -= dtrace / 3
zidx = numpy.argmax(abs(zfs_diag))
dvalue = zfs_diag[zidx] * 1.5
tmp = zfs_diag + dvalue/3
tmp[zidx] = 0
evalue = abs(tmp).max()
au2cm = nist.HARTREE2J / nist.PLANCK / nist.LIGHT_SPEED_SI * 1e-2
logger.debug(self, 'D trace = %s', dtrace)
logger.note(self, 'Axial parameter D = %s (cm^{-1})', dvalue*au2cm)
logger.note(self, 'Rhombic parameter E = %s (cm^{-1})', evalue*au2cm)
if self.verbose > logger.debug:
self.stdout.write('\nZero-field splitting tensor\n')
self.stdout.write('S_x %s\n' % zfs_tensor[0])
self.stdout.write('S_y %s\n' % zfs_tensor[1])
self.stdout.write('S_z %s\n' % zfs_tensor[2])
self.stdout.flush()
logger.timer(self, 'ZFS tensor', *cput0)
return zfs_tensor
ZFS = ZeroFieldSplitting
if __name__ == '__main__':
from pyscf import gto, scf
mol = gto.M(atom='Ne 0 0 0',
basis='ccpvdz', spin=2, charge=-2, verbose=3)
mf = scf.UHF(mol)
mf.kernel()
zfsobj = ZFS(mf)
#zfsobj.cphf = False
#zfsobj.sso = True
#zfsobj.soo = True
#zfsobj.so_eff_charge = False
print(zfsobj.kernel())
the-stack_106_27582 | #!/usr/bin/python
#
# Copyright 2018 Red Hat, Inc.
#
# This file is part of ansible-nmstate.
#
# ansible-nmstate is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ansible-nmstate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ansible-nmstate. If not, see <https://www.gnu.org/licenses/>.
from copy import deepcopy
from libnmstate import netapplier
from libnmstate import netinfo
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.utils import remove_default_spec
from ansible.module_utils.ansible_nmstate import get_interface_state
from ansible.module_utils.ansible_nmstate import write_debug_state
MODULE_NAME = "nmstate_linkagg"
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: nmstate_linkagg
version_added: "2.6"
author: "Till Maas (@tyll)"
short_description: Configure link aggregation with nmstate
description:
- "This module allows to configure link aggregation with
https://github.com/nmstate/nmstate"
options:
name:
description:
- Name of the link aggregation group.
required: true
mode:
description:
      - Mode of the link aggregation group. A value of C(on) will enable
        LACP/802.3ad, the same as C(active), which configures the link to actively
        send information about the state of the link.
default: on
choices: ['on', 'active', 'passive', 'balance-rr', 'active-backup',
'balance-xor', 'broadcast', '802.3ad', 'balance-tlb',
'balance-alb']
members:
description:
      - List of member interfaces of the link aggregation group. The value can
        be a single interface or a list of interfaces.
required: true
min_links:
description:
- Minimum members that should be up
before bringing up the link aggregation group.
aggregate:
description: List of link aggregation definitions.
purge:
description:
- Purge link aggregation groups not defined in the I(aggregate)
parameter.
default: no
state:
description:
- State of the link aggregation group.
default: present
choices: ['present', 'absent', 'up', 'down']
'''
EXAMPLES = '''
- name: Take bond interface down
nmstate_linkagg:
name=bond0
state=down
members=eth10
- name: configure link aggregation group
net_linkagg:
name: bond0
members:
- eth0
- eth1
- name: remove configuration
net_linkagg:
name: bond0
state: absent
- name: Create aggregate of linkagg definitions
net_linkagg:
aggregate:
- { name: bond0, members: [eth1] }
- { name: bond1, members: [eth2] }
- name: Remove aggregate of linkagg definitions
net_linkagg:
aggregate:
- name: bond0
- name: bond1
state: absent
'''
RETURN = '''
state:
description: Network state after running the module
type: dict
'''
def run_module():
element_spec = dict(
members=dict(type='list'),
min_links=dict(type='int'),
# net_linkagg only knows on, active and passive
# on and active is 802.3ad on Linux, passive is not supported
mode=dict(choices=['on', 'active', 'passive', 'balance-rr',
'active-backup', 'balance-xor', 'broadcast',
'802.3ad', 'balance-tlb', 'balance-alb'],
default='on'),
name=dict(),
state=dict(default='present',
choices=['present', 'absent', 'up', 'down'])
)
aggregate_spec = deepcopy(element_spec)
aggregate_spec['name'] = dict(required=True)
# remove default in aggregate spec, to handle common arguments
remove_default_spec(aggregate_spec)
argument_spec = dict(
aggregate=dict(type='list', elements='dict', options=aggregate_spec),
purge=dict(default=False, type='bool'),
# not in net_* specification
debug=dict(default=False, type='bool'),
)
argument_spec.update(element_spec)
required_one_of = [['name', 'aggregate']]
mutually_exclusive = [['name', 'aggregate']]
result = dict(
changed=False,
)
module = AnsibleModule(
argument_spec=argument_spec,
required_one_of=required_one_of,
mutually_exclusive=mutually_exclusive,
supports_check_mode=True
)
if module.params['aggregate']:
module.fail_json(msg='Aggregate not yet supported', **result)
previous_state = netinfo.show()
members = module.params['members']
if not isinstance(members, list):
members = [members]
# Fail when member state is missing
if module.params['state'] in ['up', 'present']:
missing = []
for member in members:
member_state = get_interface_state(previous_state['interfaces'],
member)
if not member_state:
missing.append(member)
if missing:
module.fail_json(msg='Did not find specified members in network '
'state: ' + ', '.join(missing), **result)
mode = module.params['mode']
if mode in ['on', 'active']:
mode = '802.3ad'
elif mode in ['passive']:
# passive mode is not supported on Linux:
# noqa:
# https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/7/html/networking_guide/sec-comparison_of_network_teaming_to_bonding
module.fail_json(msg='passive mode is not supported on Linux',
**result)
link_aggregation = {'mode': mode,
'options': {}, # FIXME: add support for options?
'slaves': members,
}
interface_state = {'name': module.params['name'],
'state': module.params['state'],
'type': 'bond',
'link-aggregation': link_aggregation,
}
interfaces = []
interfaces.append(interface_state)
new_partial_state = {'interfaces': interfaces}
if module.params.get('debug'):
result['previous_state'] = previous_state
result['new_partial_state'] = new_partial_state
result['debugfile'] = write_debug_state(MODULE_NAME, new_partial_state)
if module.check_mode:
new_full_state = deepcopy(previous_state)
new_full_state.update(new_partial_state)
result['state'] = new_full_state
# TODO: maybe compare only the state of the defined interfaces
if previous_state != new_full_state:
result['changed'] = True
module.exit_json(**result)
else:
netapplier.apply(new_partial_state)
current_state = netinfo.show()
if current_state != previous_state:
result['changed'] = True
result['state'] = current_state
module.exit_json(**result)
def main():
run_module()
if __name__ == '__main__':
main()
the-stack_106_27583 | """day 7: I'm so worried about the baggage retrieval system they've got at Heathrow"""
import networkx
from collections import defaultdict
from typing import Dict, List
TEST_INPUT = """light red bags contain 1 bright white bag, 2 muted yellow bags.
dark orange bags contain 3 bright white bags, 4 muted yellow bags.
bright white bags contain 1 shiny gold bag.
muted yellow bags contain 2 shiny gold bags, 9 faded blue bags.
shiny gold bags contain 1 dark olive bag, 2 vibrant plum bags.
dark olive bags contain 3 faded blue bags, 4 dotted black bags.
vibrant plum bags contain 5 faded blue bags, 6 dotted black bags.
faded blue bags contain no other bags.
dotted black bags contain no other bags.""".splitlines()
PART_TWO_TEST = """shiny gold bags contain 2 dark red bags.
dark red bags contain 2 dark orange bags.
dark orange bags contain 2 dark yellow bags.
dark yellow bags contain 2 dark green bags.
dark green bags contain 2 dark blue bags.
dark blue bags contain 2 dark violet bags.
dark violet bags contain no other bags.""".splitlines()
with open("day07.txt") as infile:
REAL_INPUT = [line.strip() for line in infile]
def parse_input(puzzle_input: List[str]) -> networkx.DiGraph:
"""Parse the puzzle input for part 1 by making a graph of what is contained by what"""
graph = networkx.DiGraph()
nodes = []
for line in puzzle_input:
source, targets = line[:-1].split(" bags contain ")
for target in targets.split(", "):
if target == "no other bags":
# we've found an edge node
continue
# count = int(target.split()[0])
color = " ".join(target.split()[1:-1])
nodes.append((color, source))
graph.add_edges_from(nodes)
return graph
def part_one(graph: networkx.DiGraph) -> int:
"""Part 1: how many bags can eventually contain a shiny gold bag?"""
result = list(graph.successors("shiny gold"))
nodes_seen = set()
target_nodes = set(result)
for successor in result:
if successor in nodes_seen:
continue
nodes_seen.add(successor)
sub_result = list(graph.successors(successor))
target_nodes |= set(sub_result)
result += sub_result
return len(target_nodes)
def part_two(puzzle_input: List[str]) -> int:
"""Part 2: how many bags does your shiny gold bag contain?"""
dependencies = defaultdict(dict)
for line in puzzle_input:
source, targets = line[:-1].split(" bags contain ")
for target in targets.split(", "):
if target == "no other bags":
# doesn't hold anything else. No need to store it
continue
count = int(target.split()[0])
color = " ".join(target.split()[1:-1])
dependencies[source][color] = count
# get_bags_inside() counts the shiny gold bag, so we need to exclude it
return get_bags_inside("shiny gold", dependencies) - 1
def get_bags_inside(color: str, dependency_dict: Dict[str, Dict[str, int]]) -> int:
"""Recursively count the bags stored wthin color, including itself"""
count = 1
inner_bags = dependency_dict[color]
for bag_color, bag_count in inner_bags.items():
count += bag_count * get_bags_inside(bag_color, dependency_dict)
return count
assert part_one(parse_input(TEST_INPUT)) == 4
print(part_one(parse_input(REAL_INPUT)))
assert part_two(TEST_INPUT) == 32
assert part_two(PART_TWO_TEST) == 126
print(part_two(REAL_INPUT))
the-stack_106_27584 | import numpy as np
import random
import cv2
def vignette(img):
# reading the image
image = cv2.imread(img)
    # extract the height and width of the image
rows, cols = image.shape[:2]
# generating vignette mask using Gaussian resultant_kernels
sigma_X = cols/(1.5 + random.random()*1.5)
sigma_Y = rows/(1.5 + random.random()*1.5)
scale = 1.6
X_resultant_kernel = cv2.getGaussianKernel(int(scale * cols), sigma_X)
Y_resultant_kernel = cv2.getGaussianKernel(int(scale * rows), sigma_Y)
start_X = random.randint(0, int(scale * cols) - cols)
start_Y = random.randint(0, int(scale * rows) - rows)
X_resultant_kernel = X_resultant_kernel[start_X:start_X+cols]
Y_resultant_kernel = Y_resultant_kernel[start_Y:start_Y+rows]
# generating resultant_kernel matrix
resultant_kernel = Y_resultant_kernel * X_resultant_kernel.T
print(resultant_kernel.shape)
    # create the vignette mask, scaled by a random peak brightness and normalised
    mask = resultant_kernel * (random.random() * 0.3 + 0.7) / np.max(resultant_kernel)
output = np.copy(image)
# applying the mask to each channel in the input image
for i in range(3):
output[:, :, i] = output[:, :, i] * mask
# displaying the orignal image
cv2.imshow('Original', image)
# displaying the vignette filter image
cv2.imshow('VIGNETTE', output)
cv2.waitKey(0)
if __name__ == "__main__":
vignette('0.png')
vignette('0.png')
vignette('0.png')
vignette('0.png')
vignette('1.png')
vignette('1.png')
vignette('1.png')
vignette('1.png')
vignette('2.png')
vignette('2.png')
vignette('2.png')
vignette('2.png')
vignette('3.png')
vignette('3.png')
vignette('3.png')
vignette('3.png')
the-stack_106_27586 | # Copyright (C) The Arvados Authors. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
import logging
import sys
import threading
import copy
import re
import subprocess
from schema_salad.sourceline import SourceLine
import cwltool.docker
from cwltool.errors import WorkflowException
import arvados.commands.keepdocker
logger = logging.getLogger('arvados.cwl-runner')
cached_lookups = {}
cached_lookups_lock = threading.Lock()
def determine_image_id(dockerImageId):
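    # Each data line of `docker images --no-trunc --all` output is expected to start
    # with three whitespace-separated fields (repository, tag, image id), which the
    # regex below captures for comparison against dockerImageId.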
for line in (
subprocess.check_output( # nosec
["docker", "images", "--no-trunc", "--all"]
)
.decode("utf-8")
.splitlines()
):
try:
match = re.match(r"^([^ ]+)\s+([^ ]+)\s+([^ ]+)", line)
split = dockerImageId.split(":")
if len(split) == 1:
split.append("latest")
elif len(split) == 2:
# if split[1] doesn't match valid tag names, it is a part of repository
if not re.match(r"[\w][\w.-]{0,127}", split[1]):
split[0] = split[0] + ":" + split[1]
split[1] = "latest"
elif len(split) == 3:
if re.match(r"[\w][\w.-]{0,127}", split[2]):
split[0] = split[0] + ":" + split[1]
split[1] = split[2]
del split[2]
# check for repository:tag match or image id match
if match and (
(split[0] == match.group(1) and split[1] == match.group(2))
or dockerImageId == match.group(3)
):
return match.group(3)
except ValueError:
pass
return None
def arv_docker_get_image(api_client, dockerRequirement, pull_image, project_uuid,
force_pull, tmp_outdir_prefix, match_local_docker):
"""Check if a Docker image is available in Keep, if not, upload it using arv-keepdocker."""
if "http://arvados.org/cwl#dockerCollectionPDH" in dockerRequirement:
return dockerRequirement["http://arvados.org/cwl#dockerCollectionPDH"]
if "dockerImageId" not in dockerRequirement and "dockerPull" in dockerRequirement:
dockerRequirement = copy.deepcopy(dockerRequirement)
dockerRequirement["dockerImageId"] = dockerRequirement["dockerPull"]
if hasattr(dockerRequirement, 'lc'):
dockerRequirement.lc.data["dockerImageId"] = dockerRequirement.lc.data["dockerPull"]
global cached_lookups
global cached_lookups_lock
with cached_lookups_lock:
if dockerRequirement["dockerImageId"] in cached_lookups:
return cached_lookups[dockerRequirement["dockerImageId"]]
with SourceLine(dockerRequirement, "dockerImageId", WorkflowException, logger.isEnabledFor(logging.DEBUG)):
sp = dockerRequirement["dockerImageId"].split(":")
image_name = sp[0]
image_tag = sp[1] if len(sp) > 1 else "latest"
images = arvados.commands.keepdocker.list_images_in_arv(api_client, 3,
image_name=image_name,
image_tag=image_tag)
if images and match_local_docker:
local_image_id = determine_image_id(dockerRequirement["dockerImageId"])
if local_image_id:
# find it in the list
found = False
for i in images:
if i[1]["dockerhash"] == local_image_id:
found = True
images = [i]
break
if not found:
# force re-upload.
images = []
if not images:
# Fetch Docker image if necessary.
try:
result = cwltool.docker.DockerCommandLineJob.get_image(dockerRequirement, pull_image,
force_pull, tmp_outdir_prefix)
if not result:
raise WorkflowException("Docker image '%s' not available" % dockerRequirement["dockerImageId"])
except OSError as e:
raise WorkflowException("While trying to get Docker image '%s', failed to execute 'docker': %s" % (dockerRequirement["dockerImageId"], e))
# Upload image to Arvados
args = []
if project_uuid:
args.append("--project-uuid="+project_uuid)
args.append(image_name)
args.append(image_tag)
logger.info("Uploading Docker image %s:%s", image_name, image_tag)
try:
arvados.commands.put.api_client = api_client
arvados.commands.keepdocker.main(args, stdout=sys.stderr, install_sig_handlers=False, api=api_client)
except SystemExit as e:
# If e.code is None or zero, then keepdocker exited normally and we can continue
if e.code:
raise WorkflowException("keepdocker exited with code %s" % e.code)
images = arvados.commands.keepdocker.list_images_in_arv(api_client, 3,
image_name=image_name,
image_tag=image_tag)
if not images:
raise WorkflowException("Could not find Docker image %s:%s" % (image_name, image_tag))
pdh = api_client.collections().get(uuid=images[0][0]).execute()["portable_data_hash"]
with cached_lookups_lock:
cached_lookups[dockerRequirement["dockerImageId"]] = pdh
return pdh
def arv_docker_clear_cache():
global cached_lookups
global cached_lookups_lock
with cached_lookups_lock:
cached_lookups = {}
the-stack_106_27587 | import paho.mqtt.client as mqtt
import time
broker_address = "localhost"
port = 8883
keep_alive_time = 60
topic = "new/temp"
#path to CA crt
CA_CERT = "ca.crt"
#CLIENT_CERT = ""
#CLIENT_KEY = ""
#CIPHERS = ""
def on_connect(client, userdata, flags, rc):
print("Connected with result code: " + str(rc) )
if client._clean_session == False:
print("session present flag: " + str(flags['session present']) )
else:
print("Starting a clean session")
def on_publish(client, userdata, mid):
print("Successfully published!!!")
def on_message(client, userdata, msg):
print("topic: " + msg.topic )
print("payload: " + str(msg.payload) )
print("qos: " + str(msg.qos) )
print("retain: " + str(msg.retain))
def on_disconnect(client, userdata, rc):
print("Disconnected with result code: " + str(rc) )
def on_log(client, userdata, level, buf):
print("log: " + buf)
client = mqtt.Client("client_test1", True, None )
client.on_connect = on_connect
client.on_publish = on_publish
client.on_message = on_message
client.on_disconnect = on_disconnect
client.on_log = on_log
client.tls_set(CA_CERT)
#username_pw_set(username, password=None)
#will_set(topic, payload=None, qos=0, retain=False)
client.connect(broker_address, port, keep_alive_time)
client.loop_start()
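# Note (sketch): on_message is registered above but no topic is ever subscribed,
# so incoming messages will not be delivered. To receive the messages published
# below back from the broker, one could add, for example:
#   client.subscribe(topic, qos=2)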
i=0
while i < 10:
client.publish(topic, "Hello", 2, False)
time.sleep(4)
i+=1
client.disconnect()
client.loop_stop()
the-stack_106_27589 | # Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import os
import sys
from paddle.trainer_config_helpers import *
#file paths
word_dict_file = './data/wordDict.txt'
label_dict_file = './data/targetDict.txt'
predicate_file = './data/verbDict.txt'
train_list_file = './data/train.list'
test_list_file = './data/test.list'
is_test = get_config_arg('is_test', bool, False)
is_predict = get_config_arg('is_predict', bool, False)
if not is_predict:
#load dictionaries
word_dict = dict()
label_dict = dict()
predicate_dict = dict()
with open(word_dict_file, 'r') as f_word, \
open(label_dict_file, 'r') as f_label, \
open(predicate_file, 'r') as f_pre:
for i, line in enumerate(f_word):
w = line.strip()
word_dict[w] = i
for i, line in enumerate(f_label):
w = line.strip()
label_dict[w] = i
for i, line in enumerate(f_pre):
w = line.strip()
predicate_dict[w] = i
if is_test:
train_list_file = None
#define data provider
define_py_data_sources2(
train_list=train_list_file,
test_list=test_list_file,
module='dataprovider',
obj='process',
args={
'word_dict': word_dict,
'label_dict': label_dict,
'predicate_dict': predicate_dict
})
word_dict_len = len(word_dict)
label_dict_len = len(label_dict)
pred_len = len(predicate_dict)
else:
word_dict_len = get_config_arg('dict_len', int)
label_dict_len = get_config_arg('label_len', int)
pred_len = get_config_arg('pred_len', int)
############################## Hyper-parameters ##################################
mark_dict_len = 2
word_dim = 32
mark_dim = 5
hidden_dim = 512
depth = 8
########################### Optimizer #######################################
settings(
batch_size=150,
learning_method=MomentumOptimizer(momentum=0),
learning_rate=2e-2,
regularization=L2Regularization(8e-4),
is_async=False,
model_average=ModelAverage(
average_window=0.5, max_average_window=10000), )
####################################### network ##############################
#8 features and 1 target
word = data_layer(name='word_data', size=word_dict_len)
predicate = data_layer(name='verb_data', size=pred_len)
ctx_n2 = data_layer(name='ctx_n2_data', size=word_dict_len)
ctx_n1 = data_layer(name='ctx_n1_data', size=word_dict_len)
ctx_0 = data_layer(name='ctx_0_data', size=word_dict_len)
ctx_p1 = data_layer(name='ctx_p1_data', size=word_dict_len)
ctx_p2 = data_layer(name='ctx_p2_data', size=word_dict_len)
mark = data_layer(name='mark_data', size=mark_dict_len)
if not is_predict:
target = data_layer(name='target', size=label_dict_len)
default_std = 1 / math.sqrt(hidden_dim) / 3.0
emb_para = ParameterAttribute(name='emb', initial_std=0., learning_rate=0.)
std_0 = ParameterAttribute(initial_std=0.)
std_default = ParameterAttribute(initial_std=default_std)
predicate_embedding = embedding_layer(
size=word_dim,
input=predicate,
param_attr=ParameterAttribute(
name='vemb', initial_std=default_std))
mark_embedding = embedding_layer(
name='word_ctx-in_embedding', size=mark_dim, input=mark, param_attr=std_0)
word_input = [word, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2]
emb_layers = [
embedding_layer(
size=word_dim, input=x, param_attr=emb_para) for x in word_input
]
emb_layers.append(predicate_embedding)
emb_layers.append(mark_embedding)
hidden_0 = mixed_layer(
name='hidden0',
size=hidden_dim,
bias_attr=std_default,
input=[
full_matrix_projection(
input=emb, param_attr=std_default) for emb in emb_layers
])
mix_hidden_lr = 1e-3
lstm_para_attr = ParameterAttribute(initial_std=0.0, learning_rate=1.0)
hidden_para_attr = ParameterAttribute(
initial_std=default_std, learning_rate=mix_hidden_lr)
lstm_0 = lstmemory(
name='lstm0',
input=hidden_0,
act=ReluActivation(),
gate_act=SigmoidActivation(),
state_act=SigmoidActivation(),
bias_attr=std_0,
param_attr=lstm_para_attr)
#stack L-LSTM and R-LSTM with direct edges
input_tmp = [hidden_0, lstm_0]
for i in range(1, depth):
mix_hidden = mixed_layer(
name='hidden' + str(i),
size=hidden_dim,
bias_attr=std_default,
input=[
full_matrix_projection(
input=input_tmp[0], param_attr=hidden_para_attr),
full_matrix_projection(
input=input_tmp[1], param_attr=lstm_para_attr)
])
lstm = lstmemory(
name='lstm' + str(i),
input=mix_hidden,
act=ReluActivation(),
gate_act=SigmoidActivation(),
state_act=SigmoidActivation(),
reverse=((i % 2) == 1),
bias_attr=std_0,
param_attr=lstm_para_attr)
input_tmp = [mix_hidden, lstm]
feature_out = mixed_layer(
name='output',
size=label_dict_len,
bias_attr=std_default,
input=[
full_matrix_projection(
input=input_tmp[0], param_attr=hidden_para_attr),
full_matrix_projection(
input=input_tmp[1], param_attr=lstm_para_attr)
], )
if not is_predict:
crf_l = crf_layer(
name='crf',
size=label_dict_len,
input=feature_out,
label=target,
param_attr=ParameterAttribute(
name='crfw', initial_std=default_std, learning_rate=mix_hidden_lr))
crf_dec_l = crf_decoding_layer(
name='crf_dec_l',
size=label_dict_len,
input=feature_out,
label=target,
param_attr=ParameterAttribute(name='crfw'))
eval = sum_evaluator(input=crf_dec_l)
outputs(crf_l)
else:
crf_dec_l = crf_decoding_layer(
name='crf_dec_l',
size=label_dict_len,
input=feature_out,
param_attr=ParameterAttribute(name='crfw'))
outputs(crf_dec_l)
the-stack_106_27591 | from __future__ import print_function
from fabric.api import task, run, env, cd, sudo, put, get
from fabric.tasks import execute, Task
from .utils import hijack_output_loop
from .deploy import Deployment
from .project import Project
# Fabric prints all the messages with a '[hostname] out:' prefix.
# Hijacking it to remove the prefix
hijack_output_loop()
@task
def hello(name="world"):
with cd("."):
run("echo hello " + name)
@task
def run_command(command, workdir=None):
workdir = workdir or "/opt/rorolite/project"
command_str = " ".join(command)
with cd(workdir):
run(command_str)
@task
def run_notebook(workdir=None, args=None, kwargs=None):
args = args or []
kwargs = kwargs or {}
command = "jupyter notebook --ip {host} --allow-root".format(host=env.host).split() + list(args)
return run_command(command, workdir=workdir)
@task
def run_jupyterlab(workdir=None, args=None, kwargs=None):
args = args or []
kwargs = kwargs or {}
command = "jupyter lab --ip {host} --allow-root".format(host=env.host).split() + list(args)
return run_command(command, workdir=workdir)
@task
def restart(service):
sudo("supervisorctl restart " + service)
@task
def logs(service, n=10, follow=False):
follow_flag = "-f" if follow else ""
cmd = "tail -n {} {} /var/log/supervisor/{}.log".format(n, follow_flag, service)
sudo(cmd)
@task
def deploy():
d = Deployment()
d.deploy()
@task
def provision():
project = Project()
project.runtime.install()
setup_volumes()
@task
def putfile(src, dest):
put(src, dest)
@task
def getfile(src, dest):
get(src, dest)
@task
def supervisorctl(*args):
sudo("supervisorctl " + " ".join(args))
def setup_volumes():
sudo("mkdir -p /volumes/data")
sudo("chown {} /volumes".format(env.user))
sudo("chown {} /volumes/data".format(env.user))
def run_task(taskname, *args, **kwargs):
task = globals().get(taskname)
if isinstance(task, Task):
execute(task, *args, **kwargs)
else:
raise Exception("Invalid task: " + repr(taskname))
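

# Example invocations (sketch, Fabric 1.x syntax; host and service names are placeholders):
#
#   fab -H user@example.com deploy
#   fab -H user@example.com logs:webapp,n=50,follow=True
#
# or programmatically:
#   run_task("restart", "webapp")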
the-stack_106_27592 | # qubit number=3
# total number=11
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
import networkx as nx
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def make_circuit(n:int) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
prog = QuantumCircuit(input_qubit)
prog.h(input_qubit[0]) # number=1
prog.cx(input_qubit[3],input_qubit[0]) # number=8
prog.z(input_qubit[3]) # number=9
prog.cx(input_qubit[3],input_qubit[0]) # number=10
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
for edge in E:
k = edge[0]
l = edge[1]
prog.cp(-2 * gamma, input_qubit[k-1], input_qubit[l-1])
prog.p(gamma, k)
prog.p(gamma, l)
prog.rx(2 * beta, range(len(V)))
prog.swap(input_qubit[3],input_qubit[0]) # number=5
prog.swap(input_qubit[3],input_qubit[0]) # number=6
# circuit end
return prog
if __name__ == '__main__':
n = 4
V = np.arange(0, n, 1)
E = [(0, 1, 1.0), (0, 2, 1.0), (1, 2, 1.0), (3, 2, 1.0), (3, 1, 1.0)]
G = nx.Graph()
G.add_nodes_from(V)
G.add_weighted_edges_from(E)
step_size = 0.1
a_gamma = np.arange(0, np.pi, step_size)
a_beta = np.arange(0, np.pi, step_size)
a_gamma, a_beta = np.meshgrid(a_gamma, a_beta)
F1 = 3 - (np.sin(2 * a_beta) ** 2 * np.sin(2 * a_gamma) ** 2 - 0.5 * np.sin(4 * a_beta) * np.sin(4 * a_gamma)) * (
1 + np.cos(4 * a_gamma) ** 2)
result = np.where(F1 == np.amax(F1))
a = list(zip(result[0], result[1]))[0]
gamma = a[0] * step_size
beta = a[1] * step_size
prog = make_circuit(4)
sample_shot =5600
writefile = open("../data/startQiskit_QC91.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
provider.backends()
backend = provider.get_backend("ibmq_5_yorktown")
circuit1 = transpile(prog, FakeYorktown())
circuit1.measure_all()
prog = circuit1
info = execute(prog,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
the-stack_106_27594 | from questionary import prompt, Choice
from click import clear as click_clear
from os import scandir, DirEntry
from posixpath import join
from pdpp.styles.prompt_style import custom_style_fancy
from pdpp.utils.ignorelist import ignorelist
from pdpp.tasks.base_task import BaseTask
from typing import List
def q3(task: BaseTask) -> List[str]:
"""
A question which asks users to indicate which scripts in the chosen task's
'src' should be run to produce this task's targets.
"""
click_clear()
source_files = []
source_choices = []
src_loc = join(task.target_dir, task.SRC_DIR)
source_files = [s for s in scandir(src_loc) if ((s.name not in ignorelist) and (s.is_file()))]
for entry in source_files:
source_choices.append(
Choice(
title=entry.name,
value=entry,
checked= entry.name in task.src_files
)
)
if len(source_files) < 2:
return [s.name for s in source_files]
question_3 = [{
'type': 'checkbox',
'message': 'Select the source file(s) for "{}"'.format(task.target_dir),
'name': 'source',
'choices': source_choices,
}]
final_choices: List[DirEntry[str]] = prompt(question_3, style=custom_style_fancy)['source']
    return [s.name for s in final_choices]
the-stack_106_27596 | __all__ = [
'ps_output'
]
import re
import bg_helper as bh
import input_helper as ih
from input_helper.matcher import PsOutputMatcher
_ps_output_matcher = PsOutputMatcher()
def ps_output():
"""Return a list of dicts containing info about current running processes"""
cmd = 'ps -eo user,pid,ppid,tty,command'
output = bh.run_output(cmd)
results = [
_ps_output_matcher(line)
for line in re.split('\r?\n', output)
]
return results
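

# Example usage (sketch): iterate over the parsed processes. The exact keys in each
# dict depend on input_helper's PsOutputMatcher; 'pid' and 'command' are assumed here.
if __name__ == '__main__':
    for proc in ps_output():
        print(proc.get('pid'), proc.get('command'))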
the-stack_106_27597 | import os
from map_retrieve import mapRetrieve
from glob import glob
# create the mapRetrieve object
mr = mapRetrieve()
zip_files = glob('/media/zac/Seagate Portable Drive/orders/f06d9ed2c630d7ad6ecfd53ecda4d412/CMS_LiDAR_AGB_California/data/*.zip')
# for each zip file run the save_map method
count = 10000
for zf in zip_files:
count = count -1
if count > 0:
print(zf)
print(count)
mr.save_map(zf)
else:
continue
the-stack_106_27598 | #!/usr/bin/env python3
import tarfile
from tarfile import TarFile, TarInfo
import zipfile
from zipfile import ZipFile, ZipInfo
import json
import os
from io import BytesIO
import stat
from shutil import copyfileobj
import time
PROJECT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
with open(os.path.join(PROJECT_DIR, "tool.config.json")) as f:
MODLOADER_METADATA = json.load(f)
MODLOADER_NAME = MODLOADER_METADATA["name"]
MODLOADER_VERSION = MODLOADER_METADATA["version"]
MODLOADER_DIR_NAME = MODLOADER_NAME
# specific archive extensions are appended when the respective archives are packed
ARCHIVE_NAME_QUICK_INSTALL = "{}_{}_quick-install".format(MODLOADER_NAME, MODLOADER_VERSION)
ARCHIVE_NAME_PACKAGE = "{}_{}_package".format(MODLOADER_NAME, MODLOADER_VERSION)
DEFAULT_FILE_MODE = 0o644
DEFAULT_DIR_MODE = 0o755
PACKAGE_JSON_DATA = {
"name":
"CrossCode",
"version":
"1.0.0",
"main":
MODLOADER_DIR_NAME + "/main.html",
"chromium-args":
" ".join([
"--ignore-gpu-blacklist",
"--ignore-gpu-blocklist",
"--disable-direct-composition",
"--disable-background-networking",
"--in-process-gpu",
"--password-store=basic",
]),
"window": {
"toolbar": False,
"icon": "favicon.png",
"width": 1136,
"height": 640,
"fullscreen": False,
},
}
class TarGzArchiveAdapter:
@classmethod
def open_for_writing(cls, path):
return cls(TarFile.open(path + ".tar.gz", "w:gz"))
def __init__(self, tarfile):
self._tarfile = tarfile
def __enter__(self):
self._tarfile.__enter__()
return self
def __exit__(self, type, value, traceback):
self._tarfile.__exit__(type, value, traceback)
def add_file_entry(self, name, data):
self._add_entry(name, tarfile.REGTYPE, DEFAULT_FILE_MODE, len(data), BytesIO(data))
def add_dir_entry(self, name):
self._add_entry(name, tarfile.DIRTYPE, DEFAULT_DIR_MODE, 0, None)
def _add_entry(self, name, type, mode, size, data):
info = TarInfo(name)
info.type = type
info.mode = mode
info.size = size
info.mtime = time.time()
self._tarfile.addfile(info, data)
def add_real_file(self, path, archived_path, recursive=True, predicate=None):
self._tarfile.add(
path,
arcname=archived_path,
recursive=recursive,
filter=lambda info: self._reset_tarinfo(info, predicate),
)
def _reset_tarinfo(self, info, predicate):
if predicate is not None and not predicate(info.name):
return None
# remove user and group IDs as they are irrelevant for distribution and
# may require subsequent `chown`ing on multi-tenant systems
info.uid = 0
info.uname = ""
info.gid = 0
info.gname = ""
return info
class ZipArchiveAdapter:
@classmethod
def open_for_writing(cls, path):
return cls(ZipFile(path + ".zip", "w", compression=zipfile.ZIP_DEFLATED))
def __init__(self, zipfile):
self._zipfile = zipfile
def __enter__(self):
self._zipfile.__enter__()
return self
def __exit__(self, type, value, traceback):
self._zipfile.__exit__(type, value, traceback)
def add_file_entry(self, name, data):
self._add_entry(name, (stat.S_IFREG | DEFAULT_FILE_MODE) << 16, data)
def add_dir_entry(self, name):
if not name.endswith("/"):
name += "/"
external_attr = (stat.S_IFDIR | DEFAULT_DIR_MODE) << 16
external_attr |= 0x10 # MS-DOS directory flag
self._add_entry(name, external_attr, b"")
def _add_entry(self, name, external_attr, data):
info = ZipInfo(name, time.localtime(time.time())[:6])
info.external_attr = external_attr
self._set_zipinfo_compression(info)
self._zipfile.writestr(info, data)
def add_real_file(self, path, archived_path, recursive=True, predicate=None):
info = ZipInfo.from_file(
path, archived_path, strict_timestamps=self._zipfile._strict_timestamps
)
self._set_zipinfo_compression(info)
if predicate is not None and not predicate(info.filename):
return
if info.is_dir():
self._zipfile.open(info, "w").close()
if recursive:
for f in sorted(os.listdir(path)):
self.add_real_file(
os.path.join(path, f),
os.path.join(archived_path, f),
recursive=recursive,
predicate=predicate,
)
else:
with open(path, "rb") as src, self._zipfile.open(info, "w") as dest:
copyfileobj(src, dest, 1024 * 8)
def _set_zipinfo_compression(self, zipinfo):
zipinfo.compress_type = self._zipfile.compression
zipinfo._compresslevel = self._zipfile.compresslevel
for ArchiveAdapter in [TarGzArchiveAdapter, ZipArchiveAdapter]:
def add_modloader_files(archive, archived_path_prefix):
def add(path, recursive=True):
archive.add_real_file(
os.path.join(PROJECT_DIR, path),
os.path.join(archived_path_prefix, path),
recursive=recursive,
predicate=lambda name: not name.endswith(".tsbuildinfo"),
)
add("LICENSE")
add("main.css")
add("main.html")
add("tool.config.json")
add("common/", recursive=False)
add("common/dist/")
add("common/vendor-libs/")
add("dist/")
add("runtime/", recursive=False)
add("runtime/ccmod.json")
add("runtime/assets/")
add("runtime/dist/")
add("runtime/media/")
with ArchiveAdapter.open_for_writing(ARCHIVE_NAME_PACKAGE) as archive:
add_modloader_files(archive, "")
with ArchiveAdapter.open_for_writing(ARCHIVE_NAME_QUICK_INSTALL) as archive:
archive.add_file_entry(
"package.json",
(json.dumps(PACKAGE_JSON_DATA, indent=2) + "\n").encode("utf8"),
)
archive.add_dir_entry("assets/")
archive.add_dir_entry("assets/mods/")
archive.add_dir_entry(MODLOADER_DIR_NAME)
add_modloader_files(archive, MODLOADER_DIR_NAME)
the-stack_106_27599 | # Build the configuration
keyValues = Properties()
keyValues.load(loader.getResourceAsStream("data.properties"))
keyValues.load(loader.getResourceAsStream("model/benchmark/randomguess-test.properties"))
configurator = Configurator(keyValues)
# This object is returned to the Java program
# (Properties, loader, Configurator and the task/evaluator classes are assumed to be
#  injected by the hosting Java runtime that executes this script.)
_data = {}
# Build the ranking task
task = RankingTask(RandomGuessModel, configurator)
# Train and evaluate the model, then collect the ranking metrics
measures = task.execute()
_data['precision'] = measures.get(PrecisionEvaluator)
_data['recall'] = measures.get(RecallEvaluator)
# Build the rating task
task = RatingTask(RandomGuessModel, configurator)
# Train and evaluate the model, then collect the rating metrics
measures = task.execute()
_data['mae'] = measures.get(MAEEvaluator)
_data['mse'] = measures.get(MSEEvaluator)
the-stack_106_27600 | from ..utils import utils, constants
from ..core.trajectorydataframe import *
from sklearn.cluster import DBSCAN
import numpy as np
import pandas as pd
import inspect
# Earth's mean radius in km, used to convert great-circle distances expressed in
# radians (as used by the haversine metric below) into kilometres.
kms_per_radian = 6371.0088
def cluster(tdf, cluster_radius_km=0.1, min_samples=1):
"""Clustering of locations.
Cluster the stops of each individual in a TrajDataFrame. The stops correspond to visits to the same location at different times, based on spatial proximity [RT2004]_. The clustering algorithm used is DBSCAN (by sklearn [DBSCAN]_).
Parameters
----------
tdf : TrajDataFrame
the input TrajDataFrame that should contain the stops, i.e., the output of a `preprocessing.detection` function.
cluster_radius_km : float, optional
the parameter `eps` of the function sklearn.cluster.DBSCAN, in kilometers. The default is `0.1`.
min_samples : int, optional
the parameter `min_samples` of the function sklearn.cluster.DBSCAN indicating the minimum number of stops to form a cluster. The default is `1`.
Returns
-------
TrajDataFrame
a TrajDataFrame with the additional column 'cluster' containing the cluster labels. The stops that belong to the same cluster have the same label. The labels are integers corresponding to the ranks of clusters according to the frequency of visitation (the most visited cluster has label 0, the second most visited has label 1, etc.).
Examples
--------
>>> import skmob
>>> import pandas as pd
>>> from skmob.preprocessing import detection, clustering
>>> # read the trajectory data (GeoLife)
>>> url = skmob.utils.constants.GEOLIFE_SAMPLE
>>> df = pd.read_csv(url, sep=',', compression='gzip')
>>> tdf = skmob.TrajDataFrame(df, latitude='lat', longitude='lon', user_id='user', datetime='datetime')
>>> print(tdf.head())
lat lng datetime uid
0 39.984094 116.319236 2008-10-23 05:53:05 1
1 39.984198 116.319322 2008-10-23 05:53:06 1
2 39.984224 116.319402 2008-10-23 05:53:11 1
3 39.984211 116.319389 2008-10-23 05:53:16 1
4 39.984217 116.319422 2008-10-23 05:53:21 1
>>> # detect the stops first
>>> stdf = detection.stops(tdf, stop_radius_factor=0.5, minutes_for_a_stop=20.0, spatial_radius_km=0.2, leaving_time=True)
>>> # cluster the stops
>>> cstdf = clustering.cluster(stdf, cluster_radius_km=0.1, min_samples=1)
>>> print(cstdf.head())
lat lng datetime uid leaving_datetime cluster
0 39.978030 116.327481 2008-10-23 06:01:37 1 2008-10-23 10:32:53 0
1 40.013820 116.306532 2008-10-23 11:10:19 1 2008-10-23 23:45:27 1
2 39.978419 116.326870 2008-10-24 00:21:52 1 2008-10-24 01:47:30 0
3 39.981166 116.308475 2008-10-24 02:02:31 1 2008-10-24 02:30:29 42
4 39.981431 116.309902 2008-10-24 02:30:29 1 2008-10-24 03:16:35 41
>>> print(cstdf.parameters)
{'detect': {'function': 'stops', 'stop_radius_factor': 0.5, 'minutes_for_a_stop': 20.0, 'spatial_radius_km': 0.2, 'leaving_time': True, 'no_data_for_minutes': 1000000000000.0, 'min_speed_kmh': None}, 'cluster': {'function': 'cluster', 'cluster_radius_km': 0.1, 'min_samples': 1}}
References
----------
.. [DBSCAN] DBSCAN implementation, scikit-learn, https://scikit-learn.org/stable/modules/generated/sklearn.cluster.DBSCAN.html
.. [RT2004] Ramaswamy, H. & Toyama, K. (2004) Project Lachesis: parsing and modeling location histories. In International Conference on Geographic Information Science, 106-124, http://kentarotoyama.com/papers/Hariharan_2004_Project_Lachesis.pdf
"""
# Sort
tdf = tdf.sort_by_uid_and_datetime()
# Save function arguments and values in a dictionary
frame = inspect.currentframe()
args, _, _, arg_values = inspect.getargvalues(frame)
arguments = dict([('function', cluster.__name__)]+[(i, arg_values[i]) for i in args[1:]])
groupby = []
if utils.is_multi_user(tdf):
groupby.append(constants.UID)
# if utils.is_multi_trajectory(data):
# groupby.append(constants.TID)
stops_df = tdf
# stops_df = detection.stops(data, stop_radius_factor=0.5, \
# minutes_for_a_stop=20.0, spatial_radius=0.2, leaving_time=True)
if len(groupby) > 0:
# Apply cluster stops to each group of points
ctdf = stops_df.groupby(groupby, group_keys=False, as_index=False).apply(_cluster_trajectory,
cluster_radius_km=cluster_radius_km, min_samples=min_samples).reset_index(drop=True)
else:
ctdf = _cluster_trajectory(stops_df, cluster_radius_km=cluster_radius_km, min_samples=min_samples).reset_index(drop=True)
# TODO: remove the following line when issue #71 (Preserve the TrajDataFrame index during preprocessing operations) is solved.
ctdf.reset_index(inplace=True, drop=True)
ctdf.parameters = tdf.parameters
ctdf.set_parameter(constants.CLUSTERING_PARAMS, arguments)
return ctdf
def _cluster_trajectory(tdf, cluster_radius_km, min_samples):
# From dataframe convert to numpy matrix
lat_lng_dtime_other = list(utils.to_matrix(tdf))
columns_order = list(tdf.columns)
l2x, cluster_IDs = _cluster_array(lat_lng_dtime_other, cluster_radius_km, min_samples)
clusters_df = nparray_to_trajdataframe(lat_lng_dtime_other, utils.get_columns(tdf), {})
# Put back to the original order
clusters_df = clusters_df[columns_order]
clusters_df.loc[:, 'cluster'] = cluster_IDs
return clusters_df
def group_by_label(X, labels):
"""
return a dictionary 'l2x' in which the elements 'x' of list 'X'
are grouped according to 'labels'
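    e.g. group_by_label(['a', 'b', 'c'], [0, 1, 0]) -> {0: ['a', 'c'], 1: ['b']}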
"""
l2x = dict([(l ,[]) for l in set(labels)])
for x ,l in list(zip(X ,labels)):
l2x[l] += [x]
return l2x
def _cluster_array(lat_lng_dtime_other, cluster_radius_km, min_samples, verbose=False):
X = np.array([[point[0], point[1]] for point in lat_lng_dtime_other])
# Compute DBSCAN
eps_rad = cluster_radius_km / kms_per_radian
db = DBSCAN(eps=eps_rad, min_samples=min_samples, algorithm='ball_tree', metric='haversine')
clus = db.fit(np.radians(X))
# core_samples = clus.core_sample_indices_
labels = clus.labels_
# Number of clusters in labels, ignoring noise if present.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
if verbose:
print('Estimated number of clusters: %d' % n_clusters_)
l02x = group_by_label(X, labels)
# Map cluster index to most frequent location: label2fml
c2mfl = dict([(c[1] ,i) for i ,c in \
enumerate(sorted([[len(v) ,l] for l ,v in l02x.items() if l> -0.5], reverse=True))])
l2x = dict([(c2mfl[k], v) for k, v in l02x.items() if k > -0.5])
try:
l2x[-1] = l02x[-1.]
except KeyError:
pass
return l2x, [c2mfl[k] for k in labels if k > -0.5]
def split_trajectories_in_tdf(tdf, stop_tdf):
"""Cluster the points of a TrajDataFrame into trajectories by using stop locations.
Parameters
----------
tdf : TrajDataFrame
original trajectories
stop_tdf : TrajDataFrame
the output of skmob.preprocessing.detection.stops, containing the stop locations of the users in the tdf
Returns
-------
TrajDataFrame
the TrajDataFrame with a new column 'tid' collecting the unique identifier of the trajectory to which the point
belongs.
Examples
--------
>>> import skmob
>>> import pandas as pd
>>> from skmob.preprocessing import detection, clustering
>>> # read the trajectory data (GeoLife)
>>> url = 'https://raw.githubusercontent.com/scikit-mobility/scikit-mobility/master/tutorial/data/geolife_sample.txt.gz'
>>> df = pd.read_csv(url, sep=',', compression='gzip')
>>> tdf = skmob.TrajDataFrame(df, latitude='lat', longitude='lon', user_id='user', datetime='datetime')
>>> print(tdf.head())
lat lng datetime uid
0 39.984094 116.319236 2008-10-23 05:53:05 1
1 39.984198 116.319322 2008-10-23 05:53:06 1
2 39.984224 116.319402 2008-10-23 05:53:11 1
3 39.984211 116.319389 2008-10-23 05:53:16 1
4 39.984217 116.319422 2008-10-23 05:53:21 1
>>> # detect the stops first
>>> stdf = detection.stops(tdf, stop_radius_factor=0.5, minutes_for_a_stop=20.0, spatial_radius_km=0.2, leaving_time=True)
>>> # cluster the trajectories based on the stops
>>> tdf_splitted = split_trajectories_in_tdf(tdf, stdf)
>>> print(tdf_splitted.head())
lat lng datetime uid tid
0 39.984094 116.319236 2008-10-23 05:53:05 1 1
1 39.984198 116.319322 2008-10-23 05:53:06 1 1
2 39.984224 116.319402 2008-10-23 05:53:11 1 1
3 39.984211 116.319389 2008-10-23 05:53:16 1 1
4 39.984217 116.319422 2008-10-23 05:53:21 1 1
"""
tdf_with_tid = tdf.groupby('uid').apply(_split_trajectories, stop_tdf)
return tdf_with_tid.reset_index(drop=True)
def _split_trajectories(tdf, stop_tdf):
c_uid = tdf.uid[:1].item()
stop_tdf_current_user = stop_tdf[stop_tdf.uid == c_uid]
if stop_tdf_current_user.empty:
return
else:
first_traj = [tdf[tdf.datetime <= stop_tdf_current_user.datetime[:1].item()]]
last_traj = [tdf[tdf.datetime >= stop_tdf_current_user.leaving_datetime[-1:].item()]]
all_other_traj = [tdf[(tdf.datetime >= start_traj_time) & (tdf.datetime <= end_traj_time)] for end_traj_time, start_traj_time in zip(stop_tdf_current_user['datetime'][1:], stop_tdf_current_user['leaving_datetime'][:-1])]
all_traj = first_traj + all_other_traj + last_traj
tdf_with_tid = pd.concat(all_traj)
list_tids = [list(np.repeat(i, len(df))) for i, df in zip(range(1,len(all_traj)+1), all_traj)]
list_tids_ravel = [item for sublist in list_tids for item in sublist]
tdf_with_tid['tid'] = list_tids_ravel
return tdf_with_tid
the-stack_106_27601 | """
Example of how to train the Behavioral Cloning (BC) algorithm from scratch.
Also includes notes on how to resume training from an earlier checkpoint,
perform testing/evaluation, and run the baselines from the model_zoo.
"""
import logging
import os
from ilpyt.agents.imitation_agent import ImitationAgent
from ilpyt.algos.bc import BC
from ilpyt.utils.env_utils import build_env
from ilpyt.utils.net_utils import choose_net
from ilpyt.utils.seed_utils import set_seed
def build(
save_path: str,
load_path: str,
env_id: str,
num_env: int,
use_gpu: bool,
seed: int = 24,
):
# Set random seed
set_seed(seed)
# Build environment
env = build_env(env_id=env_id, num_env=num_env, seed=seed)
# Build agent
net = choose_net(env)
agent = ImitationAgent(net=net, lr=0.0001)
# Build algorithm
algo = BC(
agent=agent,
env=env,
use_gpu=use_gpu,
save_path=save_path,
load_path=load_path,
)
return algo
def evaluate_baselines():
envs = [
'MountainCar-v0',
'CartPole-v0',
'LunarLander-v2',
'MountainCarContinuous-v0',
'LunarLanderContinuous-v2',
]
for env in envs:
logging.debug(env)
save_path = os.path.join('logs/BC/', env)
load_path = os.path.join('/mnt/IMLEARN/model_zoo/BC/', env)
# Build experiment -----
algo = build(
save_path=save_path,
load_path=load_path,
env_id=env,
num_env=16,
use_gpu=True,
)
algo.env.close()
algo.test(num_episodes=100)
if __name__ == '__main__':
# NOTE
# To train a new model:
# save_path = 'dir/to/save/to/
# load_path = ''
# To continue training a model:
# save_path = 'dir/to/save/to/
# load_path = 'dir/to/load/from/'
# To test an old model:
# Comment out train() method and algo.agent.load()
# save_path = 'dir/to/save/to/'
# load_path = 'dir/to/load/from/'
if not os.path.exists('logs/BC'):
os.makedirs('logs/BC')
evaluate_baselines()
# LunarLander-v2 -----------------------------------------------------------
save_path = 'logs/BC/LunarLander-v2'
load_path = ''
# Build experiment -----
algo = build(
save_path=save_path,
load_path=load_path,
env_id='LunarLander-v2',
num_env=16,
use_gpu=True,
)
# Train
algo.train(
num_epochs=10000,
batch_size=20,
expert_demos='demos/LunarLander-v2/demos.pkl',
)
    # Close training environment
algo.env.close()
# Load
algo.agent.load(save_path)
# Test
algo.test(num_episodes=100)
# CartPole-v0 --------------------------------------------------------------
save_path = 'logs/BC/CartPole-v0'
load_path = ''
# Build experiment -----
algo = build(
save_path=save_path,
load_path=load_path,
env_id='CartPole-v0',
num_env=16,
use_gpu=True,
)
# Train
algo.train(
num_epochs=10000,
batch_size=20,
expert_demos='demos/CartPole-v0/demos.pkl',
)
    # Close training environment
algo.env.close()
# Load
algo.agent.load(save_path)
# Test
algo.test(num_episodes=100)
# MountainCar-v0 -----------------------------------------------------------
save_path = 'logs/BC/MountainCar-v0'
load_path = ''
# Build experiment -----
algo = build(
save_path=save_path,
load_path=load_path,
env_id='MountainCar-v0',
num_env=16,
use_gpu=True,
)
# Train
algo.train(
num_epochs=10000,
batch_size=20,
expert_demos='demos/MountainCar-v0/demos.pkl',
)
# Close training environment
algo.env.close()
# Load
algo.agent.load(save_path)
# Test
algo.test(num_episodes=100)
# MountainCarContinuous-v0 ----------------------------------------------------------------
save_path = 'logs/BC/MountainCarContinuous-v0'
load_path = ''
# Build experiment -----
algo = build(
save_path=save_path,
load_path=load_path,
env_id='MountainCarContinuous-v0',
num_env=16,
use_gpu=True,
)
# Train
algo.train(
num_epochs=10000,
batch_size=20,
expert_demos='demos/MountainCarContinuous-v0/demos.pkl',
)
# Close training environment
algo.env.close()
# Load
algo.agent.load(save_path)
# Test
algo.test(num_episodes=100)
# LunarLanderContinuous-v2 -------------------------------------------------
save_path = 'logs/BC/LunarLanderContinuous-v2'
load_path = ''
# Build experiment -----
algo = build(
save_path=save_path,
load_path=load_path,
env_id='LunarLanderContinuous-v2',
num_env=16,
use_gpu=True,
)
# Train
algo.train(
num_epochs=10000,
batch_size=20,
expert_demos='demos/LunarLanderContinuous-v2/demos.pkl',
)
# Close training environment
algo.env.close()
# Load
algo.agent.load(save_path)
# Test
algo.test(num_episodes=100)
|
the-stack_106_27604 | from __future__ import annotations
from optparse import SUPPRESS_HELP, OptionParser
import workflows
import workflows.frontend
import workflows.services
import workflows.transport
class ServiceStarter:
"""A helper class to start a workflows service from the command line.
A number of hooks are provided so that this class can be subclassed and
used in a number of scenarios."""
@staticmethod
def on_parser_preparation(parser):
"""Plugin hook to manipulate the OptionParser object before command line
parsing. If a value is returned here it will replace the OptionParser
object."""
@staticmethod
def on_parsing(options, args):
"""Plugin hook to manipulate the command line parsing results.
A tuple of values can be returned, which will replace (options, args).
"""
@staticmethod
def on_transport_factory_preparation(transport_factory):
"""Plugin hook to intercept/manipulate newly created Transport factories
before first invocation."""
@staticmethod
def on_transport_preparation(transport):
"""Plugin hook to intercept/manipulate newly created Transport objects
before connecting."""
@staticmethod
def before_frontend_construction(kwargs):
"""Plugin hook to manipulate the Frontend object constructor arguments. If
a value is returned here it will replace the keyword arguments
dictionary passed to the constructor."""
@staticmethod
def on_frontend_preparation(frontend):
"""Plugin hook to manipulate the Frontend object before starting it. If a
value is returned here it will replace the Frontend object."""
def run(
self,
cmdline_args=None,
program_name="start_service",
version=None,
add_metrics_option: bool = False,
**kwargs,
):
"""Example command line interface to start services.
:param cmdline_args: List of command line arguments to pass to parser
:param program_name: Name of the command line tool to display in help
:param version: Version number to print when run with '--version'
"""
# Enumerate all known services
known_services = sorted(workflows.services.get_known_services())
if version:
version = f"{version} (workflows {workflows.version()})"
else:
version = workflows.version()
# Set up parser
parser = OptionParser(
usage=program_name + " [options]" if program_name else None, version=version
)
parser.add_option("-?", action="help", help=SUPPRESS_HELP)
parser.add_option(
"-s",
"--service",
dest="service",
metavar="SVC",
default=None,
help="Name of the service to start. Known services: "
+ ", ".join(known_services),
)
if add_metrics_option:
parser.add_option(
"-m",
"--metrics",
dest="metrics",
action="store_true",
default=False,
help=(
"Record metrics for this service and expose them on the port defined by"
"the --metrics-port option."
),
)
parser.add_option(
"--metrics-port",
dest="metrics_port",
default=8080,
type="int",
help="Expose metrics via a prometheus endpoint on this port.",
)
workflows.transport.add_command_line_options(parser, transport_argument=True)
# Call on_parser_preparation hook
parser = self.on_parser_preparation(parser) or parser
# Parse command line options
(options, args) = parser.parse_args(cmdline_args)
# Call on_parsing hook
(options, args) = self.on_parsing(options, args) or (options, args)
# Create Transport factory
transport_factory = workflows.transport.lookup(options.transport)
# Call on_transport_factory_preparation hook
transport_factory = (
self.on_transport_factory_preparation(transport_factory)
or transport_factory
)
# Set up on_transport_preparation hook to affect newly created transport objects
true_transport_factory_call = transport_factory.__call__
def on_transport_preparation_hook():
transport_object = true_transport_factory_call()
return self.on_transport_preparation(transport_object) or transport_object
transport_factory.__call__ = on_transport_preparation_hook
# When service name is specified, check if service exists or can be derived
if options.service and options.service not in known_services:
matching = [s for s in known_services if s.startswith(options.service)]
if not matching:
matching = [
s
for s in known_services
if s.lower().startswith(options.service.lower())
]
if matching and len(matching) == 1:
options.service = matching[0]
kwargs.update(
{
"service": options.service,
"transport": transport_factory,
},
)
kwargs.setdefault("environment", {})
if add_metrics_option:
kwargs["environment"]["metrics"] = {"port": options.metrics_port}
# Call before_frontend_construction hook
kwargs = self.before_frontend_construction(kwargs) or kwargs
# Create Frontend object
frontend = workflows.frontend.Frontend(**kwargs)
# Call on_frontend_preparation hook
frontend = self.on_frontend_preparation(frontend) or frontend
# Start Frontend
try:
frontend.run()
except KeyboardInterrupt:
print("\nShutdown via Ctrl+C")
if __name__ == "__main__": # pragma: no cover
ServiceStarter().run()
|
the-stack_106_27605 | # coding: utf-8
"""
Algorithmia Management APIs
APIs for managing actions on the Algorithmia platform # noqa: E501
OpenAPI spec version: 1.0.1
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class ScmConnection(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'scm_type': 'ScmType',
'label': 'str',
'web_endpoint': 'str',
'api_endpoint': 'str',
'client_id': 'str',
'owner_name': 'str'
}
attribute_map = {
'scm_type': 'scm_type',
'label': 'label',
'web_endpoint': 'web_endpoint',
'api_endpoint': 'api_endpoint',
'client_id': 'client_id',
'owner_name': 'owner_name'
}
def __init__(self, scm_type=None, label=None, web_endpoint=None, api_endpoint=None, client_id=None, owner_name=None): # noqa: E501
"""ScmConnection - a model defined in OpenAPI""" # noqa: E501
self._scm_type = None
self._label = None
self._web_endpoint = None
self._api_endpoint = None
self._client_id = None
self._owner_name = None
self.discriminator = None
if scm_type is not None:
self.scm_type = scm_type
if label is not None:
self.label = label
if web_endpoint is not None:
self.web_endpoint = web_endpoint
if api_endpoint is not None:
self.api_endpoint = api_endpoint
if client_id is not None:
self.client_id = client_id
if owner_name is not None:
self.owner_name = owner_name
@property
def scm_type(self):
"""Gets the scm_type of this ScmConnection. # noqa: E501
:return: The scm_type of this ScmConnection. # noqa: E501
:rtype: ScmType
"""
return self._scm_type
@scm_type.setter
def scm_type(self, scm_type):
"""Sets the scm_type of this ScmConnection.
:param scm_type: The scm_type of this ScmConnection. # noqa: E501
:type: ScmType
"""
self._scm_type = scm_type
@property
def label(self):
"""Gets the label of this ScmConnection. # noqa: E501
:return: The label of this ScmConnection. # noqa: E501
:rtype: str
"""
return self._label
@label.setter
def label(self, label):
"""Sets the label of this ScmConnection.
:param label: The label of this ScmConnection. # noqa: E501
:type: str
"""
self._label = label
@property
def web_endpoint(self):
"""Gets the web_endpoint of this ScmConnection. # noqa: E501
:return: The web_endpoint of this ScmConnection. # noqa: E501
:rtype: str
"""
return self._web_endpoint
@web_endpoint.setter
def web_endpoint(self, web_endpoint):
"""Sets the web_endpoint of this ScmConnection.
:param web_endpoint: The web_endpoint of this ScmConnection. # noqa: E501
:type: str
"""
self._web_endpoint = web_endpoint
@property
def api_endpoint(self):
"""Gets the api_endpoint of this ScmConnection. # noqa: E501
:return: The api_endpoint of this ScmConnection. # noqa: E501
:rtype: str
"""
return self._api_endpoint
@api_endpoint.setter
def api_endpoint(self, api_endpoint):
"""Sets the api_endpoint of this ScmConnection.
:param api_endpoint: The api_endpoint of this ScmConnection. # noqa: E501
:type: str
"""
self._api_endpoint = api_endpoint
@property
def client_id(self):
"""Gets the client_id of this ScmConnection. # noqa: E501
:return: The client_id of this ScmConnection. # noqa: E501
:rtype: str
"""
return self._client_id
@client_id.setter
def client_id(self, client_id):
"""Sets the client_id of this ScmConnection.
:param client_id: The client_id of this ScmConnection. # noqa: E501
:type: str
"""
self._client_id = client_id
@property
def owner_name(self):
"""Gets the owner_name of this ScmConnection. # noqa: E501
:return: The owner_name of this ScmConnection. # noqa: E501
:rtype: str
"""
return self._owner_name
@owner_name.setter
def owner_name(self, owner_name):
"""Sets the owner_name of this ScmConnection.
:param owner_name: The owner_name of this ScmConnection. # noqa: E501
:type: str
"""
self._owner_name = owner_name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ScmConnection):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
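# A small illustrative sketch (all values are made up): construct a connection
# record and serialize it with the generated helpers.
if __name__ == "__main__":
    conn = ScmConnection(
        label="internal GitHub Enterprise",
        web_endpoint="https://github.example.com",
        api_endpoint="https://github.example.com/api/v3",
        client_id="abc123",
        owner_name="algo-team",
    )
    # to_dict() walks openapi_types and returns plain Python values
    print(conn.to_dict())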
|
the-stack_106_27607 | from fasteners import (
InterProcessLock,
try_lock,
)
from contextlib import contextmanager
from .path import exists
from ..dochelpers import exc_str
from ..utils import (
ensure_unicode,
get_open_files,
unlink,
)
import logging
lgr = logging.getLogger('datalad.locking')
def _get(entry):
"""A helper to get the value, be it a callable or callable with args, or value
"""
if isinstance(entry, (tuple, list)):
func, args = entry
return func(*args)
elif callable(entry):
return entry()
else:
return entry
@contextmanager
def lock_if_check_fails(
check,
lock_path,
operation=None,
blocking=True,
_return_acquired=False,
**kwargs
):
"""A context manager to establish a lock conditionally on result of a check
It is intended to be used as a lock for a specific file and/or operation,
e.g. for `annex get`ing a file or extracting an archive, so only one process
would be performing such an operation.
If the check fails, the lock is acquired (blocking, by default) and the check
is re-run before the with-block body executes, so the protected operation is
only performed when it is still needed.
`check` and `lock_path` can each be a value, a callable, or a tuple of a
callable and its args
Unfortunately yoh did not find any way in Python 2 to have a context manager
which just skips the entire block if some condition is met (in Python3 there
is ExitStack which could potentially be used). So we would need still to
check in the block body if the context manager return value is not None.
Note also that the lock used (fasteners.InterProcessLock) only works across
processes and would not lock between threads of the same process.
Parameters
----------
check: callable or (callable, args) or value
If value (possibly after calling a callable) evaluates to True, no
lock is acquired, and no context is executed
lock_path: callable or (callable, args) or value
Provides a path for the lock file, composed from that path + '.lck'
extension
operation: str, optional
If provided, would be part of the locking extension
blocking: bool, optional
If True, the process blocks until the lock is acquired, and acquisition is
asserted afterwards
_return_acquired: bool, optional
Return also if lock was acquired. For "private" use within DataLad (tests),
do not rely on it in 3rd party solutions.
**kwargs
Passed to `.acquire` of the fasteners.InterProcessLock
Returns
-------
result of check, lock[, acquired]
"""
check1 = _get(check)
if check1: # we are done - nothing to do
yield check1, None
return
# acquire blocking lock
lock_filename = _get(lock_path)
lock_filename += '.'
if operation:
lock_filename += operation + '-'
lock_filename += 'lck'
lock = InterProcessLock(lock_filename)
acquired = False
try:
lgr.debug("Acquiring a lock %s", lock_filename)
acquired = lock.acquire(blocking=blocking, **kwargs)
lgr.debug("Acquired? lock %s: %s", lock_filename, acquired)
if blocking:
assert acquired
check2 = _get(check)
ret_lock = None if check2 else lock
if _return_acquired:
yield check2, ret_lock, acquired
else:
yield check2, ret_lock
finally:
if acquired:
lgr.debug("Releasing lock %s", lock_filename)
lock.release()
if exists(lock_filename):
unlink(lock_filename)
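# A minimal usage sketch (the archive/target paths and the extract_archive callable
# are assumptions passed in purely for illustration): guard an "extract once"
# operation so that only one process performs it, while concurrent processes block
# on the lock and then find the check already satisfied.
def _example_extract_once(archive, target, extract_archive):
    """Illustrative only: extract `archive` into `target` at most once."""
    with lock_if_check_fails(
            check=(exists, (target,)),   # value, callable, or (callable, args)
            lock_path=target,
            operation='extract') as (check_result, lock):
        # lock is None when check_result is already truthy
        if not check_result:
            # we hold the lock and the target is still missing -- do the work
            extract_archive(archive, target)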
@contextmanager
def try_lock_informatively(lock, purpose=None, timeouts=(5, 60, 240), proceed_unlocked=False):
"""Try to acquire lock (while blocking) multiple times while logging INFO messages on failure
Primary use case is for operations which are user-visible and thus should not lock
indefinitely or for long periods of time without feedback (or the user would just
hit Ctrl-C).
Parameters
----------
lock: fasteners._InterProcessLock
purpose: str, optional
    Short description of what the lock is for; included in log messages.
timeouts: tuple or list, optional
    Successive timeouts (in seconds) for the blocking acquisition attempts.
proceed_unlocked: bool, optional
    If True, proceed (yielding False) after all attempts fail instead of
    raising RuntimeError.
"""
purpose = " to " + str(purpose) if purpose else ''
# could be bytes, making formatting trickier
lock_path = ensure_unicode(lock.path)
def get_pids_msg():
try:
pids = get_open_files(lock_path)
if pids:
proc = pids[lock_path]
return f'Check following process: PID={proc.pid} CWD={proc.cwd()} CMDLINE={proc.cmdline()}.'
else:
return 'Stale lock? I found no processes using it.'
except Exception as exc:
lgr.debug(
"Failed to get a list of processes which 'posses' the file %s: %s",
lock_path,
exc_str(exc)
)
return 'Another process is using it (failed to determine one)?'
lgr.debug("Acquiring a currently %s lock%s. If stalls - check which process holds %s",
("existing" if lock.exists() else "absent"),
purpose,
lock_path)
was_locked = False # name of var the same as of within fasteners.try_lock
assert timeouts # we expect non-empty timeouts
try:
for trial, timeout in enumerate(timeouts):
was_locked = lock.acquire(blocking=True, timeout=timeout)
if not was_locked:
if trial < len(timeouts) - 1:
msg = " Will try again and wait for up to %4g seconds." % (timeouts[trial+1],)
else: # It was the last attempt
if proceed_unlocked:
msg = " Will proceed without locking."
else:
msg = ""
lgr.info("Failed to acquire lock%s at %s in %4g seconds. %s%s",
purpose, lock_path, timeout, get_pids_msg(), msg)
else:
yield True
return
else:
assert not was_locked
if proceed_unlocked:
yield False
else:
raise RuntimeError(
"Failed to acquire lock%s at %s in %d attempts.%s"
% (purpose, lock_path, len(timeouts), get_pids_msg()))
finally:
if was_locked:
lock.release()
|